nwo (string, 5-86 chars) | sha (string, 40 chars) | path (string, 4-189 chars) | language (string, 1 class) | identifier (string, 1-94 chars) | parameters (string, 2-4.03k chars) | argument_list (string, 1 class) | return_statement (string, 0-11.5k chars) | docstring (string, 1-33.2k chars) | docstring_summary (string, 0-5.15k chars) | docstring_tokens (sequence) | function (string, 34-151k chars) | function_tokens (sequence) | url (string, 90-278 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pyio.py | python | IOBase._checkReadable | (self, msg=None) | Internal: raise UnsupportedOperation if file is not readable | Internal: raise UnsupportedOperation if file is not readable | [
"Internal",
":",
"raise",
"UnsupportedOperation",
"if",
"file",
"is",
"not",
"readable"
] | def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg) | [
"def",
"_checkReadable",
"(",
"self",
",",
"msg",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"readable",
"(",
")",
":",
"raise",
"UnsupportedOperation",
"(",
"\"File or stream is not readable.\"",
"if",
"msg",
"is",
"None",
"else",
"msg",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pyio.py#L410-L415 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/prompt-toolkit/py3/prompt_toolkit/history.py | python | ThreadedHistory.load | (self) | Like `History.load(), but call `self.load_history_strings()` in a
background thread. | Like `History.load(), but call `self.load_history_strings()` in a
background thread. | [
"Like",
"History",
".",
"load",
"()",
"but",
"call",
"self",
".",
"load_history_strings",
"()",
"in",
"a",
"background",
"thread",
"."
] | async def load(self) -> AsyncGenerator[str, None]:
"""
Like `History.load(), but call `self.load_history_strings()` in a
background thread.
"""
# Start the load thread, if this is called for the first time.
if not self._load_thread:
self._load_thread = threading.Thread(
target=self._in_load_thread,
daemon=True,
)
self._load_thread.start()
# Consume the `_loaded_strings` list, using asyncio.
loop = get_event_loop()
# Create threading Event so that we can wait for new items.
event = threading.Event()
event.set()
self._string_load_events.append(event)
items_yielded = 0
try:
while True:
# Wait for new items to be available.
# (Use a timeout, because the executor thread is not a daemon
# thread. The "slow-history.py" example would otherwise hang if
# Control-C is pressed before the history is fully loaded,
# because there's still this non-daemon executor thread waiting
# for this event.)
got_timeout = await loop.run_in_executor(
None, lambda: event.wait(timeout=0.5)
)
if not got_timeout:
continue
# Read new items (in lock).
def in_executor() -> Tuple[List[str], bool]:
with self._lock:
new_items = self._loaded_strings[items_yielded:]
done = self._loaded
event.clear()
return new_items, done
new_items, done = await loop.run_in_executor(None, in_executor)
items_yielded += len(new_items)
for item in new_items:
yield item
if done:
break
finally:
self._string_load_events.remove(event) | [
"async",
"def",
"load",
"(",
"self",
")",
"->",
"AsyncGenerator",
"[",
"str",
",",
"None",
"]",
":",
"# Start the load thread, if this is called for the first time.",
"if",
"not",
"self",
".",
"_load_thread",
":",
"self",
".",
"_load_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_in_load_thread",
",",
"daemon",
"=",
"True",
",",
")",
"self",
".",
"_load_thread",
".",
"start",
"(",
")",
"# Consume the `_loaded_strings` list, using asyncio.",
"loop",
"=",
"get_event_loop",
"(",
")",
"# Create threading Event so that we can wait for new items.",
"event",
"=",
"threading",
".",
"Event",
"(",
")",
"event",
".",
"set",
"(",
")",
"self",
".",
"_string_load_events",
".",
"append",
"(",
"event",
")",
"items_yielded",
"=",
"0",
"try",
":",
"while",
"True",
":",
"# Wait for new items to be available.",
"# (Use a timeout, because the executor thread is not a daemon",
"# thread. The \"slow-history.py\" example would otherwise hang if",
"# Control-C is pressed before the history is fully loaded,",
"# because there's still this non-daemon executor thread waiting",
"# for this event.)",
"got_timeout",
"=",
"await",
"loop",
".",
"run_in_executor",
"(",
"None",
",",
"lambda",
":",
"event",
".",
"wait",
"(",
"timeout",
"=",
"0.5",
")",
")",
"if",
"not",
"got_timeout",
":",
"continue",
"# Read new items (in lock).",
"def",
"in_executor",
"(",
")",
"->",
"Tuple",
"[",
"List",
"[",
"str",
"]",
",",
"bool",
"]",
":",
"with",
"self",
".",
"_lock",
":",
"new_items",
"=",
"self",
".",
"_loaded_strings",
"[",
"items_yielded",
":",
"]",
"done",
"=",
"self",
".",
"_loaded",
"event",
".",
"clear",
"(",
")",
"return",
"new_items",
",",
"done",
"new_items",
",",
"done",
"=",
"await",
"loop",
".",
"run_in_executor",
"(",
"None",
",",
"in_executor",
")",
"items_yielded",
"+=",
"len",
"(",
"new_items",
")",
"for",
"item",
"in",
"new_items",
":",
"yield",
"item",
"if",
"done",
":",
"break",
"finally",
":",
"self",
".",
"_string_load_events",
".",
"remove",
"(",
"event",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py3/prompt_toolkit/history.py#L124-L179 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/aui/framemanager.py | python | AuiPaneInfo.Dockable | (self, b=True) | return self.TopDockable(b).BottomDockable(b).LeftDockable(b).RightDockable(b) | Specifies whether a frame can be docked or not. It is the same as specifying
:meth:`TopDockable` . :meth:`BottomDockable` . :meth:`LeftDockable` . :meth:`RightDockable` .
:param bool `b`: whether the frame can be docked or not. | Specifies whether a frame can be docked or not. It is the same as specifying
:meth:`TopDockable` . :meth:`BottomDockable` . :meth:`LeftDockable` . :meth:`RightDockable` . | [
"Specifies",
"whether",
"a",
"frame",
"can",
"be",
"docked",
"or",
"not",
".",
"It",
"is",
"the",
"same",
"as",
"specifying",
":",
"meth",
":",
"TopDockable",
".",
":",
"meth",
":",
"BottomDockable",
".",
":",
"meth",
":",
"LeftDockable",
".",
":",
"meth",
":",
"RightDockable",
"."
] | def Dockable(self, b=True):
"""
Specifies whether a frame can be docked or not. It is the same as specifying
:meth:`TopDockable` . :meth:`BottomDockable` . :meth:`LeftDockable` . :meth:`RightDockable` .
:param bool `b`: whether the frame can be docked or not.
"""
return self.TopDockable(b).BottomDockable(b).LeftDockable(b).RightDockable(b) | [
"def",
"Dockable",
"(",
"self",
",",
"b",
"=",
"True",
")",
":",
"return",
"self",
".",
"TopDockable",
"(",
"b",
")",
".",
"BottomDockable",
"(",
"b",
")",
".",
"LeftDockable",
"(",
"b",
")",
".",
"RightDockable",
"(",
"b",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/framemanager.py#L1609-L1617 |
|
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/contributed/sumopy/coremodules/misc/shapeformat.py | python | facilities_to_shapefile | (facilities, filepath, dataname='facilitydata',
is_access=True, parent=None, log=None) | return True | Export network edges to shapefile. | Export network edges to shapefile. | [
"Export",
"network",
"edges",
"to",
"shapefile",
"."
] | def facilities_to_shapefile(facilities, filepath, dataname='facilitydata',
is_access=True, parent=None, log=None):
"""
Export network edges to shapefile.
"""
net = facilities.get_net()
shapedata = Shapedata(parent, dataname, name='Facilities shape data',
filepath=filepath,
shapetype=SHAPETYPES['PolyLine'],
projparams_shape=net.get_projparams(),
offset=net.get_offset(), log=log)
#attrname, ftype, flen, fdigit = field
attrlist = [
('id', 'id', 'ID_FACIL', 'N', 32, 0),
('ids_roadedge_closest', 'id', 'ID_ARC', 'N', 32, 0),
('positions_roadedge_closest', 'id', 'POS_ARC', 'N', 12, 5),
('osmkeys', 'val', 'OSMKEY', 'C', 32, 0),
]
print 'facilities_to_shapefile', filepath
for attr in attrlist:
shapedata.add_field(attr[2:])
ids_fac = facilities.get_ids()
ids_shape = shapedata.add_rows(len(ids_fac))
# print ' shapedata.ID_ARC',shapedata.ID_ARC,'dir',dir(shapedata.ID_ARC)
shapedata.ID_FACIL[ids_shape] = ids_fac
shapedata.shapes[ids_shape] = facilities.shapes[ids_fac]
for netattrname, gettype, shapeattrname, x1, x2, x3 in attrlist:
if netattrname not in ('id',):
getattr(shapedata, shapeattrname)[ids_shape] = getattr(facilities, netattrname)[ids_fac]
shapedata.adjust_fieldlength()
shapedata.export_shapefile()
return True | [
"def",
"facilities_to_shapefile",
"(",
"facilities",
",",
"filepath",
",",
"dataname",
"=",
"'facilitydata'",
",",
"is_access",
"=",
"True",
",",
"parent",
"=",
"None",
",",
"log",
"=",
"None",
")",
":",
"net",
"=",
"facilities",
".",
"get_net",
"(",
")",
"shapedata",
"=",
"Shapedata",
"(",
"parent",
",",
"dataname",
",",
"name",
"=",
"'Facilities shape data'",
",",
"filepath",
"=",
"filepath",
",",
"shapetype",
"=",
"SHAPETYPES",
"[",
"'PolyLine'",
"]",
",",
"projparams_shape",
"=",
"net",
".",
"get_projparams",
"(",
")",
",",
"offset",
"=",
"net",
".",
"get_offset",
"(",
")",
",",
"log",
"=",
"log",
")",
"#attrname, ftype, flen, fdigit = field",
"attrlist",
"=",
"[",
"(",
"'id'",
",",
"'id'",
",",
"'ID_FACIL'",
",",
"'N'",
",",
"32",
",",
"0",
")",
",",
"(",
"'ids_roadedge_closest'",
",",
"'id'",
",",
"'ID_ARC'",
",",
"'N'",
",",
"32",
",",
"0",
")",
",",
"(",
"'positions_roadedge_closest'",
",",
"'id'",
",",
"'POS_ARC'",
",",
"'N'",
",",
"12",
",",
"5",
")",
",",
"(",
"'osmkeys'",
",",
"'val'",
",",
"'OSMKEY'",
",",
"'C'",
",",
"32",
",",
"0",
")",
",",
"]",
"print",
"'facilities_to_shapefile'",
",",
"filepath",
"for",
"attr",
"in",
"attrlist",
":",
"shapedata",
".",
"add_field",
"(",
"attr",
"[",
"2",
":",
"]",
")",
"ids_fac",
"=",
"facilities",
".",
"get_ids",
"(",
")",
"ids_shape",
"=",
"shapedata",
".",
"add_rows",
"(",
"len",
"(",
"ids_fac",
")",
")",
"# print ' shapedata.ID_ARC',shapedata.ID_ARC,'dir',dir(shapedata.ID_ARC)",
"shapedata",
".",
"ID_FACIL",
"[",
"ids_shape",
"]",
"=",
"ids_fac",
"shapedata",
".",
"shapes",
"[",
"ids_shape",
"]",
"=",
"facilities",
".",
"shapes",
"[",
"ids_fac",
"]",
"for",
"netattrname",
",",
"gettype",
",",
"shapeattrname",
",",
"x1",
",",
"x2",
",",
"x3",
"in",
"attrlist",
":",
"if",
"netattrname",
"not",
"in",
"(",
"'id'",
",",
")",
":",
"getattr",
"(",
"shapedata",
",",
"shapeattrname",
")",
"[",
"ids_shape",
"]",
"=",
"getattr",
"(",
"facilities",
",",
"netattrname",
")",
"[",
"ids_fac",
"]",
"shapedata",
".",
"adjust_fieldlength",
"(",
")",
"shapedata",
".",
"export_shapefile",
"(",
")",
"return",
"True"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/coremodules/misc/shapeformat.py#L1179-L1217 |
|
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | tools/code_coverage/coverage_posix.py | python | RunProgramThread._run_posix | (self) | No deadlock problem so use the simple answer. The windows solution
appears to add extra buffering which we don't want on other platforms. | No deadlock problem so use the simple answer. The windows solution
appears to add extra buffering which we don't want on other platforms. | [
"No",
"deadlock",
"problem",
"so",
"use",
"the",
"simple",
"answer",
".",
"The",
"windows",
"solution",
"appears",
"to",
"add",
"extra",
"buffering",
"which",
"we",
"don",
"t",
"want",
"on",
"other",
"platforms",
"."
] | def _run_posix(self):
"""No deadlock problem so use the simple answer. The windows solution
appears to add extra buffering which we don't want on other platforms."""
self._process = subprocess.Popen(self._cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
gChildPIDs.append(self._process.pid)
try:
while True:
line = self._process.stdout.readline()
if not line: # EOF
break
print line,
self._queue.put(RunProgramThread.PROGRESS, True)
except IOError:
pass
# If we get here the process is done.
gChildPIDs.remove(self._process.pid)
self._queue.put(RunProgramThread.DONE) | [
"def",
"_run_posix",
"(",
"self",
")",
":",
"self",
".",
"_process",
"=",
"subprocess",
".",
"Popen",
"(",
"self",
".",
"_cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"gChildPIDs",
".",
"append",
"(",
"self",
".",
"_process",
".",
"pid",
")",
"try",
":",
"while",
"True",
":",
"line",
"=",
"self",
".",
"_process",
".",
"stdout",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"# EOF",
"break",
"print",
"line",
",",
"self",
".",
"_queue",
".",
"put",
"(",
"RunProgramThread",
".",
"PROGRESS",
",",
"True",
")",
"except",
"IOError",
":",
"pass",
"# If we get here the process is done.",
"gChildPIDs",
".",
"remove",
"(",
"self",
".",
"_process",
".",
"pid",
")",
"self",
".",
"_queue",
".",
"put",
"(",
"RunProgramThread",
".",
"DONE",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/code_coverage/coverage_posix.py#L233-L251 |
||
lhmRyan/deep-supervised-hashing-DSH | 631901f82e2ab031fbac33f914a5b08ef8e21d57 | scripts/cpp_lint.py | python | _CppLintState.SetVerboseLevel | (self, level) | return last_verbose_level | Sets the module's verbosity, and returns the previous setting. | Sets the module's verbosity, and returns the previous setting. | [
"Sets",
"the",
"module",
"s",
"verbosity",
"and",
"returns",
"the",
"previous",
"setting",
"."
] | def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level | [
"def",
"SetVerboseLevel",
"(",
"self",
",",
"level",
")",
":",
"last_verbose_level",
"=",
"self",
".",
"verbose_level",
"self",
".",
"verbose_level",
"=",
"level",
"return",
"last_verbose_level"
] | https://github.com/lhmRyan/deep-supervised-hashing-DSH/blob/631901f82e2ab031fbac33f914a5b08ef8e21d57/scripts/cpp_lint.py#L707-L711 |
|
qt/qtbase | 81b9ee66b8e40ed145185fe46b7c91929688cafd | util/locale_database/cldr.py | python | CldrAccess.fileLocales | (self) | Generator for locale IDs seen in file-names.
All *.xml other than root.xml in common/main/ are assumed to
identify locales. | Generator for locale IDs seen in file-names. | [
"Generator",
"for",
"locale",
"IDs",
"seen",
"in",
"file",
"-",
"names",
"."
] | def fileLocales(self) -> Iterable[str]:
"""Generator for locale IDs seen in file-names.
All *.xml other than root.xml in common/main/ are assumed to
identify locales."""
for path in self.root.joinpath('common/main').glob('*.xml'):
if path.stem != 'root':
yield path.stem | [
"def",
"fileLocales",
"(",
"self",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"for",
"path",
"in",
"self",
".",
"root",
".",
"joinpath",
"(",
"'common/main'",
")",
".",
"glob",
"(",
"'*.xml'",
")",
":",
"if",
"path",
".",
"stem",
"!=",
"'root'",
":",
"yield",
"path",
".",
"stem"
] | https://github.com/qt/qtbase/blob/81b9ee66b8e40ed145185fe46b7c91929688cafd/util/locale_database/cldr.py#L283-L290 |
||
kamyu104/LeetCode-Solutions | 77605708a927ea3b85aee5a479db733938c7c211 | Python/maximum-score-from-performing-multiplication-operations.py | python | Solution.maximumScore | (self, nums, multipliers) | return dp[0] | :type nums: List[int]
:type multipliers: List[int]
:rtype: int | :type nums: List[int]
:type multipliers: List[int]
:rtype: int | [
":",
"type",
"nums",
":",
"List",
"[",
"int",
"]",
":",
"type",
"multipliers",
":",
"List",
"[",
"int",
"]",
":",
"rtype",
":",
"int"
] | def maximumScore(self, nums, multipliers):
"""
:type nums: List[int]
:type multipliers: List[int]
:rtype: int
"""
dp = [0]*(len(multipliers)+1)
for l, m in enumerate(reversed(multipliers), start=len(nums)-len(multipliers)):
dp = [max(m*nums[i]+dp[i+1], m*nums[i+l]+dp[i]) for i in xrange(len(dp)-1)]
return dp[0] | [
"def",
"maximumScore",
"(",
"self",
",",
"nums",
",",
"multipliers",
")",
":",
"dp",
"=",
"[",
"0",
"]",
"*",
"(",
"len",
"(",
"multipliers",
")",
"+",
"1",
")",
"for",
"l",
",",
"m",
"in",
"enumerate",
"(",
"reversed",
"(",
"multipliers",
")",
",",
"start",
"=",
"len",
"(",
"nums",
")",
"-",
"len",
"(",
"multipliers",
")",
")",
":",
"dp",
"=",
"[",
"max",
"(",
"m",
"*",
"nums",
"[",
"i",
"]",
"+",
"dp",
"[",
"i",
"+",
"1",
"]",
",",
"m",
"*",
"nums",
"[",
"i",
"+",
"l",
"]",
"+",
"dp",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"dp",
")",
"-",
"1",
")",
"]",
"return",
"dp",
"[",
"0",
"]"
] | https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/maximum-score-from-performing-multiplication-operations.py#L5-L14 |
|
lmb-freiburg/ogn | 974f72ef4bf840d6f6693d22d1843a79223e77ce | python/caffe/io.py | python | Transformer.set_channel_swap | (self, in_, order) | Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example. | Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose. | [
"Set",
"the",
"input",
"channel",
"order",
"for",
"e",
".",
"g",
".",
"RGB",
"to",
"BGR",
"conversion",
"as",
"needed",
"for",
"the",
"reference",
"ImageNet",
"model",
".",
"N",
".",
"B",
".",
"this",
"assumes",
"the",
"channels",
"are",
"the",
"first",
"dimension",
"AFTER",
"transpose",
"."
] | def set_channel_swap(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
self.__check_input(in_)
if len(order) != self.inputs[in_][1]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
self.channel_swap[in_] = order | [
"def",
"set_channel_swap",
"(",
"self",
",",
"in_",
",",
"order",
")",
":",
"self",
".",
"__check_input",
"(",
"in_",
")",
"if",
"len",
"(",
"order",
")",
"!=",
"self",
".",
"inputs",
"[",
"in_",
"]",
"[",
"1",
"]",
":",
"raise",
"Exception",
"(",
"'Channel swap needs to have the same number of '",
"'dimensions as the input channels.'",
")",
"self",
".",
"channel_swap",
"[",
"in_",
"]",
"=",
"order"
] | https://github.com/lmb-freiburg/ogn/blob/974f72ef4bf840d6f6693d22d1843a79223e77ce/python/caffe/io.py#L203-L219 |
||
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | Alignment/MuonAlignment/python/svgfig.py | python | Frame.__init__ | (self, xmin, xmax, ymin, ymax, *d, **kwds) | Acts like Fig, but draws a coordinate frame around the data. You also need to supply plot ranges.
Frame(xmin, xmax, ymin, ymax, obj, obj, obj..., keyword options...)
xmin, xmax required minimum and maximum x values (in the objs' coordinates)
ymin, ymax required minimum and maximum y values (in the objs' coordinates)
obj optional list drawing primatives
keyword options keyword list options defined below
The following are keyword options, with their default values:
x, y 20, 5 upper-left corner of the Frame in SVG coordinates
width, height 75, 80 width and height of the Frame in SVG coordinates
flipx, flipy False, True flip the sign of the coordinate axis
minusInfinity -1000 if an axis is logarithmic and an object is plotted at 0 or
a negative value, -1000 will be used as a stand-in for NaN
xtitle None if a string, label the x axis
xticks -10 request ticks according to the standard tick specification
(see help(Ticks))
xminiticks True request miniticks according to the standard minitick
specification
xlabels True request tick labels according to the standard tick label
specification
xlogbase None if a number, the axis and transformation are logarithmic
with ticks at the given base (10 being the most common)
(same for y)
text_attr {} a dictionary of attributes for label text
axis_attr {} a dictionary of attributes for the axis lines | Acts like Fig, but draws a coordinate frame around the data. You also need to supply plot ranges. | [
"Acts",
"like",
"Fig",
"but",
"draws",
"a",
"coordinate",
"frame",
"around",
"the",
"data",
".",
"You",
"also",
"need",
"to",
"supply",
"plot",
"ranges",
"."
] | def __init__(self, xmin, xmax, ymin, ymax, *d, **kwds):
"""Acts like Fig, but draws a coordinate frame around the data. You also need to supply plot ranges.
Frame(xmin, xmax, ymin, ymax, obj, obj, obj..., keyword options...)
xmin, xmax required minimum and maximum x values (in the objs' coordinates)
ymin, ymax required minimum and maximum y values (in the objs' coordinates)
obj optional list drawing primatives
keyword options keyword list options defined below
The following are keyword options, with their default values:
x, y 20, 5 upper-left corner of the Frame in SVG coordinates
width, height 75, 80 width and height of the Frame in SVG coordinates
flipx, flipy False, True flip the sign of the coordinate axis
minusInfinity -1000 if an axis is logarithmic and an object is plotted at 0 or
a negative value, -1000 will be used as a stand-in for NaN
xtitle None if a string, label the x axis
xticks -10 request ticks according to the standard tick specification
(see help(Ticks))
xminiticks True request miniticks according to the standard minitick
specification
xlabels True request tick labels according to the standard tick label
specification
xlogbase None if a number, the axis and transformation are logarithmic
with ticks at the given base (10 being the most common)
(same for y)
text_attr {} a dictionary of attributes for label text
axis_attr {} a dictionary of attributes for the axis lines
"""
self.xmin, self.xmax, self.ymin, self.ymax = xmin, xmax, ymin, ymax
self.d = list(d)
defaults = {"x":20, "y":5, "width":75, "height":80, "flipx":False, "flipy":True, "minusInfinity":-1000, \
"xtitle":None, "xticks":-10, "xminiticks":True, "xlabels":True, "x2labels":None, "xlogbase":None, \
"ytitle":None, "yticks":-10, "yminiticks":True, "ylabels":True, "y2labels":None, "ylogbase":None, \
"text_attr":{}, "axis_attr":{}}
defaults.update(kwds)
kwds = defaults
self.x = kwds["x"]; del kwds["x"]
self.y = kwds["y"]; del kwds["y"]
self.width = kwds["width"]; del kwds["width"]
self.height = kwds["height"]; del kwds["height"]
self.flipx = kwds["flipx"]; del kwds["flipx"]
self.flipy = kwds["flipy"]; del kwds["flipy"]
self.minusInfinity = kwds["minusInfinity"]; del kwds["minusInfinity"]
self.xtitle = kwds["xtitle"]; del kwds["xtitle"]
self.xticks = kwds["xticks"]; del kwds["xticks"]
self.xminiticks = kwds["xminiticks"]; del kwds["xminiticks"]
self.xlabels = kwds["xlabels"]; del kwds["xlabels"]
self.x2labels = kwds["x2labels"]; del kwds["x2labels"]
self.xlogbase = kwds["xlogbase"]; del kwds["xlogbase"]
self.ytitle = kwds["ytitle"]; del kwds["ytitle"]
self.yticks = kwds["yticks"]; del kwds["yticks"]
self.yminiticks = kwds["yminiticks"]; del kwds["yminiticks"]
self.ylabels = kwds["ylabels"]; del kwds["ylabels"]
self.y2labels = kwds["y2labels"]; del kwds["y2labels"]
self.ylogbase = kwds["ylogbase"]; del kwds["ylogbase"]
self.text_attr = dict(self.text_defaults)
self.text_attr.update(kwds["text_attr"]); del kwds["text_attr"]
self.axis_attr = dict(self.axis_defaults)
self.axis_attr.update(kwds["axis_attr"]); del kwds["axis_attr"]
if len(kwds) != 0:
raise TypeError("Frame() got unexpected keyword arguments %s" % kwds.keys()) | [
"def",
"__init__",
"(",
"self",
",",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
",",
"*",
"d",
",",
"*",
"*",
"kwds",
")",
":",
"self",
".",
"xmin",
",",
"self",
".",
"xmax",
",",
"self",
".",
"ymin",
",",
"self",
".",
"ymax",
"=",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
"self",
".",
"d",
"=",
"list",
"(",
"d",
")",
"defaults",
"=",
"{",
"\"x\"",
":",
"20",
",",
"\"y\"",
":",
"5",
",",
"\"width\"",
":",
"75",
",",
"\"height\"",
":",
"80",
",",
"\"flipx\"",
":",
"False",
",",
"\"flipy\"",
":",
"True",
",",
"\"minusInfinity\"",
":",
"-",
"1000",
",",
"\"xtitle\"",
":",
"None",
",",
"\"xticks\"",
":",
"-",
"10",
",",
"\"xminiticks\"",
":",
"True",
",",
"\"xlabels\"",
":",
"True",
",",
"\"x2labels\"",
":",
"None",
",",
"\"xlogbase\"",
":",
"None",
",",
"\"ytitle\"",
":",
"None",
",",
"\"yticks\"",
":",
"-",
"10",
",",
"\"yminiticks\"",
":",
"True",
",",
"\"ylabels\"",
":",
"True",
",",
"\"y2labels\"",
":",
"None",
",",
"\"ylogbase\"",
":",
"None",
",",
"\"text_attr\"",
":",
"{",
"}",
",",
"\"axis_attr\"",
":",
"{",
"}",
"}",
"defaults",
".",
"update",
"(",
"kwds",
")",
"kwds",
"=",
"defaults",
"self",
".",
"x",
"=",
"kwds",
"[",
"\"x\"",
"]",
"del",
"kwds",
"[",
"\"x\"",
"]",
"self",
".",
"y",
"=",
"kwds",
"[",
"\"y\"",
"]",
"del",
"kwds",
"[",
"\"y\"",
"]",
"self",
".",
"width",
"=",
"kwds",
"[",
"\"width\"",
"]",
"del",
"kwds",
"[",
"\"width\"",
"]",
"self",
".",
"height",
"=",
"kwds",
"[",
"\"height\"",
"]",
"del",
"kwds",
"[",
"\"height\"",
"]",
"self",
".",
"flipx",
"=",
"kwds",
"[",
"\"flipx\"",
"]",
"del",
"kwds",
"[",
"\"flipx\"",
"]",
"self",
".",
"flipy",
"=",
"kwds",
"[",
"\"flipy\"",
"]",
"del",
"kwds",
"[",
"\"flipy\"",
"]",
"self",
".",
"minusInfinity",
"=",
"kwds",
"[",
"\"minusInfinity\"",
"]",
"del",
"kwds",
"[",
"\"minusInfinity\"",
"]",
"self",
".",
"xtitle",
"=",
"kwds",
"[",
"\"xtitle\"",
"]",
"del",
"kwds",
"[",
"\"xtitle\"",
"]",
"self",
".",
"xticks",
"=",
"kwds",
"[",
"\"xticks\"",
"]",
"del",
"kwds",
"[",
"\"xticks\"",
"]",
"self",
".",
"xminiticks",
"=",
"kwds",
"[",
"\"xminiticks\"",
"]",
"del",
"kwds",
"[",
"\"xminiticks\"",
"]",
"self",
".",
"xlabels",
"=",
"kwds",
"[",
"\"xlabels\"",
"]",
"del",
"kwds",
"[",
"\"xlabels\"",
"]",
"self",
".",
"x2labels",
"=",
"kwds",
"[",
"\"x2labels\"",
"]",
"del",
"kwds",
"[",
"\"x2labels\"",
"]",
"self",
".",
"xlogbase",
"=",
"kwds",
"[",
"\"xlogbase\"",
"]",
"del",
"kwds",
"[",
"\"xlogbase\"",
"]",
"self",
".",
"ytitle",
"=",
"kwds",
"[",
"\"ytitle\"",
"]",
"del",
"kwds",
"[",
"\"ytitle\"",
"]",
"self",
".",
"yticks",
"=",
"kwds",
"[",
"\"yticks\"",
"]",
"del",
"kwds",
"[",
"\"yticks\"",
"]",
"self",
".",
"yminiticks",
"=",
"kwds",
"[",
"\"yminiticks\"",
"]",
"del",
"kwds",
"[",
"\"yminiticks\"",
"]",
"self",
".",
"ylabels",
"=",
"kwds",
"[",
"\"ylabels\"",
"]",
"del",
"kwds",
"[",
"\"ylabels\"",
"]",
"self",
".",
"y2labels",
"=",
"kwds",
"[",
"\"y2labels\"",
"]",
"del",
"kwds",
"[",
"\"y2labels\"",
"]",
"self",
".",
"ylogbase",
"=",
"kwds",
"[",
"\"ylogbase\"",
"]",
"del",
"kwds",
"[",
"\"ylogbase\"",
"]",
"self",
".",
"text_attr",
"=",
"dict",
"(",
"self",
".",
"text_defaults",
")",
"self",
".",
"text_attr",
".",
"update",
"(",
"kwds",
"[",
"\"text_attr\"",
"]",
")",
"del",
"kwds",
"[",
"\"text_attr\"",
"]",
"self",
".",
"axis_attr",
"=",
"dict",
"(",
"self",
".",
"axis_defaults",
")",
"self",
".",
"axis_attr",
".",
"update",
"(",
"kwds",
"[",
"\"axis_attr\"",
"]",
")",
"del",
"kwds",
"[",
"\"axis_attr\"",
"]",
"if",
"len",
"(",
"kwds",
")",
"!=",
"0",
":",
"raise",
"TypeError",
"(",
"\"Frame() got unexpected keyword arguments %s\"",
"%",
"kwds",
".",
"keys",
"(",
")",
")"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/Alignment/MuonAlignment/python/svgfig.py#L887-L954 |
||
stan-dev/math | 5fd79f89933269a4ca4d8dd1fde2a36d53d4768c | lib/cpplint_1.4.5/cpplint.py | python | _CppLintState.ResetErrorCounts | (self) | Sets the module's error statistic back to zero. | Sets the module's error statistic back to zero. | [
"Sets",
"the",
"module",
"s",
"error",
"statistic",
"back",
"to",
"zero",
"."
] | def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {} | [
"def",
"ResetErrorCounts",
"(",
"self",
")",
":",
"self",
".",
"error_count",
"=",
"0",
"self",
".",
"errors_by_category",
"=",
"{",
"}"
] | https://github.com/stan-dev/math/blob/5fd79f89933269a4ca4d8dd1fde2a36d53d4768c/lib/cpplint_1.4.5/cpplint.py#L1087-L1090 |
||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/git/for-all-touched-files.py | python | FilenamesFromGit | (branch_name, extensions) | return filenames | Provides a list of all new and modified files listed by [git diff
branch_name] where branch_name can be blank to get a diff of the
workspace.
Excludes deleted files.
If extensions is not an empty list, include only files with one of
the extensions on the list. | Provides a list of all new and modified files listed by [git diff
branch_name] where branch_name can be blank to get a diff of the
workspace. | [
"Provides",
"a",
"list",
"of",
"all",
"new",
"and",
"modified",
"files",
"listed",
"by",
"[",
"git",
"diff",
"branch_name",
"]",
"where",
"branch_name",
"can",
"be",
"blank",
"to",
"get",
"a",
"diff",
"of",
"the",
"workspace",
"."
] | def FilenamesFromGit(branch_name, extensions):
"""Provides a list of all new and modified files listed by [git diff
branch_name] where branch_name can be blank to get a diff of the
workspace.
Excludes deleted files.
If extensions is not an empty list, include only files with one of
the extensions on the list.
"""
lines = GitShell('git diff --stat=600,500 %s' % branch_name)
filenames = []
for line in lines:
line = line.lstrip()
# Avoid summary line, and files that have been deleted (no plus).
if line.find('|') != -1 and line.find('+') != -1:
filename = line.split()[0]
if filename:
filename = filename.rstrip()
ext = filename.rsplit('.')[-1]
if not extensions or ext in extensions:
filenames.append(filename)
return filenames | [
"def",
"FilenamesFromGit",
"(",
"branch_name",
",",
"extensions",
")",
":",
"lines",
"=",
"GitShell",
"(",
"'git diff --stat=600,500 %s'",
"%",
"branch_name",
")",
"filenames",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"line",
"=",
"line",
".",
"lstrip",
"(",
")",
"# Avoid summary line, and files that have been deleted (no plus).",
"if",
"line",
".",
"find",
"(",
"'|'",
")",
"!=",
"-",
"1",
"and",
"line",
".",
"find",
"(",
"'+'",
")",
"!=",
"-",
"1",
":",
"filename",
"=",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"filename",
":",
"filename",
"=",
"filename",
".",
"rstrip",
"(",
")",
"ext",
"=",
"filename",
".",
"rsplit",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"not",
"extensions",
"or",
"ext",
"in",
"extensions",
":",
"filenames",
".",
"append",
"(",
"filename",
")",
"return",
"filenames"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/git/for-all-touched-files.py#L53-L75 |
|
eomahony/Numberjack | 53fa9e994a36f881ffd320d8d04158097190aad8 | Numberjack/__init__.py | python | VarArray.__ge__ | (self, other) | return LeqLex(other, self) | Syntactic sugar for the lexicographic order constraint :class:`LeqLex`
so it can be specified on two VarArray like so: `X >= Y`
:param VarArray other: Another VarArray of the same length.
:rtype: LeqLex | Syntactic sugar for the lexicographic order constraint :class:`LeqLex`
so it can be specified on two VarArray like so: `X >= Y` | [
"Syntactic",
"sugar",
"for",
"the",
"lexicographic",
"order",
"constraint",
":",
"class",
":",
"LeqLex",
"so",
"it",
"can",
"be",
"specified",
"on",
"two",
"VarArray",
"like",
"so",
":",
"X",
">",
"=",
"Y"
] | def __ge__(self, other):
"""
Syntactic sugar for the lexicographic order constraint :class:`LeqLex`
so it can be specified on two VarArray like so: `X >= Y`
:param VarArray other: Another VarArray of the same length.
:rtype: LeqLex
"""
return LeqLex(other, self) | [
"def",
"__ge__",
"(",
"self",
",",
"other",
")",
":",
"return",
"LeqLex",
"(",
"other",
",",
"self",
")"
] | https://github.com/eomahony/Numberjack/blob/53fa9e994a36f881ffd320d8d04158097190aad8/Numberjack/__init__.py#L1194-L1202 |
|
SoarGroup/Soar | a1c5e249499137a27da60533c72969eef3b8ab6b | scons/scons-local-4.1.0/SCons/Node/__init__.py | python | Node.get_binfo | (self) | return binfo | Fetch a node's build information.
node - the node whose sources will be collected
cache - alternate node to use for the signature cache
returns - the build signature
This no longer handles the recursive descent of the
node's children's signatures. We expect that they're
already built and updated by someone else, if that's
what's wanted. | Fetch a node's build information. | [
"Fetch",
"a",
"node",
"s",
"build",
"information",
"."
] | def get_binfo(self):
"""
Fetch a node's build information.
node - the node whose sources will be collected
cache - alternate node to use for the signature cache
returns - the build signature
This no longer handles the recursive descent of the
node's children's signatures. We expect that they're
already built and updated by someone else, if that's
what's wanted.
"""
try:
return self.binfo
except AttributeError:
pass
binfo = self.new_binfo()
self.binfo = binfo
executor = self.get_executor()
ignore_set = self.ignore_set
if self.has_builder():
binfo.bact = str(executor)
binfo.bactsig = MD5signature(executor.get_contents())
if self._specific_sources:
sources = [s for s in self.sources if s not in ignore_set]
else:
sources = executor.get_unignored_sources(self, self.ignore)
seen = set()
binfo.bsources = [s for s in sources if s not in seen and not seen.add(s)]
binfo.bsourcesigs = [s.get_ninfo() for s in binfo.bsources]
binfo.bdepends = [d for d in self.depends if d not in ignore_set]
binfo.bdependsigs = [d.get_ninfo() for d in self.depends]
# Because self.implicit is initialized to None (and not empty list [])
# we have to handle this case
if not self.implicit:
binfo.bimplicit = []
binfo.bimplicitsigs = []
else:
binfo.bimplicit = [i for i in self.implicit if i not in ignore_set]
binfo.bimplicitsigs = [i.get_ninfo() for i in binfo.bimplicit]
return binfo | [
"def",
"get_binfo",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"binfo",
"except",
"AttributeError",
":",
"pass",
"binfo",
"=",
"self",
".",
"new_binfo",
"(",
")",
"self",
".",
"binfo",
"=",
"binfo",
"executor",
"=",
"self",
".",
"get_executor",
"(",
")",
"ignore_set",
"=",
"self",
".",
"ignore_set",
"if",
"self",
".",
"has_builder",
"(",
")",
":",
"binfo",
".",
"bact",
"=",
"str",
"(",
"executor",
")",
"binfo",
".",
"bactsig",
"=",
"MD5signature",
"(",
"executor",
".",
"get_contents",
"(",
")",
")",
"if",
"self",
".",
"_specific_sources",
":",
"sources",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"sources",
"if",
"s",
"not",
"in",
"ignore_set",
"]",
"else",
":",
"sources",
"=",
"executor",
".",
"get_unignored_sources",
"(",
"self",
",",
"self",
".",
"ignore",
")",
"seen",
"=",
"set",
"(",
")",
"binfo",
".",
"bsources",
"=",
"[",
"s",
"for",
"s",
"in",
"sources",
"if",
"s",
"not",
"in",
"seen",
"and",
"not",
"seen",
".",
"add",
"(",
"s",
")",
"]",
"binfo",
".",
"bsourcesigs",
"=",
"[",
"s",
".",
"get_ninfo",
"(",
")",
"for",
"s",
"in",
"binfo",
".",
"bsources",
"]",
"binfo",
".",
"bdepends",
"=",
"[",
"d",
"for",
"d",
"in",
"self",
".",
"depends",
"if",
"d",
"not",
"in",
"ignore_set",
"]",
"binfo",
".",
"bdependsigs",
"=",
"[",
"d",
".",
"get_ninfo",
"(",
")",
"for",
"d",
"in",
"self",
".",
"depends",
"]",
"# Because self.implicit is initialized to None (and not empty list [])",
"# we have to handle this case",
"if",
"not",
"self",
".",
"implicit",
":",
"binfo",
".",
"bimplicit",
"=",
"[",
"]",
"binfo",
".",
"bimplicitsigs",
"=",
"[",
"]",
"else",
":",
"binfo",
".",
"bimplicit",
"=",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"implicit",
"if",
"i",
"not",
"in",
"ignore_set",
"]",
"binfo",
".",
"bimplicitsigs",
"=",
"[",
"i",
".",
"get_ninfo",
"(",
")",
"for",
"i",
"in",
"binfo",
".",
"bimplicit",
"]",
"return",
"binfo"
] | https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Node/__init__.py#L1144-L1194 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/htmllib.py | python | HTMLParser.__init__ | (self, formatter, verbose=0) | Creates an instance of the HTMLParser class.
The formatter parameter is the formatter instance associated with
the parser. | Creates an instance of the HTMLParser class. | [
"Creates",
"an",
"instance",
"of",
"the",
"HTMLParser",
"class",
"."
] | def __init__(self, formatter, verbose=0):
"""Creates an instance of the HTMLParser class.
The formatter parameter is the formatter instance associated with
the parser.
"""
sgmllib.SGMLParser.__init__(self, verbose)
self.formatter = formatter | [
"def",
"__init__",
"(",
"self",
",",
"formatter",
",",
"verbose",
"=",
"0",
")",
":",
"sgmllib",
".",
"SGMLParser",
".",
"__init__",
"(",
"self",
",",
"verbose",
")",
"self",
".",
"formatter",
"=",
"formatter"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/htmllib.py#L34-L42 |
||
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/package/package_importer.py | python | PackageImporter._add_file | (self, filename: str) | Assembles a Python module out of the given file. Will ignore files in the .data directory.
Args:
filename (str): the name of the file inside of the package archive to be added | Assembles a Python module out of the given file. Will ignore files in the .data directory. | [
"Assembles",
"a",
"Python",
"module",
"out",
"of",
"the",
"given",
"file",
".",
"Will",
"ignore",
"files",
"in",
"the",
".",
"data",
"directory",
"."
] | def _add_file(self, filename: str):
"""Assembles a Python module out of the given file. Will ignore files in the .data directory.
Args:
filename (str): the name of the file inside of the package archive to be added
"""
*prefix, last = filename.split("/")
if len(prefix) > 1 and prefix[0] == ".data":
return
package = self._get_or_create_package(prefix)
if isinstance(package, _ExternNode):
raise ImportError(
f"inconsistent module structure. package contains a module file {filename}"
f" that is a subpackage of a module marked external."
)
if last == "__init__.py":
package.source_file = filename
elif last.endswith(".py"):
package_name = last[: -len(".py")]
package.children[package_name] = _ModuleNode(filename) | [
"def",
"_add_file",
"(",
"self",
",",
"filename",
":",
"str",
")",
":",
"*",
"prefix",
",",
"last",
"=",
"filename",
".",
"split",
"(",
"\"/\"",
")",
"if",
"len",
"(",
"prefix",
")",
">",
"1",
"and",
"prefix",
"[",
"0",
"]",
"==",
"\".data\"",
":",
"return",
"package",
"=",
"self",
".",
"_get_or_create_package",
"(",
"prefix",
")",
"if",
"isinstance",
"(",
"package",
",",
"_ExternNode",
")",
":",
"raise",
"ImportError",
"(",
"f\"inconsistent module structure. package contains a module file {filename}\"",
"f\" that is a subpackage of a module marked external.\"",
")",
"if",
"last",
"==",
"\"__init__.py\"",
":",
"package",
".",
"source_file",
"=",
"filename",
"elif",
"last",
".",
"endswith",
"(",
"\".py\"",
")",
":",
"package_name",
"=",
"last",
"[",
":",
"-",
"len",
"(",
"\".py\"",
")",
"]",
"package",
".",
"children",
"[",
"package_name",
"]",
"=",
"_ModuleNode",
"(",
"filename",
")"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/package/package_importer.py#L558-L577 |
||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/contrib/lookup/lookup_ops.py | python | LookupInterface.__init__ | (self, key_dtype, value_dtype, name) | Construct a lookup table interface.
Args:
key_dtype: The table key type.
value_dtype: The table value type.
name: A name for the operation (optional). | Construct a lookup table interface. | [
"Construct",
"a",
"lookup",
"table",
"interface",
"."
] | def __init__(self, key_dtype, value_dtype, name):
"""Construct a lookup table interface.
Args:
key_dtype: The table key type.
value_dtype: The table value type.
name: A name for the operation (optional).
"""
self._key_dtype = dtypes.as_dtype(key_dtype)
self._value_dtype = dtypes.as_dtype(value_dtype)
self._name = name | [
"def",
"__init__",
"(",
"self",
",",
"key_dtype",
",",
"value_dtype",
",",
"name",
")",
":",
"self",
".",
"_key_dtype",
"=",
"dtypes",
".",
"as_dtype",
"(",
"key_dtype",
")",
"self",
".",
"_value_dtype",
"=",
"dtypes",
".",
"as_dtype",
"(",
"value_dtype",
")",
"self",
".",
"_name",
"=",
"name"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/lookup/lookup_ops.py#L32-L42 |
||
NVIDIAGameWorks/kaolin | e5148d05e9c1e2ce92a07881ce3593b1c5c3f166 | kaolin/io/usd.py | python | add_pointcloud | (stage, points, scene_path, colors=None, time=None, points_type='point_instancer') | return stage | r"""Add a pointcloud to an existing USD stage.
Create a pointcloud represented by point instances of a sphere centered at each point coordinate.
The stage is modified but not saved to disk.
Args:
stage (Usd.Stage): Stage onto which to add the pointcloud.
points (torch.FloatTensor): Pointcloud tensor containing ``N`` points of shape ``(N, 3)``.
scene_path (str): Absolute path of pointcloud within the USD file scene. Must be a valid Sdf.Path.
colors (torch.FloatTensor, optional): Color tensor corresponding each point in the pointcloud
tensor of shape ``(N, 3)``. colors only works if points_type is 'usd_geom_points'.
time (convertible to float, optional): Positive integer defining the time at which the supplied parameters
correspond to.
points_type (str): String that indicates whether to save pointcloud as UsdGeomPoints or PointInstancer.
'usd_geom_points' indicates UsdGeomPoints and 'point_instancer' indicates PointInstancer.
Please refer here for UsdGeomPoints:
https://graphics.pixar.com/usd/docs/api/class_usd_geom_points.html and here for PointInstancer
https://graphics.pixar.com/usd/docs/api/class_usd_geom_point_instancer.html. Default: 'point_instancer'.
Returns:
(Usd.Stage)
Example:
>>> stage = create_stage('./new_stage.usd')
>>> points = torch.rand(100, 3)
>>> stage = add_pointcloud(stage, points, '/World/PointClouds/pointcloud_0')
>>> stage.Save() | r"""Add a pointcloud to an existing USD stage. | [
"r",
"Add",
"a",
"pointcloud",
"to",
"an",
"existing",
"USD",
"stage",
"."
] | def add_pointcloud(stage, points, scene_path, colors=None, time=None, points_type='point_instancer'):
r"""Add a pointcloud to an existing USD stage.
Create a pointcloud represented by point instances of a sphere centered at each point coordinate.
The stage is modified but not saved to disk.
Args:
stage (Usd.Stage): Stage onto which to add the pointcloud.
points (torch.FloatTensor): Pointcloud tensor containing ``N`` points of shape ``(N, 3)``.
scene_path (str): Absolute path of pointcloud within the USD file scene. Must be a valid Sdf.Path.
colors (torch.FloatTensor, optional): Color tensor corresponding each point in the pointcloud
tensor of shape ``(N, 3)``. colors only works if points_type is 'usd_geom_points'.
time (convertible to float, optional): Positive integer defining the time at which the supplied parameters
correspond to.
points_type (str): String that indicates whether to save pointcloud as UsdGeomPoints or PointInstancer.
'usd_geom_points' indicates UsdGeomPoints and 'point_instancer' indicates PointInstancer.
Please refer here for UsdGeomPoints:
https://graphics.pixar.com/usd/docs/api/class_usd_geom_points.html and here for PointInstancer
https://graphics.pixar.com/usd/docs/api/class_usd_geom_point_instancer.html. Default: 'point_instancer'.
Returns:
(Usd.Stage)
Example:
>>> stage = create_stage('./new_stage.usd')
>>> points = torch.rand(100, 3)
>>> stage = add_pointcloud(stage, points, '/World/PointClouds/pointcloud_0')
>>> stage.Save()
"""
scene_path = Sdf.Path(scene_path)
if time is None:
time = Usd.TimeCode.Default()
if stage.GetPrimAtPath(scene_path):
points_prim = stage.GetPrimAtPath(scene_path)
else:
if points_type == 'point_instancer':
points_prim = stage.DefinePrim(scene_path, 'PointInstancer')
elif points_type == 'usd_geom_points':
points_prim = stage.DefinePrim(scene_path, 'Points')
else:
raise ValueError('Expected points_type to be "usd_geom_points" or "point_instancer", '
f'but got "{points_type}".')
if points_type == 'point_instancer':
geom_points = UsdGeom.PointInstancer(points_prim)
sphere = UsdGeom.Sphere.Define(stage, f'{scene_path}/sphere')
sphere.GetRadiusAttr().Set(0.5)
geom_points.CreatePrototypesRel().SetTargets([sphere.GetPath()])
elif points_type == 'usd_geom_points':
geom_points = UsdGeom.Points(points_prim)
# Calculate default point scale
bounds = points.max(dim=0)[0] - points.min(dim=0)[0]
min_bound = min(bounds)
scale = (min_bound / points.size(0) ** (1 / 3)).item()
# Generate instancer parameters
positions = points.detach().cpu().tolist()
scales = np.asarray([scale, ] * points.size(0))
if points_type == 'point_instancer':
indices = [0] * points.size(0)
# Populate point instancer
geom_points.GetProtoIndicesAttr().Set(indices, time=time)
geom_points.GetPositionsAttr().Set(positions, time=time)
scales = [(scale,) * 3] * points.size(0)
geom_points.GetScalesAttr().Set(scales, time=time)
elif points_type == 'usd_geom_points':
# Populate UsdGeomPoints
geom_points.GetPointsAttr().Set(points.numpy(), time=time)
geom_points.GetWidthsAttr().Set(Vt.FloatArray.FromNumpy(scales), time=time)
if colors is not None and points_type == 'usd_geom_points':
assert colors.shape == points.shape, 'Colors and points must have the same shape.'
geom_points.GetDisplayColorAttr().Set(colors.numpy(), time=time)
return stage | [
"def",
"add_pointcloud",
"(",
"stage",
",",
"points",
",",
"scene_path",
",",
"colors",
"=",
"None",
",",
"time",
"=",
"None",
",",
"points_type",
"=",
"'point_instancer'",
")",
":",
"scene_path",
"=",
"Sdf",
".",
"Path",
"(",
"scene_path",
")",
"if",
"time",
"is",
"None",
":",
"time",
"=",
"Usd",
".",
"TimeCode",
".",
"Default",
"(",
")",
"if",
"stage",
".",
"GetPrimAtPath",
"(",
"scene_path",
")",
":",
"points_prim",
"=",
"stage",
".",
"GetPrimAtPath",
"(",
"scene_path",
")",
"else",
":",
"if",
"points_type",
"==",
"'point_instancer'",
":",
"points_prim",
"=",
"stage",
".",
"DefinePrim",
"(",
"scene_path",
",",
"'PointInstancer'",
")",
"elif",
"points_type",
"==",
"'usd_geom_points'",
":",
"points_prim",
"=",
"stage",
".",
"DefinePrim",
"(",
"scene_path",
",",
"'Points'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Expected points_type to be \"usd_geom_points\" or \"point_instancer\", '",
"f'but got \"{points_type}\".'",
")",
"if",
"points_type",
"==",
"'point_instancer'",
":",
"geom_points",
"=",
"UsdGeom",
".",
"PointInstancer",
"(",
"points_prim",
")",
"sphere",
"=",
"UsdGeom",
".",
"Sphere",
".",
"Define",
"(",
"stage",
",",
"f'{scene_path}/sphere'",
")",
"sphere",
".",
"GetRadiusAttr",
"(",
")",
".",
"Set",
"(",
"0.5",
")",
"geom_points",
".",
"CreatePrototypesRel",
"(",
")",
".",
"SetTargets",
"(",
"[",
"sphere",
".",
"GetPath",
"(",
")",
"]",
")",
"elif",
"points_type",
"==",
"'usd_geom_points'",
":",
"geom_points",
"=",
"UsdGeom",
".",
"Points",
"(",
"points_prim",
")",
"# Calculate default point scale",
"bounds",
"=",
"points",
".",
"max",
"(",
"dim",
"=",
"0",
")",
"[",
"0",
"]",
"-",
"points",
".",
"min",
"(",
"dim",
"=",
"0",
")",
"[",
"0",
"]",
"min_bound",
"=",
"min",
"(",
"bounds",
")",
"scale",
"=",
"(",
"min_bound",
"/",
"points",
".",
"size",
"(",
"0",
")",
"**",
"(",
"1",
"/",
"3",
")",
")",
".",
"item",
"(",
")",
"# Generate instancer parameters",
"positions",
"=",
"points",
".",
"detach",
"(",
")",
".",
"cpu",
"(",
")",
".",
"tolist",
"(",
")",
"scales",
"=",
"np",
".",
"asarray",
"(",
"[",
"scale",
",",
"]",
"*",
"points",
".",
"size",
"(",
"0",
")",
")",
"if",
"points_type",
"==",
"'point_instancer'",
":",
"indices",
"=",
"[",
"0",
"]",
"*",
"points",
".",
"size",
"(",
"0",
")",
"# Populate point instancer",
"geom_points",
".",
"GetProtoIndicesAttr",
"(",
")",
".",
"Set",
"(",
"indices",
",",
"time",
"=",
"time",
")",
"geom_points",
".",
"GetPositionsAttr",
"(",
")",
".",
"Set",
"(",
"positions",
",",
"time",
"=",
"time",
")",
"scales",
"=",
"[",
"(",
"scale",
",",
")",
"*",
"3",
"]",
"*",
"points",
".",
"size",
"(",
"0",
")",
"geom_points",
".",
"GetScalesAttr",
"(",
")",
".",
"Set",
"(",
"scales",
",",
"time",
"=",
"time",
")",
"elif",
"points_type",
"==",
"'usd_geom_points'",
":",
"# Populate UsdGeomPoints",
"geom_points",
".",
"GetPointsAttr",
"(",
")",
".",
"Set",
"(",
"points",
".",
"numpy",
"(",
")",
",",
"time",
"=",
"time",
")",
"geom_points",
".",
"GetWidthsAttr",
"(",
")",
".",
"Set",
"(",
"Vt",
".",
"FloatArray",
".",
"FromNumpy",
"(",
"scales",
")",
",",
"time",
"=",
"time",
")",
"if",
"colors",
"is",
"not",
"None",
"and",
"points_type",
"==",
"'usd_geom_points'",
":",
"assert",
"colors",
".",
"shape",
"==",
"points",
".",
"shape",
",",
"'Colors and points must have the same shape.'",
"geom_points",
".",
"GetDisplayColorAttr",
"(",
")",
".",
"Set",
"(",
"colors",
".",
"numpy",
"(",
")",
",",
"time",
"=",
"time",
")",
"return",
"stage"
] | https://github.com/NVIDIAGameWorks/kaolin/blob/e5148d05e9c1e2ce92a07881ce3593b1c5c3f166/kaolin/io/usd.py#L918-L994 |
|
OSGeo/gdal | 3748fc4ba4fba727492774b2b908a2130c864a83 | swig/python/osgeo/ogr.py | python | DataSource.CopyLayer | (self, *args, **kwargs) | return _ogr.DataSource_CopyLayer(self, *args, **kwargs) | r"""
CopyLayer(DataSource self, Layer src_layer, char const * new_name, char ** options=None) -> Layer
OGRLayerH
OGR_DS_CopyLayer(OGRDataSourceH hDS, OGRLayerH hSrcLayer, const char
*pszNewName, char **papszOptions)
Duplicate an existing layer.
This function creates a new layer, duplicate the field definitions of
the source layer and then duplicate each features of the source layer.
The papszOptions argument can be used to control driver specific
creation options. These options are normally documented in the format
specific documentation. The source layer may come from another
dataset.
Deprecated Use GDALDatasetCopyLayer() in GDAL 2.0
Parameters:
-----------
hDS: handle to the data source where to create the new layer
hSrcLayer: handle to the source layer.
pszNewName: the name of the layer to create.
papszOptions: a StringList of name=value options. Options are driver
specific.
a handle to the layer, or NULL if an error occurs. | r"""
CopyLayer(DataSource self, Layer src_layer, char const * new_name, char ** options=None) -> Layer
OGRLayerH
OGR_DS_CopyLayer(OGRDataSourceH hDS, OGRLayerH hSrcLayer, const char
*pszNewName, char **papszOptions) | [
"r",
"CopyLayer",
"(",
"DataSource",
"self",
"Layer",
"src_layer",
"char",
"const",
"*",
"new_name",
"char",
"**",
"options",
"=",
"None",
")",
"-",
">",
"Layer",
"OGRLayerH",
"OGR_DS_CopyLayer",
"(",
"OGRDataSourceH",
"hDS",
"OGRLayerH",
"hSrcLayer",
"const",
"char",
"*",
"pszNewName",
"char",
"**",
"papszOptions",
")"
] | def CopyLayer(self, *args, **kwargs):
r"""
CopyLayer(DataSource self, Layer src_layer, char const * new_name, char ** options=None) -> Layer
OGRLayerH
OGR_DS_CopyLayer(OGRDataSourceH hDS, OGRLayerH hSrcLayer, const char
*pszNewName, char **papszOptions)
Duplicate an existing layer.
This function creates a new layer, duplicate the field definitions of
the source layer and then duplicate each features of the source layer.
The papszOptions argument can be used to control driver specific
creation options. These options are normally documented in the format
specific documentation. The source layer may come from another
dataset.
Deprecated Use GDALDatasetCopyLayer() in GDAL 2.0
Parameters:
-----------
hDS: handle to the data source where to create the new layer
hSrcLayer: handle to the source layer.
pszNewName: the name of the layer to create.
papszOptions: a StringList of name=value options. Options are driver
specific.
a handle to the layer, or NULL if an error occurs.
"""
return _ogr.DataSource_CopyLayer(self, *args, **kwargs) | [
"def",
"CopyLayer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_ogr",
".",
"DataSource_CopyLayer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/ogr.py#L739-L771 |
|
emscripten-core/emscripten | 0d413d3c5af8b28349682496edc14656f5700c2f | third_party/ply/example/BASIC/basparse.py | python | p_command_def | (p) | command : DEF ID LPAREN ID RPAREN EQUALS expr | command : DEF ID LPAREN ID RPAREN EQUALS expr | [
"command",
":",
"DEF",
"ID",
"LPAREN",
"ID",
"RPAREN",
"EQUALS",
"expr"
] | def p_command_def(p):
'''command : DEF ID LPAREN ID RPAREN EQUALS expr'''
p[0] = ('FUNC',p[2],p[4],p[7]) | [
"def",
"p_command_def",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"'FUNC'",
",",
"p",
"[",
"2",
"]",
",",
"p",
"[",
"4",
"]",
",",
"p",
"[",
"7",
"]",
")"
] | https://github.com/emscripten-core/emscripten/blob/0d413d3c5af8b28349682496edc14656f5700c2f/third_party/ply/example/BASIC/basparse.py#L221-L223 |
||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/turtle.py | python | RawTurtle._cc | (self, args) | return "#%02x%02x%02x" % (r, g, b) | Convert colortriples to hexstrings. | Convert colortriples to hexstrings. | [
"Convert",
"colortriples",
"to",
"hexstrings",
"."
] | def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b) | [
"def",
"_cc",
"(",
"self",
",",
"args",
")",
":",
"if",
"isinstance",
"(",
"args",
",",
"str",
")",
":",
"return",
"args",
"try",
":",
"r",
",",
"g",
",",
"b",
"=",
"args",
"except",
":",
"raise",
"TurtleGraphicsError",
"(",
"\"bad color arguments: %s\"",
"%",
"str",
"(",
"args",
")",
")",
"if",
"self",
".",
"screen",
".",
"_colormode",
"==",
"1.0",
":",
"r",
",",
"g",
",",
"b",
"=",
"[",
"round",
"(",
"255.0",
"*",
"x",
")",
"for",
"x",
"in",
"(",
"r",
",",
"g",
",",
"b",
")",
"]",
"if",
"not",
"(",
"(",
"0",
"<=",
"r",
"<=",
"255",
")",
"and",
"(",
"0",
"<=",
"g",
"<=",
"255",
")",
"and",
"(",
"0",
"<=",
"b",
"<=",
"255",
")",
")",
":",
"raise",
"TurtleGraphicsError",
"(",
"\"bad color sequence: %s\"",
"%",
"str",
"(",
"args",
")",
")",
"return",
"\"#%02x%02x%02x\"",
"%",
"(",
"r",
",",
"g",
",",
"b",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/turtle.py#L2602-L2615 |
|
MythTV/mythtv | d282a209cb8be85d036f85a62a8ec971b67d45f4 | mythtv/programs/scripts/internetcontent/nv_python_libs/pbs/pbs_api.py | python | Videos.getPBSConfig | (self) | return | Read the MNV PBS grabber "pbs_config.xml" configuration file
return nothing | Read the MNV PBS grabber "pbs_config.xml" configuration file
return nothing | [
"Read",
"the",
"MNV",
"PBS",
"grabber",
"pbs_config",
".",
"xml",
"configuration",
"file",
"return",
"nothing"
] | def getPBSConfig(self):
''' Read the MNV PBS grabber "pbs_config.xml" configuration file
return nothing
'''
# Read the grabber pbs_config.xml configuration file
url = 'file://%s/nv_python_libs/configs/XML/pbs_config.xml' % (baseProcessingDir, )
if not os.path.isfile(url[7:]):
raise PBSConfigFileError(self.error_messages['PBSConfigFileError'] % (url[7:], ))
if self.config['debug_enabled']:
print(url)
print()
try:
self.pbs_config = etree.parse(url)
except Exception as e:
raise PBSUrlError(self.error_messages['PBSUrlError'] % (url, errormsg))
return | [
"def",
"getPBSConfig",
"(",
"self",
")",
":",
"# Read the grabber pbs_config.xml configuration file",
"url",
"=",
"'file://%s/nv_python_libs/configs/XML/pbs_config.xml'",
"%",
"(",
"baseProcessingDir",
",",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"url",
"[",
"7",
":",
"]",
")",
":",
"raise",
"PBSConfigFileError",
"(",
"self",
".",
"error_messages",
"[",
"'PBSConfigFileError'",
"]",
"%",
"(",
"url",
"[",
"7",
":",
"]",
",",
")",
")",
"if",
"self",
".",
"config",
"[",
"'debug_enabled'",
"]",
":",
"print",
"(",
"url",
")",
"print",
"(",
")",
"try",
":",
"self",
".",
"pbs_config",
"=",
"etree",
".",
"parse",
"(",
"url",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"PBSUrlError",
"(",
"self",
".",
"error_messages",
"[",
"'PBSUrlError'",
"]",
"%",
"(",
"url",
",",
"errormsg",
")",
")",
"return"
] | https://github.com/MythTV/mythtv/blob/d282a209cb8be85d036f85a62a8ec971b67d45f4/mythtv/programs/scripts/internetcontent/nv_python_libs/pbs/pbs_api.py#L200-L216 |
|
LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py | python | NormjoinPath | (base_path, rel_path) | return os.path.normpath(os.path.join(base_path, rel_path)) | Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized. | Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized. | [
"Resolves",
"rel_path",
"against",
"base_path",
"and",
"returns",
"the",
"result",
".",
"TODO",
":",
"what",
"is",
"this",
"really",
"used",
"for?",
"If",
"rel_path",
"begins",
"with",
"$",
"it",
"is",
"returned",
"unchanged",
".",
"Otherwise",
"it",
"is",
"resolved",
"against",
"base_path",
"if",
"relative",
"then",
"normalized",
"."
] | def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path)) | [
"def",
"NormjoinPath",
"(",
"base_path",
",",
"rel_path",
")",
":",
"if",
"rel_path",
".",
"startswith",
"(",
"'$'",
")",
"and",
"not",
"rel_path",
".",
"startswith",
"(",
"'${configuration}'",
")",
":",
"return",
"rel_path",
"return",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
"rel_path",
")",
")"
] | https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py#L110-L118 |
|
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/contrib/distributions/python/ops/normal.py | python | Normal.log_prob | (self, x, name="log_prob") | Log prob of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`. | Log prob of observations in `x` under these Normal distribution(s). | [
"Log",
"prob",
"of",
"observations",
"in",
"x",
"under",
"these",
"Normal",
"distribution",
"(",
"s",
")",
"."
] | def log_prob(self, x, name="log_prob"):
"""Log prob of observations in `x` under these Normal distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._mu, self._sigma, x], name):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
log_2_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
return (-0.5*log_2_pi - math_ops.log(self._sigma)
-0.5*math_ops.square((x - self._mu) / self._sigma)) | [
"def",
"log_prob",
"(",
"self",
",",
"x",
",",
"name",
"=",
"\"log_prob\"",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"self",
".",
"name",
")",
":",
"with",
"ops",
".",
"op_scope",
"(",
"[",
"self",
".",
"_mu",
",",
"self",
".",
"_sigma",
",",
"x",
"]",
",",
"name",
")",
":",
"x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"x",
")",
"if",
"x",
".",
"dtype",
"!=",
"self",
".",
"dtype",
":",
"raise",
"TypeError",
"(",
"\"Input x dtype does not match dtype: %s vs. %s\"",
"%",
"(",
"x",
".",
"dtype",
",",
"self",
".",
"dtype",
")",
")",
"log_2_pi",
"=",
"constant_op",
".",
"constant",
"(",
"math",
".",
"log",
"(",
"2",
"*",
"math",
".",
"pi",
")",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"return",
"(",
"-",
"0.5",
"*",
"log_2_pi",
"-",
"math_ops",
".",
"log",
"(",
"self",
".",
"_sigma",
")",
"-",
"0.5",
"*",
"math_ops",
".",
"square",
"(",
"(",
"x",
"-",
"self",
".",
"_mu",
")",
"/",
"self",
".",
"_sigma",
")",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/distributions/python/ops/normal.py#L223-L241 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/tkinter/__init__.py | python | Button.invoke | (self) | return self.tk.call(self._w, 'invoke') | Invoke the command associated with the button.
The return value is the return value from the command,
or an empty string if there is no command associated with
the button. This command is ignored if the button's state
is disabled. | Invoke the command associated with the button. | [
"Invoke",
"the",
"command",
"associated",
"with",
"the",
"button",
"."
] | def invoke(self):
"""Invoke the command associated with the button.
The return value is the return value from the command,
or an empty string if there is no command associated with
the button. This command is ignored if the button's state
is disabled.
"""
return self.tk.call(self._w, 'invoke') | [
"def",
"invoke",
"(",
"self",
")",
":",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"'invoke'",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/tkinter/__init__.py#L2383-L2391 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/aui.py | python | AuiToolBarItem.GetBitmap | (*args, **kwargs) | return _aui.AuiToolBarItem_GetBitmap(*args, **kwargs) | GetBitmap(self) -> Bitmap | GetBitmap(self) -> Bitmap | [
"GetBitmap",
"(",
"self",
")",
"-",
">",
"Bitmap"
] | def GetBitmap(*args, **kwargs):
"""GetBitmap(self) -> Bitmap"""
return _aui.AuiToolBarItem_GetBitmap(*args, **kwargs) | [
"def",
"GetBitmap",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiToolBarItem_GetBitmap",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/aui.py#L1781-L1783 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/message.py | python | Message.get_content_charset | (self, failobj=None) | return charset.lower() | Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned. | Return the charset parameter of the Content-Type header. | [
"Return",
"the",
"charset",
"parameter",
"of",
"the",
"Content",
"-",
"Type",
"header",
"."
] | def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
as_bytes = charset[2].encode('raw-unicode-escape')
charset = str(as_bytes, pcharset)
except (LookupError, UnicodeError):
charset = charset[2]
# charset characters must be in us-ascii range
try:
charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower() | [
"def",
"get_content_charset",
"(",
"self",
",",
"failobj",
"=",
"None",
")",
":",
"missing",
"=",
"object",
"(",
")",
"charset",
"=",
"self",
".",
"get_param",
"(",
"'charset'",
",",
"missing",
")",
"if",
"charset",
"is",
"missing",
":",
"return",
"failobj",
"if",
"isinstance",
"(",
"charset",
",",
"tuple",
")",
":",
"# RFC 2231 encoded, so decode it, and it better end up as ascii.",
"pcharset",
"=",
"charset",
"[",
"0",
"]",
"or",
"'us-ascii'",
"try",
":",
"# LookupError will be raised if the charset isn't known to",
"# Python. UnicodeError will be raised if the encoded text",
"# contains a character not in the charset.",
"as_bytes",
"=",
"charset",
"[",
"2",
"]",
".",
"encode",
"(",
"'raw-unicode-escape'",
")",
"charset",
"=",
"str",
"(",
"as_bytes",
",",
"pcharset",
")",
"except",
"(",
"LookupError",
",",
"UnicodeError",
")",
":",
"charset",
"=",
"charset",
"[",
"2",
"]",
"# charset characters must be in us-ascii range",
"try",
":",
"charset",
".",
"encode",
"(",
"'us-ascii'",
")",
"except",
"UnicodeError",
":",
"return",
"failobj",
"# RFC 2046, $4.1.2 says charsets are not case sensitive",
"return",
"charset",
".",
"lower",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/message.py#L881-L909 |
|
RobotLocomotion/drake | 0e18a34604c45ed65bc9018a54f7610f91cdad5b | tools/workspace/drake_visualizer/_drake_visualizer_builtin_scripts/show_hydroelastic_contact.py | python | HydroelasticContactVisualizer.toggle_show_traction_vectors | (self, state) | Slot for dialog widget | Slot for dialog widget | [
"Slot",
"for",
"dialog",
"widget"
] | def toggle_show_traction_vectors(self, state):
"""Slot for dialog widget"""
self.show_traction_vectors = state
self.update_visual_data_from_message() | [
"def",
"toggle_show_traction_vectors",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"show_traction_vectors",
"=",
"state",
"self",
".",
"update_visual_data_from_message",
"(",
")"
] | https://github.com/RobotLocomotion/drake/blob/0e18a34604c45ed65bc9018a54f7610f91cdad5b/tools/workspace/drake_visualizer/_drake_visualizer_builtin_scripts/show_hydroelastic_contact.py#L1073-L1076 |
||
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py | python | _zero_state_tensors | (state_size, batch_size, dtype) | return nest.map_structure(get_state_shape, state_size) | Create tensors of zeros based on state_size, batch_size, and dtype. | Create tensors of zeros based on state_size, batch_size, and dtype. | [
"Create",
"tensors",
"of",
"zeros",
"based",
"on",
"state_size",
"batch_size",
"and",
"dtype",
"."
] | def _zero_state_tensors(state_size, batch_size, dtype):
"""Create tensors of zeros based on state_size, batch_size, and dtype."""
def get_state_shape(s):
"""Combine s with batch_size to get a proper tensor shape."""
c = _concat(batch_size, s)
size = array_ops.zeros(c, dtype=dtype)
if not context.executing_eagerly():
c_static = _concat(batch_size, s, static=True)
size.set_shape(c_static)
return size
return nest.map_structure(get_state_shape, state_size) | [
"def",
"_zero_state_tensors",
"(",
"state_size",
",",
"batch_size",
",",
"dtype",
")",
":",
"def",
"get_state_shape",
"(",
"s",
")",
":",
"\"\"\"Combine s with batch_size to get a proper tensor shape.\"\"\"",
"c",
"=",
"_concat",
"(",
"batch_size",
",",
"s",
")",
"size",
"=",
"array_ops",
".",
"zeros",
"(",
"c",
",",
"dtype",
"=",
"dtype",
")",
"if",
"not",
"context",
".",
"executing_eagerly",
"(",
")",
":",
"c_static",
"=",
"_concat",
"(",
"batch_size",
",",
"s",
",",
"static",
"=",
"True",
")",
"size",
".",
"set_shape",
"(",
"c_static",
")",
"return",
"size",
"return",
"nest",
".",
"map_structure",
"(",
"get_state_shape",
",",
"state_size",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py#L168-L180 |
|
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | python | MakefileWriter.WriteActions | (self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all) | Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all' | Write Makefile code for any 'actions' from the gyp input. | [
"Write",
"Makefile",
"code",
"for",
"any",
"actions",
"from",
"the",
"gyp",
"input",
"."
] | def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = [self.Absolutify(output) for output in outputs]
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, [Sourceify(self.Absolutify(i)) for i in inputs],
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn() | [
"def",
"WriteActions",
"(",
"self",
",",
"actions",
",",
"extra_sources",
",",
"extra_outputs",
",",
"extra_mac_bundle_resources",
",",
"part_of_all",
")",
":",
"env",
"=",
"self",
".",
"GetSortedXcodeEnv",
"(",
")",
"for",
"action",
"in",
"actions",
":",
"name",
"=",
"StringToMakefileVariable",
"(",
"'%s_%s'",
"%",
"(",
"self",
".",
"qualified_target",
",",
"action",
"[",
"'action_name'",
"]",
")",
")",
"self",
".",
"WriteLn",
"(",
"'### Rules for action \"%s\":'",
"%",
"action",
"[",
"'action_name'",
"]",
")",
"inputs",
"=",
"action",
"[",
"'inputs'",
"]",
"outputs",
"=",
"action",
"[",
"'outputs'",
"]",
"# Build up a list of outputs.",
"# Collect the output dirs we'll need.",
"dirs",
"=",
"set",
"(",
")",
"for",
"out",
"in",
"outputs",
":",
"dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"out",
")",
"[",
"0",
"]",
"if",
"dir",
":",
"dirs",
".",
"add",
"(",
"dir",
")",
"if",
"int",
"(",
"action",
".",
"get",
"(",
"'process_outputs_as_sources'",
",",
"False",
")",
")",
":",
"extra_sources",
"+=",
"outputs",
"if",
"int",
"(",
"action",
".",
"get",
"(",
"'process_outputs_as_mac_bundle_resources'",
",",
"False",
")",
")",
":",
"extra_mac_bundle_resources",
"+=",
"outputs",
"# Write the actual command.",
"action_commands",
"=",
"action",
"[",
"'action'",
"]",
"if",
"self",
".",
"flavor",
"==",
"'mac'",
":",
"action_commands",
"=",
"[",
"gyp",
".",
"xcode_emulation",
".",
"ExpandEnvVars",
"(",
"command",
",",
"env",
")",
"for",
"command",
"in",
"action_commands",
"]",
"command",
"=",
"gyp",
".",
"common",
".",
"EncodePOSIXShellList",
"(",
"action_commands",
")",
"if",
"'message'",
"in",
"action",
":",
"self",
".",
"WriteLn",
"(",
"'quiet_cmd_%s = ACTION %s $@'",
"%",
"(",
"name",
",",
"action",
"[",
"'message'",
"]",
")",
")",
"else",
":",
"self",
".",
"WriteLn",
"(",
"'quiet_cmd_%s = ACTION %s $@'",
"%",
"(",
"name",
",",
"name",
")",
")",
"if",
"len",
"(",
"dirs",
")",
">",
"0",
":",
"command",
"=",
"'mkdir -p %s'",
"%",
"' '",
".",
"join",
"(",
"dirs",
")",
"+",
"'; '",
"+",
"command",
"cd_action",
"=",
"'cd %s; '",
"%",
"Sourceify",
"(",
"self",
".",
"path",
"or",
"'.'",
")",
"# command and cd_action get written to a toplevel variable called",
"# cmd_foo. Toplevel variables can't handle things that change per",
"# makefile like $(TARGET), so hardcode the target.",
"command",
"=",
"command",
".",
"replace",
"(",
"'$(TARGET)'",
",",
"self",
".",
"target",
")",
"cd_action",
"=",
"cd_action",
".",
"replace",
"(",
"'$(TARGET)'",
",",
"self",
".",
"target",
")",
"# Set LD_LIBRARY_PATH in case the action runs an executable from this",
"# build which links to shared libs from this build.",
"# actions run on the host, so they should in theory only use host",
"# libraries, but until everything is made cross-compile safe, also use",
"# target libraries.",
"# TODO(piman): when everything is cross-compile safe, remove lib.target",
"self",
".",
"WriteLn",
"(",
"'cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'",
"'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '",
"'export LD_LIBRARY_PATH; '",
"'%s%s'",
"%",
"(",
"name",
",",
"cd_action",
",",
"command",
")",
")",
"self",
".",
"WriteLn",
"(",
")",
"outputs",
"=",
"[",
"self",
".",
"Absolutify",
"(",
"output",
")",
"for",
"output",
"in",
"outputs",
"]",
"# The makefile rules are all relative to the top dir, but the gyp actions",
"# are defined relative to their containing dir. This replaces the obj",
"# variable for the action rule with an absolute version so that the output",
"# goes in the right place.",
"# Only write the 'obj' and 'builddir' rules for the \"primary\" output (:1);",
"# it's superfluous for the \"extra outputs\", and this avoids accidentally",
"# writing duplicate dummy rules for those outputs.",
"# Same for environment.",
"self",
".",
"WriteLn",
"(",
"\"%s: obj := $(abs_obj)\"",
"%",
"QuoteSpaces",
"(",
"outputs",
"[",
"0",
"]",
")",
")",
"self",
".",
"WriteLn",
"(",
"\"%s: builddir := $(abs_builddir)\"",
"%",
"QuoteSpaces",
"(",
"outputs",
"[",
"0",
"]",
")",
")",
"self",
".",
"WriteSortedXcodeEnv",
"(",
"outputs",
"[",
"0",
"]",
",",
"self",
".",
"GetSortedXcodeEnv",
"(",
")",
")",
"for",
"input",
"in",
"inputs",
":",
"assert",
"' '",
"not",
"in",
"input",
",",
"(",
"\"Spaces in action input filenames not supported (%s)\"",
"%",
"input",
")",
"for",
"output",
"in",
"outputs",
":",
"assert",
"' '",
"not",
"in",
"output",
",",
"(",
"\"Spaces in action output filenames not supported (%s)\"",
"%",
"output",
")",
"# See the comment in WriteCopies about expanding env vars.",
"outputs",
"=",
"[",
"gyp",
".",
"xcode_emulation",
".",
"ExpandEnvVars",
"(",
"o",
",",
"env",
")",
"for",
"o",
"in",
"outputs",
"]",
"inputs",
"=",
"[",
"gyp",
".",
"xcode_emulation",
".",
"ExpandEnvVars",
"(",
"i",
",",
"env",
")",
"for",
"i",
"in",
"inputs",
"]",
"self",
".",
"WriteDoCmd",
"(",
"outputs",
",",
"[",
"Sourceify",
"(",
"self",
".",
"Absolutify",
"(",
"i",
")",
")",
"for",
"i",
"in",
"inputs",
"]",
",",
"part_of_all",
"=",
"part_of_all",
",",
"command",
"=",
"name",
")",
"# Stuff the outputs in a variable so we can refer to them later.",
"outputs_variable",
"=",
"'action_%s_outputs'",
"%",
"name",
"self",
".",
"WriteLn",
"(",
"'%s := %s'",
"%",
"(",
"outputs_variable",
",",
"' '",
".",
"join",
"(",
"outputs",
")",
")",
")",
"extra_outputs",
".",
"append",
"(",
"'$(%s)'",
"%",
"outputs_variable",
")",
"self",
".",
"WriteLn",
"(",
")",
"self",
".",
"WriteLn",
"(",
")"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py#L889-L986 |
||
OSGeo/gdal | 3748fc4ba4fba727492774b2b908a2130c864a83 | swig/python/osgeo/gdal.py | python | Band.GetColorTable | (self, *args) | return _gdal.Band_GetColorTable(self, *args) | r"""GetColorTable(Band self) -> ColorTable | r"""GetColorTable(Band self) -> ColorTable | [
"r",
"GetColorTable",
"(",
"Band",
"self",
")",
"-",
">",
"ColorTable"
] | def GetColorTable(self, *args):
r"""GetColorTable(Band self) -> ColorTable"""
return _gdal.Band_GetColorTable(self, *args) | [
"def",
"GetColorTable",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_gdal",
".",
"Band_GetColorTable",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/gdal.py#L3512-L3514 |
|
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/incubate/fleet/base/role_maker.py | python | MPISymetricRoleMaker._all_reduce | (self, input, output, mode="sum") | all reduce between trainers if current role is TRAINER,
only supports arrays of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max" | all reduce between trainers if current role is TRAINER,
only supports arrays of one dim. | [
"all",
"reduce",
"between",
"trainers",
"if",
"current",
"role",
"is",
"TRAINER",
"only",
"support",
"array",
"of",
"one",
"dim",
"."
] | def _all_reduce(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only supports arrays of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self._role_is_generated:
self.generate_role()
if mode == "sum":
mode = self.MPI.SUM
elif mode == "max":
mode = self.MPI.MAX
elif mode == "min":
mode = self.MPI.MIN
else:
raise ValueError("unknown mode: %s" % mode)
self._node_type_comm.Allreduce(input, output, op=mode) | [
"def",
"_all_reduce",
"(",
"self",
",",
"input",
",",
"output",
",",
"mode",
"=",
"\"sum\"",
")",
":",
"if",
"not",
"self",
".",
"_role_is_generated",
":",
"self",
".",
"generate_role",
"(",
")",
"if",
"mode",
"==",
"\"sum\"",
":",
"mode",
"=",
"self",
".",
"MPI",
".",
"SUM",
"elif",
"mode",
"==",
"\"max\"",
":",
"mode",
"=",
"self",
".",
"MPI",
".",
"MAX",
"elif",
"mode",
"==",
"\"min\"",
":",
"mode",
"=",
"self",
".",
"MPI",
".",
"MIN",
"else",
":",
"raise",
"ValueError",
"(",
"\"unknown mode: %s\"",
"%",
"mode",
")",
"self",
".",
"_node_type_comm",
".",
"Allreduce",
"(",
"input",
",",
"output",
",",
"op",
"=",
"mode",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/incubate/fleet/base/role_maker.py#L419-L439 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib2to3/pytree.py | python | Base.pre_order | (self) | Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass. | Return a pre-order iterator for the tree. | [
"Return",
"a",
"pre",
"-",
"order",
"iterator",
"for",
"the",
"tree",
"."
] | def pre_order(self):
"""
Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError | [
"def",
"pre_order",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib2to3/pytree.py#L104-L110 |
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/gradients_util.py | python | _GetGrad | (grads, t, unconnected_gradients) | return t_grad | Gets gradient for tensor "t". | Gets gradient for tensor "t". | [
"Gets",
"gradient",
"for",
"tensor",
"t",
"."
] | def _GetGrad(grads, t, unconnected_gradients):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
if unconnected_gradients == UnconnectedGradients.ZERO:
t_dtype = default_gradient.get_zeros_dtype(t)
if t.dtype == dtypes.resource:
return array_ops.zeros(
resource_variable_ops.variable_shape(t), dtype=t_dtype)
else:
return array_ops.zeros_like(t, dtype=t_dtype)
elif unconnected_gradients == UnconnectedGradients.NONE:
return None
else:
raise ValueError(
"Unknown value for unconnected_gradients: %r" % unconnected_gradients)
t_grad = op_grads[t.value_index]
assert not isinstance(
t_grad, list), ("gradients list should have been aggregated by now.")
return t_grad | [
"def",
"_GetGrad",
"(",
"grads",
",",
"t",
",",
"unconnected_gradients",
")",
":",
"op",
"=",
"t",
".",
"op",
"op_grads",
"=",
"grads",
".",
"get",
"(",
"op",
")",
"if",
"not",
"op_grads",
":",
"if",
"unconnected_gradients",
"==",
"UnconnectedGradients",
".",
"ZERO",
":",
"t_dtype",
"=",
"default_gradient",
".",
"get_zeros_dtype",
"(",
"t",
")",
"if",
"t",
".",
"dtype",
"==",
"dtypes",
".",
"resource",
":",
"return",
"array_ops",
".",
"zeros",
"(",
"resource_variable_ops",
".",
"variable_shape",
"(",
"t",
")",
",",
"dtype",
"=",
"t_dtype",
")",
"else",
":",
"return",
"array_ops",
".",
"zeros_like",
"(",
"t",
",",
"dtype",
"=",
"t_dtype",
")",
"elif",
"unconnected_gradients",
"==",
"UnconnectedGradients",
".",
"NONE",
":",
"return",
"None",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown value for unconnected_gradients: %r\"",
"%",
"unconnected_gradients",
")",
"t_grad",
"=",
"op_grads",
"[",
"t",
".",
"value_index",
"]",
"assert",
"not",
"isinstance",
"(",
"t_grad",
",",
"list",
")",
",",
"(",
"\"gradients list should have been aggregated by now.\"",
")",
"return",
"t_grad"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/gradients_util.py#L794-L815 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/combo.py | python | ComboPopup.DestroyPopup | (*args, **kwargs) | return _combo.ComboPopup_DestroyPopup(*args, **kwargs) | DestroyPopup(self) | DestroyPopup(self) | [
"DestroyPopup",
"(",
"self",
")"
] | def DestroyPopup(*args, **kwargs):
"""DestroyPopup(self)"""
return _combo.ComboPopup_DestroyPopup(*args, **kwargs) | [
"def",
"DestroyPopup",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_combo",
".",
"ComboPopup_DestroyPopup",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/combo.py#L633-L635 |
|
google/llvm-propeller | 45c226984fe8377ebfb2ad7713c680d652ba678d | compiler-rt/lib/sanitizer_common/scripts/cpplint.py | python | CheckCommaSpacing | (filename, clean_lines, linenum, error) | Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Checks for horizontal spacing near commas and semicolons. | [
"Checks",
"for",
"horizontal",
"spacing",
"near",
"commas",
"and",
"semicolons",
"."
] | def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;') | [
"def",
"CheckCommaSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"raw",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# You should always have a space after a comma (either as fn arg or operator)",
"#",
"# This does not apply when the non-space character following the",
"# comma is another comma, since the only time when that happens is",
"# for empty macro arguments.",
"#",
"# We run this check in two passes: first pass on elided lines to",
"# verify that lines contain missing whitespaces, second pass on raw",
"# lines to confirm that those missing whitespaces are not due to",
"# elided comments.",
"if",
"(",
"Search",
"(",
"r',[^,\\s]'",
",",
"ReplaceAll",
"(",
"r'\\boperator\\s*,\\s*\\('",
",",
"'F('",
",",
"line",
")",
")",
"and",
"Search",
"(",
"r',[^,\\s]'",
",",
"raw",
"[",
"linenum",
"]",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/comma'",
",",
"3",
",",
"'Missing space after ,'",
")",
"# You should always have a space after a semicolon",
"# except for few corner cases",
"# TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more",
"# space after ;",
"if",
"Search",
"(",
"r';[^\\s};\\\\)/]'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/semicolon'",
",",
"3",
",",
"'Missing space after ;'",
")"
] | https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/compiler-rt/lib/sanitizer_common/scripts/cpplint.py#L3452-L3485 |
||
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/ops/_op_impl/aicpu/uniform_real.py | python | _uniform_real_aicpu | () | return | RandomUniformReal AiCPU register | RandomUniformReal AiCPU register | [
"RandomUniformReal",
"AiCPU",
"register"
] | def _uniform_real_aicpu():
"""RandomUniformReal AiCPU register"""
return | [
"def",
"_uniform_real_aicpu",
"(",
")",
":",
"return"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_op_impl/aicpu/uniform_real.py#L31-L33 |
|
panda3d/panda3d | 833ad89ebad58395d0af0b7ec08538e5e4308265 | direct/src/controls/PhysicsWalker.py | python | PhysicsWalker.displayDebugInfo | (self) | For debug use. | For debug use. | [
"For",
"debug",
"use",
"."
] | def displayDebugInfo(self):
"""
For debug use.
"""
onScreenDebug.add("w controls", "PhysicsWalker")
if self.useLifter:
onScreenDebug.add("w airborneHeight", self.lifter.getAirborneHeight())
onScreenDebug.add("w isOnGround", self.lifter.isOnGround())
#onScreenDebug.add("w gravity", self.lifter.getGravity())
onScreenDebug.add("w contact normal", self.lifter.getContactNormal().pPrintValues())
onScreenDebug.add("w impact", self.lifter.getImpactVelocity())
onScreenDebug.add("w velocity", self.lifter.getVelocity())
onScreenDebug.add("w hasContact", self.lifter.hasContact())
#onScreenDebug.add("w falling", self.falling)
#onScreenDebug.add("w jumpForce", self.avatarControlJumpForce)
#onScreenDebug.add("w mayJump", self.mayJump)
onScreenDebug.add("w isAirborne", self.isAirborne) | [
"def",
"displayDebugInfo",
"(",
"self",
")",
":",
"onScreenDebug",
".",
"add",
"(",
"\"w controls\"",
",",
"\"PhysicsWalker\"",
")",
"if",
"self",
".",
"useLifter",
":",
"onScreenDebug",
".",
"add",
"(",
"\"w airborneHeight\"",
",",
"self",
".",
"lifter",
".",
"getAirborneHeight",
"(",
")",
")",
"onScreenDebug",
".",
"add",
"(",
"\"w isOnGround\"",
",",
"self",
".",
"lifter",
".",
"isOnGround",
"(",
")",
")",
"#onScreenDebug.add(\"w gravity\", self.lifter.getGravity())",
"onScreenDebug",
".",
"add",
"(",
"\"w contact normal\"",
",",
"self",
".",
"lifter",
".",
"getContactNormal",
"(",
")",
".",
"pPrintValues",
"(",
")",
")",
"onScreenDebug",
".",
"add",
"(",
"\"w impact\"",
",",
"self",
".",
"lifter",
".",
"getImpactVelocity",
"(",
")",
")",
"onScreenDebug",
".",
"add",
"(",
"\"w velocity\"",
",",
"self",
".",
"lifter",
".",
"getVelocity",
"(",
")",
")",
"onScreenDebug",
".",
"add",
"(",
"\"w hasContact\"",
",",
"self",
".",
"lifter",
".",
"hasContact",
"(",
")",
")",
"#onScreenDebug.add(\"w falling\", self.falling)",
"#onScreenDebug.add(\"w jumpForce\", self.avatarControlJumpForce)",
"#onScreenDebug.add(\"w mayJump\", self.mayJump)",
"onScreenDebug",
".",
"add",
"(",
"\"w isAirborne\"",
",",
"self",
".",
"isAirborne",
")"
] | https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/controls/PhysicsWalker.py#L373-L390 |
||
SequoiaDB/SequoiaDB | 2894ed7e5bd6fe57330afc900cf76d0ff0df9f64 | tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py | python | xmlNs.setNs | (self, node) | Associate a namespace to a node, a posteriori. | Associate a namespace to a node, a posteriori. | [
"Associate",
"a",
"namespace",
"to",
"a",
"node",
"a",
"posteriori",
"."
] | def setNs(self, node):
"""Associate a namespace to a node, a posteriori. """
if node is None: node__o = None
else: node__o = node._o
libxml2mod.xmlSetNs(node__o, self._o) | [
"def",
"setNs",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
"is",
"None",
":",
"node__o",
"=",
"None",
"else",
":",
"node__o",
"=",
"node",
".",
"_o",
"libxml2mod",
".",
"xmlSetNs",
"(",
"node__o",
",",
"self",
".",
"_o",
")"
] | https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L5942-L5946 |
||
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/python/summary/impl/io_wrapper.py | python | ListDirectoryAbsolute | (directory) | Yields all files in the given directory. The paths are absolute. | Yields all files in the given directory. The paths are absolute. | [
"Yields",
"all",
"files",
"in",
"the",
"given",
"directory",
".",
"The",
"paths",
"are",
"absolute",
"."
] | def ListDirectoryAbsolute(directory):
"""Yields all files in the given directory. The paths are absolute."""
if gcs.IsGCSPath(directory):
return gcs.ListDirectory(directory)
else:
return (os.path.join(directory, path)
for path in gfile.ListDirectory(directory)) | [
"def",
"ListDirectoryAbsolute",
"(",
"directory",
")",
":",
"if",
"gcs",
".",
"IsGCSPath",
"(",
"directory",
")",
":",
"return",
"gcs",
".",
"ListDirectory",
"(",
"directory",
")",
"else",
":",
"return",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"path",
")",
"for",
"path",
"in",
"gfile",
".",
"ListDirectory",
"(",
"directory",
")",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/summary/impl/io_wrapper.py#L48-L54 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/uuid.py | python | uuid3 | (namespace, name) | return UUID(bytes=hash[:16], version=3) | Generate a UUID from the MD5 hash of a namespace UUID and a name. | Generate a UUID from the MD5 hash of a namespace UUID and a name. | [
"Generate",
"a",
"UUID",
"from",
"the",
"MD5",
"hash",
"of",
"a",
"namespace",
"UUID",
"and",
"a",
"name",
"."
] | def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
from hashlib import md5
hash = md5(namespace.bytes + bytes(name, "utf-8")).digest()
return UUID(bytes=hash[:16], version=3) | [
"def",
"uuid3",
"(",
"namespace",
",",
"name",
")",
":",
"from",
"hashlib",
"import",
"md5",
"hash",
"=",
"md5",
"(",
"namespace",
".",
"bytes",
"+",
"bytes",
"(",
"name",
",",
"\"utf-8\"",
")",
")",
".",
"digest",
"(",
")",
"return",
"UUID",
"(",
"bytes",
"=",
"hash",
"[",
":",
"16",
"]",
",",
"version",
"=",
"3",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/uuid.py#L753-L757 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/toasterbox.py | python | ToasterBox.SetPopupScrollSpeed | (self, speed) | Sets the :class:`ToasterBox` scroll speed.
:param `speed`: it is the pause time (in milliseconds) for every step in the
`ScrollUp` method. | Sets the :class:`ToasterBox` scroll speed. | [
"Sets",
"the",
":",
"class",
":",
"ToasterBox",
"scroll",
"speed",
"."
] | def SetPopupScrollSpeed(self, speed):
"""
Sets the :class:`ToasterBox` scroll speed.
:param `speed`: it is the pause time (in milliseconds) for every step in the
`ScrollUp` method.
"""
self._sleeptime = speed | [
"def",
"SetPopupScrollSpeed",
"(",
"self",
",",
"speed",
")",
":",
"self",
".",
"_sleeptime",
"=",
"speed"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/toasterbox.py#L473-L481 |
||
ceph/ceph | 959663007321a369c83218414a29bd9dbc8bda3a | src/pybind/mgr/dashboard/controllers/_version.py | python | APIVersion.from_string | (cls, version_string: str) | return cls._make(int(s) for s in version_string.split('.')) | >>> APIVersion.from_string("1.0")
APIVersion(major=1, minor=0) | >>> APIVersion.from_string("1.0")
APIVersion(major=1, minor=0) | [
">>>",
"APIVersion",
".",
"from_string",
"(",
"1",
".",
"0",
")",
"APIVersion",
"(",
"major",
"=",
"1",
"minor",
"=",
"0",
")"
] | def from_string(cls, version_string: str) -> 'APIVersion':
"""
>>> APIVersion.from_string("1.0")
APIVersion(major=1, minor=0)
"""
return cls._make(int(s) for s in version_string.split('.')) | [
"def",
"from_string",
"(",
"cls",
",",
"version_string",
":",
"str",
")",
"->",
"'APIVersion'",
":",
"return",
"cls",
".",
"_make",
"(",
"int",
"(",
"s",
")",
"for",
"s",
"in",
"version_string",
".",
"split",
"(",
"'.'",
")",
")"
] | https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/dashboard/controllers/_version.py#L27-L32 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_windows.py | python | Frame.DoMenuUpdates | (*args, **kwargs) | return _windows_.Frame_DoMenuUpdates(*args, **kwargs) | DoMenuUpdates(self, Menu menu=None) | DoMenuUpdates(self, Menu menu=None) | [
"DoMenuUpdates",
"(",
"self",
"Menu",
"menu",
"=",
"None",
")"
] | def DoMenuUpdates(*args, **kwargs):
"""DoMenuUpdates(self, Menu menu=None)"""
return _windows_.Frame_DoMenuUpdates(*args, **kwargs) | [
"def",
"DoMenuUpdates",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"Frame_DoMenuUpdates",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L665-L667 |
|
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8/tools/run_perf.py | python | AccumulateGenericResults | (graph_names, suite_units, iter_output) | return reduce(lambda r, t: r + t, traces.itervalues(), Results()) | Iterates over the output of multiple benchmark reruns and accumulates
generic results.
Args:
graph_names: List of names that configure the base path of the traces. E.g.
['v8', 'Octane'].
suite_units: Measurement default units as defined by the benchmark suite.
iter_output: Iterator over the standard output of each test run.
Returns: A "Results" object. | Iterates over the output of multiple benchmark reruns and accumulates
generic results. | [
"Iterates",
"over",
"the",
"output",
"of",
"multiple",
"benchmark",
"reruns",
"and",
"accumulates",
"generic",
"results",
"."
] | def AccumulateGenericResults(graph_names, suite_units, iter_output):
"""Iterates over the output of multiple benchmark reruns and accumulates
generic results.
Args:
graph_names: List of names that configure the base path of the traces. E.g.
['v8', 'Octane'].
suite_units: Measurement default units as defined by the benchmark suite.
iter_output: Iterator over the standard output of each test run.
Returns: A "Results" object.
"""
traces = OrderedDict()
for stdout in iter_output():
if stdout is None:
# The None value is used as a null object to simplify logic.
continue
for line in stdout.strip().splitlines():
match = GENERIC_RESULTS_RE.match(line)
if match:
stddev = ""
graph = match.group(1)
trace = match.group(2)
body = match.group(3)
units = match.group(4)
match_stddev = RESULT_STDDEV_RE.match(body)
match_list = RESULT_LIST_RE.match(body)
errors = []
if match_stddev:
result, stddev = map(str.strip, match_stddev.group(1).split(","))
results = [result]
elif match_list:
results = map(str.strip, match_list.group(1).split(","))
else:
results = [body.strip()]
try:
results = map(lambda r: str(float(r)), results)
except ValueError:
results = []
errors = ["Found non-numeric in %s" %
"/".join(graph_names + [graph, trace])]
trace_result = traces.setdefault(trace, Results([{
"graphs": graph_names + [graph, trace],
"units": (units or suite_units).strip(),
"results": [],
"stddev": "",
}], errors))
trace_result.traces[0]["results"].extend(results)
trace_result.traces[0]["stddev"] = stddev
return reduce(lambda r, t: r + t, traces.itervalues(), Results()) | [
"def",
"AccumulateGenericResults",
"(",
"graph_names",
",",
"suite_units",
",",
"iter_output",
")",
":",
"traces",
"=",
"OrderedDict",
"(",
")",
"for",
"stdout",
"in",
"iter_output",
"(",
")",
":",
"if",
"stdout",
"is",
"None",
":",
"# The None value is used as a null object to simplify logic.",
"continue",
"for",
"line",
"in",
"stdout",
".",
"strip",
"(",
")",
".",
"splitlines",
"(",
")",
":",
"match",
"=",
"GENERIC_RESULTS_RE",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"stddev",
"=",
"\"\"",
"graph",
"=",
"match",
".",
"group",
"(",
"1",
")",
"trace",
"=",
"match",
".",
"group",
"(",
"2",
")",
"body",
"=",
"match",
".",
"group",
"(",
"3",
")",
"units",
"=",
"match",
".",
"group",
"(",
"4",
")",
"match_stddev",
"=",
"RESULT_STDDEV_RE",
".",
"match",
"(",
"body",
")",
"match_list",
"=",
"RESULT_LIST_RE",
".",
"match",
"(",
"body",
")",
"errors",
"=",
"[",
"]",
"if",
"match_stddev",
":",
"result",
",",
"stddev",
"=",
"map",
"(",
"str",
".",
"strip",
",",
"match_stddev",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"\",\"",
")",
")",
"results",
"=",
"[",
"result",
"]",
"elif",
"match_list",
":",
"results",
"=",
"map",
"(",
"str",
".",
"strip",
",",
"match_list",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"\",\"",
")",
")",
"else",
":",
"results",
"=",
"[",
"body",
".",
"strip",
"(",
")",
"]",
"try",
":",
"results",
"=",
"map",
"(",
"lambda",
"r",
":",
"str",
"(",
"float",
"(",
"r",
")",
")",
",",
"results",
")",
"except",
"ValueError",
":",
"results",
"=",
"[",
"]",
"errors",
"=",
"[",
"\"Found non-numeric in %s\"",
"%",
"\"/\"",
".",
"join",
"(",
"graph_names",
"+",
"[",
"graph",
",",
"trace",
"]",
")",
"]",
"trace_result",
"=",
"traces",
".",
"setdefault",
"(",
"trace",
",",
"Results",
"(",
"[",
"{",
"\"graphs\"",
":",
"graph_names",
"+",
"[",
"graph",
",",
"trace",
"]",
",",
"\"units\"",
":",
"(",
"units",
"or",
"suite_units",
")",
".",
"strip",
"(",
")",
",",
"\"results\"",
":",
"[",
"]",
",",
"\"stddev\"",
":",
"\"\"",
",",
"}",
"]",
",",
"errors",
")",
")",
"trace_result",
".",
"traces",
"[",
"0",
"]",
"[",
"\"results\"",
"]",
".",
"extend",
"(",
"results",
")",
"trace_result",
".",
"traces",
"[",
"0",
"]",
"[",
"\"stddev\"",
"]",
"=",
"stddev",
"return",
"reduce",
"(",
"lambda",
"r",
",",
"t",
":",
"r",
"+",
"t",
",",
"traces",
".",
"itervalues",
"(",
")",
",",
"Results",
"(",
")",
")"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/v8/tools/run_perf.py#L288-L339 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/Tkinter.py | python | Wm.wm_aspect | (self,
minNumer=None, minDenom=None,
maxNumer=None, maxDenom=None) | return self._getints(
self.tk.call('wm', 'aspect', self._w,
minNumer, minDenom,
maxNumer, maxDenom)) | Instruct the window manager to set the aspect ratio (width/height)
of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
of the actual values if no argument is given. | Instruct the window manager to set the aspect ratio (width/height)
of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
of the actual values if no argument is given. | [
"Instruct",
"the",
"window",
"manager",
"to",
"set",
"the",
"aspect",
"ratio",
"(",
"width",
"/",
"height",
")",
"of",
"this",
"widget",
"to",
"be",
"between",
"MINNUMER",
"/",
"MINDENOM",
"and",
"MAXNUMER",
"/",
"MAXDENOM",
".",
"Return",
"a",
"tuple",
"of",
"the",
"actual",
"values",
"if",
"no",
"argument",
"is",
"given",
"."
] | def wm_aspect(self,
minNumer=None, minDenom=None,
maxNumer=None, maxDenom=None):
"""Instruct the window manager to set the aspect ratio (width/height)
of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
of the actual values if no argument is given."""
return self._getints(
self.tk.call('wm', 'aspect', self._w,
minNumer, minDenom,
maxNumer, maxDenom)) | [
"def",
"wm_aspect",
"(",
"self",
",",
"minNumer",
"=",
"None",
",",
"minDenom",
"=",
"None",
",",
"maxNumer",
"=",
"None",
",",
"maxDenom",
"=",
"None",
")",
":",
"return",
"self",
".",
"_getints",
"(",
"self",
".",
"tk",
".",
"call",
"(",
"'wm'",
",",
"'aspect'",
",",
"self",
".",
"_w",
",",
"minNumer",
",",
"minDenom",
",",
"maxNumer",
",",
"maxDenom",
")",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/Tkinter.py#L1522-L1531 |
|
epiqc/ScaffCC | 66a79944ee4cd116b27bc1a69137276885461db8 | clang/bindings/python/clang/cindex.py | python | Type.get_named_type | (self) | return conf.lib.clang_Type_getNamedType(self) | Retrieve the type named by the qualified-id. | Retrieve the type named by the qualified-id. | [
"Retrieve",
"the",
"type",
"named",
"by",
"the",
"qualified",
"-",
"id",
"."
] | def get_named_type(self):
"""
Retrieve the type named by the qualified-id.
"""
return conf.lib.clang_Type_getNamedType(self) | [
"def",
"get_named_type",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_Type_getNamedType",
"(",
"self",
")"
] | https://github.com/epiqc/ScaffCC/blob/66a79944ee4cd116b27bc1a69137276885461db8/clang/bindings/python/clang/cindex.py#L2371-L2375 |
|
avast/retdec | b9879088a5f0278508185ec645494e6c5c57a455 | scripts/retdec-unpacker.py | python | Unpacker._unpack | (self, output) | Try to unpack the given file. | Try to unpack the given file. | [
"Try",
"to",
"unpack",
"the",
"given",
"file",
"."
] | def _unpack(self, output):
"""Try to unpack the given file.
"""
unpacker_params = [self.input, '-o', output]
if self.args.max_memory:
unpacker_params.extend(['--max-memory', self.args.max_memory])
elif self.args.max_memory_half_ram:
unpacker_params.append('--max-memory-half-ram')
self._print('\n##### Trying to unpack ' + self.input + ' into ' + output + ' by using generic unpacker...')
out, unpacker_rc, _ = CmdRunner.run_cmd([UNPACKER] + unpacker_params, buffer_output=True, print_run_msg=True)
self._print(out)
if unpacker_rc == self.UNPACKER_EXIT_CODE_OK:
self._print('##### Unpacking by using generic unpacker: successfully unpacked')
return self.unpacker_output, self.RET_UNPACK_OK
elif unpacker_rc == self.UNPACKER_EXIT_CODE_NOTHING_TO_DO:
self._print('##### Unpacking by using generic unpacker: nothing to do')
else:
# Do not return -> try the next unpacker
self._print('##### Unpacking by using generic unpacker: failed')
if utils.tool_exists('upx'):
# Do not return -> try the next unpacker
# Try to unpack via UPX
self._print('\n##### Trying to unpack ' + self.input + ' into ' + output + ' by using UPX...')
out, upx_rc, _ = CmdRunner.run_cmd(['upx', '-d', self.input, '-o', output], buffer_output=True, discard_stdout=True, print_run_msg=True)
self._print(out)
if upx_rc == 0:
self._print('##### Unpacking by using UPX: successfully unpacked')
if self.args.extended_exit_codes:
if unpacker_rc == self.UNPACKER_EXIT_CODE_NOTHING_TO_DO:
return self.unpacker_output, self.RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK
elif unpacker_rc >= self.UNPACKER_EXIT_CODE_UNPACKING_FAILED:
return self.unpacker_output, self.RET_UNPACKER_FAILED_OTHERS_OK
else:
return self.unpacker_output, self.RET_UNPACK_OK
else:
# We cannot distinguish whether upx failed or the input file was
# not upx-packed
self._print('##### Unpacking by using UPX: nothing to do')
else:
self._print('##### \'upx\' not available: nothing to do')
if unpacker_rc >= self.UNPACKER_EXIT_CODE_UNPACKING_FAILED:
return self.unpacker_output, self.RET_UNPACKER_FAILED
else:
return self.unpacker_output, self.RET_NOTHING_TO_DO | [
"def",
"_unpack",
"(",
"self",
",",
"output",
")",
":",
"unpacker_params",
"=",
"[",
"self",
".",
"input",
",",
"'-o'",
",",
"output",
"]",
"if",
"self",
".",
"args",
".",
"max_memory",
":",
"unpacker_params",
".",
"extend",
"(",
"[",
"'--max-memory'",
",",
"self",
".",
"args",
".",
"max_memory",
"]",
")",
"elif",
"self",
".",
"args",
".",
"max_memory_half_ram",
":",
"unpacker_params",
".",
"append",
"(",
"'--max-memory-half-ram'",
")",
"self",
".",
"_print",
"(",
"'\\n##### Trying to unpack '",
"+",
"self",
".",
"input",
"+",
"' into '",
"+",
"output",
"+",
"' by using generic unpacker...'",
")",
"out",
",",
"unpacker_rc",
",",
"_",
"=",
"CmdRunner",
".",
"run_cmd",
"(",
"[",
"UNPACKER",
"]",
"+",
"unpacker_params",
",",
"buffer_output",
"=",
"True",
",",
"print_run_msg",
"=",
"True",
")",
"self",
".",
"_print",
"(",
"out",
")",
"if",
"unpacker_rc",
"==",
"self",
".",
"UNPACKER_EXIT_CODE_OK",
":",
"self",
".",
"_print",
"(",
"'##### Unpacking by using generic unpacker: successfully unpacked'",
")",
"return",
"self",
".",
"unpacker_output",
",",
"self",
".",
"RET_UNPACK_OK",
"elif",
"unpacker_rc",
"==",
"self",
".",
"UNPACKER_EXIT_CODE_NOTHING_TO_DO",
":",
"self",
".",
"_print",
"(",
"'##### Unpacking by using generic unpacker: nothing to do'",
")",
"else",
":",
"# Do not return -> try the next unpacker",
"self",
".",
"_print",
"(",
"'##### Unpacking by using generic unpacker: failed'",
")",
"if",
"utils",
".",
"tool_exists",
"(",
"'upx'",
")",
":",
"# Do not return -> try the next unpacker",
"# Try to unpack via UPX",
"self",
".",
"_print",
"(",
"'\\n##### Trying to unpack '",
"+",
"self",
".",
"input",
"+",
"' into '",
"+",
"output",
"+",
"' by using UPX...'",
")",
"out",
",",
"upx_rc",
",",
"_",
"=",
"CmdRunner",
".",
"run_cmd",
"(",
"[",
"'upx'",
",",
"'-d'",
",",
"self",
".",
"input",
",",
"'-o'",
",",
"output",
"]",
",",
"buffer_output",
"=",
"True",
",",
"discard_stdout",
"=",
"True",
",",
"print_run_msg",
"=",
"True",
")",
"self",
".",
"_print",
"(",
"out",
")",
"if",
"upx_rc",
"==",
"0",
":",
"self",
".",
"_print",
"(",
"'##### Unpacking by using UPX: successfully unpacked'",
")",
"if",
"self",
".",
"args",
".",
"extended_exit_codes",
":",
"if",
"unpacker_rc",
"==",
"self",
".",
"UNPACKER_EXIT_CODE_NOTHING_TO_DO",
":",
"return",
"self",
".",
"unpacker_output",
",",
"self",
".",
"RET_UNPACKER_NOTHING_TO_DO_OTHERS_OK",
"elif",
"unpacker_rc",
">=",
"self",
".",
"UNPACKER_EXIT_CODE_UNPACKING_FAILED",
":",
"return",
"self",
".",
"unpacker_output",
",",
"self",
".",
"RET_UNPACKER_FAILED_OTHERS_OK",
"else",
":",
"return",
"self",
".",
"unpacker_output",
",",
"self",
".",
"RET_UNPACK_OK",
"else",
":",
"# We cannot distinguish whether upx failed or the input file was",
"# not upx-packed",
"self",
".",
"_print",
"(",
"'##### Unpacking by using UPX: nothing to do'",
")",
"else",
":",
"self",
".",
"_print",
"(",
"'##### \\'upx\\' not available: nothing to do'",
")",
"if",
"unpacker_rc",
">=",
"self",
".",
"UNPACKER_EXIT_CODE_UNPACKING_FAILED",
":",
"return",
"self",
".",
"unpacker_output",
",",
"self",
".",
"RET_UNPACKER_FAILED",
"else",
":",
"return",
"self",
".",
"unpacker_output",
",",
"self",
".",
"RET_NOTHING_TO_DO"
] | https://github.com/avast/retdec/blob/b9879088a5f0278508185ec645494e6c5c57a455/scripts/retdec-unpacker.py#L129-L179 |
||
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/ndarray/ndarray.py | python | NDArray.mish | (self, *args, **kwargs) | return op.mish(self, *args, **kwargs) | Convenience fluent method for :py:func:`mish`.
The arguments are the same as for :py:func:`mish`, with
this array as data. | Convenience fluent method for :py:func:`mish`. | [
"Convenience",
"fluent",
"method",
"for",
":",
"py",
":",
"func",
":",
"mish",
"."
] | def mish(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`mish`.
The arguments are the same as for :py:func:`mish`, with
this array as data.
"""
return op.mish(self, *args, **kwargs) | [
"def",
"mish",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"op",
".",
"mish",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/ndarray/ndarray.py#L2270-L2276 |
|
google/perfetto | fe68c7a7f7657aa71ced68efb126dcac4107c745 | python/perfetto/batch_trace_processor/api.py | python | BatchTraceProcessor.query_and_flatten | (self, sql: str) | return self.execute_and_flatten(lambda tp: tp.query(sql).
as_pandas_dataframe()) | Executes the provided SQL statement and flattens the result.
The execution happens in parallel across all the traces and the
resulting Pandas dataframes are flattened into a single dataframe.
Args:
sql: The SQL statement to execute.
Returns:
A concatenated Pandas dataframe containing the result of executing the
query across all the traces.
If a URI or a trace resolver was passed to the constructor, the
contents of the |metadata| dictionary emitted by the resolver will also
be emitted as extra columns (key being column name, value being the
value in the dataframe).
For example:
class CustomResolver(TraceResolver):
def resolve(self):
return [TraceResolver.Result(trace='/tmp/path',
metadata={
'path': '/tmp/path'
'foo': 'bar'
})]
with BatchTraceProcessor(CustomResolver()) as btp:
df = btp.query_and_flatten('select count(1) as cnt from slice')
Then df will look like this:
cnt path foo
100 /tmp/path bar
Raises:
TraceProcessorException: An error occurred running the query. | Executes the provided SQL statement and flattens the result. | [
"Executes",
"the",
"provided",
"SQL",
"statement",
"and",
"flattens",
"the",
"result",
"."
] | def query_and_flatten(self, sql: str):
"""Executes the provided SQL statement and flattens the result.
The execution happens in parallel across all the traces and the
resulting Pandas dataframes are flattened into a single dataframe.
Args:
sql: The SQL statement to execute.
Returns:
A concatenated Pandas dataframe containing the result of executing the
query across all the traces.
If a URI or a trace resolver was passed to the constructor, the
contents of the |metadata| dictionary emitted by the resolver will also
be emitted as extra columns (key being column name, value being the
value in the dataframe).
For example:
class CustomResolver(TraceResolver):
def resolve(self):
return [TraceResolver.Result(trace='/tmp/path',
metadata={
'path': '/tmp/path',
'foo': 'bar'
})]
with BatchTraceProcessor(CustomResolver()) as btp:
df = btp.query_and_flatten('select count(1) as cnt from slice')
Then df will look like this:
cnt path foo
100 /tmp/path bar
Raises:
TraceProcessorException: An error occurred running the query.
"""
return self.execute_and_flatten(lambda tp: tp.query(sql).
as_pandas_dataframe()) | [
"def",
"query_and_flatten",
"(",
"self",
",",
"sql",
":",
"str",
")",
":",
"return",
"self",
".",
"execute_and_flatten",
"(",
"lambda",
"tp",
":",
"tp",
".",
"query",
"(",
"sql",
")",
".",
"as_pandas_dataframe",
"(",
")",
")"
] | https://github.com/google/perfetto/blob/fe68c7a7f7657aa71ced68efb126dcac4107c745/python/perfetto/batch_trace_processor/api.py#L152-L190 |
|
livecode/livecode | 4606a10ea10b16d5071d0f9f263ccdd7ede8b31d | gyp/pylib/gyp/xcodeproj_file.py | python | XCObject.Name | (self) | Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed. | Return the name corresponding to an object. | [
"Return",
"the",
"name",
"corresponding",
"to",
"an",
"object",
"."
] | def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name') | [
"def",
"Name",
"(",
"self",
")",
":",
"# If the schema indicates that \"name\" is required, try to access the",
"# property even if it doesn't exist. This will result in a KeyError",
"# being raised for the property that should be present, which seems more",
"# appropriate than NotImplementedError in this case.",
"if",
"'name'",
"in",
"self",
".",
"_properties",
"or",
"(",
"'name'",
"in",
"self",
".",
"_schema",
"and",
"self",
".",
"_schema",
"[",
"'name'",
"]",
"[",
"3",
"]",
")",
":",
"return",
"self",
".",
"_properties",
"[",
"'name'",
"]",
"raise",
"NotImplementedError",
"(",
"self",
".",
"__class__",
".",
"__name__",
"+",
"' must implement Name'",
")"
] | https://github.com/livecode/livecode/blob/4606a10ea10b16d5071d0f9f263ccdd7ede8b31d/gyp/pylib/gyp/xcodeproj_file.py#L354-L369 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_gdi.py | python | NativeFontInfo.GetWeight | (*args, **kwargs) | return _gdi_.NativeFontInfo_GetWeight(*args, **kwargs) | GetWeight(self) -> int | GetWeight(self) -> int | [
"GetWeight",
"(",
"self",
")",
"-",
">",
"int"
] | def GetWeight(*args, **kwargs):
"""GetWeight(self) -> int"""
return _gdi_.NativeFontInfo_GetWeight(*args, **kwargs) | [
"def",
"GetWeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"NativeFontInfo_GetWeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L1978-L1980 |
|
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/python/requests/requests/packages/urllib3/util/ssl_.py | python | create_urllib3_context | (ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
options=None, ciphers=None) | return context | All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext | All arguments have the same meaning as ``ssl_wrap_socket``. | [
"All",
"arguments",
"have",
"the",
"same",
"meaning",
"as",
"ssl_wrap_socket",
"."
] | def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
return context | [
"def",
"create_urllib3_context",
"(",
"ssl_version",
"=",
"None",
",",
"cert_reqs",
"=",
"ssl",
".",
"CERT_REQUIRED",
",",
"options",
"=",
"None",
",",
"ciphers",
"=",
"None",
")",
":",
"context",
"=",
"SSLContext",
"(",
"ssl_version",
"or",
"ssl",
".",
"PROTOCOL_SSLv23",
")",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"0",
"# SSLv2 is easily broken and is considered harmful and dangerous",
"options",
"|=",
"OP_NO_SSLv2",
"# SSLv3 has several problems and is now dangerous",
"options",
"|=",
"OP_NO_SSLv3",
"# Disable compression to prevent CRIME attacks for OpenSSL 1.0+",
"# (issue #309)",
"options",
"|=",
"OP_NO_COMPRESSION",
"context",
".",
"options",
"|=",
"options",
"if",
"getattr",
"(",
"context",
",",
"'supports_set_ciphers'",
",",
"True",
")",
":",
"# Platform-specific: Python 2.6",
"context",
".",
"set_ciphers",
"(",
"ciphers",
"or",
"_DEFAULT_CIPHERS",
")",
"context",
".",
"verify_mode",
"=",
"cert_reqs",
"if",
"getattr",
"(",
"context",
",",
"'check_hostname'",
",",
"None",
")",
"is",
"not",
"None",
":",
"# Platform-specific: Python 3.2",
"context",
".",
"check_hostname",
"=",
"(",
"context",
".",
"verify_mode",
"==",
"ssl",
".",
"CERT_REQUIRED",
")",
"return",
"context"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/requests/requests/packages/urllib3/util/ssl_.py#L160-L215 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/apiclient/googleapiclient/discovery.py | python | ResourceMethodParameters.__init__ | (self, method_desc) | Constructor for ResourceMethodParameters.
Sets default values and defers to set_parameters to populate.
Args:
method_desc: Dictionary with metadata describing an API method. Value
comes from the dictionary of methods stored in the 'methods' key in
the deserialized discovery document. | Constructor for ResourceMethodParameters. | [
"Constructor",
"for",
"ResourceMethodParameters",
"."
] | def __init__(self, method_desc):
"""Constructor for ResourceMethodParameters.
Sets default values and defers to set_parameters to populate.
Args:
method_desc: Dictionary with metadata describing an API method. Value
comes from the dictionary of methods stored in the 'methods' key in
the deserialized discovery document.
"""
self.argmap = {}
self.required_params = []
self.repeated_params = []
self.pattern_params = {}
self.query_params = []
# TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
# parsing is gotten rid of.
self.path_params = set()
self.param_types = {}
self.enum_params = {}
self.set_parameters(method_desc) | [
"def",
"__init__",
"(",
"self",
",",
"method_desc",
")",
":",
"self",
".",
"argmap",
"=",
"{",
"}",
"self",
".",
"required_params",
"=",
"[",
"]",
"self",
".",
"repeated_params",
"=",
"[",
"]",
"self",
".",
"pattern_params",
"=",
"{",
"}",
"self",
".",
"query_params",
"=",
"[",
"]",
"# TODO(dhermes): Change path_params to a list if the extra URITEMPLATE",
"# parsing is gotten rid of.",
"self",
".",
"path_params",
"=",
"set",
"(",
")",
"self",
".",
"param_types",
"=",
"{",
"}",
"self",
".",
"enum_params",
"=",
"{",
"}",
"self",
".",
"set_parameters",
"(",
"method_desc",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/apiclient/googleapiclient/discovery.py#L540-L561 |
||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/catapult_build/module_finder.py | python | FindModule | (name) | return imp.find_module(name)[1] | Gets the path of the named module.
This is useful for cases where we want to use subprocess.call on a module we
have imported, and safer than using __file__ since that can point to .pyc
files.
Args:
name: the string name of a module (e.g. 'dev_appserver')
Returns:
The path to the module. | Gets the path of the named module. | [
"Gets",
"the",
"path",
"of",
"the",
"named",
"module",
"."
] | def FindModule(name):
"""Gets the path of the named module.
This is useful for cases where we want to use subprocess.call on a module we
have imported, and safer than using __file__ since that can point to .pyc
files.
Args:
name: the string name of a module (e.g. 'dev_appserver')
Returns:
The path to the module.
"""
return imp.find_module(name)[1] | [
"def",
"FindModule",
"(",
"name",
")",
":",
"return",
"imp",
".",
"find_module",
"(",
"name",
")",
"[",
"1",
"]"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/catapult_build/module_finder.py#L7-L19 |
|
hfinkel/llvm-project-cxxjit | 91084ef018240bbb8e24235ff5cd8c355a9c1a1e | lldb/examples/python/performance.py | python | MemoryMeasurement.__str__ | (self) | return s | Dump the MemoryMeasurement current value | Dump the MemoryMeasurement current value | [
"Dump",
"the",
"MemoryMeasurement",
"current",
"value"
] | def __str__(self):
'''Dump the MemoryMeasurement current value'''
s = ''
for key in self.value.keys():
if s:
s += "\n"
s += "%8s = %s" % (key, self.value[key])
return s | [
"def",
"__str__",
"(",
"self",
")",
":",
"s",
"=",
"''",
"for",
"key",
"in",
"self",
".",
"value",
".",
"keys",
"(",
")",
":",
"if",
"s",
":",
"s",
"+=",
"\"\\n\"",
"s",
"+=",
"\"%8s = %s\"",
"%",
"(",
"key",
",",
"self",
".",
"value",
"[",
"key",
"]",
")",
"return",
"s"
] | https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/lldb/examples/python/performance.py#L322-L329 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/coverage/coverage/python.py | python | PythonFileReporter.parser | (self) | return self._parser | Lazily create a :class:`PythonParser`. | Lazily create a :class:`PythonParser`. | [
"Lazily",
"create",
"a",
":",
"class",
":",
"PythonParser",
"."
] | def parser(self):
"""Lazily create a :class:`PythonParser`."""
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
exclude=self.coverage._exclude_regex('exclude'),
)
return self._parser | [
"def",
"parser",
"(",
"self",
")",
":",
"if",
"self",
".",
"_parser",
"is",
"None",
":",
"self",
".",
"_parser",
"=",
"PythonParser",
"(",
"filename",
"=",
"self",
".",
"filename",
",",
"exclude",
"=",
"self",
".",
"coverage",
".",
"_exclude_regex",
"(",
"'exclude'",
")",
",",
")",
"return",
"self",
".",
"_parser"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/coverage/coverage/python.py#L126-L133 |
|
generalized-intelligence/GAAS | 29ab17d3e8a4ba18edef3a57c36d8db6329fac73 | algorithms/src/LocalizationAndMapping/icp_lidar_localization/fast_gicp/thirdparty/pybind11/setup.py | python | TemporaryDirectory | () | Prepare a temporary directory, cleanup when done | Prepare a temporary directory, cleanup when done | [
"Prepare",
"a",
"temporary",
"directory",
"cleanup",
"when",
"done"
] | def TemporaryDirectory(): # noqa: N802
"Prepare a temporary directory, cleanup when done"
try:
tmpdir = tempfile.mkdtemp()
yield tmpdir
finally:
shutil.rmtree(tmpdir) | [
"def",
"TemporaryDirectory",
"(",
")",
":",
"# noqa: N802",
"try",
":",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"yield",
"tmpdir",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"tmpdir",
")"
] | https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/algorithms/src/LocalizationAndMapping/icp_lidar_localization/fast_gicp/thirdparty/pybind11/setup.py#L82-L88 |
||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/protorpc/protorpc/protojson.py | python | ProtoJson.decode_field | (self, field, value) | | return value | Decode a JSON value to a Python value.
Args:
field: A ProtoRPC field instance.
value: A serialized JSON value.
Return:
A Python value compatible with field. | Decode a JSON value to a Python value. | [
"Decode",
"a",
"JSON",
"value",
"to",
"a",
"python",
"value",
"."
] | def decode_field(self, field, value):
"""Decode a JSON value to a python value.
Args:
field: A ProtoRPC field instance.
value: A serialized JSON value.
Return:
A Python value compatible with field.
"""
if isinstance(field, messages.EnumField):
try:
return field.type(value)
except TypeError:
raise messages.DecodeError('Invalid enum value "%s"' % (value or ''))
elif isinstance(field, messages.BytesField):
try:
return base64.b64decode(value)
except (binascii.Error, TypeError) as err:
raise messages.DecodeError('Base64 decoding error: %s' % err)
elif isinstance(field, message_types.DateTimeField):
try:
return util.decode_datetime(value)
except ValueError as err:
raise messages.DecodeError(err)
elif (isinstance(field, messages.MessageField) and
issubclass(field.type, messages.Message)):
return self.__decode_dictionary(field.type, value)
elif (isinstance(field, messages.FloatField) and
isinstance(value, (six.integer_types, six.string_types))):
try:
return float(value)
except:
pass
elif (isinstance(field, messages.IntegerField) and
isinstance(value, six.string_types)):
try:
return int(value)
except:
pass
return value | [
"def",
"decode_field",
"(",
"self",
",",
"field",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"messages",
".",
"EnumField",
")",
":",
"try",
":",
"return",
"field",
".",
"type",
"(",
"value",
")",
"except",
"TypeError",
":",
"raise",
"messages",
".",
"DecodeError",
"(",
"'Invalid enum value \"%s\"'",
"%",
"(",
"value",
"or",
"''",
")",
")",
"elif",
"isinstance",
"(",
"field",
",",
"messages",
".",
"BytesField",
")",
":",
"try",
":",
"return",
"base64",
".",
"b64decode",
"(",
"value",
")",
"except",
"(",
"binascii",
".",
"Error",
",",
"TypeError",
")",
"as",
"err",
":",
"raise",
"messages",
".",
"DecodeError",
"(",
"'Base64 decoding error: %s'",
"%",
"err",
")",
"elif",
"isinstance",
"(",
"field",
",",
"message_types",
".",
"DateTimeField",
")",
":",
"try",
":",
"return",
"util",
".",
"decode_datetime",
"(",
"value",
")",
"except",
"ValueError",
"as",
"err",
":",
"raise",
"messages",
".",
"DecodeError",
"(",
"err",
")",
"elif",
"(",
"isinstance",
"(",
"field",
",",
"messages",
".",
"MessageField",
")",
"and",
"issubclass",
"(",
"field",
".",
"type",
",",
"messages",
".",
"Message",
")",
")",
":",
"return",
"self",
".",
"__decode_dictionary",
"(",
"field",
".",
"type",
",",
"value",
")",
"elif",
"(",
"isinstance",
"(",
"field",
",",
"messages",
".",
"FloatField",
")",
"and",
"isinstance",
"(",
"value",
",",
"(",
"six",
".",
"integer_types",
",",
"six",
".",
"string_types",
")",
")",
")",
":",
"try",
":",
"return",
"float",
"(",
"value",
")",
"except",
":",
"pass",
"elif",
"(",
"isinstance",
"(",
"field",
",",
"messages",
".",
"IntegerField",
")",
"and",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
")",
":",
"try",
":",
"return",
"int",
"(",
"value",
")",
"except",
":",
"pass",
"return",
"value"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/protorpc/protorpc/protojson.py#L292-L338 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py | python | Misc.destroy | (self) | Internal function.
Delete all Tcl commands created for
this widget in the Tcl interpreter. | Internal function. | [
"Internal",
"function",
"."
] | def destroy(self):
"""Internal function.
Delete all Tcl commands created for
this widget in the Tcl interpreter."""
if self._tclCommands is not None:
for name in self._tclCommands:
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
self._tclCommands = None | [
"def",
"destroy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tclCommands",
"is",
"not",
"None",
":",
"for",
"name",
"in",
"self",
".",
"_tclCommands",
":",
"#print '- Tkinter: deleted command', name",
"self",
".",
"tk",
".",
"deletecommand",
"(",
"name",
")",
"self",
".",
"_tclCommands",
"=",
"None"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L584-L593 |
||
OpenLightingProject/ola | d1433a1bed73276fbe55ce18c03b1c208237decc | python/ola/PidStore.py | python | PidStore._PidProtoToObject | (self, pid_pb) | return Pid(pid_pb.name,
pid_pb.value,
discovery_request,
discovery_response,
get_request,
get_response,
set_request,
set_response,
discovery_validators,
get_validators,
set_validators) | Convert the protobuf representation of a PID to a PID object.
Args:
pid_pb: The protobuf version of the pid
Returns:
A PIDStore.PID object. | Convert the protobuf representation of a PID to a PID object. | [
"Convert",
"the",
"protobuf",
"representation",
"of",
"a",
"PID",
"to",
"a",
"PID",
"object",
"."
] | def _PidProtoToObject(self, pid_pb):
"""Convert the protobuf representation of a PID to a PID object.
Args:
pid_pb: The protobuf version of the pid
Returns:
A PIDStore.PID object.
"""
def BuildList(field_name):
if not pid_pb.HasField(field_name):
return None
try:
group = self._FrameFormatToGroup(getattr(pid_pb, field_name))
except PidStructureException as e:
raise PidStructureException(
"The structure for the %s in %s isn't valid: %s" %
(field_name, pid_pb.name, e))
return group
discovery_request = BuildList('discovery_request')
discovery_response = BuildList('discovery_response')
get_request = BuildList('get_request')
get_response = BuildList('get_response')
set_request = BuildList('set_request')
set_response = BuildList('set_response')
discovery_validators = []
if pid_pb.HasField('discovery_sub_device_range'):
discovery_validators.append(self._SubDeviceRangeToValidator(
pid_pb.discovery_sub_device_range))
get_validators = []
if pid_pb.HasField('get_sub_device_range'):
get_validators.append(self._SubDeviceRangeToValidator(
pid_pb.get_sub_device_range))
set_validators = []
if pid_pb.HasField('set_sub_device_range'):
set_validators.append(self._SubDeviceRangeToValidator(
pid_pb.set_sub_device_range))
return Pid(pid_pb.name,
pid_pb.value,
discovery_request,
discovery_response,
get_request,
get_response,
set_request,
set_response,
discovery_validators,
get_validators,
set_validators) | [
"def",
"_PidProtoToObject",
"(",
"self",
",",
"pid_pb",
")",
":",
"def",
"BuildList",
"(",
"field_name",
")",
":",
"if",
"not",
"pid_pb",
".",
"HasField",
"(",
"field_name",
")",
":",
"return",
"None",
"try",
":",
"group",
"=",
"self",
".",
"_FrameFormatToGroup",
"(",
"getattr",
"(",
"pid_pb",
",",
"field_name",
")",
")",
"except",
"PidStructureException",
"as",
"e",
":",
"raise",
"PidStructureException",
"(",
"\"The structure for the %s in %s isn't valid: %s\"",
"%",
"(",
"field_name",
",",
"pid_pb",
".",
"name",
",",
"e",
")",
")",
"return",
"group",
"discovery_request",
"=",
"BuildList",
"(",
"'discovery_request'",
")",
"discovery_response",
"=",
"BuildList",
"(",
"'discovery_response'",
")",
"get_request",
"=",
"BuildList",
"(",
"'get_request'",
")",
"get_response",
"=",
"BuildList",
"(",
"'get_response'",
")",
"set_request",
"=",
"BuildList",
"(",
"'set_request'",
")",
"set_response",
"=",
"BuildList",
"(",
"'set_response'",
")",
"discovery_validators",
"=",
"[",
"]",
"if",
"pid_pb",
".",
"HasField",
"(",
"'discovery_sub_device_range'",
")",
":",
"discovery_validators",
".",
"append",
"(",
"self",
".",
"_SubDeviceRangeToValidator",
"(",
"pid_pb",
".",
"discovery_sub_device_range",
")",
")",
"get_validators",
"=",
"[",
"]",
"if",
"pid_pb",
".",
"HasField",
"(",
"'get_sub_device_range'",
")",
":",
"get_validators",
".",
"append",
"(",
"self",
".",
"_SubDeviceRangeToValidator",
"(",
"pid_pb",
".",
"get_sub_device_range",
")",
")",
"set_validators",
"=",
"[",
"]",
"if",
"pid_pb",
".",
"HasField",
"(",
"'set_sub_device_range'",
")",
":",
"set_validators",
".",
"append",
"(",
"self",
".",
"_SubDeviceRangeToValidator",
"(",
"pid_pb",
".",
"set_sub_device_range",
")",
")",
"return",
"Pid",
"(",
"pid_pb",
".",
"name",
",",
"pid_pb",
".",
"value",
",",
"discovery_request",
",",
"discovery_response",
",",
"get_request",
",",
"get_response",
",",
"set_request",
",",
"set_response",
",",
"discovery_validators",
",",
"get_validators",
",",
"set_validators",
")"
] | https://github.com/OpenLightingProject/ola/blob/d1433a1bed73276fbe55ce18c03b1c208237decc/python/ola/PidStore.py#L1170-L1221 |
|
trailofbits/llvm-sanitizer-tutorial | d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99 | llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py | python | TimingScriptGenerator.writeTimingCall | (self, filename, numFuncs, funcsCalled, totalCalls) | Echo some comments and invoke both versions of toy | Echo some comments and invoke both versions of toy | [
"Echo",
"some",
"comments",
"and",
"invoke",
"both",
"versions",
"of",
"toy"
] | def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
"""Echo some comments and invoke both versions of toy"""
rootname = filename
if '.' in filename:
rootname = filename[:filename.rfind('.')]
self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile) | [
"def",
"writeTimingCall",
"(",
"self",
",",
"filename",
",",
"numFuncs",
",",
"funcsCalled",
",",
"totalCalls",
")",
":",
"rootname",
"=",
"filename",
"if",
"'.'",
"in",
"filename",
":",
"rootname",
"=",
"filename",
"[",
":",
"filename",
".",
"rfind",
"(",
"'.'",
")",
"]",
"self",
".",
"shfile",
".",
"write",
"(",
"\"echo \\\"%s: Calls %d of %d functions, %d total\\\" >> %s\\n\"",
"%",
"(",
"filename",
",",
"funcsCalled",
",",
"numFuncs",
",",
"totalCalls",
",",
"self",
".",
"timeFile",
")",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"echo \\\"\\\" >> %s\\n\"",
"%",
"self",
".",
"timeFile",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"echo \\\"With MCJIT\\\" >> %s\\n\"",
"%",
"self",
".",
"timeFile",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"/usr/bin/time -f \\\"Command %C\\\\n\\\\tuser time: %U s\\\\n\\\\tsytem time: %S s\\\\n\\\\tmax set: %M kb\\\"\"",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\" -o %s -a \"",
"%",
"self",
".",
"timeFile",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\\n\"",
"%",
"(",
"filename",
",",
"rootname",
",",
"rootname",
")",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"echo \\\"\\\" >> %s\\n\"",
"%",
"self",
".",
"timeFile",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"echo \\\"With JIT\\\" >> %s\\n\"",
"%",
"self",
".",
"timeFile",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"/usr/bin/time -f \\\"Command %C\\\\n\\\\tuser time: %U s\\\\n\\\\tsytem time: %S s\\\\n\\\\tmax set: %M kb\\\"\"",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\" -o %s -a \"",
"%",
"self",
".",
"timeFile",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"./toy-jit < %s > %s-jit.out 2> %s-jit.err\\n\"",
"%",
"(",
"filename",
",",
"rootname",
",",
"rootname",
")",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"echo \\\"\\\" >> %s\\n\"",
"%",
"self",
".",
"timeFile",
")",
"self",
".",
"shfile",
".",
"write",
"(",
"\"echo \\\"\\\" >> %s\\n\"",
"%",
"self",
".",
"timeFile",
")"
] | https://github.com/trailofbits/llvm-sanitizer-tutorial/blob/d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99/llvm/examples/Kaleidoscope/MCJIT/cached/genk-timing.py#L15-L32 |
||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/protorpc/protorpc/remote.py | python | _RemoteMethodInfo.method | (self) | return self.__method | Original undecorated method. | Original undecorated method. | [
"Original",
"undecorated",
"method",
"."
] | def method(self):
"""Original undecorated method."""
return self.__method | [
"def",
"method",
"(",
"self",
")",
":",
"return",
"self",
".",
"__method"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/protorpc/protorpc/remote.py#L313-L315 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/telemetry/telemetry/page/actions/page_action.py | python | PageAction.BindMeasurementJavaScript | (
self, tab, start_js, stop_js) | Let this action determine when measurements should start and stop.
A measurement can call this method to provide the action
with JavaScript code that starts and stops measurements. The action
determines when to execute the provided JavaScript code, for more accurate
timings.
Args:
tab: The tab to do everything on.
start_js: JavaScript code that starts measurements.
stop_js: JavaScript code that stops measurements. | Let this action determine when measurements should start and stop. | [
"Let",
"this",
"action",
"determine",
"when",
"measurements",
"should",
"start",
"and",
"stop",
"."
] | def BindMeasurementJavaScript(
self, tab, start_js, stop_js): # pylint: disable=W0613
"""Let this action determine when measurements should start and stop.
A measurement can call this method to provide the action
with JavaScript code that starts and stops measurements. The action
determines when to execute the provided JavaScript code, for more accurate
timings.
Args:
tab: The tab to do everything on.
start_js: JavaScript code that starts measurements.
stop_js: JavaScript code that stops measurements.
"""
raise Exception('This action cannot be bound.') | [
"def",
"BindMeasurementJavaScript",
"(",
"self",
",",
"tab",
",",
"start_js",
",",
"stop_js",
")",
":",
"# pylint: disable=W0613",
"raise",
"Exception",
"(",
"'This action cannot be bound.'",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/telemetry/page/actions/page_action.py#L72-L86 |
||
apache/openoffice | 97289b2620590d8b431bcc408f87252db6203818 | main/pyuno/source/module/uno.py | python | getClass | ( typeName ) | return pyuno.getClass(typeName) | returns the class of a concrete uno exception, struct or interface | returns the class of a concrete uno exception, struct or interface | [
"returns",
"the",
"class",
"of",
"a",
"concrete",
"uno",
"exception",
"struct",
"or",
"interface"
] | def getClass( typeName ):
"""returns the class of a concrete uno exception, struct or interface
"""
return pyuno.getClass(typeName) | [
"def",
"getClass",
"(",
"typeName",
")",
":",
"return",
"pyuno",
".",
"getClass",
"(",
"typeName",
")"
] | https://github.com/apache/openoffice/blob/97289b2620590d8b431bcc408f87252db6203818/main/pyuno/source/module/uno.py#L73-L76 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py2/sklearn/model_selection/_split.py | python | StratifiedShuffleSplit.split | (self, X, y, groups=None) | return super(StratifiedShuffleSplit, self).split(X, y, groups) | Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split. | Generate indices to split data into training and test set. | [
"Generate",
"indices",
"to",
"split",
"data",
"into",
"training",
"and",
"test",
"set",
"."
] | def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups) | [
"def",
"split",
"(",
"self",
",",
"X",
",",
"y",
",",
"groups",
"=",
"None",
")",
":",
"y",
"=",
"check_array",
"(",
"y",
",",
"ensure_2d",
"=",
"False",
",",
"dtype",
"=",
"None",
")",
"return",
"super",
"(",
"StratifiedShuffleSplit",
",",
"self",
")",
".",
"split",
"(",
"X",
",",
"y",
",",
"groups",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/model_selection/_split.py#L1296-L1325 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/ipython/py2/IPython/core/completer.py | python | Completer.__init__ | (self, namespace=None, global_namespace=None, **kwargs) | Create a new completer for the command line.
Completer(namespace=ns, global_namespace=ns2) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
An optional second namespace can be given. This allows the completer
to handle cases where both the local and global scopes need to be
distinguished.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete) | Create a new completer for the command line. | [
"Create",
"a",
"new",
"completer",
"for",
"the",
"command",
"line",
"."
] | def __init__(self, namespace=None, global_namespace=None, **kwargs):
"""Create a new completer for the command line.
Completer(namespace=ns, global_namespace=ns2) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
An optional second namespace can be given. This allows the completer
to handle cases where both the local and global scopes need to be
distinguished.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
# The global namespace, if given, can be bound directly
if global_namespace is None:
self.global_namespace = {}
else:
self.global_namespace = global_namespace
super(Completer, self).__init__(**kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"namespace",
"=",
"None",
",",
"global_namespace",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Don't bind to namespace quite yet, but flag whether the user wants a",
"# specific namespace or to use __main__.__dict__. This will allow us",
"# to bind to __main__.__dict__ at completion time, not now.",
"if",
"namespace",
"is",
"None",
":",
"self",
".",
"use_main_ns",
"=",
"1",
"else",
":",
"self",
".",
"use_main_ns",
"=",
"0",
"self",
".",
"namespace",
"=",
"namespace",
"# The global namespace, if given, can be bound directly",
"if",
"global_namespace",
"is",
"None",
":",
"self",
".",
"global_namespace",
"=",
"{",
"}",
"else",
":",
"self",
".",
"global_namespace",
"=",
"global_namespace",
"super",
"(",
"Completer",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py2/IPython/core/completer.py#L251-L285 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/urllib3/util/url.py | python | split_first | (s, delims) | return s[:min_idx], s[min_idx + 1 :], min_delim | .. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims. | [] | def split_first(s, delims):
"""
.. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, "", None
return s[:min_idx], s[min_idx + 1 :], min_delim | [
"def",
"split_first",
"(",
"s",
",",
"delims",
")",
":",
"min_idx",
"=",
"None",
"min_delim",
"=",
"None",
"for",
"d",
"in",
"delims",
":",
"idx",
"=",
"s",
".",
"find",
"(",
"d",
")",
"if",
"idx",
"<",
"0",
":",
"continue",
"if",
"min_idx",
"is",
"None",
"or",
"idx",
"<",
"min_idx",
":",
"min_idx",
"=",
"idx",
"min_delim",
"=",
"d",
"if",
"min_idx",
"is",
"None",
"or",
"min_idx",
"<",
"0",
":",
"return",
"s",
",",
"\"\"",
",",
"None",
"return",
"s",
"[",
":",
"min_idx",
"]",
",",
"s",
"[",
"min_idx",
"+",
"1",
":",
"]",
",",
"min_delim"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/urllib3/util/url.py#L349-L413 |
||
arkenthera/electron-vibrancy | 383153ef9ccb23a6c7517150d6bb0794dff3115e | scripts/cpplint.py | python | CheckVlogArguments | (filename, clean_lines, linenum, error) | Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Checks that VLOG() is only used for defining a logging level. | [
"Checks",
"that",
"VLOG",
"()",
"is",
"only",
"used",
"for",
"defining",
"a",
"logging",
"level",
"."
] | def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.') | [
"def",
"CheckVlogArguments",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"if",
"Search",
"(",
"r'\\bVLOG\\((INFO|ERROR|WARNING|DFATAL|FATAL)\\)'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/vlog'",
",",
"5",
",",
"'VLOG() should be used with numeric verbosity level. '",
"'Use LOG() if you want symbolic severity levels.'",
")"
] | https://github.com/arkenthera/electron-vibrancy/blob/383153ef9ccb23a6c7517150d6bb0794dff3115e/scripts/cpplint.py#L1726-L1742 |
||
gem5/gem5 | 141cc37c2d4b93959d4c249b8f7e6a8b2ef75338 | ext/ply/example/ansic/cparse.py | python | p_function_definition_3 | (t) | function_definition : declarator compound_statement | function_definition : declarator compound_statement | [
"function_definition",
":",
"declarator",
"compound_statement"
] | def p_function_definition_3(t):
'function_definition : declarator compound_statement'
pass | [
"def",
"p_function_definition_3",
"(",
"t",
")",
":",
"pass"
] | https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/ext/ply/example/ansic/cparse.py#L44-L46 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/tkMessageBox.py | python | askretrycancel | (title=None, message=None, **options) | return s == RETRY | Ask if operation should be retried; return true if the answer is yes | Ask if operation should be retried; return true if the answer is yes | [
"Ask",
"if",
"operation",
"should",
"be",
"retried",
";",
"return",
"true",
"if",
"the",
"answer",
"is",
"yes"
] | def askretrycancel(title=None, message=None, **options):
"Ask if operation should be retried; return true if the answer is yes"
s = _show(title, message, WARNING, RETRYCANCEL, **options)
return s == RETRY | [
"def",
"askretrycancel",
"(",
"title",
"=",
"None",
",",
"message",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"s",
"=",
"_show",
"(",
"title",
",",
"message",
",",
"WARNING",
",",
"RETRYCANCEL",
",",
"*",
"*",
"options",
")",
"return",
"s",
"==",
"RETRY"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/tkMessageBox.py#L116-L119 |
|
gwaldron/osgearth | 4c521857d59a69743e4a9cedba00afe570f984e8 | src/third_party/tinygltf/deps/cpplint.py | python | CheckForFunctionLengths | (filename, clean_lines, linenum,
function_state, error) | Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found. | Reports for long function bodies. | [
"Reports",
"for",
"long",
"function",
"bodies",
"."
] | def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() | [
"def",
"CheckForFunctionLengths",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"function_state",
",",
"error",
")",
":",
"lines",
"=",
"clean_lines",
".",
"lines",
"line",
"=",
"lines",
"[",
"linenum",
"]",
"joined_line",
"=",
"''",
"starting_func",
"=",
"False",
"regexp",
"=",
"r'(\\w(\\w|::|\\*|\\&|\\s)*)\\('",
"# decls * & space::name( ...",
"match_result",
"=",
"Match",
"(",
"regexp",
",",
"line",
")",
"if",
"match_result",
":",
"# If the name is all caps and underscores, figure it's a macro and",
"# ignore it, unless it's TEST or TEST_F.",
"function_name",
"=",
"match_result",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
"if",
"function_name",
"==",
"'TEST'",
"or",
"function_name",
"==",
"'TEST_F'",
"or",
"(",
"not",
"Match",
"(",
"r'[A-Z_]+$'",
",",
"function_name",
")",
")",
":",
"starting_func",
"=",
"True",
"if",
"starting_func",
":",
"body_found",
"=",
"False",
"for",
"start_linenum",
"in",
"xrange",
"(",
"linenum",
",",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
":",
"start_line",
"=",
"lines",
"[",
"start_linenum",
"]",
"joined_line",
"+=",
"' '",
"+",
"start_line",
".",
"lstrip",
"(",
")",
"if",
"Search",
"(",
"r'(;|})'",
",",
"start_line",
")",
":",
"# Declarations and trivial functions",
"body_found",
"=",
"True",
"break",
"# ... ignore",
"elif",
"Search",
"(",
"r'{'",
",",
"start_line",
")",
":",
"body_found",
"=",
"True",
"function",
"=",
"Search",
"(",
"r'((\\w|:)*)\\('",
",",
"line",
")",
".",
"group",
"(",
"1",
")",
"if",
"Match",
"(",
"r'TEST'",
",",
"function",
")",
":",
"# Handle TEST... macros",
"parameter_regexp",
"=",
"Search",
"(",
"r'(\\(.*\\))'",
",",
"joined_line",
")",
"if",
"parameter_regexp",
":",
"# Ignore bad syntax",
"function",
"+=",
"parameter_regexp",
".",
"group",
"(",
"1",
")",
"else",
":",
"function",
"+=",
"'()'",
"function_state",
".",
"Begin",
"(",
"function",
")",
"break",
"if",
"not",
"body_found",
":",
"# No body for the function (or evidence of a non-function) was found.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/fn_size'",
",",
"5",
",",
"'Lint failed to find start of function body.'",
")",
"elif",
"Match",
"(",
"r'^\\}\\s*$'",
",",
"line",
")",
":",
"# function end",
"function_state",
".",
"Check",
"(",
"error",
",",
"filename",
",",
"linenum",
")",
"function_state",
".",
"End",
"(",
")",
"elif",
"not",
"Match",
"(",
"r'^\\s*$'",
",",
"line",
")",
":",
"function_state",
".",
"Count",
"(",
")"
] | https://github.com/gwaldron/osgearth/blob/4c521857d59a69743e4a9cedba00afe570f984e8/src/third_party/tinygltf/deps/cpplint.py#L2842-L2907 |
||
gromacs/gromacs | 7dec3a3f99993cf5687a122de3e12de31c21c399 | python_packaging/src/gmxapi/operation.py | python | SourceResource.data | (self) | Get the output data proxy. | Get the output data proxy. | [
"Get",
"the",
"output",
"data",
"proxy",
"."
] | def data(self) -> _OutputDataProxyType:
"""Get the output data proxy."""
# Warning: this should probably be renamed, but "output_data_proxy" is already
# a member in at least one derived class.
... | [
"def",
"data",
"(",
"self",
")",
"->",
"_OutputDataProxyType",
":",
"# Warning: this should probably be renamed, but \"output_data_proxy\" is already",
"# a member in at least one derived class.",
"..."
] | https://github.com/gromacs/gromacs/blob/7dec3a3f99993cf5687a122de3e12de31c21c399/python_packaging/src/gmxapi/operation.py#L768-L772 |
||
microsoft/CNTK | e9396480025b9ca457d26b6f33dd07c474c6aa04 | bindings/python/cntk/contrib/deeprl/agent/agent.py | python | AgentBaseClass._choose_action | (self, state) | Choose an action according to the policy.
Args:
state (object): observation seen by agent, which can be different
from what is provided by the environment. The difference comes
from preprocessing.
Returns:
action (int): action chosen by agent.
debug_info (str): auxiliary diagnostic information. | Choose an action according to the policy. | [
"Choose",
"an",
"action",
"according",
"to",
"the",
"policy",
"."
] | def _choose_action(self, state):
"""
Choose an action according to the policy.
Args:
state (object): observation seen by agent, which can be different
from what is provided by the environment. The difference comes
from preprocessing.
Returns:
action (int): action chosen by agent.
debug_info (str): auxiliary diagnostic information.
"""
pass | [
"def",
"_choose_action",
"(",
"self",
",",
"state",
")",
":",
"pass"
] | https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/contrib/deeprl/agent/agent.py#L160-L173 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/stc.py | python | StyledTextCtrl.StyleResetDefault | (*args, **kwargs) | return _stc.StyledTextCtrl_StyleResetDefault(*args, **kwargs) | StyleResetDefault(self)
Reset the default style to its state at startup | StyleResetDefault(self) | [
"StyleResetDefault",
"(",
"self",
")"
] | def StyleResetDefault(*args, **kwargs):
"""
StyleResetDefault(self)
Reset the default style to its state at startup
"""
return _stc.StyledTextCtrl_StyleResetDefault(*args, **kwargs) | [
"def",
"StyleResetDefault",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_StyleResetDefault",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L2570-L2576 |
|
qt/qt | 0a2f2382541424726168804be2c90b91381608c6 | src/3rdparty/webkit/Source/ThirdParty/gyp/pylib/gyp/generator/scons.py | python | EscapeShellArgument | (s) | return "'" + s.replace("'", "'\\''") + "'" | Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python | Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python | [
"Quotes",
"an",
"argument",
"so",
"that",
"it",
"will",
"be",
"interpreted",
"literally",
"by",
"a",
"POSIX",
"shell",
".",
"Taken",
"from",
"http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"questions",
"/",
"35817",
"/",
"whats",
"-",
"the",
"-",
"best",
"-",
"way",
"-",
"to",
"-",
"escape",
"-",
"ossystem",
"-",
"calls",
"-",
"in",
"-",
"python"
] | def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'" | [
"def",
"EscapeShellArgument",
"(",
"s",
")",
":",
"return",
"\"'\"",
"+",
"s",
".",
"replace",
"(",
"\"'\"",
",",
"\"'\\\\''\"",
")",
"+",
"\"'\""
] | https://github.com/qt/qt/blob/0a2f2382541424726168804be2c90b91381608c6/src/3rdparty/webkit/Source/ThirdParty/gyp/pylib/gyp/generator/scons.py#L170-L175 |
|
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py | python | right_shift | (a, n) | Right shift n bits | Right shift n bits | [
"Right",
"shift",
"n",
"bits"
] | def right_shift (a, n):
"Right shift n bits"
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, m) | [
"def",
"right_shift",
"(",
"a",
",",
"n",
")",
":",
"m",
"=",
"getmask",
"(",
"a",
")",
"if",
"m",
"is",
"nomask",
":",
"d",
"=",
"umath",
".",
"right_shift",
"(",
"filled",
"(",
"a",
")",
",",
"n",
")",
"return",
"masked_array",
"(",
"d",
")",
"else",
":",
"d",
"=",
"umath",
".",
"right_shift",
"(",
"filled",
"(",
"a",
",",
"0",
")",
",",
"n",
")",
"return",
"masked_array",
"(",
"d",
",",
"m",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/oldnumeric/ma.py#L1522-L1530 |
||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/summary/writer/writer.py | python | SummaryToEventTransformer.__init__ | (self, event_writer, graph=None, graph_def=None) | Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead. | Creates a `SummaryWriter` and an event file. | [
"Creates",
"a",
"SummaryWriter",
"and",
"an",
"event",
"file",
"."
] | def __init__(self, event_writer, graph=None, graph_def=None):
"""Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
```
Args:
event_writer: An EventWriter. Implements add_event and get_logdir.
graph: A `Graph` object, such as `sess.graph`.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
self.event_writer = event_writer
# For storing used tags for session.run() outputs.
self._session_run_tags = {}
if graph is not None or graph_def is not None:
# Calling it with both graph and graph_def for backward compatibility.
self.add_graph(graph=graph, graph_def=graph_def)
# Also export the meta_graph_def in this case.
# graph may itself be a graph_def due to positional arguments
maybe_graph_as_def = (graph.as_graph_def(add_shapes=True)
if isinstance(graph, ops.Graph) else graph)
self.add_meta_graph(
meta_graph.create_meta_graph_def(graph_def=graph_def or
maybe_graph_as_def))
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set() | [
"def",
"__init__",
"(",
"self",
",",
"event_writer",
",",
"graph",
"=",
"None",
",",
"graph_def",
"=",
"None",
")",
":",
"self",
".",
"event_writer",
"=",
"event_writer",
"# For storing used tags for session.run() outputs.",
"self",
".",
"_session_run_tags",
"=",
"{",
"}",
"if",
"graph",
"is",
"not",
"None",
"or",
"graph_def",
"is",
"not",
"None",
":",
"# Calling it with both graph and graph_def for backward compatibility.",
"self",
".",
"add_graph",
"(",
"graph",
"=",
"graph",
",",
"graph_def",
"=",
"graph_def",
")",
"# Also export the meta_graph_def in this case.",
"# graph may itself be a graph_def due to positional arguments",
"maybe_graph_as_def",
"=",
"(",
"graph",
".",
"as_graph_def",
"(",
"add_shapes",
"=",
"True",
")",
"if",
"isinstance",
"(",
"graph",
",",
"ops",
".",
"Graph",
")",
"else",
"graph",
")",
"self",
".",
"add_meta_graph",
"(",
"meta_graph",
".",
"create_meta_graph_def",
"(",
"graph_def",
"=",
"graph_def",
"or",
"maybe_graph_as_def",
")",
")",
"# This set contains tags of Summary Values that have been encountered",
"# already. The motivation here is that the SummaryWriter only keeps the",
"# metadata property (which is a SummaryMetadata proto) of the first Summary",
"# Value encountered for each tag. The SummaryWriter strips away the",
"# SummaryMetadata for all subsequent Summary Values with tags seen",
"# previously. This saves space.",
"self",
".",
"_seen_summary_tags",
"=",
"set",
"(",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/summary/writer/writer.py#L46-L95 |
||
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | deps/src/libxml2-2.9.1/python/libxml2class.py | python | xmlNode.xpathNodeEval | (self, str, ctx) | return xpathObjectRet(ret) | Evaluate the XPath Location Path in the given context. The
node 'node' is set as the context node. The context node is
not restored. | Evaluate the XPath Location Path in the given context. The
node 'node' is set as the context node. The context node is
not restored. | [
"Evaluate",
"the",
"XPath",
"Location",
"Path",
"in",
"the",
"given",
"context",
".",
"The",
"node",
"node",
"is",
"set",
"as",
"the",
"context",
"node",
".",
"The",
"context",
"node",
"is",
"not",
"restored",
"."
] | def xpathNodeEval(self, str, ctx):
"""Evaluate the XPath Location Path in the given context. The
node 'node' is set as the context node. The context node is
not restored. """
if ctx is None: ctx__o = None
else: ctx__o = ctx._o
ret = libxml2mod.xmlXPathNodeEval(self._o, str, ctx__o)
if ret is None:raise xpathError('xmlXPathNodeEval() failed')
return xpathObjectRet(ret) | [
"def",
"xpathNodeEval",
"(",
"self",
",",
"str",
",",
"ctx",
")",
":",
"if",
"ctx",
"is",
"None",
":",
"ctx__o",
"=",
"None",
"else",
":",
"ctx__o",
"=",
"ctx",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlXPathNodeEval",
"(",
"self",
".",
"_o",
",",
"str",
",",
"ctx__o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"xpathError",
"(",
"'xmlXPathNodeEval() failed'",
")",
"return",
"xpathObjectRet",
"(",
"ret",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L2935-L2943 |
|
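A hedged usage sketch for the `xpathNodeEval` binding above: evaluate a relative XPath with a specific element as the context node. It assumes the `libxml2` Python bindings are installed; the document string is illustrative:

```python
import libxml2

doc = libxml2.parseDoc("<root><item><name>a</name></item></root>")
ctx = doc.xpathNewContext()
item = ctx.xpathEval("//item")[0]

# 'name' is resolved relative to <item>, which becomes the context node.
result = item.xpathNodeEval("name", ctx)
print([n.content for n in result])  # ['a']

ctx.xpathFreeContext()
doc.freeDoc()
```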
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Cipher/_mode_eax.py | python | EaxMode.update | (self, assoc_data) | return self | Protect associated data
If there is any associated data, the caller has to invoke
this function one or more times, before using
``decrypt`` or ``encrypt``.
*Associated data* is any data (e.g. packet headers) that
will not be encrypted and will be transmitted in the clear.
However, the receiver is still able to detect any modification to it.
If there is no associated data, this method must not be called.
The caller may split associated data in segments of any size, and
invoke this method multiple times, each time with the next segment.
:Parameters:
assoc_data : bytes/bytearray/memoryview
A piece of associated data. There are no restrictions on its size. | Protect associated data | [
"Protect",
"associated",
"data"
] | def update(self, assoc_data):
"""Protect associated data
If there is any associated data, the caller has to invoke
this function one or more times, before using
``decrypt`` or ``encrypt``.
*Associated data* is any data (e.g. packet headers) that
will not be encrypted and will be transmitted in the clear.
However, the receiver is still able to detect any modification to it.
If there is no associated data, this method must not be called.
The caller may split associated data in segments of any size, and
invoke this method multiple times, each time with the next segment.
:Parameters:
assoc_data : bytes/bytearray/memoryview
A piece of associated data. There are no restrictions on its size.
"""
if self.update not in self._next:
raise TypeError("update() can only be called"
" immediately after initialization")
self._next = [self.update, self.encrypt, self.decrypt,
self.digest, self.verify]
self._signer.update(assoc_data)
return self | [
"def",
"update",
"(",
"self",
",",
"assoc_data",
")",
":",
"if",
"self",
".",
"update",
"not",
"in",
"self",
".",
"_next",
":",
"raise",
"TypeError",
"(",
"\"update() can only be called\"",
"\" immediately after initialization\"",
")",
"self",
".",
"_next",
"=",
"[",
"self",
".",
"update",
",",
"self",
".",
"encrypt",
",",
"self",
".",
"decrypt",
",",
"self",
".",
"digest",
",",
"self",
".",
"verify",
"]",
"self",
".",
"_signer",
".",
"update",
"(",
"assoc_data",
")",
"return",
"self"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Cipher/_mode_eax.py#L127-L156 |
|
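The ordering constraint documented above (`update()` before `encrypt`/`decrypt`) matches PyCryptodome's EAX mode. A short sketch; the key and header values are illustrative:

```python
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
header = b"packet-header"  # authenticated but not encrypted

cipher = AES.new(key, AES.MODE_EAX)
cipher.update(header)  # must precede encrypt()/decrypt()
ciphertext, tag = cipher.encrypt_and_digest(b"secret payload")

# The receiver must replay the same associated data before verifying.
decipher = AES.new(key, AES.MODE_EAX, nonce=cipher.nonce)
decipher.update(header)
print(decipher.decrypt_and_verify(ciphertext, tag))  # b'secret payload'
```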
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | tools/json_schema_compiler/cc_generator.py | python | CCGenerator._GenerateTypePopulateProperty | (self, prop, src, dst) | return c | Generate the code to populate a single property in a type.
src: DictionaryValue*
dst: Type* | Generate the code to populate a single property in a type. | [
"Generate",
"the",
"code",
"to",
"populate",
"a",
"single",
"property",
"in",
"a",
"type",
"."
] | def _GenerateTypePopulateProperty(self, prop, src, dst):
"""Generate the code to populate a single property in a type.
src: DictionaryValue*
dst: Type*
"""
c = Code()
value_var = prop.unix_name + '_value'
c.Append('Value* %(value_var)s = NULL;')
if prop.optional:
(c.Sblock(
'if (%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {'
)
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
.Eblock('}')
)
else:
(c.Append(
'if (!%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s))')
.Append(' return false;')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
)
c.Append()
c.Substitute({'value_var': value_var, 'key': prop.name, 'src': src})
return c | [
"def",
"_GenerateTypePopulateProperty",
"(",
"self",
",",
"prop",
",",
"src",
",",
"dst",
")",
":",
"c",
"=",
"Code",
"(",
")",
"value_var",
"=",
"prop",
".",
"unix_name",
"+",
"'_value'",
"c",
".",
"Append",
"(",
"'Value* %(value_var)s = NULL;'",
")",
"if",
"prop",
".",
"optional",
":",
"(",
"c",
".",
"Sblock",
"(",
"'if (%(src)s->GetWithoutPathExpansion(\"%(key)s\", &%(value_var)s)) {'",
")",
".",
"Concat",
"(",
"self",
".",
"_GeneratePopulatePropertyFromValue",
"(",
"prop",
",",
"value_var",
",",
"dst",
",",
"'false'",
")",
")",
".",
"Eblock",
"(",
"'}'",
")",
")",
"else",
":",
"(",
"c",
".",
"Append",
"(",
"'if (!%(src)s->GetWithoutPathExpansion(\"%(key)s\", &%(value_var)s))'",
")",
".",
"Append",
"(",
"' return false;'",
")",
".",
"Concat",
"(",
"self",
".",
"_GeneratePopulatePropertyFromValue",
"(",
"prop",
",",
"value_var",
",",
"dst",
",",
"'false'",
")",
")",
")",
"c",
".",
"Append",
"(",
")",
"c",
".",
"Substitute",
"(",
"{",
"'value_var'",
":",
"value_var",
",",
"'key'",
":",
"prop",
".",
"name",
",",
"'src'",
":",
"src",
"}",
")",
"return",
"c"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/json_schema_compiler/cc_generator.py#L209-L235 |
|
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/src/robotsim.py | python | Widget.hasFocus | (self) | return _robotsim.Widget_hasFocus(self) | r"""
hasFocus(Widget self) -> bool | r"""
hasFocus(Widget self) -> bool | [
"r",
"hasFocus",
"(",
"Widget",
"self",
")",
"-",
">",
"bool"
] | def hasFocus(self) -> "bool":
r"""
hasFocus(Widget self) -> bool
"""
return _robotsim.Widget_hasFocus(self) | [
"def",
"hasFocus",
"(",
"self",
")",
"->",
"\"bool\"",
":",
"return",
"_robotsim",
".",
"Widget_hasFocus",
"(",
"self",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/src/robotsim.py#L3423-L3429 |
|
tensorflow/ngraph-bridge | ea6422491ec75504e78a63db029e7f74ec3479a5 | examples/retrain_ngraph.py | python | ensure_dir_exists | (dir_name) | Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create. | Makes sure the folder exists on disk. | [
"Makes",
"sure",
"the",
"folder",
"exists",
"on",
"disk",
"."
] | def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name) | [
"def",
"ensure_dir_exists",
"(",
"dir_name",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_name",
")",
":",
"os",
".",
"makedirs",
"(",
"dir_name",
")"
] | https://github.com/tensorflow/ngraph-bridge/blob/ea6422491ec75504e78a63db029e7f74ec3479a5/examples/retrain_ngraph.py#L371-L378 |
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/wrappers/scikit_learn.py | python | BaseWrapper.filter_sk_params | (self, fn, override=None) | return res | Filters `sk_params` and returns those in `fn`'s arguments.
Arguments:
fn : arbitrary function
override: dictionary, values to override `sk_params`
Returns:
res : dictionary containing variables
in both `sk_params` and `fn`'s arguments. | Filters `sk_params` and returns those in `fn`'s arguments. | [
"Filters",
"sk_params",
"and",
"returns",
"those",
"in",
"fn",
"s",
"arguments",
"."
] | def filter_sk_params(self, fn, override=None):
"""Filters `sk_params` and returns those in `fn`'s arguments.
Arguments:
fn : arbitrary function
override: dictionary, values to override `sk_params`
Returns:
res : dictionary containing variables
in both `sk_params` and `fn`'s arguments.
"""
override = override or {}
res = {}
for name, value in self.sk_params.items():
if has_arg(fn, name):
res.update({name: value})
res.update(override)
return res | [
"def",
"filter_sk_params",
"(",
"self",
",",
"fn",
",",
"override",
"=",
"None",
")",
":",
"override",
"=",
"override",
"or",
"{",
"}",
"res",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"self",
".",
"sk_params",
".",
"items",
"(",
")",
":",
"if",
"has_arg",
"(",
"fn",
",",
"name",
")",
":",
"res",
".",
"update",
"(",
"{",
"name",
":",
"value",
"}",
")",
"res",
".",
"update",
"(",
"override",
")",
"return",
"res"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/keras/wrappers/scikit_learn.py#L170-L187 |
|
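A small self-contained sketch of the filtering behavior documented above, using a stand-in `has_arg` built on `inspect` so it runs without Keras; the function and parameter names are illustrative:

```python
import inspect

def has_arg(fn, name):
    # Stand-in for Keras' has_arg: check fn's signature for a parameter.
    return name in inspect.signature(fn).parameters

def fit(epochs=1, verbose=0):
    return epochs, verbose

sk_params = {"epochs": 5, "batch_size": 32, "verbose": 1}

# Keep only the entries fit() actually accepts; batch_size is dropped.
filtered = {k: v for k, v in sk_params.items() if has_arg(fit, k)}
print(filtered)  # {'epochs': 5, 'verbose': 1}
```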
Cisco-Talos/moflow | ed71dfb0540d9e0d7a4c72f0881b58958d573728 | BAP-0.7-moflow/libtracewrap/libtrace/protobuf/python/google/protobuf/service.py | python | RpcController.StartCancel | (self) | Initiate cancellation.
Advises the RPC system that the caller desires that the RPC call be
canceled. The RPC system may cancel it immediately, may wait awhile and
then cancel it, or may not even cancel the call at all. If the call is
canceled, the "done" callback will still be called and the RpcController
will indicate that the call failed at that time. | Initiate cancellation. | [
"Initiate",
"cancellation",
"."
] | def StartCancel(self):
"""Initiate cancellation.
Advises the RPC system that the caller desires that the RPC call be
canceled. The RPC system may cancel it immediately, may wait awhile and
then cancel it, or may not even cancel the call at all. If the call is
canceled, the "done" callback will still be called and the RpcController
will indicate that the call failed at that time.
"""
raise NotImplementedError | [
"def",
"StartCancel",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/Cisco-Talos/moflow/blob/ed71dfb0540d9e0d7a4c72f0881b58958d573728/BAP-0.7-moflow/libtracewrap/libtrace/protobuf/python/google/protobuf/service.py#L154-L163 |
||
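Since `StartCancel` is abstract, a concrete controller decides how (or whether) to honor cancellation. A minimal hypothetical subclass, shown only to illustrate the advisory contract described above:

```python
class LocalRpcController:  # hypothetical minimal implementation
    def __init__(self):
        self._cancelled = False
        self._failed = False

    def StartCancel(self):
        # Advisory only: mark the call; the RPC system may act on it
        # immediately, later, or not at all.
        self._cancelled = True

    def Failed(self):
        return self._failed

ctrl = LocalRpcController()
ctrl.StartCancel()
print(ctrl._cancelled)  # True
```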
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/contrib/losses/python/losses/loss_ops.py | python | _scale_losses | (losses, weight) | return math_ops.reduce_sum(reduced_losses) | Computes the scaled loss.
Args:
losses: A `Tensor` of size [batch_size, d1, ... dN].
weight: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
The `losses` are reduced (tf.reduce_sum) until its dimension matches
that of `weight` at which point the reduced `losses` are element-wise
multiplied by `weight` and a final reduce_sum is computed on the result.
Conceptually, this operation is equivalent to broadcasting (tiling)
`weight` to be the same size as `losses`, performing an element-wise
multiplication, and summing the result.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`. | Computes the scaled loss. | [
"Computes",
"the",
"scaled",
"loss",
"."
] | def _scale_losses(losses, weight):
"""Computes the scaled loss.
Args:
losses: A `Tensor` of size [batch_size, d1, ... dN].
weight: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
The `losses` are reduced (tf.reduce_sum) until its dimension matches
that of `weight` at which point the reduced `losses` are element-wise
multiplied by `weight` and a final reduce_sum is computed on the result.
Conceptually, this operation is equivalent to broadcasting (tiling)
`weight` to be the same size as `losses`, performing an element-wise
multiplication, and summing the result.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
"""
# First, compute the sum of the losses over all elements:
start_index = max(0, weight.get_shape().ndims)
reduction_indices = list(range(start_index, losses.get_shape().ndims))
reduced_losses = math_ops.reduce_sum(losses,
reduction_indices=reduction_indices)
reduced_losses = math_ops.mul(reduced_losses, weight)
return math_ops.reduce_sum(reduced_losses) | [
"def",
"_scale_losses",
"(",
"losses",
",",
"weight",
")",
":",
"# First, compute the sum of the losses over all elements:",
"start_index",
"=",
"max",
"(",
"0",
",",
"weight",
".",
"get_shape",
"(",
")",
".",
"ndims",
")",
"reduction_indices",
"=",
"list",
"(",
"range",
"(",
"start_index",
",",
"losses",
".",
"get_shape",
"(",
")",
".",
"ndims",
")",
")",
"reduced_losses",
"=",
"math_ops",
".",
"reduce_sum",
"(",
"losses",
",",
"reduction_indices",
"=",
"reduction_indices",
")",
"reduced_losses",
"=",
"math_ops",
".",
"mul",
"(",
"reduced_losses",
",",
"weight",
")",
"return",
"math_ops",
".",
"reduce_sum",
"(",
"reduced_losses",
")"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/losses/python/losses/loss_ops.py#L45-L68 |
|
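The reduce-then-multiply trick documented above is easy to check with NumPy: summing `losses` down to `weight`'s rank, multiplying, and summing again equals broadcasting the weight to full size first. A sketch with illustrative shapes:

```python
import numpy as np

losses = np.arange(24, dtype=np.float32).reshape(2, 3, 4)  # [batch, d1, d2]
weight = np.array([0.5, 2.0], dtype=np.float32)            # per-batch weight

# Reduce losses over the trailing dims until rank matches weight's rank.
reduced = losses.sum(axis=(1, 2))
scaled = (reduced * weight).sum()

# Equivalent formulation: tile the weight out to losses' full shape.
tiled = (losses * weight[:, None, None]).sum()
assert np.isclose(scaled, tiled)
print(scaled)
```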
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/summary_ops_v2.py | python | create_file_writer_v2 | (logdir,
max_queue=None,
flush_millis=None,
filename_suffix=None,
name=None) | Creates a summary file writer for the given log directory.
Args:
logdir: a string specifying the directory in which to write an event file.
max_queue: the largest number of summaries to keep in a queue; will
flush once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: a name for the op that creates the writer.
Returns:
A SummaryWriter object. | Creates a summary file writer for the given log directory. | [
"Creates",
"a",
"summary",
"file",
"writer",
"for",
"the",
"given",
"log",
"directory",
"."
] | def create_file_writer_v2(logdir,
max_queue=None,
flush_millis=None,
filename_suffix=None,
name=None):
"""Creates a summary file writer for the given log directory.
Args:
logdir: a string specifying the directory in which to write an event file.
max_queue: the largest number of summaries to keep in a queue; will
flush once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: a name for the op that creates the writer.
Returns:
A SummaryWriter object.
"""
if logdir is None:
raise ValueError("logdir cannot be None")
inside_function = ops.inside_function()
with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
# Run init inside an init_scope() to hoist it out of tf.functions.
with ops.init_scope():
if context.executing_eagerly():
_check_create_file_writer_args(
inside_function,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix)
logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
if max_queue is None:
max_queue = constant_op.constant(10)
if flush_millis is None:
flush_millis = constant_op.constant(2 * 60 * 1000)
if filename_suffix is None:
filename_suffix = constant_op.constant(".v2")
# Prepend the PID and a process-local UID to the filename suffix to avoid
# filename collisions within the machine (the filename already contains
# the hostname to avoid cross-machine collisions).
unique_prefix = constant_op.constant(".%s.%s" % (os.getpid(), ops.uid()))
filename_suffix = unique_prefix + filename_suffix
# Use a unique shared_name to prevent resource sharing.
if context.executing_eagerly():
shared_name = context.shared_name()
else:
shared_name = ops.name_from_scope_name(scope) # pylint: disable=protected-access
return ResourceSummaryWriter(
shared_name=shared_name,
init_op_fn=functools.partial(
gen_summary_ops.create_summary_file_writer,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix),
name=name,
v2=True) | [
"def",
"create_file_writer_v2",
"(",
"logdir",
",",
"max_queue",
"=",
"None",
",",
"flush_millis",
"=",
"None",
",",
"filename_suffix",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"logdir",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"logdir cannot be None\"",
")",
"inside_function",
"=",
"ops",
".",
"inside_function",
"(",
")",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"create_file_writer\"",
")",
"as",
"scope",
",",
"ops",
".",
"device",
"(",
"\"cpu:0\"",
")",
":",
"# Run init inside an init_scope() to hoist it out of tf.functions.",
"with",
"ops",
".",
"init_scope",
"(",
")",
":",
"if",
"context",
".",
"executing_eagerly",
"(",
")",
":",
"_check_create_file_writer_args",
"(",
"inside_function",
",",
"logdir",
"=",
"logdir",
",",
"max_queue",
"=",
"max_queue",
",",
"flush_millis",
"=",
"flush_millis",
",",
"filename_suffix",
"=",
"filename_suffix",
")",
"logdir",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"logdir",
",",
"dtype",
"=",
"dtypes",
".",
"string",
")",
"if",
"max_queue",
"is",
"None",
":",
"max_queue",
"=",
"constant_op",
".",
"constant",
"(",
"10",
")",
"if",
"flush_millis",
"is",
"None",
":",
"flush_millis",
"=",
"constant_op",
".",
"constant",
"(",
"2",
"*",
"60",
"*",
"1000",
")",
"if",
"filename_suffix",
"is",
"None",
":",
"filename_suffix",
"=",
"constant_op",
".",
"constant",
"(",
"\".v2\"",
")",
"# Prepend the PID and a process-local UID to the filename suffix to avoid",
"# filename collisions within the machine (the filename already contains",
"# the hostname to avoid cross-machine collisions).",
"unique_prefix",
"=",
"constant_op",
".",
"constant",
"(",
"\".%s.%s\"",
"%",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"ops",
".",
"uid",
"(",
")",
")",
")",
"filename_suffix",
"=",
"unique_prefix",
"+",
"filename_suffix",
"# Use a unique shared_name to prevent resource sharing.",
"if",
"context",
".",
"executing_eagerly",
"(",
")",
":",
"shared_name",
"=",
"context",
".",
"shared_name",
"(",
")",
"else",
":",
"shared_name",
"=",
"ops",
".",
"name_from_scope_name",
"(",
"scope",
")",
"# pylint: disable=protected-access",
"return",
"ResourceSummaryWriter",
"(",
"shared_name",
"=",
"shared_name",
",",
"init_op_fn",
"=",
"functools",
".",
"partial",
"(",
"gen_summary_ops",
".",
"create_summary_file_writer",
",",
"logdir",
"=",
"logdir",
",",
"max_queue",
"=",
"max_queue",
",",
"flush_millis",
"=",
"flush_millis",
",",
"filename_suffix",
"=",
"filename_suffix",
")",
",",
"name",
"=",
"name",
",",
"v2",
"=",
"True",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/summary_ops_v2.py#L334-L391 |
||
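In TF2 this function backs `tf.summary.create_file_writer`. A typical usage sketch, assuming TensorFlow 2.x; the log directory is illustrative:

```python
import tensorflow as tf

writer = tf.summary.create_file_writer(
    "/tmp/logs", max_queue=10, flush_millis=120000)

with writer.as_default():
    for step in range(3):
        tf.summary.scalar("loss", 1.0 / (step + 1), step=step)

writer.flush()  # event file name carries the pid/uid suffix described above
```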
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/pycparser/plyparser.py | python | PLYParser._create_opt_rule | (self, rulename) | Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt | Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt | [
"Given",
"a",
"rule",
"name",
"creates",
"an",
"optional",
"ply",
".",
"yacc",
"rule",
"for",
"it",
".",
"The",
"name",
"of",
"the",
"optional",
"rule",
"is",
"<rulename",
">",
"_opt"
] | def _create_opt_rule(self, rulename):
""" Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt
"""
optname = rulename + '_opt'
def optrule(self, p):
p[0] = p[1]
optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
optrule.__name__ = 'p_%s' % optname
setattr(self.__class__, optrule.__name__, optrule) | [
"def",
"_create_opt_rule",
"(",
"self",
",",
"rulename",
")",
":",
"optname",
"=",
"rulename",
"+",
"'_opt'",
"def",
"optrule",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]",
"optrule",
".",
"__doc__",
"=",
"'%s : empty\\n| %s'",
"%",
"(",
"optname",
",",
"rulename",
")",
"optrule",
".",
"__name__",
"=",
"'p_%s'",
"%",
"optname",
"setattr",
"(",
"self",
".",
"__class__",
",",
"optrule",
".",
"__name__",
",",
"optrule",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/pycparser/plyparser.py#L35-L47 |
||
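For a rule named `foo`, the method above attaches a `p_foo_opt` handler whose docstring is the PLY grammar `foo_opt : empty | foo`. A hand-written equivalent of what it generates:

```python
# Hand-written equivalent of _create_opt_rule('foo'): an optional
# ply.yacc production that passes the parsed value (or None) through.
def p_foo_opt(self, p):
    """foo_opt : empty
               | foo"""
    p[0] = p[1]
```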
rapidsai/cudf | d5b2448fc69f17509304d594f029d0df56984962 | python/cudf/cudf/core/column/string.py | python | StringMethods.upper | (self) | return self._return_or_inplace(libstrings.to_upper(self._column)) | Convert each string to uppercase.
This only applies to ASCII characters at this time.
Equivalent to `str.upper()
<https://docs.python.org/3/library/stdtypes.html#str.upper>`_.
Returns
-------
Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and
remaining to lowercase.
capitalize
Converts first character to uppercase and remaining to
lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object | Convert each string to uppercase.
This only applies to ASCII characters at this time. | [
"Convert",
"each",
"string",
"to",
"uppercase",
".",
"This",
"only",
"applies",
"to",
"ASCII",
"characters",
"at",
"this",
"time",
"."
] | def upper(self) -> SeriesOrIndex:
"""
Convert each string to uppercase.
This only applies to ASCII characters at this time.
Equivalent to `str.upper()
<https://docs.python.org/3/library/stdtypes.html#str.upper>`_.
Returns
-------
Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and
remaining to lowercase.
capitalize
Converts first character to uppercase and remaining to
lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
"""
return self._return_or_inplace(libstrings.to_upper(self._column)) | [
"def",
"upper",
"(",
"self",
")",
"->",
"SeriesOrIndex",
":",
"return",
"self",
".",
"_return_or_inplace",
"(",
"libstrings",
".",
"to_upper",
"(",
"self",
".",
"_column",
")",
")"
] | https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/column/string.py#L1760-L1809 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/idl_parser/idl_ppapi_parser.py | python | IDLPPAPIParser.p_LabelCont | (self, p) | LabelCont : ',' LabelList
| | LabelCont : ',' LabelList
| | [
"LabelCont",
":",
"LabelList",
"|"
] | def p_LabelCont(self, p):
"""LabelCont : ',' LabelList
|"""
if len(p) > 1:
p[0] = p[2] | [
"def",
"p_LabelCont",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
">",
"1",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"2",
"]"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/idl_parser/idl_ppapi_parser.py#L102-L106 |
||
digibyte/digibyte | 0b8a04fb06d5470a15168e2f675aec57bcc24dac | contrib/devtools/optimize-pngs.py | python | content_hash | (filename) | return hashlib.sha256(data).hexdigest() | Return hash of RGBA contents of image | Return hash of RGBA contents of image | [
"Return",
"hash",
"of",
"RGBA",
"contents",
"of",
"image"
] | def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest() | [
"def",
"content_hash",
"(",
"filename",
")",
":",
"i",
"=",
"Image",
".",
"open",
"(",
"filename",
")",
"i",
"=",
"i",
".",
"convert",
"(",
"'RGBA'",
")",
"data",
"=",
"i",
".",
"tobytes",
"(",
")",
"return",
"hashlib",
".",
"sha256",
"(",
"data",
")",
".",
"hexdigest",
"(",
")"
] | https://github.com/digibyte/digibyte/blob/0b8a04fb06d5470a15168e2f675aec57bcc24dac/contrib/devtools/optimize-pngs.py#L20-L25 |
|
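The hash above is taken over decoded RGBA pixels, so two PNGs that differ only in metadata or compression level compare equal. A hedged usage sketch; the file names are illustrative:

```python
import hashlib
from PIL import Image

def content_hash(filename):
    data = Image.open(filename).convert("RGBA").tobytes()
    return hashlib.sha256(data).hexdigest()

# Same pixels, different compression: hashes still match.
if content_hash("icon.png") == content_hash("icon_recompressed.png"):
    print("identical pixel content")
```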
nasa/trick | 7b85aa66329d62fe8816462627c09a353aac8299 | share/trick/trickops/TrickWorkflow.py | python | SingleRun._sim_time | (self) | return 'Sim Time: {0:7.1f} sec'.format(
self._tics.value / self._tics_per_sec.value) | Get a string displaying the sim time.
Returns
-------
str
A string for displaying sim time. | Get a string displaying the sim time. | [
"Get",
"a",
"string",
"displaying",
"the",
"sim",
"time",
"."
] | def _sim_time(self):
"""
Get a string displaying the sim time.
Returns
-------
str
A string for displaying sim time.
"""
return 'Sim Time: {0:7.1f} sec'.format(
self._tics.value / self._tics_per_sec.value) | [
"def",
"_sim_time",
"(",
"self",
")",
":",
"return",
"'Sim Time: {0:7.1f} sec'",
".",
"format",
"(",
"self",
".",
"_tics",
".",
"value",
"/",
"self",
".",
"_tics_per_sec",
".",
"value",
")"
] | https://github.com/nasa/trick/blob/7b85aa66329d62fe8816462627c09a353aac8299/share/trick/trickops/TrickWorkflow.py#L1440-L1450 |
|
HyeonwooNoh/caffe | d9e8494a2832d67b25dee37194c7bcb9d52d0e42 | scripts/cpp_lint.py | python | CheckForCopyright | (filename, lines, error) | Logs an error if a Copyright message appears at the top of the file. | Logs an error if a Copyright message appears at the top of the file. | [
"Logs",
"an",
"error",
"if",
"a",
"Copyright",
"message",
"appears",
"at",
"the",
"top",
"of",
"the",
"file",
"."
] | def CheckForCopyright(filename, lines, error):
"""Logs an error if a Copyright message appears at the top of the file."""
# We'll check up to line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if _RE_COPYRIGHT.search(lines[line], re.I):
error(filename, 0, 'legal/copyright', 5,
'Copyright message found. '
'You should not include a copyright line.') | [
"def",
"CheckForCopyright",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"# We'll check up to line 10. Don't forget there's a",
"# dummy line at the front.",
"for",
"line",
"in",
"xrange",
"(",
"1",
",",
"min",
"(",
"len",
"(",
"lines",
")",
",",
"11",
")",
")",
":",
"if",
"_RE_COPYRIGHT",
".",
"search",
"(",
"lines",
"[",
"line",
"]",
",",
"re",
".",
"I",
")",
":",
"error",
"(",
"filename",
",",
"0",
",",
"'legal/copyright'",
",",
"5",
",",
"'Copyright message found. '",
"'You should not include a copyright line.'",
")"
] | https://github.com/HyeonwooNoh/caffe/blob/d9e8494a2832d67b25dee37194c7bcb9d52d0e42/scripts/cpp_lint.py#L1372-L1381 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/grid.py | python | Grid.GetSelectionBlockBottomRight | (*args, **kwargs) | return _grid.Grid_GetSelectionBlockBottomRight(*args, **kwargs) | GetSelectionBlockBottomRight(self) -> wxGridCellCoordsArray | GetSelectionBlockBottomRight(self) -> wxGridCellCoordsArray | [
"GetSelectionBlockBottomRight",
"(",
"self",
")",
"-",
">",
"wxGridCellCoordsArray"
] | def GetSelectionBlockBottomRight(*args, **kwargs):
"""GetSelectionBlockBottomRight(self) -> wxGridCellCoordsArray"""
return _grid.Grid_GetSelectionBlockBottomRight(*args, **kwargs) | [
"def",
"GetSelectionBlockBottomRight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"Grid_GetSelectionBlockBottomRight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L2065-L2067 |
|
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/traci/_polygon.py | python | PolygonDomain.getType | (self, polygonID) | return self._getUniversal(tc.VAR_TYPE, polygonID) | getType(string) -> string
Returns the (abstract) type of the polygon. | getType(string) -> string | [
"getType",
"(",
"string",
")",
"-",
">",
"string"
] | def getType(self, polygonID):
"""getType(string) -> string
Returns the (abstract) type of the polygon.
"""
return self._getUniversal(tc.VAR_TYPE, polygonID) | [
"def",
"getType",
"(",
"self",
",",
"polygonID",
")",
":",
"return",
"self",
".",
"_getUniversal",
"(",
"tc",
".",
"VAR_TYPE",
",",
"polygonID",
")"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/traci/_polygon.py#L33-L38 |
|
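A usage sketch for the TraCI binding above, assuming a running SUMO simulation whose config defines a polygon `poly0` (both names are illustrative):

```python
import traci

# Assumes the sumo binary is on PATH.
traci.start(["sumo", "-c", "scenario.sumocfg"])
print(traci.polygon.getType("poly0"))  # e.g. "building"
traci.close()
```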
trailofbits/llvm-sanitizer-tutorial | d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99 | llvm/utils/lit/lit/llvm/subst.py | python | ToolSubst.__init__ | (self, key, command=None, pre=r'.-^/\<', post='-.', verbatim=False,
unresolved='warn', extra_args=None) | Construct a ToolSubst.
key: The text which is to be substituted.
command: The command to substitute when the key is matched. By default,
this will treat `key` as a tool name and search for it. If it is
a string, it is interpreted as an exact path. If it is an instance of
FindTool, the specified tool name is searched for on disk.
pre: If specified, the substitution will not find matches where
the character immediately preceding the word-boundary that begins
`key` is any of the characters in the string `pre`.
post: If specified, the substitution will not find matches where
the character immediately after the word-boundary that ends `key`
is any of the characters specified in the string `post`.
verbatim: If True, `key` is an exact regex that is passed to the
underlying substitution
unresolved: Action to take if the tool substitution cannot be
resolved. Valid values:
'warn' - log a warning but add the substitution anyway.
'fatal' - Exit the test suite and log a fatal error.
'break' - Don't add any of the substitutions from the current
group, and return a value indicating a failure.
'ignore' - Don't add the substitution, and don't log an error
extra_args: If specified, represents a list of arguments that will be
appended to the tool's substitution.
explicit_path: If specified, the exact path will be used as a substitution.
Otherwise, the tool will be searched for as if by calling which(tool) | Construct a ToolSubst. | [
"Construct",
"a",
"ToolSubst",
"."
] | def __init__(self, key, command=None, pre=r'.-^/\<', post='-.', verbatim=False,
unresolved='warn', extra_args=None):
"""Construct a ToolSubst.
key: The text which is to be substituted.
command: The command to substitute when the key is matched. By default,
this will treat `key` as a tool name and search for it. If it is
a string, it is interpreted as an exact path. If it is an instance of
FindTool, the specified tool name is searched for on disk.
pre: If specified, the substitution will not find matches where
the character immediately preceding the word-boundary that begins
`key` is any of the characters in the string `pre`.
post: If specified, the substitution will not find matches where
the character immediately after the word-boundary that ends `key`
is any of the characters specified in the string `post`.
verbatim: If True, `key` is an exact regex that is passed to the
underlying substitution
unresolved: Action to take if the tool substitution cannot be
resolved. Valid values:
'warn' - log a warning but add the substitution anyway.
'fatal' - Exit the test suite and log a fatal error.
'break' - Don't add any of the substitutions from the current
group, and return a value indicating a failure.
'ignore' - Don't add the substitution, and don't log an error
extra_args: If specified, represents a list of arguments that will be
appended to the tool's substitution.
explicit_path: If specified, the exact path will be used as a substitution.
Otherwise, the tool will be searched for as if by calling which(tool)
"""
self.unresolved = unresolved
self.extra_args = extra_args
self.key = key
self.command = command if command is not None else FindTool(key)
self.was_resolved = False
if verbatim:
self.regex = key
return
def not_in(chars, where=''):
if not chars:
return ''
pattern_str = '|'.join(re.escape(x) for x in chars)
return r'(?{}!({}))'.format(where, pattern_str)
def wordify(word):
match = wordifier.match(word)
introducer = match.group(1)
word = match.group(2)
return introducer + r'\b' + word + r'\b'
self.regex = not_in(pre, '<') + wordify(key) + not_in(post) | [
"def",
"__init__",
"(",
"self",
",",
"key",
",",
"command",
"=",
"None",
",",
"pre",
"=",
"r'.-^/\\<'",
",",
"post",
"=",
"'-.'",
",",
"verbatim",
"=",
"False",
",",
"unresolved",
"=",
"'warn'",
",",
"extra_args",
"=",
"None",
")",
":",
"self",
".",
"unresolved",
"=",
"unresolved",
"self",
".",
"extra_args",
"=",
"extra_args",
"self",
".",
"key",
"=",
"key",
"self",
".",
"command",
"=",
"command",
"if",
"command",
"is",
"not",
"None",
"else",
"FindTool",
"(",
"key",
")",
"self",
".",
"was_resolved",
"=",
"False",
"if",
"verbatim",
":",
"self",
".",
"regex",
"=",
"key",
"return",
"def",
"not_in",
"(",
"chars",
",",
"where",
"=",
"''",
")",
":",
"if",
"not",
"chars",
":",
"return",
"''",
"pattern_str",
"=",
"'|'",
".",
"join",
"(",
"re",
".",
"escape",
"(",
"x",
")",
"for",
"x",
"in",
"chars",
")",
"return",
"r'(?{}!({}))'",
".",
"format",
"(",
"where",
",",
"pattern_str",
")",
"def",
"wordify",
"(",
"word",
")",
":",
"match",
"=",
"wordifier",
".",
"match",
"(",
"word",
")",
"introducer",
"=",
"match",
".",
"group",
"(",
"1",
")",
"word",
"=",
"match",
".",
"group",
"(",
"2",
")",
"return",
"introducer",
"+",
"r'\\b'",
"+",
"word",
"+",
"r'\\b'",
"self",
".",
"regex",
"=",
"not_in",
"(",
"pre",
",",
"'<'",
")",
"+",
"wordify",
"(",
"key",
")",
"+",
"not_in",
"(",
"post",
")"
] | https://github.com/trailofbits/llvm-sanitizer-tutorial/blob/d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99/llvm/utils/lit/lit/llvm/subst.py#L42-L100 |
||
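A sketch of how the `pre`/`post` boundary guards in `ToolSubst` behave. Since the class is internal to lit, this exercises the resulting regex idea directly with `re`; the pattern below is an approximation of what the defaults produce for the key `opt`:

```python
import re

# No '.', '-', '^', '/', '\' or '<' right before the tool name,
# and no '-' or '.' right after it.
regex = r"(?<![.\-^/\\<])\bopt\b(?![-.])"

print(bool(re.search(regex, "opt -o out")))    # True: plain tool name
print(bool(re.search(regex, "/usr/bin/opt")))  # False: preceded by '/'
print(bool(re.search(regex, "opt-8")))         # False: followed by '-'
```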
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/trace.py | python | find_executable_linenos | (filename) | return find_lines(code, strs) | Return dict where keys are line numbers in the line number table. | Return dict where keys are line numbers in the line number table. | [
"Return",
"dict",
"where",
"keys",
"are",
"line",
"numbers",
"in",
"the",
"line",
"number",
"table",
"."
] | def find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number table."""
try:
prog = open(filename, "rU").read()
except IOError, err:
print >> sys.stderr, ("Not printing coverage data for %r: %s"
% (filename, err))
return {}
code = compile(prog, filename, "exec")
strs = find_strings(filename)
return find_lines(code, strs) | [
"def",
"find_executable_linenos",
"(",
"filename",
")",
":",
"try",
":",
"prog",
"=",
"open",
"(",
"filename",
",",
"\"rU\"",
")",
".",
"read",
"(",
")",
"except",
"IOError",
",",
"err",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"Not printing coverage data for %r: %s\"",
"%",
"(",
"filename",
",",
"err",
")",
")",
"return",
"{",
"}",
"code",
"=",
"compile",
"(",
"prog",
",",
"filename",
",",
"\"exec\"",
")",
"strs",
"=",
"find_strings",
"(",
"filename",
")",
"return",
"find_lines",
"(",
"code",
",",
"strs",
")"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/trace.py#L427-L437 |
|
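A quick usage sketch for the `trace` helper above. The name is public in Python 2's `trace` module as shown; later CPython versions renamed it with a leading underscore. The target path is illustrative:

```python
import trace

# Map of executable line numbers -> 1 for a given source file.
# ("example.py" must exist; substitute any real Python file.)
linenos = trace.find_executable_linenos("example.py")
print(sorted(linenos))
```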
apiaryio/snowcrash | b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3 | tools/gyp/pylib/gyp/win_tool.py | python | WinTool.ExecActionWrapper | (self, arch, rspfile, *dir) | return subprocess.call(args, shell=True, env=env, cwd=dir) | Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory. | Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory. | [
"Runs",
"an",
"action",
"command",
"line",
"from",
"a",
"response",
"file",
"using",
"the",
"environment",
"for",
"|arch|",
".",
"If",
"|dir|",
"is",
"supplied",
"use",
"that",
"as",
"the",
"working",
"directory",
"."
] | def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir) | [
"def",
"ExecActionWrapper",
"(",
"self",
",",
"arch",
",",
"rspfile",
",",
"*",
"dir",
")",
":",
"env",
"=",
"self",
".",
"_GetEnv",
"(",
"arch",
")",
"# TODO(scottmg): This is a temporary hack to get some specific variables",
"# through to actions that are set after gyp-time. http://crbug.com/333738.",
"for",
"k",
",",
"v",
"in",
"os",
".",
"environ",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"not",
"in",
"env",
":",
"env",
"[",
"k",
"]",
"=",
"v",
"args",
"=",
"open",
"(",
"rspfile",
")",
".",
"read",
"(",
")",
"dir",
"=",
"dir",
"[",
"0",
"]",
"if",
"dir",
"else",
"None",
"return",
"subprocess",
".",
"call",
"(",
"args",
",",
"shell",
"=",
"True",
",",
"env",
"=",
"env",
",",
"cwd",
"=",
"dir",
")"
] | https://github.com/apiaryio/snowcrash/blob/b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3/tools/gyp/pylib/gyp/win_tool.py#L289-L300 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/tseries/offsets.py | python | BusinessHourMixin.rollforward | (self, dt) | return dt | Roll provided date forward to next offset only if not on offset. | Roll provided date forward to next offset only if not on offset. | [
"Roll",
"provided",
"date",
"forward",
"to",
"next",
"offset",
"only",
"if",
"not",
"on",
"offset",
"."
] | def rollforward(self, dt):
"""
Roll provided date forward to next offset only if not on offset.
"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt | [
"def",
"rollforward",
"(",
"self",
",",
"dt",
")",
":",
"if",
"not",
"self",
".",
"onOffset",
"(",
"dt",
")",
":",
"if",
"self",
".",
"n",
">=",
"0",
":",
"return",
"self",
".",
"_next_opening_time",
"(",
"dt",
")",
"else",
":",
"return",
"self",
".",
"_prev_opening_time",
"(",
"dt",
")",
"return",
"dt"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/tseries/offsets.py#L671-L680 |
|
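A usage sketch for the business-hour rollforward above, assuming pandas is installed; the timestamps are illustrative:

```python
import pandas as pd

bh = pd.offsets.BusinessHour()  # default 09:00-17:00

# 18:30 is outside business hours, so roll forward to the next opening.
print(bh.rollforward(pd.Timestamp("2024-01-02 18:30")))
# Timestamp('2024-01-03 09:00:00')

# A timestamp already on offset is returned unchanged.
print(bh.rollforward(pd.Timestamp("2024-01-02 10:00")))
```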
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/signal/ltisys.py | python | lti.output | (self, U, T, X0=None) | return lsim(self, U, T, X0=X0) | Return the response of a continuous-time system to input `U`.
See `lsim` for details. | Return the response of a continuous-time system to input `U`.
See `lsim` for details. | [
"Return",
"the",
"response",
"of",
"a",
"continuous",
"-",
"time",
"system",
"to",
"input",
"U",
".",
"See",
"lsim",
"for",
"details",
"."
] | def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `lsim` for details.
"""
return lsim(self, U, T, X0=X0) | [
"def",
"output",
"(",
"self",
",",
"U",
",",
"T",
",",
"X0",
"=",
"None",
")",
":",
"return",
"lsim",
"(",
"self",
",",
"U",
",",
"T",
",",
"X0",
"=",
"X0",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/signal/ltisys.py#L414-L419 |
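A usage sketch for `output`/`lsim` above: drive a first-order low-pass system with a step input. Assumes SciPy; the system and time grid are illustrative:

```python
import numpy as np
from scipy import signal

system = signal.lti([1.0], [1.0, 1.0])  # H(s) = 1 / (s + 1)
t = np.linspace(0, 5, 101)
u = np.ones_like(t)                     # unit step input

tout, yout, xout = system.output(u, t)  # wraps lsim(system, u, t)
print(yout[-1])                         # approaches 1.0 in steady state
```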