nwo (stringlengths 5–106) | sha (stringlengths 40) | path (stringlengths 4–174) | language (stringclasses 1 value) | identifier (stringlengths 1–140) | parameters (stringlengths 0–87.7k) | argument_list (stringclasses 1 value) | return_statement (stringlengths 0–426k) | docstring (stringlengths 0–64.3k) | docstring_summary (stringlengths 0–26.3k) | docstring_tokens (list) | function (stringlengths 18–4.83M) | function_tokens (list) | url (stringlengths 83–304) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
containernet/containernet
|
7b2ae38d691b2ed8da2b2700b85ed03562271d01
|
examples/sshd.py
|
python
|
TreeNet
|
( depth=1, fanout=2, **kwargs )
|
return Mininet( topo, waitConnected=True, **kwargs )
|
Convenience function for creating tree networks.
|
Convenience function for creating tree networks.
|
[
"Convenience",
"function",
"for",
"creating",
"tree",
"networks",
"."
] |
def TreeNet( depth=1, fanout=2, **kwargs ):
    "Convenience function for creating tree networks."
    topo = TreeTopo( depth, fanout )
    return Mininet( topo, waitConnected=True, **kwargs )
|
[
"def",
"TreeNet",
"(",
"depth",
"=",
"1",
",",
"fanout",
"=",
"2",
",",
"*",
"*",
"kwargs",
")",
":",
"topo",
"=",
"TreeTopo",
"(",
"depth",
",",
"fanout",
")",
"return",
"Mininet",
"(",
"topo",
",",
"waitConnected",
"=",
"True",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/containernet/containernet/blob/7b2ae38d691b2ed8da2b2700b85ed03562271d01/examples/sshd.py#L29-L32
|
|
huggingface/datasets
|
249b4a38390bf1543f5b6e2f3dc208b5689c1c13
|
datasets/assin/assin.py
|
python
|
Assin._split_generators
|
(self, dl_manager)
|
return [
    datasets.SplitGenerator(
        name=datasets.Split.TRAIN,
        gen_kwargs={
            "filepaths": train_paths,
            "files": dl_manager.iter_archive(archive),
        },
    ),
    datasets.SplitGenerator(
        name=datasets.Split.TEST,
        gen_kwargs={
            "filepaths": test_paths,
            "files": dl_manager.iter_archive(archive),
        },
    ),
    datasets.SplitGenerator(
        name=datasets.Split.VALIDATION,
        gen_kwargs={
            "filepaths": dev_paths,
            "files": dl_manager.iter_archive(archive),
        },
    ),
]
|
Returns SplitGenerators.
|
Returns SplitGenerators.
|
[
"Returns",
"SplitGenerators",
"."
] |
def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    archive = dl_manager.download(_URL)
    train_paths = []
    dev_paths = []
    test_paths = []
    if self.config.name == "full" or self.config.name == "ptpt":
        train_paths.append("assin-ptpt-train.xml")
        dev_paths.append("assin-ptpt-dev.xml")
        test_paths.append("assin-ptpt-test.xml")
    if self.config.name == "full" or self.config.name == "ptbr":
        train_paths.append("assin-ptbr-train.xml")
        dev_paths.append("assin-ptbr-dev.xml")
        test_paths.append("assin-ptbr-test.xml")
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "filepaths": train_paths,
                "files": dl_manager.iter_archive(archive),
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "filepaths": test_paths,
                "files": dl_manager.iter_archive(archive),
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={
                "filepaths": dev_paths,
                "files": dl_manager.iter_archive(archive),
            },
        ),
    ]
|
[
"def",
"_split_generators",
"(",
"self",
",",
"dl_manager",
")",
":",
"archive",
"=",
"dl_manager",
".",
"download",
"(",
"_URL",
")",
"train_paths",
"=",
"[",
"]",
"dev_paths",
"=",
"[",
"]",
"test_paths",
"=",
"[",
"]",
"if",
"self",
".",
"config",
".",
"name",
"==",
"\"full\"",
"or",
"self",
".",
"config",
".",
"name",
"==",
"\"ptpt\"",
":",
"train_paths",
".",
"append",
"(",
"\"assin-ptpt-train.xml\"",
")",
"dev_paths",
".",
"append",
"(",
"\"assin-ptpt-dev.xml\"",
")",
"test_paths",
".",
"append",
"(",
"\"assin-ptpt-test.xml\"",
")",
"if",
"self",
".",
"config",
".",
"name",
"==",
"\"full\"",
"or",
"self",
".",
"config",
".",
"name",
"==",
"\"ptbr\"",
":",
"train_paths",
".",
"append",
"(",
"\"assin-ptbr-train.xml\"",
")",
"dev_paths",
".",
"append",
"(",
"\"assin-ptbr-dev.xml\"",
")",
"test_paths",
".",
"append",
"(",
"\"assin-ptbr-test.xml\"",
")",
"return",
"[",
"datasets",
".",
"SplitGenerator",
"(",
"name",
"=",
"datasets",
".",
"Split",
".",
"TRAIN",
",",
"gen_kwargs",
"=",
"{",
"\"filepaths\"",
":",
"train_paths",
",",
"\"files\"",
":",
"dl_manager",
".",
"iter_archive",
"(",
"archive",
")",
",",
"}",
",",
")",
",",
"datasets",
".",
"SplitGenerator",
"(",
"name",
"=",
"datasets",
".",
"Split",
".",
"TEST",
",",
"gen_kwargs",
"=",
"{",
"\"filepaths\"",
":",
"test_paths",
",",
"\"files\"",
":",
"dl_manager",
".",
"iter_archive",
"(",
"archive",
")",
",",
"}",
",",
")",
",",
"datasets",
".",
"SplitGenerator",
"(",
"name",
"=",
"datasets",
".",
"Split",
".",
"VALIDATION",
",",
"gen_kwargs",
"=",
"{",
"\"filepaths\"",
":",
"dev_paths",
",",
"\"files\"",
":",
"dl_manager",
".",
"iter_archive",
"(",
"archive",
")",
",",
"}",
",",
")",
",",
"]"
] |
https://github.com/huggingface/datasets/blob/249b4a38390bf1543f5b6e2f3dc208b5689c1c13/datasets/assin/assin.py#L110-L150
|
|
wonderworks-software/PyFlow
|
57e2c858933bf63890d769d985396dfad0fca0f0
|
PyFlow/Core/GraphBase.py
|
python
|
GraphBase.count
|
(self)
|
return self._nodes.__len__()
|
Returns number of nodes
:rtype: int
|
Returns number of nodes
|
[
"Returns",
"number",
"of",
"nodes"
] |
def count(self):
    """Returns number of nodes
    :rtype: int
    """
    return self._nodes.__len__()
|
[
"def",
"count",
"(",
"self",
")",
":",
"return",
"self",
".",
"_nodes",
".",
"__len__",
"(",
")"
] |
https://github.com/wonderworks-software/PyFlow/blob/57e2c858933bf63890d769d985396dfad0fca0f0/PyFlow/Core/GraphBase.py#L481-L486
|
|
uqfoundation/mystic
|
154e6302d1f2f94e8f13e88ecc5f24241cc28ac7
|
mystic/math/measures.py
|
python
|
tvariance
|
(samples, weights=None, k=0, clip=False)
|
return mean(abs(samples - trim_mean)**2, weights)
|
calculate the (weighted) trimmed variance for a list of points
Inputs:
samples -- a list of sample points
weights -- a list of sample weights
k -- percent samples to trim (k%) [tuple (lo,hi) or float if lo=hi]
clip -- if True, winsorize instead of trimming k% of samples
NOTE: if all samples are excluded, will return nan
|
calculate the (weighted) trimmed variance for a list of points
|
[
"calculate",
"the",
"(",
"weighted",
")",
"trimmed",
"variance",
"for",
"a",
"list",
"of",
"points"
] |
def tvariance(samples, weights=None, k=0, clip=False):
    """calculate the (weighted) trimmed variance for a list of points
    Inputs:
        samples -- a list of sample points
        weights -- a list of sample weights
        k -- percent samples to trim (k%) [tuple (lo,hi) or float if lo=hi]
        clip -- if True, winsorize instead of trimming k% of samples
    NOTE: if all samples are excluded, will return nan
    """
    samples,weights = _sort(samples,weights)
    weights = _k(weights,k,clip)
    trim_mean = sum(samples * weights)/sum(weights)
    return mean(abs(samples - trim_mean)**2, weights)
|
[
"def",
"tvariance",
"(",
"samples",
",",
"weights",
"=",
"None",
",",
"k",
"=",
"0",
",",
"clip",
"=",
"False",
")",
":",
"samples",
",",
"weights",
"=",
"_sort",
"(",
"samples",
",",
"weights",
")",
"weights",
"=",
"_k",
"(",
"weights",
",",
"k",
",",
"clip",
")",
"trim_mean",
"=",
"sum",
"(",
"samples",
"*",
"weights",
")",
"/",
"sum",
"(",
"weights",
")",
"return",
"mean",
"(",
"abs",
"(",
"samples",
"-",
"trim_mean",
")",
"**",
"2",
",",
"weights",
")"
] |
https://github.com/uqfoundation/mystic/blob/154e6302d1f2f94e8f13e88ecc5f24241cc28ac7/mystic/math/measures.py#L1582-L1596
|
|
kanzure/nanoengineer
|
874e4c9f8a9190f093625b267f9767e19f82e6c4
|
cad/src/cnt/commands/InsertNanotube/InsertNanotube_EditCommand.py
|
python
|
InsertNanotube_EditCommand.getCursorText
|
(self, endPoint1, endPoint2)
|
return text , textColor
|
This is used as a callback method in CntLine mode
@see: NanotubeLineMode.setParams, NanotubeLineMode_GM.Draw
|
This is used as a callback method in CntLine mode
|
[
"This",
"is",
"used",
"as",
"a",
"callback",
"method",
"in",
"CntLine",
"mode"
] |
def getCursorText(self, endPoint1, endPoint2):
    """
    This is used as a callback method in CntLine mode
    @see: NanotubeLineMode.setParams, NanotubeLineMode_GM.Draw
    """
    text = ""
    textColor = env.prefs[cursorTextColor_prefs_key] # Mark 2008-08-28
    if endPoint1 is None or endPoint2 is None:
        return text, textColor
    if not env.prefs[insertNanotubeEditCommand_showCursorTextCheckBox_prefs_key]:
        return text, textColor
    vec = endPoint2 - endPoint1
    ntLength = vlen(vec)
    lengthString = self._getCursorText_length(ntLength)
    thetaString = ''
    if env.prefs[insertNanotubeEditCommand_cursorTextCheckBox_angle_prefs_key]:
        theta = self.glpane.get_angle_made_with_screen_right(vec)
        thetaString = '%5.2f deg'%theta
    commaString = ", "
    text = lengthString
    if text and thetaString:
        text += commaString
    text += thetaString
    return text , textColor
|
[
"def",
"getCursorText",
"(",
"self",
",",
"endPoint1",
",",
"endPoint2",
")",
":",
"text",
"=",
"\"\"",
"textColor",
"=",
"env",
".",
"prefs",
"[",
"cursorTextColor_prefs_key",
"]",
"# Mark 2008-08-28",
"if",
"endPoint1",
"is",
"None",
"or",
"endPoint2",
"is",
"None",
":",
"return",
"text",
",",
"textColor",
"if",
"not",
"env",
".",
"prefs",
"[",
"insertNanotubeEditCommand_showCursorTextCheckBox_prefs_key",
"]",
":",
"return",
"text",
",",
"textColor",
"vec",
"=",
"endPoint2",
"-",
"endPoint1",
"ntLength",
"=",
"vlen",
"(",
"vec",
")",
"lengthString",
"=",
"self",
".",
"_getCursorText_length",
"(",
"ntLength",
")",
"thetaString",
"=",
"''",
"if",
"env",
".",
"prefs",
"[",
"insertNanotubeEditCommand_cursorTextCheckBox_angle_prefs_key",
"]",
":",
"theta",
"=",
"self",
".",
"glpane",
".",
"get_angle_made_with_screen_right",
"(",
"vec",
")",
"thetaString",
"=",
"'%5.2f deg'",
"%",
"theta",
"commaString",
"=",
"\", \"",
"text",
"=",
"lengthString",
"if",
"text",
"and",
"thetaString",
":",
"text",
"+=",
"commaString",
"text",
"+=",
"thetaString",
"return",
"text",
",",
"textColor"
] |
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/cnt/commands/InsertNanotube/InsertNanotube_EditCommand.py#L324-L359
|
|
google-research/pegasus
|
649a5978e45a078e1574ed01c92fc12d3aa05f7f
|
pegasus/params/estimator_utils.py
|
python
|
get_assignment_map_from_checkpoint
|
(tvars, init_checkpoint)
|
return (assignment_map, initialized_variable_names)
|
Compute the union of the current variables and checkpoint variables.
|
Compute the union of the current variables and checkpoint variables.
|
[
"Compute",
"the",
"union",
"of",
"the",
"current",
"variables",
"and",
"checkpoint",
"variables",
"."
] |
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables."""
    assignment_map = {}
    initialized_variable_names = {}
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        m = re.match("^(.*):\\d+$", name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var
    init_vars = tf.train.list_variables(init_checkpoint)
    assignment_map = collections.OrderedDict()
    for x in init_vars:
        (name, var) = (x[0], x[1])
        if name not in name_to_variable:
            continue
        assignment_map[name] = name
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ":0"] = 1
    return (assignment_map, initialized_variable_names)
|
[
"def",
"get_assignment_map_from_checkpoint",
"(",
"tvars",
",",
"init_checkpoint",
")",
":",
"assignment_map",
"=",
"{",
"}",
"initialized_variable_names",
"=",
"{",
"}",
"name_to_variable",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"var",
"in",
"tvars",
":",
"name",
"=",
"var",
".",
"name",
"m",
"=",
"re",
".",
"match",
"(",
"\"^(.*):\\\\d+$\"",
",",
"name",
")",
"if",
"m",
"is",
"not",
"None",
":",
"name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"name_to_variable",
"[",
"name",
"]",
"=",
"var",
"init_vars",
"=",
"tf",
".",
"train",
".",
"list_variables",
"(",
"init_checkpoint",
")",
"assignment_map",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"x",
"in",
"init_vars",
":",
"(",
"name",
",",
"var",
")",
"=",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"1",
"]",
")",
"if",
"name",
"not",
"in",
"name_to_variable",
":",
"continue",
"assignment_map",
"[",
"name",
"]",
"=",
"name",
"initialized_variable_names",
"[",
"name",
"]",
"=",
"1",
"initialized_variable_names",
"[",
"name",
"+",
"\":0\"",
"]",
"=",
"1",
"return",
"(",
"assignment_map",
",",
"initialized_variable_names",
")"
] |
https://github.com/google-research/pegasus/blob/649a5978e45a078e1574ed01c92fc12d3aa05f7f/pegasus/params/estimator_utils.py#L216-L240
|
|
jazzband/django-admin2
|
7770da8a4931db60326f87d9fa7a15b1ef704c4c
|
example/polls/models.py
|
python
|
Choice.__str__
|
(self)
|
return self.choice_text
|
[] |
def __str__(self):
    return self.choice_text
|
[
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"self",
".",
"choice_text"
] |
https://github.com/jazzband/django-admin2/blob/7770da8a4931db60326f87d9fa7a15b1ef704c4c/example/polls/models.py#L36-L37
|
|||
holzschu/Carnets
|
44effb10ddfc6aa5c8b0687582a724ba82c6b547
|
Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/distutils/npy_pkg_config.py
|
python
|
VariableSet.variables
|
(self)
|
return list(self._raw_data.keys())
|
Return the list of variable names.
Parameters
----------
None
Returns
-------
names : list of str
The names of all variables in the `VariableSet` instance.
|
Return the list of variable names.
|
[
"Return",
"the",
"list",
"of",
"variable",
"names",
"."
] |
def variables(self):
    """
    Return the list of variable names.
    Parameters
    ----------
    None
    Returns
    -------
    names : list of str
        The names of all variables in the `VariableSet` instance.
    """
    return list(self._raw_data.keys())
|
[
"def",
"variables",
"(",
"self",
")",
":",
"return",
"list",
"(",
"self",
".",
"_raw_data",
".",
"keys",
"(",
")",
")"
] |
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/numpy-1.16.0-py3.7-macosx-10.9-x86_64.egg/numpy/distutils/npy_pkg_config.py#L197-L211
|
|
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/lib-python/3/turtledemo/clock.py
|
python
|
hand
|
(laenge, spitze)
|
[] |
def hand(laenge, spitze):
    fd(laenge*1.15)
    rt(90)
    fd(spitze/2.0)
    lt(120)
    fd(spitze)
    lt(120)
    fd(spitze)
    lt(120)
    fd(spitze/2.0)
|
[
"def",
"hand",
"(",
"laenge",
",",
"spitze",
")",
":",
"fd",
"(",
"laenge",
"*",
"1.15",
")",
"rt",
"(",
"90",
")",
"fd",
"(",
"spitze",
"/",
"2.0",
")",
"lt",
"(",
"120",
")",
"fd",
"(",
"spitze",
")",
"lt",
"(",
"120",
")",
"fd",
"(",
"spitze",
")",
"lt",
"(",
"120",
")",
"fd",
"(",
"spitze",
"/",
"2.0",
")"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/turtledemo/clock.py#L25-L34
|
||||
FoglyOgly/Meowth
|
91829c57767ffa4db1083eaac348b220cb984b94
|
meowth/exts/map/map_cog.py
|
python
|
Mapper.importgymsheet
|
(self, ctx, *args)
|
Delete current Gyms and import the fresh list of Gyms from a Google spreadsheet.
Format must match the [template.](https://docs.google.com/spreadsheets/d/1W-VTAzlnDefgBIXoc7kuRcxJIlYo7iojqRRQ0uwTifc/edit?usp=sharing)
Gyms will only be usable by the server they were imported in.
|
Delete current Gyms and import the fresh list of Gyms from a Google spreadsheet.
|
[
"Delete",
"current",
"Gyms",
"and",
"import",
"the",
"fresh",
"list",
"of",
"Gyms",
"from",
"a",
"Google",
"spreadsheet",
"."
] |
async def importgymsheet(self, ctx, *args):
    """Delete current Gyms and import the fresh list of Gyms from a Google spreadsheet.
    Format must match the [template.](https://docs.google.com/spreadsheets/d/1W-VTAzlnDefgBIXoc7kuRcxJIlYo7iojqRRQ0uwTifc/edit?usp=sharing)
    Gyms will only be usable by the server they were imported in.
    """
    if args:
        url = args[0]
        ids = self.spreadsheet_ids_from_url(url)
        if not ids:
            await ctx.send("Please provide a link to a Google spreadsheet.")
            return
        # TODO: Save ids to database
        # await ctx.send("Saving spreadsheet link.")
    else:
        # TODO: Get ids from database
        ids = None
        if not ids:
            await ctx.send("Please provide a link to a Google spreadsheet.")
            return
        await ctx.send("Using saved spreadsheet link.")
    f = self.download_spreadsheet(*ids)
    if not f:
        await ctx.send("Failed to get data from Google.")
        return
    await ctx.send("Downloaded spreadsheet.")
    # Delete old gyms.
    guild_id = ctx.guild.id
    table = ctx.bot.dbi.table('gyms')
    query = table.query
    query.where(guild=guild_id)
    await query.delete()
    await ctx.send("Deleted old Gyms, starting import...")
    # Import new gyms.
    if await self.gyms_from_csv(ctx, f):
        await ctx.send("Import successful!")
    else:
        await ctx.send("Import failed.")
|
[
"async",
"def",
"importgymsheet",
"(",
"self",
",",
"ctx",
",",
"*",
"args",
")",
":",
"if",
"args",
":",
"url",
"=",
"args",
"[",
"0",
"]",
"ids",
"=",
"self",
".",
"spreadsheet_ids_from_url",
"(",
"url",
")",
"if",
"not",
"ids",
":",
"await",
"ctx",
".",
"send",
"(",
"\"Please provide a link to a Google spreadsheet.\"",
")",
"return",
"# TODO: Save ids to database",
"# await ctx.send(\"Saving spreadsheet link.\")",
"else",
":",
"# TODO: Get ids from database",
"ids",
"=",
"None",
"if",
"not",
"ids",
":",
"await",
"ctx",
".",
"send",
"(",
"\"Please provide a link to a Google spreadsheet.\"",
")",
"return",
"await",
"ctx",
".",
"send",
"(",
"\"Using saved spreadsheet link.\"",
")",
"f",
"=",
"self",
".",
"download_spreadsheet",
"(",
"*",
"ids",
")",
"if",
"not",
"f",
":",
"await",
"ctx",
".",
"send",
"(",
"\"Failed to get data from Google.\"",
")",
"return",
"await",
"ctx",
".",
"send",
"(",
"\"Downloaded spreadsheet.\"",
")",
"# Delete old gyms.",
"guild_id",
"=",
"ctx",
".",
"guild",
".",
"id",
"table",
"=",
"ctx",
".",
"bot",
".",
"dbi",
".",
"table",
"(",
"'gyms'",
")",
"query",
"=",
"table",
".",
"query",
"query",
".",
"where",
"(",
"guild",
"=",
"guild_id",
")",
"await",
"query",
".",
"delete",
"(",
")",
"await",
"ctx",
".",
"send",
"(",
"\"Deleted old Gyms, starting import...\"",
")",
"# Import new gyms.",
"if",
"await",
"self",
".",
"gyms_from_csv",
"(",
"ctx",
",",
"f",
")",
":",
"await",
"ctx",
".",
"send",
"(",
"\"Import successful!\"",
")",
"else",
":",
"await",
"ctx",
".",
"send",
"(",
"\"Import failed.\"",
")"
] |
https://github.com/FoglyOgly/Meowth/blob/91829c57767ffa4db1083eaac348b220cb984b94/meowth/exts/map/map_cog.py#L1151-L1188
|
||
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_flaskbb/lib/python2.7/site-packages/billiard/forkserver.py
|
python
|
_serve_one
|
(s, listener, alive_r, handler)
|
[] |
def _serve_one(s, listener, alive_r, handler):
    # close unnecessary stuff and reset SIGCHLD handler
    listener.close()
    os.close(alive_r)
    signal.signal(signal.SIGCHLD, handler)
    # receive fds from parent process
    fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
    s.close()
    assert len(fds) <= MAXFDS_TO_SEND
    (child_r, child_w, _forkserver._forkserver_alive_fd,
     stfd, _forkserver._inherited_fds) = __unpack_fds(*fds)
    semaphore_tracker._semaphore_tracker._fd = stfd
    # send pid to client processes
    write_unsigned(child_w, os.getpid())
    # reseed random number generator
    if 'random' in sys.modules:
        import random
        random.seed()
    # run process object received over pipe
    code = spawn._main(child_r)
    # write the exit code to the pipe
    write_unsigned(child_w, code)
|
[
"def",
"_serve_one",
"(",
"s",
",",
"listener",
",",
"alive_r",
",",
"handler",
")",
":",
"# close unnecessary stuff and reset SIGCHLD handler",
"listener",
".",
"close",
"(",
")",
"os",
".",
"close",
"(",
"alive_r",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGCHLD",
",",
"handler",
")",
"# receive fds from parent process",
"fds",
"=",
"reduction",
".",
"recvfds",
"(",
"s",
",",
"MAXFDS_TO_SEND",
"+",
"1",
")",
"s",
".",
"close",
"(",
")",
"assert",
"len",
"(",
"fds",
")",
"<=",
"MAXFDS_TO_SEND",
"(",
"child_r",
",",
"child_w",
",",
"_forkserver",
".",
"_forkserver_alive_fd",
",",
"stfd",
",",
"_forkserver",
".",
"_inherited_fds",
")",
"=",
"__unpack_fds",
"(",
"*",
"fds",
")",
"semaphore_tracker",
".",
"_semaphore_tracker",
".",
"_fd",
"=",
"stfd",
"# send pid to client processes",
"write_unsigned",
"(",
"child_w",
",",
"os",
".",
"getpid",
"(",
")",
")",
"# reseed random number generator",
"if",
"'random'",
"in",
"sys",
".",
"modules",
":",
"import",
"random",
"random",
".",
"seed",
"(",
")",
"# run process object received over pipe",
"code",
"=",
"spawn",
".",
"_main",
"(",
"child_r",
")",
"# write the exit code to the pipe",
"write_unsigned",
"(",
"child_w",
",",
"code",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/billiard/forkserver.py#L205-L232
|
||||
aws-cloudformation/cfn-lint
|
16df5d0ca0d8ebcf9330ebea701e83d883b47217
|
src/cfnlint/rules/parameters/Used.py
|
python
|
Used.searchstring
|
(self, string, parameter)
|
return regex.findall(string)
|
Search string for tokenized fields
|
Search string for tokenized fields
|
[
"Search",
"string",
"for",
"tokenized",
"fields"
] |
def searchstring(self, string, parameter):
    """Search string for tokenized fields"""
    regex = re.compile(r'\${(%s)}' % parameter)
    return regex.findall(string)
|
[
"def",
"searchstring",
"(",
"self",
",",
"string",
",",
"parameter",
")",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'\\${(%s)}'",
"%",
"parameter",
")",
"return",
"regex",
".",
"findall",
"(",
"string",
")"
] |
https://github.com/aws-cloudformation/cfn-lint/blob/16df5d0ca0d8ebcf9330ebea701e83d883b47217/src/cfnlint/rules/parameters/Used.py#L20-L23
|
|
Yelp/clusterman
|
54beef89c01a2681aafd1fbb93b6ad5f6252d6cf
|
clusterman/aws/auto_scaling_resource_group.py
|
python
|
AutoScalingResourceGroup._get_options_for_instance_type
|
(
self, instance_type: str, weight: Optional[float] = None,
)
|
return options
|
Generate a list of possible ClusterNode types that could be added to this ASG,
given a particular instance type
|
Generate a list of possible ClusterNode types that could be added to this ASG,
given a particular instance type
|
[
"Generate",
"a",
"list",
"of",
"possible",
"ClusterNode",
"types",
"that",
"could",
"be",
"added",
"to",
"this",
"ASG",
"given",
"a",
"particular",
"instance",
"type"
] |
def _get_options_for_instance_type(
    self, instance_type: str, weight: Optional[float] = None,
) -> List[ClusterNodeMetadata]:
    """ Generate a list of possible ClusterNode types that could be added to this ASG,
    given a particular instance type """
    options = []
    az_options = self._group_config["AvailabilityZones"]
    for az in az_options:
        instance_market = InstanceMarket(instance_type, az)
        weight = weight or self.market_weight(instance_market)
        options.append(
            ClusterNodeMetadata(
                agent=AgentMetadata(total_resources=ClustermanResources.from_instance_type(instance_type)),
                instance=InstanceMetadata(market=instance_market, weight=weight),
            )
        )
    return options
|
[
"def",
"_get_options_for_instance_type",
"(",
"self",
",",
"instance_type",
":",
"str",
",",
"weight",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
",",
")",
"->",
"List",
"[",
"ClusterNodeMetadata",
"]",
":",
"options",
"=",
"[",
"]",
"az_options",
"=",
"self",
".",
"_group_config",
"[",
"\"AvailabilityZones\"",
"]",
"for",
"az",
"in",
"az_options",
":",
"instance_market",
"=",
"InstanceMarket",
"(",
"instance_type",
",",
"az",
")",
"weight",
"=",
"weight",
"or",
"self",
".",
"market_weight",
"(",
"instance_market",
")",
"options",
".",
"append",
"(",
"ClusterNodeMetadata",
"(",
"agent",
"=",
"AgentMetadata",
"(",
"total_resources",
"=",
"ClustermanResources",
".",
"from_instance_type",
"(",
"instance_type",
")",
")",
",",
"instance",
"=",
"InstanceMetadata",
"(",
"market",
"=",
"instance_market",
",",
"weight",
"=",
"weight",
")",
",",
")",
")",
"return",
"options"
] |
https://github.com/Yelp/clusterman/blob/54beef89c01a2681aafd1fbb93b6ad5f6252d6cf/clusterman/aws/auto_scaling_resource_group.py#L197-L214
|
|
missionpinball/mpf
|
8e6b74cff4ba06d2fec9445742559c1068b88582
|
mpf/platforms/virtual_pinball/virtual_pinball.py
|
python
|
VirtualPinballPlatform.vpx_changed_lamps
|
(self)
|
return self._get_changed_lights_by_subtype("matrix")
|
Return changed lamps since last call.
|
Return changed lamps since last call.
|
[
"Return",
"changed",
"lamps",
"since",
"last",
"call",
"."
] |
def vpx_changed_lamps(self):
    """Return changed lamps since last call."""
    return self._get_changed_lights_by_subtype("matrix")
|
[
"def",
"vpx_changed_lamps",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get_changed_lights_by_subtype",
"(",
"\"matrix\"",
")"
] |
https://github.com/missionpinball/mpf/blob/8e6b74cff4ba06d2fec9445742559c1068b88582/mpf/platforms/virtual_pinball/virtual_pinball.py#L230-L232
|
|
jython/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
lib-python/3.5.1/hashlib.py
|
python
|
__hash_new
|
(name, data=b'')
|
new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be bytes).
|
new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be bytes).
|
[
"new",
"(",
"name",
"data",
"=",
"b",
")",
"-",
"Return",
"a",
"new",
"hashing",
"object",
"using",
"the",
"named",
"algorithm",
";",
"optionally",
"initialized",
"with",
"data",
"(",
"which",
"must",
"be",
"bytes",
")",
"."
] |
def __hash_new(name, data=b''):
    """new(name, data=b'') - Return a new hashing object using the named algorithm;
    optionally initialized with data (which must be bytes).
    """
    try:
        return _hashlib.new(name, data)
    except ValueError:
        # If the _hashlib module (OpenSSL) doesn't support the named
        # hash, try using our builtin implementations.
        # This allows for SHA224/256 and SHA384/512 support even though
        # the OpenSSL library prior to 0.9.8 doesn't provide them.
        return __get_builtin_constructor(name)(data)
|
[
"def",
"__hash_new",
"(",
"name",
",",
"data",
"=",
"b''",
")",
":",
"try",
":",
"return",
"_hashlib",
".",
"new",
"(",
"name",
",",
"data",
")",
"except",
"ValueError",
":",
"# If the _hashlib module (OpenSSL) doesn't support the named",
"# hash, try using our builtin implementations.",
"# This allows for SHA224/256 and SHA384/512 support even though",
"# the OpenSSL library prior to 0.9.8 doesn't provide them.",
"return",
"__get_builtin_constructor",
"(",
"name",
")",
"(",
"data",
")"
] |
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/hashlib.py#L117-L128
|
||
Kinto/kinto
|
a9e46e57de8f33c7be098c6f583de18df03b2824
|
kinto/plugins/default_bucket/__init__.py
|
python
|
resource_create_object
|
(request, resource_cls, uri)
|
return obj
|
Implicitly create a resource (or fail silently).
In the default bucket, the bucket and collection are implicitly
created. This helper creates one of those resources using a
simulated request and context that is appropriate for the
resource. Also runs create events as though the resource were
created in a subrequest.
If the resource already exists, do nothing.
|
Implicitly create a resource (or fail silently).
|
[
"Implicitly",
"create",
"a",
"resource",
"(",
"or",
"fail",
"silently",
")",
"."
] |
def resource_create_object(request, resource_cls, uri):
    """Implicitly create a resource (or fail silently).
    In the default bucket, the bucket and collection are implicitly
    created. This helper creates one of those resources using a
    simulated request and context that is appropriate for the
    resource. Also runs create events as though the resource were
    created in a subrequest.
    If the resource already exists, do nothing.
    """
    resource_name, matchdict = view_lookup(request, uri)
    # Build a fake request, mainly used to populate the create events that
    # will be triggered by the resource.
    fakerequest = build_request(request, {"method": "PUT", "path": uri})
    fakerequest.matchdict = matchdict
    fakerequest.bound_data = request.bound_data
    fakerequest.authn_type = request.authn_type
    fakerequest.selected_userid = request.selected_userid
    fakerequest.errors = request.errors
    fakerequest.current_resource_name = resource_name
    obj_id = matchdict["id"]
    # Fake context, required to instantiate a resource.
    context = RouteFactory(fakerequest)
    context.resource_name = resource_name
    resource = resource_cls(fakerequest, context)
    # Check that provided id is valid for this resource.
    if not resource.model.id_generator.match(obj_id):
        error_details = {"location": "path", "description": f"Invalid {resource_name} id"}
        raise_invalid(resource.request, **error_details)
    data = {"id": obj_id}
    try:
        obj = resource.model.create_object(data)
    except UnicityError:
        # The record already exists; skip running events
        return {}
    # Since the current request is not a resource (but a straight Service),
    # we simulate a request on a resource.
    # This will be used in the resource event payload.
    resource.postprocess(obj, action=ACTIONS.CREATE)
    return obj
|
[
"def",
"resource_create_object",
"(",
"request",
",",
"resource_cls",
",",
"uri",
")",
":",
"resource_name",
",",
"matchdict",
"=",
"view_lookup",
"(",
"request",
",",
"uri",
")",
"# Build a fake request, mainly used to populate the create events that",
"# will be triggered by the resource.",
"fakerequest",
"=",
"build_request",
"(",
"request",
",",
"{",
"\"method\"",
":",
"\"PUT\"",
",",
"\"path\"",
":",
"uri",
"}",
")",
"fakerequest",
".",
"matchdict",
"=",
"matchdict",
"fakerequest",
".",
"bound_data",
"=",
"request",
".",
"bound_data",
"fakerequest",
".",
"authn_type",
"=",
"request",
".",
"authn_type",
"fakerequest",
".",
"selected_userid",
"=",
"request",
".",
"selected_userid",
"fakerequest",
".",
"errors",
"=",
"request",
".",
"errors",
"fakerequest",
".",
"current_resource_name",
"=",
"resource_name",
"obj_id",
"=",
"matchdict",
"[",
"\"id\"",
"]",
"# Fake context, required to instantiate a resource.",
"context",
"=",
"RouteFactory",
"(",
"fakerequest",
")",
"context",
".",
"resource_name",
"=",
"resource_name",
"resource",
"=",
"resource_cls",
"(",
"fakerequest",
",",
"context",
")",
"# Check that provided id is valid for this resource.",
"if",
"not",
"resource",
".",
"model",
".",
"id_generator",
".",
"match",
"(",
"obj_id",
")",
":",
"error_details",
"=",
"{",
"\"location\"",
":",
"\"path\"",
",",
"\"description\"",
":",
"f\"Invalid {resource_name} id\"",
"}",
"raise_invalid",
"(",
"resource",
".",
"request",
",",
"*",
"*",
"error_details",
")",
"data",
"=",
"{",
"\"id\"",
":",
"obj_id",
"}",
"try",
":",
"obj",
"=",
"resource",
".",
"model",
".",
"create_object",
"(",
"data",
")",
"except",
"UnicityError",
":",
"# The record already exists; skip running events",
"return",
"{",
"}",
"# Since the current request is not a resource (but a straight Service),",
"# we simulate a request on a resource.",
"# This will be used in the resource event payload.",
"resource",
".",
"postprocess",
"(",
"obj",
",",
"action",
"=",
"ACTIONS",
".",
"CREATE",
")",
"return",
"obj"
] |
https://github.com/Kinto/kinto/blob/a9e46e57de8f33c7be098c6f583de18df03b2824/kinto/plugins/default_bucket/__init__.py#L60-L107
|
|
dib-lab/khmer
|
fb65d21eaedf0d397d49ae3debc578897f9d6eb4
|
sandbox/sweep-reads.py
|
python
|
ReadBuffer.__init__
|
(self)
|
[] |
def __init__(self):
    self.buf = []
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"buf",
"=",
"[",
"]"
] |
https://github.com/dib-lab/khmer/blob/fb65d21eaedf0d397d49ae3debc578897f9d6eb4/sandbox/sweep-reads.py#L100-L101
|
||||
mdiazcl/fuzzbunch-debian
|
2b76c2249ade83a389ae3badb12a1bd09901fd2c
|
windows/Resources/Python/Core/Lib/lib2to3/btm_matcher.py
|
python
|
BottomMatcher.add
|
(self, pattern, start)
|
Recursively adds a linear pattern to the AC automaton
|
Recursively adds a linear pattern to the AC automaton
|
[
"Recursively",
"adds",
"a",
"linear",
"pattern",
"to",
"the",
"AC",
"automaton"
] |
def add(self, pattern, start):
    """Recursively adds a linear pattern to the AC automaton"""
    if not pattern:
        return [
         start]
    else:
        if isinstance(pattern[0], tuple):
            match_nodes = []
            for alternative in pattern[0]:
                end_nodes = self.add(alternative, start=start)
                for end in end_nodes:
                    match_nodes.extend(self.add(pattern[1:], end))
            return match_nodes
        if pattern[0] not in start.transition_table:
            next_node = BMNode()
            start.transition_table[pattern[0]] = next_node
        else:
            next_node = start.transition_table[pattern[0]]
        if pattern[1:]:
            end_nodes = self.add(pattern[1:], start=next_node)
        else:
            end_nodes = [
             next_node]
        return end_nodes
|
[
"def",
"add",
"(",
"self",
",",
"pattern",
",",
"start",
")",
":",
"if",
"not",
"pattern",
":",
"return",
"[",
"start",
"]",
"else",
":",
"if",
"isinstance",
"(",
"pattern",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"match_nodes",
"=",
"[",
"]",
"for",
"alternative",
"in",
"pattern",
"[",
"0",
"]",
":",
"end_nodes",
"=",
"self",
".",
"add",
"(",
"alternative",
",",
"start",
"=",
"start",
")",
"for",
"end",
"in",
"end_nodes",
":",
"match_nodes",
".",
"extend",
"(",
"self",
".",
"add",
"(",
"pattern",
"[",
"1",
":",
"]",
",",
"end",
")",
")",
"return",
"match_nodes",
"if",
"pattern",
"[",
"0",
"]",
"not",
"in",
"start",
".",
"transition_table",
":",
"next_node",
"=",
"BMNode",
"(",
")",
"start",
".",
"transition_table",
"[",
"pattern",
"[",
"0",
"]",
"]",
"=",
"next_node",
"else",
":",
"next_node",
"=",
"start",
".",
"transition_table",
"[",
"pattern",
"[",
"0",
"]",
"]",
"if",
"pattern",
"[",
"1",
":",
"]",
":",
"end_nodes",
"=",
"self",
".",
"add",
"(",
"pattern",
"[",
"1",
":",
"]",
",",
"start",
"=",
"next_node",
")",
"else",
":",
"end_nodes",
"=",
"[",
"next_node",
"]",
"return",
"end_nodes"
] |
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/lib2to3/btm_matcher.py#L53-L77
|
||
dmlc/dgl
|
8d14a739bc9e446d6c92ef83eafe5782398118de
|
python/dgl/distributed/rpc.py
|
python
|
Request.__getstate__
|
(self)
|
Get serializable states.
Must be inherited by subclasses. For array members, return them as
individual return values (i.e., do not put them in containers like
dictionary or list).
|
Get serializable states.
|
[
"Get",
"serializable",
"states",
"."
] |
def __getstate__(self):
    """Get serializable states.
    Must be inherited by subclasses. For array members, return them as
    individual return values (i.e., do not put them in containers like
    dictionary or list).
    """
|
[
"def",
"__getstate__",
"(",
"self",
")",
":"
] |
https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/python/dgl/distributed/rpc.py#L335-L341
|
||
SUSE/DeepSea
|
9c7fad93915ba1250c40d50c855011e9fe41ed21
|
srv/modules/runners/osd.py
|
python
|
OSDUtil.replace
|
(self)
|
return self._call()
|
1) ceph osd out $id
2) systemctl stop ceph-osd@$id (maybe do more see osd.py (terminate()))
2.1) also maybe wait if not force
3) ceph osd destroy $id --yes-i-really-mean-it
4) ceph-volume lvm zap --osd-id $id
|
1) ceph osd out $id
2) systemctl stop ceph-osd
|
[
"1",
")",
"ceph",
"osd",
"out",
"$id",
"2",
")",
"systemctl",
"stop",
"ceph",
"-",
"osd"
] |
def replace(self):
    """
    1) ceph osd out $id
    2) systemctl stop ceph-osd@$id (maybe do more see osd.py (terminate()))
    2.1) also maybe wait if not force
    3) ceph osd destroy $id --yes-i-really-mean-it
    4) ceph-volume lvm zap --osd-id $id
    """
    log.info("Preparing replacement of osd {} on host {}".format(
        self.osd_id, self.host))
    return self._call()
|
[
"def",
"replace",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"Preparing replacement of osd {} on host {}\"",
".",
"format",
"(",
"self",
".",
"osd_id",
",",
"self",
".",
"host",
")",
")",
"return",
"self",
".",
"_call",
"(",
")"
] |
https://github.com/SUSE/DeepSea/blob/9c7fad93915ba1250c40d50c855011e9fe41ed21/srv/modules/runners/osd.py#L167-L177
|
|
amazon-archives/aws-security-benchmark
|
672cacf5e8244d7b090ed6de613e91139b585dbd
|
aws_cis_foundation_framework/aws-cis-foundation-benchmark-checklist.py
|
python
|
control_2_6_ensure_cloudtrail_bucket_logging
|
(cloudtrails)
|
return {'Result': result, 'failReason': failReason, 'Offenders': offenders, 'ScoredControl': scored, 'Description': description, 'ControlId': control}
|
Summary
Args:
cloudtrails (TYPE): Description
Returns:
TYPE: Description
|
Summary
|
[
"Summary"
] |
def control_2_6_ensure_cloudtrail_bucket_logging(cloudtrails):
    """Summary
    Args:
        cloudtrails (TYPE): Description
    Returns:
        TYPE: Description
    """
    result = True
    failReason = ""
    offenders = []
    control = "2.6"
    description = "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket"
    scored = True
    for m, n in cloudtrails.iteritems():
        for o in n:
            # it is possible to have a cloudtrail configured with a nonexistant bucket
            try:
                response = S3_CLIENT.get_bucket_logging(Bucket=o['S3BucketName'])
            except:
                result = False
                failReason = "Cloudtrail not configured to log to S3. "
                offenders.append(str(o['TrailARN']))
            try:
                if response['LoggingEnabled']:
                    pass
            except:
                result = False
                failReason = failReason + "CloudTrail S3 bucket without logging discovered"
                offenders.append("Trail:" + str(o['TrailARN']) + " - S3Bucket:" + str(o['S3BucketName']))
    return {'Result': result, 'failReason': failReason, 'Offenders': offenders, 'ScoredControl': scored, 'Description': description, 'ControlId': control}
|
[
"def",
"control_2_6_ensure_cloudtrail_bucket_logging",
"(",
"cloudtrails",
")",
":",
"result",
"=",
"True",
"failReason",
"=",
"\"\"",
"offenders",
"=",
"[",
"]",
"control",
"=",
"\"2.6\"",
"description",
"=",
"\"Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket\"",
"scored",
"=",
"True",
"for",
"m",
",",
"n",
"in",
"cloudtrails",
".",
"iteritems",
"(",
")",
":",
"for",
"o",
"in",
"n",
":",
"# it is possible to have a cloudtrail configured with a nonexistant bucket",
"try",
":",
"response",
"=",
"S3_CLIENT",
".",
"get_bucket_logging",
"(",
"Bucket",
"=",
"o",
"[",
"'S3BucketName'",
"]",
")",
"except",
":",
"result",
"=",
"False",
"failReason",
"=",
"\"Cloudtrail not configured to log to S3. \"",
"offenders",
".",
"append",
"(",
"str",
"(",
"o",
"[",
"'TrailARN'",
"]",
")",
")",
"try",
":",
"if",
"response",
"[",
"'LoggingEnabled'",
"]",
":",
"pass",
"except",
":",
"result",
"=",
"False",
"failReason",
"=",
"failReason",
"+",
"\"CloudTrail S3 bucket without logging discovered\"",
"offenders",
".",
"append",
"(",
"\"Trail:\"",
"+",
"str",
"(",
"o",
"[",
"'TrailARN'",
"]",
")",
"+",
"\" - S3Bucket:\"",
"+",
"str",
"(",
"o",
"[",
"'S3BucketName'",
"]",
")",
")",
"return",
"{",
"'Result'",
":",
"result",
",",
"'failReason'",
":",
"failReason",
",",
"'Offenders'",
":",
"offenders",
",",
"'ScoredControl'",
":",
"scored",
",",
"'Description'",
":",
"description",
",",
"'ControlId'",
":",
"control",
"}"
] |
https://github.com/amazon-archives/aws-security-benchmark/blob/672cacf5e8244d7b090ed6de613e91139b585dbd/aws_cis_foundation_framework/aws-cis-foundation-benchmark-checklist.py#L1013-L1044
|
|
materialsproject/pymatgen
|
8128f3062a334a2edd240e4062b5b9bdd1ae6f58
|
pymatgen/core/surface.py
|
python
|
SlabGenerator.get_slab
|
(self, shift=0, tol=0.1, energy=None)
|
return Slab(
    slab.lattice,
    slab.species_and_occu,
    slab.frac_coords,
    self.miller_index,
    ouc,
    shift,
    scale_factor,
    energy=energy,
    site_properties=slab.site_properties,
    reorient_lattice=self.reorient_lattice,
)
|
This method takes in shift value for the c lattice direction and
generates a slab based on the given shift. You should rarely use this
method. Instead, it is used by other generation algorithms to obtain
all slabs.
Arg:
shift (float): A shift value in Angstrom that determines how much a
slab should be shifted.
tol (float): Tolerance to determine primitive cell.
energy (float): An energy to assign to the slab.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
|
This method takes in shift value for the c lattice direction and
generates a slab based on the given shift. You should rarely use this
method. Instead, it is used by other generation algorithms to obtain
all slabs.
|
[
"This",
"method",
"takes",
"in",
"shift",
"value",
"for",
"the",
"c",
"lattice",
"direction",
"and",
"generates",
"a",
"slab",
"based",
"on",
"the",
"given",
"shift",
".",
"You",
"should",
"rarely",
"use",
"this",
"method",
".",
"Instead",
"it",
"is",
"used",
"by",
"other",
"generation",
"algorithms",
"to",
"obtain",
"all",
"slabs",
"."
] |
def get_slab(self, shift=0, tol=0.1, energy=None):
    """
    This method takes in shift value for the c lattice direction and
    generates a slab based on the given shift. You should rarely use this
    method. Instead, it is used by other generation algorithms to obtain
    all slabs.
    Arg:
        shift (float): A shift value in Angstrom that determines how much a
            slab should be shifted.
        tol (float): Tolerance to determine primitive cell.
        energy (float): An energy to assign to the slab.
    Returns:
        (Slab) A Slab object with a particular shifted oriented unit cell.
    """
    h = self._proj_height
    p = round(h / self.parent.lattice.d_hkl(self.miller_index), 8)
    if self.in_unit_planes:
        nlayers_slab = int(math.ceil(self.min_slab_size / p))
        nlayers_vac = int(math.ceil(self.min_vac_size / p))
    else:
        nlayers_slab = int(math.ceil(self.min_slab_size / h))
        nlayers_vac = int(math.ceil(self.min_vac_size / h))
    nlayers = nlayers_slab + nlayers_vac
    species = self.oriented_unit_cell.species_and_occu
    props = self.oriented_unit_cell.site_properties
    props = {k: v * nlayers_slab for k, v in props.items()}
    frac_coords = self.oriented_unit_cell.frac_coords
    frac_coords = np.array(frac_coords) + np.array([0, 0, -shift])[None, :]
    frac_coords -= np.floor(frac_coords)
    a, b, c = self.oriented_unit_cell.lattice.matrix
    new_lattice = [a, b, nlayers * c]
    frac_coords[:, 2] = frac_coords[:, 2] / nlayers
    all_coords = []
    for i in range(nlayers_slab):
        fcoords = frac_coords.copy()
        fcoords[:, 2] += i / nlayers
        all_coords.extend(fcoords)
    slab = Structure(new_lattice, species * nlayers_slab, all_coords, site_properties=props)
    scale_factor = self.slab_scale_factor
    # Whether or not to orthogonalize the structure
    if self.lll_reduce:
        lll_slab = slab.copy(sanitize=True)
        mapping = lll_slab.lattice.find_mapping(slab.lattice)
        scale_factor = np.dot(mapping[2], scale_factor)
        slab = lll_slab
    # Whether or not to center the slab layer around the vacuum
    if self.center_slab:
        avg_c = np.average([c[2] for c in slab.frac_coords])
        slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
    if self.primitive:
        prim = slab.get_primitive_structure(tolerance=tol)
        if energy is not None:
            energy = prim.volume / slab.volume * energy
        slab = prim
    # Reorient the lattice to get the correct reduced cell
    ouc = self.oriented_unit_cell.copy()
    if self.primitive:
        # find a reduced ouc
        slab_l = slab.lattice
        ouc = ouc.get_primitive_structure(
            constrain_latt={
                "a": slab_l.a,
                "b": slab_l.b,
                "alpha": slab_l.alpha,
                "beta": slab_l.beta,
                "gamma": slab_l.gamma,
            }
        )
        # Check this is the correct oriented unit cell
        ouc = self.oriented_unit_cell if slab_l.a != ouc.lattice.a or slab_l.b != ouc.lattice.b else ouc
    return Slab(
        slab.lattice,
        slab.species_and_occu,
        slab.frac_coords,
        self.miller_index,
        ouc,
        shift,
        scale_factor,
        energy=energy,
        site_properties=slab.site_properties,
        reorient_lattice=self.reorient_lattice,
    )
|
[
"def",
"get_slab",
"(",
"self",
",",
"shift",
"=",
"0",
",",
"tol",
"=",
"0.1",
",",
"energy",
"=",
"None",
")",
":",
"h",
"=",
"self",
".",
"_proj_height",
"p",
"=",
"round",
"(",
"h",
"/",
"self",
".",
"parent",
".",
"lattice",
".",
"d_hkl",
"(",
"self",
".",
"miller_index",
")",
",",
"8",
")",
"if",
"self",
".",
"in_unit_planes",
":",
"nlayers_slab",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"self",
".",
"min_slab_size",
"/",
"p",
")",
")",
"nlayers_vac",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"self",
".",
"min_vac_size",
"/",
"p",
")",
")",
"else",
":",
"nlayers_slab",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"self",
".",
"min_slab_size",
"/",
"h",
")",
")",
"nlayers_vac",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"self",
".",
"min_vac_size",
"/",
"h",
")",
")",
"nlayers",
"=",
"nlayers_slab",
"+",
"nlayers_vac",
"species",
"=",
"self",
".",
"oriented_unit_cell",
".",
"species_and_occu",
"props",
"=",
"self",
".",
"oriented_unit_cell",
".",
"site_properties",
"props",
"=",
"{",
"k",
":",
"v",
"*",
"nlayers_slab",
"for",
"k",
",",
"v",
"in",
"props",
".",
"items",
"(",
")",
"}",
"frac_coords",
"=",
"self",
".",
"oriented_unit_cell",
".",
"frac_coords",
"frac_coords",
"=",
"np",
".",
"array",
"(",
"frac_coords",
")",
"+",
"np",
".",
"array",
"(",
"[",
"0",
",",
"0",
",",
"-",
"shift",
"]",
")",
"[",
"None",
",",
":",
"]",
"frac_coords",
"-=",
"np",
".",
"floor",
"(",
"frac_coords",
")",
"a",
",",
"b",
",",
"c",
"=",
"self",
".",
"oriented_unit_cell",
".",
"lattice",
".",
"matrix",
"new_lattice",
"=",
"[",
"a",
",",
"b",
",",
"nlayers",
"*",
"c",
"]",
"frac_coords",
"[",
":",
",",
"2",
"]",
"=",
"frac_coords",
"[",
":",
",",
"2",
"]",
"/",
"nlayers",
"all_coords",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"nlayers_slab",
")",
":",
"fcoords",
"=",
"frac_coords",
".",
"copy",
"(",
")",
"fcoords",
"[",
":",
",",
"2",
"]",
"+=",
"i",
"/",
"nlayers",
"all_coords",
".",
"extend",
"(",
"fcoords",
")",
"slab",
"=",
"Structure",
"(",
"new_lattice",
",",
"species",
"*",
"nlayers_slab",
",",
"all_coords",
",",
"site_properties",
"=",
"props",
")",
"scale_factor",
"=",
"self",
".",
"slab_scale_factor",
"# Whether or not to orthogonalize the structure",
"if",
"self",
".",
"lll_reduce",
":",
"lll_slab",
"=",
"slab",
".",
"copy",
"(",
"sanitize",
"=",
"True",
")",
"mapping",
"=",
"lll_slab",
".",
"lattice",
".",
"find_mapping",
"(",
"slab",
".",
"lattice",
")",
"scale_factor",
"=",
"np",
".",
"dot",
"(",
"mapping",
"[",
"2",
"]",
",",
"scale_factor",
")",
"slab",
"=",
"lll_slab",
"# Whether or not to center the slab layer around the vacuum",
"if",
"self",
".",
"center_slab",
":",
"avg_c",
"=",
"np",
".",
"average",
"(",
"[",
"c",
"[",
"2",
"]",
"for",
"c",
"in",
"slab",
".",
"frac_coords",
"]",
")",
"slab",
".",
"translate_sites",
"(",
"list",
"(",
"range",
"(",
"len",
"(",
"slab",
")",
")",
")",
",",
"[",
"0",
",",
"0",
",",
"0.5",
"-",
"avg_c",
"]",
")",
"if",
"self",
".",
"primitive",
":",
"prim",
"=",
"slab",
".",
"get_primitive_structure",
"(",
"tolerance",
"=",
"tol",
")",
"if",
"energy",
"is",
"not",
"None",
":",
"energy",
"=",
"prim",
".",
"volume",
"/",
"slab",
".",
"volume",
"*",
"energy",
"slab",
"=",
"prim",
"# Reorient the lattice to get the correct reduced cell",
"ouc",
"=",
"self",
".",
"oriented_unit_cell",
".",
"copy",
"(",
")",
"if",
"self",
".",
"primitive",
":",
"# find a reduced ouc",
"slab_l",
"=",
"slab",
".",
"lattice",
"ouc",
"=",
"ouc",
".",
"get_primitive_structure",
"(",
"constrain_latt",
"=",
"{",
"\"a\"",
":",
"slab_l",
".",
"a",
",",
"\"b\"",
":",
"slab_l",
".",
"b",
",",
"\"alpha\"",
":",
"slab_l",
".",
"alpha",
",",
"\"beta\"",
":",
"slab_l",
".",
"beta",
",",
"\"gamma\"",
":",
"slab_l",
".",
"gamma",
",",
"}",
")",
"# Check this is the correct oriented unit cell",
"ouc",
"=",
"self",
".",
"oriented_unit_cell",
"if",
"slab_l",
".",
"a",
"!=",
"ouc",
".",
"lattice",
".",
"a",
"or",
"slab_l",
".",
"b",
"!=",
"ouc",
".",
"lattice",
".",
"b",
"else",
"ouc",
"return",
"Slab",
"(",
"slab",
".",
"lattice",
",",
"slab",
".",
"species_and_occu",
",",
"slab",
".",
"frac_coords",
",",
"self",
".",
"miller_index",
",",
"ouc",
",",
"shift",
",",
"scale_factor",
",",
"energy",
"=",
"energy",
",",
"site_properties",
"=",
"slab",
".",
"site_properties",
",",
"reorient_lattice",
"=",
"self",
".",
"reorient_lattice",
",",
")"
] |
https://github.com/materialsproject/pymatgen/blob/8128f3062a334a2edd240e4062b5b9bdd1ae6f58/pymatgen/core/surface.py#L931-L1022
|
|
pymedusa/Medusa
|
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
|
ext/tvdbapiv2/models/episode_data_query_params.py
|
python
|
EpisodeDataQueryParams.data
|
(self, data)
|
Sets the data of this EpisodeDataQueryParams.
:param data: The data of this EpisodeDataQueryParams.
:type: list[text_type]
|
Sets the data of this EpisodeDataQueryParams.
|
[
"Sets",
"the",
"data",
"of",
"this",
"EpisodeDataQueryParams",
"."
] |
def data(self, data):
    """
    Sets the data of this EpisodeDataQueryParams.
    :param data: The data of this EpisodeDataQueryParams.
    :type: list[text_type]
    """
    self._data = data
|
[
"def",
"data",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_data",
"=",
"data"
] |
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/tvdbapiv2/models/episode_data_query_params.py#L64-L72
|
||
sagemath/sage
|
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
|
src/sage/topology/simplicial_complex_morphism.py
|
python
|
SimplicialComplexMorphism.is_injective
|
(self)
|
return True
|
Return ``True`` if and only if ``self`` is injective.
EXAMPLES::
sage: S = simplicial_complexes.Sphere(1)
sage: T = simplicial_complexes.Sphere(2)
sage: U = simplicial_complexes.Sphere(3)
sage: H = Hom(T,S)
sage: G = Hom(T,U)
sage: f = {0:0,1:1,2:0,3:1}
sage: x = H(f)
sage: g = {0:0,1:1,2:2,3:3}
sage: y = G(g)
sage: x.is_injective()
False
sage: y.is_injective()
True
|
Return ``True`` if and only if ``self`` is injective.
|
[
"Return",
"True",
"if",
"and",
"only",
"if",
"self",
"is",
"injective",
"."
] |
def is_injective(self):
    """
    Return ``True`` if and only if ``self`` is injective.
    EXAMPLES::
        sage: S = simplicial_complexes.Sphere(1)
        sage: T = simplicial_complexes.Sphere(2)
        sage: U = simplicial_complexes.Sphere(3)
        sage: H = Hom(T,S)
        sage: G = Hom(T,U)
        sage: f = {0:0,1:1,2:0,3:1}
        sage: x = H(f)
        sage: g = {0:0,1:1,2:2,3:3}
        sage: y = G(g)
        sage: x.is_injective()
        False
        sage: y.is_injective()
        True
    """
    v = [self._vertex_dictionary[i[0]] for i in self.domain().faces()[0]]
    for i in v:
        if v.count(i) > 1:
            return False
    return True
|
[
"def",
"is_injective",
"(",
"self",
")",
":",
"v",
"=",
"[",
"self",
".",
"_vertex_dictionary",
"[",
"i",
"[",
"0",
"]",
"]",
"for",
"i",
"in",
"self",
".",
"domain",
"(",
")",
".",
"faces",
"(",
")",
"[",
"0",
"]",
"]",
"for",
"i",
"in",
"v",
":",
"if",
"v",
".",
"count",
"(",
"i",
")",
">",
"1",
":",
"return",
"False",
"return",
"True"
] |
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/topology/simplicial_complex_morphism.py#L494-L519
|
|
p5py/p5
|
4ef1580b26179f1973c1669751da4522c5823f17
|
p5/core/api.py
|
python
|
textDescent
|
()
|
return text_descent()
|
Returns descent of the current font at its current size
:returns: descent of the current font at its current size
:rtype: float
|
Returns descent of the current font at its current size
|
[
"Returns",
"descent",
"of",
"the",
"current",
"font",
"at",
"its",
"current",
"size"
] |
def textDescent():
    """Returns descent of the current font at its current size
    :returns: descent of the current font at its current size
    :rtype: float
    """
    return text_descent()
|
[
"def",
"textDescent",
"(",
")",
":",
"return",
"text_descent",
"(",
")"
] |
https://github.com/p5py/p5/blob/4ef1580b26179f1973c1669751da4522c5823f17/p5/core/api.py#L428-L435
|
|
selfteaching/selfteaching-python-camp
|
9982ee964b984595e7d664b07c389cddaf158f1e
|
exercises/1901040031/d07/mymodule/stats_word.py
|
python
|
stats_text
|
(text)
|
Merge English and Chinese word-frequency statistics
|
Merge English and Chinese word-frequency statistics
|
[
"Merge English and Chinese word-frequency statistics"
] |
def stats_text(text):
    '''Merge English and Chinese word-frequency statistics'''
    print(stats_text_en(text)+stats_text_cn(text))
|
[
"def",
"stats_text",
"(",
"text",
")",
":",
"print",
"(",
"stats_text_en",
"(",
"text",
")",
"+",
"stats_text_cn",
"(",
"text",
")",
")"
] |
https://github.com/selfteaching/selfteaching-python-camp/blob/9982ee964b984595e7d664b07c389cddaf158f1e/exercises/1901040031/d07/mymodule/stats_word.py#L61-L63
|
||
omz/PythonistaAppTemplate
|
f560f93f8876d82a21d108977f90583df08d55af
|
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/plotting/pygletplot/plot_object.py
|
python
|
PlotObject.draw
|
(self)
|
OpenGL rendering code for the plot object.
Override in base class.
|
OpenGL rendering code for the plot object.
Override in base class.
|
[
"OpenGL",
"rendering",
"code",
"for",
"the",
"plot",
"object",
".",
"Override",
"in",
"base",
"class",
"."
] |
def draw(self):
    """
    OpenGL rendering code for the plot object.
    Override in base class.
    """
    pass
|
[
"def",
"draw",
"(",
"self",
")",
":",
"pass"
] |
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/plotting/pygletplot/plot_object.py#L14-L19
|
||
ganeti/ganeti
|
d340a9ddd12f501bef57da421b5f9b969a4ba905
|
lib/cmdlib/instance_utils.py
|
python
|
NICListToTuple
|
(lu, nics)
|
return hooks_nics
|
Build a list of nic information tuples.
This list is suitable to be passed to _BuildInstanceHookEnv or as a return
value in LUInstanceQueryData.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type nics: list of L{objects.NIC}
@param nics: list of nics to convert to hooks tuples
|
Build a list of nic information tuples.
|
[
"Build",
"a",
"list",
"of",
"nic",
"information",
"tuples",
"."
] |
def NICListToTuple(lu, nics):
    """Build a list of nic information tuples.
    This list is suitable to be passed to _BuildInstanceHookEnv or as a return
    value in LUInstanceQueryData.
    @type lu: L{LogicalUnit}
    @param lu: the logical unit on whose behalf we execute
    @type nics: list of L{objects.NIC}
    @param nics: list of nics to convert to hooks tuples
    """
    hooks_nics = []
    for nic in nics:
        hooks_nics.append(NICToTuple(lu, nic))
    return hooks_nics
|
[
"def",
"NICListToTuple",
"(",
"lu",
",",
"nics",
")",
":",
"hooks_nics",
"=",
"[",
"]",
"for",
"nic",
"in",
"nics",
":",
"hooks_nics",
".",
"append",
"(",
"NICToTuple",
"(",
"lu",
",",
"nic",
")",
")",
"return",
"hooks_nics"
] |
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/cmdlib/instance_utils.py#L407-L422
|
|
pypa/pipenv
|
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
|
pipenv/vendor/importlib_metadata/__init__.py
|
python
|
Distribution.at
|
(path)
|
return PathDistribution(pathlib.Path(path))
|
Return a Distribution for the indicated metadata path
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
|
Return a Distribution for the indicated metadata path
|
[
"Return",
"a",
"Distribution",
"for",
"the",
"indicated",
"metadata",
"path"
] |
def at(path):
    """Return a Distribution for the indicated metadata path
    :param path: a string or path-like object
    :return: a concrete Distribution instance for the path
    """
    return PathDistribution(pathlib.Path(path))
|
[
"def",
"at",
"(",
"path",
")",
":",
"return",
"PathDistribution",
"(",
"pathlib",
".",
"Path",
"(",
"path",
")",
")"
] |
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/vendor/importlib_metadata/__init__.py#L555-L561
|
|
django/django
|
0a17666045de6739ae1c2ac695041823d5f827f7
|
django/core/mail/message.py
|
python
|
EmailMessage.attach
|
(self, filename=None, content=None, mimetype=None)
|
Attach a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass, insert it directly
into the resulting message attachments.
For a text/* mimetype (guessed or specified), when a bytes object is
specified as content, decode it as UTF-8. If that fails, set the
mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
|
Attach a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
|
[
"Attach",
"a",
"file",
"with",
"the",
"given",
"filename",
"and",
"content",
".",
"The",
"filename",
"can",
"be",
"omitted",
"and",
"the",
"mimetype",
"is",
"guessed",
"if",
"not",
"provided",
"."
] |
def attach(self, filename=None, content=None, mimetype=None):
    """
    Attach a file with the given filename and content. The filename can
    be omitted and the mimetype is guessed, if not provided.
    If the first parameter is a MIMEBase subclass, insert it directly
    into the resulting message attachments.
    For a text/* mimetype (guessed or specified), when a bytes object is
    specified as content, decode it as UTF-8. If that fails, set the
    mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
    """
    if isinstance(filename, MIMEBase):
        if content is not None or mimetype is not None:
            raise ValueError(
                'content and mimetype must not be given when a MIMEBase '
                'instance is provided.'
            )
        self.attachments.append(filename)
    elif content is None:
        raise ValueError('content must be provided.')
    else:
        mimetype = mimetype or mimetypes.guess_type(filename)[0] or DEFAULT_ATTACHMENT_MIME_TYPE
        basetype, subtype = mimetype.split('/', 1)
        if basetype == 'text':
            if isinstance(content, bytes):
                try:
                    content = content.decode()
                except UnicodeDecodeError:
                    # If mimetype suggests the file is text but it's
                    # actually binary, read() raises a UnicodeDecodeError.
                    mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        self.attachments.append((filename, content, mimetype))
|
[
"def",
"attach",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"content",
"=",
"None",
",",
"mimetype",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"filename",
",",
"MIMEBase",
")",
":",
"if",
"content",
"is",
"not",
"None",
"or",
"mimetype",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'content and mimetype must not be given when a MIMEBase '",
"'instance is provided.'",
")",
"self",
".",
"attachments",
".",
"append",
"(",
"filename",
")",
"elif",
"content",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'content must be provided.'",
")",
"else",
":",
"mimetype",
"=",
"mimetype",
"or",
"mimetypes",
".",
"guess_type",
"(",
"filename",
")",
"[",
"0",
"]",
"or",
"DEFAULT_ATTACHMENT_MIME_TYPE",
"basetype",
",",
"subtype",
"=",
"mimetype",
".",
"split",
"(",
"'/'",
",",
"1",
")",
"if",
"basetype",
"==",
"'text'",
":",
"if",
"isinstance",
"(",
"content",
",",
"bytes",
")",
":",
"try",
":",
"content",
"=",
"content",
".",
"decode",
"(",
")",
"except",
"UnicodeDecodeError",
":",
"# If mimetype suggests the file is text but it's",
"# actually binary, read() raises a UnicodeDecodeError.",
"mimetype",
"=",
"DEFAULT_ATTACHMENT_MIME_TYPE",
"self",
".",
"attachments",
".",
"append",
"(",
"(",
"filename",
",",
"content",
",",
"mimetype",
")",
")"
] |
https://github.com/django/django/blob/0a17666045de6739ae1c2ac695041823d5f827f7/django/core/mail/message.py#L286-L320
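A short usage sketch for attach(); the addresses and payloads are placeholders. Note how a bytes payload with a text/* mimetype is decoded to str, while other mimetypes keep the raw bytes:

from django.core.mail import EmailMessage

msg = EmailMessage('Report', 'See attachments.', 'from@example.com', ['to@example.com'])
msg.attach('notes.txt', b'plain bytes', 'text/plain')            # decoded as UTF-8
msg.attach('blob.bin', b'\x00\x01', 'application/octet-stream')  # kept as bytes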
|
||
home-assistant/supervisor
|
69c2517d5211b483fdfe968b0a2b36b672ee7ab2
|
supervisor/services/modules/mysql.py
|
python
|
MySQLService.slug
|
(self)
|
return SERVICE_MYSQL
|
Return slug of this service.
|
Return slug of this service.
|
[
"Return",
"slug",
"of",
"this",
"service",
"."
] |
def slug(self) -> str:
"""Return slug of this service."""
return SERVICE_MYSQL
|
[
"def",
"slug",
"(",
"self",
")",
"->",
"str",
":",
"return",
"SERVICE_MYSQL"
] |
https://github.com/home-assistant/supervisor/blob/69c2517d5211b483fdfe968b0a2b36b672ee7ab2/supervisor/services/modules/mysql.py#L41-L43
|
|
spl0k/supysonic
|
62bad3b9878a1d22cf040f25dab0fa28a252ba38
|
supysonic/frontend/user.py
|
python
|
change_password_post
|
(uid, user)
|
return change_password_form(uid, user)
|
[] |
def change_password_post(uid, user):
error = False
if user.id == request.user.id:
current = request.form.get("current")
if not current:
flash("The current password is required")
error = True
new, confirm = map(request.form.get, ("new", "confirm"))
if not new:
flash("The new password is required")
error = True
if new != confirm:
flash("The new password and its confirmation don't match")
error = True
if not error:
try:
if user.id == request.user.id:
UserManager.change_password(user.id, current, new)
else:
UserManager.change_password2(user.name, new)
flash("Password changed")
return redirect(url_for("frontend.user_profile", uid=uid))
except ValueError as e:
flash(str(e), "error")
return change_password_form(uid, user)
|
[
"def",
"change_password_post",
"(",
"uid",
",",
"user",
")",
":",
"error",
"=",
"False",
"if",
"user",
".",
"id",
"==",
"request",
".",
"user",
".",
"id",
":",
"current",
"=",
"request",
".",
"form",
".",
"get",
"(",
"\"current\"",
")",
"if",
"not",
"current",
":",
"flash",
"(",
"\"The current password is required\"",
")",
"error",
"=",
"True",
"new",
",",
"confirm",
"=",
"map",
"(",
"request",
".",
"form",
".",
"get",
",",
"(",
"\"new\"",
",",
"\"confirm\"",
")",
")",
"if",
"not",
"new",
":",
"flash",
"(",
"\"The new password is required\"",
")",
"error",
"=",
"True",
"if",
"new",
"!=",
"confirm",
":",
"flash",
"(",
"\"The new password and its confirmation don't match\"",
")",
"error",
"=",
"True",
"if",
"not",
"error",
":",
"try",
":",
"if",
"user",
".",
"id",
"==",
"request",
".",
"user",
".",
"id",
":",
"UserManager",
".",
"change_password",
"(",
"user",
".",
"id",
",",
"current",
",",
"new",
")",
"else",
":",
"UserManager",
".",
"change_password2",
"(",
"user",
".",
"name",
",",
"new",
")",
"flash",
"(",
"\"Password changed\"",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"\"frontend.user_profile\"",
",",
"uid",
"=",
"uid",
")",
")",
"except",
"ValueError",
"as",
"e",
":",
"flash",
"(",
"str",
"(",
"e",
")",
",",
"\"error\"",
")",
"return",
"change_password_form",
"(",
"uid",
",",
"user",
")"
] |
https://github.com/spl0k/supysonic/blob/62bad3b9878a1d22cf040f25dab0fa28a252ba38/supysonic/frontend/user.py#L190-L219
|
|||
log2timeline/plaso
|
fe2e316b8c76a0141760c0f2f181d84acb83abc2
|
plaso/parsers/presets.py
|
python
|
ParserPresetsManager._ReadOperatingSystemArtifactValues
|
(self, operating_system_values)
|
return artifacts.OperatingSystemArtifact(
family=family, product=product, version=version)
|
Reads an operating system artifact from a dictionary.
Args:
operating_system_values (dict[str, object]): operating system values.
Returns:
OperatingSystemArtifact: an operating system artifact attribute container.
Raises:
MalformedPresetError: if the operating system values are not set or
their format is incorrect.
|
Reads an operating system artifact from a dictionary.
|
[
"Reads",
"an",
"operating",
"system",
"artifact",
"from",
"a",
"dictionary",
"."
] |
def _ReadOperatingSystemArtifactValues(self, operating_system_values):
"""Reads an operating system artifact from a dictionary.
Args:
operating_system_values (dict[str, object]): operating system values.
Returns:
OperatingSystemArtifact: an operating system artifact attribute container.
Raises:
MalformedPresetError: if the operating system values are not set or
their format is incorrect.
"""
if not operating_system_values:
raise errors.MalformedPresetError('Missing operating system values.')
family = operating_system_values.get('family', None)
product = operating_system_values.get('product', None)
version = operating_system_values.get('version', None)
if not family and not product:
raise errors.MalformedPresetError(
'Invalid operating system missing family and product.')
return artifacts.OperatingSystemArtifact(
family=family, product=product, version=version)
|
[
"def",
"_ReadOperatingSystemArtifactValues",
"(",
"self",
",",
"operating_system_values",
")",
":",
"if",
"not",
"operating_system_values",
":",
"raise",
"errors",
".",
"MalformedPresetError",
"(",
"'Missing operating system values.'",
")",
"family",
"=",
"operating_system_values",
".",
"get",
"(",
"'family'",
",",
"None",
")",
"product",
"=",
"operating_system_values",
".",
"get",
"(",
"'product'",
",",
"None",
")",
"version",
"=",
"operating_system_values",
".",
"get",
"(",
"'version'",
",",
"None",
")",
"if",
"not",
"family",
"and",
"not",
"product",
":",
"raise",
"errors",
".",
"MalformedPresetError",
"(",
"'Invalid operating system missing family and product.'",
")",
"return",
"artifacts",
".",
"OperatingSystemArtifact",
"(",
"family",
"=",
"family",
",",
"product",
"=",
"product",
",",
"version",
"=",
"version",
")"
] |
https://github.com/log2timeline/plaso/blob/fe2e316b8c76a0141760c0f2f181d84acb83abc2/plaso/parsers/presets.py#L42-L67
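A sketch of the expected input shape, assuming a ParserPresetsManager instance named manager; at least one of family/product must be present:

artifact = manager._ReadOperatingSystemArtifactValues(
    {'family': 'Windows', 'version': '10'})
# An empty dict raises errors.MalformedPresetError, as does a dict that
# lacks both 'family' and 'product'.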
|
|
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
|
cb692f527e4e819b6c228187c5702d990a180043
|
bin/x86/Debug/scripting_engine/Lib/decimal.py
|
python
|
Decimal._round
|
(self, places, rounding)
|
return ans
|
Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
|
Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
|
[
"Round",
"a",
"nonzero",
"nonspecial",
"Decimal",
"to",
"a",
"fixed",
"number",
"of",
"significant",
"figures",
"using",
"the",
"given",
"rounding",
"mode",
"."
] |
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
|
[
"def",
"_round",
"(",
"self",
",",
"places",
",",
"rounding",
")",
":",
"if",
"places",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"argument should be at least 1 in _round\"",
")",
"if",
"self",
".",
"_is_special",
"or",
"not",
"self",
":",
"return",
"Decimal",
"(",
"self",
")",
"ans",
"=",
"self",
".",
"_rescale",
"(",
"self",
".",
"adjusted",
"(",
")",
"+",
"1",
"-",
"places",
",",
"rounding",
")",
"# it can happen that the rescale alters the adjusted exponent;",
"# for example when rounding 99.97 to 3 significant figures.",
"# When this happens we end up with an extra 0 at the end of",
"# the number; a second rescale fixes this.",
"if",
"ans",
".",
"adjusted",
"(",
")",
"!=",
"self",
".",
"adjusted",
"(",
")",
":",
"ans",
"=",
"ans",
".",
"_rescale",
"(",
"ans",
".",
"adjusted",
"(",
")",
"+",
"1",
"-",
"places",
",",
"rounding",
")",
"return",
"ans"
] |
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/bin/x86/Debug/scripting_engine/Lib/decimal.py#L2520-L2541
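The comment about a second rescale can be reproduced with public Decimal methods; rounding 99.97 to 3 significant figures bumps the adjusted exponent from 1 to 2, which is exactly the case the extra _rescale handles:

from decimal import Decimal, ROUND_HALF_EVEN

d = Decimal('99.97')
exp = d.adjusted() + 1 - 3  # target exponent for 3 significant figures
print(d.quantize(Decimal(1).scaleb(exp), rounding=ROUND_HALF_EVEN))  # 100.0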
|
|
WPO-Foundation/wptagent
|
94470f007294213f900dcd9a207678b5b9fce5d3
|
internal/chrome_android.py
|
python
|
ChromeAndroid.run_task
|
(self, task)
|
Run an individual test
|
Run an individual test
|
[
"Run",
"an",
"individual",
"test"
] |
def run_task(self, task):
"""Run an individual test"""
if self.connected:
DevtoolsBrowser.run_task(self, task)
|
[
"def",
"run_task",
"(",
"self",
",",
"task",
")",
":",
"if",
"self",
".",
"connected",
":",
"DevtoolsBrowser",
".",
"run_task",
"(",
"self",
",",
"task",
")"
] |
https://github.com/WPO-Foundation/wptagent/blob/94470f007294213f900dcd9a207678b5b9fce5d3/internal/chrome_android.py#L290-L293
|
||
tendenci/tendenci
|
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
|
tendenci/apps/corporate_memberships/admin.py
|
python
|
CorporateMembershipTypeAdmin.reps_groups
|
(self, instance)
|
return reps_groups_links
|
[] |
def reps_groups(self, instance):
reps_groups_links = ''
if instance.pending_group:
reps_groups_links = '<a href="%s">%s</a>' % (
reverse('group.detail',
args=[instance.pending_group.slug]),
_('Pending'))
if instance.active_group:
if reps_groups_links:
reps_groups_links += '<br />'
reps_groups_links += '<a href="%s">%s</a>' % (
reverse('group.detail',
args=[instance.active_group.slug]),
_('Active'))
return reps_groups_links
|
[
"def",
"reps_groups",
"(",
"self",
",",
"instance",
")",
":",
"reps_groups_links",
"=",
"''",
"if",
"instance",
".",
"pending_group",
":",
"reps_groups_links",
"=",
"'<a href=\"%s\">%s</a>'",
"%",
"(",
"reverse",
"(",
"'group.detail'",
",",
"args",
"=",
"[",
"instance",
".",
"pending_group",
".",
"slug",
"]",
")",
",",
"_",
"(",
"'Pending'",
")",
")",
"if",
"instance",
".",
"active_group",
":",
"if",
"reps_groups_links",
":",
"reps_groups_links",
"+=",
"'<br />'",
"reps_groups_links",
"+=",
"'<a href=\"%s\">%s</a>'",
"%",
"(",
"reverse",
"(",
"'group.detail'",
",",
"args",
"=",
"[",
"instance",
".",
"active_group",
".",
"slug",
"]",
")",
",",
"_",
"(",
"'Active'",
")",
")",
"return",
"reps_groups_links"
] |
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/corporate_memberships/admin.py#L80-L94
|
|||
AutodeskRoboticsLab/Mimic
|
85447f0d346be66988303a6a054473d92f1ed6f4
|
mimic/scripts/extern/pyqtgraph_0_11_0_dev0/pyqtgraph/graphicsItems/InfiniteLine.py
|
python
|
InfiniteLine.setMovable
|
(self, m)
|
Set whether the line is movable by the user.
|
Set whether the line is movable by the user.
|
[
"Set",
"whether",
"the",
"line",
"is",
"movable",
"by",
"the",
"user",
"."
] |
def setMovable(self, m):
"""Set whether the line is movable by the user."""
self.movable = m
self.setAcceptHoverEvents(m)
|
[
"def",
"setMovable",
"(",
"self",
",",
"m",
")",
":",
"self",
".",
"movable",
"=",
"m",
"self",
".",
"setAcceptHoverEvents",
"(",
"m",
")"
] |
https://github.com/AutodeskRoboticsLab/Mimic/blob/85447f0d346be66988303a6a054473d92f1ed6f4/mimic/scripts/extern/pyqtgraph_0_11_0_dev0/pyqtgraph/graphicsItems/InfiniteLine.py#L113-L116
|
||
justinsalamon/scaper
|
7fc9c2be303715fdd56f222753c8b2ce23e58efb
|
scaper/core.py
|
python
|
_validate_distribution
|
(dist_tuple)
|
Check whether a tuple specifying a parameter distribution has a valid
format; if not, raise an error.
Parameters
----------
dist_tuple : tuple
Tuple specifying a distribution to sample from. See Scaper.add_event
for details about the expected format of the tuple and allowed values.
Raises
------
ScaperError
If the tuple does not have a valid format.
See Also
--------
Scaper.add_event : Add a foreground sound event to the foreground
specification.
|
Check whether a tuple specifying a parameter distribution has a valid
format; if not, raise an error.
|
[
"Check",
"whether",
"a",
"tuple",
"specifying",
"a",
"parameter",
"distribution",
"has",
"a",
"valid",
"format",
"if",
"not",
"raise",
"an",
"error",
"."
] |
def _validate_distribution(dist_tuple):
'''
Check whether a tuple specifying a parameter distribution has a valid
format; if not, raise an error.
Parameters
----------
dist_tuple : tuple
Tuple specifying a distribution to sample from. See Scaper.add_event
for details about the expected format of the tuple and allowed values.
Raises
------
ScaperError
If the tuple does not have a valid format.
See Also
--------
Scaper.add_event : Add a foreground sound event to the foreground
specification.
'''
# Make sure it's a tuple
if not isinstance(dist_tuple, tuple):
raise ScaperError('Distribution tuple must be of type tuple.')
# Make sure the tuple contains at least 2 items
if len(dist_tuple) < 2:
raise ScaperError('Distribution tuple must be at least of length 2.')
# Make sure the first item is one of the supported distribution names
if dist_tuple[0] not in SUPPORTED_DIST.keys():
raise ScaperError(
"Unsupported distribution name: {:s}".format(dist_tuple[0]))
# If it's a constant distribution, tuple must be of length 2
if dist_tuple[0] == 'const':
if len(dist_tuple) != 2:
raise ScaperError('"const" distribution tuple must be of length 2')
# If it's a choose, tuple must be of length 2 and second item of type list
elif dist_tuple[0] == 'choose':
if len(dist_tuple) != 2 or not isinstance(dist_tuple[1], list):
raise ScaperError(
'The "choose" distribution tuple must be of length 2 where '
'the second item is a list.')
# If it's a choose_weighted, tuple must be of length 3, items 2 and 3 must
# be lists of the same length, and the list in item 3 must contain floats
# in the range [0, 1] that sum to 1 (i.e. valid probabilities).
elif dist_tuple[0] == 'choose_weighted':
if len(dist_tuple) != 3:
raise ScaperError('"choose_weighted" distribution tuple must have length 3')
if not isinstance(dist_tuple[1], list) or \
not isinstance(dist_tuple[2], list) or \
len(dist_tuple[1]) != len(dist_tuple[2]):
msg = ('The 2nd and 3rd items of the "choose_weighted" distribution tuple '
'must be lists of the same length.')
raise ScaperError(msg)
probabilities = np.asarray(dist_tuple[2])
if probabilities.min() < 0 or probabilities.max() > 1:
msg = ('Values in the probabilities list of the "choose_weighted" '
'distribution tuple must be in the range [0, 1].')
raise ScaperError(msg)
if not np.allclose(probabilities.sum(), 1):
msg = ('Values in the probabilities list of the "choose_weighted" '
'distribution tuple must sum to 1.')
raise ScaperError(msg)
# If it's a uniform distribution, tuple must be of length 3, 2nd item must
# be a real number and 3rd item must be real and greater/equal to the 2nd.
elif dist_tuple[0] == 'uniform':
if (len(dist_tuple) != 3 or
not is_real_number(dist_tuple[1]) or
not is_real_number(dist_tuple[2]) or
dist_tuple[1] > dist_tuple[2]):
raise ScaperError(
'The "uniform" distribution tuple be of length 2, where the '
'2nd item is a real number and the 3rd item is a real number '
'and greater/equal to the 2nd item.')
# If it's a normal distribution, tuple must be of length 3, 2nd item must
# be a real number and 3rd item must be a non-negative real
elif dist_tuple[0] == 'normal':
if (len(dist_tuple) != 3 or
not is_real_number(dist_tuple[1]) or
not is_real_number(dist_tuple[2]) or
dist_tuple[2] < 0):
raise ScaperError(
'The "normal" distribution tuple must be of length 3, where '
'the 2nd item (mean) is a real number and the 3rd item (std '
'dev) is real and non-negative.')
elif dist_tuple[0] == 'truncnorm':
if (len(dist_tuple) != 5 or
not is_real_number(dist_tuple[1]) or
not is_real_number(dist_tuple[2]) or
not is_real_number(dist_tuple[3]) or
not is_real_number(dist_tuple[4]) or
dist_tuple[2] < 0 or
dist_tuple[4] < dist_tuple[3]):
raise ScaperError(
'The "truncnorm" distribution tuple must be of length 5, '
'where the 2nd item (mean) is a real number, the 3rd item '
'(std dev) is real and non-negative, the 4th item (trunc_min) '
'is a real number and the 5th item (trunc_max) is a real '
'number that is equal to or greater than trunc_min.')
|
[
"def",
"_validate_distribution",
"(",
"dist_tuple",
")",
":",
"# Make sure it's a tuple",
"if",
"not",
"isinstance",
"(",
"dist_tuple",
",",
"tuple",
")",
":",
"raise",
"ScaperError",
"(",
"'Distribution tuple must be of type tuple.'",
")",
"# Make sure the tuple contains at least 2 items",
"if",
"len",
"(",
"dist_tuple",
")",
"<",
"2",
":",
"raise",
"ScaperError",
"(",
"'Distribution tuple must be at least of length 2.'",
")",
"# Make sure the first item is one of the supported distribution names",
"if",
"dist_tuple",
"[",
"0",
"]",
"not",
"in",
"SUPPORTED_DIST",
".",
"keys",
"(",
")",
":",
"raise",
"ScaperError",
"(",
"\"Unsupported distribution name: {:s}\"",
".",
"format",
"(",
"dist_tuple",
"[",
"0",
"]",
")",
")",
"# If it's a constant distribution, tuple must be of length 2",
"if",
"dist_tuple",
"[",
"0",
"]",
"==",
"'const'",
":",
"if",
"len",
"(",
"dist_tuple",
")",
"!=",
"2",
":",
"raise",
"ScaperError",
"(",
"'\"const\" distribution tuple must be of length 2'",
")",
"# If it's a choose, tuple must be of length 2 and second item of type list",
"elif",
"dist_tuple",
"[",
"0",
"]",
"==",
"'choose'",
":",
"if",
"len",
"(",
"dist_tuple",
")",
"!=",
"2",
"or",
"not",
"isinstance",
"(",
"dist_tuple",
"[",
"1",
"]",
",",
"list",
")",
":",
"raise",
"ScaperError",
"(",
"'The \"choose\" distribution tuple must be of length 2 where '",
"'the second item is a list.'",
")",
"# If it's a choose_weighted, tuple must be of length 3, items 2 and 3 must",
"# be lists of the same length, and the list in item 3 must contain floats ",
"# in the range [0, 1] that sum to 1 (i.e. valid probabilities).",
"elif",
"dist_tuple",
"[",
"0",
"]",
"==",
"'choose_weighted'",
":",
"if",
"len",
"(",
"dist_tuple",
")",
"!=",
"3",
":",
"raise",
"ScaperError",
"(",
"'\"choose_weighted\" distribution tuple must have length 3'",
")",
"if",
"not",
"isinstance",
"(",
"dist_tuple",
"[",
"1",
"]",
",",
"list",
")",
"or",
"not",
"isinstance",
"(",
"dist_tuple",
"[",
"2",
"]",
",",
"list",
")",
"or",
"len",
"(",
"dist_tuple",
"[",
"1",
"]",
")",
"!=",
"len",
"(",
"dist_tuple",
"[",
"2",
"]",
")",
":",
"msg",
"=",
"(",
"'The 2nd and 3rd items of the \"choose_weighted\" distribution tuple '",
"'must be lists of the same length.'",
")",
"raise",
"ScaperError",
"(",
"msg",
")",
"probabilities",
"=",
"np",
".",
"asarray",
"(",
"dist_tuple",
"[",
"2",
"]",
")",
"if",
"probabilities",
".",
"min",
"(",
")",
"<",
"0",
"or",
"probabilities",
".",
"max",
"(",
")",
">",
"1",
":",
"msg",
"=",
"(",
"'Values in the probabilities list of the \"choose_weighted\" '",
"'distribution tuple must be in the range [0, 1].'",
")",
"raise",
"ScaperError",
"(",
"msg",
")",
"if",
"not",
"np",
".",
"allclose",
"(",
"probabilities",
".",
"sum",
"(",
")",
",",
"1",
")",
":",
"msg",
"=",
"(",
"'Values in the probabilities list of the \"choose_weighted\" '",
"'distribution tuple must sum to 1.'",
")",
"raise",
"ScaperError",
"(",
"msg",
")",
"# If it's a uniform distribution, tuple must be of length 3, 2nd item must",
"# be a real number and 3rd item must be real and greater/equal to the 2nd.",
"elif",
"dist_tuple",
"[",
"0",
"]",
"==",
"'uniform'",
":",
"if",
"(",
"len",
"(",
"dist_tuple",
")",
"!=",
"3",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"1",
"]",
")",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"2",
"]",
")",
"or",
"dist_tuple",
"[",
"1",
"]",
">",
"dist_tuple",
"[",
"2",
"]",
")",
":",
"raise",
"ScaperError",
"(",
"'The \"uniform\" distribution tuple be of length 2, where the '",
"'2nd item is a real number and the 3rd item is a real number '",
"'and greater/equal to the 2nd item.'",
")",
"# If it's a normal distribution, tuple must be of length 3, 2nd item must",
"# be a real number and 3rd item must be a non-negative real",
"elif",
"dist_tuple",
"[",
"0",
"]",
"==",
"'normal'",
":",
"if",
"(",
"len",
"(",
"dist_tuple",
")",
"!=",
"3",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"1",
"]",
")",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"2",
"]",
")",
"or",
"dist_tuple",
"[",
"2",
"]",
"<",
"0",
")",
":",
"raise",
"ScaperError",
"(",
"'The \"normal\" distribution tuple must be of length 3, where '",
"'the 2nd item (mean) is a real number and the 3rd item (std '",
"'dev) is real and non-negative.'",
")",
"elif",
"dist_tuple",
"[",
"0",
"]",
"==",
"'truncnorm'",
":",
"if",
"(",
"len",
"(",
"dist_tuple",
")",
"!=",
"5",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"1",
"]",
")",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"2",
"]",
")",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"3",
"]",
")",
"or",
"not",
"is_real_number",
"(",
"dist_tuple",
"[",
"4",
"]",
")",
"or",
"dist_tuple",
"[",
"2",
"]",
"<",
"0",
"or",
"dist_tuple",
"[",
"4",
"]",
"<",
"dist_tuple",
"[",
"3",
"]",
")",
":",
"raise",
"ScaperError",
"(",
"'The \"truncnorm\" distribution tuple must be of length 5, '",
"'where the 2nd item (mean) is a real number, the 3rd item '",
"'(std dev) is real and non-negative, the 4th item (trunc_min) '",
"'is a real number and the 5th item (trun_max) is a real '",
"'number that is equal to or greater than trunc_min.'",
")"
] |
https://github.com/justinsalamon/scaper/blob/7fc9c2be303715fdd56f222753c8b2ce23e58efb/scaper/core.py#L427-L527
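A few illustrative calls (assumes ScaperError and SUPPORTED_DIST from the same module); the commented-out lines raise ScaperError with the messages defined above:

_validate_distribution(('const', 2.5))         # passes silently
_validate_distribution(('uniform', 0.0, 1.0))  # passes: low <= high
# _validate_distribution(('choose', 'not-a-list'))  # raises: 2nd item not a list
# _validate_distribution(('normal', 0.0, -1.0))     # raises: negative std dev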
|
||
khalim19/gimp-plugin-export-layers
|
b37255f2957ad322f4d332689052351cdea6e563
|
export_layers/pygimplib/setting/presenter.py
|
python
|
NullPresenter.__init__
|
(self, setting, element, *args, **kwargs)
|
`element` is ignored - its attributes are not read or set.
|
`element` is ignored - its attributes are not read or set.
|
[
"element",
"is",
"ignored",
"-",
"its",
"attributes",
"are",
"not",
"read",
"or",
"set",
"."
] |
def __init__(self, setting, element, *args, **kwargs):
"""
`element` is ignored - its attributes are not read or set.
"""
self._value = None
self._sensitive = True
self._visible = True
super().__init__(setting, self._NULL_GUI_ELEMENT, *args, **kwargs)
|
[
"def",
"__init__",
"(",
"self",
",",
"setting",
",",
"element",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_value",
"=",
"None",
"self",
".",
"_sensitive",
"=",
"True",
"self",
".",
"_visible",
"=",
"True",
"super",
"(",
")",
".",
"__init__",
"(",
"setting",
",",
"self",
".",
"_NULL_GUI_ELEMENT",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/khalim19/gimp-plugin-export-layers/blob/b37255f2957ad322f4d332689052351cdea6e563/export_layers/pygimplib/setting/presenter.py#L301-L309
|
||
shiweibsw/Translation-Tools
|
2fbbf902364e557fa7017f9a74a8797b7440c077
|
venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/urllib3/_collections.py
|
python
|
HTTPHeaderDict.__iter__
|
(self)
|
[] |
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
|
[
"def",
"__iter__",
"(",
"self",
")",
":",
"# Only provide the originally cased names",
"for",
"vals",
"in",
"self",
".",
"_container",
".",
"values",
"(",
")",
":",
"yield",
"vals",
"[",
"0",
"]"
] |
https://github.com/shiweibsw/Translation-Tools/blob/2fbbf902364e557fa7017f9a74a8797b7440c077/venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/urllib3/_collections.py#L180-L183
|
||||
frobelbest/BANet
|
4015642c9dfe8287c5f146d7a90df594625f2560
|
legacy/deeptam/python/deeptam_tracker/utils/vis_utils.py
|
python
|
convert_array_to_colorimg
|
(inp)
|
return Image.fromarray(image_arr)
|
Returns the image as a PIL Image
|
Returns the image as a PIL Image
|
[
"Returns",
"the",
"img",
"as",
"PIL",
"images"
] |
def convert_array_to_colorimg(inp):
"""Returns the img as PIL images"""
image_arr = inp.copy()
if image_arr.dtype == np.float32:
image_arr += 0.5
image_arr *= 255
image_arr = image_arr.astype(np.uint8)
image_arr = image_arr[0:3,:,:]
image_arr = np.rollaxis(image_arr,0,3)
return Image.fromarray(image_arr)
|
[
"def",
"convert_array_to_colorimg",
"(",
"inp",
")",
":",
"image_arr",
"=",
"inp",
".",
"copy",
"(",
")",
"if",
"image_arr",
".",
"dtype",
"==",
"np",
".",
"float32",
":",
"image_arr",
"+=",
"0.5",
"image_arr",
"*=",
"255",
"image_arr",
"=",
"image_arr",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"image_arr",
"=",
"image_arr",
"[",
"0",
":",
"3",
",",
":",
",",
":",
"]",
"image_arr",
"=",
"np",
".",
"rollaxis",
"(",
"image_arr",
",",
"0",
",",
"3",
")",
"return",
"Image",
".",
"fromarray",
"(",
"image_arr",
")"
] |
https://github.com/frobelbest/BANet/blob/4015642c9dfe8287c5f146d7a90df594625f2560/legacy/deeptam/python/deeptam_tracker/utils/vis_utils.py#L6-L15
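A usage sketch: the function expects a channels-first array; float32 input is assumed to lie roughly in [-0.5, 0.5], since the function adds 0.5 and scales by 255 before the uint8 cast. The output path is a placeholder:

import numpy as np

arr = np.random.rand(3, 64, 64).astype(np.float32) - 0.5  # CHW, roughly [-0.5, 0.5)
img = convert_array_to_colorimg(arr)
img.save('preview.png')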
|
|
trakt/Plex-Trakt-Scrobbler
|
aeb0bfbe62fad4b06c164f1b95581da7f35dce0b
|
Trakttv.bundle/Contents/Libraries/Shared/elftools/dwarf/dwarfinfo.py
|
python
|
DWARFInfo.range_lists
|
(self)
|
Get a RangeLists object representing the .debug_ranges section of
the DWARF data, or None if this section doesn't exist.
|
Get a RangeLists object representing the .debug_ranges section of
the DWARF data, or None if this section doesn't exist.
|
[
"Get",
"a",
"RangeLists",
"object",
"representing",
"the",
".",
"debug_ranges",
"section",
"of",
"the",
"DWARF",
"data",
"or",
"None",
"if",
"this",
"section",
"doesn",
"t",
"exist",
"."
] |
def range_lists(self):
""" Get a RangeLists object representing the .debug_ranges section of
the DWARF data, or None if this section doesn't exist.
"""
if self.debug_ranges_sec:
return RangeLists(self.debug_ranges_sec.stream, self.structs)
else:
return None
|
[
"def",
"range_lists",
"(",
"self",
")",
":",
"if",
"self",
".",
"debug_ranges_sec",
":",
"return",
"RangeLists",
"(",
"self",
".",
"debug_ranges_sec",
".",
"stream",
",",
"self",
".",
"structs",
")",
"else",
":",
"return",
"None"
] |
https://github.com/trakt/Plex-Trakt-Scrobbler/blob/aeb0bfbe62fad4b06c164f1b95581da7f35dce0b/Trakttv.bundle/Contents/Libraries/Shared/elftools/dwarf/dwarfinfo.py#L180-L187
|
||
BradNeuberg/cloudless
|
052d16e314a24b3ff36d9da94f2e9e53e0e1e0e0
|
src/cloudless/train/prepare_data.py
|
python
|
_load_numpy_image
|
(image_path, width, height)
|
return data
|
Turns one of our testing image paths into an actual image, converted into a numpy array.
|
Turns one of our testing image paths into an actual image, converted into a numpy array.
|
[
"Turns",
"one",
"of",
"our",
"testing",
"image",
"paths",
"into",
"an",
"actual",
"image",
"converted",
"into",
"a",
"numpy",
"array",
"."
] |
def _load_numpy_image(image_path, width, height):
"""
Turns one of our testing image paths into an actual image, converted into a numpy array.
"""
im = Image.open(image_path)
# Scale the image to the size required by our neural network.
im = im.resize((width, height))
data = np.asarray(im)
data = np.reshape(data, (3, height, width))
return data
|
[
"def",
"_load_numpy_image",
"(",
"image_path",
",",
"width",
",",
"height",
")",
":",
"im",
"=",
"Image",
".",
"open",
"(",
"image_path",
")",
"# Scale the image to the size required by our neural network.",
"im",
"=",
"im",
".",
"resize",
"(",
"(",
"width",
",",
"height",
")",
")",
"data",
"=",
"np",
".",
"asarray",
"(",
"im",
")",
"data",
"=",
"np",
".",
"reshape",
"(",
"data",
",",
"(",
"3",
",",
"height",
",",
"width",
")",
")",
"return",
"data"
] |
https://github.com/BradNeuberg/cloudless/blob/052d16e314a24b3ff36d9da94f2e9e53e0e1e0e0/src/cloudless/train/prepare_data.py#L446-L455
|
|
IronLanguages/main
|
a949455434b1fda8c783289e897e78a9a0caabb5
|
External.LCA_RESTRICTED/Languages/IronPython/27/Doc/jinja2/utils.py
|
python
|
LRUCache.items
|
(self)
|
return result
|
Return a list of items.
|
Return a list of items.
|
[
"Return",
"a",
"list",
"of",
"items",
"."
] |
def items(self):
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
|
[
"def",
"items",
"(",
"self",
")",
":",
"result",
"=",
"[",
"(",
"key",
",",
"self",
".",
"_mapping",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"list",
"(",
"self",
".",
"_queue",
")",
"]",
"result",
".",
"reverse",
"(",
")",
"return",
"result"
] |
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Doc/jinja2/utils.py#L654-L658
|
|
linxid/Machine_Learning_Study_Path
|
558e82d13237114bbb8152483977806fc0c222af
|
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/encodings/cp855.py
|
python
|
IncrementalDecoder.decode
|
(self, input, final=False)
|
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
|
[] |
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
|
[
"def",
"decode",
"(",
"self",
",",
"input",
",",
"final",
"=",
"False",
")",
":",
"return",
"codecs",
".",
"charmap_decode",
"(",
"input",
",",
"self",
".",
"errors",
",",
"decoding_table",
")",
"[",
"0",
"]"
] |
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/encodings/cp855.py#L22-L23
|
|||
urwid/urwid
|
e2423b5069f51d318ea1ac0f355a0efe5448f7eb
|
urwid/util.py
|
python
|
rle_join_modify
|
( rle, rle2 )
|
Append attribute list rle2 to rle.
Merge last run of rle with first run of rle2 when possible.
MODIFIES attr parameter contents. Returns None.
|
Append attribute list rle2 to rle.
Merge last run of rle with first run of rle2 when possible.
|
[
"Append",
"attribute",
"list",
"rle2",
"to",
"rle",
".",
"Merge",
"last",
"run",
"of",
"rle",
"with",
"first",
"run",
"of",
"rle2",
"when",
"possible",
"."
] |
def rle_join_modify( rle, rle2 ):
"""
Append attribute list rle2 to rle.
Merge last run of rle with first run of rle2 when possible.
MODIFIES attr parameter contents. Returns None.
"""
if not rle2:
return
rle_append_modify(rle, rle2[0])
rle += rle2[1:]
|
[
"def",
"rle_join_modify",
"(",
"rle",
",",
"rle2",
")",
":",
"if",
"not",
"rle2",
":",
"return",
"rle_append_modify",
"(",
"rle",
",",
"rle2",
"[",
"0",
"]",
")",
"rle",
"+=",
"rle2",
"[",
"1",
":",
"]"
] |
https://github.com/urwid/urwid/blob/e2423b5069f51d318ea1ac0f355a0efe5448f7eb/urwid/util.py#L324-L334
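A sketch of the merge behaviour, assuming urwid's (attr, run_length) rle format and rle_append_modify from the same module:

rle = [('bold', 3)]
rle_join_modify(rle, [('bold', 2), (None, 4)])
# rle is now [('bold', 5), (None, 4)]: the touching 'bold' runs were merged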
|
||
cloudera/hue
|
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
|
desktop/core/ext-py/boto-2.46.1/boto/mws/connection.py
|
python
|
MWSConnection.get_capture_details
|
(self, request, response, **kw)
|
return self._post_request(request, kw, response)
|
Returns the status of a particular capture and the total amount
refunded on the capture.
|
Returns the status of a particular capture and the total amount
refunded on the capture.
|
[
"Returns",
"the",
"status",
"of",
"a",
"particular",
"capture",
"and",
"the",
"total",
"amount",
"refunded",
"on",
"the",
"capture",
"."
] |
def get_capture_details(self, request, response, **kw):
"""Returns the status of a particular capture and the total amount
refunded on the capture.
"""
return self._post_request(request, kw, response)
|
[
"def",
"get_capture_details",
"(",
"self",
",",
"request",
",",
"response",
",",
"*",
"*",
"kw",
")",
":",
"return",
"self",
".",
"_post_request",
"(",
"request",
",",
"kw",
",",
"response",
")"
] |
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/boto-2.46.1/boto/mws/connection.py#L1135-L1139
|
|
Pagure/pagure
|
512f23f5cd1f965276969747792edeb1215cba68
|
pagure/lib/query.py
|
python
|
tokenize_search_string
|
(pattern)
|
return custom_search, remaining.strip()
|
This function tokenizes search patterns into key:value and rest.
It will also correctly parse key values between quotes.
|
This function tokenizes search patterns into key:value and rest.
|
[
"This",
"function",
"tokenizes",
"search",
"patterns",
"into",
"key",
":",
"value",
"and",
"rest",
"."
] |
def tokenize_search_string(pattern):
"""This function tokenizes search patterns into key:value and rest.
It will also correctly parse key values between quotes.
"""
if pattern is None:
return {}, None
def finalize_token(token, custom_search):
if ":" in token:
# This was a "key:value" parameter
key, value = token.split(":", 1)
custom_search[key] = value
return ""
else:
# This was a token without colon, thus a search pattern
return "%s " % token
custom_search = {}
# Remaining is the remaining real search_pattern (aka, non-key:values)
remaining = ""
# Token is the current "search token" we are processing
token = ""
in_quotes = False
for char in pattern:
if char == " " and not in_quotes:
remaining += finalize_token(token, custom_search)
token = ""
elif char == '"':
in_quotes = not in_quotes
else:
token += char
# Parse the final token
remaining += finalize_token(token, custom_search)
return custom_search, remaining.strip()
|
[
"def",
"tokenize_search_string",
"(",
"pattern",
")",
":",
"if",
"pattern",
"is",
"None",
":",
"return",
"{",
"}",
",",
"None",
"def",
"finalize_token",
"(",
"token",
",",
"custom_search",
")",
":",
"if",
"\":\"",
"in",
"token",
":",
"# This was a \"key:value\" parameter",
"key",
",",
"value",
"=",
"token",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"custom_search",
"[",
"key",
"]",
"=",
"value",
"return",
"\"\"",
"else",
":",
"# This was a token without colon, thus a search pattern",
"return",
"\"%s \"",
"%",
"token",
"custom_search",
"=",
"{",
"}",
"# Remaining is the remaining real search_pattern (aka, non-key:values)",
"remaining",
"=",
"\"\"",
"# Token is the current \"search token\" we are processing",
"token",
"=",
"\"\"",
"in_quotes",
"=",
"False",
"for",
"char",
"in",
"pattern",
":",
"if",
"char",
"==",
"\" \"",
"and",
"not",
"in_quotes",
":",
"remaining",
"+=",
"finalize_token",
"(",
"token",
",",
"custom_search",
")",
"token",
"=",
"\"\"",
"elif",
"char",
"==",
"'\"'",
":",
"in_quotes",
"=",
"not",
"in_quotes",
"else",
":",
"token",
"+=",
"char",
"# Parse the final token",
"remaining",
"+=",
"finalize_token",
"(",
"token",
",",
"custom_search",
")",
"return",
"custom_search",
",",
"remaining",
".",
"strip",
"(",
")"
] |
https://github.com/Pagure/pagure/blob/512f23f5cd1f965276969747792edeb1215cba68/pagure/lib/query.py#L5339-L5375
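A worked example; quoting keeps a key:value pair with spaces together as one token:

custom, rest = tokenize_search_string('tag:easyfix "author:John Doe" crash report')
# custom == {'tag': 'easyfix', 'author': 'John Doe'}
# rest == 'crash report'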
|
|
NoGameNoLife00/mybolg
|
afe17ea5bfe405e33766e5682c43a4262232ee12
|
libs/jinja2/environment.py
|
python
|
Environment.compile_templates
|
(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False)
|
Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does not do anything
on pypy and Python 3, where pyc files are not picked up automatically
and don't give much benefit.
.. versionadded:: 2.4
|
Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
|
[
"Finds",
"all",
"the",
"templates",
"the",
"loader",
"can",
"find",
"compiles",
"them",
"and",
"stores",
"them",
"in",
"target",
".",
"If",
"zip",
"is",
"None",
"instead",
"of",
"in",
"a",
"zipfile",
"the",
"templates",
"will",
"be",
"will",
"be",
"stored",
"in",
"a",
"directory",
".",
"By",
"default",
"a",
"deflate",
"zip",
"algorithm",
"is",
"used",
"to",
"switch",
"to",
"the",
"stored",
"algorithm",
"zip",
"can",
"be",
"set",
"to",
"stored",
"."
] |
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files. This flag does not do anything
on pypy and Python 3, where pyc files are not picked up automatically
and don't give much benefit.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
if not PY2 or PYPY:
from warnings import warn
warn(Warning('py_compile has no effect on pypy or Python 3'))
py_compile = False
else:
import imp, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError as e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = self._compile(code, encode_filename(filename))
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
|
[
"def",
"compile_templates",
"(",
"self",
",",
"target",
",",
"extensions",
"=",
"None",
",",
"filter_func",
"=",
"None",
",",
"zip",
"=",
"'deflated'",
",",
"log_function",
"=",
"None",
",",
"ignore_errors",
"=",
"True",
",",
"py_compile",
"=",
"False",
")",
":",
"from",
"jinja2",
".",
"loaders",
"import",
"ModuleLoader",
"if",
"log_function",
"is",
"None",
":",
"log_function",
"=",
"lambda",
"x",
":",
"None",
"if",
"py_compile",
":",
"if",
"not",
"PY2",
"or",
"PYPY",
":",
"from",
"warnings",
"import",
"warn",
"warn",
"(",
"Warning",
"(",
"'py_compile has no effect on pypy or Python 3'",
")",
")",
"py_compile",
"=",
"False",
"else",
":",
"import",
"imp",
",",
"marshal",
"py_header",
"=",
"imp",
".",
"get_magic",
"(",
")",
"+",
"u'\\xff\\xff\\xff\\xff'",
".",
"encode",
"(",
"'iso-8859-15'",
")",
"# Python 3.3 added a source filesize to the header",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"3",
")",
":",
"py_header",
"+=",
"u'\\x00\\x00\\x00\\x00'",
".",
"encode",
"(",
"'iso-8859-15'",
")",
"def",
"write_file",
"(",
"filename",
",",
"data",
",",
"mode",
")",
":",
"if",
"zip",
":",
"info",
"=",
"ZipInfo",
"(",
"filename",
")",
"info",
".",
"external_attr",
"=",
"0o755",
"<<",
"16",
"zip_file",
".",
"writestr",
"(",
"info",
",",
"data",
")",
"else",
":",
"f",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"filename",
")",
",",
"mode",
")",
"try",
":",
"f",
".",
"write",
"(",
"data",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"if",
"zip",
"is",
"not",
"None",
":",
"from",
"zipfile",
"import",
"ZipFile",
",",
"ZipInfo",
",",
"ZIP_DEFLATED",
",",
"ZIP_STORED",
"zip_file",
"=",
"ZipFile",
"(",
"target",
",",
"'w'",
",",
"dict",
"(",
"deflated",
"=",
"ZIP_DEFLATED",
",",
"stored",
"=",
"ZIP_STORED",
")",
"[",
"zip",
"]",
")",
"log_function",
"(",
"'Compiling into Zip archive \"%s\"'",
"%",
"target",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"target",
")",
":",
"os",
".",
"makedirs",
"(",
"target",
")",
"log_function",
"(",
"'Compiling into folder \"%s\"'",
"%",
"target",
")",
"try",
":",
"for",
"name",
"in",
"self",
".",
"list_templates",
"(",
"extensions",
",",
"filter_func",
")",
":",
"source",
",",
"filename",
",",
"_",
"=",
"self",
".",
"loader",
".",
"get_source",
"(",
"self",
",",
"name",
")",
"try",
":",
"code",
"=",
"self",
".",
"compile",
"(",
"source",
",",
"name",
",",
"filename",
",",
"True",
",",
"True",
")",
"except",
"TemplateSyntaxError",
"as",
"e",
":",
"if",
"not",
"ignore_errors",
":",
"raise",
"log_function",
"(",
"'Could not compile \"%s\": %s'",
"%",
"(",
"name",
",",
"e",
")",
")",
"continue",
"filename",
"=",
"ModuleLoader",
".",
"get_module_filename",
"(",
"name",
")",
"if",
"py_compile",
":",
"c",
"=",
"self",
".",
"_compile",
"(",
"code",
",",
"encode_filename",
"(",
"filename",
")",
")",
"write_file",
"(",
"filename",
"+",
"'c'",
",",
"py_header",
"+",
"marshal",
".",
"dumps",
"(",
"c",
")",
",",
"'wb'",
")",
"log_function",
"(",
"'Byte-compiled \"%s\" as %s'",
"%",
"(",
"name",
",",
"filename",
"+",
"'c'",
")",
")",
"else",
":",
"write_file",
"(",
"filename",
",",
"code",
",",
"'w'",
")",
"log_function",
"(",
"'Compiled \"%s\" as %s'",
"%",
"(",
"name",
",",
"filename",
")",
")",
"finally",
":",
"if",
"zip",
":",
"zip_file",
".",
"close",
"(",
")",
"log_function",
"(",
"'Finished compiling templates'",
")"
] |
https://github.com/NoGameNoLife00/mybolg/blob/afe17ea5bfe405e33766e5682c43a4262232ee12/libs/jinja2/environment.py#L601-L693
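A usage sketch, assuming env is an already-configured jinja2 Environment; this compiles every template into a deflated zip and aborts on the first syntax error:

env.compile_templates('compiled_templates.zip', zip='deflated',
                      log_function=print, ignore_errors=False)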
|
||
BlueBrain/BluePyOpt
|
6d4185479bc6dddb3daad84fa27e0b8457d69652
|
bluepyopt/evaluators.py
|
python
|
Evaluator.evaluate_with_lists
|
(self, params)
|
Evaluate a parameter set (abstract).
Args:
params (list of Parameters):
The parameter values to be evaluated.
Returns:
objectives (list of Objectives):
List of Objectives with values calculated by the Evaluator.
|
Evaluate a parameter set (abstract).
|
[
"Evaluate",
"parameter",
"a",
"parameter",
"set",
"(",
"abstract",
")",
"."
] |
def evaluate_with_lists(self, params):
"""Evaluate parameter a parameter set (abstract).
Args:
params (list of Parameters):
The parameter values to be evaluated.
Returns:
objectives (list of Objectives):
List of Objectives with values calculated by the Evaluator.
"""
|
[
"def",
"evaluate_with_lists",
"(",
"self",
",",
"params",
")",
":"
] |
https://github.com/BlueBrain/BluePyOpt/blob/6d4185479bc6dddb3daad84fa27e0b8457d69652/bluepyopt/evaluators.py#L63-L74
|
||
biosbits/bits
|
19da7046a7303f1de8b53165eea1a6f486757c03
|
python/bits/__init__.py
|
python
|
pci_write
|
(bus, device, function, register, value, bytes=None)
|
Write a value of the specified size to the PCI device specified by bus:device.function register
|
Write a value of the specified size to the PCI device specified by bus:device.function register
|
[
"Write",
"a",
"value",
"of",
"the",
"specified",
"size",
"to",
"the",
"PCI",
"device",
"specified",
"by",
"bus",
":",
"device",
".",
"function",
"register"
] |
def pci_write(bus, device, function, register, value, bytes=None):
"""Write a value of the specified size to the PCI device specified by bus:device.function register"""
bytes, port = _pci_op(bus, device, function, register, bytes)
{ 1: outb, 2: outw, 4: outl }[bytes](port, value)
|
[
"def",
"pci_write",
"(",
"bus",
",",
"device",
",",
"function",
",",
"register",
",",
"value",
",",
"bytes",
"=",
"None",
")",
":",
"bytes",
",",
"port",
"=",
"_pci_op",
"(",
"bus",
",",
"device",
",",
"function",
",",
"register",
",",
"bytes",
")",
"{",
"1",
":",
"outb",
",",
"2",
":",
"outw",
",",
"4",
":",
"outl",
"}",
"[",
"bytes",
"]",
"(",
"port",
",",
"value",
")"
] |
https://github.com/biosbits/bits/blob/19da7046a7303f1de8b53165eea1a6f486757c03/python/bits/__init__.py#L118-L121
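A hypothetical call (meaningful only inside the BITS GRUB/Python environment, where outb/outw/outl exist); the bus/device/register values are placeholders:

pci_write(0, 0x1f, 0, 0x40, 0x1234, bytes=2)  # 16-bit write to config reg 0x40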
|
||
tensorflow/lingvo
|
ce10019243d954c3c3ebe739f7589b5eebfdf907
|
lingvo/jax/trainer_lib.py
|
python
|
shard_on_batch_dim_partition_spec
|
(
mesh_names: Sequence[str], x: jax.ShapeDtypeStruct)
|
return base_layer.to_partition_spec(sharding, mesh_names)
|
Fully shards x on the batch dimension.
|
Fully shards x on the batch dimension.
|
[
"Fully",
"shards",
"x",
"on",
"the",
"batch",
"dimension",
"."
] |
def shard_on_batch_dim_partition_spec(
mesh_names: Sequence[str], x: jax.ShapeDtypeStruct) -> pjit.PartitionSpec:
"""Fully shards x on the batch dimension."""
x_dim = len(x.shape)
assert x_dim >= 1
sharding = [-1] * x_dim
# Assume the first dim is batch, and fully shard the batch dim over the entire
# mesh.
sharding[0] = tuple(mesh_names)
return base_layer.to_partition_spec(sharding, mesh_names)
|
[
"def",
"shard_on_batch_dim_partition_spec",
"(",
"mesh_names",
":",
"Sequence",
"[",
"str",
"]",
",",
"x",
":",
"jax",
".",
"ShapeDtypeStruct",
")",
"->",
"pjit",
".",
"PartitionSpec",
":",
"x_dim",
"=",
"len",
"(",
"x",
".",
"shape",
")",
"assert",
"x_dim",
">=",
"1",
"sharding",
"=",
"[",
"-",
"1",
"]",
"*",
"x_dim",
"# Assume the first dim is batch, and fully shard the batch dim over the entire",
"# mesh.",
"sharding",
"[",
"0",
"]",
"=",
"tuple",
"(",
"mesh_names",
")",
"return",
"base_layer",
".",
"to_partition_spec",
"(",
"sharding",
",",
"mesh_names",
")"
] |
https://github.com/tensorflow/lingvo/blob/ce10019243d954c3c3ebe739f7589b5eebfdf907/lingvo/jax/trainer_lib.py#L499-L508
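A sketch, assuming base_layer.to_partition_spec maps -1 to an unsharded axis and that the mesh axis names below exist in the caller's mesh:

import jax
import jax.numpy as jnp

x = jax.ShapeDtypeStruct((128, 512), jnp.float32)
spec = shard_on_batch_dim_partition_spec(('replica', 'data', 'mdl'), x)
# expected: PartitionSpec(('replica', 'data', 'mdl'), None)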
|
|
HenryAlbu/FB-Messenger-Whatsapp-Discord-message-spammer
|
3bcad20e68ee68d01fc720ec694105dce91c2b68
|
app.py
|
python
|
long_operation_thread
|
()
|
[] |
def long_operation_thread():
spammer.start_spam()
|
[
"def",
"long_operation_thread",
"(",
")",
":",
"spammer",
".",
"start_spam",
"(",
")"
] |
https://github.com/HenryAlbu/FB-Messenger-Whatsapp-Discord-message-spammer/blob/3bcad20e68ee68d01fc720ec694105dce91c2b68/app.py#L8-L9
|
||||
home-assistant/core
|
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
|
homeassistant/components/homekit_controller/alarm_control_panel.py
|
python
|
HomeKitAlarmControlPanelEntity.icon
|
(self)
|
return ICON
|
Return icon.
|
Return icon.
|
[
"Return",
"icon",
"."
] |
def icon(self):
"""Return icon."""
return ICON
|
[
"def",
"icon",
"(",
"self",
")",
":",
"return",
"ICON"
] |
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/homekit_controller/alarm_control_panel.py#L75-L77
|
|
limodou/ulipad
|
4c7d590234f39cac80bb1d36dca095b646e287fb
|
modules/meide.py
|
python
|
LayoutBase.add
|
(self, element, name='', proportion=None, flag=None, border=None)
|
return element
|
Add an element to it.
element
It could be any Element object, or even an Element class name. And you
can also pass a real wx widget object. meide will automatically
wrap it in a SimpleElement or a SimpleValueElement according to whether
the element has GetValue() and SetValue() functions.
name
If you don't specify a name parameter, then meide will create one for
you. The format is '_id_%d', and %d will automatically increase. So
every element will have a name, and you can use this name to get the
element back via find(name).
proportion, flag, border
Just like the same parameters of the sizer class. `proportion` and `flag`
take effect together with the `proportion` class attribute.
If they are `None`, then meide will guess the suitable value for them.
`proportion` via _guess_proportion()
`flag` via _guess_expand() and _get_flag()
`border` via padding
The add method supports lazy execution. So if the layout has already been
created, the widget being added is created immediately when you invoke
add(); if not, it will be created when you invoke the create() function.
|
Add an element to it.
element
It could be any Element object, or even an Element class name. And you
can also pass a real wx widget object. meide will automatically
wrap it in a SimpleElement or a SimpleValueElement according to whether
the element has GetValue() and SetValue() functions.
name
If you don't specify a name parameter, then meide will create one for
you. The format is '_id_%d', and %d will automatically increase. So
every element will have a name, and you can use this name to get the
element back via find(name).
proportion, flag, border
Just like the same parameters of the sizer class. `proportion` and `flag`
take effect together with the `proportion` class attribute.
If they are `None`, then meide will guess the suitable value for them.
`proportion` via _guess_proportion()
`flag` via _guess_expand() and _get_flag()
`border` via padding
The add method supports lazy execution. So if the layout has already been
created, the widget being added is created immediately when you invoke
add(); if not, it will be created when you invoke the create() function.
|
[
"Add",
"a",
"element",
"to",
"it",
".",
"element",
"It",
"could",
"be",
"any",
"Element",
"object",
"or",
"event",
"Element",
"class",
"name",
".",
"And",
"you",
"can",
"also",
"pass",
"a",
"real",
"wx",
"widget",
"object",
".",
"meide",
"will",
"automatically",
"wrap",
"it",
"to",
"a",
"SimpleElement",
"or",
"a",
"SimpleValueElement",
"according",
"whether",
"the",
"element",
"has",
"GetValue",
"()",
"and",
"SetValue",
"()",
"functions",
".",
"name",
"If",
"you",
"don",
"t",
"specify",
"a",
"name",
"parameter",
"then",
"meide",
"will",
"create",
"one",
"for",
"you",
".",
"The",
"format",
"is",
"_id_%d",
"and",
"%d",
"will",
"automatically",
"increase",
".",
"So",
"every",
"element",
"will",
"have",
"a",
"name",
"and",
"you",
"can",
"use",
"this",
"name",
"to",
"get",
"the",
"element",
"back",
"via",
"find",
"(",
"name",
")",
".",
"proportion",
"flag",
"border",
"Just",
"like",
"the",
"same",
"parameters",
"of",
"sizer",
"class",
".",
"propertion",
"and",
"flag",
"will",
"effect",
"with",
"proportion",
"class",
"attribute",
".",
"If",
"they",
"are",
"None",
"then",
"meide",
"will",
"guess",
"the",
"suitable",
"value",
"for",
"them",
".",
"proportion",
"via",
"_guess_proportion",
"()",
"flag",
"via",
"_guess_expand",
"()",
"and",
"_get_flag",
"()",
"border",
"via",
"padding",
"add",
"method",
"supports",
"lazy",
"execution",
".",
"So",
"if",
"you",
"v",
"created",
"the",
"element",
"when",
"you",
"invoking",
"add",
"()",
"method",
"the",
"widget",
"which",
"is",
"being",
"added",
"is",
"created",
"immediately",
"but",
"if",
"not",
"the",
"widget",
"will",
"be",
"created",
"when",
"you",
"invoking",
"create",
"()",
"function",
"."
] |
def add(self, element, name='', proportion=None, flag=None, border=None):
"""
Add an element to it.
element
It could be any Element object, or even an Element class name. And you
can also pass a real wx widget object. meide will automatically
wrap it in a SimpleElement or a SimpleValueElement according to whether
the element has GetValue() and SetValue() functions.
name
If you don't specify a name parameter, then meide will create one for
you. The format is '_id_%d', and %d will automatically increase. So
every element will have a name, and you can use this name to get the
element back via find(name).
proportion, flag, border
Just like the same parameters of the sizer class. `proportion` and `flag`
take effect together with the `proportion` class attribute.
If they are `None`, then meide will guess the suitable value for them.
`proportion` via _guess_proportion()
`flag` via _guess_expand() and _get_flag()
`border` via padding
The add method supports lazy execution. So if the layout has already been
created, the widget being added is created immediately when you invoke
add(); if not, it will be created when you invoke the create() function.
"""
if not name:
self._id += 1
name = '_id_%d' % self._id
element = self._prepare_element(element)
self.elements[name] = element
element.name = name
args = {'proportion':proportion, 'flag':flag, 'border':border}
self.elements_args[name] = args
self.orders.append(name)
if self.created:
self._create_element(name, element, args, len(self.orders) - 1)
self._layout()
return element
|
[
"def",
"add",
"(",
"self",
",",
"element",
",",
"name",
"=",
"''",
",",
"proportion",
"=",
"None",
",",
"flag",
"=",
"None",
",",
"border",
"=",
"None",
")",
":",
"if",
"not",
"name",
":",
"self",
".",
"_id",
"+=",
"1",
"name",
"=",
"'_id_%d'",
"%",
"self",
".",
"_id",
"element",
"=",
"self",
".",
"_prepare_element",
"(",
"element",
")",
"self",
".",
"elements",
"[",
"name",
"]",
"=",
"element",
"element",
".",
"name",
"=",
"name",
"args",
"=",
"{",
"'proportion'",
":",
"proportion",
",",
"'flag'",
":",
"flag",
",",
"'border'",
":",
"border",
"}",
"self",
".",
"elements_args",
"[",
"name",
"]",
"=",
"args",
"self",
".",
"orders",
".",
"append",
"(",
"name",
")",
"if",
"self",
".",
"created",
":",
"self",
".",
"_create_element",
"(",
"name",
",",
"element",
",",
"args",
",",
"len",
"(",
"self",
".",
"orders",
")",
"-",
"1",
")",
"self",
".",
"_layout",
"(",
")",
"return",
"element"
] |
https://github.com/limodou/ulipad/blob/4c7d590234f39cac80bb1d36dca095b646e287fb/modules/meide.py#L489-L531
|
|
urinieto/msaf
|
17db5b698e06d662dfa5c7442d826022746454b7
|
msaf/base.py
|
python
|
Features.compute_beat_sync_features
|
(self, beat_frames, beat_times, pad)
|
return beatsync_feats, beatsync_times
|
Make the features beat-synchronous.
Parameters
----------
beat_frames: np.array
The frame indices of the beat positions.
beat_times: np.array
The time points of the beat positions (in seconds).
pad: boolean
If `True`, `beat_frames` is padded to span the full range.
Returns
-------
beatsync_feats: np.array
The beat-synchronized features.
`None` if the beat_frames was `None`.
beatsync_times: np.array
The beat-synchronized times.
`None` if the beat_frames was `None`.
|
Make the features beat-synchronous.
|
[
"Make",
"the",
"features",
"beat",
"-",
"synchronous",
"."
] |
def compute_beat_sync_features(self, beat_frames, beat_times, pad):
"""Make the features beat-synchronous.
Parameters
----------
beat_frames: np.array
The frame indices of the beat positions.
beat_times: np.array
The time points of the beat positions (in seconds).
pad: boolean
If `True`, `beat_frames` is padded to span the full range.
Returns
-------
beatsync_feats: np.array
The beat-synchronized features.
`None` if the beat_frames was `None`.
beatsync_times: np.array
The beat-synchronized times.
`None` if the beat_frames was `None`.
"""
if beat_frames is None:
return None, None
# Make beat synchronous
beatsync_feats = librosa.util.utils.sync(self._framesync_features.T,
beat_frames, pad=pad).T
# Assign times (and add last time if padded)
beatsync_times = np.copy(beat_times)
if beatsync_times.shape[0] != beatsync_feats.shape[0]:
beatsync_times = np.concatenate((beatsync_times,
[self._framesync_times[-1]]))
return beatsync_feats, beatsync_times
|
[
"def",
"compute_beat_sync_features",
"(",
"self",
",",
"beat_frames",
",",
"beat_times",
",",
"pad",
")",
":",
"if",
"beat_frames",
"is",
"None",
":",
"return",
"None",
",",
"None",
"# Make beat synchronous",
"beatsync_feats",
"=",
"librosa",
".",
"util",
".",
"utils",
".",
"sync",
"(",
"self",
".",
"_framesync_features",
".",
"T",
",",
"beat_frames",
",",
"pad",
"=",
"pad",
")",
".",
"T",
"# Assign times (and add last time if padded)",
"beatsync_times",
"=",
"np",
".",
"copy",
"(",
"beat_times",
")",
"if",
"beatsync_times",
".",
"shape",
"[",
"0",
"]",
"!=",
"beatsync_feats",
".",
"shape",
"[",
"0",
"]",
":",
"beatsync_times",
"=",
"np",
".",
"concatenate",
"(",
"(",
"beatsync_times",
",",
"[",
"self",
".",
"_framesync_times",
"[",
"-",
"1",
"]",
"]",
")",
")",
"return",
"beatsync_feats",
",",
"beatsync_times"
] |
https://github.com/urinieto/msaf/blob/17db5b698e06d662dfa5c7442d826022746454b7/msaf/base.py#L174-L207
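A minimal sketch of the beat-synchronization step above, assuming librosa and numpy are installed; the feature matrix and beat frames here are made up for illustration:

import numpy as np
import librosa

framesync_feats = np.random.rand(12, 100)   # hypothetical features: 12 bins x 100 frames
beat_frames = np.array([10, 30, 55, 80])    # hypothetical beat frame indices

# librosa.util.sync aggregates columns between consecutive boundary indices
# (mean by default); pad=True extends the boundaries to cover the full range,
# which is why the method above may need to append one extra time point.
beatsync_feats = librosa.util.sync(framesync_feats, beat_frames, pad=True).T
print(beatsync_feats.shape)                 # (5, 12): len(beat_frames) + 1 segments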
|
|
medbenali/CyberScan
|
ca85794cfce5e83e9cc5fca1512ba6edf2f14dee
|
pygeoip/util.py
|
python
|
ip2long
|
(ip)
|
Wrapper function for IPv4 and IPv6 converters.
:arg ip: IPv4 or IPv6 address
|
Wrapper function for IPv4 and IPv6 converters.
|
[
"Wrapper",
"function",
"for",
"IPv4",
"and",
"IPv6",
"converters",
"."
] |
def ip2long(ip):
"""
Wrapper function for IPv4 and IPv6 converters.
:arg ip: IPv4 or IPv6 address
"""
try:
return int(binascii.hexlify(socket.inet_aton(ip)), 16)
except socket.error:
return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ip)), 16)
|
[
"def",
"ip2long",
"(",
"ip",
")",
":",
"try",
":",
"return",
"int",
"(",
"binascii",
".",
"hexlify",
"(",
"socket",
".",
"inet_aton",
"(",
"ip",
")",
")",
",",
"16",
")",
"except",
"socket",
".",
"error",
":",
"return",
"int",
"(",
"binascii",
".",
"hexlify",
"(",
"socket",
".",
"inet_pton",
"(",
"socket",
".",
"AF_INET6",
",",
"ip",
")",
")",
",",
"16",
")"
] |
https://github.com/medbenali/CyberScan/blob/ca85794cfce5e83e9cc5fca1512ba6edf2f14dee/pygeoip/util.py#L30-L39
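A quick round-trip check of the converter above, standard library only; the expected values are easy to verify by hand:

import binascii
import socket
import struct

def ip2long(ip):
    try:
        return int(binascii.hexlify(socket.inet_aton(ip)), 16)
    except socket.error:
        return int(binascii.hexlify(socket.inet_pton(socket.AF_INET6, ip)), 16)

print(ip2long('127.0.0.1'))   # 2130706433 == 0x7f000001
print(socket.inet_ntoa(struct.pack('!I', ip2long('127.0.0.1'))))  # '127.0.0.1'
print(ip2long('::1'))         # 1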
|
||
shiweibsw/Translation-Tools
|
2fbbf902364e557fa7017f9a74a8797b7440c077
|
venv/Lib/site-packages/xlwt/antlr.py
|
python
|
BaseAST.toStringTree
|
(self)
|
return ts
|
[] |
def toStringTree(self):
ts = ""
kid = self.getFirstChild()
if kid:
ts += " ("
ts += " " + self.toString()
if kid:
ts += kid.toStringList()
ts += " )"
return ts
|
[
"def",
"toStringTree",
"(",
"self",
")",
":",
"ts",
"=",
"\"\"",
"kid",
"=",
"self",
".",
"getFirstChild",
"(",
")",
"if",
"kid",
":",
"ts",
"+=",
"\" (\"",
"ts",
"+=",
"\" \"",
"+",
"self",
".",
"toString",
"(",
")",
"if",
"kid",
":",
"ts",
"+=",
"kid",
".",
"toStringList",
"(",
")",
"ts",
"+=",
"\" )\"",
"return",
"ts"
] |
https://github.com/shiweibsw/Translation-Tools/blob/2fbbf902364e557fa7017f9a74a8797b7440c077/venv/Lib/site-packages/xlwt/antlr.py#L2495-L2504
|
|||
spender-sandbox/cuckoo-modified
|
eb93ef3d41b8fee51b4330306dcd315d8101e021
|
modules/machinery/vsphere.py
|
python
|
vSphere.start
|
(self, label)
|
Start a machine.
@param label: machine name.
@raise CuckooMachineError: if unable to start machine.
|
Start a machine.
|
[
"Start",
"a",
"machine",
"."
] |
def start(self, label):
"""Start a machine.
@param label: machine name.
@raise CuckooMachineError: if unable to start machine.
"""
name = self.db.view_machine_by_label(label).snapshot
with SmartConnection(**self.connect_opts) as conn:
vm = self._get_virtual_machine_by_label(conn, label)
if vm:
self._revert_snapshot(vm, name)
else:
raise CuckooMachineError("Machine {0} not found on host"
.format(label))
|
[
"def",
"start",
"(",
"self",
",",
"label",
")",
":",
"name",
"=",
"self",
".",
"db",
".",
"view_machine_by_label",
"(",
"label",
")",
".",
"snapshot",
"with",
"SmartConnection",
"(",
"*",
"*",
"self",
".",
"connect_opts",
")",
"as",
"conn",
":",
"vm",
"=",
"self",
".",
"_get_virtual_machine_by_label",
"(",
"conn",
",",
"label",
")",
"if",
"vm",
":",
"self",
".",
"_revert_snapshot",
"(",
"vm",
",",
"name",
")",
"else",
":",
"raise",
"CuckooMachineError",
"(",
"\"Machine {0} not found on host\"",
".",
"format",
"(",
"label",
")",
")"
] |
https://github.com/spender-sandbox/cuckoo-modified/blob/eb93ef3d41b8fee51b4330306dcd315d8101e021/modules/machinery/vsphere.py#L130-L142
|
||
scikit-learn/scikit-learn
|
1d1aadd0711b87d2a11c80aad15df6f8cf156712
|
sklearn/metrics/cluster/_supervised.py
|
python
|
entropy
|
(labels)
|
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
|
Calculates the entropy for a labeling.
Parameters
----------
labels : int array, shape = [n_samples]
The labels
Notes
-----
The logarithm used is the natural logarithm (base-e).
|
Calculates the entropy for a labeling.
|
[
"Calculates",
"the",
"entropy",
"for",
"a",
"labeling",
"."
] |
def entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : int array, shape = [n_samples]
The labels
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
|
[
"def",
"entropy",
"(",
"labels",
")",
":",
"if",
"len",
"(",
"labels",
")",
"==",
"0",
":",
"return",
"1.0",
"label_idx",
"=",
"np",
".",
"unique",
"(",
"labels",
",",
"return_inverse",
"=",
"True",
")",
"[",
"1",
"]",
"pi",
"=",
"np",
".",
"bincount",
"(",
"label_idx",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"pi",
"=",
"pi",
"[",
"pi",
">",
"0",
"]",
"pi_sum",
"=",
"np",
".",
"sum",
"(",
"pi",
")",
"# log(a / b) should be calculated as log(a) - log(b) for",
"# possible loss of precision",
"return",
"-",
"np",
".",
"sum",
"(",
"(",
"pi",
"/",
"pi_sum",
")",
"*",
"(",
"np",
".",
"log",
"(",
"pi",
")",
"-",
"log",
"(",
"pi_sum",
")",
")",
")"
] |
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/metrics/cluster/_supervised.py#L1123-L1143
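A worked numeric check of the formula above, H = -sum(p * ln p), computed as log(a/b) = log(a) - log(b) for precision; a self-contained sketch assuming only numpy:

import numpy as np

labels = np.array([0, 0, 1, 1])
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float64)   # counts: [2., 2.]
pi = pi[pi > 0]
pi_sum = np.sum(pi)
entropy = -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
print(entropy)   # ~0.6931 == ln(2): two equally likely labels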
|
|
NervanaSystems/neon
|
8c3fb8a93b4a89303467b25817c60536542d08bd
|
neon/backends/backend.py
|
python
|
Backend.exp
|
(self, a, out=None)
|
return OpTreeNode.build("exp", a, None, out=out)
|
Perform element-wise exponential transformation on Tensor `a`, storing
the result in Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
|
Perform element-wise exponential transformation on Tensor `a`, storing
the result in Tensor out. Both Tensors should have identical shape.
|
[
"Perform",
"element",
"-",
"wise",
"exponential",
"transformation",
"on",
"Tensor",
"a",
"storing",
"the",
"result",
"in",
"Tensor",
"out",
".",
"Both",
"Tensor",
"s",
"should",
"have",
"identical",
"shape",
"."
] |
def exp(self, a, out=None):
"""
Perform element-wise exponential transformation on Tensor `a`, storing
the result in Tensor out. Both Tensors should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
return OpTreeNode.build("exp", a, None, out=out)
|
[
"def",
"exp",
"(",
"self",
",",
"a",
",",
"out",
"=",
"None",
")",
":",
"return",
"OpTreeNode",
".",
"build",
"(",
"\"exp\"",
",",
"a",
",",
"None",
",",
"out",
"=",
"out",
")"
] |
https://github.com/NervanaSystems/neon/blob/8c3fb8a93b4a89303467b25817c60536542d08bd/neon/backends/backend.py#L953-L966
|
|
scrapinghub/splash
|
802d8391984bae049ef95a3fe1a74feaee95a233
|
splash/kernel/kernel.py
|
python
|
DeferredSplashRunner.run
|
(self, main_coro)
|
return d
|
Run main_coro Lua coroutine, passing it a Splash
instance as an argument. Return a Deferred.
|
Run main_coro Lua coroutine, passing it a Splash
instance as an argument. Return a Deferred.
|
[
"Run",
"main_coro",
"Lua",
"coroutine",
"passing",
"it",
"a",
"Splash",
"instance",
"as",
"an",
"argument",
".",
"Return",
"a",
"Deferred",
"."
] |
def run(self, main_coro):
"""
Run main_coro Lua coroutine, passing it a Splash
instance as an argument. Return a Deferred.
"""
d = defer.Deferred()
def return_result(result):
d.callback(result)
def return_error(err):
d.errback(err)
self.runner.start(
main_coro=main_coro,
return_result=return_result,
return_error=return_error,
)
return d
|
[
"def",
"run",
"(",
"self",
",",
"main_coro",
")",
":",
"d",
"=",
"defer",
".",
"Deferred",
"(",
")",
"def",
"return_result",
"(",
"result",
")",
":",
"d",
".",
"callback",
"(",
"result",
")",
"def",
"return_error",
"(",
"err",
")",
":",
"d",
".",
"errback",
"(",
"err",
")",
"self",
".",
"runner",
".",
"start",
"(",
"main_coro",
"=",
"main_coro",
",",
"return_result",
"=",
"return_result",
",",
"return_error",
"=",
"return_error",
",",
")",
"return",
"d"
] |
https://github.com/scrapinghub/splash/blob/802d8391984bae049ef95a3fe1a74feaee95a233/splash/kernel/kernel.py#L79-L97
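The pattern above bridges a callback-style API to a Twisted Deferred by handing d.callback/d.errback to the producer. A minimal sketch, assuming Twisted is installed; run_with_callbacks is a hypothetical stand-in for the Lua runner:

from twisted.internet import defer

def run_with_callbacks(return_result, return_error):
    # Hypothetical callback-style producer; calls back synchronously here.
    return_result(42)

def run():
    d = defer.Deferred()
    run_with_callbacks(d.callback, d.errback)
    return d

run().addCallback(lambda result: print("got", result))   # got 42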
|
|
BangLiu/ArticlePairMatching
|
51745af80e093391f668477d8d00ae59a0481d6f
|
src/models/CCIG/util/file_utils.py
|
python
|
pickle_dump_large_file
|
(obj, filepath)
|
This is a defensive way to write pickle.write,
allowing for very large files on all platforms
|
This is a defensive way to write pickle.write,
allowing for very large files on all platforms
|
[
"This",
"is",
"a",
"defensive",
"way",
"to",
"write",
"pickle",
".",
"write",
"allowing",
"for",
"very",
"large",
"files",
"on",
"all",
"platforms"
] |
def pickle_dump_large_file(obj, filepath):
"""
This is a defensive way to write pickle.write,
allowing for very large files on all platforms
"""
max_bytes = 2**31 - 1
bytes_out = pickle.dumps(obj)
n_bytes = sys.getsizeof(bytes_out)
with open(filepath, 'wb') as f_out:
for idx in range(0, n_bytes, max_bytes):
f_out.write(bytes_out[idx:idx + max_bytes])
|
[
"def",
"pickle_dump_large_file",
"(",
"obj",
",",
"filepath",
")",
":",
"max_bytes",
"=",
"2",
"**",
"31",
"-",
"1",
"bytes_out",
"=",
"pickle",
".",
"dumps",
"(",
"obj",
")",
"n_bytes",
"=",
"sys",
".",
"getsizeof",
"(",
"bytes_out",
")",
"with",
"open",
"(",
"filepath",
",",
"'wb'",
")",
"as",
"f_out",
":",
"for",
"idx",
"in",
"range",
"(",
"0",
",",
"n_bytes",
",",
"max_bytes",
")",
":",
"f_out",
".",
"write",
"(",
"bytes_out",
"[",
"idx",
":",
"idx",
"+",
"max_bytes",
"]",
")"
] |
https://github.com/BangLiu/ArticlePairMatching/blob/51745af80e093391f668477d8d00ae59a0481d6f/src/models/CCIG/util/file_utils.py#L32-L42
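The natural read-side counterpart reassembles the file in the same sub-2 GiB chunks before unpickling; a sketch under that assumption (the repository may define its own variant):

import os
import pickle

def pickle_load_large_file(filepath):
    # Mirror of the chunked writer: read back in < 2**31 - 1 byte chunks.
    max_bytes = 2**31 - 1
    input_size = os.path.getsize(filepath)
    bytes_in = bytearray(0)
    with open(filepath, 'rb') as f_in:
        for _ in range(0, input_size, max_bytes):
            bytes_in += f_in.read(max_bytes)
    return pickle.loads(bytes_in)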
|
||
spack/spack
|
675210bd8bd1c5d32ad1cc83d898fb43b569ed74
|
lib/spack/spack/cmd/pkg.py
|
python
|
pkg_removed
|
(args)
|
show packages removed since a commit
|
show packages removed since a commit
|
[
"show",
"packages",
"removed",
"since",
"a",
"commit"
] |
def pkg_removed(args):
"""show packages removed since a commit"""
u1, u2 = diff_packages(args.rev1, args.rev2)
if u1:
colify(sorted(u1))
|
[
"def",
"pkg_removed",
"(",
"args",
")",
":",
"u1",
",",
"u2",
"=",
"diff_packages",
"(",
"args",
".",
"rev1",
",",
"args",
".",
"rev2",
")",
"if",
"u1",
":",
"colify",
"(",
"sorted",
"(",
"u1",
")",
")"
] |
https://github.com/spack/spack/blob/675210bd8bd1c5d32ad1cc83d898fb43b569ed74/lib/spack/spack/cmd/pkg.py#L158-L162
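diff_packages presumably returns the two sides of a set difference between the package listings at rev1 and rev2; a hypothetical sketch of that set arithmetic, with plain printing in place of colify:

def diff_package_names(names_rev1, names_rev2):
    # Hypothetical stand-in for diff_packages: names only in rev1, only in rev2.
    s1, s2 = set(names_rev1), set(names_rev2)
    return s1 - s2, s2 - s1

removed, added = diff_package_names(['a', 'b', 'c'], ['b', 'c', 'd'])
print(sorted(removed))   # ['a'] -> present at rev1 but gone at rev2
print(sorted(added))     # ['d']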
|
||
google/timesketch
|
1ce6b60e125d104e6644947c6f1dbe1b82ac76b6
|
api_client/python/timesketch_api_client/view.py
|
python
|
View.__init__
|
(self, view_id, view_name, sketch_id, api)
|
Initializes the View object.
Args:
view_id: Primary key ID for the view.
view_name: The name of the view.
sketch_id: ID of a sketch.
api: Instance of a TimesketchApi object.
|
Initializes the View object.
|
[
"Initializes",
"the",
"View",
"object",
"."
] |
def __init__(self, view_id, view_name, sketch_id, api):
"""Initializes the View object.
Args:
view_id: Primary key ID for the view.
view_name: The name of the view.
sketch_id: ID of a sketch.
api: Instance of a TimesketchApi object.
"""
logger.info(
'View objects will be deprecated soon, consider transitioning '
'into using the search.Search object instead')
self.id = view_id
self.name = view_name
resource_uri = 'sketches/{0:d}/views/{1:d}/'.format(sketch_id, self.id)
super().__init__(api, resource_uri)
|
[
"def",
"__init__",
"(",
"self",
",",
"view_id",
",",
"view_name",
",",
"sketch_id",
",",
"api",
")",
":",
"logger",
".",
"info",
"(",
"'View objects will be deprecated soon, consider transitioning '",
"'into using the search.Search object instead'",
")",
"self",
".",
"id",
"=",
"view_id",
"self",
".",
"name",
"=",
"view_name",
"resource_uri",
"=",
"'sketches/{0:d}/views/{1:d}/'",
".",
"format",
"(",
"sketch_id",
",",
"self",
".",
"id",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"api",
",",
"resource_uri",
")"
] |
https://github.com/google/timesketch/blob/1ce6b60e125d104e6644947c6f1dbe1b82ac76b6/api_client/python/timesketch_api_client/view.py#L34-L49
|
||
KalleHallden/AutoTimer
|
2d954216700c4930baa154e28dbddc34609af7ce
|
env/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py
|
python
|
Infinity.__ge__
|
(self, other)
|
return True
|
[] |
def __ge__(self, other):
return True
|
[
"def",
"__ge__",
"(",
"self",
",",
"other",
")",
":",
"return",
"True"
] |
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/_structures.py#L30-L31
|
|||
nutonomy/nuscenes-devkit
|
05d05b3c994fb3c17b6643016d9f622a001c7275
|
python-sdk/nuscenes/map_expansion/arcline_path_utils.py
|
python
|
project_pose_to_lane
|
(pose: Pose, lane: List[ArcLinePath], resolution_meters: float = 0.5)
|
return closest_pose, distance_along_lane
|
Find the closest pose on a lane to a query pose and additionally return the
distance along the lane for this pose. Note that this function does
not take the heading of the query pose into account.
:param pose: Query pose.
:param lane: Will find the closest pose on this lane.
:param resolution_meters: How finely to discretize the lane.
:return: Tuple of the closest pose and the distance along the lane
|
Find the closest pose on a lane to a query pose and additionally return the
distance along the lane for this pose. Note that this function does
not take the heading of the query pose into account.
:param pose: Query pose.
:param lane: Will find the closest pose on this lane.
:param resolution_meters: How finely to discretize the lane.
:return: Tuple of the closest pose and the distance along the lane
|
[
"Find",
"the",
"closest",
"pose",
"on",
"a",
"lane",
"to",
"a",
"query",
"pose",
"and",
"additionally",
"return",
"the",
"distance",
"along",
"the",
"lane",
"for",
"this",
"pose",
".",
"Note",
"that",
"this",
"function",
"does",
"not",
"take",
"the",
"heading",
"of",
"the",
"query",
"pose",
"into",
"account",
".",
":",
"param",
"pose",
":",
"Query",
"pose",
".",
":",
"param",
"lane",
":",
"Will",
"find",
"the",
"closest",
"pose",
"on",
"this",
"lane",
".",
":",
"param",
"resolution_meters",
":",
"How",
"finely",
"to",
"discretize",
"the",
"lane",
".",
":",
"return",
":",
"Tuple",
"of",
"the",
"closest",
"pose",
"and",
"the",
"distance",
"along",
"the",
"lane"
] |
def project_pose_to_lane(pose: Pose, lane: List[ArcLinePath], resolution_meters: float = 0.5) -> Tuple[Pose, float]:
"""
Find the closest pose on a lane to a query pose and additionally return the
distance along the lane for this pose. Note that this function does
not take the heading of the query pose into account.
:param pose: Query pose.
:param lane: Will find the closest pose on this lane.
:param resolution_meters: How finely to discretize the lane.
:return: Tuple of the closest pose and the distance along the lane
"""
discretized_lane = discretize_lane(lane, resolution_meters=resolution_meters)
xy_points = np.array(discretized_lane)[:, :2]
closest_pose_index = np.linalg.norm(xy_points - pose[:2], axis=1).argmin()
closest_pose = discretized_lane[closest_pose_index]
distance_along_lane = closest_pose_index * resolution_meters
return closest_pose, distance_along_lane
|
[
"def",
"project_pose_to_lane",
"(",
"pose",
":",
"Pose",
",",
"lane",
":",
"List",
"[",
"ArcLinePath",
"]",
",",
"resolution_meters",
":",
"float",
"=",
"0.5",
")",
"->",
"Tuple",
"[",
"Pose",
",",
"float",
"]",
":",
"discretized_lane",
"=",
"discretize_lane",
"(",
"lane",
",",
"resolution_meters",
"=",
"resolution_meters",
")",
"xy_points",
"=",
"np",
".",
"array",
"(",
"discretized_lane",
")",
"[",
":",
",",
":",
"2",
"]",
"closest_pose_index",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"xy_points",
"-",
"pose",
"[",
":",
"2",
"]",
",",
"axis",
"=",
"1",
")",
".",
"argmin",
"(",
")",
"closest_pose",
"=",
"discretized_lane",
"[",
"closest_pose_index",
"]",
"distance_along_lane",
"=",
"closest_pose_index",
"*",
"resolution_meters",
"return",
"closest_pose",
",",
"distance_along_lane"
] |
https://github.com/nutonomy/nuscenes-devkit/blob/05d05b3c994fb3c17b6643016d9f622a001c7275/python-sdk/nuscenes/map_expansion/arcline_path_utils.py#L221-L239
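The core of the projection above is a nearest-point search over the discretized lane; a self-contained numpy sketch with a hypothetical lane of (x, y, yaw) poses spaced 0.5 m apart:

import numpy as np

discretized_lane = np.array([[0.0, 0.0, 0.0],
                             [0.5, 0.0, 0.0],
                             [1.0, 0.0, 0.0],
                             [1.5, 0.0, 0.0]])   # hypothetical (x, y, yaw) rows
pose = np.array([0.9, 0.2, 0.0])                 # query (x, y, yaw)
resolution_meters = 0.5

xy = discretized_lane[:, :2]
idx = np.linalg.norm(xy - pose[:2], axis=1).argmin()
closest_pose = discretized_lane[idx]
distance_along_lane = idx * resolution_meters
print(closest_pose, distance_along_lane)         # [1. 0. 0.] 1.0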
|
|
amundsen-io/amundsendatabuilder
|
a0af611350fde12438450d4bfd83b226ef220c3f
|
databuilder/publisher/mysql_csv_publisher.py
|
python
|
MySQLCSVPublisher._publish
|
(self, record_file: str, session: Session)
|
Iterate over each row of the given csv file and convert each record to a rds model instance.
Then the model instance will be inserted/updated in mysql.
:param record_file:
:param session:
:return:
|
Iterate over each row of the given csv file and convert each record to a rds model instance.
Then the model instance will be inserted/updated in mysql.
:param record_file:
:param session:
:return:
|
[
"Iterate",
"over",
"each",
"row",
"of",
"the",
"given",
"csv",
"file",
"and",
"convert",
"each",
"record",
"to",
"a",
"rds",
"model",
"instance",
".",
"Then",
"the",
"model",
"instance",
"will",
"be",
"inserted",
"/",
"updated",
"in",
"mysql",
".",
":",
"param",
"record_file",
":",
":",
"param",
"session",
":",
":",
"return",
":"
] |
def _publish(self, record_file: str, session: Session) -> None:
"""
Iterate over each row of the given csv file and convert each record to a rds model instance.
Then the model instance will be inserted/updated in mysql.
:param record_file:
:param session:
:return:
"""
with open(record_file, 'r', encoding='utf8') as record_csv:
table_name = self._get_table_name_from_file(record_file)
table_model = self._get_model_from_table_name(table_name)
if not table_model:
raise RuntimeError(f'Failed to get model for table: {table_name}')
for record_dict in pandas.read_csv(record_csv, na_filter=False).to_dict(orient='records'):
record = self._create_record(model=table_model, record_dict=record_dict)
session.merge(record)
self._execute(session)
session.commit()
|
[
"def",
"_publish",
"(",
"self",
",",
"record_file",
":",
"str",
",",
"session",
":",
"Session",
")",
"->",
"None",
":",
"with",
"open",
"(",
"record_file",
",",
"'r'",
",",
"encoding",
"=",
"'utf8'",
")",
"as",
"record_csv",
":",
"table_name",
"=",
"self",
".",
"_get_table_name_from_file",
"(",
"record_file",
")",
"table_model",
"=",
"self",
".",
"_get_model_from_table_name",
"(",
"table_name",
")",
"if",
"not",
"table_model",
":",
"raise",
"RuntimeError",
"(",
"f'Failed to get model for table: {table_name}'",
")",
"for",
"record_dict",
"in",
"pandas",
".",
"read_csv",
"(",
"record_csv",
",",
"na_filter",
"=",
"False",
")",
".",
"to_dict",
"(",
"orient",
"=",
"'records'",
")",
":",
"record",
"=",
"self",
".",
"_create_record",
"(",
"model",
"=",
"table_model",
",",
"record_dict",
"=",
"record_dict",
")",
"session",
".",
"merge",
"(",
"record",
")",
"self",
".",
"_execute",
"(",
"session",
")",
"session",
".",
"commit",
"(",
")"
] |
https://github.com/amundsen-io/amundsendatabuilder/blob/a0af611350fde12438450d4bfd83b226ef220c3f/databuilder/publisher/mysql_csv_publisher.py#L143-L161
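The per-row conversion step relies on pandas turning each CSV row into a plain dict; a small sketch of that step alone (na_filter=False keeps empty strings instead of NaN):

import io
import pandas

record_csv = io.StringIO("name,rows\nusers,10\norders,25\n")
for record_dict in pandas.read_csv(record_csv, na_filter=False).to_dict(orient='records'):
    print(record_dict)
# {'name': 'users', 'rows': 10}
# {'name': 'orders', 'rows': 25}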
|
||
dansoutner/LSTM
|
1817b881d2f03e9d3746f39e734426c00134fda7
|
LSTM.py
|
python
|
LSTM.save
|
(self, filename)
|
cPickle net to filename
|
cPickle net to filename
|
[
"cPickle",
"net",
"to",
"filename"
] |
def save(self, filename):
"""
cPickle net to filename
"""
# attributes that we want to save
to_save = set(['CEC', 'cell_blocks',
'context', 'dic', 'full_hidden_dimension',
'full_input_dimension', 'hidden_dimension',
'independent', 'input_dimension', 'output_dimension',
'peepForgetGate', 'peepInputGate', 'peepOutputGate',
'version', 'weightsForgetGate', 'weightsGlobalOutput',
'weightsInputGate', 'weightsNetInput', 'weightsOutputGate',
'lda', 'lda_len', 'out_word_to_class', 'out_ppst_to_class',
'out_class', 'projections', 'len_projections', 'lda', 'len_lda',
'classes', 'len_classes', "input_type",
'stopwords', 'len_cache',
'class_size', 'class_max_cn', 'class_cn', 'class_words',
'idx2class_hash', 'word2class_hash', 'idx2word_hash', 'word2idx_hash'])
# need to convert memoryviews to array
convert_and_save = set(['CEC', 'context',
'peepForgetGate', 'peepInputGate', 'peepOutputGate',
'weightsForgetGate', 'weightsGlobalOutput',
'weightsInputGate', 'weightsNetInput', 'weightsOutputGate',])
# this is the rest, which we do not convert
only_save = to_save - convert_and_save
lstm_container = {}
for attr in dir(self):
if attr in convert_and_save:
lstm_container[attr] = np.asarray(getattr(self,attr))
if attr in only_save:
lstm_container[attr] = getattr(self, attr)
try:
cPickle.dump(lstm_container, open(filename+".lstm", "wb"), protocol=cPickle.HIGHEST_PROTOCOL)
except IOError:
raise
|
[
"def",
"save",
"(",
"self",
",",
"filename",
")",
":",
"# attributes that we want to save",
"to_save",
"=",
"set",
"(",
"[",
"'CEC'",
",",
"'cell_blocks'",
",",
"'context'",
",",
"'dic'",
",",
"'full_hidden_dimension'",
",",
"'full_input_dimension'",
",",
"'hidden_dimension'",
",",
"'independent'",
",",
"'input_dimension'",
",",
"'output_dimension'",
",",
"'peepForgetGate'",
",",
"'peepInputGate'",
",",
"'peepOutputGate'",
",",
"'version'",
",",
"'weightsForgetGate'",
",",
"'weightsGlobalOutput'",
",",
"'weightsInputGate'",
",",
"'weightsNetInput'",
",",
"'weightsOutputGate'",
",",
"'lda'",
",",
"'lda_len'",
",",
"'out_word_to_class'",
",",
"'out_ppst_to_class'",
",",
"'out_class'",
",",
"'projections'",
",",
"'len_projections'",
",",
"'lda'",
",",
"'len_lda'",
",",
"'classes'",
",",
"'len_classes'",
",",
"\"input_type\"",
",",
"'stopwords'",
",",
"'len_cache'",
",",
"'class_size'",
",",
"'class_max_cn'",
",",
"'class_cn'",
",",
"'class_words'",
",",
"'idx2class_hash'",
",",
"'word2class_hash'",
",",
"'idx2word_hash'",
",",
"'word2idx_hash'",
"]",
")",
"# need to convert memoryviews to array",
"convert_and_save",
"=",
"set",
"(",
"[",
"'CEC'",
",",
"'context'",
",",
"'peepForgetGate'",
",",
"'peepInputGate'",
",",
"'peepOutputGate'",
",",
"'weightsForgetGate'",
",",
"'weightsGlobalOutput'",
",",
"'weightsInputGate'",
",",
"'weightsNetInput'",
",",
"'weightsOutputGate'",
",",
"]",
")",
"# this is rest which we do not convert",
"only_save",
"=",
"to_save",
"-",
"convert_and_save",
"lstm_container",
"=",
"{",
"}",
"for",
"attr",
"in",
"dir",
"(",
"self",
")",
":",
"if",
"attr",
"in",
"convert_and_save",
":",
"lstm_container",
"[",
"attr",
"]",
"=",
"np",
".",
"asarray",
"(",
"getattr",
"(",
"self",
",",
"attr",
")",
")",
"if",
"attr",
"in",
"only_save",
":",
"lstm_container",
"[",
"attr",
"]",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"try",
":",
"cPickle",
".",
"dump",
"(",
"lstm_container",
",",
"open",
"(",
"filename",
"+",
"\".lstm\"",
",",
"\"wb\"",
")",
",",
"protocol",
"=",
"cPickle",
".",
"HIGHEST_PROTOCOL",
")",
"except",
"IOError",
":",
"raise"
] |
https://github.com/dansoutner/LSTM/blob/1817b881d2f03e9d3746f39e734426c00134fda7/LSTM.py#L536-L574
|
||
DataDog/integrations-core
|
934674b29d94b70ccc008f76ea172d0cdae05e1e
|
datadog_checks_base/datadog_checks/base/utils/db/transform.py
|
python
|
_compile_service_check_statuses
|
(modifiers)
|
return status_map
|
[] |
def _compile_service_check_statuses(modifiers):
# type: (Dict[str, Any]) -> Dict[str, ServiceCheckStatus]
status_map = modifiers.pop('status_map', None)
if status_map is None:
raise ValueError('the `status_map` parameter is required')
elif not isinstance(status_map, dict):
raise ValueError('the `status_map` parameter must be a mapping')
elif not status_map:
raise ValueError('the `status_map` parameter must not be empty')
for value, status_string in list(status_map.items()):
if not isinstance(status_string, str):
raise ValueError(
'status `{}` for value `{}` of parameter `status_map` is not a string'.format(status_string, value)
)
status = getattr(ServiceCheck, status_string.upper(), None)
if status is None:
raise ValueError(
'invalid status `{}` for value `{}` of parameter `status_map`'.format(status_string, value)
)
status_map[value] = status
return status_map
|
[
"def",
"_compile_service_check_statuses",
"(",
"modifiers",
")",
":",
"# type: (Dict[str, Any]) -> Dict[str, ServiceCheckStatus]",
"status_map",
"=",
"modifiers",
".",
"pop",
"(",
"'status_map'",
",",
"None",
")",
"if",
"status_map",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'the `status_map` parameter is required'",
")",
"elif",
"not",
"isinstance",
"(",
"status_map",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"'the `status_map` parameter must be a mapping'",
")",
"elif",
"not",
"status_map",
":",
"raise",
"ValueError",
"(",
"'the `status_map` parameter must not be empty'",
")",
"for",
"value",
",",
"status_string",
"in",
"list",
"(",
"status_map",
".",
"items",
"(",
")",
")",
":",
"if",
"not",
"isinstance",
"(",
"status_string",
",",
"str",
")",
":",
"raise",
"ValueError",
"(",
"'status `{}` for value `{}` of parameter `status_map` is not a string'",
".",
"format",
"(",
"status_string",
",",
"value",
")",
")",
"status",
"=",
"getattr",
"(",
"ServiceCheck",
",",
"status_string",
".",
"upper",
"(",
")",
",",
"None",
")",
"if",
"status",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'invalid status `{}` for value `{}` of parameter `status_map`'",
".",
"format",
"(",
"status_string",
",",
"value",
")",
")",
"status_map",
"[",
"value",
"]",
"=",
"status",
"return",
"status_map"
] |
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/datadog_checks_base/datadog_checks/base/utils/db/transform.py#L492-L516
|
|||
thunlp/ERNIE
|
9a4ab4af54bccb70b4eb53cbfe71a2bc16b9e93f
|
code/knowledge_bert/file_utils.py
|
python
|
cached_path
|
(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None)
|
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
|
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
|
[
"Given",
"something",
"that",
"might",
"be",
"a",
"URL",
"(",
"or",
"might",
"be",
"a",
"local",
"path",
")",
"determine",
"which",
".",
"If",
"it",
"s",
"a",
"URL",
"download",
"the",
"file",
"and",
"cache",
"it",
"and",
"return",
"the",
"path",
"to",
"the",
"cached",
"file",
".",
"If",
"it",
"s",
"already",
"a",
"local",
"path",
"make",
"sure",
"the",
"file",
"exists",
"and",
"then",
"return",
"the",
"path",
"."
] |
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
|
[
"def",
"cached_path",
"(",
"url_or_filename",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"cache_dir",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
"=",
"None",
")",
"->",
"str",
":",
"if",
"cache_dir",
"is",
"None",
":",
"cache_dir",
"=",
"PYTORCH_PRETRAINED_BERT_CACHE",
"if",
"isinstance",
"(",
"url_or_filename",
",",
"Path",
")",
":",
"url_or_filename",
"=",
"str",
"(",
"url_or_filename",
")",
"if",
"isinstance",
"(",
"cache_dir",
",",
"Path",
")",
":",
"cache_dir",
"=",
"str",
"(",
"cache_dir",
")",
"parsed",
"=",
"urlparse",
"(",
"url_or_filename",
")",
"if",
"parsed",
".",
"scheme",
"in",
"(",
"'http'",
",",
"'https'",
",",
"'s3'",
")",
":",
"# URL, so get it from the cache (downloading if necessary)",
"return",
"get_from_cache",
"(",
"url_or_filename",
",",
"cache_dir",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"url_or_filename",
")",
":",
"# File, and it exists.",
"return",
"url_or_filename",
"elif",
"parsed",
".",
"scheme",
"==",
"''",
":",
"# File, but it doesn't exist.",
"raise",
"FileNotFoundError",
"(",
"\"file {} not found\"",
".",
"format",
"(",
"url_or_filename",
")",
")",
"else",
":",
"# Something unknown",
"raise",
"ValueError",
"(",
"\"unable to parse {} as a URL or as a local path\"",
".",
"format",
"(",
"url_or_filename",
")",
")"
] |
https://github.com/thunlp/ERNIE/blob/9a4ab4af54bccb70b4eb53cbfe71a2bc16b9e93f/code/knowledge_bert/file_utils.py#L74-L101
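The scheme dispatch above hinges on urlparse: remote schemes are fetched and cached, while an empty scheme means a local path; a standard-library sketch:

from urllib.parse import urlparse

for candidate in ('https://example.com/vocab.txt', 's3://bucket/key', '/tmp/vocab.txt'):
    scheme = urlparse(candidate).scheme
    if scheme in ('http', 'https', 's3'):
        print(candidate, '-> remote: fetch and cache')
    elif scheme == '':
        print(candidate, '-> local path: check existence')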
|
||
makerbot/ReplicatorG
|
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
|
skein_engines/skeinforge-50/fabmetheus_utilities/geometry/creation/gear.py
|
python
|
addBevelGear
|
(derivation, extrudeDerivation, pitchRadius, positives, teeth, vector3GearProfile)
|
Get extrude output for a cylinder gear.
|
Get extrude output for a cylinder gear.
|
[
"Get",
"extrude",
"output",
"for",
"a",
"cylinder",
"gear",
"."
] |
def addBevelGear(derivation, extrudeDerivation, pitchRadius, positives, teeth, vector3GearProfile):
"Get extrude output for a cylinder gear."
totalPitchRadius = derivation.pitchRadiusComplement + derivation.pitchRadius
totalTeeth = derivation.teethPinion + derivation.teethComplement
portionDirections = extrude.getSpacedPortionDirections(extrudeDerivation.interpolationDictionary)
loopLists = extrude.getLoopListsByPath(extrudeDerivation, None, vector3GearProfile[0], portionDirections)
firstLoopList = loopLists[0]
gearOverPinion = float(totalTeeth - teeth) / float(teeth)
thirdLayerHeight = 0.33333333333 * setting.getLayerHeight(derivation.elementNode)
pitchRadian = math.atan(math.sin(derivation.operatingRadian) / (gearOverPinion + math.cos(derivation.operatingRadian)))
coneDistance = pitchRadius / math.sin(pitchRadian)
apex = Vector3(0.0, 0.0, math.sqrt(coneDistance * coneDistance - pitchRadius * pitchRadius))
cosPitch = apex.z / coneDistance
sinPitch = math.sin(pitchRadian)
for loop in firstLoopList:
for point in loop:
alongWay = point.z / coneDistance
oneMinusAlongWay = 1.0 - alongWay
pointComplex = point.dropAxis()
pointComplexLength = abs(pointComplex)
deltaRadius = pointComplexLength - pitchRadius
cosDeltaRadius = cosPitch * deltaRadius
sinDeltaRadius = sinPitch * deltaRadius
pointComplex *= (cosDeltaRadius + pitchRadius) / pointComplexLength
point.x = pointComplex.real
point.y = pointComplex.imag
point.z += sinDeltaRadius
point.x *= oneMinusAlongWay
point.y *= oneMinusAlongWay
addBottomLoop(-thirdLayerHeight, firstLoopList)
topLoop = firstLoopList[-1]
topAddition = []
topZ = euclidean.getTopPath(topLoop) + thirdLayerHeight
oldIndex = topLoop[-1].index
for point in topLoop:
oldIndex += 1
topAddition.append(Vector3Index(oldIndex, 0.8 * point.x, 0.8 * point.y, topZ))
firstLoopList.append(topAddition)
translation = Vector3(0.0, 0.0, -euclidean.getBottomByPaths(firstLoopList))
euclidean.translateVector3Paths(firstLoopList, translation)
geometryOutput = triangle_mesh.getPillarsOutput(loopLists)
positives.append(geometryOutput)
|
[
"def",
"addBevelGear",
"(",
"derivation",
",",
"extrudeDerivation",
",",
"pitchRadius",
",",
"positives",
",",
"teeth",
",",
"vector3GearProfile",
")",
":",
"totalPitchRadius",
"=",
"derivation",
".",
"pitchRadiusComplement",
"+",
"derivation",
".",
"pitchRadius",
"totalTeeth",
"=",
"derivation",
".",
"teethPinion",
"+",
"derivation",
".",
"teethComplement",
"portionDirections",
"=",
"extrude",
".",
"getSpacedPortionDirections",
"(",
"extrudeDerivation",
".",
"interpolationDictionary",
")",
"loopLists",
"=",
"extrude",
".",
"getLoopListsByPath",
"(",
"extrudeDerivation",
",",
"None",
",",
"vector3GearProfile",
"[",
"0",
"]",
",",
"portionDirections",
")",
"firstLoopList",
"=",
"loopLists",
"[",
"0",
"]",
"gearOverPinion",
"=",
"float",
"(",
"totalTeeth",
"-",
"teeth",
")",
"/",
"float",
"(",
"teeth",
")",
"thirdLayerHeight",
"=",
"0.33333333333",
"*",
"setting",
".",
"getLayerHeight",
"(",
"derivation",
".",
"elementNode",
")",
"pitchRadian",
"=",
"math",
".",
"atan",
"(",
"math",
".",
"sin",
"(",
"derivation",
".",
"operatingRadian",
")",
"/",
"(",
"gearOverPinion",
"+",
"math",
".",
"cos",
"(",
"derivation",
".",
"operatingRadian",
")",
")",
")",
"coneDistance",
"=",
"pitchRadius",
"/",
"math",
".",
"sin",
"(",
"pitchRadian",
")",
"apex",
"=",
"Vector3",
"(",
"0.0",
",",
"0.0",
",",
"math",
".",
"sqrt",
"(",
"coneDistance",
"*",
"coneDistance",
"-",
"pitchRadius",
"*",
"pitchRadius",
")",
")",
"cosPitch",
"=",
"apex",
".",
"z",
"/",
"coneDistance",
"sinPitch",
"=",
"math",
".",
"sin",
"(",
"pitchRadian",
")",
"for",
"loop",
"in",
"firstLoopList",
":",
"for",
"point",
"in",
"loop",
":",
"alongWay",
"=",
"point",
".",
"z",
"/",
"coneDistance",
"oneMinusAlongWay",
"=",
"1.0",
"-",
"alongWay",
"pointComplex",
"=",
"point",
".",
"dropAxis",
"(",
")",
"pointComplexLength",
"=",
"abs",
"(",
"pointComplex",
")",
"deltaRadius",
"=",
"pointComplexLength",
"-",
"pitchRadius",
"cosDeltaRadius",
"=",
"cosPitch",
"*",
"deltaRadius",
"sinDeltaRadius",
"=",
"sinPitch",
"*",
"deltaRadius",
"pointComplex",
"*=",
"(",
"cosDeltaRadius",
"+",
"pitchRadius",
")",
"/",
"pointComplexLength",
"point",
".",
"x",
"=",
"pointComplex",
".",
"real",
"point",
".",
"y",
"=",
"pointComplex",
".",
"imag",
"point",
".",
"z",
"+=",
"sinDeltaRadius",
"point",
".",
"x",
"*=",
"oneMinusAlongWay",
"point",
".",
"y",
"*=",
"oneMinusAlongWay",
"addBottomLoop",
"(",
"-",
"thirdLayerHeight",
",",
"firstLoopList",
")",
"topLoop",
"=",
"firstLoopList",
"[",
"-",
"1",
"]",
"topAddition",
"=",
"[",
"]",
"topZ",
"=",
"euclidean",
".",
"getTopPath",
"(",
"topLoop",
")",
"+",
"thirdLayerHeight",
"oldIndex",
"=",
"topLoop",
"[",
"-",
"1",
"]",
".",
"index",
"for",
"point",
"in",
"topLoop",
":",
"oldIndex",
"+=",
"1",
"topAddition",
".",
"append",
"(",
"Vector3Index",
"(",
"oldIndex",
",",
"0.8",
"*",
"point",
".",
"x",
",",
"0.8",
"*",
"point",
".",
"y",
",",
"topZ",
")",
")",
"firstLoopList",
".",
"append",
"(",
"topAddition",
")",
"translation",
"=",
"Vector3",
"(",
"0.0",
",",
"0.0",
",",
"-",
"euclidean",
".",
"getBottomByPaths",
"(",
"firstLoopList",
")",
")",
"euclidean",
".",
"translateVector3Paths",
"(",
"firstLoopList",
",",
"translation",
")",
"geometryOutput",
"=",
"triangle_mesh",
".",
"getPillarsOutput",
"(",
"loopLists",
")",
"positives",
".",
"append",
"(",
"geometryOutput",
")"
] |
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-50/fabmetheus_utilities/geometry/creation/gear.py#L429-L470
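A numeric check of the cone geometry used above, with hypothetical tooth counts and operating angle; pitchRadian, coneDistance, and the apex height follow the same formulas:

import math

pitch_radius = 10.0                      # hypothetical pinion pitch radius
teeth, total_teeth = 20, 50              # hypothetical pinion + complement teeth
operating_radian = math.radians(20.0)    # hypothetical operating angle

gear_over_pinion = float(total_teeth - teeth) / float(teeth)          # 1.5
pitch_radian = math.atan(math.sin(operating_radian) /
                         (gear_over_pinion + math.cos(operating_radian)))
cone_distance = pitch_radius / math.sin(pitch_radian)
apex_z = math.sqrt(cone_distance * cone_distance - pitch_radius * pitch_radius)
print(round(pitch_radian, 4), round(cone_distance, 1), round(apex_z, 1))
# ~0.1393 rad, ~72.0, ~71.3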
|
||
jay0lee/GAM
|
c3ddeae3f35be646ce9cb471ba4ade072343be1d
|
src/gam/gapi/errors.py
|
python
|
_create_http_error_dict
|
(status_code, reason, message)
|
return {
'error': {
'code': status_code,
'errors': [{
'reason': str(reason),
'message': message,
}]
}
}
|
Creates a basic error dict similar to most Google API Errors.
Args:
status_code: Int, the error's HTTP response status code.
reason: String, a camelCase reason for the HttpError being given.
message: String, a general error message describing the error that occurred.
Returns:
dict
|
Creates a basic error dict similar to most Google API Errors.
|
[
"Creates",
"a",
"basic",
"error",
"dict",
"similar",
"to",
"most",
"Google",
"API",
"Errors",
"."
] |
def _create_http_error_dict(status_code, reason, message):
"""Creates a basic error dict similar to most Google API Errors.
Args:
status_code: Int, the error's HTTP response status code.
reason: String, a camelCase reason for the HttpError being given.
message: String, a general error message describing the error that occurred.
Returns:
dict
"""
return {
'error': {
'code': status_code,
'errors': [{
'reason': str(reason),
'message': message,
}]
}
}
|
[
"def",
"_create_http_error_dict",
"(",
"status_code",
",",
"reason",
",",
"message",
")",
":",
"return",
"{",
"'error'",
":",
"{",
"'code'",
":",
"status_code",
",",
"'errors'",
":",
"[",
"{",
"'reason'",
":",
"str",
"(",
"reason",
")",
",",
"'message'",
":",
"message",
",",
"}",
"]",
"}",
"}"
] |
https://github.com/jay0lee/GAM/blob/c3ddeae3f35be646ce9cb471ba4ade072343be1d/src/gam/gapi/errors.py#L255-L274
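Usage is straightforward; the helper just shapes its arguments into the nested structure Google APIs return. A quick check, redefining the helper so the snippet is self-contained:

def _create_http_error_dict(status_code, reason, message):
    return {'error': {'code': status_code,
                      'errors': [{'reason': str(reason), 'message': message}]}}

print(_create_http_error_dict(404, 'notFound', 'Resource does not exist'))
# {'error': {'code': 404, 'errors': [{'reason': 'notFound',
#                                     'message': 'Resource does not exist'}]}}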
|
|
Urinx/WeixinBot
|
d9edcd2c9203fe7dd203b22b71bbc48a31e9492b
|
wxbot_demo_py3/weixin.py
|
python
|
UnicodeStreamFilter.write
|
(self, s)
|
[] |
def write(self, s):
if type(s) == str:
s = s.encode().decode('utf-8')
s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
self.target.write(s)
|
[
"def",
"write",
"(",
"self",
",",
"s",
")",
":",
"if",
"type",
"(",
"s",
")",
"==",
"str",
":",
"s",
"=",
"s",
".",
"encode",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"s",
"=",
"s",
".",
"encode",
"(",
"self",
".",
"encode_to",
",",
"self",
".",
"errors",
")",
".",
"decode",
"(",
"self",
".",
"encode_to",
")",
"self",
".",
"target",
".",
"write",
"(",
"s",
")"
] |
https://github.com/Urinx/WeixinBot/blob/d9edcd2c9203fe7dd203b22b71bbc48a31e9492b/wxbot_demo_py3/weixin.py#L1199-L1203
|
||||
rucio/rucio
|
6d0d358e04f5431f0b9a98ae40f31af0ddff4833
|
lib/rucio/core/permission/cms.py
|
python
|
perm_del_protocol
|
(issuer, kwargs)
|
return _is_root(issuer) or has_account_attribute(account=issuer, key='admin')
|
Checks if an account can delete protocols from an RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
|
Checks if an account can delete protocols from an RSE.
|
[
"Checks",
"if",
"an",
"account",
"can",
"delete",
"protocols",
"from",
"an",
"RSE",
"."
] |
def perm_del_protocol(issuer, kwargs):
"""
Checks if an account can delete protocols from an RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return _is_root(issuer) or has_account_attribute(account=issuer, key='admin')
|
[
"def",
"perm_del_protocol",
"(",
"issuer",
",",
"kwargs",
")",
":",
"return",
"_is_root",
"(",
"issuer",
")",
"or",
"has_account_attribute",
"(",
"account",
"=",
"issuer",
",",
"key",
"=",
"'admin'",
")"
] |
https://github.com/rucio/rucio/blob/6d0d358e04f5431f0b9a98ae40f31af0ddff4833/lib/rucio/core/permission/cms.py#L626-L634
|
|
tern-tools/tern
|
723f43dcaae2f2f0a08a63e5e8de3938031a386e
|
tern/analyze/default/command_lib/command_lib.py
|
python
|
set_command_attrs
|
(command_obj)
|
return False
|
Given the command object, move the install and remove listings to
subcommands and set the flags, then return True. If the command name
is not in the snippets library then return False
|
Given the command object, move the install and remove listings to
subcommands and set the flags, then return True. If the command name
is not in the snippets library then return False
|
[
"Given",
"the",
"command",
"object",
"move",
"the",
"install",
"and",
"remove",
"listings",
"to",
"subcommands",
"and",
"set",
"the",
"flags",
"then",
"return",
"True",
".",
"If",
"the",
"command",
"name",
"is",
"not",
"in",
"the",
"snippets",
"library",
"then",
"return",
"False"
] |
def set_command_attrs(command_obj):
'''Given the command object, move the install and remove listings to
subcommands and set the flags, then return True. If the command name
is not in the snippets library then return False'''
command_listing = get_command_listing(command_obj.name)
if command_listing:
# the command is in the library
# look for install, remove and ignore commands
if 'install' in command_listing.keys():
set_subcommand(command_obj, 'install', command_listing['install'])
if 'remove' in command_listing.keys():
set_subcommand(command_obj, 'remove', command_listing['remove'])
if 'ignore' in command_listing.keys():
# check if any of the words in the ignore list are in
set_subcommand(command_obj, 'ignore', command_listing['ignore'])
return True
return False
|
[
"def",
"set_command_attrs",
"(",
"command_obj",
")",
":",
"command_listing",
"=",
"get_command_listing",
"(",
"command_obj",
".",
"name",
")",
"if",
"command_listing",
":",
"# the command is in the library",
"# look for install, remove and ignore commands",
"if",
"'install'",
"in",
"command_listing",
".",
"keys",
"(",
")",
":",
"set_subcommand",
"(",
"command_obj",
",",
"'install'",
",",
"command_listing",
"[",
"'install'",
"]",
")",
"if",
"'remove'",
"in",
"command_listing",
".",
"keys",
"(",
")",
":",
"set_subcommand",
"(",
"command_obj",
",",
"'remove'",
",",
"command_listing",
"[",
"'remove'",
"]",
")",
"if",
"'ignore'",
"in",
"command_listing",
".",
"keys",
"(",
")",
":",
"# check if any of the words in the ignore list are in",
"set_subcommand",
"(",
"command_obj",
",",
"'ignore'",
",",
"command_listing",
"[",
"'ignore'",
"]",
")",
"return",
"True",
"return",
"False"
] |
https://github.com/tern-tools/tern/blob/723f43dcaae2f2f0a08a63e5e8de3938031a386e/tern/analyze/default/command_lib/command_lib.py#L140-L156
|
|
GradiusX/HEVD-Python-Solutions
|
f0594ae6c926558c5a25a23fe2d4f50ce6eb6eb9
|
Win7 x86/HEVD_InsecureKernelResourceAccess.py
|
python
|
check_admin
|
(username)
|
Periodically checks for Admin Privs
|
Periodically checks for Admin Privs
|
[
"Periodically",
"checks",
"for",
"Admin",
"Privs"
] |
def check_admin(username):
''' Periodically checks for Admin Privs '''
global is_admin
while not is_admin:
members = win32net.NetLocalGroupGetMembers(None,'Administrators',1)
if username in [record['name'] for record in members[0]]:
is_admin = True
break
time.sleep(5)
|
[
"def",
"check_admin",
"(",
"username",
")",
":",
"global",
"is_admin",
"while",
"not",
"is_admin",
":",
"members",
"=",
"win32net",
".",
"NetLocalGroupGetMembers",
"(",
"None",
",",
"'Administrators'",
",",
"1",
")",
"if",
"username",
"in",
"[",
"record",
"[",
"'name'",
"]",
"for",
"record",
"in",
"members",
"[",
"0",
"]",
"]",
":",
"is_admin",
"=",
"True",
"break",
"time",
".",
"sleep",
"(",
"5",
")"
] |
https://github.com/GradiusX/HEVD-Python-Solutions/blob/f0594ae6c926558c5a25a23fe2d4f50ce6eb6eb9/Win7 x86/HEVD_InsecureKernelResourceAccess.py#L40-L48
|
||
PaddlePaddle/PaddleX
|
2bab73f81ab54e328204e7871e6ae4a82e719f5d
|
paddlex/ppcls/arch/backbone/model_zoo/ghostnet.py
|
python
|
GhostModule.forward
|
(self, inputs)
|
return out
|
[] |
def forward(self, inputs):
x = self.primary_conv(inputs)
y = self.cheap_operation(x)
out = paddle.concat([x, y], axis=1)
return out
|
[
"def",
"forward",
"(",
"self",
",",
"inputs",
")",
":",
"x",
"=",
"self",
".",
"primary_conv",
"(",
"inputs",
")",
"y",
"=",
"self",
".",
"cheap_operation",
"(",
"x",
")",
"out",
"=",
"paddle",
".",
"concat",
"(",
"[",
"x",
",",
"y",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"out"
] |
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/paddlex/ppcls/arch/backbone/model_zoo/ghostnet.py#L141-L145
|
|||
OpenCobolIDE/OpenCobolIDE
|
c78d0d335378e5fe0a5e74f53c19b68b55e85388
|
open_cobol_ide/extlibs/pygments/lexers/templates.py
|
python
|
RhtmlLexer.analyse_text
|
(text)
|
return rv
|
[] |
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
# one more than the XmlErbLexer returns
rv += 0.5
return rv
|
[
"def",
"analyse_text",
"(",
"text",
")",
":",
"rv",
"=",
"ErbLexer",
".",
"analyse_text",
"(",
"text",
")",
"-",
"0.01",
"if",
"html_doctype_matches",
"(",
"text",
")",
":",
"# one more than the XmlErbLexer returns",
"rv",
"+=",
"0.5",
"return",
"rv"
] |
https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/pygments/lexers/templates.py#L1034-L1039
|
|||
donnemartin/gitsome
|
d7c57abc7cb66e9c910a844f15d4536866da3310
|
gitsome/lib/github3/repos/repo.py
|
python
|
Repository.readme
|
(self)
|
return self._instance_or_null(Contents, json)
|
Get the README for this repository.
:returns: :class:`Contents <github3.repos.contents.Contents>`
|
Get the README for this repository.
|
[
"Get",
"the",
"README",
"for",
"this",
"repository",
"."
] |
def readme(self):
"""Get the README for this repository.
:returns: :class:`Contents <github3.repos.contents.Contents>`
"""
url = self._build_url('readme', base_url=self._api)
json = self._json(self._get(url), 200)
return self._instance_or_null(Contents, json)
|
[
"def",
"readme",
"(",
"self",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'readme'",
",",
"base_url",
"=",
"self",
".",
"_api",
")",
"json",
"=",
"self",
".",
"_json",
"(",
"self",
".",
"_get",
"(",
"url",
")",
",",
"200",
")",
"return",
"self",
".",
"_instance_or_null",
"(",
"Contents",
",",
"json",
")"
] |
https://github.com/donnemartin/gitsome/blob/d7c57abc7cb66e9c910a844f15d4536866da3310/gitsome/lib/github3/repos/repo.py#L1726-L1733
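Under the hood this maps to the GitHub REST endpoint GET /repos/{owner}/{repo}/readme; a hedged sketch using requests instead of github3's internal helpers:

import requests

def fetch_readme(owner, repo):
    # Returns README metadata with base64-encoded content, or None on error.
    url = 'https://api.github.com/repos/{}/{}/readme'.format(owner, repo)
    resp = requests.get(url, headers={'Accept': 'application/vnd.github.v3+json'})
    return resp.json() if resp.status_code == 200 else None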
|
|
AppScale/gts
|
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
|
AppServer/google/storage/speckle/python/api/rdbms.py
|
python
|
Connection.MakeRequest
|
(self, stub_method, request)
|
return response
|
Makes an ApiProxy request, and possibly raises an appropriate exception.
Args:
stub_method: A string, the name of the method to call.
request: A protobuf; 'instance' and 'connection_id' will be set
when available.
Returns:
A protobuf.
Raises:
DatabaseError: Error from SQL Service server.
|
Makes an ApiProxy request, and possibly raises an appropriate exception.
|
[
"Makes",
"an",
"ApiProxy",
"request",
"and",
"possibly",
"raises",
"an",
"appropriate",
"exception",
"."
] |
def MakeRequest(self, stub_method, request):
"""Makes an ApiProxy request, and possibly raises an appropriate exception.
Args:
stub_method: A string, the name of the method to call.
request: A protobuf; 'instance' and 'connection_id' will be set
when available.
Returns:
A protobuf.
Raises:
DatabaseError: Error from SQL Service server.
"""
request.instance = self._instance
if self._connection_id is not None:
request.connection_id = self._connection_id
if stub_method in ('Exec', 'ExecOp', 'GetMetadata'):
self._idempotent_request_id += 1
request.request_id = self._idempotent_request_id
response = self._MakeRetriableRequest(stub_method, request)
else:
response = self.MakeRequestImpl(stub_method, request)
if (hasattr(response, 'sql_exception') and
response.HasField('sql_exception')):
raise _ToDbApiException(response.sql_exception)
return response
|
[
"def",
"MakeRequest",
"(",
"self",
",",
"stub_method",
",",
"request",
")",
":",
"request",
".",
"instance",
"=",
"self",
".",
"_instance",
"if",
"self",
".",
"_connection_id",
"is",
"not",
"None",
":",
"request",
".",
"connection_id",
"=",
"self",
".",
"_connection_id",
"if",
"stub_method",
"in",
"(",
"'Exec'",
",",
"'ExecOp'",
",",
"'GetMetadata'",
")",
":",
"self",
".",
"_idempotent_request_id",
"+=",
"1",
"request",
".",
"request_id",
"=",
"self",
".",
"_idempotent_request_id",
"response",
"=",
"self",
".",
"_MakeRetriableRequest",
"(",
"stub_method",
",",
"request",
")",
"else",
":",
"response",
"=",
"self",
".",
"MakeRequestImpl",
"(",
"stub_method",
",",
"request",
")",
"if",
"(",
"hasattr",
"(",
"response",
",",
"'sql_exception'",
")",
"and",
"response",
".",
"HasField",
"(",
"'sql_exception'",
")",
")",
":",
"raise",
"_ToDbApiException",
"(",
"response",
".",
"sql_exception",
")",
"return",
"response"
] |
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/storage/speckle/python/api/rdbms.py#L920-L947
|
|
maas/maas
|
db2f89970c640758a51247c59bf1ec6f60cf4ab5
|
src/maasserver/rpc/boot.py
|
python
|
get_boot_filenames
|
(
arch,
subarch,
osystem,
series,
commissioning_osystem=undefined,
commissioning_distro_series=undefined,
)
|
return kernel, initrd, boot_dtb
|
Return the filenames of the kernel, initrd, and boot_dtb for the boot
resource.
|
Return the filenames of the kernel, initrd, and boot_dtb for the boot
resource.
|
[
"Return",
"the",
"filenames",
"of",
"the",
"kernel",
"initrd",
"and",
"boot_dtb",
"for",
"the",
"boot",
"resource",
"."
] |
def get_boot_filenames(
arch,
subarch,
osystem,
series,
commissioning_osystem=undefined,
commissioning_distro_series=undefined,
):
"""Return the filenames of the kernel, initrd, and boot_dtb for the boot
resource."""
if subarch == "generic":
# MAAS doesn't store in the BootResource table what subarch is the
# generic subarch so lookup what the generic subarch maps to.
try:
boot_resource_subarch = validate_hwe_kernel(
subarch,
None,
"%s/%s" % (arch, subarch),
osystem,
series,
commissioning_osystem=commissioning_osystem,
commissioning_distro_series=commissioning_distro_series,
)
except ValidationError:
# It's possible that no kernels exist at all for this arch,
# subarch, osystem, series combination. In that case just fallback
# to 'generic'.
boot_resource_subarch = "generic"
else:
boot_resource_subarch = subarch
try:
# Get the filename for the kernel, initrd, and boot_dtb the rack should
# use when booting.
boot_resource = BootResource.objects.get(
architecture="%s/%s" % (arch, boot_resource_subarch),
name="%s/%s" % (osystem, series),
)
boot_resource_set = boot_resource.get_latest_complete_set()
boot_resource_files = {
bfile.filetype: bfile.filename
for bfile in boot_resource_set.files.all()
}
except ObjectDoesNotExist:
# If a filename can not be found return None to allow the rack to
# figure out what to do.
return None, None, None
kernel = boot_resource_files.get(BOOT_RESOURCE_FILE_TYPE.BOOT_KERNEL)
initrd = boot_resource_files.get(BOOT_RESOURCE_FILE_TYPE.BOOT_INITRD)
boot_dtb = boot_resource_files.get(BOOT_RESOURCE_FILE_TYPE.BOOT_DTB)
return kernel, initrd, boot_dtb
|
[
"def",
"get_boot_filenames",
"(",
"arch",
",",
"subarch",
",",
"osystem",
",",
"series",
",",
"commissioning_osystem",
"=",
"undefined",
",",
"commissioning_distro_series",
"=",
"undefined",
",",
")",
":",
"if",
"subarch",
"==",
"\"generic\"",
":",
"# MAAS doesn't store in the BootResource table what subarch is the",
"# generic subarch so lookup what the generic subarch maps to.",
"try",
":",
"boot_resource_subarch",
"=",
"validate_hwe_kernel",
"(",
"subarch",
",",
"None",
",",
"\"%s/%s\"",
"%",
"(",
"arch",
",",
"subarch",
")",
",",
"osystem",
",",
"series",
",",
"commissioning_osystem",
"=",
"commissioning_osystem",
",",
"commissioning_distro_series",
"=",
"commissioning_distro_series",
",",
")",
"except",
"ValidationError",
":",
"# It's possible that no kernel's exist at all for this arch,",
"# subarch, osystem, series combination. In that case just fallback",
"# to 'generic'.",
"boot_resource_subarch",
"=",
"\"generic\"",
"else",
":",
"boot_resource_subarch",
"=",
"subarch",
"try",
":",
"# Get the filename for the kernel, initrd, and boot_dtb the rack should",
"# use when booting.",
"boot_resource",
"=",
"BootResource",
".",
"objects",
".",
"get",
"(",
"architecture",
"=",
"\"%s/%s\"",
"%",
"(",
"arch",
",",
"boot_resource_subarch",
")",
",",
"name",
"=",
"\"%s/%s\"",
"%",
"(",
"osystem",
",",
"series",
")",
",",
")",
"boot_resource_set",
"=",
"boot_resource",
".",
"get_latest_complete_set",
"(",
")",
"boot_resource_files",
"=",
"{",
"bfile",
".",
"filetype",
":",
"bfile",
".",
"filename",
"for",
"bfile",
"in",
"boot_resource_set",
".",
"files",
".",
"all",
"(",
")",
"}",
"except",
"ObjectDoesNotExist",
":",
"# If a filename can not be found return None to allow the rack to",
"# figure out what todo.",
"return",
"None",
",",
"None",
",",
"None",
"kernel",
"=",
"boot_resource_files",
".",
"get",
"(",
"BOOT_RESOURCE_FILE_TYPE",
".",
"BOOT_KERNEL",
")",
"initrd",
"=",
"boot_resource_files",
".",
"get",
"(",
"BOOT_RESOURCE_FILE_TYPE",
".",
"BOOT_INITRD",
")",
"boot_dtb",
"=",
"boot_resource_files",
".",
"get",
"(",
"BOOT_RESOURCE_FILE_TYPE",
".",
"BOOT_DTB",
")",
"return",
"kernel",
",",
"initrd",
",",
"boot_dtb"
] |
https://github.com/maas/maas/blob/db2f89970c640758a51247c59bf1ec6f60cf4ab5/src/maasserver/rpc/boot.py#L93-L143
|
|
keiffster/program-y
|
8c99b56f8c32f01a7b9887b5daae9465619d0385
|
src/programy/services/rest/google/directions/service.py
|
python
|
GoogleDirectionsService.get_default_conf_file
|
()
|
return os.path.dirname(__file__) + os.sep + "directions.conf"
|
[] |
def get_default_conf_file():
return os.path.dirname(__file__) + os.sep + "directions.conf"
|
[
"def",
"get_default_conf_file",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
"+",
"os",
".",
"sep",
"+",
"\"directions.conf\""
] |
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/services/rest/google/directions/service.py#L92-L93
|
|||
LinOTP/LinOTP
|
bb3940bbaccea99550e6c063ff824f258dd6d6d7
|
linotp/tokens/base/__init__.py
|
python
|
TokenClass.is_auth_only_token
|
(self, user)
|
return not support_challenge_response
|
check if token is in the authenticate only mode
this is required to optimize the number of requests
:param user: the user / realm where the token policy is applied
:return: boolean
|
check if token is in the authenticate only mode
this is required to optimize the number of requests
|
[
"check",
"if",
"token",
"is",
"in",
"the",
"authenticate",
"only",
"mode",
"this",
"is",
"required",
"to",
"optimize",
"the",
"number",
"of",
"requests"
] |
def is_auth_only_token(self, user):
"""
check if token is in the authenticate only mode
this is required to optimize the number of requests
:param user: the user / realm where the token policy is applied
:return: boolean
"""
if len(self.mode) == 1 and "authenticate" in self.mode:
return True
if len(self.mode) == 1 and "challenge" in self.mode:
return False
import linotp.lib.policy
support_challenge_response = (
linotp.lib.policy.get_auth_challenge_response(user, self.type)
)
return not support_challenge_response
|
[
"def",
"is_auth_only_token",
"(",
"self",
",",
"user",
")",
":",
"if",
"len",
"(",
"self",
".",
"mode",
")",
"==",
"1",
"and",
"\"authenticate\"",
"in",
"self",
".",
"mode",
":",
"return",
"True",
"if",
"len",
"(",
"self",
".",
"mode",
")",
"==",
"1",
"and",
"\"challenge\"",
"in",
"self",
".",
"mode",
":",
"return",
"False",
"import",
"linotp",
".",
"lib",
".",
"policy",
"support_challenge_response",
"=",
"(",
"linotp",
".",
"lib",
".",
"policy",
".",
"get_auth_challenge_response",
"(",
"user",
",",
"self",
".",
"type",
")",
")",
"return",
"not",
"support_challenge_response"
] |
https://github.com/LinOTP/LinOTP/blob/bb3940bbaccea99550e6c063ff824f258dd6d6d7/linotp/tokens/base/__init__.py#L122-L142
|
|
Pymol-Scripts/Pymol-script-repo
|
bcd7bb7812dc6db1595953dfa4471fa15fb68c77
|
modules/pdb2pqr/src/psize.py
|
python
|
Psize.setLength
|
(self, maxlen, minlen)
|
return self.olen
|
Compute molecule dimensions
|
Compute molecule dimensions
|
[
"Compute",
"molecule",
"dimensions"
] |
def setLength(self, maxlen, minlen):
""" Compute molecule dimensions """
for i in range(3):
self.olen[i] = maxlen[i] - minlen[i]
if self.olen[i] < 0.1:
self.olen[i] = 0.1
return self.olen
|
[
"def",
"setLength",
"(",
"self",
",",
"maxlen",
",",
"minlen",
")",
":",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"self",
".",
"olen",
"[",
"i",
"]",
"=",
"maxlen",
"[",
"i",
"]",
"-",
"minlen",
"[",
"i",
"]",
"if",
"self",
".",
"olen",
"[",
"i",
"]",
"<",
"0.1",
":",
"self",
".",
"olen",
"[",
"i",
"]",
"=",
"0.1",
"return",
"self",
".",
"olen"
] |
https://github.com/Pymol-Scripts/Pymol-script-repo/blob/bcd7bb7812dc6db1595953dfa4471fa15fb68c77/modules/pdb2pqr/src/psize.py#L137-L143
|
|
ethereum/trinity
|
6383280c5044feb06695ac2f7bc1100b7bcf4fe0
|
p2p/auth.py
|
python
|
decode_auth_plain
|
(ciphertext: bytes, privkey: datatypes.PrivateKey)
|
return signature, pubkey, nonce, DEVP2P_V4
|
Decode legacy pre-EIP-8 auth message format
|
Decode legacy pre-EIP-8 auth message format
|
[
"Decode",
"legacy",
"pre",
"-",
"EIP",
"-",
"8",
"auth",
"message",
"format"
] |
def decode_auth_plain(ciphertext: bytes, privkey: datatypes.PrivateKey) -> Tuple[
datatypes.Signature, datatypes.PublicKey, bytes, int]:
"""Decode legacy pre-EIP-8 auth message format"""
message = ecies.decrypt(ciphertext, privkey)
if len(message) != AUTH_MSG_LEN:
raise BadAckMessage(f"Unexpected size for auth message: {len(message)}")
signature = keys.Signature(signature_bytes=message[:SIGNATURE_LEN])
pubkey_start = SIGNATURE_LEN + HASH_LEN
pubkey = keys.PublicKey(message[pubkey_start: pubkey_start + PUBKEY_LEN])
nonce_start = pubkey_start + PUBKEY_LEN
nonce = message[nonce_start: nonce_start + HASH_LEN]
return signature, pubkey, nonce, DEVP2P_V4
|
[
"def",
"decode_auth_plain",
"(",
"ciphertext",
":",
"bytes",
",",
"privkey",
":",
"datatypes",
".",
"PrivateKey",
")",
"->",
"Tuple",
"[",
"datatypes",
".",
"Signature",
",",
"datatypes",
".",
"PublicKey",
",",
"bytes",
",",
"int",
"]",
":",
"message",
"=",
"ecies",
".",
"decrypt",
"(",
"ciphertext",
",",
"privkey",
")",
"if",
"len",
"(",
"message",
")",
"!=",
"AUTH_MSG_LEN",
":",
"raise",
"BadAckMessage",
"(",
"f\"Unexpected size for auth message: {len(message)}\"",
")",
"signature",
"=",
"keys",
".",
"Signature",
"(",
"signature_bytes",
"=",
"message",
"[",
":",
"SIGNATURE_LEN",
"]",
")",
"pubkey_start",
"=",
"SIGNATURE_LEN",
"+",
"HASH_LEN",
"pubkey",
"=",
"keys",
".",
"PublicKey",
"(",
"message",
"[",
"pubkey_start",
":",
"pubkey_start",
"+",
"PUBKEY_LEN",
"]",
")",
"nonce_start",
"=",
"pubkey_start",
"+",
"PUBKEY_LEN",
"nonce",
"=",
"message",
"[",
"nonce_start",
":",
"nonce_start",
"+",
"HASH_LEN",
"]",
"return",
"signature",
",",
"pubkey",
",",
"nonce",
",",
"DEVP2P_V4"
] |
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/p2p/auth.py#L295-L306
|
|
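The body is pure offset arithmetic over the decrypted plaintext. The record does not show the constants, but assuming the conventional devp2p v4 sizes (65-byte signature, 32-byte hash/nonce, 64-byte uncompressed public key — treat these values as assumptions), the slice boundaries work out like this:

# Assumed devp2p v4 sizes; only the names appear in the record above.
SIGNATURE_LEN = 65
HASH_LEN = 32
PUBKEY_LEN = 64

pubkey_start = SIGNATURE_LEN + HASH_LEN   # 97
nonce_start = pubkey_start + PUBKEY_LEN   # 161
print(pubkey_start, nonce_start, nonce_start + HASH_LEN)  # 97 161 193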
JeremyCCHsu/vae-npvc
|
94a83b33bf17593aa402cb38408fdfad1339a120
|
util/layers.py
|
python
|
lrelu
|
(x, leak=0.02, name="lrelu")
|
return tf.maximum(x, leak*x, name=name)
|
Leaky ReLU
|
Leaky ReLU
|
[
"Leaky",
"ReLU"
] |
def lrelu(x, leak=0.02, name="lrelu"):
    ''' Leaky ReLU '''
    return tf.maximum(x, leak*x, name=name)
|
[
"def",
"lrelu",
"(",
"x",
",",
"leak",
"=",
"0.02",
",",
"name",
"=",
"\"lrelu\"",
")",
":",
"return",
"tf",
".",
"maximum",
"(",
"x",
",",
"leak",
"*",
"x",
",",
"name",
"=",
"name",
")"
] |
https://github.com/JeremyCCHsu/vae-npvc/blob/94a83b33bf17593aa402cb38408fdfad1339a120/util/layers.py#L147-L149
|
|
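tf.maximum(x, leak*x) is the whole trick: for positive inputs the identity branch wins, for negative inputs the scaled copy wins. A NumPy restatement for a quick sanity check:

import numpy as np

def lrelu_np(x, leak=0.02):
    # max(x, leak*x) == x for x >= 0, leak*x for x < 0 (with 0 < leak < 1)
    return np.maximum(x, leak * x)

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(lrelu_np(x))  # [-0.04 -0.01  0.    1.5 ]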
pantsbuild/pex
|
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
|
pex/vendor/_vendored/setuptools/setuptools/config.py
|
python
|
ConfigOptionsHandler.parse_section_extras_require
|
(self, section_options)
|
Parses `extras_require` configuration file section.
:param dict section_options:
|
Parses `extras_require` configuration file section.
|
[
"Parses",
"extras_require",
"configuration",
"file",
"section",
"."
] |
def parse_section_extras_require(self, section_options):
    """Parses `extras_require` configuration file section.
    :param dict section_options:
    """
    parse_list = partial(self._parse_list, separator=';')
    self['extras_require'] = self._parse_section_to_dict(
        section_options, parse_list)
|
[
"def",
"parse_section_extras_require",
"(",
"self",
",",
"section_options",
")",
":",
"parse_list",
"=",
"partial",
"(",
"self",
".",
"_parse_list",
",",
"separator",
"=",
"';'",
")",
"self",
"[",
"'extras_require'",
"]",
"=",
"self",
".",
"_parse_section_to_dict",
"(",
"section_options",
",",
"parse_list",
")"
] |
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/setuptools/setuptools/config.py#L668-L675
|
||
vivisect/vivisect
|
37b0b655d8dedfcf322e86b0f144b096e48d547e
|
envi/archs/arm/emu.py
|
python
|
ArmEmulator.integerSubtraction
|
(self, op)
|
return self.intSubBase(src1, src2, Sflag)
|
Do the core of integer subtraction but only *return* the
resulting value rather than assigning it.
(allows cmp and sub to use the same code)
|
Do the core of integer subtraction but only *return* the
resulting value rather than assigning it.
(allows cmp and sub to use the same code)
|
[
"Do",
"the",
"core",
"of",
"integer",
"subtraction",
"but",
"only",
"*",
"return",
"*",
"the",
"resulting",
"value",
"rather",
"than",
"assigning",
"it",
".",
"(",
"allows",
"cmp",
"and",
"sub",
"to",
"use",
"the",
"same",
"code",
")"
] |
def integerSubtraction(self, op):
    """
    Do the core of integer subtraction but only *return* the
    resulting value rather than assigning it.
    (allows cmp and sub to use the same code)
    """
    # Src op gets sign extended to dst
    #FIXME account for same operand with zero result for PDE
    src1 = self.getOperValue(op, 1)
    src2 = self.getOperValue(op, 2)
    Sflag = op.iflags & IF_PSR_S
    if src1 is None or src2 is None:
        self.undefFlags()
        return None
    return self.intSubBase(src1, src2, Sflag)
|
[
"def",
"integerSubtraction",
"(",
"self",
",",
"op",
")",
":",
"# Src op gets sign extended to dst",
"#FIXME account for same operand with zero result for PDE",
"src1",
"=",
"self",
".",
"getOperValue",
"(",
"op",
",",
"1",
")",
"src2",
"=",
"self",
".",
"getOperValue",
"(",
"op",
",",
"2",
")",
"Sflag",
"=",
"op",
".",
"iflags",
"&",
"IF_PSR_S",
"if",
"src1",
"is",
"None",
"or",
"src2",
"is",
"None",
":",
"self",
".",
"undefFlags",
"(",
")",
"return",
"None",
"return",
"self",
".",
"intSubBase",
"(",
"src1",
",",
"src2",
",",
"Sflag",
")"
] |
https://github.com/vivisect/vivisect/blob/37b0b655d8dedfcf322e86b0f144b096e48d547e/envi/archs/arm/emu.py#L496-L512
|
|
openstack/octavia
|
27e5b27d31c695ba72fb6750de2bdafd76e0d7d9
|
octavia/controller/worker/v1/controller_worker.py
|
python
|
ControllerWorker.create_health_monitor
|
(self, health_monitor_id)
|
Creates a health monitor.
:param pool_id: ID of the pool to create a health monitor on
:returns: None
:raises NoResultFound: Unable to find the object
|
Creates a health monitor.
|
[
"Creates",
"a",
"health",
"monitor",
"."
] |
def create_health_monitor(self, health_monitor_id):
    """Creates a health monitor.
    :param pool_id: ID of the pool to create a health monitor on
    :returns: None
    :raises NoResultFound: Unable to find the object
    """
    health_mon = self._health_mon_repo.get(db_apis.get_session(),
                                           id=health_monitor_id)
    if not health_mon:
        LOG.warning('Failed to fetch %s %s from DB. Retrying for up to '
                    '60 seconds.', 'health_monitor', health_monitor_id)
        raise db_exceptions.NoResultFound
    pool = health_mon.pool
    listeners = pool.listeners
    pool.health_monitor = health_mon
    load_balancer = pool.load_balancer
    create_hm_tf = self.taskflow_load(
        self._health_monitor_flows.get_create_health_monitor_flow(),
        store={constants.HEALTH_MON: health_mon,
               constants.POOL: pool,
               constants.LISTENERS: listeners,
               constants.LOADBALANCER: load_balancer})
    with tf_logging.DynamicLoggingListener(create_hm_tf,
                                           log=LOG):
        create_hm_tf.run()
|
[
"def",
"create_health_monitor",
"(",
"self",
",",
"health_monitor_id",
")",
":",
"health_mon",
"=",
"self",
".",
"_health_mon_repo",
".",
"get",
"(",
"db_apis",
".",
"get_session",
"(",
")",
",",
"id",
"=",
"health_monitor_id",
")",
"if",
"not",
"health_mon",
":",
"LOG",
".",
"warning",
"(",
"'Failed to fetch %s %s from DB. Retrying for up to '",
"'60 seconds.'",
",",
"'health_monitor'",
",",
"health_monitor_id",
")",
"raise",
"db_exceptions",
".",
"NoResultFound",
"pool",
"=",
"health_mon",
".",
"pool",
"listeners",
"=",
"pool",
".",
"listeners",
"pool",
".",
"health_monitor",
"=",
"health_mon",
"load_balancer",
"=",
"pool",
".",
"load_balancer",
"create_hm_tf",
"=",
"self",
".",
"taskflow_load",
"(",
"self",
".",
"_health_monitor_flows",
".",
"get_create_health_monitor_flow",
"(",
")",
",",
"store",
"=",
"{",
"constants",
".",
"HEALTH_MON",
":",
"health_mon",
",",
"constants",
".",
"POOL",
":",
"pool",
",",
"constants",
".",
"LISTENERS",
":",
"listeners",
",",
"constants",
".",
"LOADBALANCER",
":",
"load_balancer",
"}",
")",
"with",
"tf_logging",
".",
"DynamicLoggingListener",
"(",
"create_hm_tf",
",",
"log",
"=",
"LOG",
")",
":",
"create_hm_tf",
".",
"run",
"(",
")"
] |
https://github.com/openstack/octavia/blob/27e5b27d31c695ba72fb6750de2bdafd76e0d7d9/octavia/controller/worker/v1/controller_worker.py#L116-L143
|
||
roclark/sportsipy
|
c19f545d3376d62ded6304b137dc69238ac620a9
|
sportsipy/nba/roster.py
|
python
|
Player.percentage_zero_to_three_footers
|
(self)
|
return self._percentage_zero_to_three_footers
|
Returns a ``float`` of the percentage of shots the player takes from
zero to three feet from the basket. Percentage ranges from 0-1.
|
Returns a ``float`` of the percentage of shots the player takes from
zero to three feet from the basket. Percentage ranges from 0-1.
|
[
"Returns",
"a",
"float",
"of",
"the",
"percentage",
"of",
"shots",
"the",
"player",
"takes",
"from",
"zero",
"to",
"three",
"feet",
"from",
"the",
"basket",
".",
"Percentage",
"ranges",
"from",
"0",
"-",
"1",
"."
] |
def percentage_zero_to_three_footers(self):
    """
    Returns a ``float`` of the percentage of shots the player takes from
    zero to three feet from the basket. Percentage ranges from 0-1.
    """
    return self._percentage_zero_to_three_footers
|
[
"def",
"percentage_zero_to_three_footers",
"(",
"self",
")",
":",
"return",
"self",
".",
"_percentage_zero_to_three_footers"
] |
https://github.com/roclark/sportsipy/blob/c19f545d3376d62ded6304b137dc69238ac620a9/sportsipy/nba/roster.py#L1053-L1058
|
|
openai/mujoco-worldgen
|
39f52b1b47aed499925a6a214b58bdbdb4e2f75e
|
mujoco_worldgen/objs/obj_from_xml.py
|
python
|
ObjFromXML._get_xml_dir_path
|
(self, *args)
|
return worldgen_path('assets/xmls', *args)
|
If you want to use custom XMLs, subclass this class and overwrite this
method to return the path to your 'xmls' folder
|
If you want to use custom XMLs, subclass this class and overwrite this
method to return the path to your 'xmls' folder
|
[
"If",
"you",
"want",
"to",
"use",
"custom",
"XMLs",
"subclass",
"this",
"class",
"and",
"overwrite",
"this",
"method",
"to",
"return",
"the",
"path",
"to",
"your",
"xmls",
"folder"
] |
def _get_xml_dir_path(self, *args):
    '''
    If you want to use custom XMLs, subclass this class and overwrite this
    method to return the path to your 'xmls' folder
    '''
    return worldgen_path('assets/xmls', *args)
|
[
"def",
"_get_xml_dir_path",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"worldgen_path",
"(",
"'assets/xmls'",
",",
"*",
"args",
")"
] |
https://github.com/openai/mujoco-worldgen/blob/39f52b1b47aed499925a6a214b58bdbdb4e2f75e/mujoco_worldgen/objs/obj_from_xml.py#L150-L155
|
|
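The docstring names the extension point explicitly: subclass ObjFromXML and override _get_xml_dir_path to point worldgen at your own assets. A minimal sketch of such a subclass — the my_project/xmls path is invented for illustration:

import os

class ObjFromMyXML(ObjFromXML):
    def _get_xml_dir_path(self, *args):
        # look up XMLs in a project-local folder instead of assets/xmls
        return os.path.join('my_project', 'xmls', *args)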
OpenMDAO/OpenMDAO1
|
791a6fbbb7d266f3dcbc1f7bde3ae03a70dc1317
|
openmdao/core/petsc_impl.py
|
python
|
PetscDataTransfer.transfer
|
(self, srcvec, tgtvec, mode='fwd', deriv=False)
|
Performs data transfer between a distributed source vector and
a distributed target vector.
Args
----
srcvec : `VecWrapper`
Variables that are the source of the transfer in fwd mode and
the destination of the transfer in rev mode.
tgtvec : `VecWrapper`
Variables that are the destination of the transfer in fwd mode and
the source of the transfer in rev mode.
mode : 'fwd' or 'rev', optional
Direction of the data transfer, source to target ('fwd', the default)
or target to source ('rev').
deriv : bool, optional
If True, this is a derivative data transfer, so no pass_by_obj
variables will be transferred.
|
Performs data transfer between a distributed source vector and
a distributed target vector.
|
[
"Performs",
"data",
"transfer",
"between",
"a",
"distributed",
"source",
"vector",
"and",
"a",
"distributed",
"target",
"vector",
"."
] |
def transfer(self, srcvec, tgtvec, mode='fwd', deriv=False):
    """Performs data transfer between a distributed source vector and
    a distributed target vector.
    Args
    ----
    srcvec : `VecWrapper`
        Variables that are the source of the transfer in fwd mode and
        the destination of the transfer in rev mode.
    tgtvec : `VecWrapper`
        Variables that are the destination of the transfer in fwd mode and
        the source of the transfer in rev mode.
    mode : 'fwd' or 'rev', optional
        Direction of the data transfer, source to target ('fwd', the default)
        or target to source ('rev').
    deriv : bool, optional
        If True, this is a derivative data transfer, so no pass_by_obj
        variables will be transferred.
    """
    if mode == 'rev':
        # in reverse mode, srcvec and tgtvec are switched. Note, we only
        # run in reverse for derivatives, and derivatives accumulate from
        # all targets. This does not involve pass_by_object.
        if trace:  # pragma: no cover
            conns = ['%s <-- %s' % (u, v) for v, u in self.vec_conns]
            debug("%s rev scatter %s %s <-- %s" %
                  (srcvec._sysdata.pathname, conns, self.src_idxs, self.tgt_idxs))
            debug("%s: srcvec = %s" % (tgtvec._sysdata.pathname,
                                       tgtvec.petsc_vec.array))
        self.scatter.scatter(tgtvec.petsc_vec, srcvec.petsc_vec, True, True)
        if trace:  # pragma: no cover
            debug("%s: tgtvec = %s (DONE)" % (srcvec._sysdata.pathname,
                                              srcvec.petsc_vec.array))
    else:
        # forward mode, source to target including pass_by_object
        if trace:  # pragma: no cover
            conns = ['%s --> %s' % (u, v) for v, u in self.vec_conns]
            debug("%s fwd scatter %s %s --> %s" %
                  (srcvec._sysdata.pathname, conns, self.src_idxs, self.tgt_idxs))
            debug("%s: srcvec = %s" % (srcvec._sysdata.pathname,
                                       srcvec.petsc_vec.array))
        self.scatter.scatter(srcvec.petsc_vec, tgtvec.petsc_vec, False, False)
        if tgtvec._probdata.in_complex_step:
            self.scatter.scatter(srcvec.imag_petsc_vec, tgtvec.imag_petsc_vec,
                                 False, False)
        if trace:  # pragma: no cover
            debug("%s: tgtvec = %s (DONE)" % (tgtvec._sysdata.pathname,
                                              tgtvec.petsc_vec.array))
        if not deriv and self.byobj_conns:
            comm = self.sysdata.comm
            iproc = comm.rank
            mylocals = self.sysdata.all_locals[iproc]
            for itag, (tgt, src) in enumerate(self.byobj_conns):
                # if we're the owning rank of the src, send it out to
                # systems that don't have it locally.
                if iproc == self.sysdata.owning_ranks[src]:
                    # grab local value
                    val = srcvec[src]
                    for i, localvars in enumerate(self.sysdata.all_locals):
                        if i != iproc and src not in localvars and tgt in localvars:
                            if trace: debug("sending %s" % val)
                            comm.send(val, dest=i, tag=itag)
                            if trace: debug("DONE sending %s" % val)
            # ensure that all src values have been sent before we receive
            # any in order to avoid possible race conditions
            if trace: debug("waiting on comm.barrier")
            comm.barrier()
            if trace: debug("comm.barrier DONE")
            for itag, (tgt, src) in enumerate(self.byobj_conns):
                # if we don't have the value locally, pull it across using MPI
                if tgt in mylocals:
                    if src in mylocals:
                        if isinstance(tgtvec[tgt], FileRef):
                            tgtvec[tgt]._assign_to(srcvec[src])
                        else:
                            tgtvec[tgt] = srcvec[src]
                    else:
                        if trace: debug("receiving to %s" % tgtvec[tgt])
                        val = comm.recv(source=self.sysdata.owning_ranks[src],
                                        tag=itag)
                        if trace: debug("received %s" % val)
                        if isinstance(tgtvec[tgt], FileRef):
                            tgtvec[tgt]._assign_to(val)
                        else:
                            tgtvec[tgt] = val
|
[
"def",
"transfer",
"(",
"self",
",",
"srcvec",
",",
"tgtvec",
",",
"mode",
"=",
"'fwd'",
",",
"deriv",
"=",
"False",
")",
":",
"if",
"mode",
"==",
"'rev'",
":",
"# in reverse mode, srcvec and tgtvec are switched. Note, we only",
"# run in reverse for derivatives, and derivatives accumulate from",
"# all targets. This does not involve pass_by_object.",
"if",
"trace",
":",
"# pragma: no cover",
"conns",
"=",
"[",
"'%s <-- %s'",
"%",
"(",
"u",
",",
"v",
")",
"for",
"v",
",",
"u",
"in",
"self",
".",
"vec_conns",
"]",
"debug",
"(",
"\"%s rev scatter %s %s <-- %s\"",
"%",
"(",
"srcvec",
".",
"_sysdata",
".",
"pathname",
",",
"conns",
",",
"self",
".",
"src_idxs",
",",
"self",
".",
"tgt_idxs",
")",
")",
"debug",
"(",
"\"%s: srcvec = %s\"",
"%",
"(",
"tgtvec",
".",
"_sysdata",
".",
"pathname",
",",
"tgtvec",
".",
"petsc_vec",
".",
"array",
")",
")",
"self",
".",
"scatter",
".",
"scatter",
"(",
"tgtvec",
".",
"petsc_vec",
",",
"srcvec",
".",
"petsc_vec",
",",
"True",
",",
"True",
")",
"if",
"trace",
":",
"# pragma: no cover",
"debug",
"(",
"\"%s: tgtvec = %s (DONE)\"",
"%",
"(",
"srcvec",
".",
"_sysdata",
".",
"pathname",
",",
"srcvec",
".",
"petsc_vec",
".",
"array",
")",
")",
"else",
":",
"# forward mode, source to target including pass_by_object",
"if",
"trace",
":",
"# pragma: no cover",
"conns",
"=",
"[",
"'%s --> %s'",
"%",
"(",
"u",
",",
"v",
")",
"for",
"v",
",",
"u",
"in",
"self",
".",
"vec_conns",
"]",
"debug",
"(",
"\"%s fwd scatter %s %s --> %s\"",
"%",
"(",
"srcvec",
".",
"_sysdata",
".",
"pathname",
",",
"conns",
",",
"self",
".",
"src_idxs",
",",
"self",
".",
"tgt_idxs",
")",
")",
"debug",
"(",
"\"%s: srcvec = %s\"",
"%",
"(",
"srcvec",
".",
"_sysdata",
".",
"pathname",
",",
"srcvec",
".",
"petsc_vec",
".",
"array",
")",
")",
"self",
".",
"scatter",
".",
"scatter",
"(",
"srcvec",
".",
"petsc_vec",
",",
"tgtvec",
".",
"petsc_vec",
",",
"False",
",",
"False",
")",
"if",
"tgtvec",
".",
"_probdata",
".",
"in_complex_step",
":",
"self",
".",
"scatter",
".",
"scatter",
"(",
"srcvec",
".",
"imag_petsc_vec",
",",
"tgtvec",
".",
"imag_petsc_vec",
",",
"False",
",",
"False",
")",
"if",
"trace",
":",
"# pragma: no cover",
"debug",
"(",
"\"%s: tgtvec = %s (DONE)\"",
"%",
"(",
"tgtvec",
".",
"_sysdata",
".",
"pathname",
",",
"tgtvec",
".",
"petsc_vec",
".",
"array",
")",
")",
"if",
"not",
"deriv",
"and",
"self",
".",
"byobj_conns",
":",
"comm",
"=",
"self",
".",
"sysdata",
".",
"comm",
"iproc",
"=",
"comm",
".",
"rank",
"mylocals",
"=",
"self",
".",
"sysdata",
".",
"all_locals",
"[",
"iproc",
"]",
"for",
"itag",
",",
"(",
"tgt",
",",
"src",
")",
"in",
"enumerate",
"(",
"self",
".",
"byobj_conns",
")",
":",
"# if we're the owning rank of the src, send it out to",
"# systems that don't have it locally.",
"if",
"iproc",
"==",
"self",
".",
"sysdata",
".",
"owning_ranks",
"[",
"src",
"]",
":",
"# grab local value",
"val",
"=",
"srcvec",
"[",
"src",
"]",
"for",
"i",
",",
"localvars",
"in",
"enumerate",
"(",
"self",
".",
"sysdata",
".",
"all_locals",
")",
":",
"if",
"i",
"!=",
"iproc",
"and",
"src",
"not",
"in",
"localvars",
"and",
"tgt",
"in",
"localvars",
":",
"if",
"trace",
":",
"debug",
"(",
"\"sending %s\"",
"%",
"val",
")",
"comm",
".",
"send",
"(",
"val",
",",
"dest",
"=",
"i",
",",
"tag",
"=",
"itag",
")",
"if",
"trace",
":",
"debug",
"(",
"\"DONE sending %s\"",
"%",
"val",
")",
"# ensure that all src values have been sent before we receive",
"# any in order to avoid possible race conditions",
"if",
"trace",
":",
"debug",
"(",
"\"waiting on comm.barrier\"",
")",
"comm",
".",
"barrier",
"(",
")",
"if",
"trace",
":",
"debug",
"(",
"\"comm.barrier DONE\"",
")",
"for",
"itag",
",",
"(",
"tgt",
",",
"src",
")",
"in",
"enumerate",
"(",
"self",
".",
"byobj_conns",
")",
":",
"# if we don't have the value locally, pull it across using MPI",
"if",
"tgt",
"in",
"mylocals",
":",
"if",
"src",
"in",
"mylocals",
":",
"if",
"isinstance",
"(",
"tgtvec",
"[",
"tgt",
"]",
",",
"FileRef",
")",
":",
"tgtvec",
"[",
"tgt",
"]",
".",
"_assign_to",
"(",
"srcvec",
"[",
"src",
"]",
")",
"else",
":",
"tgtvec",
"[",
"tgt",
"]",
"=",
"srcvec",
"[",
"src",
"]",
"else",
":",
"if",
"trace",
":",
"debug",
"(",
"\"receiving to %s\"",
"%",
"tgtvec",
"[",
"tgt",
"]",
")",
"val",
"=",
"comm",
".",
"recv",
"(",
"source",
"=",
"self",
".",
"sysdata",
".",
"owning_ranks",
"[",
"src",
"]",
",",
"tag",
"=",
"itag",
")",
"if",
"trace",
":",
"debug",
"(",
"\"received %s\"",
"%",
"val",
")",
"if",
"isinstance",
"(",
"tgtvec",
"[",
"tgt",
"]",
",",
"FileRef",
")",
":",
"tgtvec",
"[",
"tgt",
"]",
".",
"_assign_to",
"(",
"val",
")",
"else",
":",
"tgtvec",
"[",
"tgt",
"]",
"=",
"val"
] |
https://github.com/OpenMDAO/OpenMDAO1/blob/791a6fbbb7d266f3dcbc1f7bde3ae03a70dc1317/openmdao/core/petsc_impl.py#L394-L485
|
||
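The pass_by_object branch follows a classic MPI shape: every owning rank posts all of its sends, everyone synchronizes on a barrier, and only then do the non-owners receive — which is what the in-code comment about race conditions is guarding against. A stripped-down mpi4py sketch of that send / barrier / recv pattern (the payload and ranks are illustrative, not OpenMDAO's):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# run under e.g. `mpiexec -n 2`; rank 0 owns the value, rank 1 needs it
if rank == 0:
    comm.send({'payload': 42}, dest=1, tag=0)  # owners send first
comm.barrier()                                 # all sends are posted
if rank == 1:
    print(comm.recv(source=0, tag=0))          # then non-owners receive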
googleads/google-ads-python
|
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
|
google/ads/googleads/v9/services/services/customer_conversion_goal_service/client.py
|
python
|
CustomerConversionGoalServiceClient.transport
|
(self)
|
return self._transport
|
Return the transport used by the client instance.
Returns:
CustomerConversionGoalServiceTransport: The transport used by the client instance.
|
Return the transport used by the client instance.
|
[
"Return",
"the",
"transport",
"used",
"by",
"the",
"client",
"instance",
"."
] |
def transport(self) -> CustomerConversionGoalServiceTransport:
    """Return the transport used by the client instance.
    Returns:
        CustomerConversionGoalServiceTransport: The transport used by the client instance.
    """
    return self._transport
|
[
"def",
"transport",
"(",
"self",
")",
"->",
"CustomerConversionGoalServiceTransport",
":",
"return",
"self",
".",
"_transport"
] |
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v9/services/services/customer_conversion_goal_service/client.py#L159-L165
|
|
leapcode/bitmask_client
|
d2fe20df24fc6eaf146fa5ce1e847de6ab515688
|
pkg/osx/daemon/daemon.py
|
python
|
set_signal_handlers
|
(signal_handler_map)
|
Set the signal handlers as specified.
:param signal_handler_map: A map from signal number to handler
object.
:return: ``None``.
See the `signal` module for details on signal numbers and signal
handlers.
|
Set the signal handlers as specified.
|
[
"Set",
"the",
"signal",
"handlers",
"as",
"specified",
"."
] |
def set_signal_handlers(signal_handler_map):
    """ Set the signal handlers as specified.
    :param signal_handler_map: A map from signal number to handler
        object.
    :return: ``None``.
    See the `signal` module for details on signal numbers and signal
    handlers.
    """
    for (signal_number, handler) in signal_handler_map.items():
        signal.signal(signal_number, handler)
|
[
"def",
"set_signal_handlers",
"(",
"signal_handler_map",
")",
":",
"for",
"(",
"signal_number",
",",
"handler",
")",
"in",
"signal_handler_map",
".",
"items",
"(",
")",
":",
"signal",
".",
"signal",
"(",
"signal_number",
",",
"handler",
")"
] |
https://github.com/leapcode/bitmask_client/blob/d2fe20df24fc6eaf146fa5ce1e847de6ab515688/pkg/osx/daemon/daemon.py#L872-L884
|
||
caiiiac/Machine-Learning-with-Python
|
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
|
MachineLearning/venv/lib/python3.5/site-packages/pyparsing.py
|
python
|
ParserElement.__and__
|
(self, other )
|
return Each( [ self, other ] )
|
Implementation of & operator - returns C{L{Each}}
|
Implementation of & operator - returns C{L{Each}}
|
[
"Implementation",
"of",
"&",
"operator",
"-",
"returns",
"C",
"{",
"L",
"{",
"Each",
"}}"
] |
def __and__(self, other ):
    """
    Implementation of & operator - returns C{L{Each}}
    """
    if isinstance( other, basestring ):
        other = ParserElement._literalStringClass( other )
    if not isinstance( other, ParserElement ):
        warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                      SyntaxWarning, stacklevel=2)
        return None
    return Each( [ self, other ] )
|
[
"def",
"__and__",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"basestring",
")",
":",
"other",
"=",
"ParserElement",
".",
"_literalStringClass",
"(",
"other",
")",
"if",
"not",
"isinstance",
"(",
"other",
",",
"ParserElement",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Cannot combine element of type %s with ParserElement\"",
"%",
"type",
"(",
"other",
")",
",",
"SyntaxWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"None",
"return",
"Each",
"(",
"[",
"self",
",",
"other",
"]",
")"
] |
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/pyparsing.py#L1974-L1984
|
|
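So in pyparsing, a & b builds an Each, which requires all of its parts to match but in any order. A small usage sketch against the 2.x-era API shown in the record:

from pyparsing import Word, alphas, nums

name = Word(alphas)("name")
age = Word(nums)("age")
person = name & age  # __and__ -> Each([name, age])

print(person.parseString("Bob 42"))  # matches
print(person.parseString("42 Bob"))  # also matches: order-independent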
pyg-team/pytorch_geometric
|
b920e9a3a64e22c8356be55301c88444ff051cae
|
torch_geometric/data/hetero_data.py
|
python
|
HeteroData.__delattr__
|
(self, key: str)
|
[] |
def __delattr__(self, key: str):
    delattr(self._global_store, key)
|
[
"def",
"__delattr__",
"(",
"self",
",",
"key",
":",
"str",
")",
":",
"delattr",
"(",
"self",
".",
"_global_store",
",",
"key",
")"
] |
https://github.com/pyg-team/pytorch_geometric/blob/b920e9a3a64e22c8356be55301c88444ff051cae/torch_geometric/data/hetero_data.py#L128-L129
|
||||
rcorcs/NatI
|
fdf014f4292afdc95250add7b6658468043228e1
|
en/parser/nltk_lite/cluster/em.py
|
python
|
cosine_distance
|
(u, v)
|
return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))
|
Returns the cosine of the angle between vectors v and u. This is equal to
u.v / |u||v|.
|
Returns the cosine of the angle between vectors v and u. This is equal to
u.v / |u||v|.
|
[
"Returns",
"the",
"cosine",
"of",
"the",
"angle",
"between",
"vectors",
"v",
"and",
"u",
".",
"This",
"is",
"equal",
"to",
"u",
".",
"v",
"/",
"|u||v|",
"."
] |
def cosine_distance(u, v):
    """
    Returns the cosine of the angle between vectors v and u. This is equal to
    u.v / |u||v|.
    """
    return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))
|
[
"def",
"cosine_distance",
"(",
"u",
",",
"v",
")",
":",
"return",
"numpy",
".",
"dot",
"(",
"u",
",",
"v",
")",
"/",
"(",
"math",
".",
"sqrt",
"(",
"numpy",
".",
"dot",
"(",
"u",
",",
"u",
")",
")",
"*",
"math",
".",
"sqrt",
"(",
"numpy",
".",
"dot",
"(",
"v",
",",
"v",
")",
")",
")"
] |
https://github.com/rcorcs/NatI/blob/fdf014f4292afdc95250add7b6658468043228e1/en/parser/nltk_lite/cluster/em.py#L174-L179
|
|
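Despite its name, the function returns the cosine similarity u.v / |u||v| itself, not 1 minus it: parallel vectors give 1.0 and orthogonal vectors give 0.0.

import math
import numpy

print(cosine_distance(numpy.array([1.0, 0.0]), numpy.array([2.0, 0.0])))  # 1.0
print(cosine_distance(numpy.array([1.0, 0.0]), numpy.array([0.0, 3.0])))  # 0.0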
crossbario/autobahn-python
|
fa9f2da0c5005574e63456a3a04f00e405744014
|
autobahn/xbr/_eip712_channel_open.py
|
python
|
sign_eip712_channel_open
|
(eth_privkey: bytes, chainId: int, verifyingContract: bytes, ctype: int,
openedAt: int, marketId: bytes, channelId: bytes, actor: bytes, delegate: bytes,
marketmaker: bytes, recipient: bytes, amount: int)
|
return sign(eth_privkey, data)
|
:param eth_privkey: Ethereum address of buyer (a raw 20 bytes Ethereum address).
:type eth_privkey: bytes
:return: The signature according to EIP712 (32+32+1 raw bytes).
:rtype: bytes
|
[] |
def sign_eip712_channel_open(eth_privkey: bytes, chainId: int, verifyingContract: bytes, ctype: int,
                             openedAt: int, marketId: bytes, channelId: bytes, actor: bytes, delegate: bytes,
                             marketmaker: bytes, recipient: bytes, amount: int) -> bytes:
    """
    :param eth_privkey: Ethereum address of buyer (a raw 20 bytes Ethereum address).
    :type eth_privkey: bytes
    :return: The signature according to EIP712 (32+32+1 raw bytes).
    :rtype: bytes
    """
    assert is_eth_privkey(eth_privkey)
    data = _create_eip712_channel_open(chainId, verifyingContract, ctype, openedAt, marketId, channelId,
                                       actor, delegate, marketmaker, recipient, amount)
    return sign(eth_privkey, data)
|
[
"def",
"sign_eip712_channel_open",
"(",
"eth_privkey",
":",
"bytes",
",",
"chainId",
":",
"int",
",",
"verifyingContract",
":",
"bytes",
",",
"ctype",
":",
"int",
",",
"openedAt",
":",
"int",
",",
"marketId",
":",
"bytes",
",",
"channelId",
":",
"bytes",
",",
"actor",
":",
"bytes",
",",
"delegate",
":",
"bytes",
",",
"marketmaker",
":",
"bytes",
",",
"recipient",
":",
"bytes",
",",
"amount",
":",
"int",
")",
"->",
"bytes",
":",
"assert",
"is_eth_privkey",
"(",
"eth_privkey",
")",
"data",
"=",
"_create_eip712_channel_open",
"(",
"chainId",
",",
"verifyingContract",
",",
"ctype",
",",
"openedAt",
",",
"marketId",
",",
"channelId",
",",
"actor",
",",
"delegate",
",",
"marketmaker",
",",
"recipient",
",",
"amount",
")",
"return",
"sign",
"(",
"eth_privkey",
",",
"data",
")"
] |
https://github.com/crossbario/autobahn-python/blob/fa9f2da0c5005574e63456a3a04f00e405744014/autobahn/xbr/_eip712_channel_open.py#L131-L146
|
||
PaddlePaddle/PaddleX
|
2bab73f81ab54e328204e7871e6ae4a82e719f5d
|
static/paddlex/interpret/as_data_reader/readers.py
|
python
|
preprocess_image
|
(img, random_mirror=False)
|
return img
|
centered, scaled by 1/255.
:param img: np.array: shape: [ns, h, w, 3], color order: rgb.
:return: np.array: shape: [ns, h, w, 3]
|
centered, scaled by 1/255.
:param img: np.array: shape: [ns, h, w, 3], color order: rgb.
:return: np.array: shape: [ns, h, w, 3]
|
[
"centered",
"scaled",
"by",
"1",
"/",
"255",
".",
":",
"param",
"img",
":",
"np",
".",
"array",
":",
"shape",
":",
"[",
"ns",
"h",
"w",
"3",
"]",
"color",
"order",
":",
"rgb",
".",
":",
"return",
":",
"np",
".",
"array",
":",
"shape",
":",
"[",
"ns",
"h",
"w",
"3",
"]"
] |
def preprocess_image(img, random_mirror=False):
    """
    centered, scaled by 1/255.
    :param img: np.array: shape: [ns, h, w, 3], color order: rgb.
    :return: np.array: shape: [ns, h, w, 3]
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    # transpose to [ns, 3, h, w]
    img = img.astype('float32').transpose((0, 3, 1, 2)) / 255
    img_mean = np.array(mean).reshape((3, 1, 1))
    img_std = np.array(std).reshape((3, 1, 1))
    img -= img_mean
    img /= img_std
    if random_mirror:
        mirror = int(np.random.uniform(0, 2))
        if mirror == 1:
            img = img[:, :, ::-1, :]
    return img
|
[
"def",
"preprocess_image",
"(",
"img",
",",
"random_mirror",
"=",
"False",
")",
":",
"mean",
"=",
"[",
"0.485",
",",
"0.456",
",",
"0.406",
"]",
"std",
"=",
"[",
"0.229",
",",
"0.224",
",",
"0.225",
"]",
"# transpose to [ns, 3, h, w]",
"img",
"=",
"img",
".",
"astype",
"(",
"'float32'",
")",
".",
"transpose",
"(",
"(",
"0",
",",
"3",
",",
"1",
",",
"2",
")",
")",
"/",
"255",
"img_mean",
"=",
"np",
".",
"array",
"(",
"mean",
")",
".",
"reshape",
"(",
"(",
"3",
",",
"1",
",",
"1",
")",
")",
"img_std",
"=",
"np",
".",
"array",
"(",
"std",
")",
".",
"reshape",
"(",
"(",
"3",
",",
"1",
",",
"1",
")",
")",
"img",
"-=",
"img_mean",
"img",
"/=",
"img_std",
"if",
"random_mirror",
":",
"mirror",
"=",
"int",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"2",
")",
")",
"if",
"mirror",
"==",
"1",
":",
"img",
"=",
"img",
"[",
":",
",",
":",
",",
":",
":",
"-",
"1",
",",
":",
"]",
"return",
"img"
] |
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/static/paddlex/interpret/as_data_reader/readers.py#L73-L95
|
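The record scales into [0, 1], applies the standard ImageNet mean/std, and moves channels first — so the docstring's stated return shape is stale: after the transpose the output is [ns, 3, h, w], not [ns, h, w, 3]. A quick shape check with a random batch:

import numpy as np

batch = np.random.randint(0, 256, size=(2, 32, 32, 3)).astype('uint8')
out = preprocess_image(batch)
print(out.shape)  # (2, 3, 32, 32): channels-first after the transpose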