Excerpt from a dataset of Python functions. Each record carries the following columns (string-length ranges as reported by the dataset viewer):

- nwo: repository name with owner (strings, 5–106 chars)
- sha: commit hash (strings, 40 chars)
- path: file path within the repository (strings, 4–174 chars)
- language: source language (1 class: python)
- identifier: function or method name (strings, 1–140 chars)
- parameters: parameter list (strings, 0–87.7k chars)
- argument_list: (1 class)
- return_statement: the function's return statement, if any (strings, 0–426k chars)
- docstring: full docstring (strings, 0–64.3k chars)
- docstring_summary: docstring summary (strings, 0–26.3k chars)
- docstring_tokens: tokenized docstring (list)
- function: full function source (strings, 18–4.83M chars)
- function_tokens: tokenized function source (list)
- url: link to the exact source lines on GitHub (strings, 83–304 chars)

nwo: andabi/music-source-separation
sha: ba9aa531ccca08437f1efe5dec1871faebf5c840
path: mir_eval/hierarchy.py
language: python
identifier: _lca
parameters: (intervals_hier, frame_size)
return_statement: return lca_matrix.tocsr()
docstring_summary: Compute the (sparse) least-common-ancestor (LCA) matrix for a hierarchical segmentation.
function:

```python
def _lca(intervals_hier, frame_size):
    '''Compute the (sparse) least-common-ancestor (LCA) matrix for a
    hierarchical segmentation.

    For any pair of frames ``(s, t)``, the LCA is the deepest level in
    the hierarchy such that ``(s, t)`` are contained within a single
    segment at that level.

    Parameters
    ----------
    intervals_hier : list of ndarray
        An ordered list of segment interval arrays.
        The list is assumed to be ordered by increasing specificity (depth).
    frame_size : number
        The length of the sample frames (in seconds)

    Returns
    -------
    lca_matrix : scipy.sparse.csr_matrix
        A sparse matrix such that ``lca_matrix[i, j]`` contains the depth
        of the deepest segment containing frames ``i`` and ``j``.
    '''
    frame_size = float(frame_size)

    # Figure out how many frames we need
    n_start, n_end = _hierarchy_bounds(intervals_hier)
    n = int((_round(n_end, frame_size) -
             _round(n_start, frame_size)) / frame_size)

    # Initialize the LCA matrix
    lca_matrix = scipy.sparse.lil_matrix((n, n), dtype=np.uint8)

    for level, intervals in enumerate(intervals_hier, 1):
        for ival in (_round(np.asarray(intervals),
                            frame_size) / frame_size).astype(int):
            idx = slice(ival[0], ival[1])
            lca_matrix[idx, idx] = level

    return lca_matrix.tocsr()
```

url: https://github.com/andabi/music-source-separation/blob/ba9aa531ccca08437f1efe5dec1871faebf5c840/mir_eval/hierarchy.py#L101-L143

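`_hierarchy_bounds` and `_round` are private mir_eval helpers, so here is a self-contained sketch of the same frame-quantization idea. It assumes intervals start at 0 and align to the frame grid; deeper levels simply overwrite shallower ones in the sparse matrix.

```python
# A minimal, self-contained sketch of the LCA construction above; it inlines
# the rounding that mir_eval delegates to its private helpers, and assumes
# intervals start at 0 and round cleanly to the frame grid.
import numpy as np
import scipy.sparse

def lca_sketch(intervals_hier, frame_size):
    n = int(round(max(ival[-1][-1] for ival in intervals_hier) / frame_size))
    lca = scipy.sparse.lil_matrix((n, n), dtype=np.uint8)
    for level, intervals in enumerate(intervals_hier, 1):
        for start, end in (np.asarray(intervals) / frame_size).astype(int):
            idx = slice(start, end)
            lca[idx, idx] = level  # deeper levels overwrite shallower ones
    return lca.tocsr()

# Two-level segmentation of a 4-second track, with 1-second frames:
coarse = np.array([[0.0, 4.0]])
fine = np.array([[0.0, 2.0], [2.0, 4.0]])
print(lca_sketch([coarse, fine], 1.0).toarray())
# frames in the same fine segment get depth 2, otherwise depth 1
```
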
nwo: joschabach/micropsi2
sha: 74a2642d20da9da1d64acc5e4c11aeabee192a27
path: micropsi_core/world/minecraft/minecraft_graph_locomotion.py
language: python
identifier: MinecraftGraphLocomotion.update_data_sources_and_targets
parameters: (self)
docstring_summary: called on every world calculation step to advance the life of the agent
function:

```python
def update_data_sources_and_targets(self):
    """called on every world calculation step to advance the life of the agent"""
    self.datasources['awake'] = 0 if self.sleeping else 1
    # first thing when spock initialization is done, determine current loco node
    if self.waiting_for_spock:
        # by substitution: spock init is considered done, when its client has a position unlike
        # {'on_ground': False, 'pitch': 0, 'x': 0, 'y': 0, 'yaw': 0, 'stance': 0, 'z': 0}:
        if self.spockplugin.clientinfo.position['y'] != 0. \
                and self.spockplugin.clientinfo.position['x'] != 0:
            self.waiting_for_spock = False
            x = int(self.spockplugin.clientinfo.position['x'])
            y = int(self.spockplugin.clientinfo.position['y'])
            z = int(self.spockplugin.clientinfo.position['z'])
            for k, v in self.loco_nodes.items():
                if abs(x - v['x']) <= self.tp_tolerance and abs(y - v['y']) <= self.tp_tolerance and abs(z - v['z']) <= self.tp_tolerance:
                    self.current_loco_node = self.loco_nodes[k]
            self.last_slept = self.spockplugin.world.age
            if self.current_loco_node is None:
                # bot is outside our graph, teleport to a random graph location to get started.
                target = random.choice(list(self.loco_nodes.keys()))
                self.locomote(target)
            # self.locomote(self.forest_uid)
    else:
        # reset self.datatarget_feedback
        for k in self.datatarget_feedback.keys():
            # reset actions only if not requested anymore
            if k in self.actions:
                if self.datatargets[k] == 0:
                    self.datatarget_feedback[k] = 0.
            else:
                self.datatarget_feedback[k] = 0.

        if not self.spockplugin.is_connected():
            return

        self.datasources['current_location_index'] = self.loco_nodes_indexes.index(self.current_loco_node['name'])

        # health and food are in [0;20]
        self.datasources['health'] = self.spockplugin.clientinfo.health['health'] / 20
        self.datasources['food'] = self.spockplugin.clientinfo.health['food'] / 20
        if self.spockplugin.get_temperature() is not None:
            self.datasources['temperature'] = self.spockplugin.get_temperature()
        self.datasources['food_supply'] = self.spockplugin.count_inventory_item(297)  # count bread

        # compute fatigue: 0.1 per half a day:
        # timeofday = self.spockplugin.world.time_of_day % 24000
        if self.sleeping:
            no_sleep = ((self.sleeping - self.last_slept) // 3000) / 2
        else:
            no_sleep = ((self.spockplugin.world.age - self.last_slept) // 3000) / 2
        fatigue = no_sleep * 0.05
        self.datasources['fatigue'] = round(fatigue, 2)

        self.check_for_action_feedback()

        # read locomotor values, trigger teleportation in the world, and provide action feedback
        # don't trigger another teleportation if the datatarget was on continuously, cf. pipe logic
        if self.datatargets['take_exit_one'] >= 1 and not self.datatarget_history['take_exit_one'] >= 1:
            # if the current node on the transition graph has the selected exit
            if self.current_loco_node['exit_one_uid'] is not None:
                self.register_action(
                    'take_exit_one',
                    partial(self.locomote, self.current_loco_node['exit_one_uid']),
                    partial(self.check_movement_feedback, self.current_loco_node['exit_one_uid'])
                )
            else:
                self.datatarget_feedback['take_exit_one'] = -1.

        if self.datatargets['take_exit_two'] >= 1 and not self.datatarget_history['take_exit_two'] >= 1:
            if self.current_loco_node['exit_two_uid'] is not None:
                self.register_action(
                    'take_exit_two',
                    partial(self.locomote, self.current_loco_node['exit_two_uid']),
                    partial(self.check_movement_feedback, self.current_loco_node['exit_two_uid'])
                )
            else:
                self.datatarget_feedback['take_exit_two'] = -1.

        if self.datatargets['take_exit_three'] >= 1 and not self.datatarget_history['take_exit_three'] >= 1:
            if self.current_loco_node['exit_three_uid'] is not None:
                self.register_action(
                    'take_exit_three',
                    partial(self.locomote, self.current_loco_node['exit_three_uid']),
                    partial(self.check_movement_feedback, self.current_loco_node['exit_three_uid'])
                )
            else:
                self.datatarget_feedback['take_exit_three'] = -1.

        if self.datatargets['eat'] >= 1 and not self.datatarget_history['eat'] >= 1:
            if self.has_bread() and self.datasources['food'] < 1:
                self.register_action(
                    'eat',
                    self.spockplugin.eat,
                    partial(self.check_eat_feedback, self.spockplugin.clientinfo.health['food'])
                )
            else:
                self.datatarget_feedback['eat'] = -1.

        if self.datatargets['sleep'] >= 1 and not self.datatarget_history['sleep'] >= 1:
            if self.check_movement_feedback(self.home_uid) and self.spockplugin.world.time_of_day % 24000 > 12500:
                # we're home and it's night, so we can sleep now:
                self.register_action('sleep', self.sleep, self.check_waking_up)
            else:
                self.datatarget_feedback['sleep'] = -1.

        # update datatarget history
        for k in self.datatarget_history.keys():
            self.datatarget_history[k] = self.datatargets[k]
```

url: https://github.com/joschabach/micropsi2/blob/74a2642d20da9da1d64acc5e4c11aeabee192a27/micropsi_core/world/minecraft/minecraft_graph_locomotion.py#L206-L318

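The fatigue bookkeeping is easy to misread, so here is a worked check of the "0.1 per half a day" comment: a Minecraft day is 24000 ticks, so half a day is 12000 ticks.

```python
# Worked check of the fatigue comment above: one Minecraft half-day is
# 12000 ticks, and fatigue should grow by 0.1 over that span.
def fatigue(ticks_since_sleep):
    no_sleep = (ticks_since_sleep // 3000) / 2
    return round(no_sleep * 0.05, 2)

assert fatigue(12000) == 0.1   # half a day -> +0.1, as the comment says
assert fatigue(24000) == 0.2   # full day  -> +0.2
print(fatigue(3000))           # each 3000-tick step adds 0.025
```
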
nwo: gentoo/portage
sha: e5be73709b1a42b40380fd336f9381452b01a723
path: lib/portage/dbapi/vartree.py
language: python
identifier: dblink._post_merge_sync
parameters: (self)
docstring_summary: Call this after merge or unmerge, in order to sync relevant files to disk and avoid data-loss in the event of a power failure. This method does nothing if FEATURES=merge-sync is disabled.
function:

```python
def _post_merge_sync(self):
    """
    Call this after merge or unmerge, in order to sync relevant files to
    disk and avoid data-loss in the event of a power failure. This method
    does nothing if FEATURES=merge-sync is disabled.
    """
    if not self._device_path_map or "merge-sync" not in self.settings.features:
        return

    returncode = None
    if platform.system() == "Linux":
        paths = []
        for path in self._device_path_map.values():
            if path is not False:
                paths.append(path)
        paths = tuple(paths)

        proc = SyncfsProcess(
            paths=paths, scheduler=(self._scheduler or asyncio._safe_loop())
        )
        proc.start()
        returncode = proc.wait()

    if returncode is None or returncode != os.EX_OK:
        try:
            proc = subprocess.Popen(["sync"])
        except EnvironmentError:
            pass
        else:
            proc.wait()
```

url: https://github.com/gentoo/portage/blob/e5be73709b1a42b40380fd336f9381452b01a723/lib/portage/dbapi/vartree.py#L5963-L5993

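The shape worth noting is the fallback: a targeted per-filesystem sync first, then plain sync(1) if that fails. SyncfsProcess is portage-internal; on Linux the underlying primitive is the syncfs(2) syscall, which a stripped-down sketch can reach via ctypes (the glibc path below is an assumption about the platform).

```python
# A stripped-down sketch of the fallback shape above, not portage's actual
# implementation. On glibc/Linux, syncfs(2) is callable through ctypes;
# any failure falls back to the sync(1) command.
import ctypes
import os
import subprocess

def post_merge_sync(paths):
    ok = False
    try:
        libc = ctypes.CDLL("libc.so.6", use_errno=True)
        for p in paths:
            fd = os.open(p, os.O_RDONLY)
            try:
                if libc.syncfs(fd) != 0:
                    raise OSError(ctypes.get_errno(), "syncfs failed")
            finally:
                os.close(fd)
        ok = bool(paths)
    except (OSError, AttributeError):
        ok = False
    if not ok:
        try:
            subprocess.Popen(["sync"]).wait()
        except EnvironmentError:
            pass
```
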
nwo: makerdao/pymaker
sha: 9245b3e22bcb257004d54337df6c2b0c9cbe42c8
path: pymaker/auctions.py
language: python
identifier: Clipper.upchost
parameters: (self)
return_statement: return Transact(self, self.web3, self.abi, self.address, self._contract, 'upchost', [])
docstring_summary: Update the cached dust*chop value following a governance change
function:

```python
def upchost(self):
    """Update the cached dust*chop value following a governance change"""
    return Transact(self, self.web3, self.abi, self.address, self._contract, 'upchost', [])
```

url: https://github.com/makerdao/pymaker/blob/9245b3e22bcb257004d54337df6c2b0c9cbe42c8/pymaker/auctions.py#L884-L886

nwo: yechengxi/deconvolution
sha: dace8a38e6a7158a51d5b2d60fedd819a52f422c
path: Segmentation/models/resnetd.py
language: python
identifier: resnet34d
parameters: (deconv, channel_deconv, pretrained=False, progress=True, **kwargs)
return_statement: return _resnet('resnet34d', BasicBlock, [3, 4, 6, 3], pretrained, progress, deconv=deconv, channel_deconv=channel_deconv, **kwargs)
docstring_summary: Constructs a ResNet-34 model.
function:

```python
def resnet34d(deconv, channel_deconv, pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34d', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   deconv=deconv, channel_deconv=channel_deconv, **kwargs)
```

url: https://github.com/yechengxi/deconvolution/blob/dace8a38e6a7158a51d5b2d60fedd819a52f422c/Segmentation/models/resnetd.py#L287-L295

nwo: google-research/language
sha: 61fa7260ac7d690d11ef72ca863e45a37c0bdc80
path: language/nql/nql/dist.py
language: python
identifier: DistributedNeuralQueryContext._sharded_rel_name
parameters: (self, rel_name, shard_id=-1)
return_statement: return rel_name + '_' + str(shard_id)
docstring_summary: Helper function to append shard_id to the end of rel_name.
function:

```python
def _sharded_rel_name(self, rel_name, shard_id=-1):
    """Helper function to append shard_id to the end of rel_name.

    Args:
      rel_name: string naming a declared relation
      shard_id: the i'th shard of the matrix.

    Returns:
      A string of rel_name appended by shard_id
    """
    return rel_name + '_' + str(shard_id)
```

url: https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/nql/nql/dist.py#L365-L375

nwo: mrkipling/maraschino
sha: c6be9286937783ae01df2d6d8cebfc8b2734a7d7
path: lib/feedparser/sgmllib3.py
language: python
identifier: SGMLParser.reset
parameters: (self)
docstring_summary: Reset this instance. Loses all unprocessed data.
function:

```python
def reset(self):
    """Reset this instance. Loses all unprocessed data."""
    self.__starttag_text = None
    self.rawdata = ''
    self.stack = []
    self.lasttag = '???'
    self.nomoretags = 0
    self.literal = 0
    _markupbase.ParserBase.reset(self)
```

url: https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/feedparser/sgmllib3.py#L65-L73

nwo: sagarvegad/Video-Classification-CNN-and-LSTM-
sha: dd41a912a5fa8065b7f5d50924d4779b3c21e5b3
path: train_CNN.py
language: python
identifier: train_model
parameters: (train_data, train_labels, validation_data, validation_labels)
return_statement: return model
docstring_summary: used fully connected layers, SGD optimizer and checkpoint to store the best weights
function:

```python
def train_model(train_data, train_labels, validation_data, validation_labels):
    '''used fully connected layers, SGD optimizer and
    checkpoint to store the best weights'''
    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(5, activation='softmax'))
    sgd = SGD(lr=0.00005, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    model.load_weights('video_3_512_VGG_no_drop.h5')
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=10, verbose=0),
        ModelCheckpoint('video_3_512_VGG_no_drop.h5', monitor='val_loss', save_best_only=True, verbose=0),
    ]
    nb_epoch = 500
    model.fit(train_data, train_labels, validation_data=(validation_data, validation_labels),
              batch_size=batch_size, nb_epoch=nb_epoch, callbacks=callbacks, shuffle=True, verbose=1)
    return model
```

url: https://github.com/sagarvegad/Video-Classification-CNN-and-LSTM-/blob/dd41a912a5fa8065b7f5d50924d4779b3c21e5b3/train_CNN.py#L93-L111

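`lr`, `decay`, and `nb_epoch` are Keras 1 spellings. A hedged sketch of the same head in current tf.keras follows; the (7, 7, 512) input shape and the dummy data are stand-ins, not the author's VGG features.

```python
# A sketch of the same classifier head in current tf.keras: `lr` is now
# `learning_rate`, `nb_epoch` is `epochs`, and the standalone `decay`
# argument is gone (use a learning-rate schedule instead). The input
# shape and random data below are placeholder assumptions.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(7, 7, 512)),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(5, activation='softmax'),
])
sgd = tf.keras.optimizers.SGD(learning_rate=5e-5, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

x = np.random.rand(8, 7, 7, 512).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(5, size=8), 5)
model.fit(x, y, validation_data=(x, y), batch_size=4, epochs=1, verbose=1)
```
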
nwo: cw1204772/AIC2018_iamai
sha: 9c3720ba5eeb94e02deed303f32acaaa80aa893d
path: Detection/lib/modeling/mask_rcnn_heads.py
language: python
identifier: mask_rcnn_fcn_head_v1up
parameters: (model, blob_in, dim_in, spatial_scale)
return_statement: return mask_rcnn_fcn_head_v1upXconvs(model, blob_in, dim_in, spatial_scale, 2)
docstring_summary: v1up design: 2 * (conv 3x3), convT 2x2.
function:

```python
def mask_rcnn_fcn_head_v1up(model, blob_in, dim_in, spatial_scale):
    """v1up design: 2 * (conv 3x3), convT 2x2."""
    return mask_rcnn_fcn_head_v1upXconvs(
        model, blob_in, dim_in, spatial_scale, 2
    )
```

url: https://github.com/cw1204772/AIC2018_iamai/blob/9c3720ba5eeb94e02deed303f32acaaa80aa893d/Detection/lib/modeling/mask_rcnn_heads.py#L117-L121

nwo: yuxiaokui/Intranet-Penetration
sha: f57678a204840c83cbf3308e3470ae56c5ff514b
path: proxy/XX-Net/code/default/python27/1.0/lib/win32/gevent/greenlet.py
language: python
identifier: Greenlet.link
parameters: (self, callback, SpawnedLink=SpawnedLink)
docstring_summary: Link greenlet's completion to a callable.
function:

```python
def link(self, callback, SpawnedLink=SpawnedLink):
    """Link greenlet's completion to a callable.

    The *callback* will be called with this instance as an argument
    once this greenlet's dead. A callable is called in its own greenlet.
    """
    self.rawlink(SpawnedLink(callback))
```

url: https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/python27/1.0/lib/win32/gevent/greenlet.py#L348-L354

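A small usage sketch against the public gevent API: the callback receives the finished greenlet and, as the docstring says, runs in a greenlet of its own.

```python
# Usage sketch for Greenlet.link: the callback gets the finished greenlet
# and runs in its own greenlet, so yield once to let it execute.
import gevent

def on_done(greenlet):
    print("finished with value:", greenlet.value)

g = gevent.spawn(lambda: 6 * 7)
g.link(on_done)
g.join()
gevent.sleep(0)   # give the spawned callback greenlet a chance to run
```
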
nwo: buke/GreenOdoo
sha: 3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
path: runtime/python/lib/python2.7/site-packages/_xmlplus/utils/iso8601.py
language: python
identifier: __extract_tzd
parameters: (m)
return_statement: return offset
docstring_summary: Return the Time Zone Designator as an offset in seconds from UTC.
function:

```python
def __extract_tzd(m):
    """Return the Time Zone Designator as an offset in seconds from UTC."""
    if not m:
        return 0
    tzd = m.group("tzd")
    if not tzd:
        return 0
    if tzd == "Z":
        return 0
    hours = int(m.group("tzdhours"))
    minutes = m.group("tzdminutes")
    if minutes:
        minutes = int(minutes)
    else:
        minutes = 0
    offset = (hours*60 + minutes) * 60
    if tzd[0] == "+":
        return -offset
    return offset
```

url: https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/_xmlplus/utils/iso8601.py#L149-L167

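The function expects a match object with named groups `tzd`, `tzdhours`, and `tzdminutes`, and its sign convention is that the return value is what you add to local time to obtain UTC (hence the flip for "+"). A self-contained sketch with an illustrative pattern, not the module's actual ISO-8601 regex:

```python
# A self-contained sketch of the match object this function consumes;
# the regex here is illustrative, not the module's actual pattern.
import re

TZD = re.compile(r"(?P<tzd>Z|[-+](?P<tzdhours>\d{2}):(?P<tzdminutes>\d{2}))$")

def extract_tzd(m):
    if not m or not m.group("tzd"):
        return 0
    if m.group("tzd") == "Z":
        return 0
    offset = (int(m.group("tzdhours")) * 60 + int(m.group("tzdminutes"))) * 60
    # "+02:00" means local time is ahead of UTC, so add a negative offset
    return -offset if m.group("tzd")[0] == "+" else offset

print(extract_tzd(TZD.search("2024-01-01T12:00:00+02:00")))  # -7200
print(extract_tzd(TZD.search("2024-01-01T12:00:00Z")))       # 0
```
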
nwo: CGCookie/retopoflow
sha: 3d8b3a47d1d661f99ab0aeb21d31370bf15de35e
path: retopoflow/updater.py
language: python
identifier: addon_updater_updated_successful.draw
parameters: (self, context)
function:

```python
def draw(self, context):
    layout = self.layout
    if updater.invalid_updater:
        layout.label(text="Updater error")
        return
    saved = updater.json
    if self.error != "":
        col = layout.column()
        col.scale_y = 0.7
        col.label(text="Error occurred, did not install", icon="ERROR")
        if updater.error_msg:
            msg = updater.error_msg
        else:
            msg = self.error
        col.label(text=str(msg), icon="BLANK1")
        rw = col.row()
        rw.scale_y = 2
        rw.operator("wm.url_open",
                    text="Click for manual download.",
                    icon="BLANK1"
                    ).url = updater.website
        # manual download button here
    elif updater.auto_reload_post_update == False:
        # tell user to restart blender
        if "just_restored" in saved and saved["just_restored"]:
            col = layout.column()
            col.scale_y = 0.7
            col.label(text="Addon restored", icon="RECOVER_LAST")
            col.label(text="Restart blender to reload.", icon="BLANK1")
            updater.json_reset_restore()
        else:
            col = layout.column()
            col.scale_y = 0.7
            col.label(text="Addon successfully installed", icon="FILE_TICK")
            col.label(text="Restart blender to reload.", icon="BLANK1")
    else:
        # reload addon, but still recommend they restart blender
        if "just_restored" in saved and saved["just_restored"]:
            col = layout.column()
            col.scale_y = 0.7
            col.label(text="Addon restored", icon="RECOVER_LAST")
            col.label(text="Consider restarting blender to fully reload.",
                      icon="BLANK1")
            updater.json_reset_restore()
        else:
            col = layout.column()
            col.scale_y = 0.7
            col.label(text="Addon successfully installed", icon="FILE_TICK")
            col.label(text="Consider restarting blender to fully reload.",
                      icon="BLANK1")
```

url: https://github.com/CGCookie/retopoflow/blob/3d8b3a47d1d661f99ab0aeb21d31370bf15de35e/retopoflow/updater.py#L467-L520

nwo: metakirby5/colorz
sha: 11fd47a28d7a4af5b91d29978524335c8fef8cc9
path: colorz.py
language: python
identifier: clamp
parameters: (color, min_v, max_v)
return_statement: return tuple(map(up_scale, hsv_to_rgb(h, s, v)))
docstring_summary: Clamps a color such that the value is between min_v and max_v.
function:

```python
def clamp(color, min_v, max_v):
    """
    Clamps a color such that the value is between min_v and max_v.
    """
    h, s, v = rgb_to_hsv(*map(down_scale, color))
    min_v, max_v = map(down_scale, (min_v, max_v))
    v = min(max(min_v, v), max_v)
    return tuple(map(up_scale, hsv_to_rgb(h, s, v)))
```

url: https://github.com/metakirby5/colorz/blob/11fd47a28d7a4af5b91d29978524335c8fef8cc9/colorz.py#L64-L71

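`down_scale`, `up_scale`, and the HSV conversions live elsewhere in colorz; a runnable sketch with stand-ins, assuming they are the usual /255 and *255 scaling around the stdlib colorsys conversions:

```python
# A runnable sketch of clamp() with stand-ins for colorz's helpers:
# down_scale/up_scale are assumed to be plain /255 and *255 scaling,
# and the HSV conversions come from the stdlib colorsys module.
from colorsys import hsv_to_rgb, rgb_to_hsv

def down_scale(x):
    return x / 255.0

def up_scale(x):
    return int(round(x * 255))

def clamp(color, min_v, max_v):
    h, s, v = rgb_to_hsv(*map(down_scale, color))
    min_v, max_v = map(down_scale, (min_v, max_v))
    v = min(max(min_v, v), max_v)
    return tuple(map(up_scale, hsv_to_rgb(h, s, v)))

print(clamp((10, 10, 10), 170, 200))   # too dark: value raised to 170
print(clamp((250, 128, 0), 170, 200))  # too bright: value capped at 200
```
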
nwo: wxWidgets/Phoenix
sha: b2199e299a6ca6d866aa6f3d0888499136ead9d6
path: wx/lib/ogl/basic.py
language: python
identifier: RectangleShape.GetBoundingBoxMin
parameters: (self)
return_statement: return self._width, self._height
docstring_summary: Get the bounding box minimum.
function:

```python
def GetBoundingBoxMin(self):
    """Get the bounding box minimum."""
    return self._width, self._height
```

url: https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/ogl/basic.py#L2776-L2778

nwo: krintoxi/NoobSec-Toolkit
sha: 38738541cbc03cedb9a3b3ed13b629f781ad64f6
path: NoobSecToolkit /tools/inject/lib/core/common.py
language: python
identifier: filterPairValues
parameters: (values)
return_statement: return retVal
docstring_summary: Returns only list-like values with length 2
function:

```python
def filterPairValues(values):
    """
    Returns only list-like values with length 2

    >>> filterPairValues([[1, 2], [3], 1, [4, 5]])
    [[1, 2], [4, 5]]
    """
    retVal = []

    if not isNoneValue(values) and hasattr(values, '__iter__'):
        retVal = filter(lambda x: isinstance(x, (tuple, list, set)) and len(x) == 2, values)

    return retVal
```

url: https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/inject/lib/core/common.py#L3399-L3412

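This module targets Python 2, where `filter` returns a list; under Python 3 it returns a lazy iterator, so the doctest above would print a filter object instead of a list. A Python 3 rendering of the same function (with a plain None check standing in for the sqlmap-internal `isNoneValue`):

```python
# Python 3 rendering: filter() is lazy in Python 3, so use a list
# comprehension. A None check stands in for sqlmap's isNoneValue here.
def filterPairValues(values):
    """
    >>> filterPairValues([[1, 2], [3], 1, [4, 5]])
    [[1, 2], [4, 5]]
    """
    if values is not None and hasattr(values, '__iter__'):
        return [x for x in values if isinstance(x, (tuple, list, set)) and len(x) == 2]
    return []
```
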
nwo: twilio/twilio-python
sha: 6e1e811ea57a1edfadd5161ace87397c563f6915
path: twilio/rest/conversations/v1/role.py
language: python
identifier: RoleInstance.account_sid
parameters: (self)
return_statement: return self._properties['account_sid']
docstring_summary: :returns: The SID of the Account that created the resource :rtype: unicode
function:

```python
def account_sid(self):
    """
    :returns: The SID of the Account that created the resource
    :rtype: unicode
    """
    return self._properties['account_sid']
```

url: https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/conversations/v1/role.py#L321-L326

nwo: numba/numba
sha: bf480b9e0da858a65508c2b17759a72ee6a44c51
path: numba/core/ir.py
language: python
identifier: Const.__deepcopy__
parameters: (self, memo)
return_statement: return Const(value=self.value, loc=self.loc, use_literal_type=self.use_literal_type)
function:

```python
def __deepcopy__(self, memo):
    # Override to not copy constant values in code
    return Const(
        value=self.value, loc=self.loc,
        use_literal_type=self.use_literal_type,
    )
```

url: https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/core/ir.py#L966-L971

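A toy demonstration of the pattern: overriding `__deepcopy__` so the wrapper node is copied but the wrapped value object is shared rather than cloned.

```python
# Toy analogue of the override above: deep copies of the wrapper share
# the underlying value instead of cloning it.
import copy

class Box:
    def __init__(self, value):
        self.value = value

    def __deepcopy__(self, memo):
        # like numba's Const: new node, same underlying constant
        return Box(self.value)

a = Box([1, 2, 3])
b = copy.deepcopy(a)
assert b is not a          # the wrapper was copied...
assert b.value is a.value  # ...but the constant value is shared
```
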
nwo: luoyetx/mx-lsoftmax
sha: 194f49eb41c58acf3eff46369574d8828844c4ef
path: plot_beta.py
language: python
identifier: plot_beta
parameters: ()
docstring_summary: plot beta over training
function:

```python
def plot_beta():
    '''plot beta over training
    '''
    beta = args.beta
    scale = args.scale
    beta_min = args.beta_min
    num_epoch = args.num_epoch
    epoch_size = int(float(args.num_examples) / args.batch_size)
    x = np.arange(num_epoch*epoch_size)
    y = beta * np.power(scale, x)
    y = np.maximum(y, beta_min)
    epoch_x = np.arange(num_epoch) * epoch_size
    epoch_y = beta * np.power(scale, epoch_x)
    epoch_y = np.maximum(epoch_y, beta_min)
    # plot beta descent curve
    plt.semilogy(x, y)
    plt.semilogy(epoch_x, epoch_y, 'ro')
    plt.title('beta descent')
    plt.ylabel('beta')
    plt.xlabel('epoch')
    plt.show()
```

url: https://github.com/luoyetx/mx-lsoftmax/blob/194f49eb41c58acf3eff46369574d8828844c4ef/plot_beta.py#L8-L30

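The schedule being plotted is a per-batch exponential decay with a floor: after k batches, beta_k = max(beta * scale^k, beta_min). A worked check with made-up hyperparameters (in the script, these come from argparse):

```python
# Worked check of the schedule plotted above: beta decays per batch as
# beta * scale**k, floored at beta_min. Hyperparameters here are made up.
beta, scale, beta_min = 1000.0, 0.99, 3.0
epoch_size = 500                       # batches per epoch

for epoch in (0, 1, 2):
    k = epoch * epoch_size
    print(epoch, max(beta * scale**k, beta_min))
# epoch 0: 1000.0, epoch 1: ~6.57, epoch 2: floored at 3.0
```
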
nwo: WilsonWangTHU/mbbl
sha: bb88a016de2fcd8ea0ed9c4d5c539817d2b476e7
path: mbbl/env/gym_env/delayed_walker.py
language: python
identifier: env._get_done
parameters: (self, ob)
return_statement: return done, alive_reward
docstring_summary: @brief: add termination condition
function:

```python
def _get_done(self, ob):
    """ @brief: add termination condition
    """
    alive_reward = 0.0
    done = False
    if self._env_name == 'gym_dfhopper':
        height, ang = ob[0], ob[1]
        done = (height <= 0.7) or (abs(ang) >= 0.2)
        alive_reward = float(not done)
    elif self._env_name == 'gym_dfwalker2d':
        height, ang = ob[0], ob[1]
        done = (height >= 2.0) or (height <= 0.8) or (abs(ang) >= 1.0)
        alive_reward = float(not done)
    elif self._env_name == 'gym_dfant':
        height = ob[0]
        done = (height > 1.0) or (height < 0.2)
        alive_reward = float(not done)

    if self._no_termination:
        done = False
    if self._current_step >= self._env_info['max_length']:
        done = True

    return done, alive_reward
```

url: https://github.com/WilsonWangTHU/mbbl/blob/bb88a016de2fcd8ea0ed9c4d5c539817d2b476e7/mbbl/env/gym_env/delayed_walker.py#L80-L107

nwo: morganstanley/treadmill
sha: f18267c665baf6def4374d21170198f63ff1cde4
path: lib/python/treadmill/localdiskutils.py
language: python
identifier: setup_image_lvm
parameters: (img_name, img_location, img_size, vg_name=TREADMILL_VG)
return_statement: return activated
docstring_summary: Setup the LVM Volume Group based on image file
function:

```python
def setup_image_lvm(img_name, img_location, img_size,
                    vg_name=TREADMILL_VG):
    """Setup the LVM Volume Group based on image file"""
    activated = activate_vg(vg_name)
    if not activated:
        _LOGGER.info('Initializing Volume Group')
        block_dev = init_block_dev(
            img_name,
            img_location,
            img_size
        )
        init_vg(vg_name, block_dev)
    return activated
```

url: https://github.com/morganstanley/treadmill/blob/f18267c665baf6def4374d21170198f63ff1cde4/lib/python/treadmill/localdiskutils.py#L235-L247

nwo: shaneshixiang/rllabplusplus
sha: 4d55f96ec98e3fe025b7991945e3e6a54fd5449f
path: rllab/rllab_mujoco_py/glfw.py
language: python
identifier: get_monitor_name
parameters: (monitor)
return_statement: return _glfw.glfwGetMonitorName(monitor)
docstring_summary: Returns the name of the specified monitor.
function:

```python
def get_monitor_name(monitor):
    '''
    Returns the name of the specified monitor.

    Wrapper for:
        const char* glfwGetMonitorName(GLFWmonitor* monitor);
    '''
    return _glfw.glfwGetMonitorName(monitor)
```

url: https://github.com/shaneshixiang/rllabplusplus/blob/4d55f96ec98e3fe025b7991945e3e6a54fd5449f/rllab/rllab_mujoco_py/glfw.py#L659-L666

nwo: epinna/weevely3
sha: 6332b4641f5ac68f1cbeac1604e7dd03383d7b31
path: modules/net/proxy.py
language: python
identifier: get_cert_path
parameters: (path)
return_statement: return os.path.join(cert_folder, path)
function:

```python
def get_cert_path(path):
    return os.path.join(cert_folder, path)
```

url: https://github.com/epinna/weevely3/blob/6332b4641f5ac68f1cbeac1604e7dd03383d7b31/modules/net/proxy.py#L52-L53

nwo: HymanLiuTS/flaskTs
sha: 286648286976e85d9b9a5873632331efcafe0b21
path: flasky/lib/python2.7/site-packages/flask_wtf/form.py
language: python
identifier: FlaskForm.hidden_tag
parameters: (self, *fields)
return_statement: return Markup(u'\n'.join(text_type(f) for f in hidden_fields(fields or self)))
docstring_summary: Render the form's hidden fields in one call.
function:

```python
def hidden_tag(self, *fields):
    """Render the form's hidden fields in one call.

    A field is considered hidden if it uses the
    :class:`~wtforms.widgets.HiddenInput` widget.

    If ``fields`` are given, only render the given fields that
    are hidden. If a string is passed, render the field with that
    name if it exists.

    .. versionchanged:: 0.13
       No longer wraps inputs in hidden div.
       This is valid HTML 5.

    .. versionchanged:: 0.13
       Skip passed fields that aren't hidden.
       Skip passed names that don't exist.
    """
    def hidden_fields(fields):
        for f in fields:
            if isinstance(f, string_types):
                f = getattr(self, f, None)
            if f is None or not isinstance(f.widget, HiddenInput):
                continue
            yield f

    return Markup(u'\n'.join(text_type(f) for f in hidden_fields(fields or self)))
```

url: https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/flask_wtf/form.py#L124-L155

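To see which fields `hidden_tag()` picks up, here is the same selection rule in plain WTForms (no Flask request context needed): keep exactly the fields whose widget is a `HiddenInput`.

```python
# The selection rule hidden_tag() applies, sketched in plain WTForms:
# a field counts as hidden iff its widget is a HiddenInput.
from wtforms import Form, HiddenField, StringField
from wtforms.widgets import HiddenInput

class MyForm(Form):
    token = HiddenField()
    name = StringField()

form = MyForm()
hidden = [f for f in form if isinstance(f.widget, HiddenInput)]
print('\n'.join(str(f) for f in hidden))  # renders only the token field
```
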
nwo: karlicoss/orgparse
sha: e79228fd0f2cedc6c6e21a3de5b73337d2749fbf
path: orgparse/node.py
language: python
identifier: OrgBaseNode._get_tags
parameters: (self, inher=False)
return_statement: return set()
docstring_summary: Return tags
function:

```python
def _get_tags(self, inher=False) -> Set[str]:
    """
    Return tags

    :arg bool inher:
        Mix with tags of all ancestor nodes if ``True``.

    :rtype: set
    """
    return set()
```

url: https://github.com/karlicoss/orgparse/blob/e79228fd0f2cedc6c6e21a3de5b73337d2749fbf/orgparse/node.py#L838-L848

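The public face of this base-class hook is the node's tag properties; a quick sketch against orgparse's documented API showing shallow versus inherited tags:

```python
# Sketch against orgparse's public API: shallow_tags holds the node's
# own tags, tags mixes in ancestor tags (the inher=True behavior).
from orgparse import loads

root = loads("""\
* Parent :work:
** Child :urgent:
""")
child = root.children[0].children[0]
print(child.shallow_tags)  # {'urgent'} -- tags on the node itself
print(child.tags)          # {'urgent', 'work'} -- mixed with ancestors
```
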
nwo: securesystemslab/zippy
sha: ff0e84ac99442c2c55fe1d285332cfd4e185e089
path: zippy/benchmarks/src/benchmarks/sympy/sympy/mpmath/functions/bessel.py
language: python
identifier: besselk
parameters: (ctx, n, z, **kwargs)
return_statement: return ctx.hypercomb(h, [n], **kwargs)
function:

```python
def besselk(ctx, n, z, **kwargs):
    if not z:
        return ctx.inf
    M = ctx.mag(z)
    if M < 1:
        # Represent as limit definition
        def h(n):
            r = (z/2)**2
            T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r
            T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r
            return T1, T2
    # We could use the limit definition always, but it leads
    # to very bad cancellation (of exponentially large terms)
    # for large real z
    # Instead represent in terms of 2F0
    else:
        ctx.prec += M
        def h(n):
            return [([ctx.pi/2, z, ctx.exp(-z)], [0.5, -0.5, 1], [], [],
                     [n+0.5, 0.5-n], [], -1/(2*z))]
    return ctx.hypercomb(h, [n], **kwargs)
```

url: https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/mpmath/functions/bessel.py#L153-L173

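This is an mpmath internal bundled with the zippy benchmarks; the public entry point is `mpmath.besselk`, the modified Bessel function of the second kind K_n(z):

```python
# The public entry point for the internal above: mpmath.besselk,
# the modified Bessel function of the second kind K_n(z).
from mpmath import besselk, mp

mp.dps = 20
print(besselk(0, 1))   # K_0(1) ~ 0.4210244382
print(besselk(1, 2))   # K_1(2) ~ 0.1398658818
```
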
nwo: openshift/openshift-ansible
sha: e3b38f9ffd8e954c0060ec6a62f141fbc6335354
path: roles/openshift_node/library/oc_csr_approve.py
language: python
identifier: CSRapprove.run
parameters: (self)
docstring_summary: execute the CSR approval process
function:

```python
def run(self):
    """execute the CSR approval process"""
    # # Client Cert Section # #
    mode = "client"
    attempts = 1
    while True:
        # If the node is in the list of all nodes, we do not need to approve client CSRs
        if self.nodename not in self.get_nodes():
            attempts = self.runner(attempts, mode)
        else:
            self.result["{}_approve_results".format(mode)].append(
                "Node {} is present in node list".format(self.nodename))
            break

    # # Server Cert Section # #
    mode = "server"
    attempts = 1
    while True:
        # If the node API is healthy, we do not need to approve server CSRs
        if not self.node_is_ready(self.nodename):
            attempts = self.runner(attempts, mode)
        else:
            self.result["{}_approve_results".format(mode)].append(
                "Node {} API is ready".format(self.nodename))
            break

    self.module.exit_json(**self.result)
```

url: https://github.com/openshift/openshift-ansible/blob/e3b38f9ffd8e954c0060ec6a62f141fbc6335354/roles/openshift_node/library/oc_csr_approve.py#L241-L268

sahana/eden
|
1696fa50e90ce967df69f66b571af45356cc18da
|
modules/s3/s3masterkey.py
|
python
|
S3MasterKey.challenge
|
(cls, headers=None)
|
Add a response header to a HTTP-401 challenge containing
a master key auth token; this token can be used by the
client to generate an access key
The client must indicate its MasterKeyAuth capability by
adding a HTTP header to the original request:
- RequestMasterKeyAuth: true
In case of a 401-response, the server will send a corresponding
header:
- MasterKeyAuthToken: <token>
The value of this header is a base64-encoded combination of
token ID and token string: "ID:TOKEN".
|
Add a response header to a HTTP-401 challenge containing
a master key auth token; this token can be used by the
client to generate an access key
|
[
"Add",
"a",
"response",
"header",
"to",
"a",
"HTTP",
"-",
"401",
"challenge",
"containing",
"a",
"master",
"key",
"auth",
"token",
";",
"this",
"token",
"can",
"be",
"used",
"by",
"the",
"client",
"to",
"generate",
"an",
"access",
"key"
] |
def challenge(cls, headers=None):
"""
Add a response header to a HTTP-401 challenge containing
a master key auth token; this token can be used by the
client to generate an access key
The client must indicate its MasterKeyAuth capability by
adding a HTTP header to the original request:
- RequestMasterKeyAuth: true
In case of a 401-response, the server will send a corresponding
header:
- MasterKeyAuthToken: <token>
The value of this header is a base64-encoded combination of
token ID and token string: "ID:TOKEN".
"""
if not current.response.s3.masterkey_auth_failed and \
current.request.env.http_requestmasterkeyauth == "true":
header = ("%s:%s" % cls.__token()).encode("utf-8")
if headers is None:
headers = current.response.headers
headers["MasterKeyAuthToken"] = s3_str(base64.b64encode(header))
|
[
"def",
"challenge",
"(",
"cls",
",",
"headers",
"=",
"None",
")",
":",
"if",
"not",
"current",
".",
"response",
".",
"s3",
".",
"masterkey_auth_failed",
"and",
"current",
".",
"request",
".",
"env",
".",
"http_requestmasterkeyauth",
"==",
"\"true\"",
":",
"header",
"=",
"(",
"\"%s:%s\"",
"%",
"cls",
".",
"__token",
"(",
")",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"current",
".",
"response",
".",
"headers",
"headers",
"[",
"\"MasterKeyAuthToken\"",
"]",
"=",
"s3_str",
"(",
"base64",
".",
"b64encode",
"(",
"header",
")",
")"
] |
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3/s3masterkey.py#L127-L155
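A minimal client-side sketch of the handshake described above, assuming only the base64 "ID:TOKEN" shape the docstring specifies; the helper name and sample values are illustrative, not part of the eden source.

import base64

def parse_master_key_token(header_value):
    # header_value is the base64-encoded "ID:TOKEN" string carried in the
    # MasterKeyAuthToken response header (shape per the docstring above)
    decoded = base64.b64decode(header_value).decode("utf-8")
    token_id, _, token = decoded.partition(":")
    return token_id, token

# round-trip with a made-up token pair
header = base64.b64encode(b"42:s3cret").decode("ascii")
print(parse_master_key_token(header))  # ('42', 's3cret')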
|
||
DocNow/twarc
|
adec782f0a99987e0e909812823bb9bd01af4e5e
|
twarc/command2.py
|
python
|
hydrate
|
(T, infile, outfile, hide_progress, **kwargs)
|
Hydrate tweet ids.
|
Hydrate tweet ids.
|
[
"Hydrate",
"tweet",
"ids",
"."
] |
def hydrate(T, infile, outfile, hide_progress, **kwargs):
"""
Hydrate tweet ids.
"""
kwargs = _process_expansions_shortcuts(kwargs)
with FileLineProgressBar(infile, outfile, disable=hide_progress) as progress:
for result in T.tweet_lookup(infile, **kwargs):
_write(result, outfile)
tweet_ids = [t["id"] for t in result.get("data", [])]
log.info("archived %s", ",".join(tweet_ids))
progress.update_with_result(result, error_resource_type="tweet")
|
[
"def",
"hydrate",
"(",
"T",
",",
"infile",
",",
"outfile",
",",
"hide_progress",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"_process_expansions_shortcuts",
"(",
"kwargs",
")",
"with",
"FileLineProgressBar",
"(",
"infile",
",",
"outfile",
",",
"disable",
"=",
"hide_progress",
")",
"as",
"progress",
":",
"for",
"result",
"in",
"T",
".",
"tweet_lookup",
"(",
"infile",
",",
"*",
"*",
"kwargs",
")",
":",
"_write",
"(",
"result",
",",
"outfile",
")",
"tweet_ids",
"=",
"[",
"t",
"[",
"\"id\"",
"]",
"for",
"t",
"in",
"result",
".",
"get",
"(",
"\"data\"",
",",
"[",
"]",
")",
"]",
"log",
".",
"info",
"(",
"\"archived %s\"",
",",
"\",\"",
".",
"join",
"(",
"tweet_ids",
")",
")",
"progress",
".",
"update_with_result",
"(",
"result",
",",
"error_resource_type",
"=",
"\"tweet\"",
")"
] |
https://github.com/DocNow/twarc/blob/adec782f0a99987e0e909812823bb9bd01af4e5e/twarc/command2.py#L918-L930
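A hedged sketch of driving the same lookup outside the CLI, assuming the twarc2 client API (Twarc2.tweet_lookup yields response pages whose "data" member is a list of tweets); the bearer token and ids are placeholders.

from twarc.client2 import Twarc2

T = Twarc2(bearer_token="YOUR_BEARER_TOKEN")  # placeholder credential
tweet_ids = ["20", "1067094924124872705"]     # ids to hydrate

for page in T.tweet_lookup(tweet_ids):
    for tweet in page.get("data", []):
        print(tweet["id"], tweet.get("text", "")[:60])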
|
||
triaquae/triaquae
|
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
|
TriAquae/models/django/contrib/gis/geos/polygon.py
|
python
|
Polygon._set_list
|
(self, length, items)
|
[] |
def _set_list(self, length, items):
# Getting the current pointer, replacing with the newly constructed
# geometry, and destroying the old geometry.
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_polygon(length, items)
if srid: self.srid = srid
capi.destroy_geom(prev_ptr)
|
[
"def",
"_set_list",
"(",
"self",
",",
"length",
",",
"items",
")",
":",
"# Getting the current pointer, replacing with the newly constructed",
"# geometry, and destroying the old geometry.",
"prev_ptr",
"=",
"self",
".",
"ptr",
"srid",
"=",
"self",
".",
"srid",
"self",
".",
"ptr",
"=",
"self",
".",
"_create_polygon",
"(",
"length",
",",
"items",
")",
"if",
"srid",
":",
"self",
".",
"srid",
"=",
"srid",
"capi",
".",
"destroy_geom",
"(",
"prev_ptr",
")"
] |
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/django/contrib/gis/geos/polygon.py#L107-L114
|
||||
Bemmu/PyNamecheap
|
1657852993bb0a7bfdc07c44977c88409bc7753a
|
namecheap.py
|
python
|
Api.domains_dns_delHost
|
(self, domain, host_record)
|
This method is absent in original API as well. It executes non-atomic
remove operation over the host record which has the following Type,
Hostname and Address.
Example:
api.domains_dns_delHost('example.com', {
"RecordType": "A",
"HostName": "test",
"Address": "127.0.0.1"
})
|
This method is absent in original API as well. It executes non-atomic
remove operation over the host record which has the following Type,
Hostname and Address.
|
[
"This",
"method",
"is",
"absent",
"in",
"original",
"API",
"as",
"well",
".",
"It",
"executes",
"non",
"-",
"atomic",
"remove",
"operation",
"over",
"the",
"host",
"record",
"which",
"has",
"the",
"following",
"Type",
"Hostname",
"and",
"Address",
"."
] |
def domains_dns_delHost(self, domain, host_record):
"""This method is absent in original API as well. It executes non-atomic
remove operation over the host record which has the following Type,
Hostname and Address.
Example:
api.domains_dns_delHost('example.com', {
"RecordType": "A",
"HostName": "test",
"Address": "127.0.0.1"
})
"""
host_records_remote = self.domains_dns_getHosts(domain)
print("Remote: %i" % len(host_records_remote))
host_records_new = []
for r in host_records_remote:
cond_type = r["Type"] == host_record["Type"]
cond_name = r["Name"] == host_record["Name"]
cond_addr = r["Address"] == host_record["Address"]
if cond_type and cond_name and cond_addr:
# skipping this record as it is the one we want to delete
pass
else:
host_records_new.append(r)
host_records_new = [self._elements_names_fix(x) for x in host_records_new]
print("To set: %i" % len(host_records_new))
# Check that we delete not more than 1 record at a time
if len(host_records_remote) != len(host_records_new) + 1:
sys.stderr.write(
"Something went wrong while removing host record, delta > 1: %i -> %i, aborting API call.\n" % (
len(host_records_remote),
len(host_records_new)
)
)
return False
extra_payload = self._list_of_dictionaries_to_numbered_payload(host_records_new)
sld, tld = domain.split(".")
extra_payload.update({
'SLD': sld,
'TLD': tld
})
self._call("namecheap.domains.dns.setHosts", extra_payload)
|
[
"def",
"domains_dns_delHost",
"(",
"self",
",",
"domain",
",",
"host_record",
")",
":",
"host_records_remote",
"=",
"self",
".",
"domains_dns_getHosts",
"(",
"domain",
")",
"print",
"(",
"\"Remote: %i\"",
"%",
"len",
"(",
"host_records_remote",
")",
")",
"host_records_new",
"=",
"[",
"]",
"for",
"r",
"in",
"host_records_remote",
":",
"cond_type",
"=",
"r",
"[",
"\"Type\"",
"]",
"==",
"host_record",
"[",
"\"Type\"",
"]",
"cond_name",
"=",
"r",
"[",
"\"Name\"",
"]",
"==",
"host_record",
"[",
"\"Name\"",
"]",
"cond_addr",
"=",
"r",
"[",
"\"Address\"",
"]",
"==",
"host_record",
"[",
"\"Address\"",
"]",
"if",
"cond_type",
"and",
"cond_name",
"and",
"cond_addr",
":",
"# skipping this record as it is the one we want to delete",
"pass",
"else",
":",
"host_records_new",
".",
"append",
"(",
"r",
")",
"host_records_new",
"=",
"[",
"self",
".",
"_elements_names_fix",
"(",
"x",
")",
"for",
"x",
"in",
"host_records_new",
"]",
"print",
"(",
"\"To set: %i\"",
"%",
"len",
"(",
"host_records_new",
")",
")",
"# Check that we delete not more than 1 record at a time",
"if",
"len",
"(",
"host_records_remote",
")",
"!=",
"len",
"(",
"host_records_new",
")",
"+",
"1",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Something went wrong while removing host record, delta > 1: %i -> %i, aborting API call.\\n\"",
"%",
"(",
"len",
"(",
"host_records_remote",
")",
",",
"len",
"(",
"host_records_new",
")",
")",
")",
"return",
"False",
"extra_payload",
"=",
"self",
".",
"_list_of_dictionaries_to_numbered_payload",
"(",
"host_records_new",
")",
"sld",
",",
"tld",
"=",
"domain",
".",
"split",
"(",
"\".\"",
")",
"extra_payload",
".",
"update",
"(",
"{",
"'SLD'",
":",
"sld",
",",
"'TLD'",
":",
"tld",
"}",
")",
"self",
".",
"_call",
"(",
"\"namecheap.domains.dns.setHosts\"",
",",
"extra_payload",
")"
] |
https://github.com/Bemmu/PyNamecheap/blob/1657852993bb0a7bfdc07c44977c88409bc7753a/namecheap.py#L390-L439
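domains_dns_delHost above is a read-modify-write: fetch every host record, drop the one matching on all three fields, push the remainder back. A standalone sketch of just the filtering step, with plain dicts and no API calls, to make the matching rule explicit:

records = [
    {"Type": "A", "Name": "test", "Address": "127.0.0.1"},
    {"Type": "A", "Name": "www",  "Address": "10.0.0.1"},
]
target = {"Type": "A", "Name": "test", "Address": "127.0.0.1"}

keys = ("Type", "Name", "Address")
kept = [r for r in records if any(r[k] != target[k] for k in keys)]

assert len(kept) == len(records) - 1  # exactly one record removed
print(kept)  # only the www record survives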
|
||
openstack/swift
|
b8d7c3dcb817504dcc0959ba52cc4ed2cf66c100
|
swift/common/ring/builder.py
|
python
|
RingBuilder.get_required_overload
|
(self, weighted=None, wanted=None)
|
return max_overload
|
Returns the minimum overload value required to make the ring maximally
dispersed.
The required overload is the largest percentage change of any single
device from its weighted replicanth to its wanted replicanth (note:
under-weighted devices have a negative percentage change) to achieve
dispersion - that is to say a single device that must be overloaded by
5% is worse than 5 devices in a single tier overloaded by 1%.
|
Returns the minimum overload value required to make the ring maximally
dispersed.
|
[
"Returns",
"the",
"minimum",
"overload",
"value",
"required",
"to",
"make",
"the",
"ring",
"maximally",
"dispersed",
"."
] |
def get_required_overload(self, weighted=None, wanted=None):
"""
Returns the minimum overload value required to make the ring maximally
dispersed.
The required overload is the largest percentage change of any single
device from its weighted replicanth to its wanted replicanth (note:
    under-weighted devices have a negative percentage change) to achieve
dispersion - that is to say a single device that must be overloaded by
5% is worse than 5 devices in a single tier overloaded by 1%.
"""
weighted = weighted or self._build_weighted_replicas_by_tier()
wanted = wanted or self._build_wanted_replicas_by_tier()
max_overload = 0.0
for dev in self._iter_devs():
tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
if not dev['weight']:
if tier not in wanted or not wanted[tier]:
continue
raise exceptions.RingValidationError(
'Device %s has zero weight and '
'should not want any replicas' % (tier,))
required = (wanted[tier] - weighted[tier]) / weighted[tier]
self.logger.debug('%(tier)s wants %(wanted)s and is weighted for '
'%(weight)s so therefore requires %(required)s '
'overload', {'tier': pretty_dev(dev),
'wanted': wanted[tier],
'weight': weighted[tier],
'required': required})
if required > max_overload:
max_overload = required
return max_overload
|
[
"def",
"get_required_overload",
"(",
"self",
",",
"weighted",
"=",
"None",
",",
"wanted",
"=",
"None",
")",
":",
"weighted",
"=",
"weighted",
"or",
"self",
".",
"_build_weighted_replicas_by_tier",
"(",
")",
"wanted",
"=",
"wanted",
"or",
"self",
".",
"_build_wanted_replicas_by_tier",
"(",
")",
"max_overload",
"=",
"0.0",
"for",
"dev",
"in",
"self",
".",
"_iter_devs",
"(",
")",
":",
"tier",
"=",
"(",
"dev",
"[",
"'region'",
"]",
",",
"dev",
"[",
"'zone'",
"]",
",",
"dev",
"[",
"'ip'",
"]",
",",
"dev",
"[",
"'id'",
"]",
")",
"if",
"not",
"dev",
"[",
"'weight'",
"]",
":",
"if",
"tier",
"not",
"in",
"wanted",
"or",
"not",
"wanted",
"[",
"tier",
"]",
":",
"continue",
"raise",
"exceptions",
".",
"RingValidationError",
"(",
"'Device %s has zero weight and '",
"'should not want any replicas'",
"%",
"(",
"tier",
",",
")",
")",
"required",
"=",
"(",
"wanted",
"[",
"tier",
"]",
"-",
"weighted",
"[",
"tier",
"]",
")",
"/",
"weighted",
"[",
"tier",
"]",
"self",
".",
"logger",
".",
"debug",
"(",
"'%(tier)s wants %(wanted)s and is weighted for '",
"'%(weight)s so therefore requires %(required)s '",
"'overload'",
",",
"{",
"'tier'",
":",
"pretty_dev",
"(",
"dev",
")",
",",
"'wanted'",
":",
"wanted",
"[",
"tier",
"]",
",",
"'weight'",
":",
"weighted",
"[",
"tier",
"]",
",",
"'required'",
":",
"required",
"}",
")",
"if",
"required",
">",
"max_overload",
":",
"max_overload",
"=",
"required",
"return",
"max_overload"
] |
https://github.com/openstack/swift/blob/b8d7c3dcb817504dcc0959ba52cc4ed2cf66c100/swift/common/ring/builder.py#L822-L853
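A worked example of the per-device formula in get_required_overload -- required = (wanted - weighted) / weighted, maximised over devices -- with made-up replicanth values:

weighted = {"dev0": 0.50, "dev1": 0.30, "dev2": 0.20}
wanted   = {"dev0": 0.45, "dev1": 0.33, "dev2": 0.22}

max_overload = 0.0
for tier, w in weighted.items():
    required = (wanted[tier] - w) / w
    max_overload = max(max_overload, required)

print(round(max_overload, 3))  # 0.1: dev1 and dev2 each need 10% overload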
|
|
DestructHub/ProjectEuler
|
50637c7b3022b3b9044009338a52e2135575a1cc
|
Problem501/Python/solution_slow_1.py
|
python
|
f_p
|
(n)
|
return last
|
[] |
def f_p(n):
last = 0
for x in xrange(2, n):
if len(d(x)) == 8:
print x, (x - last)
last = x
return last
|
[
"def",
"f_p",
"(",
"n",
")",
":",
"last",
"=",
"0",
"for",
"x",
"in",
"xrange",
"(",
"2",
",",
"n",
")",
":",
"if",
"len",
"(",
"d",
"(",
"x",
")",
")",
"==",
"8",
":",
"print",
"x",
",",
"(",
"x",
"-",
"last",
")",
"last",
"=",
"x",
"return",
"last"
] |
https://github.com/DestructHub/ProjectEuler/blob/50637c7b3022b3b9044009338a52e2135575a1cc/Problem501/Python/solution_slow_1.py#L29-L35
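f_p above depends on a helper d(x) that this record does not include; since Problem 501 concerns numbers with exactly eight divisors, a plausible stand-in returns the full divisor list. A Python 3 sketch under that assumption (the original is Python 2):

def d(x):
    # naive divisor list; slow, but enough to illustrate f_p
    return [i for i in range(1, x + 1) if x % i == 0]

def f_p(n):
    last = 0
    for x in range(2, n):
        if len(d(x)) == 8:
            print(x, x - last)
            last = x
    return last

f_p(100)  # 24, 30, 40, 54, 56, ... each have exactly eight divisors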
|
|||
FederatedAI/FATE
|
32540492623568ecd1afcb367360133616e02fa3
|
python/federatedml/secure_information_retrieval/base_secure_information_retrieval.py
|
python
|
BaseSecureInformationRetrieval._sync_coverage
|
(self, data_instance)
|
guest -> host
:param data_instance:
:return:
|
guest -> host
:param data_instance:
:return:
|
[
"guest",
"-",
">",
"host",
":",
"param",
"data_instance",
":",
":",
"return",
":"
] |
def _sync_coverage(self, data_instance):
"""
guest -> host
:param data_instance:
:return:
"""
pass
|
[
"def",
"_sync_coverage",
"(",
"self",
",",
"data_instance",
")",
":",
"pass"
] |
https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/secure_information_retrieval/base_secure_information_retrieval.py#L166-L172
|
||
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/messaging/v1/service/us_app_to_person.py
|
python
|
UsAppToPersonList.__init__
|
(self, version, messaging_service_sid)
|
Initialize the UsAppToPersonList
:param Version version: Version that contains the resource
:param messaging_service_sid: The SID of the Messaging Service the resource is associated with
:returns: twilio.rest.messaging.v1.service.us_app_to_person.UsAppToPersonList
:rtype: twilio.rest.messaging.v1.service.us_app_to_person.UsAppToPersonList
|
Initialize the UsAppToPersonList
|
[
"Initialize",
"the",
"UsAppToPersonList"
] |
def __init__(self, version, messaging_service_sid):
"""
Initialize the UsAppToPersonList
:param Version version: Version that contains the resource
:param messaging_service_sid: The SID of the Messaging Service the resource is associated with
:returns: twilio.rest.messaging.v1.service.us_app_to_person.UsAppToPersonList
:rtype: twilio.rest.messaging.v1.service.us_app_to_person.UsAppToPersonList
"""
super(UsAppToPersonList, self).__init__(version)
# Path Solution
self._solution = {'messaging_service_sid': messaging_service_sid, }
self._uri = '/Services/{messaging_service_sid}/Compliance/Usa2p'.format(**self._solution)
|
[
"def",
"__init__",
"(",
"self",
",",
"version",
",",
"messaging_service_sid",
")",
":",
"super",
"(",
"UsAppToPersonList",
",",
"self",
")",
".",
"__init__",
"(",
"version",
")",
"# Path Solution",
"self",
".",
"_solution",
"=",
"{",
"'messaging_service_sid'",
":",
"messaging_service_sid",
",",
"}",
"self",
".",
"_uri",
"=",
"'/Services/{messaging_service_sid}/Compliance/Usa2p'",
".",
"format",
"(",
"*",
"*",
"self",
".",
"_solution",
")"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/messaging/v1/service/us_app_to_person.py#L22-L36
|
||
google/brain-tokyo-workshop
|
faf12f6bbae773fbe535c7a6cf357dc662c6c1d8
|
WANNRelease/prettyNeatWann/domain/classify_gym.py
|
python
|
ClassifyEnv.__init__
|
(self, trainSet, target)
|
Data set is a tuple of
[0] input data: [nSamples x nInputs]
[1] labels: [nSamples x 1]
Example data sets are given at the end of this file
|
Data set is a tuple of
[0] input data: [nSamples x nInputs]
[1] labels: [nSamples x 1]
|
[
"Data",
"set",
"is",
"a",
"tuple",
"of",
"[",
"0",
"]",
"input",
"data",
":",
"[",
"nSamples",
"x",
"nInputs",
"]",
"[",
"1",
"]",
"labels",
":",
"[",
"nSamples",
"x",
"1",
"]"
] |
def __init__(self, trainSet, target):
"""
Data set is a tuple of
[0] input data: [nSamples x nInputs]
[1] labels: [nSamples x 1]
Example data sets are given at the end of this file
"""
self.t = 0 # Current batch number
self.t_limit = 0 # Number of batches if you need them
self.batch = 1000 # Number of images per batch
self.seed()
self.viewer = None
self.trainSet = trainSet
self.target = target
nInputs = np.shape(trainSet)[1]
high = np.array([1.0]*nInputs)
self.action_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.observation_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.state = None
self.trainOrder = None
self.currIndx = None
|
[
"def",
"__init__",
"(",
"self",
",",
"trainSet",
",",
"target",
")",
":",
"self",
".",
"t",
"=",
"0",
"# Current batch number",
"self",
".",
"t_limit",
"=",
"0",
"# Number of batches if you need them",
"self",
".",
"batch",
"=",
"1000",
"# Number of images per batch",
"self",
".",
"seed",
"(",
")",
"self",
".",
"viewer",
"=",
"None",
"self",
".",
"trainSet",
"=",
"trainSet",
"self",
".",
"target",
"=",
"target",
"nInputs",
"=",
"np",
".",
"shape",
"(",
"trainSet",
")",
"[",
"1",
"]",
"high",
"=",
"np",
".",
"array",
"(",
"[",
"1.0",
"]",
"*",
"nInputs",
")",
"self",
".",
"action_space",
"=",
"spaces",
".",
"Box",
"(",
"np",
".",
"array",
"(",
"0",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
",",
"np",
".",
"array",
"(",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
")",
"self",
".",
"observation_space",
"=",
"spaces",
".",
"Box",
"(",
"np",
".",
"array",
"(",
"0",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
",",
"np",
".",
"array",
"(",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
")",
"self",
".",
"state",
"=",
"None",
"self",
".",
"trainOrder",
"=",
"None",
"self",
".",
"currIndx",
"=",
"None"
] |
https://github.com/google/brain-tokyo-workshop/blob/faf12f6bbae773fbe535c7a6cf357dc662c6c1d8/WANNRelease/prettyNeatWann/domain/classify_gym.py#L16-L43
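The constructor above builds its Box spaces from scalar 0/1 arrays and never uses the high vector it computes. For contrast, a hedged sketch of the conventional per-dimension form, assuming the classic gym.spaces API:

import numpy as np
from gym import spaces

n_inputs = 4
low = np.zeros(n_inputs, dtype=np.float32)
high = np.ones(n_inputs, dtype=np.float32)

# one bound per dimension, instead of scalar 0/1 arrays
observation_space = spaces.Box(low=low, high=high, dtype=np.float32)
print(observation_space.shape)  # (4,)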
|
||
chribsen/simple-machine-learning-examples
|
dc94e52a4cebdc8bb959ff88b81ff8cfeca25022
|
venv/lib/python2.7/site-packages/pandas/core/base.py
|
python
|
StringMixin.__bytes__
|
(self)
|
return self.__unicode__().encode(encoding, 'replace')
|
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
|
Return a string representation for a particular object.
|
[
"Return",
"a",
"string",
"representation",
"for",
"a",
"particular",
"object",
"."
] |
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
|
[
"def",
"__bytes__",
"(",
"self",
")",
":",
"from",
"pandas",
".",
"core",
".",
"config",
"import",
"get_option",
"encoding",
"=",
"get_option",
"(",
"\"display.encoding\"",
")",
"return",
"self",
".",
"__unicode__",
"(",
")",
".",
"encode",
"(",
"encoding",
",",
"'replace'",
")"
] |
https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/pandas/core/base.py#L53-L63
|
|
anki/vector-python-sdk
|
d61fdb07c6278deba750f987b20441fff2df865f
|
anki_vector/screen.py
|
python
|
dimensions
|
()
|
return SCREEN_WIDTH, SCREEN_HEIGHT
|
Return the dimension (width, height) of the Screen.
.. testcode::
import anki_vector
screen_dimensions = anki_vector.screen.SCREEN_WIDTH, anki_vector.screen.SCREEN_HEIGHT
Returns:
A tuple of ints (width, height)
|
Return the dimension (width, height) of the Screen.
|
[
"Return",
"the",
"dimension",
"(",
"width",
"height",
")",
"of",
"the",
"Screen",
"."
] |
def dimensions():
"""Return the dimension (width, height) of the Screen.
.. testcode::
import anki_vector
screen_dimensions = anki_vector.screen.SCREEN_WIDTH, anki_vector.screen.SCREEN_HEIGHT
Returns:
A tuple of ints (width, height)
"""
return SCREEN_WIDTH, SCREEN_HEIGHT
|
[
"def",
"dimensions",
"(",
")",
":",
"return",
"SCREEN_WIDTH",
",",
"SCREEN_HEIGHT"
] |
https://github.com/anki/vector-python-sdk/blob/d61fdb07c6278deba750f987b20441fff2df865f/anki_vector/screen.py#L39-L51
|
|
cortex-lab/phy
|
9a330b9437a3d0b40a37a201d147224e6e7fb462
|
phy/cluster/supervisor.py
|
python
|
TaskLogger._after_move
|
(self, task, output)
|
Tasks that should follow a move.
|
Tasks that should follow a move.
|
[
"Tasks",
"that",
"should",
"follow",
"a",
"move",
"."
] |
def _after_move(self, task, output):
"""Tasks that should follow a move."""
which = output.metadata_changed
moved = set(self._get_clusters(which))
cluster_ids, next_cluster, similar, next_similar = self.last_state()
cluster_ids = set(cluster_ids or ())
similar = set(similar or ())
# Move best.
if moved <= cluster_ids:
self.enqueue(self.cluster_view, 'next')
# Move similar.
elif moved <= similar:
self.enqueue(self.similarity_view, 'next')
# Move all.
else:
self.enqueue(self.cluster_view, 'next')
self.enqueue(self.similarity_view, 'next')
|
[
"def",
"_after_move",
"(",
"self",
",",
"task",
",",
"output",
")",
":",
"which",
"=",
"output",
".",
"metadata_changed",
"moved",
"=",
"set",
"(",
"self",
".",
"_get_clusters",
"(",
"which",
")",
")",
"cluster_ids",
",",
"next_cluster",
",",
"similar",
",",
"next_similar",
"=",
"self",
".",
"last_state",
"(",
")",
"cluster_ids",
"=",
"set",
"(",
"cluster_ids",
"or",
"(",
")",
")",
"similar",
"=",
"set",
"(",
"similar",
"or",
"(",
")",
")",
"# Move best.",
"if",
"moved",
"<=",
"cluster_ids",
":",
"self",
".",
"enqueue",
"(",
"self",
".",
"cluster_view",
",",
"'next'",
")",
"# Move similar.",
"elif",
"moved",
"<=",
"similar",
":",
"self",
".",
"enqueue",
"(",
"self",
".",
"similarity_view",
",",
"'next'",
")",
"# Move all.",
"else",
":",
"self",
".",
"enqueue",
"(",
"self",
".",
"cluster_view",
",",
"'next'",
")",
"self",
".",
"enqueue",
"(",
"self",
".",
"similarity_view",
",",
"'next'",
")"
] |
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/cluster/supervisor.py#L163-L179
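The dispatch in _after_move hinges on set subset tests (<=). A tiny self-contained demo mirroring its three branches:

moved = {3, 5}
cluster_ids = {1, 3, 5}
similar = {7, 9}

if moved <= cluster_ids:      # every moved cluster was a "best" cluster
    action = "advance cluster view"
elif moved <= similar:        # every moved cluster was a "similar" cluster
    action = "advance similarity view"
else:                         # moved clusters span both views
    action = "advance both views"

print(action)  # advance cluster view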
|
||
thespianpy/Thespian
|
f35e5a74ae99ee3401eb9fc7757620a1cf043ee2
|
thespian/system/transport/TCPTransport.py
|
python
|
TCPTransport.lostRemote
|
(self, rmtaddr)
|
[optional] Called by administrative levels (e.g. convention.py) to
indicate that the indicated remote address is no longer
accessible. This is customarily used only by the Admin in
"Admin Routing" scenarios when the remote is shutdown or
de-registered to allow the transport to cleanup (e.g. close
open sockets, etc.).
This does *not* do anything to remote TXOnly sockets: those
connections were initiated by the remote and should
therefore be dropped by the remote. Dropping those
connections at this point would be harmful, especially
because this is typically called when first reconnecting to
the remote.
|
[optional] Called by administrative levels (e.g. convention.py) to
indicate that the indicated remote address is no longer
accessible. This is customarily used only by the Admin in
"Admin Routing" scenarios when the remote is shutdown or
de-registered to allow the transport to cleanup (e.g. close
open sockets, etc.).
|
[
"[",
"optional",
"]",
"Called",
"by",
"adminstrative",
"levels",
"(",
"e",
".",
"g",
".",
"convention",
".",
"py",
")",
"to",
"indicate",
"that",
"the",
"indicated",
"remote",
"address",
"is",
"no",
"longer",
"accessible",
".",
"This",
"is",
"customarily",
"used",
"only",
"by",
"the",
"Admin",
"in",
"Admin",
"Routing",
"scenarios",
"when",
"the",
"remote",
"is",
"shutdown",
"or",
"de",
"-",
"registered",
"to",
"allow",
"the",
"transport",
"to",
"cleanup",
"(",
"e",
".",
"g",
".",
"close",
"open",
"sockets",
"etc",
".",
")",
"."
] |
def lostRemote(self, rmtaddr):
"""[optional] Called by adminstrative levels (e.g. convention.py) to
indicate that the indicated remote address is no longer
accessible. This is customarily used only by the Admin in
"Admin Routing" scenarios when the remote is shutdown or
de-registered to allow the transport to cleanup (e.g. close
open sockets, etc.).
This does *not* do anything to remote TXOnly sockets: those
connections were initiated by the remote and should
therefore be dropped by the remote. Dropping those
connections at this point would be harmful, especially
because this is typically called when first reconnecting to
the remote.
"""
if isinstance(rmtaddr.addressDetails, TXOnlyAdminTCPv4ActorAddress):
return
if hasattr(self, '_openSockets'):
for rmvkey in [each
for each in self._openSockets
if rmtaddr.addressDetails.isSameSystem(
self._openSockets[each].rmtaddr)]:
_safeSocketShutdown(self._openSockets[rmvkey])
del self._openSockets[rmvkey]
for each in [i for i in self._transmitIntents
if rmtaddr.addressDetails.isSameSystem(
self._transmitIntents[i].targetAddr)]:
self._cancel_fd_ops(each)
for each in [i for i,v in self._incomingSockets.items()
if rmtaddr.addressDetails.isSameSystem(
v.fromAddress
if v.fromAddress.addressDetails else
v.socket)]:
self._cancel_fd_ops(each)
|
[
"def",
"lostRemote",
"(",
"self",
",",
"rmtaddr",
")",
":",
"if",
"isinstance",
"(",
"rmtaddr",
".",
"addressDetails",
",",
"TXOnlyAdminTCPv4ActorAddress",
")",
":",
"return",
"if",
"hasattr",
"(",
"self",
",",
"'_openSockets'",
")",
":",
"for",
"rmvkey",
"in",
"[",
"each",
"for",
"each",
"in",
"self",
".",
"_openSockets",
"if",
"rmtaddr",
".",
"addressDetails",
".",
"isSameSystem",
"(",
"self",
".",
"_openSockets",
"[",
"each",
"]",
".",
"rmtaddr",
")",
"]",
":",
"_safeSocketShutdown",
"(",
"self",
".",
"_openSockets",
"[",
"rmvkey",
"]",
")",
"del",
"self",
".",
"_openSockets",
"[",
"rmvkey",
"]",
"for",
"each",
"in",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"_transmitIntents",
"if",
"rmtaddr",
".",
"addressDetails",
".",
"isSameSystem",
"(",
"self",
".",
"_transmitIntents",
"[",
"i",
"]",
".",
"targetAddr",
")",
"]",
":",
"self",
".",
"_cancel_fd_ops",
"(",
"each",
")",
"for",
"each",
"in",
"[",
"i",
"for",
"i",
",",
"v",
"in",
"self",
".",
"_incomingSockets",
".",
"items",
"(",
")",
"if",
"rmtaddr",
".",
"addressDetails",
".",
"isSameSystem",
"(",
"v",
".",
"fromAddress",
"if",
"v",
".",
"fromAddress",
".",
"addressDetails",
"else",
"v",
".",
"socket",
")",
"]",
":",
"self",
".",
"_cancel_fd_ops",
"(",
"each",
")"
] |
https://github.com/thespianpy/Thespian/blob/f35e5a74ae99ee3401eb9fc7757620a1cf043ee2/thespian/system/transport/TCPTransport.py#L563-L597
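lostRemote collects matching keys into a list before deleting, because removing entries from a dict while iterating over it raises RuntimeError. The pattern in isolation:

open_sockets = {"a": 1, "b": 2, "c": 3}

def is_lost(key):
    return key != "b"  # stand-in for the isSameSystem() test

# snapshot the matching keys, then mutate the dict safely
for key in [k for k in open_sockets if is_lost(k)]:
    del open_sockets[key]

print(open_sockets)  # {'b': 2}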
|
||
oracle/graalpython
|
577e02da9755d916056184ec441c26e00b70145c
|
graalpython/lib-python/3/tkinter/ttk.py
|
python
|
Panedwindow.pane
|
(self, pane, option=None, **kw)
|
return _val_or_dict(self.tk, kw, self._w, "pane", pane)
|
Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
|
Query or modify the options of the specified pane.
|
[
"Query",
"or",
"modify",
"the",
"options",
"of",
"the",
"specified",
"pane",
"."
] |
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "pane", pane)
|
[
"def",
"pane",
"(",
"self",
",",
"pane",
",",
"option",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"if",
"option",
"is",
"not",
"None",
":",
"kw",
"[",
"option",
"]",
"=",
"None",
"return",
"_val_or_dict",
"(",
"self",
".",
"tk",
",",
"kw",
",",
"self",
".",
"_w",
",",
"\"pane\"",
",",
"pane",
")"
] |
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/tkinter/ttk.py#L969-L978
|
|
dimagi/commcare-hq
|
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
|
corehq/apps/case_search/filter_dsl.py
|
python
|
build_filter_from_xpath
|
(domain, xpath, fuzzy=False)
|
[] |
def build_filter_from_xpath(domain, xpath, fuzzy=False):
error_message = _(
"We didn't understand what you were trying to do with {}. "
"Please try reformatting your query. "
"The operators we accept are: {}"
)
try:
return build_filter_from_ast(domain, parse_xpath(xpath), fuzzy=fuzzy)
except TypeError as e:
text_error = re.search(r"Unknown text '(.+)'", str(e))
if text_error:
# This often happens if there is a bad operator (e.g. a ~ b)
bad_part = text_error.groups()[0]
raise CaseFilterError(error_message.format(bad_part, ", ".join(ALL_OPERATORS)), bad_part)
raise CaseFilterError(_("Malformed search query"), None)
except RuntimeError as e:
# eulxml passes us string errors from YACC
lex_token_error = re.search(r"LexToken\((\w+),\w?'(.+)'", str(e))
if lex_token_error:
bad_part = lex_token_error.groups()[1]
raise CaseFilterError(error_message.format(bad_part, ", ".join(ALL_OPERATORS)), bad_part)
raise CaseFilterError(_("Malformed search query"), None)
|
[
"def",
"build_filter_from_xpath",
"(",
"domain",
",",
"xpath",
",",
"fuzzy",
"=",
"False",
")",
":",
"error_message",
"=",
"_",
"(",
"\"We didn't understand what you were trying to do with {}. \"",
"\"Please try reformatting your query. \"",
"\"The operators we accept are: {}\"",
")",
"try",
":",
"return",
"build_filter_from_ast",
"(",
"domain",
",",
"parse_xpath",
"(",
"xpath",
")",
",",
"fuzzy",
"=",
"fuzzy",
")",
"except",
"TypeError",
"as",
"e",
":",
"text_error",
"=",
"re",
".",
"search",
"(",
"r\"Unknown text '(.+)'\"",
",",
"str",
"(",
"e",
")",
")",
"if",
"text_error",
":",
"# This often happens if there is a bad operator (e.g. a ~ b)",
"bad_part",
"=",
"text_error",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"raise",
"CaseFilterError",
"(",
"error_message",
".",
"format",
"(",
"bad_part",
",",
"\", \"",
".",
"join",
"(",
"ALL_OPERATORS",
")",
")",
",",
"bad_part",
")",
"raise",
"CaseFilterError",
"(",
"_",
"(",
"\"Malformed search query\"",
")",
",",
"None",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"# eulxml passes us string errors from YACC",
"lex_token_error",
"=",
"re",
".",
"search",
"(",
"r\"LexToken\\((\\w+),\\w?'(.+)'\"",
",",
"str",
"(",
"e",
")",
")",
"if",
"lex_token_error",
":",
"bad_part",
"=",
"lex_token_error",
".",
"groups",
"(",
")",
"[",
"1",
"]",
"raise",
"CaseFilterError",
"(",
"error_message",
".",
"format",
"(",
"bad_part",
",",
"\", \"",
".",
"join",
"(",
"ALL_OPERATORS",
")",
")",
",",
"bad_part",
")",
"raise",
"CaseFilterError",
"(",
"_",
"(",
"\"Malformed search query\"",
")",
",",
"None",
")"
] |
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/case_search/filter_dsl.py#L248-L269
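build_filter_from_xpath recovers the offending query fragment by regex-matching the parser's error text. A standalone demo of both patterns; the error strings are fabricated to match the shapes the code expects:

import re

type_err = "Unknown text 'a ~ b' at position 3"
m = re.search(r"Unknown text '(.+)'", type_err)
print(m.groups()[0])  # a ~ b

yacc_err = "Syntax error at LexToken(TILDE,'~',1,5)"
m = re.search(r"LexToken\((\w+),\w?'(.+)'", yacc_err)
print(m.groups()[1])  # ~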
|
||||
elastic/eland
|
72856e2c3f827a0b71d140323009a7a9a3df6e1d
|
eland/dataframe.py
|
python
|
DataFrame.mode
|
(
self,
numeric_only: bool = False,
dropna: bool = True,
es_size: int = 10,
)
|
return self._query_compiler.mode(
numeric_only=numeric_only, dropna=True, is_dataframe=True, es_size=es_size
)
|
Calculate mode of a DataFrame
Parameters
----------
numeric_only: {True, False} Default is False
Which datatype to be returned
- True: Returns all numeric or timestamp columns
- False: Returns all columns
dropna: {True, False} Default is True
- True: Don’t consider counts of NaN/NaT.
- False: Consider counts of NaN/NaT.
es_size: default 10
number of rows to be returned if mode has multiple values
See Also
--------
:pandas_api_docs:`pandas.DataFrame.mode`
Examples
--------
>>> ed_ecommerce = ed.DataFrame('http://localhost:9200', 'ecommerce')
>>> ed_df = ed_ecommerce.filter(["total_quantity", "geoip.city_name", "customer_birth_date", "day_of_week", "taxful_total_price"])
>>> ed_df.mode(numeric_only=False)
total_quantity geoip.city_name customer_birth_date day_of_week taxful_total_price
0 2 New York NaT Thursday 53.98
>>> ed_df.mode(numeric_only=True)
total_quantity taxful_total_price
0 2 53.98
>>> ed_df = ed_ecommerce.filter(["products.tax_amount","order_date"])
>>> ed_df.mode()
products.tax_amount order_date
0 0.0 2016-12-02 20:36:58
1 NaN 2016-12-04 23:44:10
2 NaN 2016-12-08 06:21:36
3 NaN 2016-12-08 09:38:53
4 NaN 2016-12-12 11:38:24
5 NaN 2016-12-12 19:46:34
6 NaN 2016-12-14 18:00:00
7 NaN 2016-12-15 11:38:24
8 NaN 2016-12-22 19:39:22
9 NaN 2016-12-24 06:21:36
>>> ed_df.mode(es_size = 3)
products.tax_amount order_date
0 0.0 2016-12-02 20:36:58
1 NaN 2016-12-04 23:44:10
2 NaN 2016-12-08 06:21:36
|
Calculate mode of a DataFrame
|
[
"Calculate",
"mode",
"of",
"a",
"DataFrame"
] |
def mode(
self,
numeric_only: bool = False,
dropna: bool = True,
es_size: int = 10,
) -> pd.DataFrame:
"""
Calculate mode of a DataFrame
Parameters
----------
numeric_only: {True, False} Default is False
Which datatype to be returned
- True: Returns all numeric or timestamp columns
- False: Returns all columns
dropna: {True, False} Default is True
- True: Don’t consider counts of NaN/NaT.
- False: Consider counts of NaN/NaT.
es_size: default 10
number of rows to be returned if mode has multiple values
See Also
--------
:pandas_api_docs:`pandas.DataFrame.mode`
Examples
--------
>>> ed_ecommerce = ed.DataFrame('http://localhost:9200', 'ecommerce')
>>> ed_df = ed_ecommerce.filter(["total_quantity", "geoip.city_name", "customer_birth_date", "day_of_week", "taxful_total_price"])
>>> ed_df.mode(numeric_only=False)
total_quantity geoip.city_name customer_birth_date day_of_week taxful_total_price
0 2 New York NaT Thursday 53.98
>>> ed_df.mode(numeric_only=True)
total_quantity taxful_total_price
0 2 53.98
>>> ed_df = ed_ecommerce.filter(["products.tax_amount","order_date"])
>>> ed_df.mode()
products.tax_amount order_date
0 0.0 2016-12-02 20:36:58
1 NaN 2016-12-04 23:44:10
2 NaN 2016-12-08 06:21:36
3 NaN 2016-12-08 09:38:53
4 NaN 2016-12-12 11:38:24
5 NaN 2016-12-12 19:46:34
6 NaN 2016-12-14 18:00:00
7 NaN 2016-12-15 11:38:24
8 NaN 2016-12-22 19:39:22
9 NaN 2016-12-24 06:21:36
>>> ed_df.mode(es_size = 3)
products.tax_amount order_date
0 0.0 2016-12-02 20:36:58
1 NaN 2016-12-04 23:44:10
2 NaN 2016-12-08 06:21:36
"""
# TODO dropna=False
return self._query_compiler.mode(
numeric_only=numeric_only, dropna=True, is_dataframe=True, es_size=es_size
)
|
[
"def",
"mode",
"(",
"self",
",",
"numeric_only",
":",
"bool",
"=",
"False",
",",
"dropna",
":",
"bool",
"=",
"True",
",",
"es_size",
":",
"int",
"=",
"10",
",",
")",
"->",
"pd",
".",
"DataFrame",
":",
"# TODO dropna=False",
"return",
"self",
".",
"_query_compiler",
".",
"mode",
"(",
"numeric_only",
"=",
"numeric_only",
",",
"dropna",
"=",
"True",
",",
"is_dataframe",
"=",
"True",
",",
"es_size",
"=",
"es_size",
")"
] |
https://github.com/elastic/eland/blob/72856e2c3f827a0b71d140323009a7a9a3df6e1d/eland/dataframe.py#L1760-L1820
|
|
trustedsec/hate_crack
|
b1d7e39cd1dd963c201a2c0dfdab997ab7d4d69b
|
PACK/enchant/checker/wxSpellCheckerDialog.py
|
python
|
wxSpellCheckerDialog.OnIgnoreAll
|
(self, evt)
|
Callback for the "ignore all" button.
|
Callback for the "ignore all" button.
|
[
"Callback",
"for",
"the",
"ignore",
"all",
"button",
"."
] |
def OnIgnoreAll(self, evt):
"""Callback for the "ignore all" button."""
self._checker.ignore_always()
self.Advance()
|
[
"def",
"OnIgnoreAll",
"(",
"self",
",",
"evt",
")",
":",
"self",
".",
"_checker",
".",
"ignore_always",
"(",
")",
"self",
".",
"Advance",
"(",
")"
] |
https://github.com/trustedsec/hate_crack/blob/b1d7e39cd1dd963c201a2c0dfdab997ab7d4d69b/PACK/enchant/checker/wxSpellCheckerDialog.py#L211-L214
|
||
reel2bits/reel2bits
|
007ab4f7d1c77d426e1b1b8b51ea57eac6501e13
|
api/commands/users.py
|
python
|
demote_mod
|
(username)
|
Remove moderator role from user.
|
Remove moderator role from user.
|
[
"Remove",
"moderator",
"role",
"from",
"user",
"."
] |
def demote_mod(username):
"""
Remove moderator role from user.
"""
u = User.query.filter(User.name == username, User.local.is_(True)).first()
if not u:
print(f"Cannot find local user with username '{username}'")
exit(1)
mod_role = Role.query.filter(Role.name == "moderator").first()
if not mod_role:
print("Cannot find a role named 'moderator'")
exit(1)
u.roles.remove(mod_role)
db.session.commit()
roles_str = ", ".join([r.name for r in u.roles])
print(f"User '{username}' now have roles: {roles_str}")
|
[
"def",
"demote_mod",
"(",
"username",
")",
":",
"u",
"=",
"User",
".",
"query",
".",
"filter",
"(",
"User",
".",
"name",
"==",
"username",
",",
"User",
".",
"local",
".",
"is_",
"(",
"True",
")",
")",
".",
"first",
"(",
")",
"if",
"not",
"u",
":",
"print",
"(",
"f\"Cannot find local user with username '{username}'\"",
")",
"exit",
"(",
"1",
")",
"mod_role",
"=",
"Role",
".",
"query",
".",
"filter",
"(",
"Role",
".",
"name",
"==",
"\"moderator\"",
")",
".",
"first",
"(",
")",
"if",
"not",
"mod_role",
":",
"print",
"(",
"\"Cannot find a role named 'moderator'\"",
")",
"exit",
"(",
"1",
")",
"u",
".",
"roles",
".",
"remove",
"(",
"mod_role",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"roles_str",
"=",
"\", \"",
".",
"join",
"(",
"[",
"r",
".",
"name",
"for",
"r",
"in",
"u",
".",
"roles",
"]",
")",
"print",
"(",
"f\"User '{username}' now have roles: {roles_str}\"",
")"
] |
https://github.com/reel2bits/reel2bits/blob/007ab4f7d1c77d426e1b1b8b51ea57eac6501e13/api/commands/users.py#L124-L141
|
||
pytrainer/pytrainer
|
66f3e2b30b48c66e03111248faffc43b8e31c583
|
pytrainer/core/activity.py
|
python
|
Activity.get_value_f
|
(self, param, format=None)
|
return result
|
Function to return a value formatted as a string
- takes into account US/metric
- also appends units if required
|
Function to return a value formatted as a string
- takes into account US/metric
- also appends units if required
|
[
"Function",
"to",
"return",
"a",
"value",
"formated",
"as",
"a",
"string",
"-",
"takes",
"into",
"account",
"US",
"/",
"metric",
"-",
"also",
"appends",
"units",
"if",
"required"
] |
def get_value_f(self, param, format=None):
    ''' Function to return a value formatted as a string
- takes into account US/metric
- also appends units if required
'''
value = self.get_value(param)
if not value:
#Return blank string if value is None or 0
return ""
if format is not None:
result = format % value
else:
result = str(value)
return result
|
[
"def",
"get_value_f",
"(",
"self",
",",
"param",
",",
"format",
"=",
"None",
")",
":",
"value",
"=",
"self",
".",
"get_value",
"(",
"param",
")",
"if",
"not",
"value",
":",
"#Return blank string if value is None or 0",
"return",
"\"\"",
"if",
"format",
"is",
"not",
"None",
":",
"result",
"=",
"format",
"%",
"value",
"else",
":",
"result",
"=",
"str",
"(",
"value",
")",
"return",
"result"
] |
https://github.com/pytrainer/pytrainer/blob/66f3e2b30b48c66e03111248faffc43b8e31c583/pytrainer/core/activity.py#L651-L664
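get_value_f falls back to old-style % formatting when a format string is supplied. A quick demo of that branch, with a hypothetical distance value:

def format_value(value, format=None):
    if not value:
        return ""  # None and 0 both come back blank
    return format % value if format is not None else str(value)

print(format_value(12.3456, "%0.2f"))  # 12.35
print(format_value(12.3456))           # 12.3456
print(format_value(0))                 # prints an empty string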
|
|
dmlc/dgl
|
8d14a739bc9e446d6c92ef83eafe5782398118de
|
examples/pytorch/gxn/layers.py
|
python
|
GraphPool.forward
|
(self, graph:DGLGraph, feat:Tensor,
select_idx:Tensor, non_select_idx:Optional[Tensor]=None,
scores:Optional[Tensor]=None, pool_graph=False)
|
Description
-----------
Perform graph pooling.
Parameters
----------
graph : dgl.DGLGraph
The input graph
feat : torch.Tensor
The input node feature
select_idx : torch.Tensor
The index in fine graph of node from
coarse graph; this is obtained from
previous graph pooling layers.
non_select_idx : torch.Tensor, optional
The index that is not included in the output graph.
default: :obj:`None`
scores : torch.Tensor, optional
Scores for nodes used for pooling and scaling.
default: :obj:`None`
pool_graph : bool, optional
Whether to perform graph pooling on graph topology.
default: :obj:`False`
|
Description
-----------
Perform graph pooling.
|
[
"Description",
"-----------",
"Perform",
"graph",
"pooling",
"."
] |
def forward(self, graph:DGLGraph, feat:Tensor,
select_idx:Tensor, non_select_idx:Optional[Tensor]=None,
scores:Optional[Tensor]=None, pool_graph=False):
"""
Description
-----------
Perform graph pooling.
Parameters
----------
graph : dgl.DGLGraph
The input graph
feat : torch.Tensor
The input node feature
select_idx : torch.Tensor
The index in fine graph of node from
        coarse graph; this is obtained from
previous graph pooling layers.
non_select_idx : torch.Tensor, optional
        The index that is not included in the output graph.
default: :obj:`None`
scores : torch.Tensor, optional
Scores for nodes used for pooling and scaling.
default: :obj:`None`
pool_graph : bool, optional
        Whether to perform graph pooling on graph topology.
default: :obj:`False`
"""
if self.use_gcn:
feat = self.down_sample_gcn(graph, feat)
feat = feat[select_idx]
if scores is not None:
feat = feat * scores.unsqueeze(-1)
if pool_graph:
num_node_batch = graph.batch_num_nodes()
graph = dgl.node_subgraph(graph, select_idx)
graph.set_batch_num_nodes(num_node_batch)
return feat, graph
else:
return feat
|
[
"def",
"forward",
"(",
"self",
",",
"graph",
":",
"DGLGraph",
",",
"feat",
":",
"Tensor",
",",
"select_idx",
":",
"Tensor",
",",
"non_select_idx",
":",
"Optional",
"[",
"Tensor",
"]",
"=",
"None",
",",
"scores",
":",
"Optional",
"[",
"Tensor",
"]",
"=",
"None",
",",
"pool_graph",
"=",
"False",
")",
":",
"if",
"self",
".",
"use_gcn",
":",
"feat",
"=",
"self",
".",
"down_sample_gcn",
"(",
"graph",
",",
"feat",
")",
"feat",
"=",
"feat",
"[",
"select_idx",
"]",
"if",
"scores",
"is",
"not",
"None",
":",
"feat",
"=",
"feat",
"*",
"scores",
".",
"unsqueeze",
"(",
"-",
"1",
")",
"if",
"pool_graph",
":",
"num_node_batch",
"=",
"graph",
".",
"batch_num_nodes",
"(",
")",
"graph",
"=",
"dgl",
".",
"node_subgraph",
"(",
"graph",
",",
"select_idx",
")",
"graph",
".",
"set_batch_num_nodes",
"(",
"num_node_batch",
")",
"return",
"feat",
",",
"graph",
"else",
":",
"return",
"feat"
] |
https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/examples/pytorch/gxn/layers.py#L212-L253
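The core of GraphPool.forward, stripped of DGL: select the kept rows of the feature matrix, then scale them by per-node scores. A torch-only sketch with illustrative shapes:

import torch

feat = torch.arange(12, dtype=torch.float32).reshape(4, 3)  # 4 nodes, 3 features
select_idx = torch.tensor([0, 2])                           # nodes kept by pooling
scores = torch.tensor([0.5, 2.0])                           # one score per kept node

pooled = feat[select_idx] * scores.unsqueeze(-1)  # broadcast over the feature dim
print(pooled.shape)  # torch.Size([2, 3]); rows 0 and 2, scaled by 0.5 and 2.0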
|
||
Tencent/PocketFlow
|
53b82cba5a34834400619e7c335a23995d45c2a6
|
examples/convnet_at_fmnist.py
|
python
|
ModelHelper.build_dataset_train
|
(self, enbl_trn_val_split=False)
|
return self.dataset_train.build(enbl_trn_val_split)
|
Build the data subset for training, usually with data augmentation.
|
Build the data subset for training, usually with data augmentation.
|
[
"Build",
"the",
"data",
"subset",
"for",
"training",
"usually",
"with",
"data",
"augmentation",
"."
] |
def build_dataset_train(self, enbl_trn_val_split=False):
"""Build the data subset for training, usually with data augmentation."""
return self.dataset_train.build(enbl_trn_val_split)
|
[
"def",
"build_dataset_train",
"(",
"self",
",",
"enbl_trn_val_split",
"=",
"False",
")",
":",
"return",
"self",
".",
"dataset_train",
".",
"build",
"(",
"enbl_trn_val_split",
")"
] |
https://github.com/Tencent/PocketFlow/blob/53b82cba5a34834400619e7c335a23995d45c2a6/examples/convnet_at_fmnist.py#L82-L85
|
|
home-assistant/core
|
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
|
homeassistant/components/rachio/switch.py
|
python
|
RachioSwitch.is_on
|
(self)
|
return self._state
|
Return whether the switch is currently on.
|
Return whether the switch is currently on.
|
[
"Return",
"whether",
"the",
"switch",
"is",
"currently",
"on",
"."
] |
def is_on(self) -> bool:
"""Return whether the switch is currently on."""
return self._state
|
[
"def",
"is_on",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"self",
".",
"_state"
] |
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/rachio/switch.py#L191-L193
|
|
quodlibet/quodlibet
|
e3099c89f7aa6524380795d325cc14630031886c
|
quodlibet/qltk/songlistcolumns.py
|
python
|
SongListColumn._needs_update
|
(self, value)
|
return True
|
Call to check if the last passed value was the same.
This is used to reduce formatting if the input is the same
either because of redraws or all columns have the same value
|
Call to check if the last passed value was the same.
|
[
"Call",
"to",
"check",
"if",
"the",
"last",
"passed",
"value",
"was",
"the",
"same",
"."
] |
def _needs_update(self, value):
"""Call to check if the last passed value was the same.
This is used to reduce formatting if the input is the same
    either because of redraws or because all columns have the same value
"""
if self._last_rendered == value:
return False
self._last_rendered = value
return True
|
[
"def",
"_needs_update",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_last_rendered",
"==",
"value",
":",
"return",
"False",
"self",
".",
"_last_rendered",
"=",
"value",
"return",
"True"
] |
https://github.com/quodlibet/quodlibet/blob/e3099c89f7aa6524380795d325cc14630031886c/quodlibet/qltk/songlistcolumns.py#L125-L135
|
|
IJDykeman/wangTiles
|
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
|
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py
|
python
|
LegacyMetadata.read
|
(self, filepath)
|
Read the metadata values from a file path.
|
Read the metadata values from a file path.
|
[
"Read",
"the",
"metadata",
"values",
"from",
"a",
"file",
"path",
"."
] |
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
|
[
"def",
"read",
"(",
"self",
",",
"filepath",
")",
":",
"fp",
"=",
"codecs",
".",
"open",
"(",
"filepath",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"try",
":",
"self",
".",
"read_file",
"(",
"fp",
")",
"finally",
":",
"fp",
".",
"close",
"(",
")"
] |
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py#L352-L358
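The try/finally pair in LegacyMetadata.read is exactly what a with-statement provides. An equivalent modern sketch; read_file here is a stand-in callable, not the distlib method:

import codecs

def read_metadata(filepath, read_file):
    # the with-block closes the file even if read_file() raises,
    # matching the try/finally above
    with codecs.open(filepath, 'r', encoding='utf-8') as fp:
        read_file(fp)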
|
||
munki/munki
|
4b778f0e5a73ed3df9eb62d93c5227efb29eebe3
|
code/client/munkilib/installer/core.py
|
python
|
skipped_items_that_require_this
|
(item, skipped_items)
|
return matched_skipped_items
|
Looks for items in the skipped_items that require or are update_for
the current item. Returns a list of matches.
|
Looks for items in the skipped_items that require or are update_for
the current item. Returns a list of matches.
|
[
"Looks",
"for",
"items",
"in",
"the",
"skipped_items",
"that",
"require",
"or",
"are",
"update_for",
"the",
"current",
"item",
".",
"Returns",
"a",
"list",
"of",
"matches",
"."
] |
def skipped_items_that_require_this(item, skipped_items):
'''Looks for items in the skipped_items that require or are update_for
the current item. Returns a list of matches.'''
# shortcut -- if we have no skipped items, just return an empty list
# also reduces log noise in the common case
if not skipped_items:
return []
display.display_debug1(
'Checking for skipped items that require %s' % item['name'])
matched_skipped_items = []
for skipped_item in skipped_items:
        # get a copy of the prerequisites for this skipped_item; copying
        # avoids extend() mutating the item's own 'requires' list
        prerequisites = list(skipped_item.get('requires', []))
        prerequisites.extend(skipped_item.get('update_for', []))
display.display_debug1(
'%s has these prerequisites: %s'
% (skipped_item['name'], ', '.join(prerequisites)))
for prereq in prerequisites:
(prereq_name, dummy_version) = catalogs.split_name_and_version(
prereq)
if prereq_name == item['name']:
matched_skipped_items.append(skipped_item['name'])
return matched_skipped_items
|
[
"def",
"skipped_items_that_require_this",
"(",
"item",
",",
"skipped_items",
")",
":",
"# shortcut -- if we have no skipped items, just return an empty list",
"# also reduces log noise in the common case",
"if",
"not",
"skipped_items",
":",
"return",
"[",
"]",
"display",
".",
"display_debug1",
"(",
"'Checking for skipped items that require %s'",
"%",
"item",
"[",
"'name'",
"]",
")",
"matched_skipped_items",
"=",
"[",
"]",
"for",
"skipped_item",
"in",
"skipped_items",
":",
"# get list of prerequisites for this skipped_item",
"prerequisites",
"=",
"skipped_item",
".",
"get",
"(",
"'requires'",
",",
"[",
"]",
")",
"prerequisites",
".",
"extend",
"(",
"skipped_item",
".",
"get",
"(",
"'update_for'",
",",
"[",
"]",
")",
")",
"display",
".",
"display_debug1",
"(",
"'%s has these prerequisites: %s'",
"%",
"(",
"skipped_item",
"[",
"'name'",
"]",
",",
"', '",
".",
"join",
"(",
"prerequisites",
")",
")",
")",
"for",
"prereq",
"in",
"prerequisites",
":",
"(",
"prereq_name",
",",
"dummy_version",
")",
"=",
"catalogs",
".",
"split_name_and_version",
"(",
"prereq",
")",
"if",
"prereq_name",
"==",
"item",
"[",
"'name'",
"]",
":",
"matched_skipped_items",
".",
"append",
"(",
"skipped_item",
"[",
"'name'",
"]",
")",
"return",
"matched_skipped_items"
] |
https://github.com/munki/munki/blob/4b778f0e5a73ed3df9eb62d93c5227efb29eebe3/code/client/munkilib/installer/core.py#L442-L467
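The copy of the 'requires' list matters because dict.get(key, default) returns the stored list itself whenever the key exists, so extend() would otherwise mutate the item across calls. A small demo of the hazard:

item = {"requires": ["base-pkg"]}

# no copy: .get() hands back the stored list, extend() changes the item
prereqs = item.get("requires", [])
prereqs.extend(["extra-pkg"])
print(item["requires"])  # ['base-pkg', 'extra-pkg']

# with a copy the item is left intact
item = {"requires": ["base-pkg"]}
prereqs = list(item.get("requires", []))
prereqs.extend(["extra-pkg"])
print(item["requires"])  # ['base-pkg']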
|
|
lovelylain/pyctp
|
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
|
example/ctp/futures/ApiStruct.py
|
python
|
QrySuperUser.__init__
|
(self, UserID='')
|
[] |
def __init__(self, UserID=''):
    self.UserID = UserID
|
[
"def",
"__init__",
"(",
"self",
",",
"UserID",
"=",
"''",
")",
":",
"self",
".",
"UserID",
"=",
"''"
] |
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/example/ctp/futures/ApiStruct.py#L3132-L3133
|
||||
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
WebMirror/management/rss_parser_funcs/feed_parse_extractElliPhantomhive.py
|
python
|
extractElliPhantomhive
|
(item)
|
return False
|
Parser for 'Elli Phantomhive♥'
|
Parser for 'Elli Phantomhive♥'
|
[
"Parser",
"for",
"Elli",
"Phantomhive♥"
] |
def extractElliPhantomhive(item):
"""
Parser for 'Elli Phantomhive♥'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
|
[
"def",
"extractElliPhantomhive",
"(",
"item",
")",
":",
"vol",
",",
"chp",
",",
"frag",
",",
"postfix",
"=",
"extractVolChapterFragmentPostfix",
"(",
"item",
"[",
"'title'",
"]",
")",
"if",
"not",
"(",
"chp",
"or",
"vol",
")",
"or",
"'preview'",
"in",
"item",
"[",
"'title'",
"]",
".",
"lower",
"(",
")",
":",
"return",
"None",
"if",
"'WATTT'",
"in",
"item",
"[",
"'tags'",
"]",
":",
"return",
"buildReleaseMessageWithType",
"(",
"item",
",",
"'WATTT'",
",",
"vol",
",",
"chp",
",",
"frag",
"=",
"frag",
",",
"postfix",
"=",
"postfix",
")",
"return",
"False"
] |
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractElliPhantomhive.py#L1-L10
|
|
yzhao062/pyod
|
13b0cd5f50d5ea5c5321da88c46232ae6f24dff7
|
pyod/utils/data.py
|
python
|
check_consistent_shape
|
(X_train, y_train, X_test, y_test, y_train_pred,
y_test_pred)
|
return X_train, y_train, X_test, y_test, y_train_pred, y_test_pred
|
Internal shape to check input data shapes are consistent.
Parameters
----------
X_train : numpy array of shape (n_samples, n_features)
The training samples.
y_train : list or array of shape (n_samples,)
The ground truth of training samples.
X_test : numpy array of shape (n_samples, n_features)
The test samples.
y_test : list or array of shape (n_samples,)
The ground truth of test samples.
y_train_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the training samples.
y_test_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the test samples.
Returns
-------
X_train : numpy array of shape (n_samples, n_features)
The training samples.
y_train : list or array of shape (n_samples,)
The ground truth of training samples.
X_test : numpy array of shape (n_samples, n_features)
The test samples.
y_test : list or array of shape (n_samples,)
The ground truth of test samples.
y_train_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the training samples.
y_test_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the test samples.
|
Internal shape to check input data shapes are consistent.
|
[
"Internal",
"shape",
"to",
"check",
"input",
"data",
"shapes",
"are",
"consistent",
"."
] |
def check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,
y_test_pred):
"""Internal shape to check input data shapes are consistent.
Parameters
----------
X_train : numpy array of shape (n_samples, n_features)
The training samples.
y_train : list or array of shape (n_samples,)
The ground truth of training samples.
X_test : numpy array of shape (n_samples, n_features)
The test samples.
y_test : list or array of shape (n_samples,)
The ground truth of test samples.
y_train_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the training samples.
y_test_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the test samples.
Returns
-------
X_train : numpy array of shape (n_samples, n_features)
The training samples.
y_train : list or array of shape (n_samples,)
The ground truth of training samples.
X_test : numpy array of shape (n_samples, n_features)
The test samples.
y_test : list or array of shape (n_samples,)
The ground truth of test samples.
y_train_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the training samples.
y_test_pred : numpy array of shape (n_samples, n_features)
The predicted binary labels of the test samples.
"""
# check input data shapes are consistent
X_train, y_train = check_X_y(X_train, y_train)
X_test, y_test = check_X_y(X_test, y_test)
y_test_pred = column_or_1d(y_test_pred)
y_train_pred = column_or_1d(y_train_pred)
check_consistent_length(y_train, y_train_pred)
check_consistent_length(y_test, y_test_pred)
if X_train.shape[1] != X_test.shape[1]:
raise ValueError("X_train {0} and X_test {1} have different number "
"of features.".format(X_train.shape, X_test.shape))
return X_train, y_train, X_test, y_test, y_train_pred, y_test_pred
|
[
"def",
"check_consistent_shape",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"y_train_pred",
",",
"y_test_pred",
")",
":",
"# check input data shapes are consistent",
"X_train",
",",
"y_train",
"=",
"check_X_y",
"(",
"X_train",
",",
"y_train",
")",
"X_test",
",",
"y_test",
"=",
"check_X_y",
"(",
"X_test",
",",
"y_test",
")",
"y_test_pred",
"=",
"column_or_1d",
"(",
"y_test_pred",
")",
"y_train_pred",
"=",
"column_or_1d",
"(",
"y_train_pred",
")",
"check_consistent_length",
"(",
"y_train",
",",
"y_train_pred",
")",
"check_consistent_length",
"(",
"y_test",
",",
"y_test_pred",
")",
"if",
"X_train",
".",
"shape",
"[",
"1",
"]",
"!=",
"X_test",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"\"X_train {0} and X_test {1} have different number \"",
"\"of features.\"",
".",
"format",
"(",
"X_train",
".",
"shape",
",",
"X_test",
".",
"shape",
")",
")",
"return",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"y_train_pred",
",",
"y_test_pred"
] |
https://github.com/yzhao062/pyod/blob/13b0cd5f50d5ea5c5321da88c46232ae6f24dff7/pyod/utils/data.py#L198-L257
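A hedged usage sketch for check_consistent_shape, assuming the import path matches this record and that the sklearn helpers it wraps behave as documented; the arrays are random stand-ins:

import numpy as np
from pyod.utils.data import check_consistent_shape  # assumed import path

rng = np.random.RandomState(0)
X_train, X_test = rng.rand(8, 3), rng.rand(4, 3)
y_train, y_test = rng.randint(0, 2, 8), rng.randint(0, 2, 4)
y_train_pred, y_test_pred = rng.randint(0, 2, 8), rng.randint(0, 2, 4)

out = check_consistent_shape(X_train, y_train, X_test, y_test,
                             y_train_pred, y_test_pred)
print([a.shape for a in out])  # [(8, 3), (8,), (4, 3), (4,), (8,), (4,)]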
|
|
microsoft/debugpy
|
be8dd607f6837244e0b565345e497aff7a0c08bf
|
src/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py
|
python
|
_ModuleContainer.is_system_defined_breakpoint
|
(self, address)
|
return False
|
@type address: int
@param address: Memory address.
@rtype: bool
@return: C{True} if the given address points to a system defined
breakpoint. System defined breakpoints are hardcoded into
system libraries.
|
@type address: int
@param address: Memory address.
|
[
"@type",
"address",
":",
"int",
"@param",
"address",
":",
"Memory",
"address",
"."
] |
def is_system_defined_breakpoint(self, address):
"""
@type address: int
@param address: Memory address.
@rtype: bool
@return: C{True} if the given address points to a system defined
breakpoint. System defined breakpoints are hardcoded into
system libraries.
"""
if address:
module = self.get_module_at_address(address)
if module:
return module.match_name("ntdll") or \
module.match_name("kernel32")
return False
|
[
"def",
"is_system_defined_breakpoint",
"(",
"self",
",",
"address",
")",
":",
"if",
"address",
":",
"module",
"=",
"self",
".",
"get_module_at_address",
"(",
"address",
")",
"if",
"module",
":",
"return",
"module",
".",
"match_name",
"(",
"\"ntdll\"",
")",
"or",
"module",
".",
"match_name",
"(",
"\"kernel32\"",
")",
"return",
"False"
] |
https://github.com/microsoft/debugpy/blob/be8dd607f6837244e0b565345e497aff7a0c08bf/src/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py#L1702-L1717
|
|
erget/StereoVision
|
7e2aff8e48bdae24becc22e099460acb8476572e
|
stereovision/blockmatchers.py
|
python
|
StereoSGBM.uniquenessRatio
|
(self)
|
return self._uniqueness
|
Return private ``_uniqueness`` value.
|
Return private ``_uniqueness`` value.
|
[
"Return",
"private",
"_uniqueness",
"value",
"."
] |
def uniquenessRatio(self):
"""Return private ``_uniqueness`` value."""
return self._uniqueness
|
[
"def",
"uniquenessRatio",
"(",
"self",
")",
":",
"return",
"self",
".",
"_uniqueness"
] |
https://github.com/erget/StereoVision/blob/7e2aff8e48bdae24becc22e099460acb8476572e/stereovision/blockmatchers.py#L265-L267
|
|
mljar/mljar-supervised
|
e003daeaa14894b533847cf51cbaf82c87d0c897
|
supervised/base_automl.py
|
python
|
BaseAutoML._validate_results_path
|
(self)
|
Validates path parameter
|
Validates path parameter
|
[
"Validates",
"path",
"parameter"
] |
def _validate_results_path(self):
"""Validates path parameter"""
if self.results_path is None or isinstance(self.results_path, str):
return
raise ValueError(
f"Expected 'results_path' to be of type string, got '{type(self.results_path)}''"
)
|
[
"def",
"_validate_results_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"results_path",
"is",
"None",
"or",
"isinstance",
"(",
"self",
".",
"results_path",
",",
"str",
")",
":",
"return",
"raise",
"ValueError",
"(",
"f\"Expected 'results_path' to be of type string, got '{type(self.results_path)}''\"",
")"
] |
https://github.com/mljar/mljar-supervised/blob/e003daeaa14894b533847cf51cbaf82c87d0c897/supervised/base_automl.py#L1801-L1808
|
||
dbt-labs/dbt-core
|
e943b9fc842535e958ef4fd0b8703adc91556bc6
|
core/dbt/logger.py
|
python
|
TimingProcessor.process
|
(self, record)
|
[] |
def process(self, record):
if self.timing_info is not None:
record.extra['timing_info'] = self.timing_info.to_dict(
omit_none=True)
|
[
"def",
"process",
"(",
"self",
",",
"record",
")",
":",
"if",
"self",
".",
"timing_info",
"is",
"not",
"None",
":",
"record",
".",
"extra",
"[",
"'timing_info'",
"]",
"=",
"self",
".",
"timing_info",
".",
"to_dict",
"(",
"omit_none",
"=",
"True",
")"
] |
https://github.com/dbt-labs/dbt-core/blob/e943b9fc842535e958ef4fd0b8703adc91556bc6/core/dbt/logger.py#L228-L231
|
||||
devitocodes/devito
|
6abd441e3f5f091775ad332be6b95e017b8cbd16
|
devito/types/sparse.py
|
python
|
MatrixSparseTimeFunction._rank_to_points
|
(self)
|
return [np.concatenate((
empty, *[gp_map[bi] for bi in global_rank_to_bins.get(rank, [])]))
for rank in range(distributor.comm.Get_size())]
|
For each rank in self.grid.distributor, return
a numpy array of int32s for the positions within
this rank's self.gridpoints/self.interpolation_coefficients (i.e.
the locdim) which must be injected into that rank.
Any given location may require injection into several
ranks, based on the radius of the injection stencil
and its proximity to a rank boundary.
It is assumed, for now, that any given location may be
completely sampled from within one rank - so when
gathering the data, any point sampled from more than
one rank may have duplicates discarded. This implies
that the radius of the sampling is less than
the halo size of the Functions being sampled from.
It also requires that the halos be exchanged before
interpolation (must verify that this occurs).
|
For each rank in self.grid.distributor, return
a numpy array of int32s for the positions within
this rank's self.gridpoints/self.interpolation_coefficients (i.e.
the locdim) which must be injected into that rank.
|
[
"For",
"each",
"rank",
"in",
"self",
".",
"grid",
".",
"distributor",
"return",
"a",
"numpy",
"array",
"of",
"int32s",
"for",
"the",
"positions",
"within",
"this",
"rank",
"s",
"self",
".",
"gridpoints",
"/",
"self",
".",
"interpolation_coefficients",
"(",
"i",
".",
"e",
".",
"the",
"locdim",
")",
"which",
"must",
"be",
"injected",
"into",
"that",
"rank",
"."
] |
def _rank_to_points(self):
"""
For each rank in self.grid.distributor, return
a numpy array of int32s for the positions within
this rank's self.gridpoints/self.interpolation_coefficients (i.e.
the locdim) which must be injected into that rank.
Any given location may require injection into several
ranks, based on the radius of the injection stencil
and its proximity to a rank boundary.
It is assumed, for now, that any given location may be
completely sampled from within one rank - so when
gathering the data, any point sampled from more than
one rank may have duplicates discarded. This implies
that the radius of the sampling is less than
the halo size of the Functions being sampled from.
It also requires that the halos be exchanged before
interpolation (must verify that this occurs).
"""
distributor = self.grid.distributor
# Along each dimension, the coordinate indices are broken into
# 2*decomposition_size+3 groups, numbered starting at 0
# Group 2*i contributes only to rank i-1
# Group 2*i+1 contributes to rank i-1 and rank i
# Obviously this means groups 0 and 1 are "bad" - they contribute
# to points to the left of the domain (rank -1)
# So is group 2*decomp_size+1 and 2*decomp_size+2
# (these contributes to rank "decomp_size")
# binned_gridpoints will hold which group the particular
# point is along that decomposed dimension.
binned_gridpoints = np.empty_like(self._gridpoints.data)
dim_group_dim_rank = []
for idim, dim in enumerate(self.grid.dimensions):
decomp = distributor.decomposition[idim]
decomp_size = len(decomp)
dim_breaks = np.empty([2*decomp_size+2], dtype=np.int32)
dim_r = self.r[dim]
if dim_r is None:
# size is the whole grid
dim_r = self.grid.dimension_map[dim].glb
# Define the split
dim_breaks[:-2:2] = [
decomp_part[0] - self.r + 1 for decomp_part in decomp]
dim_breaks[-2] = decomp[-1][-1] + 1 - self.r + 1
dim_breaks[1:-1:2] = [
decomp_part[0] for decomp_part in decomp]
dim_breaks[-1] = decomp[-1][-1] + 1
# Handle the radius is None case by ensuring we treat
# all grid points in that direction as zero
gridpoints_dim = self._gridpoints.data[:, idim]
if self.r[dim] is None:
gridpoints_dim = np.zeros_like(gridpoints_dim)
try:
binned_gridpoints[:, idim] = np.digitize(
gridpoints_dim, dim_breaks)
except ValueError as e:
raise ValueError(
"decomposition failed! Are some ranks too skinny?"
) from e
this_group_rank_map = {
0: {None},
1: {None, 0},
**{2*i+2: {i} for i in range(decomp_size)},
**{2*i+2+1: {i, i+1} for i in range(decomp_size-1)},
2*decomp_size+1: {decomp_size-1, None},
2*decomp_size+2: {None}}
dim_group_dim_rank.append(this_group_rank_map)
# This allows the points to be grouped into non-overlapping sets
# based on their bin in each dimension. For each set we build a list
# of points.
bins, inverse, counts = np.unique(
binned_gridpoints,
return_inverse=True,
return_counts=True,
axis=0)
# inverse is now a "unique bin number" for each point gridpoints
# we want to turn that into a list of points for each bin
# so we argsort
inverse_argsort = np.argsort(inverse).astype(np.int32)
cumulative_counts = np.cumsum(counts)
gp_map = {tuple(bi): inverse_argsort[cci-ci:cci]
for bi, cci, ci in zip(bins, cumulative_counts, counts)
}
# the result is now going to be a concatenation of these lists
# for each of the output ranks
# each bin has a set of ranks -> each rank has a set (possibly empty)
# of bins
# For each rank get the per-dimension coordinates
# TODO maybe we should cache this on the distributor
dim_ranks_to_glb = {
tuple(distributor.comm.Get_coords(rank)): rank
for rank in range(distributor.comm.Get_size())}
global_rank_to_bins = {}
from itertools import product
for bi in bins:
# This is a list of sets for the dimension-specific rank
dim_rank_sets = [dgdr[bii]
for dgdr, bii in zip(dim_group_dim_rank, bi)]
# Convert these to an absolute rank
# This is where we will throw a KeyError if there are points OOB
for dim_ranks in product(*dim_rank_sets):
global_rank = dim_ranks_to_glb[tuple(dim_ranks)]
global_rank_to_bins\
.setdefault(global_rank, set())\
.add(tuple(bi))
empty = np.array([], dtype=np.int32)
return [np.concatenate((
empty, *[gp_map[bi] for bi in global_rank_to_bins.get(rank, [])]))
for rank in range(distributor.comm.Get_size())]
|
[
"def",
"_rank_to_points",
"(",
"self",
")",
":",
"distributor",
"=",
"self",
".",
"grid",
".",
"distributor",
"# Along each dimension, the coordinate indices are broken into",
"# 2*decomposition_size+3 groups, numbered starting at 0",
"# Group 2*i contributes only to rank i-1",
"# Group 2*i+1 contributes to rank i-1 and rank i",
"# Obviously this means groups 0 and 1 are \"bad\" - they contribute",
"# to points to the left of the domain (rank -1)",
"# So is group 2*decomp_size+1 and 2*decomp_size+2",
"# (these contributes to rank \"decomp_size\")",
"# binned_gridpoints will hold which group the particular",
"# point is along that decomposed dimension.",
"binned_gridpoints",
"=",
"np",
".",
"empty_like",
"(",
"self",
".",
"_gridpoints",
".",
"data",
")",
"dim_group_dim_rank",
"=",
"[",
"]",
"for",
"idim",
",",
"dim",
"in",
"enumerate",
"(",
"self",
".",
"grid",
".",
"dimensions",
")",
":",
"decomp",
"=",
"distributor",
".",
"decomposition",
"[",
"idim",
"]",
"decomp_size",
"=",
"len",
"(",
"decomp",
")",
"dim_breaks",
"=",
"np",
".",
"empty",
"(",
"[",
"2",
"*",
"decomp_size",
"+",
"2",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"dim_r",
"=",
"self",
".",
"r",
"[",
"dim",
"]",
"if",
"dim_r",
"is",
"None",
":",
"# size is the whole grid",
"dim_r",
"=",
"self",
".",
"grid",
".",
"dimension_map",
"[",
"dim",
"]",
".",
"glb",
"# Define the split",
"dim_breaks",
"[",
":",
"-",
"2",
":",
"2",
"]",
"=",
"[",
"decomp_part",
"[",
"0",
"]",
"-",
"self",
".",
"r",
"+",
"1",
"for",
"decomp_part",
"in",
"decomp",
"]",
"dim_breaks",
"[",
"-",
"2",
"]",
"=",
"decomp",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"+",
"1",
"-",
"self",
".",
"r",
"+",
"1",
"dim_breaks",
"[",
"1",
":",
"-",
"1",
":",
"2",
"]",
"=",
"[",
"decomp_part",
"[",
"0",
"]",
"for",
"decomp_part",
"in",
"decomp",
"]",
"dim_breaks",
"[",
"-",
"1",
"]",
"=",
"decomp",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"+",
"1",
"# Handle the radius is None case by ensuring we treat",
"# all grid points in that direction as zero",
"gridpoints_dim",
"=",
"self",
".",
"_gridpoints",
".",
"data",
"[",
":",
",",
"idim",
"]",
"if",
"self",
".",
"r",
"[",
"dim",
"]",
"is",
"None",
":",
"gridpoints_dim",
"=",
"np",
".",
"zeros_like",
"(",
"gridpoints_dim",
")",
"try",
":",
"binned_gridpoints",
"[",
":",
",",
"idim",
"]",
"=",
"np",
".",
"digitize",
"(",
"gridpoints_dim",
",",
"dim_breaks",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"decomposition failed! Are some ranks too skinny?\"",
")",
"from",
"e",
"this_group_rank_map",
"=",
"{",
"0",
":",
"{",
"None",
"}",
",",
"1",
":",
"{",
"None",
",",
"0",
"}",
",",
"*",
"*",
"{",
"2",
"*",
"i",
"+",
"2",
":",
"{",
"i",
"}",
"for",
"i",
"in",
"range",
"(",
"decomp_size",
")",
"}",
",",
"*",
"*",
"{",
"2",
"*",
"i",
"+",
"2",
"+",
"1",
":",
"{",
"i",
",",
"i",
"+",
"1",
"}",
"for",
"i",
"in",
"range",
"(",
"decomp_size",
"-",
"1",
")",
"}",
",",
"2",
"*",
"decomp_size",
"+",
"1",
":",
"{",
"decomp_size",
"-",
"1",
",",
"None",
"}",
",",
"2",
"*",
"decomp_size",
"+",
"2",
":",
"{",
"None",
"}",
"}",
"dim_group_dim_rank",
".",
"append",
"(",
"this_group_rank_map",
")",
"# This allows the points to be grouped into non-overlapping sets",
"# based on their bin in each dimension. For each set we build a list",
"# of points.",
"bins",
",",
"inverse",
",",
"counts",
"=",
"np",
".",
"unique",
"(",
"binned_gridpoints",
",",
"return_inverse",
"=",
"True",
",",
"return_counts",
"=",
"True",
",",
"axis",
"=",
"0",
")",
"# inverse is now a \"unique bin number\" for each point gridpoints",
"# we want to turn that into a list of points for each bin",
"# so we argsort",
"inverse_argsort",
"=",
"np",
".",
"argsort",
"(",
"inverse",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"cumulative_counts",
"=",
"np",
".",
"cumsum",
"(",
"counts",
")",
"gp_map",
"=",
"{",
"tuple",
"(",
"bi",
")",
":",
"inverse_argsort",
"[",
"cci",
"-",
"ci",
":",
"cci",
"]",
"for",
"bi",
",",
"cci",
",",
"ci",
"in",
"zip",
"(",
"bins",
",",
"cumulative_counts",
",",
"counts",
")",
"}",
"# the result is now going to be a concatenation of these lists",
"# for each of the output ranks",
"# each bin has a set of ranks -> each rank has a set (possibly empty)",
"# of bins",
"# For each rank get the per-dimension coordinates",
"# TODO maybe we should cache this on the distributor",
"dim_ranks_to_glb",
"=",
"{",
"tuple",
"(",
"distributor",
".",
"comm",
".",
"Get_coords",
"(",
"rank",
")",
")",
":",
"rank",
"for",
"rank",
"in",
"range",
"(",
"distributor",
".",
"comm",
".",
"Get_size",
"(",
")",
")",
"}",
"global_rank_to_bins",
"=",
"{",
"}",
"from",
"itertools",
"import",
"product",
"for",
"bi",
"in",
"bins",
":",
"# This is a list of sets for the dimension-specific rank",
"dim_rank_sets",
"=",
"[",
"dgdr",
"[",
"bii",
"]",
"for",
"dgdr",
",",
"bii",
"in",
"zip",
"(",
"dim_group_dim_rank",
",",
"bi",
")",
"]",
"# Convert these to an absolute rank",
"# This is where we will throw a KeyError if there are points OOB",
"for",
"dim_ranks",
"in",
"product",
"(",
"*",
"dim_rank_sets",
")",
":",
"global_rank",
"=",
"dim_ranks_to_glb",
"[",
"tuple",
"(",
"dim_ranks",
")",
"]",
"global_rank_to_bins",
".",
"setdefault",
"(",
"global_rank",
",",
"set",
"(",
")",
")",
".",
"add",
"(",
"tuple",
"(",
"bi",
")",
")",
"empty",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"return",
"[",
"np",
".",
"concatenate",
"(",
"(",
"empty",
",",
"*",
"[",
"gp_map",
"[",
"bi",
"]",
"for",
"bi",
"in",
"global_rank_to_bins",
".",
"get",
"(",
"rank",
",",
"[",
"]",
")",
"]",
")",
")",
"for",
"rank",
"in",
"range",
"(",
"distributor",
".",
"comm",
".",
"Get_size",
"(",
")",
")",
"]"
] |
https://github.com/devitocodes/devito/blob/6abd441e3f5f091775ad332be6b95e017b8cbd16/devito/types/sparse.py#L1640-L1769
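The heart of the binning step above is np.digitize against a sorted array of break points: each grid index lands in a group encoding which rank(s) it must be sent to. A hedged one-dimensional sketch with made-up numbers:
import numpy as np

# Hypothetical decomposition: rank 0 owns indices [0, 5), rank 1 owns [5, 10),
# injection radius r = 2, giving 2*decomp_size + 2 break points as in the
# formulas above: [0-r+1, 0, 5-r+1, 5, 9+1-r+1, 10]
dim_breaks = np.array([-1, 0, 4, 5, 9, 10])
gridpoints = np.array([0, 3, 4, 7, 9])
# digitize assigns each point the index of its group; even/odd group numbers
# distinguish "owned by one rank" from "shared across a rank boundary"
print(np.digitize(gridpoints, dim_breaks))  # [2 2 3 4 5]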
|
|
lk-geimfari/mimesis
|
36653b49f28719c0a2aa20fef6c6df3911811b32
|
mimesis/providers/finance.py
|
python
|
Finance.price
|
(self, minimum: float = 500, maximum: float = 1500)
|
return self.random.uniform(
minimum,
maximum,
precision=2,
)
|
Generate random price.
:param minimum: Minimum value of price.
:param maximum: Maximum value of price.
:return: Price.
|
Generate random price.
|
[
"Generate",
"random",
"price",
"."
] |
def price(self, minimum: float = 500, maximum: float = 1500) -> float:
"""Generate random price.
:param minimum: Minimum value of price.
:param maximum: Maximum value of price.
:return: Price.
"""
return self.random.uniform(
minimum,
maximum,
precision=2,
)
|
[
"def",
"price",
"(",
"self",
",",
"minimum",
":",
"float",
"=",
"500",
",",
"maximum",
":",
"float",
"=",
"1500",
")",
"->",
"float",
":",
"return",
"self",
".",
"random",
".",
"uniform",
"(",
"minimum",
",",
"maximum",
",",
"precision",
"=",
"2",
",",
")"
] |
https://github.com/lk-geimfari/mimesis/blob/36653b49f28719c0a2aa20fef6c6df3911811b32/mimesis/providers/finance.py#L91-L102
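A hedged usage sketch, assuming a recent mimesis release that exports the provider at the top level (the bounds are made up):
from mimesis import Finance

finance = Finance()
# a uniform random float in [100, 200], rounded to two decimal places
print(finance.price(minimum=100, maximum=200))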
|
|
IdentityPython/pysaml2
|
6badb32d212257bd83ffcc816f9b625f68281b47
|
src/saml2/ws/wsaddr.py
|
python
|
attributed_q_name_type__from_string
|
(xml_string)
|
return saml2.create_class_from_xml_string(AttributedQNameType_, xml_string)
|
[] |
def attributed_q_name_type__from_string(xml_string):
return saml2.create_class_from_xml_string(AttributedQNameType_, xml_string)
|
[
"def",
"attributed_q_name_type__from_string",
"(",
"xml_string",
")",
":",
"return",
"saml2",
".",
"create_class_from_xml_string",
"(",
"AttributedQNameType_",
",",
"xml_string",
")"
] |
https://github.com/IdentityPython/pysaml2/blob/6badb32d212257bd83ffcc816f9b625f68281b47/src/saml2/ws/wsaddr.py#L139-L140
|
|||
triaquae/triaquae
|
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
|
TriAquae/models/Ubuntu_12/paramiko/transport.py
|
python
|
Transport.is_active
|
(self)
|
return self.active
|
Return true if this session is active (open).
@return: True if the session is still active (open); False if the
session is closed
@rtype: bool
|
Return true if this session is active (open).
|
[
"Return",
"true",
"if",
"this",
"session",
"is",
"active",
"(",
"open",
")",
"."
] |
def is_active(self):
"""
Return true if this session is active (open).
@return: True if the session is still active (open); False if the
session is closed
@rtype: bool
"""
return self.active
|
[
"def",
"is_active",
"(",
"self",
")",
":",
"return",
"self",
".",
"active"
] |
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Ubuntu_12/paramiko/transport.py#L641-L649
|
|
pilotmoon/PopClip-Extensions
|
29fc472befc09ee350092ac70283bd9fdb456cb6
|
source/OneNote/requests/packages/urllib3/packages/ordered_dict.py
|
python
|
OrderedDict.__setitem__
|
(self, key, value, dict_setitem=dict.__setitem__)
|
od.__setitem__(i, y) <==> od[i]=y
|
od.__setitem__(i, y) <==> od[i]=y
|
[
"od",
".",
"__setitem__",
"(",
"i",
"y",
")",
"<",
"==",
">",
"od",
"[",
"i",
"]",
"=",
"y"
] |
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
|
[
"def",
"__setitem__",
"(",
"self",
",",
"key",
",",
"value",
",",
"dict_setitem",
"=",
"dict",
".",
"__setitem__",
")",
":",
"# Setting a new item creates a new link which goes at the end of the linked",
"# list, and the inherited dictionary is updated with the new key/value pair.",
"if",
"key",
"not",
"in",
"self",
":",
"root",
"=",
"self",
".",
"__root",
"last",
"=",
"root",
"[",
"0",
"]",
"last",
"[",
"1",
"]",
"=",
"root",
"[",
"0",
"]",
"=",
"self",
".",
"__map",
"[",
"key",
"]",
"=",
"[",
"last",
",",
"root",
",",
"key",
"]",
"dict_setitem",
"(",
"self",
",",
"key",
",",
"value",
")"
] |
https://github.com/pilotmoon/PopClip-Extensions/blob/29fc472befc09ee350092ac70283bd9fdb456cb6/source/OneNote/requests/packages/urllib3/packages/ordered_dict.py#L44-L52
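The one-liner last[1] = root[0] = self.__map[key] = [last, root, key] splices a new [prev, next, key] link just before a circular sentinel node. A standalone sketch of the same structure, independent of the vendored class:
# the sentinel makes the list circular: root[0] is the last link, root[1] the first
root = []
root[:] = [root, root, None]
link_map = {}

def append_key(key):
    last = root[0]
    # the new link goes between the old tail and the sentinel
    last[1] = root[0] = link_map[key] = [last, root, key]

for k in 'abc':
    append_key(k)

node = root[1]
while node is not root:
    print(node[2])  # a, b, c in insertion order
    node = node[1]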
|
||
tomplus/kubernetes_asyncio
|
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
|
kubernetes_asyncio/client/models/v1beta1_pod_security_policy_spec.py
|
python
|
V1beta1PodSecurityPolicySpec.allow_privilege_escalation
|
(self, allow_privilege_escalation)
|
Sets the allow_privilege_escalation of this V1beta1PodSecurityPolicySpec.
allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. # noqa: E501
:param allow_privilege_escalation: The allow_privilege_escalation of this V1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
|
Sets the allow_privilege_escalation of this V1beta1PodSecurityPolicySpec.
|
[
"Sets",
"the",
"allow_privilege_escalation",
"of",
"this",
"V1beta1PodSecurityPolicySpec",
"."
] |
def allow_privilege_escalation(self, allow_privilege_escalation):
"""Sets the allow_privilege_escalation of this V1beta1PodSecurityPolicySpec.
allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true. # noqa: E501
:param allow_privilege_escalation: The allow_privilege_escalation of this V1beta1PodSecurityPolicySpec. # noqa: E501
:type: bool
"""
self._allow_privilege_escalation = allow_privilege_escalation
|
[
"def",
"allow_privilege_escalation",
"(",
"self",
",",
"allow_privilege_escalation",
")",
":",
"self",
".",
"_allow_privilege_escalation",
"=",
"allow_privilege_escalation"
] |
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1beta1_pod_security_policy_spec.py#L178-L187
|
||
replit-archive/empythoned
|
977ec10ced29a3541a4973dc2b59910805695752
|
dist/lib/python2.7/HTMLParser.py
|
python
|
HTMLParser.feed
|
(self, data)
|
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
|
r"""Feed data to the parser.
|
[
"r",
"Feed",
"data",
"to",
"the",
"parser",
"."
] |
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
|
[
"def",
"feed",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"rawdata",
"=",
"self",
".",
"rawdata",
"+",
"data",
"self",
".",
"goahead",
"(",
"0",
")"
] |
https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/dist/lib/python2.7/HTMLParser.py#L101-L108
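Because feed() only appends to rawdata and re-runs goahead(), parsing state survives across calls, so input may arrive in arbitrary chunks. A hedged Python 3 sketch (the stdlib module is html.parser there; the subclass is illustrative):
from html.parser import HTMLParser

class TagLogger(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print('start:', tag)

parser = TagLogger()
# chunk boundaries may fall mid-tag; state carries over between calls
for chunk in ('<ht', 'ml><bo', 'dy></body></html>'):
    parser.feed(chunk)
parser.close()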
|
||
openshift/openshift-tools
|
1188778e728a6e4781acf728123e5b356380fe6f
|
ansible/roles/lib_openshift_3.2/library/oadm_registry.py
|
python
|
Service.__init__
|
(self, content)
|
Service constructor
|
Service constructor
|
[
"Service",
"constructor"
] |
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
|
[
"def",
"__init__",
"(",
"self",
",",
"content",
")",
":",
"super",
"(",
"Service",
",",
"self",
")",
".",
"__init__",
"(",
"content",
"=",
"content",
")"
] |
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_openshift_3.2/library/oadm_registry.py#L1005-L1007
|
||
HymanLiuTS/flaskTs
|
286648286976e85d9b9a5873632331efcafe0b21
|
flasky/lib/python2.7/site-packages/selenium/webdriver/firefox/options.py
|
python
|
Options.profile
|
(self)
|
return self._profile
|
Returns the Firefox profile to use.
|
Returns the Firefox profile to use.
|
[
"Returns",
"the",
"Firefox",
"profile",
"to",
"use",
"."
] |
def profile(self):
"""Returns the Firefox profile to use."""
return self._profile
|
[
"def",
"profile",
"(",
"self",
")",
":",
"return",
"self",
".",
"_profile"
] |
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/selenium/webdriver/firefox/options.py#L65-L67
|
|
materialsproject/pymatgen
|
8128f3062a334a2edd240e4062b5b9bdd1ae6f58
|
pymatgen/core/units.py
|
python
|
FloatWithUnit.from_string
|
(cls, s)
|
return cls(num, unit, unit_type=unit_type)
|
Initialize a FloatWithUnit from a string. Example Memory.from_string("1. Mb")
|
Initialize a FloatWithUnit from a string. Example Memory.from_string("1. Mb")
|
[
"Initialize",
"a",
"FloatWithUnit",
"from",
"a",
"string",
".",
"Example",
"Memory",
".",
"from_string",
"(",
"1",
".",
"Mb",
")"
] |
def from_string(cls, s):
"""
Initialize a FloatWithUnit from a string. Example Memory.from_string("1. Mb")
"""
# Extract num and unit string.
s = s.strip()
for i, char in enumerate(s):
if char.isalpha() or char.isspace():
break
else:
raise Exception("Unit is missing in string %s" % s)
num, unit = float(s[:i]), s[i:]
# Find unit type (set it to None if it cannot be detected)
for unit_type, d in BASE_UNITS.items():
if unit in d:
break
else:
unit_type = None
return cls(num, unit, unit_type=unit_type)
|
[
"def",
"from_string",
"(",
"cls",
",",
"s",
")",
":",
"# Extract num and unit string.",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"for",
"i",
",",
"char",
"in",
"enumerate",
"(",
"s",
")",
":",
"if",
"char",
".",
"isalpha",
"(",
")",
"or",
"char",
".",
"isspace",
"(",
")",
":",
"break",
"else",
":",
"raise",
"Exception",
"(",
"\"Unit is missing in string %s\"",
"%",
"s",
")",
"num",
",",
"unit",
"=",
"float",
"(",
"s",
"[",
":",
"i",
"]",
")",
",",
"s",
"[",
"i",
":",
"]",
"# Find unit type (set it to None if it cannot be detected)",
"for",
"unit_type",
",",
"d",
"in",
"BASE_UNITS",
".",
"items",
"(",
")",
":",
"if",
"unit",
"in",
"d",
":",
"break",
"else",
":",
"unit_type",
"=",
"None",
"return",
"cls",
"(",
"num",
",",
"unit",
",",
"unit_type",
"=",
"unit_type",
")"
] |
https://github.com/materialsproject/pymatgen/blob/8128f3062a334a2edd240e4062b5b9bdd1ae6f58/pymatgen/core/units.py#L319-L339
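The for/else scan splits the input at the first alphabetic or whitespace character, and the else clause fires only when the loop never breaks (no unit found). A hedged usage sketch, assuming the pymatgen version in this record (newer releases rename the classmethod to from_str):
from pymatgen.core.units import FloatWithUnit, Memory

m = Memory.from_string("1. Mb")            # num = 1.0, unit = Mb
e = FloatWithUnit.from_string("27.2114 eV")
print(float(e), e.unit)                    # 27.2114 eV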
|
|
pastas/pastas
|
efacd4e9433e0bf8fe208429c66d61c41b03087e
|
pastas/modelplots.py
|
python
|
Plotting.decomposition
|
(self, tmin=None, tmax=None, ytick_base=True, split=True,
figsize=(10, 8), axes=None, name=None,
return_warmup=False, min_ylim_diff=None, **kwargs)
|
return axes
|
Plot the decomposition of a time-series in the different stresses.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
ytick_base: Boolean or float, optional
Make the ytick-base constant if True, set this base to float if
float.
split: bool, optional
Split the stresses in multiple stresses when possible. Default is
True.
axes: matplotlib.axes.Axes instance, optional
Matplotlib Axes instance to plot the figure on to.
figsize: tuple, optional
tuple of size 2 to determine the figure size in inches.
name: str, optional
Name to give the simulated time series in the legend.
return_warmup: bool, optional
Show the warmup-period. Default is false.
min_ylim_diff: float, optional
Float with the difference in the ylimits. Default is None
**kwargs: dict, optional
Optional arguments, passed on to the plt.subplots method.
Returns
-------
axes: list of matplotlib.axes.Axes
|
Plot the decomposition of a time-series in the different stresses.
|
[
"Plot",
"the",
"decomposition",
"of",
"a",
"time",
"-",
"series",
"in",
"the",
"different",
"stresses",
"."
] |
def decomposition(self, tmin=None, tmax=None, ytick_base=True, split=True,
figsize=(10, 8), axes=None, name=None,
return_warmup=False, min_ylim_diff=None, **kwargs):
"""Plot the decomposition of a time-series in the different stresses.
Parameters
----------
tmin: str or pandas.Timestamp, optional
tmax: str or pandas.Timestamp, optional
ytick_base: Boolean or float, optional
Make the ytick-base constant if True, set this base to float if
float.
split: bool, optional
Split the stresses in multiple stresses when possible. Default is
True.
axes: matplotlib.axes.Axes instance, optional
Matplotlib Axes instance to plot the figure on to.
figsize: tuple, optional
tuple of size 2 to determine the figure size in inches.
name: str, optional
Name to give the simulated time series in the legend.
return_warmup: bool, optional
Show the warmup-period. Default is false.
min_ylim_diff: float, optional
Float with the difference in the ylimits. Default is None
**kwargs: dict, optional
Optional arguments, passed on to the plt.subplots method.
Returns
-------
axes: list of matplotlib.axes.Axes
"""
o = self.ml.observations(tmin=tmin, tmax=tmax)
# determine the simulation
sim = self.ml.simulate(tmin=tmin, tmax=tmax,
return_warmup=return_warmup)
if name is not None:
sim.name = name
# determine the influence of the different stresses
contribs = self.ml.get_contributions(split=split, tmin=tmin, tmax=tmax,
return_warmup=return_warmup)
names = [s.name for s in contribs]
if self.ml.transform:
contrib = self.ml.get_transform_contribution(tmin=tmin, tmax=tmax)
contribs.append(contrib)
names.append(self.ml.transform.name)
# determine ylim for every graph, to scale the height
ylims = [(min([sim.min(), o[tmin:tmax].min()]),
max([sim.max(), o[tmin:tmax].max()]))]
for contrib in contribs:
hs = contrib[tmin:tmax]
if hs.empty:
if contrib.empty:
ylims.append((0.0, 0.0))
else:
ylims.append((contrib.min(), hs.max()))
else:
ylims.append((hs.min(), hs.max()))
if min_ylim_diff is not None:
for i, ylim in enumerate(ylims):
if np.diff(ylim) < min_ylim_diff:
ylims[i] = (np.mean(ylim) - min_ylim_diff / 2,
np.mean(ylim) + min_ylim_diff / 2)
# determine height ratios
height_ratios = _get_height_ratios(ylims)
nrows = len(contribs) + 1
if axes is None:
# open a new figure
gridspec_kw = {'height_ratios': height_ratios}
fig, axes = plt.subplots(nrows, sharex=True, figsize=figsize,
gridspec_kw=gridspec_kw, **kwargs)
axes = np.atleast_1d(axes)
o_label = o.name
set_axes_properties = True
else:
if len(axes) != nrows:
msg = 'Makes sure the number of axes equals the number of ' \
'series'
raise Exception(msg)
fig = axes[0].figure
o_label = ''
set_axes_properties = False
# plot simulation and observations in top graph
o_nu = self.ml.oseries.series.drop(o.index)
if not o_nu.empty:
# plot parts of the oseries that are not used in grey
o_nu.plot(linestyle='', marker='.', color='0.5', label='',
markersize=2, ax=axes[0], x_compat=True)
o.plot(linestyle='', marker='.', color='k', label=o_label,
markersize=3, ax=axes[0], x_compat=True)
sim.plot(ax=axes[0], x_compat=True)
if set_axes_properties:
axes[0].set_title('observations vs. simulation')
axes[0].set_ylim(ylims[0])
axes[0].grid(True)
axes[0].legend(ncol=3, frameon=False, numpoints=3)
if ytick_base and set_axes_properties:
if isinstance(ytick_base, bool):
# determine the ytick-spacing of the top graph
yticks = axes[0].yaxis.get_ticklocs()
if len(yticks) > 1:
ytick_base = yticks[1] - yticks[0]
else:
ytick_base = None
axes[0].yaxis.set_major_locator(
MultipleLocator(base=ytick_base))
# plot the influence of the stresses
for i, contrib in enumerate(contribs):
ax = axes[i + 1]
contrib.plot(ax=ax, x_compat=True)
if set_axes_properties:
if ytick_base:
# set the ytick-spacing equal to the top graph
locator = MultipleLocator(base=ytick_base)
ax.yaxis.set_major_locator(locator)
ax.set_title(names[i])
ax.set_ylim(ylims[i + 1])
ax.grid(True)
ax.minorticks_off()
if set_axes_properties:
axes[0].set_xlim(tmin, tmax)
fig.tight_layout(pad=0.0)
return axes
|
[
"def",
"decomposition",
"(",
"self",
",",
"tmin",
"=",
"None",
",",
"tmax",
"=",
"None",
",",
"ytick_base",
"=",
"True",
",",
"split",
"=",
"True",
",",
"figsize",
"=",
"(",
"10",
",",
"8",
")",
",",
"axes",
"=",
"None",
",",
"name",
"=",
"None",
",",
"return_warmup",
"=",
"False",
",",
"min_ylim_diff",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"o",
"=",
"self",
".",
"ml",
".",
"observations",
"(",
"tmin",
"=",
"tmin",
",",
"tmax",
"=",
"tmax",
")",
"# determine the simulation",
"sim",
"=",
"self",
".",
"ml",
".",
"simulate",
"(",
"tmin",
"=",
"tmin",
",",
"tmax",
"=",
"tmax",
",",
"return_warmup",
"=",
"return_warmup",
")",
"if",
"name",
"is",
"not",
"None",
":",
"sim",
".",
"name",
"=",
"name",
"# determine the influence of the different stresses",
"contribs",
"=",
"self",
".",
"ml",
".",
"get_contributions",
"(",
"split",
"=",
"split",
",",
"tmin",
"=",
"tmin",
",",
"tmax",
"=",
"tmax",
",",
"return_warmup",
"=",
"return_warmup",
")",
"names",
"=",
"[",
"s",
".",
"name",
"for",
"s",
"in",
"contribs",
"]",
"if",
"self",
".",
"ml",
".",
"transform",
":",
"contrib",
"=",
"self",
".",
"ml",
".",
"get_transform_contribution",
"(",
"tmin",
"=",
"tmin",
",",
"tmax",
"=",
"tmax",
")",
"contribs",
".",
"append",
"(",
"contrib",
")",
"names",
".",
"append",
"(",
"self",
".",
"ml",
".",
"transform",
".",
"name",
")",
"# determine ylim for every graph, to scale the height",
"ylims",
"=",
"[",
"(",
"min",
"(",
"[",
"sim",
".",
"min",
"(",
")",
",",
"o",
"[",
"tmin",
":",
"tmax",
"]",
".",
"min",
"(",
")",
"]",
")",
",",
"max",
"(",
"[",
"sim",
".",
"max",
"(",
")",
",",
"o",
"[",
"tmin",
":",
"tmax",
"]",
".",
"max",
"(",
")",
"]",
")",
")",
"]",
"for",
"contrib",
"in",
"contribs",
":",
"hs",
"=",
"contrib",
"[",
"tmin",
":",
"tmax",
"]",
"if",
"hs",
".",
"empty",
":",
"if",
"contrib",
".",
"empty",
":",
"ylims",
".",
"append",
"(",
"(",
"0.0",
",",
"0.0",
")",
")",
"else",
":",
"ylims",
".",
"append",
"(",
"(",
"contrib",
".",
"min",
"(",
")",
",",
"hs",
".",
"max",
"(",
")",
")",
")",
"else",
":",
"ylims",
".",
"append",
"(",
"(",
"hs",
".",
"min",
"(",
")",
",",
"hs",
".",
"max",
"(",
")",
")",
")",
"if",
"min_ylim_diff",
"is",
"not",
"None",
":",
"for",
"i",
",",
"ylim",
"in",
"enumerate",
"(",
"ylims",
")",
":",
"if",
"np",
".",
"diff",
"(",
"ylim",
")",
"<",
"min_ylim_diff",
":",
"ylims",
"[",
"i",
"]",
"=",
"(",
"np",
".",
"mean",
"(",
"ylim",
")",
"-",
"min_ylim_diff",
"/",
"2",
",",
"np",
".",
"mean",
"(",
"ylim",
")",
"+",
"min_ylim_diff",
"/",
"2",
")",
"# determine height ratios",
"height_ratios",
"=",
"_get_height_ratios",
"(",
"ylims",
")",
"nrows",
"=",
"len",
"(",
"contribs",
")",
"+",
"1",
"if",
"axes",
"is",
"None",
":",
"# open a new figure",
"gridspec_kw",
"=",
"{",
"'height_ratios'",
":",
"height_ratios",
"}",
"fig",
",",
"axes",
"=",
"plt",
".",
"subplots",
"(",
"nrows",
",",
"sharex",
"=",
"True",
",",
"figsize",
"=",
"figsize",
",",
"gridspec_kw",
"=",
"gridspec_kw",
",",
"*",
"*",
"kwargs",
")",
"axes",
"=",
"np",
".",
"atleast_1d",
"(",
"axes",
")",
"o_label",
"=",
"o",
".",
"name",
"set_axes_properties",
"=",
"True",
"else",
":",
"if",
"len",
"(",
"axes",
")",
"!=",
"nrows",
":",
"msg",
"=",
"'Makes sure the number of axes equals the number of '",
"'series'",
"raise",
"Exception",
"(",
"msg",
")",
"fig",
"=",
"axes",
"[",
"0",
"]",
".",
"figure",
"o_label",
"=",
"''",
"set_axes_properties",
"=",
"False",
"# plot simulation and observations in top graph",
"o_nu",
"=",
"self",
".",
"ml",
".",
"oseries",
".",
"series",
".",
"drop",
"(",
"o",
".",
"index",
")",
"if",
"not",
"o_nu",
".",
"empty",
":",
"# plot parts of the oseries that are not used in grey",
"o_nu",
".",
"plot",
"(",
"linestyle",
"=",
"''",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'0.5'",
",",
"label",
"=",
"''",
",",
"markersize",
"=",
"2",
",",
"ax",
"=",
"axes",
"[",
"0",
"]",
",",
"x_compat",
"=",
"True",
")",
"o",
".",
"plot",
"(",
"linestyle",
"=",
"''",
",",
"marker",
"=",
"'.'",
",",
"color",
"=",
"'k'",
",",
"label",
"=",
"o_label",
",",
"markersize",
"=",
"3",
",",
"ax",
"=",
"axes",
"[",
"0",
"]",
",",
"x_compat",
"=",
"True",
")",
"sim",
".",
"plot",
"(",
"ax",
"=",
"axes",
"[",
"0",
"]",
",",
"x_compat",
"=",
"True",
")",
"if",
"set_axes_properties",
":",
"axes",
"[",
"0",
"]",
".",
"set_title",
"(",
"'observations vs. simulation'",
")",
"axes",
"[",
"0",
"]",
".",
"set_ylim",
"(",
"ylims",
"[",
"0",
"]",
")",
"axes",
"[",
"0",
"]",
".",
"grid",
"(",
"True",
")",
"axes",
"[",
"0",
"]",
".",
"legend",
"(",
"ncol",
"=",
"3",
",",
"frameon",
"=",
"False",
",",
"numpoints",
"=",
"3",
")",
"if",
"ytick_base",
"and",
"set_axes_properties",
":",
"if",
"isinstance",
"(",
"ytick_base",
",",
"bool",
")",
":",
"# determine the ytick-spacing of the top graph",
"yticks",
"=",
"axes",
"[",
"0",
"]",
".",
"yaxis",
".",
"get_ticklocs",
"(",
")",
"if",
"len",
"(",
"yticks",
")",
">",
"1",
":",
"ytick_base",
"=",
"yticks",
"[",
"1",
"]",
"-",
"yticks",
"[",
"0",
"]",
"else",
":",
"ytick_base",
"=",
"None",
"axes",
"[",
"0",
"]",
".",
"yaxis",
".",
"set_major_locator",
"(",
"MultipleLocator",
"(",
"base",
"=",
"ytick_base",
")",
")",
"# plot the influence of the stresses",
"for",
"i",
",",
"contrib",
"in",
"enumerate",
"(",
"contribs",
")",
":",
"ax",
"=",
"axes",
"[",
"i",
"+",
"1",
"]",
"contrib",
".",
"plot",
"(",
"ax",
"=",
"ax",
",",
"x_compat",
"=",
"True",
")",
"if",
"set_axes_properties",
":",
"if",
"ytick_base",
":",
"# set the ytick-spacing equal to the top graph",
"locator",
"=",
"MultipleLocator",
"(",
"base",
"=",
"ytick_base",
")",
"ax",
".",
"yaxis",
".",
"set_major_locator",
"(",
"locator",
")",
"ax",
".",
"set_title",
"(",
"names",
"[",
"i",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"ylims",
"[",
"i",
"+",
"1",
"]",
")",
"ax",
".",
"grid",
"(",
"True",
")",
"ax",
".",
"minorticks_off",
"(",
")",
"if",
"set_axes_properties",
":",
"axes",
"[",
"0",
"]",
".",
"set_xlim",
"(",
"tmin",
",",
"tmax",
")",
"fig",
".",
"tight_layout",
"(",
"pad",
"=",
"0.0",
")",
"return",
"axes"
] |
https://github.com/pastas/pastas/blob/efacd4e9433e0bf8fe208429c66d61c41b03087e/pastas/modelplots.py#L274-L405
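The gridspec height_ratios computed from the per-panel y-ranges are what keep one unit of head the same physical height in every subplot. A minimal matplotlib sketch of the same idea, with made-up ranges (the proportional-to-range rule is an assumption about what _get_height_ratios returns):
import numpy as np
import matplotlib.pyplot as plt

ylims = [(0.0, 4.0), (0.0, 1.0), (-0.5, 0.5)]
# assumed behaviour of _get_height_ratios: height proportional to y-range
height_ratios = [hi - lo for lo, hi in ylims]

fig, axes = plt.subplots(len(ylims), sharex=True,
                         gridspec_kw={'height_ratios': height_ratios})
for ax, (lo, hi) in zip(np.atleast_1d(axes), ylims):
    ax.plot(np.linspace(lo, hi, 50))
    ax.set_ylim(lo, hi)
plt.show()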
|
|
bayespy/bayespy
|
0e6e6130c888a4295cc9421d61d4ad27b2960ebb
|
bayespy/inference/vmp/transformations.py
|
python
|
covariance_to_variance
|
(C, ndim=1, covariance_axis=None)
|
return np.einsum(C, [Ellipsis]+keys, [Ellipsis]+out_keys)
|
[] |
def covariance_to_variance(C, ndim=1, covariance_axis=None):
# Force None to empty list
if covariance_axis is None:
covariance_axis = []
# Force a list from integer
if isinstance(covariance_axis, int):
covariance_axis = [covariance_axis]
# Force positive axis indices
covariance_axis = [axis + ndim if axis < 0 else axis
for axis in covariance_axis]
# Make a set of the axes
covariance_axis = set(covariance_axis)
keys = [i+ndim if i in covariance_axis else i for i in range(ndim)]
keys += [i+2*ndim if i in covariance_axis else i for i in range(ndim)]
out_keys = sorted(list(set(keys)))
return np.einsum(C, [Ellipsis]+keys, [Ellipsis]+out_keys)
|
[
"def",
"covariance_to_variance",
"(",
"C",
",",
"ndim",
"=",
"1",
",",
"covariance_axis",
"=",
"None",
")",
":",
"# Force None to empty list",
"if",
"covariance_axis",
"is",
"None",
":",
"covariance_axis",
"=",
"[",
"]",
"# Force a list from integer",
"if",
"isinstance",
"(",
"covariance_axis",
",",
"int",
")",
":",
"covariance_axis",
"=",
"[",
"covariance_axis",
"]",
"# Force positive axis indices",
"covariance_axis",
"=",
"[",
"axis",
"+",
"ndim",
"if",
"axis",
"<",
"0",
"else",
"axis",
"for",
"axis",
"in",
"covariance_axis",
"]",
"# Make a set of the axes",
"covariance_axis",
"=",
"set",
"(",
"covariance_axis",
")",
"keys",
"=",
"[",
"i",
"+",
"ndim",
"if",
"i",
"in",
"covariance_axis",
"else",
"i",
"for",
"i",
"in",
"range",
"(",
"ndim",
")",
"]",
"keys",
"+=",
"[",
"i",
"+",
"2",
"*",
"ndim",
"if",
"i",
"in",
"covariance_axis",
"else",
"i",
"for",
"i",
"in",
"range",
"(",
"ndim",
")",
"]",
"out_keys",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"keys",
")",
")",
")",
"return",
"np",
".",
"einsum",
"(",
"C",
",",
"[",
"Ellipsis",
"]",
"+",
"keys",
",",
"[",
"Ellipsis",
"]",
"+",
"out_keys",
")"
] |
https://github.com/bayespy/bayespy/blob/0e6e6130c888a4295cc9421d61d4ad27b2960ebb/bayespy/inference/vmp/transformations.py#L336-L356
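Giving both copies of an axis the same einsum label extracts the diagonal, while axes listed in covariance_axis keep two distinct labels and stay full matrices. A hedged numeric check of the default (all-variance) case:
import numpy as np

# shape (2, 3, 3): a covariance over one ndim=1 axis, plus a leading plate axis
C = np.arange(18, dtype=float).reshape(2, 3, 3)
# covariance_axis=None gives keys=[0, 0] and out_keys=[0]: the diagonal
var = np.einsum(C, [Ellipsis, 0, 0], [Ellipsis, 0])
assert np.allclose(var, np.diagonal(C, axis1=-2, axis2=-1))
print(var)  # [[ 0.  4.  8.], [ 9. 13. 17.]]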
|
|||
astropy/astroquery
|
11c9c83fa8e5f948822f8f73c854ec4b72043016
|
astroquery/vo_conesearch/validator/validate.py
|
python
|
_html_subindex
|
(args)
|
HTML writer for multiprocessing support.
|
HTML writer for multiprocessing support.
|
[
"HTML",
"writer",
"for",
"multiprocessing",
"support",
"."
] |
def _html_subindex(args):
"""HTML writer for multiprocessing support."""
out_dir, subset, total = args
html.write_index_table(out_dir, *subset, total=total)
|
[
"def",
"_html_subindex",
"(",
"args",
")",
":",
"out_dir",
",",
"subset",
",",
"total",
"=",
"args",
"html",
".",
"write_index_table",
"(",
"out_dir",
",",
"*",
"subset",
",",
"total",
"=",
"total",
")"
] |
https://github.com/astropy/astroquery/blob/11c9c83fa8e5f948822f8f73c854ec4b72043016/astroquery/vo_conesearch/validator/validate.py#L365-L368
|
||
google-research/language
|
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
|
language/labs/consistent_zero_shot_nmt/models/agreement.py
|
python
|
ag_gnmt_bahdanau_att_lm
|
()
|
return hparams
|
Hparams for LSTM with bahdanau attention.
|
Hparams for LSTM with bahdanau attention.
|
[
"Hparams",
"for",
"LSTM",
"with",
"bahdanau",
"attention",
"."
] |
def ag_gnmt_bahdanau_att_lm():
"""Hparams for LSTM with bahdanau attention."""
hparams = ag_gnmt_bahdanau_att()
hparams = base_lm(hparams)
return hparams
|
[
"def",
"ag_gnmt_bahdanau_att_lm",
"(",
")",
":",
"hparams",
"=",
"ag_gnmt_bahdanau_att",
"(",
")",
"hparams",
"=",
"base_lm",
"(",
"hparams",
")",
"return",
"hparams"
] |
https://github.com/google-research/language/blob/61fa7260ac7d690d11ef72ca863e45a37c0bdc80/language/labs/consistent_zero_shot_nmt/models/agreement.py#L646-L650
|
|
VirtueSecurity/aws-extender
|
d123b7e1a845847709ba3a481f11996bddc68a1c
|
BappModules/docutils/utils/math/math2html.py
|
python
|
TaggedText.constant
|
(self, text, tag, breaklines=False)
|
return self.complete([constant], tag, breaklines)
|
Complete the tagged text with a constant
|
Complete the tagged text with a constant
|
[
"Complete",
"the",
"tagged",
"text",
"with",
"a",
"constant"
] |
def constant(self, text, tag, breaklines=False):
"Complete the tagged text with a constant"
constant = Constant(text)
return self.complete([constant], tag, breaklines)
|
[
"def",
"constant",
"(",
"self",
",",
"text",
",",
"tag",
",",
"breaklines",
"=",
"False",
")",
":",
"constant",
"=",
"Constant",
"(",
"text",
")",
"return",
"self",
".",
"complete",
"(",
"[",
"constant",
"]",
",",
"tag",
",",
"breaklines",
")"
] |
https://github.com/VirtueSecurity/aws-extender/blob/d123b7e1a845847709ba3a481f11996bddc68a1c/BappModules/docutils/utils/math/math2html.py#L2423-L2426
|
|
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/analysis/acore.py
|
python
|
Token.__repr__
|
(self)
|
return "%s(%s)" % (self.__class__.__name__, parms)
|
[] |
def __repr__(self):
parms = ", ".join("%s=%r" % (name, value)
for name, value in iteritems(self.__dict__))
return "%s(%s)" % (self.__class__.__name__, parms)
|
[
"def",
"__repr__",
"(",
"self",
")",
":",
"parms",
"=",
"\", \"",
".",
"join",
"(",
"\"%s=%r\"",
"%",
"(",
"name",
",",
"value",
")",
"for",
"name",
",",
"value",
"in",
"iteritems",
"(",
"self",
".",
"__dict__",
")",
")",
"return",
"\"%s(%s)\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"parms",
")"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/analysis/acore.py#L125-L128
|
|||
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/api/v2010/account/incoming_phone_number/toll_free.py
|
python
|
TollFreeInstance.identity_sid
|
(self)
|
return self._properties['identity_sid']
|
:returns: The SID of the Identity resource associated with number
:rtype: unicode
|
:returns: The SID of the Identity resource associated with number
:rtype: unicode
|
[
":",
"returns",
":",
"The",
"SID",
"of",
"the",
"Identity",
"resource",
"associated",
"with",
"number",
":",
"rtype",
":",
"unicode"
] |
def identity_sid(self):
"""
:returns: The SID of the Identity resource associated with number
:rtype: unicode
"""
return self._properties['identity_sid']
|
[
"def",
"identity_sid",
"(",
"self",
")",
":",
"return",
"self",
".",
"_properties",
"[",
"'identity_sid'",
"]"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/incoming_phone_number/toll_free.py#L420-L425
|
|
TencentCloud/tencentcloud-sdk-python
|
3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2
|
tencentcloud/live/v20180801/live_client.py
|
python
|
LiveClient.DescribeUploadStreamNums
|
(self, request)
|
Query the number of live stream uplinks
:param request: Request instance for DescribeUploadStreamNums.
:type request: :class:`tencentcloud.live.v20180801.models.DescribeUploadStreamNumsRequest`
:rtype: :class:`tencentcloud.live.v20180801.models.DescribeUploadStreamNumsResponse`
|
Query the number of live stream uplinks
|
[
"直播上行路数查询"
] |
def DescribeUploadStreamNums(self, request):
"""直播上行路数查询
:param request: Request instance for DescribeUploadStreamNums.
:type request: :class:`tencentcloud.live.v20180801.models.DescribeUploadStreamNumsRequest`
:rtype: :class:`tencentcloud.live.v20180801.models.DescribeUploadStreamNumsResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeUploadStreamNums", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeUploadStreamNumsResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
|
[
"def",
"DescribeUploadStreamNums",
"(",
"self",
",",
"request",
")",
":",
"try",
":",
"params",
"=",
"request",
".",
"_serialize",
"(",
")",
"body",
"=",
"self",
".",
"call",
"(",
"\"DescribeUploadStreamNums\"",
",",
"params",
")",
"response",
"=",
"json",
".",
"loads",
"(",
"body",
")",
"if",
"\"Error\"",
"not",
"in",
"response",
"[",
"\"Response\"",
"]",
":",
"model",
"=",
"models",
".",
"DescribeUploadStreamNumsResponse",
"(",
")",
"model",
".",
"_deserialize",
"(",
"response",
"[",
"\"Response\"",
"]",
")",
"return",
"model",
"else",
":",
"code",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Code\"",
"]",
"message",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"Error\"",
"]",
"[",
"\"Message\"",
"]",
"reqid",
"=",
"response",
"[",
"\"Response\"",
"]",
"[",
"\"RequestId\"",
"]",
"raise",
"TencentCloudSDKException",
"(",
"code",
",",
"message",
",",
"reqid",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"isinstance",
"(",
"e",
",",
"TencentCloudSDKException",
")",
":",
"raise",
"else",
":",
"raise",
"TencentCloudSDKException",
"(",
"e",
".",
"message",
",",
"e",
".",
"message",
")"
] |
https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/live/v20180801/live_client.py#L2693-L2718
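Caller-side usage follows the SDK's uniform request/response pattern; a sketch assuming valid credentials (the key values and region are placeholders):
from tencentcloud.common import credential
from tencentcloud.live.v20180801 import live_client, models

cred = credential.Credential("SECRET_ID", "SECRET_KEY")  # placeholders
client = live_client.LiveClient(cred, "ap-guangzhou")
req = models.DescribeUploadStreamNumsRequest()
resp = client.DescribeUploadStreamNums(req)
print(resp.to_json_string())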
|
||
angr/angr
|
4b04d56ace135018083d36d9083805be8146688b
|
angr/tablespecs.py
|
python
|
StringTableSpec.append_env
|
(self, env, add_null=True)
|
[] |
def append_env(self, env, add_null=True):
if isinstance(env, dict):
for k, v in env.items():
if type(k) is bytes:
k = claripy.BVV(k)
elif type(k) is str:
k = claripy.BVV(k.encode())
elif isinstance(k, claripy.ast.Bits):
pass
else:
raise TypeError("Key in env must be either string or bitvector")
if type(v) is bytes:
v = claripy.BVV(v)
elif type(v) is str:
v = claripy.BVV(v.encode())
elif isinstance(v, claripy.ast.Bits):
pass
else:
raise TypeError("Value in env must be either string or bitvector")
self.add_string(k.concat(claripy.BVV(b'='), v))
else:
for v in env:
self.add_string(v)
if add_null:
self.add_null()
|
[
"def",
"append_env",
"(",
"self",
",",
"env",
",",
"add_null",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"env",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"env",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"k",
")",
"is",
"bytes",
":",
"k",
"=",
"claripy",
".",
"BVV",
"(",
"k",
")",
"elif",
"type",
"(",
"k",
")",
"is",
"str",
":",
"k",
"=",
"claripy",
".",
"BVV",
"(",
"k",
".",
"encode",
"(",
")",
")",
"elif",
"isinstance",
"(",
"k",
",",
"claripy",
".",
"ast",
".",
"Bits",
")",
":",
"pass",
"else",
":",
"raise",
"TypeError",
"(",
"\"Key in env must be either string or bitvector\"",
")",
"if",
"type",
"(",
"v",
")",
"is",
"bytes",
":",
"v",
"=",
"claripy",
".",
"BVV",
"(",
"v",
")",
"elif",
"type",
"(",
"v",
")",
"is",
"str",
":",
"v",
"=",
"claripy",
".",
"BVV",
"(",
"v",
".",
"encode",
"(",
")",
")",
"elif",
"isinstance",
"(",
"v",
",",
"claripy",
".",
"ast",
".",
"Bits",
")",
":",
"pass",
"else",
":",
"raise",
"TypeError",
"(",
"\"Value in env must be either string or bitvector\"",
")",
"self",
".",
"add_string",
"(",
"k",
".",
"concat",
"(",
"claripy",
".",
"BVV",
"(",
"b'='",
")",
",",
"v",
")",
")",
"else",
":",
"for",
"v",
"in",
"env",
":",
"self",
".",
"add_string",
"(",
"v",
")",
"if",
"add_null",
":",
"self",
".",
"add_null",
"(",
")"
] |
https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/tablespecs.py#L15-L41
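Every key and value funnels through the same three-way coercion before being flattened to KEY=VALUE. A hedged standalone sketch of that coercion, assuming only claripy is installed (the helper name is made up):
import claripy

def to_bv(x):
    # mirrors the coercion in append_env: bytes/str become BVVs, bitvectors pass
    if type(x) is bytes:
        return claripy.BVV(x)
    if type(x) is str:
        return claripy.BVV(x.encode())
    if isinstance(x, claripy.ast.Bits):
        return x
    raise TypeError("must be either string or bitvector")

pair = to_bv('PATH').concat(claripy.BVV(b'='), to_bv(b'/usr/bin'))
print(pair)  # a concrete bitvector spelling b'PATH=/usr/bin'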
|
||||
FSecureLABS/needle
|
891b6601262020bb2df98f81f6c0ef2d97ddd82c
|
needle/core/framework/framework.py
|
python
|
Framework.do_info
|
(self, params)
|
Alias: info == show info.
|
Alias: info == show info.
|
[
"Alias",
":",
"info",
"==",
"show",
"info",
"."
] |
def do_info(self, params):
"""Alias: info == show info."""
if hasattr(self, 'show_info'):
self.show_info()
|
[
"def",
"do_info",
"(",
"self",
",",
"params",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'show_info'",
")",
":",
"self",
".",
"show_info",
"(",
")"
] |
https://github.com/FSecureLABS/needle/blob/891b6601262020bb2df98f81f6c0ef2d97ddd82c/needle/core/framework/framework.py#L427-L430
|
||
missionpinball/mpf
|
8e6b74cff4ba06d2fec9445742559c1068b88582
|
mpf/devices/logic_blocks.py
|
python
|
Sequence.setup_event_handlers
|
(self)
|
Add the handlers for the current step.
|
Add the handlers for the current step.
|
[
"Add",
"the",
"handlers",
"for",
"the",
"current",
"step",
"."
] |
def setup_event_handlers(self):
"""Add the handlers for the current step."""
for step, events in enumerate(self.config['events']):
for event in Util.string_to_event_list(events):
# increase priority with steps to prevent advancing multiple steps at once
self.machine.events.add_handler(event, self.hit, step=step, priority=step)
|
[
"def",
"setup_event_handlers",
"(",
"self",
")",
":",
"for",
"step",
",",
"events",
"in",
"enumerate",
"(",
"self",
".",
"config",
"[",
"'events'",
"]",
")",
":",
"for",
"event",
"in",
"Util",
".",
"string_to_event_list",
"(",
"events",
")",
":",
"# increase priority with steps to prevent advancing multiple steps at once",
"self",
".",
"machine",
".",
"events",
".",
"add_handler",
"(",
"event",
",",
"self",
".",
"hit",
",",
"step",
"=",
"step",
",",
"priority",
"=",
"step",
")"
] |
https://github.com/missionpinball/mpf/blob/8e6b74cff4ba06d2fec9445742559c1068b88582/mpf/devices/logic_blocks.py#L643-L648
|
||
indico/indico
|
1579ea16235bbe5f22a308b79c5902c85374721f
|
indico/modules/categories/models/categories.py
|
python
|
Category.can_create_events
|
(self, user)
|
return user and ((self.event_creation_mode == EventCreationMode.open and self.can_access(user)) or
self.can_manage(user, permission='create'))
|
Check whether the user can create events in the category.
|
Check whether the user can create events in the category.
|
[
"Check",
"whether",
"the",
"user",
"can",
"create",
"events",
"in",
"the",
"category",
"."
] |
def can_create_events(self, user):
"""Check whether the user can create events in the category."""
# if creation is not restricted anyone who can access the category
# can also create events in it, otherwise only people with the
# creation role can
return user and ((self.event_creation_mode == EventCreationMode.open and self.can_access(user)) or
self.can_manage(user, permission='create'))
|
[
"def",
"can_create_events",
"(",
"self",
",",
"user",
")",
":",
"# if creation is not restricted anyone who can access the category",
"# can also create events in it, otherwise only people with the",
"# creation role can",
"return",
"user",
"and",
"(",
"(",
"self",
".",
"event_creation_mode",
"==",
"EventCreationMode",
".",
"open",
"and",
"self",
".",
"can_access",
"(",
"user",
")",
")",
"or",
"self",
".",
"can_manage",
"(",
"user",
",",
"permission",
"=",
"'create'",
")",
")"
] |
https://github.com/indico/indico/blob/1579ea16235bbe5f22a308b79c5902c85374721f/indico/modules/categories/models/categories.py#L338-L344
|
|
ytisf/theZoo
|
385eb68a35770991f34fed58f20b231e5e7a5fef
|
imports/globals.py
|
python
|
init.init
|
(self)
|
[] |
def init(self):
# Global Variables
version = "0.6.0 Moat"
appname = "theZoo"
codename = "Moat"
authors = "Yuval Nativ, Lahad Ludar, 5fingers"
maintainers = [ "Shahak Shalev", "Yuval Nativ" ]
github_add = "https://www.github.com/ytisf/theZoo"
licensev = "GPL v3.0"
fulllicense = appname + " Copyright (C) 2016 " + authors + "\n"
fulllicense += "This program comes with ABSOLUTELY NO WARRANTY; for details type '" + \
sys.argv[0] + " -w'.\n"
fulllicense += "This is free software, and you are welcome to redistribute it."
usage = '\nUsage: ' + sys.argv[0] + \
' -s search_query -t trojan -p vb\n\n'
usage += 'The search engine can search by regular search or using specified arguments:\n\nOPTIONS:\n -h --help\t\tShow this message\n -t --type\t\tMalware type, can be virus/trojan/botnet/spyware/ransomeware.\n -p --language\tProgramming language, can be c/cpp/vb/asm/bin/java.\n -u --update\t\tUpdate malware index. Rebuilds main CSV file. \n -s --search\t\tSearch query for name or anything. \n -v --version\tPrint the version information.\n -w\t\t\tPrint GNU license.\n'
conf_folder = 'conf'
eula_file = conf_folder + '/eula_run.conf'
maldb_ver_file = conf_folder + '/db.ver'
giturl = 'https://github.com/ytisf/theZoo/blob/master'
|
[
"def",
"init",
"(",
"self",
")",
":",
"# Global Variables",
"version",
"=",
"\"0.6.0 Moat\"",
"appname",
"=",
"\"theZoo\"",
"codename",
"=",
"\"Moat\"",
"authors",
"=",
"\"Yuval Nativ, Lahad Ludar, 5fingers\"",
"maintainers",
"=",
"[",
"\"Shahak Shalev\"",
",",
"\"Yuval Nativ\"",
"]",
"github_add",
"=",
"\"https://www.github.com/ytisf/theZoo\"",
"licensev",
"=",
"\"GPL v3.0\"",
"fulllicense",
"=",
"appname",
"+",
"\" Copyright (C) 2016 \"",
"+",
"authors",
"+",
"\"\\n\"",
"fulllicense",
"+=",
"\"This program comes with ABSOLUTELY NO WARRANTY; for details type '\"",
"+",
"sys",
".",
"argv",
"[",
"0",
"]",
"+",
"\" -w'.\\n\"",
"fulllicense",
"+=",
"\"This is free software, and you are welcome to redistribute it.\"",
"usage",
"=",
"'\\nUsage: '",
"+",
"sys",
".",
"argv",
"[",
"0",
"]",
"+",
"' -s search_query -t trojan -p vb\\n\\n'",
"usage",
"+=",
"'The search engine can search by regular search or using specified arguments:\\n\\nOPTIONS:\\n -h --help\\t\\tShow this message\\n -t --type\\t\\tMalware type, can be virus/trojan/botnet/spyware/ransomeware.\\n -p --language\\tProgramming language, can be c/cpp/vb/asm/bin/java.\\n -u --update\\t\\tUpdate malware index. Rebuilds main CSV file. \\n -s --search\\t\\tSearch query for name or anything. \\n -v --version\\tPrint the version information.\\n -w\\t\\t\\tPrint GNU license.\\n'",
"conf_folder",
"=",
"'conf'",
"eula_file",
"=",
"conf_folder",
"+",
"'/eula_run.conf'",
"maldb_ver_file",
"=",
"conf_folder",
"+",
"'/db.ver'",
"giturl",
"=",
"'https://github.com/ytisf/theZoo/blob/master'"
] |
https://github.com/ytisf/theZoo/blob/385eb68a35770991f34fed58f20b231e5e7a5fef/imports/globals.py#L24-L45
|
||||
kylebebak/Requester
|
4a9f9f051fa5fc951a8f7ad098a328261ca2db97
|
deps/graphql/parser.py
|
python
|
GraphQLParser.p_object_field_list_single
|
(self, p)
|
object_field_list : object_field
|
object_field_list : object_field
|
[
"object_field_list",
":",
"object_field"
] |
def p_object_field_list_single(self, p):
"""
object_field_list : object_field
"""
p[0] = p[1]
|
[
"def",
"p_object_field_list_single",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]"
] |
https://github.com/kylebebak/Requester/blob/4a9f9f051fa5fc951a8f7ad098a328261ca2db97/deps/graphql/parser.py#L568-L572
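In PLY the docstring is the grammar production and p indexes the parse stack: p[0] receives the rule's value, p[1] the first symbol's. A hedged, self-contained example of the single/multi list-rule pattern, assuming ply is installed (grammar and token names are made up):
import ply.lex as lex
import ply.yacc as yacc

tokens = ('NAME', 'COMMA')
t_NAME = r'[a-zA-Z_]\w*'
t_COMMA = r','
t_ignore = ' '

def t_error(t):
    t.lexer.skip(1)

def p_items_single(p):
    """items : NAME"""
    p[0] = [p[1]]                # base case: start the list

def p_items_multi(p):
    """items : items COMMA NAME"""
    p[0] = p[1] + [p[3]]         # extend the accumulated list

def p_error(p):
    raise SyntaxError(p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('a, b, c'))   # ['a', 'b', 'c']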
|
||
CERTCC/tapioca
|
2370e1724bfb75c0a2be0cc7e776f870f9d6a6ed
|
tapioca.py
|
python
|
Example.updateStatus
|
(self, test)
|
Main app module Status update.
This hooks into the self.worker.signalStatus event
|
Main app module Status update.
This hooks into the self.worker.signalStatus event
|
[
"Main",
"app",
"module",
"Status",
"update",
".",
"This",
"hooks",
"into",
"the",
"self",
".",
"worker",
".",
"signalStatus",
"event"
] |
def updateStatus(self, test):
'''
Main app module Status update.
This hooks into the self.worker.signalStatus event
'''
#print('*** Main updateStatus : %s ***' % test)
test = str(test)
if test.endswith('COMPLETE') or test.endswith('ERROR'):
if test.startswith('search '):
# Search results needs to pass data from worker object to GUI
# object
#print('Setting search result values in GUI object')
self.gui.searchfound = self.worker.searchfound
self.gui.foundunenc = self.worker.foundunenc
self.gui.foundunprot = self.worker.foundunprot
self.gui.foundprot = self.worker.foundprot
self.gui.updatesearchresults()
else:
# This is something handled only by the GUI part
pass
else:
# We need a prompt
prompt_msg = test
#print('We need a prompt!')
reply = QMessageBox.question(self.gui, 'Tapioca',
prompt_msg, QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
pass
else:
pass
|
[
"def",
"updateStatus",
"(",
"self",
",",
"test",
")",
":",
"#print('*** Main updateStatus : %s ***' % test)",
"test",
"=",
"str",
"(",
"test",
")",
"if",
"test",
".",
"endswith",
"(",
"'COMPLETE'",
")",
"or",
"test",
".",
"endswith",
"(",
"'ERROR'",
")",
":",
"if",
"test",
".",
"startswith",
"(",
"'search '",
")",
":",
"# Search results needs to pass data from worker object to GUI",
"# object",
"#print('Setting search result values in GUI object')",
"self",
".",
"gui",
".",
"searchfound",
"=",
"self",
".",
"worker",
".",
"searchfound",
"self",
".",
"gui",
".",
"foundunenc",
"=",
"self",
".",
"worker",
".",
"foundunenc",
"self",
".",
"gui",
".",
"foundunprot",
"=",
"self",
".",
"worker",
".",
"foundunprot",
"self",
".",
"gui",
".",
"foundprot",
"=",
"self",
".",
"worker",
".",
"foundprot",
"self",
".",
"gui",
".",
"updatesearchresults",
"(",
")",
"else",
":",
"# This is something handled only by the GUI part",
"pass",
"else",
":",
"# We need a prompt",
"prompt_msg",
"=",
"test",
"#print('We need a prompt!')",
"reply",
"=",
"QMessageBox",
".",
"question",
"(",
"self",
".",
"gui",
",",
"'Tapioca'",
",",
"prompt_msg",
",",
"QMessageBox",
".",
"Yes",
",",
"QMessageBox",
".",
"No",
")",
"if",
"reply",
"==",
"QMessageBox",
".",
"Yes",
":",
"pass",
"else",
":",
"pass"
] |
https://github.com/CERTCC/tapioca/blob/2370e1724bfb75c0a2be0cc7e776f870f9d6a6ed/tapioca.py#L249-L278
|
||
pulp/pulp
|
a0a28d804f997b6f81c391378aff2e4c90183df9
|
server/pulp/plugins/conduits/cataloger.py
|
python
|
CatalogerConduit.add_entry
|
(self, type_id, unit_key, url)
|
Add an entry to the content catalog.
:param type_id: The content unit type ID.
:type type_id: str
:param unit_key: The content unit key.
:type unit_key: dict
:param url: The URL used to download content associated with the unit.
:type url: str
|
Add an entry to the content catalog.
:param type_id: The content unit type ID.
:type type_id: str
:param unit_key: The content unit key.
:type unit_key: dict
:param url: The URL used to download content associated with the unit.
:type url: str
|
[
"Add",
"an",
"entry",
"to",
"the",
"content",
"catalog",
".",
":",
"param",
"type_id",
":",
"The",
"content",
"unit",
"type",
"ID",
".",
":",
"type",
"type_id",
":",
"str",
":",
"param",
"unit_key",
":",
"The",
"content",
"unit",
"key",
".",
":",
"type",
"unit_key",
":",
"dict",
":",
"param",
"url",
":",
"The",
"URL",
"used",
"to",
"download",
"content",
"associated",
"with",
"the",
"unit",
".",
":",
"type",
"url",
":",
"str"
] |
def add_entry(self, type_id, unit_key, url):
"""
Add an entry to the content catalog.
:param type_id: The content unit type ID.
:type type_id: str
:param unit_key: The content unit key.
:type unit_key: dict
:param url: The URL used to download content associated with the unit.
:type url: str
"""
manager = managers.content_catalog_manager()
manager.add_entry(self.source_id, self.expires, type_id, unit_key, url)
self.added_count += 1
|
[
"def",
"add_entry",
"(",
"self",
",",
"type_id",
",",
"unit_key",
",",
"url",
")",
":",
"manager",
"=",
"managers",
".",
"content_catalog_manager",
"(",
")",
"manager",
".",
"add_entry",
"(",
"self",
".",
"source_id",
",",
"self",
".",
"expires",
",",
"type_id",
",",
"unit_key",
",",
"url",
")",
"self",
".",
"added_count",
"+=",
"1"
] |
https://github.com/pulp/pulp/blob/a0a28d804f997b6f81c391378aff2e4c90183df9/server/pulp/plugins/conduits/cataloger.py#L22-L34
|
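A hedged usage sketch for add_entry, assuming a cataloger plugin has been
handed this conduit during a refresh; the type ID, unit key, and URL below
are illustrative values only:

conduit.add_entry(
    type_id="rpm",
    unit_key={"name": "zsh", "version": "5.0.2", "release": "7.el7"},
    url="http://mirror.example.com/packages/zsh-5.0.2-7.el7.x86_64.rpm",
)
# Each successful call also bumps conduit.added_count by one.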
||
securesystemslab/zippy
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
zippy/lib-python/3/idlelib/ZoomHeight.py
|
python
|
zoom_height
|
(top)
|
[] |
def zoom_height(top):
geom = top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
if not m:
top.bell()
return
width, height, x, y = map(int, m.groups())
newheight = top.winfo_screenheight()
if sys.platform == 'win32':
newy = 0
newheight = newheight - 72
elif macosxSupport.runningAsOSXApp():
# The '88' below is a magic number that avoids placing the bottom
# of the window below the panel on my machine. I don't know how
# to calculate the correct value for this with tkinter.
newy = 22
newheight = newheight - newy - 88
else:
#newy = 24
newy = 0
#newheight = newheight - 96
newheight = newheight - 88
if height >= newheight:
newgeom = ""
else:
newgeom = "%dx%d+%d+%d" % (width, newheight, x, newy)
top.wm_geometry(newgeom)
|
[
"def",
"zoom_height",
"(",
"top",
")",
":",
"geom",
"=",
"top",
".",
"wm_geometry",
"(",
")",
"m",
"=",
"re",
".",
"match",
"(",
"r\"(\\d+)x(\\d+)\\+(-?\\d+)\\+(-?\\d+)\"",
",",
"geom",
")",
"if",
"not",
"m",
":",
"top",
".",
"bell",
"(",
")",
"return",
"width",
",",
"height",
",",
"x",
",",
"y",
"=",
"map",
"(",
"int",
",",
"m",
".",
"groups",
"(",
")",
")",
"newheight",
"=",
"top",
".",
"winfo_screenheight",
"(",
")",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"newy",
"=",
"0",
"newheight",
"=",
"newheight",
"-",
"72",
"elif",
"macosxSupport",
".",
"runningAsOSXApp",
"(",
")",
":",
"# The '88' below is a magic number that avoids placing the bottom",
"# of the window below the panel on my machine. I don't know how",
"# to calculate the correct value for this with tkinter.",
"newy",
"=",
"22",
"newheight",
"=",
"newheight",
"-",
"newy",
"-",
"88",
"else",
":",
"#newy = 24",
"newy",
"=",
"0",
"#newheight = newheight - 96",
"newheight",
"=",
"newheight",
"-",
"88",
"if",
"height",
">=",
"newheight",
":",
"newgeom",
"=",
"\"\"",
"else",
":",
"newgeom",
"=",
"\"%dx%d+%d+%d\"",
"%",
"(",
"width",
",",
"newheight",
",",
"x",
",",
"newy",
")",
"top",
".",
"wm_geometry",
"(",
"newgeom",
")"
] |
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/idlelib/ZoomHeight.py#L23-L51
|
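The core of zoom_height is parsing Tk's geometry string. A minimal,
stdlib-only sketch of that step (the geometry value is made up):

import re

geom = "800x600+100+50"  # hypothetical result of top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
if m:
    width, height, x, y = map(int, m.groups())
    print(width, height, x, y)  # 800 600 100 50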
||||
uqfoundation/mystic
|
154e6302d1f2f94e8f13e88ecc5f24241cc28ac7
|
cache/archive.py
|
python
|
read_func
|
(name, keymap=None, type=None, n=0)
|
return archive.get(entry, None)
|
read stored function from db with name 'name'
Args:
name (string): filename of the klepto db
keymap (klepto.keymap): keymap used for key encoding
type (klepto.archive): type of klepto archive
n (int): db entry in reverse order (i.e. most recent is ``0``)
Returns:
tuple of (stored function, distance information)
Notes:
If the db is empty, or ``n`` produces a bad index, returns ``None``.
Alternately, ``name`` can be the relevant klepto.archive instance.
|
read stored function from db with name 'name'
|
[
"read",
"stored",
"function",
"from",
"db",
"with",
"name",
"name"
] |
def read_func(name, keymap=None, type=None, n=0):
"""read stored function from db with name 'name'
Args:
name (string): filename of the klepto db
keymap (klepto.keymap): keymap used for key encoding
type (klepto.archive): type of klepto archive
n (int): db entry in reverse order (i.e. most recent is ``0``)
Returns:
tuple of (stored function, distance information)
Notes:
If the db is empty, or ``n`` produces a bad index, returns ``None``.
Alternately, ``name`` can be the relevant klepto.archive instance.
"""
if not isinstance(name, (str, (u'').__class__)):
if type is not None:
#msg = 'if a klepto.archive instance is provided, type must be None'
#raise TypeError(msg)
type = None #NOTE: ignore type
archive = getattr(name, '__archive__', name) # need cached == False
else:
archive = _read_func(name, type=type) # read entire archive to get size
size = len(archive) - n - 1
entry = size if keymap is None else keymap(size)
return archive.get(entry, None)
|
[
"def",
"read_func",
"(",
"name",
",",
"keymap",
"=",
"None",
",",
"type",
"=",
"None",
",",
"n",
"=",
"0",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"(",
"str",
",",
"(",
"u''",
")",
".",
"__class__",
")",
")",
":",
"if",
"type",
"is",
"not",
"None",
":",
"#msg = 'if a klepto.archive instance is provided, type must be None'",
"#raise TypeError(msg)",
"type",
"=",
"None",
"#NOTE: ignore type",
"archive",
"=",
"getattr",
"(",
"name",
",",
"'__archive__'",
",",
"name",
")",
"# need cached == False",
"else",
":",
"archive",
"=",
"_read_func",
"(",
"name",
",",
"type",
"=",
"type",
")",
"# read entire archive to get size",
"size",
"=",
"len",
"(",
"archive",
")",
"-",
"n",
"-",
"1",
"entry",
"=",
"size",
"if",
"keymap",
"is",
"None",
"else",
"keymap",
"(",
"size",
")",
"return",
"archive",
".",
"get",
"(",
"entry",
",",
"None",
")"
] |
https://github.com/uqfoundation/mystic/blob/154e6302d1f2f94e8f13e88ecc5f24241cc28ac7/cache/archive.py#L91-L117
|
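A hedged usage sketch for read_func, assuming a klepto file archive named
'funcs.db' already holds at least one stored function (the filename is
hypothetical; the import path follows the record's module layout):

from cache.archive import read_func

entry = read_func('funcs.db')        # most recent entry (n=0)
if entry is None:
    print('archive is empty, or n produced a bad index')
else:
    func, distances = entry          # per the docstring's return contract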
|
xonsh/xonsh
|
b76d6f994f22a4078f602f8b386f4ec280c8461f
|
xonsh/completers/init.py
|
python
|
default_completers
|
()
|
return collections.OrderedDict(
[
# non-exclusive completers:
("end_proc_tokens", complete_end_proc_tokens),
("end_proc_keywords", complete_end_proc_keywords),
("environment_vars", complete_environment_vars),
# exclusive completers:
("base", complete_base),
("skip", complete_skipper),
("alias", complete_aliases),
("xompleter", complete_xompletions),
("import", complete_import),
("bash", complete_from_bash),
("man", complete_from_man),
("python", complete_python),
("path", complete_path),
]
)
|
Creates a copy of the default completers.
|
Creates a copy of the default completers.
|
[
"Creates",
"a",
"copy",
"of",
"the",
"default",
"completers",
"."
] |
def default_completers():
"""Creates a copy of the default completers."""
return collections.OrderedDict(
[
# non-exclusive completers:
("end_proc_tokens", complete_end_proc_tokens),
("end_proc_keywords", complete_end_proc_keywords),
("environment_vars", complete_environment_vars),
# exclusive completers:
("base", complete_base),
("skip", complete_skipper),
("alias", complete_aliases),
("xompleter", complete_xompletions),
("import", complete_import),
("bash", complete_from_bash),
("man", complete_from_man),
("python", complete_python),
("path", complete_path),
]
)
|
[
"def",
"default_completers",
"(",
")",
":",
"return",
"collections",
".",
"OrderedDict",
"(",
"[",
"# non-exclusive completers:",
"(",
"\"end_proc_tokens\"",
",",
"complete_end_proc_tokens",
")",
",",
"(",
"\"end_proc_keywords\"",
",",
"complete_end_proc_keywords",
")",
",",
"(",
"\"environment_vars\"",
",",
"complete_environment_vars",
")",
",",
"# exclusive completers:",
"(",
"\"base\"",
",",
"complete_base",
")",
",",
"(",
"\"skip\"",
",",
"complete_skipper",
")",
",",
"(",
"\"alias\"",
",",
"complete_aliases",
")",
",",
"(",
"\"xompleter\"",
",",
"complete_xompletions",
")",
",",
"(",
"\"import\"",
",",
"complete_import",
")",
",",
"(",
"\"bash\"",
",",
"complete_from_bash",
")",
",",
"(",
"\"man\"",
",",
"complete_from_man",
")",
",",
"(",
"\"python\"",
",",
"complete_python",
")",
",",
"(",
"\"path\"",
",",
"complete_path",
")",
",",
"]",
")"
] |
https://github.com/xonsh/xonsh/blob/b76d6f994f22a4078f602f8b386f4ec280c8461f/xonsh/completers/init.py#L22-L41
|
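Because default_completers builds a fresh OrderedDict on every call, a caller
can mutate the returned mapping without affecting later calls. A small sketch,
assuming the function is importable from xonsh.completers.init:

from xonsh.completers.init import default_completers

mine = default_completers()
del mine["bash"]                       # customize this copy only
assert "bash" in default_completers()  # later copies are unaffected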
|
openhatch/oh-mainline
|
ce29352a034e1223141dcc2f317030bbc3359a51
|
vendor/packages/requests/requests/models.py
|
python
|
Response.content
|
(self)
|
return self._content
|
Content of the response, in bytes.
|
Content of the response, in bytes.
|
[
"Content",
"of",
"the",
"response",
"in",
"bytes",
"."
] |
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
|
[
"def",
"content",
"(",
"self",
")",
":",
"if",
"self",
".",
"_content",
"is",
"False",
":",
"# Read the contents.",
"try",
":",
"if",
"self",
".",
"_content_consumed",
":",
"raise",
"RuntimeError",
"(",
"'The content for this response was already consumed'",
")",
"if",
"self",
".",
"status_code",
"==",
"0",
":",
"self",
".",
"_content",
"=",
"None",
"else",
":",
"self",
".",
"_content",
"=",
"bytes",
"(",
")",
".",
"join",
"(",
"self",
".",
"iter_content",
"(",
"CONTENT_CHUNK_SIZE",
")",
")",
"or",
"bytes",
"(",
")",
"except",
"AttributeError",
":",
"self",
".",
"_content",
"=",
"None",
"self",
".",
"_content_consumed",
"=",
"True",
"# don't need to release the connection; that's been handled by urllib3",
"# since we exhausted the data.",
"return",
"self",
".",
"_content"
] |
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/requests/requests/models.py#L737-L758
|
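In practice the property is read like an attribute: the body is fetched
lazily on first access and cached afterwards. A minimal sketch with requests
(the URL is illustrative; needs network access):

import requests

resp = requests.get("https://example.org")
body = resp.content          # bytes; triggers the read on first access
assert body is resp.content  # second access returns the cached object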
|
pabigot/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
pyxb/namespace/builtin.py
|
python
|
_InitializeBuiltinNamespaces
|
(structures_module)
|
Invoked at the end of the L{pyxb.xmlschema.structures} module to
initialize the component models of the built-in namespaces.
@param structures_module: The L{pyxb.xmlschema.structures} module may not
be importable by that name at the time this is invoked (because it is
still being processed), so it gets passed in as a parameter.
|
Invoked at the end of the L{pyxb.xmlschema.structures} module to
initialize the component models of the built-in namespaces.
|
[
"Invoked",
"at",
"the",
"end",
"of",
"the",
"L",
"{",
"pyxb",
".",
"xmlschema",
".",
"structures",
"}",
"module",
"to",
"initialize",
"the",
"component",
"models",
"of",
"the",
"built",
"-",
"in",
"namespaces",
"."
] |
def _InitializeBuiltinNamespaces (structures_module):
"""Invoked at the end of the L{pyxb.xmlschema.structures} module to
initialize the component models of the built-in namespaces.
@param structures_module: The L{pyxb.xmlschema.structures} module may not
be importable by that name at the time this is invoked (because it is
still being processed), so it gets passed in as a parameter."""
global __InitializedBuiltinNamespaces
if not __InitializedBuiltinNamespaces:
__InitializedBuiltinNamespaces = True
[ _ns._defineBuiltins(structures_module) for _ns in BuiltInNamespaces ]
|
[
"def",
"_InitializeBuiltinNamespaces",
"(",
"structures_module",
")",
":",
"global",
"__InitializedBuiltinNamespaces",
"if",
"not",
"__InitializedBuiltinNamespaces",
":",
"__InitializedBuiltinNamespaces",
"=",
"True",
"[",
"_ns",
".",
"_defineBuiltins",
"(",
"structures_module",
")",
"for",
"_ns",
"in",
"BuiltInNamespaces",
"]"
] |
https://github.com/pabigot/pyxb/blob/14737c23a125fd12c954823ad64fc4497816fae3/pyxb/namespace/builtin.py#L289-L299
|
||
MenglinLu/Chinese-clinical-NER
|
9614593ee2e1ba38d0985c44e957d316e178b93c
|
bert_sklearn/bert_sklearn/sklearn.py
|
python
|
BertTokenClassifier.tag_text
|
(self, text, verbose=True)
|
return tags
|
Tokenize text and print most probable token tags.
|
Tokenize text and print most probable token tags.
|
[
"Tokenize",
"text",
"and",
"print",
"most",
"probable",
"token",
"tags",
"."
] |
def tag_text(self, text, verbose=True):
"""
Tokenize text and print most probable token tags.
"""
tokens = self.basic_tokenizer.tokenize(text)
tags = self.predict(np.array([tokens]))[0]
if verbose:
data = {"token": tokens, "predicted tags": tags}
df = pd.DataFrame(data=data)
print(df)
return tags
|
[
"def",
"tag_text",
"(",
"self",
",",
"text",
",",
"verbose",
"=",
"True",
")",
":",
"tokens",
"=",
"self",
".",
"basic_tokenizer",
".",
"tokenize",
"(",
"text",
")",
"tags",
"=",
"self",
".",
"predict",
"(",
"np",
".",
"array",
"(",
"[",
"tokens",
"]",
")",
")",
"[",
"0",
"]",
"if",
"verbose",
":",
"data",
"=",
"{",
"\"token\"",
":",
"tokens",
",",
"\"predicted tags\"",
":",
"tags",
"}",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"data",
")",
"print",
"(",
"df",
")",
"return",
"tags"
] |
https://github.com/MenglinLu/Chinese-clinical-NER/blob/9614593ee2e1ba38d0985c44e957d316e178b93c/bert_sklearn/bert_sklearn/sklearn.py#L726-L736
|
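A hedged usage sketch, assuming a BertTokenClassifier has already been fitted
on a token-tagging dataset (the input text is illustrative):

# model is a fitted BertTokenClassifier; training setup not shown here.
tags = model.tag_text("Patient reports chest pain and dizziness.")
# With verbose=True (the default) a token/predicted-tag DataFrame is printed;
# the list of predicted tags is returned either way.
print(tags)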
|
idanr1986/cuckoo-droid
|
1350274639473d3d2b0ac740cae133ca53ab7444
|
analyzer/android_on_linux/lib/api/androguard/analysis.py
|
python
|
is_native_code
|
(dx)
|
return False
|
Native code is present ?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
|
Native code is present ?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
|
[
"Native",
"code",
"is",
"present",
"?",
":",
"param",
"dx",
":",
"the",
"analysis",
"virtual",
"machine",
":",
"type",
"dx",
":",
"a",
":",
"class",
":",
"VMAnalysis",
"object",
":",
"rtype",
":",
"boolean"
] |
def is_native_code(dx) :
"""
Native code is present ?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
"""
paths = dx.get_tainted_packages().search_methods( "Ljava/lang/System;", "loadLibrary", ".")
if paths != [] :
return True
return False
|
[
"def",
"is_native_code",
"(",
"dx",
")",
":",
"paths",
"=",
"dx",
".",
"get_tainted_packages",
"(",
")",
".",
"search_methods",
"(",
"\"Ljava/lang/System;\"",
",",
"\"loadLibrary\"",
",",
"\".\"",
")",
"if",
"paths",
"!=",
"[",
"]",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/idanr1986/cuckoo-droid/blob/1350274639473d3d2b0ac740cae133ca53ab7444/analyzer/android_on_linux/lib/api/androguard/analysis.py#L1680-L1691
|
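A hedged sketch of driving this check with androguard's classic API; the APK
filename is hypothetical, and the VMAnalysis construction assumes the same
androguard generation the record targets:

from androguard.core.bytecodes import apk, dvm
from androguard.core.analysis import analysis

a = apk.APK("sample.apk")              # hypothetical input file
d = dvm.DalvikVMFormat(a.get_dex())
dx = analysis.VMAnalysis(d)
print(is_native_code(dx))              # True if System.loadLibrary is called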
|
twilio/twilio-python
|
6e1e811ea57a1edfadd5161ace87397c563f6915
|
twilio/rest/verify/v2/service/entity/challenge/__init__.py
|
python
|
ChallengeInstance.update
|
(self, auth_payload=values.unset)
|
return self._proxy.update(auth_payload=auth_payload, )
|
Update the ChallengeInstance
:param unicode auth_payload: Optional payload to verify the Challenge
:returns: The updated ChallengeInstance
:rtype: twilio.rest.verify.v2.service.entity.challenge.ChallengeInstance
|
Update the ChallengeInstance
|
[
"Update",
"the",
"ChallengeInstance"
] |
def update(self, auth_payload=values.unset):
"""
Update the ChallengeInstance
:param unicode auth_payload: Optional payload to verify the Challenge
:returns: The updated ChallengeInstance
:rtype: twilio.rest.verify.v2.service.entity.challenge.ChallengeInstance
"""
return self._proxy.update(auth_payload=auth_payload, )
|
[
"def",
"update",
"(",
"self",
",",
"auth_payload",
"=",
"values",
".",
"unset",
")",
":",
"return",
"self",
".",
"_proxy",
".",
"update",
"(",
"auth_payload",
"=",
"auth_payload",
",",
")"
] |
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/verify/v2/service/entity/challenge/__init__.py#L580-L589
|
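A hedged usage sketch; the client traversal below is inferred from the
record's module path, and every SID and the payload are placeholders:

from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
challenge = (client.verify.v2
             .services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
             .entities("user_identity")
             .challenges("YCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
             .update(auth_payload="123456"))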
|
zhl2008/awd-platform
|
0416b31abea29743387b10b3914581fbe8e7da5e
|
web_hxb2/lib/python3.5/site-packages/pip/_vendor/pyparsing.py
|
python
|
_escapeRegexRangeChars
|
(s)
|
return _ustr(s)
|
[] |
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
|
[
"def",
"_escapeRegexRangeChars",
"(",
"s",
")",
":",
"#~ escape these chars: ^-]",
"for",
"c",
"in",
"r\"\\^-]\"",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"c",
",",
"_bslash",
"+",
"c",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"\\n\"",
",",
"r\"\\n\"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"\\t\"",
",",
"r\"\\t\"",
")",
"return",
"_ustr",
"(",
"s",
")"
] |
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/_vendor/pyparsing.py#L4524-L4530
|
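The helper escapes the characters that are special inside a regex character
class. A standalone sketch of the same logic using only the stdlib (no
pyparsing internals required):

import re

def escape_range_chars(s):
    # Escape the chars that are special inside [...]: \ ^ - ]
    for c in r"\^-]":
        s = s.replace(c, "\\" + c)
    return s.replace("\n", r"\n").replace("\t", r"\t")

pattern = re.compile("[" + escape_range_chars("a-z^]") + "]")
print(bool(pattern.match("-")))  # True: '-' is now matched literally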
|||
IronLanguages/ironpython2
|
51fdedeeda15727717fb8268a805f71b06c0b9f1
|
Src/StdLib/Lib/ssl.py
|
python
|
SSLSocket.get_channel_binding
|
(self, cb_type="tls-unique")
|
return self._sslobj.tls_unique_cb()
|
Get channel binding data for current connection. Raise ValueError
if the requested `cb_type` is not supported. Return bytes of the data
or None if the data is not available (e.g. before the handshake).
|
Get channel binding data for current connection. Raise ValueError
if the requested `cb_type` is not supported. Return bytes of the data
or None if the data is not available (e.g. before the handshake).
|
[
"Get",
"channel",
"binding",
"data",
"for",
"current",
"connection",
".",
"Raise",
"ValueError",
"if",
"the",
"requested",
"cb_type",
"is",
"not",
"supported",
".",
"Return",
"bytes",
"of",
"the",
"data",
"or",
"None",
"if",
"the",
"data",
"is",
"not",
"available",
"(",
"e",
".",
"g",
".",
"before",
"the",
"handshake",
")",
"."
] |
def get_channel_binding(self, cb_type="tls-unique"):
"""Get channel binding data for current connection. Raise ValueError
if the requested `cb_type` is not supported. Return bytes of the data
or None if the data is not available (e.g. before the handshake).
"""
if cb_type not in CHANNEL_BINDING_TYPES:
raise ValueError("Unsupported channel binding type")
if cb_type != "tls-unique":
raise NotImplementedError(
"{0} channel binding type not implemented"
.format(cb_type))
if self._sslobj is None:
return None
return self._sslobj.tls_unique_cb()
|
[
"def",
"get_channel_binding",
"(",
"self",
",",
"cb_type",
"=",
"\"tls-unique\"",
")",
":",
"if",
"cb_type",
"not",
"in",
"CHANNEL_BINDING_TYPES",
":",
"raise",
"ValueError",
"(",
"\"Unsupported channel binding type\"",
")",
"if",
"cb_type",
"!=",
"\"tls-unique\"",
":",
"raise",
"NotImplementedError",
"(",
"\"{0} channel binding type not implemented\"",
".",
"format",
"(",
"cb_type",
")",
")",
"if",
"self",
".",
"_sslobj",
"is",
"None",
":",
"return",
"None",
"return",
"self",
".",
"_sslobj",
".",
"tls_unique_cb",
"(",
")"
] |
https://github.com/IronLanguages/ironpython2/blob/51fdedeeda15727717fb8268a805f71b06c0b9f1/Src/StdLib/Lib/ssl.py#L894-L907
|
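A minimal sketch of querying the tls-unique binding after a handshake,
written for Python 3 (the host is illustrative and needs network access;
note that tls-unique may be unavailable under TLS 1.3):

import socket
import ssl

ctx = ssl.create_default_context()
with socket.create_connection(("example.org", 443)) as sock:
    with ctx.wrap_socket(sock, server_hostname="example.org") as ssock:
        cb = ssock.get_channel_binding("tls-unique")
        print(cb)  # bytes after the handshake, or None if unavailable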
|
NVIDIA/DeepLearningExamples
|
589604d49e016cd9ef4525f7abcc9c7b826cfc5e
|
TensorFlow/Segmentation/UNet_3D_Medical/dataset/transforms.py
|
python
|
OneHotLabels.__call__
|
(self, samples, labels, mean, stdev)
|
return samples, tf.one_hot(labels, self._n_classes)
|
Run op
:param samples: Sample arrays (unused)
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: One hot encoded labels
|
Run op
|
[
"Run",
"op"
] |
def __call__(self, samples, labels, mean, stdev):
""" Run op
:param samples: Sample arrays (unused)
:param labels: Label arrays
:param mean: Mean (unused)
:param stdev: Std (unused)
:return: One hot encoded labels
"""
return samples, tf.one_hot(labels, self._n_classes)
|
[
"def",
"__call__",
"(",
"self",
",",
"samples",
",",
"labels",
",",
"mean",
",",
"stdev",
")",
":",
"return",
"samples",
",",
"tf",
".",
"one_hot",
"(",
"labels",
",",
"self",
".",
"_n_classes",
")"
] |
https://github.com/NVIDIA/DeepLearningExamples/blob/589604d49e016cd9ef4525f7abcc9c7b826cfc5e/TensorFlow/Segmentation/UNet_3D_Medical/dataset/transforms.py#L282-L291
|
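The transform defers entirely to tf.one_hot. A standalone sketch of that call
with a hypothetical label tensor and three classes:

import tensorflow as tf

labels = tf.constant([0, 2, 1])   # hypothetical voxel labels
one_hot = tf.one_hot(labels, 3)   # shape (3, 3), dtype float32
print(one_hot.numpy())
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]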
|
andresriancho/w3af
|
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
|
w3af/plugins/attack/db/sqlmap/lib/utils/api.py
|
python
|
security_headers
|
(json_header=True)
|
Set some headers across all HTTP responses
|
Set some headers across all HTTP responses
|
[
"Set",
"some",
"headers",
"across",
"all",
"HTTP",
"responses"
] |
def security_headers(json_header=True):
"""
Set some headers across all HTTP responses
"""
response.headers["Server"] = "Server"
response.headers["X-Content-Type-Options"] = "nosniff"
response.headers["X-Frame-Options"] = "DENY"
response.headers["X-XSS-Protection"] = "1; mode=block"
response.headers["Pragma"] = "no-cache"
response.headers["Cache-Control"] = "no-cache"
response.headers["Expires"] = "0"
if json_header:
response.content_type = "application/json; charset=UTF-8"
|
[
"def",
"security_headers",
"(",
"json_header",
"=",
"True",
")",
":",
"response",
".",
"headers",
"[",
"\"Server\"",
"]",
"=",
"\"Server\"",
"response",
".",
"headers",
"[",
"\"X-Content-Type-Options\"",
"]",
"=",
"\"nosniff\"",
"response",
".",
"headers",
"[",
"\"X-Frame-Options\"",
"]",
"=",
"\"DENY\"",
"response",
".",
"headers",
"[",
"\"X-XSS-Protection\"",
"]",
"=",
"\"1; mode=block\"",
"response",
".",
"headers",
"[",
"\"Pragma\"",
"]",
"=",
"\"no-cache\"",
"response",
".",
"headers",
"[",
"\"Cache-Control\"",
"]",
"=",
"\"no-cache\"",
"response",
".",
"headers",
"[",
"\"Expires\"",
"]",
"=",
"\"0\"",
"if",
"json_header",
":",
"response",
".",
"content_type",
"=",
"\"application/json; charset=UTF-8\""
] |
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/db/sqlmap/lib/utils/api.py#L316-L329
|
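A function like this is typically registered as a Bottle after-request hook so
it runs on every response; the registration below is a hedged sketch and is
not shown in the record itself:

from bottle import hook, response

@hook("after_request")
def security_headers(json_header=True):
    # Hardening headers applied to every outgoing response.
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["X-Frame-Options"] = "DENY"
    response.headers["Cache-Control"] = "no-cache"
    if json_header:
        response.content_type = "application/json; charset=UTF-8"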
||
KU4NG/OPMS_v3
|
dbeeb74d9c0ff0ee3cfb940da7a1dadefcf9cfd4
|
extra_apps/webssh/main.py
|
python
|
IndexHandler.post
|
(self)
|
[] |
def post(self):
worker_id = None
status = None
try:
worker = self.ssh_connect()
except Exception as e:
logging.error(traceback.format_exc())
status = str(e)
else:
worker_id = worker.id
workers[worker_id] = worker
self.write(dict(id=worker_id, status=status))
|
[
"def",
"post",
"(",
"self",
")",
":",
"worker_id",
"=",
"None",
"status",
"=",
"None",
"try",
":",
"worker",
"=",
"self",
".",
"ssh_connect",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"status",
"=",
"str",
"(",
"e",
")",
"else",
":",
"worker_id",
"=",
"worker",
".",
"id",
"workers",
"[",
"worker_id",
"]",
"=",
"worker",
"self",
".",
"write",
"(",
"dict",
"(",
"id",
"=",
"worker_id",
",",
"status",
"=",
"status",
")",
")"
] |
https://github.com/KU4NG/OPMS_v3/blob/dbeeb74d9c0ff0ee3cfb940da7a1dadefcf9cfd4/extra_apps/webssh/main.py#L232-L245
|
||||
NanYoMy/DHT-woodworm
|
e28bbff214bc3c41ea462854256dd499fb8a6eb0
|
btdht/node.py
|
python
|
Node.got_peers
|
(self, token, values, socket=None, trans_id=None, sender_id=None, lock=None)
|
Construct reply message for got_peers
|
Construct reply message for got_peers
|
[
"Construct",
"reply",
"message",
"for",
"got_peers"
] |
def got_peers(self, token, values, socket=None, trans_id=None, sender_id=None, lock=None):
""" Construct reply message for got_peers """
message = {
"y": "r",
"r": {
"id": sender_id,
"nodes": values
}
}
logger.debug("got_peers msg to %s:%d, y:%s, t: %r" % (
self.host,
self.port,
message["y"],
trans_id.encode("hex")
))
self._sendmessage(message, socket, trans_id=trans_id, lock=lock)
|
[
"def",
"got_peers",
"(",
"self",
",",
"token",
",",
"values",
",",
"socket",
"=",
"None",
",",
"trans_id",
"=",
"None",
",",
"sender_id",
"=",
"None",
",",
"lock",
"=",
"None",
")",
":",
"message",
"=",
"{",
"\"y\"",
":",
"\"r\"",
",",
"\"r\"",
":",
"{",
"\"id\"",
":",
"sender_id",
",",
"\"nodes\"",
":",
"values",
"}",
"}",
"logger",
".",
"debug",
"(",
"\"got_peers msg to %s:%d, y:%s, t: %r\"",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"message",
"[",
"\"y\"",
"]",
",",
"trans_id",
".",
"encode",
"(",
"\"hex\"",
")",
")",
")",
"self",
".",
"_sendmessage",
"(",
"message",
",",
"socket",
",",
"trans_id",
"=",
"trans_id",
",",
"lock",
"=",
"lock",
")"
] |
https://github.com/NanYoMy/DHT-woodworm/blob/e28bbff214bc3c41ea462854256dd499fb8a6eb0/btdht/node.py#L186-L201
|
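The reply is a plain KRPC-style dict. A standalone sketch of the same message
shape with made-up IDs; note that trans_id.encode("hex") is Python 2 spelling,
and bytes.hex() is the Python 3 equivalent:

sender_id = b"\x01" * 20      # hypothetical 20-byte DHT node ID
values = b""                  # compact node info would go here
trans_id = b"\xaa\xbb"

message = {"y": "r", "r": {"id": sender_id, "nodes": values}}
print("y:%s, t:%s" % (message["y"], trans_id.hex()))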
||
andreikop/enki
|
3170059e5cb46dcc77d7fb1457c38a8a5f13af66
|
enki/plugins/searchreplace/controller.py
|
python
|
Controller._updateSearchWidgetFoundItemsHighlighting
|
(self)
|
return self._updateFoundItemsHighlighting(self._widget.getRegExp())
|
[] |
def _updateSearchWidgetFoundItemsHighlighting(self):
document = core.workspace().currentDocument()
if document is None:
return
if not self._widget.isVisible() or \
not self._widget.isSearchRegExpValid()[0] or \
not self._widget.getRegExp().pattern:
document.qutepart.setExtraSelections([])
return
return self._updateFoundItemsHighlighting(self._widget.getRegExp())
|
[
"def",
"_updateSearchWidgetFoundItemsHighlighting",
"(",
"self",
")",
":",
"document",
"=",
"core",
".",
"workspace",
"(",
")",
".",
"currentDocument",
"(",
")",
"if",
"document",
"is",
"None",
":",
"return",
"if",
"not",
"self",
".",
"_widget",
".",
"isVisible",
"(",
")",
"or",
"not",
"self",
".",
"_widget",
".",
"isSearchRegExpValid",
"(",
")",
"[",
"0",
"]",
"or",
"not",
"self",
".",
"_widget",
".",
"getRegExp",
"(",
")",
".",
"pattern",
":",
"document",
".",
"qutepart",
".",
"setExtraSelections",
"(",
"[",
"]",
")",
"return",
"return",
"self",
".",
"_updateFoundItemsHighlighting",
"(",
"self",
".",
"_widget",
".",
"getRegExp",
"(",
")",
")"
] |
https://github.com/andreikop/enki/blob/3170059e5cb46dcc77d7fb1457c38a8a5f13af66/enki/plugins/searchreplace/controller.py#L239-L250
|
|||
kanzure/nanoengineer
|
874e4c9f8a9190f093625b267f9767e19f82e6c4
|
cad/src/PM/PM_ComboBox.py
|
python
|
PM_ComboBox.setCurrentIndex
|
(self, val, blockSignals = False)
|
Overrides the superclass method.
@param blockSignals: Many times, the caller just wants to setCurrentIndex
                     and doesn't want to emit the valueChanged signal.
                     If this flag is set to True, the currentIndexChanged
signal won't be emitted. The default value is
False.
@type blockSignals: bool
@see: DnaDisplayStyle_PropertyManager.updateDnaDisplayStyleWidgets()
|
Overrides the superclass method.
|
[
"Overrides",
"the",
"superclass",
"method",
"."
] |
def setCurrentIndex(self, val, blockSignals = False):
"""
Overrides the superclass method.
@param blockSignals: Many times, the caller just wants to setCurrentIndex
                             and doesn't want to emit the valueChanged signal.
                             If this flag is set to True, the currentIndexChanged
signal won't be emitted. The default value is
False.
@type blockSignals: bool
@see: DnaDisplayStyle_PropertyManager.updateDnaDisplayStyleWidgets()
"""
#If blockSignals flag is True, the valueChanged signal won't be emitted
#This is done by self.blockSignals method below. -- Ninad 2008-08-13
self.blockSignals(blockSignals)
QComboBox.setCurrentIndex(self, val)
#Make sure to always 'unblock' signals that might have been temporarily
#blocked before calling superclass.setValue.
self.blockSignals(False)
|
[
"def",
"setCurrentIndex",
"(",
"self",
",",
"val",
",",
"blockSignals",
"=",
"False",
")",
":",
"#If blockSignals flag is True, the valueChanged signal won't be emitted",
"#This is done by self.blockSignals method below. -- Ninad 2008-08-13",
"self",
".",
"blockSignals",
"(",
"blockSignals",
")",
"QComboBox",
".",
"setCurrentIndex",
"(",
"self",
",",
"val",
")",
"#Make sure to always 'unblock' signals that might have been temporarily",
"#blocked before calling superclass.setValue.",
"self",
".",
"blockSignals",
"(",
"False",
")"
] |
https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/PM/PM_ComboBox.py#L211-L232
|
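The blockSignals pattern generalizes to any QWidget. A minimal sketch with a
plain QComboBox, assuming PyQt5 (NE1 itself uses an older Qt binding):

from PyQt5.QtWidgets import QApplication, QComboBox

app = QApplication([])
combo = QComboBox()
combo.addItems(["a", "b", "c"])
combo.currentIndexChanged.connect(lambda i: print("changed:", i))

combo.blockSignals(True)   # suppress currentIndexChanged
combo.setCurrentIndex(2)   # no slot fires here
combo.blockSignals(False)  # always unblock afterwards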