repo | path | url | code | code_tokens | docstring | docstring_tokens | language | partition
---|---|---|---|---|---|---|---|---|
howie6879/ruia | ruia/response.py | https://github.com/howie6879/ruia/blob/2dc5262fc9c3e902a8faa7d5fa2f046f9d9ee1fa/ruia/response.py#L123-L130 | async def json(self,
               *,
               encoding: str = None,
               loads: JSONDecoder = DEFAULT_JSON_DECODER,
               content_type: Optional[str] = 'application/json') -> Any:
    """Read and decodes JSON response."""
    return await self._aws_json(
        encoding=encoding, loads=loads, content_type=content_type) | [
"async",
"def",
"json",
"(",
"self",
",",
"*",
",",
"encoding",
":",
"str",
"=",
"None",
",",
"loads",
":",
"JSONDecoder",
"=",
"DEFAULT_JSON_DECODER",
",",
"content_type",
":",
"Optional",
"[",
"str",
"]",
"=",
"'application/json'",
")",
"->",
"Any",
":",
"return",
"await",
"self",
".",
"_aws_json",
"(",
"encoding",
"=",
"encoding",
",",
"loads",
"=",
"loads",
",",
"content_type",
"=",
"content_type",
")"
] | Read and decodes JSON response. | [
"Read",
"and",
"decodes",
"JSON",
"response",
"."
] | python | test |
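A hedged usage sketch for the `json` coroutine in the record above. The `Request`/`fetch` API and the target URL are assumptions for illustration, not part of the record:

```python
# Minimal sketch, assuming ruia exposes Request with an async fetch()
# that returns the Response shown above.
import asyncio
from ruia import Request

async def fetch_json(url):
    response = await Request(url).fetch()  # assumed to yield a Response
    return await response.json()           # decodes body as application/json

if __name__ == '__main__':
    print(asyncio.get_event_loop().run_until_complete(
        fetch_json('https://httpbin.org/json')))
```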
decryptus/sonicprobe | sonicprobe/libs/xys.py | https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/xys.py#L278-L282 | def _split_params(tag_prefix, tag_suffix):
    "Split comma-separated tag_suffix[:-1] and map with _maybe_int"
    if tag_suffix[-1:] != ')':
        raise ValueError, "unbalanced parenthesis in type %s%s" % (tag_prefix, tag_suffix)
    return map(_maybe_int, tag_suffix[:-1].split(',')) | [
"def",
"_split_params",
"(",
"tag_prefix",
",",
"tag_suffix",
")",
":",
"if",
"tag_suffix",
"[",
"-",
"1",
":",
"]",
"!=",
"')'",
":",
"raise",
"ValueError",
",",
"\"unbalanced parenthesis in type %s%s\"",
"%",
"(",
"tag_prefix",
",",
"tag_suffix",
")",
"return",
"map",
"(",
"_maybe_int",
",",
"tag_suffix",
"[",
":",
"-",
"1",
"]",
".",
"split",
"(",
"','",
")",
")"
] | Split comma-separated tag_suffix[:-1] and map with _maybe_int | [
"Split",
"comma",
"-",
"separated",
"tag_suffix",
"[",
":",
"-",
"1",
"]",
"and",
"map",
"with",
"_maybe_int"
] | python | train |
scivision/gridaurora | gridaurora/calcemissions.py | https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L169-L187 | def catvl(z, ver, vnew, lamb, lambnew, br):
    """
    trapz integrates over altitude axis, axis = -2
    concatenate over reaction dimension, axis = -1
    br: column integrated brightness
    lamb: wavelength [nm]
    ver: volume emission rate [photons / cm^-3 s^-3 ...]
    """
    if ver is not None:
        br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1)  # must come first!
        ver = np.concatenate((ver, vnew), axis=-1)
        lamb = np.concatenate((lamb, lambnew))
    else:
        ver = vnew.copy(order='F')
        lamb = lambnew.copy()
        br = np.trapz(ver, z, axis=-2)
    return ver, lamb, br | [
"def",
"catvl",
"(",
"z",
",",
"ver",
",",
"vnew",
",",
"lamb",
",",
"lambnew",
",",
"br",
")",
":",
"if",
"ver",
"is",
"not",
"None",
":",
"br",
"=",
"np",
".",
"concatenate",
"(",
"(",
"br",
",",
"np",
".",
"trapz",
"(",
"vnew",
",",
"z",
",",
"axis",
"=",
"-",
"2",
")",
")",
",",
"axis",
"=",
"-",
"1",
")",
"# must come first!",
"ver",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ver",
",",
"vnew",
")",
",",
"axis",
"=",
"-",
"1",
")",
"lamb",
"=",
"np",
".",
"concatenate",
"(",
"(",
"lamb",
",",
"lambnew",
")",
")",
"else",
":",
"ver",
"=",
"vnew",
".",
"copy",
"(",
"order",
"=",
"'F'",
")",
"lamb",
"=",
"lambnew",
".",
"copy",
"(",
")",
"br",
"=",
"np",
".",
"trapz",
"(",
"ver",
",",
"z",
",",
"axis",
"=",
"-",
"2",
")",
"return",
"ver",
",",
"lamb",
",",
"br"
] | trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...] | [
"trapz",
"integrates",
"over",
"altitude",
"axis",
"axis",
"=",
"-",
"2",
"concatenate",
"over",
"reaction",
"dimension",
"axis",
"=",
"-",
"1"
] | python | train |
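The `catvl` record above accumulates volume emission rates along the reaction axis while keeping the altitude-integrated brightness in sync. A standalone sketch of that pattern in plain NumPy, with invented shapes for illustration:

```python
import numpy as np

z = np.linspace(100e3, 500e3, 50)   # altitude grid [m]
v1 = np.random.rand(50, 3)          # VER of 3 reactions vs. altitude
v2 = np.random.rand(50, 2)          # VER of 2 further reactions

ver = v1.copy(order='F')            # first call: nothing accumulated yet
br = np.trapz(ver, z, axis=-2)      # integrate over altitude -> shape (3,)

# second call: extend brightness first, then grow `ver` along axis -1
br = np.concatenate((br, np.trapz(v2, z, axis=-2)), axis=-1)  # -> (5,)
ver = np.concatenate((ver, v2), axis=-1)                      # -> (50, 5)
print(br.shape, ver.shape)
```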
juanifioren/django-oidc-provider | oidc_provider/lib/utils/token.py | https://github.com/juanifioren/django-oidc-provider/blob/f0daed07b2ac7608565b80d4c80ccf04d8c416a8/oidc_provider/lib/utils/token.py#L105-L123 | def create_token(user, client, scope, id_token_dic=None):
    """
    Create and populate a Token object.
    Return a Token object.
    """
    token = Token()
    token.user = user
    token.client = client
    token.access_token = uuid.uuid4().hex
    if id_token_dic is not None:
        token.id_token = id_token_dic
    token.refresh_token = uuid.uuid4().hex
    token.expires_at = timezone.now() + timedelta(
        seconds=settings.get('OIDC_TOKEN_EXPIRE'))
    token.scope = scope
    return token | [
"def",
"create_token",
"(",
"user",
",",
"client",
",",
"scope",
",",
"id_token_dic",
"=",
"None",
")",
":",
"token",
"=",
"Token",
"(",
")",
"token",
".",
"user",
"=",
"user",
"token",
".",
"client",
"=",
"client",
"token",
".",
"access_token",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"if",
"id_token_dic",
"is",
"not",
"None",
":",
"token",
".",
"id_token",
"=",
"id_token_dic",
"token",
".",
"refresh_token",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"token",
".",
"expires_at",
"=",
"timezone",
".",
"now",
"(",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"settings",
".",
"get",
"(",
"'OIDC_TOKEN_EXPIRE'",
")",
")",
"token",
".",
"scope",
"=",
"scope",
"return",
"token"
] | Create and populate a Token object.
Return a Token object. | [
"Create",
"and",
"populate",
"a",
"Token",
"object",
".",
"Return",
"a",
"Token",
"object",
"."
] | python | train |
molmod/molmod | molmod/examples/003_internal_coordinates/c_ff_hessian.py | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/examples/003_internal_coordinates/c_ff_hessian.py#L106-L144 | def setup_hydrocarbon_ff(graph):
    """Create a simple ForceField object for hydrocarbons based on the graph."""
    # A) Define parameters.
    # the bond parameters:
    bond_params = {
        (6, 1): 310*kcalmol/angstrom**2,
        (6, 6): 220*kcalmol/angstrom**2,
    }
    # for every (a, b), also add (b, a)
    for key, val in list(bond_params.items()):
        if key[0] != key[1]:
            bond_params[(key[1], key[0])] = val
    # the bend parameters
    bend_params = {
        (1, 6, 1): 35*kcalmol/rad**2,
        (1, 6, 6): 30*kcalmol/rad**2,
        (6, 6, 6): 60*kcalmol/rad**2,
    }
    # for every (a, b, c), also add (c, b, a)
    for key, val in list(bend_params.items()):
        if key[0] != key[2]:
            bend_params[(key[2], key[1], key[0])] = val
    # B) detect all internal coordinates and corresponding energy terms.
    terms = []
    # bonds
    for i0, i1 in graph.edges:
        K = bond_params[(graph.numbers[i0], graph.numbers[i1])]
        terms.append(BondStretchTerm(K, i0, i1))
    # bends (see b_bending_angles.py for the explanation)
    for i1 in range(graph.num_vertices):
        n = list(graph.neighbors[i1])
        for index, i0 in enumerate(n):
            for i2 in n[:index]:
                K = bend_params[(graph.numbers[i0], graph.numbers[i1], graph.numbers[i2])]
                terms.append(BendAngleTerm(K, i0, i1, i2))
    # C) Create and return the force field
    return ForceField(terms) | [
"def",
"setup_hydrocarbon_ff",
"(",
"graph",
")",
":",
"# A) Define parameters.",
"# the bond parameters:",
"bond_params",
"=",
"{",
"(",
"6",
",",
"1",
")",
":",
"310",
"*",
"kcalmol",
"/",
"angstrom",
"**",
"2",
",",
"(",
"6",
",",
"6",
")",
":",
"220",
"*",
"kcalmol",
"/",
"angstrom",
"**",
"2",
",",
"}",
"# for every (a, b), also add (b, a)",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"bond_params",
".",
"items",
"(",
")",
")",
":",
"if",
"key",
"[",
"0",
"]",
"!=",
"key",
"[",
"1",
"]",
":",
"bond_params",
"[",
"(",
"key",
"[",
"1",
"]",
",",
"key",
"[",
"0",
"]",
")",
"]",
"=",
"val",
"# the bend parameters",
"bend_params",
"=",
"{",
"(",
"1",
",",
"6",
",",
"1",
")",
":",
"35",
"*",
"kcalmol",
"/",
"rad",
"**",
"2",
",",
"(",
"1",
",",
"6",
",",
"6",
")",
":",
"30",
"*",
"kcalmol",
"/",
"rad",
"**",
"2",
",",
"(",
"6",
",",
"6",
",",
"6",
")",
":",
"60",
"*",
"kcalmol",
"/",
"rad",
"**",
"2",
",",
"}",
"# for every (a, b, c), also add (c, b, a)",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"bend_params",
".",
"items",
"(",
")",
")",
":",
"if",
"key",
"[",
"0",
"]",
"!=",
"key",
"[",
"2",
"]",
":",
"bend_params",
"[",
"(",
"key",
"[",
"2",
"]",
",",
"key",
"[",
"1",
"]",
",",
"key",
"[",
"0",
"]",
")",
"]",
"=",
"val",
"# B) detect all internal coordinates and corresponding energy terms.",
"terms",
"=",
"[",
"]",
"# bonds",
"for",
"i0",
",",
"i1",
"in",
"graph",
".",
"edges",
":",
"K",
"=",
"bond_params",
"[",
"(",
"graph",
".",
"numbers",
"[",
"i0",
"]",
",",
"graph",
".",
"numbers",
"[",
"i1",
"]",
")",
"]",
"terms",
".",
"append",
"(",
"BondStretchTerm",
"(",
"K",
",",
"i0",
",",
"i1",
")",
")",
"# bends (see b_bending_angles.py for the explanation)",
"for",
"i1",
"in",
"range",
"(",
"graph",
".",
"num_vertices",
")",
":",
"n",
"=",
"list",
"(",
"graph",
".",
"neighbors",
"[",
"i1",
"]",
")",
"for",
"index",
",",
"i0",
"in",
"enumerate",
"(",
"n",
")",
":",
"for",
"i2",
"in",
"n",
"[",
":",
"index",
"]",
":",
"K",
"=",
"bend_params",
"[",
"(",
"graph",
".",
"numbers",
"[",
"i0",
"]",
",",
"graph",
".",
"numbers",
"[",
"i1",
"]",
",",
"graph",
".",
"numbers",
"[",
"i2",
"]",
")",
"]",
"terms",
".",
"append",
"(",
"BendAngleTerm",
"(",
"K",
",",
"i0",
",",
"i1",
",",
"i2",
")",
")",
"# C) Create and return the force field",
"return",
"ForceField",
"(",
"terms",
")"
] | Create a simple ForceField object for hydrocarbons based on the graph. | [
"Create",
"a",
"simple",
"ForceField",
"object",
"for",
"hydrocarbons",
"based",
"on",
"the",
"graph",
"."
] | python | train |
PmagPy/PmagPy | dialogs/drop_down_menus2.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/drop_down_menus2.py#L311-L350 | def on_select_menuitem(self, event, grid, row, col, selection):
    """
    sets value of selected cell to value selected from menu
    """
    if self.grid.changes:  # if user selects a menuitem, that is an edit
        self.grid.changes.add(row)
    else:
        self.grid.changes = {row}
    item_id = event.GetId()
    item = event.EventObject.FindItemById(item_id)
    label = item.Label
    cell_value = grid.GetCellValue(row, col)
    if str(label) == "CLEAR cell of all values":
        label = ""
    col_label = grid.GetColLabelValue(col).strip('\nEDIT ALL').strip('**')
    if col_label in self.colon_delimited_lst and label:
        if not label.lower() in cell_value.lower():
            label += (":" + cell_value).rstrip(':')
        else:
            label = cell_value
    if self.selected_col and self.selected_col == col:
        for row in range(self.grid.GetNumberRows()):
            grid.SetCellValue(row, col, label)
            if self.grid.changes:
                self.grid.changes.add(row)
            else:
                self.grid.changes = {row}
        #self.selected_col = None
    else:
        grid.SetCellValue(row, col, label)
        if selection:
            for cell in selection:
                row = cell[0]
                grid.SetCellValue(row, col, label)
    return | [
"def",
"on_select_menuitem",
"(",
"self",
",",
"event",
",",
"grid",
",",
"row",
",",
"col",
",",
"selection",
")",
":",
"if",
"self",
".",
"grid",
".",
"changes",
":",
"# if user selects a menuitem, that is an edit",
"self",
".",
"grid",
".",
"changes",
".",
"add",
"(",
"row",
")",
"else",
":",
"self",
".",
"grid",
".",
"changes",
"=",
"{",
"row",
"}",
"item_id",
"=",
"event",
".",
"GetId",
"(",
")",
"item",
"=",
"event",
".",
"EventObject",
".",
"FindItemById",
"(",
"item_id",
")",
"label",
"=",
"item",
".",
"Label",
"cell_value",
"=",
"grid",
".",
"GetCellValue",
"(",
"row",
",",
"col",
")",
"if",
"str",
"(",
"label",
")",
"==",
"\"CLEAR cell of all values\"",
":",
"label",
"=",
"\"\"",
"col_label",
"=",
"grid",
".",
"GetColLabelValue",
"(",
"col",
")",
".",
"strip",
"(",
"'\\nEDIT ALL'",
")",
".",
"strip",
"(",
"'**'",
")",
"if",
"col_label",
"in",
"self",
".",
"colon_delimited_lst",
"and",
"label",
":",
"if",
"not",
"label",
".",
"lower",
"(",
")",
"in",
"cell_value",
".",
"lower",
"(",
")",
":",
"label",
"+=",
"(",
"\":\"",
"+",
"cell_value",
")",
".",
"rstrip",
"(",
"':'",
")",
"else",
":",
"label",
"=",
"cell_value",
"if",
"self",
".",
"selected_col",
"and",
"self",
".",
"selected_col",
"==",
"col",
":",
"for",
"row",
"in",
"range",
"(",
"self",
".",
"grid",
".",
"GetNumberRows",
"(",
")",
")",
":",
"grid",
".",
"SetCellValue",
"(",
"row",
",",
"col",
",",
"label",
")",
"if",
"self",
".",
"grid",
".",
"changes",
":",
"self",
".",
"grid",
".",
"changes",
".",
"add",
"(",
"row",
")",
"else",
":",
"self",
".",
"grid",
".",
"changes",
"=",
"{",
"row",
"}",
"#self.selected_col = None",
"else",
":",
"grid",
".",
"SetCellValue",
"(",
"row",
",",
"col",
",",
"label",
")",
"if",
"selection",
":",
"for",
"cell",
"in",
"selection",
":",
"row",
"=",
"cell",
"[",
"0",
"]",
"grid",
".",
"SetCellValue",
"(",
"row",
",",
"col",
",",
"label",
")",
"return"
] | sets value of selected cell to value selected from menu | [
"sets",
"value",
"of",
"selected",
"cell",
"to",
"value",
"selected",
"from",
"menu"
] | python | train |
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavgpslock.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavgpslock.py#L19-L58 | def lock_time(logfile):
    '''work out gps lock times for a log file'''
    print("Processing log %s" % filename)
    mlog = mavutil.mavlink_connection(filename)
    locked = False
    start_time = 0.0
    total_time = 0.0
    t = None
    m = mlog.recv_match(type=['GPS_RAW_INT','GPS_RAW'], condition=args.condition)
    if m is None:
        return 0
    unlock_time = time.mktime(time.localtime(m._timestamp))
    while True:
        m = mlog.recv_match(type=['GPS_RAW_INT','GPS_RAW'], condition=args.condition)
        if m is None:
            if locked:
                total_time += time.mktime(t) - start_time
            if total_time > 0:
                print("Lock time : %u:%02u" % (int(total_time)/60, int(total_time)%60))
            return total_time
        t = time.localtime(m._timestamp)
        if m.fix_type >= 2 and not locked:
            print("Locked at %s after %u seconds" % (time.asctime(t),
                                                     time.mktime(t) - unlock_time))
            locked = True
            start_time = time.mktime(t)
        elif m.fix_type == 1 and locked:
            print("Lost GPS lock at %s" % time.asctime(t))
            locked = False
            total_time += time.mktime(t) - start_time
            unlock_time = time.mktime(t)
        elif m.fix_type == 0 and locked:
            print("Lost protocol lock at %s" % time.asctime(t))
            locked = False
            total_time += time.mktime(t) - start_time
            unlock_time = time.mktime(t)
    return total_time | [
"def",
"lock_time",
"(",
"logfile",
")",
":",
"print",
"(",
"\"Processing log %s\"",
"%",
"filename",
")",
"mlog",
"=",
"mavutil",
".",
"mavlink_connection",
"(",
"filename",
")",
"locked",
"=",
"False",
"start_time",
"=",
"0.0",
"total_time",
"=",
"0.0",
"t",
"=",
"None",
"m",
"=",
"mlog",
".",
"recv_match",
"(",
"type",
"=",
"[",
"'GPS_RAW_INT'",
",",
"'GPS_RAW'",
"]",
",",
"condition",
"=",
"args",
".",
"condition",
")",
"if",
"m",
"is",
"None",
":",
"return",
"0",
"unlock_time",
"=",
"time",
".",
"mktime",
"(",
"time",
".",
"localtime",
"(",
"m",
".",
"_timestamp",
")",
")",
"while",
"True",
":",
"m",
"=",
"mlog",
".",
"recv_match",
"(",
"type",
"=",
"[",
"'GPS_RAW_INT'",
",",
"'GPS_RAW'",
"]",
",",
"condition",
"=",
"args",
".",
"condition",
")",
"if",
"m",
"is",
"None",
":",
"if",
"locked",
":",
"total_time",
"+=",
"time",
".",
"mktime",
"(",
"t",
")",
"-",
"start_time",
"if",
"total_time",
">",
"0",
":",
"print",
"(",
"\"Lock time : %u:%02u\"",
"%",
"(",
"int",
"(",
"total_time",
")",
"/",
"60",
",",
"int",
"(",
"total_time",
")",
"%",
"60",
")",
")",
"return",
"total_time",
"t",
"=",
"time",
".",
"localtime",
"(",
"m",
".",
"_timestamp",
")",
"if",
"m",
".",
"fix_type",
">=",
"2",
"and",
"not",
"locked",
":",
"print",
"(",
"\"Locked at %s after %u seconds\"",
"%",
"(",
"time",
".",
"asctime",
"(",
"t",
")",
",",
"time",
".",
"mktime",
"(",
"t",
")",
"-",
"unlock_time",
")",
")",
"locked",
"=",
"True",
"start_time",
"=",
"time",
".",
"mktime",
"(",
"t",
")",
"elif",
"m",
".",
"fix_type",
"==",
"1",
"and",
"locked",
":",
"print",
"(",
"\"Lost GPS lock at %s\"",
"%",
"time",
".",
"asctime",
"(",
"t",
")",
")",
"locked",
"=",
"False",
"total_time",
"+=",
"time",
".",
"mktime",
"(",
"t",
")",
"-",
"start_time",
"unlock_time",
"=",
"time",
".",
"mktime",
"(",
"t",
")",
"elif",
"m",
".",
"fix_type",
"==",
"0",
"and",
"locked",
":",
"print",
"(",
"\"Lost protocol lock at %s\"",
"%",
"time",
".",
"asctime",
"(",
"t",
")",
")",
"locked",
"=",
"False",
"total_time",
"+=",
"time",
".",
"mktime",
"(",
"t",
")",
"-",
"start_time",
"unlock_time",
"=",
"time",
".",
"mktime",
"(",
"t",
")",
"return",
"total_time"
] | work out gps lock times for a log file | [
"work",
"out",
"gps",
"lock",
"times",
"for",
"a",
"log",
"file"
] | python | train |
prompt-toolkit/pymux | pymux/arrangement.py | https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/arrangement.py#L485-L541 | def change_size_for_pane(self, pane, up=0, right=0, down=0, left=0):
    """
    Increase the size of the current pane in any of the four directions.
    Positive values indicate an increase, negative values a decrease.
    """
    assert isinstance(pane, Pane)
    def find_split_and_child(split_cls, is_before):
        " Find the split for which we will have to update the weights. "
        child = pane
        split = self._get_parent(child)
        def found():
            return isinstance(split, split_cls) and (
                not is_before or split.index(child) > 0) and (
                is_before or split.index(child) < len(split) - 1)
        while split and not found():
            child = split
            split = self._get_parent(child)
        return split, child  # split can be None!
    def handle_side(split_cls, is_before, amount, trying_other_side=False):
        " Increase weights on one side. (top/left/right/bottom). "
        if amount:
            split, child = find_split_and_child(split_cls, is_before)
            if split:
                # Find neighbour.
                neighbour_index = split.index(child) + (-1 if is_before else 1)
                neighbour_child = split[neighbour_index]
                # Increase/decrease weights.
                split.weights[child] += amount
                split.weights[neighbour_child] -= amount
                # Ensure that all weights are at least one.
                for k, value in split.weights.items():
                    if value < 1:
                        split.weights[k] = 1
            else:
                # When no split has been found where we can move in this
                # direction, try to move the other side instead using a
                # negative amount. This happens when we run "resize-pane -R 4"
                # inside the pane that is completely on the right. In that
                # case it's logical to move the left border to the right
                # instead.
                if not trying_other_side:
                    handle_side(split_cls, not is_before, -amount,
                                trying_other_side=True)
    handle_side(VSplit, True, left)
    handle_side(VSplit, False, right)
    handle_side(HSplit, True, up)
    handle_side(HSplit, False, down) | [
"def",
"change_size_for_pane",
"(",
"self",
",",
"pane",
",",
"up",
"=",
"0",
",",
"right",
"=",
"0",
",",
"down",
"=",
"0",
",",
"left",
"=",
"0",
")",
":",
"assert",
"isinstance",
"(",
"pane",
",",
"Pane",
")",
"def",
"find_split_and_child",
"(",
"split_cls",
",",
"is_before",
")",
":",
"\" Find the split for which we will have to update the weights. \"",
"child",
"=",
"pane",
"split",
"=",
"self",
".",
"_get_parent",
"(",
"child",
")",
"def",
"found",
"(",
")",
":",
"return",
"isinstance",
"(",
"split",
",",
"split_cls",
")",
"and",
"(",
"not",
"is_before",
"or",
"split",
".",
"index",
"(",
"child",
")",
">",
"0",
")",
"and",
"(",
"is_before",
"or",
"split",
".",
"index",
"(",
"child",
")",
"<",
"len",
"(",
"split",
")",
"-",
"1",
")",
"while",
"split",
"and",
"not",
"found",
"(",
")",
":",
"child",
"=",
"split",
"split",
"=",
"self",
".",
"_get_parent",
"(",
"child",
")",
"return",
"split",
",",
"child",
"# split can be None!",
"def",
"handle_side",
"(",
"split_cls",
",",
"is_before",
",",
"amount",
",",
"trying_other_side",
"=",
"False",
")",
":",
"\" Increase weights on one side. (top/left/right/bottom). \"",
"if",
"amount",
":",
"split",
",",
"child",
"=",
"find_split_and_child",
"(",
"split_cls",
",",
"is_before",
")",
"if",
"split",
":",
"# Find neighbour.",
"neighbour_index",
"=",
"split",
".",
"index",
"(",
"child",
")",
"+",
"(",
"-",
"1",
"if",
"is_before",
"else",
"1",
")",
"neighbour_child",
"=",
"split",
"[",
"neighbour_index",
"]",
"# Increase/decrease weights.",
"split",
".",
"weights",
"[",
"child",
"]",
"+=",
"amount",
"split",
".",
"weights",
"[",
"neighbour_child",
"]",
"-=",
"amount",
"# Ensure that all weights are at least one.",
"for",
"k",
",",
"value",
"in",
"split",
".",
"weights",
".",
"items",
"(",
")",
":",
"if",
"value",
"<",
"1",
":",
"split",
".",
"weights",
"[",
"k",
"]",
"=",
"1",
"else",
":",
"# When no split has been found where we can move in this",
"# direction, try to move the other side instead using a",
"# negative amount. This happens when we run \"resize-pane -R 4\"",
"# inside the pane that is completely on the right. In that",
"# case it's logical to move the left border to the right",
"# instead.",
"if",
"not",
"trying_other_side",
":",
"handle_side",
"(",
"split_cls",
",",
"not",
"is_before",
",",
"-",
"amount",
",",
"trying_other_side",
"=",
"True",
")",
"handle_side",
"(",
"VSplit",
",",
"True",
",",
"left",
")",
"handle_side",
"(",
"VSplit",
",",
"False",
",",
"right",
")",
"handle_side",
"(",
"HSplit",
",",
"True",
",",
"up",
")",
"handle_side",
"(",
"HSplit",
",",
"False",
",",
"down",
")"
] | Increase the size of the current pane in any of the four directions.
Positive values indicate an increase, negative values a decrease. | [
"Increase",
"the",
"size",
"of",
"the",
"current",
"pane",
"in",
"any",
"of",
"the",
"four",
"directions",
".",
"Positive",
"values",
"indicate",
"an",
"increase",
"negative",
"values",
"a",
"decrease",
"."
] | python | train |
ANTsX/ANTsPy | ants/core/ants_image_io.py | https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/core/ants_image_io.py#L208-L243 | def matrix_to_images(data_matrix, mask):
    """
    Unmasks rows of a matrix and writes as images
    ANTsR function: `matrixToImages`
    Arguments
    ---------
    data_matrix : numpy.ndarray
        each row corresponds to an image
        array should have number of columns equal to non-zero voxels in the mask
    mask : ANTsImage
        image containing a binary mask. Rows of the matrix are
        unmasked and written as images. The mask defines the output image space
    Returns
    -------
    list of ANTsImage types
    """
    if data_matrix.ndim > 2:
        data_matrix = data_matrix.reshape(data_matrix.shape[0], -1)
    numimages = len(data_matrix)
    numVoxelsInMatrix = data_matrix.shape[1]
    numVoxelsInMask = (mask >= 0.5).sum()
    if numVoxelsInMask != numVoxelsInMatrix:
        raise ValueError('Num masked voxels %i must match data matrix %i' % (numVoxelsInMask, numVoxelsInMatrix))
    imagelist = []
    for i in range(numimages):
        img = mask.clone()
        img[mask >= 0.5] = data_matrix[i,:]
        imagelist.append(img)
    return imagelist | [
"def",
"matrix_to_images",
"(",
"data_matrix",
",",
"mask",
")",
":",
"if",
"data_matrix",
".",
"ndim",
">",
"2",
":",
"data_matrix",
"=",
"data_matrix",
".",
"reshape",
"(",
"data_matrix",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"numimages",
"=",
"len",
"(",
"data_matrix",
")",
"numVoxelsInMatrix",
"=",
"data_matrix",
".",
"shape",
"[",
"1",
"]",
"numVoxelsInMask",
"=",
"(",
"mask",
">=",
"0.5",
")",
".",
"sum",
"(",
")",
"if",
"numVoxelsInMask",
"!=",
"numVoxelsInMatrix",
":",
"raise",
"ValueError",
"(",
"'Num masked voxels %i must match data matrix %i'",
"%",
"(",
"numVoxelsInMask",
",",
"numVoxelsInMatrix",
")",
")",
"imagelist",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"numimages",
")",
":",
"img",
"=",
"mask",
".",
"clone",
"(",
")",
"img",
"[",
"mask",
">=",
"0.5",
"]",
"=",
"data_matrix",
"[",
"i",
",",
":",
"]",
"imagelist",
".",
"append",
"(",
"img",
")",
"return",
"imagelist"
] | Unmasks rows of a matrix and writes as images
ANTsR function: `matrixToImages`
Arguments
---------
data_matrix : numpy.ndarray
each row corresponds to an image
array should have number of columns equal to non-zero voxels in the mask
mask : ANTsImage
image containing a binary mask. Rows of the matrix are
unmasked and written as images. The mask defines the output image space
Returns
-------
list of ANTsImage types | [
"Unmasks",
"rows",
"of",
"a",
"matrix",
"and",
"writes",
"as",
"images"
] | python | train |
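A hedged usage sketch for `matrix_to_images`. The sample dataset, `get_mask` call, and random matrix below are illustrative assumptions; only `matrix_to_images` itself appears in the record:

```python
import numpy as np
import ants

mask = ants.get_mask(ants.image_read(ants.get_ants_data('r16')))
n_voxels = int((mask.numpy() >= 0.5).sum())
data = np.random.rand(4, n_voxels)        # 4 "images", one per row

imgs = ants.matrix_to_images(data, mask)  # unmask each row into image space
print(len(imgs), imgs[0].shape)           # 4 images, each on the mask's grid
```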
tknapen/FIRDeconvolution | src/FIRDeconvolution.py | https://github.com/tknapen/FIRDeconvolution/blob/6263496a356c449062fe4c216fef56541f6dc151/src/FIRDeconvolution.py#L300-L310 | def calculate_rsq(self):
    """calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero.
    """
    assert hasattr(self, 'betas'), 'no betas found, please run regression before rsq'
    explained_times = self.design_matrix.sum(axis = 0) != 0
    explained_signal = self.predict_from_design_matrix(self.design_matrix)
    self.rsq = 1.0 - np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1) / np.sum(self.resampled_signal[:,explained_times].squeeze()**2, axis = -1)
    self.ssr = np.sum((explained_signal[:,explained_times] - self.resampled_signal[:,explained_times])**2, axis = -1)
    return np.squeeze(self.rsq) | [
"def",
"calculate_rsq",
"(",
"self",
")",
":",
"assert",
"hasattr",
"(",
"self",
",",
"'betas'",
")",
",",
"'no betas found, please run regression before rsq'",
"explained_times",
"=",
"self",
".",
"design_matrix",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"!=",
"0",
"explained_signal",
"=",
"self",
".",
"predict_from_design_matrix",
"(",
"self",
".",
"design_matrix",
")",
"self",
".",
"rsq",
"=",
"1.0",
"-",
"np",
".",
"sum",
"(",
"(",
"explained_signal",
"[",
":",
",",
"explained_times",
"]",
"-",
"self",
".",
"resampled_signal",
"[",
":",
",",
"explained_times",
"]",
")",
"**",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"/",
"np",
".",
"sum",
"(",
"self",
".",
"resampled_signal",
"[",
":",
",",
"explained_times",
"]",
".",
"squeeze",
"(",
")",
"**",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"self",
".",
"ssr",
"=",
"np",
".",
"sum",
"(",
"(",
"explained_signal",
"[",
":",
",",
"explained_times",
"]",
"-",
"self",
".",
"resampled_signal",
"[",
":",
",",
"explained_times",
"]",
")",
"**",
"2",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"np",
".",
"squeeze",
"(",
"self",
".",
"rsq",
")"
] | calculate_rsq calculates coefficient of determination, or r-squared, defined here as 1.0 - SS_res / SS_tot. rsq is only calculated for those timepoints in the data for which the design matrix is non-zero. | [
"calculate_rsq",
"calculates",
"coefficient",
"of",
"determination",
"or",
"r",
"-",
"squared",
"defined",
"here",
"as",
"1",
".",
"0",
"-",
"SS_res",
"/",
"SS_tot",
".",
"rsq",
"is",
"only",
"calculated",
"for",
"those",
"timepoints",
"in",
"the",
"data",
"for",
"which",
"the",
"design",
"matrix",
"is",
"non",
"-",
"zero",
"."
] | python | train |
ralphje/imagemounter | imagemounter/filesystems.py | https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/filesystems.py#L460-L494 | def mount(self, volume):
    """Performs mount actions on a LVM. Scans for active volume groups from the loopback device, activates it
    and fills :attr:`volumes` with the logical volumes.
    :raises NoLoopbackAvailableError: when no loopback was available
    :raises IncorrectFilesystemError: when the volume is not a volume group
    """
    os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1'
    # find free loopback device
    volume._find_loopback()
    time.sleep(0.2)
    try:
        # Scan for new lvm volumes
        result = _util.check_output_(["lvm", "pvscan"])
        for l in result.splitlines():
            if volume.loopback in l or (volume.offset == 0 and volume.get_raw_path() in l):
                for vg in re.findall(r'VG (\S+)', l):
                    volume.info['volume_group'] = vg
        if not volume.info.get('volume_group'):
            logger.warning("Volume is not a volume group. (Searching for %s)", volume.loopback)
            raise IncorrectFilesystemError()
        # Enable lvm volumes
        _util.check_call_(["lvm", "vgchange", "-a", "y", volume.info['volume_group']], stdout=subprocess.PIPE)
    except Exception:
        volume._free_loopback()
        raise
    volume.volumes.vstype = 'lvm'
    # fills it up.
    for _ in volume.volumes.detect_volumes('lvm'):
        pass | [
"def",
"mount",
"(",
"self",
",",
"volume",
")",
":",
"os",
".",
"environ",
"[",
"'LVM_SUPPRESS_FD_WARNINGS'",
"]",
"=",
"'1'",
"# find free loopback device",
"volume",
".",
"_find_loopback",
"(",
")",
"time",
".",
"sleep",
"(",
"0.2",
")",
"try",
":",
"# Scan for new lvm volumes",
"result",
"=",
"_util",
".",
"check_output_",
"(",
"[",
"\"lvm\"",
",",
"\"pvscan\"",
"]",
")",
"for",
"l",
"in",
"result",
".",
"splitlines",
"(",
")",
":",
"if",
"volume",
".",
"loopback",
"in",
"l",
"or",
"(",
"volume",
".",
"offset",
"==",
"0",
"and",
"volume",
".",
"get_raw_path",
"(",
")",
"in",
"l",
")",
":",
"for",
"vg",
"in",
"re",
".",
"findall",
"(",
"r'VG (\\S+)'",
",",
"l",
")",
":",
"volume",
".",
"info",
"[",
"'volume_group'",
"]",
"=",
"vg",
"if",
"not",
"volume",
".",
"info",
".",
"get",
"(",
"'volume_group'",
")",
":",
"logger",
".",
"warning",
"(",
"\"Volume is not a volume group. (Searching for %s)\"",
",",
"volume",
".",
"loopback",
")",
"raise",
"IncorrectFilesystemError",
"(",
")",
"# Enable lvm volumes",
"_util",
".",
"check_call_",
"(",
"[",
"\"lvm\"",
",",
"\"vgchange\"",
",",
"\"-a\"",
",",
"\"y\"",
",",
"volume",
".",
"info",
"[",
"'volume_group'",
"]",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"except",
"Exception",
":",
"volume",
".",
"_free_loopback",
"(",
")",
"raise",
"volume",
".",
"volumes",
".",
"vstype",
"=",
"'lvm'",
"# fills it up.",
"for",
"_",
"in",
"volume",
".",
"volumes",
".",
"detect_volumes",
"(",
"'lvm'",
")",
":",
"pass"
] | Performs mount actions on a LVM. Scans for active volume groups from the loopback device, activates it
and fills :attr:`volumes` with the logical volumes.
:raises NoLoopbackAvailableError: when no loopback was available
:raises IncorrectFilesystemError: when the volume is not a volume group | [
"Performs",
"mount",
"actions",
"on",
"a",
"LVM",
".",
"Scans",
"for",
"active",
"volume",
"groups",
"from",
"the",
"loopback",
"device",
"activates",
"it",
"and",
"fills",
":",
"attr",
":",
"volumes",
"with",
"the",
"logical",
"volumes",
"."
] | python | train |
spyder-ide/spyder | spyder/plugins/explorer/widgets.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L551-L559 | def open(self, fnames=None):
    """Open files with the appropriate application"""
    if fnames is None:
        fnames = self.get_selected_filenames()
    for fname in fnames:
        if osp.isfile(fname) and encoding.is_text_file(fname):
            self.parent_widget.sig_open_file.emit(fname)
        else:
            self.open_outside_spyder([fname]) | [
"def",
"open",
"(",
"self",
",",
"fnames",
"=",
"None",
")",
":",
"if",
"fnames",
"is",
"None",
":",
"fnames",
"=",
"self",
".",
"get_selected_filenames",
"(",
")",
"for",
"fname",
"in",
"fnames",
":",
"if",
"osp",
".",
"isfile",
"(",
"fname",
")",
"and",
"encoding",
".",
"is_text_file",
"(",
"fname",
")",
":",
"self",
".",
"parent_widget",
".",
"sig_open_file",
".",
"emit",
"(",
"fname",
")",
"else",
":",
"self",
".",
"open_outside_spyder",
"(",
"[",
"fname",
"]",
")"
] | Open files with the appropriate application | [
"Open",
"files",
"with",
"the",
"appropriate",
"application"
] | python | train |
base4sistemas/pyescpos | escpos/conn/serial.py | https://github.com/base4sistemas/pyescpos/blob/621bd00f1499aff700f37d8d36d04e0d761708f1/escpos/conn/serial.py#L444-L449 | def write(self, data):
    """Write data to serial port."""
    for chunk in chunks(data, 512):
        self.wait_to_write()
        self.comport.write(chunk)
        self.comport.flush() | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"for",
"chunk",
"in",
"chunks",
"(",
"data",
",",
"512",
")",
":",
"self",
".",
"wait_to_write",
"(",
")",
"self",
".",
"comport",
".",
"write",
"(",
"chunk",
")",
"self",
".",
"comport",
".",
"flush",
"(",
")"
] | Write data to serial port. | [
"Write",
"data",
"to",
"serial",
"port",
"."
] | python | train |
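The `chunks` helper used by `write` above is referenced but not included in this record; a plausible sketch that yields successive fixed-size slices would be:

```python
def chunks(data, size):
    """Yield successive `size`-byte slices of `data`."""
    for i in range(0, len(data), size):
        yield data[i:i + size]

# quick check of the slicing behaviour
assert list(chunks(b'abcdefg', 3)) == [b'abc', b'def', b'g']
```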
arne-cl/discoursegraphs | src/discoursegraphs/corpora.py | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/corpora.py#L89-L107 | def get_document(self, doc_id):
    """
    given a document ID, returns a merged document graph containng all
    available annotation layers.
    """
    layer_graphs = []
    for layer_name in self.layers:
        layer_files, read_function = self.layers[layer_name]
        for layer_file in layer_files:
            if fnmatch.fnmatch(layer_file, '*{}.*'.format(doc_id)):
                layer_graphs.append(read_function(layer_file))
    if not layer_graphs:
        raise TypeError("There are no files with that document ID.")
    else:
        doc_graph = layer_graphs[0]
        for layer_graph in layer_graphs[1:]:
            doc_graph.merge_graphs(layer_graph)
        return doc_graph | [
"def",
"get_document",
"(",
"self",
",",
"doc_id",
")",
":",
"layer_graphs",
"=",
"[",
"]",
"for",
"layer_name",
"in",
"self",
".",
"layers",
":",
"layer_files",
",",
"read_function",
"=",
"self",
".",
"layers",
"[",
"layer_name",
"]",
"for",
"layer_file",
"in",
"layer_files",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"layer_file",
",",
"'*{}.*'",
".",
"format",
"(",
"doc_id",
")",
")",
":",
"layer_graphs",
".",
"append",
"(",
"read_function",
"(",
"layer_file",
")",
")",
"if",
"not",
"layer_graphs",
":",
"raise",
"TypeError",
"(",
"\"There are no files with that document ID.\"",
")",
"else",
":",
"doc_graph",
"=",
"layer_graphs",
"[",
"0",
"]",
"for",
"layer_graph",
"in",
"layer_graphs",
"[",
"1",
":",
"]",
":",
"doc_graph",
".",
"merge_graphs",
"(",
"layer_graph",
")",
"return",
"doc_graph"
] | given a document ID, returns a merged document graph containng all
available annotation layers. | [
"given",
"a",
"document",
"ID",
"returns",
"a",
"merged",
"document",
"graph",
"containng",
"all",
"available",
"annotation",
"layers",
"."
] | python | train |
balloob/pychromecast | pychromecast/controllers/youtube.py | https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/controllers/youtube.py#L29-L35 | def start_session_if_none(self):
    """
    Starts a session it is not yet initialized.
    """
    if not (self._screen_id and self._session):
        self.update_screen_id()
        self._session = YouTubeSession(screen_id=self._screen_id) | [
"def",
"start_session_if_none",
"(",
"self",
")",
":",
"if",
"not",
"(",
"self",
".",
"_screen_id",
"and",
"self",
".",
"_session",
")",
":",
"self",
".",
"update_screen_id",
"(",
")",
"self",
".",
"_session",
"=",
"YouTubeSession",
"(",
"screen_id",
"=",
"self",
".",
"_screen_id",
")"
] | Starts a session it is not yet initialized. | [
"Starts",
"a",
"session",
"it",
"is",
"not",
"yet",
"initialized",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/utils/coordination_geometry_utils.py#L662-L684 | def indices_separate(self, points, dist_tolerance):
    """
    Returns three lists containing the indices of the points lying on one side of the plane, on the plane
    and on the other side of the plane. The dist_tolerance parameter controls the tolerance to which a point
    is considered to lie on the plane or not (distance to the plane)
    :param points: list of points
    :param dist_tolerance: tolerance to which a point is considered to lie on the plane
        or not (distance to the plane)
    :return: The lists of indices of the points on one side of the plane, on the plane and
        on the other side of the plane
    """
    side1 = list()
    inplane = list()
    side2 = list()
    for ip, pp in enumerate(points):
        if self.is_in_plane(pp, dist_tolerance):
            inplane.append(ip)
        else:
            if np.dot(pp + self.vector_to_origin, self.normal_vector) < 0.0:
                side1.append(ip)
            else:
                side2.append(ip)
    return [side1, inplane, side2] | [
"def",
"indices_separate",
"(",
"self",
",",
"points",
",",
"dist_tolerance",
")",
":",
"side1",
"=",
"list",
"(",
")",
"inplane",
"=",
"list",
"(",
")",
"side2",
"=",
"list",
"(",
")",
"for",
"ip",
",",
"pp",
"in",
"enumerate",
"(",
"points",
")",
":",
"if",
"self",
".",
"is_in_plane",
"(",
"pp",
",",
"dist_tolerance",
")",
":",
"inplane",
".",
"append",
"(",
"ip",
")",
"else",
":",
"if",
"np",
".",
"dot",
"(",
"pp",
"+",
"self",
".",
"vector_to_origin",
",",
"self",
".",
"normal_vector",
")",
"<",
"0.0",
":",
"side1",
".",
"append",
"(",
"ip",
")",
"else",
":",
"side2",
".",
"append",
"(",
"ip",
")",
"return",
"[",
"side1",
",",
"inplane",
",",
"side2",
"]"
] | Returns three lists containing the indices of the points lying on one side of the plane, on the plane
and on the other side of the plane. The dist_tolerance parameter controls the tolerance to which a point
is considered to lie on the plane or not (distance to the plane)
:param points: list of points
:param dist_tolerance: tolerance to which a point is considered to lie on the plane
or not (distance to the plane)
:return: The lists of indices of the points on one side of the plane, on the plane and
on the other side of the plane | [
"Returns",
"three",
"lists",
"containing",
"the",
"indices",
"of",
"the",
"points",
"lying",
"on",
"one",
"side",
"of",
"the",
"plane",
"on",
"the",
"plane",
"and",
"on",
"the",
"other",
"side",
"of",
"the",
"plane",
".",
"The",
"dist_tolerance",
"parameter",
"controls",
"the",
"tolerance",
"to",
"which",
"a",
"point",
"is",
"considered",
"to",
"lie",
"on",
"the",
"plane",
"or",
"not",
"(",
"distance",
"to",
"the",
"plane",
")",
":",
"param",
"points",
":",
"list",
"of",
"points",
":",
"param",
"dist_tolerance",
":",
"tolerance",
"to",
"which",
"a",
"point",
"is",
"considered",
"to",
"lie",
"on",
"the",
"plane",
"or",
"not",
"(",
"distance",
"to",
"the",
"plane",
")",
":",
"return",
":",
"The",
"lists",
"of",
"indices",
"of",
"the",
"points",
"on",
"one",
"side",
"of",
"the",
"plane",
"on",
"the",
"plane",
"and",
"on",
"the",
"other",
"side",
"of",
"the",
"plane"
] | python | train |
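A standalone illustration of the side test in `indices_separate`: the sign of `dot(p + vector_to_origin, normal_vector)` picks the side. The plane z = 1 and the tolerance below are invented for the example:

```python
import numpy as np

normal = np.array([0.0, 0.0, 1.0])            # plane z = 1
vector_to_origin = np.array([0.0, 0.0, -1.0])
points = [np.array([0.0, 0.0, 0.2]),
          np.array([0.0, 0.0, 1.0]),
          np.array([0.0, 0.0, 3.0])]

side1, inplane, side2 = [], [], []
for ip, pp in enumerate(points):
    dist = np.dot(pp + vector_to_origin, normal)  # signed distance to plane
    if abs(dist) < 1e-8:      # stand-in for is_in_plane(pp, dist_tolerance)
        inplane.append(ip)
    elif dist < 0.0:
        side1.append(ip)
    else:
        side2.append(ip)
print(side1, inplane, side2)  # [0] [1] [2]
```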
rgs1/zk_shell | zk_shell/shell.py | https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/shell.py#L1275-L1291 | def do_zero(self, params):
    """
    \x1b[1mNAME\x1b[0m
        zero - Set the znode's to None (no bytes)
    \x1b[1mSYNOPSIS\x1b[0m
        zero <path> [version]
    \x1b[1mOPTIONS\x1b[0m
        * version: only update if version matches (default: -1)
    \x1b[1mEXAMPLES\x1b[0m
        > zero /foo
        > zero /foo 3
    """
    self.set(params.path, None, version=params.version) | [
"def",
"do_zero",
"(",
"self",
",",
"params",
")",
":",
"self",
".",
"set",
"(",
"params",
".",
"path",
",",
"None",
",",
"version",
"=",
"params",
".",
"version",
")"
] | \x1b[1mNAME\x1b[0m
zero - Set the znode's to None (no bytes)
\x1b[1mSYNOPSIS\x1b[0m
zero <path> [version]
\x1b[1mOPTIONS\x1b[0m
* version: only update if version matches (default: -1)
\x1b[1mEXAMPLES\x1b[0m
> zero /foo
> zero /foo 3 | [
"\\",
"x1b",
"[",
"1mNAME",
"\\",
"x1b",
"[",
"0m",
"zero",
"-",
"Set",
"the",
"znode",
"s",
"to",
"None",
"(",
"no",
"bytes",
")"
] | python | train |
BerkeleyAutomation/perception | perception/webcam_sensor.py | https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/webcam_sensor.py#L104-L132 | def frames(self, most_recent=False):
    """Retrieve a new frame from the PhoXi and convert it to a ColorImage,
    a DepthImage, and an IrImage.
    Parameters
    ----------
    most_recent: bool
        If true, the OpenCV buffer is emptied for the webcam before reading the most recent frame.
    Returns
    -------
    :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
        The ColorImage, DepthImage, and IrImage of the current frame.
    """
    if most_recent:
        for i in xrange(4):
            self._cap.grab()
    for i in range(1):
        if self._adjust_exposure:
            try:
                command = 'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'.format(self._device_id)
                FNULL = open(os.devnull, 'w')
                subprocess.call(shlex.split(command), stdout=FNULL, stderr=subprocess.STDOUT)
            except:
                pass
        ret, frame = self._cap.read()
    rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    return ColorImage(rgb_data, frame=self._frame), None, None | [
"def",
"frames",
"(",
"self",
",",
"most_recent",
"=",
"False",
")",
":",
"if",
"most_recent",
":",
"for",
"i",
"in",
"xrange",
"(",
"4",
")",
":",
"self",
".",
"_cap",
".",
"grab",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"1",
")",
":",
"if",
"self",
".",
"_adjust_exposure",
":",
"try",
":",
"command",
"=",
"'v4l2-ctl -d /dev/video{} -c exposure_auto=1 -c exposure_auto_priority=0 -c exposure_absolute=100 -c saturation=60 -c gain=140'",
".",
"format",
"(",
"self",
".",
"_device_id",
")",
"FNULL",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"subprocess",
".",
"call",
"(",
"shlex",
".",
"split",
"(",
"command",
")",
",",
"stdout",
"=",
"FNULL",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"except",
":",
"pass",
"ret",
",",
"frame",
"=",
"self",
".",
"_cap",
".",
"read",
"(",
")",
"rgb_data",
"=",
"cv2",
".",
"cvtColor",
"(",
"frame",
",",
"cv2",
".",
"COLOR_BGR2RGB",
")",
"return",
"ColorImage",
"(",
"rgb_data",
",",
"frame",
"=",
"self",
".",
"_frame",
")",
",",
"None",
",",
"None"
] | Retrieve a new frame from the PhoXi and convert it to a ColorImage,
a DepthImage, and an IrImage.
Parameters
----------
most_recent: bool
If true, the OpenCV buffer is emptied for the webcam before reading the most recent frame.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame. | [
"Retrieve",
"a",
"new",
"frame",
"from",
"the",
"PhoXi",
"and",
"convert",
"it",
"to",
"a",
"ColorImage",
"a",
"DepthImage",
"and",
"an",
"IrImage",
"."
] | python | train |
gem/oq-engine | openquake/calculators/base.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/base.py#L321-L331 | def check_time_event(oqparam, occupancy_periods):
    """
    Check the `time_event` parameter in the datastore, by comparing
    with the periods found in the exposure.
    """
    time_event = oqparam.time_event
    if time_event and time_event not in occupancy_periods:
        raise ValueError(
            'time_event is %s in %s, but the exposure contains %s' %
            (time_event, oqparam.inputs['job_ini'],
             ', '.join(occupancy_periods))) | [
"def",
"check_time_event",
"(",
"oqparam",
",",
"occupancy_periods",
")",
":",
"time_event",
"=",
"oqparam",
".",
"time_event",
"if",
"time_event",
"and",
"time_event",
"not",
"in",
"occupancy_periods",
":",
"raise",
"ValueError",
"(",
"'time_event is %s in %s, but the exposure contains %s'",
"%",
"(",
"time_event",
",",
"oqparam",
".",
"inputs",
"[",
"'job_ini'",
"]",
",",
"', '",
".",
"join",
"(",
"occupancy_periods",
")",
")",
")"
] | Check the `time_event` parameter in the datastore, by comparing
with the periods found in the exposure. | [
"Check",
"the",
"time_event",
"parameter",
"in",
"the",
"datastore",
"by",
"comparing",
"with",
"the",
"periods",
"found",
"in",
"the",
"exposure",
"."
] | python | train |
mkouhei/tonicdnscli | src/tonicdnscli/processing.py | https://github.com/mkouhei/tonicdnscli/blob/df2d6fb2104ae4d49fa89d1bba2f3ccd2fed388c/src/tonicdnscli/processing.py#L124-L137 | def delete_zone(server, token, domain):
    """Delete specific zone.
    Argument:
        server: TonicDNS API server
        token: TonicDNS API authentication token
        domain: Specify domain name
    x-authentication-token: token
    """
    method = 'DELETE'
    uri = 'https://' + server + '/zone/' + domain
    connect.tonicdns_client(uri, method, token, data=False) | [
"def",
"delete_zone",
"(",
"server",
",",
"token",
",",
"domain",
")",
":",
"method",
"=",
"'DELETE'",
"uri",
"=",
"'https://'",
"+",
"server",
"+",
"'/zone/'",
"+",
"domain",
"connect",
".",
"tonicdns_client",
"(",
"uri",
",",
"method",
",",
"token",
",",
"data",
"=",
"False",
")"
] | Delete specific zone.
Argument:
server: TonicDNS API server
token: TonicDNS API authentication token
domain: Specify domain name
x-authentication-token: token | [
"Delete",
"specific",
"zone",
"."
] | python | train |
pandas-dev/pandas | pandas/core/internals/construction.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L62-L98 | def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
    """
    Extract from a masked rec array and create the manager.
    """
    # essentially process a record array then fill it
    fill_value = data.fill_value
    fdata = ma.getdata(data)
    if index is None:
        index = get_names_from_index(fdata)
        if index is None:
            index = ibase.default_index(len(data))
    index = ensure_index(index)
    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)
    # fill if needed
    new_arrays = []
    for fv, arr, col in zip(fill_value, arrays, arr_columns):
        mask = ma.getmaskarray(data[col])
        if mask.any():
            arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)
    # create the manager
    arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
    if columns is None:
        columns = arr_columns
    mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
    if copy:
        mgr = mgr.copy()
    return mgr | [
"def",
"masked_rec_array_to_mgr",
"(",
"data",
",",
"index",
",",
"columns",
",",
"dtype",
",",
"copy",
")",
":",
"# essentially process a record array then fill it",
"fill_value",
"=",
"data",
".",
"fill_value",
"fdata",
"=",
"ma",
".",
"getdata",
"(",
"data",
")",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"get_names_from_index",
"(",
"fdata",
")",
"if",
"index",
"is",
"None",
":",
"index",
"=",
"ibase",
".",
"default_index",
"(",
"len",
"(",
"data",
")",
")",
"index",
"=",
"ensure_index",
"(",
"index",
")",
"if",
"columns",
"is",
"not",
"None",
":",
"columns",
"=",
"ensure_index",
"(",
"columns",
")",
"arrays",
",",
"arr_columns",
"=",
"to_arrays",
"(",
"fdata",
",",
"columns",
")",
"# fill if needed",
"new_arrays",
"=",
"[",
"]",
"for",
"fv",
",",
"arr",
",",
"col",
"in",
"zip",
"(",
"fill_value",
",",
"arrays",
",",
"arr_columns",
")",
":",
"mask",
"=",
"ma",
".",
"getmaskarray",
"(",
"data",
"[",
"col",
"]",
")",
"if",
"mask",
".",
"any",
"(",
")",
":",
"arr",
",",
"fv",
"=",
"maybe_upcast",
"(",
"arr",
",",
"fill_value",
"=",
"fv",
",",
"copy",
"=",
"True",
")",
"arr",
"[",
"mask",
"]",
"=",
"fv",
"new_arrays",
".",
"append",
"(",
"arr",
")",
"# create the manager",
"arrays",
",",
"arr_columns",
"=",
"reorder_arrays",
"(",
"new_arrays",
",",
"arr_columns",
",",
"columns",
")",
"if",
"columns",
"is",
"None",
":",
"columns",
"=",
"arr_columns",
"mgr",
"=",
"arrays_to_mgr",
"(",
"arrays",
",",
"arr_columns",
",",
"index",
",",
"columns",
",",
"dtype",
")",
"if",
"copy",
":",
"mgr",
"=",
"mgr",
".",
"copy",
"(",
")",
"return",
"mgr"
] | Extract from a masked rec array and create the manager. | [
"Extract",
"from",
"a",
"masked",
"rec",
"array",
"and",
"create",
"the",
"manager",
"."
] | python | train |
markovmodel/PyEMMA | pyemma/datasets/potentials.py | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/datasets/potentials.py#L85-L93 | def folding_model_gradient(rvec, rcut):
    r"""computes the potential's gradient at point rvec"""
    rnorm = np.linalg.norm(rvec)
    if rnorm == 0.0:
        return np.zeros(rvec.shape)
    r = rnorm - rcut
    if r < 0.0:
        return -5.0 * r * rvec / rnorm
    return (1.5 * r - 2.0) * rvec / rnorm | [
"def",
"folding_model_gradient",
"(",
"rvec",
",",
"rcut",
")",
":",
"rnorm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"rvec",
")",
"if",
"rnorm",
"==",
"0.0",
":",
"return",
"np",
".",
"zeros",
"(",
"rvec",
".",
"shape",
")",
"r",
"=",
"rnorm",
"-",
"rcut",
"if",
"r",
"<",
"0.0",
":",
"return",
"-",
"5.0",
"*",
"r",
"*",
"rvec",
"/",
"rnorm",
"return",
"(",
"1.5",
"*",
"r",
"-",
"2.0",
")",
"*",
"rvec",
"/",
"rnorm"
] | r"""computes the potential's gradient at point rvec | [
"r",
"computes",
"the",
"potential",
"s",
"gradient",
"at",
"point",
"rvec"
] | python | train |
vinci1it2000/schedula | schedula/utils/io.py | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/io.py#L126-L164 | def load_default_values(dsp, path):
    """
    Load Dispatcher default values in Python pickle format.
    Pickles are a serialized byte stream of a Python object.
    This format will preserve Python objects used as nodes or edges.
    :param dsp:
        A dispatcher that identifies the model adopted.
    :type dsp: schedula.Dispatcher
    :param path:
        File or filename to write.
        File names ending in .gz or .bz2 will be uncompressed.
    :type path: str, file
    .. testsetup::
        >>> from tempfile import mkstemp
        >>> file_name = mkstemp()[1]
    Example::
        >>> from schedula import Dispatcher
        >>> dsp = Dispatcher()
        >>> dsp.add_data('a', default_value=1)
        'a'
        >>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
        'max'
        >>> save_default_values(dsp, file_name)
        >>> dsp = Dispatcher(dmap=dsp.dmap)
        >>> load_default_values(dsp, file_name)
        >>> dsp.dispatch(inputs={'b': 3})['c']
        3
    """
    import dill
    # noinspection PyArgumentList
    with open(path, 'rb') as f:
        dsp.__init__(dmap=dsp.dmap, default_values=dill.load(f)) | [
"def",
"load_default_values",
"(",
"dsp",
",",
"path",
")",
":",
"import",
"dill",
"# noinspection PyArgumentList",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"dsp",
".",
"__init__",
"(",
"dmap",
"=",
"dsp",
".",
"dmap",
",",
"default_values",
"=",
"dill",
".",
"load",
"(",
"f",
")",
")"
] | Load Dispatcher default values in Python pickle format.
Pickles are a serialized byte stream of a Python object.
This format will preserve Python objects used as nodes or edges.
:param dsp:
A dispatcher that identifies the model adopted.
:type dsp: schedula.Dispatcher
:param path:
File or filename to write.
File names ending in .gz or .bz2 will be uncompressed.
:type path: str, file
.. testsetup::
>>> from tempfile import mkstemp
>>> file_name = mkstemp()[1]
Example::
>>> from schedula import Dispatcher
>>> dsp = Dispatcher()
>>> dsp.add_data('a', default_value=1)
'a'
>>> dsp.add_function(function=max, inputs=['a', 'b'], outputs=['c'])
'max'
>>> save_default_values(dsp, file_name)
>>> dsp = Dispatcher(dmap=dsp.dmap)
>>> load_default_values(dsp, file_name)
>>> dsp.dispatch(inputs={'b': 3})['c']
3 | [
"Load",
"Dispatcher",
"default",
"values",
"in",
"Python",
"pickle",
"format",
"."
] | python | train |
mnooner256/pyqrcode | pyqrcode/builder.py | https://github.com/mnooner256/pyqrcode/blob/674a77b5eaf850d063f518bd90c243ee34ad6b5d/pyqrcode/builder.py#L233-L274 | def encode_kanji(self):
    """This method encodes the QR code's data if its mode is
    kanji. It returns the data encoded as a binary string.
    """
    def two_bytes(data):
        """Output two byte character code as a single integer."""
        def next_byte(b):
            """Make sure that character code is an int. Python 2 and
            3 compatibility.
            """
            if not isinstance(b, int):
                return ord(b)
            else:
                return b
        #Go through the data by looping to every other character
        for i in range(0, len(data), 2):
            yield (next_byte(data[i]) << 8) | next_byte(data[i+1])
    #Force the data into Kanji encoded bytes
    if isinstance(self.data, bytes):
        data = self.data.decode('shiftjis').encode('shiftjis')
    else:
        data = self.data.encode('shiftjis')
    #Now perform the algorithm that will make the kanji into 13 bit fields
    with io.StringIO() as buf:
        for asint in two_bytes(data):
            #Shift the two byte value as indicated by the standard
            if 0x8140 <= asint <= 0x9FFC:
                difference = asint - 0x8140
            elif 0xE040 <= asint <= 0xEBBF:
                difference = asint - 0xC140
            #Split the new value into most and least significant bytes
            msb = (difference >> 8)
            lsb = (difference & 0x00FF)
            #Calculate the actual 13 bit binary value
            buf.write('{0:013b}'.format((msb * 0xC0) + lsb))
        #Return the binary string
        return buf.getvalue() | [
"def",
"encode_kanji",
"(",
"self",
")",
":",
"def",
"two_bytes",
"(",
"data",
")",
":",
"\"\"\"Output two byte character code as a single integer.\"\"\"",
"def",
"next_byte",
"(",
"b",
")",
":",
"\"\"\"Make sure that character code is an int. Python 2 and\n 3 compatibility.\n \"\"\"",
"if",
"not",
"isinstance",
"(",
"b",
",",
"int",
")",
":",
"return",
"ord",
"(",
"b",
")",
"else",
":",
"return",
"b",
"#Go through the data by looping to every other character",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"data",
")",
",",
"2",
")",
":",
"yield",
"(",
"next_byte",
"(",
"data",
"[",
"i",
"]",
")",
"<<",
"8",
")",
"|",
"next_byte",
"(",
"data",
"[",
"i",
"+",
"1",
"]",
")",
"#Force the data into Kanji encoded bytes",
"if",
"isinstance",
"(",
"self",
".",
"data",
",",
"bytes",
")",
":",
"data",
"=",
"self",
".",
"data",
".",
"decode",
"(",
"'shiftjis'",
")",
".",
"encode",
"(",
"'shiftjis'",
")",
"else",
":",
"data",
"=",
"self",
".",
"data",
".",
"encode",
"(",
"'shiftjis'",
")",
"#Now perform the algorithm that will make the kanji into 13 bit fields",
"with",
"io",
".",
"StringIO",
"(",
")",
"as",
"buf",
":",
"for",
"asint",
"in",
"two_bytes",
"(",
"data",
")",
":",
"#Shift the two byte value as indicated by the standard",
"if",
"0x8140",
"<=",
"asint",
"<=",
"0x9FFC",
":",
"difference",
"=",
"asint",
"-",
"0x8140",
"elif",
"0xE040",
"<=",
"asint",
"<=",
"0xEBBF",
":",
"difference",
"=",
"asint",
"-",
"0xC140",
"#Split the new value into most and least significant bytes",
"msb",
"=",
"(",
"difference",
">>",
"8",
")",
"lsb",
"=",
"(",
"difference",
"&",
"0x00FF",
")",
"#Calculate the actual 13 bit binary value",
"buf",
".",
"write",
"(",
"'{0:013b}'",
".",
"format",
"(",
"(",
"msb",
"*",
"0xC0",
")",
"+",
"lsb",
")",
")",
"#Return the binary string",
"return",
"buf",
".",
"getvalue",
"(",
")"
] | This method encodes the QR code's data if its mode is
kanji. It returns the data encoded as a binary string. | [
"This",
"method",
"encodes",
"the",
"QR",
"code",
"s",
"data",
"if",
"its",
"mode",
"is",
"kanji",
".",
"It",
"returns",
"the",
"data",
"encoded",
"as",
"a",
"binary",
"string",
"."
] | python | train |
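Worked arithmetic for one character through the 13-bit compaction in `encode_kanji`, using 0x935F (the example codepoint from the QR code specification):

```python
asint = 0x935F                      # falls in the 0x8140..0x9FFC range
difference = asint - 0x8140         # 0x121F
msb, lsb = difference >> 8, difference & 0x00FF   # 0x12, 0x1F
value = msb * 0xC0 + lsb            # 18 * 192 + 31 = 3487
print('{0:013b}'.format(value))     # 0110110011111
```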
larsks/thecache | thecache/cache.py | https://github.com/larsks/thecache/blob/e535f91031a7f92f19b5ff6fe2a1a03c7680e9e0/thecache/cache.py#L201-L206 | def load_iter(self, key, chunksize=None, noexpire=None):
    '''Lookup an item in the cache and return an iterator
    that reads chunksize bytes of data at a time. The underlying
    file will be closed when all data has been read'''
    return chunk_iterator(self.load_fd(key, noexpire=noexpire),
                          chunksize=chunksize) | [
"def",
"load_iter",
"(",
"self",
",",
"key",
",",
"chunksize",
"=",
"None",
",",
"noexpire",
"=",
"None",
")",
":",
"return",
"chunk_iterator",
"(",
"self",
".",
"load_fd",
"(",
"key",
",",
"noexpire",
"=",
"noexpire",
")",
",",
"chunksize",
"=",
"chunksize",
")"
] | Lookup an item in the cache and return an iterator
that reads chunksize bytes of data at a time. The underlying
file will be closed when all data has been read | [
"Lookup",
"an",
"item",
"in",
"the",
"cache",
"and",
"return",
"an",
"iterator",
"that",
"reads",
"chunksize",
"bytes",
"of",
"data",
"at",
"a",
"time",
".",
"The",
"underlying",
"file",
"will",
"be",
"closed",
"when",
"all",
"data",
"has",
"been",
"read"
] | python | train |
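`chunk_iterator` is referenced but not part of this record; a plausible sketch that honors the docstring's promise to close the file once all data is read (the default chunk size is an assumption):

```python
def chunk_iterator(fd, chunksize=None):
    """Yield fixed-size chunks from fd, closing it when exhausted."""
    chunksize = chunksize or 4096   # default size is an assumption
    try:
        while True:
            chunk = fd.read(chunksize)
            if not chunk:
                break
            yield chunk
    finally:
        fd.close()                  # matches the docstring's guarantee
```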
hardbyte/python-can | can/interfaces/ixxat/canlib.py | https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/ixxat/canlib.py#L99-L131 | def __check_status(result, function, arguments):
    """
    Check the result of a vcinpl function call and raise appropriate exception
    in case of an error. Used as errcheck function when mapping C functions
    with ctypes.
    :param result:
        Function call numeric result
    :param callable function:
        Called function
    :param arguments:
        Arbitrary arguments tuple
    :raise:
        :class:VCITimeout
        :class:VCIRxQueueEmptyError
        :class:StopIteration
        :class:VCIError
    """
    if isinstance(result, int):
        # Real return value is an unsigned long
        result = ctypes.c_ulong(result).value
    if result == constants.VCI_E_TIMEOUT:
        raise VCITimeout("Function {} timed out".format(function._name))
    elif result == constants.VCI_E_RXQUEUE_EMPTY:
        raise VCIRxQueueEmptyError()
    elif result == constants.VCI_E_NO_MORE_ITEMS:
        raise StopIteration()
    elif result == constants.VCI_E_ACCESSDENIED:
        pass  # not a real error, might happen if another program has initialized the bus
    elif result != constants.VCI_OK:
        raise VCIError(vciFormatError(function, result))
    return result | [
"def",
"__check_status",
"(",
"result",
",",
"function",
",",
"arguments",
")",
":",
"if",
"isinstance",
"(",
"result",
",",
"int",
")",
":",
"# Real return value is an unsigned long",
"result",
"=",
"ctypes",
".",
"c_ulong",
"(",
"result",
")",
".",
"value",
"if",
"result",
"==",
"constants",
".",
"VCI_E_TIMEOUT",
":",
"raise",
"VCITimeout",
"(",
"\"Function {} timed out\"",
".",
"format",
"(",
"function",
".",
"_name",
")",
")",
"elif",
"result",
"==",
"constants",
".",
"VCI_E_RXQUEUE_EMPTY",
":",
"raise",
"VCIRxQueueEmptyError",
"(",
")",
"elif",
"result",
"==",
"constants",
".",
"VCI_E_NO_MORE_ITEMS",
":",
"raise",
"StopIteration",
"(",
")",
"elif",
"result",
"==",
"constants",
".",
"VCI_E_ACCESSDENIED",
":",
"pass",
"# not a real error, might happen if another program has initialized the bus",
"elif",
"result",
"!=",
"constants",
".",
"VCI_OK",
":",
"raise",
"VCIError",
"(",
"vciFormatError",
"(",
"function",
",",
"result",
")",
")",
"return",
"result"
] | Check the result of a vcinpl function call and raise appropriate exception
in case of an error. Used as errcheck function when mapping C functions
with ctypes.
:param result:
Function call numeric result
:param callable function:
Called function
:param arguments:
Arbitrary arguments tuple
:raise:
:class:VCITimeout
:class:VCIRxQueueEmptyError
:class:StopIteration
:class:VCIError | [
"Check",
"the",
"result",
"of",
"a",
"vcinpl",
"function",
"call",
"and",
"raise",
"appropriate",
"exception",
"in",
"case",
"of",
"an",
"error",
".",
"Used",
"as",
"errcheck",
"function",
"when",
"mapping",
"C",
"functions",
"with",
"ctypes",
".",
":",
"param",
"result",
":",
"Function",
"call",
"numeric",
"result",
":",
"param",
"callable",
"function",
":",
"Called",
"function",
":",
"param",
"arguments",
":",
"Arbitrary",
"arguments",
"tuple",
":",
"raise",
":",
":",
"class",
":",
"VCITimeout",
":",
"class",
":",
"VCIRxQueueEmptyError",
":",
"class",
":",
"StopIteration",
":",
"class",
":",
"VCIError"
] | python | train |
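The errcheck wiring described in the docstring above can be sketched against a standard C library call; the zero-means-success convention and the POSIX libc are assumptions for illustration, not part of vcinpl:

import ctypes
import ctypes.util

class LibError(Exception):
    pass

def check_status(result, function, arguments):
    # ctypes invokes this hook after every call to the mapped function.
    if result != 0:
        raise LibError('%s returned %d' % (function.__name__, result))
    return result

libc = ctypes.CDLL(ctypes.util.find_library('c'))
libc.strcmp.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
libc.strcmp.errcheck = check_status
libc.strcmp(b'same', b'same')  # returns 0, so the check passes
# libc.strcmp(b'a', b'b') would raise LibError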
rabitt/pysox | sox/transform.py | https://github.com/rabitt/pysox/blob/eae89bde74567136ec3f723c3e6b369916d9b837/sox/transform.py#L1662-L1699 | def loudness(self, gain_db=-10.0, reference_level=65.0):
'''Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain
'''
if not is_number(gain_db):
raise ValueError('gain_db must be a number.')
if not is_number(reference_level):
raise ValueError('reference_level must be a number')
if reference_level > 75 or reference_level < 50:
raise ValueError('reference_level must be between 50 and 75')
effect_args = [
'loudness',
'{:f}'.format(gain_db),
'{:f}'.format(reference_level)
]
self.effects.extend(effect_args)
self.effects_log.append('loudness')
return self | [
"def",
"loudness",
"(",
"self",
",",
"gain_db",
"=",
"-",
"10.0",
",",
"reference_level",
"=",
"65.0",
")",
":",
"if",
"not",
"is_number",
"(",
"gain_db",
")",
":",
"raise",
"ValueError",
"(",
"'gain_db must be a number.'",
")",
"if",
"not",
"is_number",
"(",
"reference_level",
")",
":",
"raise",
"ValueError",
"(",
"'reference_level must be a number'",
")",
"if",
"reference_level",
">",
"75",
"or",
"reference_level",
"<",
"50",
":",
"raise",
"ValueError",
"(",
"'reference_level must be between 50 and 75'",
")",
"effect_args",
"=",
"[",
"'loudness'",
",",
"'{:f}'",
".",
"format",
"(",
"gain_db",
")",
",",
"'{:f}'",
".",
"format",
"(",
"reference_level",
")",
"]",
"self",
".",
"effects",
".",
"extend",
"(",
"effect_args",
")",
"self",
".",
"effects_log",
".",
"append",
"(",
"'loudness'",
")",
"return",
"self"
] | Loudness control. Similar to the gain effect, but provides
equalisation for the human auditory system.
The gain is adjusted by gain_db and the signal is equalised according
to ISO 226 w.r.t. reference_level.
Parameters
----------
gain_db : float, default=-10.0
Loudness adjustment amount (in dB)
reference_level : float, default=65.0
Reference level (in dB) according to which the signal is equalized.
Must be between 50 and 75 (dB)
See Also
--------
gain | [
"Loudness",
"control",
".",
"Similar",
"to",
"the",
"gain",
"effect",
"but",
"provides",
"equalisation",
"for",
"the",
"human",
"auditory",
"system",
"."
] | python | valid |
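A hedged usage sketch for the effect above; the file paths are placeholders, and the SoX binary must be installed for build() to succeed:

import sox

tfm = sox.Transformer()
tfm.loudness(gain_db=-6.0, reference_level=70.0)  # reference must stay within 50-75 dB
tfm.build('input.wav', 'quieter.wav')             # placeholder paths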
hassa/BeatCop | beatcop.py | https://github.com/hassa/BeatCop/blob/bf7721e17a7828728b15c5833f047d858111197c/beatcop.py#L140-L170 | def run(self):
"""Run process if nobody else is, otherwise wait until we're needed. Never returns."""
log.info("Waiting for lock, currently held by %s", self.lock.who())
if self.lock.acquire():
log.info("Lock '%s' acquired", self.lockname)
# We got the lock, so we make sure the process is running and keep refreshing the lock - if we ever stop for any reason, for example because our host died, the lock will soon expire.
while True:
if self.process is None: # Process not spawned yet
self.process = self.spawn(self.command)
log.info("Spawned PID %d", self.process.pid)
child_status = self.process.poll()
if child_status is not None:
# Oops, process died on us.
log.error("Child died with exit code %d", child_status)
sys.exit(1)
# Refresh lock and sleep
if not self.lock.refresh():
who = self.lock.who()
if who is None:
if self.lock.acquire(block=False):
log.warning("Lock refresh failed, but successfully re-acquired unclaimed lock")
else:
log.error("Lock refresh and subsequent re-acquire failed, giving up (Lock now held by %s)", self.lock.who())
self.cleanup()
sys.exit(os.EX_UNAVAILABLE)
else:
log.error("Lock refresh failed, %s stole it - bailing out", self.lock.who())
self.cleanup()
sys.exit(os.EX_UNAVAILABLE)
time.sleep(self.sleep) | [
"def",
"run",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"Waiting for lock, currently held by %s\"",
",",
"self",
".",
"lock",
".",
"who",
"(",
")",
")",
"if",
"self",
".",
"lock",
".",
"acquire",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"Lock '%s' acquired\"",
",",
"self",
".",
"lockname",
")",
"# We got the lock, so we make sure the process is running and keep refreshing the lock - if we ever stop for any reason, for example because our host died, the lock will soon expire.",
"while",
"True",
":",
"if",
"self",
".",
"process",
"is",
"None",
":",
"# Process not spawned yet",
"self",
".",
"process",
"=",
"self",
".",
"spawn",
"(",
"self",
".",
"command",
")",
"log",
".",
"info",
"(",
"\"Spawned PID %d\"",
",",
"self",
".",
"process",
".",
"pid",
")",
"child_status",
"=",
"self",
".",
"process",
".",
"poll",
"(",
")",
"if",
"child_status",
"is",
"not",
"None",
":",
"# Oops, process died on us.",
"log",
".",
"error",
"(",
"\"Child died with exit code %d\"",
",",
"child_status",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Refresh lock and sleep",
"if",
"not",
"self",
".",
"lock",
".",
"refresh",
"(",
")",
":",
"who",
"=",
"self",
".",
"lock",
".",
"who",
"(",
")",
"if",
"who",
"is",
"None",
":",
"if",
"self",
".",
"lock",
".",
"acquire",
"(",
"block",
"=",
"False",
")",
":",
"log",
".",
"warning",
"(",
"\"Lock refresh failed, but successfully re-acquired unclaimed lock\"",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Lock refresh and subsequent re-acquire failed, giving up (Lock now held by %s)\"",
",",
"self",
".",
"lock",
".",
"who",
"(",
")",
")",
"self",
".",
"cleanup",
"(",
")",
"sys",
".",
"exit",
"(",
"os",
".",
"EX_UNAVAILABLE",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"Lock refresh failed, %s stole it - bailing out\"",
",",
"self",
".",
"lock",
".",
"who",
"(",
")",
")",
"self",
".",
"cleanup",
"(",
")",
"sys",
".",
"exit",
"(",
"os",
".",
"EX_UNAVAILABLE",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"sleep",
")"
] | Run process if nobody else is, otherwise wait until we're needed. Never returns. | [
"Run",
"process",
"if",
"nobody",
"else",
"is",
"otherwise",
"wait",
"until",
"we",
"re",
"needed",
".",
"Never",
"returns",
"."
] | python | train |
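The poll-and-sleep supervision loop at the heart of run() can be shown standalone; 'sleep 5' is a POSIX stand-in for the supervised command, and the lock refresh is reduced to a comment:

import subprocess
import time

proc = subprocess.Popen(['sleep', '5'])  # stand-in for the supervised command
while proc.poll() is None:               # same liveness check run() performs
    # a real supervisor would refresh its distributed lock here
    time.sleep(1)
print('child exited with', proc.returncode)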
MaxStrange/AudioSegment | algorithms/asa.py | https://github.com/MaxStrange/AudioSegment/blob/1daefb8de626ddff3ff7016697c3ad31d262ecd6/algorithms/asa.py#L263-L283 | def _lookup_offset_by_onset_idx(onset_idx, onsets, offsets):
"""
Takes an onset index (freq, sample) and returns the offset index (freq, sample)
such that frequency index is the same, and sample index is the minimum of all
offsets ocurring after the given onset. If there are no offsets after the given
onset in that frequency channel, the final sample in that channel is returned.
"""
assert len(onset_idx) == 2, "Onset_idx must be a tuple of the form (freq_idx, sample_idx)"
frequency_idx, sample_idx = onset_idx
offset_sample_idxs = np.reshape(np.where(offsets[frequency_idx, :] == 1), (-1,))
# get the offsets which occur after onset
offset_sample_idxs = offset_sample_idxs[offset_sample_idxs > sample_idx]
if len(offset_sample_idxs) == 0:
# There is no offset in this frequency that occurs after the onset, just return the last sample
chosen_offset_sample_idx = offsets.shape[1] - 1
assert offsets[frequency_idx, chosen_offset_sample_idx] == 0
else:
# Return the closest offset to the onset
chosen_offset_sample_idx = offset_sample_idxs[0]
assert offsets[frequency_idx, chosen_offset_sample_idx] != 0
return frequency_idx, chosen_offset_sample_idx | [
"def",
"_lookup_offset_by_onset_idx",
"(",
"onset_idx",
",",
"onsets",
",",
"offsets",
")",
":",
"assert",
"len",
"(",
"onset_idx",
")",
"==",
"2",
",",
"\"Onset_idx must be a tuple of the form (freq_idx, sample_idx)\"",
"frequency_idx",
",",
"sample_idx",
"=",
"onset_idx",
"offset_sample_idxs",
"=",
"np",
".",
"reshape",
"(",
"np",
".",
"where",
"(",
"offsets",
"[",
"frequency_idx",
",",
":",
"]",
"==",
"1",
")",
",",
"(",
"-",
"1",
",",
")",
")",
"# get the offsets which occur after onset",
"offset_sample_idxs",
"=",
"offset_sample_idxs",
"[",
"offset_sample_idxs",
">",
"sample_idx",
"]",
"if",
"len",
"(",
"offset_sample_idxs",
")",
"==",
"0",
":",
"# There is no offset in this frequency that occurs after the onset, just return the last sample",
"chosen_offset_sample_idx",
"=",
"offsets",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
"assert",
"offsets",
"[",
"frequency_idx",
",",
"chosen_offset_sample_idx",
"]",
"==",
"0",
"else",
":",
"# Return the closest offset to the onset",
"chosen_offset_sample_idx",
"=",
"offset_sample_idxs",
"[",
"0",
"]",
"assert",
"offsets",
"[",
"frequency_idx",
",",
"chosen_offset_sample_idx",
"]",
"!=",
"0",
"return",
"frequency_idx",
",",
"chosen_offset_sample_idx"
] | Takes an onset index (freq, sample) and returns the offset index (freq, sample)
such that frequency index is the same, and sample index is the minimum of all
offsets occurring after the given onset. If there are no offsets after the given
onset in that frequency channel, the final sample in that channel is returned. | [
"Takes",
"an",
"onset",
"index",
"(",
"freq",
"sample",
")",
"and",
"returns",
"the",
"offset",
"index",
"(",
"freq",
"sample",
")",
"such",
"that",
"frequency",
"index",
"is",
"the",
"same",
"and",
"sample",
"index",
"is",
"the",
"minimum",
"of",
"all",
"offsets",
"ocurring",
"after",
"the",
"given",
"onset",
".",
"If",
"there",
"are",
"no",
"offsets",
"after",
"the",
"given",
"onset",
"in",
"that",
"frequency",
"channel",
"the",
"final",
"sample",
"in",
"that",
"channel",
"is",
"returned",
"."
] | python | test |
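A toy run of the lookup above with two frequency channels and eight samples, assuming the module-private function is in scope:

import numpy as np

onsets = np.zeros((2, 8), dtype=int)
offsets = np.zeros((2, 8), dtype=int)
onsets[0, 2] = 1   # onset in channel 0 at sample 2
offsets[0, 5] = 1  # nearest offset after it, at sample 5

print(_lookup_offset_by_onset_idx((0, 2), onsets, offsets))  # (0, 5)
print(_lookup_offset_by_onset_idx((1, 3), onsets, offsets))  # (1, 7): no offset, last sample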
DeepHorizons/iarm | iarm/arm_instructions/_meta.py | https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/_meta.py#L241-L257 | def get_parameters(self, regex_exp, parameters):
"""
        Given a regex expression and the string with the parameters,
either return a regex match object or raise an exception if the regex
did not find a match
:param regex_exp:
:param parameters:
:return:
"""
# TODO find a better way to do the equate replacement
for rep in self.equates:
parameters = parameters.replace(rep, str(self.equates[rep]))
match = re.match(regex_exp, parameters)
if not match:
raise iarm.exceptions.ParsingError("Parameters are None, did you miss a comma?")
return match.groups() | [
"def",
"get_parameters",
"(",
"self",
",",
"regex_exp",
",",
"parameters",
")",
":",
"# TODO find a better way to do the equate replacement",
"for",
"rep",
"in",
"self",
".",
"equates",
":",
"parameters",
"=",
"parameters",
".",
"replace",
"(",
"rep",
",",
"str",
"(",
"self",
".",
"equates",
"[",
"rep",
"]",
")",
")",
"match",
"=",
"re",
".",
"match",
"(",
"regex_exp",
",",
"parameters",
")",
"if",
"not",
"match",
":",
"raise",
"iarm",
".",
"exceptions",
".",
"ParsingError",
"(",
"\"Parameters are None, did you miss a comma?\"",
")",
"return",
"match",
".",
"groups",
"(",
")"
] | Given a regex expression and the string with the parameters,
either return a regex match object or raise an exception if the regex
did not find a match
:param regex_exp:
:param parameters:
:return: | [
"Given",
"a",
"regex",
"expression",
"and",
"the",
"string",
"with",
"the",
"paramers",
"either",
"return",
"a",
"regex",
"match",
"object",
"or",
"raise",
"an",
"exception",
"if",
"the",
"regex",
"did",
"not",
"find",
"a",
"match",
":",
"param",
"regex_exp",
":",
":",
"param",
"parameters",
":",
":",
"return",
":"
] | python | train |
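The substitute-then-match behaviour can be reproduced with the standard library alone; the EQU-style equates dict and the register regex below are illustrative, not iarm's actual tables:

import re

equates = {'LEN': 8}
parameters = 'R0, #LEN'
for name, value in equates.items():
    parameters = parameters.replace(name, str(value))  # same naive textual substitution

match = re.match(r'\s*(R\d+),\s*#(\d+)', parameters)
print(match.groups())  # ('R0', '8')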
Iotic-Labs/py-IoticAgent | src/IoticAgent/IOT/Client.py | https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Client.py#L453-L483 | def register_callback_deleted(self, func, serialised=True):
"""
Register a callback for resource deletion. This will be called when any resource
is deleted within your agent.
        If `serialised` is not set, the callbacks might arrive in a different order than they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of resource deleted
lid : <name> # the local name of the resource
id : <GUID> # the global Id of the resource
`Note` resource types are defined [here](../Core/Const.m.html)
`Example`
#!python
def deleted_callback(args):
print(args)
...
client.register_callback_deleted(deleted_callback)
This would print out something like the following on deletion of an R_ENTITY
#!python
OrderedDict([(u'lid', u'old_thing1'),
(u'r', 1),
(u'id', u'315637813d801ec6f057c67728bf00c2')])
"""
self.__client.register_callback_deleted(partial(self.__callback_payload_only, func), serialised=serialised) | [
"def",
"register_callback_deleted",
"(",
"self",
",",
"func",
",",
"serialised",
"=",
"True",
")",
":",
"self",
".",
"__client",
".",
"register_callback_deleted",
"(",
"partial",
"(",
"self",
".",
"__callback_payload_only",
",",
"func",
")",
",",
"serialised",
"=",
"serialised",
")"
] | Register a callback for resource deletion. This will be called when any resource
is deleted within your agent.
If `serialised` is not set, the callbacks might arrive in a different order than they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of resource deleted
lid : <name> # the local name of the resource
id : <GUID> # the global Id of the resource
`Note` resource types are defined [here](../Core/Const.m.html)
`Example`
#!python
def deleted_callback(args):
print(args)
...
client.register_callback_deleted(deleted_callback)
This would print out something like the following on deletion of an R_ENTITY
#!python
OrderedDict([(u'lid', u'old_thing1'),
(u'r', 1),
(u'id', u'315637813d801ec6f057c67728bf00c2')]) | [
"Register",
"a",
"callback",
"for",
"resource",
"deletion",
".",
"This",
"will",
"be",
"called",
"when",
"any",
"resource",
"is",
"deleted",
"within",
"your",
"agent",
".",
"If",
"serialised",
"is",
"not",
"set",
"the",
"callbacks",
"might",
"arrive",
"in",
"a",
"different",
"order",
"to",
"they",
"were",
"requested",
"."
] | python | train |
sdispater/pendulum | pendulum/datetime.py | https://github.com/sdispater/pendulum/blob/94d28b0d3cb524ae02361bd1ed7ea03e2e655e4e/pendulum/datetime.py#L512-L530 | def farthest(self, dt1, dt2, *dts):
from functools import reduce
"""
Get the farthest date from the instance.
:type dt1: datetime.datetime
:type dt2: datetime.datetime
:type dts: list[datetime.datetime,]
:rtype: DateTime
"""
dt1 = pendulum.instance(dt1)
dt2 = pendulum.instance(dt2)
dts = [dt1, dt2] + [pendulum.instance(x) for x in dts]
dts = [(abs(self - dt), dt) for dt in dts]
return max(dts)[1] | [
"def",
"farthest",
"(",
"self",
",",
"dt1",
",",
"dt2",
",",
"*",
"dts",
")",
":",
"from",
"functools",
"import",
"reduce",
"dt1",
"=",
"pendulum",
".",
"instance",
"(",
"dt1",
")",
"dt2",
"=",
"pendulum",
".",
"instance",
"(",
"dt2",
")",
"dts",
"=",
"[",
"dt1",
",",
"dt2",
"]",
"+",
"[",
"pendulum",
".",
"instance",
"(",
"x",
")",
"for",
"x",
"in",
"dts",
"]",
"dts",
"=",
"[",
"(",
"abs",
"(",
"self",
"-",
"dt",
")",
",",
"dt",
")",
"for",
"dt",
"in",
"dts",
"]",
"return",
"max",
"(",
"dts",
")",
"[",
"1",
"]"
] | Get the farthest date from the instance.
:type dt1: datetime.datetime
:type dt2: datetime.datetime
:type dts: list[datetime.datetime,]
:rtype: DateTime | [
"Get",
"the",
"farthest",
"date",
"from",
"the",
"instance",
"."
] | python | train |
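A usage sketch (pendulum 2.x API assumed): farthest() keeps whichever candidate lies at the greater absolute distance from the instance:

import pendulum

dt = pendulum.datetime(2020, 6, 15)
far = dt.farthest(pendulum.datetime(2020, 6, 1), pendulum.datetime(2021, 1, 1))
print(far)  # 2021-01-01, since 200 days away beats 14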
PyCQA/pylint | pylint/pyreverse/diadefslib.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/pyreverse/diadefslib.py#L171-L175 | def visit_importfrom(self, node):
"""visit astroid.ImportFrom and catch modules for package diagram
"""
if self.pkgdiagram:
self.pkgdiagram.add_from_depend(node, node.modname) | [
"def",
"visit_importfrom",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"pkgdiagram",
":",
"self",
".",
"pkgdiagram",
".",
"add_from_depend",
"(",
"node",
",",
"node",
".",
"modname",
")"
] | visit astroid.ImportFrom and catch modules for package diagram | [
"visit",
"astroid",
".",
"ImportFrom",
"and",
"catch",
"modules",
"for",
"package",
"diagram"
] | python | test |
raiden-network/raiden | raiden/network/transport/matrix/utils.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/transport/matrix/utils.py#L196-L230 | def _presence_listener(self, event: Dict[str, Any]):
"""
Update cached user presence state from Matrix presence events.
Due to the possibility of nodes using accounts on multiple homeservers a composite
address state is synthesised from the cached individual user presence states.
"""
if self._stop_event.ready():
return
user_id = event['sender']
if event['type'] != 'm.presence' or user_id == self._user_id:
return
user = self._get_user(user_id)
user.displayname = event['content'].get('displayname') or user.displayname
address = self._validate_userid_signature(user)
if not address:
# Malformed address - skip
return
# not a user we've whitelisted, skip
if not self.is_address_known(address):
return
self.add_userid_for_address(address, user_id)
new_state = UserPresence(event['content']['presence'])
if new_state == self._userid_to_presence.get(user_id):
# Cached presence state matches, no action required
return
self._userid_to_presence[user_id] = new_state
self.refresh_address_presence(address)
if self._user_presence_changed_callback:
self._user_presence_changed_callback(user, new_state) | [
"def",
"_presence_listener",
"(",
"self",
",",
"event",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
":",
"if",
"self",
".",
"_stop_event",
".",
"ready",
"(",
")",
":",
"return",
"user_id",
"=",
"event",
"[",
"'sender'",
"]",
"if",
"event",
"[",
"'type'",
"]",
"!=",
"'m.presence'",
"or",
"user_id",
"==",
"self",
".",
"_user_id",
":",
"return",
"user",
"=",
"self",
".",
"_get_user",
"(",
"user_id",
")",
"user",
".",
"displayname",
"=",
"event",
"[",
"'content'",
"]",
".",
"get",
"(",
"'displayname'",
")",
"or",
"user",
".",
"displayname",
"address",
"=",
"self",
".",
"_validate_userid_signature",
"(",
"user",
")",
"if",
"not",
"address",
":",
"# Malformed address - skip",
"return",
"# not a user we've whitelisted, skip",
"if",
"not",
"self",
".",
"is_address_known",
"(",
"address",
")",
":",
"return",
"self",
".",
"add_userid_for_address",
"(",
"address",
",",
"user_id",
")",
"new_state",
"=",
"UserPresence",
"(",
"event",
"[",
"'content'",
"]",
"[",
"'presence'",
"]",
")",
"if",
"new_state",
"==",
"self",
".",
"_userid_to_presence",
".",
"get",
"(",
"user_id",
")",
":",
"# Cached presence state matches, no action required",
"return",
"self",
".",
"_userid_to_presence",
"[",
"user_id",
"]",
"=",
"new_state",
"self",
".",
"refresh_address_presence",
"(",
"address",
")",
"if",
"self",
".",
"_user_presence_changed_callback",
":",
"self",
".",
"_user_presence_changed_callback",
"(",
"user",
",",
"new_state",
")"
] | Update cached user presence state from Matrix presence events.
Due to the possibility of nodes using accounts on multiple homeservers a composite
address state is synthesised from the cached individual user presence states. | [
"Update",
"cached",
"user",
"presence",
"state",
"from",
"Matrix",
"presence",
"events",
"."
] | python | train |
python-diamond/Diamond | src/collectors/puppetagent/puppetagent.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/puppetagent/puppetagent.py#L30-L39 | def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PuppetAgentCollector, self).get_default_config()
config.update({
'yaml_path': '/var/lib/puppet/state/last_run_summary.yaml',
'path': 'puppetagent',
})
return config | [
"def",
"get_default_config",
"(",
"self",
")",
":",
"config",
"=",
"super",
"(",
"PuppetAgentCollector",
",",
"self",
")",
".",
"get_default_config",
"(",
")",
"config",
".",
"update",
"(",
"{",
"'yaml_path'",
":",
"'/var/lib/puppet/state/last_run_summary.yaml'",
",",
"'path'",
":",
"'puppetagent'",
",",
"}",
")",
"return",
"config"
] | Returns the default collector settings | [
"Returns",
"the",
"default",
"collector",
"settings"
] | python | train |
iwanbk/nyamuk | nyamuk/nyamuk.py | https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/nyamuk.py#L204-L210 | def subscribe_multi(self, topics):
"""Subscribe to some topics."""
if self.sock == NC.INVALID_SOCKET:
return NC.ERR_NO_CONN
self.logger.info("SUBSCRIBE: %s", ', '.join([t for (t,q) in topics]))
return self.send_subscribe(False, [(utf8encode(topic), qos) for (topic, qos) in topics]) | [
"def",
"subscribe_multi",
"(",
"self",
",",
"topics",
")",
":",
"if",
"self",
".",
"sock",
"==",
"NC",
".",
"INVALID_SOCKET",
":",
"return",
"NC",
".",
"ERR_NO_CONN",
"self",
".",
"logger",
".",
"info",
"(",
"\"SUBSCRIBE: %s\"",
",",
"', '",
".",
"join",
"(",
"[",
"t",
"for",
"(",
"t",
",",
"q",
")",
"in",
"topics",
"]",
")",
")",
"return",
"self",
".",
"send_subscribe",
"(",
"False",
",",
"[",
"(",
"utf8encode",
"(",
"topic",
")",
",",
"qos",
")",
"for",
"(",
"topic",
",",
"qos",
")",
"in",
"topics",
"]",
")"
] | Subscribe to some topics. | [
"Subscribe",
"to",
"some",
"topics",
"."
] | python | train |
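A hedged usage sketch; client is assumed to be an already-connected Nyamuk instance and NC to be importable from the same package, since neither setup step is shown in this entry:

topics = [('sensors/temperature', 0), ('sensors/humidity', 1)]  # (topic, qos) pairs
rc = client.subscribe_multi(topics)
if rc == NC.ERR_NO_CONN:
    print('socket not connected yet')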
grangier/python-goose | goose/extractors/content.py | https://github.com/grangier/python-goose/blob/09023ec9f5ef26a628a2365616c0a7c864f0ecea/goose/extractors/content.py#L266-L276 | def update_node_count(self, node, add_to_count):
"""\
stores how many decent nodes are under a parent node
"""
current_score = 0
count_string = self.parser.getAttribute(node, 'gravityNodes')
if count_string:
current_score = int(count_string)
new_score = current_score + add_to_count
self.parser.setAttribute(node, "gravityNodes", str(new_score)) | [
"def",
"update_node_count",
"(",
"self",
",",
"node",
",",
"add_to_count",
")",
":",
"current_score",
"=",
"0",
"count_string",
"=",
"self",
".",
"parser",
".",
"getAttribute",
"(",
"node",
",",
"'gravityNodes'",
")",
"if",
"count_string",
":",
"current_score",
"=",
"int",
"(",
"count_string",
")",
"new_score",
"=",
"current_score",
"+",
"add_to_count",
"self",
".",
"parser",
".",
"setAttribute",
"(",
"node",
",",
"\"gravityNodes\"",
",",
"str",
"(",
"new_score",
")",
")"
] | \
stores how many decent nodes are under a parent node | [
"\\",
"stores",
"how",
"many",
"decent",
"nodes",
"are",
"under",
"a",
"parent",
"node"
] | python | train |
tcalmant/ipopo | pelix/ipopo/instance.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/instance.py#L872-L892 | def __set_binding(self, dependency, service, reference):
# type: (Any, Any, ServiceReference) -> None
"""
Injects a service in the component
:param dependency: The dependency handler
:param service: The injected service
:param reference: The reference of the injected service
"""
# Set the value
setattr(self.instance, dependency.get_field(), dependency.get_value())
# Call the component back
self.safe_callback(constants.IPOPO_CALLBACK_BIND, service, reference)
self.__safe_field_callback(
dependency.get_field(),
constants.IPOPO_CALLBACK_BIND_FIELD,
service,
reference,
) | [
"def",
"__set_binding",
"(",
"self",
",",
"dependency",
",",
"service",
",",
"reference",
")",
":",
"# type: (Any, Any, ServiceReference) -> None",
"# Set the value",
"setattr",
"(",
"self",
".",
"instance",
",",
"dependency",
".",
"get_field",
"(",
")",
",",
"dependency",
".",
"get_value",
"(",
")",
")",
"# Call the component back",
"self",
".",
"safe_callback",
"(",
"constants",
".",
"IPOPO_CALLBACK_BIND",
",",
"service",
",",
"reference",
")",
"self",
".",
"__safe_field_callback",
"(",
"dependency",
".",
"get_field",
"(",
")",
",",
"constants",
".",
"IPOPO_CALLBACK_BIND_FIELD",
",",
"service",
",",
"reference",
",",
")"
] | Injects a service in the component
:param dependency: The dependency handler
:param service: The injected service
:param reference: The reference of the injected service | [
"Injects",
"a",
"service",
"in",
"the",
"component"
] | python | train |
glasslion/redlock | redlock/lock.py | https://github.com/glasslion/redlock/blob/7f873cc362eefa7f7adee8d4913e64f87c1fd1c9/redlock/lock.py#L62-L72 | def create_lock(self, resource, **kwargs):
"""
Create a new RedLock object and reuse stored Redis clients.
All the kwargs it received would be passed to the RedLock's __init__
function.
"""
lock = RedLock(resource=resource, created_by_factory=True, **kwargs)
lock.redis_nodes = self.redis_nodes
lock.quorum = self.quorum
lock.factory = self
return lock | [
"def",
"create_lock",
"(",
"self",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"lock",
"=",
"RedLock",
"(",
"resource",
"=",
"resource",
",",
"created_by_factory",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"lock",
".",
"redis_nodes",
"=",
"self",
".",
"redis_nodes",
"lock",
".",
"quorum",
"=",
"self",
".",
"quorum",
"lock",
".",
"factory",
"=",
"self",
"return",
"lock"
] | Create a new RedLock object and reuse stored Redis clients.
All the kwargs it received would be passed to the RedLock's __init__
function. | [
"Create",
"a",
"new",
"RedLock",
"object",
"and",
"reuse",
"stored",
"Redis",
"clients",
".",
"All",
"the",
"kwargs",
"it",
"received",
"would",
"be",
"passed",
"to",
"the",
"RedLock",
"s",
"__init__",
"function",
"."
] | python | train |
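Typical factory usage; the RedLockFactory constructor arguments shown are assumptions drawn from common redlock-py examples rather than from this entry:

from redlock import RedLockFactory

factory = RedLockFactory(connection_details=[{'host': 'localhost', 'port': 6379}])
lock = factory.create_lock('orders:42', ttl=1000)  # kwargs forwarded to RedLock.__init__
if lock.acquire():
    try:
        pass  # critical section
    finally:
        lock.release()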
glormph/msstitch | src/app/drivers/prottable/merge.py | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/prottable/merge.py#L35-L44 | def set_feature_generator(self):
"""Generates proteins with quant from the lookup table"""
self.features = preparation.build_proteintable(self.lookup,
self.headerfields,
self.mergecutoff,
self.isobaric,
self.precursor,
self.probability,
self.fdr, self.pep,
self.genecentric) | [
"def",
"set_feature_generator",
"(",
"self",
")",
":",
"self",
".",
"features",
"=",
"preparation",
".",
"build_proteintable",
"(",
"self",
".",
"lookup",
",",
"self",
".",
"headerfields",
",",
"self",
".",
"mergecutoff",
",",
"self",
".",
"isobaric",
",",
"self",
".",
"precursor",
",",
"self",
".",
"probability",
",",
"self",
".",
"fdr",
",",
"self",
".",
"pep",
",",
"self",
".",
"genecentric",
")"
] | Generates proteins with quant from the lookup table | [
"Generates",
"proteins",
"with",
"quant",
"from",
"the",
"lookup",
"table"
] | python | train |
mdeous/fatbotslim | fatbotslim/irc/__init__.py | https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/irc/__init__.py#L22-L39 | def u(s, errors='ignore'):
"""
Automatically detects given string's encoding and returns its unicode form.
Decoding errors are handled according to the `errors` argument, see `unicode()`
documentation for more details.
:param s: string to decode.
:type s: str
:param errors: decoding error handling behaviour.
:type errors: str
:return: decoded string
:rtype: unicode
"""
try:
return s.decode('utf-8', errors=errors)
except UnicodeDecodeError:
encoding = chardet.detect(s)['encoding']
return unicode(s, encoding=encoding, errors=errors) | [
"def",
"u",
"(",
"s",
",",
"errors",
"=",
"'ignore'",
")",
":",
"try",
":",
"return",
"s",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"errors",
")",
"except",
"UnicodeDecodeError",
":",
"encoding",
"=",
"chardet",
".",
"detect",
"(",
"s",
")",
"[",
"'encoding'",
"]",
"return",
"unicode",
"(",
"s",
",",
"encoding",
"=",
"encoding",
",",
"errors",
"=",
"errors",
")"
] | Automatically detects given string's encoding and returns its unicode form.
Decoding errors are handled according to the `errors` argument, see `unicode()`
documentation for more details.
:param s: string to decode.
:type s: str
:param errors: decoding error handling behaviour.
:type errors: str
:return: decoded string
:rtype: unicode | [
"Automatically",
"detects",
"given",
"string",
"s",
"encoding",
"and",
"returns",
"its",
"unicode",
"form",
".",
"Decoding",
"errors",
"are",
"handled",
"according",
"to",
"the",
"errors",
"argument",
"see",
"unicode",
"()",
"documentation",
"for",
"more",
"details",
"."
] | python | train |
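A quick check of the fallback path, assuming Python 2 with chardet installed. Note that with the default errors='ignore' the utf-8 decode never raises, so errors='strict' is needed before chardet is ever consulted:

latin = u'café au lait, résumé'.encode('latin-1')  # not valid UTF-8
print(u(latin, errors='strict'))  # utf-8 decode raises; chardet guesses a latin-1 flavour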
thespacedoctor/sherlock | sherlock/imports/ned_d.py | https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/imports/ned_d.py#L445-L553 | def _query_ned_and_add_results_to_database(
self,
batchCount):
""" query ned and add results to database
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``_query_ned_and_add_results_to_database`` method')
tableName = self.dbTableName
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print "requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals()
search = namesearch(
log=self.log,
names=self.theseIds.keys(),
quiet=True
)
results = search.get()
print "results returned from ned -- starting to add to database" % locals()
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
colList = ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter",
"ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "in_ned"]
if not len(results):
for k, v in self.theseIds.iteritems():
dictList.append({
"in_ned": 0,
"primaryID": v
})
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in thisDict.iteritems():
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, str) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
if thisDict["ra"] != "null" and thisDict["dec"] != "null":
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
else:
thisDict["raDeg"] = None
thisDict["decDeg"] = None
thisDict["in_ned"] = 1
thisDict["eb_v"] = thisDict["eb-v"]
row = {}
row["primary_ned_id"] = thisDict["input_name"]
try:
row["primaryID"] = self.theseIds[thisDict["input_name"]]
for c in colList:
if thisDict[c] == "null":
row[c] = None
else:
row[c] = thisDict[c]
dictList.append(row)
except:
g = thisDict["input_name"]
self.log.error(
"Cannot find database table %(tableName)s primaryID for '%(g)s'\n\n" % locals())
dictList.append({
"in_ned": 0,
"primary_ned_id": thisDict["input_name"]
})
else:
dictList.append({
"primary_ned_id": thisDict["input_name"],
"in_ned": 0,
"primaryID": self.theseIds[thisDict["input_name"]]
})
self.log.debug(
'completed the ``_query_ned_and_add_results_to_database`` method')
return dictList | [
"def",
"_query_ned_and_add_results_to_database",
"(",
"self",
",",
"batchCount",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'starting the ``_query_ned_and_add_results_to_database`` method'",
")",
"tableName",
"=",
"self",
".",
"dbTableName",
"# ASTROCALC UNIT CONVERTER OBJECT",
"converter",
"=",
"unit_conversion",
"(",
"log",
"=",
"self",
".",
"log",
")",
"# QUERY NED WITH BATCH",
"totalCount",
"=",
"len",
"(",
"self",
".",
"theseIds",
")",
"print",
"\"requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)\"",
"%",
"locals",
"(",
")",
"search",
"=",
"namesearch",
"(",
"log",
"=",
"self",
".",
"log",
",",
"names",
"=",
"self",
".",
"theseIds",
".",
"keys",
"(",
")",
",",
"quiet",
"=",
"True",
")",
"results",
"=",
"search",
".",
"get",
"(",
")",
"print",
"\"results returned from ned -- starting to add to database\"",
"%",
"locals",
"(",
")",
"# CLEAN THE RETURNED DATA AND UPDATE DATABASE",
"totalCount",
"=",
"len",
"(",
"results",
")",
"count",
"=",
"0",
"sqlQuery",
"=",
"\"\"",
"dictList",
"=",
"[",
"]",
"colList",
"=",
"[",
"\"redshift_quality\"",
",",
"\"redshift\"",
",",
"\"hierarchy\"",
",",
"\"object_type\"",
",",
"\"major_diameter_arcmin\"",
",",
"\"morphology\"",
",",
"\"magnitude_filter\"",
",",
"\"ned_notes\"",
",",
"\"eb_v\"",
",",
"\"raDeg\"",
",",
"\"radio_morphology\"",
",",
"\"activity_type\"",
",",
"\"minor_diameter_arcmin\"",
",",
"\"decDeg\"",
",",
"\"redshift_err\"",
",",
"\"in_ned\"",
"]",
"if",
"not",
"len",
"(",
"results",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"theseIds",
".",
"iteritems",
"(",
")",
":",
"dictList",
".",
"append",
"(",
"{",
"\"in_ned\"",
":",
"0",
",",
"\"primaryID\"",
":",
"v",
"}",
")",
"for",
"thisDict",
"in",
"results",
":",
"thisDict",
"[",
"\"tableName\"",
"]",
"=",
"tableName",
"count",
"+=",
"1",
"for",
"k",
",",
"v",
"in",
"thisDict",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"v",
"or",
"len",
"(",
"v",
")",
"==",
"0",
":",
"thisDict",
"[",
"k",
"]",
"=",
"\"null\"",
"if",
"k",
"in",
"[",
"\"major_diameter_arcmin\"",
",",
"\"minor_diameter_arcmin\"",
"]",
"and",
"(",
"\":\"",
"in",
"v",
"or",
"\"?\"",
"in",
"v",
"or",
"\"<\"",
"in",
"v",
")",
":",
"thisDict",
"[",
"k",
"]",
"=",
"v",
".",
"replace",
"(",
"\":\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"?\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"<\"",
",",
"\"\"",
")",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
"and",
"'\"'",
"in",
"v",
":",
"thisDict",
"[",
"k",
"]",
"=",
"v",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")",
"if",
"\"Input name not\"",
"not",
"in",
"thisDict",
"[",
"\"input_note\"",
"]",
"and",
"\"Same object as\"",
"not",
"in",
"thisDict",
"[",
"\"input_note\"",
"]",
":",
"if",
"thisDict",
"[",
"\"ra\"",
"]",
"!=",
"\"null\"",
"and",
"thisDict",
"[",
"\"dec\"",
"]",
"!=",
"\"null\"",
":",
"thisDict",
"[",
"\"raDeg\"",
"]",
"=",
"converter",
".",
"ra_sexegesimal_to_decimal",
"(",
"ra",
"=",
"thisDict",
"[",
"\"ra\"",
"]",
")",
"thisDict",
"[",
"\"decDeg\"",
"]",
"=",
"converter",
".",
"dec_sexegesimal_to_decimal",
"(",
"dec",
"=",
"thisDict",
"[",
"\"dec\"",
"]",
")",
"else",
":",
"thisDict",
"[",
"\"raDeg\"",
"]",
"=",
"None",
"thisDict",
"[",
"\"decDeg\"",
"]",
"=",
"None",
"thisDict",
"[",
"\"in_ned\"",
"]",
"=",
"1",
"thisDict",
"[",
"\"eb_v\"",
"]",
"=",
"thisDict",
"[",
"\"eb-v\"",
"]",
"row",
"=",
"{",
"}",
"row",
"[",
"\"primary_ned_id\"",
"]",
"=",
"thisDict",
"[",
"\"input_name\"",
"]",
"try",
":",
"row",
"[",
"\"primaryID\"",
"]",
"=",
"self",
".",
"theseIds",
"[",
"thisDict",
"[",
"\"input_name\"",
"]",
"]",
"for",
"c",
"in",
"colList",
":",
"if",
"thisDict",
"[",
"c",
"]",
"==",
"\"null\"",
":",
"row",
"[",
"c",
"]",
"=",
"None",
"else",
":",
"row",
"[",
"c",
"]",
"=",
"thisDict",
"[",
"c",
"]",
"dictList",
".",
"append",
"(",
"row",
")",
"except",
":",
"g",
"=",
"thisDict",
"[",
"\"input_name\"",
"]",
"self",
".",
"log",
".",
"error",
"(",
"\"Cannot find database table %(tableName)s primaryID for '%(g)s'\\n\\n\"",
"%",
"locals",
"(",
")",
")",
"dictList",
".",
"append",
"(",
"{",
"\"in_ned\"",
":",
"0",
",",
"\"primary_ned_id\"",
":",
"thisDict",
"[",
"\"input_name\"",
"]",
"}",
")",
"else",
":",
"dictList",
".",
"append",
"(",
"{",
"\"primary_ned_id\"",
":",
"thisDict",
"[",
"\"input_name\"",
"]",
",",
"\"in_ned\"",
":",
"0",
",",
"\"primaryID\"",
":",
"self",
".",
"theseIds",
"[",
"thisDict",
"[",
"\"input_name\"",
"]",
"]",
"}",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'completed the ``_query_ned_and_add_results_to_database`` method'",
")",
"return",
"dictList"
] | query ned and add results to database
**Key Arguments:**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
"query",
"ned",
"and",
"add",
"results",
"to",
"database"
] | python | train |
VIVelev/PyDojoML | dojo/cluster/kmeans.py | https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/cluster/kmeans.py#L77-L102 | def fit(self, X):
"""The K-Means itself
"""
self._X = super().cluster(X)
candidates = []
for _ in range(self.n_runs):
self._init_random_centroids()
while True:
prev_clusters = self.clusters
self._assign_clusters()
self._move_centroids()
if np.all(prev_clusters == self.clusters):
break
self._calc_distortion()
candidates.append((self.distortion, self.centroids, self.clusters))
candidates.sort(key=lambda x: x[0])
self.distortion = candidates[0][0]
self.centroids = candidates[0][1]
self.clusters = candidates[0][2]
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"_X",
"=",
"super",
"(",
")",
".",
"cluster",
"(",
"X",
")",
"candidates",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"n_runs",
")",
":",
"self",
".",
"_init_random_centroids",
"(",
")",
"while",
"True",
":",
"prev_clusters",
"=",
"self",
".",
"clusters",
"self",
".",
"_assign_clusters",
"(",
")",
"self",
".",
"_move_centroids",
"(",
")",
"if",
"np",
".",
"all",
"(",
"prev_clusters",
"==",
"self",
".",
"clusters",
")",
":",
"break",
"self",
".",
"_calc_distortion",
"(",
")",
"candidates",
".",
"append",
"(",
"(",
"self",
".",
"distortion",
",",
"self",
".",
"centroids",
",",
"self",
".",
"clusters",
")",
")",
"candidates",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"self",
".",
"distortion",
"=",
"candidates",
"[",
"0",
"]",
"[",
"0",
"]",
"self",
".",
"centroids",
"=",
"candidates",
"[",
"0",
"]",
"[",
"1",
"]",
"self",
".",
"clusters",
"=",
"candidates",
"[",
"0",
"]",
"[",
"2",
"]",
"return",
"self"
] | The K-Means itself | [
"The",
"K",
"-",
"Means",
"itself"
] | python | train |
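The restart-and-keep-best strategy in fit() isolated into a stdlib-only sketch; run_once is a stand-in returning (distortion, centroids, clusters) tuples like the candidates list above:

import random

def run_once():
    # stand-in for one K-Means run from a random centroid initialisation
    distortion = random.random()
    return distortion, 'centroids', 'clusters'

best = min((run_once() for _ in range(10)), key=lambda c: c[0])
print('kept distortion %.3f' % best[0])  # same effect as sorting and taking [0]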
fastai/fastai | docs_src/nbval/cover.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/docs_src/nbval/cover.py#L76-L95 | def teardown_coverage(config, kernel, output_loc=None):
"""Finish coverage reporting in kernel.
The coverage should previously have been started with
setup_coverage.
"""
language = kernel.language
if language.startswith('python'):
# Teardown code does not require any input, simply execute:
msg_id = kernel.kc.execute(_python_teardown)
kernel.await_idle(msg_id, 60) # A minute should be plenty to write out coverage
# Ensure we merge our data into parent data of pytest-cov, if possible
cov = get_cov(config)
_merge_nbval_coverage_data(cov)
else:
# Warnings should be given on setup, or there might be no teardown
# for a specific language, so do nothing here
pass | [
"def",
"teardown_coverage",
"(",
"config",
",",
"kernel",
",",
"output_loc",
"=",
"None",
")",
":",
"language",
"=",
"kernel",
".",
"language",
"if",
"language",
".",
"startswith",
"(",
"'python'",
")",
":",
"# Teardown code does not require any input, simply execute:",
"msg_id",
"=",
"kernel",
".",
"kc",
".",
"execute",
"(",
"_python_teardown",
")",
"kernel",
".",
"await_idle",
"(",
"msg_id",
",",
"60",
")",
"# A minute should be plenty to write out coverage",
"# Ensure we merge our data into parent data of pytest-cov, if possible",
"cov",
"=",
"get_cov",
"(",
"config",
")",
"_merge_nbval_coverage_data",
"(",
"cov",
")",
"else",
":",
"# Warnings should be given on setup, or there might be no teardown",
"# for a specific language, so do nothing here",
"pass"
] | Finish coverage reporting in kernel.
The coverage should previously have been started with
setup_coverage. | [
"Finish",
"coverage",
"reporting",
"in",
"kernel",
"."
] | python | train |
twilio/twilio-python | twilio/rest/taskrouter/v1/workspace/activity.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/activity.py#L208-L217 | def get_instance(self, payload):
"""
Build an instance of ActivityInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.activity.ActivityInstance
:rtype: twilio.rest.taskrouter.v1.workspace.activity.ActivityInstance
"""
return ActivityInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], ) | [
"def",
"get_instance",
"(",
"self",
",",
"payload",
")",
":",
"return",
"ActivityInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"workspace_sid",
"=",
"self",
".",
"_solution",
"[",
"'workspace_sid'",
"]",
",",
")"
] | Build an instance of ActivityInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.activity.ActivityInstance
:rtype: twilio.rest.taskrouter.v1.workspace.activity.ActivityInstance | [
"Build",
"an",
"instance",
"of",
"ActivityInstance"
] | python | train |
secdev/scapy | scapy/contrib/http2.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/http2.py#L2344-L2361 | def _reduce_dynamic_table(self, new_entry_size=0):
# type: (int) -> None
"""_reduce_dynamic_table evicts entries from the dynamic table until it
fits in less than the current size limit. The optional parameter,
new_entry_size, allows the resize to happen so that a new entry of this
size fits in.
@param int new_entry_size: if called before adding a new entry, the size of the new entry in bytes (following # noqa: E501
the RFC7541 definition of the size of an entry)
@raise AssertionError
"""
assert(new_entry_size >= 0)
cur_sz = len(self)
dyn_tbl_sz = len(self._dynamic_table)
while dyn_tbl_sz > 0 and cur_sz + new_entry_size > self._dynamic_table_max_size: # noqa: E501
last_elmt_sz = len(self._dynamic_table[-1])
self._dynamic_table.pop()
dyn_tbl_sz -= 1
cur_sz -= last_elmt_sz | [
"def",
"_reduce_dynamic_table",
"(",
"self",
",",
"new_entry_size",
"=",
"0",
")",
":",
"# type: (int) -> None",
"assert",
"(",
"new_entry_size",
">=",
"0",
")",
"cur_sz",
"=",
"len",
"(",
"self",
")",
"dyn_tbl_sz",
"=",
"len",
"(",
"self",
".",
"_dynamic_table",
")",
"while",
"dyn_tbl_sz",
">",
"0",
"and",
"cur_sz",
"+",
"new_entry_size",
">",
"self",
".",
"_dynamic_table_max_size",
":",
"# noqa: E501",
"last_elmt_sz",
"=",
"len",
"(",
"self",
".",
"_dynamic_table",
"[",
"-",
"1",
"]",
")",
"self",
".",
"_dynamic_table",
".",
"pop",
"(",
")",
"dyn_tbl_sz",
"-=",
"1",
"cur_sz",
"-=",
"last_elmt_sz"
] | _reduce_dynamic_table evicts entries from the dynamic table until it
fits in less than the current size limit. The optional parameter,
new_entry_size, allows the resize to happen so that a new entry of this
size fits in.
@param int new_entry_size: if called before adding a new entry, the size of the new entry in bytes (following # noqa: E501
the RFC7541 definition of the size of an entry)
@raise AssertionError | [
"_reduce_dynamic_table",
"evicts",
"entries",
"from",
"the",
"dynamic",
"table",
"until",
"it",
"fits",
"in",
"less",
"than",
"the",
"current",
"size",
"limit",
".",
"The",
"optional",
"parameter",
"new_entry_size",
"allows",
"the",
"resize",
"to",
"happen",
"so",
"that",
"a",
"new",
"entry",
"of",
"this",
"size",
"fits",
"in",
"."
] | python | train |
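The eviction invariant can be isolated in a few lines; entry sizes follow the RFC 7541 rule the docstring cites (name + value + 32 bytes of overhead), while the deque and the 100-byte cap are illustrative:

from collections import deque

table = deque()  # newest entries on the left, like HPACK's dynamic table
MAX_SIZE = 100

def entry_size(entry):
    name, value = entry
    return len(name) + len(value) + 32  # RFC 7541 section 4.1

def reduce_table(new_entry_size=0):
    current = sum(entry_size(e) for e in table)
    while table and current + new_entry_size > MAX_SIZE:
        current -= entry_size(table.pop())  # evict the oldest entry first

table.appendleft((b'x-demo', b'1'))  # 39 bytes under the RFC rule
reduce_table(new_entry_size=90)      # 39 + 90 > 100 forces the entry out
print(len(table))                    # 0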
dpkp/kafka-python | kafka/client_async.py | https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client_async.py#L329-L339 | def maybe_connect(self, node_id, wakeup=True):
"""Queues a node for asynchronous connection during the next .poll()"""
if self._can_connect(node_id):
self._connecting.add(node_id)
# Wakeup signal is useful in case another thread is
# blocked waiting for incoming network traffic while holding
# the client lock in poll().
if wakeup:
self.wakeup()
return True
return False | [
"def",
"maybe_connect",
"(",
"self",
",",
"node_id",
",",
"wakeup",
"=",
"True",
")",
":",
"if",
"self",
".",
"_can_connect",
"(",
"node_id",
")",
":",
"self",
".",
"_connecting",
".",
"add",
"(",
"node_id",
")",
"# Wakeup signal is useful in case another thread is",
"# blocked waiting for incoming network traffic while holding",
"# the client lock in poll().",
"if",
"wakeup",
":",
"self",
".",
"wakeup",
"(",
")",
"return",
"True",
"return",
"False"
] | Queues a node for asynchronous connection during the next .poll() | [
"Queues",
"a",
"node",
"for",
"asynchronous",
"connection",
"during",
"the",
"next",
".",
"poll",
"()"
] | python | train |
secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L313-L350 | def generate_output(line='0', short=None, name=None, value=None,
is_parent=False, colorize=True):
"""
The function for formatting CLI output results.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
short (:obj:`str`): The optional abbreviated name for a field.
See hr.py for values.
name (:obj:`str`): The optional name for a field. See hr.py for values.
value (:obj:`str`): The field data (required).
is_parent (:obj:`bool`): Set to True if the field value has sub-items
(dicts/lists). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI colors.
Defaults to True.
Returns:
str: The generated output.
"""
# TODO: so ugly
output = '{0}{1}{2}{3}{4}{5}{6}{7}\n'.format(
LINES['{0}{1}'.format(line, 'C' if colorize else '')] if (
line in LINES.keys()) else '',
COLOR_DEPTH[line] if (colorize and line in COLOR_DEPTH) else '',
ANSI['b'],
short if short is not None else (
name if (name is not None) else ''
),
'' if (name is None or short is None) else ' ({0})'.format(
name),
'' if (name is None and short is None) else ': ',
ANSI['end'] if colorize else '',
'' if is_parent else value
)
return output | [
"def",
"generate_output",
"(",
"line",
"=",
"'0'",
",",
"short",
"=",
"None",
",",
"name",
"=",
"None",
",",
"value",
"=",
"None",
",",
"is_parent",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"# TODO: so ugly",
"output",
"=",
"'{0}{1}{2}{3}{4}{5}{6}{7}\\n'",
".",
"format",
"(",
"LINES",
"[",
"'{0}{1}'",
".",
"format",
"(",
"line",
",",
"'C'",
"if",
"colorize",
"else",
"''",
")",
"]",
"if",
"(",
"line",
"in",
"LINES",
".",
"keys",
"(",
")",
")",
"else",
"''",
",",
"COLOR_DEPTH",
"[",
"line",
"]",
"if",
"(",
"colorize",
"and",
"line",
"in",
"COLOR_DEPTH",
")",
"else",
"''",
",",
"ANSI",
"[",
"'b'",
"]",
",",
"short",
"if",
"short",
"is",
"not",
"None",
"else",
"(",
"name",
"if",
"(",
"name",
"is",
"not",
"None",
")",
"else",
"''",
")",
",",
"''",
"if",
"(",
"name",
"is",
"None",
"or",
"short",
"is",
"None",
")",
"else",
"' ({0})'",
".",
"format",
"(",
"name",
")",
",",
"''",
"if",
"(",
"name",
"is",
"None",
"and",
"short",
"is",
"None",
")",
"else",
"': '",
",",
"ANSI",
"[",
"'end'",
"]",
"if",
"colorize",
"else",
"''",
",",
"''",
"if",
"is_parent",
"else",
"value",
")",
"return",
"output"
] | The function for formatting CLI output results.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
short (:obj:`str`): The optional abbreviated name for a field.
See hr.py for values.
name (:obj:`str`): The optional name for a field. See hr.py for values.
value (:obj:`str`): The field data (required).
is_parent (:obj:`bool`): Set to True if the field value has sub-items
(dicts/lists). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI colors.
Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"formatting",
"CLI",
"output",
"results",
"."
] | python | train |
chrisspen/burlap | burlap/apache.py | https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/apache.py#L485-L516 | def configure_modevasive(self):
"""
Installs the mod-evasive Apache module for combating DDOS attacks.
https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache
"""
r = self.local_renderer
if r.env.modevasive_enabled:
self.install_packages()
# Write conf for each Ubuntu version since they don't conflict.
fn = r.render_to_file('apache/apache_modevasive.template.conf')
# Ubuntu 12.04
r.put(
local_path=fn,
remote_path='/etc/apache2/mods-available/mod-evasive.conf',
use_sudo=True)
# Ubuntu 14.04
r.put(
local_path=fn,
remote_path='/etc/apache2/mods-available/evasive.conf',
use_sudo=True)
self.enable_mod('evasive')
else:
# print('self.last_manifest:', self.last_manifest)
# print('a:', self.last_manifest.apache_modevasive_enabled)
# print('b:', self.last_manifest.modevasive_enabled)
if self.last_manifest.modevasive_enabled:
self.disable_mod('evasive') | [
"def",
"configure_modevasive",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"local_renderer",
"if",
"r",
".",
"env",
".",
"modevasive_enabled",
":",
"self",
".",
"install_packages",
"(",
")",
"# Write conf for each Ubuntu version since they don't conflict.",
"fn",
"=",
"r",
".",
"render_to_file",
"(",
"'apache/apache_modevasive.template.conf'",
")",
"# Ubuntu 12.04",
"r",
".",
"put",
"(",
"local_path",
"=",
"fn",
",",
"remote_path",
"=",
"'/etc/apache2/mods-available/mod-evasive.conf'",
",",
"use_sudo",
"=",
"True",
")",
"# Ubuntu 14.04",
"r",
".",
"put",
"(",
"local_path",
"=",
"fn",
",",
"remote_path",
"=",
"'/etc/apache2/mods-available/evasive.conf'",
",",
"use_sudo",
"=",
"True",
")",
"self",
".",
"enable_mod",
"(",
"'evasive'",
")",
"else",
":",
"# print('self.last_manifest:', self.last_manifest)",
"# print('a:', self.last_manifest.apache_modevasive_enabled)",
"# print('b:', self.last_manifest.modevasive_enabled)",
"if",
"self",
".",
"last_manifest",
".",
"modevasive_enabled",
":",
"self",
".",
"disable_mod",
"(",
"'evasive'",
")"
] | Installs the mod-evasive Apache module for combating DDOS attacks.
https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache | [
"Installs",
"the",
"mod",
"-",
"evasive",
"Apache",
"module",
"for",
"combating",
"DDOS",
"attacks",
"."
] | python | valid |
aacanakin/glim | glim/command.py | https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/command.py#L125-L144 | def match(self, args):
"""
Function dispatches the active command line utility.
Args
----
args (argparse.parse_args()):
The parsed arguments using parser.parse_args() function.
Returns
-------
command (glim.command.Command): the active command object.
"""
command = None
for c in self.commands:
if c.name == args.which:
c.args = args
command = c
break
return command | [
"def",
"match",
"(",
"self",
",",
"args",
")",
":",
"command",
"=",
"None",
"for",
"c",
"in",
"self",
".",
"commands",
":",
"if",
"c",
".",
"name",
"==",
"args",
".",
"which",
":",
"c",
".",
"args",
"=",
"args",
"command",
"=",
"c",
"break",
"return",
"command"
] | Function dispatches the active command line utility.
Args
----
args (argparse.parse_args()):
The parsed arguments using parser.parse_args() function.
Returns
-------
command (glim.command.Command): the active command object. | [
"Function",
"dispatches",
"the",
"active",
"command",
"line",
"utility",
"."
] | python | train |
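match() keys on an args.which attribute; with argparse that attribute is typically installed via set_defaults on each subparser, as in this standalone sketch:

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
new_cmd = subparsers.add_parser('new')
new_cmd.set_defaults(which='new')  # the value match() compares against c.name

args = parser.parse_args(['new'])
print(args.which)  # 'new'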
batiste/django-page-cms | pages/admin/views.py | https://github.com/batiste/django-page-cms/blob/3c72111eb7c3997a63c462c1776ffd8ce8c50a5d/pages/admin/views.py#L181-L184 | def get_content(request, page_id, content_id):
"""Get the content for a particular page"""
content = Content.objects.get(pk=content_id)
return HttpResponse(content.body) | [
"def",
"get_content",
"(",
"request",
",",
"page_id",
",",
"content_id",
")",
":",
"content",
"=",
"Content",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"content_id",
")",
"return",
"HttpResponse",
"(",
"content",
".",
"body",
")"
] | Get the content for a particular page | [
"Get",
"the",
"content",
"for",
"a",
"particular",
"page"
] | python | train |
xolox/python-coloredlogs | scripts/generate-screenshots.py | https://github.com/xolox/python-coloredlogs/blob/1cbf0c6bbee400c6ddbc43008143809934ec3e79/scripts/generate-screenshots.py#L70-L84 | def main():
"""Command line interface."""
coloredlogs.install(level='debug')
arguments = sys.argv[1:]
if arguments:
interpret_script(arguments[0])
else:
logger.notice(compact("""
This script requires the 'urxvt' terminal emulator and the
ImageMagick command line programs 'convert' and 'import' to be
installed. Don't switch windows while the screenshots are being
generated because it seems that 'import' can only take screenshots
of foreground windows.
"""))
generate_screenshots() | [
"def",
"main",
"(",
")",
":",
"coloredlogs",
".",
"install",
"(",
"level",
"=",
"'debug'",
")",
"arguments",
"=",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
"if",
"arguments",
":",
"interpret_script",
"(",
"arguments",
"[",
"0",
"]",
")",
"else",
":",
"logger",
".",
"notice",
"(",
"compact",
"(",
"\"\"\"\n This script requires the 'urxvt' terminal emulator and the\n ImageMagick command line programs 'convert' and 'import' to be\n installed. Don't switch windows while the screenshots are being\n generated because it seems that 'import' can only take screenshots\n of foreground windows.\n \"\"\"",
")",
")",
"generate_screenshots",
"(",
")"
] | Command line interface. | [
"Command",
"line",
"interface",
"."
] | python | train |
log2timeline/plaso | plaso/engine/knowledge_base.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/knowledge_base.py#L125-L142 | def GetHostname(self, session_identifier=CURRENT_SESSION):
"""Retrieves the hostname related to the event.
If the hostname is not stored in the event it is determined based
on the preprocessing information that is stored inside the storage file.
Args:
session_identifier (Optional[str])): session identifier, where
CURRENT_SESSION represents the active session.
Returns:
str: hostname.
"""
hostname_artifact = self._hostnames.get(session_identifier, None)
if not hostname_artifact:
return ''
return hostname_artifact.name or '' | [
"def",
"GetHostname",
"(",
"self",
",",
"session_identifier",
"=",
"CURRENT_SESSION",
")",
":",
"hostname_artifact",
"=",
"self",
".",
"_hostnames",
".",
"get",
"(",
"session_identifier",
",",
"None",
")",
"if",
"not",
"hostname_artifact",
":",
"return",
"''",
"return",
"hostname_artifact",
".",
"name",
"or",
"''"
] | Retrieves the hostname related to the event.
If the hostname is not stored in the event it is determined based
on the preprocessing information that is stored inside the storage file.
Args:
session_identifier (Optional[str])): session identifier, where
CURRENT_SESSION represents the active session.
Returns:
str: hostname. | [
"Retrieves",
"the",
"hostname",
"related",
"to",
"the",
"event",
"."
] | python | train |
PrefPy/prefpy | prefpy/utilityFunction.py | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/utilityFunction.py#L115-L128 | def getScoringVector(self, orderVector):
"""
Returns the scoring vector [1,0,0,...,0]. This function is called by getUtilities()
which is implemented in the parent class.
:ivar list<int> orderVector: A list of integer representations for each candidate ordered
from most preferred to least.
"""
scoringVector = []
scoringVector.append(1)
for i in range(1, len(orderVector)):
scoringVector.append(0)
return scoringVector | [
"def",
"getScoringVector",
"(",
"self",
",",
"orderVector",
")",
":",
"scoringVector",
"=",
"[",
"]",
"scoringVector",
".",
"append",
"(",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"orderVector",
")",
")",
":",
"scoringVector",
".",
"append",
"(",
"0",
")",
"return",
"scoringVector"
] | Returns the scoring vector [1,0,0,...,0]. This function is called by getUtilities()
which is implemented in the parent class.
:ivar list<int> orderVector: A list of integer representations for each candidate ordered
from most preferred to least. | [
"Returns",
"the",
"scoring",
"vector",
"[",
"1",
"0",
"0",
"...",
"0",
"]",
".",
"This",
"function",
"is",
"called",
"by",
"getUtilities",
"()",
"which",
"is",
"implemented",
"in",
"the",
"parent",
"class",
"."
] | python | train |
zero-os/0-core | client/py-client/zeroos/core0/client/client.py | https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L734-L750 | def raw(self, command, arguments, queue=None, max_time=None, stream=False, tags=None, id=None):
"""
Implements the low level command call, this needs to build the command structure
and push it on the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param queue: command queue (commands on the same queue are executed sequentially)
:param max_time: kill job server side if it exceeded this amount of seconds
:param stream: If True, process stdout and stderr are pushed to a special queue (stream:<id>) so
client can stream output
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Response object
"""
raise NotImplementedError()
"def",
"raw",
"(",
"self",
",",
"command",
",",
"arguments",
",",
"queue",
"=",
"None",
",",
"max_time",
"=",
"None",
",",
"stream",
"=",
"False",
",",
"tags",
"=",
"None",
",",
"id",
"=",
"None",
")",
":",
"raise",
"NotImplemented",
"(",
")"
] | Implements the low level command call, this needs to build the command structure
and push it on the correct queue.
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param queue: command queue (commands on the same queue are executed sequentially)
:param max_time: kill job server side if it exceeded this amount of seconds
:param stream: If True, process stdout and stderr are pushed to a special queue (stream:<id>) so
client can stream output
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Response object | [
"Implements",
"the",
"low",
"level",
"command",
"call",
"this",
"needs",
"to",
"build",
"the",
"command",
"structure",
"and",
"push",
"it",
"on",
"the",
"correct",
"queue",
"."
] | python | train |
daler/gffutils | gffutils/parser.py | https://github.com/daler/gffutils/blob/6f7f547cad898738a1bd0a999fd68ba68db2c524/gffutils/parser.py#L175-L360 | def _split_keyvals(keyval_str, dialect=None):
"""
Given the string attributes field of a GFF-like line, split it into an
attributes dictionary and a "dialect" dictionary which contains information
needed to reconstruct the original string.
Lots of logic here to handle all the corner cases.
If `dialect` is None, then do all the logic to infer a dialect from this
attribute string.
Otherwise, use the provided dialect (and return it at the end).
"""
def _unquote_quals(quals, dialect):
"""
Handles the unquoting (decoding) of percent-encoded characters.
See notes on encoding/decoding above.
"""
if not constants.ignore_url_escape_characters and dialect['fmt'] == 'gff3':
for key, vals in quals.items():
unquoted = [urllib.parse.unquote(v) for v in vals]
quals[key] = unquoted
return quals
infer_dialect = False
if dialect is None:
# Make a copy of default dialect so it can be modified as needed
dialect = copy.copy(constants.dialect)
infer_dialect = True
from gffutils import feature
quals = feature.dict_class()
if not keyval_str:
return quals, dialect
# If a dialect was provided, then use that directly.
if not infer_dialect:
if dialect['trailing semicolon']:
keyval_str = keyval_str.rstrip(';')
parts = keyval_str.split(dialect['field separator'])
kvsep = dialect['keyval separator']
if dialect['leading semicolon']:
pieces = []
for p in parts:
if p and p[0] == ';':
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
if dialect['fmt'] == 'gff3':
key_vals = [p.split(kvsep) for p in parts]
else:
leadingsemicolon = dialect['leading semicolon']
pieces = []
for i, p in enumerate(parts):
if i == 0 and leadingsemicolon:
p = p[1:]
pieces.append(p.strip().split(kvsep))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
quoted = dialect['quoted GFF2 values']
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
try:
quals[key]
except KeyError:
quals[key] = []
if quoted:
if (len(val) > 0 and val[0] == '"' and val[-1] == '"'):
val = val[1:-1]
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
quals[key].extend(vals)
quals = _unquote_quals(quals, dialect)
return quals, dialect
# If we got here, then we need to infer the dialect....
#
# Reset the order to an empty list so that it will only be populated with
# keys that are found in the file.
dialect['order'] = []
# ensembl GTF has trailing semicolon
if keyval_str[-1] == ';':
keyval_str = keyval_str[:-1]
dialect['trailing semicolon'] = True
# GFF2/GTF has a semicolon with at least one space after it.
# Spaces can be on both sides (e.g. wormbase)
# GFF3 works with no spaces.
# So split on the first one we can recognize...
for sep in (' ; ', '; ', ';'):
parts = keyval_str.split(sep)
if len(parts) > 1:
dialect['field separator'] = sep
break
# Is it GFF3? They have key-vals separated by "="
if gff3_kw_pat.match(parts[0]):
key_vals = [p.split('=') for p in parts]
dialect['fmt'] = 'gff3'
dialect['keyval separator'] = '='
# Otherwise, key-vals separated by space. Key is first item.
else:
dialect['keyval separator'] = " "
pieces = []
for p in parts:
# Fix misplaced semicolons in keys in some GFF2 files
if p and p[0] == ';':
p = p[1:]
dialect['leading semicolon'] = True
pieces.append(p.strip().split(' '))
key_vals = [(p[0], " ".join(p[1:])) for p in pieces]
for item in key_vals:
# Easy if it follows spec
if len(item) == 2:
key, val = item
# Only key provided?
elif len(item) == 1:
key = item[0]
val = ''
# Pathological cases where values of a key have within them the key-val
# separator, e.g.,
# Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126
else:
key = item[0]
val = dialect['keyval separator'].join(item[1:])
# Is the key already in there?
if key in quals:
dialect['repeated keys'] = True
else:
quals[key] = []
# Remove quotes in GFF2
if len(val) > 0 and val[0] == '"' and val[-1] == '"':
val = val[1:-1]
dialect['quoted GFF2 values'] = True
if val:
# TODO: if there are extra commas for a value, just use empty
# strings
# quals[key].extend([v for v in val.split(',') if v])
vals = val.split(',')
if (len(vals) > 1) and dialect['repeated keys']:
raise AttributeStringError(
"Internally inconsistent attributes formatting: "
"some have repeated keys, some do not.")
quals[key].extend(vals)
# keep track of the order of keys
dialect['order'].append(key)
if (
(dialect['keyval separator'] == ' ') and
(dialect['quoted GFF2 values'])
):
dialect['fmt'] = 'gtf'
quals = _unquote_quals(quals, dialect)
return quals, dialect | [
"def",
"_split_keyvals",
"(",
"keyval_str",
",",
"dialect",
"=",
"None",
")",
":",
"def",
"_unquote_quals",
"(",
"quals",
",",
"dialect",
")",
":",
"\"\"\"\n Handles the unquoting (decoding) of percent-encoded characters.\n\n See notes on encoding/decoding above.\n \"\"\"",
"if",
"not",
"constants",
".",
"ignore_url_escape_characters",
"and",
"dialect",
"[",
"'fmt'",
"]",
"==",
"'gff3'",
":",
"for",
"key",
",",
"vals",
"in",
"quals",
".",
"items",
"(",
")",
":",
"unquoted",
"=",
"[",
"urllib",
".",
"parse",
".",
"unquote",
"(",
"v",
")",
"for",
"v",
"in",
"vals",
"]",
"quals",
"[",
"key",
"]",
"=",
"unquoted",
"return",
"quals",
"infer_dialect",
"=",
"False",
"if",
"dialect",
"is",
"None",
":",
"# Make a copy of default dialect so it can be modified as needed",
"dialect",
"=",
"copy",
".",
"copy",
"(",
"constants",
".",
"dialect",
")",
"infer_dialect",
"=",
"True",
"from",
"gffutils",
"import",
"feature",
"quals",
"=",
"feature",
".",
"dict_class",
"(",
")",
"if",
"not",
"keyval_str",
":",
"return",
"quals",
",",
"dialect",
"# If a dialect was provided, then use that directly.",
"if",
"not",
"infer_dialect",
":",
"if",
"dialect",
"[",
"'trailing semicolon'",
"]",
":",
"keyval_str",
"=",
"keyval_str",
".",
"rstrip",
"(",
"';'",
")",
"parts",
"=",
"keyval_str",
".",
"split",
"(",
"dialect",
"[",
"'field separator'",
"]",
")",
"kvsep",
"=",
"dialect",
"[",
"'keyval separator'",
"]",
"if",
"dialect",
"[",
"'leading semicolon'",
"]",
":",
"pieces",
"=",
"[",
"]",
"for",
"p",
"in",
"parts",
":",
"if",
"p",
"and",
"p",
"[",
"0",
"]",
"==",
"';'",
":",
"p",
"=",
"p",
"[",
"1",
":",
"]",
"pieces",
".",
"append",
"(",
"p",
".",
"strip",
"(",
")",
".",
"split",
"(",
"kvsep",
")",
")",
"key_vals",
"=",
"[",
"(",
"p",
"[",
"0",
"]",
",",
"\" \"",
".",
"join",
"(",
"p",
"[",
"1",
":",
"]",
")",
")",
"for",
"p",
"in",
"pieces",
"]",
"if",
"dialect",
"[",
"'fmt'",
"]",
"==",
"'gff3'",
":",
"key_vals",
"=",
"[",
"p",
".",
"split",
"(",
"kvsep",
")",
"for",
"p",
"in",
"parts",
"]",
"else",
":",
"leadingsemicolon",
"=",
"dialect",
"[",
"'leading semicolon'",
"]",
"pieces",
"=",
"[",
"]",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"parts",
")",
":",
"if",
"i",
"==",
"0",
"and",
"leadingsemicolon",
":",
"p",
"=",
"p",
"[",
"1",
":",
"]",
"pieces",
".",
"append",
"(",
"p",
".",
"strip",
"(",
")",
".",
"split",
"(",
"kvsep",
")",
")",
"key_vals",
"=",
"[",
"(",
"p",
"[",
"0",
"]",
",",
"\" \"",
".",
"join",
"(",
"p",
"[",
"1",
":",
"]",
")",
")",
"for",
"p",
"in",
"pieces",
"]",
"quoted",
"=",
"dialect",
"[",
"'quoted GFF2 values'",
"]",
"for",
"item",
"in",
"key_vals",
":",
"# Easy if it follows spec",
"if",
"len",
"(",
"item",
")",
"==",
"2",
":",
"key",
",",
"val",
"=",
"item",
"# Only key provided?",
"elif",
"len",
"(",
"item",
")",
"==",
"1",
":",
"key",
"=",
"item",
"[",
"0",
"]",
"val",
"=",
"''",
"else",
":",
"key",
"=",
"item",
"[",
"0",
"]",
"val",
"=",
"dialect",
"[",
"'keyval separator'",
"]",
".",
"join",
"(",
"item",
"[",
"1",
":",
"]",
")",
"try",
":",
"quals",
"[",
"key",
"]",
"except",
"KeyError",
":",
"quals",
"[",
"key",
"]",
"=",
"[",
"]",
"if",
"quoted",
":",
"if",
"(",
"len",
"(",
"val",
")",
">",
"0",
"and",
"val",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"val",
"[",
"-",
"1",
"]",
"==",
"'\"'",
")",
":",
"val",
"=",
"val",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"val",
":",
"# TODO: if there are extra commas for a value, just use empty",
"# strings",
"# quals[key].extend([v for v in val.split(',') if v])",
"vals",
"=",
"val",
".",
"split",
"(",
"','",
")",
"quals",
"[",
"key",
"]",
".",
"extend",
"(",
"vals",
")",
"quals",
"=",
"_unquote_quals",
"(",
"quals",
",",
"dialect",
")",
"return",
"quals",
",",
"dialect",
"# If we got here, then we need to infer the dialect....",
"#",
"# Reset the order to an empty list so that it will only be populated with",
"# keys that are found in the file.",
"dialect",
"[",
"'order'",
"]",
"=",
"[",
"]",
"# ensembl GTF has trailing semicolon",
"if",
"keyval_str",
"[",
"-",
"1",
"]",
"==",
"';'",
":",
"keyval_str",
"=",
"keyval_str",
"[",
":",
"-",
"1",
"]",
"dialect",
"[",
"'trailing semicolon'",
"]",
"=",
"True",
"# GFF2/GTF has a semicolon with at least one space after it.",
"# Spaces can be on both sides (e.g. wormbase)",
"# GFF3 works with no spaces.",
"# So split on the first one we can recognize...",
"for",
"sep",
"in",
"(",
"' ; '",
",",
"'; '",
",",
"';'",
")",
":",
"parts",
"=",
"keyval_str",
".",
"split",
"(",
"sep",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"dialect",
"[",
"'field separator'",
"]",
"=",
"sep",
"break",
"# Is it GFF3? They have key-vals separated by \"=\"",
"if",
"gff3_kw_pat",
".",
"match",
"(",
"parts",
"[",
"0",
"]",
")",
":",
"key_vals",
"=",
"[",
"p",
".",
"split",
"(",
"'='",
")",
"for",
"p",
"in",
"parts",
"]",
"dialect",
"[",
"'fmt'",
"]",
"=",
"'gff3'",
"dialect",
"[",
"'keyval separator'",
"]",
"=",
"'='",
"# Otherwise, key-vals separated by space. Key is first item.",
"else",
":",
"dialect",
"[",
"'keyval separator'",
"]",
"=",
"\" \"",
"pieces",
"=",
"[",
"]",
"for",
"p",
"in",
"parts",
":",
"# Fix misplaced semicolons in keys in some GFF2 files",
"if",
"p",
"and",
"p",
"[",
"0",
"]",
"==",
"';'",
":",
"p",
"=",
"p",
"[",
"1",
":",
"]",
"dialect",
"[",
"'leading semicolon'",
"]",
"=",
"True",
"pieces",
".",
"append",
"(",
"p",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
")",
")",
"key_vals",
"=",
"[",
"(",
"p",
"[",
"0",
"]",
",",
"\" \"",
".",
"join",
"(",
"p",
"[",
"1",
":",
"]",
")",
")",
"for",
"p",
"in",
"pieces",
"]",
"for",
"item",
"in",
"key_vals",
":",
"# Easy if it follows spec",
"if",
"len",
"(",
"item",
")",
"==",
"2",
":",
"key",
",",
"val",
"=",
"item",
"# Only key provided?",
"elif",
"len",
"(",
"item",
")",
"==",
"1",
":",
"key",
"=",
"item",
"[",
"0",
"]",
"val",
"=",
"''",
"# Pathological cases where values of a key have within them the key-val",
"# separator, e.g.,",
"# Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126",
"else",
":",
"key",
"=",
"item",
"[",
"0",
"]",
"val",
"=",
"dialect",
"[",
"'keyval separator'",
"]",
".",
"join",
"(",
"item",
"[",
"1",
":",
"]",
")",
"# Is the key already in there?",
"if",
"key",
"in",
"quals",
":",
"dialect",
"[",
"'repeated keys'",
"]",
"=",
"True",
"else",
":",
"quals",
"[",
"key",
"]",
"=",
"[",
"]",
"# Remove quotes in GFF2",
"if",
"len",
"(",
"val",
")",
">",
"0",
"and",
"val",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"val",
"[",
"-",
"1",
"]",
"==",
"'\"'",
":",
"val",
"=",
"val",
"[",
"1",
":",
"-",
"1",
"]",
"dialect",
"[",
"'quoted GFF2 values'",
"]",
"=",
"True",
"if",
"val",
":",
"# TODO: if there are extra commas for a value, just use empty",
"# strings",
"# quals[key].extend([v for v in val.split(',') if v])",
"vals",
"=",
"val",
".",
"split",
"(",
"','",
")",
"if",
"(",
"len",
"(",
"vals",
")",
">",
"1",
")",
"and",
"dialect",
"[",
"'repeated keys'",
"]",
":",
"raise",
"AttributeStringError",
"(",
"\"Internally inconsistent attributes formatting: \"",
"\"some have repeated keys, some do not.\"",
")",
"quals",
"[",
"key",
"]",
".",
"extend",
"(",
"vals",
")",
"# keep track of the order of keys",
"dialect",
"[",
"'order'",
"]",
".",
"append",
"(",
"key",
")",
"if",
"(",
"(",
"dialect",
"[",
"'keyval separator'",
"]",
"==",
"' '",
")",
"and",
"(",
"dialect",
"[",
"'quoted GFF2 values'",
"]",
")",
")",
":",
"dialect",
"[",
"'fmt'",
"]",
"=",
"'gtf'",
"quals",
"=",
"_unquote_quals",
"(",
"quals",
",",
"dialect",
")",
"return",
"quals",
",",
"dialect"
] | Given the string attributes field of a GFF-like line, split it into an
attributes dictionary and a "dialect" dictionary which contains information
needed to reconstruct the original string.
Lots of logic here to handle all the corner cases.
If `dialect` is None, then do all the logic to infer a dialect from this
attribute string.
Otherwise, use the provided dialect (and return it at the end). | [
"Given",
"the",
"string",
"attributes",
"field",
"of",
"a",
"GFF",
"-",
"like",
"line",
"split",
"it",
"into",
"an",
"attributes",
"dictionary",
"and",
"a",
"dialect",
"dictionary",
"which",
"contains",
"information",
"needed",
"to",
"reconstruct",
"the",
"original",
"string",
"."
] | python | train |
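A minimal sketch of the GFF3 branch of the `_split_keyvals` logic above, assuming a well-formed attribute string (the example input is invented; the real function additionally infers the dialect and handles GFF2/GTF quirks):

from collections import OrderedDict
from urllib.parse import unquote

def split_gff3_attributes(keyval_str):
    # GFF3: ';'-separated fields, '=' between key and value,
    # comma-separated and percent-encoded values
    quals = OrderedDict()
    for part in keyval_str.rstrip(';').split(';'):
        key, _, val = part.partition('=')
        quals.setdefault(key, []).extend(unquote(v) for v in val.split(',') if v)
    return quals

print(split_gff3_attributes('ID=gene1;Alias=g1,g%201'))
# OrderedDict([('ID', ['gene1']), ('Alias', ['g1', 'g 1'])])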
tensorflow/tensorboard | tensorboard/plugins/interactive_inference/interactive_inference_plugin.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/interactive_inference/interactive_inference_plugin.py#L211-L228 | def _delete_example(self, request):
"""Deletes the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
del self.examples[index]
self.updated_example_indices = set([
i if i < index else i - 1 for i in self.updated_example_indices])
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json') | [
"def",
"_delete_example",
"(",
"self",
",",
"request",
")",
":",
"index",
"=",
"int",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'index'",
")",
")",
"if",
"index",
">=",
"len",
"(",
"self",
".",
"examples",
")",
":",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"{",
"'error'",
":",
"'invalid index provided'",
"}",
",",
"'application/json'",
",",
"code",
"=",
"400",
")",
"del",
"self",
".",
"examples",
"[",
"index",
"]",
"self",
".",
"updated_example_indices",
"=",
"set",
"(",
"[",
"i",
"if",
"i",
"<",
"index",
"else",
"i",
"-",
"1",
"for",
"i",
"in",
"self",
".",
"updated_example_indices",
"]",
")",
"self",
".",
"generate_sprite",
"(",
"[",
"ex",
".",
"SerializeToString",
"(",
")",
"for",
"ex",
"in",
"self",
".",
"examples",
"]",
")",
"return",
"http_util",
".",
"Respond",
"(",
"request",
",",
"{",
"}",
",",
"'application/json'",
")"
] | Deletes the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response. | [
"Deletes",
"the",
"specified",
"example",
"."
] | python | train |
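The index-shifting set comprehension in `_delete_example` above can be traced in isolation; the tracked indices below are hypothetical. Every index above the deleted one moves down by one (the deleted index itself also maps to index - 1):

updated_example_indices = {1, 4, 7}
index = 4  # hypothetical deletion position
shifted = {i if i < index else i - 1 for i in updated_example_indices}
print(shifted)  # {1, 3, 6}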
SiLab-Bonn/pyBAR | pybar/fei4/register_utils.py | https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register_utils.py#L182-L193 | def reset_bunch_counter(self):
'''Resetting Bunch Counter
'''
logging.info('Resetting Bunch Counter')
commands = []
commands.extend(self.register.get_commands("RunMode"))
commands.extend(self.register.get_commands("BCR"))
self.send_commands(commands)
time.sleep(0.1)
commands = []
commands.extend(self.register.get_commands("ConfMode"))
self.send_commands(commands) | [
"def",
"reset_bunch_counter",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Resetting Bunch Counter'",
")",
"commands",
"=",
"[",
"]",
"commands",
".",
"extend",
"(",
"self",
".",
"register",
".",
"get_commands",
"(",
"\"RunMode\"",
")",
")",
"commands",
".",
"extend",
"(",
"self",
".",
"register",
".",
"get_commands",
"(",
"\"BCR\"",
")",
")",
"self",
".",
"send_commands",
"(",
"commands",
")",
"time",
".",
"sleep",
"(",
"0.1",
")",
"commands",
"=",
"[",
"]",
"commands",
".",
"extend",
"(",
"self",
".",
"register",
".",
"get_commands",
"(",
"\"ConfMode\"",
")",
")",
"self",
".",
"send_commands",
"(",
"commands",
")"
] | Resetting Bunch Counter | [
"Resetting",
"Bunch",
"Counter"
] | python | train |
orbingol/NURBS-Python | geomdl/exchange.py | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/exchange.py#L159-L193 | def export_csv(obj, file_name, point_type='evalpts', **kwargs):
""" Exports control points or evaluated points as a CSV file.
:param obj: a spline geometry object
:type obj: abstract.SplineGeometry
:param file_name: output file name
:type file_name: str
:param point_type: ``ctrlpts`` for control points or ``evalpts`` for evaluated points
:type point_type: str
:raises GeomdlException: an error occurred writing the file
"""
if not 0 < obj.pdimension < 3:
raise exch.GeomdlException("Input object should be a curve or a surface")
# Pick correct points from the object
if point_type == 'ctrlpts':
points = obj.ctrlptsw if obj.rational else obj.ctrlpts
elif point_type == 'evalpts':
points = obj.evalpts
else:
raise exch.GeomdlException("Please choose a valid point type option. Possible types: ctrlpts, evalpts")
# Prepare CSV header
dim = len(points[0])
line = "dim "
for i in range(dim-1):
line += str(i + 1) + ", dim "
line += str(dim) + "\n"
# Prepare values
for pt in points:
line += ",".join([str(p) for p in pt]) + "\n"
# Write to file
return exch.write_file(file_name, line) | [
"def",
"export_csv",
"(",
"obj",
",",
"file_name",
",",
"point_type",
"=",
"'evalpts'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"0",
"<",
"obj",
".",
"pdimension",
"<",
"3",
":",
"raise",
"exch",
".",
"GeomdlException",
"(",
"\"Input object should be a curve or a surface\"",
")",
"# Pick correct points from the object",
"if",
"point_type",
"==",
"'ctrlpts'",
":",
"points",
"=",
"obj",
".",
"ctrlptsw",
"if",
"obj",
".",
"rational",
"else",
"obj",
".",
"ctrlpts",
"elif",
"point_type",
"==",
"'evalpts'",
":",
"points",
"=",
"obj",
".",
"evalpts",
"else",
":",
"raise",
"exch",
".",
"GeomdlException",
"(",
"\"Please choose a valid point type option. Possible types: ctrlpts, evalpts\"",
")",
"# Prepare CSV header",
"dim",
"=",
"len",
"(",
"points",
"[",
"0",
"]",
")",
"line",
"=",
"\"dim \"",
"for",
"i",
"in",
"range",
"(",
"dim",
"-",
"1",
")",
":",
"line",
"+=",
"str",
"(",
"i",
"+",
"1",
")",
"+",
"\", dim \"",
"line",
"+=",
"str",
"(",
"dim",
")",
"+",
"\"\\n\"",
"# Prepare values",
"for",
"pt",
"in",
"points",
":",
"line",
"+=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"p",
")",
"for",
"p",
"in",
"pt",
"]",
")",
"+",
"\"\\n\"",
"# Write to file",
"return",
"exch",
".",
"write_file",
"(",
"file_name",
",",
"line",
")"
] | Exports control points or evaluated points as a CSV file.
:param obj: a spline geometry object
:type obj: abstract.SplineGeometry
:param file_name: output file name
:type file_name: str
:param point_type: ``ctrlpts`` for control points or ``evalpts`` for evaluated points
:type point_type: str
:raises GeomdlException: an error occurred writing the file | [
"Exports",
"control",
"points",
"or",
"evaluated",
"points",
"as",
"a",
"CSV",
"file",
"."
] | python | train |
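For a hypothetical 3-D point set, the header loop in `export_csv` above produces "dim 1, dim 2, dim 3"; an equivalent standalone sketch using str.join:

points = [(0.0, 0.0, 0.0), (1.0, 2.0, 0.5)]  # invented evaluated points
dim = len(points[0])
line = "dim " + ", dim ".join(str(i + 1) for i in range(dim)) + "\n"
for pt in points:
    line += ",".join(str(p) for p in pt) + "\n"
print(line, end="")
# dim 1, dim 2, dim 3
# 0.0,0.0,0.0
# 1.0,2.0,0.5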
ToFuProject/tofu | tofu/geom/_core.py | https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/_core.py#L2228-L2261 | def get_summary(self, verb=False, max_columns=100, width=1000):
""" Summary description of the object content as a pandas DataFrame """
# Make sure the data is accessible
msg = "The data is not accessible because self.strip(2) was used !"
assert self._dstrip['strip']<2, msg
# Build the list
d = self._dStruct['dObj']
data = []
for k in self._ddef['dStruct']['order']:
if k not in d.keys():
continue
for kk in d[k].keys():
lu = [k,
self._dStruct['dObj'][k][kk]._Id._dall['Name'],
self._dStruct['dObj'][k][kk]._Id._dall['SaveName'],
self._dStruct['dObj'][k][kk]._dgeom['nP'],
self._dStruct['dObj'][k][kk]._dgeom['noccur'],
self._dStruct['dObj'][k][kk]._dgeom['mobile'],
self._dStruct['dObj'][k][kk]._dmisc['color']]
for pp in self._dextraprop['lprop']:
lu.append(self._dextraprop['d'+pp][k][kk])
data.append(lu)
# Build the pandas DataFrame
col = ['class', 'Name', 'SaveName', 'nP', 'noccur',
'mobile', 'color'] + self._dextraprop['lprop']
df = pd.DataFrame(data, columns=col)
pd.set_option('display.max_columns',max_columns)
pd.set_option('display.width',width)
if verb:
print(df)
return df | [
"def",
"get_summary",
"(",
"self",
",",
"verb",
"=",
"False",
",",
"max_columns",
"=",
"100",
",",
"width",
"=",
"1000",
")",
":",
"# Make sure the data is accessible",
"msg",
"=",
"\"The data is not accessible because self.strip(2) was used !\"",
"assert",
"self",
".",
"_dstrip",
"[",
"'strip'",
"]",
"<",
"2",
",",
"msg",
"# Build the list",
"d",
"=",
"self",
".",
"_dStruct",
"[",
"'dObj'",
"]",
"data",
"=",
"[",
"]",
"for",
"k",
"in",
"self",
".",
"_ddef",
"[",
"'dStruct'",
"]",
"[",
"'order'",
"]",
":",
"if",
"k",
"not",
"in",
"d",
".",
"keys",
"(",
")",
":",
"continue",
"for",
"kk",
"in",
"d",
"[",
"k",
"]",
".",
"keys",
"(",
")",
":",
"lu",
"=",
"[",
"k",
",",
"self",
".",
"_dStruct",
"[",
"'dObj'",
"]",
"[",
"k",
"]",
"[",
"kk",
"]",
".",
"_Id",
".",
"_dall",
"[",
"'Name'",
"]",
",",
"self",
".",
"_dStruct",
"[",
"'dObj'",
"]",
"[",
"k",
"]",
"[",
"kk",
"]",
".",
"_Id",
".",
"_dall",
"[",
"'SaveName'",
"]",
",",
"self",
".",
"_dStruct",
"[",
"'dObj'",
"]",
"[",
"k",
"]",
"[",
"kk",
"]",
".",
"_dgeom",
"[",
"'nP'",
"]",
",",
"self",
".",
"_dStruct",
"[",
"'dObj'",
"]",
"[",
"k",
"]",
"[",
"kk",
"]",
".",
"_dgeom",
"[",
"'noccur'",
"]",
",",
"self",
".",
"_dStruct",
"[",
"'dObj'",
"]",
"[",
"k",
"]",
"[",
"kk",
"]",
".",
"_dgeom",
"[",
"'mobile'",
"]",
",",
"self",
".",
"_dStruct",
"[",
"'dObj'",
"]",
"[",
"k",
"]",
"[",
"kk",
"]",
".",
"_dmisc",
"[",
"'color'",
"]",
"]",
"for",
"pp",
"in",
"self",
".",
"_dextraprop",
"[",
"'lprop'",
"]",
":",
"lu",
".",
"append",
"(",
"self",
".",
"_dextraprop",
"[",
"'d'",
"+",
"pp",
"]",
"[",
"k",
"]",
"[",
"kk",
"]",
")",
"data",
".",
"append",
"(",
"lu",
")",
"# Build the pandas DataFrame",
"col",
"=",
"[",
"'class'",
",",
"'Name'",
",",
"'SaveName'",
",",
"'nP'",
",",
"'noccur'",
",",
"'mobile'",
",",
"'color'",
"]",
"+",
"self",
".",
"_dextraprop",
"[",
"'lprop'",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
",",
"columns",
"=",
"col",
")",
"pd",
".",
"set_option",
"(",
"'display.max_columns'",
",",
"max_columns",
")",
"pd",
".",
"set_option",
"(",
"'display.width'",
",",
"width",
")",
"if",
"verb",
":",
"print",
"(",
"df",
")",
"return",
"df"
] | Summary description of the object content as a pandas DataFrame | [
"Summary",
"description",
"of",
"the",
"object",
"content",
"as",
"a",
"pandas",
"DataFrame"
] | python | train |
stevearc/dql | dql/cli.py | https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/cli.py#L487-L501 | def do_local(self, host="localhost", port=8000):
"""
Connect to a local DynamoDB instance. Use 'local off' to disable.
> local
> local host=localhost port=8001
> local off
"""
port = int(port)
if host == "off":
self._local_endpoint = None
else:
self._local_endpoint = (host, port)
self.onecmd("use %s" % self.engine.region) | [
"def",
"do_local",
"(",
"self",
",",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"8000",
")",
":",
"port",
"=",
"int",
"(",
"port",
")",
"if",
"host",
"==",
"\"off\"",
":",
"self",
".",
"_local_endpoint",
"=",
"None",
"else",
":",
"self",
".",
"_local_endpoint",
"=",
"(",
"host",
",",
"port",
")",
"self",
".",
"onecmd",
"(",
"\"use %s\"",
"%",
"self",
".",
"engine",
".",
"region",
")"
] | Connect to a local DynamoDB instance. Use 'local off' to disable.
> local
> local host=localhost port=8001
> local off | [
"Connect",
"to",
"a",
"local",
"DynamoDB",
"instance",
".",
"Use",
"local",
"off",
"to",
"disable",
"."
] | python | train |
google/apitools | apitools/base/py/encoding_helper.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/encoding_helper.py#L667-L683 | def _CheckForExistingMappings(mapping_type, message_type,
python_name, json_name):
"""Validate that no mappings exist for the given values."""
if mapping_type == 'field':
getter = GetCustomJsonFieldMapping
elif mapping_type == 'enum':
getter = GetCustomJsonEnumMapping
remapping = getter(message_type, python_name=python_name)
if remapping is not None and remapping != json_name:
raise exceptions.InvalidDataError(
'Cannot add mapping for %s "%s", already mapped to "%s"' % (
mapping_type, python_name, remapping))
remapping = getter(message_type, json_name=json_name)
if remapping is not None and remapping != python_name:
raise exceptions.InvalidDataError(
'Cannot add mapping for %s "%s", already mapped to "%s"' % (
mapping_type, json_name, remapping)) | [
"def",
"_CheckForExistingMappings",
"(",
"mapping_type",
",",
"message_type",
",",
"python_name",
",",
"json_name",
")",
":",
"if",
"mapping_type",
"==",
"'field'",
":",
"getter",
"=",
"GetCustomJsonFieldMapping",
"elif",
"mapping_type",
"==",
"'enum'",
":",
"getter",
"=",
"GetCustomJsonEnumMapping",
"remapping",
"=",
"getter",
"(",
"message_type",
",",
"python_name",
"=",
"python_name",
")",
"if",
"remapping",
"is",
"not",
"None",
"and",
"remapping",
"!=",
"json_name",
":",
"raise",
"exceptions",
".",
"InvalidDataError",
"(",
"'Cannot add mapping for %s \"%s\", already mapped to \"%s\"'",
"%",
"(",
"mapping_type",
",",
"python_name",
",",
"remapping",
")",
")",
"remapping",
"=",
"getter",
"(",
"message_type",
",",
"json_name",
"=",
"json_name",
")",
"if",
"remapping",
"is",
"not",
"None",
"and",
"remapping",
"!=",
"python_name",
":",
"raise",
"exceptions",
".",
"InvalidDataError",
"(",
"'Cannot add mapping for %s \"%s\", already mapped to \"%s\"'",
"%",
"(",
"mapping_type",
",",
"json_name",
",",
"remapping",
")",
")"
] | Validate that no mappings exist for the given values. | [
"Validate",
"that",
"no",
"mappings",
"exist",
"for",
"the",
"given",
"values",
"."
] | python | train |
PSPC-SPAC-buyandsell/von_anchor | von_anchor/anchor/base.py | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/base.py#L202-L222 | async def reseed(self, seed: str = None) -> None:
"""
Rotate key for VON anchor: generate new key, submit to ledger, update wallet.
Raise WalletState if wallet is currently closed.
:param seed: new seed for ed25519 key pair (default random)
"""
LOGGER.debug('BaseAnchor.reseed >>> seed: [SEED]')
verkey = await self.wallet.reseed_init(seed)
req_json = await ledger.build_nym_request(
self.did,
self.did,
verkey,
self.name,
(await self.get_nym_role()).token())
await self._sign_submit(req_json)
await self.wallet.reseed_apply()
LOGGER.debug('BaseAnchor.reseed <<<') | [
"async",
"def",
"reseed",
"(",
"self",
",",
"seed",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"LOGGER",
".",
"debug",
"(",
"'BaseAnchor.reseed >>> seed: [SEED]'",
")",
"verkey",
"=",
"await",
"self",
".",
"wallet",
".",
"reseed_init",
"(",
"seed",
")",
"req_json",
"=",
"await",
"ledger",
".",
"build_nym_request",
"(",
"self",
".",
"did",
",",
"self",
".",
"did",
",",
"verkey",
",",
"self",
".",
"name",
",",
"(",
"await",
"self",
".",
"get_nym_role",
"(",
")",
")",
".",
"token",
"(",
")",
")",
"await",
"self",
".",
"_sign_submit",
"(",
"req_json",
")",
"await",
"self",
".",
"wallet",
".",
"reseed_apply",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"'BaseAnchor.reseed <<<'",
")"
] | Rotate key for VON anchor: generate new key, submit to ledger, update wallet.
Raise WalletState if wallet is currently closed.
:param seed: new seed for ed25519 key pair (default random) | [
"Rotate",
"key",
"for",
"VON",
"anchor",
":",
"generate",
"new",
"key",
"submit",
"to",
"ledger",
"update",
"wallet",
".",
"Raise",
"WalletState",
"if",
"wallet",
"is",
"currently",
"closed",
"."
] | python | train |
FujiMakoto/AgentML | agentml/__init__.py | https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/__init__.py#L610-L621 | def format(self, message_format):
"""
Set the message format
:param message_format: The format to set
:type message_format: str
"""
if message_format not in self.formats:
self._log.error('Invalid Message format specified: {format}'.format(format=message_format))
return
self._log.debug('Setting message format to {format}'.format(format=message_format))
self._format = message_format | [
"def",
"format",
"(",
"self",
",",
"message_format",
")",
":",
"if",
"message_format",
"not",
"in",
"self",
".",
"formats",
":",
"self",
".",
"_log",
".",
"error",
"(",
"'Invalid Message format specified: {format}'",
".",
"format",
"(",
"format",
"=",
"message_format",
")",
")",
"return",
"self",
".",
"_log",
".",
"debug",
"(",
"'Setting message format to {format}'",
".",
"format",
"(",
"format",
"=",
"message_format",
")",
")",
"self",
".",
"_format",
"=",
"message_format"
] | Set the message format
:param message_format: The format to set
:type message_format: str | [
"Set",
"the",
"message",
"format",
":",
"param",
"message_format",
":",
"The",
"format",
"to",
"set",
":",
"type",
"message_format",
":",
"str"
] | python | train |
cloudtools/stacker | stacker/session_cache.py | https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/session_cache.py#L20-L44 | def get_session(region, profile=None):
"""Creates a boto3 session with a cache
Args:
region (str): The region for the session
profile (str): The profile for the session
Returns:
:class:`boto3.session.Session`: A boto3 session with
credential caching
"""
if profile is None:
logger.debug("No AWS profile explicitly provided. "
"Falling back to default.")
profile = default_profile
logger.debug("Building session using profile \"%s\" in region \"%s\""
% (profile, region))
session = boto3.Session(region_name=region, profile_name=profile)
c = session._session.get_component('credential_provider')
provider = c.get_provider('assume-role')
provider.cache = credential_cache
provider._prompter = ui.getpass
return session | [
"def",
"get_session",
"(",
"region",
",",
"profile",
"=",
"None",
")",
":",
"if",
"profile",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"No AWS profile explicitly provided. \"",
"\"Falling back to default.\"",
")",
"profile",
"=",
"default_profile",
"logger",
".",
"debug",
"(",
"\"Building session using profile \\\"%s\\\" in region \\\"%s\\\"\"",
"%",
"(",
"profile",
",",
"region",
")",
")",
"session",
"=",
"boto3",
".",
"Session",
"(",
"region_name",
"=",
"region",
",",
"profile_name",
"=",
"profile",
")",
"c",
"=",
"session",
".",
"_session",
".",
"get_component",
"(",
"'credential_provider'",
")",
"provider",
"=",
"c",
".",
"get_provider",
"(",
"'assume-role'",
")",
"provider",
".",
"cache",
"=",
"credential_cache",
"provider",
".",
"_prompter",
"=",
"ui",
".",
"getpass",
"return",
"session"
] | Creates a boto3 session with a cache
Args:
region (str): The region for the session
profile (str): The profile for the session
Returns:
:class:`boto3.session.Session`: A boto3 session with
credential caching | [
"Creates",
"a",
"boto3",
"session",
"with",
"a",
"cache"
] | python | train |
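A usage sketch for the cached-session helper above; the region and profile names are placeholders, and valid AWS credentials plus boto3 are assumed:

session = get_session("us-east-1", profile="deploy")  # hypothetical profile
s3 = session.client("s3")  # later assume-role calls reuse the shared credential cache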
gpennington/PyMarvel | marvel/marvel.py | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L196-L212 | def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response) | [
"def",
"get_creators",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"_call",
"(",
"Creator",
".",
"resource_url",
"(",
")",
",",
"self",
".",
"_params",
"(",
"kwargs",
")",
")",
".",
"text",
")",
"return",
"CreatorDataWrapper",
"(",
"self",
",",
"response",
")"
] | Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee | [
"Fetches",
"lists",
"of",
"creators",
".",
"get",
"/",
"v1",
"/",
"public",
"/",
"creators",
":",
"returns",
":",
"CreatorDataWrapper"
] | python | train |
hydpy-dev/hydpy | hydpy/core/timetools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/timetools.py#L972-L987 | def _guessunit(self):
"""Guess the unit of the period as the largest one, which results in
an integer duration.
"""
if not self.days % 1:
return 'd'
elif not self.hours % 1:
return 'h'
elif not self.minutes % 1:
return 'm'
elif not self.seconds % 1:
return 's'
else:
raise ValueError(
'The stepsize is not a multiple of one '
'second, which is not allowed.') | [
"def",
"_guessunit",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"days",
"%",
"1",
":",
"return",
"'d'",
"elif",
"not",
"self",
".",
"hours",
"%",
"1",
":",
"return",
"'h'",
"elif",
"not",
"self",
".",
"minutes",
"%",
"1",
":",
"return",
"'m'",
"elif",
"not",
"self",
".",
"seconds",
"%",
"1",
":",
"return",
"'s'",
"else",
":",
"raise",
"ValueError",
"(",
"'The stepsize is not a multiple of one '",
"'second, which is not allowed.'",
")"
] | Guess the unit of the period as the largest one, which results in
an integer duration. | [
"Guess",
"the",
"unit",
"of",
"the",
"period",
"as",
"the",
"largest",
"one",
"which",
"results",
"in",
"an",
"integer",
"duration",
"."
] | python | train |
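Tracing the modulo cascade in `_guessunit` above for an assumed 90-minute period shows why 'm' would be returned; days and hours come out fractional, minutes do not:

seconds = 90 * 60        # 5400
print(seconds / 86400)   # 0.0625 -> days % 1 != 0
print(seconds / 3600)    # 1.5    -> hours % 1 != 0
print(seconds / 60)      # 90.0   -> minutes % 1 == 0, so unit 'm'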
crate/crate-python | src/crate/client/http.py | https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/http.py#L318-L331 | def sql(self, stmt, parameters=None, bulk_parameters=None):
"""
Execute SQL stmt against the crate server.
"""
if stmt is None:
return None
data = _create_sql_payload(stmt, parameters, bulk_parameters)
logger.debug(
'Sending request to %s with payload: %s', self.path, data)
content = self._json_request('POST', self.path, data=data)
logger.debug("JSON response for stmt(%s): %s", stmt, content)
return content | [
"def",
"sql",
"(",
"self",
",",
"stmt",
",",
"parameters",
"=",
"None",
",",
"bulk_parameters",
"=",
"None",
")",
":",
"if",
"stmt",
"is",
"None",
":",
"return",
"None",
"data",
"=",
"_create_sql_payload",
"(",
"stmt",
",",
"parameters",
",",
"bulk_parameters",
")",
"logger",
".",
"debug",
"(",
"'Sending request to %s with payload: %s'",
",",
"self",
".",
"path",
",",
"data",
")",
"content",
"=",
"self",
".",
"_json_request",
"(",
"'POST'",
",",
"self",
".",
"path",
",",
"data",
"=",
"data",
")",
"logger",
".",
"debug",
"(",
"\"JSON response for stmt(%s): %s\"",
",",
"stmt",
",",
"content",
")",
"return",
"content"
] | Execute SQL stmt against the crate server. | [
"Execute",
"SQL",
"stmt",
"against",
"the",
"crate",
"server",
"."
] | python | train |
jobovy/galpy | galpy/potential/KuzminDiskPotential.py | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/KuzminDiskPotential.py#L171-L187 | def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Sigma (R,z)
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
return self._a*(R**2+self._a**2)**-1.5/2./nu.pi | [
"def",
"_surfdens",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"return",
"self",
".",
"_a",
"*",
"(",
"R",
"**",
"2",
"+",
"self",
".",
"_a",
"**",
"2",
")",
"**",
"-",
"1.5",
"/",
"2.",
"/",
"nu",
".",
"pi"
] | NAME:
_surfdens
PURPOSE:
evaluate the surface density
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Sigma (R,z)
HISTORY:
2018-08-19 - Written - Bovy (UofT) | [
"NAME",
":",
"_surfdens",
"PURPOSE",
":",
"evaluate",
"the",
"surface",
"density",
"INPUT",
":",
"R",
"-",
"Cylindrical",
"Galactocentric",
"radius",
"z",
"-",
"vertical",
"height",
"phi",
"-",
"azimuth",
"t",
"-",
"time",
"OUTPUT",
":",
"Sigma",
"(",
"R",
"z",
")",
"HISTORY",
":",
"2018",
"-",
"08",
"-",
"19",
"-",
"Written",
"-",
"Bovy",
"(",
"UofT",
")"
] | python | train |
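A quick numeric check of the returned Kuzmin surface density, Sigma(R) = a / (2 pi (R^2 + a^2)^(3/2)) for unit mass; the values of a and R below are arbitrary:

import numpy as nu
a, R = 1.0, 0.0
sigma = a * (R**2 + a**2) ** -1.5 / 2.0 / nu.pi
print(sigma)  # 1/(2*pi) ~= 0.15915 at the disc centre for a = 1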
sirrice/pygg | pygg/pygg.py | https://github.com/sirrice/pygg/blob/b36e19b3827e0a7d661de660b04d55a73f35896b/pygg/pygg.py#L448-L462 | def make_master_binding():
"""
wrap around ggplot() call to handle passed in data objects
"""
ggplot = make_ggplot2_binding("ggplot")
def _ggplot(data, *args, **kwargs):
data_var = data
if not isinstance(data, basestring):
data_var = "data"
else:
data = None
stmt = ggplot(data_var, *args, **kwargs)
stmt.data = data
return stmt
return _ggplot | [
"def",
"make_master_binding",
"(",
")",
":",
"ggplot",
"=",
"make_ggplot2_binding",
"(",
"\"ggplot\"",
")",
"def",
"_ggplot",
"(",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data_var",
"=",
"data",
"if",
"not",
"isinstance",
"(",
"data",
",",
"basestring",
")",
":",
"data_var",
"=",
"\"data\"",
"else",
":",
"data",
"=",
"None",
"stmt",
"=",
"ggplot",
"(",
"data_var",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"stmt",
".",
"data",
"=",
"data",
"return",
"stmt",
"return",
"_ggplot"
] | wrap around ggplot() call to handle passed in data objects | [
"wrap",
"around",
"ggplot",
"()",
"call",
"to",
"handle",
"passed",
"in",
"data",
"objects"
] | python | train |
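A stripped-down illustration of the data-swapping closure in `make_master_binding` above: a non-string data object is replaced by the literal variable name "data" in the generated call, and the object itself is carried alongside (the stub binding below just echoes its argument and is not pygg's real machinery):

def ggplot_stub(data_var, *args):
    return "ggplot(%s)" % data_var

def wrap(ggplot):
    def _ggplot(data, *args):
        data_var = data if isinstance(data, str) else "data"
        return ggplot(data_var, *args), (None if isinstance(data, str) else data)
    return _ggplot

stmt, payload = wrap(ggplot_stub)({"x": [1, 2]})
print(stmt, payload)  # ggplot(data) {'x': [1, 2]}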
pygobject/pgi | pgi/overrides/Gtk.py | https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/Gtk.py#L561-L567 | def add_from_string(self, buffer, length=-1):
"""add_from_string(buffer, length=-1)
{{ all }}
"""
return Gtk.Builder.add_from_string(self, buffer, length) | [
"def",
"add_from_string",
"(",
"self",
",",
"buffer",
",",
"length",
"=",
"-",
"1",
")",
":",
"return",
"Gtk",
".",
"Builder",
".",
"add_from_string",
"(",
"self",
",",
"buffer",
",",
"length",
")"
] | add_from_string(buffer, length=-1)
{{ all }} | [
"add_from_string",
"(",
"buffer",
"length",
"=",
"-",
"1",
")"
] | python | train |
mckib2/rawdatarinator | rawdatarinator/readMeasDataVB15.py | https://github.com/mckib2/rawdatarinator/blob/03a85fd8f5e380b424027d28e97972bd7a6a3f1b/rawdatarinator/readMeasDataVB15.py#L32-L53 | def get_yaps_by_name(root,name,afun=lambda x:x,default=None):
"""From XML root, return value of node matching attribute 'name'.
Arguments:
root (Element) Root XML node (xml.etree.ElementTree Element).
This is the root of the entire XML document, not the YAPS
subtree.
name (String) name='name' attribute of ParamLong tag to be
matched.
afun Anonymous function in the form of a lambda expression to
process the string value. Defaults to the identity function.
default Default value if node is not found. Defaults to 'None'.
"""
node = root.find("ParamMap[@name='YAPS']/ParamLong[@name='%s']/value" % name)
if node is not None:
return(afun(node.text))
else:
return(default) | [
"def",
"get_yaps_by_name",
"(",
"root",
",",
"name",
",",
"afun",
"=",
"lambda",
"x",
":",
"x",
",",
"default",
"=",
"None",
")",
":",
"node",
"=",
"root",
".",
"find",
"(",
"\"ParamMap[@name='YAPS']/ParamLong[@name='%s']/value\"",
"%",
"name",
")",
"if",
"node",
"is",
"not",
"None",
":",
"return",
"(",
"afun",
"(",
"node",
".",
"text",
")",
")",
"else",
":",
"return",
"(",
"default",
")"
] | From XML root, return value of node matching attribute 'name'.
Arguments:
root (Element) Root XML node (xml.etree.ElementTree Element).
This is the root of the entire XML document, not the YAPS
subtree.
name (String) name='name' attribute of ParamLong tag to be
matched.
afun Anonymous function in the form of a lambda expression to
process the string value. Defaults to the identity function.
default Default value if node is not found. Defaults to 'None'. | [
"From",
"XML",
"root",
"return",
"value",
"of",
"node",
"matching",
"attribute",
"name",
"."
] | python | train |
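A self-contained illustration of the attribute-predicate XPath used by `get_yaps_by_name` above; the XML snippet and parameter name are invented:

import xml.etree.ElementTree as ET
root = ET.fromstring(
    "<root><ParamMap name='YAPS'>"
    "<ParamLong name='NColumns'><value>256</value></ParamLong>"
    "</ParamMap></root>")
print(get_yaps_by_name(root, 'NColumns', afun=int, default=0))  # 256
print(get_yaps_by_name(root, 'Missing', afun=int, default=0))   # 0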
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_parser.py | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L565-L587 | def _print_help(self):
"""Custom help message to group commands by functionality."""
msg = """Commands (type help <command> for details)
CLI: help history exit quit
Session, General: set load save reset
Session, Access Control: allowaccess denyaccess clearaccess
Session, Replication: allowrep denyrep preferrep blockrep
removerep numberrep clearrep
Read Operations: get meta list log resolve
Write Operations: update create package archive
updateaccess updatereplication
Utilities: listformats listnodes search ping
Write Operation Queue: queue run edit clearqueue
Command History: Arrow Up, Arrow Down
Command Editing: Arrow Left, Arrow Right, Delete
"""
if platform.system() != "Windows":
msg += """Command Completion: Single Tab: Complete unique command
Double Tab: Display possible commands
"""
d1_cli.impl.util.print_info(msg) | [
"def",
"_print_help",
"(",
"self",
")",
":",
"msg",
"=",
"\"\"\"Commands (type help <command> for details)\n\nCLI: help history exit quit\nSession, General: set load save reset\nSession, Access Control: allowaccess denyaccess clearaccess\nSession, Replication: allowrep denyrep preferrep blockrep\n removerep numberrep clearrep\nRead Operations: get meta list log resolve\nWrite Operations: update create package archive\n updateaccess updatereplication\nUtilities: listformats listnodes search ping\nWrite Operation Queue: queue run edit clearqueue\n\nCommand History: Arrow Up, Arrow Down\nCommand Editing: Arrow Left, Arrow Right, Delete\n \"\"\"",
"if",
"platform",
".",
"system",
"(",
")",
"!=",
"\"Windows\"",
":",
"msg",
"+=",
"\"\"\"Command Completion: Single Tab: Complete unique command\n Double Tab: Display possible commands\n \"\"\"",
"d1_cli",
".",
"impl",
".",
"util",
".",
"print_info",
"(",
"msg",
")"
] | Custom help message to group commands by functionality. | [
"Custom",
"help",
"message",
"to",
"group",
"commands",
"by",
"functionality",
"."
] | python | train |
google/grr | grr/client/grr_response_client/vfs.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/vfs.py#L209-L224 | def VFSMultiOpen(pathspecs, progress_callback=None):
"""Opens multiple files specified by given path-specs.
See documentation for `VFSOpen` for more information.
Args:
pathspecs: A list of pathspec instances of files to open.
progress_callback: A callback function to call to notify about progress
Returns:
A context manager yielding file-like objects.
"""
precondition.AssertIterableType(pathspecs, rdf_paths.PathSpec)
vfs_open = functools.partial(VFSOpen, progress_callback=progress_callback)
return context.MultiContext(map(vfs_open, pathspecs)) | [
"def",
"VFSMultiOpen",
"(",
"pathspecs",
",",
"progress_callback",
"=",
"None",
")",
":",
"precondition",
".",
"AssertIterableType",
"(",
"pathspecs",
",",
"rdf_paths",
".",
"PathSpec",
")",
"vfs_open",
"=",
"functools",
".",
"partial",
"(",
"VFSOpen",
",",
"progress_callback",
"=",
"progress_callback",
")",
"return",
"context",
".",
"MultiContext",
"(",
"map",
"(",
"vfs_open",
",",
"pathspecs",
")",
")"
] | Opens multiple files specified by given path-specs.
See documentation for `VFSOpen` for more information.
Args:
pathspecs: A list of pathspec instances of files to open.
progress_callback: A callback function to call to notify about progress
Returns:
A context manager yielding file-like objects. | [
"Opens",
"multiple",
"files",
"specified",
"by",
"given",
"path",
"-",
"specs",
"."
] | python | train |
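A rough stand-in for the partial-plus-MultiContext pattern in `VFSMultiOpen` above, built on contextlib.ExitStack; this sketches the assumed behaviour and is not GRR's actual MultiContext:

import contextlib
import functools

class MultiContext:
    def __init__(self, managers):
        self._managers = list(managers)
    def __enter__(self):
        self._stack = contextlib.ExitStack()
        return [self._stack.enter_context(m) for m in self._managers]
    def __exit__(self, *exc):
        return self._stack.__exit__(*exc)

opener = functools.partial(open, mode="rb")
with MultiContext(map(opener, ["/etc/hostname", "/etc/hosts"])) as files:  # example paths
    print([f.name for f in files])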
square/pylink | pylink/jlink.py | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L3175-L3198 | def coresight_write(self, reg, data, ap=True):
"""Writes an Ap/DP register on a CoreSight DAP.
Note:
``coresight_configure()`` must be called prior to calling this method.
Args:
self (JLink): the ``JLink`` instance
reg (int): index of DP/AP register to write
data (int): data to write
ap (bool): ``True`` if writing to an Access Port register, otherwise
``False`` for Debug Port
Returns:
Number of repetitions needed until write request accepted.
Raises:
JLinkException: on hardware error
"""
ap = 1 if ap else 0
res = self._dll.JLINKARM_CORESIGHT_WriteAPDPReg(reg, ap, data)
if res < 0:
raise errors.JLinkException(res)
return res | [
"def",
"coresight_write",
"(",
"self",
",",
"reg",
",",
"data",
",",
"ap",
"=",
"True",
")",
":",
"ap",
"=",
"1",
"if",
"ap",
"else",
"0",
"res",
"=",
"self",
".",
"_dll",
".",
"JLINKARM_CORESIGHT_WriteAPDPReg",
"(",
"reg",
",",
"ap",
",",
"data",
")",
"if",
"res",
"<",
"0",
":",
"raise",
"errors",
".",
"JLinkException",
"(",
"res",
")",
"return",
"res"
] | Writes an Ap/DP register on a CoreSight DAP.
Note:
``coresight_configure()`` must be called prior to calling this method.
Args:
self (JLink): the ``JLink`` instance
reg (int): index of DP/AP register to write
data (int): data to write
ap (bool): ``True`` if writing to an Access Port register, otherwise
``False`` for Debug Port
Returns:
Number of repetitions needed until write request accepted.
Raises:
JLinkException: on hardware error | [
"Writes",
"an",
"Ap",
"/",
"DP",
"register",
"on",
"a",
"CoreSight",
"DAP",
"."
] | python | train |
loli/medpy | medpy/filter/noise.py | https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/noise.py#L168-L209 | def separable_convolution(input, weights, output=None, mode="reflect", cval=0.0, origin=0):
r"""
Calculate an n-dimensional convolution of a separable kernel with an n-dimensional input.
Achieved by calling convolution1d along the first axis, obtaining an intermediate
image, on which the next convolution1d along the second axis is called and so on.
Parameters
----------
input : array_like
Array of which to estimate the noise.
weights : ndarray
One-dimensional sequence of numbers.
output : array, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0.
Returns
-------
output : ndarray
Input image convolved with the supplied kernel.
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
convolve1d(input, weights, axes[0], output, mode, cval, origin)
for ii in range(1, len(axes)):
convolve1d(output, weights, axes[ii], output, mode, cval, origin)
else:
output[...] = input[...]
return output | [
"def",
"separable_convolution",
"(",
"input",
",",
"weights",
",",
"output",
"=",
"None",
",",
"mode",
"=",
"\"reflect\"",
",",
"cval",
"=",
"0.0",
",",
"origin",
"=",
"0",
")",
":",
"input",
"=",
"numpy",
".",
"asarray",
"(",
"input",
")",
"output",
"=",
"_ni_support",
".",
"_get_output",
"(",
"output",
",",
"input",
")",
"axes",
"=",
"list",
"(",
"range",
"(",
"input",
".",
"ndim",
")",
")",
"if",
"len",
"(",
"axes",
")",
">",
"0",
":",
"convolve1d",
"(",
"input",
",",
"weights",
",",
"axes",
"[",
"0",
"]",
",",
"output",
",",
"mode",
",",
"cval",
",",
"origin",
")",
"for",
"ii",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"axes",
")",
")",
":",
"convolve1d",
"(",
"output",
",",
"weights",
",",
"axes",
"[",
"ii",
"]",
",",
"output",
",",
"mode",
",",
"cval",
",",
"origin",
")",
"else",
":",
"output",
"[",
"...",
"]",
"=",
"input",
"[",
"...",
"]",
"return",
"output"
] | r"""
Calculate an n-dimensional convolution of a separable kernel with an n-dimensional input.
Achieved by calling convolution1d along the first axis, obtaining an intermediate
image, on which the next convolution1d along the second axis is called and so on.
Parameters
----------
input : array_like
Array of which to estimate the noise.
weights : ndarray
One-dimensional sequence of numbers.
output : array, optional
The `output` parameter passes an array in which to store the
filter output.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0.0.
Returns
-------
output : ndarray
Input image convolved with the supplied kernel. | [
"r",
"Calculate",
"a",
"n",
"-",
"dimensional",
"convolution",
"of",
"a",
"separable",
"kernel",
"to",
"a",
"n",
"-",
"dimensional",
"input",
".",
"Achieved",
"by",
"calling",
"convolution1d",
"along",
"the",
"first",
"axis",
"obtaining",
"an",
"intermediate",
"image",
"on",
"which",
"the",
"next",
"convolution1d",
"along",
"the",
"second",
"axis",
"is",
"called",
"and",
"so",
"on",
".",
"Parameters",
"----------",
"input",
":",
"array_like",
"Array",
"of",
"which",
"to",
"estimate",
"the",
"noise",
".",
"weights",
":",
"ndarray",
"One",
"-",
"dimensional",
"sequence",
"of",
"numbers",
".",
"output",
":",
"array",
"optional",
"The",
"output",
"parameter",
"passes",
"an",
"array",
"in",
"which",
"to",
"store",
"the",
"filter",
"output",
".",
"mode",
":",
"{",
"reflect",
"constant",
"nearest",
"mirror",
"wrap",
"}",
"optional",
"The",
"mode",
"parameter",
"determines",
"how",
"the",
"array",
"borders",
"are",
"handled",
"where",
"cval",
"is",
"the",
"value",
"when",
"mode",
"is",
"equal",
"to",
"constant",
".",
"Default",
"is",
"reflect",
"cval",
":",
"scalar",
"optional",
"Value",
"to",
"fill",
"past",
"edges",
"of",
"input",
"if",
"mode",
"is",
"constant",
".",
"Default",
"is",
"0",
".",
"0",
"origin",
":",
"scalar",
"optional",
"The",
"origin",
"parameter",
"controls",
"the",
"placement",
"of",
"the",
"filter",
".",
"Default",
"0",
".",
"0",
".",
"Returns",
"-------",
"output",
":",
"ndarray",
"Input",
"image",
"convolved",
"with",
"the",
"supplied",
"kernel",
"."
] | python | train |
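The separability identity that `separable_convolution` relies on can be verified numerically: convolving with the outer product of a 1-D kernel equals two sequential 1-D passes (SciPy is used only for the check; the symmetric kernel sidesteps the convolution/correlation flip):

import numpy as np
from scipy.ndimage import convolve, convolve1d

w = np.array([1.0, 2.0, 1.0]) / 4.0
img = np.random.rand(5, 5)
full = convolve(img, np.outer(w, w), mode='reflect')
sep = convolve1d(convolve1d(img, w, axis=0, mode='reflect'),
                 w, axis=1, mode='reflect')
assert np.allclose(full, sep)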
gwastro/pycbc | pycbc/weave.py | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/weave.py#L32-L64 | def pycbc_compile_function(code,arg_names,local_dict,global_dict,
module_dir,
compiler='',
verbose=1,
support_code=None,
headers=None,
customize=None,
type_converters=None,
auto_downcast=1,
**kw):
""" Dummy wrapper around scipy weave compile to implement file locking
"""
headers = [] if headers is None else headers
lockfile_dir = os.environ['PYTHONCOMPILED']
lockfile_name = os.path.join(lockfile_dir, 'code_lockfile')
logging.info("attempting to aquire lock '%s' for "
"compiling code" % lockfile_name)
if not os.path.exists(lockfile_dir):
os.makedirs(lockfile_dir)
lockfile = open(lockfile_name, 'w')
fcntl.lockf(lockfile, fcntl.LOCK_EX)
logging.info("we have aquired the lock")
func = _compile_function(code,arg_names, local_dict, global_dict,
module_dir, compiler, verbose,
support_code, headers, customize,
type_converters,
auto_downcast, **kw)
fcntl.lockf(lockfile, fcntl.LOCK_UN)
logging.info("the lock has been released")
return func | [
"def",
"pycbc_compile_function",
"(",
"code",
",",
"arg_names",
",",
"local_dict",
",",
"global_dict",
",",
"module_dir",
",",
"compiler",
"=",
"''",
",",
"verbose",
"=",
"1",
",",
"support_code",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"customize",
"=",
"None",
",",
"type_converters",
"=",
"None",
",",
"auto_downcast",
"=",
"1",
",",
"*",
"*",
"kw",
")",
":",
"headers",
"=",
"[",
"]",
"if",
"headers",
"is",
"None",
"else",
"headers",
"lockfile_dir",
"=",
"os",
".",
"environ",
"[",
"'PYTHONCOMPILED'",
"]",
"lockfile_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"lockfile_dir",
",",
"'code_lockfile'",
")",
"logging",
".",
"info",
"(",
"\"attempting to aquire lock '%s' for \"",
"\"compiling code\"",
"%",
"lockfile_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"lockfile_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"lockfile_dir",
")",
"lockfile",
"=",
"open",
"(",
"lockfile_name",
",",
"'w'",
")",
"fcntl",
".",
"lockf",
"(",
"lockfile",
",",
"fcntl",
".",
"LOCK_EX",
")",
"logging",
".",
"info",
"(",
"\"we have aquired the lock\"",
")",
"func",
"=",
"_compile_function",
"(",
"code",
",",
"arg_names",
",",
"local_dict",
",",
"global_dict",
",",
"module_dir",
",",
"compiler",
",",
"verbose",
",",
"support_code",
",",
"headers",
",",
"customize",
",",
"type_converters",
",",
"auto_downcast",
",",
"*",
"*",
"kw",
")",
"fcntl",
".",
"lockf",
"(",
"lockfile",
",",
"fcntl",
".",
"LOCK_UN",
")",
"logging",
".",
"info",
"(",
"\"the lock has been released\"",
")",
"return",
"func"
] | Dummy wrapper around scipy weave compile to implement file locking | [
"Dummy",
"wrapper",
"around",
"scipy",
"weave",
"compile",
"to",
"implement",
"file",
"locking"
] | python | train |
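The advisory-lock pattern from `pycbc_compile_function` above in isolation (POSIX-only; the lock-file path is a placeholder, and try/finally guarantees release even if the critical section fails):

import fcntl

lockfile = open('/tmp/code_lockfile', 'w')  # placeholder path
fcntl.lockf(lockfile, fcntl.LOCK_EX)        # blocks until the lock is free
try:
    pass                                    # critical section: compile here
finally:
    fcntl.lockf(lockfile, fcntl.LOCK_UN)
    lockfile.close()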
wonambi-python/wonambi | wonambi/widgets/spectrum.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/spectrum.py#L104-L117 | def create(self):
"""Create empty scene for power spectrum."""
self.idx_chan = QComboBox()
self.idx_chan.activated.connect(self.display_window)
self.idx_fig = QGraphicsView(self)
self.idx_fig.scale(1, -1)
layout = QVBoxLayout()
layout.addWidget(self.idx_chan)
layout.addWidget(self.idx_fig)
self.setLayout(layout)
self.resizeEvent(None) | [
"def",
"create",
"(",
"self",
")",
":",
"self",
".",
"idx_chan",
"=",
"QComboBox",
"(",
")",
"self",
".",
"idx_chan",
".",
"activated",
".",
"connect",
"(",
"self",
".",
"display_window",
")",
"self",
".",
"idx_fig",
"=",
"QGraphicsView",
"(",
"self",
")",
"self",
".",
"idx_fig",
".",
"scale",
"(",
"1",
",",
"-",
"1",
")",
"layout",
"=",
"QVBoxLayout",
"(",
")",
"layout",
".",
"addWidget",
"(",
"self",
".",
"idx_chan",
")",
"layout",
".",
"addWidget",
"(",
"self",
".",
"idx_fig",
")",
"self",
".",
"setLayout",
"(",
"layout",
")",
"self",
".",
"resizeEvent",
"(",
"None",
")"
] | Create empty scene for power spectrum. | [
"Create",
"empty",
"scene",
"for",
"power",
"spectrum",
"."
] | python | train |
google/grr | grr/server/grr_response_server/export.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/export.py#L1187-L1198 | def Convert(self, metadata, grr_message, token=None):
"""Converts GrrMessage into a set of RDFValues.
Args:
metadata: ExportedMetadata to be used for conversion.
grr_message: GrrMessage to be converted.
token: Security token.
Returns:
List or generator with resulting RDFValues.
"""
return self.BatchConvert([(metadata, grr_message)], token=token) | [
"def",
"Convert",
"(",
"self",
",",
"metadata",
",",
"grr_message",
",",
"token",
"=",
"None",
")",
":",
"return",
"self",
".",
"BatchConvert",
"(",
"[",
"(",
"metadata",
",",
"grr_message",
")",
"]",
",",
"token",
"=",
"token",
")"
] | Converts GrrMessage into a set of RDFValues.
Args:
metadata: ExportedMetadata to be used for conversion.
grr_message: GrrMessage to be converted.
token: Security token.
Returns:
List or generator with resulting RDFValues. | [
"Converts",
"GrrMessage",
"into",
"a",
"set",
"of",
"RDFValues",
"."
] | python | train |
SoCo/SoCo | soco/music_services/data_structures.py | https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/music_services/data_structures.py#L102-L149 | def parse_response(service, response, search_type):
"""Parse the response to a music service query and return a SearchResult
Args:
service (MusicService): The music service that produced the response
response (OrderedDict): The response from the soap client call
search_type (str): A string that indicates the search type that the
response is from
Returns:
SearchResult: A SearchResult object
"""
_LOG.debug('Parse response "%s" from service "%s" of type "%s"', response,
service, search_type)
items = []
# The result to be parsed is in either searchResult or getMetadataResult
if 'searchResult' in response:
response = response['searchResult']
elif 'getMetadataResult' in response:
response = response['getMetadataResult']
else:
raise ValueError('"response" should contain either the key '
'"searchResult" or "getMetadataResult"')
# Form the search metadata
search_metadata = {
'number_returned': response['count'],
'total_matches': None,
'search_type': search_type,
'update_id': None,
}
for result_type in ('mediaCollection', 'mediaMetadata'):
# Upper case the first letter (used for the class_key)
result_type_proper = result_type[0].upper() + result_type[1:]
raw_items = response.get(result_type, [])
# If there is only 1 result, it is not put in an array
if isinstance(raw_items, OrderedDict):
raw_items = [raw_items]
for raw_item in raw_items:
# Form the class_key, which is a unique string for this type,
# formed by concatenating the result type with the item type. Turns
# into e.g: MediaMetadataTrack
class_key = result_type_proper + raw_item['itemType'].title()
cls = get_class(class_key)
items.append(cls.from_music_service(service, raw_item))
return SearchResult(items, **search_metadata) | [
"def",
"parse_response",
"(",
"service",
",",
"response",
",",
"search_type",
")",
":",
"_LOG",
".",
"debug",
"(",
"'Parse response \"%s\" from service \"%s\" of type \"%s\"'",
",",
"response",
",",
"service",
",",
"search_type",
")",
"items",
"=",
"[",
"]",
"# The result to be parsed is in either searchResult or getMetadataResult",
"if",
"'searchResult'",
"in",
"response",
":",
"response",
"=",
"response",
"[",
"'searchResult'",
"]",
"elif",
"'getMetadataResult'",
"in",
"response",
":",
"response",
"=",
"response",
"[",
"'getMetadataResult'",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'\"response\" should contain either the key '",
"'\"searchResult\" or \"getMetadataResult\"'",
")",
"# Form the search metadata",
"search_metadata",
"=",
"{",
"'number_returned'",
":",
"response",
"[",
"'count'",
"]",
",",
"'total_matches'",
":",
"None",
",",
"'search_type'",
":",
"search_type",
",",
"'update_id'",
":",
"None",
",",
"}",
"for",
"result_type",
"in",
"(",
"'mediaCollection'",
",",
"'mediaMetadata'",
")",
":",
"# Upper case the first letter (used for the class_key)",
"result_type_proper",
"=",
"result_type",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"result_type",
"[",
"1",
":",
"]",
"raw_items",
"=",
"response",
".",
"get",
"(",
"result_type",
",",
"[",
"]",
")",
"# If there is only 1 result, it is not put in an array",
"if",
"isinstance",
"(",
"raw_items",
",",
"OrderedDict",
")",
":",
"raw_items",
"=",
"[",
"raw_items",
"]",
"for",
"raw_item",
"in",
"raw_items",
":",
"# Form the class_key, which is a unique string for this type,",
"# formed by concatenating the result type with the item type. Turns",
"# into e.g: MediaMetadataTrack",
"class_key",
"=",
"result_type_proper",
"+",
"raw_item",
"[",
"'itemType'",
"]",
".",
"title",
"(",
")",
"cls",
"=",
"get_class",
"(",
"class_key",
")",
"items",
".",
"append",
"(",
"cls",
".",
"from_music_service",
"(",
"service",
",",
"raw_item",
")",
")",
"return",
"SearchResult",
"(",
"items",
",",
"*",
"*",
"search_metadata",
")"
] | Parse the response to a music service query and return a SearchResult
Args:
service (MusicService): The music service that produced the response
response (OrderedDict): The response from the soap client call
search_type (str): A string that indicates the search type that the
response is from
Returns:
SearchResult: A SearchResult object | [
"Parse",
"the",
"response",
"to",
"a",
"music",
"service",
"query",
"and",
"return",
"a",
"SearchResult"
] | python | train |
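One subtlety the parser above handles is that the SOAP layer returns a bare `OrderedDict` when a query has exactly one hit, but a list when it has several. That normalization step in isolation, with made-up payloads:

```python
from collections import OrderedDict

def as_item_list(raw_items):
    """Normalize a SOAP result field to a list of item dicts."""
    if isinstance(raw_items, OrderedDict):
        return [raw_items]          # single hit: wrap it
    return raw_items or []          # many hits (or nothing at all)

single = OrderedDict([('itemType', 'track'), ('title', 'Song A')])
many = [OrderedDict([('itemType', 'album')]), OrderedDict([('itemType', 'track')])]

assert as_item_list(single) == [single]
assert len(as_item_list(many)) == 2
assert as_item_list(None) == []
```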
acutesoftware/virtual-AI-simulator | vais/simulator.py | https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/simulator.py#L93-L105 | def command(self, cmd, agent, src='admin', password=''):
"""
takes a command from a source 'src' and if
access is allowed (future implementation)
then executes the command on the 'agent'
"""
print(src, 'says ' + cmd['type'] + ' agent', agent.name, '', cmd['direction'],' password=', password)
if cmd['type'] == 'move':
self._move_agent(agent, cmd['direction'], False)
elif cmd['type'] == 'run':
print(agent.name, 'runs in direction', cmd['direction'])
elif cmd['type'] == 'fight':
print(agent.name, 'fights') | [
"def",
"command",
"(",
"self",
",",
"cmd",
",",
"agent",
",",
"src",
"=",
"'admin'",
",",
"password",
"=",
"''",
")",
":",
"print",
"(",
"src",
",",
"'says '",
"+",
"cmd",
"[",
"'type'",
"]",
"+",
"' agent'",
",",
"agent",
".",
"name",
",",
"''",
",",
"cmd",
"[",
"'direction'",
"]",
",",
"' password='",
",",
"password",
")",
"if",
"cmd",
"[",
"'type'",
"]",
"==",
"'move'",
":",
"self",
".",
"_move_agent",
"(",
"agent",
",",
"cmd",
"[",
"'direction'",
"]",
",",
"False",
")",
"elif",
"cmd",
"[",
"'type'",
"]",
"==",
"'run'",
":",
"print",
"(",
"agent",
".",
"name",
",",
"'runs in direction'",
",",
"cmd",
"[",
"'direction'",
"]",
")",
"elif",
"cmd",
"[",
"'type'",
"]",
"==",
"'fight'",
":",
"print",
"(",
"agent",
".",
"name",
",",
"'fights'",
")"
] | takes a command from a source 'src' and if
access is allowed (future implementation)
then executes the command on the 'agent' | [
"takes",
"a",
"command",
"from",
"a",
"source",
"src",
"and",
"if",
"access",
"is",
"allowed",
"(",
"future",
"implementation",
")",
"then",
"execute",
"the",
"command",
"on",
"the",
"agent"
] | python | train |
tgbugs/pyontutils | pyontutils/core.py | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L28-L36 | def relative_resources(pathstring, failover='nifstd/resources'):
""" relative paths to resources in this repository
`failover` matches the location relative to the
github location (usually for prov purposes) """
if working_dir is None:
return Path(failover, pathstring).resolve()
else:
return Path(devconfig.resources, pathstring).resolve().relative_to(working_dir.resolve()) | [
"def",
"relative_resources",
"(",
"pathstring",
",",
"failover",
"=",
"'nifstd/resources'",
")",
":",
"if",
"working_dir",
"is",
"None",
":",
"return",
"Path",
"(",
"failover",
",",
"pathstring",
")",
".",
"resolve",
"(",
")",
"else",
":",
"return",
"Path",
"(",
"devconfig",
".",
"resources",
",",
"pathstring",
")",
".",
"resolve",
"(",
")",
".",
"relative_to",
"(",
"working_dir",
".",
"resolve",
"(",
")",
")"
] | relative paths to resources in this repository
`failover` matches the location relative to the
github location (usually for prov purposes) | [
"relative",
"paths",
"to",
"resources",
"in",
"this",
"repository",
"failover",
"matches",
"the",
"location",
"relative",
"to",
"the",
"github",
"location",
"(",
"usually",
"for",
"prov",
"purposes",
")"
] | python | train |
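The helper above hinges on `pathlib.Path.relative_to`, which succeeds only when the resolved base is a prefix of the resolved target and raises `ValueError` otherwise. A short illustration of that behaviour, with made-up paths in place of the repository config:

```python
from pathlib import Path

base = Path('/repo').resolve()
resource = Path('/repo/nifstd/resources/data.csv').resolve()

print(resource.relative_to(base))   # nifstd/resources/data.csv

try:
    Path('/elsewhere/data.csv').resolve().relative_to(base)
except ValueError as exc:
    print('not under base:', exc)
```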
SBRG/ssbio | ssbio/io/__init__.py | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/io/__init__.py#L9-L17 | def save_json(obj, outfile, allow_nan=True, compression=False):
"""Save an ssbio object as a JSON file using json_tricks"""
if compression:
with open(outfile, 'wb') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
else:
with open(outfile, 'w') as f:
dump(obj, f, allow_nan=allow_nan, compression=compression)
log.info('Saved {} (id: {}) to {}'.format(type(obj), obj.id, outfile)) | [
"def",
"save_json",
"(",
"obj",
",",
"outfile",
",",
"allow_nan",
"=",
"True",
",",
"compression",
"=",
"False",
")",
":",
"if",
"compression",
":",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"f",
":",
"dump",
"(",
"obj",
",",
"f",
",",
"allow_nan",
"=",
"allow_nan",
",",
"compression",
"=",
"compression",
")",
"else",
":",
"with",
"open",
"(",
"outfile",
",",
"'w'",
")",
"as",
"f",
":",
"dump",
"(",
"obj",
",",
"f",
",",
"allow_nan",
"=",
"allow_nan",
",",
"compression",
"=",
"compression",
")",
"log",
".",
"info",
"(",
"'Saved {} (id: {}) to {}'",
".",
"format",
"(",
"type",
"(",
"obj",
")",
",",
"obj",
".",
"id",
",",
"outfile",
")",
")"
] | Save an ssbio object as a JSON file using json_tricks | [
"Save",
"an",
"ssbio",
"object",
"as",
"a",
"JSON",
"file",
"using",
"json_tricks"
] | python | train |
mitsei/dlkit | dlkit/json_/assessment/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L567-L585 | def get_next_question(self, assessment_section_id, item_id):
"""Gets the next question in this assesment section.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
return: (osid.assessment.Question) - the next question
raise: IllegalState - ``has_next_question()`` is ``false``
raise: NotFound - ``assessment_section_id`` or ``item_id`` is
not found, or ``item_id`` not part of
``assessment_section_id``
raise: NullArgument - ``assessment_section_id`` or ``item_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return self.get_assessment_section(assessment_section_id).get_next_question(question_id=item_id) | [
"def",
"get_next_question",
"(",
"self",
",",
"assessment_section_id",
",",
"item_id",
")",
":",
"return",
"self",
".",
"get_assessment_section",
"(",
"assessment_section_id",
")",
".",
"get_next_question",
"(",
"question_id",
"=",
"item_id",
")"
] | Gets the next question in this assessment section.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
return: (osid.assessment.Question) - the next question
raise: IllegalState - ``has_next_question()`` is ``false``
raise: NotFound - ``assessment_section_id`` or ``item_id`` is
not found, or ``item_id`` not part of
``assessment_section_id``
raise: NullArgument - ``assessment_section_id`` or ``item_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"the",
"next",
"question",
"in",
"this",
"assesment",
"section",
"."
] | python | train |
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L9565-L9583 | def get_values_by_type(self, type_p, which):
"""This is the same as :py:func:`get_description_by_type` except that you can specify which
value types should be returned. See :py:class:`VirtualSystemDescriptionValueType` for possible
values.
in type_p of type :class:`VirtualSystemDescriptionType`
in which of type :class:`VirtualSystemDescriptionValueType`
return values of type str
"""
if not isinstance(type_p, VirtualSystemDescriptionType):
raise TypeError("type_p can only be an instance of type VirtualSystemDescriptionType")
if not isinstance(which, VirtualSystemDescriptionValueType):
raise TypeError("which can only be an instance of type VirtualSystemDescriptionValueType")
values = self._call("getValuesByType",
in_p=[type_p, which])
return values | [
"def",
"get_values_by_type",
"(",
"self",
",",
"type_p",
",",
"which",
")",
":",
"if",
"not",
"isinstance",
"(",
"type_p",
",",
"VirtualSystemDescriptionType",
")",
":",
"raise",
"TypeError",
"(",
"\"type_p can only be an instance of type VirtualSystemDescriptionType\"",
")",
"if",
"not",
"isinstance",
"(",
"which",
",",
"VirtualSystemDescriptionValueType",
")",
":",
"raise",
"TypeError",
"(",
"\"which can only be an instance of type VirtualSystemDescriptionValueType\"",
")",
"values",
"=",
"self",
".",
"_call",
"(",
"\"getValuesByType\"",
",",
"in_p",
"=",
"[",
"type_p",
",",
"which",
"]",
")",
"return",
"values"
] | This is the same as :py:func:`get_description_by_type` except that you can specify which
value types should be returned. See :py:class:`VirtualSystemDescriptionValueType` for possible
values.
in type_p of type :class:`VirtualSystemDescriptionType`
in which of type :class:`VirtualSystemDescriptionValueType`
return values of type str | [
"This",
"is",
"the",
"same",
"as",
":",
"py",
":",
"func",
":",
"get_description_by_type",
"except",
"that",
"you",
"can",
"specify",
"which",
"value",
"types",
"should",
"be",
"returned",
".",
"See",
":",
"py",
":",
"class",
":",
"VirtualSystemDescriptionValueType",
"for",
"possible",
"values",
"."
] | python | train |
globocom/GloboNetworkAPI-client-python | networkapiclient/ApiNetworkIPv4.py | https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiNetworkIPv4.py#L22-L33 | def deploy(self, id_networkv4):
"""Deploy network in equipments and set column 'active = 1' in tables redeipv4
:param id_networkv4: ID for NetworkIPv4
:return: Equipments configuration output
"""
data = dict()
uri = 'api/networkv4/%s/equipments/' % id_networkv4
return super(ApiNetworkIPv4, self).post(uri, data=data) | [
"def",
"deploy",
"(",
"self",
",",
"id_networkv4",
")",
":",
"data",
"=",
"dict",
"(",
")",
"uri",
"=",
"'api/networkv4/%s/equipments/'",
"%",
"id_networkv4",
"return",
"super",
"(",
"ApiNetworkIPv4",
",",
"self",
")",
".",
"post",
"(",
"uri",
",",
"data",
"=",
"data",
")"
] | Deploy network in equipment and set column 'active = 1' in table redeipv4
:param id_networkv4: ID for NetworkIPv4
:return: Equipments configuration output | [
"Deploy",
"network",
"in",
"equipments",
"and",
"set",
"column",
"active",
"=",
"1",
"in",
"tables",
"redeipv4"
] | python | train |
squaresLab/BugZoo | bugzoo/mgr/bug.py | https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/mgr/bug.py#L73-L80 | def is_installed(self, bug: Bug) -> bool:
"""
Determines whether or not the Docker image for a given bug has been
installed onto this server.
See: `BuildManager.is_installed`
"""
return self.__installation.build.is_installed(bug.image) | [
"def",
"is_installed",
"(",
"self",
",",
"bug",
":",
"Bug",
")",
"->",
"bool",
":",
"return",
"self",
".",
"__installation",
".",
"build",
".",
"is_installed",
"(",
"bug",
".",
"image",
")"
] | Determines whether or not the Docker image for a given bug has been
installed onto this server.
See: `BuildManager.is_installed` | [
"Determines",
"whether",
"or",
"not",
"the",
"Docker",
"image",
"for",
"a",
"given",
"bug",
"has",
"been",
"installed",
"onto",
"this",
"server",
"."
] | python | train |
ratt-ru/PyMORESANE | pymoresane/iuwt_convolution.py | https://github.com/ratt-ru/PyMORESANE/blob/b024591ad0bbb69320d08841f28a2c27f62ae1af/pymoresane/iuwt_convolution.py#L142-L158 | def pad_array(in1):
"""
Simple convenience function to pad arrays for linear convolution.
INPUTS:
in1 (no default): Input array which is to be padded.
OUTPUTS:
out1 Padded version of the input.
"""
padded_size = 2*np.array(in1.shape)
out1 = np.zeros([padded_size[0],padded_size[1]])
out1[padded_size[0]//4:3*padded_size[0]//4,padded_size[1]//4:3*padded_size[1]//4] = in1
return out1 | [
"def",
"pad_array",
"(",
"in1",
")",
":",
"padded_size",
"=",
"2",
"*",
"np",
".",
"array",
"(",
"in1",
".",
"shape",
")",
"out1",
"=",
"np",
".",
"zeros",
"(",
"[",
"padded_size",
"[",
"0",
"]",
",",
"padded_size",
"[",
"1",
"]",
"]",
")",
"out1",
"[",
"padded_size",
"[",
"0",
"]",
"/",
"4",
":",
"3",
"*",
"padded_size",
"[",
"0",
"]",
"/",
"4",
",",
"padded_size",
"[",
"1",
"]",
"/",
"4",
":",
"3",
"*",
"padded_size",
"[",
"1",
"]",
"/",
"4",
"]",
"=",
"in1",
"return",
"out1"
] | Simple convenience function to pad arrays for linear convolution.
INPUTS:
in1 (no default): Input array which is to be padded.
OUTPUTS:
out1 Padded version of the input. | [
"Simple",
"convenience",
"function",
"to",
"pad",
"arrays",
"for",
"linear",
"convolution",
"."
] | python | train |
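A quick numerical check of the helper above: a 4×4 input ends up centered in an 8×8 zero array, with nothing but padding around it:

```python
import numpy as np

in1 = np.ones((4, 4))
out1 = pad_array(in1)

print(out1.shape)            # (8, 8)
print(out1[2:6, 2:6].sum())  # 16.0 -> the input occupies the central block
print(out1.sum())            # 16.0 -> everything outside the block is zero
```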
log2timeline/plaso | plaso/lib/timelib.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/lib/timelib.py#L206-L246 | def CopyToDatetime(cls, timestamp, timezone, raise_error=False):
"""Copies the timestamp to a datetime object.
Args:
timestamp: The timestamp which is an integer containing the number
of microseconds since January 1, 1970, 00:00:00 UTC.
timezone: The timezone (pytz.timezone) object.
raise_error: Boolean that if set to True will not absorb an OverflowError
if the timestamp is out of bounds. By default there will be
no error raised.
Returns:
A datetime object (instance of datetime.datetime). A datetime object of
January 1, 1970 00:00:00 UTC is returned on error if raise_error is
not set.
Raises:
OverflowError: If raise_error is set to True and an overflow error
occurs.
ValueError: If raise_error is set to True and no timestamp value is
provided.
"""
datetime_object = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
if not timestamp:
if raise_error:
raise ValueError('Missing timestamp value')
return datetime_object
try:
datetime_object += datetime.timedelta(microseconds=timestamp)
return datetime_object.astimezone(timezone)
except OverflowError as exception:
if raise_error:
raise
logging.error((
'Unable to copy {0:d} to a datetime object with error: '
'{1!s}').format(timestamp, exception))
return datetime_object | [
"def",
"CopyToDatetime",
"(",
"cls",
",",
"timestamp",
",",
"timezone",
",",
"raise_error",
"=",
"False",
")",
":",
"datetime_object",
"=",
"datetime",
".",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"tzinfo",
"=",
"pytz",
".",
"UTC",
")",
"if",
"not",
"timestamp",
":",
"if",
"raise_error",
":",
"raise",
"ValueError",
"(",
"'Missing timestamp value'",
")",
"return",
"datetime_object",
"try",
":",
"datetime_object",
"+=",
"datetime",
".",
"timedelta",
"(",
"microseconds",
"=",
"timestamp",
")",
"return",
"datetime_object",
".",
"astimezone",
"(",
"timezone",
")",
"except",
"OverflowError",
"as",
"exception",
":",
"if",
"raise_error",
":",
"raise",
"logging",
".",
"error",
"(",
"(",
"'Unable to copy {0:d} to a datetime object with error: '",
"'{1!s}'",
")",
".",
"format",
"(",
"timestamp",
",",
"exception",
")",
")",
"return",
"datetime_object"
] | Copies the timestamp to a datetime object.
Args:
timestamp: The timestamp which is an integer containing the number
of microseconds since January 1, 1970, 00:00:00 UTC.
timezone: The timezone (pytz.timezone) object.
raise_error: Boolean that if set to True will not absorb an OverflowError
if the timestamp is out of bounds. By default there will be
no error raised.
Returns:
A datetime object (instance of datetime.datetime). A datetime object of
January 1, 1970 00:00:00 UTC is returned on error if raise_error is
not set.
Raises:
OverflowError: If raise_error is set to True and an overflow error
occurs.
ValueError: If raise_error is set to True and no timestamp value is
provided. | [
"Copies",
"the",
"timestamp",
"to",
"a",
"datetime",
"object",
"."
] | python | train |
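A usage sketch for the class method above, assuming it lives on plaso's `Timestamp` class as the source path suggests; one hour past the epoch, expressed in microseconds, rendered in a non-UTC zone:

```python
import pytz

one_hour_us = 3600 * 1000000  # microseconds since 1970-01-01 00:00:00 UTC
berlin = pytz.timezone('Europe/Berlin')

dt = Timestamp.CopyToDatetime(one_hour_us, berlin)
print(dt.isoformat())  # 1970-01-01T02:00:00+01:00 -- CET was UTC+1 then
```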
PMBio/limix-backup | limix/varDecomp/varianceDecomposition.py | https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/varDecomp/varianceDecomposition.py#L954-L965 | def _getLaplaceCovar(self):
"""
Internal function for estimating parameter uncertainty
Returns:
the covariance matrix of the parameter estimates
"""
assert self.init, 'GP not initialised'
assert self.fast==False, 'Not supported for fast implementation'
if self.cache['Sigma'] is None:
self.cache['Sigma'] = sp.linalg.inv(self._getHessian())
return self.cache['Sigma'] | [
"def",
"_getLaplaceCovar",
"(",
"self",
")",
":",
"assert",
"self",
".",
"init",
",",
"'GP not initialised'",
"assert",
"self",
".",
"fast",
"==",
"False",
",",
"'Not supported for fast implementation'",
"if",
"self",
".",
"cache",
"[",
"'Sigma'",
"]",
"is",
"None",
":",
"self",
".",
"cache",
"[",
"'Sigma'",
"]",
"=",
"sp",
".",
"linalg",
".",
"inv",
"(",
"self",
".",
"_getHessian",
"(",
")",
")",
"return",
"self",
".",
"cache",
"[",
"'Sigma'",
"]"
] | Internal function for estimating parameter uncertainty
Returns:
the covariance matrix of the parameter estimates | [
"Internal",
"function",
"for",
"estimating",
"parameter",
"uncertainty",
"Returns",
":",
"the"
] | python | train |
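The method above is the standard Laplace trick: approximate the posterior parameter covariance by the inverse Hessian of the negative log-likelihood at the optimum, and invert only once. The same compute-once pattern with a stand-in Hessian:

```python
import numpy as np

class LaplaceCache(object):
    def __init__(self, hessian):
        self._hessian = hessian
        self._sigma = None

    def covariance(self):
        # Invert the Hessian on first use, then serve the cached copy.
        if self._sigma is None:
            self._sigma = np.linalg.inv(self._hessian)
        return self._sigma

H = np.array([[4.0, 1.0], [1.0, 3.0]])  # stand-in for the model Hessian
print(LaplaceCache(H).covariance())
```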
ahmontero/dop | dop/client.py | https://github.com/ahmontero/dop/blob/40354ac6feefe92a7555fe2d1834138c9a03e518/dop/client.py#L262-L299 | def resize_droplet(self, droplet_id, size):
"""
This method allows you to resize a specific droplet to a different size.
This will affect the number of processors and memory allocated to the droplet.
Required parameters:
droplet_id:
Integer, this is the id of your droplet that you want to resize
size, one of
size_id: Numeric, this is the id of the size with which you
would like the droplet created
size_slug: String, this is the slug of the size with which you
would like the droplet created
"""
if not droplet_id:
raise DOPException('droplet_id is required to resize a droplet!')
params = {}
size_id = size.get('size_id')
if size_id:
params.update({'size_id': size_id})
else:
size_slug = size.get('size_slug')
if size_slug:
params.update({'size_slug': size_slug})
else:
msg = 'size_id or size_slug are required to resize a droplet!'
raise DOPException(msg)
json = self.request('/droplets/%s/resize' % droplet_id, method='GET',
params=params)
status = json.get('status')
if status == 'OK':
return json.get('event_id')
else:
message = json.get('message')
raise DOPException('[%s]: %s' % (status, message)) | [
"def",
"resize_droplet",
"(",
"self",
",",
"droplet_id",
",",
"size",
")",
":",
"if",
"not",
"droplet_id",
":",
"raise",
"DOPException",
"(",
"'droplet_id is required to resize a droplet!'",
")",
"params",
"=",
"{",
"}",
"size_id",
"=",
"size",
".",
"get",
"(",
"'size_id'",
")",
"if",
"size_id",
":",
"params",
".",
"update",
"(",
"{",
"'size_id'",
":",
"size_id",
"}",
")",
"else",
":",
"size_slug",
"=",
"size",
".",
"get",
"(",
"'size_slug'",
")",
"if",
"size_slug",
":",
"params",
".",
"update",
"(",
"{",
"'size_slug'",
":",
"size_slug",
"}",
")",
"else",
":",
"msg",
"=",
"'size_id or size_slug are required to resize a droplet!'",
"raise",
"DOPException",
"(",
"msg",
")",
"json",
"=",
"self",
".",
"request",
"(",
"'/droplets/%s/resize'",
"%",
"droplet_id",
",",
"method",
"=",
"'GET'",
",",
"params",
"=",
"params",
")",
"status",
"=",
"json",
".",
"get",
"(",
"'status'",
")",
"if",
"status",
"==",
"'OK'",
":",
"return",
"json",
".",
"get",
"(",
"'event_id'",
")",
"else",
":",
"message",
"=",
"json",
".",
"get",
"(",
"'message'",
")",
"raise",
"DOPException",
"(",
"'[%s]: %s'",
"%",
"(",
"status",
",",
"message",
")",
")"
] | This method allows you to resize a specific droplet to a different size.
This will affect the number of processors and memory allocated to the droplet.
Required parameters:
droplet_id:
Integer, this is the id of your droplet that you want to resize
size, one of
size_id: Numeric, this is the id of the size with which you
would like the droplet created
size_slug: String, this is the slug of the size with which you
would like the droplet created | [
"This",
"method",
"allows",
"you",
"to",
"resize",
"a",
"specific",
"droplet",
"to",
"a",
"different",
"size",
".",
"This",
"will",
"affect",
"the",
"number",
"of",
"processors",
"and",
"memory",
"allocated",
"to",
"the",
"droplet",
"."
] | python | train |
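The method above accepts the size either as a numeric id or as a slug, preferring the id when both are present. That selection logic re-expressed in isolation:

```python
def pick_size_params(size):
    """Build the query params the resize endpoint expects."""
    if size.get('size_id'):
        return {'size_id': size['size_id']}
    if size.get('size_slug'):
        return {'size_slug': size['size_slug']}
    raise ValueError('size_id or size_slug are required to resize a droplet!')

print(pick_size_params({'size_id': 62}))       # {'size_id': 62}
print(pick_size_params({'size_slug': '1gb'}))  # {'size_slug': '1gb'}
```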
flowersteam/explauto | explauto/sensorimotor_model/inverse/optimize.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/optimize.py#L46-L54 | def _error(self, x):
"""Error function.
Once self.y_desired has been defined, compute the error
of input x using the forward model.
"""
y_pred = self.fmodel.predict_y(x)
err_v = y_pred - self.goal
error = sum(e*e for e in err_v)
return error | [
"def",
"_error",
"(",
"self",
",",
"x",
")",
":",
"y_pred",
"=",
"self",
".",
"fmodel",
".",
"predict_y",
"(",
"x",
")",
"err_v",
"=",
"y_pred",
"-",
"self",
".",
"goal",
"error",
"=",
"sum",
"(",
"e",
"*",
"e",
"for",
"e",
"in",
"err_v",
")",
"return",
"error"
] | Error function.
Once self.y_desired has been defined, compute the error
of input x using the forward model. | [
"Error",
"function",
".",
"Once",
"self",
".",
"y_desired",
"has",
"been",
"defined",
"compute",
"the",
"error",
"of",
"input",
"x",
"using",
"the",
"forward",
"model",
"."
] | python | train |
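The error above is a plain sum of squared residuals between the forward model's prediction and the goal. The same computation with plain sequences in place of the model call:

```python
def squared_error(y_pred, goal):
    # Sum of squared residuals, matching sum(e * e for e in y_pred - goal).
    return sum((p - g) ** 2 for p, g in zip(y_pred, goal))

print(squared_error([1.0, 2.0], [0.0, 0.0]))  # 5.0
print(squared_error([0.5, 0.5], [0.5, 0.5]))  # 0.0
```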
sashka/flask-googleauth | flask_googleauth.py | https://github.com/sashka/flask-googleauth/blob/4e481d645f1bb22124a6d79c7881746004cf4369/flask_googleauth.py#L292-L304 | def required(self, fn):
"""Request decorator. Forces authentication."""
@functools.wraps(fn)
def decorated(*args, **kwargs):
if (not self._check_auth()
# Don't try to force authentication if the request is part
# of the authentication process - otherwise we end up in a
# loop.
and request.blueprint != self.blueprint.name):
return redirect(url_for("%s.login" % self.blueprint.name,
next=request.url))
return fn(*args, **kwargs)
return decorated | [
"def",
"required",
"(",
"self",
",",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"not",
"self",
".",
"_check_auth",
"(",
")",
"# Don't try to force authentication if the request is part",
"# of the authentication process - otherwise we end up in a",
"# loop.",
"and",
"request",
".",
"blueprint",
"!=",
"self",
".",
"blueprint",
".",
"name",
")",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"\"%s.login\"",
"%",
"self",
".",
"blueprint",
".",
"name",
",",
"next",
"=",
"request",
".",
"url",
")",
")",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorated"
] | Request decorator. Forces authentication. | [
"Request",
"decorator",
".",
"Forces",
"authentication",
"."
] | python | train |
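A usage sketch for the decorator above. The import name and the `GoogleAuth` constructor are assumptions based on the package name; any unauthenticated request to the view is redirected to the blueprint's login endpoint with `next` set:

```python
from flask import Flask
from flask_googleauth import GoogleAuth  # assumed entry point of the package

app = Flask(__name__)
app.secret_key = 'change-me'  # sessions are needed for the auth flow
auth = GoogleAuth(app)

@app.route('/secret')
@auth.required
def secret():
    return 'only for authenticated users'
```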
saltstack/salt | salt/utils/dns.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L859-L911 | def spf_rec(rdata):
'''
Validate and parse DNS record data for SPF record(s)
:param rdata: DNS record data
:return: dict w/fields
'''
spf_fields = rdata.split(' ')
if not spf_fields.pop(0).startswith('v=spf'):
raise ValueError('Not an SPF record')
res = OrderedDict()
mods = set()
for mech_spec in spf_fields:
if mech_spec.startswith(('exp', 'redirect')):
# It's a modifier
mod, val = mech_spec.split('=', 1)
if mod in mods:
raise KeyError('Modifier {0} can only appear once'.format(mod))
mods.add(mod)
continue
# TODO: Should be in something intelligent like an SPF_get
# if mod == 'exp':
# res[mod] = lookup(val, 'TXT', **qargs)
# continue
# elif mod == 'redirect':
# return query(val, 'SPF', **qargs)
mech = {}
if mech_spec[0] in ('+', '-', '~', '?'):
mech['qualifier'] = mech_spec[0]
mech_spec = mech_spec[1:]
if ':' in mech_spec:
mech_spec, val = mech_spec.split(':', 1)
elif '/' in mech_spec:
idx = mech_spec.find('/')
val = mech_spec[idx:]
mech_spec = mech_spec[0:idx]
else:
val = None
res[mech_spec] = mech
if not val:
continue
elif mech_spec in ('ip4', 'ip6'):
val = ipaddress.ip_interface(val)
assert val.version == int(mech_spec[-1])
mech['value'] = val
return res | [
"def",
"spf_rec",
"(",
"rdata",
")",
":",
"spf_fields",
"=",
"rdata",
".",
"split",
"(",
"' '",
")",
"if",
"not",
"spf_fields",
".",
"pop",
"(",
"0",
")",
".",
"startswith",
"(",
"'v=spf'",
")",
":",
"raise",
"ValueError",
"(",
"'Not an SPF record'",
")",
"res",
"=",
"OrderedDict",
"(",
")",
"mods",
"=",
"set",
"(",
")",
"for",
"mech_spec",
"in",
"spf_fields",
":",
"if",
"mech_spec",
".",
"startswith",
"(",
"(",
"'exp'",
",",
"'redirect'",
")",
")",
":",
"# It's a modifier",
"mod",
",",
"val",
"=",
"mech_spec",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"mod",
"in",
"mods",
":",
"raise",
"KeyError",
"(",
"'Modifier {0} can only appear once'",
".",
"format",
"(",
"mod",
")",
")",
"mods",
".",
"add",
"(",
"mod",
")",
"continue",
"# TODO: Should be in something intelligent like an SPF_get",
"# if mod == 'exp':",
"# res[mod] = lookup(val, 'TXT', **qargs)",
"# continue",
"# elif mod == 'redirect':",
"# return query(val, 'SPF', **qargs)",
"mech",
"=",
"{",
"}",
"if",
"mech_spec",
"[",
"0",
"]",
"in",
"(",
"'+'",
",",
"'-'",
",",
"'~'",
",",
"'?'",
")",
":",
"mech",
"[",
"'qualifier'",
"]",
"=",
"mech_spec",
"[",
"0",
"]",
"mech_spec",
"=",
"mech_spec",
"[",
"1",
":",
"]",
"if",
"':'",
"in",
"mech_spec",
":",
"mech_spec",
",",
"val",
"=",
"mech_spec",
".",
"split",
"(",
"':'",
",",
"1",
")",
"elif",
"'/'",
"in",
"mech_spec",
":",
"idx",
"=",
"mech_spec",
".",
"find",
"(",
"'/'",
")",
"mech_spec",
"=",
"mech_spec",
"[",
"0",
":",
"idx",
"]",
"val",
"=",
"mech_spec",
"[",
"idx",
":",
"]",
"else",
":",
"val",
"=",
"None",
"res",
"[",
"mech_spec",
"]",
"=",
"mech",
"if",
"not",
"val",
":",
"continue",
"elif",
"mech_spec",
"in",
"(",
"'ip4'",
",",
"'ip6'",
")",
":",
"val",
"=",
"ipaddress",
".",
"ip_interface",
"(",
"val",
")",
"assert",
"val",
".",
"version",
"==",
"int",
"(",
"mech_spec",
"[",
"-",
"1",
"]",
")",
"mech",
"[",
"'value'",
"]",
"=",
"val",
"return",
"res"
] | Validate and parse DNS record data for SPF record(s)
:param rdata: DNS record data
:return: dict w/fields | [
"Validate",
"and",
"parse",
"DNS",
"record",
"data",
"for",
"SPF",
"record",
"(",
"s",
")",
":",
"param",
"rdata",
":",
"DNS",
"record",
"data",
":",
"return",
":",
"dict",
"w",
"/",
"fields"
] | python | train |
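A worked call against the parser above (it relies on the module-level `ipaddress` import that salt provides): the `ip4` mechanism carries a parsed interface value, while `~all` records only its qualifier:

```python
res = spf_rec('v=spf1 ip4:192.0.2.0/24 ~all')

print(list(res))                # ['ip4', 'all']
print(res['ip4']['value'])      # 192.0.2.0/24 -- an IPv4Interface
print(res['all']['qualifier'])  # ~
```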
MoseleyBioinformaticsLab/nmrstarlib | nmrstarlib/csviewer.py | https://github.com/MoseleyBioinformaticsLab/nmrstarlib/blob/f2adabbca04d5a134ce6ba3211099d1457787ff2/nmrstarlib/csviewer.py#L70-L122 | def csview(self, view=False):
"""View chemical shift values organized by amino acid residue.
:param view: Open in default image viewer or save file in current working directory quietly.
:type view: :py:obj:`True` or :py:obj:`False`
:return: None
:rtype: :py:obj:`None`
"""
for starfile in fileio.read_files(self.from_path):
chains = starfile.chem_shifts_by_residue(amino_acids=self.amino_acids,
atoms=self.atoms,
amino_acids_and_atoms=self.amino_acids_and_atoms,
nmrstar_version=self.nmrstar_version)
for idx, chemshifts_dict in enumerate(chains):
nodes = []
edges = []
for seq_id in chemshifts_dict:
aaname = "{}_{}".format(chemshifts_dict[seq_id]["AA3Code"], seq_id)
label = '"{{{}|{}}}"'.format(seq_id, chemshifts_dict[seq_id]["AA3Code"])
color = 8
aanode_entry = " {} [label={}, fillcolor={}]".format(aaname, label, color)
nodes.append(aanode_entry)
currnodename = aaname
for atom_type in chemshifts_dict[seq_id]:
if atom_type in ["AA3Code", "Seq_ID"]:
continue
else:
atname = "{}_{}".format(aaname, atom_type)
label = '"{{{}|{}}}"'.format(atom_type, chemshifts_dict[seq_id][atom_type])
if atom_type.startswith("H"):
color = 4
elif atom_type.startswith("C"):
color = 6
elif atom_type.startswith("N"):
color = 10
else:
color = 8
atnode_entry = "{} [label={}, fillcolor={}]".format(atname, label, color)
nextnodename = atname
nodes.append(atnode_entry)
edges.append("{} -> {}".format(currnodename, nextnodename))
currnodename = nextnodename
if self.filename is None:
filename = "{}_{}".format(starfile.id, idx)
else:
filename = "{}_{}".format(self.filename, idx)
src = Source(self.dot_template.format("\n".join(nodes), "\n".join(edges)), format=self.csview_format)
src.render(filename=filename, view=view) | [
"def",
"csview",
"(",
"self",
",",
"view",
"=",
"False",
")",
":",
"for",
"starfile",
"in",
"fileio",
".",
"read_files",
"(",
"self",
".",
"from_path",
")",
":",
"chains",
"=",
"starfile",
".",
"chem_shifts_by_residue",
"(",
"amino_acids",
"=",
"self",
".",
"amino_acids",
",",
"atoms",
"=",
"self",
".",
"atoms",
",",
"amino_acids_and_atoms",
"=",
"self",
".",
"amino_acids_and_atoms",
",",
"nmrstar_version",
"=",
"self",
".",
"nmrstar_version",
")",
"for",
"idx",
",",
"chemshifts_dict",
"in",
"enumerate",
"(",
"chains",
")",
":",
"nodes",
"=",
"[",
"]",
"edges",
"=",
"[",
"]",
"for",
"seq_id",
"in",
"chemshifts_dict",
":",
"aaname",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"chemshifts_dict",
"[",
"seq_id",
"]",
"[",
"\"AA3Code\"",
"]",
",",
"seq_id",
")",
"label",
"=",
"'\"{{{}|{}}}\"'",
".",
"format",
"(",
"seq_id",
",",
"chemshifts_dict",
"[",
"seq_id",
"]",
"[",
"\"AA3Code\"",
"]",
")",
"color",
"=",
"8",
"aanode_entry",
"=",
"\" {} [label={}, fillcolor={}]\"",
".",
"format",
"(",
"aaname",
",",
"label",
",",
"color",
")",
"nodes",
".",
"append",
"(",
"aanode_entry",
")",
"currnodename",
"=",
"aaname",
"for",
"atom_type",
"in",
"chemshifts_dict",
"[",
"seq_id",
"]",
":",
"if",
"atom_type",
"in",
"[",
"\"AA3Code\"",
",",
"\"Seq_ID\"",
"]",
":",
"continue",
"else",
":",
"atname",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"aaname",
",",
"atom_type",
")",
"label",
"=",
"'\"{{{}|{}}}\"'",
".",
"format",
"(",
"atom_type",
",",
"chemshifts_dict",
"[",
"seq_id",
"]",
"[",
"atom_type",
"]",
")",
"if",
"atom_type",
".",
"startswith",
"(",
"\"H\"",
")",
":",
"color",
"=",
"4",
"elif",
"atom_type",
".",
"startswith",
"(",
"\"C\"",
")",
":",
"color",
"=",
"6",
"elif",
"atom_type",
".",
"startswith",
"(",
"\"N\"",
")",
":",
"color",
"=",
"10",
"else",
":",
"color",
"=",
"8",
"atnode_entry",
"=",
"\"{} [label={}, fillcolor={}]\"",
".",
"format",
"(",
"atname",
",",
"label",
",",
"color",
")",
"nextnodename",
"=",
"atname",
"nodes",
".",
"append",
"(",
"atnode_entry",
")",
"edges",
".",
"append",
"(",
"\"{} -> {}\"",
".",
"format",
"(",
"currnodename",
",",
"nextnodename",
")",
")",
"currnodename",
"=",
"nextnodename",
"if",
"self",
".",
"filename",
"is",
"None",
":",
"filename",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"starfile",
".",
"id",
",",
"idx",
")",
"else",
":",
"filename",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"self",
".",
"filename",
",",
"idx",
")",
"src",
"=",
"Source",
"(",
"self",
".",
"dot_template",
".",
"format",
"(",
"\"\\n\"",
".",
"join",
"(",
"nodes",
")",
",",
"\"\\n\"",
".",
"join",
"(",
"edges",
")",
")",
",",
"format",
"=",
"self",
".",
"csview_format",
")",
"src",
".",
"render",
"(",
"filename",
"=",
"filename",
",",
"view",
"=",
"view",
")"
] | View chemical shift values organized by amino acid residue.
:param view: Open in default image viewer or save file in current working directory quietly.
:type view: :py:obj:`True` or :py:obj:`False`
:return: None
:rtype: :py:obj:`None` | [
"View",
"chemical",
"shift",
"values",
"organized",
"by",
"amino",
"acid",
"residue",
"."
] | python | train |
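The DOT text assembled above leans on Graphviz record-shaped nodes, where a label like `{1|ALA}` splits the node into two fields. A tiny standalone illustration of that convention (the node shape and colorscheme are assumptions, since the record's `dot_template` is not shown):

```python
from graphviz import Source

dot = '''
digraph CS {
  node [shape=record, style=filled, colorscheme=set312]
  ALA_1 [label="{1|ALA}", fillcolor=8]
  ALA_1_CA [label="{CA|52.4}", fillcolor=6]
  ALA_1 -> ALA_1_CA
}
'''
Source(dot, format='svg').render(filename='csview_demo', view=False)
```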
deschler/django-modeltranslation | modeltranslation/manager.py | https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/manager.py#L79-L87 | def append_translated(model, fields):
"If translated field is encountered, add also all its translation fields."
fields = set(fields)
from modeltranslation.translator import translator
opts = translator.get_options_for_model(model)
for key, translated in opts.fields.items():
if key in fields:
fields = fields.union(f.name for f in translated)
return fields | [
"def",
"append_translated",
"(",
"model",
",",
"fields",
")",
":",
"fields",
"=",
"set",
"(",
"fields",
")",
"from",
"modeltranslation",
".",
"translator",
"import",
"translator",
"opts",
"=",
"translator",
".",
"get_options_for_model",
"(",
"model",
")",
"for",
"key",
",",
"translated",
"in",
"opts",
".",
"fields",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"fields",
":",
"fields",
"=",
"fields",
".",
"union",
"(",
"f",
".",
"name",
"for",
"f",
"in",
"translated",
")",
"return",
"fields"
] | If translated field is encountered, add also all its translation fields. | [
"If",
"translated",
"field",
"is",
"encountered",
"add",
"also",
"all",
"its",
"translation",
"fields",
"."
] | python | train |
manns/pyspread | pyspread/src/gui/_main_window.py | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_main_window.py#L726-L734 | def OnTableListToggle(self, event):
"""Table list toggle event handler"""
table_list_panel_info = \
self.main_window._mgr.GetPane("table_list_panel")
self._toggle_pane(table_list_panel_info)
event.Skip() | [
"def",
"OnTableListToggle",
"(",
"self",
",",
"event",
")",
":",
"table_list_panel_info",
"=",
"self",
".",
"main_window",
".",
"_mgr",
".",
"GetPane",
"(",
"\"table_list_panel\"",
")",
"self",
".",
"_toggle_pane",
"(",
"table_list_panel_info",
")",
"event",
".",
"Skip",
"(",
")"
] | Table list toggle event handler | [
"Table",
"list",
"toggle",
"event",
"handler"
] | python | train |
nir0s/ghost | ghost.py | https://github.com/nir0s/ghost/blob/77da967a4577ca4cf100cfe34e87b39ad88bf21c/ghost.py#L942-L946 | def init(self):
"""Create an Elasticsearch index if necessary
"""
# ignore 400 (IndexAlreadyExistsException) when creating an index
self.es.indices.create(index=self.params['index'], ignore=400) | [
"def",
"init",
"(",
"self",
")",
":",
"# ignore 400 (IndexAlreadyExistsException) when creating an index",
"self",
".",
"es",
".",
"indices",
".",
"create",
"(",
"index",
"=",
"self",
".",
"params",
"[",
"'index'",
"]",
",",
"ignore",
"=",
"400",
")"
] | Create an Elasticsearch index if necessary | [
"Create",
"an",
"Elasticsearch",
"index",
"if",
"necessary"
] | python | valid |
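The `ignore=400` idiom above makes index creation idempotent: elasticsearch-py raises on a 400 (index already exists) unless told to swallow it. The same call in isolation, with a hypothetical endpoint and index name, for client versions that accept per-request `ignore`:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch(['http://localhost:9200'])  # hypothetical endpoint

# Safe to call repeatedly: an existing index yields a 400 that is ignored.
es.indices.create(index='ghost', ignore=400)
```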
tensorflow/tensor2tensor | tensor2tensor/data_generators/imagenet.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L591-L607 | def preprocess_for_train(image, image_size=224, normalize=True):
"""Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image of arbitrary size.
image_size: int, how large the output image should be.
normalize: bool, if True the image is normalized.
Returns:
A preprocessed image `Tensor`.
"""
if normalize: image = tf.to_float(image) / 255.0
image = _random_crop(image, image_size)
if normalize: image = _normalize(image)
image = _flip(image)
image = tf.reshape(image, [image_size, image_size, 3])
return image | [
"def",
"preprocess_for_train",
"(",
"image",
",",
"image_size",
"=",
"224",
",",
"normalize",
"=",
"True",
")",
":",
"if",
"normalize",
":",
"image",
"=",
"tf",
".",
"to_float",
"(",
"image",
")",
"/",
"255.0",
"image",
"=",
"_random_crop",
"(",
"image",
",",
"image_size",
")",
"if",
"normalize",
":",
"image",
"=",
"_normalize",
"(",
"image",
")",
"image",
"=",
"_flip",
"(",
"image",
")",
"image",
"=",
"tf",
".",
"reshape",
"(",
"image",
",",
"[",
"image_size",
",",
"image_size",
",",
"3",
"]",
")",
"return",
"image"
] | Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image of arbitrary size.
image_size: int, how large the output image should be.
normalize: bool, if True the image is normalized.
Returns:
A preprocessed image `Tensor`. | [
"Preprocesses",
"the",
"given",
"image",
"for",
"evaluation",
"."
] | python | train |
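A shape-level smoke test for the pipeline above, written against the TF1-era API that `tf.to_float` implies; the random tensor stands in for a decoded training image:

```python
import tensorflow as tf  # assumes a TF1-style (graph-mode) TensorFlow

image = tf.random_uniform([300, 400, 3], maxval=255.0)
processed = preprocess_for_train(image, image_size=224)

with tf.Session() as sess:
    out = sess.run(processed)
print(out.shape)  # (224, 224, 3)
```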
Azure/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/_http/winhttp.py#L418-L428 | def putrequest(self, method, uri):
''' Connects to host and sends the request. '''
protocol = unicode(self.protocol + '://')
url = protocol + self.host + unicode(uri)
self._httprequest.set_timeout(self.timeout)
self._httprequest.open(unicode(method), url)
# sets certificate for the connection if cert_file is set.
if self.cert_file is not None:
self._httprequest.set_client_certificate(unicode(self.cert_file)) | [
"def",
"putrequest",
"(",
"self",
",",
"method",
",",
"uri",
")",
":",
"protocol",
"=",
"unicode",
"(",
"self",
".",
"protocol",
"+",
"'://'",
")",
"url",
"=",
"protocol",
"+",
"self",
".",
"host",
"+",
"unicode",
"(",
"uri",
")",
"self",
".",
"_httprequest",
".",
"set_timeout",
"(",
"self",
".",
"timeout",
")",
"self",
".",
"_httprequest",
".",
"open",
"(",
"unicode",
"(",
"method",
")",
",",
"url",
")",
"# sets certificate for the connection if cert_file is set.",
"if",
"self",
".",
"cert_file",
"is",
"not",
"None",
":",
"self",
".",
"_httprequest",
".",
"set_client_certificate",
"(",
"unicode",
"(",
"self",
".",
"cert_file",
")",
")"
] | Connects to host and sends the request. | [
"Connects",
"to",
"host",
"and",
"sends",
"the",
"request",
"."
] | python | test |