Dataset columns (one record per extracted function):

    column              type                        lengths / values
    nwo                 string                      5 to 106
    sha                 string                      40 to 40
    path                string                      4 to 174
    language            string                      1 distinct value
    identifier          string                      1 to 140
    parameters          string                      0 to 87.7k
    argument_list       string                      1 distinct value
    return_statement    string                      0 to 426k
    docstring           string                      0 to 64.3k
    docstring_summary   string                      0 to 26.3k
    docstring_tokens    list                        -
    function            string                      18 to 4.83M
    function_tokens     list                        -
    url                 string                      83 to 304
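As a reading aid, here is a minimal sketch of one such record as a Python dataclass. The field names come from the schema above; the JSON Lines container format and the loader helper are assumptions for illustration only, not part of this preview.

import json
from dataclasses import dataclass
from typing import Iterator, List

@dataclass
class FunctionRecord:
    nwo: str                      # repository "owner/name", e.g. "lucadelu/pyModis"
    sha: str                      # 40-character commit hash
    path: str                     # file path within the repository
    language: str                 # "python" in every row of this preview
    identifier: str               # function or method name
    parameters: str               # parameter list source text
    argument_list: str
    return_statement: str         # may be empty
    docstring: str                # full docstring, may be empty
    docstring_summary: str        # short summary of the docstring
    docstring_tokens: List[str]
    function: str                 # full function source
    function_tokens: List[str]
    url: str                      # GitHub permalink with line range

def iter_records(path: str) -> Iterator[FunctionRecord]:
    """Yield FunctionRecord objects from a JSON Lines dump (format assumed)."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield FunctionRecord(**json.loads(line))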
lucadelu/pyModis
de86ccf28fffcb759d18b4b5b5a601304ec4fd14
scripts/modis_multiparse.py
python
main
()
Main function
Main function
[ "Main", "function" ]
def main():
    """Main function"""
    #usage
    usage = "usage: %prog [options] hdf_files_list"
    if 1 == len(sys.argv) and wxpython:
        option_parser_class = optparse_gui.OptionParser
    else:
        option_parser_class = optparse_required.OptionParser
    parser = option_parser_class(usage=usage, description='modis_multiparse')
    #spatial extent
    parser.add_option("-b", action="store_true", dest="bound", default=False,
                      help="print the values related to the spatial max extent")
    #write into file
    parser.add_option("-w", "--write", dest="output", metavar="OUTPUT_FILE",
                      help="write the MODIS XML metadata file for MODIS mosaic")
    (options, args) = parser.parse_args()
    #create modis object
    if len(args) == 0 and not wxpython:
        parser.print_help()
        sys.exit(1)
    if len(args) < 2:
        parser.error("You have to define the name of multiple HDF files")
    for arg in args:
        if not os.path.isfile(arg):
            parser.error(arg + " does not exist or is not a file")
    modisOgg = parsemodis.parseModisMulti(args)
    if options.bound:
        modisOgg.valBound()
        print(readDict(modisOgg.boundary))
    elif options.output:
        modisOgg.writexml(options.output)
        print("%s write correctly" % options.output)
    else:
        parser.error("You have to choose at least one option")
[ "def", "main", "(", ")", ":", "#usage", "usage", "=", "\"usage: %prog [options] hdf_files_list\"", "if", "1", "==", "len", "(", "sys", ".", "argv", ")", "and", "wxpython", ":", "option_parser_class", "=", "optparse_gui", ".", "OptionParser", "else", ":", "option_parser_class", "=", "optparse_required", ".", "OptionParser", "parser", "=", "option_parser_class", "(", "usage", "=", "usage", ",", "description", "=", "'modis_multiparse'", ")", "#spatial extent", "parser", ".", "add_option", "(", "\"-b\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"bound\"", ",", "default", "=", "False", ",", "help", "=", "\"print the values related to the spatial max extent\"", ")", "#write into file", "parser", ".", "add_option", "(", "\"-w\"", ",", "\"--write\"", ",", "dest", "=", "\"output\"", ",", "metavar", "=", "\"OUTPUT_FILE\"", ",", "help", "=", "\"write the MODIS XML metadata file for MODIS mosaic\"", ")", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "#create modis object", "if", "len", "(", "args", ")", "==", "0", "and", "not", "wxpython", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "if", "len", "(", "args", ")", "<", "2", ":", "parser", ".", "error", "(", "\"You have to define the name of multiple HDF files\"", ")", "for", "arg", "in", "args", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "arg", ")", ":", "parser", ".", "error", "(", "arg", "+", "\" does not exist or is not a file\"", ")", "modisOgg", "=", "parsemodis", ".", "parseModisMulti", "(", "args", ")", "if", "options", ".", "bound", ":", "modisOgg", ".", "valBound", "(", ")", "print", "(", "readDict", "(", "modisOgg", ".", "boundary", ")", ")", "elif", "options", ".", "output", ":", "modisOgg", ".", "writexml", "(", "options", ".", "output", ")", "print", "(", "\"%s write correctly\"", "%", "options", ".", "output", ")", "else", ":", "parser", ".", "error", "(", "\"You have to choose at least one option\"", ")" ]
https://github.com/lucadelu/pyModis/blob/de86ccf28fffcb759d18b4b5b5a601304ec4fd14/scripts/modis_multiparse.py#L46-L81
compas-dev/compas
0b33f8786481f710115fb1ae5fe79abc2a9a5175
src/compas/geometry/intersections/intersections.py
python
intersection_segment_polyline_xy
(segment, polyline, tol=1e-6)
Calculate the intersection point of a segment and a polyline on the XY-plane. Parameters ---------- segment : [point, point] or :class:`compas.geometry.Line` A line segment defined by two points, with at least XY coordinates. polyline : sequence[point] or :class:`compas.geometry.Polyline` A polyline defined by a sequence of points, with at least XY coordinates. tol : float, optional The tolerance for intersection verification. Returns ------- [float, float, 0.0] or None XYZ coordinates of the first intersection point if one exists. None otherwise Examples -------- >>> from compas.geometry import is_point_on_polyline_xy >>> from compas.geometry import is_point_on_segment_xy >>> from compas.geometry import distance_point_point >>> p = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)] >>> s = [(0.5, -0.5, 0.0), (0.5, 0.5, 0.0)] >>> x = intersection_segment_polyline_xy(s, p) >>> is_point_on_polyline_xy(x, p) True >>> is_point_on_segment_xy(x, s) True >>> distance_point_point((0.5, 0.0, 0.0), x) < 1e-6 True
Calculate the intersection point of a segment and a polyline on the XY-plane.
[ "Calculate", "the", "intersection", "point", "of", "a", "segment", "and", "a", "polyline", "on", "the", "XY", "-", "plane", "." ]
def intersection_segment_polyline_xy(segment, polyline, tol=1e-6):
    """Calculate the intersection point of a segment and a polyline on the XY-plane.

    Parameters
    ----------
    segment : [point, point] or :class:`compas.geometry.Line`
        A line segment defined by two points, with at least XY coordinates.
    polyline : sequence[point] or :class:`compas.geometry.Polyline`
        A polyline defined by a sequence of points, with at least XY coordinates.
    tol : float, optional
        The tolerance for intersection verification.

    Returns
    -------
    [float, float, 0.0] or None
        XYZ coordinates of the first intersection point if one exists.
        None otherwise

    Examples
    --------
    >>> from compas.geometry import is_point_on_polyline_xy
    >>> from compas.geometry import is_point_on_segment_xy
    >>> from compas.geometry import distance_point_point
    >>> p = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)]
    >>> s = [(0.5, -0.5, 0.0), (0.5, 0.5, 0.0)]
    >>> x = intersection_segment_polyline_xy(s, p)
    >>> is_point_on_polyline_xy(x, p)
    True
    >>> is_point_on_segment_xy(x, s)
    True
    >>> distance_point_point((0.5, 0.0, 0.0), x) < 1e-6
    True

    """
    for cd in pairwise(polyline):
        pt = intersection_segment_segment_xy(segment, cd, tol)
        if pt:
            return pt
[ "def", "intersection_segment_polyline_xy", "(", "segment", ",", "polyline", ",", "tol", "=", "1e-6", ")", ":", "for", "cd", "in", "pairwise", "(", "polyline", ")", ":", "pt", "=", "intersection_segment_segment_xy", "(", "segment", ",", "cd", ",", "tol", ")", "if", "pt", ":", "return", "pt" ]
https://github.com/compas-dev/compas/blob/0b33f8786481f710115fb1ae5fe79abc2a9a5175/src/compas/geometry/intersections/intersections.py#L908-L946
vikasverma1077/manifold_mixup
870ef77caaa5092144d82c56f26b07b29eefabec
gan/interactive.py
python
pp_interp
(net, alpha)
Only works with model_resnet_preproc.py as your architecture!!!
Only works with model_resnet_preproc.py as your architecture!!!
[ "Only", "works", "with", "model_resnet_preproc", ".", "py", "as", "your", "architecture!!!" ]
def pp_interp(net, alpha):
    """
    Only works with model_resnet_preproc.py as your architecture!!!
    """
    conv2d = net.d.preproc
    deconv2d = nn.ConvTranspose2d(16, 3, 3, stride=1, padding=1)
    deconv2d = deconv2d.cuda()
    deconv2d.weight = conv2d.weight
    gz1 = net.sample(bs=128)
    gz2 = net.sample(bs=128)
    #alpha = net.sample_lambda(gz1.size(0))
    gz_mix = alpha*gz1 + (1.-alpha)*gz2
    save_image(gz1*0.5 + 0.5, filename="gz1.png")
    save_image(gz2*0.5 + 0.5, filename="gz2.png")
    save_image(gz_mix*0.5 + 0.5, filename="gz_mix.png")
    # Ok, do the mixup in hidden space.
    gz1_h = conv2d(gz1)
    gz2_h = conv2d(gz2)
    #alpha = 0.05
    gz_mix_h = alpha*gz1_h + (1.-alpha)*gz2_h
    gz_mix_h_dec = deconv2d(gz_mix_h)
    save_image(gz_mix_h_dec*0.5 + 0.5, filename="gz_mix_h_dec.png")
    print(conv2d.weight == deconv2d.weight)
    import pdb
    pdb.set_trace()
[ "def", "pp_interp", "(", "net", ",", "alpha", ")", ":", "conv2d", "=", "net", ".", "d", ".", "preproc", "deconv2d", "=", "nn", ".", "ConvTranspose2d", "(", "16", ",", "3", ",", "3", ",", "stride", "=", "1", ",", "padding", "=", "1", ")", "deconv2d", "=", "deconv2d", ".", "cuda", "(", ")", "deconv2d", ".", "weight", "=", "conv2d", ".", "weight", "gz1", "=", "net", ".", "sample", "(", "bs", "=", "128", ")", "gz2", "=", "net", ".", "sample", "(", "bs", "=", "128", ")", "#alpha = net.sample_lambda(gz1.size(0))", "gz_mix", "=", "alpha", "*", "gz1", "+", "(", "1.", "-", "alpha", ")", "*", "gz2", "save_image", "(", "gz1", "*", "0.5", "+", "0.5", ",", "filename", "=", "\"gz1.png\"", ")", "save_image", "(", "gz2", "*", "0.5", "+", "0.5", ",", "filename", "=", "\"gz2.png\"", ")", "save_image", "(", "gz_mix", "*", "0.5", "+", "0.5", ",", "filename", "=", "\"gz_mix.png\"", ")", "# Ok, do the mixup in hidden space.", "gz1_h", "=", "conv2d", "(", "gz1", ")", "gz2_h", "=", "conv2d", "(", "gz2", ")", "#alpha = 0.05", "gz_mix_h", "=", "alpha", "*", "gz1_h", "+", "(", "1.", "-", "alpha", ")", "*", "gz2_h", "gz_mix_h_dec", "=", "deconv2d", "(", "gz_mix_h", ")", "save_image", "(", "gz_mix_h_dec", "*", "0.5", "+", "0.5", ",", "filename", "=", "\"gz_mix_h_dec.png\"", ")", "print", "(", "conv2d", ".", "weight", "==", "deconv2d", ".", "weight", ")", "import", "pdb", "pdb", ".", "set_trace", "(", ")" ]
https://github.com/vikasverma1077/manifold_mixup/blob/870ef77caaa5092144d82c56f26b07b29eefabec/gan/interactive.py#L5-L38
sshaoshuai/PointRCNN
1d0dee91262b970f460135252049112d80259ca0
lib/utils/bbox_transform.py
python
rotate_pc_along_y_torch
(pc, rot_angle)
return pc
:param pc: (N, 3 + C) :param rot_angle: (N) :return:
:param pc: (N, 3 + C) :param rot_angle: (N) :return:
[ ":", "param", "pc", ":", "(", "N", "3", "+", "C", ")", ":", "param", "rot_angle", ":", "(", "N", ")", ":", "return", ":" ]
def rotate_pc_along_y_torch(pc, rot_angle):
    """
    :param pc: (N, 3 + C)
    :param rot_angle: (N)
    :return:
    """
    cosa = torch.cos(rot_angle).view(-1, 1)
    sina = torch.sin(rot_angle).view(-1, 1)

    raw_1 = torch.cat([cosa, -sina], dim=1)
    raw_2 = torch.cat([sina, cosa], dim=1)
    R = torch.cat((raw_1.unsqueeze(dim=1), raw_2.unsqueeze(dim=1)), dim=1)  # (N, 2, 2)

    pc_temp = pc[:, [0, 2]].unsqueeze(dim=1)  # (N, 1, 2)
    pc[:, [0, 2]] = torch.matmul(pc_temp, R.permute(0, 2, 1)).squeeze(dim=1)

    return pc
[ "def", "rotate_pc_along_y_torch", "(", "pc", ",", "rot_angle", ")", ":", "cosa", "=", "torch", ".", "cos", "(", "rot_angle", ")", ".", "view", "(", "-", "1", ",", "1", ")", "sina", "=", "torch", ".", "sin", "(", "rot_angle", ")", ".", "view", "(", "-", "1", ",", "1", ")", "raw_1", "=", "torch", ".", "cat", "(", "[", "cosa", ",", "-", "sina", "]", ",", "dim", "=", "1", ")", "raw_2", "=", "torch", ".", "cat", "(", "[", "sina", ",", "cosa", "]", ",", "dim", "=", "1", ")", "R", "=", "torch", ".", "cat", "(", "(", "raw_1", ".", "unsqueeze", "(", "dim", "=", "1", ")", ",", "raw_2", ".", "unsqueeze", "(", "dim", "=", "1", ")", ")", ",", "dim", "=", "1", ")", "# (N, 2, 2)", "pc_temp", "=", "pc", "[", ":", ",", "[", "0", ",", "2", "]", "]", ".", "unsqueeze", "(", "dim", "=", "1", ")", "# (N, 1, 2)", "pc", "[", ":", ",", "[", "0", ",", "2", "]", "]", "=", "torch", ".", "matmul", "(", "pc_temp", ",", "R", ".", "permute", "(", "0", ",", "2", ",", "1", ")", ")", ".", "squeeze", "(", "dim", "=", "1", ")", "return", "pc" ]
https://github.com/sshaoshuai/PointRCNN/blob/1d0dee91262b970f460135252049112d80259ca0/lib/utils/bbox_transform.py#L5-L21
mne-tools/mne-python
f90b303ce66a8415e64edd4605b09ac0179c1ebf
mne/io/curry/curry.py
python
_read_curry_info
(curry_paths)
return info, curry_params.n_samples, curry_params.is_ascii
Extract info from curry parameter files.
Extract info from curry parameter files.
[ "Extract", "info", "from", "curry", "parameter", "files", "." ]
def _read_curry_info(curry_paths):
    """Extract info from curry parameter files."""
    curry_params = _read_curry_parameters(curry_paths['info'])
    R = np.eye(4)
    R[[0, 1], [0, 1]] = -1  # rotate 180 deg
    # shift down and back
    # (chosen by eyeballing to make the CTF helmet look roughly correct)
    R[:3, 3] = [0., -0.015, -0.12]
    curry_dev_dev_t = Transform('ctf_meg', 'meg', R)

    # read labels from label files
    label_fname = curry_paths['labels']
    types = ["meg", "eeg", "misc"]
    labels = _read_curry_lines(label_fname,
                               ["LABELS" + CHANTYPES[key] for key in types])
    sensors = _read_curry_lines(label_fname,
                                ["SENSORS" + CHANTYPES[key] for key in types])
    normals = _read_curry_lines(label_fname,
                                ['NORMALS' + CHANTYPES[key] for key in types])
    assert len(labels) == len(sensors) == len(normals)

    all_chans = list()
    for key in ["meg", "eeg", "misc"]:
        chanidx_is_explicit = (len(curry_params.chanidx_in_file[
            "CHAN_IN_FILE" + CHANTYPES[key]]) > 0)  # channel index
        # position in the datafile may or may not be explicitly declared,
        # based on the CHAN_IN_FILE section in info file
        for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]):
            chanidx = len(all_chans) + 1  # by default, just assume the
            # channel index in the datafile is in order of the channel
            # names as we found them in the labels file
            if chanidx_is_explicit:  # but, if explicitly declared, use
                # that index number
                chanidx = int(curry_params.chanidx_in_file[
                    "CHAN_IN_FILE" + CHANTYPES[key]][ind])
            if chanidx <= 0:
                # if chanidx was explicitly declared to be ' 0',
                # it means the channel is not actually saved in the data file
                # (e.g. the "Ref" channel), so don't add it to our list.
                # Git issue #8391
                continue
            ch = {"ch_name": chan,
                  "unit": curry_params.unit_dict[key],
                  "kind": FIFFV_CHANTYPES[key],
                  "coil_type": FIFFV_COILTYPES[key],
                  "ch_idx": chanidx
                  }
            if key == "eeg":
                loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
                # XXX just the sensor, where is ref (next 3)?
                assert loc.shape == (3,)
                loc /= 1000.  # to meters
                loc = np.concatenate([loc, np.zeros(9)])
                ch['loc'] = loc
                # XXX need to check/ensure this
                ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
            elif key == 'meg':
                pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
                pos /= 1000.  # to meters
                pos = pos[:3]  # just the inner coil
                pos = apply_trans(curry_dev_dev_t, pos)
                nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float)
                assert np.isclose(np.linalg.norm(nn), 1., atol=1e-4)
                nn /= np.linalg.norm(nn)
                nn = apply_trans(curry_dev_dev_t, nn, move=False)
                trans = np.eye(4)
                trans[:3, 3] = pos
                trans[:3, :3] = _normal_orth(nn).T
                ch['loc'] = _coil_trans_to_loc(trans)
                ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
            all_chans.append(ch)
    ch_count = len(all_chans)
    assert (ch_count == curry_params.n_chans)  # ensure that we have assembled
    # the same number of channels as declared in the info (.DAP) file in the
    # DATA_PARAMETERS section. Git issue #8391

    # sort the channels to assure they are in the order that matches how
    # recorded in the datafile. In general they most likely are already in
    # the correct order, but if the channel index in the data file was
    # explicitly declared we might as well use it.
    all_chans = sorted(all_chans, key=lambda ch: ch['ch_idx'])
    ch_names = [chan["ch_name"] for chan in all_chans]
    info = create_info(ch_names, curry_params.sfreq)
    with info._unlock():
        info['meas_date'] = curry_params.dt_start  # for Git issue #8398
    _make_trans_dig(curry_paths, info, curry_dev_dev_t)

    for ind, ch_dict in enumerate(info["chs"]):
        all_chans[ind].pop('ch_idx')
        ch_dict.update(all_chans[ind])
        assert ch_dict['loc'].shape == (12,)
        ch_dict['unit'] = SI_UNITS[all_chans[ind]['unit'][1]]
        ch_dict['cal'] = SI_UNIT_SCALE[all_chans[ind]['unit'][0]]

    return info, curry_params.n_samples, curry_params.is_ascii
[ "def", "_read_curry_info", "(", "curry_paths", ")", ":", "curry_params", "=", "_read_curry_parameters", "(", "curry_paths", "[", "'info'", "]", ")", "R", "=", "np", ".", "eye", "(", "4", ")", "R", "[", "[", "0", ",", "1", "]", ",", "[", "0", ",", "1", "]", "]", "=", "-", "1", "# rotate 180 deg", "# shift down and back", "# (chosen by eyeballing to make the CTF helmet look roughly correct)", "R", "[", ":", "3", ",", "3", "]", "=", "[", "0.", ",", "-", "0.015", ",", "-", "0.12", "]", "curry_dev_dev_t", "=", "Transform", "(", "'ctf_meg'", ",", "'meg'", ",", "R", ")", "# read labels from label files", "label_fname", "=", "curry_paths", "[", "'labels'", "]", "types", "=", "[", "\"meg\"", ",", "\"eeg\"", ",", "\"misc\"", "]", "labels", "=", "_read_curry_lines", "(", "label_fname", ",", "[", "\"LABELS\"", "+", "CHANTYPES", "[", "key", "]", "for", "key", "in", "types", "]", ")", "sensors", "=", "_read_curry_lines", "(", "label_fname", ",", "[", "\"SENSORS\"", "+", "CHANTYPES", "[", "key", "]", "for", "key", "in", "types", "]", ")", "normals", "=", "_read_curry_lines", "(", "label_fname", ",", "[", "'NORMALS'", "+", "CHANTYPES", "[", "key", "]", "for", "key", "in", "types", "]", ")", "assert", "len", "(", "labels", ")", "==", "len", "(", "sensors", ")", "==", "len", "(", "normals", ")", "all_chans", "=", "list", "(", ")", "for", "key", "in", "[", "\"meg\"", ",", "\"eeg\"", ",", "\"misc\"", "]", ":", "chanidx_is_explicit", "=", "(", "len", "(", "curry_params", ".", "chanidx_in_file", "[", "\"CHAN_IN_FILE\"", "+", "CHANTYPES", "[", "key", "]", "]", ")", ">", "0", ")", "# channel index", "# position in the datafile may or may not be explicitly declared,", "# based on the CHAN_IN_FILE section in info file", "for", "ind", ",", "chan", "in", "enumerate", "(", "labels", "[", "\"LABELS\"", "+", "CHANTYPES", "[", "key", "]", "]", ")", ":", "chanidx", "=", "len", "(", "all_chans", ")", "+", "1", "# by default, just assume the", "# channel index in the datafile is in order of the channel", "# names as we found them in the labels file", "if", "chanidx_is_explicit", ":", "# but, if explicitly declared, use", "# that index number", "chanidx", "=", "int", "(", "curry_params", ".", "chanidx_in_file", "[", "\"CHAN_IN_FILE\"", "+", "CHANTYPES", "[", "key", "]", "]", "[", "ind", "]", ")", "if", "chanidx", "<=", "0", ":", "# if chanidx was explicitly declared to be ' 0',", "# it means the channel is not actually saved in the data file", "# (e.g. 
the \"Ref\" channel), so don't add it to our list.", "# Git issue #8391", "continue", "ch", "=", "{", "\"ch_name\"", ":", "chan", ",", "\"unit\"", ":", "curry_params", ".", "unit_dict", "[", "key", "]", ",", "\"kind\"", ":", "FIFFV_CHANTYPES", "[", "key", "]", ",", "\"coil_type\"", ":", "FIFFV_COILTYPES", "[", "key", "]", ",", "\"ch_idx\"", ":", "chanidx", "}", "if", "key", "==", "\"eeg\"", ":", "loc", "=", "np", ".", "array", "(", "sensors", "[", "\"SENSORS\"", "+", "CHANTYPES", "[", "key", "]", "]", "[", "ind", "]", ",", "float", ")", "# XXX just the sensor, where is ref (next 3)?", "assert", "loc", ".", "shape", "==", "(", "3", ",", ")", "loc", "/=", "1000.", "# to meters", "loc", "=", "np", ".", "concatenate", "(", "[", "loc", ",", "np", ".", "zeros", "(", "9", ")", "]", ")", "ch", "[", "'loc'", "]", "=", "loc", "# XXX need to check/ensure this", "ch", "[", "'coord_frame'", "]", "=", "FIFF", ".", "FIFFV_COORD_HEAD", "elif", "key", "==", "'meg'", ":", "pos", "=", "np", ".", "array", "(", "sensors", "[", "\"SENSORS\"", "+", "CHANTYPES", "[", "key", "]", "]", "[", "ind", "]", ",", "float", ")", "pos", "/=", "1000.", "# to meters", "pos", "=", "pos", "[", ":", "3", "]", "# just the inner coil", "pos", "=", "apply_trans", "(", "curry_dev_dev_t", ",", "pos", ")", "nn", "=", "np", ".", "array", "(", "normals", "[", "\"NORMALS\"", "+", "CHANTYPES", "[", "key", "]", "]", "[", "ind", "]", ",", "float", ")", "assert", "np", ".", "isclose", "(", "np", ".", "linalg", ".", "norm", "(", "nn", ")", ",", "1.", ",", "atol", "=", "1e-4", ")", "nn", "/=", "np", ".", "linalg", ".", "norm", "(", "nn", ")", "nn", "=", "apply_trans", "(", "curry_dev_dev_t", ",", "nn", ",", "move", "=", "False", ")", "trans", "=", "np", ".", "eye", "(", "4", ")", "trans", "[", ":", "3", ",", "3", "]", "=", "pos", "trans", "[", ":", "3", ",", ":", "3", "]", "=", "_normal_orth", "(", "nn", ")", ".", "T", "ch", "[", "'loc'", "]", "=", "_coil_trans_to_loc", "(", "trans", ")", "ch", "[", "'coord_frame'", "]", "=", "FIFF", ".", "FIFFV_COORD_DEVICE", "all_chans", ".", "append", "(", "ch", ")", "ch_count", "=", "len", "(", "all_chans", ")", "assert", "(", "ch_count", "==", "curry_params", ".", "n_chans", ")", "# ensure that we have assembled", "# the same number of channels as declared in the info (.DAP) file in the", "# DATA_PARAMETERS section. Git issue #8391", "# sort the channels to assure they are in the order that matches how", "# recorded in the datafile. 
In general they most likely are already in", "# the correct order, but if the channel index in the data file was", "# explicitly declared we might as well use it.", "all_chans", "=", "sorted", "(", "all_chans", ",", "key", "=", "lambda", "ch", ":", "ch", "[", "'ch_idx'", "]", ")", "ch_names", "=", "[", "chan", "[", "\"ch_name\"", "]", "for", "chan", "in", "all_chans", "]", "info", "=", "create_info", "(", "ch_names", ",", "curry_params", ".", "sfreq", ")", "with", "info", ".", "_unlock", "(", ")", ":", "info", "[", "'meas_date'", "]", "=", "curry_params", ".", "dt_start", "# for Git issue #8398", "_make_trans_dig", "(", "curry_paths", ",", "info", ",", "curry_dev_dev_t", ")", "for", "ind", ",", "ch_dict", "in", "enumerate", "(", "info", "[", "\"chs\"", "]", ")", ":", "all_chans", "[", "ind", "]", ".", "pop", "(", "'ch_idx'", ")", "ch_dict", ".", "update", "(", "all_chans", "[", "ind", "]", ")", "assert", "ch_dict", "[", "'loc'", "]", ".", "shape", "==", "(", "12", ",", ")", "ch_dict", "[", "'unit'", "]", "=", "SI_UNITS", "[", "all_chans", "[", "ind", "]", "[", "'unit'", "]", "[", "1", "]", "]", "ch_dict", "[", "'cal'", "]", "=", "SI_UNIT_SCALE", "[", "all_chans", "[", "ind", "]", "[", "'unit'", "]", "[", "0", "]", "]", "return", "info", ",", "curry_params", ".", "n_samples", ",", "curry_params", ".", "is_ascii" ]
https://github.com/mne-tools/mne-python/blob/f90b303ce66a8415e64edd4605b09ac0179c1ebf/mne/io/curry/curry.py#L208-L303
osmr/imgclsmob
f2993d3ce73a2f7ddba05da3891defb08547d504
pytorch/pytorchcv/models/unet.py
python
get_unet
(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs)
return net
Create U-Net model with specific parameters. Parameters: ---------- model_name : str or None, default None Model name for loading pretrained model. pretrained : bool, default False Whether to load the pretrained weights for model. root : str, default '~/.torch/models' Location for keeping the model parameters.
Create U-Net model with specific parameters.
[ "Create", "U", "-", "Net", "model", "with", "specific", "parameters", "." ]
def get_unet(model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs):
    """
    Create U-Net model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    channels = [[128, 256, 512, 512], [512, 256, 128, 64]]
    init_block_channels = 64

    net = UNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
[ "def", "get_unet", "(", "model_name", "=", "None", ",", "pretrained", "=", "False", ",", "root", "=", "os", ".", "path", ".", "join", "(", "\"~\"", ",", "\".torch\"", ",", "\"models\"", ")", ",", "*", "*", "kwargs", ")", ":", "channels", "=", "[", "[", "128", ",", "256", ",", "512", ",", "512", "]", ",", "[", "512", ",", "256", ",", "128", ",", "64", "]", "]", "init_block_channels", "=", "64", "net", "=", "UNet", "(", "channels", "=", "channels", ",", "init_block_channels", "=", "init_block_channels", ",", "*", "*", "kwargs", ")", "if", "pretrained", ":", "if", "(", "model_name", "is", "None", ")", "or", "(", "not", "model_name", ")", ":", "raise", "ValueError", "(", "\"Parameter `model_name` should be properly initialized for loading pretrained model.\"", ")", "from", ".", "model_store", "import", "download_model", "download_model", "(", "net", "=", "net", ",", "model_name", "=", "model_name", ",", "local_model_store_dir_path", "=", "root", ")", "return", "net" ]
https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/pytorch/pytorchcv/models/unet.py#L240-L273
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/timeit.py
python
repeat
(stmt="pass", setup="pass", timer=default_timer, repeat=default_repeat, number=default_number, globals=None)
return Timer(stmt, setup, timer, globals).repeat(repeat, number)
Convenience function to create Timer object and call repeat method.
Convenience function to create Timer object and call repeat method.
[ "Convenience", "function", "to", "create", "Timer", "object", "and", "call", "repeat", "method", "." ]
def repeat(stmt="pass", setup="pass", timer=default_timer,
           repeat=default_repeat, number=default_number, globals=None):
    """Convenience function to create Timer object and call repeat method."""
    return Timer(stmt, setup, timer, globals).repeat(repeat, number)
[ "def", "repeat", "(", "stmt", "=", "\"pass\"", ",", "setup", "=", "\"pass\"", ",", "timer", "=", "default_timer", ",", "repeat", "=", "default_repeat", ",", "number", "=", "default_number", ",", "globals", "=", "None", ")", ":", "return", "Timer", "(", "stmt", ",", "setup", ",", "timer", ",", "globals", ")", ".", "repeat", "(", "repeat", ",", "number", ")" ]
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/timeit.py#L234-L237
CedricGuillemet/Imogen
ee417b42747ed5b46cb11b02ef0c3630000085b3
bin/Lib/email/message.py
python
Message.__getitem__
(self, name)
return self.get(name)
Get a header value. Return None if the header is missing instead of raising an exception. Note that if the header appeared multiple times, exactly which occurrence gets returned is undefined. Use get_all() to get all the values matching a header field name.
Get a header value.
[ "Get", "a", "header", "value", "." ]
def __getitem__(self, name):
    """Get a header value.

    Return None if the header is missing instead of raising an exception.

    Note that if the header appeared multiple times, exactly which
    occurrence gets returned is undefined.  Use get_all() to get all
    the values matching a header field name.
    """
    return self.get(name)
[ "def", "__getitem__", "(", "self", ",", "name", ")", ":", "return", "self", ".", "get", "(", "name", ")" ]
https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/email/message.py#L382-L391
convexengineering/gpkit
3d4dd34ba4e95f1fe58fe9ea45401a6ff2fde1fa
gpkit/globals.py
python
load_settings
(path=None, trybuild=True)
return settings_
Load the settings file at SETTINGS_PATH; return settings dict
Load the settings file at SETTINGS_PATH; return settings dict
[ "Load", "the", "settings", "file", "at", "SETTINGS_PATH", ";", "return", "settings", "dict" ]
def load_settings(path=None, trybuild=True):
    "Load the settings file at SETTINGS_PATH; return settings dict"
    if path is None:
        path = os.sep.join([os.path.dirname(__file__), "env", "settings"])
    try:  # if the settings file already exists, read it
        with open(path) as settingsfile:
            lines = [line[:-1].split(" : ") for line in settingsfile
                     if len(line.split(" : ")) == 2]
            settings_ = {name: value.split(", ") for name, value in lines}
        for name, value in settings_.items():
            # flatten 1-element lists unless they're the solver list
            if len(value) == 1 and name != "installed_solvers":
                settings_[name], = value
    except IOError:  # pragma: no cover
        settings_ = {"installed_solvers": [""]}
    if settings_["installed_solvers"] == [""] and trybuild:  # pragma: no cover
        print("Found no installed solvers, beginning a build.")
        build()
        settings_ = load_settings(path, trybuild=False)
        if settings_["installed_solvers"] != [""]:
            settings_["just built!"] = True
        else:
            print("""
=============
Build failed! :(
=============
You may need to install a solver and then `import gpkit` again;
see https://gpkit.readthedocs.io/en/latest/installation.html
for troubleshooting details.

But before you go, please post the output above
(starting from "Found no installed solvers, beginning a build.")
to [email protected] or https://github.com/convexengineering/gpkit/issues/new
so we can prevent others from having to see this message.

    Thanks!  :)
""")
    settings_["default_solver"] = settings_["installed_solvers"][0]
    return settings_
[ "def", "load_settings", "(", "path", "=", "None", ",", "trybuild", "=", "True", ")", ":", "if", "path", "is", "None", ":", "path", "=", "os", ".", "sep", ".", "join", "(", "[", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"env\"", ",", "\"settings\"", "]", ")", "try", ":", "# if the settings file already exists, read it", "with", "open", "(", "path", ")", "as", "settingsfile", ":", "lines", "=", "[", "line", "[", ":", "-", "1", "]", ".", "split", "(", "\" : \"", ")", "for", "line", "in", "settingsfile", "if", "len", "(", "line", ".", "split", "(", "\" : \"", ")", ")", "==", "2", "]", "settings_", "=", "{", "name", ":", "value", ".", "split", "(", "\", \"", ")", "for", "name", ",", "value", "in", "lines", "}", "for", "name", ",", "value", "in", "settings_", ".", "items", "(", ")", ":", "# flatten 1-element lists unless they're the solver list", "if", "len", "(", "value", ")", "==", "1", "and", "name", "!=", "\"installed_solvers\"", ":", "settings_", "[", "name", "]", ",", "=", "value", "except", "IOError", ":", "# pragma: no cover", "settings_", "=", "{", "\"installed_solvers\"", ":", "[", "\"\"", "]", "}", "if", "settings_", "[", "\"installed_solvers\"", "]", "==", "[", "\"\"", "]", "and", "trybuild", ":", "# pragma: no cover", "print", "(", "\"Found no installed solvers, beginning a build.\"", ")", "build", "(", ")", "settings_", "=", "load_settings", "(", "path", ",", "trybuild", "=", "False", ")", "if", "settings_", "[", "\"installed_solvers\"", "]", "!=", "[", "\"\"", "]", ":", "settings_", "[", "\"just built!\"", "]", "=", "True", "else", ":", "print", "(", "\"\"\"\n=============\nBuild failed! :(\n=============\nYou may need to install a solver and then `import gpkit` again;\nsee https://gpkit.readthedocs.io/en/latest/installation.html\nfor troubleshooting details.\n\nBut before you go, please post the output above\n(starting from \"Found no installed solvers, beginning a build.\")\nto [email protected] or https://github.com/convexengineering/gpkit/issues/new\nso we can prevent others from having to see this message.\n\n Thanks! :)\n\"\"\"", ")", "settings_", "[", "\"default_solver\"", "]", "=", "settings_", "[", "\"installed_solvers\"", "]", "[", "0", "]", "return", "settings_" ]
https://github.com/convexengineering/gpkit/blob/3d4dd34ba4e95f1fe58fe9ea45401a6ff2fde1fa/gpkit/globals.py#L7-L45
dmlc/dgl
8d14a739bc9e446d6c92ef83eafe5782398118de
examples/pytorch/ogb/deepwalk/model.py
python
SkipGramModel.forward
(self, pos_u, pos_v, neg_v)
return torch.sum(score), torch.sum(neg_score)
Do forward and backward. It is designed for future use.
Do forward and backward. It is designed for future use.
[ "Do", "forward", "and", "backward", ".", "It", "is", "designed", "for", "future", "use", "." ]
def forward(self, pos_u, pos_v, neg_v):
    ''' Do forward and backward. It is designed for future use. '''
    emb_u = self.u_embeddings(pos_u)
    emb_v = self.v_embeddings(pos_v)
    emb_neg_v = self.v_embeddings(neg_v)

    score = torch.sum(torch.mul(emb_u, emb_v), dim=1)
    score = torch.clamp(score, max=6, min=-6)
    score = -F.logsigmoid(score)

    neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()
    neg_score = torch.clamp(neg_score, max=6, min=-6)
    neg_score = -torch.sum(F.logsigmoid(-neg_score), dim=1)

    #return torch.mean(score + neg_score)
    return torch.sum(score), torch.sum(neg_score)
[ "def", "forward", "(", "self", ",", "pos_u", ",", "pos_v", ",", "neg_v", ")", ":", "emb_u", "=", "self", ".", "u_embeddings", "(", "pos_u", ")", "emb_v", "=", "self", ".", "v_embeddings", "(", "pos_v", ")", "emb_neg_v", "=", "self", ".", "v_embeddings", "(", "neg_v", ")", "score", "=", "torch", ".", "sum", "(", "torch", ".", "mul", "(", "emb_u", ",", "emb_v", ")", ",", "dim", "=", "1", ")", "score", "=", "torch", ".", "clamp", "(", "score", ",", "max", "=", "6", ",", "min", "=", "-", "6", ")", "score", "=", "-", "F", ".", "logsigmoid", "(", "score", ")", "neg_score", "=", "torch", ".", "bmm", "(", "emb_neg_v", ",", "emb_u", ".", "unsqueeze", "(", "2", ")", ")", ".", "squeeze", "(", ")", "neg_score", "=", "torch", ".", "clamp", "(", "neg_score", ",", "max", "=", "6", ",", "min", "=", "-", "6", ")", "neg_score", "=", "-", "torch", ".", "sum", "(", "F", ".", "logsigmoid", "(", "-", "neg_score", ")", ",", "dim", "=", "1", ")", "#return torch.mean(score + neg_score)", "return", "torch", ".", "sum", "(", "score", ")", ",", "torch", ".", "sum", "(", "neg_score", ")" ]
https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/examples/pytorch/ogb/deepwalk/model.py#L458-L473
securityclippy/elasticintel
aa08d3e9f5ab1c000128e95161139ce97ff0e334
ingest_feed_lambda/pandas/core/dtypes/concat.py
python
_concat_compat
(to_concat, axis=0)
return np.concatenate(to_concat, axis=axis)
provide concatenation of an array of arrays each of which is a single 'normalized' dtypes (in that for example, if it's object, then it is a non-datetimelike and provide a combined dtype for the resulting array that preserves the overall dtype if possible) Parameters ---------- to_concat : array of arrays axis : axis to provide concatenation Returns ------- a single array, preserving the combined dtypes
provide concatenation of an array of arrays each of which is a single 'normalized' dtypes (in that for example, if it's object, then it is a non-datetimelike and provide a combined dtype for the resulting array that preserves the overall dtype if possible)
[ "provide", "concatenation", "of", "an", "array", "of", "arrays", "each", "of", "which", "is", "a", "single", "normalized", "dtypes", "(", "in", "that", "for", "example", "if", "it", "s", "object", "then", "it", "is", "a", "non", "-", "datetimelike", "and", "provide", "a", "combined", "dtype", "for", "the", "resulting", "array", "that", "preserves", "the", "overall", "dtype", "if", "possible", ")" ]
def _concat_compat(to_concat, axis=0):
    """
    provide concatenation of an array of arrays each of which is a single
    'normalized' dtypes (in that for example, if it's object, then it is a
    non-datetimelike and provide a combined dtype for the resulting array that
    preserves the overall dtype if possible)

    Parameters
    ----------
    to_concat : array of arrays
    axis : axis to provide concatenation

    Returns
    -------
    a single array, preserving the combined dtypes
    """

    # filter empty arrays
    # 1-d dtypes always are included here
    def is_nonempty(x):
        try:
            return x.shape[axis] > 0
        except Exception:
            return True

    nonempty = [x for x in to_concat if is_nonempty(x)]

    # If all arrays are empty, there's nothing to convert, just short-cut to
    # the concatenation, #3121.
    #
    # Creating an empty array directly is tempting, but the winnings would be
    # marginal given that it would still require shape & dtype calculation and
    # np.concatenate which has them both implemented is compiled.

    typs = get_dtype_kinds(to_concat)

    _contains_datetime = any(typ.startswith('datetime') for typ in typs)
    _contains_period = any(typ.startswith('period') for typ in typs)

    if 'category' in typs:
        # this must be priort to _concat_datetime,
        # to support Categorical + datetime-like
        return _concat_categorical(to_concat, axis=axis)

    elif _contains_datetime or 'timedelta' in typs or _contains_period:
        return _concat_datetime(to_concat, axis=axis, typs=typs)

    # these are mandated to handle empties as well
    elif 'sparse' in typs:
        return _concat_sparse(to_concat, axis=axis, typs=typs)

    if not nonempty:
        # we have all empties, but may need to coerce the result dtype to
        # object if we have non-numeric type operands (numpy would otherwise
        # cast this to float)
        typs = get_dtype_kinds(to_concat)
        if len(typs) != 1:
            if (not len(typs - set(['i', 'u', 'f'])) or
                    not len(typs - set(['bool', 'i', 'u']))):
                # let numpy coerce
                pass
            else:
                # coerce to object
                to_concat = [x.astype('object') for x in to_concat]

    return np.concatenate(to_concat, axis=axis)
[ "def", "_concat_compat", "(", "to_concat", ",", "axis", "=", "0", ")", ":", "# filter empty arrays", "# 1-d dtypes always are included here", "def", "is_nonempty", "(", "x", ")", ":", "try", ":", "return", "x", ".", "shape", "[", "axis", "]", ">", "0", "except", "Exception", ":", "return", "True", "nonempty", "=", "[", "x", "for", "x", "in", "to_concat", "if", "is_nonempty", "(", "x", ")", "]", "# If all arrays are empty, there's nothing to convert, just short-cut to", "# the concatenation, #3121.", "#", "# Creating an empty array directly is tempting, but the winnings would be", "# marginal given that it would still require shape & dtype calculation and", "# np.concatenate which has them both implemented is compiled.", "typs", "=", "get_dtype_kinds", "(", "to_concat", ")", "_contains_datetime", "=", "any", "(", "typ", ".", "startswith", "(", "'datetime'", ")", "for", "typ", "in", "typs", ")", "_contains_period", "=", "any", "(", "typ", ".", "startswith", "(", "'period'", ")", "for", "typ", "in", "typs", ")", "if", "'category'", "in", "typs", ":", "# this must be priort to _concat_datetime,", "# to support Categorical + datetime-like", "return", "_concat_categorical", "(", "to_concat", ",", "axis", "=", "axis", ")", "elif", "_contains_datetime", "or", "'timedelta'", "in", "typs", "or", "_contains_period", ":", "return", "_concat_datetime", "(", "to_concat", ",", "axis", "=", "axis", ",", "typs", "=", "typs", ")", "# these are mandated to handle empties as well", "elif", "'sparse'", "in", "typs", ":", "return", "_concat_sparse", "(", "to_concat", ",", "axis", "=", "axis", ",", "typs", "=", "typs", ")", "if", "not", "nonempty", ":", "# we have all empties, but may need to coerce the result dtype to", "# object if we have non-numeric type operands (numpy would otherwise", "# cast this to float)", "typs", "=", "get_dtype_kinds", "(", "to_concat", ")", "if", "len", "(", "typs", ")", "!=", "1", ":", "if", "(", "not", "len", "(", "typs", "-", "set", "(", "[", "'i'", ",", "'u'", ",", "'f'", "]", ")", ")", "or", "not", "len", "(", "typs", "-", "set", "(", "[", "'bool'", ",", "'i'", ",", "'u'", "]", ")", ")", ")", ":", "# let numpy coerce", "pass", "else", ":", "# coerce to object", "to_concat", "=", "[", "x", ".", "astype", "(", "'object'", ")", "for", "x", "in", "to_concat", "]", "return", "np", ".", "concatenate", "(", "to_concat", ",", "axis", "=", "axis", ")" ]
https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/pandas/core/dtypes/concat.py#L102-L168
Runbook/runbook
7b68622f75ef09f654046f0394540025f3ee7445
src/bridge/bridge.py
python
decimateRedis
(itemkey, item)
return True
This will parse out a dictionary and kill the redis data
This will parse out a dictionary and kill the redis data
[ "This", "will", "parse", "out", "a", "dictionary", "and", "kill", "the", "redis", "data" ]
def decimateRedis(itemkey, item):
    ''' This will parse out a dictionary and kill the redis data '''
    if "timer" in item['data']:
        try:
            r_server.srem(item['data']['timer'], item['cid'])
        except:
            pass
    try:
        r_server.delete(itemkey)
    except:
        pass
    return True
[ "def", "decimateRedis", "(", "itemkey", ",", "item", ")", ":", "if", "\"timer\"", "in", "item", "[", "'data'", "]", ":", "try", ":", "r_server", ".", "srem", "(", "item", "[", "'data'", "]", "[", "'timer'", "]", ",", "item", "[", "'cid'", "]", ")", "except", ":", "pass", "try", ":", "r_server", ".", "delete", "(", "itemkey", ")", "except", ":", "pass", "return", "True" ]
https://github.com/Runbook/runbook/blob/7b68622f75ef09f654046f0394540025f3ee7445/src/bridge/bridge.py#L134-L145
ni/nidaqmx-python
62fc6b48cbbb330fe1bcc9aedadc86610a1269b6
nidaqmx/_task_modules/channels/ci_channel.py
python
CIChannel.ci_freq_div
(self)
[]
def ci_freq_div(self):
    cfunc = lib_importer.windll.DAQmxResetCIFreqDiv
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes_byte_str]

    error_code = cfunc(
        self._handle, self._name)
    check_for_error(error_code)
[ "def", "ci_freq_div", "(", "self", ")", ":", "cfunc", "=", "lib_importer", ".", "windll", ".", "DAQmxResetCIFreqDiv", "if", "cfunc", ".", "argtypes", "is", "None", ":", "with", "cfunc", ".", "arglock", ":", "if", "cfunc", ".", "argtypes", "is", "None", ":", "cfunc", ".", "argtypes", "=", "[", "lib_importer", ".", "task_handle", ",", "ctypes_byte_str", "]", "error_code", "=", "cfunc", "(", "self", ".", "_handle", ",", "self", ".", "_name", ")", "check_for_error", "(", "error_code", ")" ]
https://github.com/ni/nidaqmx-python/blob/62fc6b48cbbb330fe1bcc9aedadc86610a1269b6/nidaqmx/_task_modules/channels/ci_channel.py#L5149-L5159
CouchPotato/CouchPotatoServer
7260c12f72447ddb6f062367c6dfbda03ecd4e9c
libs/tornado/autoreload.py
python
add_reload_hook
(fn)
Add a function to be called before reloading the process. Note that for open file and socket handles it is generally preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or ``tornado.platform.auto.set_close_exec``) instead of using a reload hook to close them.
Add a function to be called before reloading the process.
[ "Add", "a", "function", "to", "be", "called", "before", "reloading", "the", "process", "." ]
def add_reload_hook(fn):
    """Add a function to be called before reloading the process.

    Note that for open file and socket handles it is generally
    preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
    ``tornado.platform.auto.set_close_exec``) instead of using a reload
    hook to close them.
    """
    _reload_hooks.append(fn)
[ "def", "add_reload_hook", "(", "fn", ")", ":", "_reload_hooks", ".", "append", "(", "fn", ")" ]
https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/tornado/autoreload.py#L149-L157
aleju/imgaug
0101108d4fed06bc5056c4a03e2bcb0216dac326
imgaug/augmenters/debug.py
python
_DebugGridImageCell._resize_overlay
(cls, arr, size)
return arr_rs
[]
def _resize_overlay(cls, arr, size):
    arr_rs = ia.imresize_single_image(arr, size, interpolation="nearest")
    return arr_rs
[ "def", "_resize_overlay", "(", "cls", ",", "arr", ",", "size", ")", ":", "arr_rs", "=", "ia", ".", "imresize_single_image", "(", "arr", ",", "size", ",", "interpolation", "=", "\"nearest\"", ")", "return", "arr_rs" ]
https://github.com/aleju/imgaug/blob/0101108d4fed06bc5056c4a03e2bcb0216dac326/imgaug/augmenters/debug.py#L254-L256
pyvista/pyvista
012dbb95a9aae406c3cd4cd94fc8c477f871e426
pyvista/themes.py
python
DefaultTheme.hidden_line_removal
(self)
return self._hidden_line_removal
Return or set hidden line removal. Wireframe geometry will be drawn using hidden line removal if the rendering engine supports it. See Also -------- pyvista.BasePlotter.enable_hidden_line_removal Examples -------- Enable hidden line removal. >>> import pyvista >>> pyvista.global_theme.hidden_line_removal = True # doctest:+SKIP >>> pyvista.global_theme.hidden_line_removal # doctest:+SKIP True
Return or set hidden line removal.
[ "Return", "or", "set", "hidden", "line", "removal", "." ]
def hidden_line_removal(self) -> bool:
    """Return or set hidden line removal.

    Wireframe geometry will be drawn using hidden line removal if the
    rendering engine supports it.

    See Also
    --------
    pyvista.BasePlotter.enable_hidden_line_removal

    Examples
    --------
    Enable hidden line removal.

    >>> import pyvista
    >>> pyvista.global_theme.hidden_line_removal = True  # doctest:+SKIP
    >>> pyvista.global_theme.hidden_line_removal  # doctest:+SKIP
    True

    """
    return self._hidden_line_removal
[ "def", "hidden_line_removal", "(", "self", ")", "->", "bool", ":", "return", "self", ".", "_hidden_line_removal" ]
https://github.com/pyvista/pyvista/blob/012dbb95a9aae406c3cd4cd94fc8c477f871e426/pyvista/themes.py#L1221-L1241
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/_osx_support.py
python
_save_modified_value
(_config_vars, cv, newvalue)
Save modified and original unmodified value of configuration var
Save modified and original unmodified value of configuration var
[ "Save", "modified", "and", "original", "unmodified", "value", "of", "configuration", "var" ]
def _save_modified_value(_config_vars, cv, newvalue):
    """Save modified and original unmodified value of configuration var"""
    oldvalue = _config_vars.get(cv, '')
    if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
        _config_vars[_INITPRE + cv] = oldvalue
    _config_vars[cv] = newvalue
[ "def", "_save_modified_value", "(", "_config_vars", ",", "cv", ",", "newvalue", ")", ":", "oldvalue", "=", "_config_vars", ".", "get", "(", "cv", ",", "''", ")", "if", "(", "oldvalue", "!=", "newvalue", ")", "and", "(", "_INITPRE", "+", "cv", "not", "in", "_config_vars", ")", ":", "_config_vars", "[", "_INITPRE", "+", "cv", "]", "=", "oldvalue", "_config_vars", "[", "cv", "]", "=", "newvalue" ]
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/_osx_support.py#L120-L126
rrmina/fast-neural-style-pytorch
f32f6cac3ac7906df4655fa70ab831ca6336f9b9
utils.py
python
transfer_color
(src, dest)
return cv2.cvtColor(src_yiq, cv2.COLOR_YCrCb2BGR).clip(0,255)
Transfer Color using YIQ colorspace. Useful in preserving colors in style transfer. This method assumes inputs of shape [Height, Width, Channel] in BGR Color Space
Transfer Color using YIQ colorspace. Useful in preserving colors in style transfer. This method assumes inputs of shape [Height, Width, Channel] in BGR Color Space
[ "Transfer", "Color", "using", "YIQ", "colorspace", ".", "Useful", "in", "preserving", "colors", "in", "style", "transfer", ".", "This", "method", "assumes", "inputs", "of", "shape", "[", "Height", "Width", "Channel", "]", "in", "BGR", "Color", "Space" ]
def transfer_color(src, dest):
    """
    Transfer Color using YIQ colorspace. Useful in preserving colors in style transfer.
    This method assumes inputs of shape [Height, Width, Channel] in BGR Color Space
    """
    src, dest = src.clip(0,255), dest.clip(0,255)

    # Resize src to dest's size
    H,W,_ = src.shape
    dest = cv2.resize(dest, dsize=(W, H), interpolation=cv2.INTER_CUBIC)

    dest_gray = cv2.cvtColor(dest, cv2.COLOR_BGR2GRAY)  #1 Extract the Destination's luminance
    src_yiq = cv2.cvtColor(src, cv2.COLOR_BGR2YCrCb)    #2 Convert the Source from BGR to YIQ/YCbCr
    src_yiq[...,0] = dest_gray                          #3 Combine Destination's luminance and Source's IQ/CbCr

    return cv2.cvtColor(src_yiq, cv2.COLOR_YCrCb2BGR).clip(0,255)
[ "def", "transfer_color", "(", "src", ",", "dest", ")", ":", "src", ",", "dest", "=", "src", ".", "clip", "(", "0", ",", "255", ")", ",", "dest", ".", "clip", "(", "0", ",", "255", ")", "# Resize src to dest's size", "H", ",", "W", ",", "_", "=", "src", ".", "shape", "dest", "=", "cv2", ".", "resize", "(", "dest", ",", "dsize", "=", "(", "W", ",", "H", ")", ",", "interpolation", "=", "cv2", ".", "INTER_CUBIC", ")", "dest_gray", "=", "cv2", ".", "cvtColor", "(", "dest", ",", "cv2", ".", "COLOR_BGR2GRAY", ")", "#1 Extract the Destination's luminance", "src_yiq", "=", "cv2", ".", "cvtColor", "(", "src", ",", "cv2", ".", "COLOR_BGR2YCrCb", ")", "#2 Convert the Source from BGR to YIQ/YCbCr", "src_yiq", "[", "...", ",", "0", "]", "=", "dest_gray", "#3 Combine Destination's luminance and Source's IQ/CbCr", "return", "cv2", ".", "cvtColor", "(", "src_yiq", ",", "cv2", ".", "COLOR_YCrCb2BGR", ")", ".", "clip", "(", "0", ",", "255", ")" ]
https://github.com/rrmina/fast-neural-style-pytorch/blob/f32f6cac3ac7906df4655fa70ab831ca6336f9b9/utils.py#L77-L92
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
bin/x86/Debug/scripting_engine/Lib/decimal.py
python
Decimal.__abs__
(self, round=True, context=None)
return ans
Returns the absolute value of self. If the keyword argument 'round' is false, do not round. The expression self.__abs__(round=False) is equivalent to self.copy_abs().
Returns the absolute value of self.
[ "Returns", "the", "absolute", "value", "of", "self", "." ]
def __abs__(self, round=True, context=None):
    """Returns the absolute value of self.

    If the keyword argument 'round' is false, do not round.  The
    expression self.__abs__(round=False) is equivalent to
    self.copy_abs().
    """
    if not round:
        return self.copy_abs()

    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans

    if self._sign:
        ans = self.__neg__(context=context)
    else:
        ans = self.__pos__(context=context)

    return ans
[ "def", "__abs__", "(", "self", ",", "round", "=", "True", ",", "context", "=", "None", ")", ":", "if", "not", "round", ":", "return", "self", ".", "copy_abs", "(", ")", "if", "self", ".", "_is_special", ":", "ans", "=", "self", ".", "_check_nans", "(", "context", "=", "context", ")", "if", "ans", ":", "return", "ans", "if", "self", ".", "_sign", ":", "ans", "=", "self", ".", "__neg__", "(", "context", "=", "context", ")", "else", ":", "ans", "=", "self", ".", "__pos__", "(", "context", "=", "context", ")", "return", "ans" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/bin/x86/Debug/scripting_engine/Lib/decimal.py#L1110-L1130
gunthercox/ChatterBot
4ff8af28567ed446ae796d37c246bb6a14032fe7
chatterbot/storage/mongodb.py
python
MongoDatabaseAdapter.get_statement_model
(self)
return statement
Return the class for the statement model.
Return the class for the statement model.
[ "Return", "the", "class", "for", "the", "statement", "model", "." ]
def get_statement_model(self):
    """
    Return the class for the statement model.
    """
    from chatterbot.conversation import Statement

    # Create a storage-aware statement
    statement = Statement
    statement.storage = self

    return statement
[ "def", "get_statement_model", "(", "self", ")", ":", "from", "chatterbot", ".", "conversation", "import", "Statement", "# Create a storage-aware statement", "statement", "=", "Statement", "statement", ".", "storage", "=", "self", "return", "statement" ]
https://github.com/gunthercox/ChatterBot/blob/4ff8af28567ed446ae796d37c246bb6a14032fe7/chatterbot/storage/mongodb.py#L44-L54
chromium/web-page-replay
472351e1122bb1beb936952c7e75ae58bf8a69f1
replay.py
python
OptionsWrapper.__getattr__
(self, name)
return getattr(self._options, name)
Make the original option values available.
Make the original option values available.
[ "Make", "the", "original", "option", "values", "available", "." ]
def __getattr__(self, name):
    """Make the original option values available."""
    return getattr(self._options, name)
[ "def", "__getattr__", "(", "self", ",", "name", ")", ":", "return", "getattr", "(", "self", ".", "_options", ",", "name", ")" ]
https://github.com/chromium/web-page-replay/blob/472351e1122bb1beb936952c7e75ae58bf8a69f1/replay.py#L277-L279
ipython/ipython
c0abea7a6dfe52c1f74c9d0387d4accadba7cc14
IPython/utils/frame.py
python
extract_vars
(*names,**kw)
return dict((k,callerNS[k]) for k in names)
Extract a set of variables by name from another frame. Parameters ---------- *names : str One or more variable names which will be extracted from the caller's frame. **kw : integer, optional How many frames in the stack to walk when looking for your variables. The default is 0, which will use the frame where the call was made. Examples -------- :: In [2]: def func(x): ...: y = 1 ...: print(sorted(extract_vars('x','y').items())) ...: In [3]: func('hello') [('x', 'hello'), ('y', 1)]
Extract a set of variables by name from another frame.
[ "Extract", "a", "set", "of", "variables", "by", "name", "from", "another", "frame", "." ]
def extract_vars(*names,**kw):
    """Extract a set of variables by name from another frame.

    Parameters
    ----------
    *names : str
        One or more variable names which will be extracted from the
        caller's frame.
    **kw : integer, optional
        How many frames in the stack to walk when looking for your
        variables. The default is 0, which will use the frame where the
        call was made.

    Examples
    --------
    ::

        In [2]: def func(x):
           ...:     y = 1
           ...:     print(sorted(extract_vars('x','y').items()))
           ...:

        In [3]: func('hello')
        [('x', 'hello'), ('y', 1)]
    """
    depth = kw.get('depth',0)
    callerNS = sys._getframe(depth+1).f_locals
    return dict((k,callerNS[k]) for k in names)
[ "def", "extract_vars", "(", "*", "names", ",", "*", "*", "kw", ")", ":", "depth", "=", "kw", ".", "get", "(", "'depth'", ",", "0", ")", "callerNS", "=", "sys", ".", "_getframe", "(", "depth", "+", "1", ")", ".", "f_locals", "return", "dict", "(", "(", "k", ",", "callerNS", "[", "k", "]", ")", "for", "k", "in", "names", ")" ]
https://github.com/ipython/ipython/blob/c0abea7a6dfe52c1f74c9d0387d4accadba7cc14/IPython/utils/frame.py#L23-L51
OpenMDAO/OpenMDAO-Framework
f2e37b7de3edeaaeb2d251b375917adec059db9b
openmdao.main/src/openmdao/main/interfaces.py
python
IHasConstraints.get_constraints
()
Returns an ordered dict of constraint objects.
Returns an ordered dict of constraint objects.
[ "Returns", "an", "ordered", "dict", "of", "constraint", "objects", "." ]
def get_constraints():
    """Returns an ordered dict of constraint objects."""
[ "def", "get_constraints", "(", ")", ":" ]
https://github.com/OpenMDAO/OpenMDAO-Framework/blob/f2e37b7de3edeaaeb2d251b375917adec059db9b/openmdao.main/src/openmdao/main/interfaces.py#L724-L725
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/schemes/elliptic_curves/heegner.py
python
HeegnerPointOnEllipticCurve._trace_numerical_conductor_1
(self, prec=53)
return s
Return numerical approximation using ``prec`` terms of working precision to the trace down to the quadratic imaginary field `K` of this Heegner point. INPUT: - `prec` -- bits precision (default: 53) EXAMPLES:: sage: E = EllipticCurve('57a1') sage: P = E.heegner_point(-8); P Heegner point of discriminant -8 on elliptic curve of conductor 57 sage: P._trace_numerical_conductor_1() # approx. (1 : 0 : 1) (1.00000000000000 + ...e-16*I : ...e-16 - ...e-16*I : 1.00000000000000) sage: P = E(2,1) # a generator sage: E([1,0]).height() 0.150298370947295 sage: P.height() 0.0375745927368238 sage: E.heegner_index(-8) 2.0000? sage: E.torsion_order() 1 sage: 2*P (1 : 0 : 1)
Return numerical approximation using ``prec`` terms of working precision to the trace down to the quadratic imaginary field `K` of this Heegner point.
[ "Return", "numerical", "approximation", "using", "prec", "terms", "of", "working", "precision", "to", "the", "trace", "down", "to", "the", "quadratic", "imaginary", "field", "K", "of", "this", "Heegner", "point", "." ]
def _trace_numerical_conductor_1(self, prec=53):
    """
    Return numerical approximation using ``prec`` terms of working
    precision to the trace down to the quadratic imaginary field
    `K` of this Heegner point.

    INPUT:

    - `prec` -- bits precision (default: 53)

    EXAMPLES::

        sage: E = EllipticCurve('57a1')
        sage: P = E.heegner_point(-8); P
        Heegner point of discriminant -8 on elliptic curve of conductor 57
        sage: P._trace_numerical_conductor_1()  # approx. (1 : 0 : 1)
        (1.00000000000000 + ...e-16*I : ...e-16 - ...e-16*I : 1.00000000000000)
        sage: P = E(2,1)  # a generator
        sage: E([1,0]).height()
        0.150298370947295
        sage: P.height()
        0.0375745927368238
        sage: E.heegner_index(-8)
        2.0000?
        sage: E.torsion_order()
        1
        sage: 2*P
        (1 : 0 : 1)
    """
    if self.conductor() != 1:
        raise ValueError("conductor must be 1")
    R, U = self._good_tau_representatives()
    E = self.__E
    phi = E.modular_parametrization()
    C = rings.ComplexField(prec)
    F = E.change_ring(C)
    s = 0
    for u, weight in U:
        P = phi(C(self._qf_to_tau(u)))
        z = F.point(list(P), check=False)
        if abs(weight) == 2:
            t = F.point(z, check=False) + F.point(
                tuple([x.conjugate() for x in z]), check=False)
            if weight < 0:
                s -= t
            else:
                s += t
        else:
            if weight < 0:
                s -= z
            else:
                s += z
    return s
[ "def", "_trace_numerical_conductor_1", "(", "self", ",", "prec", "=", "53", ")", ":", "if", "self", ".", "conductor", "(", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"conductor must be 1\"", ")", "R", ",", "U", "=", "self", ".", "_good_tau_representatives", "(", ")", "E", "=", "self", ".", "__E", "phi", "=", "E", ".", "modular_parametrization", "(", ")", "C", "=", "rings", ".", "ComplexField", "(", "prec", ")", "F", "=", "E", ".", "change_ring", "(", "C", ")", "s", "=", "0", "for", "u", ",", "weight", "in", "U", ":", "P", "=", "phi", "(", "C", "(", "self", ".", "_qf_to_tau", "(", "u", ")", ")", ")", "z", "=", "F", ".", "point", "(", "list", "(", "P", ")", ",", "check", "=", "False", ")", "if", "abs", "(", "weight", ")", "==", "2", ":", "t", "=", "F", ".", "point", "(", "z", ",", "check", "=", "False", ")", "+", "F", ".", "point", "(", "tuple", "(", "[", "x", ".", "conjugate", "(", ")", "for", "x", "in", "z", "]", ")", ",", "check", "=", "False", ")", "if", "weight", "<", "0", ":", "s", "-=", "t", "else", ":", "s", "+=", "t", "else", ":", "if", "weight", "<", "0", ":", "s", "-=", "z", "else", ":", "s", "+=", "z", "return", "s" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/schemes/elliptic_curves/heegner.py#L3706-L3757
ifwe/digsby
f5fe00244744aa131e07f09348d10563f3d8fa99
digsby/src/mail/emailobj.py
python
Email.fromEmailMessage
(cls, id, email, sendtime_if_error = None)
return email
Creates an Email from a Python email.message.Message object.
Creates an Email from a Python email.message.Message object.
[ "Creates", "an", "Email", "from", "a", "Python", "email", ".", "message", ".", "Message", "object", "." ]
def fromEmailMessage(cls, id, email, sendtime_if_error = None): 'Creates an Email from a Python email.message.Message object.' encoding = email.get_content_charset() # parse name, address realname, email_address = parseaddr(email['From']) realname = unicode_hdr(realname, encoding) # parse date _email = email try: datetuple = parsedate(email['Date']) sendtime = datetime(*datetuple[:7]) except Exception: traceback.print_exc() print >> sys.stderr, 'using %s for "sendtime" instead' % sendtime_if_error sendtime = sendtime_if_error try: attachments = find_attachments(email) except: attachments = {} part = find_part(email, ('text/plain', 'text/html')) if part is None: content = u'' else: content = parse_content(part) content = replace_newlines(content) prev_length = pref('email.preview_length', 200) if len(content) > prev_length: content = content[:prev_length] + '...' else: content email = cls(id, realname, email_address, sendtime, email['Subject'], content = content, attachments=attachments) return email
[ "def", "fromEmailMessage", "(", "cls", ",", "id", ",", "email", ",", "sendtime_if_error", "=", "None", ")", ":", "encoding", "=", "email", ".", "get_content_charset", "(", ")", "# parse name, address", "realname", ",", "email_address", "=", "parseaddr", "(", "email", "[", "'From'", "]", ")", "realname", "=", "unicode_hdr", "(", "realname", ",", "encoding", ")", "# parse date", "_email", "=", "email", "try", ":", "datetuple", "=", "parsedate", "(", "email", "[", "'Date'", "]", ")", "sendtime", "=", "datetime", "(", "*", "datetuple", "[", ":", "7", "]", ")", "except", "Exception", ":", "traceback", ".", "print_exc", "(", ")", "print", ">>", "sys", ".", "stderr", ",", "'using %s for \"sendtime\" instead'", "%", "sendtime_if_error", "sendtime", "=", "sendtime_if_error", "try", ":", "attachments", "=", "find_attachments", "(", "email", ")", "except", ":", "attachments", "=", "{", "}", "part", "=", "find_part", "(", "email", ",", "(", "'text/plain'", ",", "'text/html'", ")", ")", "if", "part", "is", "None", ":", "content", "=", "u''", "else", ":", "content", "=", "parse_content", "(", "part", ")", "content", "=", "replace_newlines", "(", "content", ")", "prev_length", "=", "pref", "(", "'email.preview_length'", ",", "200", ")", "if", "len", "(", "content", ")", ">", "prev_length", ":", "content", "=", "content", "[", ":", "prev_length", "]", "+", "'...'", "else", ":", "content", "email", "=", "cls", "(", "id", ",", "realname", ",", "email_address", ",", "sendtime", ",", "email", "[", "'Subject'", "]", ",", "content", "=", "content", ",", "attachments", "=", "attachments", ")", "return", "email" ]
https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/src/mail/emailobj.py#L110-L152
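A rough usage sketch for the record above. It assumes the digsby Email class and its module-level helpers (parseaddr, pref, find_part, ...) are importable in their own Python 2 environment; the message id and headers below are made up for illustration.

from email import message_from_string

raw = ("From: Jane Doe <jane@example.com>\r\n"
       "Date: Mon, 01 Jan 2024 10:00:00 +0000\r\n"
       "Subject: Hello\r\n"
       "Content-Type: text/plain; charset=utf-8\r\n"
       "\r\n"
       "body text")
msg = message_from_string(raw)

# id=1 is arbitrary; sendtime_if_error is only consulted when the Date header fails to parse
preview = Email.fromEmailMessage(1, msg, sendtime_if_error=None)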
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
examples/research_projects/rag/utils_rag.py
python
set_extra_model_params
(extra_params, hparams, config)
return hparams, config
[]
def set_extra_model_params(extra_params, hparams, config): equivalent_param = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead equivalent_param["dropout"] = "dropout_rate" for p in extra_params: if getattr(hparams, p, None): if not hasattr(config, p) and not hasattr(config, equivalent_param[p]): logger.info("config doesn't have a `{}` attribute".format(p)) delattr(hparams, p) continue set_p = p if hasattr(config, p) else equivalent_param[p] setattr(config, set_p, getattr(hparams, p)) delattr(hparams, p) return hparams, config
[ "def", "set_extra_model_params", "(", "extra_params", ",", "hparams", ",", "config", ")", ":", "equivalent_param", "=", "{", "p", ":", "p", "for", "p", "in", "extra_params", "}", "# T5 models don't have `dropout` param, they have `dropout_rate` instead", "equivalent_param", "[", "\"dropout\"", "]", "=", "\"dropout_rate\"", "for", "p", "in", "extra_params", ":", "if", "getattr", "(", "hparams", ",", "p", ",", "None", ")", ":", "if", "not", "hasattr", "(", "config", ",", "p", ")", "and", "not", "hasattr", "(", "config", ",", "equivalent_param", "[", "p", "]", ")", ":", "logger", ".", "info", "(", "\"config doesn't have a `{}` attribute\"", ".", "format", "(", "p", ")", ")", "delattr", "(", "hparams", ",", "p", ")", "continue", "set_p", "=", "p", "if", "hasattr", "(", "config", ",", "p", ")", "else", "equivalent_param", "[", "p", "]", "setattr", "(", "config", ",", "set_p", ",", "getattr", "(", "hparams", ",", "p", ")", ")", "delattr", "(", "hparams", ",", "p", ")", "return", "hparams", ",", "config" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/examples/research_projects/rag/utils_rag.py#L231-L244
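The record above carries no docstring, so a small sketch of its behaviour may help: parameters found on an hparams-style namespace are copied onto the config (with the dropout -> dropout_rate aliasing) and then removed from hparams. SimpleNamespace stands in for the real argparse/transformers objects, which is an assumption, and the function is assumed to be importable from utils_rag.

from types import SimpleNamespace

hparams = SimpleNamespace(dropout=0.1, max_length=128)     # stand-in for argparse.Namespace
config = SimpleNamespace(dropout_rate=0.0, max_length=64)  # stand-in for a model config

hparams, config = set_extra_model_params(["dropout", "max_length"], hparams, config)
print(config.dropout_rate, config.max_length)  # 0.1 128
print(hasattr(hparams, "dropout"))             # False: the attribute was moved onto config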
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/mimetypes.py
python
guess_type
(url, strict=True)
return _db.guess_type(url, strict)
Guess the type of a file based on its URL. Return value is a tuple (type, encoding) where type is None if the type can't be guessed (no or unknown suffix) or a string of the form type/subtype, usable for a MIME Content-type header; and encoding is None for no encoding or the name of the program used to encode (e.g. compress or gzip). The mappings are table driven. Encoding suffixes are case sensitive; type suffixes are first tried case sensitive, then case insensitive. The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped to ".tar.gz". (This is table-driven too, using the dictionary suffix_map). Optional `strict' argument when false adds a bunch of commonly found, but non-standard types.
Guess the type of a file based on its URL.
[ "Guess", "the", "type", "of", "a", "file", "based", "on", "its", "URL", "." ]
def guess_type(url, strict=True): """Guess the type of a file based on its URL. Return value is a tuple (type, encoding) where type is None if the type can't be guessed (no or unknown suffix) or a string of the form type/subtype, usable for a MIME Content-type header; and encoding is None for no encoding or the name of the program used to encode (e.g. compress or gzip). The mappings are table driven. Encoding suffixes are case sensitive; type suffixes are first tried case sensitive, then case insensitive. The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped to ".tar.gz". (This is table-driven too, using the dictionary suffix_map). Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ if _db is None: init() return _db.guess_type(url, strict)
[ "def", "guess_type", "(", "url", ",", "strict", "=", "True", ")", ":", "if", "_db", "is", "None", ":", "init", "(", ")", "return", "_db", ".", "guess_type", "(", "url", ",", "strict", ")" ]
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/mimetypes.py#L269-L289
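A quick illustration of the suffix handling described in the docstring above. The same guess_type signature exists in the standard-library mimetypes module, so the stdlib copy is used here instead of the ouroboros one.

import mimetypes

# .tar.gz: the type comes from .tar, the encoding from the .gz suffix
print(mimetypes.guess_type("backup.tar.gz"))     # ('application/x-tar', 'gzip')
# unknown suffix: both elements come back as None
print(mimetypes.guess_type("notes.unknownext"))  # (None, None)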
QCoDeS/Qcodes
3cda2cef44812e2aa4672781f2423bf5f816f9f9
qcodes/utils/command.py
python
Command.call_cmd_parsed_out
(self, *args)
return self.output_parser(self._cmd(*args))
Execute a function with output parsing.
Execute a function with output parsing.
[ "Execute", "a", "function", "with", "output", "parsing", "." ]
def call_cmd_parsed_out(self, *args): """Execute a function with output parsing.""" return self.output_parser(self._cmd(*args))
[ "def", "call_cmd_parsed_out", "(", "self", ",", "*", "args", ")", ":", "return", "self", ".", "output_parser", "(", "self", ".", "_cmd", "(", "*", "args", ")", ")" ]
https://github.com/QCoDeS/Qcodes/blob/3cda2cef44812e2aa4672781f2423bf5f816f9f9/qcodes/utils/command.py#L155-L157
smart-mobile-software/gitstack
d9fee8f414f202143eb6e620529e8e5539a2af56
python/Lib/site-packages/django/contrib/gis/gdal/field.py
python
Field.type
(self)
return capi.get_field_type(self.ptr)
Returns the OGR type of this Field.
Returns the OGR type of this Field.
[ "Returns", "the", "OGR", "type", "of", "this", "Field", "." ]
def type(self): "Returns the OGR type of this Field." return capi.get_field_type(self.ptr)
[ "def", "type", "(", "self", ")", ":", "return", "capi", ".", "get_field_type", "(", "self", ".", "ptr", ")" ]
https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/site-packages/django/contrib/gis/gdal/field.py#L77-L79
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/networkx/readwrite/adjlist.py
python
parse_adjlist
(lines, comments='#', delimiter=None, create_using=None, nodetype=None)
return G
Parse lines of a graph adjacency list representation. Parameters ---------- lines : list or iterator of strings Input data in adjlist format create_using: NetworkX graph container Use given NetworkX graph for holding nodes or edges. nodetype : Python type, optional Convert nodes to this type. comments : string, optional Marker for comment lines delimiter : string, optional Separator for node labels. The default is whitespace. Returns ------- G: NetworkX graph The graph corresponding to the lines in adjacency list format. Examples -------- >>> lines = ['1 2 5', ... '2 3 4', ... '3 5', ... '4', ... '5'] >>> G = nx.parse_adjlist(lines, nodetype=int) >>> nodes = [1, 2, 3, 4, 5] >>> all(node in G for node in nodes) True >>> edges = [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)] >>> all((u, v) in G.edges() or (v, u) in G.edges() for (u, v) in edges) True See Also -------- read_adjlist
Parse lines of a graph adjacency list representation.
[ "Parse", "lines", "of", "a", "graph", "adjacency", "list", "representation", "." ]
def parse_adjlist(lines, comments='#', delimiter=None, create_using=None, nodetype=None): """Parse lines of a graph adjacency list representation. Parameters ---------- lines : list or iterator of strings Input data in adjlist format create_using: NetworkX graph container Use given NetworkX graph for holding nodes or edges. nodetype : Python type, optional Convert nodes to this type. comments : string, optional Marker for comment lines delimiter : string, optional Separator for node labels. The default is whitespace. Returns ------- G: NetworkX graph The graph corresponding to the lines in adjacency list format. Examples -------- >>> lines = ['1 2 5', ... '2 3 4', ... '3 5', ... '4', ... '5'] >>> G = nx.parse_adjlist(lines, nodetype=int) >>> nodes = [1, 2, 3, 4, 5] >>> all(node in G for node in nodes) True >>> edges = [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)] >>> all((u, v) in G.edges() or (v, u) in G.edges() for (u, v) in edges) True See Also -------- read_adjlist """ if create_using is None: G = nx.Graph() else: try: G = create_using G.clear() except: raise TypeError("Input graph is not a NetworkX graph type") for line in lines: p = line.find(comments) if p >= 0: line = line[:p] if not len(line): continue vlist = line.strip().split(delimiter) u = vlist.pop(0) # convert types if nodetype is not None: try: u = nodetype(u) except: raise TypeError("Failed to convert node ({}) to type {}" .format(u, nodetype)) G.add_node(u) if nodetype is not None: try: vlist = map(nodetype, vlist) except: raise TypeError("Failed to convert nodes ({}) to type {}" .format(','.join(vlist), nodetype)) G.add_edges_from([(u, v) for v in vlist]) return G
[ "def", "parse_adjlist", "(", "lines", ",", "comments", "=", "'#'", ",", "delimiter", "=", "None", ",", "create_using", "=", "None", ",", "nodetype", "=", "None", ")", ":", "if", "create_using", "is", "None", ":", "G", "=", "nx", ".", "Graph", "(", ")", "else", ":", "try", ":", "G", "=", "create_using", "G", ".", "clear", "(", ")", "except", ":", "raise", "TypeError", "(", "\"Input graph is not a NetworkX graph type\"", ")", "for", "line", "in", "lines", ":", "p", "=", "line", ".", "find", "(", "comments", ")", "if", "p", ">=", "0", ":", "line", "=", "line", "[", ":", "p", "]", "if", "not", "len", "(", "line", ")", ":", "continue", "vlist", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "delimiter", ")", "u", "=", "vlist", ".", "pop", "(", "0", ")", "# convert types", "if", "nodetype", "is", "not", "None", ":", "try", ":", "u", "=", "nodetype", "(", "u", ")", "except", ":", "raise", "TypeError", "(", "\"Failed to convert node ({}) to type {}\"", ".", "format", "(", "u", ",", "nodetype", ")", ")", "G", ".", "add_node", "(", "u", ")", "if", "nodetype", "is", "not", "None", ":", "try", ":", "vlist", "=", "map", "(", "nodetype", ",", "vlist", ")", "except", ":", "raise", "TypeError", "(", "\"Failed to convert nodes ({}) to type {}\"", ".", "format", "(", "','", ".", "join", "(", "vlist", ")", ",", "nodetype", ")", ")", "G", ".", "add_edges_from", "(", "[", "(", "u", ",", "v", ")", "for", "v", "in", "vlist", "]", ")", "return", "G" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/networkx/readwrite/adjlist.py#L148-L226
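The doctest in the record already covers the default whitespace delimiter; the sketch below exercises the delimiter and comments parameters instead, assuming an installed networkx that exposes the same parse_adjlist.

import networkx as nx

lines = ["a:b:c",
         "b:d  # trailing comments are stripped",
         "c",
         "d"]
G = nx.parse_adjlist(lines, delimiter=":", comments="#")
print(sorted(G.edges()))  # [('a', 'b'), ('a', 'c'), ('b', 'd')]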
tadejmagajna/HereIsWally
eba5274f65c1b9b636aba23942364933a632efc1
object_detection/core/model.py
python
DetectionModel.__init__
(self, num_classes)
Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include background categories that might be implicitly be predicted in various implementations.
Constructor.
[ "Constructor", "." ]
def __init__(self, num_classes): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include background categories that might be implicitly be predicted in various implementations. """ self._num_classes = num_classes self._groundtruth_lists = {}
[ "def", "__init__", "(", "self", ",", "num_classes", ")", ":", "self", ".", "_num_classes", "=", "num_classes", "self", ".", "_groundtruth_lists", "=", "{", "}" ]
https://github.com/tadejmagajna/HereIsWally/blob/eba5274f65c1b9b636aba23942364933a632efc1/object_detection/core/model.py#L57-L66
aiidateam/aiida-core
c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2
aiida/schedulers/plugins/direct.py
python
DirectScheduler._parse_joblist_output
(self, retval, stdout, stderr)
return job_list
Parse the queue output string, as returned by executing the command returned by _get_joblist_command command (qstat -f). Return a list of JobInfo objects, one of each job, each relevant parameters implemented. .. note:: depending on the scheduler configuration, finished jobs may either appear here, or not. This function will only return one element for each job find in the qstat output; missing jobs (for whatever reason) simply will not appear here.
Parse the queue output string, as returned by executing the command returned by _get_joblist_command command (qstat -f).
[ "Parse", "the", "queue", "output", "string", "as", "returned", "by", "executing", "the", "command", "returned", "by", "_get_joblist_command", "command", "(", "qstat", "-", "f", ")", "." ]
def _parse_joblist_output(self, retval, stdout, stderr): """ Parse the queue output string, as returned by executing the command returned by _get_joblist_command command (qstat -f). Return a list of JobInfo objects, one of each job, each relevant parameters implemented. .. note:: depending on the scheduler configuration, finished jobs may either appear here, or not. This function will only return one element for each job find in the qstat output; missing jobs (for whatever reason) simply will not appear here. """ import re filtered_stderr = '\n'.join(l for l in stderr.split('\n')) if filtered_stderr.strip(): self.logger.warning(f"Warning in _parse_joblist_output, non-empty (filtered) stderr='{filtered_stderr}'") if retval != 0: raise SchedulerError('Error during direct execution parsing (_parse_joblist_output function)') # Create dictionary and parse specific fields job_list = [] for line in stdout.split('\n'): if re.search(r'^\s*PID', line) or line == '': # Skip the header if present continue line = re.sub(r'^\s+', '', line) job = re.split(r'\s+', line) this_job = JobInfo() this_job.job_id = job[0] if len(job) < 3: raise SchedulerError(f"Unexpected output from the scheduler, not enough fields in line '{line}'") try: job_state_string = job[1][0] # I just check the first character except IndexError: self.logger.debug(f"No 'job_state' field for job id {this_job.job_id}") this_job.job_state = JobState.UNDETERMINED else: try: this_job.job_state = \ _MAP_STATUS_PS[job_state_string] except KeyError: self.logger.warning(f"Unrecognized job_state '{job_state_string}' for job id {this_job.job_id}") this_job.job_state = JobState.UNDETERMINED try: # I strip the part after the @: is this always ok? this_job.job_owner = job[2] except KeyError: self.logger.debug(f"No 'job_owner' field for job id {this_job.job_id}") try: this_job.wallclock_time_seconds = self._convert_time(job[3]) except KeyError: # May not have started yet pass except ValueError: self.logger.warning(f"Error parsing 'resources_used.walltime' for job id {this_job.job_id}") # I append to the list of jobs to return job_list.append(this_job) return job_list
[ "def", "_parse_joblist_output", "(", "self", ",", "retval", ",", "stdout", ",", "stderr", ")", ":", "import", "re", "filtered_stderr", "=", "'\\n'", ".", "join", "(", "l", "for", "l", "in", "stderr", ".", "split", "(", "'\\n'", ")", ")", "if", "filtered_stderr", ".", "strip", "(", ")", ":", "self", ".", "logger", ".", "warning", "(", "f\"Warning in _parse_joblist_output, non-empty (filtered) stderr='{filtered_stderr}'\"", ")", "if", "retval", "!=", "0", ":", "raise", "SchedulerError", "(", "'Error during direct execution parsing (_parse_joblist_output function)'", ")", "# Create dictionary and parse specific fields", "job_list", "=", "[", "]", "for", "line", "in", "stdout", ".", "split", "(", "'\\n'", ")", ":", "if", "re", ".", "search", "(", "r'^\\s*PID'", ",", "line", ")", "or", "line", "==", "''", ":", "# Skip the header if present", "continue", "line", "=", "re", ".", "sub", "(", "r'^\\s+'", ",", "''", ",", "line", ")", "job", "=", "re", ".", "split", "(", "r'\\s+'", ",", "line", ")", "this_job", "=", "JobInfo", "(", ")", "this_job", ".", "job_id", "=", "job", "[", "0", "]", "if", "len", "(", "job", ")", "<", "3", ":", "raise", "SchedulerError", "(", "f\"Unexpected output from the scheduler, not enough fields in line '{line}'\"", ")", "try", ":", "job_state_string", "=", "job", "[", "1", "]", "[", "0", "]", "# I just check the first character", "except", "IndexError", ":", "self", ".", "logger", ".", "debug", "(", "f\"No 'job_state' field for job id {this_job.job_id}\"", ")", "this_job", ".", "job_state", "=", "JobState", ".", "UNDETERMINED", "else", ":", "try", ":", "this_job", ".", "job_state", "=", "_MAP_STATUS_PS", "[", "job_state_string", "]", "except", "KeyError", ":", "self", ".", "logger", ".", "warning", "(", "f\"Unrecognized job_state '{job_state_string}' for job id {this_job.job_id}\"", ")", "this_job", ".", "job_state", "=", "JobState", ".", "UNDETERMINED", "try", ":", "# I strip the part after the @: is this always ok?", "this_job", ".", "job_owner", "=", "job", "[", "2", "]", "except", "KeyError", ":", "self", ".", "logger", ".", "debug", "(", "f\"No 'job_owner' field for job id {this_job.job_id}\"", ")", "try", ":", "this_job", ".", "wallclock_time_seconds", "=", "self", ".", "_convert_time", "(", "job", "[", "3", "]", ")", "except", "KeyError", ":", "# May not have started yet", "pass", "except", "ValueError", ":", "self", ".", "logger", ".", "warning", "(", "f\"Error parsing 'resources_used.walltime' for job id {this_job.job_id}\"", ")", "# I append to the list of jobs to return", "job_list", ".", "append", "(", "this_job", ")", "return", "job_list" ]
https://github.com/aiidateam/aiida-core/blob/c743a335480f8bb3a5e4ebd2463a31f9f3b9f9b2/aiida/schedulers/plugins/direct.py#L199-L265
Jajcus/pyxmpp2
59e5fd7c8837991ac265dc6aad23a6bd256768a7
pyxmpp2/ext/muc/muccore.py
python
MucPresence.copy
(self)
return MucPresence(self)
Return a copy of `self`.
Return a copy of `self`.
[ "Return", "a", "copy", "of", "self", "." ]
def copy(self): """ Return a copy of `self`. """ return MucPresence(self)
[ "def", "copy", "(", "self", ")", ":", "return", "MucPresence", "(", "self", ")" ]
https://github.com/Jajcus/pyxmpp2/blob/59e5fd7c8837991ac265dc6aad23a6bd256768a7/pyxmpp2/ext/muc/muccore.py#L703-L707
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/engine/base.py
python
Connection.recover_twophase
(self)
return self.engine.dialect.do_recover_twophase(self)
[]
def recover_twophase(self): return self.engine.dialect.do_recover_twophase(self)
[ "def", "recover_twophase", "(", "self", ")", ":", "return", "self", ".", "engine", ".", "dialect", ".", "do_recover_twophase", "(", "self", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/engine/base.py#L665-L666
neuropsychology/NeuroKit
d01111b9b82364d28da01c002e6cbfc45d9493d9
neurokit2/complexity/optim_complexity_k.py
python
complexity_k
(signal, k_max="max", show=False)
return kmax_optimal, { "Values": kmax_range, "Scores": slopes, "Intercepts": intercepts, "Average_Values": average_values, }
Automated selection of the optimal k_max parameter for Higuchi Fractal Dimension (HFD). The optimal kmax is computed based on the point at which HFD values plateau for a range of kmax values (see Vega, 2015). Parameters ---------- signal : Union[list, np.array, pd.Series] The signal (i.e., a time series) in the form of a vector of values. k_max : Union[int, str, list], optional Maximum number of interval times (should be greater than or equal to 3) to be tested. If 'max', it selects the maximum possible value corresponding to half the length of the signal. show : bool Visualise the slope of the curve for the selected kmax value. Returns -------- k : float The optimal kmax of the time series. info : dict A dictionary containing additional information regarding the parameters used to compute optimal kmax. See Also -------- fractal_higuchi Examples ---------- >>> import neurokit2 as nk >>> >>> signal = nk.signal_simulate(duration=2, sampling_rate=100, frequency=[5, 6], noise=0.5) >>> k_max, info = nk.complexity_k(signal, k_max='default', show=True) >>> k_max #doctest: +SKIP Reference ---------- - Higuchi, T. (1988). Approach to an irregular time series on the basis of the fractal theory. Physica D: Nonlinear Phenomena, 31(2), 277-283. - Vega, C. F., & Noel, J. (2015, June). Parameters analyzed of Higuchi's fractal dimension for EEG brain signals. In 2015 Signal Processing Symposium (SPSympo) (pp. 1-5). IEEE. https://ieeexplore.ieee.org/document/7168285
Automated selection of the optimal k_max parameter for Higuchi Fractal Dimension (HFD).
[ "Automated", "selection", "of", "the", "optimal", "k_max", "parameter", "for", "Higuchi", "Fractal", "Dimension", "(", "HFD", ")", "." ]
def complexity_k(signal, k_max="max", show=False): """Automated selection of the optimal k_max parameter for Higuchi Fractal Dimension (HFD). The optimal kmax is computed based on the point at which HFD values plateau for a range of kmax values (see Vega, 2015). Parameters ---------- signal : Union[list, np.array, pd.Series] The signal (i.e., a time series) in the form of a vector of values. k_max : Union[int, str, list], optional Maximum number of interval times (should be greater than or equal to 3) to be tested. If 'max', it selects the maximum possible value corresponding to half the length of the signal. show : bool Visualise the slope of the curve for the selected kmax value. Returns -------- k : float The optimal kmax of the time series. info : dict A dictionary containing additional information regarding the parameters used to compute optimal kmax. See Also -------- fractal_higuchi Examples ---------- >>> import neurokit2 as nk >>> >>> signal = nk.signal_simulate(duration=2, sampling_rate=100, frequency=[5, 6], noise=0.5) >>> k_max, info = nk.complexity_k(signal, k_max='default', show=True) >>> k_max #doctest: +SKIP Reference ---------- - Higuchi, T. (1988). Approach to an irregular time series on the basis of the fractal theory. Physica D: Nonlinear Phenomena, 31(2), 277-283. - Vega, C. F., & Noel, J. (2015, June). Parameters analyzed of Higuchi's fractal dimension for EEG brain signals. In 2015 Signal Processing Symposium (SPSympo) (pp. 1-5). IEEE. https://ieeexplore.ieee.org/document/7168285 """ # Get the range of k-max values to be tested # ------------------------------------------ if isinstance(k_max, str): # e.g., "default" # upper limit for k value (max possible value) k_max = int(np.floor(len(signal) / 2)) # so that normalizing factor is positive if isinstance(k_max, int): kmax_range = np.arange(2, k_max + 1) elif isinstance(k_max, (list, np.ndarray, pd.Series)): kmax_range = np.array(k_max) else: warn( "k_max should be an int or a list of values of kmax to be tested.", category=NeuroKitWarning, ) # Compute the slope for each kmax value # -------------------------------------- vectorized_k_slope = np.vectorize(_complexity_k_slope, excluded=[1]) slopes, intercepts, info = vectorized_k_slope(kmax_range, signal) # k_values = [d["k_values"] for d in info] average_values = [d["average_values"] for d in info] # Find plateau (the saturation point of slope) # -------------------------------------------- optimal_point = find_plateau(slopes, show=False) if optimal_point is not None: kmax_optimal = kmax_range[optimal_point] else: kmax_optimal = np.max(kmax_range) warn( "The optimal kmax value detected is 2 or less. There may be no plateau in this case. " + f"You can inspect the plot by set `show=True`. We will return optimal k_max = {kmax_optimal} (the max).", category=NeuroKitWarning, ) # Plot if show: _complexity_k_plot(kmax_range, slopes, kmax_optimal, ax=None) # Return optimal tau and info dict return kmax_optimal, { "Values": kmax_range, "Scores": slopes, "Intercepts": intercepts, "Average_Values": average_values, }
[ "def", "complexity_k", "(", "signal", ",", "k_max", "=", "\"max\"", ",", "show", "=", "False", ")", ":", "# Get the range of k-max values to be tested", "# ------------------------------------------", "if", "isinstance", "(", "k_max", ",", "str", ")", ":", "# e.g., \"default\"", "# upper limit for k value (max possible value)", "k_max", "=", "int", "(", "np", ".", "floor", "(", "len", "(", "signal", ")", "/", "2", ")", ")", "# so that normalizing factor is positive", "if", "isinstance", "(", "k_max", ",", "int", ")", ":", "kmax_range", "=", "np", ".", "arange", "(", "2", ",", "k_max", "+", "1", ")", "elif", "isinstance", "(", "k_max", ",", "(", "list", ",", "np", ".", "ndarray", ",", "pd", ".", "Series", ")", ")", ":", "kmax_range", "=", "np", ".", "array", "(", "k_max", ")", "else", ":", "warn", "(", "\"k_max should be an int or a list of values of kmax to be tested.\"", ",", "category", "=", "NeuroKitWarning", ",", ")", "# Compute the slope for each kmax value", "# --------------------------------------", "vectorized_k_slope", "=", "np", ".", "vectorize", "(", "_complexity_k_slope", ",", "excluded", "=", "[", "1", "]", ")", "slopes", ",", "intercepts", ",", "info", "=", "vectorized_k_slope", "(", "kmax_range", ",", "signal", ")", "# k_values = [d[\"k_values\"] for d in info]", "average_values", "=", "[", "d", "[", "\"average_values\"", "]", "for", "d", "in", "info", "]", "# Find plateau (the saturation point of slope)", "# --------------------------------------------", "optimal_point", "=", "find_plateau", "(", "slopes", ",", "show", "=", "False", ")", "if", "optimal_point", "is", "not", "None", ":", "kmax_optimal", "=", "kmax_range", "[", "optimal_point", "]", "else", ":", "kmax_optimal", "=", "np", ".", "max", "(", "kmax_range", ")", "warn", "(", "\"The optimal kmax value detected is 2 or less. There may be no plateau in this case. \"", "+", "f\"You can inspect the plot by set `show=True`. We will return optimal k_max = {kmax_optimal} (the max).\"", ",", "category", "=", "NeuroKitWarning", ",", ")", "# Plot", "if", "show", ":", "_complexity_k_plot", "(", "kmax_range", ",", "slopes", ",", "kmax_optimal", ",", "ax", "=", "None", ")", "# Return optimal tau and info dict", "return", "kmax_optimal", ",", "{", "\"Values\"", ":", "kmax_range", ",", "\"Scores\"", ":", "slopes", ",", "\"Intercepts\"", ":", "intercepts", ",", "\"Average_Values\"", ":", "average_values", ",", "}" ]
https://github.com/neuropsychology/NeuroKit/blob/d01111b9b82364d28da01c002e6cbfc45d9493d9/neurokit2/complexity/optim_complexity_k.py#L10-L99
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/ceilometer/ceilometer/openstack/common/timeutils.py
python
normalize_time
(timestamp)
return timestamp.replace(tzinfo=None) - offset
Normalize time in arbitrary timezone to UTC naive object.
Normalize time in arbitrary timezone to UTC naive object.
[ "Normalize", "time", "in", "arbitrary", "timezone", "to", "UTC", "naive", "object", "." ]
def normalize_time(timestamp): """Normalize time in arbitrary timezone to UTC naive object.""" offset = timestamp.utcoffset() if offset is None: return timestamp return timestamp.replace(tzinfo=None) - offset
[ "def", "normalize_time", "(", "timestamp", ")", ":", "offset", "=", "timestamp", ".", "utcoffset", "(", ")", "if", "offset", "is", "None", ":", "return", "timestamp", "return", "timestamp", ".", "replace", "(", "tzinfo", "=", "None", ")", "-", "offset" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/ceilometer/ceilometer/openstack/common/timeutils.py#L68-L73
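A small demonstration of the behaviour documented above, assuming normalize_time from this record is in scope; the Python 3 timezone helper is used purely for brevity. Aware input is shifted to UTC and returned naive, while naive input passes through untouched.

from datetime import datetime, timedelta, timezone

aware = datetime(2013, 5, 1, 12, 0, tzinfo=timezone(timedelta(hours=2)))
print(normalize_time(aware))                     # 2013-05-01 10:00:00 (naive, UTC)
print(normalize_time(datetime(2013, 5, 1, 12)))  # unchanged: already naive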
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v1_service_spec.py
python
V1ServiceSpec.selector
(self)
return self._selector
Gets the selector of this V1ServiceSpec. # noqa: E501 Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/ # noqa: E501 :return: The selector of this V1ServiceSpec. # noqa: E501 :rtype: dict(str, str)
Gets the selector of this V1ServiceSpec. # noqa: E501
[ "Gets", "the", "selector", "of", "this", "V1ServiceSpec", ".", "#", "noqa", ":", "E501" ]
def selector(self): """Gets the selector of this V1ServiceSpec. # noqa: E501 Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/ # noqa: E501 :return: The selector of this V1ServiceSpec. # noqa: E501 :rtype: dict(str, str) """ return self._selector
[ "def", "selector", "(", "self", ")", ":", "return", "self", ".", "_selector" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_service_spec.py#L356-L364
Lonero-Team/Decentralized-Internet
3cb157834fcc19ff8c2316e66bf07b103c137068
packages/p2lara/src/storages/bigchaindb/lib.py
python
BigchainDB.migrate_abci_chain
(self)
Generate and record a new ABCI chain ID. New blocks are not accepted until we receive an InitChain ABCI request with the matching chain ID and validator set. Chain ID is generated based on the current chain and height. `chain-X` => `chain-X-migrated-at-height-5`. `chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`. If there is no known chain (we are at genesis), the function returns.
Generate and record a new ABCI chain ID. New blocks are not accepted until we receive an InitChain ABCI request with the matching chain ID and validator set.
[ "Generate", "and", "record", "a", "new", "ABCI", "chain", "ID", ".", "New", "blocks", "are", "not", "accepted", "until", "we", "receive", "an", "InitChain", "ABCI", "request", "with", "the", "matching", "chain", "ID", "and", "validator", "set", "." ]
def migrate_abci_chain(self): """Generate and record a new ABCI chain ID. New blocks are not accepted until we receive an InitChain ABCI request with the matching chain ID and validator set. Chain ID is generated based on the current chain and height. `chain-X` => `chain-X-migrated-at-height-5`. `chain-X-migrated-at-height-5` => `chain-X-migrated-at-height-21`. If there is no known chain (we are at genesis), the function returns. """ latest_chain = self.get_latest_abci_chain() if latest_chain is None: return block = self.get_latest_block() suffix = '-migrated-at-height-' chain_id = latest_chain['chain_id'] block_height_str = str(block['height']) new_chain_id = chain_id.split(suffix)[0] + suffix + block_height_str self.store_abci_chain(block['height'] + 1, new_chain_id, False)
[ "def", "migrate_abci_chain", "(", "self", ")", ":", "latest_chain", "=", "self", ".", "get_latest_abci_chain", "(", ")", "if", "latest_chain", "is", "None", ":", "return", "block", "=", "self", ".", "get_latest_block", "(", ")", "suffix", "=", "'-migrated-at-height-'", "chain_id", "=", "latest_chain", "[", "'chain_id'", "]", "block_height_str", "=", "str", "(", "block", "[", "'height'", "]", ")", "new_chain_id", "=", "chain_id", ".", "split", "(", "suffix", ")", "[", "0", "]", "+", "suffix", "+", "block_height_str", "self", ".", "store_abci_chain", "(", "block", "[", "'height'", "]", "+", "1", ",", "new_chain_id", ",", "False", ")" ]
https://github.com/Lonero-Team/Decentralized-Internet/blob/3cb157834fcc19ff8c2316e66bf07b103c137068/packages/p2lara/src/storages/bigchaindb/lib.py#L478-L500
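The chain-id renaming rule from the docstring above, isolated as plain string manipulation so it can be checked without a running BigchainDB node; the height value is made up.

suffix = '-migrated-at-height-'
block_height = 21  # hypothetical latest block height

for chain_id in ('chain-X', 'chain-X-migrated-at-height-5'):
    new_chain_id = chain_id.split(suffix)[0] + suffix + str(block_height)
    print(chain_id, '=>', new_chain_id)  # both become 'chain-X-migrated-at-height-21'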
log2timeline/plaso
fe2e316b8c76a0141760c0f2f181d84acb83abc2
plaso/multi_process/extraction_engine.py
python
ExtractionMultiProcessEngine._StopExtractionProcesses
(self, abort=False)
Stops the extraction processes. Args: abort (bool): True to indicated the stop is issued on abort.
Stops the extraction processes.
[ "Stops", "the", "extraction", "processes", "." ]
def _StopExtractionProcesses(self, abort=False): """Stops the extraction processes. Args: abort (bool): True to indicated the stop is issued on abort. """ logger.debug('Stopping extraction processes.') self._StopMonitoringProcesses() if abort: # Signal all the processes to abort. self._AbortTerminate() logger.debug('Emptying task queue.') self._task_queue.Empty() # Wake the processes to make sure that they are not blocking # waiting for the queue new items. for _ in self._processes_per_pid: try: self._task_queue.PushItem(plaso_queue.QueueAbort(), block=False) except errors.QueueFull: logger.warning('Task queue full, unable to push abort message.') # Try waiting for the processes to exit normally. self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) self._task_queue.Close(abort=abort) if not abort: # Check if the processes are still alive and terminate them if necessary. self._AbortTerminate() self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT) self._task_queue.Close(abort=True) # Kill any lingering processes. self._AbortKill()
[ "def", "_StopExtractionProcesses", "(", "self", ",", "abort", "=", "False", ")", ":", "logger", ".", "debug", "(", "'Stopping extraction processes.'", ")", "self", ".", "_StopMonitoringProcesses", "(", ")", "if", "abort", ":", "# Signal all the processes to abort.", "self", ".", "_AbortTerminate", "(", ")", "logger", ".", "debug", "(", "'Emptying task queue.'", ")", "self", ".", "_task_queue", ".", "Empty", "(", ")", "# Wake the processes to make sure that they are not blocking", "# waiting for the queue new items.", "for", "_", "in", "self", ".", "_processes_per_pid", ":", "try", ":", "self", ".", "_task_queue", ".", "PushItem", "(", "plaso_queue", ".", "QueueAbort", "(", ")", ",", "block", "=", "False", ")", "except", "errors", ".", "QueueFull", ":", "logger", ".", "warning", "(", "'Task queue full, unable to push abort message.'", ")", "# Try waiting for the processes to exit normally.", "self", ".", "_AbortJoin", "(", "timeout", "=", "self", ".", "_PROCESS_JOIN_TIMEOUT", ")", "self", ".", "_task_queue", ".", "Close", "(", "abort", "=", "abort", ")", "if", "not", "abort", ":", "# Check if the processes are still alive and terminate them if necessary.", "self", ".", "_AbortTerminate", "(", ")", "self", ".", "_AbortJoin", "(", "timeout", "=", "self", ".", "_PROCESS_JOIN_TIMEOUT", ")", "self", ".", "_task_queue", ".", "Close", "(", "abort", "=", "True", ")", "# Kill any lingering processes.", "self", ".", "_AbortKill", "(", ")" ]
https://github.com/log2timeline/plaso/blob/fe2e316b8c76a0141760c0f2f181d84acb83abc2/plaso/multi_process/extraction_engine.py#L790-L825
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
openmetrics/datadog_checks/openmetrics/config_models/shared.py
python
SharedConfig._final_validation
(cls, values)
return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values))
[]
def _final_validation(cls, values): return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values))
[ "def", "_final_validation", "(", "cls", ",", "values", ")", ":", "return", "validation", ".", "core", ".", "finalize_config", "(", "getattr", "(", "validators", ",", "'finalize_shared'", ",", "identity", ")", "(", "values", ")", ")" ]
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/openmetrics/datadog_checks/openmetrics/config_models/shared.py#L59-L60
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/exploration/actions/boltzmann.py
python
BoltzmannActionExploration.__init__
(self, policy, action)
Initialize the Boltzmann action exploration strategy. Args: policy (Policy): policy to wrap. action (Action): discrete actions.
Initialize the Boltzmann action exploration strategy.
[ "Initialize", "the", "Boltzmann", "action", "exploration", "strategy", "." ]
def __init__(self, policy, action): """ Initialize the Boltzmann action exploration strategy. Args: policy (Policy): policy to wrap. action (Action): discrete actions. """ super(BoltzmannActionExploration, self).__init__(policy, action=action) # create Categorical module logits = IdentityModule() self._module = CategoricalModule(logits=logits)
[ "def", "__init__", "(", "self", ",", "policy", ",", "action", ")", ":", "super", "(", "BoltzmannActionExploration", ",", "self", ")", ".", "__init__", "(", "policy", ",", "action", "=", "action", ")", "# create Categorical module", "logits", "=", "IdentityModule", "(", ")", "self", ".", "_module", "=", "CategoricalModule", "(", "logits", "=", "logits", ")" ]
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/exploration/actions/boltzmann.py#L39-L51
angr/angr
4b04d56ace135018083d36d9083805be8146688b
angr/analyses/typehoon/translator.py
python
TypeTranslator._translate_Pointer64
(self, tc)
return sim_type.SimTypePointer(internal).with_arch(self.arch)
[]
def _translate_Pointer64(self, tc): if isinstance(tc.basetype, typeconsts.BottomType): # void * internal = sim_type.SimTypeBottom(label="void").with_arch(self.arch) else: internal = self._tc2simtype(tc.basetype) return sim_type.SimTypePointer(internal).with_arch(self.arch)
[ "def", "_translate_Pointer64", "(", "self", ",", "tc", ")", ":", "if", "isinstance", "(", "tc", ".", "basetype", ",", "typeconsts", ".", "BottomType", ")", ":", "# void *", "internal", "=", "sim_type", ".", "SimTypeBottom", "(", "label", "=", "\"void\"", ")", ".", "with_arch", "(", "self", ".", "arch", ")", "else", ":", "internal", "=", "self", ".", "_tc2simtype", "(", "tc", ".", "basetype", ")", "return", "sim_type", ".", "SimTypePointer", "(", "internal", ")", ".", "with_arch", "(", "self", ".", "arch", ")" ]
https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/analyses/typehoon/translator.py#L81-L88
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/models/v1beta1_priority_level_configuration_reference.py
python
V1beta1PriorityLevelConfigurationReference.__init__
(self, name=None, local_vars_configuration=None)
V1beta1PriorityLevelConfigurationReference - a model defined in OpenAPI
V1beta1PriorityLevelConfigurationReference - a model defined in OpenAPI
[ "V1beta1PriorityLevelConfigurationReference", "-", "a", "model", "defined", "in", "OpenAPI" ]
def __init__(self, name=None, local_vars_configuration=None): # noqa: E501 """V1beta1PriorityLevelConfigurationReference - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self.discriminator = None self.name = name
[ "def", "__init__", "(", "self", ",", "name", "=", "None", ",", "local_vars_configuration", "=", "None", ")", ":", "# noqa: E501", "# noqa: E501", "if", "local_vars_configuration", "is", "None", ":", "local_vars_configuration", "=", "Configuration", "(", ")", "self", ".", "local_vars_configuration", "=", "local_vars_configuration", "self", ".", "_name", "=", "None", "self", ".", "discriminator", "=", "None", "self", ".", "name", "=", "name" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1beta1_priority_level_configuration_reference.py#L43-L52
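A minimal construction sketch for the generated model above. It assumes the class is re-exported from kubernetes.client, as the generated OpenAPI clients normally do, and 'workload-low' is just an example name.

from kubernetes import client

ref = client.V1beta1PriorityLevelConfigurationReference(name="workload-low")
print(ref.name)       # 'workload-low'
print(ref.to_dict())  # {'name': 'workload-low'}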
tensorflow/lingvo
ce10019243d954c3c3ebe739f7589b5eebfdf907
lingvo/tools/gke_launch.py
python
tensorboard_template
(job_name, logdir, port)
return """ apiVersion: apps/v1 kind: Deployment metadata: name: {job_name} spec: replicas: 1 selector: matchLabels: name: {job_name} template: metadata: labels: name: {job_name} spec: restartPolicy: Always containers: - name: {container_name} image: gcr.io/tensorflow/tpu-util:r1.11 command: - tensorboard - --logdir=$(MODEL_BUCKET) env: - name: MODEL_BUCKET value: {logdir} ports: - containerPort: {port} --- apiVersion: v1 kind: Service metadata: name: {container_name}-service spec: type: LoadBalancer selector: name: {job_name} ports: - port: {port} targetPort: {port} """.format( job_name=job_name, container_name=container_name, logdir=logdir, port=port)
Constructs the tensorboard YAML template.
Constructs the tensorboard YAML template.
[ "Constructs", "the", "tensorboard", "YAML", "template", "." ]
def tensorboard_template(job_name, logdir, port): """Constructs the tensorboard YAML template.""" job_name = six.ensure_str(job_name) + ".tensorboard" container_name = job_name.replace(".", "-") print("To poll for tensorboard address, run: $ kubectl get service %s -w" % (container_name + "-service")) return """ apiVersion: apps/v1 kind: Deployment metadata: name: {job_name} spec: replicas: 1 selector: matchLabels: name: {job_name} template: metadata: labels: name: {job_name} spec: restartPolicy: Always containers: - name: {container_name} image: gcr.io/tensorflow/tpu-util:r1.11 command: - tensorboard - --logdir=$(MODEL_BUCKET) env: - name: MODEL_BUCKET value: {logdir} ports: - containerPort: {port} --- apiVersion: v1 kind: Service metadata: name: {container_name}-service spec: type: LoadBalancer selector: name: {job_name} ports: - port: {port} targetPort: {port} """.format( job_name=job_name, container_name=container_name, logdir=logdir, port=port)
[ "def", "tensorboard_template", "(", "job_name", ",", "logdir", ",", "port", ")", ":", "job_name", "=", "six", ".", "ensure_str", "(", "job_name", ")", "+", "\".tensorboard\"", "container_name", "=", "job_name", ".", "replace", "(", "\".\"", ",", "\"-\"", ")", "print", "(", "\"To poll for tensorboard address, run: $ kubectl get service %s -w\"", "%", "(", "container_name", "+", "\"-service\"", ")", ")", "return", "\"\"\"\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {job_name}\nspec:\n replicas: 1\n selector:\n matchLabels:\n name: {job_name}\n template:\n metadata:\n labels:\n name: {job_name}\n spec:\n restartPolicy: Always\n containers:\n - name: {container_name}\n image: gcr.io/tensorflow/tpu-util:r1.11\n command:\n - tensorboard\n - --logdir=$(MODEL_BUCKET)\n env:\n - name: MODEL_BUCKET\n value: {logdir}\n ports:\n - containerPort: {port}\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: {container_name}-service\nspec:\n type: LoadBalancer\n selector:\n name: {job_name}\n ports:\n - port: {port}\n targetPort: {port}\n\"\"\"", ".", "format", "(", "job_name", "=", "job_name", ",", "container_name", "=", "container_name", ",", "logdir", "=", "logdir", ",", "port", "=", "port", ")" ]
https://github.com/tensorflow/lingvo/blob/ce10019243d954c3c3ebe739f7589b5eebfdf907/lingvo/tools/gke_launch.py#L229-L276
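A sketch of how the template above might be rendered; the job name, bucket path, and port are placeholders, and the function is assumed to be importable from the gke_launch tool.

manifest = tensorboard_template("mnist.test", "gs://my-bucket/logdir", 6006)
# Besides printing a kubectl hint, the call returns a combined Deployment + Service manifest.
assert "kind: Deployment" in manifest and "kind: Service" in manifest
print(manifest.strip().splitlines()[0])  # apiVersion: apps/v1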
plotly/plotly.py
cfad7862594b35965c0e000813bd7805e8494a5b
packages/python/plotly/plotly/graph_objs/indicator/gauge/_axis.py
python
Axis.tickwidth
(self)
return self["tickwidth"]
Sets the tick width (in px). The 'tickwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float
Sets the tick width (in px). The 'tickwidth' property is a number and may be specified as: - An int or float in the interval [0, inf]
[ "Sets", "the", "tick", "width", "(", "in", "px", ")", ".", "The", "tickwidth", "property", "is", "a", "number", "and", "may", "be", "specified", "as", ":", "-", "An", "int", "or", "float", "in", "the", "interval", "[", "0", "inf", "]" ]
def tickwidth(self): """ Sets the tick width (in px). The 'tickwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["tickwidth"]
[ "def", "tickwidth", "(", "self", ")", ":", "return", "self", "[", "\"tickwidth\"", "]" ]
https://github.com/plotly/plotly.py/blob/cfad7862594b35965c0e000813bd7805e8494a5b/packages/python/plotly/plotly/graph_objs/indicator/gauge/_axis.py#L753-L764
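In practice this property is normally set through the gauge axis when building an Indicator trace; a brief sketch, assuming a recent plotly install.

import plotly.graph_objects as go

fig = go.Figure(go.Indicator(
    mode="gauge+number",
    value=42,
    gauge={"axis": {"range": [0, 100], "tickwidth": 2}},  # tick width in px
))
print(fig.data[0].gauge.axis.tickwidth)  # 2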
EricSteinberger/Deep-CFR
2e664ebbe5bf3c8f8ab56057205cbe4c2c7baeb5
DeepCFR/IterationStrategy.py
python
IterationStrategy.get_a_probs_for_each_hand
(self, pub_obs, legal_actions_list)
return self._get_a_probs_of_hands(pub_obs=pub_obs, legal_actions_list=legal_actions_list, range_idxs_tensor=self._all_range_idxs)
Args: pub_obs (np.array(shape=(seq_len, n_features,))) legal_actions_list (list): list of ints representing legal actions
Args: pub_obs (np.array(shape=(seq_len, n_features,))) legal_actions_list (list): list of ints representing legal actions
[ "Args", ":", "pub_obs", "(", "np", ".", "array", "(", "shape", "=", "(", "seq_len", "n_features", ")))", "legal_actions_list", "(", "list", ")", ":", "list", "of", "ints", "representing", "legal", "actions" ]
def get_a_probs_for_each_hand(self, pub_obs, legal_actions_list): """ Args: pub_obs (np.array(shape=(seq_len, n_features,))) legal_actions_list (list): list of ints representing legal actions """ if self._t_prof.DEBUGGING: assert isinstance(pub_obs, np.ndarray) assert len(pub_obs.shape) == 2, "all hands have the same public obs" assert isinstance(legal_actions_list[0], int), "all hands do the same actions. no need to batch, just parse int" return self._get_a_probs_of_hands(pub_obs=pub_obs, legal_actions_list=legal_actions_list, range_idxs_tensor=self._all_range_idxs)
[ "def", "get_a_probs_for_each_hand", "(", "self", ",", "pub_obs", ",", "legal_actions_list", ")", ":", "if", "self", ".", "_t_prof", ".", "DEBUGGING", ":", "assert", "isinstance", "(", "pub_obs", ",", "np", ".", "ndarray", ")", "assert", "len", "(", "pub_obs", ".", "shape", ")", "==", "2", ",", "\"all hands have the same public obs\"", "assert", "isinstance", "(", "legal_actions_list", "[", "0", "]", ",", "int", ")", ",", "\"all hands do the same actions. no need to batch, just parse int\"", "return", "self", ".", "_get_a_probs_of_hands", "(", "pub_obs", "=", "pub_obs", ",", "legal_actions_list", "=", "legal_actions_list", ",", "range_idxs_tensor", "=", "self", ".", "_all_range_idxs", ")" ]
https://github.com/EricSteinberger/Deep-CFR/blob/2e664ebbe5bf3c8f8ab56057205cbe4c2c7baeb5/DeepCFR/IterationStrategy.py#L116-L130
kivymd/KivyMD
1cb82f7d2437770f71be7c5a4f7de4b8da61f352
kivymd/uix/bottomnavigation/bottomnavigation.py
python
MDBottomNavigation.refresh_tabs
(self, *args)
Refresh all tabs.
Refresh all tabs.
[ "Refresh", "all", "tabs", "." ]
def refresh_tabs(self, *args) -> NoReturn: """Refresh all tabs.""" if self.ids: tab_bar = self.ids.tab_bar tab_bar.clear_widgets() tab_manager = self.ids.tab_manager self._active_color = self.theme_cls.primary_color if self.text_color_active != [1, 1, 1, 1]: self._active_color = self.text_color_active for tab in tab_manager.screens: self.tab_header = MDBottomNavigationHeader(tab=tab, panel=self) tab.header = self.tab_header tab_bar.add_widget(self.tab_header) if tab is self.first_widget: self.tab_header._text_color_normal = self._active_color self.tab_header._label_font_size = sp(14) self.tab_header.active = True else: self.tab_header.ids._label.font_size = sp(12) self.tab_header._label_font_size = sp(12)
[ "def", "refresh_tabs", "(", "self", ",", "*", "args", ")", "->", "NoReturn", ":", "if", "self", ".", "ids", ":", "tab_bar", "=", "self", ".", "ids", ".", "tab_bar", "tab_bar", ".", "clear_widgets", "(", ")", "tab_manager", "=", "self", ".", "ids", ".", "tab_manager", "self", ".", "_active_color", "=", "self", ".", "theme_cls", ".", "primary_color", "if", "self", ".", "text_color_active", "!=", "[", "1", ",", "1", ",", "1", ",", "1", "]", ":", "self", ".", "_active_color", "=", "self", ".", "text_color_active", "for", "tab", "in", "tab_manager", ".", "screens", ":", "self", ".", "tab_header", "=", "MDBottomNavigationHeader", "(", "tab", "=", "tab", ",", "panel", "=", "self", ")", "tab", ".", "header", "=", "self", ".", "tab_header", "tab_bar", ".", "add_widget", "(", "self", ".", "tab_header", ")", "if", "tab", "is", "self", ".", "first_widget", ":", "self", ".", "tab_header", ".", "_text_color_normal", "=", "self", ".", "_active_color", "self", ".", "tab_header", ".", "_label_font_size", "=", "sp", "(", "14", ")", "self", ".", "tab_header", ".", "active", "=", "True", "else", ":", "self", ".", "tab_header", ".", "ids", ".", "_label", ".", "font_size", "=", "sp", "(", "12", ")", "self", ".", "tab_header", ".", "_label_font_size", "=", "sp", "(", "12", ")" ]
https://github.com/kivymd/KivyMD/blob/1cb82f7d2437770f71be7c5a4f7de4b8da61f352/kivymd/uix/bottomnavigation/bottomnavigation.py#L586-L606
bungnoid/glTools
8ff0899de43784a18bd4543285655e68e28fb5e5
utils/nDynamics.py
python
connectToNucleus
(object,nucleus)
return nucleus
Connect the specified nDynamics node to an existing nucleus node @param object: nDynamics node to connect to the nucleus solver @type object: str @param nucleus: nucleus solver to connect to @type nucleus: str
Connect the specified nDynamics node to an existing nucleus node
[ "Connect", "the", "specified", "nDynamics", "node", "to", "an", "existing", "nucleus", "node" ]
def connectToNucleus(object,nucleus): ''' Connect the specified nDynamics node to an existing nucleus node @param object: nDynamics node to connect to the nucleus solver @type object: str @param nucleus: nucleus solver to connect to @type nucleus: str ''' # Check nucleus if not isNucleus(nucleus): preNucleusList = mc.ls(type='nucleus') # Check nDynamics node if isNDynamicsNode(object): nNode = object else: nNode = getConnectedNNode(nNode) if not nNode: raise Exception('Object "'+object+'" is not a valid nDynamics node, or connected to a valid nDynamics node!') nNode = nNode[0] # Check nRigid if isNRigid(nNode): connectNRigidToNucleus(nNode,nucleus,True) # Assign nNode to nucleus solver mc.select(nNode) mm.eval('assignNSolver '+nucleus) # Rename new nucleus node if not mc.objExists(nucleus): postNucleusList = mc.ls(type='nucleus') newNucleus = list(set(postNucleusList) - set(preNucleusList)) if not newNucleus: raise Exception('Unable to determine new nucleus node attached to "'+object+'"!') nucleus = mc.rename(newNucleus[0],nucleus) # Return result mc.select(nucleus) return nucleus
[ "def", "connectToNucleus", "(", "object", ",", "nucleus", ")", ":", "# Check nucleus", "if", "not", "isNucleus", "(", "nucleus", ")", ":", "preNucleusList", "=", "mc", ".", "ls", "(", "type", "=", "'nucleus'", ")", "# Check nDynamics node", "if", "isNDynamicsNode", "(", "object", ")", ":", "nNode", "=", "object", "else", ":", "nNode", "=", "getConnectedNNode", "(", "nNode", ")", "if", "not", "nNode", ":", "raise", "Exception", "(", "'Object \"'", "+", "object", "+", "'\" is not a valid nDynamics node, or connected to a valid nDynamics node!'", ")", "nNode", "=", "nNode", "[", "0", "]", "# Check nRigid", "if", "isNRigid", "(", "nNode", ")", ":", "connectNRigidToNucleus", "(", "nNode", ",", "nucleus", ",", "True", ")", "# Assign nNode to nucleus solver", "mc", ".", "select", "(", "nNode", ")", "mm", ".", "eval", "(", "'assignNSolver '", "+", "nucleus", ")", "# Rename new nucleus node", "if", "not", "mc", ".", "objExists", "(", "nucleus", ")", ":", "postNucleusList", "=", "mc", ".", "ls", "(", "type", "=", "'nucleus'", ")", "newNucleus", "=", "list", "(", "set", "(", "postNucleusList", ")", "-", "set", "(", "preNucleusList", ")", ")", "if", "not", "newNucleus", ":", "raise", "Exception", "(", "'Unable to determine new nucleus node attached to \"'", "+", "object", "+", "'\"!'", ")", "nucleus", "=", "mc", ".", "rename", "(", "newNucleus", "[", "0", "]", ",", "nucleus", ")", "# Return result", "mc", ".", "select", "(", "nucleus", ")", "return", "nucleus" ]
https://github.com/bungnoid/glTools/blob/8ff0899de43784a18bd4543285655e68e28fb5e5/utils/nDynamics.py#L442-L478
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
custom/inddex/filters.py
python
GapTypeFilter.options
(self)
return [ (ConvFactorGaps.slug, ConvFactorGaps.name), (FctGaps.slug, FctGaps.name), ]
[]
def options(self): return [ (ConvFactorGaps.slug, ConvFactorGaps.name), (FctGaps.slug, FctGaps.name), ]
[ "def", "options", "(", "self", ")", ":", "return", "[", "(", "ConvFactorGaps", ".", "slug", ",", "ConvFactorGaps", ".", "name", ")", ",", "(", "FctGaps", ".", "slug", ",", "FctGaps", ".", "name", ")", ",", "]" ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/custom/inddex/filters.py#L143-L147
openstack/barbican
a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce
barbican/common/exception.py
python
MultipleSecretStoreLookupFailed.__init__
(self)
[]
def __init__(self): msg = u._("Plugin lookup property 'stores_lookup_suffix' is not " "defined in service configuration") super(MultipleSecretStoreLookupFailed, self).__init__(msg)
[ "def", "__init__", "(", "self", ")", ":", "msg", "=", "u", ".", "_", "(", "\"Plugin lookup property 'stores_lookup_suffix' is not \"", "\"defined in service configuration\"", ")", "super", "(", "MultipleSecretStoreLookupFailed", ",", "self", ")", ".", "__init__", "(", "msg", ")" ]
https://github.com/openstack/barbican/blob/a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce/barbican/common/exception.py#L372-L375
rwth-i6/returnn
f2d718a197a280b0d5f0fd91a7fcb8658560dddb
returnn/tf/layers/rec.py
python
BaseChoiceLayer.__init__
(self, beam_size, search=NotSpecified, **kwargs)
:param int|None beam_size: the outgoing beam size. i.e. our output will be (batch * beam_size, ...) :param NotSpecified|bool search: whether to perform search, or use the ground truth (`target` option). If not specified, it will depend on `network.search_flag`.
:param int|None beam_size: the outgoing beam size. i.e. our output will be (batch * beam_size, ...) :param NotSpecified|bool search: whether to perform search, or use the ground truth (`target` option). If not specified, it will depend on `network.search_flag`.
[ ":", "param", "int|None", "beam_size", ":", "the", "outgoing", "beam", "size", ".", "i", ".", "e", ".", "our", "output", "will", "be", "(", "batch", "*", "beam_size", "...", ")", ":", "param", "NotSpecified|bool", "search", ":", "whether", "to", "perform", "search", "or", "use", "the", "ground", "truth", "(", "target", "option", ")", ".", "If", "not", "specified", "it", "will", "depend", "on", "network", ".", "search_flag", "." ]
def __init__(self, beam_size, search=NotSpecified, **kwargs): """ :param int|None beam_size: the outgoing beam size. i.e. our output will be (batch * beam_size, ...) :param NotSpecified|bool search: whether to perform search, or use the ground truth (`target` option). If not specified, it will depend on `network.search_flag`. """ super(BaseChoiceLayer, self).__init__(**kwargs)
[ "def", "__init__", "(", "self", ",", "beam_size", ",", "search", "=", "NotSpecified", ",", "*", "*", "kwargs", ")", ":", "super", "(", "BaseChoiceLayer", ",", "self", ")", ".", "__init__", "(", "*", "*", "kwargs", ")" ]
https://github.com/rwth-i6/returnn/blob/f2d718a197a280b0d5f0fd91a7fcb8658560dddb/returnn/tf/layers/rec.py#L4969-L4975
williballenthin/python-registry
11e857623469dd28ed14519a08d2db7c8228ca0c
Registry/RegistryParse.py
python
REGFBlock.is_old_transaction_log_file
(self)
return (self.file_type() == FileType.FILE_TYPE_LOG_OLD_1) or (self.file_type() == FileType.FILE_TYPE_LOG_OLD_2)
Check if this REGF block belongs to an old transaction log file (used before Windows 8.1).
Check if this REGF block belongs to an old transaction log file (used before Windows 8.1).
[ "Check", "if", "this", "REGF", "block", "belongs", "to", "an", "old", "transaction", "log", "file", "(", "used", "before", "Windows", "8", ".", "1", ")", "." ]
def is_old_transaction_log_file(self): """ Check if this REGF block belongs to an old transaction log file (used before Windows 8.1). """ return (self.file_type() == FileType.FILE_TYPE_LOG_OLD_1) or (self.file_type() == FileType.FILE_TYPE_LOG_OLD_2)
[ "def", "is_old_transaction_log_file", "(", "self", ")", ":", "return", "(", "self", ".", "file_type", "(", ")", "==", "FileType", ".", "FILE_TYPE_LOG_OLD_1", ")", "or", "(", "self", ".", "file_type", "(", ")", "==", "FileType", ".", "FILE_TYPE_LOG_OLD_2", ")" ]
https://github.com/williballenthin/python-registry/blob/11e857623469dd28ed14519a08d2db7c8228ca0c/Registry/RegistryParse.py#L396-L400
jwasham/code-catalog-python
c8645a1058b970206e688bfcb1782c18c64bcc00
catalog/suggested/lists/circular_queue.py
python
CircularQueue.dequeue
(self)
return oldhead._element
Remove and return the first element of the queue (i.e., FIFO). Raise Empty exception if the queue is empty.
Remove and return the first element of the queue (i.e., FIFO).
[ "Remove", "and", "return", "the", "first", "element", "of", "the", "queue", "(", "i", ".", "e", ".", "FIFO", ")", "." ]
def dequeue(self): """Remove and return the first element of the queue (i.e., FIFO). Raise Empty exception if the queue is empty. """ if self.is_empty(): raise Empty('Queue is empty') oldhead = self._tail._next if self._size == 1: # removing only element self._tail = None # queue becomes empty else: self._tail._next = oldhead._next # bypass the old head self._size -= 1 return oldhead._element
[ "def", "dequeue", "(", "self", ")", ":", "if", "self", ".", "is_empty", "(", ")", ":", "raise", "Empty", "(", "'Queue is empty'", ")", "oldhead", "=", "self", ".", "_tail", ".", "_next", "if", "self", ".", "_size", "==", "1", ":", "# removing only element", "self", ".", "_tail", "=", "None", "# queue becomes empty", "else", ":", "self", ".", "_tail", ".", "_next", "=", "oldhead", ".", "_next", "# bypass the old head", "self", ".", "_size", "-=", "1", "return", "oldhead", ".", "_element" ]
https://github.com/jwasham/code-catalog-python/blob/c8645a1058b970206e688bfcb1782c18c64bcc00/catalog/suggested/lists/circular_queue.py#L45-L58
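The dequeue record above explains FIFO removal from a circularly linked queue, but the matching enqueue is not part of the record. The sketch below is an illustrative, self-contained reimplementation of the same structure; MiniCircularQueue, _Node, and Empty are hypothetical names introduced here, and only the dequeue body mirrors the logic quoted in the record.

    class Empty(Exception):
        """Raised when dequeue is called on an empty queue."""

    class _Node:
        __slots__ = '_element', '_next'
        def __init__(self, element, nxt):
            self._element = element
            self._next = nxt

    class MiniCircularQueue:
        def __init__(self):
            self._tail = None      # newest node; its _next is the head
            self._size = 0

        def is_empty(self):
            return self._size == 0

        def enqueue(self, e):
            newest = _Node(e, None)
            if self.is_empty():
                newest._next = newest            # single node points at itself
            else:
                newest._next = self._tail._next  # new node points at the head
                self._tail._next = newest        # old tail points at new node
            self._tail = newest
            self._size += 1

        def dequeue(self):
            # Same logic as the record above.
            if self.is_empty():
                raise Empty('Queue is empty')
            oldhead = self._tail._next
            if self._size == 1:                   # removing the only element
                self._tail = None
            else:
                self._tail._next = oldhead._next  # bypass the old head
            self._size -= 1
            return oldhead._element

    q = MiniCircularQueue()
    for x in (1, 2, 3):
        q.enqueue(x)
    print(q.dequeue(), q.dequeue(), q.dequeue())  # 1 2 3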
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py
python
ssl_wrap_socket
(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ca_cert_dir=None)
return WrappedSocket(cnx, sock)
[]
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ca_cert_dir=None): ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version]) if certfile: keyfile = keyfile or certfile # Match behaviour of the normal python ssl library ctx.use_certificate_file(certfile) if keyfile: ctx.use_privatekey_file(keyfile) if cert_reqs != ssl.CERT_NONE: ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback) if ca_certs or ca_cert_dir: try: ctx.load_verify_locations(ca_certs, ca_cert_dir) except OpenSSL.SSL.Error as e: raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e) else: ctx.set_default_verify_paths() # Disable TLS compression to mitigate CRIME attack (issue #309) OP_NO_COMPRESSION = 0x20000 ctx.set_options(OP_NO_COMPRESSION) # Set list of supported ciphersuites. ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST) cnx = OpenSSL.SSL.Connection(ctx, sock) if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3 server_hostname = server_hostname.encode('utf-8') cnx.set_tlsext_host_name(server_hostname) cnx.set_connect_state() while True: try: cnx.do_handshake() except OpenSSL.SSL.WantReadError: rd, _, _ = select.select([sock], [], [], sock.gettimeout()) if not rd: raise timeout('select timed out') continue except OpenSSL.SSL.Error as e: raise ssl.SSLError('bad handshake: %r' % e) break return WrappedSocket(cnx, sock)
[ "def", "ssl_wrap_socket", "(", "sock", ",", "keyfile", "=", "None", ",", "certfile", "=", "None", ",", "cert_reqs", "=", "None", ",", "ca_certs", "=", "None", ",", "server_hostname", "=", "None", ",", "ssl_version", "=", "None", ",", "ca_cert_dir", "=", "None", ")", ":", "ctx", "=", "OpenSSL", ".", "SSL", ".", "Context", "(", "_openssl_versions", "[", "ssl_version", "]", ")", "if", "certfile", ":", "keyfile", "=", "keyfile", "or", "certfile", "# Match behaviour of the normal python ssl library", "ctx", ".", "use_certificate_file", "(", "certfile", ")", "if", "keyfile", ":", "ctx", ".", "use_privatekey_file", "(", "keyfile", ")", "if", "cert_reqs", "!=", "ssl", ".", "CERT_NONE", ":", "ctx", ".", "set_verify", "(", "_openssl_verify", "[", "cert_reqs", "]", ",", "_verify_callback", ")", "if", "ca_certs", "or", "ca_cert_dir", ":", "try", ":", "ctx", ".", "load_verify_locations", "(", "ca_certs", ",", "ca_cert_dir", ")", "except", "OpenSSL", ".", "SSL", ".", "Error", "as", "e", ":", "raise", "ssl", ".", "SSLError", "(", "'bad ca_certs: %r'", "%", "ca_certs", ",", "e", ")", "else", ":", "ctx", ".", "set_default_verify_paths", "(", ")", "# Disable TLS compression to mitigate CRIME attack (issue #309)", "OP_NO_COMPRESSION", "=", "0x20000", "ctx", ".", "set_options", "(", "OP_NO_COMPRESSION", ")", "# Set list of supported ciphersuites.", "ctx", ".", "set_cipher_list", "(", "DEFAULT_SSL_CIPHER_LIST", ")", "cnx", "=", "OpenSSL", ".", "SSL", ".", "Connection", "(", "ctx", ",", "sock", ")", "if", "isinstance", "(", "server_hostname", ",", "six", ".", "text_type", ")", ":", "# Platform-specific: Python 3", "server_hostname", "=", "server_hostname", ".", "encode", "(", "'utf-8'", ")", "cnx", ".", "set_tlsext_host_name", "(", "server_hostname", ")", "cnx", ".", "set_connect_state", "(", ")", "while", "True", ":", "try", ":", "cnx", ".", "do_handshake", "(", ")", "except", "OpenSSL", ".", "SSL", ".", "WantReadError", ":", "rd", ",", "_", ",", "_", "=", "select", ".", "select", "(", "[", "sock", "]", ",", "[", "]", ",", "[", "]", ",", "sock", ".", "gettimeout", "(", ")", ")", "if", "not", "rd", ":", "raise", "timeout", "(", "'select timed out'", ")", "continue", "except", "OpenSSL", ".", "SSL", ".", "Error", "as", "e", ":", "raise", "ssl", ".", "SSLError", "(", "'bad handshake: %r'", "%", "e", ")", "break", "return", "WrappedSocket", "(", "cnx", ",", "sock", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py#L315-L358
CuriousAI/mean-teacher
546348ff863c998c26be4339021425df973b4a36
pytorch/mean_teacher/data.py
python
TwoStreamBatchSampler.__init__
(self, primary_indices, secondary_indices, batch_size, secondary_batch_size)
[]
def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size): self.primary_indices = primary_indices self.secondary_indices = secondary_indices self.secondary_batch_size = secondary_batch_size self.primary_batch_size = batch_size - secondary_batch_size assert len(self.primary_indices) >= self.primary_batch_size > 0 assert len(self.secondary_indices) >= self.secondary_batch_size > 0
[ "def", "__init__", "(", "self", ",", "primary_indices", ",", "secondary_indices", ",", "batch_size", ",", "secondary_batch_size", ")", ":", "self", ".", "primary_indices", "=", "primary_indices", "self", ".", "secondary_indices", "=", "secondary_indices", "self", ".", "secondary_batch_size", "=", "secondary_batch_size", "self", ".", "primary_batch_size", "=", "batch_size", "-", "secondary_batch_size", "assert", "len", "(", "self", ".", "primary_indices", ")", ">=", "self", ".", "primary_batch_size", ">", "0", "assert", "len", "(", "self", ".", "secondary_indices", ")", ">=", "self", ".", "secondary_batch_size", ">", "0" ]
https://github.com/CuriousAI/mean-teacher/blob/546348ff863c998c26be4339021425df973b4a36/pytorch/mean_teacher/data.py#L112-L119
vericast/spylon-kernel
2d0ddf2aca1b91738f938b72a500c20293e3156c
spylon_kernel/_version.py
python
git_get_keywords
(versionfile_abs)
return keywords
Extract version information from the given file.
Extract version information from the given file.
[ "Extract", "version", "information", "from", "the", "given", "file", "." ]
def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords
[ "def", "git_get_keywords", "(", "versionfile_abs", ")", ":", "# the code embedded in _version.py can just fetch the value of these", "# keywords. When used from setup.py, we don't want to import _version.py,", "# so we do it with a regexp instead. This function is not used from", "# _version.py.", "keywords", "=", "{", "}", "try", ":", "f", "=", "open", "(", "versionfile_abs", ",", "\"r\"", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"git_refnames =\"", ")", ":", "mo", "=", "re", ".", "search", "(", "r'=\\s*\"(.*)\"'", ",", "line", ")", "if", "mo", ":", "keywords", "[", "\"refnames\"", "]", "=", "mo", ".", "group", "(", "1", ")", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"git_full =\"", ")", ":", "mo", "=", "re", ".", "search", "(", "r'=\\s*\"(.*)\"'", ",", "line", ")", "if", "mo", ":", "keywords", "[", "\"full\"", "]", "=", "mo", ".", "group", "(", "1", ")", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"git_date =\"", ")", ":", "mo", "=", "re", ".", "search", "(", "r'=\\s*\"(.*)\"'", ",", "line", ")", "if", "mo", ":", "keywords", "[", "\"date\"", "]", "=", "mo", ".", "group", "(", "1", ")", "f", ".", "close", "(", ")", "except", "EnvironmentError", ":", "pass", "return", "keywords" ]
https://github.com/vericast/spylon-kernel/blob/2d0ddf2aca1b91738f938b72a500c20293e3156c/spylon_kernel/_version.py#L133-L158
MontrealCorpusTools/Montreal-Forced-Aligner
63473f9a4fabd31eec14e1e5022882f85cfdaf31
montreal_forced_aligner/transcription/multiprocessing.py
python
compose_g
(arpa_path: str, words_path: str, g_path: str, log_file: TextIO)
Create G.fst from an ARPA formatted language model See Also -------- :kaldi_src:`arpa2fst` Relevant Kaldi binary Parameters ---------- arpa_path: str Path to ARPA file words_path: str Path to words symbols file g_path: str Path to output G.fst file log_file: TextIO Log file handler to output logging info to
Create G.fst from an ARPA formatted language model
[ "Create", "G", ".", "fst", "from", "an", "ARPA", "formatted", "language", "model" ]
def compose_g(arpa_path: str, words_path: str, g_path: str, log_file: TextIO) -> None: """ Create G.fst from an ARPA formatted language model See Also -------- :kaldi_src:`arpa2fst` Relevant Kaldi binary Parameters ---------- arpa_path: str Path to ARPA file words_path: str Path to words symbols file g_path: str Path to output G.fst file log_file: TextIO Log file handler to output logging info to """ arpafst_proc = subprocess.Popen( [ thirdparty_binary("arpa2fst"), "--disambig-symbol=#0", f"--read-symbol-table={words_path}", arpa_path, g_path, ], stderr=log_file, stdout=log_file, ) arpafst_proc.communicate()
[ "def", "compose_g", "(", "arpa_path", ":", "str", ",", "words_path", ":", "str", ",", "g_path", ":", "str", ",", "log_file", ":", "TextIO", ")", "->", "None", ":", "arpafst_proc", "=", "subprocess", ".", "Popen", "(", "[", "thirdparty_binary", "(", "\"arpa2fst\"", ")", ",", "\"--disambig-symbol=#0\"", ",", "f\"--read-symbol-table={words_path}\"", ",", "arpa_path", ",", "g_path", ",", "]", ",", "stderr", "=", "log_file", ",", "stdout", "=", "log_file", ",", ")", "arpafst_proc", ".", "communicate", "(", ")" ]
https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/blob/63473f9a4fabd31eec14e1e5022882f85cfdaf31/montreal_forced_aligner/transcription/multiprocessing.py#L392-L423
jwkvam/bowtie
220cd41367a70f2e206db846278cb7b6fd3649eb
bowtie/control.py
python
DatePicker.get
(self, data)
return data
Get the currently selected date. Returns ------- str Date in the format "YYYY-MM-DD"
Get the currently selected date.
[ "Get", "the", "currently", "selected", "date", "." ]
def get(self, data): """ Get the currently selected date. Returns ------- str Date in the format "YYYY-MM-DD" """ return data
[ "def", "get", "(", "self", ",", "data", ")", ":", "return", "data" ]
https://github.com/jwkvam/bowtie/blob/220cd41367a70f2e206db846278cb7b6fd3649eb/bowtie/control.py#L273-L283
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/py/frame.py
python
Frame.OnHelp
(self, event)
Display a Help window.
Display a Help window.
[ "Display", "a", "Help", "window", "." ]
def OnHelp(self, event): """Display a Help window.""" title = 'Help' text = "Type 'shell.help()' in the shell window." dialog = wx.MessageDialog(self, text, title, wx.OK | wx.ICON_INFORMATION) dialog.ShowModal() dialog.Destroy()
[ "def", "OnHelp", "(", "self", ",", "event", ")", ":", "title", "=", "'Help'", "text", "=", "\"Type 'shell.help()' in the shell window.\"", "dialog", "=", "wx", ".", "MessageDialog", "(", "self", ",", "text", ",", "title", ",", "wx", ".", "OK", "|", "wx", ".", "ICON_INFORMATION", ")", "dialog", ".", "ShowModal", "(", ")", "dialog", ".", "Destroy", "(", ")" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/py/frame.py#L447-L454
quic/aimet
dae9bae9a77ca719aa7553fefde4768270fc3518
TrainingExtensions/torch/src/python/aimet_torch/qc_quantize_recurrent.py
python
QcQuantizeRecurrent.grouped_quantizers
(self)
return self._grouped_quantizers
Return dictionary of grouped quantizer
Return dictionary of grouped quantizer
[ "Return", "dictionary", "of", "grouped", "quantizer" ]
def grouped_quantizers(self): """ Return dictionary of grouped quantizer """ return self._grouped_quantizers
[ "def", "grouped_quantizers", "(", "self", ")", ":", "return", "self", ".", "_grouped_quantizers" ]
https://github.com/quic/aimet/blob/dae9bae9a77ca719aa7553fefde4768270fc3518/TrainingExtensions/torch/src/python/aimet_torch/qc_quantize_recurrent.py#L198-L200
aws/aws-parallelcluster
f1fe5679a01c524e7ea904c329bd6d17318c6cd9
cli/src/pcluster/templates/cw_dashboard_builder.py
python
CWDashboardConstruct._generate_graph_widget
(self, title, metric_list)
return widget
Generate a graph widget and update the coordinates.
Generate a graph widget and update the coordinates.
[ "Generate", "a", "graph", "widget", "and", "update", "the", "coordinates", "." ]
def _generate_graph_widget(self, title, metric_list): """Generate a graph widget and update the coordinates.""" widget = cloudwatch.GraphWidget( title=title, left=metric_list, region=self._stack_region, width=self.graph_width, height=self.graph_height, ) widget.position(x=self.coord.x_value, y=self.coord.y_value) self._update_coord(self.graph_width, self.graph_height) return widget
[ "def", "_generate_graph_widget", "(", "self", ",", "title", ",", "metric_list", ")", ":", "widget", "=", "cloudwatch", ".", "GraphWidget", "(", "title", "=", "title", ",", "left", "=", "metric_list", ",", "region", "=", "self", ".", "_stack_region", ",", "width", "=", "self", ".", "graph_width", ",", "height", "=", "self", ".", "graph_height", ",", ")", "widget", ".", "position", "(", "x", "=", "self", ".", "coord", ".", "x_value", ",", "y", "=", "self", ".", "coord", ".", "y_value", ")", "self", ".", "_update_coord", "(", "self", ".", "graph_width", ",", "self", ".", "graph_height", ")", "return", "widget" ]
https://github.com/aws/aws-parallelcluster/blob/f1fe5679a01c524e7ea904c329bd6d17318c6cd9/cli/src/pcluster/templates/cw_dashboard_builder.py#L170-L181
svinota/pyroute2
d320acd67067206b4217bb862afdae23bcb55266
pyroute2.core/pr2modules/netlink/generic/l2tp.py
python
L2tp.delete_tunnel
(self, tunnel_id)
return self._do_request(msg)
Delete a tunnel :param tunnel_id: tunnel id of the tunnel to be deleted :return: netlink response
Delete a tunnel :param tunnel_id: tunnel id of the tunnel to be deleted :return: netlink response
[ "Delete", "a", "tunnel", ":", "param", "tunnel_id", ":", "tunnel", "id", "of", "the", "tunnel", "to", "be", "deleted", ":", "return", ":", "netlink", "response" ]
def delete_tunnel(self, tunnel_id): """ Delete a tunnel :param tunnel_id: tunnel id of the tunnel to be deleted :return: netlink response """ msg = l2tpmsg() msg["cmd"] = L2TP_CMD_TUNNEL_DELETE msg["version"] = L2TP_GENL_VERSION msg["attrs"].append(["L2TP_ATTR_CONN_ID", tunnel_id]) return self._do_request(msg)
[ "def", "delete_tunnel", "(", "self", ",", "tunnel_id", ")", ":", "msg", "=", "l2tpmsg", "(", ")", "msg", "[", "\"cmd\"", "]", "=", "L2TP_CMD_TUNNEL_DELETE", "msg", "[", "\"version\"", "]", "=", "L2TP_GENL_VERSION", "msg", "[", "\"attrs\"", "]", ".", "append", "(", "[", "\"L2TP_ATTR_CONN_ID\"", ",", "tunnel_id", "]", ")", "return", "self", ".", "_do_request", "(", "msg", ")" ]
https://github.com/svinota/pyroute2/blob/d320acd67067206b4217bb862afdae23bcb55266/pyroute2.core/pr2modules/netlink/generic/l2tp.py#L336-L347
yianjiajia/django_web_ansible
1103343082a65abf9d37310f5048514d74930753
devops/apps/ansible/elfinder/volumes/storage.py
python
ElfinderVolumeStorage._rmdir
(self, path)
Remove a directory. This implementation calls the :ref:`setting-rmDir` callable driver option, if it is available. If not, it raises an ``os.error``.
Remove a directory. This implementation calls the :ref:`setting-rmDir` callable driver option, if it is available. If not, it raises an ``os.error``.
[ "Remove", "a", "directory", ".", "This", "implementation", "calls", "the", ":", "ref", ":", "setting", "-", "rmDir", "callable", "driver", "option", "if", "it", "is", "available", ".", "If", "not", "it", "raises", "an", "os", ".", "error", "." ]
def _rmdir(self, path): """ Remove a directory. This implementation calls the :ref:`setting-rmDir` callable driver option, if it is available. If not, it raises an ``os.error``. """ if 'rmDir' in self._options and callable(self._options['rmDir']): return self._options['rmDir'](path, self._options['storage']) raise os.error
[ "def", "_rmdir", "(", "self", ",", "path", ")", ":", "if", "'rmDir'", "in", "self", ".", "_options", "and", "callable", "(", "self", ".", "_options", "[", "'rmDir'", "]", ")", ":", "return", "self", ".", "_options", "[", "'rmDir'", "]", "(", "path", ",", "self", ".", "_options", "[", "'storage'", "]", ")", "raise", "os", ".", "error" ]
https://github.com/yianjiajia/django_web_ansible/blob/1103343082a65abf9d37310f5048514d74930753/devops/apps/ansible/elfinder/volumes/storage.py#L398-L406
google/diff-match-patch
62f2e689f498f9c92dbc588c58750addec9b1654
python3/diff_match_patch.py
python
diff_match_patch.diff_xIndex
(self, diffs, loc)
return last_chars2 + (loc - last_chars1)
loc is a location in text1, compute and return the equivalent location in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8 Args: diffs: Array of diff tuples. loc: Location within text1. Returns: Location within text2.
loc is a location in text1, compute and return the equivalent location in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
[ "loc", "is", "a", "location", "in", "text1", "compute", "and", "return", "the", "equivalent", "location", "in", "text2", ".", "e", ".", "g", ".", "The", "cat", "vs", "The", "big", "cat", "1", "-", ">", "1", "5", "-", ">", "8" ]
def diff_xIndex(self, diffs, loc): """loc is a location in text1, compute and return the equivalent location in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8 Args: diffs: Array of diff tuples. loc: Location within text1. Returns: Location within text2. """ chars1 = 0 chars2 = 0 last_chars1 = 0 last_chars2 = 0 for x in range(len(diffs)): (op, text) = diffs[x] if op != self.DIFF_INSERT: # Equality or deletion. chars1 += len(text) if op != self.DIFF_DELETE: # Equality or insertion. chars2 += len(text) if chars1 > loc: # Overshot the location. break last_chars1 = chars1 last_chars2 = chars2 if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE: # The location was deleted. return last_chars2 # Add the remaining len(character). return last_chars2 + (loc - last_chars1)
[ "def", "diff_xIndex", "(", "self", ",", "diffs", ",", "loc", ")", ":", "chars1", "=", "0", "chars2", "=", "0", "last_chars1", "=", "0", "last_chars2", "=", "0", "for", "x", "in", "range", "(", "len", "(", "diffs", ")", ")", ":", "(", "op", ",", "text", ")", "=", "diffs", "[", "x", "]", "if", "op", "!=", "self", ".", "DIFF_INSERT", ":", "# Equality or deletion.", "chars1", "+=", "len", "(", "text", ")", "if", "op", "!=", "self", ".", "DIFF_DELETE", ":", "# Equality or insertion.", "chars2", "+=", "len", "(", "text", ")", "if", "chars1", ">", "loc", ":", "# Overshot the location.", "break", "last_chars1", "=", "chars1", "last_chars2", "=", "chars2", "if", "len", "(", "diffs", ")", "!=", "x", "and", "diffs", "[", "x", "]", "[", "0", "]", "==", "self", ".", "DIFF_DELETE", ":", "# The location was deleted.", "return", "last_chars2", "# Add the remaining len(character).", "return", "last_chars2", "+", "(", "loc", "-", "last_chars1", ")" ]
https://github.com/google/diff-match-patch/blob/62f2e689f498f9c92dbc588c58750addec9b1654/python3/diff_match_patch.py#L1027-L1057
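The diff_xIndex record above maps a location in text1 to the equivalent location in text2, given a list of diff tuples. A minimal usage sketch follows; the import path assumes the code is installed as the diff-match-patch package (the record only shows the source file python3/diff_match_patch.py, so the package name is an assumption).

    from diff_match_patch import diff_match_patch  # assumed package/module name

    dmp = diff_match_patch()
    text1, text2 = "The cat", "The big cat"
    diffs = dmp.diff_main(text1, text2)
    # Translate locations in text1 into the corresponding locations in text2;
    # positions at or after the inserted "big " shift right by its length.
    for loc in (0, 4, 5):
        print(loc, "->", dmp.diff_xIndex(diffs, loc))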
as-ideas/TransformerTTS
363805548abdd93b33508da2c027ae514bfc1a07
utils/alignments.py
python
get_durations_from_alignment
(batch_alignments, mels, phonemes, weighted=False)
return durations, final_alignment, jumpiness, peakiness, diag_measure
:param batch_alignments: attention weights from autoregressive model. :param mels: mel spectrograms. :param phonemes: phoneme sequence. :param weighted: if True use weighted average of durations of heads, best head if False. :param binary: if True take maximum attention peak, sum if False. :param fill_gaps: if True fills zeros durations with ones. :param fix_jumps: if True, tries to scan alingments for attention jumps and interpolate. :param fill_mode: used only if fill_gaps is True. Is either 'max' or 'next'. Defines where to take the duration needed to fill the gap. Next takes it from the next non-zeros duration value, max from the sequence maximum. :return:
[]
def get_durations_from_alignment(batch_alignments, mels, phonemes, weighted=False): """ :param batch_alignments: attention weights from autoregressive model. :param mels: mel spectrograms. :param phonemes: phoneme sequence. :param weighted: if True use weighted average of durations of heads, best head if False. :param binary: if True take maximum attention peak, sum if False. :param fill_gaps: if True fills zeros durations with ones. :param fix_jumps: if True, tries to scan alingments for attention jumps and interpolate. :param fill_mode: used only if fill_gaps is True. Is either 'max' or 'next'. Defines where to take the duration needed to fill the gap. Next takes it from the next non-zeros duration value, max from the sequence maximum. :return: """ # mel_len - 1 because we remove last timestep, which is end_vector. start vector is not predicted (or removed from GTA) mel_len = mel_lengths(mels, padding_value=0.) - 1 # [N] # phonemes contain start and end tokens (start will be removed later) phon_len = phoneme_lengths(phonemes) - 1 jumpiness, peakiness, diag_measure = attention_score(att=batch_alignments, mel_len=mel_len, phon_len=phon_len, r=1) attn_scores = diag_measure + jumpiness + peakiness durations = [] final_alignment = [] for batch_num, al in enumerate(batch_alignments): unpad_mel_len = mel_len[batch_num] unpad_phon_len = phon_len[batch_num] unpad_alignments = al[:, 1:unpad_mel_len, 1:unpad_phon_len] # first dim is heads scored_attention = unpad_alignments * attn_scores[batch_num][:, None, None] if weighted: ref_attention_weights = np.sum(scored_attention, axis=0) else: best_head = np.argmax(attn_scores[batch_num]) ref_attention_weights = unpad_alignments[best_head] integer_durations = extract_durations_with_dijkstra(ref_attention_weights) assert np.sum(integer_durations) == mel_len[batch_num]-1, f'{np.sum(integer_durations)} vs {mel_len[batch_num]-1}' new_alignment = duration_to_alignment_matrix(integer_durations.astype(int)) best_head = np.argmax(attn_scores[batch_num]) best_attention = unpad_alignments[best_head] final_alignment.append(best_attention.T + new_alignment) durations.append(integer_durations) return durations, final_alignment, jumpiness, peakiness, diag_measure
[ "def", "get_durations_from_alignment", "(", "batch_alignments", ",", "mels", ",", "phonemes", ",", "weighted", "=", "False", ")", ":", "# mel_len - 1 because we remove last timestep, which is end_vector. start vector is not predicted (or removed from GTA)", "mel_len", "=", "mel_lengths", "(", "mels", ",", "padding_value", "=", "0.", ")", "-", "1", "# [N]", "# phonemes contain start and end tokens (start will be removed later)", "phon_len", "=", "phoneme_lengths", "(", "phonemes", ")", "-", "1", "jumpiness", ",", "peakiness", ",", "diag_measure", "=", "attention_score", "(", "att", "=", "batch_alignments", ",", "mel_len", "=", "mel_len", ",", "phon_len", "=", "phon_len", ",", "r", "=", "1", ")", "attn_scores", "=", "diag_measure", "+", "jumpiness", "+", "peakiness", "durations", "=", "[", "]", "final_alignment", "=", "[", "]", "for", "batch_num", ",", "al", "in", "enumerate", "(", "batch_alignments", ")", ":", "unpad_mel_len", "=", "mel_len", "[", "batch_num", "]", "unpad_phon_len", "=", "phon_len", "[", "batch_num", "]", "unpad_alignments", "=", "al", "[", ":", ",", "1", ":", "unpad_mel_len", ",", "1", ":", "unpad_phon_len", "]", "# first dim is heads", "scored_attention", "=", "unpad_alignments", "*", "attn_scores", "[", "batch_num", "]", "[", ":", ",", "None", ",", "None", "]", "if", "weighted", ":", "ref_attention_weights", "=", "np", ".", "sum", "(", "scored_attention", ",", "axis", "=", "0", ")", "else", ":", "best_head", "=", "np", ".", "argmax", "(", "attn_scores", "[", "batch_num", "]", ")", "ref_attention_weights", "=", "unpad_alignments", "[", "best_head", "]", "integer_durations", "=", "extract_durations_with_dijkstra", "(", "ref_attention_weights", ")", "assert", "np", ".", "sum", "(", "integer_durations", ")", "==", "mel_len", "[", "batch_num", "]", "-", "1", ",", "f'{np.sum(integer_durations)} vs {mel_len[batch_num]-1}'", "new_alignment", "=", "duration_to_alignment_matrix", "(", "integer_durations", ".", "astype", "(", "int", ")", ")", "best_head", "=", "np", ".", "argmax", "(", "attn_scores", "[", "batch_num", "]", ")", "best_attention", "=", "unpad_alignments", "[", "best_head", "]", "final_alignment", ".", "append", "(", "best_attention", ".", "T", "+", "new_alignment", ")", "durations", ".", "append", "(", "integer_durations", ")", "return", "durations", ",", "final_alignment", ",", "jumpiness", ",", "peakiness", ",", "diag_measure" ]
https://github.com/as-ideas/TransformerTTS/blob/363805548abdd93b33508da2c027ae514bfc1a07/utils/alignments.py#L102-L143
eirannejad/pyRevit
49c0b7eb54eb343458ce1365425e6552d0c47d44
site-packages/sqlalchemy/dialects/oracle/cx_oracle.py
python
OracleDialect_cx_oracle._detect_decimal_char
(self, connection)
detect if the decimal separator character is not '.', as is the case with European locale settings for NLS_LANG. cx_oracle itself uses similar logic when it formats Python Decimal objects to strings on the bind side (as of 5.0.3), as Oracle sends/receives string numerics only in the current locale.
detect if the decimal separator character is not '.', as is the case with European locale settings for NLS_LANG.
[ "detect", "if", "the", "decimal", "separator", "character", "is", "not", ".", "as", "is", "the", "case", "with", "European", "locale", "settings", "for", "NLS_LANG", "." ]
def _detect_decimal_char(self, connection): """detect if the decimal separator character is not '.', as is the case with European locale settings for NLS_LANG. cx_oracle itself uses similar logic when it formats Python Decimal objects to strings on the bind side (as of 5.0.3), as Oracle sends/receives string numerics only in the current locale. """ if self.cx_oracle_ver < (5,): # no output type handlers before version 5 return cx_Oracle = self.dbapi conn = connection.connection # override the output_type_handler that's # on the cx_oracle connection with a plain # one on the cursor def output_type_handler(cursor, name, defaultType, size, precision, scale): return cursor.var( cx_Oracle.STRING, 255, arraysize=cursor.arraysize) cursor = conn.cursor() cursor.outputtypehandler = output_type_handler cursor.execute("SELECT 0.1 FROM DUAL") val = cursor.fetchone()[0] cursor.close() char = re.match(r"([\.,])", val).group(1) if char != '.': _detect_decimal = self._detect_decimal self._detect_decimal = \ lambda value: _detect_decimal(value.replace(char, '.')) self._to_decimal = \ lambda value: decimal.Decimal(value.replace(char, '.'))
[ "def", "_detect_decimal_char", "(", "self", ",", "connection", ")", ":", "if", "self", ".", "cx_oracle_ver", "<", "(", "5", ",", ")", ":", "# no output type handlers before version 5", "return", "cx_Oracle", "=", "self", ".", "dbapi", "conn", "=", "connection", ".", "connection", "# override the output_type_handler that's", "# on the cx_oracle connection with a plain", "# one on the cursor", "def", "output_type_handler", "(", "cursor", ",", "name", ",", "defaultType", ",", "size", ",", "precision", ",", "scale", ")", ":", "return", "cursor", ".", "var", "(", "cx_Oracle", ".", "STRING", ",", "255", ",", "arraysize", "=", "cursor", ".", "arraysize", ")", "cursor", "=", "conn", ".", "cursor", "(", ")", "cursor", ".", "outputtypehandler", "=", "output_type_handler", "cursor", ".", "execute", "(", "\"SELECT 0.1 FROM DUAL\"", ")", "val", "=", "cursor", ".", "fetchone", "(", ")", "[", "0", "]", "cursor", ".", "close", "(", ")", "char", "=", "re", ".", "match", "(", "r\"([\\.,])\"", ",", "val", ")", ".", "group", "(", "1", ")", "if", "char", "!=", "'.'", ":", "_detect_decimal", "=", "self", ".", "_detect_decimal", "self", ".", "_detect_decimal", "=", "lambda", "value", ":", "_detect_decimal", "(", "value", ".", "replace", "(", "char", ",", "'.'", ")", ")", "self", ".", "_to_decimal", "=", "lambda", "value", ":", "decimal", ".", "Decimal", "(", "value", ".", "replace", "(", "char", ",", "'.'", ")", ")" ]
https://github.com/eirannejad/pyRevit/blob/49c0b7eb54eb343458ce1365425e6552d0c47d44/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py#L802-L840
obspy/obspy
0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f
obspy/clients/fdsn/mass_downloader/download_helpers.py
python
Station.remove_files
(self, logger, reason)
Delete all files under it. Only delete stuff that actually has been downloaded!
Delete all files under it. Only delete stuff that actually has been downloaded!
[ "Delete", "all", "files", "under", "it", ".", "Only", "delete", "stuff", "that", "actually", "has", "been", "downloaded!" ]
def remove_files(self, logger, reason): """ Delete all files under it. Only delete stuff that actually has been downloaded! """ for chan in self.channels: for ti in chan.intervals: if ti.status != STATUS.DOWNLOADED or not ti.filename: continue if os.path.exists(ti.filename): logger.info("Deleting MiniSEED file '%s'. Reason: %s" % ( ti.filename, reason)) utils.safe_delete(ti.filename) if self.stationxml_status == STATUS.DOWNLOADED and \ self.stationxml_filename and \ os.path.exists(self.stationxml_filename): logger.info("Deleting StationXMl file '%s'. Reason: %s" % (self.stationxml_filename, reason)) utils.safe_delete(self.stationxml_filename)
[ "def", "remove_files", "(", "self", ",", "logger", ",", "reason", ")", ":", "for", "chan", "in", "self", ".", "channels", ":", "for", "ti", "in", "chan", ".", "intervals", ":", "if", "ti", ".", "status", "!=", "STATUS", ".", "DOWNLOADED", "or", "not", "ti", ".", "filename", ":", "continue", "if", "os", ".", "path", ".", "exists", "(", "ti", ".", "filename", ")", ":", "logger", ".", "info", "(", "\"Deleting MiniSEED file '%s'. Reason: %s\"", "%", "(", "ti", ".", "filename", ",", "reason", ")", ")", "utils", ".", "safe_delete", "(", "ti", ".", "filename", ")", "if", "self", ".", "stationxml_status", "==", "STATUS", ".", "DOWNLOADED", "and", "self", ".", "stationxml_filename", "and", "os", ".", "path", ".", "exists", "(", "self", ".", "stationxml_filename", ")", ":", "logger", ".", "info", "(", "\"Deleting StationXMl file '%s'. Reason: %s\"", "%", "(", "self", ".", "stationxml_filename", ",", "reason", ")", ")", "utils", ".", "safe_delete", "(", "self", ".", "stationxml_filename", ")" ]
https://github.com/obspy/obspy/blob/0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f/obspy/clients/fdsn/mass_downloader/download_helpers.py#L126-L145
MeanEYE/Sunflower
1024bbdde3b8e202ddad3553b321a7b6230bffc9
sunflower/gui/input_dialog.py
python
CreateDialog._entry_activate
(self, widget, data=None)
Handle octal mode change
Handle octal mode change
[ "Handle", "octal", "mode", "change" ]
def _entry_activate(self, widget, data=None): """Handle octal mode change""" self._mode = int(widget.get_text(), 8) self.update_mode()
[ "def", "_entry_activate", "(", "self", ",", "widget", ",", "data", "=", "None", ")", ":", "self", ".", "_mode", "=", "int", "(", "widget", ".", "get_text", "(", ")", ",", "8", ")", "self", ".", "update_mode", "(", ")" ]
https://github.com/MeanEYE/Sunflower/blob/1024bbdde3b8e202ddad3553b321a7b6230bffc9/sunflower/gui/input_dialog.py#L303-L306
svip-lab/impersonator
b041dd415157c1e7f5b46e579a1ad4dffabb2e66
thirdparty/his_evaluators/his_evaluators/evaluators/appearance_transfer.py
python
IPERAppearanceTransferEvaluator.__init__
(self, data_dir, dataset="iPER_Appearance_Transfer")
Args: data_dir (str): the data directory dataset (str): the dataset name, it can be --iPER_Appearance_Transfer: the iPER dataset;
[]
def __init__(self, data_dir, dataset="iPER_Appearance_Transfer"): """ Args: data_dir (str): the data directory dataset (str): the dataset name, it can be --iPER_Appearance_Transfer: the iPER dataset; """ super().__init__(dataset=dataset, data_dir=data_dir)
[ "def", "__init__", "(", "self", ",", "data_dir", ",", "dataset", "=", "\"iPER_Appearance_Transfer\"", ")", ":", "super", "(", ")", ".", "__init__", "(", "dataset", "=", "dataset", ",", "data_dir", "=", "data_dir", ")" ]
https://github.com/svip-lab/impersonator/blob/b041dd415157c1e7f5b46e579a1ad4dffabb2e66/thirdparty/his_evaluators/his_evaluators/evaluators/appearance_transfer.py#L205-L213
ym2011/POC-EXP
206b22d3a6b2a172359678df33bbc5b2ad04b6c3
K8/Web-Exp/sqlmap/thirdparty/oset/_abc.py
python
ABCMeta._dump_registry
(cls, file=None)
Debug helper to print the ABC registry.
Debug helper to print the ABC registry.
[ "Debug", "helper", "to", "print", "the", "ABC", "registry", "." ]
def _dump_registry(cls, file=None): """Debug helper to print the ABC registry.""" print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__) print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter for name in sorted(cls.__dict__.keys()): if name.startswith("_abc_"): value = getattr(cls, name) print >> file, "%s: %r" % (name, value)
[ "def", "_dump_registry", "(", "cls", ",", "file", "=", "None", ")", ":", "print", ">>", "file", ",", "\"Class: %s.%s\"", "%", "(", "cls", ".", "__module__", ",", "cls", ".", "__name__", ")", "print", ">>", "file", ",", "\"Inv.counter: %s\"", "%", "ABCMeta", ".", "_abc_invalidation_counter", "for", "name", "in", "sorted", "(", "cls", ".", "__dict__", ".", "keys", "(", ")", ")", ":", "if", "name", ".", "startswith", "(", "\"_abc_\"", ")", ":", "value", "=", "getattr", "(", "cls", ",", "name", ")", "print", ">>", "file", ",", "\"%s: %r\"", "%", "(", "name", ",", "value", ")" ]
https://github.com/ym2011/POC-EXP/blob/206b22d3a6b2a172359678df33bbc5b2ad04b6c3/K8/Web-Exp/sqlmap/thirdparty/oset/_abc.py#L97-L104
mars-project/mars
6afd7ed86db77f29cc9470485698ef192ecc6d33
mars/tensor/statistics/ptp.py
python
ptp
(a, axis=None, out=None, keepdims=None)
return t
Range of values (maximum - minimum) along an axis. The name of the function comes from the acronym for 'peak to peak'. Parameters ---------- a : array_like Input values. axis : int, optional Axis along which to find the peaks. By default, flatten the array. out : array_like Alternative output tensor in which to place the result. It must have the same shape and buffer length as the expected output, but the type of the output values will be cast if necessary. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then `keepdims` will not be passed through to the `ptp` method of sub-classes of `Tensor`, however any non-default value will be. If the sub-class' method does not implement `keepdims` any exceptions will be raised. Returns ------- ptp : Tensor A new tensor holding the result, unless `out` was specified, in which case a reference to `out` is returned. Examples -------- >>> import mars.tensor as mt >>> x = mt.arange(4).reshape((2,2)) >>> x.execute() array([[0, 1], [2, 3]]) >>> mt.ptp(x, axis=0).execute() array([2, 2]) >>> mt.ptp(x, axis=1).execute() array([1, 1])
Range of values (maximum - minimum) along an axis.
[ "Range", "of", "values", "(", "maximum", "-", "minimum", ")", "along", "an", "axis", "." ]
def ptp(a, axis=None, out=None, keepdims=None): """ Range of values (maximum - minimum) along an axis. The name of the function comes from the acronym for 'peak to peak'. Parameters ---------- a : array_like Input values. axis : int, optional Axis along which to find the peaks. By default, flatten the array. out : array_like Alternative output tensor in which to place the result. It must have the same shape and buffer length as the expected output, but the type of the output values will be cast if necessary. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then `keepdims` will not be passed through to the `ptp` method of sub-classes of `Tensor`, however any non-default value will be. If the sub-class' method does not implement `keepdims` any exceptions will be raised. Returns ------- ptp : Tensor A new tensor holding the result, unless `out` was specified, in which case a reference to `out` is returned. Examples -------- >>> import mars.tensor as mt >>> x = mt.arange(4).reshape((2,2)) >>> x.execute() array([[0, 1], [2, 3]]) >>> mt.ptp(x, axis=0).execute() array([2, 2]) >>> mt.ptp(x, axis=1).execute() array([1, 1]) """ a = astensor(a) if axis is None: a = ravel(a) else: validate_axis(a.ndim, axis) t = a.max(axis=axis, keepdims=keepdims) - a.min(axis=axis, keepdims=keepdims) if out is not None: if not isinstance(out, Tensor): raise TypeError(f"out should be Tensor object, got {type(out)} instead") check_out_param(out, t, "same_kind") out.data = t.data return out return t
[ "def", "ptp", "(", "a", ",", "axis", "=", "None", ",", "out", "=", "None", ",", "keepdims", "=", "None", ")", ":", "a", "=", "astensor", "(", "a", ")", "if", "axis", "is", "None", ":", "a", "=", "ravel", "(", "a", ")", "else", ":", "validate_axis", "(", "a", ".", "ndim", ",", "axis", ")", "t", "=", "a", ".", "max", "(", "axis", "=", "axis", ",", "keepdims", "=", "keepdims", ")", "-", "a", ".", "min", "(", "axis", "=", "axis", ",", "keepdims", "=", "keepdims", ")", "if", "out", "is", "not", "None", ":", "if", "not", "isinstance", "(", "out", ",", "Tensor", ")", ":", "raise", "TypeError", "(", "f\"out should be Tensor object, got {type(out)} instead\"", ")", "check_out_param", "(", "out", ",", "t", ",", "\"same_kind\"", ")", "out", ".", "data", "=", "t", ".", "data", "return", "out", "return", "t" ]
https://github.com/mars-project/mars/blob/6afd7ed86db77f29cc9470485698ef192ecc6d33/mars/tensor/statistics/ptp.py#L23-L90
rowliny/DiffHelper
ab3a96f58f9579d0023aed9ebd785f4edf26f8af
Tool/SitePackages/lxml/html/diff.py
python
copy_annotations
(src, dest)
Copy annotations from the tokens listed in src to the tokens in dest
Copy annotations from the tokens listed in src to the tokens in dest
[ "Copy", "annotations", "from", "the", "tokens", "listed", "in", "src", "to", "the", "tokens", "in", "dest" ]
def copy_annotations(src, dest): """ Copy annotations from the tokens listed in src to the tokens in dest """ assert len(src) == len(dest) for src_tok, dest_tok in zip(src, dest): dest_tok.annotation = src_tok.annotation
[ "def", "copy_annotations", "(", "src", ",", "dest", ")", ":", "assert", "len", "(", "src", ")", "==", "len", "(", "dest", ")", "for", "src_tok", ",", "dest_tok", "in", "zip", "(", "src", ",", "dest", ")", ":", "dest_tok", ".", "annotation", "=", "src_tok", ".", "annotation" ]
https://github.com/rowliny/DiffHelper/blob/ab3a96f58f9579d0023aed9ebd785f4edf26f8af/Tool/SitePackages/lxml/html/diff.py#L96-L102
MDudek-ICS/TRISIS-TRITON-HATMAN
15a00af7fd1040f0430729d024427601f84886a1
decompiled_code/library/os2emxpath.py
python
dirname
(p)
return split(p)[0]
Returns the directory component of a pathname
Returns the directory component of a pathname
[ "Returns", "the", "directory", "component", "of", "a", "pathname" ]
def dirname(p): """Returns the directory component of a pathname""" return split(p)[0]
[ "def", "dirname", "(", "p", ")", ":", "return", "split", "(", "p", ")", "[", "0", "]" ]
https://github.com/MDudek-ICS/TRISIS-TRITON-HATMAN/blob/15a00af7fd1040f0430729d024427601f84886a1/decompiled_code/library/os2emxpath.py#L82-L84
aimagelab/meshed-memory-transformer
e0fe3fae68091970407e82e5b907cbc423f25df2
data/dataset.py
python
COCO.get_samples
(cls, roots, ids_dataset=None)
return train_samples, val_samples, test_samples
[]
def get_samples(cls, roots, ids_dataset=None): train_samples = [] val_samples = [] test_samples = [] for split in ['train', 'val', 'test']: if isinstance(roots[split]['cap'], tuple): coco_dataset = (pyCOCO(roots[split]['cap'][0]), pyCOCO(roots[split]['cap'][1])) root = roots[split]['img'] else: coco_dataset = (pyCOCO(roots[split]['cap']),) root = (roots[split]['img'],) if ids_dataset is None: ids = list(coco_dataset.anns.keys()) else: ids = ids_dataset[split] if isinstance(ids, tuple): bp = len(ids[0]) ids = list(ids[0]) + list(ids[1]) else: bp = len(ids) for index in range(len(ids)): if index < bp: coco = coco_dataset[0] img_root = root[0] else: coco = coco_dataset[1] img_root = root[1] ann_id = ids[index] caption = coco.anns[ann_id]['caption'] img_id = coco.anns[ann_id]['image_id'] filename = coco.loadImgs(img_id)[0]['file_name'] example = Example.fromdict({'image': os.path.join(img_root, filename), 'text': caption}) if split == 'train': train_samples.append(example) elif split == 'val': val_samples.append(example) elif split == 'test': test_samples.append(example) return train_samples, val_samples, test_samples
[ "def", "get_samples", "(", "cls", ",", "roots", ",", "ids_dataset", "=", "None", ")", ":", "train_samples", "=", "[", "]", "val_samples", "=", "[", "]", "test_samples", "=", "[", "]", "for", "split", "in", "[", "'train'", ",", "'val'", ",", "'test'", "]", ":", "if", "isinstance", "(", "roots", "[", "split", "]", "[", "'cap'", "]", ",", "tuple", ")", ":", "coco_dataset", "=", "(", "pyCOCO", "(", "roots", "[", "split", "]", "[", "'cap'", "]", "[", "0", "]", ")", ",", "pyCOCO", "(", "roots", "[", "split", "]", "[", "'cap'", "]", "[", "1", "]", ")", ")", "root", "=", "roots", "[", "split", "]", "[", "'img'", "]", "else", ":", "coco_dataset", "=", "(", "pyCOCO", "(", "roots", "[", "split", "]", "[", "'cap'", "]", ")", ",", ")", "root", "=", "(", "roots", "[", "split", "]", "[", "'img'", "]", ",", ")", "if", "ids_dataset", "is", "None", ":", "ids", "=", "list", "(", "coco_dataset", ".", "anns", ".", "keys", "(", ")", ")", "else", ":", "ids", "=", "ids_dataset", "[", "split", "]", "if", "isinstance", "(", "ids", ",", "tuple", ")", ":", "bp", "=", "len", "(", "ids", "[", "0", "]", ")", "ids", "=", "list", "(", "ids", "[", "0", "]", ")", "+", "list", "(", "ids", "[", "1", "]", ")", "else", ":", "bp", "=", "len", "(", "ids", ")", "for", "index", "in", "range", "(", "len", "(", "ids", ")", ")", ":", "if", "index", "<", "bp", ":", "coco", "=", "coco_dataset", "[", "0", "]", "img_root", "=", "root", "[", "0", "]", "else", ":", "coco", "=", "coco_dataset", "[", "1", "]", "img_root", "=", "root", "[", "1", "]", "ann_id", "=", "ids", "[", "index", "]", "caption", "=", "coco", ".", "anns", "[", "ann_id", "]", "[", "'caption'", "]", "img_id", "=", "coco", ".", "anns", "[", "ann_id", "]", "[", "'image_id'", "]", "filename", "=", "coco", ".", "loadImgs", "(", "img_id", ")", "[", "0", "]", "[", "'file_name'", "]", "example", "=", "Example", ".", "fromdict", "(", "{", "'image'", ":", "os", ".", "path", ".", "join", "(", "img_root", ",", "filename", ")", ",", "'text'", ":", "caption", "}", ")", "if", "split", "==", "'train'", ":", "train_samples", ".", "append", "(", "example", ")", "elif", "split", "==", "'val'", ":", "val_samples", ".", "append", "(", "example", ")", "elif", "split", "==", "'test'", ":", "test_samples", ".", "append", "(", "example", ")", "return", "train_samples", ",", "val_samples", ",", "test_samples" ]
https://github.com/aimagelab/meshed-memory-transformer/blob/e0fe3fae68091970407e82e5b907cbc423f25df2/data/dataset.py#L232-L278
twisted/twisted
dee676b040dd38b847ea6fb112a712cb5e119490
src/twisted/names/dns.py
python
_nameToLabels
(name)
return labels
Split a domain name into its constituent labels. @type name: L{bytes} @param name: A fully qualified domain name (with or without a trailing dot). @return: A L{list} of labels ending with an empty label representing the DNS root zone. @rtype: L{list} of L{bytes}
Split a domain name into its constituent labels.
[ "Split", "a", "domain", "name", "into", "its", "constituent", "labels", "." ]
def _nameToLabels(name): """ Split a domain name into its constituent labels. @type name: L{bytes} @param name: A fully qualified domain name (with or without a trailing dot). @return: A L{list} of labels ending with an empty label representing the DNS root zone. @rtype: L{list} of L{bytes} """ if name in (b"", b"."): return [b""] labels = name.split(b".") if labels[-1] != b"": labels.append(b"") return labels
[ "def", "_nameToLabels", "(", "name", ")", ":", "if", "name", "in", "(", "b\"\"", ",", "b\".\"", ")", ":", "return", "[", "b\"\"", "]", "labels", "=", "name", ".", "split", "(", "b\".\"", ")", "if", "labels", "[", "-", "1", "]", "!=", "b\"\"", ":", "labels", ".", "append", "(", "b\"\"", ")", "return", "labels" ]
https://github.com/twisted/twisted/blob/dee676b040dd38b847ea6fb112a712cb5e119490/src/twisted/names/dns.py#L298-L315
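Based on the function body quoted in the record above, _nameToLabels splits a domain name into byte-string labels and always ends the list with an empty label for the DNS root. A small illustration, assuming Twisted is installed; note that the function is a private helper of twisted.names.dns.

    from twisted.names.dns import _nameToLabels  # private helper, per the record's path

    print(_nameToLabels(b"www.example.com"))  # [b'www', b'example', b'com', b'']
    print(_nameToLabels(b"example.com."))     # [b'example', b'com', b''] (trailing dot already supplies the root label)
    print(_nameToLabels(b""))                 # [b''] (empty name is treated as the root)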
TRI-ML/packnet-sfm
f59b1d615777a9987285a10e45b5d87b0369fa7d
packnet_sfm/loggers/wandb_logger.py
python
WandbLogger.log_images
(self, func, mode, batch, output, args, dataset, world_size, config)
Adds images to metrics for later logging. Parameters ---------- func : Function Function used to process the image before logging mode : str {"train", "val"} Training stage where the images come from (serve as prefix for logging) batch : dict Data batch output : dict Model output args : tuple Step arguments dataset : CfgNode Dataset configuration world_size : int Number of GPUs, used to get logging samples at consistent intervals config : CfgNode Model configuration
Adds images to metrics for later logging.
[ "Adds", "images", "to", "metrics", "for", "later", "logging", "." ]
def log_images(self, func, mode, batch, output, args, dataset, world_size, config): """ Adds images to metrics for later logging. Parameters ---------- func : Function Function used to process the image before logging mode : str {"train", "val"} Training stage where the images come from (serve as prefix for logging) batch : dict Data batch output : dict Model output args : tuple Step arguments dataset : CfgNode Dataset configuration world_size : int Number of GPUs, used to get logging samples at consistent intervals config : CfgNode Model configuration """ dataset_idx = 0 if len(args) == 1 else args[1] prefix = prepare_dataset_prefix(config, dataset_idx) interval = len(dataset[dataset_idx]) // world_size // config.num_logs if args[0] % interval == 0: prefix_idx = '{}-{}-{}'.format(mode, prefix, batch['idx'][0].item()) func(prefix_idx, batch, output)
[ "def", "log_images", "(", "self", ",", "func", ",", "mode", ",", "batch", ",", "output", ",", "args", ",", "dataset", ",", "world_size", ",", "config", ")", ":", "dataset_idx", "=", "0", "if", "len", "(", "args", ")", "==", "1", "else", "args", "[", "1", "]", "prefix", "=", "prepare_dataset_prefix", "(", "config", ",", "dataset_idx", ")", "interval", "=", "len", "(", "dataset", "[", "dataset_idx", "]", ")", "//", "world_size", "//", "config", ".", "num_logs", "if", "args", "[", "0", "]", "%", "interval", "==", "0", ":", "prefix_idx", "=", "'{}-{}-{}'", ".", "format", "(", "mode", ",", "prefix", ",", "batch", "[", "'idx'", "]", "[", "0", "]", ".", "item", "(", ")", ")", "func", "(", "prefix_idx", ",", "batch", ",", "output", ")" ]
https://github.com/TRI-ML/packnet-sfm/blob/f59b1d615777a9987285a10e45b5d87b0369fa7d/packnet_sfm/loggers/wandb_logger.py#L133-L162
saltstack/salt
fae5bc757ad0f1716483ce7ae180b451545c2058
salt/runners/drac.py
python
__connect
(hostname, timeout=20, username=None, password=None)
return client
Connect to the DRAC
Connect to the DRAC
[ "Connect", "to", "the", "DRAC" ]
def __connect(hostname, timeout=20, username=None, password=None): """ Connect to the DRAC """ drac_cred = __opts__.get("drac") err_msg = ( "No drac login credentials found. Please add the 'username' and 'password' " "fields beneath a 'drac' key in the master configuration file. Or you can " "pass in a username and password as kwargs at the CLI." ) if not username: if drac_cred is None: log.error(err_msg) return False username = drac_cred.get("username", None) if not password: if drac_cred is None: log.error(err_msg) return False password = drac_cred.get("password", None) client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: client.connect(hostname, username=username, password=password, timeout=timeout) except Exception as e: # pylint: disable=broad-except log.error("Unable to connect to %s: %s", hostname, e) return False return client
[ "def", "__connect", "(", "hostname", ",", "timeout", "=", "20", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "drac_cred", "=", "__opts__", ".", "get", "(", "\"drac\"", ")", "err_msg", "=", "(", "\"No drac login credentials found. Please add the 'username' and 'password' \"", "\"fields beneath a 'drac' key in the master configuration file. Or you can \"", "\"pass in a username and password as kwargs at the CLI.\"", ")", "if", "not", "username", ":", "if", "drac_cred", "is", "None", ":", "log", ".", "error", "(", "err_msg", ")", "return", "False", "username", "=", "drac_cred", ".", "get", "(", "\"username\"", ",", "None", ")", "if", "not", "password", ":", "if", "drac_cred", "is", "None", ":", "log", ".", "error", "(", "err_msg", ")", "return", "False", "password", "=", "drac_cred", ".", "get", "(", "\"password\"", ",", "None", ")", "client", "=", "paramiko", ".", "SSHClient", "(", ")", "client", ".", "set_missing_host_key_policy", "(", "paramiko", ".", "AutoAddPolicy", "(", ")", ")", "try", ":", "client", ".", "connect", "(", "hostname", ",", "username", "=", "username", ",", "password", "=", "password", ",", "timeout", "=", "timeout", ")", "except", "Exception", "as", "e", ":", "# pylint: disable=broad-except", "log", ".", "error", "(", "\"Unable to connect to %s: %s\"", ",", "hostname", ",", "e", ")", "return", "False", "return", "client" ]
https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/runners/drac.py#L38-L69
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/physics/quantum/qasm.py
python
stripquotes
(s)
return s
Replace explicit quotes in a string. >>> from sympy.physics.quantum.qasm import stripquotes >>> stripquotes("'S'") == 'S' True >>> stripquotes('"S"') == 'S' True >>> stripquotes('S') == 'S' True
Replace explicit quotes in a string.
[ "Replace", "explicit", "quotes", "in", "a", "string", "." ]
def stripquotes(s): """Replace explicit quotes in a string. >>> from sympy.physics.quantum.qasm import stripquotes >>> stripquotes("'S'") == 'S' True >>> stripquotes('"S"') == 'S' True >>> stripquotes('S') == 'S' True """ s = s.replace('"', '') # Remove second set of quotes? s = s.replace("'", '') return s
[ "def", "stripquotes", "(", "s", ")", ":", "s", "=", "s", ".", "replace", "(", "'\"'", ",", "''", ")", "# Remove second set of quotes?", "s", "=", "s", ".", "replace", "(", "\"'\"", ",", "''", ")", "return", "s" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/physics/quantum/qasm.py#L104-L117
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/ext/ndb/polymodel.py
python
_ClassKeyProperty.__init__
(self, name=_CLASS_KEY_PROPERTY, indexed=True)
Constructor. If you really want to you can give this a different datastore name or make it unindexed. For example: class Foo(PolyModel): class_ = _ClassKeyProperty(indexed=False)
Constructor.
[ "Constructor", "." ]
def __init__(self, name=_CLASS_KEY_PROPERTY, indexed=True): """Constructor. If you really want to you can give this a different datastore name or make it unindexed. For example: class Foo(PolyModel): class_ = _ClassKeyProperty(indexed=False) """ super(_ClassKeyProperty, self).__init__(name=name, indexed=indexed, repeated=True)
[ "def", "__init__", "(", "self", ",", "name", "=", "_CLASS_KEY_PROPERTY", ",", "indexed", "=", "True", ")", ":", "super", "(", "_ClassKeyProperty", ",", "self", ")", ".", "__init__", "(", "name", "=", "name", ",", "indexed", "=", "indexed", ",", "repeated", "=", "True", ")" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/ext/ndb/polymodel.py#L41-L51
ucsb-seclab/karonte
427ac313e596f723e40768b95d13bd7a9fc92fd8
eval/multi_bin/all_bins/binary_dependency_graph/binary_dependency_graph.py
python
BdgNode.__setstate__
(self, info)
[]
def __setstate__(self, info): self._p = info[0] self._bin = info[1] self._role_strings_info = info[2] self._root = info[3] self._generator_strings = info[4] self._plugins_used = []
[ "def", "__setstate__", "(", "self", ",", "info", ")", ":", "self", ".", "_p", "=", "info", "[", "0", "]", "self", ".", "_bin", "=", "info", "[", "1", "]", "self", ".", "_role_strings_info", "=", "info", "[", "2", "]", "self", ".", "_root", "=", "info", "[", "3", "]", "self", ".", "_generator_strings", "=", "info", "[", "4", "]", "self", ".", "_plugins_used", "=", "[", "]" ]
https://github.com/ucsb-seclab/karonte/blob/427ac313e596f723e40768b95d13bd7a9fc92fd8/eval/multi_bin/all_bins/binary_dependency_graph/binary_dependency_graph.py#L96-L102
wxWidgets/Phoenix
b2199e299a6ca6d866aa6f3d0888499136ead9d6
wx/lib/agw/ribbon/buttonbar.py
python
RibbonButtonBar.DoGetNextSmallerSize
(self, direction, _result)
return result
Implementation of :meth:`RibbonControl.GetNextSmallerSize() <lib.agw.ribbon.control.RibbonControl.GetNextSmallerSize>`. Controls which have non-continuous sizing must override this virtual function rather than :meth:`RibbonControl.GetNextSmallerSize() <lib.agw.ribbon.control.RibbonControl.GetNextSmallerSize>`. :return: An instance of :class:`wx.Size`.
Implementation of :meth:`RibbonControl.GetNextSmallerSize() <lib.agw.ribbon.control.RibbonControl.GetNextSmallerSize>`.
[ "Implementation", "of", ":", "meth", ":", "RibbonControl", ".", "GetNextSmallerSize", "()", "<lib", ".", "agw", ".", "ribbon", ".", "control", ".", "RibbonControl", ".", "GetNextSmallerSize", ">", "." ]
def DoGetNextSmallerSize(self, direction, _result): """ Implementation of :meth:`RibbonControl.GetNextSmallerSize() <lib.agw.ribbon.control.RibbonControl.GetNextSmallerSize>`. Controls which have non-continuous sizing must override this virtual function rather than :meth:`RibbonControl.GetNextSmallerSize() <lib.agw.ribbon.control.RibbonControl.GetNextSmallerSize>`. :return: An instance of :class:`wx.Size`. """ result = wx.Size(*_result) for i, layout in enumerate(self._layouts): size = wx.Size(*layout.overall_size) if direction == wx.HORIZONTAL: if size.x < result.x and size.y <= result.y: result.x = size.x break elif direction == wx.VERTICAL: if size.x <= result.x and size.y < result.y: result.y = size.y break elif direction == wx.BOTH: if size.x < result.x and size.y < result.y: result = size break return result
[ "def", "DoGetNextSmallerSize", "(", "self", ",", "direction", ",", "_result", ")", ":", "result", "=", "wx", ".", "Size", "(", "*", "_result", ")", "for", "i", ",", "layout", "in", "enumerate", "(", "self", ".", "_layouts", ")", ":", "size", "=", "wx", ".", "Size", "(", "*", "layout", ".", "overall_size", ")", "if", "direction", "==", "wx", ".", "HORIZONTAL", ":", "if", "size", ".", "x", "<", "result", ".", "x", "and", "size", ".", "y", "<=", "result", ".", "y", ":", "result", ".", "x", "=", "size", ".", "x", "break", "elif", "direction", "==", "wx", ".", "VERTICAL", ":", "if", "size", ".", "x", "<=", "result", ".", "x", "and", "size", ".", "y", "<", "result", ".", "y", ":", "result", ".", "y", "=", "size", ".", "y", "break", "elif", "direction", "==", "wx", ".", "BOTH", ":", "if", "size", ".", "x", "<", "result", ".", "x", "and", "size", ".", "y", "<", "result", ".", "y", ":", "result", "=", "size", "break", "return", "result" ]
https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/ribbon/buttonbar.py#L746-L776
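DoGetNextSmallerSize scans the precomputed layouts and returns the first overall size that shrinks the requested dimension without growing the other. The same selection logic without wx types, on plain (width, height) tuples (a hedged sketch; the ordering assumption mirrors self._layouts being largest-first):

def next_smaller(layout_sizes, direction, current):
    w, h = current
    for lw, lh in layout_sizes:
        if direction == "horizontal" and lw < w and lh <= h:
            return (lw, h)       # only the width shrinks, as in the wx code
        if direction == "vertical" and lw <= w and lh < h:
            return (w, lh)
        if direction == "both" and lw < w and lh < h:
            return (lw, lh)
    return current

print(next_smaller([(120, 40), (80, 40), (40, 40)], "horizontal", (100, 40)))  # (80, 40)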
snowkylin/ntm
7db406826d6109f44c61a857ef2d1aadbbec7f54
utils.py
python
baseN
(num,b)
return ((num == 0) and "0" ) or ( baseN(num // b, b).lstrip("0") + "0123456789abcdefghijklmnopqrstuvwxyz"[num % b])
[]
def baseN(num,b): return ((num == 0) and "0" ) or ( baseN(num // b, b).lstrip("0") + "0123456789abcdefghijklmnopqrstuvwxyz"[num % b])
[ "def", "baseN", "(", "num", ",", "b", ")", ":", "return", "(", "(", "num", "==", "0", ")", "and", "\"0\"", ")", "or", "(", "baseN", "(", "num", "//", "b", ",", "b", ")", ".", "lstrip", "(", "\"0\"", ")", "+", "\"0123456789abcdefghijklmnopqrstuvwxyz\"", "[", "num", "%", "b", "]", ")" ]
https://github.com/snowkylin/ntm/blob/7db406826d6109f44c61a857ef2d1aadbbec7f54/utils.py#L34-L35
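baseN converts a non-negative integer to its textual form in base b (2 to 36) by recursing on num // b; the lstrip("0") discards the sentinel "0" produced at the base case. A few checks:

print(baseN(255, 16))  # 'ff'
print(baseN(10, 2))    # '1010'
print(baseN(0, 8))     # '0'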
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/server/grr_response_server/access_control.py
python
AccessControlManager.CheckClientAccess
(self, context, client_urn)
Checks access to the given client. Args: context: User credentials context. client_urn: URN of a client to check. Returns: True if access is allowed, raises otherwise.
Checks access to the given client.
[ "Checks", "access", "to", "the", "given", "client", "." ]
def CheckClientAccess(self, context, client_urn): """Checks access to the given client. Args: context: User credentials context. client_urn: URN of a client to check. Returns: True if access is allowed, raises otherwise. """ logging.debug("Checking %s for client %s access.", context, client_urn) raise NotImplementedError()
[ "def", "CheckClientAccess", "(", "self", ",", "context", ",", "client_urn", ")", ":", "logging", ".", "debug", "(", "\"Checking %s for client %s access.\"", ",", "context", ",", "client_urn", ")", "raise", "NotImplementedError", "(", ")" ]
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/access_control.py#L62-L73
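CheckClientAccess in the base manager only logs and raises NotImplementedError, so a concrete manager is expected to subclass it and supply the policy. A hedged sketch of the smallest possible subclass (AllowAllManager and its permit-everything policy are invented here for illustration, not part of GRR):

class AllowAllManager(AccessControlManager):
    """Illustrative only: permits every client-access check."""

    def CheckClientAccess(self, context, client_urn):
        # A real policy would consult approvals attached to `context`.
        return True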
jliljebl/flowblade
995313a509b80e99eb1ad550d945bdda5995093b
flowblade-trunk/Flowblade/tools/toolsencoding.py
python
RenderFilePanel.enable_file_selections
(self, enabled)
[]
def enable_file_selections(self, enabled): self.movie_name.set_sensitive(enabled) self.extension_label.set_sensitive(enabled) self.out_folder.set_sensitive(enabled) self.out_folder_label.set_sensitive(enabled) self.name_label.set_sensitive(enabled) self.frame_name_label.set_sensitive(enabled) self.frame_name.set_sensitive(enabled)
[ "def", "enable_file_selections", "(", "self", ",", "enabled", ")", ":", "self", ".", "movie_name", ".", "set_sensitive", "(", "enabled", ")", "self", ".", "extension_label", ".", "set_sensitive", "(", "enabled", ")", "self", ".", "out_folder", ".", "set_sensitive", "(", "enabled", ")", "self", ".", "out_folder_label", ".", "set_sensitive", "(", "enabled", ")", "self", ".", "name_label", ".", "set_sensitive", "(", "enabled", ")", "self", ".", "frame_name_label", ".", "set_sensitive", "(", "enabled", ")", "self", ".", "frame_name", ".", "set_sensitive", "(", "enabled", ")" ]
https://github.com/jliljebl/flowblade/blob/995313a509b80e99eb1ad550d945bdda5995093b/flowblade-trunk/Flowblade/tools/toolsencoding.py#L329-L336
phantomcyber/playbooks
9e850ecc44cb98c5dde53784744213a1ed5799bd
user_prompt_and_block_domain.py
python
on_finish
(container, summary)
return
[]
def on_finish(container, summary): phantom.debug('on_finish() called') # This function is called after all actions are completed. # summary of all the action and/or all details of actions # can be collected here. # summary_json = phantom.get_summary() # if 'result' in summary_json: # for action_result in summary_json['result']: # if 'action_run_id' in action_result: # action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False) # phantom.debug(action_results) return
[ "def", "on_finish", "(", "container", ",", "summary", ")", ":", "phantom", ".", "debug", "(", "'on_finish() called'", ")", "# This function is called after all actions are completed.", "# summary of all the action and/or all details of actions", "# can be collected here.", "# summary_json = phantom.get_summary()", "# if 'result' in summary_json:", "# for action_result in summary_json['result']:", "# if 'action_run_id' in action_result:", "# action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)", "# phantom.debug(action_results)", "return" ]
https://github.com/phantomcyber/playbooks/blob/9e850ecc44cb98c5dde53784744213a1ed5799bd/user_prompt_and_block_domain.py#L158-L171
CoinAlpha/hummingbot
36f6149c1644c07cd36795b915f38b8f49b798e7
hummingbot/strategy/twap/twap.py
python
TwapTradeStrategy.process_market
(self, market_info)
Checks if enough time has elapsed from previous order to place order and if so, calls place_orders_for_market() and cancels orders if they are older than self._cancel_order_wait_time. :param market_info: a market trading pair
Checks if enough time has elapsed from previous order to place order and if so, calls place_orders_for_market() and cancels orders if they are older than self._cancel_order_wait_time.
[ "Checks", "if", "enough", "time", "has", "elapsed", "from", "previous", "order", "to", "place", "order", "and", "if", "so", "calls", "place_orders_for_market", "()", "and", "cancels", "orders", "if", "they", "are", "older", "than", "self", ".", "_cancel_order_wait_time", "." ]
def process_market(self, market_info): """ Checks if enough time has elapsed from previous order to place order and if so, calls place_orders_for_market() and cancels orders if they are older than self._cancel_order_wait_time. :param market_info: a market trading pair """ if self._quantity_remaining > 0: # If current timestamp is greater than the start timestamp and its the first order if (self.current_timestamp > self._previous_timestamp) and self._first_order: self.logger().info("Trying to place orders now. ") self._previous_timestamp = self.current_timestamp self.place_orders_for_market(market_info) self._first_order = False # If current timestamp is greater than the start timestamp + time delay place orders elif (self.current_timestamp > self._previous_timestamp + self._order_delay_time) and (self._first_order is False): self.logger().info("Current time: " f"{datetime.fromtimestamp(self.current_timestamp).strftime('%Y-%m-%d %H:%M:%S')} " "is now greater than " "Previous time: " f"{datetime.fromtimestamp(self._previous_timestamp).strftime('%Y-%m-%d %H:%M:%S')} " f" with time delay: {self._order_delay_time}. Trying to place orders now. ") self._previous_timestamp = self.current_timestamp self.place_orders_for_market(market_info) active_orders = self.market_info_to_active_orders.get(market_info, []) orders_to_cancel = (active_order for active_order in active_orders if self.current_timestamp >= self._time_to_cancel[active_order.client_order_id]) for order in orders_to_cancel: self.cancel_order(market_info, order.client_order_id)
[ "def", "process_market", "(", "self", ",", "market_info", ")", ":", "if", "self", ".", "_quantity_remaining", ">", "0", ":", "# If current timestamp is greater than the start timestamp and its the first order", "if", "(", "self", ".", "current_timestamp", ">", "self", ".", "_previous_timestamp", ")", "and", "self", ".", "_first_order", ":", "self", ".", "logger", "(", ")", ".", "info", "(", "\"Trying to place orders now. \"", ")", "self", ".", "_previous_timestamp", "=", "self", ".", "current_timestamp", "self", ".", "place_orders_for_market", "(", "market_info", ")", "self", ".", "_first_order", "=", "False", "# If current timestamp is greater than the start timestamp + time delay place orders", "elif", "(", "self", ".", "current_timestamp", ">", "self", ".", "_previous_timestamp", "+", "self", ".", "_order_delay_time", ")", "and", "(", "self", ".", "_first_order", "is", "False", ")", ":", "self", ".", "logger", "(", ")", ".", "info", "(", "\"Current time: \"", "f\"{datetime.fromtimestamp(self.current_timestamp).strftime('%Y-%m-%d %H:%M:%S')} \"", "\"is now greater than \"", "\"Previous time: \"", "f\"{datetime.fromtimestamp(self._previous_timestamp).strftime('%Y-%m-%d %H:%M:%S')} \"", "f\" with time delay: {self._order_delay_time}. Trying to place orders now. \"", ")", "self", ".", "_previous_timestamp", "=", "self", ".", "current_timestamp", "self", ".", "place_orders_for_market", "(", "market_info", ")", "active_orders", "=", "self", ".", "market_info_to_active_orders", ".", "get", "(", "market_info", ",", "[", "]", ")", "orders_to_cancel", "=", "(", "active_order", "for", "active_order", "in", "active_orders", "if", "self", ".", "current_timestamp", ">=", "self", ".", "_time_to_cancel", "[", "active_order", ".", "client_order_id", "]", ")", "for", "order", "in", "orders_to_cancel", ":", "self", ".", "cancel_order", "(", "market_info", ",", "order", ".", "client_order_id", ")" ]
https://github.com/CoinAlpha/hummingbot/blob/36f6149c1644c07cd36795b915f38b8f49b798e7/hummingbot/strategy/twap/twap.py#L257-L293
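The heart of process_market is a clock-driven gate: place the first slice once the start time has passed, place each later slice only after _order_delay_time seconds, and cancel any order whose deadline in _time_to_cancel has arrived. The same timing skeleton without hummingbot types (all names below are illustrative):

def should_place_order(now, previous_ts, delay, first_order):
    if first_order:
        return now > previous_ts          # first slice: start time reached
    return now > previous_ts + delay      # later slices: delay elapsed

def stale_orders(now, time_to_cancel):
    # time_to_cancel maps order_id -> absolute cancellation timestamp
    return [oid for oid, deadline in time_to_cancel.items() if now >= deadline]

print(should_place_order(now=100, previous_ts=90, delay=30, first_order=True))   # True
print(should_place_order(now=100, previous_ts=90, delay=30, first_order=False))  # False
print(stale_orders(100, {"a": 95, "b": 130}))                                    # ['a']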
guildai/guildai
1665985a3d4d788efc1a3180ca51cc417f71ca78
guild/external/pkg_resources/__init__.py
python
ResourceManager._warn_unsafe_extraction_path
(path)
If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details.
If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used.
[ "If", "the", "default", "extraction", "path", "is", "overridden", "and", "set", "to", "an", "insecure", "location", "such", "as", "/", "tmp", "it", "opens", "up", "an", "opportunity", "for", "an", "attacker", "to", "replace", "an", "extracted", "file", "with", "an", "unauthorized", "payload", ".", "Warn", "the", "user", "if", "a", "known", "insecure", "location", "is", "used", "." ]
def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ( "%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." % path ) warnings.warn(msg, UserWarning)
[ "def", "_warn_unsafe_extraction_path", "(", "path", ")", ":", "if", "os", ".", "name", "==", "'nt'", "and", "not", "path", ".", "startswith", "(", "os", ".", "environ", "[", "'windir'", "]", ")", ":", "# On Windows, permissions are generally restrictive by default", "# and temp directories are not writable by other users, so", "# bypass the warning.", "return", "mode", "=", "os", ".", "stat", "(", "path", ")", ".", "st_mode", "if", "mode", "&", "stat", ".", "S_IWOTH", "or", "mode", "&", "stat", ".", "S_IWGRP", ":", "msg", "=", "(", "\"%s is writable by group/others and vulnerable to attack \"", "\"when \"", "\"used with get_resource_filename. Consider a more secure \"", "\"location (set with .set_extraction_path or the \"", "\"PYTHON_EGG_CACHE environment variable).\"", "%", "path", ")", "warnings", ".", "warn", "(", "msg", ",", "UserWarning", ")" ]
https://github.com/guildai/guildai/blob/1665985a3d4d788efc1a3180ca51cc417f71ca78/guild/external/pkg_resources/__init__.py#L1221-L1244
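The warning hinges on one test: are the group- or other-write bits set on the extraction directory? That check can be reproduced on its own with nothing but the standard library:

import os
import stat
import tempfile

def is_group_or_world_writable(path):
    mode = os.stat(path).st_mode
    return bool(mode & (stat.S_IWGRP | stat.S_IWOTH))

print(is_group_or_world_writable(tempfile.gettempdir()))  # typically True for /tmp on POSIX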
fffonion/xeHentai
26063154a238d4df280f8d17f14d090e679084ec
xeHentai/rpc.py
python
RPCServer.run
(self)
[]
def run(self): try: self.server = ThreadedHTTPServer(self.bind_addr, lambda *x: Handler(self.xeH, self.secret, *x)) except Exception as ex: self.logger.error(i18n.RPC_CANNOT_BIND % traceback.format_exc()) else: self.logger.info(i18n.RPC_STARTED % (self.bind_addr[0], self.bind_addr[1])) url = "http://%s:%s/ui/#host=%s,port=%s,https=no" % ( self.bind_addr[0], self.bind_addr[1], self.bind_addr[0], self.bind_addr[1] ) if self.secret: url = url + ",token=" + self.secret if self.open_browser: import webbrowser webbrowser.open(url) else: self.logger.info(i18n.RPC_WEBUI_PATH % url) while not self._exit("rpc"): self.server.handle_request()
[ "def", "run", "(", "self", ")", ":", "try", ":", "self", ".", "server", "=", "ThreadedHTTPServer", "(", "self", ".", "bind_addr", ",", "lambda", "*", "x", ":", "Handler", "(", "self", ".", "xeH", ",", "self", ".", "secret", ",", "*", "x", ")", ")", "except", "Exception", "as", "ex", ":", "self", ".", "logger", ".", "error", "(", "i18n", ".", "RPC_CANNOT_BIND", "%", "traceback", ".", "format_exc", "(", ")", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "i18n", ".", "RPC_STARTED", "%", "(", "self", ".", "bind_addr", "[", "0", "]", ",", "self", ".", "bind_addr", "[", "1", "]", ")", ")", "url", "=", "\"http://%s:%s/ui/#host=%s,port=%s,https=no\"", "%", "(", "self", ".", "bind_addr", "[", "0", "]", ",", "self", ".", "bind_addr", "[", "1", "]", ",", "self", ".", "bind_addr", "[", "0", "]", ",", "self", ".", "bind_addr", "[", "1", "]", ")", "if", "self", ".", "secret", ":", "url", "=", "url", "+", "\",token=\"", "+", "self", ".", "secret", "if", "self", ".", "open_browser", ":", "import", "webbrowser", "webbrowser", ".", "open", "(", "url", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "i18n", ".", "RPC_WEBUI_PATH", "%", "url", ")", "while", "not", "self", ".", "_exit", "(", "\"rpc\"", ")", ":", "self", ".", "server", ".", "handle_request", "(", ")" ]
https://github.com/fffonion/xeHentai/blob/26063154a238d4df280f8d17f14d090e679084ec/xeHentai/rpc.py#L50-L69
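ThreadedHTTPServer is a project class here; the conventional way to get the same behaviour from the standard library is to mix socketserver.ThreadingMixIn into http.server.HTTPServer and drive it with handle_request() in a loop, which is roughly what run() does. A minimal sketch under that assumption (handler, address, and request count are placeholders):

import http.server
import socketserver

class ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    daemon_threads = True

server = ThreadedHTTPServer(("127.0.0.1", 8010), http.server.SimpleHTTPRequestHandler)
for _ in range(3):            # the real loop runs until an exit flag is set
    server.handle_request()   # serve one request per iteration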
Runbook/runbook
7b68622f75ef09f654046f0394540025f3ee7445
src/actions/actions/stathat/stathat.py
python
_StatHatBase._send
(self, path, data, async)
return True
[]
def _send(self, path, data, async): endpoint = STATHAT_ENDPOINT + path payload = self._auth.copy() payload.update(data) if HAS_GEVENT and async is not False: # Async request should be completely silent and ignore any # errors that may be thrown. async_group.spawn(self._send_inner, endpoint, payload, silent=True) else: # If the request isn't async, we should make an effort # to parse the response and return it, or raise a proper exception try: raw = self._send_inner(endpoint, payload) except urllib2.URLError, e: # Network issue or something else affecting the general request raise StatHatError(e) try: resp = json.loads(raw) except Exception: # JSON decoding false meaning StatHat returned something bad raise StatHatError('Something bad happened: %s' % raw) if 'msg' in resp and 'status' in resp: if resp['status'] != 200: # Normal error from StatHat raise StatHatError(resp['msg']) else: # 'msg' and 'status' keys weren't returned, something bad happened raise StatHatError('Something bad happened: %s' % raw) return True
[ "def", "_send", "(", "self", ",", "path", ",", "data", ",", "async", ")", ":", "endpoint", "=", "STATHAT_ENDPOINT", "+", "path", "payload", "=", "self", ".", "_auth", ".", "copy", "(", ")", "payload", ".", "update", "(", "data", ")", "if", "HAS_GEVENT", "and", "async", "is", "not", "False", ":", "# Async request should be completely silent and ignore any", "# errors that may be thrown.", "async_group", ".", "spawn", "(", "self", ".", "_send_inner", ",", "endpoint", ",", "payload", ",", "silent", "=", "True", ")", "else", ":", "# If the request isn't async, we should make an effort", "# to parse the response and return it, or raise a proper exception", "try", ":", "raw", "=", "self", ".", "_send_inner", "(", "endpoint", ",", "payload", ")", "except", "urllib2", ".", "URLError", ",", "e", ":", "# Network issue or something else affecting the general request", "raise", "StatHatError", "(", "e", ")", "try", ":", "resp", "=", "json", ".", "loads", "(", "raw", ")", "except", "Exception", ":", "# JSON decoding false meaning StatHat returned something bad", "raise", "StatHatError", "(", "'Something bad happened: %s'", "%", "raw", ")", "if", "'msg'", "in", "resp", "and", "'status'", "in", "resp", ":", "if", "resp", "[", "'status'", "]", "!=", "200", ":", "# Normal error from StatHat", "raise", "StatHatError", "(", "resp", "[", "'msg'", "]", ")", "else", ":", "# 'msg' and 'status' keys weren't returned, something bad happened", "raise", "StatHatError", "(", "'Something bad happened: %s'", "%", "raw", ")", "return", "True" ]
https://github.com/Runbook/runbook/blob/7b68622f75ef09f654046f0394540025f3ee7445/src/actions/actions/stathat/stathat.py#L88-L117
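_send is Python 2 code: urllib2, the `except urllib2.URLError, e:` syntax, and `async` as a parameter name (a keyword since Python 3.7). The synchronous branch, which posts the payload, decodes the JSON reply, and treats a non-200 status as an error, could be rendered in Python 3 roughly like this (the error type and payload shape are illustrative):

import json
import urllib.error
import urllib.parse
import urllib.request

def send_sync(endpoint, payload):
    data = urllib.parse.urlencode(payload).encode()
    try:
        with urllib.request.urlopen(endpoint, data) as resp:
            raw = resp.read().decode()
    except urllib.error.URLError as exc:      # network-level failure
        raise RuntimeError(exc)
    reply = json.loads(raw)
    if reply.get("status") != 200:            # service-level failure
        raise RuntimeError(reply.get("msg", raw))
    return True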
ales-tsurko/cells
4cf7e395cd433762bea70cdc863a346f3a6fe1d0
packaging/macos/python/lib/python3.7/asyncio/transports.py
python
ReadTransport.pause_reading
(self)
Pause the receiving end. No data will be passed to the protocol's data_received() method until resume_reading() is called.
Pause the receiving end.
[ "Pause", "the", "receiving", "end", "." ]
def pause_reading(self): """Pause the receiving end. No data will be passed to the protocol's data_received() method until resume_reading() is called. """ raise NotImplementedError
[ "def", "pause_reading", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/asyncio/transports.py#L51-L57
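pause_reading() and resume_reading() are the flow-control half of asyncio's read transports: a protocol that cannot keep up asks the transport to stop delivering data_received() calls and re-enables delivery later. A small hedged sketch of a protocol using them (the one-second back-off is arbitrary):

import asyncio

class ThrottledProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        self.transport.pause_reading()    # no more data_received() until resumed
        loop = asyncio.get_running_loop()
        loop.call_later(1.0, self.transport.resume_reading)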
runawayhorse001/LearningApacheSpark
67f3879dce17553195f094f5728b94a01badcf24
pyspark/sql/session.py
python
SparkSession._convert_from_pandas
(self, pdf, schema, timezone)
return [r.tolist() for r in np_records]
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame :return list of records
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame :return list of records
[ "Convert", "a", "pandas", ".", "DataFrame", "to", "list", "of", "records", "that", "can", "be", "used", "to", "make", "a", "DataFrame", ":", "return", "list", "of", "records" ]
def _convert_from_pandas(self, pdf, schema, timezone): """ Convert a pandas.DataFrame to list of records that can be used to make a DataFrame :return list of records """ if timezone is not None: from pyspark.sql.types import _check_series_convert_timestamps_tz_local copied = False if isinstance(schema, StructType): for field in schema: # TODO: handle nested timestamps, such as ArrayType(TimestampType())? if isinstance(field.dataType, TimestampType): s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone) if s is not pdf[field.name]: if not copied: # Copy once if the series is modified to prevent the original # Pandas DataFrame from being updated pdf = pdf.copy() copied = True pdf[field.name] = s else: for column, series in pdf.iteritems(): s = _check_series_convert_timestamps_tz_local(series, timezone) if s is not series: if not copied: # Copy once if the series is modified to prevent the original # Pandas DataFrame from being updated pdf = pdf.copy() copied = True pdf[column] = s # Convert pandas.DataFrame to list of numpy records np_records = pdf.to_records(index=False) # Check if any columns need to be fixed for Spark to infer properly if len(np_records) > 0: record_dtype = self._get_numpy_record_dtype(np_records[0]) if record_dtype is not None: return [r.astype(record_dtype).tolist() for r in np_records] # Convert list of numpy records to python lists return [r.tolist() for r in np_records]
[ "def", "_convert_from_pandas", "(", "self", ",", "pdf", ",", "schema", ",", "timezone", ")", ":", "if", "timezone", "is", "not", "None", ":", "from", "pyspark", ".", "sql", ".", "types", "import", "_check_series_convert_timestamps_tz_local", "copied", "=", "False", "if", "isinstance", "(", "schema", ",", "StructType", ")", ":", "for", "field", "in", "schema", ":", "# TODO: handle nested timestamps, such as ArrayType(TimestampType())?", "if", "isinstance", "(", "field", ".", "dataType", ",", "TimestampType", ")", ":", "s", "=", "_check_series_convert_timestamps_tz_local", "(", "pdf", "[", "field", ".", "name", "]", ",", "timezone", ")", "if", "s", "is", "not", "pdf", "[", "field", ".", "name", "]", ":", "if", "not", "copied", ":", "# Copy once if the series is modified to prevent the original", "# Pandas DataFrame from being updated", "pdf", "=", "pdf", ".", "copy", "(", ")", "copied", "=", "True", "pdf", "[", "field", ".", "name", "]", "=", "s", "else", ":", "for", "column", ",", "series", "in", "pdf", ".", "iteritems", "(", ")", ":", "s", "=", "_check_series_convert_timestamps_tz_local", "(", "series", ",", "timezone", ")", "if", "s", "is", "not", "series", ":", "if", "not", "copied", ":", "# Copy once if the series is modified to prevent the original", "# Pandas DataFrame from being updated", "pdf", "=", "pdf", ".", "copy", "(", ")", "copied", "=", "True", "pdf", "[", "column", "]", "=", "s", "# Convert pandas.DataFrame to list of numpy records", "np_records", "=", "pdf", ".", "to_records", "(", "index", "=", "False", ")", "# Check if any columns need to be fixed for Spark to infer properly", "if", "len", "(", "np_records", ")", ">", "0", ":", "record_dtype", "=", "self", ".", "_get_numpy_record_dtype", "(", "np_records", "[", "0", "]", ")", "if", "record_dtype", "is", "not", "None", ":", "return", "[", "r", ".", "astype", "(", "record_dtype", ")", ".", "tolist", "(", ")", "for", "r", "in", "np_records", "]", "# Convert list of numpy records to python lists", "return", "[", "r", ".", "tolist", "(", ")", "for", "r", "in", "np_records", "]" ]
https://github.com/runawayhorse001/LearningApacheSpark/blob/67f3879dce17553195f094f5728b94a01badcf24/pyspark/sql/session.py#L455-L496
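The final step, pdf.to_records(index=False) followed by r.tolist(), is plain pandas/NumPy and can be tried in isolation; it is what turns a DataFrame into the list-of-tuples shape the Spark createDataFrame path consumes:

import pandas as pd

pdf = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
np_records = pdf.to_records(index=False)
rows = [r.tolist() for r in np_records]
print(rows)  # [('a', 1), ('b', 2)]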
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/combinat/sloane_functions.py
python
A000108._eval
(self, n)
return combinat.catalan_number(n)
EXAMPLES:: sage: [sloane.A000108._eval(n) for n in range(10)] [1, 1, 2, 5, 14, 42, 132, 429, 1430, 4862]
EXAMPLES::
[ "EXAMPLES", "::" ]
def _eval(self, n): """ EXAMPLES:: sage: [sloane.A000108._eval(n) for n in range(10)] [1, 1, 2, 5, 14, 42, 132, 429, 1430, 4862] """ return combinat.catalan_number(n)
[ "def", "_eval", "(", "self", ",", "n", ")", ":", "return", "combinat", ".", "catalan_number", "(", "n", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/combinat/sloane_functions.py#L4031-L4038
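Outside Sage the same sequence is easy to cross-check with the closed form C(n) = binomial(2n, n) / (n + 1), here via math.comb (Python 3.8+):

from math import comb

def catalan(n):
    return comb(2 * n, n) // (n + 1)

print([catalan(n) for n in range(10)])  # [1, 1, 2, 5, 14, 42, 132, 429, 1430, 4862]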
jkwill87/mnamer
c8bbc63a8847e9b15b0f512f7ae01de0b98cf739
mnamer/endpoints.py
python
tvdb_search_series
( token: str, series: Optional[str] = None, id_imdb: Optional[str] = None, id_zap2it: Optional[str] = None, language: Optional[Language] = None, cache: bool = True, )
return content
Allows the user to search for a series based on the following parameters. Online docs: https://api.thetvdb.com/swagger#!/Search/get_search_series Note: results a maximum of 100 entries per page, no option for pagination.
Allows the user to search for a series based on the following parameters.
[ "Allows", "the", "user", "to", "search", "for", "a", "series", "based", "on", "the", "following", "parameters", "." ]
def tvdb_search_series( token: str, series: Optional[str] = None, id_imdb: Optional[str] = None, id_zap2it: Optional[str] = None, language: Optional[Language] = None, cache: bool = True, ) -> dict: """ Allows the user to search for a series based on the following parameters. Online docs: https://api.thetvdb.com/swagger#!/Search/get_search_series Note: results a maximum of 100 entries per page, no option for pagination. """ Language.ensure_valid_for_tvdb(language) url = "https://api.thetvdb.com/search/series" parameters = {"name": series, "imdbId": id_imdb, "zap2itId": id_zap2it} headers = {"Authorization": f"Bearer {token}"} if language: headers["Accept-Language"] = language.a2 status, content = request_json( url, parameters, headers=headers, cache=cache is True and language is None, ) if status == 401: raise MnamerException("invalid token") elif status == 405: raise MnamerException( "series, id_imdb, id_zap2it parameters are mutually exclusive" ) elif status == 404: raise MnamerNotFoundException elif status != 200 or not content.get("data"): # pragma: no cover raise MnamerNetworkException("TVDb down or unavailable?") return content
[ "def", "tvdb_search_series", "(", "token", ":", "str", ",", "series", ":", "Optional", "[", "str", "]", "=", "None", ",", "id_imdb", ":", "Optional", "[", "str", "]", "=", "None", ",", "id_zap2it", ":", "Optional", "[", "str", "]", "=", "None", ",", "language", ":", "Optional", "[", "Language", "]", "=", "None", ",", "cache", ":", "bool", "=", "True", ",", ")", "->", "dict", ":", "Language", ".", "ensure_valid_for_tvdb", "(", "language", ")", "url", "=", "\"https://api.thetvdb.com/search/series\"", "parameters", "=", "{", "\"name\"", ":", "series", ",", "\"imdbId\"", ":", "id_imdb", ",", "\"zap2itId\"", ":", "id_zap2it", "}", "headers", "=", "{", "\"Authorization\"", ":", "f\"Bearer {token}\"", "}", "if", "language", ":", "headers", "[", "\"Accept-Language\"", "]", "=", "language", ".", "a2", "status", ",", "content", "=", "request_json", "(", "url", ",", "parameters", ",", "headers", "=", "headers", ",", "cache", "=", "cache", "is", "True", "and", "language", "is", "None", ",", ")", "if", "status", "==", "401", ":", "raise", "MnamerException", "(", "\"invalid token\"", ")", "elif", "status", "==", "405", ":", "raise", "MnamerException", "(", "\"series, id_imdb, id_zap2it parameters are mutually exclusive\"", ")", "elif", "status", "==", "404", ":", "raise", "MnamerNotFoundException", "elif", "status", "!=", "200", "or", "not", "content", ".", "get", "(", "\"data\"", ")", ":", "# pragma: no cover", "raise", "MnamerNetworkException", "(", "\"TVDb down or unavailable?\"", ")", "return", "content" ]
https://github.com/jkwill87/mnamer/blob/c8bbc63a8847e9b15b0f512f7ae01de0b98cf739/mnamer/endpoints.py#L388-L424
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/uuid.py
python
UUID.bytes_le
(self)
return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + bytes[8:])
[]
def bytes_le(self): bytes = self.bytes return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + bytes[8:])
[ "def", "bytes_le", "(", "self", ")", ":", "bytes", "=", "self", ".", "bytes", "return", "(", "bytes", "[", "4", "-", "1", ":", ":", "-", "1", "]", "+", "bytes", "[", "6", "-", "1", ":", "4", "-", "1", ":", "-", "1", "]", "+", "bytes", "[", "8", "-", "1", ":", "6", "-", "1", ":", "-", "1", "]", "+", "bytes", "[", "8", ":", "]", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/uuid.py#L289-L292
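bytes_le reverses the byte order of the first three UUID fields (time_low, time_mid, time_hi_and_version) to match the little-endian layout used by Windows, leaving the final eight bytes untouched. The standard-library uuid module shows the effect directly:

import uuid

u = uuid.UUID("00112233-4455-6677-8899-aabbccddeeff")
print(u.bytes.hex())     # 00112233445566778899aabbccddeeff
print(u.bytes_le.hex())  # 33221100554477668899aabbccddeeff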
JimmXinu/FanFicFare
bc149a2deb2636320fe50a3e374af6eef8f61889
fanficfare/adapters/adapter_storiesonlinenet.py
python
StoriesOnlineNetAdapter.getSiteAbbrev
(cls)
return 'strol'
[]
def getSiteAbbrev(cls): return 'strol'
[ "def", "getSiteAbbrev", "(", "cls", ")", ":", "return", "'strol'" ]
https://github.com/JimmXinu/FanFicFare/blob/bc149a2deb2636320fe50a3e374af6eef8f61889/fanficfare/adapters/adapter_storiesonlinenet.py#L69-L70
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/filecmp.py
python
cmpfiles
(a, b, common, shallow=1)
return res
Compare common files in two directories. a, b -- directory names common -- list of file names found in both directories shallow -- if true, do comparison based solely on stat() information Returns a tuple of three lists: files that compare equal files that are different filenames that aren't regular files.
Compare common files in two directories.
[ "Compare", "common", "files", "in", "two", "directories", "." ]
def cmpfiles(a, b, common, shallow=1): """Compare common files in two directories. a, b -- directory names common -- list of file names found in both directories shallow -- if true, do comparison based solely on stat() information Returns a tuple of three lists: files that compare equal files that are different filenames that aren't regular files. """ res = ([], [], []) for x in common: ax = os.path.join(a, x) bx = os.path.join(b, x) res[_cmp(ax, bx, shallow)].append(x) return res
[ "def", "cmpfiles", "(", "a", ",", "b", ",", "common", ",", "shallow", "=", "1", ")", ":", "res", "=", "(", "[", "]", ",", "[", "]", ",", "[", "]", ")", "for", "x", "in", "common", ":", "ax", "=", "os", ".", "path", ".", "join", "(", "a", ",", "x", ")", "bx", "=", "os", ".", "path", ".", "join", "(", "b", ",", "x", ")", "res", "[", "_cmp", "(", "ax", ",", "bx", ",", "shallow", ")", "]", ".", "append", "(", "x", ")", "return", "res" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/packages/IronPython.StdLib.2.7.4/content/Lib/filecmp.py#L240-L258
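cmpfiles here is a vendored copy of the standard library's filecmp.cmpfiles, so the behaviour can be exercised with the modern filecmp module: given two directories and a list of names, it buckets them into equal, different, and not-comparable:

import filecmp
import os
import tempfile

a, b = tempfile.mkdtemp(), tempfile.mkdtemp()
for d in (a, b):
    with open(os.path.join(d, "x.txt"), "w") as fh:
        fh.write("same contents")
with open(os.path.join(a, "y.txt"), "w") as fh:
    fh.write("only differs")
with open(os.path.join(b, "y.txt"), "w") as fh:
    fh.write("different text")

print(filecmp.cmpfiles(a, b, ["x.txt", "y.txt", "missing.txt"]))
# (['x.txt'], ['y.txt'], ['missing.txt'])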
Calysto/calysto_scheme
15bf81987870bcae1264e5a0a06feb9a8ee12b8b
calysto_scheme/scheme.py
python
list_head
(lyst, pos)
return retval
[]
def list_head(lyst, pos): stack = symbol_emptylist current = lyst for i in range(pos): stack = cons(current.car, stack) current = current.cdr retval = symbol_emptylist for i in range(pos): retval = cons(stack.car, retval) stack = stack.cdr return retval
[ "def", "list_head", "(", "lyst", ",", "pos", ")", ":", "stack", "=", "symbol_emptylist", "current", "=", "lyst", "for", "i", "in", "range", "(", "pos", ")", ":", "stack", "=", "cons", "(", "current", ".", "car", ",", "stack", ")", "current", "=", "current", ".", "cdr", "retval", "=", "symbol_emptylist", "for", "i", "in", "range", "(", "pos", ")", ":", "retval", "=", "cons", "(", "stack", ".", "car", ",", "retval", ")", "stack", "=", "stack", ".", "cdr", "return", "retval" ]
https://github.com/Calysto/calysto_scheme/blob/15bf81987870bcae1264e5a0a06feb9a8ee12b8b/calysto_scheme/scheme.py#L482-L492
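list_head copies the first pos cells of a cons list by pushing their cars onto a stack and popping them back, which restores the original order. The same idea as a standalone sketch, with a tiny Cons class standing in for the interpreter's pair type (illustrative only):

class Cons:
    def __init__(self, car, cdr):
        self.car, self.cdr = car, cdr

EMPTY = None

def list_head(lst, pos):
    stack = EMPTY
    for _ in range(pos):                  # reverse the first `pos` cars
        stack, lst = Cons(lst.car, stack), lst.cdr
    head = EMPTY
    for _ in range(pos):                  # reverse back into original order
        head, stack = Cons(stack.car, head), stack.cdr
    return head

lst = Cons(1, Cons(2, Cons(3, Cons(4, EMPTY))))
head = list_head(lst, 2)
print(head.car, head.cdr.car, head.cdr.cdr)  # 1 2 None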
basho/riak-python-client
91de13a16607cdf553d1a194e762734e3bec4231
riak/riak_object.py
python
RiakObject.link
(self, *args)
return mr.link(*args)
Start assembling a Map/Reduce operation. A shortcut for :meth:`~riak.mapreduce.RiakMapReduce.link`. :rtype: :class:`~riak.mapreduce.RiakMapReduce`
Start assembling a Map/Reduce operation. A shortcut for :meth:`~riak.mapreduce.RiakMapReduce.link`.
[ "Start", "assembling", "a", "Map", "/", "Reduce", "operation", ".", "A", "shortcut", "for", ":", "meth", ":", "~riak", ".", "mapreduce", ".", "RiakMapReduce", ".", "link", "." ]
def link(self, *args): """ Start assembling a Map/Reduce operation. A shortcut for :meth:`~riak.mapreduce.RiakMapReduce.link`. :rtype: :class:`~riak.mapreduce.RiakMapReduce` """ mr = RiakMapReduce(self.client) mr.add(self.bucket.name, self.key) return mr.link(*args)
[ "def", "link", "(", "self", ",", "*", "args", ")", ":", "mr", "=", "RiakMapReduce", "(", "self", ".", "client", ")", "mr", ".", "add", "(", "self", ".", "bucket", ".", "name", ",", "self", ".", "key", ")", "return", "mr", ".", "link", "(", "*", "args", ")" ]
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/riak_object.py#L379-L388
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/base64.py
python
b16decode
(s, casefold=False)
return binascii.unhexlify(s)
Decode a Base16 encoded byte string. s is the byte string to decode. Optional casefold is a flag specifying whether a lowercase alphabet is acceptable as input. For security purposes, the default is False. The decoded byte string is returned. binascii.Error is raised if s were incorrectly padded or if there are non-alphabet characters present in the string.
Decode a Base16 encoded byte string.
[ "Decode", "a", "Base16", "encoded", "byte", "string", "." ]
def b16decode(s, casefold=False): """Decode a Base16 encoded byte string. s is the byte string to decode. Optional casefold is a flag specifying whether a lowercase alphabet is acceptable as input. For security purposes, the default is False. The decoded byte string is returned. binascii.Error is raised if s were incorrectly padded or if there are non-alphabet characters present in the string. """ s = _bytes_from_decode_data(s) if casefold: s = s.upper() if re.search(b'[^0-9A-F]', s): raise binascii.Error('Non-base16 digit found') return binascii.unhexlify(s)
[ "def", "b16decode", "(", "s", ",", "casefold", "=", "False", ")", ":", "s", "=", "_bytes_from_decode_data", "(", "s", ")", "if", "casefold", ":", "s", "=", "s", ".", "upper", "(", ")", "if", "re", ".", "search", "(", "b'[^0-9A-F]'", ",", "s", ")", ":", "raise", "binascii", ".", "Error", "(", "'Non-base16 digit found'", ")", "return", "binascii", ".", "unhexlify", "(", "s", ")" ]
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/base64.py#L267-L283
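base64.b16decode is the standard-library function; with the default casefold=False only uppercase hex digits are accepted, and lowercase input must opt in:

import base64

print(base64.b16decode(b"48656C6C6F"))                  # b'Hello'
print(base64.b16decode(b"48656c6c6f", casefold=True))   # b'Hello'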
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-modules/pybluez/bluetooth/bluez.py
python
DeviceDiscoverer.cancel_inquiry
(self)
Call this method to cancel an inquiry in process. inquiry_complete will still be called.
Call this method to cancel an inquiry in process. inquiry_complete will still be called.
[ "Call", "this", "method", "to", "cancel", "an", "inquiry", "in", "process", ".", "inquiry_complete", "will", "still", "be", "called", "." ]
def cancel_inquiry (self): """ Call this method to cancel an inquiry in process. inquiry_complete will still be called. """ self.names_to_find = {} if self.is_inquiring: try: _bt.hci_send_cmd (self.sock, _bt.OGF_LINK_CTL, \ _bt.OCF_INQUIRY_CANCEL) self.sock.close () self.sock = None except: raise BluetoothError ("error canceling inquiry") self.is_inquiring = False
[ "def", "cancel_inquiry", "(", "self", ")", ":", "self", ".", "names_to_find", "=", "{", "}", "if", "self", ".", "is_inquiring", ":", "try", ":", "_bt", ".", "hci_send_cmd", "(", "self", ".", "sock", ",", "_bt", ".", "OGF_LINK_CTL", ",", "_bt", ".", "OCF_INQUIRY_CANCEL", ")", "self", ".", "sock", ".", "close", "(", ")", "self", ".", "sock", "=", "None", "except", ":", "raise", "BluetoothError", "(", "\"error canceling inquiry\"", ")", "self", ".", "is_inquiring", "=", "False" ]
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/pybluez/bluetooth/bluez.py#L405-L420