Dataset schema:

  repo              string   (lengths 7-54)
  path              string   (lengths 4-192)
  url               string   (lengths 87-284)
  code              string   (lengths 78-104k)
  code_tokens       sequence
  docstring         string   (lengths 1-46.9k)
  docstring_tokens  sequence
  language          string   (1 class)
  partition         string   (3 classes)
openstack/python-scciclient
scciclient/irmc/elcm.py
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/elcm.py#L149-L177
def _parse_elcm_response_body_as_json(response):
    """parse eLCM response body as json data

    eLCM response should be in form of:
        _
        Key1: value1  <-- optional -->
        Key2: value2  <-- optional -->
        KeyN: valueN  <-- optional -->
        - CRLF -
        JSON string
        -

    :param response: eLCM response
    :return: json object if success
    :raise ELCMInvalidResponse: if the response does not contain valid
        json data.
    """
    try:
        body = response.text
        body_parts = body.split('\r\n')
        if len(body_parts) > 0:
            return jsonutils.loads(body_parts[-1])
        else:
            return None
    except (TypeError, ValueError):
        raise ELCMInvalidResponse('eLCM response does not contain valid json '
                                  'data. Response is "%s".' % body)
parse eLCM response body as json data eLCM response should be in form of: _ Key1: value1 <-- optional --> Key2: value2 <-- optional --> KeyN: valueN <-- optional --> - CRLF - JSON string - :param response: eLCM response :return: json object if success :raise ELCMInvalidResponse: if the response does not contain valid json data.
python
train
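A minimal self-contained sketch of the parsing idea in the record above: optional header lines, a blank CRLF-separated part, then the JSON payload as the last part. It uses a stub response object and the standard json module in place of oslo's jsonutils.

import json

class StubResponse:
    """Stands in for an eLCM HTTP response; only .text is used."""
    def __init__(self, text):
        self.text = text

# Header lines, then a CRLF blank line, then the JSON string as the last part.
raw = "Key1: value1\r\nKey2: value2\r\n\r\n" + '{"Status": 0}'
parts = StubResponse(raw).text.split('\r\n')
print(json.loads(parts[-1]))  # {'Status': 0}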
webadmin87/midnight
midnight_main/views.py
https://github.com/webadmin87/midnight/blob/b60b3b257b4d633550b82a692f3ea3756c62a0a9/midnight_main/views.py#L39-L53
def pages(request, path=None, instance=None):
    """
    View for displaying text pages

    :param request: the request
    :param path: the address
    :param instance: the page
    :return:
    """
    if instance and instance.active:
        p = instance
    else:
        raise Http404()
    return render(request, p.tpl, get_page_tpl_ctx(p, request))
View for displaying text pages :param request: the request :param path: the address :param instance: the page :return:
python
train
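A hedged usage sketch for the view above, assuming a Django project with midnight installed; `page` stands for a hypothetical active page instance.

from django.test import RequestFactory
from midnight_main.views import pages

request = RequestFactory().get('/about/')
response = pages(request, path='about', instance=page)  # `page` is hypothetical
assert response.status_code == 200  # renders page.tpl; raises Http404 if inactive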
google/grr
grr/client_builder/grr_response_client_builder/builders/osx.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/builders/osx.py#L80-L100
def MakeZip(self, xar_file, output_file):
    """Add a zip to the end of the .xar containing build.yaml.

    The build.yaml is already inside the .xar file, but we can't easily open
    this on linux. To make repacking easier we add a zip to the end of the
    .xar and add in the build.yaml. The repack step will then look at the
    build.yaml and insert the config.yaml. We end up storing the build.yaml
    twice but it is tiny, so this doesn't matter.

    Args:
      xar_file: the name of the xar file.
      output_file: the name of the output ZIP archive.
    """
    logging.info("Generating zip template file at %s", output_file)
    with zipfile.ZipFile(output_file, mode="a") as zf:
      # Get the build yaml
      # TODO(hanuszczak): YAML, consider using `StringIO` instead.
      build_yaml = io.BytesIO()
      self.WriteBuildYaml(build_yaml)
      build_yaml.seek(0)
      zf.writestr("build.yaml", build_yaml.read())
Add a zip to the end of the .xar containing build.yaml. The build.yaml is already inside the .xar file, but we can't easily open this on linux. To make repacking easier we add a zip to the end of the .xar and add in the build.yaml. The repack step will then look at the build.yaml and insert the config.yaml. We end up storing the build.yaml twice but it is tiny, so this doesn't matter. Args: xar_file: the name of the xar file. output_file: the name of the output ZIP archive.
python
train
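The trick in the record above relies on zipfile's append mode: opening a file that is not a zip archive with mode="a" appends a fresh zip structure after the existing bytes. A self-contained sketch (file name and contents are illustrative):

import zipfile

# Write some non-zip payload, standing in for the .xar contents.
with open("template.xar.zip", "wb") as f:
    f.write(b"XAR PAYLOAD ...")

# mode="a" on a non-zip file appends a new zip archive after the payload.
with zipfile.ZipFile("template.xar.zip", mode="a") as zf:
    zf.writestr("build.yaml", b"name: example\n")

with zipfile.ZipFile("template.xar.zip") as zf:
    print(zf.read("build.yaml"))  # b'name: example\n'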
Workiva/furious
example/grep.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/example/grep.py#L60-L64
def build_and_start(query, directory):
    """This function will create and then start a new Async task with the
    default callbacks argument defined in the decorator."""

    Async(target=grep, args=[query, directory]).start()
This function will create and then start a new Async task with the default callbacks argument defined in the decorator.
python
train
PaulHancock/Aegean
AegeanTools/BANE.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L127-L267
def sigma_filter(filename, region, step_size, box_size, shape, domask, sid):
    """
    Calculate the background and rms for a sub region of an image. The results are
    written to shared memory - irms and ibkg.

    Parameters
    ----------
    filename : string
        Fits file to open

    region : list
        Region within the fits file that is to be processed. (row_min, row_max).

    step_size : (int, int)
        The filtering step size

    box_size : (int, int)
        The size of the box over which the filter is applied (each step).

    shape : tuple
        The shape of the fits image

    domask : bool
        If true then copy the data mask to the output.

    sid : int
        The stripe number

    Returns
    -------
    None
    """
    ymin, ymax = region
    logging.debug('rows {0}-{1} starting at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))

    # cut out the region of interest plus 1/2 the box size, but clip to the image size
    data_row_min = max(0, ymin - box_size[0]//2)
    data_row_max = min(shape[0], ymax + box_size[0]//2)

    # Figure out how many axes are in the datafile
    NAXIS = fits.getheader(filename)["NAXIS"]

    with fits.open(filename, memmap=True) as a:
        if NAXIS == 2:
            data = a[0].section[data_row_min:data_row_max, 0:shape[1]]
        elif NAXIS == 3:
            data = a[0].section[0, data_row_min:data_row_max, 0:shape[1]]
        elif NAXIS == 4:
            data = a[0].section[0, 0, data_row_min:data_row_max, 0:shape[1]]
        else:
            logging.error("Too many NAXIS for me {0}".format(NAXIS))
            logging.error("fix your file to be more sane")
            raise Exception("Too many NAXIS")

    row_len = shape[1]

    logging.debug('data size is {0}'.format(data.shape))

    def box(r, c):
        """
        calculate the boundaries of the box centered at r,c
        with size = box_size
        """
        r_min = max(0, r - box_size[0] // 2)
        r_max = min(data.shape[0] - 1, r + box_size[0] // 2)
        c_min = max(0, c - box_size[1] // 2)
        c_max = min(data.shape[1] - 1, c + box_size[1] // 2)
        return r_min, r_max, c_min, c_max

    # set up a grid of rows/cols at which we will compute the bkg/rms
    rows = list(range(ymin-data_row_min, ymax-data_row_min, step_size[0]))
    rows.append(ymax-data_row_min)
    cols = list(range(0, shape[1], step_size[1]))
    cols.append(shape[1])

    # store the computed bkg/rms in this smaller array
    vals = np.zeros(shape=(len(rows), len(cols)))
    for i, row in enumerate(rows):
        for j, col in enumerate(cols):
            r_min, r_max, c_min, c_max = box(row, col)
            new = data[r_min:r_max, c_min:c_max]
            new = np.ravel(new)
            bkg, _ = sigmaclip(new, 3, 3)
            vals[i, j] = bkg

    # indices of all the pixels within our region
    gr, gc = np.mgrid[ymin-data_row_min:ymax-data_row_min, 0:shape[1]]

    logging.debug("Interpolating bkg to sharemem")
    ifunc = RegularGridInterpolator((rows, cols), vals)
    for i in range(gr.shape[0]):
        row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
        start_idx = np.ravel_multi_index((ymin+i, 0), shape)
        end_idx = start_idx + row_len
        ibkg[start_idx:end_idx] = row  # np.ctypeslib.as_ctypes(row)
    del ifunc
    logging.debug(" ... done writing bkg")

    # signal that the bkg is done for this region, and wait for neighbours
    barrier(bkg_events, sid)
    logging.debug("{0} background subtraction".format(sid))
    for i in range(data_row_max - data_row_min):
        start_idx = np.ravel_multi_index((data_row_min + i, 0), shape)
        end_idx = start_idx + row_len
        data[i, :] = data[i, :] - ibkg[start_idx:end_idx]

    # reset/recycle the vals array
    vals[:] = 0
    for i, row in enumerate(rows):
        for j, col in enumerate(cols):
            r_min, r_max, c_min, c_max = box(row, col)
            new = data[r_min:r_max, c_min:c_max]
            new = np.ravel(new)
            _, rms = sigmaclip(new, 3, 3)
            vals[i, j] = rms

    logging.debug("Interpolating rm to sharemem rms")
    ifunc = RegularGridInterpolator((rows, cols), vals)
    for i in range(gr.shape[0]):
        row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
        start_idx = np.ravel_multi_index((ymin+i, 0), shape)
        end_idx = start_idx + row_len
        irms[start_idx:end_idx] = row  # np.ctypeslib.as_ctypes(row)
    del ifunc
    logging.debug(" .. done writing rms")

    if domask:
        barrier(mask_events, sid)
        logging.debug("applying mask")
        for i in range(gr.shape[0]):
            mask = np.where(np.bitwise_not(np.isfinite(data[i + ymin-data_row_min, :])))[0]
            for j in mask:
                idx = np.ravel_multi_index((i + ymin, j), shape)
                ibkg[idx] = np.nan
                irms[idx] = np.nan
        logging.debug(" ... done applying mask")
    logging.debug('rows {0}-{1} finished at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
    return
Calculate the background and rms for a sub region of an image. The results are written to shared memory - irms and ibkg. Parameters ---------- filename : string Fits file to open region : list Region within the fits file that is to be processed. (row_min, row_max). step_size : (int, int) The filtering step size box_size : (int, int) The size of the box over which the filter is applied (each step). shape : tuple The shape of the fits image domask : bool If true then copy the data mask to the output. sid : int The stripe number Returns ------- None
python
train
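A self-contained toy version of the record's core idea: compute sigma-clipped statistics on a coarse grid, then interpolate back to pixel resolution. Note the record's sigmaclip is BANE's own helper returning (background, rms); this sketch uses scipy.stats.sigmaclip and takes the mean of the clipped samples instead.

import numpy as np
from scipy.stats import sigmaclip
from scipy.interpolate import RegularGridInterpolator

data = np.random.normal(5.0, 1.0, size=(64, 64))   # toy image
grid = [0, 21, 42, 63]                              # coarse row/col grid
vals = np.zeros((len(grid), len(grid)))
for i, r in enumerate(grid):
    for j, c in enumerate(grid):
        box = data[max(0, r-16):r+17, max(0, c-16):c+17].ravel()
        clipped, _, _ = sigmaclip(box, 3, 3)
        vals[i, j] = clipped.mean()                 # sigma-clipped background

ifunc = RegularGridInterpolator((grid, grid), vals)
print(ifunc((10.0, 10.0)))                          # background at pixel (10, 10)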
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L450-L475
def rake(self, strike, dip, rake_angle, *args, **kwargs):
    """
    Plot points representing lineations along planes on the axes.
    Additional arguments and keyword arguments are passed on to `plot`.

    Parameters
    ----------
    strike, dip : number or sequences of numbers
        The strike and dip of the plane(s) in degrees. The dip direction is
        defined by the strike following the "right-hand rule".
    rake_angle : number or sequences of numbers
        The angle of the lineation(s) on the plane(s) measured in degrees
        downward from horizontal. Zero degrees corresponds to the "right
        hand" direction indicated by the strike, while negative angles are
        measured downward from the opposite strike direction.
    **kwargs
        Additional arguments are passed on to `plot`.

    Returns
    -------
    A sequence of Line2D artists representing the point(s) specified by
    `strike` and `dip`.
    """
    lon, lat = stereonet_math.rake(strike, dip, rake_angle)
    args, kwargs = self._point_plot_defaults(args, kwargs)
    return self.plot(lon, lat, *args, **kwargs)
Plot points representing lineations along planes on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". rake_angle : number or sequences of numbers The angle of the lineation(s) on the plane(s) measured in degrees downward from horizontal. Zero degrees corresponds to the "right hand" direction indicated by the strike, while negative angles are measured downward from the opposite strike direction. **kwargs Additional arguments are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`.
python
train
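A hedged usage sketch, assuming mplstereonet is installed; the strike, dip, and rake values are illustrative.

import matplotlib.pyplot as plt
import mplstereonet

fig, ax = mplstereonet.subplots()
ax.plane(315, 30, 'k-')      # plane striking 315, dipping 30 (right-hand rule)
ax.rake(315, 30, 40, 'ro')   # lineation 40 degrees down from the strike direction
ax.grid()
plt.show()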
Jammy2211/PyAutoLens
autolens/lens/sensitivity_fit.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/lens/sensitivity_fit.py#L5-L35
def fit_lens_data_with_sensitivity_tracers(lens_data, tracer_normal, tracer_sensitive):
    """Fit lens data with a normal tracer and sensitivity tracer, to determine our sensitivity to a selection of \
    galaxy components. This factory automatically determines the type of fit based on the properties of the galaxies \
    in the tracers.

    Parameters
    -----------
    lens_data : lens_data.LensData or lens_data.LensDataHyper
        The lens-image that is fitted.
    tracer_normal : ray_tracing.AbstractTracer
        A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \
        lens data that we are fitting.
    tracer_sensitive : ray_tracing.AbstractTracerNonStack
        A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \
        lens data that we are fitting, but also additional components (e.g. mass clumps) which we measure \
        how sensitive we are to.
    """
    if (tracer_normal.has_light_profile and tracer_sensitive.has_light_profile) and \
            (not tracer_normal.has_pixelization and not tracer_sensitive.has_pixelization):

        return SensitivityProfileFit(lens_data=lens_data, tracer_normal=tracer_normal,
                                     tracer_sensitive=tracer_sensitive)

    elif (not tracer_normal.has_light_profile and not tracer_sensitive.has_light_profile) and \
            (tracer_normal.has_pixelization and tracer_sensitive.has_pixelization):

        return SensitivityInversionFit(lens_data=lens_data, tracer_normal=tracer_normal,
                                       tracer_sensitive=tracer_sensitive)
    else:

        raise exc.FittingException('The sensitivity_fit routine did not call a SensitivityFit class - check the '
                                   'properties of the tracers')
Fit lens data with a normal tracer and sensitivity tracer, to determine our sensitivity to a selection of \ galaxy components. This factory automatically determines the type of fit based on the properties of the galaxies \ in the tracers. Parameters ----------- lens_data : lens_data.LensData or lens_data.LensDataHyper The lens-image that is fitted. tracer_normal : ray_tracing.AbstractTracer A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \ lens data that we are fitting. tracer_sensitive : ray_tracing.AbstractTracerNonStack A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \ lens data that we are fitting, but also additional components (e.g. mass clumps) which we measure \ how sensitive we are to.
python
valid
summanlp/textrank
summa/preprocessing/snowball.py
https://github.com/summanlp/textrank/blob/6844bbe8c4b2b468020ae0dfd6574a743f9ad442/summa/preprocessing/snowball.py#L2667-L2835
def stem(self, word):
    """
    Stem a Romanian word and return the stemmed form.

    :param word: The word that is stemmed.
    :type word: str or unicode
    :return: The stemmed form.
    :rtype: unicode
    """
    word = word.lower()

    step1_success = False
    step2_success = False

    for i in range(1, len(word)-1):
        if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
            if word[i] == "u":
                word = "".join((word[:i], "U", word[i+1:]))
            elif word[i] == "i":
                word = "".join((word[:i], "I", word[i+1:]))

    r1, r2 = self._r1r2_standard(word, self.__vowels)
    rv = self._rv_standard(word, self.__vowels)

    # STEP 0: Removal of plurals and other simplifications
    for suffix in self.__step0_suffixes:
        if word.endswith(suffix):
            if suffix in r1:
                if suffix in ("ul", "ului"):
                    word = word[:-len(suffix)]

                    if suffix in rv:
                        rv = rv[:-len(suffix)]
                    else:
                        rv = ""

                elif (suffix == "aua" or suffix == "atei" or
                      (suffix == "ile" and word[-5:-3] != "ab")):
                    word = word[:-2]

                elif suffix in ("ea", "ele", "elor"):
                    word = "".join((word[:-len(suffix)], "e"))

                    if suffix in rv:
                        rv = "".join((rv[:-len(suffix)], "e"))
                    else:
                        rv = ""

                elif suffix in ("ii", "iua", "iei", "iile", "iilor", "ilor"):
                    word = "".join((word[:-len(suffix)], "i"))

                    if suffix in rv:
                        rv = "".join((rv[:-len(suffix)], "i"))
                    else:
                        rv = ""

                elif suffix in ("a\u0163ie", "a\u0163ia"):
                    word = word[:-1]
            break

    # STEP 1: Reduction of combining suffixes
    while True:

        replacement_done = False

        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix in r1:
                    step1_success = True
                    replacement_done = True

                    if suffix in ("abilitate", "abilitati",
                                  "abilit\u0103i", "abilit\u0103\u0163i"):
                        word = "".join((word[:-len(suffix)], "abil"))

                    elif suffix == "ibilitate":
                        word = word[:-5]

                    elif suffix in ("ivitate", "ivitati",
                                    "ivit\u0103i", "ivit\u0103\u0163i"):
                        word = "".join((word[:-len(suffix)], "iv"))

                    elif suffix in ("icitate", "icitati", "icit\u0103i",
                                    "icit\u0103\u0163i", "icator",
                                    "icatori", "iciv", "iciva", "icive",
                                    "icivi", "iciv\u0103", "ical", "icala",
                                    "icale", "icali", "ical\u0103"):
                        word = "".join((word[:-len(suffix)], "ic"))

                    elif suffix in ("ativ", "ativa", "ative", "ativi",
                                    "ativ\u0103", "a\u0163iune", "atoare",
                                    "ator", "atori", "\u0103toare",
                                    "\u0103tor", "\u0103tori"):
                        word = "".join((word[:-len(suffix)], "at"))

                        if suffix in r2:
                            r2 = "".join((r2[:-len(suffix)], "at"))

                    elif suffix in ("itiv", "itiva", "itive", "itivi",
                                    "itiv\u0103", "i\u0163iune", "itoare",
                                    "itor", "itori"):
                        word = "".join((word[:-len(suffix)], "it"))

                        if suffix in r2:
                            r2 = "".join((r2[:-len(suffix)], "it"))
                else:
                    step1_success = False
                break

        if not replacement_done:
            break

    # STEP 2: Removal of standard suffixes
    for suffix in self.__step2_suffixes:
        if word.endswith(suffix):
            if suffix in r2:
                step2_success = True

                if suffix in ("iune", "iuni"):
                    if word[-5] == "\u0163":
                        word = "".join((word[:-5], "t"))

                elif suffix in ("ism", "isme", "ist", "ista", "iste",
                                "isti", "ist\u0103", "i\u015Fti"):
                    word = "".join((word[:-len(suffix)], "ist"))

                else:
                    word = word[:-len(suffix)]
            break

    # STEP 3: Removal of verb suffixes
    if not step1_success and not step2_success:
        for suffix in self.__step3_suffixes:
            if word.endswith(suffix):
                if suffix in rv:
                    if suffix in ('seser\u0103\u0163i', 'seser\u0103m',
                                  'ser\u0103\u0163i', 'sese\u015Fi',
                                  'seser\u0103', 'ser\u0103m', 'sesem',
                                  'se\u015Fi', 'ser\u0103', 'sese',
                                  'a\u0163i', 'e\u0163i', 'i\u0163i',
                                  '\xE2\u0163i', 'sei', '\u0103m',
                                  'em', 'im', '\xE2m', 'se'):
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                    else:
                        if (not rv.startswith(suffix) and
                                rv[rv.index(suffix)-1] not in
                                "aeio\u0103\xE2\xEE"):
                            word = word[:-len(suffix)]
                break

    # STEP 4: Removal of final vowel
    for suffix in ("ie", "a", "e", "i", "\u0103"):
        if word.endswith(suffix):
            if suffix in rv:
                word = word[:-len(suffix)]
            break

    word = word.replace("I", "i").replace("U", "u")

    return word
Stem a Romanian word and return the stemmed form. :param word: The word that is stemmed. :type word: str or unicode :return: The stemmed form. :rtype: unicode
python
train
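A hedged usage sketch, assuming the summa package is installed; the sample word is illustrative.

from summa.preprocessing.snowball import SnowballStemmer

stemmer = SnowballStemmer("romanian")
print(stemmer.stem("copiilor"))  # prints the stemmed form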
Alignak-monitoring/alignak
alignak/objects/macromodulation.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/macromodulation.py#L107-L127
def is_correct(self):
    """
    Check if this object configuration is correct ::

    * Call our parent class is_correct checker

    :return: True if the configuration is correct, otherwise False
    :rtype: bool
    """
    state = True

    # Ok just put None as modulation_period, means 24x7
    if not hasattr(self, 'modulation_period'):
        self.modulation_period = None

    if not hasattr(self, 'customs') or not self.customs:
        msg = "[macromodulation::%s] contains no macro definition" % (self.get_name())
        self.add_error(msg)
        state = False

    return super(MacroModulation, self).is_correct() and state
Check if this object configuration is correct :: * Call our parent class is_correct checker :return: True if the configuration is correct, otherwise False :rtype: bool
python
train
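A self-contained sketch of the validation pattern used above: default a missing attribute, accumulate errors into a state flag, and AND it with the parent checker.

class Base(object):
    def is_correct(self):
        return True

class MacroModLike(Base):
    def add_error(self, msg):
        print(msg)

    def is_correct(self):
        state = True
        if not hasattr(self, 'modulation_period'):
            self.modulation_period = None          # None means 24x7
        if not getattr(self, 'customs', None):     # missing or empty customs
            self.add_error("contains no macro definition")
            state = False
        return super(MacroModLike, self).is_correct() and state

print(MacroModLike().is_correct())  # False: no macro definitions present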
sigvaldm/frmt
frmt.py
https://github.com/sigvaldm/frmt/blob/d077af06c83a7a0533ca2218be55ce086df274b7/frmt.py#L143-L277
def format_table(table,
                 align='<',
                 format='{:.3g}',
                 colwidth=None,
                 maxwidth=None,
                 spacing=2,
                 truncate=0,
                 suffix="..."
                 ):
    """
    Formats a table represented as an iterable of iterable into a nice big
    string suitable for printing.

    Parameters:
    -----------
    align : string or list of strings
        Alignment of cell contents. Each character in a string specifies
        the alignment of one column.

        * ``<`` - Left aligned (default)
        * ``^`` - Centered
        * ``>`` - Right aligned

        The last alignment is repeated for unspecified columns.

        If it's a list of strings, each string specifies the alignment of
        one row. The last string is used repeatedly for unspecified rows.

    format : string/function, or (nested) list of string/function
        Formats the contents of the cells using the specified function(s)
        or format string(s).

        If it's a list of strings/functions each entry specifies formatting
        for one column, the last entry being used repeatedly for
        unspecified columns.

        If it's a list of lists, each sub-list specifies one row, the last
        sub-list being used repeatedly for unspecified rows.

    colwidth : int, list of ints or None
        The width of each column. The last width is used repeatedly for
        unspecified columns. If ``None`` the width is fitted to the
        contents.

    maxwidth : int or None
        The maximum width of the table. Defaults to terminal width minus 1
        if ``None``. If the table would be wider than ``maxwidth`` one of
        the columns is truncated.

    spacing : int
        The spacing between columns

    truncate : int
        Which column to truncate if table width would exceed ``maxwidth``.

    Beware that no columns can have zero or negative width. If for instance
    'maxwidth' is 80 and 'colwidth' is [10, 30, 30, 30] with spacing 2 the
    total width will initially be 10+2+30+2+30+2+30=106. That's 26
    characters too much, so a width of 26 will be removed from the
    truncated column. If 'truncate' is 0, column 0 will have a width of -16
    which is not permitted.
    """
    table = list(deepcopy(table))

    if not isinstance(align, list):
        align = [align]

    if not isinstance(format, list):
        format = [format]

    if not isinstance(format[0], list):
        format = [format]

    num_cols = len(table[0])
    if len(set([len(row) for row in table])) > 1:
        raise ValueError("All rows must have the same number of columns")

    for i in range(len(table)):
        table[i] = list(table[i])
        colformat = format[min(i, len(format)-1)]
        for j, cell in enumerate(table[i]):
            f = colformat[min(j, len(colformat)-1)]
            if isinstance(f, str):
                fun = lambda x: f.format(x)
            else:
                fun = f
            try:
                table[i][j] = fun(cell)
            except:
                table[i][j] = str(cell)

    if colwidth == None:
        cellwidth = [[len(cell) for cell in row] for row in table]
        colwidth = list(map(max, zip(*cellwidth)))
    elif not isinstance(colwidth, list):
        colwidth = [colwidth]

    colwidth.extend([colwidth[-1]]*(num_cols-len(colwidth)))

    if maxwidth == None:
        maxwidth = get_terminal_size().columns-1

    width = sum(colwidth)+spacing*(num_cols-1)
    if width > maxwidth:
        colwidth[truncate] -= (width-maxwidth)

    for j, cw in enumerate(colwidth):
        if cw < 1:
            raise RuntimeError("Column {} in format_table() has width {}. "
                               "Make sure all columns have width >0. "
                               "Read docstring for further details."
                               .format(j, cw)
                               )

    s = ''
    for i, row in enumerate(table):
        if i != 0:
            s += "\n"
        colalign = align[min(i, len(align)-1)]
        colformat = format[min(i, len(format)-1)]
        for j, col in enumerate(row):
            a = colalign[min(j, len(colalign)-1)]
            f = colformat[min(j, len(colformat)-1)]
            w = colwidth[j]
            if j != 0:
                s += ' '*spacing
            s += format_fit(format_time(col), w, a, suffix)

    return s
Formats a table represented as an iterable of iterable into a nice big string suitable for printing. Parameters: ----------- align : string or list of strings Alignment of cell contents. Each character in a string specifies the alignment of one column. * ``<`` - Left aligned (default) * ``^`` - Centered * ``>`` - Right aligned The last alignment is repeated for unspecified columns. If it's a list of strings, each string specifies the alignment of one row. The last string is used repeatedly for unspecified rows. format : string/function, or (nested) list of string/function Formats the contents of the cells using the specified function(s) or format string(s). If it's a list of strings/functions each entry specifies formatting for one column, the last entry being used repeatedly for unspecified columns. If it's a list of lists, each sub-list specifies one row, the last sub-list being used repeatedly for unspecified rows. colwidth : int, list of ints or None The width of each column. The last width is used repeatedly for unspecified columns. If ``None`` the width is fitted to the contents. maxwidth : int or None The maximum width of the table. Defaults to terminal width minus 1 if ``None``. If the table would be wider than ``maxwidth`` one of the columns is truncated. spacing : int The spacing between columns truncate : int Which column to truncate if table width would exceed ``maxwidth``. Beware that no columns can have zero or negative width. If for instance 'maxwidth' is 80 and 'colwidth' is [10, 30, 30, 30] with spacing 2 the total width will initially be 10+2+30+2+30+2+30=106. That's 26 characters too much, so a width of 26 will be removed from the truncated column. If 'truncate' is 0, column 0 will have a width of -16 which is not permitted.
python
train
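A hedged usage sketch, assuming frmt is installed; align='<>' left-aligns the first column and right-aligns the rest, and header cells that reject the numeric format string fall back to str().

from frmt import format_table

table = [["Planet", "Mass (kg)"],
         ["Earth",  5.972e24],
         ["Mars",   6.417e23]]
print(format_table(table, align='<>', format='{:.3g}'))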
YosaiProject/yosai
yosai/web/subject/subject.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/web/subject/subject.py#L232-L246
def requires_authentication(fn):
    """
    Requires that the calling Subject be authenticated before allowing access.
    """
    @functools.wraps(fn)
    def wrap(*args, **kwargs):

        subject = WebYosai.get_current_subject()

        if not subject.authenticated:
            msg = "The current Subject is not authenticated. ACCESS DENIED."
            raise WebYosai.get_current_webregistry().raise_unauthorized(msg)

        return fn(*args, **kwargs)
    return wrap
Requires that the calling Subject be authenticated before allowing access.
python
train
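A hedged usage sketch of the decorator, assuming a Yosai web app with a configured WebRegistry; the view function is hypothetical.

@requires_authentication
def view_account(account_id):
    return {"account": account_id}

# Succeeds only for an authenticated Subject; otherwise the decorator raises
# via WebYosai.get_current_webregistry().raise_unauthorized(...).
view_account(42)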
edx/XBlock
xblock/fields.py
https://github.com/edx/XBlock/blob/368bf46e2c0ee69bbb21817f428c4684936e18ee/xblock/fields.py#L683-L688
def is_set_on(self, xblock):
    """
    Return whether this field has a non-default value on the supplied xblock
    """
    # pylint: disable=protected-access
    return self._is_dirty(xblock) or xblock._field_data.has(xblock, self.name)
Return whether this field has a non-default value on the supplied xblock
python
train
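A hedged sketch, assuming an XBlock instance `block` whose class declares an `author` field; XBlock classes expose declared fields through the `fields` mapping.

field = block.fields['author']        # `block` and `author` are hypothetical
if field.is_set_on(block):
    print("author was set explicitly, not defaulted")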
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/update_service/apis/default_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/update_service/apis/default_api.py#L767-L787
def firmware_manifest_retrieve(self, manifest_id, **kwargs):  # noqa: E501
    """Get a manifest  # noqa: E501

    Retrieve a firmware manifest.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.firmware_manifest_retrieve(manifest_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str manifest_id: The firmware manifest ID (required)
    :return: FirmwareManifest
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.firmware_manifest_retrieve_with_http_info(manifest_id, **kwargs)  # noqa: E501
    else:
        (data) = self.firmware_manifest_retrieve_with_http_info(manifest_id, **kwargs)  # noqa: E501
        return data
Get a manifest # noqa: E501 Retrieve a firmware manifest. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.firmware_manifest_retrieve(manifest_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str manifest_id: The firmware manifest ID (required) :return: FirmwareManifest If the method is called asynchronously, returns the request thread.
python
train
QualiSystems/cloudshell-networking-devices
cloudshell/devices/driver_helper.py
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/driver_helper.py#L45-L72
def get_snmp_parameters_from_command_context(resource_config, api, force_decrypt=False):
    """
    :param ResourceCommandContext resource_config: command context
    :return:
    """
    if '3' in resource_config.snmp_version:
        return SNMPV3Parameters(ip=resource_config.address,
                                snmp_user=resource_config.snmp_v3_user or '',
                                snmp_password=api.DecryptPassword(resource_config.snmp_v3_password).Value or '',
                                snmp_private_key=resource_config.snmp_v3_private_key or '',
                                auth_protocol=resource_config.snmp_v3_auth_protocol or SNMPV3Parameters.AUTH_NO_AUTH,
                                private_key_protocol=resource_config.snmp_v3_priv_protocol or SNMPV3Parameters.PRIV_NO_PRIV).get_valid()
    else:
        if resource_config.shell_name or force_decrypt:
            write_community = api.DecryptPassword(resource_config.snmp_write_community).Value or ''
        else:
            write_community = resource_config.snmp_write_community or ''

        if write_community:
            return SNMPV2WriteParameters(ip=resource_config.address, snmp_write_community=write_community)
        else:
            if resource_config.shell_name or force_decrypt:
                read_community = api.DecryptPassword(resource_config.snmp_read_community).Value or ''
            else:
                read_community = resource_config.snmp_read_community or ''

            return SNMPV2ReadParameters(ip=resource_config.address, snmp_read_community=read_community)
:param ResourceCommandContext resource_config: command context :return:
python
train
saltstack/salt
salt/modules/win_timezone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_timezone.py#L265-L303
def set_zone(timezone):
    '''
    Sets the timezone using the tzutil.

    Args:
        timezone (str): A valid timezone

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If invalid timezone is passed

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'
    '''
    # if it's one of the keys just use it
    if timezone.lower() in mapper.win_to_unix:
        win_zone = timezone

    elif timezone.lower() in mapper.unix_to_win:
        # if it's one of the values, use the key
        win_zone = mapper.get_win(timezone)

    else:
        # Raise error because it's neither key nor value
        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))

    # Set the value
    cmd = ['tzutil', '/s', win_zone]
    res = __salt__['cmd.run_all'](cmd, python_shell=False)

    if res['retcode']:
        raise CommandExecutionError('tzutil encountered an error setting '
                                    'timezone: {0}'.format(timezone),
                                    info=res)

    return zone_compare(timezone)
[ "def", "set_zone", "(", "timezone", ")", ":", "# if it's one of the key's just use it", "if", "timezone", ".", "lower", "(", ")", "in", "mapper", ".", "win_to_unix", ":", "win_zone", "=", "timezone", "elif", "timezone", ".", "lower", "(", ")", "in", "mapper", ".", "unix_to_win", ":", "# if it's one of the values, use the key", "win_zone", "=", "mapper", ".", "get_win", "(", "timezone", ")", "else", ":", "# Raise error because it's neither key nor value", "raise", "CommandExecutionError", "(", "'Invalid timezone passed: {0}'", ".", "format", "(", "timezone", ")", ")", "# Set the value", "cmd", "=", "[", "'tzutil'", ",", "'/s'", ",", "win_zone", "]", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "res", "[", "'retcode'", "]", ":", "raise", "CommandExecutionError", "(", "'tzutil encountered an error setting '", "'timezone: {0}'", ".", "format", "(", "timezone", ")", ",", "info", "=", "res", ")", "return", "zone_compare", "(", "timezone", ")" ]
Sets the timezone using the tzutil. Args: timezone (str): A valid timezone Returns: bool: ``True`` if successful, otherwise ``False`` Raises: CommandExecutionError: If invalid timezone is passed CLI Example: .. code-block:: bash salt '*' timezone.set_zone 'America/Denver'
[ "Sets", "the", "timezone", "using", "the", "tzutil", "." ]
python
train
Metatab/metatab
metatab/terms.py
https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/terms.py#L216-L226
def find(self, term, value=False):
        """Return terms by name. If the name is not qualified, use this term's record
        name for the parent.

        The method will yield all terms with a matching qualified name.

        """
        if '.' in term:
            parent, term = term.split('.')
            assert parent.lower() == self.record_term_lc, (parent.lower(), self.record_term_lc)

        for c in self.children:
            if c.record_term_lc == term.lower():
                if value is False or c.value == value:
                    yield c
[ "def", "find", "(", "self", ",", "term", ",", "value", "=", "False", ")", ":", "if", "'.'", "in", "term", ":", "parent", ",", "term", "=", "term", ".", "split", "(", "'.'", ")", "assert", "parent", ".", "lower", "(", ")", "==", "self", ".", "record_term_lc", ",", "(", "parent", ".", "lower", "(", ")", ",", "self", ".", "record_term_lc", ")", "for", "c", "in", "self", ".", "children", ":", "if", "c", ".", "record_term_lc", "==", "term", ".", "lower", "(", ")", ":", "if", "value", "is", "False", "or", "c", ".", "value", "==", "value", ":", "yield", "c" ]
Return terms by name. If the name is not qualified, use this term's record name for the parent. The method will yield all terms with a matching qualified name.
[ "Return", "terms", "by", "name", ".", "If", "the", "name", "is", "not", "qualified", "use", "this", "term", "s", "record", "name", "for", "the", "parent", ".", "The", "method", "will", "yield", "all", "terms", "with", "a", "matching", "qualified", "name", "." ]
python
train
saltstack/salt
salt/modules/logrotate.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/logrotate.py#L276-L285
def _dict_to_stanza(key, stanza): ''' Convert a dict to a multi-line stanza ''' ret = '' for skey in stanza: if stanza[skey] is True: stanza[skey] = '' ret += ' {0} {1}\n'.format(skey, stanza[skey]) return '{0} {{\n{1}}}'.format(key, ret)
[ "def", "_dict_to_stanza", "(", "key", ",", "stanza", ")", ":", "ret", "=", "''", "for", "skey", "in", "stanza", ":", "if", "stanza", "[", "skey", "]", "is", "True", ":", "stanza", "[", "skey", "]", "=", "''", "ret", "+=", "' {0} {1}\\n'", ".", "format", "(", "skey", ",", "stanza", "[", "skey", "]", ")", "return", "'{0} {{\\n{1}}}'", ".", "format", "(", "key", ",", "ret", ")" ]
Convert a dict to a multi-line stanza
[ "Convert", "a", "dict", "to", "a", "multi", "-", "line", "stanza" ]
python
train
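A quick illustration of the stanza builder above; importing a private helper like this is for demonstration only and assumes salt is installed.

from salt.modules.logrotate import _dict_to_stanza

print(_dict_to_stanza('/var/log/app.log', {'rotate': 5, 'compress': True}))
# /var/log/app.log {
#     rotate 5
#     compress
# }
# Note that a flag value of True is rendered with an empty value.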
ajenhl/tacl
tacl/tei_corpus.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/tei_corpus.py#L141-L145
def _output_work(self, work, root): """Saves the TEI XML document `root` at the path `work`.""" output_filename = os.path.join(self._output_dir, work) tree = etree.ElementTree(root) tree.write(output_filename, encoding='utf-8', pretty_print=True)
[ "def", "_output_work", "(", "self", ",", "work", ",", "root", ")", ":", "output_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_output_dir", ",", "work", ")", "tree", "=", "etree", ".", "ElementTree", "(", "root", ")", "tree", ".", "write", "(", "output_filename", ",", "encoding", "=", "'utf-8'", ",", "pretty_print", "=", "True", ")" ]
Saves the TEI XML document `root` at the path `work`.
[ "Saves", "the", "TEI", "XML", "document", "root", "at", "the", "path", "work", "." ]
python
train
auth0/auth0-python
auth0/v3/management/users.py
https://github.com/auth0/auth0-python/blob/34adad3f342226aaaa6071387fa405ab840e5c02/auth0/v3/management/users.py#L195-L225
def get_log_events(self, user_id, page=0, per_page=50, sort=None, include_totals=False): """Retrieve every log event for a specific user id Args: user_id (str): The user_id of the logs to retrieve page (int, optional): The result's page number (zero based). per_page (int, optional): The amount of entries per page. Default: 50. Max value: 100 sort (str, optional): The field to use for sorting. Use field:order where order is 1 for ascending and -1 for descending. For example date:-1 include_totals (bool, optional): True if the query summary is to be included in the result, False otherwise. See: https://auth0.com/docs/api/management/v2#!/Users/get_logs_by_user """ params = { 'per_page': per_page, 'page': page, 'include_totals': str(include_totals).lower(), 'sort': sort } url = self._url('{}/logs'.format(user_id)) return self.client.get(url, params=params)
[ "def", "get_log_events", "(", "self", ",", "user_id", ",", "page", "=", "0", ",", "per_page", "=", "50", ",", "sort", "=", "None", ",", "include_totals", "=", "False", ")", ":", "params", "=", "{", "'per_page'", ":", "per_page", ",", "'page'", ":", "page", ",", "'include_totals'", ":", "str", "(", "include_totals", ")", ".", "lower", "(", ")", ",", "'sort'", ":", "sort", "}", "url", "=", "self", ".", "_url", "(", "'{}/logs'", ".", "format", "(", "user_id", ")", ")", "return", "self", ".", "client", ".", "get", "(", "url", ",", "params", "=", "params", ")" ]
Retrieve every log event for a specific user id Args: user_id (str): The user_id of the logs to retrieve page (int, optional): The result's page number (zero based). per_page (int, optional): The amount of entries per page. Default: 50. Max value: 100 sort (str, optional): The field to use for sorting. Use field:order where order is 1 for ascending and -1 for descending. For example date:-1 include_totals (bool, optional): True if the query summary is to be included in the result, False otherwise. See: https://auth0.com/docs/api/management/v2#!/Users/get_logs_by_user
[ "Retrieve", "every", "log", "event", "for", "a", "specific", "user", "id" ]
python
train
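A hedged usage sketch for the method above; the tenant domain, token and user_id are placeholders.

from auth0.v3.management import Auth0

auth0 = Auth0('my-tenant.auth0.com', 'MGMT_API_TOKEN')

# Second page of 25 entries, newest first, with the query summary included.
logs = auth0.users.get_log_events(
    'auth0|507f1f77bcf86cd799439020',
    page=1,
    per_page=25,
    sort='date:-1',
    include_totals=True,
)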
veeti/decent
decent/validators.py
https://github.com/veeti/decent/blob/07b11536953b9cf4402c65f241706ab717b90bff/decent/validators.py#L11-L21
def All(*validators): """ Combines all the given validator callables into one, running all the validators in sequence on the given value. """ @wraps(All) def built(value): for validator in validators: value = validator(value) return value return built
[ "def", "All", "(", "*", "validators", ")", ":", "@", "wraps", "(", "All", ")", "def", "built", "(", "value", ")", ":", "for", "validator", "in", "validators", ":", "value", "=", "validator", "(", "value", ")", "return", "value", "return", "built" ]
Combines all the given validator callables into one, running all the validators in sequence on the given value.
[ "Combines", "all", "the", "given", "validator", "callables", "into", "one", "running", "all", "the", "validators", "in", "sequence", "on", "the", "given", "value", "." ]
python
train
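Composing validators with the combinator above; any callables that take and return a value will do, as this short sketch shows.

from decent.validators import All

def strip_text(value):
    return value.strip()

parse_port = All(strip_text, int)  # strip first, then coerce to int

assert parse_port('  8080 ') == 8080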
Murali-group/halp
halp/directed_hypergraph.py
https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/directed_hypergraph.py#L1115-L1161
def write(self, file_name, delim=',', sep='\t'): """Write a directed hypergraph to a file, where nodes are represented as strings. Each column is separated by "sep", and the individual tail nodes and head nodes are delimited by "delim". The header line is currently ignored, but columns should be of the format: tailnode1[delim]..tailnodeM[sep]headnode1[delim]..headnodeN[sep]weight As a concrete example, an arbitrary line with delim=',' and sep=' ' (4 spaces) may look like: :: x1,x2 x3,x4,x5 12 which defines a hyperedge of weight 12 from a tail set containing nodes "x1" and "x2" to a head set containing nodes "x3", "x4", and "x5" """ out_file = open(file_name, 'w') # write first header line out_file.write("tail" + sep + "head" + sep + "weight\n") for hyperedge_id in self.get_hyperedge_id_set(): line = "" # Write each tail node to the line, separated by delim for tail_node in self.get_hyperedge_tail(hyperedge_id): line += tail_node + delim # Remove last (extra) delim line = line[:-1] # Add sep between columns line += sep # Write each head node to the line, separated by delim for head_node in self.get_hyperedge_head(hyperedge_id): line += head_node + delim # Remove last (extra) delim line = line[:-1] # Write the weight to the line and end the line line += sep + str(self.get_hyperedge_weight(hyperedge_id)) + "\n" out_file.write(line) out_file.close()
[ "def", "write", "(", "self", ",", "file_name", ",", "delim", "=", "','", ",", "sep", "=", "'\\t'", ")", ":", "out_file", "=", "open", "(", "file_name", ",", "'w'", ")", "# write first header line", "out_file", ".", "write", "(", "\"tail\"", "+", "sep", "+", "\"head\"", "+", "sep", "+", "\"weight\\n\"", ")", "for", "hyperedge_id", "in", "self", ".", "get_hyperedge_id_set", "(", ")", ":", "line", "=", "\"\"", "# Write each tail node to the line, separated by delim", "for", "tail_node", "in", "self", ".", "get_hyperedge_tail", "(", "hyperedge_id", ")", ":", "line", "+=", "tail_node", "+", "delim", "# Remove last (extra) delim", "line", "=", "line", "[", ":", "-", "1", "]", "# Add sep between columns", "line", "+=", "sep", "# Write each head node to the line, separated by delim", "for", "head_node", "in", "self", ".", "get_hyperedge_head", "(", "hyperedge_id", ")", ":", "line", "+=", "head_node", "+", "delim", "# Remove last (extra) delim", "line", "=", "line", "[", ":", "-", "1", "]", "# Write the weight to the line and end the line", "line", "+=", "sep", "+", "str", "(", "self", ".", "get_hyperedge_weight", "(", "hyperedge_id", ")", ")", "+", "\"\\n\"", "out_file", ".", "write", "(", "line", ")", "out_file", ".", "close", "(", ")" ]
Write a directed hypergraph to a file, where nodes are represented as strings. Each column is separated by "sep", and the individual tail nodes and head nodes are delimited by "delim". The header line is currently ignored, but columns should be of the format: tailnode1[delim]..tailnodeM[sep]headnode1[delim]..headnodeN[sep]weight As a concrete example, an arbitrary line with delim=',' and sep=' ' (4 spaces) may look like: :: x1,x2 x3,x4,x5 12 which defines a hyperedge of weight 12 from a tail set containing nodes "x1" and "x2" to a head set containing nodes "x3", "x4", and "x5"
[ "Write", "a", "directed", "hypergraph", "to", "a", "file", "where", "nodes", "are", "represented", "as", "strings", ".", "Each", "column", "is", "separated", "by", "sep", "and", "the", "individual", "tail", "nodes", "and", "head", "nodes", "are", "delimited", "by", "delim", ".", "The", "header", "line", "is", "currently", "ignored", "but", "columns", "should", "be", "of", "the", "format", ":", "tailnode1", "[", "delim", "]", "..", "tailnodeM", "[", "sep", "]", "headnode1", "[", "delim", "]", "..", "headnodeN", "[", "sep", "]", "weight" ]
python
train
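A short sketch that reproduces the docstring's example hyperedge and writes it out; the add_hyperedge call follows halp's DirectedHypergraph API, assumed from the accessors used in the method above.

from halp.directed_hypergraph import DirectedHypergraph

H = DirectedHypergraph()
H.add_hyperedge({'x1', 'x2'}, {'x3', 'x4', 'x5'}, weight=12)
H.write('hypergraph.txt', delim=',', sep='\t')
# hypergraph.txt now holds the header line plus one
# tail<TAB>head<TAB>weight line per hyperedge.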
oauthlib/oauthlib
oauthlib/oauth1/rfc5849/request_validator.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/request_validator.py#L182-L188
def check_nonce(self, nonce):
        """Checks that the nonce contains only safe characters

        and is no shorter than lower and no longer than upper.
        """
        lower, upper = self.nonce_length
        return (set(nonce) <= self.safe_characters and
                lower <= len(nonce) <= upper)
[ "def", "check_nonce", "(", "self", ",", "nonce", ")", ":", "lower", ",", "upper", "=", "self", ".", "nonce_length", "return", "(", "set", "(", "nonce", ")", "<=", "self", ".", "safe_characters", "and", "lower", "<=", "len", "(", "nonce", ")", "<=", "upper", ")" ]
Checks that the nonce contains only safe characters and is no shorter than lower and no longer than upper.
[ "Checks", "that", "the", "nonce", "contains", "only", "safe", "characters", "and", "is", "no", "shorter", "than", "lower", "and", "no", "longer", "than", "upper", "." ]
python
train
ttinies/sc2gameLobby
sc2gameLobby/clientManagement.py
https://github.com/ttinies/sc2gameLobby/blob/5352d51d53ddeb4858e92e682da89c4434123e52/sc2gameLobby/clientManagement.py#L111-L113
def debug(self, *debugReqs): """send a debug command to control the game state's setup""" return self._client.send(debug=sc2api_pb2.RequestDebug(debug=debugReqs))
[ "def", "debug", "(", "self", ",", "*", "debugReqs", ")", ":", "return", "self", ".", "_client", ".", "send", "(", "debug", "=", "sc2api_pb2", ".", "RequestDebug", "(", "debug", "=", "debugReqs", ")", ")" ]
send a debug command to control the game state's setup
[ "send", "a", "debug", "command", "to", "control", "the", "game", "state", "s", "setup" ]
python
train
aeguana/PyFileMaker
PyFileMaker/FMServer.py
https://github.com/aeguana/PyFileMaker/blob/ef269b52a97e329d91da3c4851ddac800d7fd7e6/PyFileMaker/FMServer.py#L501-L526
def doNew(self, WHAT={}, **params): """This function will perform the command -new.""" if hasattr(WHAT, '_modified'): for key in WHAT: if key not in ['RECORDID','MODID']: if WHAT.__new2old__.has_key(key): self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), WHAT[key]) else: self._addDBParam(key, WHAT[key]) elif type(WHAT)==dict: for key in WHAT: self._addDBParam(key, WHAT[key]) else: raise FMError, 'Python Runtime: Object type (%s) given to function doNew as argument WHAT cannot be used.' % type(WHAT) if self._layout == '': raise FMError, 'No layout was selected' for key in params: self._addDBParam(key, params[key]) if len(self._dbParams) == 0: raise FMError, 'No data to be added' return self._doAction('-new')
[ "def", "doNew", "(", "self", ",", "WHAT", "=", "{", "}", ",", "*", "*", "params", ")", ":", "if", "hasattr", "(", "WHAT", ",", "'_modified'", ")", ":", "for", "key", "in", "WHAT", ":", "if", "key", "not", "in", "[", "'RECORDID'", ",", "'MODID'", "]", ":", "if", "WHAT", ".", "__new2old__", ".", "has_key", "(", "key", ")", ":", "self", ".", "_addDBParam", "(", "WHAT", ".", "__new2old__", "[", "key", "]", ".", "encode", "(", "'utf-8'", ")", ",", "WHAT", "[", "key", "]", ")", "else", ":", "self", ".", "_addDBParam", "(", "key", ",", "WHAT", "[", "key", "]", ")", "elif", "type", "(", "WHAT", ")", "==", "dict", ":", "for", "key", "in", "WHAT", ":", "self", ".", "_addDBParam", "(", "key", ",", "WHAT", "[", "key", "]", ")", "else", ":", "raise", "FMError", ",", "'Python Runtime: Object type (%s) given to function doNew as argument WHAT cannot be used.'", "%", "type", "(", "WHAT", ")", "if", "self", ".", "_layout", "==", "''", ":", "raise", "FMError", ",", "'No layout was selected'", "for", "key", "in", "params", ":", "self", ".", "_addDBParam", "(", "key", ",", "params", "[", "key", "]", ")", "if", "len", "(", "self", ".", "_dbParams", ")", "==", "0", ":", "raise", "FMError", ",", "'No data to be added'", "return", "self", ".", "_doAction", "(", "'-new'", ")" ]
This function will perform the command -new.
[ "This", "function", "will", "perform", "the", "command", "-", "new", "." ]
python
train
datadesk/python-documentcloud
documentcloud/__init__.py
https://github.com/datadesk/python-documentcloud/blob/0d7f42cbf1edf5c61fca37ed846362cba4abfd76/documentcloud/__init__.py#L92-L136
def put(self, method, params): """ Post changes back to DocumentCloud """ # Prepare the params, first by adding a custom command to # simulate a PUT request even though we are actually POSTing. # This is something DocumentCloud expects. params['_method'] = 'put' # Some special case handling of the document_ids list, if it exists if params.get("document_ids", None): # Pull the document_ids out of the params document_ids = params.get("document_ids") del params['document_ids'] params = urllib.parse.urlencode(params, doseq=True) # These need to be specially formatted in the style documentcloud # expects arrays. The example they provide is: # ?document_ids[]=28-boumediene&document_ids[]=\ # 207-academy&document_ids[]=30-insider-trading params += "".join([ '&document_ids[]=%s' % id for id in document_ids ]) # More special case handler of key/value data tags, if they exist elif params.get("data", None): # Pull them out of the dict data = params.get("data") del params['data'] params = urllib.parse.urlencode(params, doseq=True) # Format them in the style documentcloud expects # ?data['foo']=bar&data['tit']=tat params += "".join([ '&data[%s]=%s' % ( urllib.parse.quote_plus(key.encode("utf-8")), urllib.parse.quote_plus(value.encode("utf-8")) ) for key, value in data.items() ]) else: # Otherwise, we can just use the vanilla urllib prep method params = urllib.parse.urlencode(params, doseq=True) # Make the request self._make_request( self.BASE_URI + method, params.encode("utf-8"), )
[ "def", "put", "(", "self", ",", "method", ",", "params", ")", ":", "# Prepare the params, first by adding a custom command to", "# simulate a PUT request even though we are actually POSTing.", "# This is something DocumentCloud expects.", "params", "[", "'_method'", "]", "=", "'put'", "# Some special case handling of the document_ids list, if it exists", "if", "params", ".", "get", "(", "\"document_ids\"", ",", "None", ")", ":", "# Pull the document_ids out of the params", "document_ids", "=", "params", ".", "get", "(", "\"document_ids\"", ")", "del", "params", "[", "'document_ids'", "]", "params", "=", "urllib", ".", "parse", ".", "urlencode", "(", "params", ",", "doseq", "=", "True", ")", "# These need to be specially formatted in the style documentcloud", "# expects arrays. The example they provide is:", "# ?document_ids[]=28-boumediene&document_ids[]=\\", "# 207-academy&document_ids[]=30-insider-trading", "params", "+=", "\"\"", ".", "join", "(", "[", "'&document_ids[]=%s'", "%", "id", "for", "id", "in", "document_ids", "]", ")", "# More special case handler of key/value data tags, if they exist", "elif", "params", ".", "get", "(", "\"data\"", ",", "None", ")", ":", "# Pull them out of the dict", "data", "=", "params", ".", "get", "(", "\"data\"", ")", "del", "params", "[", "'data'", "]", "params", "=", "urllib", ".", "parse", ".", "urlencode", "(", "params", ",", "doseq", "=", "True", ")", "# Format them in the style documentcloud expects", "# ?data['foo']=bar&data['tit']=tat", "params", "+=", "\"\"", ".", "join", "(", "[", "'&data[%s]=%s'", "%", "(", "urllib", ".", "parse", ".", "quote_plus", "(", "key", ".", "encode", "(", "\"utf-8\"", ")", ")", ",", "urllib", ".", "parse", ".", "quote_plus", "(", "value", ".", "encode", "(", "\"utf-8\"", ")", ")", ")", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", "]", ")", "else", ":", "# Otherwise, we can just use the vanilla urllib prep method", "params", "=", "urllib", ".", "parse", ".", "urlencode", "(", "params", ",", "doseq", "=", "True", ")", "# Make the request", "self", ".", "_make_request", "(", "self", ".", "BASE_URI", "+", "method", ",", "params", ".", "encode", "(", "\"utf-8\"", ")", ",", ")" ]
Post changes back to DocumentCloud
[ "Post", "changes", "back", "to", "DocumentCloud" ]
python
train
fiesta/fiesta-python
fiesta/fiesta.py
https://github.com/fiesta/fiesta-python/blob/cfcc11e4ae4c76b1007794604c33dde877f62cfb/fiesta/fiesta.py#L210-L215
def send_message(self, subject=None, text=None, markdown=None, message_dict=None): """ Helper function to send a message to a group """ message = FiestaMessage(self.api, self, subject, text, markdown, message_dict) return message.send()
[ "def", "send_message", "(", "self", ",", "subject", "=", "None", ",", "text", "=", "None", ",", "markdown", "=", "None", ",", "message_dict", "=", "None", ")", ":", "message", "=", "FiestaMessage", "(", "self", ".", "api", ",", "self", ",", "subject", ",", "text", ",", "markdown", ",", "message_dict", ")", "return", "message", ".", "send", "(", ")" ]
Helper function to send a message to a group
[ "Helper", "function", "to", "send", "a", "message", "to", "a", "group" ]
python
train
EmbodiedCognition/pagoda
pagoda/parser.py
https://github.com/EmbodiedCognition/pagoda/blob/8892f847026d98aba8646ecbc4589397e6dec7bd/pagoda/parser.py#L486-L523
def parse_amc(source): '''Parse an AMC motion capture data file. Parameters ---------- source : file A file-like object that contains AMC motion capture text. Yields ------ frame : dict Yields a series of motion capture frames. Each frame is a dictionary that maps a bone name to a list of the DOF configurations for that bone. ''' lines = 0 frames = 1 frame = {} degrees = False for line in source: lines += 1 line = line.split('#')[0].strip() if not line: continue if line.startswith(':'): if line.lower().startswith(':deg'): degrees = True continue if line.isdigit(): if int(line) != frames: raise RuntimeError( 'frame mismatch on line {}: ' 'produced {} but file claims {}'.format(lines, frames, line)) yield frame frames += 1 frame = {} continue fields = line.split() frame[fields[0]] = list(map(float, fields[1:]))
[ "def", "parse_amc", "(", "source", ")", ":", "lines", "=", "0", "frames", "=", "1", "frame", "=", "{", "}", "degrees", "=", "False", "for", "line", "in", "source", ":", "lines", "+=", "1", "line", "=", "line", ".", "split", "(", "'#'", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "not", "line", ":", "continue", "if", "line", ".", "startswith", "(", "':'", ")", ":", "if", "line", ".", "lower", "(", ")", ".", "startswith", "(", "':deg'", ")", ":", "degrees", "=", "True", "continue", "if", "line", ".", "isdigit", "(", ")", ":", "if", "int", "(", "line", ")", "!=", "frames", ":", "raise", "RuntimeError", "(", "'frame mismatch on line {}: '", "'produced {} but file claims {}'", ".", "format", "(", "lines", ",", "frames", ",", "line", ")", ")", "yield", "frame", "frames", "+=", "1", "frame", "=", "{", "}", "continue", "fields", "=", "line", ".", "split", "(", ")", "frame", "[", "fields", "[", "0", "]", "]", "=", "list", "(", "map", "(", "float", ",", "fields", "[", "1", ":", "]", ")", ")" ]
Parse an AMC motion capture data file. Parameters ---------- source : file A file-like object that contains AMC motion capture text. Yields ------ frame : dict Yields a series of motion capture frames. Each frame is a dictionary that maps a bone name to a list of the DOF configurations for that bone.
[ "Parse", "an", "AMC", "motion", "capture", "data", "file", "." ]
python
valid
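Feeding the parser above an in-memory AMC snippet (the snippet is invented, and pagoda is assumed installed). Two quirks are visible in the code as written: the first yielded frame is empty, and the final frame is never yielded because no frame-number line follows it.

import io
from pagoda.parser import parse_amc

amc = io.StringIO(
    ':FULLY-SPECIFIED\n'
    ':DEGREES\n'
    '1\n'
    'root 0 0 0 0 0 0\n'
    'lowerback 1.5 0 0\n'
    '2\n'
    'root 0 0 0.1 0 0 0\n'
    'lowerback 1.6 0 0\n'
)
frames = list(parse_amc(amc))
assert frames[0] == {}
assert frames[1]['lowerback'] == [1.5, 0.0, 0.0]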
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L4003-L4012
def markPartitionForEvent(self, db_name, tbl_name, part_vals, eventType): """ Parameters: - db_name - tbl_name - part_vals - eventType """ self.send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType) self.recv_markPartitionForEvent()
[ "def", "markPartitionForEvent", "(", "self", ",", "db_name", ",", "tbl_name", ",", "part_vals", ",", "eventType", ")", ":", "self", ".", "send_markPartitionForEvent", "(", "db_name", ",", "tbl_name", ",", "part_vals", ",", "eventType", ")", "self", ".", "recv_markPartitionForEvent", "(", ")" ]
Parameters: - db_name - tbl_name - part_vals - eventType
[ "Parameters", ":", "-", "db_name", "-", "tbl_name", "-", "part_vals", "-", "eventType" ]
python
train
apache/incubator-mxnet
example/vae-gan/vaegan_mxnet.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/vae-gan/vaegan_mxnet.py#L673-L708
def parse_args(): '''Parse args ''' parser = argparse.ArgumentParser(description='Train and Test an Adversarial Variatiional Encoder') parser.add_argument('--train', help='train the network', action='store_true') parser.add_argument('--test', help='test the network', action='store_true') parser.add_argument('--save_embedding', help='saves the shape embedding of each input image', action='store_true') parser.add_argument('--dataset', help='dataset name', default='caltech', type=str) parser.add_argument('--activation', help='activation i.e. sigmoid or tanh', default='sigmoid', type=str) parser.add_argument('--training_data_path', help='training data path', default='datasets/caltech101/data/images32x32', type=str) parser.add_argument('--testing_data_path', help='testing data path', default='datasets/caltech101/test_data', type=str) parser.add_argument('--pretrained_encoder_path', help='pretrained encoder model path', default='checkpoints32x32_sigmoid/caltech_E-0045.params', type=str) parser.add_argument('--pretrained_generator_path', help='pretrained generator model path', default='checkpoints32x32_sigmoid/caltech_G-0045.params', type=str) parser.add_argument('--output_path', help='output path for the generated images', default='outputs32x32_sigmoid', type=str) parser.add_argument('--embedding_path', help='output path for the generated embeddings', default='outputs32x32_sigmoid', type=str) parser.add_argument('--checkpoint_path', help='checkpoint saving path ', default='checkpoints32x32_sigmoid', type=str) parser.add_argument('--nef', help='encoder filter count in the first layer', default=64, type=int) parser.add_argument('--ndf', help='discriminator filter count in the first layer', default=64, type=int) parser.add_argument('--ngf', help='generator filter count in the second last layer', default=64, type=int) parser.add_argument('--nc', help='generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image', default=1, type=int) parser.add_argument('--batch_size', help='batch size, keep it 1 during testing', default=64, type=int) parser.add_argument('--Z', help='embedding size', default=100, type=int) parser.add_argument('--lr', help='learning rate', default=0.0002, type=float) parser.add_argument('--beta1', help='beta1 for adam optimizer', default=0.5, type=float) parser.add_argument('--epsilon', help='epsilon for adam optimizer', default=1e-5, type=float) parser.add_argument('--g_dl_weight', help='discriminator layer loss weight', default=1e-1, type=float) parser.add_argument('--gpu', help='gpu index', default=0, type=int) parser.add_argument('--use_cpu', help='use cpu', action='store_true') parser.add_argument('--num_epoch', help='number of maximum epochs ', default=45, type=int) parser.add_argument('--save_after_every', help='save checkpoint after every this number of epochs ', default=5, type=int) parser.add_argument('--visualize_after_every', help='save output images after every this number of epochs', default=5, type=int) parser.add_argument('--show_after_every', help='show metrics after this number of iterations', default=10, type=int) args = parser.parse_args() return args
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Train and Test an Adversarial Variatiional Encoder'", ")", "parser", ".", "add_argument", "(", "'--train'", ",", "help", "=", "'train the network'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--test'", ",", "help", "=", "'test the network'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--save_embedding'", ",", "help", "=", "'saves the shape embedding of each input image'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--dataset'", ",", "help", "=", "'dataset name'", ",", "default", "=", "'caltech'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--activation'", ",", "help", "=", "'activation i.e. sigmoid or tanh'", ",", "default", "=", "'sigmoid'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--training_data_path'", ",", "help", "=", "'training data path'", ",", "default", "=", "'datasets/caltech101/data/images32x32'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--testing_data_path'", ",", "help", "=", "'testing data path'", ",", "default", "=", "'datasets/caltech101/test_data'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--pretrained_encoder_path'", ",", "help", "=", "'pretrained encoder model path'", ",", "default", "=", "'checkpoints32x32_sigmoid/caltech_E-0045.params'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--pretrained_generator_path'", ",", "help", "=", "'pretrained generator model path'", ",", "default", "=", "'checkpoints32x32_sigmoid/caltech_G-0045.params'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--output_path'", ",", "help", "=", "'output path for the generated images'", ",", "default", "=", "'outputs32x32_sigmoid'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--embedding_path'", ",", "help", "=", "'output path for the generated embeddings'", ",", "default", "=", "'outputs32x32_sigmoid'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--checkpoint_path'", ",", "help", "=", "'checkpoint saving path '", ",", "default", "=", "'checkpoints32x32_sigmoid'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'--nef'", ",", "help", "=", "'encoder filter count in the first layer'", ",", "default", "=", "64", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--ndf'", ",", "help", "=", "'discriminator filter count in the first layer'", ",", "default", "=", "64", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--ngf'", ",", "help", "=", "'generator filter count in the second last layer'", ",", "default", "=", "64", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--nc'", ",", "help", "=", "'generator filter count in the last layer i.e. 1 for grayscale image, 3 for RGB image'", ",", "default", "=", "1", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--batch_size'", ",", "help", "=", "'batch size, keep it 1 during testing'", ",", "default", "=", "64", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--Z'", ",", "help", "=", "'embedding size'", ",", "default", "=", "100", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--lr'", ",", "help", "=", "'learning rate'", ",", "default", "=", "0.0002", ",", "type", "=", "float", ")", "parser", ".", "add_argument", "(", "'--beta1'", ",", "help", "=", "'beta1 for adam optimizer'", ",", "default", "=", "0.5", ",", "type", "=", "float", ")", "parser", ".", "add_argument", "(", "'--epsilon'", ",", "help", "=", "'epsilon for adam optimizer'", ",", "default", "=", "1e-5", ",", "type", "=", "float", ")", "parser", ".", "add_argument", "(", "'--g_dl_weight'", ",", "help", "=", "'discriminator layer loss weight'", ",", "default", "=", "1e-1", ",", "type", "=", "float", ")", "parser", ".", "add_argument", "(", "'--gpu'", ",", "help", "=", "'gpu index'", ",", "default", "=", "0", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--use_cpu'", ",", "help", "=", "'use cpu'", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--num_epoch'", ",", "help", "=", "'number of maximum epochs '", ",", "default", "=", "45", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--save_after_every'", ",", "help", "=", "'save checkpoint after every this number of epochs '", ",", "default", "=", "5", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--visualize_after_every'", ",", "help", "=", "'save output images after every this number of epochs'", ",", "default", "=", "5", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--show_after_every'", ",", "help", "=", "'show metrics after this number of iterations'", ",", "default", "=", "10", ",", "type", "=", "int", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "args" ]
Parse args
[ "Parse", "args" ]
python
train
F5Networks/f5-common-python
f5/bigip/resource.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/resource.py#L445-L456
def _iter_list_for_dicts(self, check_list): """Iterate over list to find dicts and check for python keywords.""" list_copy = copy.deepcopy(check_list) for index, elem in enumerate(check_list): if isinstance(elem, dict): list_copy[index] = self._check_for_python_keywords(elem) elif isinstance(elem, list): list_copy[index] = self._iter_list_for_dicts(elem) else: list_copy[index] = elem return list_copy
[ "def", "_iter_list_for_dicts", "(", "self", ",", "check_list", ")", ":", "list_copy", "=", "copy", ".", "deepcopy", "(", "check_list", ")", "for", "index", ",", "elem", "in", "enumerate", "(", "check_list", ")", ":", "if", "isinstance", "(", "elem", ",", "dict", ")", ":", "list_copy", "[", "index", "]", "=", "self", ".", "_check_for_python_keywords", "(", "elem", ")", "elif", "isinstance", "(", "elem", ",", "list", ")", ":", "list_copy", "[", "index", "]", "=", "self", ".", "_iter_list_for_dicts", "(", "elem", ")", "else", ":", "list_copy", "[", "index", "]", "=", "elem", "return", "list_copy" ]
Iterate over list to find dicts and check for python keywords.
[ "Iterate", "over", "list", "to", "find", "dicts", "and", "check", "for", "python", "keywords", "." ]
python
train
zvoase/django-relax
relax/viewserver.py
https://github.com/zvoase/django-relax/blob/10bb37bf3a512b290816856a6877c17fa37e930f/relax/viewserver.py#L145-L156
def handle_validate(self, function_name, new_doc, old_doc, user_ctx): """Validate...this function is undocumented, but still in CouchDB.""" try: function = get_function(function_name) except Exception, exc: self.log(repr(exc)) return False try: return function(new_doc, old_doc, user_ctx) except Exception, exc: self.log(repr(exc)) return repr(exc)
[ "def", "handle_validate", "(", "self", ",", "function_name", ",", "new_doc", ",", "old_doc", ",", "user_ctx", ")", ":", "try", ":", "function", "=", "get_function", "(", "function_name", ")", "except", "Exception", ",", "exc", ":", "self", ".", "log", "(", "repr", "(", "exc", ")", ")", "return", "False", "try", ":", "return", "function", "(", "new_doc", ",", "old_doc", ",", "user_ctx", ")", "except", "Exception", ",", "exc", ":", "self", ".", "log", "(", "repr", "(", "exc", ")", ")", "return", "repr", "(", "exc", ")" ]
Validate...this function is undocumented, but still in CouchDB.
[ "Validate", "...", "this", "function", "is", "undocumented", "but", "still", "in", "CouchDB", "." ]
python
valid
ajenhl/tacl
tacl/cli/utils.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/cli/utils.py#L108-L112
def get_ngrams(path): """Returns a list of n-grams read from the file at `path`.""" with open(path, encoding='utf-8') as fh: ngrams = [ngram.strip() for ngram in fh.readlines()] return ngrams
[ "def", "get_ngrams", "(", "path", ")", ":", "with", "open", "(", "path", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "ngrams", "=", "[", "ngram", ".", "strip", "(", ")", "for", "ngram", "in", "fh", ".", "readlines", "(", ")", "]", "return", "ngrams" ]
Returns a list of n-grams read from the file at `path`.
[ "Returns", "a", "list", "of", "n", "-", "grams", "read", "from", "the", "file", "at", "path", "." ]
python
train
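Round-tripping a small n-gram file through the helper above; a minimal sketch using a temporary file.

import tempfile
from tacl.cli.utils import get_ngrams

with tempfile.NamedTemporaryFile('w', encoding='utf-8', suffix='.txt',
                                 delete=False) as fh:
    fh.write('th\nhe\nthe\n')
    path = fh.name

assert get_ngrams(path) == ['th', 'he', 'the']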
delfick/nose-of-yeti
noseOfYeti/tokeniser/containers.py
https://github.com/delfick/nose-of-yeti/blob/0b545ff350cebd59b40b601333c13033ce40d6dc/noseOfYeti/tokeniser/containers.py#L156-L163
def super_kls(self): """ Determine what kls this group inherits from If default kls should be used, then None is returned """ if not self.kls and self.parent and self.parent.name: return self.parent.kls_name return self.kls
[ "def", "super_kls", "(", "self", ")", ":", "if", "not", "self", ".", "kls", "and", "self", ".", "parent", "and", "self", ".", "parent", ".", "name", ":", "return", "self", ".", "parent", ".", "kls_name", "return", "self", ".", "kls" ]
Determine what kls this group inherits from If default kls should be used, then None is returned
[ "Determine", "what", "kls", "this", "group", "inherits", "from", "If", "default", "kls", "should", "be", "used", "then", "None", "is", "returned" ]
python
train
dmwm/DBS
Server/Python/src/dbs/dao/Oracle/FileBuffer/DeleteDuplicates.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/FileBuffer/DeleteDuplicates.py#L21-L29
def execute(self, conn, logical_file_name, transaction=False):
        """
        simple execute
        """
        if not conn:
            dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileBuffer/DeleteDuplicates. Expects db connection from upper layer.")
        print(self.sql)
        self.dbi.processData(self.sql, logical_file_name, conn, transaction)
[ "def", "execute", "(", "self", ",", "conn", ",", "logical_file_name", ",", "transaction", "=", "False", ")", ":", "if", "not", "conn", ":", "dbsExceptionHandler", "(", "\"dbsException-db-conn-failed\"", ",", "\"Oracle/FileBuffer/DeleteDuplicates. Expects db connection from upper layer.\"", ")", "print", "(", "self", ".", "sql", ")", "self", ".", "dbi", ".", "processData", "(", "self", ".", "sql", ",", "logical_file_name", ",", "conn", ",", "transaction", ")" ]
simple execute
[ "simple", "execute" ]
python
train
fermiPy/fermipy
fermipy/gtanalysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L2001-L2016
def free_norm(self, name, free=True, **kwargs): """Free/Fix normalization of a source. Parameters ---------- name : str Source name. free : bool Choose whether to free (free=True) or fix (free=False). """ name = self.get_source_name(name) normPar = self.like.normPar(name).getName() self.free_source(name, pars=[normPar], free=free, **kwargs)
[ "def", "free_norm", "(", "self", ",", "name", ",", "free", "=", "True", ",", "*", "*", "kwargs", ")", ":", "name", "=", "self", ".", "get_source_name", "(", "name", ")", "normPar", "=", "self", ".", "like", ".", "normPar", "(", "name", ")", ".", "getName", "(", ")", "self", ".", "free_source", "(", "name", ",", "pars", "=", "[", "normPar", "]", ",", "free", "=", "free", ",", "*", "*", "kwargs", ")" ]
Free/Fix normalization of a source. Parameters ---------- name : str Source name. free : bool Choose whether to free (free=True) or fix (free=False).
[ "Free", "/", "Fix", "normalization", "of", "a", "source", "." ]
python
train
projecthamster/hamster
src/hamster/lib/graphics.py
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/graphics.py#L321-L323
def curve_to(self, x, y, x2, y2, x3, y3): """draw a curve. (x2, y2) is the middle point of the curve""" self._add_instruction("curve_to", x, y, x2, y2, x3, y3)
[ "def", "curve_to", "(", "self", ",", "x", ",", "y", ",", "x2", ",", "y2", ",", "x3", ",", "y3", ")", ":", "self", ".", "_add_instruction", "(", "\"curve_to\"", ",", "x", ",", "y", ",", "x2", ",", "y2", ",", "x3", ",", "y3", ")" ]
draw a curve. (x2, y2) is the middle point of the curve
[ "draw", "a", "curve", ".", "(", "x2", "y2", ")", "is", "the", "middle", "point", "of", "the", "curve" ]
python
train
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L684-L694
def get_learning_objectives_metadata(self): """Gets the metadata for learning objectives. return: (osid.Metadata) - metadata for the learning objectives *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template metadata = dict(self._mdata['learning_objectives']) metadata.update({'existing_learning_objectives_values': self._my_map['learningObjectiveIds']}) return Metadata(**metadata)
[ "def", "get_learning_objectives_metadata", "(", "self", ")", ":", "# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'learning_objectives'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_learning_objectives_values'", ":", "self", ".", "_my_map", "[", "'learningObjectiveIds'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
Gets the metadata for learning objectives. return: (osid.Metadata) - metadata for the learning objectives *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "metadata", "for", "learning", "objectives", "." ]
python
train
log2timeline/plaso
plaso/analysis/interface.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/interface.py#L251-L265
def _LogProgressUpdateIfReasonable(self): """Prints a progress update if enough time has passed.""" next_log_time = ( self._time_of_last_status_log + self.SECONDS_BETWEEN_STATUS_LOG_MESSAGES) current_time = time.time() if current_time < next_log_time: return completion_time = time.ctime(current_time + self.EstimateTimeRemaining()) log_message = ( '{0:s} hash analysis plugin running. {1:d} hashes in queue, ' 'estimated completion time {2:s}.'.format( self.NAME, self.hash_queue.qsize(), completion_time)) logger.info(log_message) self._time_of_last_status_log = current_time
[ "def", "_LogProgressUpdateIfReasonable", "(", "self", ")", ":", "next_log_time", "=", "(", "self", ".", "_time_of_last_status_log", "+", "self", ".", "SECONDS_BETWEEN_STATUS_LOG_MESSAGES", ")", "current_time", "=", "time", ".", "time", "(", ")", "if", "current_time", "<", "next_log_time", ":", "return", "completion_time", "=", "time", ".", "ctime", "(", "current_time", "+", "self", ".", "EstimateTimeRemaining", "(", ")", ")", "log_message", "=", "(", "'{0:s} hash analysis plugin running. {1:d} hashes in queue, '", "'estimated completion time {2:s}.'", ".", "format", "(", "self", ".", "NAME", ",", "self", ".", "hash_queue", ".", "qsize", "(", ")", ",", "completion_time", ")", ")", "logger", ".", "info", "(", "log_message", ")", "self", ".", "_time_of_last_status_log", "=", "current_time" ]
Prints a progress update if enough time has passed.
[ "Prints", "a", "progress", "update", "if", "enough", "time", "has", "passed", "." ]
python
train
SolutionsCloud/apidoc
apidoc/factory/source/responseCode.py
https://github.com/SolutionsCloud/apidoc/blob/1ee25d886a5bea11dc744c2f3d0abb0b55d942e1/apidoc/factory/source/responseCode.py#L54-L71
def create_from_dictionary(self, datas): """Return a populated object ResponseCode from dictionary datas """ if "code" not in datas: raise ValueError("A response code must contain a code in \"%s\"." % repr(datas)) code = ObjectResponseCode() self.set_common_datas(code, str(datas["code"]), datas) code.code = int(datas["code"]) if "message" in datas: code.message = str(datas["message"]) elif code.code in self.default_messages.keys(): code.message = self.default_messages[code.code] if "generic" in datas: code.generic = to_boolean(datas["generic"]) return code
[ "def", "create_from_dictionary", "(", "self", ",", "datas", ")", ":", "if", "\"code\"", "not", "in", "datas", ":", "raise", "ValueError", "(", "\"A response code must contain a code in \\\"%s\\\".\"", "%", "repr", "(", "datas", ")", ")", "code", "=", "ObjectResponseCode", "(", ")", "self", ".", "set_common_datas", "(", "code", ",", "str", "(", "datas", "[", "\"code\"", "]", ")", ",", "datas", ")", "code", ".", "code", "=", "int", "(", "datas", "[", "\"code\"", "]", ")", "if", "\"message\"", "in", "datas", ":", "code", ".", "message", "=", "str", "(", "datas", "[", "\"message\"", "]", ")", "elif", "code", ".", "code", "in", "self", ".", "default_messages", ".", "keys", "(", ")", ":", "code", ".", "message", "=", "self", ".", "default_messages", "[", "code", ".", "code", "]", "if", "\"generic\"", "in", "datas", ":", "code", ".", "generic", "=", "to_boolean", "(", "datas", "[", "\"generic\"", "]", ")", "return", "code" ]
Return a populated object ResponseCode from dictionary datas
[ "Return", "a", "populated", "object", "ResponseCode", "from", "dictionary", "datas" ]
python
train
bcbio/bcbio-nextgen
bcbio/pipeline/config_utils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/config_utils.py#L84-L110
def _merge_system_configs(host_config, container_config, out_file=None): """Create a merged system configuration from external and internal specification. """ out = copy.deepcopy(container_config) for k, v in host_config.items(): if k in set(["galaxy_config"]): out[k] = v elif k == "resources": for pname, resources in v.items(): if not isinstance(resources, dict) and pname not in out[k]: out[k][pname] = resources else: for rname, rval in resources.items(): if (rname in set(["cores", "jvm_opts", "memory"]) or pname in set(["gatk", "mutect"])): if pname not in out[k]: out[k][pname] = {} out[k][pname][rname] = rval # Ensure final file is relocatable by mapping back to reference directory if "bcbio_system" in out and ("galaxy_config" not in out or not os.path.isabs(out["galaxy_config"])): out["galaxy_config"] = os.path.normpath(os.path.join(os.path.dirname(out["bcbio_system"]), os.pardir, "galaxy", "universe_wsgi.ini")) if out_file: with open(out_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) return out
[ "def", "_merge_system_configs", "(", "host_config", ",", "container_config", ",", "out_file", "=", "None", ")", ":", "out", "=", "copy", ".", "deepcopy", "(", "container_config", ")", "for", "k", ",", "v", "in", "host_config", ".", "items", "(", ")", ":", "if", "k", "in", "set", "(", "[", "\"galaxy_config\"", "]", ")", ":", "out", "[", "k", "]", "=", "v", "elif", "k", "==", "\"resources\"", ":", "for", "pname", ",", "resources", "in", "v", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "resources", ",", "dict", ")", "and", "pname", "not", "in", "out", "[", "k", "]", ":", "out", "[", "k", "]", "[", "pname", "]", "=", "resources", "else", ":", "for", "rname", ",", "rval", "in", "resources", ".", "items", "(", ")", ":", "if", "(", "rname", "in", "set", "(", "[", "\"cores\"", ",", "\"jvm_opts\"", ",", "\"memory\"", "]", ")", "or", "pname", "in", "set", "(", "[", "\"gatk\"", ",", "\"mutect\"", "]", ")", ")", ":", "if", "pname", "not", "in", "out", "[", "k", "]", ":", "out", "[", "k", "]", "[", "pname", "]", "=", "{", "}", "out", "[", "k", "]", "[", "pname", "]", "[", "rname", "]", "=", "rval", "# Ensure final file is relocatable by mapping back to reference directory", "if", "\"bcbio_system\"", "in", "out", "and", "(", "\"galaxy_config\"", "not", "in", "out", "or", "not", "os", ".", "path", ".", "isabs", "(", "out", "[", "\"galaxy_config\"", "]", ")", ")", ":", "out", "[", "\"galaxy_config\"", "]", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "out", "[", "\"bcbio_system\"", "]", ")", ",", "os", ".", "pardir", ",", "\"galaxy\"", ",", "\"universe_wsgi.ini\"", ")", ")", "if", "out_file", ":", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "out_handle", ":", "yaml", ".", "safe_dump", "(", "out", ",", "out_handle", ",", "default_flow_style", "=", "False", ",", "allow_unicode", "=", "False", ")", "return", "out" ]
Create a merged system configuration from external and internal specification.
[ "Create", "a", "merged", "system", "configuration", "from", "external", "and", "internal", "specification", "." ]
python
train
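A minimal sketch of the merge precedence for the whitelisted resource keys; the configs are invented and the call assumes bcbio is installed.

from bcbio.pipeline.config_utils import _merge_system_configs

container = {'resources': {'gatk': {'jvm_opts': ['-Xms500m', '-Xmx2g']}}}
host = {'resources': {'gatk': {'jvm_opts': ['-Xms1g', '-Xmx8g']}}}

merged = _merge_system_configs(host, container)
# Host-side values for cores/jvm_opts/memory (and gatk/mutect) win.
assert merged['resources']['gatk']['jvm_opts'] == ['-Xms1g', '-Xmx8g']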
mitsei/dlkit
dlkit/json_/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/managers.py#L1068-L1083
def get_repository_admin_session(self):
        """Gets the repository administrative session for creating, updating and deleting repositories.

        return: (osid.repository.RepositoryAdminSession) - a
                ``RepositoryAdminSession``
        raise: OperationFailed - unable to complete request
        raise: Unimplemented - ``supports_repository_admin()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_repository_admin()`` is ``true``.*

        """
        if not self.supports_repository_admin():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.RepositoryAdminSession(runtime=self._runtime)
[ "def", "get_repository_admin_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_repository_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "RepositoryAdminSession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the repository administrative session for creating, updating and deleting repositories.

return: (osid.repository.RepositoryAdminSession) - a ``RepositoryAdminSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_repository_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_repository_admin()`` is ``true``.*
[ "Gets", "the", "repository", "administrative", "session", "for", "creating", "updating", "and", "deleting", "repositories", "." ]
python
train
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L218-L230
def filler(self): """Returns the pipeline ID that filled this slot's value. Returns: A string that is the pipeline ID. Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._filler_pipeline_key.name()
[ "def", "filler", "(", "self", ")", ":", "if", "not", "self", ".", "filled", ":", "raise", "SlotNotFilledError", "(", "'Slot with name \"%s\", key \"%s\" not yet filled.'", "%", "(", "self", ".", "name", ",", "self", ".", "key", ")", ")", "return", "self", ".", "_filler_pipeline_key", ".", "name", "(", ")" ]
Returns the pipeline ID that filled this slot's value. Returns: A string that is the pipeline ID. Raises: SlotNotFilledError if the value hasn't been filled yet.
[ "Returns", "the", "pipeline", "ID", "that", "filled", "this", "slot", "s", "value", "." ]
python
train
pyBookshelf/bookshelf
bookshelf/api_v2/os_helpers.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L106-L117
def disable_env_reset_on_sudo(log=False): """ updates /etc/sudoers so that users from %wheel keep their environment when executing a sudo call """ if log: bookshelf2.logging_helpers.log_green('disabling env reset on sudo') file_append('/etc/sudoers', 'Defaults:%wheel !env_reset,!secure_path', use_sudo=True, partial=True) return True
[ "def", "disable_env_reset_on_sudo", "(", "log", "=", "False", ")", ":", "if", "log", ":", "bookshelf2", ".", "logging_helpers", ".", "log_green", "(", "'disabling env reset on sudo'", ")", "file_append", "(", "'/etc/sudoers'", ",", "'Defaults:%wheel !env_reset,!secure_path'", ",", "use_sudo", "=", "True", ",", "partial", "=", "True", ")", "return", "True" ]
updates /etc/sudoers so that users from %wheel keep their environment when executing a sudo call
[ "updates", "/", "etc", "/", "sudoers", "so", "that", "users", "from", "%wheel", "keep", "their", "environment", "when", "executing", "a", "sudo", "call" ]
python
train
fermiPy/fermipy
fermipy/gtanalysis.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L166-L177
def get_spectral_index(src, egy): """Compute the local spectral index of a source.""" delta = 1E-5 f0 = src.spectrum()(pyLike.dArg(egy * (1 - delta))) f1 = src.spectrum()(pyLike.dArg(egy * (1 + delta))) if f0 > 0 and f1 > 0: gamma = np.log10(f0 / f1) / np.log10((1 - delta) / (1 + delta)) else: gamma = np.nan return gamma
[ "def", "get_spectral_index", "(", "src", ",", "egy", ")", ":", "delta", "=", "1E-5", "f0", "=", "src", ".", "spectrum", "(", ")", "(", "pyLike", ".", "dArg", "(", "egy", "*", "(", "1", "-", "delta", ")", ")", ")", "f1", "=", "src", ".", "spectrum", "(", ")", "(", "pyLike", ".", "dArg", "(", "egy", "*", "(", "1", "+", "delta", ")", ")", ")", "if", "f0", ">", "0", "and", "f1", ">", "0", ":", "gamma", "=", "np", ".", "log10", "(", "f0", "/", "f1", ")", "/", "np", ".", "log10", "(", "(", "1", "-", "delta", ")", "/", "(", "1", "+", "delta", ")", ")", "else", ":", "gamma", "=", "np", ".", "nan", "return", "gamma" ]
Compute the local spectral index of a source.
[ "Compute", "the", "local", "spectral", "index", "of", "a", "source", "." ]
python
train
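A self-contained check of the finite-difference formula above, with a plain power law standing in for the pyLike spectrum object: for F(E) = K * E**-2 the returned value is the logarithmic slope, i.e. about -2. This is a sketch of the arithmetic only, not the fermipy API.

import numpy as np

def local_log_slope(flux, egy, delta=1e-5):
    # Same two-sided ratio as get_spectral_index, minus the pyLike plumbing.
    f0 = flux(egy * (1 - delta))
    f1 = flux(egy * (1 + delta))
    return np.log10(f0 / f1) / np.log10((1 - delta) / (1 + delta))

power_law = lambda e: 1e-9 * e ** -2.0
print(local_log_slope(power_law, 1000.0))  # ~ -2.0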
eumis/pyviews
pyviews/core/observable.py
https://github.com/eumis/pyviews/blob/80a868242ee9cdc6f4ded594b3e0544cc238ed55/pyviews/core/observable.py#L124-L126
def release_all(self, callback: Callable[[str, Any, Any], None]):
        """Releases the callback from all key changes"""
        self._all_callbacks.remove(callback)
[ "def", "release_all", "(", "self", ",", "callback", ":", "Callable", "[", "[", "str", ",", "Any", ",", "Any", "]", ",", "None", "]", ")", ":", "self", ".", "_all_callbacks", ".", "remove", "(", "callback", ")" ]
Releases the callback from all key changes
[ "Releases", "the", "callback", "from", "all", "key", "changes" ]
python
train
pyGrowler/Growler
growler/http/response.py
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L120-L127
def redirect(self, url, status=None): """ Redirect to the specified url, optional status code defaults to 302. """ self.status_code = 302 if status is None else status self.headers = Headers([('location', url)]) self.message = '' self.end()
[ "def", "redirect", "(", "self", ",", "url", ",", "status", "=", "None", ")", ":", "self", ".", "status_code", "=", "302", "if", "status", "is", "None", "else", "status", "self", ".", "headers", "=", "Headers", "(", "[", "(", "'location'", ",", "url", ")", "]", ")", "self", ".", "message", "=", "''", "self", ".", "end", "(", ")" ]
Redirect to the specified url, optional status code defaults to 302.
[ "Redirect", "to", "the", "specified", "url", "optional", "status", "code", "defaults", "to", "302", "." ]
python
train
ibis-project/ibis
ibis/expr/groupby.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/groupby.py#L98-L119
def having(self, expr): """ Add a post-aggregation result filter (like the having argument in `aggregate`), for composability with the group_by API Parameters ---------- expr : ibis.expr.types.Expr Returns ------- grouped : GroupedTableExpr """ exprs = util.promote_list(expr) new_having = self._having + exprs return GroupedTableExpr( self.table, self.by, having=new_having, order_by=self._order_by, window=self._window, )
[ "def", "having", "(", "self", ",", "expr", ")", ":", "exprs", "=", "util", ".", "promote_list", "(", "expr", ")", "new_having", "=", "self", ".", "_having", "+", "exprs", "return", "GroupedTableExpr", "(", "self", ".", "table", ",", "self", ".", "by", ",", "having", "=", "new_having", ",", "order_by", "=", "self", ".", "_order_by", ",", "window", "=", "self", ".", "_window", ",", ")" ]
Add a post-aggregation result filter (like the having argument in `aggregate`), for composability with the group_by API Parameters ---------- expr : ibis.expr.types.Expr Returns ------- grouped : GroupedTableExpr
[ "Add", "a", "post", "-", "aggregation", "result", "filter", "(", "like", "the", "having", "argument", "in", "aggregate", ")", "for", "composability", "with", "the", "group_by", "API" ]
python
train
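A sketch of the group_by/having chain on an unbound ibis table; the schema and threshold are invented for illustration.

import ibis

t = ibis.table([('key', 'string'), ('value', 'double')], name='t')

expr = (
    t.group_by('key')
     .having(t.value.max() > 10)
     .aggregate([t.value.sum().name('total')])
)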
PMEAL/porespy
porespy/tools/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/tools/__funcs__.py#L510-L575
def randomize_colors(im, keep_vals=[0]):
    r'''
    Takes a greyscale image and randomly shuffles the greyscale values, so
    that all voxels labeled X will be labelled Y, and all voxels labeled Y
    will be labeled Z, where X, Y, Z and so on are randomly selected from
    the values in the input image.

    This function is useful for improving the visibility of images with
    neighboring regions that are only incrementally different from each
    other, such as that returned by `scipy.ndimage.label`.

    Parameters
    ----------
    im : array_like
        An ND image of greyscale values.

    keep_vals : array_like
        Indicate which voxel values should NOT be altered.  The default is
        `[0]` which is useful for leaving the background of the image
        untouched.

    Returns
    -------
    image : ND-array
        An image the same size and type as ``im`` but with the greyscale
        values reassigned.  The unique values in both the input and output
        images will be identical.

    Notes
    -----
    If the greyscale values in the input image are not contiguous then
    neither will they be in the output.

    Examples
    --------
    >>> import porespy as ps
    >>> import scipy as sp
    >>> sp.random.seed(0)
    >>> im = sp.random.randint(low=0, high=5, size=[4, 4])
    >>> print(im)
    [[4 0 3 3]
     [3 1 3 2]
     [4 0 0 4]
     [2 1 0 1]]
    >>> im_rand = ps.tools.randomize_colors(im)
    >>> print(im_rand)
    [[2 0 4 4]
     [4 1 4 3]
     [2 0 0 2]
     [3 1 0 1]]

    As can be seen, the 2's have become 3, 3's have become 4, and 4's have
    become 2.  1's remained 1 by random accident.  0's remain zeros by
    default, but this can be controlled using the `keep_vals` argument.

    '''
    im_flat = im.flatten()
    keep_vals = sp.array(keep_vals)
    swap_vals = ~sp.in1d(im_flat, keep_vals)
    im_vals = sp.unique(im_flat[swap_vals])
    new_vals = sp.random.permutation(im_vals)
    im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)
    im_map[im_vals] = new_vals
    im_new = im_map[im_flat]
    im_new = sp.reshape(im_new, newshape=sp.shape(im))
    return im_new
[ "def", "randomize_colors", "(", "im", ",", "keep_vals", "=", "[", "0", "]", ")", ":", "im_flat", "=", "im", ".", "flatten", "(", ")", "keep_vals", "=", "sp", ".", "array", "(", "keep_vals", ")", "swap_vals", "=", "~", "sp", ".", "in1d", "(", "im_flat", ",", "keep_vals", ")", "im_vals", "=", "sp", ".", "unique", "(", "im_flat", "[", "swap_vals", "]", ")", "new_vals", "=", "sp", ".", "random", ".", "permutation", "(", "im_vals", ")", "im_map", "=", "sp", ".", "zeros", "(", "shape", "=", "[", "sp", ".", "amax", "(", "im_vals", ")", "+", "1", ",", "]", ",", "dtype", "=", "int", ")", "im_map", "[", "im_vals", "]", "=", "new_vals", "im_new", "=", "im_map", "[", "im_flat", "]", "im_new", "=", "sp", ".", "reshape", "(", "im_new", ",", "newshape", "=", "sp", ".", "shape", "(", "im", ")", ")", "return", "im_new" ]
r''' Takes a greyscale image and randomly shuffles the greyscale values, so that all voxels labeled X will be labelled Y, and all voxels labeled Y will be labeled Z, where X, Y, Z and so on are randomly selected from the values in the input image.

    This function is useful for improving the visibility of images with
    neighboring regions that are only incrementally different from each
    other, such as that returned by `scipy.ndimage.label`.

    Parameters
    ----------
    im : array_like
        An ND image of greyscale values.

    keep_vals : array_like
        Indicate which voxel values should NOT be altered.  The default is
        `[0]` which is useful for leaving the background of the image
        untouched.

    Returns
    -------
    image : ND-array
        An image the same size and type as ``im`` but with the greyscale
        values reassigned.  The unique values in both the input and output
        images will be identical.

    Notes
    -----
    If the greyscale values in the input image are not contiguous then
    neither will they be in the output.

    Examples
    --------
    >>> import porespy as ps
    >>> import scipy as sp
    >>> sp.random.seed(0)
    >>> im = sp.random.randint(low=0, high=5, size=[4, 4])
    >>> print(im)
    [[4 0 3 3]
     [3 1 3 2]
     [4 0 0 4]
     [2 1 0 1]]
    >>> im_rand = ps.tools.randomize_colors(im)
    >>> print(im_rand)
    [[2 0 4 4]
     [4 1 4 3]
     [2 0 0 2]
     [3 1 0 1]]

    As can be seen, the 2's have become 3, 3's have become 4, and 4's have
    become 2.  1's remained 1 by random accident.  0's remain zeros by
    default, but this can be controlled using the `keep_vals` argument.
[ "r", "Takes", "a", "greyscale", "image", "and", "randomly", "shuffles", "the", "greyscale", "values", "so", "that", "all", "voxels", "labeled", "X", "will", "be", "labelled", "Y", "and", "all", "voxels", "labeled", "Y", "will", "be", "labeled", "Z", "where", "X", "Y", "Z", "and", "so", "on", "are", "randomly", "selected", "from", "the", "values", "in", "the", "input", "image", "." ]
python
train
uogbuji/amara3-xml
pylib/uxml/uxpath/functions.py
https://github.com/uogbuji/amara3-xml/blob/88c18876418cffc89bb85b4a3193e5002b6b39a6/pylib/uxml/uxpath/functions.py#L182-L190
def string_length(ctx, s=None): ''' Yields one number ''' if s is None: s = ctx.node elif callable(s): s = next(s.compute(ctx), '') yield len(s)
[ "def", "string_length", "(", "ctx", ",", "s", "=", "None", ")", ":", "if", "s", "is", "None", ":", "s", "=", "ctx", ".", "node", "elif", "callable", "(", "s", ")", ":", "s", "=", "next", "(", "s", ".", "compute", "(", "ctx", ")", ",", "''", ")", "yield", "len", "(", "s", ")" ]
Yields one number
[ "Yields", "one", "number" ]
python
test
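The uxpath convention above is that functions yield results rather than return them, so callers pull values with next(). A tiny standalone check, with a stand-in object playing the evaluator's real context (the Ctx class is invented for illustration):

class Ctx:
    node = 'hello'   # string_length falls back to ctx.node when s is None

assert next(string_length(Ctx())) == 5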
autokey/autokey
lib/autokey/model.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/model.py#L248-L261
def _case_insensitive_rpartition(input_string: str, separator: str) -> typing.Tuple[str, str, str]: """Same as str.rpartition(), except that the partitioning is done case insensitive.""" lowered_input_string = input_string.lower() lowered_separator = separator.lower() try: split_index = lowered_input_string.rindex(lowered_separator) except ValueError: # Did not find the separator in the input_string. # Follow https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str # str.rpartition documentation and return the tuple ("", "", unmodified_input) in this case return "", "", input_string else: split_index_2 = split_index+len(separator) return input_string[:split_index], input_string[split_index: split_index_2], input_string[split_index_2:]
[ "def", "_case_insensitive_rpartition", "(", "input_string", ":", "str", ",", "separator", ":", "str", ")", "->", "typing", ".", "Tuple", "[", "str", ",", "str", ",", "str", "]", ":", "lowered_input_string", "=", "input_string", ".", "lower", "(", ")", "lowered_separator", "=", "separator", ".", "lower", "(", ")", "try", ":", "split_index", "=", "lowered_input_string", ".", "rindex", "(", "lowered_separator", ")", "except", "ValueError", ":", "# Did not find the separator in the input_string.", "# Follow https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str", "# str.rpartition documentation and return the tuple (\"\", \"\", unmodified_input) in this case", "return", "\"\"", ",", "\"\"", ",", "input_string", "else", ":", "split_index_2", "=", "split_index", "+", "len", "(", "separator", ")", "return", "input_string", "[", ":", "split_index", "]", ",", "input_string", "[", "split_index", ":", "split_index_2", "]", ",", "input_string", "[", "split_index_2", ":", "]" ]
Same as str.rpartition(), except that the partitioning is done case insensitive.
[ "Same", "as", "str", ".", "rpartition", "()", "except", "that", "the", "partitioning", "is", "done", "case", "insensitive", "." ]
python
train
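A quick behaviour check for the case-insensitive rpartition above (inputs invented; the second line exercises the str.rpartition-style not-found contract from the docstring):

# match is located case-insensitively, but the returned pieces keep the
# original casing of the input string
assert _case_insensitive_rpartition('FooBARbazBarQux', 'bar') == ('FooBARbaz', 'Bar', 'Qux')
assert _case_insensitive_rpartition('abc', 'x') == ('', '', 'abc')  # separator absent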
cbclab/MOT
mot/optimize/__init__.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/optimize/__init__.py#L133-L183
def maximize(func, x0, nmr_observations, **kwargs): """Maximization of a function. This wraps the objective function to take the negative of the computed values and passes it then on to one of the minimization routines. Args: func (mot.lib.cl_function.CLFunction): A CL function with the signature: .. code-block:: c double <func_name>(local const mot_float_type* const x, void* data, local mot_float_type* objective_list); The objective list needs to be filled when the provided pointer is not null. It should contain the function values for each observation. This list is used by non-linear least-squares routines, and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine. x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p' independent variables. nmr_observations (int): the number of observations returned by the optimization function. **kwargs: see :func:`minimize`. """ wrapped_func = SimpleCLFunction.from_string(''' double _negate_''' + func.get_cl_function_name() + '''( local mot_float_type* x, void* data, local mot_float_type* objective_list){ double return_val = ''' + func.get_cl_function_name() + '''(x, data, objective_list); if(objective_list){ const uint nmr_observations = ''' + str(nmr_observations) + '''; uint local_id = get_local_id(0); uint workgroup_size = get_local_size(0); uint observation_ind; for(uint i = 0; i < (nmr_observations + workgroup_size - 1) / workgroup_size; i++){ observation_ind = i * workgroup_size + local_id; if(observation_ind < nmr_observations){ objective_list[observation_ind] *= -1; } } } return -return_val; } ''', dependencies=[func]) kwargs['nmr_observations'] = nmr_observations return minimize(wrapped_func, x0, **kwargs)
[ "def", "maximize", "(", "func", ",", "x0", ",", "nmr_observations", ",", "*", "*", "kwargs", ")", ":", "wrapped_func", "=", "SimpleCLFunction", ".", "from_string", "(", "'''\n double _negate_'''", "+", "func", ".", "get_cl_function_name", "(", ")", "+", "'''(\n local mot_float_type* x,\n void* data, \n local mot_float_type* objective_list){\n\n double return_val = '''", "+", "func", ".", "get_cl_function_name", "(", ")", "+", "'''(x, data, objective_list); \n\n if(objective_list){\n const uint nmr_observations = '''", "+", "str", "(", "nmr_observations", ")", "+", "''';\n uint local_id = get_local_id(0);\n uint workgroup_size = get_local_size(0);\n\n uint observation_ind;\n for(uint i = 0; i < (nmr_observations + workgroup_size - 1) / workgroup_size; i++){\n observation_ind = i * workgroup_size + local_id;\n\n if(observation_ind < nmr_observations){\n objective_list[observation_ind] *= -1; \n }\n }\n }\n return -return_val;\n }\n '''", ",", "dependencies", "=", "[", "func", "]", ")", "kwargs", "[", "'nmr_observations'", "]", "=", "nmr_observations", "return", "minimize", "(", "wrapped_func", ",", "x0", ",", "*", "*", "kwargs", ")" ]
Maximization of a function. This wraps the objective function to take the negative of the computed values and passes it then on to one of the minimization routines. Args: func (mot.lib.cl_function.CLFunction): A CL function with the signature: .. code-block:: c double <func_name>(local const mot_float_type* const x, void* data, local mot_float_type* objective_list); The objective list needs to be filled when the provided pointer is not null. It should contain the function values for each observation. This list is used by non-linear least-squares routines, and will be squared by the least-square optimizer. This is only used by the ``Levenberg-Marquardt`` routine. x0 (ndarray): Initial guess. Array of real elements of size (n, p), for 'n' problems and 'p' independent variables. nmr_observations (int): the number of observations returned by the optimization function. **kwargs: see :func:`minimize`.
[ "Maximization", "of", "a", "function", "." ]
python
train
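The wrapper above is the usual negate-and-minimize trick. The same idea stripped of MOT's OpenCL machinery, sketched with scipy purely for illustration (scipy is an assumption here and is not what the library dispatches to):

import numpy as np
from scipy.optimize import minimize as sp_minimize

g = lambda x: -(x[0] - 3.0) ** 2 + 5.0          # function we want to maximize
res = sp_minimize(lambda x: -g(x), x0=np.array([0.0]))  # minimize the negation
assert abs(res.x[0] - 3.0) < 1e-4               # argmax of g is recovered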
TomAugspurger/engarde
engarde/generic.py
https://github.com/TomAugspurger/engarde/blob/e7ea040cf0d20aee7ca4375b8c27caa2d9e43945/engarde/generic.py#L40-L52
def verify_all(df, check, *args, **kwargs): """ Verify that all the entries in ``check(df, *args, **kwargs)`` are true. """ result = check(df, *args, **kwargs) try: assert np.all(result) except AssertionError as e: msg = "{} not true for all".format(check.__name__) e.args = (msg, df[~result]) raise return df
[ "def", "verify_all", "(", "df", ",", "check", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "check", "(", "df", ",", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "assert", "np", ".", "all", "(", "result", ")", "except", "AssertionError", "as", "e", ":", "msg", "=", "\"{} not true for all\"", ".", "format", "(", "check", ".", "__name__", ")", "e", ".", "args", "=", "(", "msg", ",", "df", "[", "~", "result", "]", ")", "raise", "return", "df" ]
Verify that all the entries in ``check(df, *args, **kwargs)`` are true.
[ "Verify", "that", "all", "the", "entries", "in", "check", "(", "df", "*", "args", "**", "kwargs", ")", "are", "true", "." ]
python
train
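A short pandas round trip through both branches of verify_all (the frame is hypothetical, and it assumes the function above is importable, e.g. from engarde.generic):

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
verify_all(df, lambda d: d['a'] > 0)        # passes, returns df for chaining
try:
    verify_all(df, lambda d: d['a'] > 1)    # row 0 fails the check
except AssertionError as e:
    print(e.args[0])                        # '<lambda> not true for all'
    print(e.args[1])                        # the offending rows, df[~result]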
DataDog/integrations-core
vsphere/datadog_checks/vsphere/metadata_cache.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/vsphere/datadog_checks/vsphere/metadata_cache.py#L40-L45
def set_metadata(self, key, metadata): """ Store the metadata for the given instance key. """ with self._lock: self._metadata[key] = metadata
[ "def", "set_metadata", "(", "self", ",", "key", ",", "metadata", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "_metadata", "[", "key", "]", "=", "metadata" ]
Store the metadata for the given instance key.
[ "Store", "the", "metadata", "for", "the", "given", "instance", "key", "." ]
python
train
wbond/oscrypto
oscrypto/_osx/asymmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/asymmetric.py#L694-L782
def _load_key(key_object): """ Common code to load public and private keys into PublicKey and PrivateKey objects :param key_object: An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo object :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library OSError - when an error is returned by the OS crypto library :return: A PublicKey or PrivateKey object """ if key_object.algorithm == 'ec': curve_type, details = key_object.curve if curve_type != 'named': raise AsymmetricKeyError('OS X only supports EC keys using named curves') if details not in set(['secp256r1', 'secp384r1', 'secp521r1']): raise AsymmetricKeyError(pretty_message( ''' OS X only supports EC keys using the named curves secp256r1, secp384r1 and secp521r1 ''' )) elif key_object.algorithm == 'dsa' and key_object.hash_algo == 'sha2': raise AsymmetricKeyError(pretty_message( ''' OS X only supports DSA keys based on SHA1 (2048 bits or less) - this key is based on SHA2 and is %s bits ''', key_object.bit_size )) elif key_object.algorithm == 'dsa' and key_object.hash_algo is None: raise IncompleteAsymmetricKeyError(pretty_message( ''' The DSA key does not contain the necessary p, q and g parameters and can not be used ''' )) if isinstance(key_object, keys.PublicKeyInfo): source = key_object.dump() key_class = Security.kSecAttrKeyClassPublic else: source = key_object.unwrap().dump() key_class = Security.kSecAttrKeyClassPrivate cf_source = None cf_dict = None cf_output = None try: cf_source = CFHelpers.cf_data_from_bytes(source) key_type = { 'dsa': Security.kSecAttrKeyTypeDSA, 'ec': Security.kSecAttrKeyTypeECDSA, 'rsa': Security.kSecAttrKeyTypeRSA, }[key_object.algorithm] cf_dict = CFHelpers.cf_dictionary_from_pairs([ (Security.kSecAttrKeyType, key_type), (Security.kSecAttrKeyClass, key_class), (Security.kSecAttrCanSign, CoreFoundation.kCFBooleanTrue), (Security.kSecAttrCanVerify, CoreFoundation.kCFBooleanTrue), ]) error_pointer = new(CoreFoundation, 'CFErrorRef *') sec_key_ref = Security.SecKeyCreateFromData(cf_dict, cf_source, error_pointer) handle_cf_error(error_pointer) if key_class == Security.kSecAttrKeyClassPublic: return PublicKey(sec_key_ref, key_object) if key_class == Security.kSecAttrKeyClassPrivate: return PrivateKey(sec_key_ref, key_object) finally: if cf_source: CoreFoundation.CFRelease(cf_source) if cf_dict: CoreFoundation.CFRelease(cf_dict) if cf_output: CoreFoundation.CFRelease(cf_output)
[ "def", "_load_key", "(", "key_object", ")", ":", "if", "key_object", ".", "algorithm", "==", "'ec'", ":", "curve_type", ",", "details", "=", "key_object", ".", "curve", "if", "curve_type", "!=", "'named'", ":", "raise", "AsymmetricKeyError", "(", "'OS X only supports EC keys using named curves'", ")", "if", "details", "not", "in", "set", "(", "[", "'secp256r1'", ",", "'secp384r1'", ",", "'secp521r1'", "]", ")", ":", "raise", "AsymmetricKeyError", "(", "pretty_message", "(", "'''\n OS X only supports EC keys using the named curves secp256r1,\n secp384r1 and secp521r1\n '''", ")", ")", "elif", "key_object", ".", "algorithm", "==", "'dsa'", "and", "key_object", ".", "hash_algo", "==", "'sha2'", ":", "raise", "AsymmetricKeyError", "(", "pretty_message", "(", "'''\n OS X only supports DSA keys based on SHA1 (2048 bits or less) - this\n key is based on SHA2 and is %s bits\n '''", ",", "key_object", ".", "bit_size", ")", ")", "elif", "key_object", ".", "algorithm", "==", "'dsa'", "and", "key_object", ".", "hash_algo", "is", "None", ":", "raise", "IncompleteAsymmetricKeyError", "(", "pretty_message", "(", "'''\n The DSA key does not contain the necessary p, q and g parameters\n and can not be used\n '''", ")", ")", "if", "isinstance", "(", "key_object", ",", "keys", ".", "PublicKeyInfo", ")", ":", "source", "=", "key_object", ".", "dump", "(", ")", "key_class", "=", "Security", ".", "kSecAttrKeyClassPublic", "else", ":", "source", "=", "key_object", ".", "unwrap", "(", ")", ".", "dump", "(", ")", "key_class", "=", "Security", ".", "kSecAttrKeyClassPrivate", "cf_source", "=", "None", "cf_dict", "=", "None", "cf_output", "=", "None", "try", ":", "cf_source", "=", "CFHelpers", ".", "cf_data_from_bytes", "(", "source", ")", "key_type", "=", "{", "'dsa'", ":", "Security", ".", "kSecAttrKeyTypeDSA", ",", "'ec'", ":", "Security", ".", "kSecAttrKeyTypeECDSA", ",", "'rsa'", ":", "Security", ".", "kSecAttrKeyTypeRSA", ",", "}", "[", "key_object", ".", "algorithm", "]", "cf_dict", "=", "CFHelpers", ".", "cf_dictionary_from_pairs", "(", "[", "(", "Security", ".", "kSecAttrKeyType", ",", "key_type", ")", ",", "(", "Security", ".", "kSecAttrKeyClass", ",", "key_class", ")", ",", "(", "Security", ".", "kSecAttrCanSign", ",", "CoreFoundation", ".", "kCFBooleanTrue", ")", ",", "(", "Security", ".", "kSecAttrCanVerify", ",", "CoreFoundation", ".", "kCFBooleanTrue", ")", ",", "]", ")", "error_pointer", "=", "new", "(", "CoreFoundation", ",", "'CFErrorRef *'", ")", "sec_key_ref", "=", "Security", ".", "SecKeyCreateFromData", "(", "cf_dict", ",", "cf_source", ",", "error_pointer", ")", "handle_cf_error", "(", "error_pointer", ")", "if", "key_class", "==", "Security", ".", "kSecAttrKeyClassPublic", ":", "return", "PublicKey", "(", "sec_key_ref", ",", "key_object", ")", "if", "key_class", "==", "Security", ".", "kSecAttrKeyClassPrivate", ":", "return", "PrivateKey", "(", "sec_key_ref", ",", "key_object", ")", "finally", ":", "if", "cf_source", ":", "CoreFoundation", ".", "CFRelease", "(", "cf_source", ")", "if", "cf_dict", ":", "CoreFoundation", ".", "CFRelease", "(", "cf_dict", ")", "if", "cf_output", ":", "CoreFoundation", ".", "CFRelease", "(", "cf_output", ")" ]
Common code to load public and private keys into PublicKey and PrivateKey objects :param key_object: An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo object :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library OSError - when an error is returned by the OS crypto library :return: A PublicKey or PrivateKey object
[ "Common", "code", "to", "load", "public", "and", "private", "keys", "into", "PublicKey", "and", "PrivateKey", "objects" ]
python
valid
vingd/encrypted-pickle-python
encryptedpickle/encryptedpickle.py
https://github.com/vingd/encrypted-pickle-python/blob/7656233598e02e65971f69e11849a0f288b2b2a5/encryptedpickle/encryptedpickle.py#L594-L621
def _read_header(self, data):
    '''Read header from data'''
    # pylint: disable=W0212
    version = self._read_version(data)
    version_info = self._get_version_info(version)

    header_data = data[:version_info['header_size']]
    header = version_info['header']
    header = header._make(
        unpack(version_info['header_format'], header_data))
    header = dict(header._asdict())

    flags = list("{0:0>8b}".format(header['flags']))
    flags = dict(version_info['flags']._make(flags)._asdict())
    flags = dict((i, bool(int(j))) for i, j in flags.items())
    header['flags'] = flags

    timestamp = None
    if flags['timestamp']:
        ts_start = version_info['header_size']
        ts_end = ts_start + version_info['timestamp_size']
        timestamp_data = data[ts_start:ts_end]
        timestamp = unpack(
            version_info['timestamp_format'], timestamp_data)[0]

    header['info'] = {'timestamp': timestamp}

    return header
[ "def", "_read_header", "(", "self", ",", "data", ")", ":", "# pylint: disable=W0212", "version", "=", "self", ".", "_read_version", "(", "data", ")", "version_info", "=", "self", ".", "_get_version_info", "(", "version", ")", "header_data", "=", "data", "[", ":", "version_info", "[", "'header_size'", "]", "]", "header", "=", "version_info", "[", "'header'", "]", "header", "=", "header", ".", "_make", "(", "unpack", "(", "version_info", "[", "'header_format'", "]", ",", "header_data", ")", ")", "header", "=", "dict", "(", "header", ".", "_asdict", "(", ")", ")", "flags", "=", "list", "(", "\"{0:0>8b}\"", ".", "format", "(", "header", "[", "'flags'", "]", ")", ")", "flags", "=", "dict", "(", "version_info", "[", "'flags'", "]", ".", "_make", "(", "flags", ")", ".", "_asdict", "(", ")", ")", "flags", "=", "dict", "(", "(", "i", ",", "bool", "(", "int", "(", "j", ")", ")", ")", "for", "i", ",", "j", "in", "flags", ".", "iteritems", "(", ")", ")", "header", "[", "'flags'", "]", "=", "flags", "timestamp", "=", "None", "if", "flags", "[", "'timestamp'", "]", ":", "ts_start", "=", "version_info", "[", "'header_size'", "]", "ts_end", "=", "ts_start", "+", "version_info", "[", "'timestamp_size'", "]", "timestamp_data", "=", "data", "[", "ts_start", ":", "ts_end", "]", "timestamp", "=", "unpack", "(", "version_info", "[", "'timestamp_format'", "]", ",", "timestamp_data", ")", "[", "0", "]", "header", "[", "'info'", "]", "=", "{", "'timestamp'", ":", "timestamp", "}", "return", "header" ]
Read header from data
[ "Read", "header", "from", "data" ]
python
valid
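A toy illustration of the fixed-header pattern _read_header decodes — a version byte plus a flags byte, with the flags then expanded bit by bit (the two-byte layout here is invented, not encryptedpickle's real wire format):

from collections import namedtuple
from struct import pack, unpack

Header = namedtuple('Header', 'version flags')
raw = pack('>BB', 1, 0b00000001)            # version byte + flags byte
hdr = Header._make(unpack('>BB', raw[:2]))
bits = list('{0:0>8b}'.format(hdr.flags))   # same zero-padded expansion as above
assert hdr.version == 1 and bits[-1] == '1'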
python-bonobo/bonobo
bonobo/structs/graphs.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/structs/graphs.py#L78-L83
def outputs_of(self, idx, create=False):
    """ Get a set of the outputs for a given node index.
    """
    if create and idx not in self.edges:
        self.edges[idx] = set()
    return self.edges[idx]
[ "def", "outputs_of", "(", "self", ",", "idx", ",", "create", "=", "False", ")", ":", "if", "create", "and", "not", "idx", "in", "self", ".", "edges", ":", "self", ".", "edges", "[", "idx", "]", "=", "set", "(", ")", "return", "self", ".", "edges", "[", "idx", "]" ]
Get a set of the outputs for a given node index.
[ "Get", "a", "set", "of", "the", "outputs", "for", "a", "given", "node", "index", "." ]
python
train
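The create flag above makes an edge set spring into existence on first lookup. The same pattern in a self-contained sketch (a toy class, not bonobo's full Graph):

class MiniGraph:
    def __init__(self):
        self.edges = {}

    def outputs_of(self, idx, create=False):
        if create and idx not in self.edges:
            self.edges[idx] = set()
        return self.edges[idx]

g = MiniGraph()
g.outputs_of(0, create=True).add(1)   # node 0 now feeds node 1
assert g.outputs_of(0) == {1}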
Autodesk/aomi
aomi/cli.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/cli.py#L117-L140
def template_args(subparsers): """Add command line options for the template operation""" template_parser = subparsers.add_parser('template') template_parser.add_argument('template', help='Template source', nargs='?') template_parser.add_argument('destination', help='Path to write rendered template', nargs='?') template_parser.add_argument('vault_paths', help='Full path(s) to secret', nargs='*') template_parser.add_argument('--builtin-list', dest='builtin_list', help='Display a list of builtin templates', action='store_true', default=False) template_parser.add_argument('--builtin-info', dest='builtin_info', help='Display information on a ' 'particular builtin template') vars_args(template_parser) mapping_args(template_parser) base_args(template_parser)
[ "def", "template_args", "(", "subparsers", ")", ":", "template_parser", "=", "subparsers", ".", "add_parser", "(", "'template'", ")", "template_parser", ".", "add_argument", "(", "'template'", ",", "help", "=", "'Template source'", ",", "nargs", "=", "'?'", ")", "template_parser", ".", "add_argument", "(", "'destination'", ",", "help", "=", "'Path to write rendered template'", ",", "nargs", "=", "'?'", ")", "template_parser", ".", "add_argument", "(", "'vault_paths'", ",", "help", "=", "'Full path(s) to secret'", ",", "nargs", "=", "'*'", ")", "template_parser", ".", "add_argument", "(", "'--builtin-list'", ",", "dest", "=", "'builtin_list'", ",", "help", "=", "'Display a list of builtin templates'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ")", "template_parser", ".", "add_argument", "(", "'--builtin-info'", ",", "dest", "=", "'builtin_info'", ",", "help", "=", "'Display information on a '", "'particular builtin template'", ")", "vars_args", "(", "template_parser", ")", "mapping_args", "(", "template_parser", ")", "base_args", "(", "template_parser", ")" ]
Add command line options for the template operation
[ "Add", "command", "line", "options", "for", "the", "template", "operation" ]
python
train
eugene-eeo/mailthon
mailthon/headers.py
https://github.com/eugene-eeo/mailthon/blob/e3d5aef62505acb4edbc33e3378a04951c3199cb/mailthon/headers.py#L59-L70
def receivers(self): """ Returns a list of receivers, obtained from the To, Cc, and Bcc headers, respecting the Resent-* headers if the email was resent. """ attrs = ( ['Resent-To', 'Resent-Cc', 'Resent-Bcc'] if self.resent else ['To', 'Cc', 'Bcc'] ) addrs = (v for v in (self.get(k) for k in attrs) if v) return [addr for _, addr in getaddresses(addrs)]
[ "def", "receivers", "(", "self", ")", ":", "attrs", "=", "(", "[", "'Resent-To'", ",", "'Resent-Cc'", ",", "'Resent-Bcc'", "]", "if", "self", ".", "resent", "else", "[", "'To'", ",", "'Cc'", ",", "'Bcc'", "]", ")", "addrs", "=", "(", "v", "for", "v", "in", "(", "self", ".", "get", "(", "k", ")", "for", "k", "in", "attrs", ")", "if", "v", ")", "return", "[", "addr", "for", "_", ",", "addr", "in", "getaddresses", "(", "addrs", ")", "]" ]
Returns a list of receivers, obtained from the To, Cc, and Bcc headers, respecting the Resent-* headers if the email was resent.
[ "Returns", "a", "list", "of", "receivers", "obtained", "from", "the", "To", "Cc", "and", "Bcc", "headers", "respecting", "the", "Resent", "-", "*", "headers", "if", "the", "email", "was", "resent", "." ]
python
train
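How the header harvesting in receivers plays out, with a plain dict standing in for mailthon's Headers object (addresses invented; the non-resent branch is shown):

from email.utils import getaddresses

headers = {'To': 'a@x.org', 'Cc': 'b@x.org, c@x.org'}
addrs = (v for v in (headers.get(k) for k in ['To', 'Cc', 'Bcc']) if v)
assert [addr for _, addr in getaddresses(addrs)] == ['a@x.org', 'b@x.org', 'c@x.org']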
saltstack/salt
salt/modules/mandrill.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mandrill.py#L101-L132
def _http_request(url, headers=None, data=None): ''' Make the HTTP request and return the body as python object. ''' if not headers: headers = _get_headers() session = requests.session() log.debug('Querying %s', url) req = session.post(url, headers=headers, data=salt.utils.json.dumps(data)) req_body = req.json() ret = _default_ret() log.debug('Status code: %d', req.status_code) log.debug('Response body:') log.debug(req_body) if req.status_code != 200: if req.status_code == 500: ret['comment'] = req_body.pop('message', '') ret['out'] = req_body return ret ret.update({ 'comment': req_body.get('error', '') }) return ret ret.update({ 'result': True, 'out': req.json() }) return ret
[ "def", "_http_request", "(", "url", ",", "headers", "=", "None", ",", "data", "=", "None", ")", ":", "if", "not", "headers", ":", "headers", "=", "_get_headers", "(", ")", "session", "=", "requests", ".", "session", "(", ")", "log", ".", "debug", "(", "'Querying %s'", ",", "url", ")", "req", "=", "session", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "data", ")", ")", "req_body", "=", "req", ".", "json", "(", ")", "ret", "=", "_default_ret", "(", ")", "log", ".", "debug", "(", "'Status code: %d'", ",", "req", ".", "status_code", ")", "log", ".", "debug", "(", "'Response body:'", ")", "log", ".", "debug", "(", "req_body", ")", "if", "req", ".", "status_code", "!=", "200", ":", "if", "req", ".", "status_code", "==", "500", ":", "ret", "[", "'comment'", "]", "=", "req_body", ".", "pop", "(", "'message'", ",", "''", ")", "ret", "[", "'out'", "]", "=", "req_body", "return", "ret", "ret", ".", "update", "(", "{", "'comment'", ":", "req_body", ".", "get", "(", "'error'", ",", "''", ")", "}", ")", "return", "ret", "ret", ".", "update", "(", "{", "'result'", ":", "True", ",", "'out'", ":", "req", ".", "json", "(", ")", "}", ")", "return", "ret" ]
Make the HTTP request and return the body as python object.
[ "Make", "the", "HTTP", "request", "and", "return", "the", "body", "as", "python", "object", "." ]
python
train
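The status-folding pattern above (HTTP response mapped into a {result, comment, out} dict) reduced to a standalone requests sketch — the endpoint is hypothetical and the error keys simply mirror the Mandrill handling shown, not a documented contract:

import json
import requests

def post_json(url, payload):
    resp = requests.post(url, headers={'content-type': 'application/json'},
                         data=json.dumps(payload))
    body = resp.json()
    if resp.status_code != 200:
        # fold the failure into the return value instead of raising
        return {'result': False, 'comment': body.get('error', ''), 'out': body}
    return {'result': True, 'comment': '', 'out': body}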
ludeeus/pyhaversion
pyhaversion/__init__.py
https://github.com/ludeeus/pyhaversion/blob/a49d714fce0343657d94faae360a77edf22305dc/pyhaversion/__init__.py#L32-L45
async def get_local_version(self):
    """Get the local installed version."""
    self._version_data["source"] = "Local"
    try:
        from homeassistant.const import __version__ as localversion

        self._version = localversion
        _LOGGER.debug("Version: %s", self.version)
        _LOGGER.debug("Version data: %s", self.version_data)
    except ImportError as error:
        _LOGGER.critical("Home Assistant not found - %s", error)
    except Exception as error:  # pylint: disable=broad-except
        _LOGGER.critical("Something really wrong happened! - %s", error)
[ "async", "def", "get_local_version", "(", "self", ")", ":", "self", ".", "_version_data", "[", "\"source\"", "]", "=", "\"Local\"", "try", ":", "from", "homeassistant", ".", "const", "import", "__version__", "as", "localversion", "self", ".", "_version", "=", "localversion", "_LOGGER", ".", "debug", "(", "\"Version: %s\"", ",", "self", ".", "version", ")", "_LOGGER", ".", "debug", "(", "\"Version data: %s\"", ",", "self", ".", "version_data", ")", "except", "ImportError", "as", "error", ":", "_LOGGER", ".", "critical", "(", "\"Home Assistant not found - %s\"", ",", "error", ")", "except", "Exception", "as", "error", ":", "# pylint: disable=broad-except", "_LOGGER", ".", "critical", "(", "\"Something really wrong happend! - %s\"", ",", "error", ")" ]
Get the local installed version.
[ "Get", "the", "local", "installed", "version", "." ]
python
train
edx/edx-drf-extensions
edx_rest_framework_extensions/auth/session/authentication.py
https://github.com/edx/edx-drf-extensions/blob/2f4c1682b8471bf894ea566a43fd9f91ba219f83/edx_rest_framework_extensions/auth/session/authentication.py#L20-L50
def authenticate(self, request): """Authenticate the user, requiring a logged-in account and CSRF. This is exactly the same as the `SessionAuthentication` implementation, with the `user.is_active` check removed. Args: request (HttpRequest) Returns: Tuple of `(user, token)` Raises: PermissionDenied: The CSRF token check failed. """ # Get the underlying HttpRequest object request = request._request # pylint: disable=protected-access user = getattr(request, 'user', None) # Unauthenticated, CSRF validation not required # This is where regular `SessionAuthentication` checks that the user is active. # We have removed that check in this implementation. # But we added a check to prevent anonymous users since we require a logged-in account. if not user or user.is_anonymous: return None self.enforce_csrf(request) # CSRF passed with authenticated user return (user, None)
[ "def", "authenticate", "(", "self", ",", "request", ")", ":", "# Get the underlying HttpRequest object", "request", "=", "request", ".", "_request", "# pylint: disable=protected-access", "user", "=", "getattr", "(", "request", ",", "'user'", ",", "None", ")", "# Unauthenticated, CSRF validation not required", "# This is where regular `SessionAuthentication` checks that the user is active.", "# We have removed that check in this implementation.", "# But we added a check to prevent anonymous users since we require a logged-in account.", "if", "not", "user", "or", "user", ".", "is_anonymous", ":", "return", "None", "self", ".", "enforce_csrf", "(", "request", ")", "# CSRF passed with authenticated user", "return", "(", "user", ",", "None", ")" ]
Authenticate the user, requiring a logged-in account and CSRF. This is exactly the same as the `SessionAuthentication` implementation, with the `user.is_active` check removed. Args: request (HttpRequest) Returns: Tuple of `(user, token)` Raises: PermissionDenied: The CSRF token check failed.
[ "Authenticate", "the", "user", "requiring", "a", "logged", "-", "in", "account", "and", "CSRF", "." ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/bucket.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/bucket.py#L979-L1043
def copy_blob( self, blob, destination_bucket, new_name=None, client=None, preserve_acl=True, source_generation=None, ): """Copy the given blob to the given bucket, optionally with a new name. If :attr:`user_project` is set, bills the API request to that project. :type blob: :class:`google.cloud.storage.blob.Blob` :param blob: The blob to be copied. :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket` :param destination_bucket: The bucket into which the blob should be copied. :type new_name: str :param new_name: (optional) the new name for the copied file. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :type preserve_acl: bool :param preserve_acl: Optional. Copies ACL from old blob to new blob. Default: True. :type source_generation: long :param source_generation: Optional. The generation of the blob to be copied. :rtype: :class:`google.cloud.storage.blob.Blob` :returns: The new Blob. """ client = self._require_client(client) query_params = {} if self.user_project is not None: query_params["userProject"] = self.user_project if source_generation is not None: query_params["sourceGeneration"] = source_generation if new_name is None: new_name = blob.name new_blob = Blob(bucket=destination_bucket, name=new_name) api_path = blob.path + "/copyTo" + new_blob.path copy_result = client._connection.api_request( method="POST", path=api_path, query_params=query_params, _target_object=new_blob, ) if not preserve_acl: new_blob.acl.save(acl={}, client=client) new_blob._set_properties(copy_result) return new_blob
[ "def", "copy_blob", "(", "self", ",", "blob", ",", "destination_bucket", ",", "new_name", "=", "None", ",", "client", "=", "None", ",", "preserve_acl", "=", "True", ",", "source_generation", "=", "None", ",", ")", ":", "client", "=", "self", ".", "_require_client", "(", "client", ")", "query_params", "=", "{", "}", "if", "self", ".", "user_project", "is", "not", "None", ":", "query_params", "[", "\"userProject\"", "]", "=", "self", ".", "user_project", "if", "source_generation", "is", "not", "None", ":", "query_params", "[", "\"sourceGeneration\"", "]", "=", "source_generation", "if", "new_name", "is", "None", ":", "new_name", "=", "blob", ".", "name", "new_blob", "=", "Blob", "(", "bucket", "=", "destination_bucket", ",", "name", "=", "new_name", ")", "api_path", "=", "blob", ".", "path", "+", "\"/copyTo\"", "+", "new_blob", ".", "path", "copy_result", "=", "client", ".", "_connection", ".", "api_request", "(", "method", "=", "\"POST\"", ",", "path", "=", "api_path", ",", "query_params", "=", "query_params", ",", "_target_object", "=", "new_blob", ",", ")", "if", "not", "preserve_acl", ":", "new_blob", ".", "acl", ".", "save", "(", "acl", "=", "{", "}", ",", "client", "=", "client", ")", "new_blob", ".", "_set_properties", "(", "copy_result", ")", "return", "new_blob" ]
Copy the given blob to the given bucket, optionally with a new name. If :attr:`user_project` is set, bills the API request to that project. :type blob: :class:`google.cloud.storage.blob.Blob` :param blob: The blob to be copied. :type destination_bucket: :class:`google.cloud.storage.bucket.Bucket` :param destination_bucket: The bucket into which the blob should be copied. :type new_name: str :param new_name: (optional) the new name for the copied file. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the current bucket. :type preserve_acl: bool :param preserve_acl: Optional. Copies ACL from old blob to new blob. Default: True. :type source_generation: long :param source_generation: Optional. The generation of the blob to be copied. :rtype: :class:`google.cloud.storage.blob.Blob` :returns: The new Blob.
[ "Copy", "the", "given", "blob", "to", "the", "given", "bucket", "optionally", "with", "a", "new", "name", "." ]
python
train
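Typical caller-side usage of copy_blob (bucket and object names are placeholders; a configured google-cloud-storage client with credentials is assumed):

from google.cloud import storage

client = storage.Client()
src = client.bucket('src-bucket')
dst = client.bucket('dst-bucket')
blob = src.blob('data/report.csv')
# copy under a new name; ACLs carry over because preserve_acl defaults to True
new_blob = src.copy_blob(blob, dst, new_name='backup/report.csv')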
google/grr
grr/server/grr_response_server/client_index.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/client_index.py#L285-L301
def RemoveClientLabels(self, client): """Removes all labels for a given client object. Args: client: A VFSGRRClient record. """ keywords = [] for label in client.GetLabelsNames(): keyword = self._NormalizeKeyword(utils.SmartStr(label)) # This might actually delete a keyword with the same name as the label (if # there is one). Usually the client keywords will be rebuilt after the # deletion of the old labels though, so this can only destroy historic # index data; normal search functionality will not be affected. keywords.append(keyword) keywords.append("label:%s" % keyword) self.RemoveKeywordsForName(self._ClientIdFromURN(client.urn), keywords)
[ "def", "RemoveClientLabels", "(", "self", ",", "client", ")", ":", "keywords", "=", "[", "]", "for", "label", "in", "client", ".", "GetLabelsNames", "(", ")", ":", "keyword", "=", "self", ".", "_NormalizeKeyword", "(", "utils", ".", "SmartStr", "(", "label", ")", ")", "# This might actually delete a keyword with the same name as the label (if", "# there is one). Usually the client keywords will be rebuilt after the", "# deletion of the old labels though, so this can only destroy historic", "# index data; normal search functionality will not be affected.", "keywords", ".", "append", "(", "keyword", ")", "keywords", ".", "append", "(", "\"label:%s\"", "%", "keyword", ")", "self", ".", "RemoveKeywordsForName", "(", "self", ".", "_ClientIdFromURN", "(", "client", ".", "urn", ")", ",", "keywords", ")" ]
Removes all labels for a given client object. Args: client: A VFSGRRClient record.
[ "Removes", "all", "labels", "for", "a", "given", "client", "object", "." ]
python
train
chrisspen/burlap
burlap/settings.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/settings.py#L17-L33
def show(keyword=''):
    """
    Displays a list of all environment key/value pairs for the current role.
    """
    keyword = keyword.strip().lower()
    max_len = max(len(k) for k in env.keys())
    keyword_found = False
    for k in sorted(env.keys()):
        if keyword and keyword not in k.lower():
            continue
        keyword_found = True
        #print '%s: %s' % (k, env[k])
        print('%s: ' % (k.ljust(max_len),))
        pprint(env[k], indent=4)
    if keyword:
        if not keyword_found:
            print('Keyword "%s" not found.' % keyword)
[ "def", "show", "(", "keyword", "=", "''", ")", ":", "keyword", "=", "keyword", ".", "strip", "(", ")", ".", "lower", "(", ")", "max_len", "=", "max", "(", "len", "(", "k", ")", "for", "k", "in", "env", ".", "iterkeys", "(", ")", ")", "keyword_found", "=", "False", "for", "k", "in", "sorted", "(", "env", ".", "keys", "(", ")", ")", ":", "if", "keyword", "and", "keyword", "not", "in", "k", ".", "lower", "(", ")", ":", "continue", "keyword_found", "=", "True", "#print '%s: %s' % (k, env[k])", "print", "(", "'%s: '", "%", "(", "k", ".", "ljust", "(", "max_len", ")", ",", ")", ")", "pprint", "(", "env", "[", "k", "]", ",", "indent", "=", "4", ")", "if", "keyword", ":", "if", "not", "keyword_found", ":", "print", "(", "'Keyword \"%s\" not found.'", "%", "keyword", ")" ]
Displays a list of all environment key/value pairs for the current role.
[ "Displays", "a", "list", "of", "all", "environment", "key", "/", "value", "pairs", "for", "the", "current", "role", "." ]
python
valid
saltstack/salt
salt/modules/gentoolkitmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gentoolkitmod.py#L78-L159
def eclean_dist(destructive=False, package_names=False, size_limit=0,
                time_limit=0, fetch_restricted=False,
                exclude_file='/etc/eclean/distfiles.exclude'):
    '''
    Clean obsolete portage sources

    destructive
        Only keep minimum for reinstallation

    package_names
        Protect all versions of installed packages. Only meaningful if used
        with destructive=True

    size_limit <size>
        Don't delete distfiles bigger than <size>.
        <size> is a size specification: "10M" is "ten megabytes",
        "200K" is "two hundred kilobytes", etc. Units are: G, M, K and B.

    time_limit <time>
        Don't delete distfiles files modified since <time>
        <time> is an amount of time: "1y" is "one year", "2w" is
        "two weeks", etc. Units are: y (years), m (months), w (weeks),
        d (days) and h (hours).

    fetch_restricted
        Protect fetch-restricted files. Only meaningful if used with
        destructive=True

    exclude_file
        Path to exclusion file. Default is /etc/eclean/distfiles.exclude
        This is the same default eclean-dist uses. Use None if this file
        exists and you want to ignore.

    Returns a dict containing the cleaned, saved, and deprecated dists:

    .. code-block:: python

        {'cleaned': {<dist file>: <size>},
         'deprecated': {<package>: <dist file>},
         'saved': {<package>: <dist file>},
         'total_cleaned': <size>}

    CLI Example:

    .. code-block:: bash

        salt '*' gentoolkit.eclean_dist destructive=True
    '''
    if exclude_file is None:
        exclude = None
    else:
        try:
            exclude = _parse_exclude(exclude_file)
        except excludemod.ParseExcludeFileException as e:
            ret = {e: 'Invalid exclusion file: {0}'.format(exclude_file)}
            return ret

    if time_limit != 0:
        time_limit = cli.parseTime(time_limit)
    if size_limit != 0:
        size_limit = cli.parseSize(size_limit)

    clean_size = 0
    engine = search.DistfilesSearch(lambda x: None)
    clean_me, saved, deprecated = engine.findDistfiles(
        destructive=destructive, package_names=package_names,
        size_limit=size_limit, time_limit=time_limit,
        fetch_restricted=fetch_restricted, exclude=exclude)

    cleaned = dict()

    def _eclean_progress_controller(size, key, *args):
        cleaned[key] = _pretty_size(size)
        return True

    if clean_me:
        cleaner = clean.CleanUp(_eclean_progress_controller)
        clean_size = cleaner.clean_dist(clean_me)

    ret = {'cleaned': cleaned,
           'saved': saved,
           'deprecated': deprecated,
           'total_cleaned': _pretty_size(clean_size)}

    return ret
[ "def", "eclean_dist", "(", "destructive", "=", "False", ",", "package_names", "=", "False", ",", "size_limit", "=", "0", ",", "time_limit", "=", "0", ",", "fetch_restricted", "=", "False", ",", "exclude_file", "=", "'/etc/eclean/distfiles.exclude'", ")", ":", "if", "exclude_file", "is", "None", ":", "exclude", "=", "None", "else", ":", "try", ":", "exclude", "=", "_parse_exclude", "(", "exclude_file", ")", "except", "excludemod", ".", "ParseExcludeFileException", "as", "e", ":", "ret", "=", "{", "e", ":", "'Invalid exclusion file: {0}'", ".", "format", "(", "exclude_file", ")", "}", "return", "ret", "if", "time_limit", "!=", "0", ":", "time_limit", "=", "cli", ".", "parseTime", "(", "time_limit", ")", "if", "size_limit", "!=", "0", ":", "size_limit", "=", "cli", ".", "parseSize", "(", "size_limit", ")", "clean_size", "=", "0", "engine", "=", "search", ".", "DistfilesSearch", "(", "lambda", "x", ":", "None", ")", "clean_me", ",", "saved", ",", "deprecated", "=", "engine", ".", "findDistfiles", "(", "destructive", "=", "destructive", ",", "package_names", "=", "package_names", ",", "size_limit", "=", "size_limit", ",", "time_limit", "=", "time_limit", ",", "fetch_restricted", "=", "fetch_restricted", ",", "exclude", "=", "exclude", ")", "cleaned", "=", "dict", "(", ")", "def", "_eclean_progress_controller", "(", "size", ",", "key", ",", "*", "args", ")", ":", "cleaned", "[", "key", "]", "=", "_pretty_size", "(", "size", ")", "return", "True", "if", "clean_me", ":", "cleaner", "=", "clean", ".", "CleanUp", "(", "_eclean_progress_controller", ")", "clean_size", "=", "cleaner", ".", "clean_dist", "(", "clean_me", ")", "ret", "=", "{", "'cleaned'", ":", "cleaned", ",", "'saved'", ":", "saved", ",", "'deprecated'", ":", "deprecated", ",", "'total_cleaned'", ":", "_pretty_size", "(", "clean_size", ")", "}", "return", "ret" ]
Clean obsolete portage sources

    destructive
        Only keep minimum for reinstallation

    package_names
        Protect all versions of installed packages. Only meaningful if used
        with destructive=True

    size_limit <size>
        Don't delete distfiles bigger than <size>.
        <size> is a size specification: "10M" is "ten megabytes",
        "200K" is "two hundred kilobytes", etc. Units are: G, M, K and B.

    time_limit <time>
        Don't delete distfiles files modified since <time>
        <time> is an amount of time: "1y" is "one year", "2w" is
        "two weeks", etc. Units are: y (years), m (months), w (weeks),
        d (days) and h (hours).

    fetch_restricted
        Protect fetch-restricted files. Only meaningful if used with
        destructive=True

    exclude_file
        Path to exclusion file. Default is /etc/eclean/distfiles.exclude
        This is the same default eclean-dist uses. Use None if this file
        exists and you want to ignore.

    Returns a dict containing the cleaned, saved, and deprecated dists:

    .. code-block:: python

        {'cleaned': {<dist file>: <size>},
         'deprecated': {<package>: <dist file>},
         'saved': {<package>: <dist file>},
         'total_cleaned': <size>}

    CLI Example:

    .. code-block:: bash

        salt '*' gentoolkit.eclean_dist destructive=True
[ "Clean", "obsolete", "portage", "sources" ]
python
train
jpoullet2000/atlasclient
atlasclient/base.py
https://github.com/jpoullet2000/atlasclient/blob/4548b441143ebf7fc4075d113db5ca5a23e0eed2/atlasclient/base.py#L583-L605
def inflate(self): """Load the resource from the server, if not already loaded.""" if not self._is_inflated: if self._is_inflating: # catch infinite recursion when attempting to inflate # an object that doesn't have enough data to inflate msg = ("There is not enough data to inflate this object. " "Need either an href: {} or a {}: {}") msg = msg.format(self._href, self.primary_key, self._data.get(self.primary_key)) raise exceptions.ClientError(msg) self._is_inflating = True try: params = self.searchParameters if hasattr(self, 'searchParameters') else {} # To keep the method same as the original request. The default is GET self.load(self.client.request(self.method, self.url, **params)) except Exception: self.load(self._data) self._is_inflated = True self._is_inflating = False return self
[ "def", "inflate", "(", "self", ")", ":", "if", "not", "self", ".", "_is_inflated", ":", "if", "self", ".", "_is_inflating", ":", "# catch infinite recursion when attempting to inflate", "# an object that doesn't have enough data to inflate", "msg", "=", "(", "\"There is not enough data to inflate this object. \"", "\"Need either an href: {} or a {}: {}\"", ")", "msg", "=", "msg", ".", "format", "(", "self", ".", "_href", ",", "self", ".", "primary_key", ",", "self", ".", "_data", ".", "get", "(", "self", ".", "primary_key", ")", ")", "raise", "exceptions", ".", "ClientError", "(", "msg", ")", "self", ".", "_is_inflating", "=", "True", "try", ":", "params", "=", "self", ".", "searchParameters", "if", "hasattr", "(", "self", ",", "'searchParameters'", ")", "else", "{", "}", "# To keep the method same as the original request. The default is GET", "self", ".", "load", "(", "self", ".", "client", ".", "request", "(", "self", ".", "method", ",", "self", ".", "url", ",", "*", "*", "params", ")", ")", "except", "Exception", ":", "self", ".", "load", "(", "self", ".", "_data", ")", "self", ".", "_is_inflated", "=", "True", "self", ".", "_is_inflating", "=", "False", "return", "self" ]
Load the resource from the server, if not already loaded.
[ "Load", "the", "resource", "from", "the", "server", "if", "not", "already", "loaded", "." ]
python
train
jic-dtool/dtoolcore
dtoolcore/utils.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/utils.py#L207-L214
def timestamp(datetime_obj): """Return Unix timestamp as float. The number of seconds that have elapsed since January 1, 1970. """ start_of_time = datetime.datetime(1970, 1, 1) diff = datetime_obj - start_of_time return diff.total_seconds()
[ "def", "timestamp", "(", "datetime_obj", ")", ":", "start_of_time", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "diff", "=", "datetime_obj", "-", "start_of_time", "return", "diff", ".", "total_seconds", "(", ")" ]
Return Unix timestamp as float. The number of seconds that have elapsed since January 1, 1970.
[ "Return", "Unix", "timestamp", "as", "float", "." ]
python
train
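A quick sanity check of the epoch arithmetic above (timezone-naive UTC datetimes assumed, as in the function itself):

import datetime

# one full day after the epoch is exactly 86400 seconds
assert timestamp(datetime.datetime(1970, 1, 2)) == 86400.0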
Rockhopper-Technologies/enlighten
enlighten/_win_terminal.py
https://github.com/Rockhopper-Technologies/enlighten/blob/857855f940e6c1bb84d0be849b999a18fff5bf5a/enlighten/_win_terminal.py#L218-L227
def create_color_method(color, code): """ Create a function for the given color Done inside this function to keep the variables out of the main scope """ def func(self, content=''): return self._apply_color(code, content) # pylint: disable=protected-access setattr(Terminal, color, func)
[ "def", "create_color_method", "(", "color", ",", "code", ")", ":", "def", "func", "(", "self", ",", "content", "=", "''", ")", ":", "return", "self", ".", "_apply_color", "(", "code", ",", "content", ")", "# pylint: disable=protected-access", "setattr", "(", "Terminal", ",", "color", ",", "func", ")" ]
Create a function for the given color Done inside this function to keep the variables out of the main scope
[ "Create", "a", "function", "for", "the", "given", "color", "Done", "inside", "this", "function", "to", "keep", "the", "variables", "out", "of", "the", "main", "scope" ]
python
train
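The factory above exists to dodge Python's late-binding closure pitfall: without it, every generated method would capture the final loop value. A toy demonstration of the bug and the fix (class and codes invented for illustration):

class T(object):
    pass

for code in (31, 32):                    # buggy: the lambda closes over 'code'
    T.color = lambda self, s: (code, s)
assert T().color('x') == (32, 'x')       # always sees the last loop value

def make(code):                          # fixed: the factory freezes 'code'
    return lambda self, s: (code, s)

T.red, T.green = make(31), make(32)
assert T().red('x') == (31, 'x')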
dereneaton/ipyrad
ipyrad/file_conversion/loci2alleles.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/file_conversion/loci2alleles.py#L12-L48
def make(data, samples):
    """ reads in .loci and builds alleles from case characters """

    #read in loci file
    outfile = open(os.path.join(data.dirs.outfiles, data.name+".alleles"), 'w')
    lines = open(os.path.join(data.dirs.outfiles, data.name+".loci"), 'r')

    ## Get the longest sample name for pretty printing
    longname = max(len(x) for x in data.samples.keys())

    ## Padding between name and sequence in output file. This should be the
    ## same as write_outfiles.write_tmp_loci.name_padding
    name_padding = 5

    writing = []
    loc = 0
    for line in lines:
        if ">" in line:
            name, seq = line.split(" ")[0], line.split(" ")[-1]
            allele1, allele2 = splitalleles(seq.strip())

            ## Format the output string. the "-2" below accounts for the additional
            ## 2 characters added to the sample name that don't get added to the
            ## snpsites line, so you gotta bump this line back 2 to make it
            ## line up right.
            writing.append(name+"_0"+" "*(longname-len(name)-2+name_padding)+allele1)
            writing.append(name+"_1"+" "*(longname-len(name)-2+name_padding)+allele2)
        else:
            writing.append(line.strip())
        loc += 1

        ## print every 10K loci
        if not loc % 10000:
            outfile.write("\n".join(writing)+"\n")
            writing = []

    outfile.write("\n".join(writing))
    outfile.close()
[ "def", "make", "(", "data", ",", "samples", ")", ":", "#read in loci file", "outfile", "=", "open", "(", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "outfiles", ",", "data", ".", "name", "+", "\".alleles\"", ")", ",", "'w'", ")", "lines", "=", "open", "(", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "outfiles", ",", "data", ".", "name", "+", "\".loci\"", ")", ",", "'r'", ")", "## Get the longest sample name for pretty printing", "longname", "=", "max", "(", "len", "(", "x", ")", "for", "x", "in", "data", ".", "samples", ".", "keys", "(", ")", ")", "## Padding between name and sequence in output file. This should be the ", "## same as write_outfiles.write_tmp_loci.name_padding", "name_padding", "=", "5", "writing", "=", "[", "]", "loc", "=", "0", "for", "line", "in", "lines", ":", "if", "\">\"", "in", "line", ":", "name", ",", "seq", "=", "line", ".", "split", "(", "\" \"", ")", "[", "0", "]", ",", "line", ".", "split", "(", "\" \"", ")", "[", "-", "1", "]", "allele1", ",", "allele2", "=", "splitalleles", "(", "seq", ".", "strip", "(", ")", ")", "## Format the output string. the \"-2\" below accounts for the additional", "## 2 characters added to the sample name that don't get added to the", "## snpsites line, so you gotta bump this line back 2 to make it", "## line up right.", "writing", ".", "append", "(", "name", "+", "\"_0\"", "+", "\" \"", "*", "(", "longname", "-", "len", "(", "name", ")", "-", "2", "+", "name_padding", ")", "+", "allele1", ")", "writing", ".", "append", "(", "name", "+", "\"_1\"", "+", "\" \"", "*", "(", "longname", "-", "len", "(", "name", ")", "-", "2", "+", "name_padding", ")", "+", "allele2", ")", "else", ":", "writing", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "loc", "+=", "1", "## print every 10K loci \"", "if", "not", "loc", "%", "10000", ":", "outfile", ".", "write", "(", "\"\\n\"", ".", "join", "(", "writing", ")", "+", "\"\\n\"", ")", "writing", "=", "[", "]", "outfile", ".", "write", "(", "\"\\n\"", ".", "join", "(", "writing", ")", ")", "outfile", ".", "close", "(", ")" ]
reads in .loci and builds alleles from case characters
[ "reads", "in", ".", "loci", "and", "builds", "alleles", "from", "case", "characters" ]
python
valid
kejbaly2/members
members/mailman2.py
https://github.com/kejbaly2/members/blob/28e70a25cceade514c550e3ce9963f73167e8572/members/mailman2.py#L48-L75
def extract(list_name, base_url, list_config=None, user=None, password=None): ''' # FIXME DOCS ''' if not (base_url and list_name): raise RuntimeError( "base_url [{}] and list_name [{}] can not be NULL".format( base_url, list_name)) list_config = list_config or {} assert isinstance(list_config, dict) logr.debug( '[{}] {}: {}'.format(base_url, list_name, user)) list_url = "{}/roster/{}".format(base_url, list_name) content = _download(list_url, user, password) # Check for and report any errors return in the HTML check_h2(content, 'Error') # source contain list members page content users = re.findall(r'(?<=>)(\S* at \S*|\S*@\S*)(?=<\/a>)', content) users = ['@'.join(u.split(' at ')) if ' at ' in u else u for u in users] return users
[ "def", "extract", "(", "list_name", ",", "base_url", ",", "list_config", "=", "None", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "if", "not", "(", "base_url", "and", "list_name", ")", ":", "raise", "RuntimeError", "(", "\"base_url [{}] and list_name [{}] can not be NULL\"", ".", "format", "(", "base_url", ",", "list_name", ")", ")", "list_config", "=", "list_config", "or", "{", "}", "assert", "isinstance", "(", "list_config", ",", "dict", ")", "logr", ".", "debug", "(", "'[{}] {}: {}'", ".", "format", "(", "base_url", ",", "list_name", ",", "user", ")", ")", "list_url", "=", "\"{}/roster/{}\"", ".", "format", "(", "base_url", ",", "list_name", ")", "content", "=", "_download", "(", "list_url", ",", "user", ",", "password", ")", "# Check for and report any errors return in the HTML", "check_h2", "(", "content", ",", "'Error'", ")", "# source contain list members page content", "users", "=", "re", ".", "findall", "(", "r'(?<=>)(\\S* at \\S*|\\S*@\\S*)(?=<\\/a>)'", ",", "content", ")", "users", "=", "[", "'@'", ".", "join", "(", "u", ".", "split", "(", "' at '", ")", ")", "if", "' at '", "in", "u", "else", "u", "for", "u", "in", "users", "]", "return", "users" ]
# FIXME DOCS
[ "#", "FIXME", "DOCS" ]
python
train
Fortran-FOSS-Programmers/ford
ford/utils.py
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/utils.py#L59-L84
def get_parens(line,retlevel=0,retblevel=0):
    """
    By default takes a string starting with an open parenthesis and returns the portion
    of the string going to the corresponding close parenthesis. If retlevel != 0 then
    will return when that level (for parentheses) is reached. Same for retblevel.
    """
    if len(line) == 0:
        return line
    parenstr = ''
    level = 0
    blevel = 0
    for char in line:
        if char == '(':
            level += 1
        elif char == ')':
            level -= 1
        elif char == '[':
            blevel += 1
        elif char == ']':
            blevel -= 1
        elif (char.isalpha() or char == '_' or char == ':' or char == ','
              or char == ' ') and level == retlevel and blevel == retblevel:
            return parenstr
        parenstr = parenstr + char

        if level == retlevel and blevel == retblevel:
            return parenstr

    raise Exception("Couldn't parse parentheses: {}".format(line))
[ "def", "get_parens", "(", "line", ",", "retlevel", "=", "0", ",", "retblevel", "=", "0", ")", ":", "if", "len", "(", "line", ")", "==", "0", ":", "return", "line", "parenstr", "=", "''", "level", "=", "0", "blevel", "=", "0", "for", "char", "in", "line", ":", "if", "char", "==", "'('", ":", "level", "+=", "1", "elif", "char", "==", "')'", ":", "level", "-=", "1", "elif", "char", "==", "'['", ":", "blevel", "+=", "1", "elif", "char", "==", "']'", ":", "blevel", "-=", "1", "elif", "(", "char", ".", "isalpha", "(", ")", "or", "char", "==", "'_'", "or", "char", "==", "':'", "or", "char", "==", "','", "or", "char", "==", "' '", ")", "and", "level", "==", "retlevel", "and", "blevel", "==", "retblevel", ":", "return", "parenstr", "parenstr", "=", "parenstr", "+", "char", "if", "level", "==", "retlevel", "and", "blevel", "==", "retblevel", ":", "return", "parenstr", "raise", "Exception", "(", "\"Couldn't parse parentheses: {}\"", ".", "format", "(", "line", ")", ")" ]
By default takes a string starting with an open parenthesis and returns the portion of the string going to the corresponding close parenthesis. If retlevel != 0 then will return when that level (for parentheses) is reached. Same for retblevel.
[ "By", "default", "akes", "a", "string", "starting", "with", "an", "open", "parenthesis", "and", "returns", "the", "portion", "of", "the", "string", "going", "to", "the", "corresponding", "close", "parenthesis", ".", "If", "retlevel", "!", "=", "0", "then", "will", "return", "when", "that", "level", "(", "for", "parentheses", ")", "is", "reached", ".", "Same", "for", "retblevel", "." ]
python
train
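Behaviour check for get_parens on a Fortran-flavoured declaration (input invented; with the default retlevel=0 the walk returns as soon as the outer parentheses close):

assert get_parens('(a, b(c), d) :: x') == '(a, b(c), d)'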
jbloomlab/phydms
phydmslib/models.py
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L774-L788
def _update_B(self): """Update `B`.""" for param in self.freeparams: if param == 'mu': continue paramval = getattr(self, param) if isinstance(paramval, float): self.B[param] = broadcastMatrixMultiply(self.Ainv, broadcastMatrixMultiply(self.dPrxy[param], self.A)) else: assert isinstance(paramval, numpy.ndarray) and paramval.ndim == 1 for j in range(paramval.shape[0]): self.B[param][j] = broadcastMatrixMultiply(self.Ainv, broadcastMatrixMultiply(self.dPrxy[param][j], self.A))
[ "def", "_update_B", "(", "self", ")", ":", "for", "param", "in", "self", ".", "freeparams", ":", "if", "param", "==", "'mu'", ":", "continue", "paramval", "=", "getattr", "(", "self", ",", "param", ")", "if", "isinstance", "(", "paramval", ",", "float", ")", ":", "self", ".", "B", "[", "param", "]", "=", "broadcastMatrixMultiply", "(", "self", ".", "Ainv", ",", "broadcastMatrixMultiply", "(", "self", ".", "dPrxy", "[", "param", "]", ",", "self", ".", "A", ")", ")", "else", ":", "assert", "isinstance", "(", "paramval", ",", "numpy", ".", "ndarray", ")", "and", "paramval", ".", "ndim", "==", "1", "for", "j", "in", "range", "(", "paramval", ".", "shape", "[", "0", "]", ")", ":", "self", ".", "B", "[", "param", "]", "[", "j", "]", "=", "broadcastMatrixMultiply", "(", "self", ".", "Ainv", ",", "broadcastMatrixMultiply", "(", "self", ".", "dPrxy", "[", "param", "]", "[", "j", "]", ",", "self", ".", "A", ")", ")" ]
Update `B`.
[ "Update", "B", "." ]
python
train
spookey/photon
photon/tools/template.py
https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/tools/template.py#L57-L68
def sub(self): ''' :param fields: Set fields to substitute :returns: Substituted Template with given fields. If no fields were set up beforehand, :func:`raw` is used. ''' if self.__fields: return _Template(self.raw).substitute(self.__fields) return self.raw
[ "def", "sub", "(", "self", ")", ":", "if", "self", ".", "__fields", ":", "return", "_Template", "(", "self", ".", "raw", ")", ".", "substitute", "(", "self", ".", "__fields", ")", "return", "self", ".", "raw" ]
:param fields: Set fields to substitute :returns: Substituted Template with given fields. If no fields were set up beforehand, :func:`raw` is used.
[ ":", "param", "fields", ":", "Set", "fields", "to", "substitute", ":", "returns", ":", "Substituted", "Template", "with", "given", "fields", ".", "If", "no", "fields", "were", "set", "up", "beforehand", ":", "func", ":", "raw", "is", "used", "." ]
python
train
quantopian/zipline
zipline/data/hdf5_daily_bars.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L800-L831
def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- columns : list of str 'open', 'high', 'low', 'close', or 'volume' start_date: Timestamp Beginning of the window range. end_date: Timestamp End of the window range. assets : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ country_code = self._country_code_for_assets(assets) return self._readers[country_code].load_raw_arrays( columns, start_date, end_date, assets, )
[ "def", "load_raw_arrays", "(", "self", ",", "columns", ",", "start_date", ",", "end_date", ",", "assets", ")", ":", "country_code", "=", "self", ".", "_country_code_for_assets", "(", "assets", ")", "return", "self", ".", "_readers", "[", "country_code", "]", ".", "load_raw_arrays", "(", "columns", ",", "start_date", ",", "end_date", ",", "assets", ",", ")" ]
Parameters ---------- columns : list of str 'open', 'high', 'low', 'close', or 'volume' start_date: Timestamp Beginning of the window range. end_date: Timestamp End of the window range. assets : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range.
[ "Parameters", "----------", "columns", ":", "list", "of", "str", "open", "high", "low", "close", "or", "volume", "start_date", ":", "Timestamp", "Beginning", "of", "the", "window", "range", ".", "end_date", ":", "Timestamp", "End", "of", "the", "window", "range", ".", "assets", ":", "list", "of", "int", "The", "asset", "identifiers", "in", "the", "window", "." ]
python
train
google/grumpy
third_party/stdlib/dircache.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/dircache.py#L37-L41
def annotate(head, list): """Add '/' suffixes to directories.""" for i in range(len(list)): if os.path.isdir(os.path.join(head, list[i])): list[i] = list[i] + '/'
[ "def", "annotate", "(", "head", ",", "list", ")", ":", "for", "i", "in", "range", "(", "len", "(", "list", ")", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "head", ",", "list", "[", "i", "]", ")", ")", ":", "list", "[", "i", "]", "=", "list", "[", "i", "]", "+", "'/'" ]
Add '/' suffixes to directories.
[ "Add", "/", "suffixes", "to", "directories", "." ]
python
valid
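A small demonstration of annotate, assuming the function above is in scope (dircache itself is a Python 2 stdlib module). Note that it mutates the list in place:

    import os, tempfile

    head = tempfile.mkdtemp()
    os.mkdir(os.path.join(head, 'sub'))
    open(os.path.join(head, 'file.txt'), 'w').close()

    entries = sorted(os.listdir(head))
    annotate(head, entries)
    print(entries)  # ['file.txt', 'sub/']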
oscarbranson/latools
latools/filtering/signal_optimiser.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L14-L31
def calc_windows(fn, s, min_points): """ Apply fn to all contiguous regions in s that have at least min_points. """ max_points = np.sum(~np.isnan(s)) n_points = max_points - min_points out = np.full((n_points, s.size), np.nan) # skip nans, for speed ind = ~np.isnan(s) s = s[ind] for i, w in enumerate(range(min_points, s.size)): r = rolling_window(s, w, pad=np.nan) out[i, ind] = np.apply_along_axis(fn, 1, r) return out
[ "def", "calc_windows", "(", "fn", ",", "s", ",", "min_points", ")", ":", "max_points", "=", "np", ".", "sum", "(", "~", "np", ".", "isnan", "(", "s", ")", ")", "n_points", "=", "max_points", "-", "min_points", "out", "=", "np", ".", "full", "(", "(", "n_points", ",", "s", ".", "size", ")", ",", "np", ".", "nan", ")", "# skip nans, for speed", "ind", "=", "~", "np", ".", "isnan", "(", "s", ")", "s", "=", "s", "[", "ind", "]", "for", "i", ",", "w", "in", "enumerate", "(", "range", "(", "min_points", ",", "s", ".", "size", ")", ")", ":", "r", "=", "rolling_window", "(", "s", ",", "w", ",", "pad", "=", "np", ".", "nan", ")", "out", "[", "i", ",", "ind", "]", "=", "np", ".", "apply_along_axis", "(", "fn", ",", "1", ",", "r", ")", "return", "out" ]
Apply fn to all contiguous regions in s that have at least min_points.
[ "Apply", "fn", "to", "all", "contiguous", "regions", "in", "s", "that", "have", "at", "least", "min_points", "." ]
python
test
CartoDB/cartoframes
cartoframes/context.py
https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/context.py#L396-L463
def fetch(self, query, decode_geom=False): """Pull the result from an arbitrary SELECT SQL query from a CARTO account into a pandas DataFrame. Args: query (str): SELECT query to run against CARTO user database. This data will then be converted into a pandas DataFrame. decode_geom (bool, optional): Decodes CARTO's geometries into a `Shapely <https://github.com/Toblerity/Shapely>`__ object that can be used, for example, in `GeoPandas <http://geopandas.org/>`__. Returns: pandas.DataFrame: DataFrame representation of query supplied. Pandas data types are inferred from PostgreSQL data types. In the case of PostgreSQL date types, dates are attempted to be converted, but on failure a data type 'object' is used. Examples: This query gets the 10 highest values from a table and returns a dataframe. .. code:: python topten_df = cc.query( ''' SELECT * FROM my_table ORDER BY value_column DESC LIMIT 10 ''' ) This query joins points to polygons based on intersection, and aggregates by summing the values of the points in each polygon. The query returns a dataframe, with a geometry column that contains polygons. .. code:: python points_aggregated_to_polygons = cc.query( ''' SELECT polygons.*, sum(points.values) FROM polygons JOIN points ON ST_Intersects(points.the_geom, polygons.the_geom) GROUP BY polygons.the_geom, polygons.cartodb_id ''', decode_geom=True ) """ copy_query = 'COPY ({query}) TO stdout WITH (FORMAT csv, HEADER true)'.format(query=query) query_columns = get_columns(self, query) result = recursive_read(self, copy_query) df_types = dtypes(query_columns, exclude_dates=True) df = pd.read_csv(result, dtype=df_types, parse_dates=date_columns_names(query_columns), true_values=['t'], false_values=['f'], index_col='cartodb_id' if 'cartodb_id' in df_types.keys() else False, converters={'the_geom': lambda x: _decode_geom(x) if decode_geom else x}) if decode_geom: df.rename({'the_geom': 'geometry'}, axis='columns', inplace=True) return df
[ "def", "fetch", "(", "self", ",", "query", ",", "decode_geom", "=", "False", ")", ":", "copy_query", "=", "'COPY ({query}) TO stdout WITH (FORMAT csv, HEADER true)'", ".", "format", "(", "query", "=", "query", ")", "query_columns", "=", "get_columns", "(", "self", ",", "query", ")", "result", "=", "recursive_read", "(", "self", ",", "copy_query", ")", "df_types", "=", "dtypes", "(", "query_columns", ",", "exclude_dates", "=", "True", ")", "df", "=", "pd", ".", "read_csv", "(", "result", ",", "dtype", "=", "df_types", ",", "parse_dates", "=", "date_columns_names", "(", "query_columns", ")", ",", "true_values", "=", "[", "'t'", "]", ",", "false_values", "=", "[", "'f'", "]", ",", "index_col", "=", "'cartodb_id'", "if", "'cartodb_id'", "in", "df_types", ".", "keys", "(", ")", "else", "False", ",", "converters", "=", "{", "'the_geom'", ":", "lambda", "x", ":", "_decode_geom", "(", "x", ")", "if", "decode_geom", "else", "x", "}", ")", "if", "decode_geom", ":", "df", ".", "rename", "(", "{", "'the_geom'", ":", "'geometry'", "}", ",", "axis", "=", "'columns'", ",", "inplace", "=", "True", ")", "return", "df" ]
Pull the result from an arbitrary SELECT SQL query from a CARTO account into a pandas DataFrame. Args: query (str): SELECT query to run against CARTO user database. This data will then be converted into a pandas DataFrame. decode_geom (bool, optional): Decodes CARTO's geometries into a `Shapely <https://github.com/Toblerity/Shapely>`__ object that can be used, for example, in `GeoPandas <http://geopandas.org/>`__. Returns: pandas.DataFrame: DataFrame representation of query supplied. Pandas data types are inferred from PostgreSQL data types. In the case of PostgreSQL date types, dates are attempted to be converted, but on failure a data type 'object' is used. Examples: This query gets the 10 highest values from a table and returns a dataframe. .. code:: python topten_df = cc.query( ''' SELECT * FROM my_table ORDER BY value_column DESC LIMIT 10 ''' ) This query joins points to polygons based on intersection, and aggregates by summing the values of the points in each polygon. The query returns a dataframe, with a geometry column that contains polygons. .. code:: python points_aggregated_to_polygons = cc.query( ''' SELECT polygons.*, sum(points.values) FROM polygons JOIN points ON ST_Intersects(points.the_geom, polygons.the_geom) GROUP BY polygons.the_geom, polygons.cartodb_id ''', decode_geom=True )
[ "Pull", "the", "result", "from", "an", "arbitrary", "SELECT", "SQL", "query", "from", "a", "CARTO", "account", "into", "a", "pandas", "DataFrame", "." ]
python
train
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L877-L883
def register_led_updates(self, callback, button): """Register a callback when a specific button LED changes.""" button_callbacks = self._button_observer_callbacks.get(button) if not button_callbacks: self._button_observer_callbacks[button] = [] _LOGGER.debug('New callback for button %d', button) self._button_observer_callbacks[button].append(callback)
[ "def", "register_led_updates", "(", "self", ",", "callback", ",", "button", ")", ":", "button_callbacks", "=", "self", ".", "_button_observer_callbacks", ".", "get", "(", "button", ")", "if", "not", "button_callbacks", ":", "self", ".", "_button_observer_callbacks", "[", "button", "]", "=", "[", "]", "_LOGGER", ".", "debug", "(", "'New callback for button %d'", ",", "button", ")", "self", ".", "_button_observer_callbacks", "[", "button", "]", ".", "append", "(", "callback", ")" ]
Register a callback when a specific button LED changes.
[ "Register", "a", "callback", "when", "a", "specific", "button", "LED", "changes", "." ]
python
train
christian-oudard/htmltreediff
htmltreediff/changes.py
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/changes.py#L67-L79
def remove_nesting(dom, tag_name): """ Unwrap items in the node list that have ancestors with the same tag. """ for node in dom.getElementsByTagName(tag_name): for ancestor in ancestors(node): if ancestor is node: continue if ancestor is dom.documentElement: break if ancestor.tagName == tag_name: unwrap(node) break
[ "def", "remove_nesting", "(", "dom", ",", "tag_name", ")", ":", "for", "node", "in", "dom", ".", "getElementsByTagName", "(", "tag_name", ")", ":", "for", "ancestor", "in", "ancestors", "(", "node", ")", ":", "if", "ancestor", "is", "node", ":", "continue", "if", "ancestor", "is", "dom", ".", "documentElement", ":", "break", "if", "ancestor", ".", "tagName", "==", "tag_name", ":", "unwrap", "(", "node", ")", "break" ]
Unwrap items in the node list that have ancestors with the same tag.
[ "Unwrap", "items", "in", "the", "node", "list", "that", "have", "ancestors", "with", "the", "same", "tag", "." ]
python
train
hearsaycorp/normalize
normalize/record/json.py
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/record/json.py#L175-L247
def to_json(record, extraneous=True, prop=None): """JSON conversion function: a 'visitor' function which implements marshall out (to JSON data form), honoring JSON property types/hints but does not require them. To convert to an actual JSON document, pass the return value to ``json.dumps`` or a similar function. args: ``record=``\ *anything* This object can be of any type; a best-effort attempt is made to convert to a form which ``json.dumps`` can accept; this function will call itself recursively, respecting any types which define ``.json_data()`` as a method and calling that. ``extraneous=``\ *BOOL* This parameter is passed through to any ``json_data()`` methods which support it. ``prop=``\ *PROPNAME*\ |\ *PROPERTY* Specifies to return the given property from an object, calling any ``to_json`` mapping defined on the property. Does not catch the ``AttributeError`` that is raised by the property not being set. """ if prop: if isinstance(prop, basestring): prop = type(record).properties[prop] val = prop.__get__(record) if hasattr(prop, "to_json"): return prop.to_json(val, extraneous, _json_data) else: return _json_data(val, extraneous) elif isinstance(record, Collection): if isinstance(record, RecordDict): return dict( (k, _json_data(v, extraneous)) for k, v in record.items() ) else: return list(_json_data(x, extraneous) for x in record) elif isinstance(record, Record): rv_dict = {} for propname, prop in type(record).properties.iteritems(): if not extraneous and prop.extraneous: pass elif prop.slot_is_empty(record): pass elif not hasattr(prop, "json_name") or prop.json_name is not None: json_name = getattr(prop, "json_name", prop.name) try: rv_dict[json_name] = to_json(record, extraneous, prop) except AttributeError: pass return rv_dict elif isinstance(record, long): return str(record) if abs(record) > 2**50 else record elif isinstance(record, dict): return dict( (k, _json_data(v, extraneous)) for k, v in record.iteritems() ) elif isinstance(record, (list, tuple, set, frozenset)): return list(_json_data(x, extraneous) for x in record) elif isinstance(record, (basestring, int, float, types.NoneType)): return record else: raise TypeError( "I don't know how to marshall a %s to JSON" % type(record).__name__ )
[ "def", "to_json", "(", "record", ",", "extraneous", "=", "True", ",", "prop", "=", "None", ")", ":", "if", "prop", ":", "if", "isinstance", "(", "prop", ",", "basestring", ")", ":", "prop", "=", "type", "(", "record", ")", ".", "properties", "[", "prop", "]", "val", "=", "prop", ".", "__get__", "(", "record", ")", "if", "hasattr", "(", "prop", ",", "\"to_json\"", ")", ":", "return", "prop", ".", "to_json", "(", "val", ",", "extraneous", ",", "_json_data", ")", "else", ":", "return", "_json_data", "(", "val", ",", "extraneous", ")", "elif", "isinstance", "(", "record", ",", "Collection", ")", ":", "if", "isinstance", "(", "record", ",", "RecordDict", ")", ":", "return", "dict", "(", "(", "k", ",", "_json_data", "(", "v", ",", "extraneous", ")", ")", "for", "k", ",", "v", "in", "record", ".", "items", "(", ")", ")", "else", ":", "return", "list", "(", "_json_data", "(", "x", ",", "extraneous", ")", "for", "x", "in", "record", ")", "elif", "isinstance", "(", "record", ",", "Record", ")", ":", "rv_dict", "=", "{", "}", "for", "propname", ",", "prop", "in", "type", "(", "record", ")", ".", "properties", ".", "iteritems", "(", ")", ":", "if", "not", "extraneous", "and", "prop", ".", "extraneous", ":", "pass", "elif", "prop", ".", "slot_is_empty", "(", "record", ")", ":", "pass", "elif", "not", "hasattr", "(", "prop", ",", "\"json_name\"", ")", "or", "prop", ".", "json_name", "is", "not", "None", ":", "json_name", "=", "getattr", "(", "prop", ",", "\"json_name\"", ",", "prop", ".", "name", ")", "try", ":", "rv_dict", "[", "json_name", "]", "=", "to_json", "(", "record", ",", "extraneous", ",", "prop", ")", "except", "AttributeError", ":", "pass", "return", "rv_dict", "elif", "isinstance", "(", "record", ",", "long", ")", ":", "return", "str", "(", "record", ")", "if", "abs", "(", "record", ")", ">", "2", "**", "50", "else", "record", "elif", "isinstance", "(", "record", ",", "dict", ")", ":", "return", "dict", "(", "(", "k", ",", "_json_data", "(", "v", ",", "extraneous", ")", ")", "for", "k", ",", "v", "in", "record", ".", "iteritems", "(", ")", ")", "elif", "isinstance", "(", "record", ",", "(", "list", ",", "tuple", ",", "set", ",", "frozenset", ")", ")", ":", "return", "list", "(", "_json_data", "(", "x", ",", "extraneous", ")", "for", "x", "in", "record", ")", "elif", "isinstance", "(", "record", ",", "(", "basestring", ",", "int", ",", "float", ",", "types", ".", "NoneType", ")", ")", ":", "return", "record", "else", ":", "raise", "TypeError", "(", "\"I don't know how to marshall a %s to JSON\"", "%", "type", "(", "record", ")", ".", "__name__", ")" ]
JSON conversion function: a 'visitor' function which implements marshall out (to JSON data form), honoring JSON property types/hints but does not require them. To convert to an actual JSON document, pass the return value to ``json.dumps`` or a similar function. args: ``record=``\ *anything* This object can be of any type; a best-effort attempt is made to convert to a form which ``json.dumps`` can accept; this function will call itself recursively, respecting any types which define ``.json_data()`` as a method and calling that. ``extraneous=``\ *BOOL* This parameter is passed through to any ``json_data()`` methods which support it. ``prop=``\ *PROPNAME*\ |\ *PROPERTY* Specifies to return the given property from an object, calling any ``to_json`` mapping defined on the property. Does not catch the ``AttributeError`` that is raised by the property not being set.
[ "JSON", "conversion", "function", ":", "a", "visitor", "function", "which", "implements", "marshall", "out", "(", "to", "JSON", "data", "form", ")", "honoring", "JSON", "property", "types", "/", "hints", "but", "does", "not", "require", "them", ".", "To", "convert", "to", "an", "actual", "JSON", "document", "pass", "the", "return", "value", "to", "json", ".", "dumps", "or", "a", "similar", "function", "." ]
python
train
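A hedged usage sketch for to_json, assuming normalize's Record/Property API as used in this repo (note the module above is Python 2 era code, so this runs under Python 2). The visitor walks properties and emits plain dicts and lists that json.dumps can serialize:

    import json
    from normalize import Record, Property
    from normalize.record.json import to_json

    class Point(Record):
        x = Property(isa=int)
        y = Property(isa=int)

    print(json.dumps(to_json(Point(x=1, y=2))))  # {"x": 1, "y": 2} (key order may vary)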
dslackw/slpkg
slpkg/pkg/manager.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/pkg/manager.py#L361-L377
def _reference_rmvs(self, removes): """Prints all removed packages """ print("") self.msg.template(78) msg_pkg = "package" if len(removes) > 1: msg_pkg = "packages" print("| Total {0} {1} removed".format(len(removes), msg_pkg)) self.msg.template(78) for pkg in removes: if not GetFromInstalled(pkg).name(): print("| Package {0} removed".format(pkg)) else: print("| Package {0} not found".format(pkg)) self.msg.template(78) print("")
[ "def", "_reference_rmvs", "(", "self", ",", "removes", ")", ":", "print", "(", "\"\"", ")", "self", ".", "msg", ".", "template", "(", "78", ")", "msg_pkg", "=", "\"package\"", "if", "len", "(", "removes", ")", ">", "1", ":", "msg_pkg", "=", "\"packages\"", "print", "(", "\"| Total {0} {1} removed\"", ".", "format", "(", "len", "(", "removes", ")", ",", "msg_pkg", ")", ")", "self", ".", "msg", ".", "template", "(", "78", ")", "for", "pkg", "in", "removes", ":", "if", "not", "GetFromInstalled", "(", "pkg", ")", ".", "name", "(", ")", ":", "print", "(", "\"| Package {0} removed\"", ".", "format", "(", "pkg", ")", ")", "else", ":", "print", "(", "\"| Package {0} not found\"", ".", "format", "(", "pkg", ")", ")", "self", ".", "msg", ".", "template", "(", "78", ")", "print", "(", "\"\"", ")" ]
Prints all removed packages
[ "Prints", "all", "removed", "packages" ]
python
train
cjdrake/pyeda
pyeda/boolalg/bdd.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bdd.py#L538-L546
def _dfs_preorder(node, visited): """Iterate through nodes in DFS pre-order.""" if node not in visited: visited.add(node) yield node if node.lo is not None: yield from _dfs_preorder(node.lo, visited) if node.hi is not None: yield from _dfs_preorder(node.hi, visited)
[ "def", "_dfs_preorder", "(", "node", ",", "visited", ")", ":", "if", "node", "not", "in", "visited", ":", "visited", ".", "add", "(", "node", ")", "yield", "node", "if", "node", ".", "lo", "is", "not", "None", ":", "yield", "from", "_dfs_preorder", "(", "node", ".", "lo", ",", "visited", ")", "if", "node", ".", "hi", "is", "not", "None", ":", "yield", "from", "_dfs_preorder", "(", "node", ".", "hi", ",", "visited", ")" ]
Iterate through nodes in DFS pre-order.
[ "Iterate", "through", "nodes", "in", "DFS", "pre", "-", "order", "." ]
python
train
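A tiny harness for _dfs_preorder, assuming the generator above is in scope. The visited set guarantees a shared node is yielded once even when it is reachable along both branches:

    class Node:
        def __init__(self, name, lo=None, hi=None):
            self.name, self.lo, self.hi = name, lo, hi

    shared = Node('shared')
    root = Node('root', lo=Node('a', lo=shared), hi=Node('b', lo=shared))
    print([n.name for n in _dfs_preorder(root, set())])
    # ['root', 'a', 'shared', 'b']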
abilian/abilian-core
abilian/core/entities.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/core/entities.py#L486-L499
def polymorphic_update_timestamp(session, flush_context, instances):
    """This listener ensures an update statement is emitted for "entity"
    table to update 'updated_at'.

    With joined-table inheritance if the only modified attributes are
    subclass's ones, then no update statement will be emitted.
    """
    for obj in session.dirty:
        if not isinstance(obj, Entity):
            continue
        state = sa.inspect(obj)
        history = state.attrs["updated_at"].history
        if not any((history.added, history.deleted)):
            obj.updated_at = datetime.utcnow()
[ "def", "polymorphic_update_timestamp", "(", "session", ",", "flush_context", ",", "instances", ")", ":", "for", "obj", "in", "session", ".", "dirty", ":", "if", "not", "isinstance", "(", "obj", ",", "Entity", ")", ":", "continue", "state", "=", "sa", ".", "inspect", "(", "obj", ")", "history", "=", "state", ".", "attrs", "[", "\"updated_at\"", "]", ".", "history", "if", "not", "any", "(", "(", "history", ".", "added", ",", "history", ".", "deleted", ")", ")", ":", "obj", ".", "updated_at", "=", "datetime", ".", "utcnow", "(", ")" ]
This listener ensures an update statement is emitted for "entity" table to update 'updated_at'. With joined-table inheritance if the only modified attributes are subclass's ones, then no update statement will be emitted.
[ "This", "listener", "ensures", "an", "update", "statement", "is", "emited", "for", "entity", "table", "to", "update", "updated_at", "." ]
python
train
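How such a listener is typically wired up. This is a hedged sketch, since abilian-core registers it internally; the (session, flush_context, instances) signature matches SQLAlchemy's before_flush event:

    import sqlalchemy as sa
    from sqlalchemy.orm import Session
    from abilian.core.entities import polymorphic_update_timestamp

    # hypothetical manual registration of the listener above
    sa.event.listen(Session, 'before_flush', polymorphic_update_timestamp)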
andialbrecht/sentry-comments
sentry_comments/plugin.py
https://github.com/andialbrecht/sentry-comments/blob/b9319320dc3b25b6d813377e69b2d379bcbf6197/sentry_comments/plugin.py#L38-L47
def get_title(self, group=None): """Adds number of comments to title.""" title = super(CommentsPlugin, self).get_title() if group is not None: count = GroupComments.objects.filter(group=group).count() else: count = None if count: title = u'%s (%d)' % (title, count) return title
[ "def", "get_title", "(", "self", ",", "group", "=", "None", ")", ":", "title", "=", "super", "(", "CommentsPlugin", ",", "self", ")", ".", "get_title", "(", ")", "if", "group", "is", "not", "None", ":", "count", "=", "GroupComments", ".", "objects", ".", "filter", "(", "group", "=", "group", ")", ".", "count", "(", ")", "else", ":", "count", "=", "None", "if", "count", ":", "title", "=", "u'%s (%d)'", "%", "(", "title", ",", "count", ")", "return", "title" ]
Adds number of comments to title.
[ "Adds", "number", "of", "comments", "to", "title", "." ]
python
train
ipfs/py-ipfs-api
ipfsapi/client.py
https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L907-L957
def name_publish(self, ipfs_path, resolve=True, lifetime="24h", ttl=None, key=None, **kwargs): """Publishes an object to IPNS. IPNS is a PKI namespace, where names are the hashes of public keys, and the private key enables publishing new (signed) values. In publish, the default value of *name* is your own identity public key. .. code-block:: python >>> c.name_publish('/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZK … GZ5d') {'Value': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d', 'Name': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc'} Parameters ---------- ipfs_path : str IPFS path of the object to be published resolve : bool Resolve given path before publishing lifetime : str Time duration that the record will be valid for Accepts durations such as ``"300s"``, ``"1.5h"`` or ``"2h45m"``. Valid units are: * ``"ns"`` * ``"us"`` (or ``"µs"``) * ``"ms"`` * ``"s"`` * ``"m"`` * ``"h"`` ttl : int Time duration this record should be cached for key : string Name of the key to be used, as listed by 'ipfs key list'. Returns ------- dict : IPNS hash and the IPFS path it points at """ opts = {"lifetime": lifetime, "resolve": resolve} if ttl: opts["ttl"] = ttl if key: opts["key"] = key kwargs.setdefault("opts", opts) args = (ipfs_path,) return self._client.request('/name/publish', args, decoder='json', **kwargs)
[ "def", "name_publish", "(", "self", ",", "ipfs_path", ",", "resolve", "=", "True", ",", "lifetime", "=", "\"24h\"", ",", "ttl", "=", "None", ",", "key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "opts", "=", "{", "\"lifetime\"", ":", "lifetime", ",", "\"resolve\"", ":", "resolve", "}", "if", "ttl", ":", "opts", "[", "\"ttl\"", "]", "=", "ttl", "if", "key", ":", "opts", "[", "\"key\"", "]", "=", "key", "kwargs", ".", "setdefault", "(", "\"opts\"", ",", "opts", ")", "args", "=", "(", "ipfs_path", ",", ")", "return", "self", ".", "_client", ".", "request", "(", "'/name/publish'", ",", "args", ",", "decoder", "=", "'json'", ",", "*", "*", "kwargs", ")" ]
Publishes an object to IPNS. IPNS is a PKI namespace, where names are the hashes of public keys, and the private key enables publishing new (signed) values. In publish, the default value of *name* is your own identity public key. .. code-block:: python >>> c.name_publish('/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZK … GZ5d') {'Value': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d', 'Name': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc'} Parameters ---------- ipfs_path : str IPFS path of the object to be published resolve : bool Resolve given path before publishing lifetime : str Time duration that the record will be valid for Accepts durations such as ``"300s"``, ``"1.5h"`` or ``"2h45m"``. Valid units are: * ``"ns"`` * ``"us"`` (or ``"µs"``) * ``"ms"`` * ``"s"`` * ``"m"`` * ``"h"`` ttl : int Time duration this record should be cached for key : string Name of the key to be used, as listed by 'ipfs key list'. Returns ------- dict : IPNS hash and the IPFS path it points at
[ "Publishes", "an", "object", "to", "IPNS", "." ]
python
train
inveniosoftware-contrib/json-merger
json_merger/utils.py
https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/utils.py#L107-L120
def dedupe_list(l): """Remove duplicates from a list preserving the order. We might be tempted to use the list(set(l)) idiom, but it doesn't preserve the order, which hinders testability and does not work for lists with unhashable elements. """ result = [] for el in l: if el not in result: result.append(el) return result
[ "def", "dedupe_list", "(", "l", ")", ":", "result", "=", "[", "]", "for", "el", "in", "l", ":", "if", "el", "not", "in", "result", ":", "result", ".", "append", "(", "el", ")", "return", "result" ]
Remove duplicates from a list preserving the order. We might be tempted to use the list(set(l)) idiom, but it doesn't preserve the order, which hinders testability and does not work for lists with unhashable elements.
[ "Remove", "duplicates", "from", "a", "list", "preserving", "the", "order", "." ]
python
train
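A quick check of dedupe_list's two selling points, assuming json_merger is installed: stable order and support for unhashable elements, where list(set(l)) would raise TypeError:

    from json_merger.utils import dedupe_list

    print(dedupe_list([3, 1, 3, [2], 2, [2]]))  # [3, 1, [2], 2]
    # list(set([3, 1, [2]])) -> TypeError: unhashable type: 'list'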
O365/python-o365
O365/connection.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/connection.py#L538-L546
def _check_delay(self):
    """ Checks if a delay is needed between requests and sleeps if True """
    if self._previous_request_at:
        dif = round(time.time() - self._previous_request_at,
                    2) * 1000  # difference in milliseconds
        if dif < self.requests_delay:
            time.sleep(
                (self.requests_delay - dif) / 1000)  # sleep needs seconds
    self._previous_request_at = time.time()
[ "def", "_check_delay", "(", "self", ")", ":", "if", "self", ".", "_previous_request_at", ":", "dif", "=", "round", "(", "time", ".", "time", "(", ")", "-", "self", ".", "_previous_request_at", ",", "2", ")", "*", "1000", "# difference in miliseconds", "if", "dif", "<", "self", ".", "requests_delay", ":", "time", ".", "sleep", "(", "(", "self", ".", "requests_delay", "-", "dif", ")", "/", "1000", ")", "# sleep needs seconds", "self", ".", "_previous_request_at", "=", "time", ".", "time", "(", ")" ]
Checks if a delay is needed between requests and sleeps if True
[ "Checks", "if", "a", "delay", "is", "needed", "between", "requests", "and", "sleeps", "if", "True" ]
python
train
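A minimal harness showing the throttle in action. Assumption: _check_delay above is available as a plain module-level function so it can be attached to a stand-in class:

    import time

    class Throttled:
        requests_delay = 100          # milliseconds between consecutive requests
        _previous_request_at = None
        _check_delay = _check_delay   # reuse the function defined above as a method

    t = Throttled()
    start = time.time()
    for _ in range(3):
        t._check_delay()
    print('elapsed: %.2fs' % (time.time() - start))  # roughly 0.2s for the two enforced gaps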
jalanb/pysyte
pysyte/bash/git.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/bash/git.py#L299-L305
def add(path=None, force=False, quiet=False):
    """Add that path to git's staging area (default current dir)

    so that it will be included in next commit
    """
    option = '-f' if force else ''
    return run('add %s %s' % (option, path or '.'), quiet=quiet)
[ "def", "add", "(", "path", "=", "None", ",", "force", "=", "False", ",", "quiet", "=", "False", ")", ":", "option", "=", "'-f'", "if", "force", "else", "''", "return", "run", "(", "'add %s %s'", "%", "(", "option", ",", "path", ")", "or", "'.'", ",", "quiet", "=", "quiet", ")" ]
Add that path to git's staging area (default current dir) so that it will be included in next commit
[ "Add", "that", "path", "to", "git", "s", "staging", "area", "(", "default", "current", "dir", ")" ]
python
train
linkhub-sdk/popbill.py
popbill/statementService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L109-L134
def FAXSend(self, CorpNum, statement, SendNum, ReceiveNum, UserID=None):
    """ Send by fax ahead of issuance
        args
            CorpNum : Popbill member business registration number
            statement : e-statement object
            SendNum : fax sender number
            ReceiveNum : fax recipient number
            UserID : Popbill member ID
        return
            fax transmission receipt number (receiptNum)
        raise
            PopbillException
    """
    if statement == None:
        raise PopbillException(-99999999, "전송할 전자명세서 정보가 입력되지 않았습니다.")
    if SendNum == None or SendNum == '':
        raise PopbillException(-99999999, "팩스전송 발신번호가 올바르지 않았습니다.")
    if ReceiveNum == None or ReceiveNum == '':
        raise PopbillException(-99999999, "팩스전송 수신번호가 올바르지 않습니다.")

    statement.sendNum = SendNum
    statement.receiveNum = ReceiveNum

    postData = self._stringtify(statement)

    return self._httppost('/Statement', postData, CorpNum, UserID, "FAX").receiptNum
[ "def", "FAXSend", "(", "self", ",", "CorpNum", ",", "statement", ",", "SendNum", ",", "ReceiveNum", ",", "UserID", "=", "None", ")", ":", "if", "statement", "==", "None", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"전송할 전자명세서 정보가 입력되지 않았습니다.\")\r", "", "if", "SendNum", "==", "None", "or", "SendNum", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"팩스전송 발신번호가 올바르지 않았습니다.\")\r", "", "if", "ReceiveNum", "==", "None", "or", "ReceiveNum", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"팩스전송 수신번호가 올바르지 않습니다.\")\r", "", "statement", ".", "sendNum", "=", "SendNum", "statement", ".", "receiveNum", "=", "ReceiveNum", "postData", "=", "self", ".", "_stringtify", "(", "statement", ")", "return", "self", ".", "_httppost", "(", "'/Statement'", ",", "postData", ",", "CorpNum", ",", "UserID", ",", "\"FAX\"", ")", ".", "receiptNum" ]
Send by fax ahead of issuance
args
    CorpNum : Popbill member business registration number
    statement : e-statement object
    SendNum : fax sender number
    ReceiveNum : fax recipient number
    UserID : Popbill member ID
return
    fax transmission receipt number (receiptNum)
raise
    PopbillException
[ "선팩스", "전송", "args", "CorpNum", ":", "팝빌회원", "사업자번호", "statement", ":", "전자명세서", "객체", "SendNum", ":", "팩스", "발신번호", "ReceiveNum", ":", "팩스", "수신번호", "UserID", ":", "팝빌회원", "아이디", "return", "팩스전송", "접수번호", "(", "receiptNum", ")", "raise", "PopbillException" ]
python
train
KE-works/pykechain
pykechain/models/scope.py
https://github.com/KE-works/pykechain/blob/b0296cf34328fd41660bf6f0b9114fd0167c40c4/pykechain/models/scope.py#L179-L197
def members(self, is_manager=None): """ Retrieve members of the scope. :param is_manager: (optional) set to True to return only Scope members that are also managers. :type is_manager: bool :return: List of members (usernames) Examples -------- >>> members = project.members() >>> managers = project.members(is_manager=True) """ if not is_manager: return [member for member in self._json_data['members'] if member['is_active']] else: return [member for member in self._json_data['members'] if member.get('is_active', False) and member.get('is_manager', False)]
[ "def", "members", "(", "self", ",", "is_manager", "=", "None", ")", ":", "if", "not", "is_manager", ":", "return", "[", "member", "for", "member", "in", "self", ".", "_json_data", "[", "'members'", "]", "if", "member", "[", "'is_active'", "]", "]", "else", ":", "return", "[", "member", "for", "member", "in", "self", ".", "_json_data", "[", "'members'", "]", "if", "member", ".", "get", "(", "'is_active'", ",", "False", ")", "and", "member", ".", "get", "(", "'is_manager'", ",", "False", ")", "]" ]
Retrieve members of the scope. :param is_manager: (optional) set to True to return only Scope members that are also managers. :type is_manager: bool :return: List of members (usernames) Examples -------- >>> members = project.members() >>> managers = project.members(is_manager=True)
[ "Retrieve", "members", "of", "the", "scope", "." ]
python
train
idlesign/pysyge
pysyge/pysyge.py
https://github.com/idlesign/pysyge/blob/24a3d9c9a82438f3327a3dc801984b50fd1f0fee/pysyge/pysyge.py#L408-L420
def get_location(self, ip, detailed=False): """Returns a dictionary with location data or False on failure. Amount of information about IP contained in the dictionary depends upon `detailed` flag state. """ seek = self._get_pos(ip) if seek > 0: return self._parse_location(seek, detailed=detailed) return False
[ "def", "get_location", "(", "self", ",", "ip", ",", "detailed", "=", "False", ")", ":", "seek", "=", "self", ".", "_get_pos", "(", "ip", ")", "if", "seek", ">", "0", ":", "return", "self", ".", "_parse_location", "(", "seek", ",", "detailed", "=", "detailed", ")", "return", "False" ]
Returns a dictionary with location data or False on failure. Amount of information about IP contained in the dictionary depends upon `detailed` flag state.
[ "Returns", "a", "dictionary", "with", "location", "data", "or", "False", "on", "failure", "." ]
python
train
lesscpy/lesscpy
lesscpy/plib/block.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/plib/block.py#L139-L149
def raw(self, clean=False): """Raw block name args: clean (bool): clean name returns: str """ try: return self.tokens[0].raw(clean) except (AttributeError, TypeError): pass
[ "def", "raw", "(", "self", ",", "clean", "=", "False", ")", ":", "try", ":", "return", "self", ".", "tokens", "[", "0", "]", ".", "raw", "(", "clean", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "pass" ]
Raw block name args: clean (bool): clean name returns: str
[ "Raw", "block", "name", "args", ":", "clean", "(", "bool", ")", ":", "clean", "name", "returns", ":", "str" ]
python
valid
sahilchinoy/django-irs-filings
irs/management/commands/loadIRS.py
https://github.com/sahilchinoy/django-irs-filings/blob/efe80cc57ce1d9d8488f4e9496cf2347e29b6d8b/irs/management/commands/loadIRS.py#L50-L76
def clean_cell(self, cell, cell_type): """ Uses the type of field (from the mapping) to determine how to clean and format the cell. """ try: # Get rid of non-ASCII characters cell = cell.encode('ascii', 'ignore').decode() if cell_type == 'D': cell = datetime.strptime(cell, '%Y%m%d') elif cell_type == 'I': cell = int(cell) elif cell_type == 'N': cell = Decimal(cell) else: cell = cell.upper() if len(cell) > 50: cell = cell[0:50] if not cell or cell in NULL_TERMS: cell = None except: cell = None return cell
[ "def", "clean_cell", "(", "self", ",", "cell", ",", "cell_type", ")", ":", "try", ":", "# Get rid of non-ASCII characters", "cell", "=", "cell", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", ")", "if", "cell_type", "==", "'D'", ":", "cell", "=", "datetime", ".", "strptime", "(", "cell", ",", "'%Y%m%d'", ")", "elif", "cell_type", "==", "'I'", ":", "cell", "=", "int", "(", "cell", ")", "elif", "cell_type", "==", "'N'", ":", "cell", "=", "Decimal", "(", "cell", ")", "else", ":", "cell", "=", "cell", ".", "upper", "(", ")", "if", "len", "(", "cell", ")", ">", "50", ":", "cell", "=", "cell", "[", "0", ":", "50", "]", "if", "not", "cell", "or", "cell", "in", "NULL_TERMS", ":", "cell", "=", "None", "except", ":", "cell", "=", "None", "return", "cell" ]
Uses the type of field (from the mapping) to determine how to clean and format the cell.
[ "Uses", "the", "type", "of", "field", "(", "from", "the", "mapping", ")", "to", "determine", "how", "to", "clean", "and", "format", "the", "cell", "." ]
python
train
cisco-sas/kitty
kitty/data/data_manager.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/data/data_manager.py#L497-L513
def set_session_info(self, info): ''' :type info: :class:`~kitty.data.data_manager.SessionInfo` :param info: info to set ''' if not self.info: self.info = SessionInfo() info_d = self.info.as_dict() ks = [] vs = [] for k, v in info_d.items(): ks.append(k) vs.append(v) self.insert(ks, vs) changed = self.info.copy(info) if changed: self.update(self.info.as_dict())
[ "def", "set_session_info", "(", "self", ",", "info", ")", ":", "if", "not", "self", ".", "info", ":", "self", ".", "info", "=", "SessionInfo", "(", ")", "info_d", "=", "self", ".", "info", ".", "as_dict", "(", ")", "ks", "=", "[", "]", "vs", "=", "[", "]", "for", "k", ",", "v", "in", "info_d", ".", "items", "(", ")", ":", "ks", ".", "append", "(", "k", ")", "vs", ".", "append", "(", "v", ")", "self", ".", "insert", "(", "ks", ",", "vs", ")", "changed", "=", "self", ".", "info", ".", "copy", "(", "info", ")", "if", "changed", ":", "self", ".", "update", "(", "self", ".", "info", ".", "as_dict", "(", ")", ")" ]
:type info: :class:`~kitty.data.data_manager.SessionInfo` :param info: info to set
[ ":", "type", "info", ":", ":", "class", ":", "~kitty", ".", "data", ".", "data_manager", ".", "SessionInfo", ":", "param", "info", ":", "info", "to", "set" ]
python
train
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L237-L261
def affine_rotation_matrix(angle=(-20, 20)): """Create an affine transform matrix for image rotation. NOTE: In OpenCV, x is width and y is height. Parameters ----------- angle : int/float or tuple of two int/float Degree to rotate, usually -180 ~ 180. - int/float, a fixed angle. - tuple of 2 floats/ints, randomly sample a value as the angle between these 2 values. Returns ------- numpy.array An affine transform matrix. """ if isinstance(angle, tuple): theta = np.pi / 180 * np.random.uniform(angle[0], angle[1]) else: theta = np.pi / 180 * angle rotation_matrix = np.array([[np.cos(theta), np.sin(theta), 0], \ [-np.sin(theta), np.cos(theta), 0], \ [0, 0, 1]]) return rotation_matrix
[ "def", "affine_rotation_matrix", "(", "angle", "=", "(", "-", "20", ",", "20", ")", ")", ":", "if", "isinstance", "(", "angle", ",", "tuple", ")", ":", "theta", "=", "np", ".", "pi", "/", "180", "*", "np", ".", "random", ".", "uniform", "(", "angle", "[", "0", "]", ",", "angle", "[", "1", "]", ")", "else", ":", "theta", "=", "np", ".", "pi", "/", "180", "*", "angle", "rotation_matrix", "=", "np", ".", "array", "(", "[", "[", "np", ".", "cos", "(", "theta", ")", ",", "np", ".", "sin", "(", "theta", ")", ",", "0", "]", ",", "[", "-", "np", ".", "sin", "(", "theta", ")", ",", "np", ".", "cos", "(", "theta", ")", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", "]", "]", ")", "return", "rotation_matrix" ]
Create an affine transform matrix for image rotation. NOTE: In OpenCV, x is width and y is height. Parameters ----------- angle : int/float or tuple of two int/float Degree to rotate, usually -180 ~ 180. - int/float, a fixed angle. - tuple of 2 floats/ints, randomly sample a value as the angle between these 2 values. Returns ------- numpy.array An affine transform matrix.
[ "Create", "an", "affine", "transform", "matrix", "for", "image", "rotation", ".", "NOTE", ":", "In", "OpenCV", "x", "is", "width", "and", "y", "is", "height", "." ]
python
valid
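Applying the matrix to a homogeneous point makes the convention concrete. A fixed 90-degree angle keeps the output deterministic; tensorlayer normally feeds these matrices to its affine-transform helpers (assumes tensorlayer and numpy are installed):

    import numpy as np
    from tensorlayer.prepro import affine_rotation_matrix

    M = affine_rotation_matrix(angle=90)
    point = np.array([1.0, 0.0, 1.0])   # homogeneous (x, y, 1)
    print(np.round(M.dot(point), 6))    # [ 0. -1.  1.]  ->  (1, 0) maps to (0, -1)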
scour-project/scour
scour/scour.py
https://github.com/scour-project/scour/blob/049264eba6b1a54ae5ba1d6a5077d8e7b80e8835/scour/scour.py#L524-L539
def findElementsWithId(node, elems=None): """ Returns all elements with id attributes """ if elems is None: elems = {} id = node.getAttribute('id') if id != '': elems[id] = node if node.hasChildNodes(): for child in node.childNodes: # from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html # we are only really interested in nodes of type Element (1) if child.nodeType == Node.ELEMENT_NODE: findElementsWithId(child, elems) return elems
[ "def", "findElementsWithId", "(", "node", ",", "elems", "=", "None", ")", ":", "if", "elems", "is", "None", ":", "elems", "=", "{", "}", "id", "=", "node", ".", "getAttribute", "(", "'id'", ")", "if", "id", "!=", "''", ":", "elems", "[", "id", "]", "=", "node", "if", "node", ".", "hasChildNodes", "(", ")", ":", "for", "child", "in", "node", ".", "childNodes", ":", "# from http://www.w3.org/TR/DOM-Level-2-Core/idl-definitions.html", "# we are only really interested in nodes of type Element (1)", "if", "child", ".", "nodeType", "==", "Node", ".", "ELEMENT_NODE", ":", "findElementsWithId", "(", "child", ",", "elems", ")", "return", "elems" ]
Returns all elements with id attributes
[ "Returns", "all", "elements", "with", "id", "attributes" ]
python
train
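A self-contained run against a toy SVG, assuming scour is installed (scour uses this helper internally while pruning unreferenced ids):

    from xml.dom.minidom import parseString
    from scour.scour import findElementsWithId

    doc = parseString('<svg><g id="layer1"><rect id="r1"/></g></svg>')
    ids = findElementsWithId(doc.documentElement)
    print(sorted(ids))  # ['layer1', 'r1']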
PythonRails/rails
rails/views/jinja.py
https://github.com/PythonRails/rails/blob/1e199b9da4da5b24fef39fc6212d71fc9fbb18a5/rails/views/jinja.py#L26-L33
def render_source(self, source, variables=None): """ Render a source with the passed variables. """ if variables is None: variables = {} template = self._engine.from_string(source) return template.render(**variables)
[ "def", "render_source", "(", "self", ",", "source", ",", "variables", "=", "None", ")", ":", "if", "variables", "is", "None", ":", "variables", "=", "{", "}", "template", "=", "self", ".", "_engine", ".", "from_string", "(", "source", ")", "return", "template", ".", "render", "(", "*", "*", "variables", ")" ]
Render a source with the passed variables.
[ "Render", "a", "source", "with", "the", "passed", "variables", "." ]
python
train
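The wrapper above delegates to a jinja2 environment's from_string/render pair. Stripped of the class, the underlying calls look like this, assuming jinja2 is installed:

    from jinja2 import Environment

    engine = Environment()
    template = engine.from_string('Hello, {{ name }}!')
    print(template.render(name='Rails'))  # Hello, Rails!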
SBRG/ssbio
ssbio/utils.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L854-L874
def conv_to_float(indata, inf_str=''): """Try to convert an arbitrary string to a float. Specify what will be replaced with "Inf". Args: indata (str): String which contains a float inf_str (str): If string contains something other than a float, and you want to replace it with float("Inf"), specify that string here. Returns: float: Converted string representation """ if indata.strip() == inf_str: outdata = float('Inf') else: try: outdata = float(indata) except: raise ValueError('Unable to convert {} to float'.format(indata)) return outdata
[ "def", "conv_to_float", "(", "indata", ",", "inf_str", "=", "''", ")", ":", "if", "indata", ".", "strip", "(", ")", "==", "inf_str", ":", "outdata", "=", "float", "(", "'Inf'", ")", "else", ":", "try", ":", "outdata", "=", "float", "(", "indata", ")", "except", ":", "raise", "ValueError", "(", "'Unable to convert {} to float'", ".", "format", "(", "indata", ")", ")", "return", "outdata" ]
Try to convert an arbitrary string to a float. Specify what will be replaced with "Inf". Args: indata (str): String which contains a float inf_str (str): If string contains something other than a float, and you want to replace it with float("Inf"), specify that string here. Returns: float: Converted string representation
[ "Try", "to", "convert", "an", "arbitrary", "string", "to", "a", "float", ".", "Specify", "what", "will", "be", "replaced", "with", "Inf", ".", "Args", ":", "indata", "(", "str", ")", ":", "String", "which", "contains", "a", "float", "inf_str", "(", "str", ")", ":", "If", "string", "contains", "something", "other", "than", "a", "float", "and", "you", "want", "to", "replace", "it", "with", "float", "(", "Inf", ")", "specify", "that", "string", "here", "." ]
python
train
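Example calls, assuming ssbio is installed; the inf_str hook is handy for sentinel values such as detection limits:

    from ssbio.utils import conv_to_float

    print(conv_to_float('3.14'))                     # 3.14
    print(conv_to_float('  >LOD ', inf_str='>LOD'))  # inf
    # conv_to_float('abc') raises ValueError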
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L890-L902
def list_service_certificates(self, service_name): ''' Lists all of the service certificates associated with the specified hosted service. service_name: Name of the hosted service. ''' _validate_not_none('service_name', service_name) return self._perform_get( '/' + self.subscription_id + '/services/hostedservices/' + _str(service_name) + '/certificates', Certificates)
[ "def", "list_service_certificates", "(", "self", ",", "service_name", ")", ":", "_validate_not_none", "(", "'service_name'", ",", "service_name", ")", "return", "self", ".", "_perform_get", "(", "'/'", "+", "self", ".", "subscription_id", "+", "'/services/hostedservices/'", "+", "_str", "(", "service_name", ")", "+", "'/certificates'", ",", "Certificates", ")" ]
Lists all of the service certificates associated with the specified hosted service. service_name: Name of the hosted service.
[ "Lists", "all", "of", "the", "service", "certificates", "associated", "with", "the", "specified", "hosted", "service", "." ]
python
test
DarkEnergySurvey/ugali
ugali/utils/stats.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/stats.py#L294-L299
def median(self, name, **kwargs): """ Median of the distribution. """ data = self.get(name,**kwargs) return np.percentile(data,[50])
[ "def", "median", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "get", "(", "name", ",", "*", "*", "kwargs", ")", "return", "np", ".", "percentile", "(", "data", ",", "[", "50", "]", ")" ]
Median of the distribution.
[ "Median", "of", "the", "distribution", "." ]
python
train