Dataset schema (column: type, observed range):
repo: string, lengths 7 to 55
path: string, lengths 4 to 223
url: string, lengths 87 to 315
code: string, lengths 75 to 104k
code_tokens: list
docstring: string, lengths 1 to 46.9k
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
avg_line_len: float64, values 7.91 to 980
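To make the layout concrete, here is a minimal sketch of what a single record might look like when read into Python. The field names come from the schema above; the values are abbreviated from the first record below, and the dict representation is only an assumed, illustrative way of holding a record, not part of the dataset itself.

# Illustrative sketch only: one record of this corpus as a plain Python dict.
# Field names follow the schema above; long values are truncated ("...") here.
record = {
    "repo": "Kortemme-Lab/klab",
    "path": "klab/general/strutil.py",
    "url": "https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/strutil.py#L66-L87",
    "code": "def merge_pdb_range_pairs(prs): ...",        # full function source, flattened to one line
    "code_tokens": ["def", "merge_pdb_range_pairs", "(", "prs", ")", ":", "..."],
    "docstring": "Takes in a list of PDB residue IDs ...",  # truncated
    "docstring_tokens": ["Takes", "in", "a", "list", "..."],
    "language": "python",
    "partition": "train",
    "avg_line_len": 40.5,
}

# Typical use is to pair a function's tokens with its docstring tokens,
# e.g. as training examples for code search or code summarization.
print(record["repo"], record["path"])
print(record["docstring_tokens"][:4], "->", record["code_tokens"][:6])

The records below follow this layout, one field per line in the order given in the schema, with code and token lists flattened onto single lines.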
Kortemme-Lab/klab
klab/general/strutil.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/general/strutil.py#L66-L87
def merge_pdb_range_pairs(prs): '''Takes in a list of PDB residue IDs (including insertion codes) specifying ranges and returns a sorted list of merged, sorted ranges. This works as above but we have to split the residues into pairs as "1A" > "19". ''' new_prs = [] sprs = [sorted((split_pdb_residue(p[0]), split_pdb_residue(p[1]))) for p in prs] sprs = sorted(sprs) merged = False x = 0 from klab import colortext while x < len(sprs): newx = x + 1 new_pair = list(sprs[x]) for y in range(x + 1, len(sprs)): if new_pair[0] <= (sprs[y][0][0] - 1, sprs[y][0][1]) <= new_pair[1]: new_pair[0] = min(new_pair[0], sprs[y][0]) new_pair[1] = max(new_pair[1], sprs[y][1]) newx = y + 1 if new_pair not in new_prs: new_prs.append(new_pair) x = newx return new_prs
[ "def", "merge_pdb_range_pairs", "(", "prs", ")", ":", "new_prs", "=", "[", "]", "sprs", "=", "[", "sorted", "(", "(", "split_pdb_residue", "(", "p", "[", "0", "]", ")", ",", "split_pdb_residue", "(", "p", "[", "1", "]", ")", ")", ")", "for", "p", "in", "prs", "]", "sprs", "=", "sorted", "(", "sprs", ")", "merged", "=", "False", "x", "=", "0", "from", "klab", "import", "colortext", "while", "x", "<", "len", "(", "sprs", ")", ":", "newx", "=", "x", "+", "1", "new_pair", "=", "list", "(", "sprs", "[", "x", "]", ")", "for", "y", "in", "range", "(", "x", "+", "1", ",", "len", "(", "sprs", ")", ")", ":", "if", "new_pair", "[", "0", "]", "<=", "(", "sprs", "[", "y", "]", "[", "0", "]", "[", "0", "]", "-", "1", ",", "sprs", "[", "y", "]", "[", "0", "]", "[", "1", "]", ")", "<=", "new_pair", "[", "1", "]", ":", "new_pair", "[", "0", "]", "=", "min", "(", "new_pair", "[", "0", "]", ",", "sprs", "[", "y", "]", "[", "0", "]", ")", "new_pair", "[", "1", "]", "=", "max", "(", "new_pair", "[", "1", "]", ",", "sprs", "[", "y", "]", "[", "1", "]", ")", "newx", "=", "y", "+", "1", "if", "new_pair", "not", "in", "new_prs", ":", "new_prs", ".", "append", "(", "new_pair", ")", "x", "=", "newx", "return", "new_prs" ]
Takes in a list of PDB residue IDs (including insertion codes) specifying ranges and returns a sorted list of merged, sorted ranges. This works as above but we have to split the residues into pairs as "1A" > "19".
[ "Takes", "in", "a", "list", "of", "PDB", "residue", "IDs", "(", "including", "insertion", "codes", ")", "specifying", "ranges", "and", "returns", "a", "sorted", "list", "of", "merged", "sorted", "ranges", ".", "This", "works", "as", "above", "but", "we", "have", "to", "split", "the", "residues", "into", "pairs", "as", "1A", ">", "19", "." ]
python
train
40.5
IS-ENES-Data/esgf-pid
esgfpid/connector.py
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L351-L401
def unpublish_one_version(self, **args): ''' Sends a PID update request for the unpublication of one version of a dataset currently published at the given data node. Either the handle or the pair of drs_id and version_number have to be provided, otherwise an exception will occur. The consumer will of course check the PID request message's timestamp with the timestamp of the last publication, so that republications in the mean time are not unpublished. The unpublication of the files is included in this method. :param handle: Optional. The handle of the dataset to be unpublished. :param drs_id: Optional. The dataset id of the dataset to be unpublished. :param version_number: Optional. The version number of the dataset to be unpublished. :raises: ArgumentError: If not enough arguments are passed to identify the dataset, or if no data node was specified during library init. ''' # Check args optional_args = ['handle', 'drs_id', 'version_number'] esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args) # Check if data node is given if self.__data_node is None: msg = 'No data_node given (but it is mandatory for unpublication)' logwarn(LOGGER, msg) raise esgfpid.exceptions.ArgumentError(msg) # Unpublish assistant = esgfpid.assistant.unpublish.AssistantOneVersion( drs_id = args['drs_id'], data_node = self.__data_node, prefix=self.prefix, coupler=self.__coupler, message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string() ) assistant.unpublish_one_dataset_version( handle = args['handle'], version_number = args['version_number'] )
[ "def", "unpublish_one_version", "(", "self", ",", "*", "*", "args", ")", ":", "# Check args", "optional_args", "=", "[", "'handle'", ",", "'drs_id'", ",", "'version_number'", "]", "esgfpid", ".", "utils", ".", "add_missing_optional_args_with_value_none", "(", "args", ",", "optional_args", ")", "# Check if data node is given", "if", "self", ".", "__data_node", "is", "None", ":", "msg", "=", "'No data_node given (but it is mandatory for unpublication)'", "logwarn", "(", "LOGGER", ",", "msg", ")", "raise", "esgfpid", ".", "exceptions", ".", "ArgumentError", "(", "msg", ")", "# Unpublish", "assistant", "=", "esgfpid", ".", "assistant", ".", "unpublish", ".", "AssistantOneVersion", "(", "drs_id", "=", "args", "[", "'drs_id'", "]", ",", "data_node", "=", "self", ".", "__data_node", ",", "prefix", "=", "self", ".", "prefix", ",", "coupler", "=", "self", ".", "__coupler", ",", "message_timestamp", "=", "esgfpid", ".", "utils", ".", "get_now_utc_as_formatted_string", "(", ")", ")", "assistant", ".", "unpublish_one_dataset_version", "(", "handle", "=", "args", "[", "'handle'", "]", ",", "version_number", "=", "args", "[", "'version_number'", "]", ")" ]
Sends a PID update request for the unpublication of one version of a dataset currently published at the given data node. Either the handle or the pair of drs_id and version_number have to be provided, otherwise an exception will occur. The consumer will of course check the PID request message's timestamp with the timestamp of the last publication, so that republications in the mean time are not unpublished. The unpublication of the files is included in this method. :param handle: Optional. The handle of the dataset to be unpublished. :param drs_id: Optional. The dataset id of the dataset to be unpublished. :param version_number: Optional. The version number of the dataset to be unpublished. :raises: ArgumentError: If not enough arguments are passed to identify the dataset, or if no data node was specified during library init.
[ "Sends", "a", "PID", "update", "request", "for", "the", "unpublication", "of", "one", "version", "of", "a", "dataset", "currently", "published", "at", "the", "given", "data", "node", "." ]
python
train
37.313725
howl-anderson/MicroTokenizer
MicroTokenizer/util.py
https://github.com/howl-anderson/MicroTokenizer/blob/41bbe9c31d202b4f751ad5201d343ad1123b42b5/MicroTokenizer/util.py#L370-L386
def print_markdown(data, title=None): """Print data in GitHub-flavoured Markdown format for issues etc. data (dict or list of tuples): Label/value pairs. title (unicode or None): Title, will be rendered as headline 2. """ def excl_value(value): # contains path, i.e. personal info return isinstance(value, basestring_) and Path(value).exists() if isinstance(data, dict): data = list(data.items()) markdown = ["* **{}:** {}".format(l, unicode_(v)) for l, v in data if not excl_value(v)] if title: print("\n## {}".format(title)) print('\n{}\n'.format('\n'.join(markdown)))
[ "def", "print_markdown", "(", "data", ",", "title", "=", "None", ")", ":", "def", "excl_value", "(", "value", ")", ":", "# contains path, i.e. personal info", "return", "isinstance", "(", "value", ",", "basestring_", ")", "and", "Path", "(", "value", ")", ".", "exists", "(", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "list", "(", "data", ".", "items", "(", ")", ")", "markdown", "=", "[", "\"* **{}:** {}\"", ".", "format", "(", "l", ",", "unicode_", "(", "v", ")", ")", "for", "l", ",", "v", "in", "data", "if", "not", "excl_value", "(", "v", ")", "]", "if", "title", ":", "print", "(", "\"\\n## {}\"", ".", "format", "(", "title", ")", ")", "print", "(", "'\\n{}\\n'", ".", "format", "(", "'\\n'", ".", "join", "(", "markdown", ")", ")", ")" ]
Print data in GitHub-flavoured Markdown format for issues etc. data (dict or list of tuples): Label/value pairs. title (unicode or None): Title, will be rendered as headline 2.
[ "Print", "data", "in", "GitHub", "-", "flavoured", "Markdown", "format", "for", "issues", "etc", "." ]
python
train
37.647059
ungarj/mapchete
mapchete/io/raster.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/io/raster.py#L31-L86
def read_raster_window( input_files, tile, indexes=None, resampling="nearest", src_nodata=None, dst_nodata=None, gdal_opts=None ): """ Return NumPy arrays from an input raster. NumPy arrays are reprojected and resampled to tile properties from input raster. If tile boundaries cross the antimeridian, data on the other side of the antimeridian will be read and concatenated to the numpy array accordingly. Parameters ---------- input_files : string or list path to a raster file or list of paths to multiple raster files readable by rasterio. tile : Tile a Tile object indexes : list or int a list of band numbers; None will read all. resampling : string one of "nearest", "average", "bilinear" or "lanczos" src_nodata : int or float, optional if not set, the nodata value from the source dataset will be used dst_nodata : int or float, optional if not set, the nodata value from the source dataset will be used gdal_opts : dict GDAL options passed on to rasterio.Env() Returns ------- raster : MaskedArray """ with rasterio.Env( **get_gdal_options( gdal_opts, is_remote=path_is_remote( input_files[0] if isinstance(input_files, list) else input_files, s3=True ) ) ) as env: logger.debug("reading %s with GDAL options %s", input_files, env.options) return _read_raster_window( input_files, tile, indexes=indexes, resampling=resampling, src_nodata=src_nodata, dst_nodata=dst_nodata )
[ "def", "read_raster_window", "(", "input_files", ",", "tile", ",", "indexes", "=", "None", ",", "resampling", "=", "\"nearest\"", ",", "src_nodata", "=", "None", ",", "dst_nodata", "=", "None", ",", "gdal_opts", "=", "None", ")", ":", "with", "rasterio", ".", "Env", "(", "*", "*", "get_gdal_options", "(", "gdal_opts", ",", "is_remote", "=", "path_is_remote", "(", "input_files", "[", "0", "]", "if", "isinstance", "(", "input_files", ",", "list", ")", "else", "input_files", ",", "s3", "=", "True", ")", ")", ")", "as", "env", ":", "logger", ".", "debug", "(", "\"reading %s with GDAL options %s\"", ",", "input_files", ",", "env", ".", "options", ")", "return", "_read_raster_window", "(", "input_files", ",", "tile", ",", "indexes", "=", "indexes", ",", "resampling", "=", "resampling", ",", "src_nodata", "=", "src_nodata", ",", "dst_nodata", "=", "dst_nodata", ")" ]
Return NumPy arrays from an input raster. NumPy arrays are reprojected and resampled to tile properties from input raster. If tile boundaries cross the antimeridian, data on the other side of the antimeridian will be read and concatenated to the numpy array accordingly. Parameters ---------- input_files : string or list path to a raster file or list of paths to multiple raster files readable by rasterio. tile : Tile a Tile object indexes : list or int a list of band numbers; None will read all. resampling : string one of "nearest", "average", "bilinear" or "lanczos" src_nodata : int or float, optional if not set, the nodata value from the source dataset will be used dst_nodata : int or float, optional if not set, the nodata value from the source dataset will be used gdal_opts : dict GDAL options passed on to rasterio.Env() Returns ------- raster : MaskedArray
[ "Return", "NumPy", "arrays", "from", "an", "input", "raster", "." ]
python
valid
29.821429
kellerza/pyqwikswitch
pyqwikswitch/async_.py
https://github.com/kellerza/pyqwikswitch/blob/9d4f080048221eaee93e3eefcf641919ff1af586/pyqwikswitch/async_.py#L79-L108
async def _async_listen(self, callback=None): """Listen loop.""" while True: if not self._running: return try: packet = await self.get_json( URL_LISTEN.format(self._url), timeout=30, exceptions=True) except asyncio.TimeoutError: continue except aiohttp.client_exceptions.ClientError as exc: _LOGGER.warning("ClientError: %s", exc) self._sleep_task = self.loop.create_task(asyncio.sleep(30)) try: await self._sleep_task except asyncio.CancelledError: pass self._sleep_task = None continue if isinstance(packet, dict) and QS_CMD in packet: _LOGGER.debug("callback( %s )", packet) try: callback(packet) except Exception as err: # pylint: disable=broad-except _LOGGER.error("Exception in callback\nType: %s: %s", type(err), err) else: _LOGGER.debug("unknown packet? %s", packet)
[ "async", "def", "_async_listen", "(", "self", ",", "callback", "=", "None", ")", ":", "while", "True", ":", "if", "not", "self", ".", "_running", ":", "return", "try", ":", "packet", "=", "await", "self", ".", "get_json", "(", "URL_LISTEN", ".", "format", "(", "self", ".", "_url", ")", ",", "timeout", "=", "30", ",", "exceptions", "=", "True", ")", "except", "asyncio", ".", "TimeoutError", ":", "continue", "except", "aiohttp", ".", "client_exceptions", ".", "ClientError", "as", "exc", ":", "_LOGGER", ".", "warning", "(", "\"ClientError: %s\"", ",", "exc", ")", "self", ".", "_sleep_task", "=", "self", ".", "loop", ".", "create_task", "(", "asyncio", ".", "sleep", "(", "30", ")", ")", "try", ":", "await", "self", ".", "_sleep_task", "except", "asyncio", ".", "CancelledError", ":", "pass", "self", ".", "_sleep_task", "=", "None", "continue", "if", "isinstance", "(", "packet", ",", "dict", ")", "and", "QS_CMD", "in", "packet", ":", "_LOGGER", ".", "debug", "(", "\"callback( %s )\"", ",", "packet", ")", "try", ":", "callback", "(", "packet", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_LOGGER", ".", "error", "(", "\"Exception in callback\\nType: %s: %s\"", ",", "type", "(", "err", ")", ",", "err", ")", "else", ":", "_LOGGER", ".", "debug", "(", "\"unknown packet? %s\"", ",", "packet", ")" ]
Listen loop.
[ "Listen", "loop", "." ]
python
train
39.266667
calmjs/calmjs
src/calmjs/cli.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/cli.py#L119-L127
def node(self, source, args=(), env={}): """ Calls node with an inline source. Returns decoded output of stdout and stderr; decoding determine by locale. """ return self._exec(self.node_bin, source, args=args, env=env)
[ "def", "node", "(", "self", ",", "source", ",", "args", "=", "(", ")", ",", "env", "=", "{", "}", ")", ":", "return", "self", ".", "_exec", "(", "self", ".", "node_bin", ",", "source", ",", "args", "=", "args", ",", "env", "=", "env", ")" ]
Calls node with an inline source. Returns decoded output of stdout and stderr; decoding determine by locale.
[ "Calls", "node", "with", "an", "inline", "source", "." ]
python
train
28.888889
sosy-lab/benchexec
benchexec/containerexecutor.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/containerexecutor.py#L77-L134
def handle_basic_container_args(options, parser=None): """Handle the options specified by add_basic_container_args(). @return: a dict that can be used as kwargs for the ContainerExecutor constructor """ dir_modes = {} error_fn = parser.error if parser else sys.exit def handle_dir_mode(path, mode): path = os.path.abspath(path) if not os.path.isdir(path): error_fn( "Cannot specify directory mode for '{}' because it does not exist or is no directory." .format(path)) if path in dir_modes: error_fn("Cannot specify multiple directory modes for '{}'.".format(path)) dir_modes[path] = mode for path in options.hidden_dir: handle_dir_mode(path, DIR_HIDDEN) for path in options.read_only_dir: handle_dir_mode(path, DIR_READ_ONLY) for path in options.overlay_dir: handle_dir_mode(path, DIR_OVERLAY) for path in options.full_access_dir: handle_dir_mode(path, DIR_FULL_ACCESS) if options.keep_tmp: if "/tmp" in dir_modes and not dir_modes["/tmp"] == DIR_FULL_ACCESS: error_fn("Cannot specify both --keep-tmp and --hidden-dir /tmp.") dir_modes["/tmp"] = DIR_FULL_ACCESS elif not "/tmp" in dir_modes: dir_modes["/tmp"] = DIR_HIDDEN if not "/" in dir_modes: dir_modes["/"] = DIR_OVERLAY if not "/run" in dir_modes: dir_modes["/run"] = DIR_HIDDEN if options.container_system_config: if options.network_access: logging.warning("The container configuration disables DNS, " "host lookups will fail despite --network-access. " "Consider using --keep-system-config.") else: # /etc/resolv.conf is necessary for DNS lookups and on many systems is a symlink # to either /run/resolvconf/resolv.conf or /run/systemd/resolve/sub-resolve.conf, # so we keep that directory accessible as well. if not "/run/resolvconf" in dir_modes and os.path.isdir("/run/resolvconf"): dir_modes["/run/resolvconf"] = DIR_READ_ONLY if not "/run/systemd/resolve" in dir_modes and os.path.isdir("/run/systemd/resolve"): dir_modes["/run/systemd/resolve"] = DIR_READ_ONLY return { 'network_access': options.network_access, 'container_tmpfs': options.tmpfs, 'container_system_config': options.container_system_config, 'dir_modes': dir_modes, }
[ "def", "handle_basic_container_args", "(", "options", ",", "parser", "=", "None", ")", ":", "dir_modes", "=", "{", "}", "error_fn", "=", "parser", ".", "error", "if", "parser", "else", "sys", ".", "exit", "def", "handle_dir_mode", "(", "path", ",", "mode", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "error_fn", "(", "\"Cannot specify directory mode for '{}' because it does not exist or is no directory.\"", ".", "format", "(", "path", ")", ")", "if", "path", "in", "dir_modes", ":", "error_fn", "(", "\"Cannot specify multiple directory modes for '{}'.\"", ".", "format", "(", "path", ")", ")", "dir_modes", "[", "path", "]", "=", "mode", "for", "path", "in", "options", ".", "hidden_dir", ":", "handle_dir_mode", "(", "path", ",", "DIR_HIDDEN", ")", "for", "path", "in", "options", ".", "read_only_dir", ":", "handle_dir_mode", "(", "path", ",", "DIR_READ_ONLY", ")", "for", "path", "in", "options", ".", "overlay_dir", ":", "handle_dir_mode", "(", "path", ",", "DIR_OVERLAY", ")", "for", "path", "in", "options", ".", "full_access_dir", ":", "handle_dir_mode", "(", "path", ",", "DIR_FULL_ACCESS", ")", "if", "options", ".", "keep_tmp", ":", "if", "\"/tmp\"", "in", "dir_modes", "and", "not", "dir_modes", "[", "\"/tmp\"", "]", "==", "DIR_FULL_ACCESS", ":", "error_fn", "(", "\"Cannot specify both --keep-tmp and --hidden-dir /tmp.\"", ")", "dir_modes", "[", "\"/tmp\"", "]", "=", "DIR_FULL_ACCESS", "elif", "not", "\"/tmp\"", "in", "dir_modes", ":", "dir_modes", "[", "\"/tmp\"", "]", "=", "DIR_HIDDEN", "if", "not", "\"/\"", "in", "dir_modes", ":", "dir_modes", "[", "\"/\"", "]", "=", "DIR_OVERLAY", "if", "not", "\"/run\"", "in", "dir_modes", ":", "dir_modes", "[", "\"/run\"", "]", "=", "DIR_HIDDEN", "if", "options", ".", "container_system_config", ":", "if", "options", ".", "network_access", ":", "logging", ".", "warning", "(", "\"The container configuration disables DNS, \"", "\"host lookups will fail despite --network-access. \"", "\"Consider using --keep-system-config.\"", ")", "else", ":", "# /etc/resolv.conf is necessary for DNS lookups and on many systems is a symlink", "# to either /run/resolvconf/resolv.conf or /run/systemd/resolve/sub-resolve.conf,", "# so we keep that directory accessible as well.", "if", "not", "\"/run/resolvconf\"", "in", "dir_modes", "and", "os", ".", "path", ".", "isdir", "(", "\"/run/resolvconf\"", ")", ":", "dir_modes", "[", "\"/run/resolvconf\"", "]", "=", "DIR_READ_ONLY", "if", "not", "\"/run/systemd/resolve\"", "in", "dir_modes", "and", "os", ".", "path", ".", "isdir", "(", "\"/run/systemd/resolve\"", ")", ":", "dir_modes", "[", "\"/run/systemd/resolve\"", "]", "=", "DIR_READ_ONLY", "return", "{", "'network_access'", ":", "options", ".", "network_access", ",", "'container_tmpfs'", ":", "options", ".", "tmpfs", ",", "'container_system_config'", ":", "options", ".", "container_system_config", ",", "'dir_modes'", ":", "dir_modes", ",", "}" ]
Handle the options specified by add_basic_container_args(). @return: a dict that can be used as kwargs for the ContainerExecutor constructor
[ "Handle", "the", "options", "specified", "by", "add_basic_container_args", "()", "." ]
python
train
42.086207
mwgielen/jackal
jackal/core.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/core.py#L184-L192
def id_to_object(self, line): """ Resolves an ip adres to a range object, creating it if it doesn't exists. """ result = Range.get(line, ignore=404) if not result: result = Range(range=line) result.save() return result
[ "def", "id_to_object", "(", "self", ",", "line", ")", ":", "result", "=", "Range", ".", "get", "(", "line", ",", "ignore", "=", "404", ")", "if", "not", "result", ":", "result", "=", "Range", "(", "range", "=", "line", ")", "result", ".", "save", "(", ")", "return", "result" ]
Resolves an ip adres to a range object, creating it if it doesn't exists.
[ "Resolves", "an", "ip", "adres", "to", "a", "range", "object", "creating", "it", "if", "it", "doesn", "t", "exists", "." ]
python
valid
31.777778
zooniverse/panoptes-python-client
panoptes_client/exportable.py
https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/exportable.py#L162-L179
def describe_export(self, export_type): """ Fetch metadata for an export. - **export_type** is a string specifying which type of export to look up. Returns a :py:class:`dict` containing metadata for the export. """ if export_type in TALK_EXPORT_TYPES: return talk.get_data_request( 'project-{}'.format(self.id), export_type.replace('talk_', '') )[0] return self.http_get( self._export_path(export_type), )[0]
[ "def", "describe_export", "(", "self", ",", "export_type", ")", ":", "if", "export_type", "in", "TALK_EXPORT_TYPES", ":", "return", "talk", ".", "get_data_request", "(", "'project-{}'", ".", "format", "(", "self", ".", "id", ")", ",", "export_type", ".", "replace", "(", "'talk_'", ",", "''", ")", ")", "[", "0", "]", "return", "self", ".", "http_get", "(", "self", ".", "_export_path", "(", "export_type", ")", ",", ")", "[", "0", "]" ]
Fetch metadata for an export. - **export_type** is a string specifying which type of export to look up. Returns a :py:class:`dict` containing metadata for the export.
[ "Fetch", "metadata", "for", "an", "export", "." ]
python
train
29.777778
josuebrunel/yahoo-oauth
yahoo_oauth/utils.py
https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L51-L58
def json_get_data(filename): """Get data from json file """ with open(filename) as fp: json_data = json.load(fp) return json_data return False
[ "def", "json_get_data", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "fp", ":", "json_data", "=", "json", ".", "load", "(", "fp", ")", "return", "json_data", "return", "False" ]
Get data from json file
[ "Get", "data", "from", "json", "file" ]
python
valid
21
bihealth/vcfpy
vcfpy/parser.py
https://github.com/bihealth/vcfpy/blob/99e2165df30f11e0c95f3170f31bc5191d9e9e15/vcfpy/parser.py#L313-L323
def process_sub_shrink(ref, alt_str): """Process substution where the string shrink""" if len(ref) == 0: raise exceptions.InvalidRecordException("Invalid VCF, empty REF") elif len(ref) == 1: if ref[0] == alt_str[0]: return record.Substitution(record.INS, alt_str) else: return record.Substitution(record.INDEL, alt_str) else: return record.Substitution(record.INDEL, alt_str)
[ "def", "process_sub_shrink", "(", "ref", ",", "alt_str", ")", ":", "if", "len", "(", "ref", ")", "==", "0", ":", "raise", "exceptions", ".", "InvalidRecordException", "(", "\"Invalid VCF, empty REF\"", ")", "elif", "len", "(", "ref", ")", "==", "1", ":", "if", "ref", "[", "0", "]", "==", "alt_str", "[", "0", "]", ":", "return", "record", ".", "Substitution", "(", "record", ".", "INS", ",", "alt_str", ")", "else", ":", "return", "record", ".", "Substitution", "(", "record", ".", "INDEL", ",", "alt_str", ")", "else", ":", "return", "record", ".", "Substitution", "(", "record", ".", "INDEL", ",", "alt_str", ")" ]
Process substution where the string shrink
[ "Process", "substution", "where", "the", "string", "shrink" ]
python
train
39.727273
eqcorrscan/EQcorrscan
eqcorrscan/utils/correlate.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L680-L790
def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids, cores_inner, cores_outer): """ Use a C loop rather than a Python loop - in some cases this will be fast. :type template_array: dict :param template_array: :type stream_array: dict :param stream_array: :type pad_array: dict :param pad_array: :type seed_ids: list :param seed_ids: rtype: np.ndarray, list :return: 3D Array of cross-correlations and list of used channels. """ utilslib = _load_cdll('libutils') utilslib.multi_normxcorr_fftw.argtypes = [ np.ctypeslib.ndpointer(dtype=np.float32, flags=native_str('C_CONTIGUOUS')), ctypes.c_long, ctypes.c_long, ctypes.c_long, np.ctypeslib.ndpointer(dtype=np.float32, flags=native_str('C_CONTIGUOUS')), ctypes.c_long, np.ctypeslib.ndpointer(dtype=np.float32, flags=native_str('C_CONTIGUOUS')), ctypes.c_long, np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS')), np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS')), ctypes.c_int, ctypes.c_int, np.ctypeslib.ndpointer(dtype=np.intc, flags=native_str('C_CONTIGUOUS'))] utilslib.multi_normxcorr_fftw.restype = ctypes.c_int ''' Arguments are: templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...]) number of templates template length number of channels image (stacked [ch_1, ch_2, ..., ch_n]) image length cross-correlations (stacked as per image) fft-length used channels (stacked as per templates) pad array (stacked as per templates) ''' # pre processing used_chans = [] template_len = template_array[seed_ids[0]].shape[1] for seed_id in seed_ids: used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1)) template_array[seed_id] = ( (template_array[seed_id] - template_array[seed_id].mean(axis=-1, keepdims=True)) / ( template_array[seed_id].std(axis=-1, keepdims=True) * template_len)) template_array[seed_id] = np.nan_to_num(template_array[seed_id]) n_channels = len(seed_ids) n_templates = template_array[seed_ids[0]].shape[0] image_len = stream_array[seed_ids[0]].shape[0] fft_len = next_fast_len(template_len + image_len - 1) template_array = np.ascontiguousarray([template_array[x] for x in seed_ids], dtype=np.float32) for x in seed_ids: # Check that stream is non-zero and above variance threshold if not np.all(stream_array[x] == 0) and np.var(stream_array[x]) < 1e-8: # Apply gain stream_array *= 1e8 warnings.warn("Low variance found for {0}, applying gain " "to stabilise correlations".format(x)) stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids], dtype=np.float32) cccs = np.zeros((n_templates, image_len - template_len + 1), np.float32) used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc) pad_array_np = np.ascontiguousarray([pad_array[seed_id] for seed_id in seed_ids], dtype=np.intc) variance_warnings = np.ascontiguousarray( np.zeros(n_channels), dtype=np.intc) # call C function ret = utilslib.multi_normxcorr_fftw( template_array, n_templates, template_len, n_channels, stream_array, image_len, cccs, fft_len, used_chans_np, pad_array_np, cores_outer, cores_inner, variance_warnings) if ret < 0: raise MemoryError("Memory allocation failed in correlation C-code") elif ret not in [0, 999]: print('Error in C code (possible normalisation error)') print('Maximum cccs %f at %s' % (cccs.max(), np.unravel_index(cccs.argmax(), cccs.shape))) print('Minimum cccs %f at %s' % (cccs.min(), np.unravel_index(cccs.argmin(), cccs.shape))) raise CorrelationError("Internal correlation error") elif ret == 999: warnings.warn("Some correlations not computed, are there " "zeros in data? If not, consider increasing gain.") for i, variance_warning in enumerate(variance_warnings): if variance_warning and variance_warning > template_len: warnings.warn("Low variance found in {0} places for {1}," " check result.".format(variance_warning, seed_ids[i])) return cccs, used_chans
[ "def", "fftw_multi_normxcorr", "(", "template_array", ",", "stream_array", ",", "pad_array", ",", "seed_ids", ",", "cores_inner", ",", "cores_outer", ")", ":", "utilslib", "=", "_load_cdll", "(", "'libutils'", ")", "utilslib", ".", "multi_normxcorr_fftw", ".", "argtypes", "=", "[", "np", ".", "ctypeslib", ".", "ndpointer", "(", "dtype", "=", "np", ".", "float32", ",", "flags", "=", "native_str", "(", "'C_CONTIGUOUS'", ")", ")", ",", "ctypes", ".", "c_long", ",", "ctypes", ".", "c_long", ",", "ctypes", ".", "c_long", ",", "np", ".", "ctypeslib", ".", "ndpointer", "(", "dtype", "=", "np", ".", "float32", ",", "flags", "=", "native_str", "(", "'C_CONTIGUOUS'", ")", ")", ",", "ctypes", ".", "c_long", ",", "np", ".", "ctypeslib", ".", "ndpointer", "(", "dtype", "=", "np", ".", "float32", ",", "flags", "=", "native_str", "(", "'C_CONTIGUOUS'", ")", ")", ",", "ctypes", ".", "c_long", ",", "np", ".", "ctypeslib", ".", "ndpointer", "(", "dtype", "=", "np", ".", "intc", ",", "flags", "=", "native_str", "(", "'C_CONTIGUOUS'", ")", ")", ",", "np", ".", "ctypeslib", ".", "ndpointer", "(", "dtype", "=", "np", ".", "intc", ",", "flags", "=", "native_str", "(", "'C_CONTIGUOUS'", ")", ")", ",", "ctypes", ".", "c_int", ",", "ctypes", ".", "c_int", ",", "np", ".", "ctypeslib", ".", "ndpointer", "(", "dtype", "=", "np", ".", "intc", ",", "flags", "=", "native_str", "(", "'C_CONTIGUOUS'", ")", ")", "]", "utilslib", ".", "multi_normxcorr_fftw", ".", "restype", "=", "ctypes", ".", "c_int", "'''\n Arguments are:\n templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...])\n number of templates\n template length\n number of channels\n image (stacked [ch_1, ch_2, ..., ch_n])\n image length\n cross-correlations (stacked as per image)\n fft-length\n used channels (stacked as per templates)\n pad array (stacked as per templates)\n '''", "# pre processing", "used_chans", "=", "[", "]", "template_len", "=", "template_array", "[", "seed_ids", "[", "0", "]", "]", ".", "shape", "[", "1", "]", "for", "seed_id", "in", "seed_ids", ":", "used_chans", ".", "append", "(", "~", "np", ".", "isnan", "(", "template_array", "[", "seed_id", "]", ")", ".", "any", "(", "axis", "=", "1", ")", ")", "template_array", "[", "seed_id", "]", "=", "(", "(", "template_array", "[", "seed_id", "]", "-", "template_array", "[", "seed_id", "]", ".", "mean", "(", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", ")", "/", "(", "template_array", "[", "seed_id", "]", ".", "std", "(", "axis", "=", "-", "1", ",", "keepdims", "=", "True", ")", "*", "template_len", ")", ")", "template_array", "[", "seed_id", "]", "=", "np", ".", "nan_to_num", "(", "template_array", "[", "seed_id", "]", ")", "n_channels", "=", "len", "(", "seed_ids", ")", "n_templates", "=", "template_array", "[", "seed_ids", "[", "0", "]", "]", ".", "shape", "[", "0", "]", "image_len", "=", "stream_array", "[", "seed_ids", "[", "0", "]", "]", ".", "shape", "[", "0", "]", "fft_len", "=", "next_fast_len", "(", "template_len", "+", "image_len", "-", "1", ")", "template_array", "=", "np", ".", "ascontiguousarray", "(", "[", "template_array", "[", "x", "]", "for", "x", "in", "seed_ids", "]", ",", "dtype", "=", "np", ".", "float32", ")", "for", "x", "in", "seed_ids", ":", "# Check that stream is non-zero and above variance threshold", "if", "not", "np", ".", "all", "(", "stream_array", "[", "x", "]", "==", "0", ")", "and", "np", ".", "var", "(", "stream_array", "[", "x", "]", ")", "<", "1e-8", ":", "# Apply gain", "stream_array", "*=", "1e8", "warnings", ".", "warn", "(", 
"\"Low variance found for {0}, applying gain \"", "\"to stabilise correlations\"", ".", "format", "(", "x", ")", ")", "stream_array", "=", "np", ".", "ascontiguousarray", "(", "[", "stream_array", "[", "x", "]", "for", "x", "in", "seed_ids", "]", ",", "dtype", "=", "np", ".", "float32", ")", "cccs", "=", "np", ".", "zeros", "(", "(", "n_templates", ",", "image_len", "-", "template_len", "+", "1", ")", ",", "np", ".", "float32", ")", "used_chans_np", "=", "np", ".", "ascontiguousarray", "(", "used_chans", ",", "dtype", "=", "np", ".", "intc", ")", "pad_array_np", "=", "np", ".", "ascontiguousarray", "(", "[", "pad_array", "[", "seed_id", "]", "for", "seed_id", "in", "seed_ids", "]", ",", "dtype", "=", "np", ".", "intc", ")", "variance_warnings", "=", "np", ".", "ascontiguousarray", "(", "np", ".", "zeros", "(", "n_channels", ")", ",", "dtype", "=", "np", ".", "intc", ")", "# call C function", "ret", "=", "utilslib", ".", "multi_normxcorr_fftw", "(", "template_array", ",", "n_templates", ",", "template_len", ",", "n_channels", ",", "stream_array", ",", "image_len", ",", "cccs", ",", "fft_len", ",", "used_chans_np", ",", "pad_array_np", ",", "cores_outer", ",", "cores_inner", ",", "variance_warnings", ")", "if", "ret", "<", "0", ":", "raise", "MemoryError", "(", "\"Memory allocation failed in correlation C-code\"", ")", "elif", "ret", "not", "in", "[", "0", ",", "999", "]", ":", "print", "(", "'Error in C code (possible normalisation error)'", ")", "print", "(", "'Maximum cccs %f at %s'", "%", "(", "cccs", ".", "max", "(", ")", ",", "np", ".", "unravel_index", "(", "cccs", ".", "argmax", "(", ")", ",", "cccs", ".", "shape", ")", ")", ")", "print", "(", "'Minimum cccs %f at %s'", "%", "(", "cccs", ".", "min", "(", ")", ",", "np", ".", "unravel_index", "(", "cccs", ".", "argmin", "(", ")", ",", "cccs", ".", "shape", ")", ")", ")", "raise", "CorrelationError", "(", "\"Internal correlation error\"", ")", "elif", "ret", "==", "999", ":", "warnings", ".", "warn", "(", "\"Some correlations not computed, are there \"", "\"zeros in data? If not, consider increasing gain.\"", ")", "for", "i", ",", "variance_warning", "in", "enumerate", "(", "variance_warnings", ")", ":", "if", "variance_warning", "and", "variance_warning", ">", "template_len", ":", "warnings", ".", "warn", "(", "\"Low variance found in {0} places for {1},\"", "\" check result.\"", ".", "format", "(", "variance_warning", ",", "seed_ids", "[", "i", "]", ")", ")", "return", "cccs", ",", "used_chans" ]
Use a C loop rather than a Python loop - in some cases this will be fast. :type template_array: dict :param template_array: :type stream_array: dict :param stream_array: :type pad_array: dict :param pad_array: :type seed_ids: list :param seed_ids: rtype: np.ndarray, list :return: 3D Array of cross-correlations and list of used channels.
[ "Use", "a", "C", "loop", "rather", "than", "a", "Python", "loop", "-", "in", "some", "cases", "this", "will", "be", "fast", "." ]
python
train
44.045045
google/apitools
apitools/base/py/transfer.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L499-L504
def StreamInChunks(self, callback=None, finish_callback=None, additional_headers=None): """Stream the entire download in chunks.""" self.StreamMedia(callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=True)
[ "def", "StreamInChunks", "(", "self", ",", "callback", "=", "None", ",", "finish_callback", "=", "None", ",", "additional_headers", "=", "None", ")", ":", "self", ".", "StreamMedia", "(", "callback", "=", "callback", ",", "finish_callback", "=", "finish_callback", ",", "additional_headers", "=", "additional_headers", ",", "use_chunks", "=", "True", ")" ]
Stream the entire download in chunks.
[ "Stream", "the", "entire", "download", "in", "chunks", "." ]
python
train
56.666667
portfors-lab/sparkle
sparkle/gui/plotting/protocoldisplay.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotting/protocoldisplay.py#L85-L88
def showSpec(self, fname): """Draws the spectrogram if it is currently None""" if not self.specPlot.hasImg() and fname is not None: self.specPlot.fromFile(fname)
[ "def", "showSpec", "(", "self", ",", "fname", ")", ":", "if", "not", "self", ".", "specPlot", ".", "hasImg", "(", ")", "and", "fname", "is", "not", "None", ":", "self", ".", "specPlot", ".", "fromFile", "(", "fname", ")" ]
Draws the spectrogram if it is currently None
[ "Draws", "the", "spectrogram", "if", "it", "is", "currently", "None" ]
python
train
46.5
UCL-INGI/INGInious
inginious/frontend/installer.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/installer.py#L91-L164
def run(self): """ Run the installator """ self._display_header("BACKEND CONFIGURATION") options = {} while True: options = {} backend = self.ask_backend() if backend == "local": self._display_info("Backend chosen: local. Testing the configuration.") options = self._ask_local_config() if not self.test_local_docker_conf(): self._display_error( "An error occurred while testing the configuration. Please make sure you are able do run `docker info` in " "your command line, and environment parameters like DOCKER_HOST are correctly set.") if self._ask_boolean("Would you like to continue anyway?", False): break else: break else: self._display_warning( "Backend chosen: manual. As it is a really advanced feature, you will have to configure it yourself in " "the configuration file, at the end of the setup process.") options = {"backend": backend} break self._display_header("MONGODB CONFIGURATION") mongo_opt = self.configure_mongodb() options.update(mongo_opt) self._display_header("TASK DIRECTORY") task_directory_opt = self.configure_task_directory() options.update(task_directory_opt) self._display_header("CONTAINERS") self.configure_containers(options) self._display_header("MISC") misc_opt = self.configure_misc() options.update(misc_opt) database = self.try_mongodb_opts(options["mongo_opt"]["host"], options["mongo_opt"]["database"]) self._display_header("BACKUP DIRECTORY") backup_directory_opt = self.configure_backup_directory() options.update(backup_directory_opt) self._display_header("AUTHENTIFICATION") auth_opts = self.configure_authentication(database) options.update(auth_opts) self._display_info("You may want to add additional plugins to the configuration file.") self._display_header("REMOTE DEBUGGING - IN BROWSER") self._display_info( "If you want to activate the remote debugging of task in the users' browser, you have to install separately " "INGInious-xterm, which is available on Github, according to the parameters you have given for the hostname and the " "port range given in the configuration of the remote debugging.") self._display_info( "You can leave the following question empty to disable this feature; remote debugging will still be available, " "but not in the browser.") webterm = self._ask_with_default( "Please indicate the link to your installation of INGInious-xterm (for example: " "https://your-hostname.com:8080).", "") if webterm != "": options["webterm"] = webterm self._display_header("END") file_dir = self._config_path or os.path.join(os.getcwd(), self.configuration_filename()) try: yaml.dump(options, open(file_dir, "w")) self._display_info("Successfully written the configuration file") except: self._display_error("Cannot write the configuration file on disk. Here is the content of the file") print(yaml.dump(options))
[ "def", "run", "(", "self", ")", ":", "self", ".", "_display_header", "(", "\"BACKEND CONFIGURATION\"", ")", "options", "=", "{", "}", "while", "True", ":", "options", "=", "{", "}", "backend", "=", "self", ".", "ask_backend", "(", ")", "if", "backend", "==", "\"local\"", ":", "self", ".", "_display_info", "(", "\"Backend chosen: local. Testing the configuration.\"", ")", "options", "=", "self", ".", "_ask_local_config", "(", ")", "if", "not", "self", ".", "test_local_docker_conf", "(", ")", ":", "self", ".", "_display_error", "(", "\"An error occurred while testing the configuration. Please make sure you are able do run `docker info` in \"", "\"your command line, and environment parameters like DOCKER_HOST are correctly set.\"", ")", "if", "self", ".", "_ask_boolean", "(", "\"Would you like to continue anyway?\"", ",", "False", ")", ":", "break", "else", ":", "break", "else", ":", "self", ".", "_display_warning", "(", "\"Backend chosen: manual. As it is a really advanced feature, you will have to configure it yourself in \"", "\"the configuration file, at the end of the setup process.\"", ")", "options", "=", "{", "\"backend\"", ":", "backend", "}", "break", "self", ".", "_display_header", "(", "\"MONGODB CONFIGURATION\"", ")", "mongo_opt", "=", "self", ".", "configure_mongodb", "(", ")", "options", ".", "update", "(", "mongo_opt", ")", "self", ".", "_display_header", "(", "\"TASK DIRECTORY\"", ")", "task_directory_opt", "=", "self", ".", "configure_task_directory", "(", ")", "options", ".", "update", "(", "task_directory_opt", ")", "self", ".", "_display_header", "(", "\"CONTAINERS\"", ")", "self", ".", "configure_containers", "(", "options", ")", "self", ".", "_display_header", "(", "\"MISC\"", ")", "misc_opt", "=", "self", ".", "configure_misc", "(", ")", "options", ".", "update", "(", "misc_opt", ")", "database", "=", "self", ".", "try_mongodb_opts", "(", "options", "[", "\"mongo_opt\"", "]", "[", "\"host\"", "]", ",", "options", "[", "\"mongo_opt\"", "]", "[", "\"database\"", "]", ")", "self", ".", "_display_header", "(", "\"BACKUP DIRECTORY\"", ")", "backup_directory_opt", "=", "self", ".", "configure_backup_directory", "(", ")", "options", ".", "update", "(", "backup_directory_opt", ")", "self", ".", "_display_header", "(", "\"AUTHENTIFICATION\"", ")", "auth_opts", "=", "self", ".", "configure_authentication", "(", "database", ")", "options", ".", "update", "(", "auth_opts", ")", "self", ".", "_display_info", "(", "\"You may want to add additional plugins to the configuration file.\"", ")", "self", ".", "_display_header", "(", "\"REMOTE DEBUGGING - IN BROWSER\"", ")", "self", ".", "_display_info", "(", "\"If you want to activate the remote debugging of task in the users' browser, you have to install separately \"", "\"INGInious-xterm, which is available on Github, according to the parameters you have given for the hostname and the \"", "\"port range given in the configuration of the remote debugging.\"", ")", "self", ".", "_display_info", "(", "\"You can leave the following question empty to disable this feature; remote debugging will still be available, \"", "\"but not in the browser.\"", ")", "webterm", "=", "self", ".", "_ask_with_default", "(", "\"Please indicate the link to your installation of INGInious-xterm (for example: \"", "\"https://your-hostname.com:8080).\"", ",", "\"\"", ")", "if", "webterm", "!=", "\"\"", ":", "options", "[", "\"webterm\"", "]", "=", "webterm", "self", ".", "_display_header", "(", "\"END\"", ")", "file_dir", "=", "self", ".", "_config_path", "or", 
"os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "self", ".", "configuration_filename", "(", ")", ")", "try", ":", "yaml", ".", "dump", "(", "options", ",", "open", "(", "file_dir", ",", "\"w\"", ")", ")", "self", ".", "_display_info", "(", "\"Successfully written the configuration file\"", ")", "except", ":", "self", ".", "_display_error", "(", "\"Cannot write the configuration file on disk. Here is the content of the file\"", ")", "print", "(", "yaml", ".", "dump", "(", "options", ")", ")" ]
Run the installator
[ "Run", "the", "installator" ]
python
train
46.581081
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/util/config.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/config.py#L35-L88
def _init(): """ Create global Config object, parse command flags """ global config, _data_path, _allowed_config_keys app_dir = _get_vispy_app_dir() if app_dir is not None: _data_path = op.join(app_dir, 'data') _test_data_path = op.join(app_dir, 'test_data') else: _data_path = _test_data_path = None # All allowed config keys and the types they may have _allowed_config_keys = { 'data_path': string_types, 'default_backend': string_types, 'gl_backend': string_types, 'gl_debug': (bool,), 'glir_file': string_types+file_types, 'include_path': list, 'logging_level': string_types, 'qt_lib': string_types, 'dpi': (int, type(None)), 'profile': string_types + (type(None),), 'audit_tests': (bool,), 'test_data_path': string_types + (type(None),), } # Default values for all config options default_config_options = { 'data_path': _data_path, 'default_backend': '', 'gl_backend': 'gl2', 'gl_debug': False, 'glir_file': '', 'include_path': [], 'logging_level': 'info', 'qt_lib': 'any', 'dpi': None, 'profile': None, 'audit_tests': False, 'test_data_path': _test_data_path, } config = Config(**default_config_options) try: config.update(**_load_config()) except Exception as err: raise Exception('Error while reading vispy config file "%s":\n %s' % (_get_config_fname(), err.message)) set_log_level(config['logging_level']) _parse_command_line_arguments()
[ "def", "_init", "(", ")", ":", "global", "config", ",", "_data_path", ",", "_allowed_config_keys", "app_dir", "=", "_get_vispy_app_dir", "(", ")", "if", "app_dir", "is", "not", "None", ":", "_data_path", "=", "op", ".", "join", "(", "app_dir", ",", "'data'", ")", "_test_data_path", "=", "op", ".", "join", "(", "app_dir", ",", "'test_data'", ")", "else", ":", "_data_path", "=", "_test_data_path", "=", "None", "# All allowed config keys and the types they may have", "_allowed_config_keys", "=", "{", "'data_path'", ":", "string_types", ",", "'default_backend'", ":", "string_types", ",", "'gl_backend'", ":", "string_types", ",", "'gl_debug'", ":", "(", "bool", ",", ")", ",", "'glir_file'", ":", "string_types", "+", "file_types", ",", "'include_path'", ":", "list", ",", "'logging_level'", ":", "string_types", ",", "'qt_lib'", ":", "string_types", ",", "'dpi'", ":", "(", "int", ",", "type", "(", "None", ")", ")", ",", "'profile'", ":", "string_types", "+", "(", "type", "(", "None", ")", ",", ")", ",", "'audit_tests'", ":", "(", "bool", ",", ")", ",", "'test_data_path'", ":", "string_types", "+", "(", "type", "(", "None", ")", ",", ")", ",", "}", "# Default values for all config options", "default_config_options", "=", "{", "'data_path'", ":", "_data_path", ",", "'default_backend'", ":", "''", ",", "'gl_backend'", ":", "'gl2'", ",", "'gl_debug'", ":", "False", ",", "'glir_file'", ":", "''", ",", "'include_path'", ":", "[", "]", ",", "'logging_level'", ":", "'info'", ",", "'qt_lib'", ":", "'any'", ",", "'dpi'", ":", "None", ",", "'profile'", ":", "None", ",", "'audit_tests'", ":", "False", ",", "'test_data_path'", ":", "_test_data_path", ",", "}", "config", "=", "Config", "(", "*", "*", "default_config_options", ")", "try", ":", "config", ".", "update", "(", "*", "*", "_load_config", "(", ")", ")", "except", "Exception", "as", "err", ":", "raise", "Exception", "(", "'Error while reading vispy config file \"%s\":\\n %s'", "%", "(", "_get_config_fname", "(", ")", ",", "err", ".", "message", ")", ")", "set_log_level", "(", "config", "[", "'logging_level'", "]", ")", "_parse_command_line_arguments", "(", ")" ]
Create global Config object, parse command flags
[ "Create", "global", "Config", "object", "parse", "command", "flags" ]
python
train
30.092593
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1865-L1868
def p_instance_port_arg(self, p): 'instance_port_arg : DOT ID LPAREN identifier RPAREN' p[0] = PortArg(p[2], p[4], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_instance_port_arg", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "PortArg", "(", "p", "[", "2", "]", ",", "p", "[", "4", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
instance_port_arg : DOT ID LPAREN identifier RPAREN
[ "instance_port_arg", ":", "DOT", "ID", "LPAREN", "identifier", "RPAREN" ]
python
train
46
deepmind/sonnet
sonnet/python/modules/pondering_rnn.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/pondering_rnn.py#L166-L211
def _build(self, x, prev_state): """Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2. """ x.get_shape().with_rank(2) self._batch_size = x.get_shape().as_list()[0] self._dtype = x.dtype x_zeros = tf.concat( [x, tf.zeros( shape=(self._batch_size, 1), dtype=self._dtype)], 1) x_ones = tf.concat( [x, tf.ones( shape=(self._batch_size, 1), dtype=self._dtype)], 1) # Weights for the halting signal halting_linear = basic.Linear(name="halting_linear", output_size=1) body = functools.partial( self._body, halting_linear=halting_linear, x_ones=x_ones) cumul_halting_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) core_output_size = [x.value for x in self._core.output_size] out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size), dtype=self._dtype) cumul_state_init = _nested_zeros_like(prev_state) remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) (unused_final_x, final_out, unused_final_state, final_cumul_state, unused_final_halting, final_iteration, final_remainder) = tf.while_loop( self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init, cumul_halting_init, iteration_init, remainder_init]) act_output = basic.Linear( name="act_output_linear", output_size=self._output_size)(final_out) return (act_output, (final_iteration, final_remainder)), final_cumul_state
[ "def", "_build", "(", "self", ",", "x", ",", "prev_state", ")", ":", "x", ".", "get_shape", "(", ")", ".", "with_rank", "(", "2", ")", "self", ".", "_batch_size", "=", "x", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "[", "0", "]", "self", ".", "_dtype", "=", "x", ".", "dtype", "x_zeros", "=", "tf", ".", "concat", "(", "[", "x", ",", "tf", ".", "zeros", "(", "shape", "=", "(", "self", ".", "_batch_size", ",", "1", ")", ",", "dtype", "=", "self", ".", "_dtype", ")", "]", ",", "1", ")", "x_ones", "=", "tf", ".", "concat", "(", "[", "x", ",", "tf", ".", "ones", "(", "shape", "=", "(", "self", ".", "_batch_size", ",", "1", ")", ",", "dtype", "=", "self", ".", "_dtype", ")", "]", ",", "1", ")", "# Weights for the halting signal", "halting_linear", "=", "basic", ".", "Linear", "(", "name", "=", "\"halting_linear\"", ",", "output_size", "=", "1", ")", "body", "=", "functools", ".", "partial", "(", "self", ".", "_body", ",", "halting_linear", "=", "halting_linear", ",", "x_ones", "=", "x_ones", ")", "cumul_halting_init", "=", "tf", ".", "zeros", "(", "shape", "=", "(", "self", ".", "_batch_size", ",", "1", ")", ",", "dtype", "=", "self", ".", "_dtype", ")", "iteration_init", "=", "tf", ".", "zeros", "(", "shape", "=", "(", "self", ".", "_batch_size", ",", "1", ")", ",", "dtype", "=", "self", ".", "_dtype", ")", "core_output_size", "=", "[", "x", ".", "value", "for", "x", "in", "self", ".", "_core", ".", "output_size", "]", "out_init", "=", "tf", ".", "zeros", "(", "shape", "=", "(", "self", ".", "_batch_size", ",", ")", "+", "tuple", "(", "core_output_size", ")", ",", "dtype", "=", "self", ".", "_dtype", ")", "cumul_state_init", "=", "_nested_zeros_like", "(", "prev_state", ")", "remainder_init", "=", "tf", ".", "zeros", "(", "shape", "=", "(", "self", ".", "_batch_size", ",", "1", ")", ",", "dtype", "=", "self", ".", "_dtype", ")", "(", "unused_final_x", ",", "final_out", ",", "unused_final_state", ",", "final_cumul_state", ",", "unused_final_halting", ",", "final_iteration", ",", "final_remainder", ")", "=", "tf", ".", "while_loop", "(", "self", ".", "_cond", ",", "body", ",", "[", "x_zeros", ",", "out_init", ",", "prev_state", ",", "cumul_state_init", ",", "cumul_halting_init", ",", "iteration_init", ",", "remainder_init", "]", ")", "act_output", "=", "basic", ".", "Linear", "(", "name", "=", "\"act_output_linear\"", ",", "output_size", "=", "self", ".", "_output_size", ")", "(", "final_out", ")", "return", "(", "act_output", ",", "(", "final_iteration", ",", "final_remainder", ")", ")", ",", "final_cumul_state" ]
Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2.
[ "Connects", "the", "core", "to", "the", "graph", "." ]
python
train
40.847826
Calysto/calysto
calysto/ai/conx.py
https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L1064-L1088
def addLayerNode(self, layerName, bias = None, weights = {}): """ Adds a new node to a layer, and puts in new weights. Adds node on the end. Weights will be random, unless specified. bias = the new node's bias weight weights = dict of {connectedLayerName: [weights], ...} Example: >>> net = Network() # doctest: +ELLIPSIS Conx using seed: ... >>> net.addLayers(2, 5, 1) >>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]}) """ self.changeLayerSize(layerName, self[layerName].size + 1) if bias != None: self[layerName].weight[-1] = bias for name in list(weights.keys()): for c in self.connections: if c.fromLayer.name == name and c.toLayer.name == layerName: for i in range(self[name].size): self[name, layerName].weight[i][-1] = weights[name][i] elif c.toLayer.name == name and c.fromLayer.name == layerName: for j in range(self[name].size): self[layerName, name].weight[-1][j] = weights[name][j]
[ "def", "addLayerNode", "(", "self", ",", "layerName", ",", "bias", "=", "None", ",", "weights", "=", "{", "}", ")", ":", "self", ".", "changeLayerSize", "(", "layerName", ",", "self", "[", "layerName", "]", ".", "size", "+", "1", ")", "if", "bias", "!=", "None", ":", "self", "[", "layerName", "]", ".", "weight", "[", "-", "1", "]", "=", "bias", "for", "name", "in", "list", "(", "weights", ".", "keys", "(", ")", ")", ":", "for", "c", "in", "self", ".", "connections", ":", "if", "c", ".", "fromLayer", ".", "name", "==", "name", "and", "c", ".", "toLayer", ".", "name", "==", "layerName", ":", "for", "i", "in", "range", "(", "self", "[", "name", "]", ".", "size", ")", ":", "self", "[", "name", ",", "layerName", "]", ".", "weight", "[", "i", "]", "[", "-", "1", "]", "=", "weights", "[", "name", "]", "[", "i", "]", "elif", "c", ".", "toLayer", ".", "name", "==", "name", "and", "c", ".", "fromLayer", ".", "name", "==", "layerName", ":", "for", "j", "in", "range", "(", "self", "[", "name", "]", ".", "size", ")", ":", "self", "[", "layerName", ",", "name", "]", ".", "weight", "[", "-", "1", "]", "[", "j", "]", "=", "weights", "[", "name", "]", "[", "j", "]" ]
Adds a new node to a layer, and puts in new weights. Adds node on the end. Weights will be random, unless specified. bias = the new node's bias weight weights = dict of {connectedLayerName: [weights], ...} Example: >>> net = Network() # doctest: +ELLIPSIS Conx using seed: ... >>> net.addLayers(2, 5, 1) >>> net.addLayerNode("hidden", bias = -0.12, weights = {"input": [1, 0], "output": [0]})
[ "Adds", "a", "new", "node", "to", "a", "layer", "and", "puts", "in", "new", "weights", ".", "Adds", "node", "on", "the", "end", ".", "Weights", "will", "be", "random", "unless", "specified", "." ]
python
train
46.76
mrcagney/gtfstk
gtfstk/trips.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/trips.py#L22-L83
def is_active_trip(feed: "Feed", trip_id: str, date: str) -> bool: """ Return ``True`` if the ``feed.calendar`` or ``feed.calendar_dates`` says that the trip runs on the given date; return ``False`` otherwise. Note that a trip that starts on date d, ends after 23:59:59, and does not start again on date d+1 is considered active on date d and not active on date d+1. This subtle point, which is a side effect of the GTFS, can lead to confusion. Parameters ---------- feed : Feed trip_id : string ID of a trip in ``feed.trips`` date : string YYYYMMDD date string Returns ------- boolean ``True`` if and only if the given trip starts on the given date. Notes ----- - This function is key for getting all trips, routes, etc. that are active on a given date, so the function needs to be fast - Assume the following feed attributes are not ``None``: * ``feed.trips`` """ service = feed._trips_i.at[trip_id, "service_id"] # Check feed._calendar_dates_g. caldg = feed._calendar_dates_g if caldg is not None: if (service, date) in caldg.groups: et = caldg.get_group((service, date))["exception_type"].iat[0] if et == 1: return True else: # Exception type is 2 return False # Check feed._calendar_i cali = feed._calendar_i if cali is not None: if service in cali.index: weekday_str = hp.weekday_to_str(hp.datestr_to_date(date).weekday()) if ( cali.at[service, "start_date"] <= date <= cali.at[service, "end_date"] and cali.at[service, weekday_str] == 1 ): return True else: return False # If you made it here, then something went wrong return False
[ "def", "is_active_trip", "(", "feed", ":", "\"Feed\"", ",", "trip_id", ":", "str", ",", "date", ":", "str", ")", "->", "bool", ":", "service", "=", "feed", ".", "_trips_i", ".", "at", "[", "trip_id", ",", "\"service_id\"", "]", "# Check feed._calendar_dates_g.", "caldg", "=", "feed", ".", "_calendar_dates_g", "if", "caldg", "is", "not", "None", ":", "if", "(", "service", ",", "date", ")", "in", "caldg", ".", "groups", ":", "et", "=", "caldg", ".", "get_group", "(", "(", "service", ",", "date", ")", ")", "[", "\"exception_type\"", "]", ".", "iat", "[", "0", "]", "if", "et", "==", "1", ":", "return", "True", "else", ":", "# Exception type is 2", "return", "False", "# Check feed._calendar_i", "cali", "=", "feed", ".", "_calendar_i", "if", "cali", "is", "not", "None", ":", "if", "service", "in", "cali", ".", "index", ":", "weekday_str", "=", "hp", ".", "weekday_to_str", "(", "hp", ".", "datestr_to_date", "(", "date", ")", ".", "weekday", "(", ")", ")", "if", "(", "cali", ".", "at", "[", "service", ",", "\"start_date\"", "]", "<=", "date", "<=", "cali", ".", "at", "[", "service", ",", "\"end_date\"", "]", "and", "cali", ".", "at", "[", "service", ",", "weekday_str", "]", "==", "1", ")", ":", "return", "True", "else", ":", "return", "False", "# If you made it here, then something went wrong", "return", "False" ]
Return ``True`` if the ``feed.calendar`` or ``feed.calendar_dates`` says that the trip runs on the given date; return ``False`` otherwise. Note that a trip that starts on date d, ends after 23:59:59, and does not start again on date d+1 is considered active on date d and not active on date d+1. This subtle point, which is a side effect of the GTFS, can lead to confusion. Parameters ---------- feed : Feed trip_id : string ID of a trip in ``feed.trips`` date : string YYYYMMDD date string Returns ------- boolean ``True`` if and only if the given trip starts on the given date. Notes ----- - This function is key for getting all trips, routes, etc. that are active on a given date, so the function needs to be fast - Assume the following feed attributes are not ``None``: * ``feed.trips``
[ "Return", "True", "if", "the", "feed", ".", "calendar", "or", "feed", ".", "calendar_dates", "says", "that", "the", "trip", "runs", "on", "the", "given", "date", ";", "return", "False", "otherwise", "." ]
python
train
30.612903
chaoss/grimoirelab-perceval
perceval/backends/core/bugzillarest.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/bugzillarest.py#L79-L96
def fetch(self, category=CATEGORY_BUG, from_date=DEFAULT_DATETIME): """Fetch the bugs from the repository. The method retrieves, from a Bugzilla repository, the bugs updated since the given date. :param category: the category of items to fetch :param from_date: obtain bugs updated since this date :returns: a generator of bugs """ if not from_date: from_date = DEFAULT_DATETIME kwargs = {'from_date': from_date} items = super().fetch(category, **kwargs) return items
[ "def", "fetch", "(", "self", ",", "category", "=", "CATEGORY_BUG", ",", "from_date", "=", "DEFAULT_DATETIME", ")", ":", "if", "not", "from_date", ":", "from_date", "=", "DEFAULT_DATETIME", "kwargs", "=", "{", "'from_date'", ":", "from_date", "}", "items", "=", "super", "(", ")", ".", "fetch", "(", "category", ",", "*", "*", "kwargs", ")", "return", "items" ]
Fetch the bugs from the repository. The method retrieves, from a Bugzilla repository, the bugs updated since the given date. :param category: the category of items to fetch :param from_date: obtain bugs updated since this date :returns: a generator of bugs
[ "Fetch", "the", "bugs", "from", "the", "repository", "." ]
python
test
30.833333
samghelms/mathviz
mathviz_hopper/src/bottle.py
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/bottle.py#L2546-L2579
def _make_overlay(self): """ (Unstable) Create a new overlay that acts like a chained map: Values missing in the overlay are copied from the source map. Both maps share the same meta entries. Entries that were copied from the source are called 'virtual'. You can not delete virtual keys, but overwrite them, which turns them into non-virtual entries. Setting keys on an overlay never affects its source, but may affect any number of child overlays. Other than collections.ChainMap or most other implementations, this approach does not resolve missing keys on demand, but instead actively copies all values from the source to the overlay and keeps track of virtual and non-virtual keys internally. This removes any lookup-overhead. Read-access is as fast as a build-in dict for both virtual and non-virtual keys. Changes are propagated recursively and depth-first. A failing on-change handler in an overlay stops the propagation of virtual values and may result in an partly updated tree. Take extra care here and make sure that on-change handlers never fail. Used by Route.config """ # Cleanup dead references self._overlays[:] = [ref for ref in self._overlays if ref() is not None] overlay = ConfigDict() overlay._meta = self._meta overlay._source = self self._overlays.append(weakref.ref(overlay)) for key in self: overlay._set_virtual(key, self[key]) return overlay
[ "def", "_make_overlay", "(", "self", ")", ":", "# Cleanup dead references", "self", ".", "_overlays", "[", ":", "]", "=", "[", "ref", "for", "ref", "in", "self", ".", "_overlays", "if", "ref", "(", ")", "is", "not", "None", "]", "overlay", "=", "ConfigDict", "(", ")", "overlay", ".", "_meta", "=", "self", ".", "_meta", "overlay", ".", "_source", "=", "self", "self", ".", "_overlays", ".", "append", "(", "weakref", ".", "ref", "(", "overlay", ")", ")", "for", "key", "in", "self", ":", "overlay", ".", "_set_virtual", "(", "key", ",", "self", "[", "key", "]", ")", "return", "overlay" ]
(Unstable) Create a new overlay that acts like a chained map: Values
missing in the overlay are copied from the source map. Both maps
share the same meta entries.

Entries that were copied from the source are called 'virtual'. You
can not delete virtual keys, but overwrite them, which turns them
into non-virtual entries. Setting keys on an overlay never affects
its source, but may affect any number of child overlays.

Unlike collections.ChainMap or most other implementations, this
approach does not resolve missing keys on demand, but instead
actively copies all values from the source to the overlay and keeps
track of virtual and non-virtual keys internally. This removes any
lookup-overhead. Read-access is as fast as a built-in dict for both
virtual and non-virtual keys.

Changes are propagated recursively and depth-first. A failing
on-change handler in an overlay stops the propagation of virtual
values and may result in a partly updated tree. Take extra care
here and make sure that on-change handlers never fail.

Used by Route.config
[ "(", "Unstable", ")", "Create", "a", "new", "overlay", "that", "acts", "like", "a", "chained", "map", ":", "Values", "missing", "in", "the", "overlay", "are", "copied", "from", "the", "source", "map", ".", "Both", "maps", "share", "the", "same", "meta", "entries", "." ]
python
train
48.117647
mozilla/elasticutils
elasticutils/contrib/django/__init__.py
https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/contrib/django/__init__.py#L225-L246
def get_index(cls): """Gets the index for this model. The index for this model is specified in `settings.ES_INDEXES` which is a dict of mapping type -> index name. By default, this uses `.get_mapping_type()` to determine the mapping and returns the value in `settings.ES_INDEXES` for that or ``settings.ES_INDEXES['default']``. Override this to compute it differently. :returns: index name to use """ indexes = settings.ES_INDEXES index = indexes.get(cls.get_mapping_type_name()) or indexes['default'] if not (isinstance(index, six.string_types)): # FIXME - not sure what to do here, but we only want one # index and somehow this isn't one index. index = index[0] return index
[ "def", "get_index", "(", "cls", ")", ":", "indexes", "=", "settings", ".", "ES_INDEXES", "index", "=", "indexes", ".", "get", "(", "cls", ".", "get_mapping_type_name", "(", ")", ")", "or", "indexes", "[", "'default'", "]", "if", "not", "(", "isinstance", "(", "index", ",", "six", ".", "string_types", ")", ")", ":", "# FIXME - not sure what to do here, but we only want one", "# index and somehow this isn't one index.", "index", "=", "index", "[", "0", "]", "return", "index" ]
Gets the index for this model. The index for this model is specified in `settings.ES_INDEXES` which is a dict of mapping type -> index name. By default, this uses `.get_mapping_type()` to determine the mapping and returns the value in `settings.ES_INDEXES` for that or ``settings.ES_INDEXES['default']``. Override this to compute it differently. :returns: index name to use
[ "Gets", "the", "index", "for", "this", "model", "." ]
python
train
36.363636
glomex/gcdt
gcdt/ramuda_core.py
https://github.com/glomex/gcdt/blob/cd67cf416371337b83cb9ca3f696277125703339/gcdt/ramuda_core.py#L623-L635
def cleanup_bundle(): """Deletes files used for creating bundle. * vendored/* * bundle.zip """ paths = ['./vendored', './bundle.zip'] for path in paths: if os.path.exists(path): log.debug("Deleting %s..." % path) if os.path.isdir(path): shutil.rmtree(path) else: os.remove(path)
[ "def", "cleanup_bundle", "(", ")", ":", "paths", "=", "[", "'./vendored'", ",", "'./bundle.zip'", "]", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "log", ".", "debug", "(", "\"Deleting %s...\"", "%", "path", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")", "else", ":", "os", ".", "remove", "(", "path", ")" ]
Deletes files used for creating bundle. * vendored/* * bundle.zip
[ "Deletes", "files", "used", "for", "creating", "bundle", ".", "*", "vendored", "/", "*", "*", "bundle", ".", "zip" ]
python
train
28.769231
django-treebeard/django-treebeard
treebeard/ns_tree.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/ns_tree.py#L613-L620
def get_descendants(self): """ :returns: A queryset of all the node's descendants as DFS, doesn't include the node itself """ if self.is_leaf(): return get_result_class(self.__class__).objects.none() return self.__class__.get_tree(self).exclude(pk=self.pk)
[ "def", "get_descendants", "(", "self", ")", ":", "if", "self", ".", "is_leaf", "(", ")", ":", "return", "get_result_class", "(", "self", ".", "__class__", ")", ".", "objects", ".", "none", "(", ")", "return", "self", ".", "__class__", ".", "get_tree", "(", "self", ")", ".", "exclude", "(", "pk", "=", "self", ".", "pk", ")" ]
:returns: A queryset of all the node's descendants as DFS, doesn't include the node itself
[ ":", "returns", ":", "A", "queryset", "of", "all", "the", "node", "s", "descendants", "as", "DFS", "doesn", "t", "include", "the", "node", "itself" ]
python
train
39.125
benhoff/pluginmanager
pluginmanager/file_manager.py
https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/file_manager.py#L219-L233
def _filter_filepaths(self, filepaths): """ helps iterate through all the file parsers each filter is applied individually to the same set of `filepaths` """ if self.file_filters: plugin_filepaths = set() for file_filter in self.file_filters: plugin_paths = file_filter(filepaths) plugin_filepaths.update(plugin_paths) else: plugin_filepaths = filepaths return plugin_filepaths
[ "def", "_filter_filepaths", "(", "self", ",", "filepaths", ")", ":", "if", "self", ".", "file_filters", ":", "plugin_filepaths", "=", "set", "(", ")", "for", "file_filter", "in", "self", ".", "file_filters", ":", "plugin_paths", "=", "file_filter", "(", "filepaths", ")", "plugin_filepaths", ".", "update", "(", "plugin_paths", ")", "else", ":", "plugin_filepaths", "=", "filepaths", "return", "plugin_filepaths" ]
helps iterate through all the file parsers each filter is applied individually to the same set of `filepaths`
[ "helps", "iterate", "through", "all", "the", "file", "parsers", "each", "filter", "is", "applied", "individually", "to", "the", "same", "set", "of", "filepaths" ]
python
train
33.066667
ergoithz/browsepy
browsepy/stream.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/stream.py#L54-L76
def fill(self): ''' Writes data on internal tarfile instance, which writes to current object, using :meth:`write`. As this method is blocking, it is used inside a thread. This method is called automatically, on a thread, on initialization, so there is little need to call it manually. ''' if self.exclude: exclude = self.exclude ap = functools.partial(os.path.join, self.path) self._tarfile.add( self.path, "", filter=lambda info: None if exclude(ap(info.name)) else info ) else: self._tarfile.add(self.path, "") self._tarfile.close() # force stream flush self._finished += 1 if not self._result.is_set(): self._result.set()
[ "def", "fill", "(", "self", ")", ":", "if", "self", ".", "exclude", ":", "exclude", "=", "self", ".", "exclude", "ap", "=", "functools", ".", "partial", "(", "os", ".", "path", ".", "join", ",", "self", ".", "path", ")", "self", ".", "_tarfile", ".", "add", "(", "self", ".", "path", ",", "\"\"", ",", "filter", "=", "lambda", "info", ":", "None", "if", "exclude", "(", "ap", "(", "info", ".", "name", ")", ")", "else", "info", ")", "else", ":", "self", ".", "_tarfile", ".", "add", "(", "self", ".", "path", ",", "\"\"", ")", "self", ".", "_tarfile", ".", "close", "(", ")", "# force stream flush", "self", ".", "_finished", "+=", "1", "if", "not", "self", ".", "_result", ".", "is_set", "(", ")", ":", "self", ".", "_result", ".", "set", "(", ")" ]
Writes data on internal tarfile instance, which writes to current object, using :meth:`write`. As this method is blocking, it is used inside a thread. This method is called automatically, on a thread, on initialization, so there is little need to call it manually.
[ "Writes", "data", "on", "internal", "tarfile", "instance", "which", "writes", "to", "current", "object", "using", ":", "meth", ":", "write", "." ]
python
train
35.173913
databio/pypiper
pypiper/manager.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/manager.py#L1310-L1326
def _make_lock_path(self, lock_name_base): """ Create path to lock file with given name as base. :param str lock_name_base: Lock file name, designed to not be prefixed with the lock file designation, but that's permitted. :return str: Path to the lock file. """ # For lock prefix validation, separate file name from other path # components, as we care about the name prefix not path prefix. base, name = os.path.split(lock_name_base) lock_name = self._ensure_lock_prefix(name) if base: lock_name = os.path.join(base, lock_name) return pipeline_filepath(self, filename=lock_name)
[ "def", "_make_lock_path", "(", "self", ",", "lock_name_base", ")", ":", "# For lock prefix validation, separate file name from other path", "# components, as we care about the name prefix not path prefix.", "base", ",", "name", "=", "os", ".", "path", ".", "split", "(", "lock_name_base", ")", "lock_name", "=", "self", ".", "_ensure_lock_prefix", "(", "name", ")", "if", "base", ":", "lock_name", "=", "os", ".", "path", ".", "join", "(", "base", ",", "lock_name", ")", "return", "pipeline_filepath", "(", "self", ",", "filename", "=", "lock_name", ")" ]
Create path to lock file with given name as base. :param str lock_name_base: Lock file name, designed to not be prefixed with the lock file designation, but that's permitted. :return str: Path to the lock file.
[ "Create", "path", "to", "lock", "file", "with", "given", "name", "as", "base", ".", ":", "param", "str", "lock_name_base", ":", "Lock", "file", "name", "designed", "to", "not", "be", "prefixed", "with", "the", "lock", "file", "designation", "but", "that", "s", "permitted", ".", ":", "return", "str", ":", "Path", "to", "the", "lock", "file", "." ]
python
train
40.352941
spacetelescope/stsci.tools
lib/stsci/tools/validate.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/validate.py#L777-L831
def is_integer(value, min=None, max=None): """ A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor = Validator() >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('integer', '2.2') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0 """ (min_val, max_val) = _is_num_param(('min', 'max'), (min, max)) if not isinstance(value, int_or_string_types): raise VdtTypeError(value) if isinstance(value, string_types): # if it's a string - does it represent an integer ? try: value = int(value) except ValueError: raise VdtTypeError(value) if (min_val is not None) and (value < min_val): raise VdtValueTooSmallError(value) if (max_val is not None) and (value > max_val): raise VdtValueTooBigError(value) return value
[ "def", "is_integer", "(", "value", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "(", "min_val", ",", "max_val", ")", "=", "_is_num_param", "(", "(", "'min'", ",", "'max'", ")", ",", "(", "min", ",", "max", ")", ")", "if", "not", "isinstance", "(", "value", ",", "int_or_string_types", ")", ":", "raise", "VdtTypeError", "(", "value", ")", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "# if it's a string - does it represent an integer ?", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "raise", "VdtTypeError", "(", "value", ")", "if", "(", "min_val", "is", "not", "None", ")", "and", "(", "value", "<", "min_val", ")", ":", "raise", "VdtValueTooSmallError", "(", "value", ")", "if", "(", "max_val", "is", "not", "None", ")", "and", "(", "value", ">", "max_val", ")", ":", "raise", "VdtValueTooBigError", "(", "value", ")", "return", "value" ]
A check that tests that a given value is an integer (int, or long) and optionally, between bounds. A negative value is accepted, while a float will fail. If the value is a string, then the conversion is done - if possible. Otherwise a VdtError is raised. >>> vtor = Validator() >>> vtor.check('integer', '-1') -1 >>> vtor.check('integer', '0') 0 >>> vtor.check('integer', 9) 9 >>> vtor.check('integer', 'a') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "a" is of the wrong type. >>> vtor.check('integer', '2.2') # doctest: +SKIP Traceback (most recent call last): VdtTypeError: the value "2.2" is of the wrong type. >>> vtor.check('integer(10)', '20') 20 >>> vtor.check('integer(max=20)', '15') 15 >>> vtor.check('integer(10)', '9') # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(10)', 9) # doctest: +SKIP Traceback (most recent call last): VdtValueTooSmallError: the value "9" is too small. >>> vtor.check('integer(max=20)', '35') # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(max=20)', 35) # doctest: +SKIP Traceback (most recent call last): VdtValueTooBigError: the value "35" is too big. >>> vtor.check('integer(0, 9)', False) 0
[ "A", "check", "that", "tests", "that", "a", "given", "value", "is", "an", "integer", "(", "int", "or", "long", ")", "and", "optionally", "between", "bounds", ".", "A", "negative", "value", "is", "accepted", "while", "a", "float", "will", "fail", "." ]
python
train
37.163636
sbuss/pypercube
pypercube/metric.py
https://github.com/sbuss/pypercube/blob/e9d2cca9c004b8bad6d1e0b68b080f887a186a22/pypercube/metric.py#L25-L48
def from_json(cls, json_obj): """Build a MetricResponse from JSON. :param json_obj: JSON data representing a Cube Metric. :type json_obj: `String` or `json` :throws: `InvalidMetricError` when any of {type,time,data} fields are not present in json_obj. """ if isinstance(json_obj, str): json_obj = json.loads(json_obj) time = None value = None if cls.TIME_FIELD_NAME in json_obj: time = json_obj[cls.TIME_FIELD_NAME] else: raise InvalidMetricError("{field} must be present!".format( field=cls.TIME_FIELD_NAME)) if cls.VALUE_FIELD_NAME in json_obj: value = json_obj[cls.VALUE_FIELD_NAME] return cls(time, value)
[ "def", "from_json", "(", "cls", ",", "json_obj", ")", ":", "if", "isinstance", "(", "json_obj", ",", "str", ")", ":", "json_obj", "=", "json", ".", "loads", "(", "json_obj", ")", "time", "=", "None", "value", "=", "None", "if", "cls", ".", "TIME_FIELD_NAME", "in", "json_obj", ":", "time", "=", "json_obj", "[", "cls", ".", "TIME_FIELD_NAME", "]", "else", ":", "raise", "InvalidMetricError", "(", "\"{field} must be present!\"", ".", "format", "(", "field", "=", "cls", ".", "TIME_FIELD_NAME", ")", ")", "if", "cls", ".", "VALUE_FIELD_NAME", "in", "json_obj", ":", "value", "=", "json_obj", "[", "cls", ".", "VALUE_FIELD_NAME", "]", "return", "cls", "(", "time", ",", "value", ")" ]
Build a MetricResponse from JSON. :param json_obj: JSON data representing a Cube Metric. :type json_obj: `String` or `json` :throws: `InvalidMetricError` when any of {type,time,data} fields are not present in json_obj.
[ "Build", "a", "MetricResponse", "from", "JSON", "." ]
python
train
31.625
zimeon/iiif
iiif/flask_utils.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L384-L404
def iiif_info_handler(prefix=None, identifier=None, config=None, klass=None, auth=None, **args): """Handler for IIIF Image Information requests.""" if (not auth or degraded_request(identifier) or auth.info_authz()): # go ahead with request as made if (auth): logging.debug("Authorized for image %s" % identifier) i = IIIFHandler(prefix, identifier, config, klass, auth) try: return i.image_information_response() except IIIFError as e: return i.error_response(e) elif (auth.info_authn()): # authn but not authz -> 401 abort(401) else: # redirect to degraded response = redirect(host_port_prefix( config.host, config.port, prefix) + '/' + identifier + '-deg/info.json') response.headers['Access-control-allow-origin'] = '*' return response
[ "def", "iiif_info_handler", "(", "prefix", "=", "None", ",", "identifier", "=", "None", ",", "config", "=", "None", ",", "klass", "=", "None", ",", "auth", "=", "None", ",", "*", "*", "args", ")", ":", "if", "(", "not", "auth", "or", "degraded_request", "(", "identifier", ")", "or", "auth", ".", "info_authz", "(", ")", ")", ":", "# go ahead with request as made", "if", "(", "auth", ")", ":", "logging", ".", "debug", "(", "\"Authorized for image %s\"", "%", "identifier", ")", "i", "=", "IIIFHandler", "(", "prefix", ",", "identifier", ",", "config", ",", "klass", ",", "auth", ")", "try", ":", "return", "i", ".", "image_information_response", "(", ")", "except", "IIIFError", "as", "e", ":", "return", "i", ".", "error_response", "(", "e", ")", "elif", "(", "auth", ".", "info_authn", "(", ")", ")", ":", "# authn but not authz -> 401", "abort", "(", "401", ")", "else", ":", "# redirect to degraded", "response", "=", "redirect", "(", "host_port_prefix", "(", "config", ".", "host", ",", "config", ".", "port", ",", "prefix", ")", "+", "'/'", "+", "identifier", "+", "'-deg/info.json'", ")", "response", ".", "headers", "[", "'Access-control-allow-origin'", "]", "=", "'*'", "return", "response" ]
Handler for IIIF Image Information requests.
[ "Handler", "for", "IIIF", "Image", "Information", "requests", "." ]
python
train
42.47619
cltl/KafNafParserPy
KafNafParserPy/term_data.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/term_data.py#L327-L336
def get_term(self,term_id): """ Returns the term object for the supplied identifier @type term_id: string @param term_id: term identifier """ if term_id in self.idx: return Cterm(self.idx[term_id],self.type) else: return None
[ "def", "get_term", "(", "self", ",", "term_id", ")", ":", "if", "term_id", "in", "self", ".", "idx", ":", "return", "Cterm", "(", "self", ".", "idx", "[", "term_id", "]", ",", "self", ".", "type", ")", "else", ":", "return", "None" ]
Returns the term object for the supplied identifier @type term_id: string @param term_id: term identifier
[ "Returns", "the", "term", "object", "for", "the", "supplied", "identifier" ]
python
train
29.6
RiotGames/cloud-inquisitor
plugins/public/cinq-collector-aws/cinq_collector_aws/account.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-collector-aws/cinq_collector_aws/account.py#L558-L585
def _get_resource_hash(zone_name, record): """Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique resource IDs Args: zone_name (`str`): The name of the DNS Zone the record belongs to record (`dict`): A record dict to generate the hash from Returns: `str` """ record_data = defaultdict(int, record) if type(record_data['GeoLocation']) == dict: record_data['GeoLocation'] = ":".join(["{}={}".format(k, v) for k, v in record_data['GeoLocation'].items()]) args = [ zone_name, record_data['Name'], record_data['Type'], record_data['Weight'], record_data['Region'], record_data['GeoLocation'], record_data['Failover'], record_data['HealthCheckId'], record_data['TrafficPolicyInstanceId'] ] return get_resource_id('r53r', args)
[ "def", "_get_resource_hash", "(", "zone_name", ",", "record", ")", ":", "record_data", "=", "defaultdict", "(", "int", ",", "record", ")", "if", "type", "(", "record_data", "[", "'GeoLocation'", "]", ")", "==", "dict", ":", "record_data", "[", "'GeoLocation'", "]", "=", "\":\"", ".", "join", "(", "[", "\"{}={}\"", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "record_data", "[", "'GeoLocation'", "]", ".", "items", "(", ")", "]", ")", "args", "=", "[", "zone_name", ",", "record_data", "[", "'Name'", "]", ",", "record_data", "[", "'Type'", "]", ",", "record_data", "[", "'Weight'", "]", ",", "record_data", "[", "'Region'", "]", ",", "record_data", "[", "'GeoLocation'", "]", ",", "record_data", "[", "'Failover'", "]", ",", "record_data", "[", "'HealthCheckId'", "]", ",", "record_data", "[", "'TrafficPolicyInstanceId'", "]", "]", "return", "get_resource_id", "(", "'r53r'", ",", "args", ")" ]
Returns the last ten digits of the sha256 hash of the combined arguments. Useful for generating unique resource IDs Args: zone_name (`str`): The name of the DNS Zone the record belongs to record (`dict`): A record dict to generate the hash from Returns: `str`
[ "Returns", "the", "last", "ten", "digits", "of", "the", "sha256", "hash", "of", "the", "combined", "arguments", ".", "Useful", "for", "generating", "unique", "resource", "IDs" ]
python
train
35.142857
newville/asteval
asteval/asteval.py
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L667-L681
def on_for(self, node): # ('target', 'iter', 'body', 'orelse') """For blocks.""" for val in self.run(node.iter): self.node_assign(node.target, val) self._interrupt = None for tnode in node.body: self.run(tnode) if self._interrupt is not None: break if isinstance(self._interrupt, ast.Break): break else: for tnode in node.orelse: self.run(tnode) self._interrupt = None
[ "def", "on_for", "(", "self", ",", "node", ")", ":", "# ('target', 'iter', 'body', 'orelse')", "for", "val", "in", "self", ".", "run", "(", "node", ".", "iter", ")", ":", "self", ".", "node_assign", "(", "node", ".", "target", ",", "val", ")", "self", ".", "_interrupt", "=", "None", "for", "tnode", "in", "node", ".", "body", ":", "self", ".", "run", "(", "tnode", ")", "if", "self", ".", "_interrupt", "is", "not", "None", ":", "break", "if", "isinstance", "(", "self", ".", "_interrupt", ",", "ast", ".", "Break", ")", ":", "break", "else", ":", "for", "tnode", "in", "node", ".", "orelse", ":", "self", ".", "run", "(", "tnode", ")", "self", ".", "_interrupt", "=", "None" ]
For blocks.
[ "For", "blocks", "." ]
python
train
35.533333
rhayes777/PyAutoFit
autofit/mapper/prior_model.py
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/mapper/prior_model.py#L593-L612
def gaussian_prior_model_for_arguments(self, arguments): """ Parameters ---------- arguments: {Prior: float} A dictionary of arguments Returns ------- prior_models: [PriorModel] A new list of prior models with gaussian priors """ return CollectionPriorModel( { key: value.gaussian_prior_model_for_arguments(arguments) if isinstance(value, AbstractPriorModel) else value for key, value in self.__dict__.items() if key not in ('component_number', 'item_number', 'id') } )
[ "def", "gaussian_prior_model_for_arguments", "(", "self", ",", "arguments", ")", ":", "return", "CollectionPriorModel", "(", "{", "key", ":", "value", ".", "gaussian_prior_model_for_arguments", "(", "arguments", ")", "if", "isinstance", "(", "value", ",", "AbstractPriorModel", ")", "else", "value", "for", "key", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", "if", "key", "not", "in", "(", "'component_number'", ",", "'item_number'", ",", "'id'", ")", "}", ")" ]
Parameters ---------- arguments: {Prior: float} A dictionary of arguments Returns ------- prior_models: [PriorModel] A new list of prior models with gaussian priors
[ "Parameters", "----------", "arguments", ":", "{", "Prior", ":", "float", "}", "A", "dictionary", "of", "arguments" ]
python
train
32.15
UCL-INGI/INGInious
inginious/frontend/pages/course_admin/student_list.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/student_list.py#L16-L19
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ """ GET request """ course, __ = self.get_course_and_check_rights(courseid) return self.page(course)
[ "def", "GET_AUTH", "(", "self", ",", "courseid", ")", ":", "# pylint: disable=arguments-differ", "course", ",", "__", "=", "self", ".", "get_course_and_check_rights", "(", "courseid", ")", "return", "self", ".", "page", "(", "course", ")" ]
GET request
[ "GET", "request" ]
python
train
46.75
kytos/python-openflow
pyof/foundation/base.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/base.py#L547-L572
def get_class_attributes(cls): """Return a generator for class attributes' names and value. This method strict relies on the PEP 520 (Preserving Class Attribute Definition Order), implemented on Python 3.6. So, if this behaviour changes this whole lib can loose its functionality (since the attributes order are a strong requirement.) For the same reason, this lib will not work on python versions earlier than 3.6. .. code-block:: python3 for name, value in self.get_class_attributes(): print("attribute name: {}".format(name)) print("attribute type: {}".format(value)) Returns: generator: tuples with attribute name and value. """ #: see this method docstring for a important notice about the use of #: cls.__dict__ for name, value in cls.__dict__.items(): # gets only our (kytos) attributes. this ignores methods, dunder # methods and attributes, and common python type attributes. if GenericStruct._is_pyof_attribute(value): yield (name, value)
[ "def", "get_class_attributes", "(", "cls", ")", ":", "#: see this method docstring for a important notice about the use of", "#: cls.__dict__", "for", "name", ",", "value", "in", "cls", ".", "__dict__", ".", "items", "(", ")", ":", "# gets only our (kytos) attributes. this ignores methods, dunder", "# methods and attributes, and common python type attributes.", "if", "GenericStruct", ".", "_is_pyof_attribute", "(", "value", ")", ":", "yield", "(", "name", ",", "value", ")" ]
Return a generator for class attributes' names and value.

        This method strictly relies on the PEP 520 (Preserving Class Attribute
        Definition Order), implemented on Python 3.6. So, if this behaviour
        changes, this whole lib can lose its functionality (since the
        attribute order is a strong requirement). For the same reason,
        this lib will not work on python versions earlier than 3.6.

        .. code-block:: python3

            for name, value in self.get_class_attributes():
                print("attribute name: {}".format(name))
                print("attribute type: {}".format(value))

        Returns:
            generator: tuples with attribute name and value.
[ "Return", "a", "generator", "for", "class", "attributes", "names", "and", "value", "." ]
python
train
43.538462
jrigden/pyPodcastParser
pyPodcastParser/Item.py
https://github.com/jrigden/pyPodcastParser/blob/b21e027bb56ec77986d76fc1990f4e420c6de869/pyPodcastParser/Item.py#L183-L188
def set_title(self): """Parses title and set value.""" try: self.title = self.soup.find('title').string except AttributeError: self.title = None
[ "def", "set_title", "(", "self", ")", ":", "try", ":", "self", ".", "title", "=", "self", ".", "soup", ".", "find", "(", "'title'", ")", ".", "string", "except", "AttributeError", ":", "self", ".", "title", "=", "None" ]
Parses title and set value.
[ "Parses", "title", "and", "set", "value", "." ]
python
train
31.166667
django-danceschool/django-danceschool
danceschool/stats/stats.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/stats/stats.py#L376-L451
def getMonthlyPerformance():
    '''
    This function does the work of compiling monthly performance data that can either be rendered as CSV or as JSON
    '''

    when_all = {
        'eventregistration__dropIn': False,
        'eventregistration__cancelled': False,
    }

    # Get objects at the Series level so that we can calculate StudentHours
    series_counts = list(Series.objects.annotate(
        eventregistrations=Sum(Case(When(Q(**when_all),then=1),output_field=IntegerField())),)
        .values('year','month','eventregistrations','duration'))

    for series in series_counts:
        series['studenthours'] = (series.get('eventregistrations') or 0) * (series.get('duration') or 0)

    all_years = set([x['year'] for x in series_counts])

    dataseries_list = ['EventRegistrations', 'Registrations','Hours','StudentHours','AvgStudents']

    yearTotals = {}

    # Initialize dictionaries
    for dataseries in dataseries_list:
        yearTotals[dataseries] = {'MonthlyAverage': {}}
        for year in all_years:
            yearTotals[dataseries][year] = {}

    # Fill in by year and month for a cleaner looping process
    for year in all_years:

        # Monthly Totals
        for month in range(1,13):
            # Total EventRegistrations per month is retrieved by the query above.
            yearTotals['EventRegistrations'][year][month] = sum([x['eventregistrations'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])

            # Total Registrations per month and hours per month require a separate query for each month
            yearTotals['Registrations'][year][month] = len(Registration.objects.filter(eventregistration__dropIn=False, eventregistration__cancelled=False,eventregistration__event__year=year,eventregistration__event__month=month).distinct())
            yearTotals['Hours'][year][month] = sum([x['duration'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])
            yearTotals['StudentHours'][year][month] = sum([x['studenthours'] or 0 for x in series_counts if x['month'] == month and x['year'] == year])

            if yearTotals['Hours'][year][month] > 0:
                yearTotals['AvgStudents'][year][month] = yearTotals['StudentHours'][year][month] / float(yearTotals['Hours'][year][month])
            else:
                yearTotals['AvgStudents'][year][month] = 0

        # Annual Totals
        for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
            yearTotals[sub_series][year]['Total'] = sum([x for x in yearTotals[sub_series][year].values()])

        # Annual (Monthly) Averages
        month_count = len([x for k,x in yearTotals['Hours'][year].items() if k in range(1,13) and x > 0])
        if month_count > 0:
            for sub_series in ['EventRegistrations','Registrations','Hours','StudentHours']:
                yearTotals[sub_series][year]['Average'] = yearTotals[sub_series][year]['Total'] / float(month_count)
            yearTotals['AvgStudents'][year]['Average'] = yearTotals['StudentHours'][year]['Total'] / float(yearTotals['Hours'][year]['Total'])

    # Monthly Averages
    for month in range(1,13):
        yearly_hours_data = [x[month] for k,x in yearTotals['Hours'].items() if k in all_years and x[month] > 0]
        yearly_studenthours_data = [x[month] for k,x in yearTotals['StudentHours'].items() if k in all_years and x[month] > 0]
        yearly_eventregistrations_data = [x[month] for k,x in yearTotals['EventRegistrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]
        yearly_registrations_data = [x[month] for k,x in yearTotals['Registrations'].items() if k in all_years and yearTotals['Hours'][k][month] > 0]

        year_count = len(yearly_hours_data)

        if year_count > 0:
            yearTotals['EventRegistrations']['MonthlyAverage'][month] = sum([x for x in yearly_eventregistrations_data]) / year_count
            yearTotals['Registrations']['MonthlyAverage'][month] = sum([x for x in yearly_registrations_data]) / year_count
            yearTotals['Hours']['MonthlyAverage'][month] = sum([x for x in yearly_hours_data]) / year_count
            yearTotals['StudentHours']['MonthlyAverage'][month] = sum([x for x in yearly_studenthours_data]) / year_count
            yearTotals['AvgStudents']['MonthlyAverage'][month] = yearTotals['StudentHours']['MonthlyAverage'][month] / float(yearTotals['Hours']['MonthlyAverage'][month])

    return yearTotals
[ "def", "getMonthlyPerformance", "(", ")", ":", "when_all", "=", "{", "'eventregistration__dropIn'", ":", "False", ",", "'eventregistration__cancelled'", ":", "False", ",", "}", "# Get objects at the Series level so that we can calculate StudentHours", "series_counts", "=", "list", "(", "Series", ".", "objects", ".", "annotate", "(", "eventregistrations", "=", "Sum", "(", "Case", "(", "When", "(", "Q", "(", "*", "*", "when_all", ")", ",", "then", "=", "1", ")", ",", "output_field", "=", "IntegerField", "(", ")", ")", ")", ",", ")", ".", "values", "(", "'year'", ",", "'month'", ",", "'eventregistrations'", ",", "'duration'", ")", ")", "for", "series", "in", "series_counts", ":", "series", "[", "'studenthours'", "]", "=", "(", "series", ".", "get", "(", "'eventregistrations'", ")", "or", "0", ")", "*", "(", "series", ".", "get", "(", "'duration'", ")", "or", "0", ")", "all_years", "=", "set", "(", "[", "x", "[", "'year'", "]", "for", "x", "in", "series_counts", "]", ")", "dataseries_list", "=", "[", "'EventRegistrations'", ",", "'Registrations'", ",", "'Hours'", ",", "'StudentHours'", ",", "'AvgStudents'", "]", "yearTotals", "=", "{", "}", "# Initialize dictionaries", "for", "dataseries", "in", "dataseries_list", ":", "yearTotals", "[", "dataseries", "]", "=", "{", "'MonthlyAverage'", ":", "{", "}", "}", "for", "year", "in", "all_years", ":", "yearTotals", "[", "dataseries", "]", "[", "year", "]", "=", "{", "}", "# Fill in by year and month for a cleaner looping process", "for", "year", "in", "all_years", ":", "# Monthly Totals", "for", "month", "in", "range", "(", "1", ",", "13", ")", ":", "# Total EventRegistrations per month is retrieved by the query above.", "yearTotals", "[", "'EventRegistrations'", "]", "[", "year", "]", "[", "month", "]", "=", "sum", "(", "[", "x", "[", "'eventregistrations'", "]", "or", "0", "for", "x", "in", "series_counts", "if", "x", "[", "'month'", "]", "==", "month", "and", "x", "[", "'year'", "]", "==", "year", "]", ")", "# Total Registrations per month and hours per month require a separate query for each month", "yearTotals", "[", "'Registrations'", "]", "[", "year", "]", "[", "month", "]", "=", "len", "(", "Registration", ".", "objects", ".", "filter", "(", "eventregistration__dropIn", "=", "False", ",", "eventregistration__cancelled", "=", "False", ",", "eventregistration__event__year", "=", "year", ",", "eventregistration__event__month", "=", "month", ")", ".", "distinct", "(", ")", ")", "yearTotals", "[", "'Hours'", "]", "[", "year", "]", "[", "month", "]", "=", "sum", "(", "[", "x", "[", "'duration'", "]", "or", "0", "for", "x", "in", "series_counts", "if", "x", "[", "'month'", "]", "==", "month", "and", "x", "[", "'year'", "]", "==", "year", "]", ")", "yearTotals", "[", "'StudentHours'", "]", "[", "year", "]", "[", "month", "]", "=", "sum", "(", "[", "x", "[", "'studenthours'", "]", "or", "0", "for", "x", "in", "series_counts", "if", "x", "[", "'month'", "]", "==", "month", "and", "x", "[", "'year'", "]", "==", "year", "]", ")", "if", "yearTotals", "[", "'Hours'", "]", "[", "year", "]", "[", "month", "]", ">", "0", ":", "yearTotals", "[", "'AvgStudents'", "]", "[", "year", "]", "[", "month", "]", "=", "yearTotals", "[", "'StudentHours'", "]", "[", "year", "]", "[", "month", "]", "/", "float", "(", "yearTotals", "[", "'Hours'", "]", "[", "year", "]", "[", "month", "]", ")", "else", ":", "yearTotals", "[", "'AvgStudents'", "]", "[", "year", "]", "[", "month", "]", "=", "0", "# Annual Totals", "for", "sub_series", "in", "[", "'EventRegistrations'", ",", 
"'Registrations'", ",", "'Hours'", ",", "'StudentHours'", "]", ":", "yearTotals", "[", "sub_series", "]", "[", "year", "]", "[", "'Total'", "]", "=", "sum", "(", "[", "x", "for", "x", "in", "yearTotals", "[", "sub_series", "]", "[", "year", "]", ".", "values", "(", ")", "]", ")", "# Annual (Monthly) Averages", "month_count", "=", "len", "(", "[", "x", "for", "k", ",", "x", "in", "yearTotals", "[", "'Hours'", "]", "[", "year", "]", ".", "items", "(", ")", "if", "k", "in", "range", "(", "1", ",", "13", ")", "and", "x", ">", "0", "]", ")", "if", "month_count", ">", "0", ":", "for", "sub_series", "in", "[", "'EventRegistrations'", ",", "'Registrations'", ",", "'Hours'", ",", "'StudentHours'", "]", ":", "yearTotals", "[", "sub_series", "]", "[", "year", "]", "[", "'Average'", "]", "=", "yearTotals", "[", "sub_series", "]", "[", "year", "]", "[", "'Total'", "]", "/", "float", "(", "month_count", ")", "yearTotals", "[", "'AvgStudents'", "]", "[", "year", "]", "[", "'Average'", "]", "=", "yearTotals", "[", "'StudentHours'", "]", "[", "year", "]", "[", "'Total'", "]", "/", "float", "(", "yearTotals", "[", "'Hours'", "]", "[", "year", "]", "[", "'Total'", "]", ")", "# Monthly Averages", "for", "month", "in", "range", "(", "1", ",", "13", ")", ":", "yearly_hours_data", "=", "[", "x", "[", "month", "]", "for", "k", ",", "x", "in", "yearTotals", "[", "'Hours'", "]", ".", "items", "(", ")", "if", "k", "in", "all_years", "and", "x", "[", "month", "]", ">", "0", "]", "yearly_studenthours_data", "=", "[", "x", "[", "month", "]", "for", "k", ",", "x", "in", "yearTotals", "[", "'StudentHours'", "]", ".", "items", "(", ")", "if", "k", "in", "all_years", "and", "x", "[", "month", "]", ">", "0", "]", "yearly_eventregistrations_data", "=", "[", "x", "[", "month", "]", "for", "k", ",", "x", "in", "yearTotals", "[", "'EventRegistrations'", "]", ".", "items", "(", ")", "if", "k", "in", "all_years", "and", "yearTotals", "[", "'Hours'", "]", "[", "k", "]", "[", "month", "]", ">", "0", "]", "yearly_registrations_data", "=", "[", "x", "[", "month", "]", "for", "k", ",", "x", "in", "yearTotals", "[", "'Registrations'", "]", ".", "items", "(", ")", "if", "k", "in", "all_years", "and", "yearTotals", "[", "'Hours'", "]", "[", "k", "]", "[", "month", "]", ">", "0", "]", "year_count", "=", "len", "(", "yearly_hours_data", ")", "if", "year_count", ">", "0", ":", "yearTotals", "[", "'EventRegistrations'", "]", "[", "'MonthlyAverage'", "]", "[", "month", "]", "=", "sum", "(", "[", "x", "for", "x", "in", "yearly_eventregistrations_data", "]", ")", "/", "year_count", "yearTotals", "[", "'Registrations'", "]", "[", "'MonthlyAverage'", "]", "[", "month", "]", "=", "sum", "(", "[", "x", "for", "x", "in", "yearly_registrations_data", "]", ")", "/", "year_count", "yearTotals", "[", "'Hours'", "]", "[", "'MonthlyAverage'", "]", "[", "month", "]", "=", "sum", "(", "[", "x", "for", "x", "in", "yearly_hours_data", "]", ")", "/", "year_count", "yearTotals", "[", "'StudentHours'", "]", "[", "'MonthlyAverage'", "]", "[", "month", "]", "=", "sum", "(", "[", "x", "for", "x", "in", "yearly_studenthours_data", "]", ")", "/", "year_count", "yearTotals", "[", "'AvgStudents'", "]", "[", "'MonthlyAverage'", "]", "[", "month", "]", "=", "yearTotals", "[", "'StudentHours'", "]", "[", "'MonthlyAverage'", "]", "[", "month", "]", "/", "float", "(", "yearTotals", "[", "'Hours'", "]", "[", "'MonthlyAverage'", "]", "[", "month", "]", ")", "return", "yearTotals" ]
This function does the work of compiling monthly performance data that can either be rendered as CSV or as JSON
[ "This", "function", "does", "the", "work", "of", "compiling", "monthly", "performance", "data", "that", "can", "either", "be", "rendered", "as", "CSV", "or", "as", "JSON" ]
python
train
58.171053
cloudmesh/cloudmesh-common
cloudmesh/common/TableParser.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/TableParser.py#L104-L117
def _get_headers(self): """ assumes comment have been stripped with extract :return: """ header = self.lines[0] self.lines = self.lines[1:] self.headers = \ [self.clean(h) for h in header.split(self.seperator)] if self.is_strip: self.headers = self.headers[1:-1] return self.headers
[ "def", "_get_headers", "(", "self", ")", ":", "header", "=", "self", ".", "lines", "[", "0", "]", "self", ".", "lines", "=", "self", ".", "lines", "[", "1", ":", "]", "self", ".", "headers", "=", "[", "self", ".", "clean", "(", "h", ")", "for", "h", "in", "header", ".", "split", "(", "self", ".", "seperator", ")", "]", "if", "self", ".", "is_strip", ":", "self", ".", "headers", "=", "self", ".", "headers", "[", "1", ":", "-", "1", "]", "return", "self", ".", "headers" ]
assumes comments have been stripped with extract
:return:
[ "assumes", "comment", "have", "been", "stripped", "with", "extract", ":", "return", ":" ]
python
train
26.214286
wavefrontHQ/python-client
wavefront_api_client/api/dashboard_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/dashboard_api.py#L1201-L1222
def remove_dashboard_tag(self, id, tag_value, **kwargs): # noqa: E501 """Remove a tag from a specific dashboard # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param str tag_value: (required) :return: ResponseContainer If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501 else: (data) = self.remove_dashboard_tag_with_http_info(id, tag_value, **kwargs) # noqa: E501 return data
[ "def", "remove_dashboard_tag", "(", "self", ",", "id", ",", "tag_value", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "remove_dashboard_tag_with_http_info", "(", "id", ",", "tag_value", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "remove_dashboard_tag_with_http_info", "(", "id", ",", "tag_value", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Remove a tag from a specific dashboard # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove_dashboard_tag(id, tag_value, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param str tag_value: (required) :return: ResponseContainer If the method is called asynchronously, returns the request thread.
[ "Remove", "a", "tag", "from", "a", "specific", "dashboard", "#", "noqa", ":", "E501" ]
python
train
43.772727
materialsproject/pymatgen
pymatgen/core/molecular_orbitals.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/molecular_orbitals.py#L83-L119
def obtain_band_edges(self): ''' Fill up the atomic orbitals with available electrons. Return HOMO, LUMO, and whether it's a metal. ''' orbitals = self.aos_as_list() electrons = Composition(self.composition).total_electrons partial_filled = [] for orbital in orbitals: if electrons <= 0: break if 's' in orbital[1]: electrons += -2 elif 'p' in orbital[1]: electrons += -6 elif 'd' in orbital[1]: electrons += -10 elif 'f' in orbital[1]: electrons += -14 partial_filled.append(orbital) if electrons != 0: homo = partial_filled[-1] lumo = partial_filled[-1] else: homo = partial_filled[-1] try: lumo = orbitals[len(partial_filled)] except: lumo = None if homo == lumo: metal = True else: metal = False return {'HOMO': homo, 'LUMO': lumo, 'metal': metal}
[ "def", "obtain_band_edges", "(", "self", ")", ":", "orbitals", "=", "self", ".", "aos_as_list", "(", ")", "electrons", "=", "Composition", "(", "self", ".", "composition", ")", ".", "total_electrons", "partial_filled", "=", "[", "]", "for", "orbital", "in", "orbitals", ":", "if", "electrons", "<=", "0", ":", "break", "if", "'s'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "2", "elif", "'p'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "6", "elif", "'d'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "10", "elif", "'f'", "in", "orbital", "[", "1", "]", ":", "electrons", "+=", "-", "14", "partial_filled", ".", "append", "(", "orbital", ")", "if", "electrons", "!=", "0", ":", "homo", "=", "partial_filled", "[", "-", "1", "]", "lumo", "=", "partial_filled", "[", "-", "1", "]", "else", ":", "homo", "=", "partial_filled", "[", "-", "1", "]", "try", ":", "lumo", "=", "orbitals", "[", "len", "(", "partial_filled", ")", "]", "except", ":", "lumo", "=", "None", "if", "homo", "==", "lumo", ":", "metal", "=", "True", "else", ":", "metal", "=", "False", "return", "{", "'HOMO'", ":", "homo", ",", "'LUMO'", ":", "lumo", ",", "'metal'", ":", "metal", "}" ]
Fill up the atomic orbitals with available electrons. Return HOMO, LUMO, and whether it's a metal.
[ "Fill", "up", "the", "atomic", "orbitals", "with", "available", "electrons", ".", "Return", "HOMO", "LUMO", "and", "whether", "it", "s", "a", "metal", "." ]
python
train
29.459459
jwhitlock/drf-cached-instances
drf_cached_instances/models.py
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L46-L55
def values_list(self, *args, **kwargs): """Return the primary keys as a list. The only valid call is values_list('pk', flat=True) """ flat = kwargs.pop('flat', False) assert flat is True assert len(args) == 1 assert args[0] == self.model._meta.pk.name return self.pks
[ "def", "values_list", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "flat", "=", "kwargs", ".", "pop", "(", "'flat'", ",", "False", ")", "assert", "flat", "is", "True", "assert", "len", "(", "args", ")", "==", "1", "assert", "args", "[", "0", "]", "==", "self", ".", "model", ".", "_meta", ".", "pk", ".", "name", "return", "self", ".", "pks" ]
Return the primary keys as a list. The only valid call is values_list('pk', flat=True)
[ "Return", "the", "primary", "keys", "as", "a", "list", "." ]
python
train
32.3
tango-controls/pytango
tango/device_class.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/device_class.py#L91-L117
def get_class_properties(self, dev_class, class_prop): """ get_class_properties(self, dev_class, class_prop) -> None Returns the class properties Parameters : - dev_class : (DeviceClass) the DeviceClass object - class_prop : [in, out] (dict<str, None>) the property names. Will be filled with property values Return : None""" # initialize default values if class_prop == {} or not Util._UseDb: return # call database to get properties props = self.db.get_class_property(dev_class.get_name(), list(class_prop.keys())) # if value defined in database, store it for name in class_prop: if props[name]: type = self.get_property_type(name, class_prop) values = self.stringArray2values(props[name], type) self.set_property_values(name, class_prop, values) else: print(name + " property NOT found in database")
[ "def", "get_class_properties", "(", "self", ",", "dev_class", ",", "class_prop", ")", ":", "# initialize default values", "if", "class_prop", "==", "{", "}", "or", "not", "Util", ".", "_UseDb", ":", "return", "# call database to get properties", "props", "=", "self", ".", "db", ".", "get_class_property", "(", "dev_class", ".", "get_name", "(", ")", ",", "list", "(", "class_prop", ".", "keys", "(", ")", ")", ")", "# if value defined in database, store it", "for", "name", "in", "class_prop", ":", "if", "props", "[", "name", "]", ":", "type", "=", "self", ".", "get_property_type", "(", "name", ",", "class_prop", ")", "values", "=", "self", ".", "stringArray2values", "(", "props", "[", "name", "]", ",", "type", ")", "self", ".", "set_property_values", "(", "name", ",", "class_prop", ",", "values", ")", "else", ":", "print", "(", "name", "+", "\" property NOT found in database\"", ")" ]
get_class_properties(self, dev_class, class_prop) -> None Returns the class properties Parameters : - dev_class : (DeviceClass) the DeviceClass object - class_prop : [in, out] (dict<str, None>) the property names. Will be filled with property values Return : None
[ "get_class_properties", "(", "self", "dev_class", "class_prop", ")", "-", ">", "None" ]
python
train
40
pytorch/ignite
ignite/handlers/timing.py
https://github.com/pytorch/ignite/blob/a96bd07cb58822cfb39fd81765135712f1db41ca/ignite/handlers/timing.py#L87-L116
def attach(self, engine, start=Events.STARTED, pause=Events.COMPLETED, resume=None, step=None): """ Register callbacks to control the timer. Args: engine (Engine): Engine that this timer will be attached to. start (Events): Event which should start (reset) the timer. pause (Events): Event which should pause the timer. resume (Events, optional): Event which should resume the timer. step (Events, optional): Event which should call the `step` method of the counter. Returns: self (Timer) """ engine.add_event_handler(start, self.reset) engine.add_event_handler(pause, self.pause) if resume is not None: engine.add_event_handler(resume, self.resume) if step is not None: engine.add_event_handler(step, self.step) return self
[ "def", "attach", "(", "self", ",", "engine", ",", "start", "=", "Events", ".", "STARTED", ",", "pause", "=", "Events", ".", "COMPLETED", ",", "resume", "=", "None", ",", "step", "=", "None", ")", ":", "engine", ".", "add_event_handler", "(", "start", ",", "self", ".", "reset", ")", "engine", ".", "add_event_handler", "(", "pause", ",", "self", ".", "pause", ")", "if", "resume", "is", "not", "None", ":", "engine", ".", "add_event_handler", "(", "resume", ",", "self", ".", "resume", ")", "if", "step", "is", "not", "None", ":", "engine", ".", "add_event_handler", "(", "step", ",", "self", ".", "step", ")", "return", "self" ]
Register callbacks to control the timer. Args: engine (Engine): Engine that this timer will be attached to. start (Events): Event which should start (reset) the timer. pause (Events): Event which should pause the timer. resume (Events, optional): Event which should resume the timer. step (Events, optional): Event which should call the `step` method of the counter. Returns: self (Timer)
[ "Register", "callbacks", "to", "control", "the", "timer", "." ]
python
train
31.666667
ungarj/tilematrix
tilematrix/_tilepyramid.py
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L196-L204
def tiles_from_bbox(self, geometry, zoom): """ All metatiles intersecting with given bounding box. - geometry: shapely geometry - zoom: zoom level """ validate_zoom(zoom) return self.tiles_from_bounds(geometry.bounds, zoom)
[ "def", "tiles_from_bbox", "(", "self", ",", "geometry", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "return", "self", ".", "tiles_from_bounds", "(", "geometry", ".", "bounds", ",", "zoom", ")" ]
All metatiles intersecting with given bounding box. - geometry: shapely geometry - zoom: zoom level
[ "All", "metatiles", "intersecting", "with", "given", "bounding", "box", "." ]
python
train
30.222222
python-gitlab/python-gitlab
gitlab/v4/objects.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L2277-L2291
def changes(self, **kwargs): """List the merge request changes. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: List of changes """ path = '%s/%s/changes' % (self.manager.path, self.get_id()) return self.manager.gitlab.http_get(path, **kwargs)
[ "def", "changes", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'%s/%s/changes'", "%", "(", "self", ".", "manager", ".", "path", ",", "self", ".", "get_id", "(", ")", ")", "return", "self", ".", "manager", ".", "gitlab", ".", "http_get", "(", "path", ",", "*", "*", "kwargs", ")" ]
List the merge request changes. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: List of changes
[ "List", "the", "merge", "request", "changes", "." ]
python
train
33.133333
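A hedged sketch of calling the changes endpoint documented above; the server URL, token, project ID and merge request IID below are placeholders.

# Illustrative sketch (assumptions: python-gitlab installed; placeholder server/token/IDs)
import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="TOKEN")
project = gl.projects.get(42)                 # placeholder project ID
mr = project.mergerequests.get(7)             # placeholder merge request IID
result = mr.changes()                         # dict returned by the GitLab API
print(result["changes"])                      # per-file diffs of the merge request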
ekzhu/datasketch
datasketch/minhash.py
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L156-L164
def count(self): '''Estimate the cardinality count based on the technique described in `this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_. Returns: int: The estimated cardinality of the set represented by this MinHash. ''' k = len(self) return np.float(k) / np.sum(self.hashvalues / np.float(_max_hash)) - 1.0
[ "def", "count", "(", "self", ")", ":", "k", "=", "len", "(", "self", ")", "return", "np", ".", "float", "(", "k", ")", "/", "np", ".", "sum", "(", "self", ".", "hashvalues", "/", "np", ".", "float", "(", "_max_hash", ")", ")", "-", "1.0" ]
Estimate the cardinality count based on the technique described in `this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_. Returns: int: The estimated cardinality of the set represented by this MinHash.
[ "Estimate", "the", "cardinality", "count", "based", "on", "the", "technique", "described", "in", "this", "paper", "<http", ":", "//", "ieeexplore", ".", "ieee", ".", "org", "/", "stamp", "/", "stamp", ".", "jsp?arnumber", "=", "365694", ">", "_", "." ]
python
test
42.888889
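A small usage sketch of the cardinality estimator above, assuming the datasketch package; the token set and num_perm value are arbitrary illustration.

# Illustrative sketch (assumptions: datasketch installed; arbitrary tokens)
from datasketch import MinHash

m = MinHash(num_perm=128)
for token in {"apple", "banana", "cherry", "date"}:
    m.update(token.encode("utf8"))            # update() expects bytes
print(m.count())                              # estimate near 4, subject to sampling error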
coldfix/udiskie
udiskie/udisks2.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/udisks2.py#L357-L366
def drive(self): """Get wrapper to the drive containing this device.""" if self.is_drive: return self cleartext = self.luks_cleartext_slave if cleartext: return cleartext.drive if self.is_block: return self._daemon[self._P.Block.Drive] return None
[ "def", "drive", "(", "self", ")", ":", "if", "self", ".", "is_drive", ":", "return", "self", "cleartext", "=", "self", ".", "luks_cleartext_slave", "if", "cleartext", ":", "return", "cleartext", ".", "drive", "if", "self", ".", "is_block", ":", "return", "self", ".", "_daemon", "[", "self", ".", "_P", ".", "Block", ".", "Drive", "]", "return", "None" ]
Get wrapper to the drive containing this device.
[ "Get", "wrapper", "to", "the", "drive", "containing", "this", "device", "." ]
python
train
32.2
twidi/py-dataql
dataql/solvers/resources.py
https://github.com/twidi/py-dataql/blob/5841a3fd559829193ed709c255166085bdde1c52/dataql/solvers/resources.py#L112-L192
def solve_value(self, value, resource): """Solve a resource with a value, without coercing. Arguments --------- value : ? A value to solve in combination with the given resource. The first filter of the resource will be applied on this value (next filters on the result of the previous filter). resource : dataql.resources.Resource An instance of a subclass of ``Resource`` to solve with the given value. Returns ------- The result of all filters applied on the value for the first filter, and result of the previous filter for next filters. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> from datetime import date >>> registry.register(date, allow_class=True) >>> registry.register(str) >>> class MySolver(Solver): ... def coerce(self, value, resource): return value >>> solver = MySolver(registry) >>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter >>> field = Field(None, ... filters=[ ... Filter(name='fromtimestamp', args=[PosArg(1433109600)]), ... Filter(name='replace', args=[NamedArg('year', '=', 2014)]), ... Filter(name='strftime', args=[PosArg('%F')]), ... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]), ... ] ... ) >>> solver.solve_value(date, field) '2015-06-01' >>> solver.solve_value(None, field) >>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]} >>> registry.register(dict) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='foo'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) '2015-06-01' >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='bar'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='baz'), ... SliceFilter(0), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) # Example of how to raise a ``CannotSolve`` exception. >>> from dataql.solvers.exceptions import CannotSolve >>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS Traceback (most recent call last): dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`. """ # The given value is the starting point on which we apply the first filter. result = value # Apply filters one by one on the previous result. if result is not None: for filter_ in resource.filters: result = self.registry.solve_filter(result, filter_) if result is None: break return result
[ "def", "solve_value", "(", "self", ",", "value", ",", "resource", ")", ":", "# The given value is the starting point on which we apply the first filter.", "result", "=", "value", "# Apply filters one by one on the previous result.", "if", "result", "is", "not", "None", ":", "for", "filter_", "in", "resource", ".", "filters", ":", "result", "=", "self", ".", "registry", ".", "solve_filter", "(", "result", ",", "filter_", ")", "if", "result", "is", "None", ":", "break", "return", "result" ]
Solve a resource with a value, without coercing. Arguments --------- value : ? A value to solve in combination with the given resource. The first filter of the resource will be applied on this value (next filters on the result of the previous filter). resource : dataql.resources.Resource An instance of a subclass of ``Resource`` to solve with the given value. Returns ------- The result of all filters applied on the value for the first filter, and result of the previous filter for next filters. Example ------- >>> from dataql.solvers.registry import Registry >>> registry = Registry() >>> from datetime import date >>> registry.register(date, allow_class=True) >>> registry.register(str) >>> class MySolver(Solver): ... def coerce(self, value, resource): return value >>> solver = MySolver(registry) >>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter >>> field = Field(None, ... filters=[ ... Filter(name='fromtimestamp', args=[PosArg(1433109600)]), ... Filter(name='replace', args=[NamedArg('year', '=', 2014)]), ... Filter(name='strftime', args=[PosArg('%F')]), ... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]), ... ] ... ) >>> solver.solve_value(date, field) '2015-06-01' >>> solver.solve_value(None, field) >>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]} >>> registry.register(dict) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='foo'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) '2015-06-01' >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='bar'), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) >>> solver.solve_value(d, Field(None, filters=[ ... Filter(name='baz'), ... SliceFilter(0), ... Filter(name='date'), ... Filter(name='strftime', args=[PosArg('%F')]), ... ])) # Example of how to raise a ``CannotSolve`` exception. >>> from dataql.solvers.exceptions import CannotSolve >>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS Traceback (most recent call last): dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`.
[ "Solve", "a", "resource", "with", "a", "value", "without", "coercing", "." ]
python
train
38.765432
gijzelaerr/python-snap7
snap7/server.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/server.py#L72-L81
def register_area(self, area_code, index, userdata): """Shares a memory area with the server. That memory block will be visible by the clients. """ size = ctypes.sizeof(userdata) logger.info("registering area %s, index %s, size %s" % (area_code, index, size)) size = ctypes.sizeof(userdata) return self.library.Srv_RegisterArea(self.pointer, area_code, index, ctypes.byref(userdata), size)
[ "def", "register_area", "(", "self", ",", "area_code", ",", "index", ",", "userdata", ")", ":", "size", "=", "ctypes", ".", "sizeof", "(", "userdata", ")", "logger", ".", "info", "(", "\"registering area %s, index %s, size %s\"", "%", "(", "area_code", ",", "index", ",", "size", ")", ")", "size", "=", "ctypes", ".", "sizeof", "(", "userdata", ")", "return", "self", ".", "library", ".", "Srv_RegisterArea", "(", "self", ".", "pointer", ",", "area_code", ",", "index", ",", "ctypes", ".", "byref", "(", "userdata", ")", ",", "size", ")" ]
Shares a memory area with the server. That memory block will be visible by the clients.
[ "Shares", "a", "memory", "area", "with", "the", "server", ".", "That", "memory", "block", "will", "be", "visible", "by", "the", "clients", "." ]
python
train
54.5
RudolfCardinal/pythonlib
cardinal_pythonlib/email/sendmail.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/email/sendmail.py#L66-L240
def make_email(from_addr: str, date: str = None, sender: str = "", reply_to: Union[str, List[str]] = "", to: Union[str, List[str]] = "", cc: Union[str, List[str]] = "", bcc: Union[str, List[str]] = "", subject: str = "", body: str = "", content_type: str = CONTENT_TYPE_TEXT, charset: str = "utf8", attachment_filenames: Sequence[str] = None, attachment_binaries: Sequence[bytes] = None, attachment_binary_filenames: Sequence[str] = None, verbose: bool = False) -> email.mime.multipart.MIMEMultipart: """ Makes an e-mail message. Arguments that can be multiple e-mail addresses are (a) a single e-mail address as a string, or (b) a list of strings (each a single e-mail address), or (c) a comma-separated list of multiple e-mail addresses. Args: from_addr: name of the sender for the "From:" field date: e-mail date in RFC 2822 format, or ``None`` for "now" sender: name of the sender for the "Sender:" field reply_to: name of the sender for the "Reply-To:" field to: e-mail address(es) of the recipients for "To:" field cc: e-mail address(es) of the recipients for "Cc:" field bcc: e-mail address(es) of the recipients for "Bcc:" field subject: e-mail subject body: e-mail body content_type: MIME type for body content, default ``text/plain`` charset: character set for body; default ``utf8`` attachment_filenames: filenames of attachments to add attachment_binaries: binary objects to add as attachments attachment_binary_filenames: filenames corresponding to ``attachment_binaries`` verbose: be verbose? Returns: a :class:`email.mime.multipart.MIMEMultipart` Raises: :exc:`AssertionError`, :exc:`ValueError` """ def _csv_list_to_list(x: str) -> List[str]: stripped = [item.strip() for item in x.split(COMMA)] return [item for item in stripped if item] def _assert_nocomma(x: Union[str, List[str]]) -> None: if isinstance(x, str): x = [x] for _addr in x: assert COMMA not in _addr, ( "Commas not allowed in e-mail addresses: {!r}".format(_addr) ) # ------------------------------------------------------------------------- # Arguments # ------------------------------------------------------------------------- if not date: date = email.utils.formatdate(localtime=True) assert isinstance(from_addr, str), ( "'From:' can only be a single address " "(for Python sendmail, not RFC 2822); was {!r}".format(from_addr) ) _assert_nocomma(from_addr) assert isinstance(sender, str), ( "'Sender:' can only be a single address; was {!r}".format(sender) ) _assert_nocomma(sender) if isinstance(reply_to, str): reply_to = [reply_to] if reply_to else [] # type: List[str] _assert_nocomma(reply_to) if isinstance(to, str): to = _csv_list_to_list(to) if isinstance(cc, str): cc = _csv_list_to_list(cc) if isinstance(bcc, str): bcc = _csv_list_to_list(bcc) assert to or cc or bcc, "No recipients (must have some of: To, Cc, Bcc)" _assert_nocomma(to) _assert_nocomma(cc) _assert_nocomma(bcc) attachment_filenames = attachment_filenames or [] # type: List[str] assert all(attachment_filenames), ( "Missing attachment filenames: {!r}".format(attachment_filenames) ) attachment_binaries = attachment_binaries or [] # type: List[bytes] attachment_binary_filenames = attachment_binary_filenames or [] # type: List[str] # noqa assert len(attachment_binaries) == len(attachment_binary_filenames), ( "If you specify attachment_binaries or attachment_binary_filenames, " "they must be iterables of the same length." 
) assert all(attachment_binary_filenames), ( "Missing filenames for attached binaries: {!r}".format( attachment_binary_filenames) ) # ------------------------------------------------------------------------- # Make message # ------------------------------------------------------------------------- msg = email.mime.multipart.MIMEMultipart() # Headers: mandatory msg["From"] = from_addr msg["Date"] = date msg["Subject"] = subject # Headers: optional if sender: msg["Sender"] = sender # Single only, not a list if reply_to: msg["Reply-To"] = COMMASPACE.join(reply_to) if to: msg["To"] = COMMASPACE.join(to) if cc: msg["Cc"] = COMMASPACE.join(cc) if bcc: msg["Bcc"] = COMMASPACE.join(bcc) # Body if content_type == CONTENT_TYPE_TEXT: msgbody = email.mime.text.MIMEText(body, "plain", charset) elif content_type == CONTENT_TYPE_HTML: msgbody = email.mime.text.MIMEText(body, "html", charset) else: raise ValueError("unknown content_type") msg.attach(msgbody) # Attachments # noinspection PyPep8,PyBroadException try: if attachment_filenames: # ----------------------------------------------------------------- # Attach things by filename # ----------------------------------------------------------------- if verbose: log.debug("attachment_filenames: {}", attachment_filenames) # noinspection PyTypeChecker for f in attachment_filenames: part = email.mime.base.MIMEBase("application", "octet-stream") part.set_payload(open(f, "rb").read()) email.encoders.encode_base64(part) part.add_header( 'Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f) ) msg.attach(part) if attachment_binaries: # ----------------------------------------------------------------- # Binary attachments, which have a notional filename # ----------------------------------------------------------------- if verbose: log.debug("attachment_binary_filenames: {}", attachment_binary_filenames) for i in range(len(attachment_binaries)): blob = attachment_binaries[i] filename = attachment_binary_filenames[i] part = email.mime.base.MIMEBase("application", "octet-stream") part.set_payload(blob) email.encoders.encode_base64(part) part.add_header( 'Content-Disposition', 'attachment; filename="%s"' % filename) msg.attach(part) except Exception as e: raise ValueError("send_email: Failed to attach files: {}".format(e)) return msg
[ "def", "make_email", "(", "from_addr", ":", "str", ",", "date", ":", "str", "=", "None", ",", "sender", ":", "str", "=", "\"\"", ",", "reply_to", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", "=", "\"\"", ",", "to", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", "=", "\"\"", ",", "cc", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", "=", "\"\"", ",", "bcc", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", "=", "\"\"", ",", "subject", ":", "str", "=", "\"\"", ",", "body", ":", "str", "=", "\"\"", ",", "content_type", ":", "str", "=", "CONTENT_TYPE_TEXT", ",", "charset", ":", "str", "=", "\"utf8\"", ",", "attachment_filenames", ":", "Sequence", "[", "str", "]", "=", "None", ",", "attachment_binaries", ":", "Sequence", "[", "bytes", "]", "=", "None", ",", "attachment_binary_filenames", ":", "Sequence", "[", "str", "]", "=", "None", ",", "verbose", ":", "bool", "=", "False", ")", "->", "email", ".", "mime", ".", "multipart", ".", "MIMEMultipart", ":", "def", "_csv_list_to_list", "(", "x", ":", "str", ")", "->", "List", "[", "str", "]", ":", "stripped", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "x", ".", "split", "(", "COMMA", ")", "]", "return", "[", "item", "for", "item", "in", "stripped", "if", "item", "]", "def", "_assert_nocomma", "(", "x", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", ")", "->", "None", ":", "if", "isinstance", "(", "x", ",", "str", ")", ":", "x", "=", "[", "x", "]", "for", "_addr", "in", "x", ":", "assert", "COMMA", "not", "in", "_addr", ",", "(", "\"Commas not allowed in e-mail addresses: {!r}\"", ".", "format", "(", "_addr", ")", ")", "# -------------------------------------------------------------------------", "# Arguments", "# -------------------------------------------------------------------------", "if", "not", "date", ":", "date", "=", "email", ".", "utils", ".", "formatdate", "(", "localtime", "=", "True", ")", "assert", "isinstance", "(", "from_addr", ",", "str", ")", ",", "(", "\"'From:' can only be a single address \"", "\"(for Python sendmail, not RFC 2822); was {!r}\"", ".", "format", "(", "from_addr", ")", ")", "_assert_nocomma", "(", "from_addr", ")", "assert", "isinstance", "(", "sender", ",", "str", ")", ",", "(", "\"'Sender:' can only be a single address; was {!r}\"", ".", "format", "(", "sender", ")", ")", "_assert_nocomma", "(", "sender", ")", "if", "isinstance", "(", "reply_to", ",", "str", ")", ":", "reply_to", "=", "[", "reply_to", "]", "if", "reply_to", "else", "[", "]", "# type: List[str]", "_assert_nocomma", "(", "reply_to", ")", "if", "isinstance", "(", "to", ",", "str", ")", ":", "to", "=", "_csv_list_to_list", "(", "to", ")", "if", "isinstance", "(", "cc", ",", "str", ")", ":", "cc", "=", "_csv_list_to_list", "(", "cc", ")", "if", "isinstance", "(", "bcc", ",", "str", ")", ":", "bcc", "=", "_csv_list_to_list", "(", "bcc", ")", "assert", "to", "or", "cc", "or", "bcc", ",", "\"No recipients (must have some of: To, Cc, Bcc)\"", "_assert_nocomma", "(", "to", ")", "_assert_nocomma", "(", "cc", ")", "_assert_nocomma", "(", "bcc", ")", "attachment_filenames", "=", "attachment_filenames", "or", "[", "]", "# type: List[str]", "assert", "all", "(", "attachment_filenames", ")", ",", "(", "\"Missing attachment filenames: {!r}\"", ".", "format", "(", "attachment_filenames", ")", ")", "attachment_binaries", "=", "attachment_binaries", "or", "[", "]", "# type: List[bytes]", "attachment_binary_filenames", "=", "attachment_binary_filenames", "or", "[", "]", "# type: 
List[str] # noqa", "assert", "len", "(", "attachment_binaries", ")", "==", "len", "(", "attachment_binary_filenames", ")", ",", "(", "\"If you specify attachment_binaries or attachment_binary_filenames, \"", "\"they must be iterables of the same length.\"", ")", "assert", "all", "(", "attachment_binary_filenames", ")", ",", "(", "\"Missing filenames for attached binaries: {!r}\"", ".", "format", "(", "attachment_binary_filenames", ")", ")", "# -------------------------------------------------------------------------", "# Make message", "# -------------------------------------------------------------------------", "msg", "=", "email", ".", "mime", ".", "multipart", ".", "MIMEMultipart", "(", ")", "# Headers: mandatory", "msg", "[", "\"From\"", "]", "=", "from_addr", "msg", "[", "\"Date\"", "]", "=", "date", "msg", "[", "\"Subject\"", "]", "=", "subject", "# Headers: optional", "if", "sender", ":", "msg", "[", "\"Sender\"", "]", "=", "sender", "# Single only, not a list", "if", "reply_to", ":", "msg", "[", "\"Reply-To\"", "]", "=", "COMMASPACE", ".", "join", "(", "reply_to", ")", "if", "to", ":", "msg", "[", "\"To\"", "]", "=", "COMMASPACE", ".", "join", "(", "to", ")", "if", "cc", ":", "msg", "[", "\"Cc\"", "]", "=", "COMMASPACE", ".", "join", "(", "cc", ")", "if", "bcc", ":", "msg", "[", "\"Bcc\"", "]", "=", "COMMASPACE", ".", "join", "(", "bcc", ")", "# Body", "if", "content_type", "==", "CONTENT_TYPE_TEXT", ":", "msgbody", "=", "email", ".", "mime", ".", "text", ".", "MIMEText", "(", "body", ",", "\"plain\"", ",", "charset", ")", "elif", "content_type", "==", "CONTENT_TYPE_HTML", ":", "msgbody", "=", "email", ".", "mime", ".", "text", ".", "MIMEText", "(", "body", ",", "\"html\"", ",", "charset", ")", "else", ":", "raise", "ValueError", "(", "\"unknown content_type\"", ")", "msg", ".", "attach", "(", "msgbody", ")", "# Attachments", "# noinspection PyPep8,PyBroadException", "try", ":", "if", "attachment_filenames", ":", "# -----------------------------------------------------------------", "# Attach things by filename", "# -----------------------------------------------------------------", "if", "verbose", ":", "log", ".", "debug", "(", "\"attachment_filenames: {}\"", ",", "attachment_filenames", ")", "# noinspection PyTypeChecker", "for", "f", "in", "attachment_filenames", ":", "part", "=", "email", ".", "mime", ".", "base", ".", "MIMEBase", "(", "\"application\"", ",", "\"octet-stream\"", ")", "part", ".", "set_payload", "(", "open", "(", "f", ",", "\"rb\"", ")", ".", "read", "(", ")", ")", "email", ".", "encoders", ".", "encode_base64", "(", "part", ")", "part", ".", "add_header", "(", "'Content-Disposition'", ",", "'attachment; filename=\"%s\"'", "%", "os", ".", "path", ".", "basename", "(", "f", ")", ")", "msg", ".", "attach", "(", "part", ")", "if", "attachment_binaries", ":", "# -----------------------------------------------------------------", "# Binary attachments, which have a notional filename", "# -----------------------------------------------------------------", "if", "verbose", ":", "log", ".", "debug", "(", "\"attachment_binary_filenames: {}\"", ",", "attachment_binary_filenames", ")", "for", "i", "in", "range", "(", "len", "(", "attachment_binaries", ")", ")", ":", "blob", "=", "attachment_binaries", "[", "i", "]", "filename", "=", "attachment_binary_filenames", "[", "i", "]", "part", "=", "email", ".", "mime", ".", "base", ".", "MIMEBase", "(", "\"application\"", ",", "\"octet-stream\"", ")", "part", ".", "set_payload", "(", "blob", ")", "email", ".", "encoders", ".", 
"encode_base64", "(", "part", ")", "part", ".", "add_header", "(", "'Content-Disposition'", ",", "'attachment; filename=\"%s\"'", "%", "filename", ")", "msg", ".", "attach", "(", "part", ")", "except", "Exception", "as", "e", ":", "raise", "ValueError", "(", "\"send_email: Failed to attach files: {}\"", ".", "format", "(", "e", ")", ")", "return", "msg" ]
Makes an e-mail message. Arguments that can be multiple e-mail addresses are (a) a single e-mail address as a string, or (b) a list of strings (each a single e-mail address), or (c) a comma-separated list of multiple e-mail addresses. Args: from_addr: name of the sender for the "From:" field date: e-mail date in RFC 2822 format, or ``None`` for "now" sender: name of the sender for the "Sender:" field reply_to: name of the sender for the "Reply-To:" field to: e-mail address(es) of the recipients for "To:" field cc: e-mail address(es) of the recipients for "Cc:" field bcc: e-mail address(es) of the recipients for "Bcc:" field subject: e-mail subject body: e-mail body content_type: MIME type for body content, default ``text/plain`` charset: character set for body; default ``utf8`` attachment_filenames: filenames of attachments to add attachment_binaries: binary objects to add as attachments attachment_binary_filenames: filenames corresponding to ``attachment_binaries`` verbose: be verbose? Returns: a :class:`email.mime.multipart.MIMEMultipart` Raises: :exc:`AssertionError`, :exc:`ValueError`
[ "Makes", "an", "e", "-", "mail", "message", "." ]
python
train
39.554286
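A hedged usage sketch of make_email as documented above; the addresses and attachment path are placeholders, and the import path simply follows the module path given in this record.

# Illustrative sketch (assumptions: cardinal_pythonlib installed; placeholder addresses/paths)
from cardinal_pythonlib.email.sendmail import CONTENT_TYPE_HTML, make_email

msg = make_email(
    from_addr="sender@example.org",
    to="first@example.org, second@example.org",   # comma-separated string is accepted
    subject="Report attached",
    body="<p>See the attached report.</p>",
    content_type=CONTENT_TYPE_HTML,
    attachment_filenames=["/tmp/report.pdf"],      # placeholder file path
)
print(msg["To"], "|", msg["Subject"])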
diffeo/py-nilsimsa
nilsimsa/deprecated/_deprecated_nilsimsa.py
https://github.com/diffeo/py-nilsimsa/blob/c652f4bbfd836f7aebf292dcea676cc925ec315a/nilsimsa/deprecated/_deprecated_nilsimsa.py#L174-L182
def from_file(self, filename): """Update running digest with content of named file.""" f = open(filename, 'rb') while True: data = f.read(10480) if not data: break self.update(data) f.close()
[ "def", "from_file", "(", "self", ",", "filename", ")", ":", "f", "=", "open", "(", "filename", ",", "'rb'", ")", "while", "True", ":", "data", "=", "f", ".", "read", "(", "10480", ")", "if", "not", "data", ":", "break", "self", ".", "update", "(", "data", ")", "f", ".", "close", "(", ")" ]
Update running digest with content of named file.
[ "Update", "running", "digest", "with", "content", "of", "named", "file", "." ]
python
train
29.666667
apple/turicreate
src/unity/python/turicreate/data_structures/sgraph.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sgraph.py#L1153-L1221
def load_sgraph(filename, format='binary', delimiter='auto'): """ Load SGraph from text file or previously saved SGraph binary. Parameters ---------- filename : string Location of the file. Can be a local path or a remote URL. format : {'binary', 'snap', 'csv', 'tsv'}, optional Format to of the file to load. - 'binary': native graph format obtained from `SGraph.save`. - 'snap': tab or space separated edge list format with comments, used in the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_. - 'csv': comma-separated edge list without header or comments. - 'tsv': tab-separated edge list without header or comments. delimiter : str, optional Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those format has default delimiter, but sometimes it is useful to overwrite the default delimiter. Returns ------- out : SGraph Loaded SGraph. See Also -------- SGraph, SGraph.save Examples -------- >>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)]) Save and load in binary format. >>> g.save('mygraph') >>> g2 = turicreate.load_sgraph('mygraph') """ if not format in ['binary', 'snap', 'csv', 'tsv']: raise ValueError('Invalid format: %s' % format) with cython_context(): g = None if format is 'binary': proxy = glconnect.get_unity().load_graph(_make_internal_url(filename)) g = SGraph(_proxy=proxy) elif format is 'snap': if delimiter == 'auto': delimiter = '\t' sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter, header=False, column_type_hints=int) g = SGraph().add_edges(sf, 'X1', 'X2') elif format is 'csv': if delimiter == 'auto': delimiter = ',' sf = SFrame.read_csv(filename, header=False, delimiter=delimiter) g = SGraph().add_edges(sf, 'X1', 'X2') elif format is 'tsv': if delimiter == 'auto': delimiter = '\t' sf = SFrame.read_csv(filename, header=False, delimiter=delimiter) g = SGraph().add_edges(sf, 'X1', 'X2') g.summary() # materialize return g
[ "def", "load_sgraph", "(", "filename", ",", "format", "=", "'binary'", ",", "delimiter", "=", "'auto'", ")", ":", "if", "not", "format", "in", "[", "'binary'", ",", "'snap'", ",", "'csv'", ",", "'tsv'", "]", ":", "raise", "ValueError", "(", "'Invalid format: %s'", "%", "format", ")", "with", "cython_context", "(", ")", ":", "g", "=", "None", "if", "format", "is", "'binary'", ":", "proxy", "=", "glconnect", ".", "get_unity", "(", ")", ".", "load_graph", "(", "_make_internal_url", "(", "filename", ")", ")", "g", "=", "SGraph", "(", "_proxy", "=", "proxy", ")", "elif", "format", "is", "'snap'", ":", "if", "delimiter", "==", "'auto'", ":", "delimiter", "=", "'\\t'", "sf", "=", "SFrame", ".", "read_csv", "(", "filename", ",", "comment_char", "=", "'#'", ",", "delimiter", "=", "delimiter", ",", "header", "=", "False", ",", "column_type_hints", "=", "int", ")", "g", "=", "SGraph", "(", ")", ".", "add_edges", "(", "sf", ",", "'X1'", ",", "'X2'", ")", "elif", "format", "is", "'csv'", ":", "if", "delimiter", "==", "'auto'", ":", "delimiter", "=", "','", "sf", "=", "SFrame", ".", "read_csv", "(", "filename", ",", "header", "=", "False", ",", "delimiter", "=", "delimiter", ")", "g", "=", "SGraph", "(", ")", ".", "add_edges", "(", "sf", ",", "'X1'", ",", "'X2'", ")", "elif", "format", "is", "'tsv'", ":", "if", "delimiter", "==", "'auto'", ":", "delimiter", "=", "'\\t'", "sf", "=", "SFrame", ".", "read_csv", "(", "filename", ",", "header", "=", "False", ",", "delimiter", "=", "delimiter", ")", "g", "=", "SGraph", "(", ")", ".", "add_edges", "(", "sf", ",", "'X1'", ",", "'X2'", ")", "g", ".", "summary", "(", ")", "# materialize", "return", "g" ]
Load SGraph from text file or previously saved SGraph binary. Parameters ---------- filename : string Location of the file. Can be a local path or a remote URL. format : {'binary', 'snap', 'csv', 'tsv'}, optional Format to of the file to load. - 'binary': native graph format obtained from `SGraph.save`. - 'snap': tab or space separated edge list format with comments, used in the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_. - 'csv': comma-separated edge list without header or comments. - 'tsv': tab-separated edge list without header or comments. delimiter : str, optional Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those format has default delimiter, but sometimes it is useful to overwrite the default delimiter. Returns ------- out : SGraph Loaded SGraph. See Also -------- SGraph, SGraph.save Examples -------- >>> g = turicreate.SGraph().add_vertices([turicreate.Vertex(i) for i in range(5)]) Save and load in binary format. >>> g.save('mygraph') >>> g2 = turicreate.load_sgraph('mygraph')
[ "Load", "SGraph", "from", "text", "file", "or", "previously", "saved", "SGraph", "binary", "." ]
python
train
34
pandas-dev/pandas
pandas/io/json/json.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L558-L580
def _get_object_parser(self, json): """ Parses a json document into a pandas object. """ typ = self.typ dtype = self.dtype kwargs = { "orient": self.orient, "dtype": self.dtype, "convert_axes": self.convert_axes, "convert_dates": self.convert_dates, "keep_default_dates": self.keep_default_dates, "numpy": self.numpy, "precise_float": self.precise_float, "date_unit": self.date_unit } obj = None if typ == 'frame': obj = FrameParser(json, **kwargs).parse() if typ == 'series' or obj is None: if not isinstance(dtype, bool): kwargs['dtype'] = dtype obj = SeriesParser(json, **kwargs).parse() return obj
[ "def", "_get_object_parser", "(", "self", ",", "json", ")", ":", "typ", "=", "self", ".", "typ", "dtype", "=", "self", ".", "dtype", "kwargs", "=", "{", "\"orient\"", ":", "self", ".", "orient", ",", "\"dtype\"", ":", "self", ".", "dtype", ",", "\"convert_axes\"", ":", "self", ".", "convert_axes", ",", "\"convert_dates\"", ":", "self", ".", "convert_dates", ",", "\"keep_default_dates\"", ":", "self", ".", "keep_default_dates", ",", "\"numpy\"", ":", "self", ".", "numpy", ",", "\"precise_float\"", ":", "self", ".", "precise_float", ",", "\"date_unit\"", ":", "self", ".", "date_unit", "}", "obj", "=", "None", "if", "typ", "==", "'frame'", ":", "obj", "=", "FrameParser", "(", "json", ",", "*", "*", "kwargs", ")", ".", "parse", "(", ")", "if", "typ", "==", "'series'", "or", "obj", "is", "None", ":", "if", "not", "isinstance", "(", "dtype", ",", "bool", ")", ":", "kwargs", "[", "'dtype'", "]", "=", "dtype", "obj", "=", "SeriesParser", "(", "json", ",", "*", "*", "kwargs", ")", ".", "parse", "(", ")", "return", "obj" ]
Parses a json document into a pandas object.
[ "Parses", "a", "json", "document", "into", "a", "pandas", "object", "." ]
python
train
33.956522
cytoscape/py2cytoscape
py2cytoscape/cyrest/networks.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/networks.py#L518-L531
def updateColumnName(self, networkId, tableType, body, verbose=None): """ Renames an existing column in the table specified by the `tableType` and `networkId` parameters. :param networkId: SUID of the network containing the table :param tableType: Table Type :param body: Old and new column name :param verbose: print more :returns: default: successful operation """ response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'/columns', method="PUT", body=body, verbose=verbose) return response
[ "def", "updateColumnName", "(", "self", ",", "networkId", ",", "tableType", ",", "body", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'networks/'", "+", "str", "(", "networkId", ")", "+", "'/tables/'", "+", "str", "(", "tableType", ")", "+", "'/columns'", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Renames an existing column in the table specified by the `tableType` and `networkId` parameters. :param networkId: SUID of the network containing the table :param tableType: Table Type :param body: Old and new column name :param verbose: print more :returns: default: successful operation
[ "Renames", "an", "existing", "column", "in", "the", "table", "specified", "by", "the", "tableType", "and", "networkId", "parameters", "." ]
python
train
42
lpantano/seqcluster
seqcluster/seqbuster/__init__.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L376-L389
def _mirtop(out_files, hairpin, gff3, species, out): """ Convert miraligner to mirtop format """ args = argparse.Namespace() args.hairpin = hairpin args.sps = species args.gtf = gff3 args.add_extra = True args.files = out_files args.format = "seqbuster" args.out_format = "gff" args.out = out reader(args)
[ "def", "_mirtop", "(", "out_files", ",", "hairpin", ",", "gff3", ",", "species", ",", "out", ")", ":", "args", "=", "argparse", ".", "Namespace", "(", ")", "args", ".", "hairpin", "=", "hairpin", "args", ".", "sps", "=", "species", "args", ".", "gtf", "=", "gff3", "args", ".", "add_extra", "=", "True", "args", ".", "files", "=", "out_files", "args", ".", "format", "=", "\"seqbuster\"", "args", ".", "out_format", "=", "\"gff\"", "args", ".", "out", "=", "out", "reader", "(", "args", ")" ]
Convert miraligner to mirtop format
[ "Convert", "miraligner", "to", "mirtop", "format" ]
python
train
24.571429
bethgelab/foolbox
foolbox/attacks/precomputed.py
https://github.com/bethgelab/foolbox/blob/8ab54248c70e45d8580a7d9ee44c9c0fb5755c4a/foolbox/attacks/precomputed.py#L30-L42
def _get_output(self, a, image): """ Looks up the precomputed adversarial image for a given image. """ sd = np.square(self._input_images - image) mses = np.mean(sd, axis=tuple(range(1, sd.ndim))) index = np.argmin(mses) # if we run into numerical problems with this approach, we might # need to add a very tiny threshold here if mses[index] > 0: raise ValueError('No precomputed output image for this image') return self._output_images[index]
[ "def", "_get_output", "(", "self", ",", "a", ",", "image", ")", ":", "sd", "=", "np", ".", "square", "(", "self", ".", "_input_images", "-", "image", ")", "mses", "=", "np", ".", "mean", "(", "sd", ",", "axis", "=", "tuple", "(", "range", "(", "1", ",", "sd", ".", "ndim", ")", ")", ")", "index", "=", "np", ".", "argmin", "(", "mses", ")", "# if we run into numerical problems with this approach, we might", "# need to add a very tiny threshold here", "if", "mses", "[", "index", "]", ">", "0", ":", "raise", "ValueError", "(", "'No precomputed output image for this image'", ")", "return", "self", ".", "_output_images", "[", "index", "]" ]
Looks up the precomputed adversarial image for a given image.
[ "Looks", "up", "the", "precomputed", "adversarial", "image", "for", "a", "given", "image", "." ]
python
valid
39.692308
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/receivable/media.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/media.py#L1899-L1914
def to_array(self): """ Serializes this File to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(File, self).to_array() array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str if self.file_size is not None: array['file_size'] = int(self.file_size) # type int if self.file_path is not None: array['file_path'] = u(self.file_path) # py2: type unicode, py3: type str return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "File", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'file_id'", "]", "=", "u", "(", "self", ".", "file_id", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "file_size", "is", "not", "None", ":", "array", "[", "'file_size'", "]", "=", "int", "(", "self", ".", "file_size", ")", "# type int", "if", "self", ".", "file_path", "is", "not", "None", ":", "array", "[", "'file_path'", "]", "=", "u", "(", "self", ".", "file_path", ")", "# py2: type unicode, py3: type str", "return", "array" ]
Serializes this File to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "File", "to", "a", "dictionary", "." ]
python
train
33.25
shreyaspotnis/rampage
rampage/widgets/KeyFrameWidgets.py
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/widgets/KeyFrameWidgets.py#L472-L475
def updateAllKeys(self): """Update times for all keys in the layout.""" for kf, key in zip(self.kf_list, self.sorted_key_list()): kf.update(key, self.dct[key])
[ "def", "updateAllKeys", "(", "self", ")", ":", "for", "kf", ",", "key", "in", "zip", "(", "self", ".", "kf_list", ",", "self", ".", "sorted_key_list", "(", ")", ")", ":", "kf", ".", "update", "(", "key", ",", "self", ".", "dct", "[", "key", "]", ")" ]
Update times for all keys in the layout.
[ "Update", "times", "for", "all", "keys", "in", "the", "layout", "." ]
python
train
46
pypa/setuptools
setuptools/config.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/config.py#L427-L439
def _deprecated_config_handler(self, func, msg, warning_class): """ this function will wrap around parameters that are deprecated :param msg: deprecation message :param warning_class: class of warning exception to be raised :param func: function to be wrapped around """ @wraps(func) def config_handler(*args, **kwargs): warnings.warn(msg, warning_class) return func(*args, **kwargs) return config_handler
[ "def", "_deprecated_config_handler", "(", "self", ",", "func", ",", "msg", ",", "warning_class", ")", ":", "@", "wraps", "(", "func", ")", "def", "config_handler", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "msg", ",", "warning_class", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "config_handler" ]
this function will wrap around parameters that are deprecated :param msg: deprecation message :param warning_class: class of warning exception to be raised :param func: function to be wrapped around
[ "this", "function", "will", "wrap", "around", "parameters", "that", "are", "deprecated" ]
python
train
37.153846
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QAQuery.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QAQuery.py#L174-L211
def QA_fetch_stock_basic_info_tushare(collections=DATABASE.stock_info_tushare): ''' purpose: tushare 股票列表数据库 code,代码 name,名称 industry,所属行业 area,地区 pe,市盈率 outstanding,流通股本(亿) totals,总股本(亿) totalAssets,总资产(万) liquidAssets,流动资产 fixedAssets,固定资产 reserved,公积金 reservedPerShare,每股公积金 esp,每股收益 bvps,每股净资 pb,市净率 timeToMarket,上市日期 undp,未分利润 perundp, 每股未分配 rev,收入同比(%) profit,利润同比(%) gpr,毛利率(%) npr,净利润率(%) holders,股东人数 add by tauruswang, :param collections: stock_info_tushare 集合 :return: ''' '获取股票基本信息' items = [item for item in collections.find()] # 🛠todo 转变成 dataframe 类型数据 return items
[ "def", "QA_fetch_stock_basic_info_tushare", "(", "collections", "=", "DATABASE", ".", "stock_info_tushare", ")", ":", "'获取股票基本信息'", "items", "=", "[", "item", "for", "item", "in", "collections", ".", "find", "(", ")", "]", "# 🛠todo 转变成 dataframe 类型数据", "return", "items" ]
purpose: tushare 股票列表数据库 code,代码 name,名称 industry,所属行业 area,地区 pe,市盈率 outstanding,流通股本(亿) totals,总股本(亿) totalAssets,总资产(万) liquidAssets,流动资产 fixedAssets,固定资产 reserved,公积金 reservedPerShare,每股公积金 esp,每股收益 bvps,每股净资 pb,市净率 timeToMarket,上市日期 undp,未分利润 perundp, 每股未分配 rev,收入同比(%) profit,利润同比(%) gpr,毛利率(%) npr,净利润率(%) holders,股东人数 add by tauruswang, :param collections: stock_info_tushare 集合 :return:
[ "purpose", ":", "tushare", "股票列表数据库" ]
python
train
20.605263
tech-pi/doufo
src/python/doufo/function.py
https://github.com/tech-pi/doufo/blob/3d375fef30670597768a6eef809b75b4b1b5a3fd/src/python/doufo/function.py#L388-L395
def tagfunc(nargs=None, ndefs=None, nouts=None): """ decorate of tagged function """ def wrapper(f): return wraps(f)(FunctionWithTag(f, nargs=nargs, nouts=nouts, ndefs=ndefs)) return wrapper
[ "def", "tagfunc", "(", "nargs", "=", "None", ",", "ndefs", "=", "None", ",", "nouts", "=", "None", ")", ":", "def", "wrapper", "(", "f", ")", ":", "return", "wraps", "(", "f", ")", "(", "FunctionWithTag", "(", "f", ",", "nargs", "=", "nargs", ",", "nouts", "=", "nouts", ",", "ndefs", "=", "ndefs", ")", ")", "return", "wrapper" ]
decorate of tagged function
[ "decorate", "of", "tagged", "function" ]
python
train
27
googleapis/google-cloud-python
core/google/cloud/_helpers.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/core/google/cloud/_helpers.py#L149-L170
def _ensure_tuple_or_list(arg_name, tuple_or_list): """Ensures an input is a tuple or list. This effectively reduces the iterable types allowed to a very short whitelist: list and tuple. :type arg_name: str :param arg_name: Name of argument to use in error message. :type tuple_or_list: sequence of str :param tuple_or_list: Sequence to be verified. :rtype: list of str :returns: The ``tuple_or_list`` passed in cast to a ``list``. :raises TypeError: if the ``tuple_or_list`` is not a tuple or list. """ if not isinstance(tuple_or_list, (tuple, list)): raise TypeError( "Expected %s to be a tuple or list. " "Received %r" % (arg_name, tuple_or_list) ) return list(tuple_or_list)
[ "def", "_ensure_tuple_or_list", "(", "arg_name", ",", "tuple_or_list", ")", ":", "if", "not", "isinstance", "(", "tuple_or_list", ",", "(", "tuple", ",", "list", ")", ")", ":", "raise", "TypeError", "(", "\"Expected %s to be a tuple or list. \"", "\"Received %r\"", "%", "(", "arg_name", ",", "tuple_or_list", ")", ")", "return", "list", "(", "tuple_or_list", ")" ]
Ensures an input is a tuple or list. This effectively reduces the iterable types allowed to a very short whitelist: list and tuple. :type arg_name: str :param arg_name: Name of argument to use in error message. :type tuple_or_list: sequence of str :param tuple_or_list: Sequence to be verified. :rtype: list of str :returns: The ``tuple_or_list`` passed in cast to a ``list``. :raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
[ "Ensures", "an", "input", "is", "a", "tuple", "or", "list", "." ]
python
train
34.227273
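A short sketch of the whitelisting behaviour described above; the argument names are arbitrary.

# Illustrative sketch: only tuples and lists pass, anything else raises TypeError
from google.cloud._helpers import _ensure_tuple_or_list

print(_ensure_tuple_or_list("fields", ("name", "age")))   # -> ['name', 'age']
try:
    _ensure_tuple_or_list("fields", "name,age")            # a plain string is rejected
except TypeError as exc:
    print(exc)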
GreenBuildingRegistry/yaml-config
yamlconf/config.py
https://github.com/GreenBuildingRegistry/yaml-config/blob/3d4bf4cadd07d4c3b71674077bd7cf16efb6ea10/yamlconf/config.py#L142-L147
def keys(self, section=None): """Provide dict like keys method""" if not section and self.section: section = self.section config = self.config.get(section, {}) if section else self.config return config.keys()
[ "def", "keys", "(", "self", ",", "section", "=", "None", ")", ":", "if", "not", "section", "and", "self", ".", "section", ":", "section", "=", "self", ".", "section", "config", "=", "self", ".", "config", ".", "get", "(", "section", ",", "{", "}", ")", "if", "section", "else", "self", ".", "config", "return", "config", ".", "keys", "(", ")" ]
Provide dict like keys method
[ "Provide", "dict", "like", "keys", "method" ]
python
train
41.166667
angr/angr
angr/simos/javavm.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/simos/javavm.py#L161-L180
def state_entry(self, args=None, **kwargs): # pylint: disable=arguments-differ """ Create an entry state. :param args: List of SootArgument values (optional). """ state = self.state_blank(**kwargs) # for the Java main method `public static main(String[] args)`, # we add symbolic cmdline arguments if not args and state.addr.method.name == 'main' and \ state.addr.method.params[0] == 'java.lang.String[]': cmd_line_args = SimSootExpr_NewArray.new_array(state, "java.lang.String", BVS('argc', 32)) cmd_line_args.add_default_value_generator(self.generate_symbolic_cmd_line_arg) args = [SootArgument(cmd_line_args, "java.lang.String[]")] # for referencing the Java array, we need to know the array reference # => saves it in the globals dict state.globals['cmd_line_args'] = cmd_line_args # setup arguments SimEngineSoot.setup_arguments(state, args) return state
[ "def", "state_entry", "(", "self", ",", "args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pylint: disable=arguments-differ", "state", "=", "self", ".", "state_blank", "(", "*", "*", "kwargs", ")", "# for the Java main method `public static main(String[] args)`,", "# we add symbolic cmdline arguments", "if", "not", "args", "and", "state", ".", "addr", ".", "method", ".", "name", "==", "'main'", "and", "state", ".", "addr", ".", "method", ".", "params", "[", "0", "]", "==", "'java.lang.String[]'", ":", "cmd_line_args", "=", "SimSootExpr_NewArray", ".", "new_array", "(", "state", ",", "\"java.lang.String\"", ",", "BVS", "(", "'argc'", ",", "32", ")", ")", "cmd_line_args", ".", "add_default_value_generator", "(", "self", ".", "generate_symbolic_cmd_line_arg", ")", "args", "=", "[", "SootArgument", "(", "cmd_line_args", ",", "\"java.lang.String[]\"", ")", "]", "# for referencing the Java array, we need to know the array reference", "# => saves it in the globals dict", "state", ".", "globals", "[", "'cmd_line_args'", "]", "=", "cmd_line_args", "# setup arguments", "SimEngineSoot", ".", "setup_arguments", "(", "state", ",", "args", ")", "return", "state" ]
Create an entry state. :param args: List of SootArgument values (optional).
[ "Create", "an", "entry", "state", "." ]
python
train
51.25
euske/pdfminer
pdfminer/utils.py
https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L169-L183
def nunpack(s, default=0): """Unpacks 1 to 4 byte integers (big endian).""" l = len(s) if not l: return default elif l == 1: return ord(s) elif l == 2: return struct.unpack('>H', s)[0] elif l == 3: return struct.unpack('>L', b'\x00'+s)[0] elif l == 4: return struct.unpack('>L', s)[0] else: raise TypeError('invalid length: %d' % l)
[ "def", "nunpack", "(", "s", ",", "default", "=", "0", ")", ":", "l", "=", "len", "(", "s", ")", "if", "not", "l", ":", "return", "default", "elif", "l", "==", "1", ":", "return", "ord", "(", "s", ")", "elif", "l", "==", "2", ":", "return", "struct", ".", "unpack", "(", "'>H'", ",", "s", ")", "[", "0", "]", "elif", "l", "==", "3", ":", "return", "struct", ".", "unpack", "(", "'>L'", ",", "b'\\x00'", "+", "s", ")", "[", "0", "]", "elif", "l", "==", "4", ":", "return", "struct", ".", "unpack", "(", "'>L'", ",", "s", ")", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "'invalid length: %d'", "%", "l", ")" ]
Unpacks 1 to 4 byte integers (big endian).
[ "Unpacks", "1", "to", "4", "byte", "integers", "(", "big", "endian", ")", "." ]
python
train
26.533333
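A worked sketch of the big-endian unpacking above; the expected values are computed by hand from the struct formats shown in the record.

# Illustrative sketch (assumption: pdfminer.utils is importable in this environment)
from pdfminer.utils import nunpack

print(nunpack(b""))               # 0, the default for empty input
print(nunpack(b"\x02"))           # 2
print(nunpack(b"\x01\x02"))       # 0x0102 == 258
print(nunpack(b"\x01\x02\x03"))   # 0x010203 == 66051 (3 bytes are zero-padded to 4)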
python-visualization/folium
folium/folium.py
https://github.com/python-visualization/folium/blob/8595240517135d1637ca4cf7cc624045f1d911b3/folium/folium.py#L408-L420
def choropleth(self, *args, **kwargs): """Call the Choropleth class with the same arguments. This method may be deleted after a year from now (Nov 2018). """ warnings.warn( 'The choropleth method has been deprecated. Instead use the new ' 'Choropleth class, which has the same arguments. See the example ' 'notebook \'GeoJSON_and_choropleth\' for how to do this.', FutureWarning ) from folium.features import Choropleth self.add_child(Choropleth(*args, **kwargs))
[ "def", "choropleth", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "'The choropleth method has been deprecated. Instead use the new '", "'Choropleth class, which has the same arguments. See the example '", "'notebook \\'GeoJSON_and_choropleth\\' for how to do this.'", ",", "FutureWarning", ")", "from", "folium", ".", "features", "import", "Choropleth", "self", ".", "add_child", "(", "Choropleth", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Call the Choropleth class with the same arguments. This method may be deleted after a year from now (Nov 2018).
[ "Call", "the", "Choropleth", "class", "with", "the", "same", "arguments", "." ]
python
train
42.846154
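Because the record above only documents the deprecation shim, here is a hedged sketch of the replacement Choropleth class it points to; the GeoJSON path, data frame, and column names are placeholders.

# Illustrative sketch (assumptions: folium and pandas installed; placeholder file names/columns)
import folium
import pandas as pd

m = folium.Map(location=[48.0, 5.0], zoom_start=4)
df = pd.read_csv("unemployment.csv")           # placeholder table with 'id' and 'rate' columns
folium.Choropleth(
    geo_data="regions.geojson",                # placeholder GeoJSON file
    data=df,
    columns=["id", "rate"],
    key_on="feature.id",
    fill_color="YlGn",
).add_to(m)
m.save("map.html")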
ahtn/python-easyhid
easyhid/easyhid.py
https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L329-L350
def description(self): """ Get a string describing the HID descriptor. """ return \ """HIDDevice: {} | {:x}:{:x} | {} | {} | {} release_number: {} usage_page: {} usage: {} interface_number: {}\ """.format(self.path, self.vendor_id, self.product_id, self.manufacturer_string, self.product_string, self.serial_number, self.release_number, self.usage_page, self.usage, self.interface_number )
[ "def", "description", "(", "self", ")", ":", "return", "\"\"\"HIDDevice:\n {} | {:x}:{:x} | {} | {} | {}\n release_number: {}\n usage_page: {}\n usage: {}\n interface_number: {}\\\n\"\"\"", ".", "format", "(", "self", ".", "path", ",", "self", ".", "vendor_id", ",", "self", ".", "product_id", ",", "self", ".", "manufacturer_string", ",", "self", ".", "product_string", ",", "self", ".", "serial_number", ",", "self", ".", "release_number", ",", "self", ".", "usage_page", ",", "self", ".", "usage", ",", "self", ".", "interface_number", ")" ]
Get a string describing the HID descriptor.
[ "Get", "a", "string", "describing", "the", "HID", "descriptor", "." ]
python
train
23.954545
asweigart/PyMsgBox
pymsgbox/__init__.py
https://github.com/asweigart/PyMsgBox/blob/c94325d21c08690dd89ebf9ebf1cf1b6ed54a1da/pymsgbox/__init__.py#L197-L226
def __put_buttons_in_buttonframe(choices): """Put the buttons in the buttons frame""" global __widgetTexts, __firstWidget, buttonsFrame __firstWidget = None __widgetTexts = {} i = 0 for buttonText in choices: tempButton = tk.Button(buttonsFrame, takefocus=1, text=buttonText) _bindArrows(tempButton) tempButton.pack(expand=tk.YES, side=tk.LEFT, padx='1m', pady='1m', ipadx='2m', ipady='1m') # remember the text associated with this widget __widgetTexts[tempButton] = buttonText # remember the first widget, so we can put the focus there if i == 0: __firstWidget = tempButton i = 1 # for the commandButton, bind activation events to the activation event handler commandButton = tempButton handler = __buttonEvent for selectionEvent in STANDARD_SELECTION_EVENTS: commandButton.bind('<%s>' % selectionEvent, handler) if CANCEL_TEXT in choices: commandButton.bind('<Escape>', __cancelButtonEvent)
[ "def", "__put_buttons_in_buttonframe", "(", "choices", ")", ":", "global", "__widgetTexts", ",", "__firstWidget", ",", "buttonsFrame", "__firstWidget", "=", "None", "__widgetTexts", "=", "{", "}", "i", "=", "0", "for", "buttonText", "in", "choices", ":", "tempButton", "=", "tk", ".", "Button", "(", "buttonsFrame", ",", "takefocus", "=", "1", ",", "text", "=", "buttonText", ")", "_bindArrows", "(", "tempButton", ")", "tempButton", ".", "pack", "(", "expand", "=", "tk", ".", "YES", ",", "side", "=", "tk", ".", "LEFT", ",", "padx", "=", "'1m'", ",", "pady", "=", "'1m'", ",", "ipadx", "=", "'2m'", ",", "ipady", "=", "'1m'", ")", "# remember the text associated with this widget", "__widgetTexts", "[", "tempButton", "]", "=", "buttonText", "# remember the first widget, so we can put the focus there", "if", "i", "==", "0", ":", "__firstWidget", "=", "tempButton", "i", "=", "1", "# for the commandButton, bind activation events to the activation event handler", "commandButton", "=", "tempButton", "handler", "=", "__buttonEvent", "for", "selectionEvent", "in", "STANDARD_SELECTION_EVENTS", ":", "commandButton", ".", "bind", "(", "'<%s>'", "%", "selectionEvent", ",", "handler", ")", "if", "CANCEL_TEXT", "in", "choices", ":", "commandButton", ".", "bind", "(", "'<Escape>'", ",", "__cancelButtonEvent", ")" ]
Put the buttons in the buttons frame
[ "Put", "the", "buttons", "in", "the", "buttons", "frame" ]
python
train
34.633333
PGower/PyCanvas
pycanvas/apis/files.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/files.py#L498-L516
def delete_file(self, id): """ Delete file. Remove the specified file curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \ -H 'Authorization: Bearer <token>' """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("DELETE /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/files/{id}".format(**path), data=data, params=params, no_data=True)
[ "def", "delete_file", "(", "self", ",", "id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "id", "self", ".", "logger", ".", "debug", "(", "\"DELETE /api/v1/files/{id} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"DELETE\"", ",", "\"/api/v1/files/{id}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "no_data", "=", "True", ")" ]
Delete file. Remove the specified file curl -XDELETE 'https://<canvas>/api/v1/files/<file_id>' \ -H 'Authorization: Bearer <token>'
[ "Delete", "file", ".", "Remove", "the", "specified", "file", "curl", "-", "XDELETE", "https", ":", "//", "<canvas", ">", "/", "api", "/", "v1", "/", "files", "/", "<file_id", ">", "\\", "-", "H", "Authorization", ":", "Bearer", "<token", ">" ]
python
train
33.263158
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L5040-L5047
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'entities') and self.entities is not None: _dict['entities'] = [x._to_dict() for x in self.entities] if hasattr(self, 'pagination') and self.pagination is not None: _dict['pagination'] = self.pagination._to_dict() return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'entities'", ")", "and", "self", ".", "entities", "is", "not", "None", ":", "_dict", "[", "'entities'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "entities", "]", "if", "hasattr", "(", "self", ",", "'pagination'", ")", "and", "self", ".", "pagination", "is", "not", "None", ":", "_dict", "[", "'pagination'", "]", "=", "self", ".", "pagination", ".", "_to_dict", "(", ")", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
48.375
marvin-ai/marvin-python-toolbox
marvin_python_toolbox/common/http_client.py
https://github.com/marvin-ai/marvin-python-toolbox/blob/7c95cb2f9698b989150ab94c1285f3a9eaaba423/marvin_python_toolbox/common/http_client.py#L111-L115
def post(self, path, data=None): """Encapsulates POST requests""" data = data or {} response = requests.post(self.url(path), data=to_json(data), headers=self.request_header()) return self.parse_response(response)
[ "def", "post", "(", "self", ",", "path", ",", "data", "=", "None", ")", ":", "data", "=", "data", "or", "{", "}", "response", "=", "requests", ".", "post", "(", "self", ".", "url", "(", "path", ")", ",", "data", "=", "to_json", "(", "data", ")", ",", "headers", "=", "self", ".", "request_header", "(", ")", ")", "return", "self", ".", "parse_response", "(", "response", ")" ]
Encapsulates POST requests
[ "Encapsulates", "POST", "requests" ]
python
train
48
rigetti/grove
grove/qft/fourier.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/qft/fourier.py#L37-L57
def _core_qft(qubits: List[int], coeff: int) -> Program: """ Generates the core program to perform the quantum Fourier transform :param qubits: A list of qubit indexes. :param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT) :return: A Quil program to compute the core (inverse) QFT of the qubits. """ q = qubits[0] qs = qubits[1:] if 1 == len(qubits): return [H(q)] else: n = 1 + len(qs) cR = [] for idx, i in enumerate(range(n - 1, 0, -1)): q_idx = qs[idx] angle = math.pi / 2 ** (n - i) cR.append(CPHASE(coeff * angle, q, q_idx)) return _core_qft(qs, coeff) + list(reversed(cR)) + [H(q)]
[ "def", "_core_qft", "(", "qubits", ":", "List", "[", "int", "]", ",", "coeff", ":", "int", ")", "->", "Program", ":", "q", "=", "qubits", "[", "0", "]", "qs", "=", "qubits", "[", "1", ":", "]", "if", "1", "==", "len", "(", "qubits", ")", ":", "return", "[", "H", "(", "q", ")", "]", "else", ":", "n", "=", "1", "+", "len", "(", "qs", ")", "cR", "=", "[", "]", "for", "idx", ",", "i", "in", "enumerate", "(", "range", "(", "n", "-", "1", ",", "0", ",", "-", "1", ")", ")", ":", "q_idx", "=", "qs", "[", "idx", "]", "angle", "=", "math", ".", "pi", "/", "2", "**", "(", "n", "-", "i", ")", "cR", ".", "append", "(", "CPHASE", "(", "coeff", "*", "angle", ",", "q", ",", "q_idx", ")", ")", "return", "_core_qft", "(", "qs", ",", "coeff", ")", "+", "list", "(", "reversed", "(", "cR", ")", ")", "+", "[", "H", "(", "q", ")", "]" ]
Generates the core program to perform the quantum Fourier transform :param qubits: A list of qubit indexes. :param coeff: A modifier for the angle used in rotations (-1 for inverse QFT, 1 for QFT) :return: A Quil program to compute the core (inverse) QFT of the qubits.
[ "Generates", "the", "core", "program", "to", "perform", "the", "quantum", "Fourier", "transform", ":", "param", "qubits", ":", "A", "list", "of", "qubit", "indexes", ".", ":", "param", "coeff", ":", "A", "modifier", "for", "the", "angle", "used", "in", "rotations", "(", "-", "1", "for", "inverse", "QFT", "1", "for", "QFT", ")", ":", "return", ":", "A", "Quil", "program", "to", "compute", "the", "core", "(", "inverse", ")", "QFT", "of", "the", "qubits", "." ]
python
train
34.714286
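To make the recursion in _core_qft easier to follow: for the first (most significant) qubit it emits one controlled-phase per remaining qubit, with angles pi / 2**(n - i), then an H, and recurses on the rest. A pure-Python sketch of just that angle arithmetic (no pyquil needed; the helper name qft_angles is invented for this illustration):

    import math

    def qft_angles(n_qubits, coeff=1):
        # Angles of the CPHASE gates the recursion appends for the first qubit,
        # in the order they are generated (the list is reversed before the H).
        n = n_qubits
        return [coeff * math.pi / 2 ** (n - i) for i in range(n - 1, 0, -1)]

    print(qft_angles(3))   # [pi/2, pi/4] -> [1.5707..., 0.7853...]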
peri-source/peri
peri/states.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L273-L288
def _grad_one_param(self, funct, p, dl=2e-5, rts=False, nout=1, **kwargs): """ Gradient of `func` wrt a single parameter `p`. (see _graddoc) """ vals = self.get_values(p) f0 = funct(**kwargs) self.update(p, vals+dl) f1 = funct(**kwargs) if rts: self.update(p, vals) if nout == 1: return (f1 - f0) / dl else: return [(f1[i] - f0[i]) / dl for i in range(nout)]
[ "def", "_grad_one_param", "(", "self", ",", "funct", ",", "p", ",", "dl", "=", "2e-5", ",", "rts", "=", "False", ",", "nout", "=", "1", ",", "*", "*", "kwargs", ")", ":", "vals", "=", "self", ".", "get_values", "(", "p", ")", "f0", "=", "funct", "(", "*", "*", "kwargs", ")", "self", ".", "update", "(", "p", ",", "vals", "+", "dl", ")", "f1", "=", "funct", "(", "*", "*", "kwargs", ")", "if", "rts", ":", "self", ".", "update", "(", "p", ",", "vals", ")", "if", "nout", "==", "1", ":", "return", "(", "f1", "-", "f0", ")", "/", "dl", "else", ":", "return", "[", "(", "f1", "[", "i", "]", "-", "f0", "[", "i", "]", ")", "/", "dl", "for", "i", "in", "range", "(", "nout", ")", "]" ]
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
[ "Gradient", "of", "func", "wrt", "a", "single", "parameter", "p", ".", "(", "see", "_graddoc", ")" ]
python
valid
28.8125
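The gradient in _grad_one_param is a plain forward finite difference taken after nudging one parameter by dl. A standalone reminder of the same estimate, with an invented test function (nothing peri-specific here):

    def forward_diff(f, x, dl=2e-5):
        # (f(x + dl) - f(x)) / dl, the same formula _grad_one_param applies
        # after temporarily updating the chosen parameter by dl.
        return (f(x + dl) - f(x)) / dl

    # d/dx x**2 at x = 3 is 6; the forward difference overshoots by roughly dl.
    print(forward_diff(lambda x: x * x, 3.0))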
bram85/topydo
topydo/ui/columns/TodoListWidget.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/columns/TodoListWidget.py#L233-L254
def _execute_on_selected(self, p_cmd_str, p_execute_signal): """ Executes command specified by p_cmd_str on selected todo item. p_cmd_str should be a string with one replacement field ('{}') which will be substituted by id of the selected todo item. p_execute_signal is the signal name passed to the main loop. It should be one of 'execute_command' or 'execute_command_silent'. """ try: todo = self.listbox.focus.todo todo_id = str(self.view.todolist.number(todo)) urwid.emit_signal(self, p_execute_signal, p_cmd_str, todo_id) # force screen redraw after editing if p_cmd_str.startswith('edit'): urwid.emit_signal(self, 'refresh') except AttributeError: # No todo item selected pass
[ "def", "_execute_on_selected", "(", "self", ",", "p_cmd_str", ",", "p_execute_signal", ")", ":", "try", ":", "todo", "=", "self", ".", "listbox", ".", "focus", ".", "todo", "todo_id", "=", "str", "(", "self", ".", "view", ".", "todolist", ".", "number", "(", "todo", ")", ")", "urwid", ".", "emit_signal", "(", "self", ",", "p_execute_signal", ",", "p_cmd_str", ",", "todo_id", ")", "# force screen redraw after editing", "if", "p_cmd_str", ".", "startswith", "(", "'edit'", ")", ":", "urwid", ".", "emit_signal", "(", "self", ",", "'refresh'", ")", "except", "AttributeError", ":", "# No todo item selected", "pass" ]
Executes command specified by p_cmd_str on selected todo item. p_cmd_str should be a string with one replacement field ('{}') which will be substituted by id of the selected todo item. p_execute_signal is the signal name passed to the main loop. It should be one of 'execute_command' or 'execute_command_silent'.
[ "Executes", "command", "specified", "by", "p_cmd_str", "on", "selected", "todo", "item", "." ]
python
train
38.045455
pandas-dev/pandas
pandas/io/excel/_util.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L89-L119
def _range2cols(areas): """ Convert comma separated list of column names and ranges to indices. Parameters ---------- areas : str A string containing a sequence of column ranges (or areas). Returns ------- cols : list A list of 0-based column indices. Examples -------- >>> _range2cols('A:E') [0, 1, 2, 3, 4] >>> _range2cols('A,C,Z:AB') [0, 2, 25, 26, 27] """ cols = [] for rng in areas.split(","): if ":" in rng: rng = rng.split(":") cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) else: cols.append(_excel2num(rng)) return cols
[ "def", "_range2cols", "(", "areas", ")", ":", "cols", "=", "[", "]", "for", "rng", "in", "areas", ".", "split", "(", "\",\"", ")", ":", "if", "\":\"", "in", "rng", ":", "rng", "=", "rng", ".", "split", "(", "\":\"", ")", "cols", ".", "extend", "(", "lrange", "(", "_excel2num", "(", "rng", "[", "0", "]", ")", ",", "_excel2num", "(", "rng", "[", "1", "]", ")", "+", "1", ")", ")", "else", ":", "cols", ".", "append", "(", "_excel2num", "(", "rng", ")", ")", "return", "cols" ]
Convert comma separated list of column names and ranges to indices. Parameters ---------- areas : str A string containing a sequence of column ranges (or areas). Returns ------- cols : list A list of 0-based column indices. Examples -------- >>> _range2cols('A:E') [0, 1, 2, 3, 4] >>> _range2cols('A,C,Z:AB') [0, 2, 25, 26, 27]
[ "Convert", "comma", "separated", "list", "of", "column", "names", "and", "ranges", "to", "indices", "." ]
python
train
21.290323
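The _range2cols record leans on a companion _excel2num helper that is not shown. So that the doctest examples can be reproduced without pandas, here is a self-contained sketch of both pieces — an illustrative reimplementation, not the pandas source:

    def excel2num(col):
        # 'A' -> 0, 'Z' -> 25, 'AA' -> 26: base-26 digits with no zero digit.
        index = 0
        for ch in col.upper().strip():
            index = index * 26 + (ord(ch) - ord('A') + 1)
        return index - 1

    def range2cols(areas):
        cols = []
        for rng in areas.split(','):
            if ':' in rng:
                first, last = rng.split(':')
                cols.extend(range(excel2num(first), excel2num(last) + 1))
            else:
                cols.append(excel2num(rng))
        return cols

    print(range2cols('A,C,Z:AB'))   # [0, 2, 25, 26, 27]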
steffann/pylisp
pylisp/packet/lisp/control/map_register.py
https://github.com/steffann/pylisp/blob/907340f0c7ef2c4d4fe0c8e0a48df5be0d969407/pylisp/packet/lisp/control/map_register.py#L274-L322
def to_bytes(self): ''' Create bytes from properties ''' # Verify that properties make sense self.sanitize() # Start with the type bitstream = BitArray('uint:4=%d' % self.message_type) # Add the flags bitstream += BitArray('bool=%d' % self.proxy_map_reply) # Add reserved bits bitstream += self._reserved1 # Decide on the has_xtr_site_id value has_xtr_site_id = bool(self.xtr_id or self.site_id or self.for_rtr) bitstream += BitArray('bool=%d, bool=%d' % (has_xtr_site_id, self.for_rtr)) # Add reserved bits bitstream += self._reserved2 # Add the rest of the flags bitstream += BitArray('bool=%d' % self.want_map_notify) # Add record count bitstream += BitArray('uint:8=%d' % len(self.records)) # Add the nonce bitstream += BitArray(bytes=self.nonce) # Add the key-id and authentication data bitstream += BitArray('uint:16=%d, uint:16=%d, hex=%s' % (self.key_id, len(self.authentication_data), self.authentication_data.encode('hex'))) # Add the map-reply records for record in self.records: bitstream += record.to_bitstream() # Add xTR-ID and site-ID if we said we would if has_xtr_site_id: bitstream += BitArray('uint:128=%d, uint:64=%d' % (self.xtr_id, self.site_id)) return bitstream.bytes
[ "def", "to_bytes", "(", "self", ")", ":", "# Verify that properties make sense", "self", ".", "sanitize", "(", ")", "# Start with the type", "bitstream", "=", "BitArray", "(", "'uint:4=%d'", "%", "self", ".", "message_type", ")", "# Add the flags", "bitstream", "+=", "BitArray", "(", "'bool=%d'", "%", "self", ".", "proxy_map_reply", ")", "# Add reserved bits", "bitstream", "+=", "self", ".", "_reserved1", "# Decide on the has_xtr_site_id value", "has_xtr_site_id", "=", "bool", "(", "self", ".", "xtr_id", "or", "self", ".", "site_id", "or", "self", ".", "for_rtr", ")", "bitstream", "+=", "BitArray", "(", "'bool=%d, bool=%d'", "%", "(", "has_xtr_site_id", ",", "self", ".", "for_rtr", ")", ")", "# Add reserved bits", "bitstream", "+=", "self", ".", "_reserved2", "# Add the rest of the flags", "bitstream", "+=", "BitArray", "(", "'bool=%d'", "%", "self", ".", "want_map_notify", ")", "# Add record count", "bitstream", "+=", "BitArray", "(", "'uint:8=%d'", "%", "len", "(", "self", ".", "records", ")", ")", "# Add the nonce", "bitstream", "+=", "BitArray", "(", "bytes", "=", "self", ".", "nonce", ")", "# Add the key-id and authentication data", "bitstream", "+=", "BitArray", "(", "'uint:16=%d, uint:16=%d, hex=%s'", "%", "(", "self", ".", "key_id", ",", "len", "(", "self", ".", "authentication_data", ")", ",", "self", ".", "authentication_data", ".", "encode", "(", "'hex'", ")", ")", ")", "# Add the map-reply records", "for", "record", "in", "self", ".", "records", ":", "bitstream", "+=", "record", ".", "to_bitstream", "(", ")", "# Add xTR-ID and site-ID if we said we would", "if", "has_xtr_site_id", ":", "bitstream", "+=", "BitArray", "(", "'uint:128=%d, uint:64=%d'", "%", "(", "self", ".", "xtr_id", ",", "self", ".", "site_id", ")", ")", "return", "bitstream", ".", "bytes" ]
Create bytes from properties
[ "Create", "bytes", "from", "properties" ]
python
train
33.122449
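The packing in to_bytes is built on the bitstring library's format-string constructor (BitArray('uint:4=...'), 'bool=...'), concatenated with += and finally read out via .bytes. A minimal sketch of that pattern, assuming bitstring is installed; the field values below are arbitrary:

    from bitstring import BitArray

    # Pack a 4-bit type, a 1-bit flag, 3 reserved bits and an 8-bit count
    # into exactly two bytes (.bytes requires a whole number of bytes).
    bits = BitArray('uint:4=3')      # message type
    bits += BitArray('bool=1')       # flag
    bits += BitArray('uint:3=0')     # reserved
    bits += BitArray('uint:8=17')    # record count
    print(bits.bytes)                # 0x38 0x11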
Autodesk/cryptorito
cryptorito/__init__.py
https://github.com/Autodesk/cryptorito/blob/277fc7cc42c31c5bc37e26d8bf5a2ac746a6ea85/cryptorito/__init__.py#L345-L359
def import_gpg_key(key): """Imports a GPG key""" if not key: raise CryptoritoError('Invalid GPG Key') key_fd, key_filename = mkstemp("cryptorito-gpg-import") key_handle = os.fdopen(key_fd, 'w') key_handle.write(polite_string(key)) key_handle.close() cmd = flatten([gnupg_bin(), gnupg_home(), "--import", key_filename]) output = stderr_output(cmd) msg = 'gpg: Total number processed: 1' output_bits = polite_string(output).split('\n') return len([line for line in output_bits if line == msg]) == 1
[ "def", "import_gpg_key", "(", "key", ")", ":", "if", "not", "key", ":", "raise", "CryptoritoError", "(", "'Invalid GPG Key'", ")", "key_fd", ",", "key_filename", "=", "mkstemp", "(", "\"cryptorito-gpg-import\"", ")", "key_handle", "=", "os", ".", "fdopen", "(", "key_fd", ",", "'w'", ")", "key_handle", ".", "write", "(", "polite_string", "(", "key", ")", ")", "key_handle", ".", "close", "(", ")", "cmd", "=", "flatten", "(", "[", "gnupg_bin", "(", ")", ",", "gnupg_home", "(", ")", ",", "\"--import\"", ",", "key_filename", "]", ")", "output", "=", "stderr_output", "(", "cmd", ")", "msg", "=", "'gpg: Total number processed: 1'", "output_bits", "=", "polite_string", "(", "output", ")", ".", "split", "(", "'\\n'", ")", "return", "len", "(", "[", "line", "for", "line", "in", "output_bits", "if", "line", "==", "msg", "]", ")", "==", "1" ]
Imports a GPG key
[ "Imports", "a", "GPG", "key" ]
python
train
35.733333
moralrecordings/mrcrowbar
mrcrowbar/utils.py
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/utils.py#L450-L476
def pixdump( source, start=None, end=None, length=None, width=64, height=None, palette=None ): """Print the contents of a byte string as a 256 colour image. source The byte string to print. start Start offset to read from (default: start) end End offset to stop reading at (default: end) length Length to read in (optional replacement for end) width Width of image to render in pixels (default: 64) height Height of image to render in pixels (default: auto) palette List of Colours to use (default: test palette) """ for line in pixdump_iter( source, start, end, length, width, height, palette ): print( line )
[ "def", "pixdump", "(", "source", ",", "start", "=", "None", ",", "end", "=", "None", ",", "length", "=", "None", ",", "width", "=", "64", ",", "height", "=", "None", ",", "palette", "=", "None", ")", ":", "for", "line", "in", "pixdump_iter", "(", "source", ",", "start", ",", "end", ",", "length", ",", "width", ",", "height", ",", "palette", ")", ":", "print", "(", "line", ")" ]
Print the contents of a byte string as a 256 colour image. source The byte string to print. start Start offset to read from (default: start) end End offset to stop reading at (default: end) length Length to read in (optional replacement for end) width Width of image to render in pixels (default: 64) height Height of image to render in pixels (default: auto) palette List of Colours to use (default: test palette)
[ "Print", "the", "contents", "of", "a", "byte", "string", "as", "a", "256", "colour", "image", "." ]
python
train
25.777778
projectshift/shift-schema
shiftschema/validators/email.py
https://github.com/projectshift/shift-schema/blob/07787b540d3369bb37217ffbfbe629118edaf0eb/shiftschema/validators/email.py#L26-L43
def validate(self, value, model=None, context=None): """ Validate Perform value validation and return result :param value: value to check :param model: parent model being validated :param context: object or None, validation context :return: shiftschema.results.SimpleResult """ regex = self.regex() match = regex.match(value) if not match: return Error(self.not_email) # success otherwise return Error()
[ "def", "validate", "(", "self", ",", "value", ",", "model", "=", "None", ",", "context", "=", "None", ")", ":", "regex", "=", "self", ".", "regex", "(", ")", "match", "=", "regex", ".", "match", "(", "value", ")", "if", "not", "match", ":", "return", "Error", "(", "self", ".", "not_email", ")", "# success otherwise", "return", "Error", "(", ")" ]
Validate Perform value validation and return result :param value: value to check :param model: parent model being validated :param context: object or None, validation context :return: shiftschema.results.SimpleResult
[ "Validate", "Perform", "value", "validation", "and", "return", "result" ]
python
train
30.444444
linode/linode_api4-python
linode_api4/linode_client.py
https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/linode_client.py#L560-L569
def transfer(self): """ Returns a MappedObject containing the account's transfer pool data """ result = self.client.get('/account/transfer') if not 'used' in result: raise UnexpectedResponseError('Unexpected response when getting Transfer Pool!') return MappedObject(**result)
[ "def", "transfer", "(", "self", ")", ":", "result", "=", "self", ".", "client", ".", "get", "(", "'/account/transfer'", ")", "if", "not", "'used'", "in", "result", ":", "raise", "UnexpectedResponseError", "(", "'Unexpected response when getting Transfer Pool!'", ")", "return", "MappedObject", "(", "*", "*", "result", ")" ]
Returns a MappedObject containing the account's transfer pool data
[ "Returns", "a", "MappedObject", "containing", "the", "account", "s", "transfer", "pool", "data" ]
python
train
32.9
bukun/TorCMS
torcms/handlers/wiki_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/wiki_handler.py#L105-L130
def update(self, uid): ''' Update the wiki. ''' postinfo = MWiki.get_by_uid(uid) if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user(): pass else: return False post_data = self.get_post_data() post_data['user_name'] = self.userinfo.user_name cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip() cnt_new = post_data['cnt_md'].strip() if cnt_old == cnt_new: pass else: MWikiHist.create_wiki_history(postinfo) MWiki.update(uid, post_data) # cele_gen_whoosh.delay() tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh) self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title'])))
[ "def", "update", "(", "self", ",", "uid", ")", ":", "postinfo", "=", "MWiki", ".", "get_by_uid", "(", "uid", ")", "if", "self", ".", "check_post_role", "(", ")", "[", "'EDIT'", "]", "or", "postinfo", ".", "user_name", "==", "self", ".", "get_current_user", "(", ")", ":", "pass", "else", ":", "return", "False", "post_data", "=", "self", ".", "get_post_data", "(", ")", "post_data", "[", "'user_name'", "]", "=", "self", ".", "userinfo", ".", "user_name", "cnt_old", "=", "tornado", ".", "escape", ".", "xhtml_unescape", "(", "postinfo", ".", "cnt_md", ")", ".", "strip", "(", ")", "cnt_new", "=", "post_data", "[", "'cnt_md'", "]", ".", "strip", "(", ")", "if", "cnt_old", "==", "cnt_new", ":", "pass", "else", ":", "MWikiHist", ".", "create_wiki_history", "(", "postinfo", ")", "MWiki", ".", "update", "(", "uid", ",", "post_data", ")", "# cele_gen_whoosh.delay()", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ".", "add_callback", "(", "self", ".", "cele_gen_whoosh", ")", "self", ".", "redirect", "(", "'/wiki/{0}'", ".", "format", "(", "tornado", ".", "escape", ".", "url_escape", "(", "post_data", "[", "'title'", "]", ")", ")", ")" ]
Update the wiki.
[ "Update", "the", "wiki", "." ]
python
train
31.038462
onelogin/python3-saml
src/onelogin/saml2/response.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/response.py#L521-L535
def get_session_index(self): """ Gets the SessionIndex from the AuthnStatement Could be used to be stored in the local session in order to be used in a future Logout Request that the SP could send to the SP, to set what specific session must be deleted :returns: The SessionIndex value :rtype: string|None """ session_index = None authn_statement_nodes = self.__query_assertion('/saml:AuthnStatement[@SessionIndex]') if authn_statement_nodes: session_index = authn_statement_nodes[0].get('SessionIndex') return session_index
[ "def", "get_session_index", "(", "self", ")", ":", "session_index", "=", "None", "authn_statement_nodes", "=", "self", ".", "__query_assertion", "(", "'/saml:AuthnStatement[@SessionIndex]'", ")", "if", "authn_statement_nodes", ":", "session_index", "=", "authn_statement_nodes", "[", "0", "]", ".", "get", "(", "'SessionIndex'", ")", "return", "session_index" ]
Gets the SessionIndex from the AuthnStatement Could be used to be stored in the local session in order to be used in a future Logout Request that the SP could send to the SP, to set what specific session must be deleted :returns: The SessionIndex value :rtype: string|None
[ "Gets", "the", "SessionIndex", "from", "the", "AuthnStatement", "Could", "be", "used", "to", "be", "stored", "in", "the", "local", "session", "in", "order", "to", "be", "used", "in", "a", "future", "Logout", "Request", "that", "the", "SP", "could", "send", "to", "the", "SP", "to", "set", "what", "specific", "session", "must", "be", "deleted" ]
python
train
41.266667
hozn/coilmq
coilmq/protocol/__init__.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/protocol/__init__.py#L218-L230
def abort(self, frame): """ Handles ABORT command: Rolls back specified transaction. """ if not frame.transaction: raise ProtocolError("Missing transaction for ABORT command.") if not frame.transaction in self.engine.transactions: raise ProtocolError("Invalid transaction: %s" % frame.transaction) self.engine.queue_manager.resend_transaction_frames( self.engine.connection, frame.transaction) del self.engine.transactions[frame.transaction]
[ "def", "abort", "(", "self", ",", "frame", ")", ":", "if", "not", "frame", ".", "transaction", ":", "raise", "ProtocolError", "(", "\"Missing transaction for ABORT command.\"", ")", "if", "not", "frame", ".", "transaction", "in", "self", ".", "engine", ".", "transactions", ":", "raise", "ProtocolError", "(", "\"Invalid transaction: %s\"", "%", "frame", ".", "transaction", ")", "self", ".", "engine", ".", "queue_manager", ".", "resend_transaction_frames", "(", "self", ".", "engine", ".", "connection", ",", "frame", ".", "transaction", ")", "del", "self", ".", "engine", ".", "transactions", "[", "frame", ".", "transaction", "]" ]
Handles ABORT command: Rolls back specified transaction.
[ "Handles", "ABORT", "command", ":", "Rolls", "back", "specified", "transaction", "." ]
python
train
40.230769
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/packaging/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/packaging/__init__.py#L277-L305
def stripinstallbuilder(target, source, env): """ Strips the install builder action from the source list and stores the final installation location as the "PACKAGING_INSTALL_LOCATION" of the source of the source file. This effectively removes the final installed files from the source list while remembering the installation location. It also warns about files which have no install builder attached. """ def has_no_install_location(file): return not (file.has_builder() and\ hasattr(file.builder, 'name') and\ (file.builder.name=="InstallBuilder" or\ file.builder.name=="InstallAsBuilder")) if len([src for src in source if has_no_install_location(src)]): warn(Warning, "there are files to package which have no\ InstallBuilder attached, this might lead to irreproducible packages") n_source=[] for s in source: if has_no_install_location(s): n_source.append(s) else: for ss in s.sources: n_source.append(ss) copy_attr(s, ss) ss.Tag('PACKAGING_INSTALL_LOCATION', s.get_path()) return (target, n_source)
[ "def", "stripinstallbuilder", "(", "target", ",", "source", ",", "env", ")", ":", "def", "has_no_install_location", "(", "file", ")", ":", "return", "not", "(", "file", ".", "has_builder", "(", ")", "and", "hasattr", "(", "file", ".", "builder", ",", "'name'", ")", "and", "(", "file", ".", "builder", ".", "name", "==", "\"InstallBuilder\"", "or", "file", ".", "builder", ".", "name", "==", "\"InstallAsBuilder\"", ")", ")", "if", "len", "(", "[", "src", "for", "src", "in", "source", "if", "has_no_install_location", "(", "src", ")", "]", ")", ":", "warn", "(", "Warning", ",", "\"there are files to package which have no\\\n InstallBuilder attached, this might lead to irreproducible packages\"", ")", "n_source", "=", "[", "]", "for", "s", "in", "source", ":", "if", "has_no_install_location", "(", "s", ")", ":", "n_source", ".", "append", "(", "s", ")", "else", ":", "for", "ss", "in", "s", ".", "sources", ":", "n_source", ".", "append", "(", "ss", ")", "copy_attr", "(", "s", ",", "ss", ")", "ss", ".", "Tag", "(", "'PACKAGING_INSTALL_LOCATION'", ",", "s", ".", "get_path", "(", ")", ")", "return", "(", "target", ",", "n_source", ")" ]
Strips the install builder action from the source list and stores the final installation location as the "PACKAGING_INSTALL_LOCATION" of the source of the source file. This effectively removes the final installed files from the source list while remembering the installation location. It also warns about files which have no install builder attached.
[ "Strips", "the", "install", "builder", "action", "from", "the", "source", "list", "and", "stores", "the", "final", "installation", "location", "as", "the", "PACKAGING_INSTALL_LOCATION", "of", "the", "source", "of", "the", "source", "file", ".", "This", "effectively", "removes", "the", "final", "installed", "files", "from", "the", "source", "list", "while", "remembering", "the", "installation", "location", "." ]
python
train
40.413793
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L9279-L9298
def rc_channels_scaled_encode(self, time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi): ''' The scaled values of the RC channels received. (-100%) -10000, (0%) 0, (100%) 10000. Channels that are inactive should be set to UINT16_MAX. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t) chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t) ''' return MAVLink_rc_channels_scaled_message(time_boot_ms, port, chan1_scaled, chan2_scaled, chan3_scaled, chan4_scaled, chan5_scaled, chan6_scaled, chan7_scaled, chan8_scaled, rssi)
[ "def", "rc_channels_scaled_encode", "(", "self", ",", "time_boot_ms", ",", "port", ",", "chan1_scaled", ",", "chan2_scaled", ",", "chan3_scaled", ",", "chan4_scaled", ",", "chan5_scaled", ",", "chan6_scaled", ",", "chan7_scaled", ",", "chan8_scaled", ",", "rssi", ")", ":", "return", "MAVLink_rc_channels_scaled_message", "(", "time_boot_ms", ",", "port", ",", "chan1_scaled", ",", "chan2_scaled", ",", "chan3_scaled", ",", "chan4_scaled", ",", "chan5_scaled", ",", "chan6_scaled", ",", "chan7_scaled", ",", "chan8_scaled", ",", "rssi", ")" ]
The scaled values of the RC channels received. (-100%) -10000, (0%) 0, (100%) 10000. Channels that are inactive should be set to UINT16_MAX. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows for more than 8 servos. (uint8_t) chan1_scaled : RC channel 1 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan2_scaled : RC channel 2 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan3_scaled : RC channel 3 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan4_scaled : RC channel 4 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan5_scaled : RC channel 5 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan6_scaled : RC channel 6 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan7_scaled : RC channel 7 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) chan8_scaled : RC channel 8 value scaled, (-100%) -10000, (0%) 0, (100%) 10000, (invalid) INT16_MAX. (int16_t) rssi : Receive signal strength indicator, 0: 0%, 100: 100%, 255: invalid/unknown. (uint8_t)
[ "The", "scaled", "values", "of", "the", "RC", "channels", "received", ".", "(", "-", "100%", ")", "-", "10000", "(", "0%", ")", "0", "(", "100%", ")", "10000", ".", "Channels", "that", "are", "inactive", "should", "be", "set", "to", "UINT16_MAX", "." ]
python
train
104.95
Alignak-monitoring/alignak
alignak/objects/config.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/config.py#L2566-L2576
def remove_templates(self): """Clean useless elements like templates because they are not needed anymore :return: None """ self.hosts.remove_templates() self.contacts.remove_templates() self.services.remove_templates() self.servicedependencies.remove_templates() self.hostdependencies.remove_templates() self.timeperiods.remove_templates()
[ "def", "remove_templates", "(", "self", ")", ":", "self", ".", "hosts", ".", "remove_templates", "(", ")", "self", ".", "contacts", ".", "remove_templates", "(", ")", "self", ".", "services", ".", "remove_templates", "(", ")", "self", ".", "servicedependencies", ".", "remove_templates", "(", ")", "self", ".", "hostdependencies", ".", "remove_templates", "(", ")", "self", ".", "timeperiods", ".", "remove_templates", "(", ")" ]
Clean useless elements like templates because they are not needed anymore :return: None
[ "Clean", "useless", "elements", "like", "templates", "because", "they", "are", "not", "needed", "anymore" ]
python
train
36.545455
eandersson/amqpstorm
amqpstorm/exchange.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/exchange.py#L77-L106
def bind(self, destination='', source='', routing_key='', arguments=None): """Bind an Exchange. :param str destination: Exchange name :param str source: Exchange to bind to :param str routing_key: The routing key to use :param dict arguments: Bind key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict """ if not compatibility.is_string(destination): raise AMQPInvalidArgument('destination should be a string') elif not compatibility.is_string(source): raise AMQPInvalidArgument('source should be a string') elif not compatibility.is_string(routing_key): raise AMQPInvalidArgument('routing_key should be a string') elif arguments is not None and not isinstance(arguments, dict): raise AMQPInvalidArgument('arguments should be a dict or None') bind_frame = pamqp_exchange.Bind(destination=destination, source=source, routing_key=routing_key, arguments=arguments) return self._channel.rpc_request(bind_frame)
[ "def", "bind", "(", "self", ",", "destination", "=", "''", ",", "source", "=", "''", ",", "routing_key", "=", "''", ",", "arguments", "=", "None", ")", ":", "if", "not", "compatibility", ".", "is_string", "(", "destination", ")", ":", "raise", "AMQPInvalidArgument", "(", "'destination should be a string'", ")", "elif", "not", "compatibility", ".", "is_string", "(", "source", ")", ":", "raise", "AMQPInvalidArgument", "(", "'source should be a string'", ")", "elif", "not", "compatibility", ".", "is_string", "(", "routing_key", ")", ":", "raise", "AMQPInvalidArgument", "(", "'routing_key should be a string'", ")", "elif", "arguments", "is", "not", "None", "and", "not", "isinstance", "(", "arguments", ",", "dict", ")", ":", "raise", "AMQPInvalidArgument", "(", "'arguments should be a dict or None'", ")", "bind_frame", "=", "pamqp_exchange", ".", "Bind", "(", "destination", "=", "destination", ",", "source", "=", "source", ",", "routing_key", "=", "routing_key", ",", "arguments", "=", "arguments", ")", "return", "self", ".", "_channel", ".", "rpc_request", "(", "bind_frame", ")" ]
Bind an Exchange. :param str destination: Exchange name :param str source: Exchange to bind to :param str routing_key: The routing key to use :param dict arguments: Bind key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict
[ "Bind", "an", "Exchange", "." ]
python
train
46.766667
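A hedged usage sketch for the Exchange.bind method above, reached through an amqpstorm channel. The host, credentials, exchange names and routing key are placeholders, and the surrounding connection calls are ordinary amqpstorm usage as I understand it — treat them as assumptions rather than part of this record:

    import amqpstorm

    connection = amqpstorm.Connection('localhost', 'guest', 'guest')
    channel = connection.channel()

    # Exchange.bind validates that destination, source and routing_key are
    # strings and that arguments is a dict or None, then sends Exchange.Bind.
    channel.exchange.bind(destination='events.analytics',
                          source='events',
                          routing_key='user.#')

    channel.close()
    connection.close()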
bcbio/bcbio-nextgen
bcbio/variation/germline.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L19-L50
def split_somatic(items): """Split somatic batches, adding a germline target. Enables separate germline calling of samples using shared alignments. """ items = [_clean_flat_variantcaller(x) for x in items] somatic_groups, somatic, non_somatic = vcfutils.somatic_batches(items) # extract germline samples to run from normals in tumor/normal pairs germline_added = set([]) germline = [] for somatic_group in somatic_groups: paired = vcfutils.get_paired(somatic_group) if paired and paired.normal_data: cur = utils.deepish_copy(paired.normal_data) vc = dd.get_variantcaller(cur) if isinstance(vc, dict) and "germline" in vc: if cur["description"] not in germline_added: germline_added.add(cur["description"]) cur["rgnames"]["sample"] = cur["description"] cur["metadata"]["batch"] = "%s-germline" % cur["description"] cur["metadata"]["phenotype"] = "germline" cur = remove_align_qc_tools(cur) cur["config"]["algorithm"]["variantcaller"] = vc["germline"] germline.append(cur) # Fix variantcalling specification for only somatic targets somatic_out = [] for data in somatic: vc = dd.get_variantcaller(data) if isinstance(vc, dict) and "somatic" in vc: data["config"]["algorithm"]["variantcaller"] = vc["somatic"] somatic_out.append(data) return non_somatic + somatic_out + germline
[ "def", "split_somatic", "(", "items", ")", ":", "items", "=", "[", "_clean_flat_variantcaller", "(", "x", ")", "for", "x", "in", "items", "]", "somatic_groups", ",", "somatic", ",", "non_somatic", "=", "vcfutils", ".", "somatic_batches", "(", "items", ")", "# extract germline samples to run from normals in tumor/normal pairs", "germline_added", "=", "set", "(", "[", "]", ")", "germline", "=", "[", "]", "for", "somatic_group", "in", "somatic_groups", ":", "paired", "=", "vcfutils", ".", "get_paired", "(", "somatic_group", ")", "if", "paired", "and", "paired", ".", "normal_data", ":", "cur", "=", "utils", ".", "deepish_copy", "(", "paired", ".", "normal_data", ")", "vc", "=", "dd", ".", "get_variantcaller", "(", "cur", ")", "if", "isinstance", "(", "vc", ",", "dict", ")", "and", "\"germline\"", "in", "vc", ":", "if", "cur", "[", "\"description\"", "]", "not", "in", "germline_added", ":", "germline_added", ".", "add", "(", "cur", "[", "\"description\"", "]", ")", "cur", "[", "\"rgnames\"", "]", "[", "\"sample\"", "]", "=", "cur", "[", "\"description\"", "]", "cur", "[", "\"metadata\"", "]", "[", "\"batch\"", "]", "=", "\"%s-germline\"", "%", "cur", "[", "\"description\"", "]", "cur", "[", "\"metadata\"", "]", "[", "\"phenotype\"", "]", "=", "\"germline\"", "cur", "=", "remove_align_qc_tools", "(", "cur", ")", "cur", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "[", "\"variantcaller\"", "]", "=", "vc", "[", "\"germline\"", "]", "germline", ".", "append", "(", "cur", ")", "# Fix variantcalling specification for only somatic targets", "somatic_out", "=", "[", "]", "for", "data", "in", "somatic", ":", "vc", "=", "dd", ".", "get_variantcaller", "(", "data", ")", "if", "isinstance", "(", "vc", ",", "dict", ")", "and", "\"somatic\"", "in", "vc", ":", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", "[", "\"variantcaller\"", "]", "=", "vc", "[", "\"somatic\"", "]", "somatic_out", ".", "append", "(", "data", ")", "return", "non_somatic", "+", "somatic_out", "+", "germline" ]
Split somatic batches, adding a germline target. Enables separate germline calling of samples using shared alignments.
[ "Split", "somatic", "batches", "adding", "a", "germline", "target", "." ]
python
train
48.15625
MatterMiners/cobald
cobald/daemon/runners/service.py
https://github.com/MatterMiners/cobald/blob/264138de4382d1c9b53fabcbc6660e10b33a914d/cobald/daemon/runners/service.py#L97-L105
def execute(self, payload, *args, flavour: ModuleType, **kwargs): """ Synchronously run ``payload`` and provide its output If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution. """ if args or kwargs: payload = functools.partial(payload, *args, **kwargs) return self._meta_runner.run_payload(payload, flavour=flavour)
[ "def", "execute", "(", "self", ",", "payload", ",", "*", "args", ",", "flavour", ":", "ModuleType", ",", "*", "*", "kwargs", ")", ":", "if", "args", "or", "kwargs", ":", "payload", "=", "functools", ".", "partial", "(", "payload", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_meta_runner", ".", "run_payload", "(", "payload", ",", "flavour", "=", "flavour", ")" ]
Synchronously run ``payload`` and provide its output If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
[ "Synchronously", "run", "payload", "and", "provide", "its", "output" ]
python
train
44.888889
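The only subtlety in execute above is the functools.partial wrapping: arguments are bound up front so the runner can later invoke the payload with no arguments. A tiny reminder of that behaviour (the payload function here is invented):

    import functools

    def payload(x, y, scale=1):
        return (x + y) * scale

    bound = functools.partial(payload, 2, 3, scale=10)
    print(bound())   # 50 -- callable with no arguments, as the runner expects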
cltl/KafNafParserPy
KafNafParserPy/opinion_data.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/opinion_data.py#L47-L54
def set_comment(self,c): """ Sets the comment for the element @type c: string @param c: comment for the element """ c = ' '+c.replace('-','').strip()+' ' self.node.insert(0,etree.Comment(c))
[ "def", "set_comment", "(", "self", ",", "c", ")", ":", "c", "=", "' '", "+", "c", ".", "replace", "(", "'-'", ",", "''", ")", ".", "strip", "(", ")", "+", "' '", "self", ".", "node", ".", "insert", "(", "0", ",", "etree", ".", "Comment", "(", "c", ")", ")" ]
Sets the comment for the element @type c: string @param c: comment for the element
[ "Sets", "the", "comment", "for", "the", "element" ]
python
train
29.875
Yelp/kafka-utils
kafka_utils/util/__init__.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/__init__.py#L105-L107
def dict_merge(set1, set2): """Joins two dictionaries.""" return dict(list(set1.items()) + list(set2.items()))
[ "def", "dict_merge", "(", "set1", ",", "set2", ")", ":", "return", "dict", "(", "list", "(", "set1", ".", "items", "(", ")", ")", "+", "list", "(", "set2", ".", "items", "(", ")", ")", ")" ]
Joins two dictionaries.
[ "Joins", "two", "dictionaries", "." ]
python
train
38.666667
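One detail the one-line docstring of dict_merge leaves implicit: because set2's items are appended after set1's, set2 wins on key collisions. A quick check using the same body as the record:

    def dict_merge(set1, set2):
        return dict(list(set1.items()) + list(set2.items()))

    print(dict_merge({'a': 1, 'b': 2}, {'b': 20, 'c': 3}))   # {'a': 1, 'b': 20, 'c': 3}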
qweeze/wex-api-client
wex/client.py
https://github.com/qweeze/wex-api-client/blob/e84d139be229aab2c7c5eda5976b812be651807b/wex/client.py#L182-L201
def trans_history( self, from_=None, count=None, from_id=None, end_id=None, order=None, since=None, end=None ): """ Returns the history of transactions. To use this method you need a privilege of the info key. :param int or None from_: transaction ID, from which the display starts (default 0) :param int or None count: number of transaction to be displayed (default 1000) :param int or None from_id: transaction ID, from which the display starts (default 0) :param int or None end_id: transaction ID on which the display ends (default inf.) :param str or None order: sorting (default 'DESC') :param int or None since: the time to start the display (default 0) :param int or None end: the time to end the display (default inf.) """ return self._trade_api_call( 'TransHistory', from_=from_, count=count, from_id=from_id, end_id=end_id, order=order, since=since, end=end )
[ "def", "trans_history", "(", "self", ",", "from_", "=", "None", ",", "count", "=", "None", ",", "from_id", "=", "None", ",", "end_id", "=", "None", ",", "order", "=", "None", ",", "since", "=", "None", ",", "end", "=", "None", ")", ":", "return", "self", ".", "_trade_api_call", "(", "'TransHistory'", ",", "from_", "=", "from_", ",", "count", "=", "count", ",", "from_id", "=", "from_id", ",", "end_id", "=", "end_id", ",", "order", "=", "order", ",", "since", "=", "since", ",", "end", "=", "end", ")" ]
Returns the history of transactions. To use this method you need a privilege of the info key. :param int or None from_: transaction ID, from which the display starts (default 0) :param int or None count: number of transaction to be displayed (default 1000) :param int or None from_id: transaction ID, from which the display starts (default 0) :param int or None end_id: transaction ID on which the display ends (default inf.) :param str or None order: sorting (default 'DESC') :param int or None since: the time to start the display (default 0) :param int or None end: the time to end the display (default inf.)
[ "Returns", "the", "history", "of", "transactions", ".", "To", "use", "this", "method", "you", "need", "a", "privilege", "of", "the", "info", "key", "." ]
python
train
50
flo-compbio/genometools
genometools/gcloud/compute/instance.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/compute/instance.py#L164-L209
def wait_for_instance_deletion(credentials, project, zone, instance_name, interval_seconds=5): """Wait until an instance is deleted. We require that initially, the specified instance exists. TODO: docstring """ t0 = time.time() access_token = credentials.get_access_token() headers = { 'Authorization': 'Bearer %s' % access_token.access_token } r = requests.get('https://www.googleapis.com/compute/v1/' 'projects/%s/zones/%s/instances/%s' % (project, zone, instance_name), headers=headers) if r.status_code == 404: raise AssertionError('Instance "%s" does not exist!' % instance_name) r.raise_for_status() _LOGGER.debug('Instance "%s" exists.', instance_name) while True: time.sleep(interval_seconds) access_token = credentials.get_access_token() headers = { 'Authorization': 'Bearer %s' % access_token.access_token } r = requests.get('https://www.googleapis.com/compute/v1/' 'projects/%s/zones/%s/instances/%s' % (project, zone, instance_name), headers=headers) if r.status_code == 404: break r.raise_for_status() _LOGGER.debug('Instance "%s" still exists.', instance_name) t1 = time.time() t = t1-t0 t_min = t/60.0 _LOGGER.info('Instance was deleted after %.1f s (%.1f m).', t, t_min)
[ "def", "wait_for_instance_deletion", "(", "credentials", ",", "project", ",", "zone", ",", "instance_name", ",", "interval_seconds", "=", "5", ")", ":", "t0", "=", "time", ".", "time", "(", ")", "access_token", "=", "credentials", ".", "get_access_token", "(", ")", "headers", "=", "{", "'Authorization'", ":", "'Bearer %s'", "%", "access_token", ".", "access_token", "}", "r", "=", "requests", ".", "get", "(", "'https://www.googleapis.com/compute/v1/'", "'projects/%s/zones/%s/instances/%s'", "%", "(", "project", ",", "zone", ",", "instance_name", ")", ",", "headers", "=", "headers", ")", "if", "r", ".", "status_code", "==", "404", ":", "raise", "AssertionError", "(", "'Instance \"%s\" does not exist!'", "%", "instance_name", ")", "r", ".", "raise_for_status", "(", ")", "_LOGGER", ".", "debug", "(", "'Instance \"%s\" exists.'", ",", "instance_name", ")", "while", "True", ":", "time", ".", "sleep", "(", "interval_seconds", ")", "access_token", "=", "credentials", ".", "get_access_token", "(", ")", "headers", "=", "{", "'Authorization'", ":", "'Bearer %s'", "%", "access_token", ".", "access_token", "}", "r", "=", "requests", ".", "get", "(", "'https://www.googleapis.com/compute/v1/'", "'projects/%s/zones/%s/instances/%s'", "%", "(", "project", ",", "zone", ",", "instance_name", ")", ",", "headers", "=", "headers", ")", "if", "r", ".", "status_code", "==", "404", ":", "break", "r", ".", "raise_for_status", "(", ")", "_LOGGER", ".", "debug", "(", "'Instance \"%s\" still exists.'", ",", "instance_name", ")", "t1", "=", "time", ".", "time", "(", ")", "t", "=", "t1", "-", "t0", "t_min", "=", "t", "/", "60.0", "_LOGGER", ".", "info", "(", "'Instance was deleted after %.1f s (%.1f m).'", ",", "t", ",", "t_min", ")" ]
Wait until an instance is deleted. We require that initially, the specified instance exists. TODO: docstring
[ "Wait", "until", "an", "instance", "is", "deleted", ".", "We", "require", "that", "initially", "the", "specified", "instance", "exists", ".", "TODO", ":", "docstring" ]
python
train
33.021739
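Stripped of the GCE specifics, wait_for_instance_deletion is a poll-until-404 loop over an authenticated GET. A condensed sketch of that pattern (the original also asserts the instance exists first and refreshes its access token on every pass; url and headers here are caller-supplied placeholders):

    import time
    import requests

    def wait_until_gone(url, headers, interval_seconds=5):
        # Poll a GET endpoint until it returns 404, raising on other errors.
        while True:
            r = requests.get(url, headers=headers)
            if r.status_code == 404:
                return
            r.raise_for_status()
            time.sleep(interval_seconds)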
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L923-L933
def lineWidth(self, lw=None): """Set/get width of mesh edges. Same as `lw()`.""" if lw is not None: if lw == 0: self.GetProperty().EdgeVisibilityOff() return self.GetProperty().EdgeVisibilityOn() self.GetProperty().SetLineWidth(lw) else: return self.GetProperty().GetLineWidth() return self
[ "def", "lineWidth", "(", "self", ",", "lw", "=", "None", ")", ":", "if", "lw", "is", "not", "None", ":", "if", "lw", "==", "0", ":", "self", ".", "GetProperty", "(", ")", ".", "EdgeVisibilityOff", "(", ")", "return", "self", ".", "GetProperty", "(", ")", ".", "EdgeVisibilityOn", "(", ")", "self", ".", "GetProperty", "(", ")", ".", "SetLineWidth", "(", "lw", ")", "else", ":", "return", "self", ".", "GetProperty", "(", ")", ".", "GetLineWidth", "(", ")", "return", "self" ]
Set/get width of mesh edges. Same as `lw()`.
[ "Set", "/", "get", "width", "of", "mesh", "edges", ".", "Same", "as", "lw", "()", "." ]
python
train
35.636364
Spinmob/spinmob
egg/_gui.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L2855-L2864
def autozoom(self, n=None): """ Auto-scales the axes to fit all the data in plot index n. If n == None, auto-scale everyone. """ if n==None: for p in self.plot_widgets: p.autoRange() else: self.plot_widgets[n].autoRange() return self
[ "def", "autozoom", "(", "self", ",", "n", "=", "None", ")", ":", "if", "n", "==", "None", ":", "for", "p", "in", "self", ".", "plot_widgets", ":", "p", ".", "autoRange", "(", ")", "else", ":", "self", ".", "plot_widgets", "[", "n", "]", ".", "autoRange", "(", ")", "return", "self" ]
Auto-scales the axes to fit all the data in plot index n. If n == None, auto-scale everyone.
[ "Auto", "-", "scales", "the", "axes", "to", "fit", "all", "the", "data", "in", "plot", "index", "n", ".", "If", "n", "==", "None", "auto", "-", "scale", "everyone", "." ]
python
train
30
quantopian/zipline
zipline/data/loader.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/loader.py#L232-L292
def ensure_treasury_data(symbol, first_date, last_date, now, environ=None): """ Ensure we have treasury data from treasury module associated with `symbol`. Parameters ---------- symbol : str Benchmark symbol for which we're loading associated treasury curves. first_date : pd.Timestamp First date required to be in the cache. last_date : pd.Timestamp Last date required to be in the cache. now : pd.Timestamp The current time. This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. We attempt to download data unless we already have data stored in the cache for `module_name` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path. """ loader_module, filename, source = INDEX_MAPPING.get( symbol, INDEX_MAPPING['SPY'], ) first_date = max(first_date, loader_module.earliest_possible_date()) data = _load_cached_data(filename, first_date, last_date, now, 'treasury', environ) if data is not None: return data # If no cached data was found or it was missing any dates then download the # necessary data. logger.info( ('Downloading treasury data for {symbol!r} ' 'from {first_date} to {last_date}'), symbol=symbol, first_date=first_date, last_date=last_date ) try: data = loader_module.get_treasury_data(first_date, last_date) data.to_csv(get_data_filepath(filename, environ)) except (OSError, IOError, HTTPError): logger.exception('failed to cache treasury data') if not has_data_for_dates(data, first_date, last_date): logger.warn( ("Still don't have expected treasury data for {symbol!r} " "from {first_date} to {last_date} after redownload!"), symbol=symbol, first_date=first_date, last_date=last_date ) return data
[ "def", "ensure_treasury_data", "(", "symbol", ",", "first_date", ",", "last_date", ",", "now", ",", "environ", "=", "None", ")", ":", "loader_module", ",", "filename", ",", "source", "=", "INDEX_MAPPING", ".", "get", "(", "symbol", ",", "INDEX_MAPPING", "[", "'SPY'", "]", ",", ")", "first_date", "=", "max", "(", "first_date", ",", "loader_module", ".", "earliest_possible_date", "(", ")", ")", "data", "=", "_load_cached_data", "(", "filename", ",", "first_date", ",", "last_date", ",", "now", ",", "'treasury'", ",", "environ", ")", "if", "data", "is", "not", "None", ":", "return", "data", "# If no cached data was found or it was missing any dates then download the", "# necessary data.", "logger", ".", "info", "(", "(", "'Downloading treasury data for {symbol!r} '", "'from {first_date} to {last_date}'", ")", ",", "symbol", "=", "symbol", ",", "first_date", "=", "first_date", ",", "last_date", "=", "last_date", ")", "try", ":", "data", "=", "loader_module", ".", "get_treasury_data", "(", "first_date", ",", "last_date", ")", "data", ".", "to_csv", "(", "get_data_filepath", "(", "filename", ",", "environ", ")", ")", "except", "(", "OSError", ",", "IOError", ",", "HTTPError", ")", ":", "logger", ".", "exception", "(", "'failed to cache treasury data'", ")", "if", "not", "has_data_for_dates", "(", "data", ",", "first_date", ",", "last_date", ")", ":", "logger", ".", "warn", "(", "(", "\"Still don't have expected treasury data for {symbol!r} \"", "\"from {first_date} to {last_date} after redownload!\"", ")", ",", "symbol", "=", "symbol", ",", "first_date", "=", "first_date", ",", "last_date", "=", "last_date", ")", "return", "data" ]
Ensure we have treasury data from treasury module associated with `symbol`. Parameters ---------- symbol : str Benchmark symbol for which we're loading associated treasury curves. first_date : pd.Timestamp First date required to be in the cache. last_date : pd.Timestamp Last date required to be in the cache. now : pd.Timestamp The current time. This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. We attempt to download data unless we already have data stored in the cache for `module_name` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path.
[ "Ensure", "we", "have", "treasury", "data", "from", "treasury", "module", "associated", "with", "symbol", "." ]
python
train
37.098361
pantsbuild/pants
src/python/pants/releases/reversion.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/releases/reversion.py#L66-L93
def rewrite_record_file(workspace, src_record_file, mutated_file_tuples): """Given a RECORD file and list of mutated file tuples, update the RECORD file in place. The RECORD file should always be a member of the mutated files, due to both containing versions, and having a version in its filename. """ mutated_files = set() dst_record_file = None for src, dst in mutated_file_tuples: if src == src_record_file: dst_record_file = dst else: mutated_files.add(dst) if not dst_record_file: raise Exception('Malformed whl or bad globs: `{}` was not rewritten.'.format(src_record_file)) output_records = [] file_name = os.path.join(workspace, dst_record_file) for line in read_file(file_name).splitlines(): filename, fingerprint_str, size_str = line.rsplit(',', 3) if filename in mutated_files: fingerprint_str, size_str = fingerprint_file(workspace, filename) output_line = ','.join((filename, fingerprint_str, size_str)) else: output_line = line output_records.append(output_line) safe_file_dump(file_name, '\r\n'.join(output_records) + '\r\n')
[ "def", "rewrite_record_file", "(", "workspace", ",", "src_record_file", ",", "mutated_file_tuples", ")", ":", "mutated_files", "=", "set", "(", ")", "dst_record_file", "=", "None", "for", "src", ",", "dst", "in", "mutated_file_tuples", ":", "if", "src", "==", "src_record_file", ":", "dst_record_file", "=", "dst", "else", ":", "mutated_files", ".", "add", "(", "dst", ")", "if", "not", "dst_record_file", ":", "raise", "Exception", "(", "'Malformed whl or bad globs: `{}` was not rewritten.'", ".", "format", "(", "src_record_file", ")", ")", "output_records", "=", "[", "]", "file_name", "=", "os", ".", "path", ".", "join", "(", "workspace", ",", "dst_record_file", ")", "for", "line", "in", "read_file", "(", "file_name", ")", ".", "splitlines", "(", ")", ":", "filename", ",", "fingerprint_str", ",", "size_str", "=", "line", ".", "rsplit", "(", "','", ",", "3", ")", "if", "filename", "in", "mutated_files", ":", "fingerprint_str", ",", "size_str", "=", "fingerprint_file", "(", "workspace", ",", "filename", ")", "output_line", "=", "','", ".", "join", "(", "(", "filename", ",", "fingerprint_str", ",", "size_str", ")", ")", "else", ":", "output_line", "=", "line", "output_records", ".", "append", "(", "output_line", ")", "safe_file_dump", "(", "file_name", ",", "'\\r\\n'", ".", "join", "(", "output_records", ")", "+", "'\\r\\n'", ")" ]
Given a RECORD file and list of mutated file tuples, update the RECORD file in place. The RECORD file should always be a member of the mutated files, due to both containing versions, and having a version in its filename.
[ "Given", "a", "RECORD", "file", "and", "list", "of", "mutated", "file", "tuples", "update", "the", "RECORD", "file", "in", "place", "." ]
python
train
39.214286
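For context on what the loop in rewrite_record_file rewrites: a wheel's RECORD file lists one "path,fingerprint,size" line per installed file, with the fingerprint in the sha256=<urlsafe base64, unpadded> form. A minimal sketch of producing one such line, independent of pants' own fingerprint_file helper (file name and contents invented):

    import base64
    import hashlib

    def record_entry(path, data):
        # Build one wheel RECORD line: "path,sha256=<digest>,<size in bytes>".
        digest = hashlib.sha256(data).digest()
        fingerprint = base64.urlsafe_b64encode(digest).rstrip(b'=').decode()
        return '{},sha256={},{}'.format(path, fingerprint, len(data))

    print(record_entry('mypkg/__init__.py', b'__version__ = "1.0.1"\n'))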
bwesterb/py-seccure
src/__init__.py
https://github.com/bwesterb/py-seccure/blob/944760744686dd0ad015bd90ecb13a3ce0d7c9c9/src/__init__.py#L911-L915
def decrypt(s, passphrase, curve='secp160r1', mac_bytes=10): """ Decrypts `s' with passphrase `passphrase' """ curve = Curve.by_name(curve) privkey = curve.passphrase_to_privkey(passphrase) return privkey.decrypt(s, mac_bytes)
[ "def", "decrypt", "(", "s", ",", "passphrase", ",", "curve", "=", "'secp160r1'", ",", "mac_bytes", "=", "10", ")", ":", "curve", "=", "Curve", ".", "by_name", "(", "curve", ")", "privkey", "=", "curve", ".", "passphrase_to_privkey", "(", "passphrase", ")", "return", "privkey", ".", "decrypt", "(", "s", ",", "mac_bytes", ")" ]
Decrypts `s' with passphrase `passphrase'
[ "Decrypts", "s", "with", "passphrase", "passphrase" ]
python
train
47.6
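To close, a hedged round-trip sketch for the module-level decrypt above, paired with the library's encrypt and passphrase_to_pubkey helpers. The passphrase and message are placeholders, and the exact form the public key is expected in (str vs bytes) may differ between versions of py-seccure, so treat the conversion below as an assumption:

    import seccure

    passphrase = b'my secret passphrase'                     # placeholder
    pubkey = str(seccure.passphrase_to_pubkey(passphrase))   # assumed helper and conversion

    ciphertext = seccure.encrypt(b'hello curve', pubkey)
    print(seccure.decrypt(ciphertext, passphrase, curve='secp160r1'))   # b'hello curve'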