Dataset columns (each record below lists these fields in this order):

  repo              string   (7 to 55 characters)
  path              string   (4 to 223 characters)
  url               string   (87 to 315 characters)
  code              string   (75 to 104k characters)
  code_tokens       list
  docstring         string   (1 to 46.9k characters)
  docstring_tokens  list
  language          string   (1 distinct value)
  partition         string   (3 distinct values)
  avg_line_len      float64  (7.91 to 980)
nickpandolfi/Cyther
cyther/instructions.py
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/instructions.py#L53-L82
def processAndSetDefaults(self):
    """
    The heart of the 'Instruction' object. This method will make sure that
    all fields not entered will be defaulted to a correct value. Also checks
    for incongruities in the data entered, if it was by the user.
    """
    # INPUT, OUTPUT, GIVEN + BUILDABLE DEPS
    if not self.input:
        raise ValueError(NO_INPUT_FILE)

    if not self.output:
        # Build directory must exist, right?
        if not self.build_directory:
            File()
        pass  # Can it be built? / reference self.output_format for this
    else:
        pass  # if it is not congruent with other info provided

    if not self.build_directory:
        pass  # Initialize it

    for dependency in self.given_dependencies:
        pass  # Check if the dependcy exists

    if self.output_format != self.output.getType():
        raise ValueError("")
    # Given dependencies must actually exist!
    # output_name must be at a lower extenion level than input_name
    # The build directory
    return
[ "def", "processAndSetDefaults", "(", "self", ")", ":", "# INPUT, OUTPUT, GIVEN + BUILDABLE DEPS", "if", "not", "self", ".", "input", ":", "raise", "ValueError", "(", "NO_INPUT_FILE", ")", "if", "not", "self", ".", "output", ":", "# Build directory must exist, right?", "if", "not", "self", ".", "build_directory", ":", "File", "(", ")", "pass", "# Can it be built? / reference self.output_format for this", "else", ":", "pass", "# if it is not congruent with other info provided", "if", "not", "self", ".", "build_directory", ":", "pass", "# Initialize it", "for", "dependency", "in", "self", ".", "given_dependencies", ":", "pass", "# Check if the dependcy exists", "if", "self", ".", "output_format", "!=", "self", ".", "output", ".", "getType", "(", ")", ":", "raise", "ValueError", "(", "\"\"", ")", "# Given dependencies must actually exist!", "# output_name must be at a lower extenion level than input_name", "# The build directory", "return" ]
The heart of the 'Instruction' object. This method will make sure that all fields not entered will be defaulted to a correct value. Also checks for incongruities in the data entered, if it was by the user.
[ "The", "heart", "of", "the", "Instruction", "object", ".", "This", "method", "will", "make", "sure", "that", "all", "fields", "not", "entered", "will", "be", "defaulted", "to", "a", "correct", "value", ".", "Also", "checks", "for", "incongruities", "in", "the", "data", "entered", "if", "it", "was", "by", "the", "user", "." ]
python
train
36.9
LudovicRousseau/pyscard
smartcard/wx/CardAndReaderTreePanel.py
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/wx/CardAndReaderTreePanel.py#L177-L190
def GetATR(self, reader):
    """Return the ATR of the card inserted into the reader."""
    atr = "no card inserted"
    try:
        if not type(reader) is str:
            connection = reader.createConnection()
            connection.connect()
            atr = toHexString(connection.getATR())
            connection.disconnect()
    except NoCardException:
        pass
    except CardConnectionException:
        pass
    return atr
[ "def", "GetATR", "(", "self", ",", "reader", ")", ":", "atr", "=", "\"no card inserted\"", "try", ":", "if", "not", "type", "(", "reader", ")", "is", "str", ":", "connection", "=", "reader", ".", "createConnection", "(", ")", "connection", ".", "connect", "(", ")", "atr", "=", "toHexString", "(", "connection", ".", "getATR", "(", ")", ")", "connection", ".", "disconnect", "(", ")", "except", "NoCardException", ":", "pass", "except", "CardConnectionException", ":", "pass", "return", "atr" ]
Return the ATR of the card inserted into the reader.
[ "Return", "the", "ATR", "of", "the", "card", "inserted", "into", "the", "reader", "." ]
python
train
34.071429
gplepage/lsqfit
src/lsqfit/_extras.py
https://github.com/gplepage/lsqfit/blob/6a57fd687632c175fccb47d8e8e943cda5e9ce9d/src/lsqfit/_extras.py#L1394-L1412
def process_dataset(dataset, models, **kargs):
    """ Convert ``dataset`` to processed data using ``models``.

    :class:`gvar.dataset.Dataset` (or similar dictionary) object
    ``dataset`` is processed by each model in list ``models``, and the
    results collected into a new dictionary ``pdata`` for use in
    :meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`.
    Assumes that the models have defined method
    :meth:`MultiFitterModel.builddataset`. Keyword arguments ``kargs``
    are passed on to :func:`gvar.dataset.avg_data` when averaging the data.
    """
    dset = collections.OrderedDict()
    for m in MultiFitter.flatten_models(models):
        dset[m.datatag] = (
            m.builddataset(dataset) if m.ncg <= 1 else
            MultiFitter.coarse_grain(m.builddataset(dataset), ncg=m.ncg)
            )
    return gvar.dataset.avg_data(dset, **kargs)
[ "def", "process_dataset", "(", "dataset", ",", "models", ",", "*", "*", "kargs", ")", ":", "dset", "=", "collections", ".", "OrderedDict", "(", ")", "for", "m", "in", "MultiFitter", ".", "flatten_models", "(", "models", ")", ":", "dset", "[", "m", ".", "datatag", "]", "=", "(", "m", ".", "builddataset", "(", "dataset", ")", "if", "m", ".", "ncg", "<=", "1", "else", "MultiFitter", ".", "coarse_grain", "(", "m", ".", "builddataset", "(", "dataset", ")", ",", "ncg", "=", "m", ".", "ncg", ")", ")", "return", "gvar", ".", "dataset", ".", "avg_data", "(", "dset", ",", "*", "*", "kargs", ")" ]
Convert ``dataset`` to processed data using ``models``. :class:`gvar.dataset.Dataset` (or similar dictionary) object ``dataset`` is processed by each model in list ``models``, and the results collected into a new dictionary ``pdata`` for use in :meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`. Assumes that the models have defined method :meth:`MultiFitterModel.builddataset`. Keyword arguments ``kargs`` are passed on to :func:`gvar.dataset.avg_data` when averaging the data.
[ "Convert", "dataset", "to", "processed", "data", "using", "models", "." ]
python
train
49.631579
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L1311-L1329
def _add_open_file(self, file_obj):
    """Add file_obj to the list of open files on the filesystem.

    Used internally to manage open files.

    The position in the open_files array is the file descriptor number.

    Args:
        file_obj: File object to be added to open files list.

    Returns:
        File descriptor number for the file object.
    """
    if self._free_fd_heap:
        open_fd = heapq.heappop(self._free_fd_heap)
        self.open_files[open_fd] = [file_obj]
        return open_fd

    self.open_files.append([file_obj])
    return len(self.open_files) - 1
[ "def", "_add_open_file", "(", "self", ",", "file_obj", ")", ":", "if", "self", ".", "_free_fd_heap", ":", "open_fd", "=", "heapq", ".", "heappop", "(", "self", ".", "_free_fd_heap", ")", "self", ".", "open_files", "[", "open_fd", "]", "=", "[", "file_obj", "]", "return", "open_fd", "self", ".", "open_files", ".", "append", "(", "[", "file_obj", "]", ")", "return", "len", "(", "self", ".", "open_files", ")", "-", "1" ]
Add file_obj to the list of open files on the filesystem. Used internally to manage open files. The position in the open_files array is the file descriptor number. Args: file_obj: File object to be added to open files list. Returns: File descriptor number for the file object.
[ "Add", "file_obj", "to", "the", "list", "of", "open", "files", "on", "the", "filesystem", ".", "Used", "internally", "to", "manage", "open", "files", "." ]
python
train
32.842105
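The _add_open_file record above illustrates a common descriptor-recycling pattern: freed slots go into a min-heap so the lowest free index is handed out again before the table grows. A minimal, self-contained sketch of that pattern (the class and names here are illustrative, not part of pyfakefs):

import heapq

class FdTable:
    """Illustrative sketch: reuse the lowest freed slot before growing the table."""
    def __init__(self):
        self.open_files = []
        self._free_fd_heap = []

    def add(self, file_obj):
        if self._free_fd_heap:
            fd = heapq.heappop(self._free_fd_heap)  # smallest recycled descriptor
            self.open_files[fd] = file_obj
            return fd
        self.open_files.append(file_obj)
        return len(self.open_files) - 1

    def close(self, fd):
        self.open_files[fd] = None
        heapq.heappush(self._free_fd_heap, fd)  # make the slot reusable

table = FdTable()
a, b = table.add("f1"), table.add("f2")
table.close(a)
assert table.add("f3") == a  # the freed, lowest descriptor is handed out again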
google/grr
grr/server/grr_response_server/gui/api_plugins/config.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/config.py#L102-L127
def Handle(self, unused_args, token=None):
    """Build the data structure representing the config."""
    sections = {}
    for descriptor in config.CONFIG.type_infos:
        if descriptor.section in sections:
            continue

        section_data = {}
        for parameter in self._ListParametersInSection(descriptor.section):
            section_data[parameter] = ApiConfigOption().InitFromConfigOption(
                parameter)

        sections[descriptor.section] = section_data

    result = ApiGetConfigResult()
    for section_name in sorted(sections):
        section = sections[section_name]

        api_section = ApiConfigSection(name=section_name)
        api_section.options = []
        for param_name in sorted(section):
            api_section.options.append(section[param_name])
        result.sections.append(api_section)

    return result
[ "def", "Handle", "(", "self", ",", "unused_args", ",", "token", "=", "None", ")", ":", "sections", "=", "{", "}", "for", "descriptor", "in", "config", ".", "CONFIG", ".", "type_infos", ":", "if", "descriptor", ".", "section", "in", "sections", ":", "continue", "section_data", "=", "{", "}", "for", "parameter", "in", "self", ".", "_ListParametersInSection", "(", "descriptor", ".", "section", ")", ":", "section_data", "[", "parameter", "]", "=", "ApiConfigOption", "(", ")", ".", "InitFromConfigOption", "(", "parameter", ")", "sections", "[", "descriptor", ".", "section", "]", "=", "section_data", "result", "=", "ApiGetConfigResult", "(", ")", "for", "section_name", "in", "sorted", "(", "sections", ")", ":", "section", "=", "sections", "[", "section_name", "]", "api_section", "=", "ApiConfigSection", "(", "name", "=", "section_name", ")", "api_section", ".", "options", "=", "[", "]", "for", "param_name", "in", "sorted", "(", "section", ")", ":", "api_section", ".", "options", ".", "append", "(", "section", "[", "param_name", "]", ")", "result", ".", "sections", ".", "append", "(", "api_section", ")", "return", "result" ]
Build the data structure representing the config.
[ "Build", "the", "data", "structure", "representing", "the", "config", "." ]
python
train
31.192308
Azure/azure-storage-python
azure-storage-file/azure/storage/file/fileservice.py
https://github.com/Azure/azure-storage-python/blob/52327354b192cbcf6b7905118ec6b5d57fa46275/azure-storage-file/azure/storage/file/fileservice.py#L2336-L2391
def update_range(self, share_name, directory_name, file_name, data,
                 start_range, end_range, validate_content=False,
                 timeout=None):
    '''
    Writes the bytes specified by the request body into the specified range.

    :param str share_name:
        Name of existing share.
    :param str directory_name:
        The path to the directory.
    :param str file_name:
        Name of existing file.
    :param bytes data:
        Content of the range.
    :param int start_range:
        Start of byte range to use for updating a section of the file.
        The range can be up to 4 MB in size.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will download first 512 bytes of file.
    :param int end_range:
        End of byte range to use for updating a section of the file.
        The range can be up to 4 MB in size.
        The start_range and end_range params are inclusive.
        Ex: start_range=0, end_range=511 will download first 512 bytes of file.
    :param bool validate_content:
        If true, calculates an MD5 hash of the page content. The storage
        service checks the hash of the content that has arrived
        with the hash that was sent. This is primarily valuable for detecting
        bitflips on the wire if using http instead of https as https (the
        default) will already validate. Note that this MD5 hash is not stored
        with the file.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('share_name', share_name)
    _validate_not_none('file_name', file_name)
    _validate_not_none('data', data)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = _get_path(share_name, directory_name, file_name)
    request.query = {
        'comp': 'range',
        'timeout': _int_to_str(timeout),
    }
    request.headers = {
        'x-ms-write': 'update',
    }
    _validate_and_format_range_headers(
        request, start_range, end_range)
    request.body = _get_data_bytes_only('data', data)

    if validate_content:
        computed_md5 = _get_content_md5(request.body)
        request.headers['Content-MD5'] = _to_str(computed_md5)

    self._perform_request(request)
[ "def", "update_range", "(", "self", ",", "share_name", ",", "directory_name", ",", "file_name", ",", "data", ",", "start_range", ",", "end_range", ",", "validate_content", "=", "False", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "_validate_not_none", "(", "'file_name'", ",", "file_name", ")", "_validate_not_none", "(", "'data'", ",", "data", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'PUT'", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ",", "directory_name", ",", "file_name", ")", "request", ".", "query", "=", "{", "'comp'", ":", "'range'", ",", "'timeout'", ":", "_int_to_str", "(", "timeout", ")", ",", "}", "request", ".", "headers", "=", "{", "'x-ms-write'", ":", "'update'", ",", "}", "_validate_and_format_range_headers", "(", "request", ",", "start_range", ",", "end_range", ")", "request", ".", "body", "=", "_get_data_bytes_only", "(", "'data'", ",", "data", ")", "if", "validate_content", ":", "computed_md5", "=", "_get_content_md5", "(", "request", ".", "body", ")", "request", ".", "headers", "[", "'Content-MD5'", "]", "=", "_to_str", "(", "computed_md5", ")", "self", ".", "_perform_request", "(", "request", ")" ]
Writes the bytes specified by the request body into the specified range. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of existing file. :param bytes data: Content of the range. :param int start_range: Start of byte range to use for updating a section of the file. The range can be up to 4 MB in size. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param int end_range: End of byte range to use for updating a section of the file. The range can be up to 4 MB in size. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param bool validate_content: If true, calculates an MD5 hash of the page content. The storage service checks the hash of the content that has arrived with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the file. :param int timeout: The timeout parameter is expressed in seconds.
[ "Writes", "the", "bytes", "specified", "by", "the", "request", "body", "into", "the", "specified", "range", ".", ":", "param", "str", "share_name", ":", "Name", "of", "existing", "share", ".", ":", "param", "str", "directory_name", ":", "The", "path", "to", "the", "directory", ".", ":", "param", "str", "file_name", ":", "Name", "of", "existing", "file", ".", ":", "param", "bytes", "data", ":", "Content", "of", "the", "range", ".", ":", "param", "int", "start_range", ":", "Start", "of", "byte", "range", "to", "use", "for", "updating", "a", "section", "of", "the", "file", ".", "The", "range", "can", "be", "up", "to", "4", "MB", "in", "size", ".", "The", "start_range", "and", "end_range", "params", "are", "inclusive", ".", "Ex", ":", "start_range", "=", "0", "end_range", "=", "511", "will", "download", "first", "512", "bytes", "of", "file", ".", ":", "param", "int", "end_range", ":", "End", "of", "byte", "range", "to", "use", "for", "updating", "a", "section", "of", "the", "file", ".", "The", "range", "can", "be", "up", "to", "4", "MB", "in", "size", ".", "The", "start_range", "and", "end_range", "params", "are", "inclusive", ".", "Ex", ":", "start_range", "=", "0", "end_range", "=", "511", "will", "download", "first", "512", "bytes", "of", "file", ".", ":", "param", "bool", "validate_content", ":", "If", "true", "calculates", "an", "MD5", "hash", "of", "the", "page", "content", ".", "The", "storage", "service", "checks", "the", "hash", "of", "the", "content", "that", "has", "arrived", "with", "the", "hash", "that", "was", "sent", ".", "This", "is", "primarily", "valuable", "for", "detecting", "bitflips", "on", "the", "wire", "if", "using", "http", "instead", "of", "https", "as", "https", "(", "the", "default", ")", "will", "already", "validate", ".", "Note", "that", "this", "MD5", "hash", "is", "not", "stored", "with", "the", "file", ".", ":", "param", "int", "timeout", ":", "The", "timeout", "parameter", "is", "expressed", "in", "seconds", "." ]
python
train
44.482143
zsimic/runez
src/runez/click.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L42-L46
def config(*args, **attrs):
    """Override configuration"""
    attrs.setdefault("metavar", "KEY=VALUE")
    attrs.setdefault("multiple", True)
    return option(config, *args, **attrs)
[ "def", "config", "(", "*", "args", ",", "*", "*", "attrs", ")", ":", "attrs", ".", "setdefault", "(", "\"metavar\"", ",", "\"KEY=VALUE\"", ")", "attrs", ".", "setdefault", "(", "\"multiple\"", ",", "True", ")", "return", "option", "(", "config", ",", "*", "args", ",", "*", "*", "attrs", ")" ]
Override configuration
[ "Override", "configuration" ]
python
train
36.4
esheldon/fitsio
fitsio/fitslib.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1356-L1406
def descr2tabledef(descr, table_type='binary', write_bitcols=False):
    """
    Create a FITS table def from the input numpy descriptor.

    parameters
    ----------
    descr: list
        A numpy recarray type descriptor  array.dtype.descr

    returns
    -------
    names, formats, dims: tuple of lists
        These are the ttyp, tform and tdim header entries
        for each field.  dim entries may be None
    """
    names = []
    formats = []
    dims = []

    for d in descr:
        """
        npy_dtype = d[1][1:]
        if is_ascii and npy_dtype in ['u1','i1']:
            raise ValueError("1-byte integers are not supported for "
                             "ascii tables")
        """

        if d[1][1] == 'O':
            raise ValueError(
                'cannot automatically declare a var column without '
                'some data to determine max len')

        name, form, dim = _npy2fits(
            d, table_type=table_type, write_bitcols=write_bitcols)

        if name == '':
            raise ValueError("field name is an empty string")

        """
        if is_ascii:
            if dim is not None:
                raise ValueError("array columns are not supported "
                                 "for ascii tables")
        """

        names.append(name)
        formats.append(form)
        dims.append(dim)

    return names, formats, dims
[ "def", "descr2tabledef", "(", "descr", ",", "table_type", "=", "'binary'", ",", "write_bitcols", "=", "False", ")", ":", "names", "=", "[", "]", "formats", "=", "[", "]", "dims", "=", "[", "]", "for", "d", "in", "descr", ":", "\"\"\"\n npy_dtype = d[1][1:]\n if is_ascii and npy_dtype in ['u1','i1']:\n raise ValueError(\"1-byte integers are not supported for \"\n \"ascii tables\")\n \"\"\"", "if", "d", "[", "1", "]", "[", "1", "]", "==", "'O'", ":", "raise", "ValueError", "(", "'cannot automatically declare a var column without '", "'some data to determine max len'", ")", "name", ",", "form", ",", "dim", "=", "_npy2fits", "(", "d", ",", "table_type", "=", "table_type", ",", "write_bitcols", "=", "write_bitcols", ")", "if", "name", "==", "''", ":", "raise", "ValueError", "(", "\"field name is an empty string\"", ")", "\"\"\"\n if is_ascii:\n if dim is not None:\n raise ValueError(\"array columns are not supported \"\n \"for ascii tables\")\n \"\"\"", "names", ".", "append", "(", "name", ")", "formats", ".", "append", "(", "form", ")", "dims", ".", "append", "(", "dim", ")", "return", "names", ",", "formats", ",", "dims" ]
Create a FITS table def from the input numpy descriptor. parameters ---------- descr: list A numpy recarray type descriptor array.dtype.descr returns ------- names, formats, dims: tuple of lists These are the ttyp, tform and tdim header entries for each field. dim entries may be None
[ "Create", "a", "FITS", "table", "def", "from", "the", "input", "numpy", "descriptor", "." ]
python
train
26.333333
wummel/linkchecker
linkcheck/parser/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/parser/__init__.py#L138-L142
def parse_firefox (url_data):
    """Parse a Firefox3 bookmark file."""
    filename = url_data.get_os_filename()
    for url, name in firefox.parse_bookmark_file(filename):
        url_data.add_url(url, name=name)
[ "def", "parse_firefox", "(", "url_data", ")", ":", "filename", "=", "url_data", ".", "get_os_filename", "(", ")", "for", "url", ",", "name", "in", "firefox", ".", "parse_bookmark_file", "(", "filename", ")", ":", "url_data", ".", "add_url", "(", "url", ",", "name", "=", "name", ")" ]
Parse a Firefox3 bookmark file.
[ "Parse", "a", "Firefox3", "bookmark", "file", "." ]
python
train
42
Arubacloud/pyArubaCloud
ArubaCloud/ReverseDns/ReverseDns.py
https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/ReverseDns/ReverseDns.py#L36-L45
def reset(self, addresses):
    """
    Remove all PTR records from the given address
    :type addresses: List[str]
    :param addresses: (List[str]) The IP Address to reset
    :return: (bool) True in case of success, False in case of failure
    """
    request = self._call(SetEnqueueResetReverseDns.SetEnqueueResetReverseDns, IPs=addresses)
    response = request.commit()
    return response['Success']
[ "def", "reset", "(", "self", ",", "addresses", ")", ":", "request", "=", "self", ".", "_call", "(", "SetEnqueueResetReverseDns", ".", "SetEnqueueResetReverseDns", ",", "IPs", "=", "addresses", ")", "response", "=", "request", ".", "commit", "(", ")", "return", "response", "[", "'Success'", "]" ]
Remove all PTR records from the given address :type addresses: List[str] :param addresses: (List[str]) The IP Address to reset :return: (bool) True in case of success, False in case of failure
[ "Remove", "all", "PTR", "records", "from", "the", "given", "address", ":", "type", "addresses", ":", "List", "[", "str", "]", ":", "param", "addresses", ":", "(", "List", "[", "str", "]", ")", "The", "IP", "Address", "to", "reset", ":", "return", ":", "(", "bool", ")", "True", "in", "case", "of", "success", "False", "in", "case", "of", "failure" ]
python
train
43.5
jkitzes/macroeco
macroeco/empirical/_empirical.py
https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/empirical/_empirical.py#L807-L860
def _yield_spatial_table(patch, div, spp_col, count_col, x_col, y_col):
    """
    Calculates an empirical spatial table

    Yields
    -------
    DataFrame
        Spatial table for each division. See Notes.

    Notes
    -----
    The spatial table is the precursor to the SAR, EAR, and grid-based
    commonality metrics. Each row in the table corresponds to a cell created
    by a given division. Columns are cell_loc (within the grid defined by the
    division), spp_set, n_spp, and n_individs.
    """
    # Catch error if you don't use ; after divs in comm_grid in MacroecoDesktop
    try:
        div_split_list = div.replace(';','').split(',')
    except AttributeError:
        div_split_list = str(div).strip("()").split(',')

    div_split = (x_col + ':' + div_split_list[0] + ';' +
                 y_col + ':' + div_split_list[1])

    # Get cell_locs
    # Requires _parse_splits and _product functions to go y inside of x
    x_starts, x_ends = _col_starts_ends(patch, x_col, div_split_list[0])
    x_offset = (x_ends[0] - x_starts[0]) / 2
    x_locs = x_starts + x_offset

    y_starts, y_ends = _col_starts_ends(patch, y_col, div_split_list[1])
    y_offset = (y_ends[0] - y_starts[0]) / 2
    y_locs = y_starts + y_offset

    cell_locs = _product(x_locs, y_locs)

    # Get spp set and count for all cells
    n_spp_list = []  # Number of species in cell
    n_individs_list = []
    spp_set_list = []  # Set object giving unique species IDs in cell

    for cellstring, cellpatch in _yield_subpatches(patch, div_split, name='div'):
        spp_set = set(np.unique(cellpatch.table[spp_col]))
        spp_set_list.append(spp_set)
        n_spp_list.append(len(spp_set))
        n_individs_list.append(np.sum(cellpatch.table[count_col]))

    # Create and return dataframe
    df = pd.DataFrame({'cell_loc': cell_locs, 'spp_set': spp_set_list,
                       'n_spp': n_spp_list, 'n_individs': n_individs_list})

    return df
[ "def", "_yield_spatial_table", "(", "patch", ",", "div", ",", "spp_col", ",", "count_col", ",", "x_col", ",", "y_col", ")", ":", "# Catch error if you don't use ; after divs in comm_grid in MacroecoDesktop", "try", ":", "div_split_list", "=", "div", ".", "replace", "(", "';'", ",", "''", ")", ".", "split", "(", "','", ")", "except", "AttributeError", ":", "div_split_list", "=", "str", "(", "div", ")", ".", "strip", "(", "\"()\"", ")", ".", "split", "(", "','", ")", "div_split", "=", "(", "x_col", "+", "':'", "+", "div_split_list", "[", "0", "]", "+", "';'", "+", "y_col", "+", "':'", "+", "div_split_list", "[", "1", "]", ")", "# Get cell_locs", "# Requires _parse_splits and _product functions to go y inside of x", "x_starts", ",", "x_ends", "=", "_col_starts_ends", "(", "patch", ",", "x_col", ",", "div_split_list", "[", "0", "]", ")", "x_offset", "=", "(", "x_ends", "[", "0", "]", "-", "x_starts", "[", "0", "]", ")", "/", "2", "x_locs", "=", "x_starts", "+", "x_offset", "y_starts", ",", "y_ends", "=", "_col_starts_ends", "(", "patch", ",", "y_col", ",", "div_split_list", "[", "1", "]", ")", "y_offset", "=", "(", "y_ends", "[", "0", "]", "-", "y_starts", "[", "0", "]", ")", "/", "2", "y_locs", "=", "y_starts", "+", "y_offset", "cell_locs", "=", "_product", "(", "x_locs", ",", "y_locs", ")", "# Get spp set and count for all cells", "n_spp_list", "=", "[", "]", "# Number of species in cell", "n_individs_list", "=", "[", "]", "spp_set_list", "=", "[", "]", "# Set object giving unique species IDs in cell", "for", "cellstring", ",", "cellpatch", "in", "_yield_subpatches", "(", "patch", ",", "div_split", ",", "name", "=", "'div'", ")", ":", "spp_set", "=", "set", "(", "np", ".", "unique", "(", "cellpatch", ".", "table", "[", "spp_col", "]", ")", ")", "spp_set_list", ".", "append", "(", "spp_set", ")", "n_spp_list", ".", "append", "(", "len", "(", "spp_set", ")", ")", "n_individs_list", ".", "append", "(", "np", ".", "sum", "(", "cellpatch", ".", "table", "[", "count_col", "]", ")", ")", "# Create and return dataframe", "df", "=", "pd", ".", "DataFrame", "(", "{", "'cell_loc'", ":", "cell_locs", ",", "'spp_set'", ":", "spp_set_list", ",", "'n_spp'", ":", "n_spp_list", ",", "'n_individs'", ":", "n_individs_list", "}", ")", "return", "df" ]
Calculates an empirical spatial table Yields ------- DataFrame Spatial table for each division. See Notes. Notes ----- The spatial table is the precursor to the SAR, EAR, and grid-based commonality metrics. Each row in the table corresponds to a cell created by a given division. Columns are cell_loc (within the grid defined by the division), spp_set, n_spp, and n_individs.
[ "Calculates", "an", "empirical", "spatial", "table" ]
python
train
35.259259
DataBiosphere/toil
src/toil/job.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/job.py#L643-L670
def checkJobGraphAcylic(self):
    """
    :raises toil.job.JobGraphDeadlockException: if the connected component \
    of jobs containing this job contains any cycles of child/followOn dependencies \
    in the *augmented job graph* (see below). Such cycles are not allowed \
    in valid job graphs.

    A follow-on edge (A, B) between two jobs A and B is equivalent \
    to adding a child edge to B from (1) A, (2) from each child of A, \
    and (3) from the successors of each child of A. We call each such edge \
    an edge an "implied" edge. The augmented job graph is a job graph including \
    all the implied edges.

    For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \
    a graph with no follow-ons. The former follow-on case could be improved!
    """
    #Get the root jobs
    roots = self.getRootJobs()
    if len(roots) == 0:
        raise JobGraphDeadlockException("Graph contains no root jobs due to cycles")

    #Get implied edges
    extraEdges = self._getImpliedEdges(roots)

    #Check for directed cycles in the augmented graph
    visited = set()
    for root in roots:
        root._checkJobGraphAcylicDFS([], visited, extraEdges)
[ "def", "checkJobGraphAcylic", "(", "self", ")", ":", "#Get the root jobs", "roots", "=", "self", ".", "getRootJobs", "(", ")", "if", "len", "(", "roots", ")", "==", "0", ":", "raise", "JobGraphDeadlockException", "(", "\"Graph contains no root jobs due to cycles\"", ")", "#Get implied edges", "extraEdges", "=", "self", ".", "_getImpliedEdges", "(", "roots", ")", "#Check for directed cycles in the augmented graph", "visited", "=", "set", "(", ")", "for", "root", "in", "roots", ":", "root", ".", "_checkJobGraphAcylicDFS", "(", "[", "]", ",", "visited", ",", "extraEdges", ")" ]
:raises toil.job.JobGraphDeadlockException: if the connected component \ of jobs containing this job contains any cycles of child/followOn dependencies \ in the *augmented job graph* (see below). Such cycles are not allowed \ in valid job graphs. A follow-on edge (A, B) between two jobs A and B is equivalent \ to adding a child edge to B from (1) A, (2) from each child of A, \ and (3) from the successors of each child of A. We call each such edge \ an edge an "implied" edge. The augmented job graph is a job graph including \ all the implied edges. For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \ a graph with no follow-ons. The former follow-on case could be improved!
[ ":", "raises", "toil", ".", "job", ".", "JobGraphDeadlockException", ":", "if", "the", "connected", "component", "\\", "of", "jobs", "containing", "this", "job", "contains", "any", "cycles", "of", "child", "/", "followOn", "dependencies", "\\", "in", "the", "*", "augmented", "job", "graph", "*", "(", "see", "below", ")", ".", "Such", "cycles", "are", "not", "allowed", "\\", "in", "valid", "job", "graphs", "." ]
python
train
45.142857
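checkJobGraphAcylic above delegates the actual cycle test to a private DFS helper on the augmented graph. A generic, self-contained sketch of that kind of back-edge check on an adjacency-list graph (the names here are illustrative and unrelated to Toil's internals):

def has_cycle(graph):
    """Illustrative DFS cycle check; graph maps node -> list of successors."""
    visited, on_stack = set(), set()

    def dfs(node):
        visited.add(node)
        on_stack.add(node)
        for succ in graph.get(node, []):
            if succ in on_stack:          # back edge => directed cycle
                return True
            if succ not in visited and dfs(succ):
                return True
        on_stack.remove(node)
        return False

    return any(dfs(n) for n in graph if n not in visited)

assert has_cycle({'a': ['b'], 'b': ['a']})
assert not has_cycle({'a': ['b'], 'b': []})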
Skype4Py/Skype4Py
Skype4Py/client.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/client.py#L62-L99
def CreateMenuItem(self, MenuItemId, PluginContext, CaptionText, HintText=u'', IconPath='', Enabled=True,
                   ContactType=pluginContactTypeAll, MultipleContacts=False):
    """Creates custom menu item in Skype client's "Do More" menus.

    :Parameters:
      MenuItemId : unicode
        Unique identifier for the menu item.
      PluginContext : `enums`.pluginContext*
        Menu item context. Allows to choose in which client windows will the menu item appear.
      CaptionText : unicode
        Caption text.
      HintText : unicode
        Hint text (optional). Shown when mouse hoovers over the menu item.
      IconPath : unicode
        Path to the icon (optional).
      Enabled : bool
        Initial state of the menu item. True by default.
      ContactType : `enums`.pluginContactType*
        In case of `enums.pluginContextContact` tells which contacts the menu item
        should appear for. Defaults to `enums.pluginContactTypeAll`.
      MultipleContacts : bool
        Set to True if multiple contacts should be allowed (defaults to False).

    :return: Menu item object.
    :rtype: `PluginMenuItem`
    """
    cmd = 'CREATE MENU_ITEM %s CONTEXT %s CAPTION %s ENABLED %s' % (tounicode(MenuItemId), PluginContext,
        quote(tounicode(CaptionText)), cndexp(Enabled, 'true', 'false'))
    if HintText:
        cmd += ' HINT %s' % quote(tounicode(HintText))
    if IconPath:
        cmd += ' ICON %s' % quote(path2unicode(IconPath))
    if MultipleContacts:
        cmd += ' ENABLE_MULTIPLE_CONTACTS true'
    if PluginContext == pluginContextContact:
        cmd += ' CONTACT_TYPE_FILTER %s' % ContactType
    self._Skype._DoCommand(cmd)
    return PluginMenuItem(self._Skype, MenuItemId, CaptionText, HintText, Enabled)
[ "def", "CreateMenuItem", "(", "self", ",", "MenuItemId", ",", "PluginContext", ",", "CaptionText", ",", "HintText", "=", "u''", ",", "IconPath", "=", "''", ",", "Enabled", "=", "True", ",", "ContactType", "=", "pluginContactTypeAll", ",", "MultipleContacts", "=", "False", ")", ":", "cmd", "=", "'CREATE MENU_ITEM %s CONTEXT %s CAPTION %s ENABLED %s'", "%", "(", "tounicode", "(", "MenuItemId", ")", ",", "PluginContext", ",", "quote", "(", "tounicode", "(", "CaptionText", ")", ")", ",", "cndexp", "(", "Enabled", ",", "'true'", ",", "'false'", ")", ")", "if", "HintText", ":", "cmd", "+=", "' HINT %s'", "%", "quote", "(", "tounicode", "(", "HintText", ")", ")", "if", "IconPath", ":", "cmd", "+=", "' ICON %s'", "%", "quote", "(", "path2unicode", "(", "IconPath", ")", ")", "if", "MultipleContacts", ":", "cmd", "+=", "' ENABLE_MULTIPLE_CONTACTS true'", "if", "PluginContext", "==", "pluginContextContact", ":", "cmd", "+=", "' CONTACT_TYPE_FILTER %s'", "%", "ContactType", "self", ".", "_Skype", ".", "_DoCommand", "(", "cmd", ")", "return", "PluginMenuItem", "(", "self", ".", "_Skype", ",", "MenuItemId", ",", "CaptionText", ",", "HintText", ",", "Enabled", ")" ]
Creates custom menu item in Skype client's "Do More" menus. :Parameters: MenuItemId : unicode Unique identifier for the menu item. PluginContext : `enums`.pluginContext* Menu item context. Allows to choose in which client windows will the menu item appear. CaptionText : unicode Caption text. HintText : unicode Hint text (optional). Shown when mouse hoovers over the menu item. IconPath : unicode Path to the icon (optional). Enabled : bool Initial state of the menu item. True by default. ContactType : `enums`.pluginContactType* In case of `enums.pluginContextContact` tells which contacts the menu item should appear for. Defaults to `enums.pluginContactTypeAll`. MultipleContacts : bool Set to True if multiple contacts should be allowed (defaults to False). :return: Menu item object. :rtype: `PluginMenuItem`
[ "Creates", "custom", "menu", "item", "in", "Skype", "client", "s", "Do", "More", "menus", "." ]
python
train
49.105263
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L604-L634
def network_security_groups_list(resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    List all network security groups within a resource group.

    :param resource_group: The resource group name to list network security \
        groups within.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.network_security_groups_list testgroup

    '''
    result = {}
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        secgroups = __utils__['azurearm.paged_object_to_list'](
            netconn.network_security_groups.list(
                resource_group_name=resource_group
            )
        )
        for secgroup in secgroups:
            result[secgroup['name']] = secgroup
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}

    return result
[ "def", "network_security_groups_list", "(", "resource_group", ",", "*", "*", "kwargs", ")", ":", "result", "=", "{", "}", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "secgroups", "=", "__utils__", "[", "'azurearm.paged_object_to_list'", "]", "(", "netconn", ".", "network_security_groups", ".", "list", "(", "resource_group_name", "=", "resource_group", ")", ")", "for", "secgroup", "in", "secgroups", ":", "result", "[", "secgroup", "[", "'name'", "]", "]", "=", "secgroup", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
.. versionadded:: 2019.2.0 List all network security groups within a resource group. :param resource_group: The resource group name to list network security \ groups within. CLI Example: .. code-block:: bash salt-call azurearm_network.network_security_groups_list testgroup
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
28.677419
Genida/django-meerkat
src/meerkat/utils/geolocation.py
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/utils/geolocation.py#L8-L30
def ip_geoloc(ip, hit_api=True):
    """
    Get IP geolocation.

    Args:
        ip (str): IP address to use if no data provided.
        hit_api (bool): whether to hit api if info not found.

    Returns:
        str: latitude and longitude, comma-separated.
    """
    from ..logs.models import IPInfoCheck
    try:
        obj = IPInfoCheck.objects.get(ip_address=ip).ip_info
    except IPInfoCheck.DoesNotExist:
        if hit_api:
            try:
                obj = IPInfoCheck.check_ip(ip)
            except RateExceededError:
                return None
        else:
            return None
    return obj.latitude, obj.longitude
[ "def", "ip_geoloc", "(", "ip", ",", "hit_api", "=", "True", ")", ":", "from", ".", ".", "logs", ".", "models", "import", "IPInfoCheck", "try", ":", "obj", "=", "IPInfoCheck", ".", "objects", ".", "get", "(", "ip_address", "=", "ip", ")", ".", "ip_info", "except", "IPInfoCheck", ".", "DoesNotExist", ":", "if", "hit_api", ":", "try", ":", "obj", "=", "IPInfoCheck", ".", "check_ip", "(", "ip", ")", "except", "RateExceededError", ":", "return", "None", "else", ":", "return", "None", "return", "obj", ".", "latitude", ",", "obj", ".", "longitude" ]
Get IP geolocation. Args: ip (str): IP address to use if no data provided. hit_api (bool): whether to hit api if info not found. Returns: str: latitude and longitude, comma-separated.
[ "Get", "IP", "geolocation", "." ]
python
train
27.130435
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py#L279-L325
def process_url(self, url, retrieve=False):
    """Evaluate a URL as a possible download, and maybe retrieve it"""
    if url in self.scanned_urls and not retrieve:
        return
    self.scanned_urls[url] = True
    if not URL_SCHEME(url):
        self.process_filename(url)
        return
    else:
        dists = list(distros_for_url(url))
        if dists:
            if not self.url_ok(url):
                return
            self.debug("Found link: %s", url)

    if dists or not retrieve or url in self.fetched_urls:
        list(map(self.add, dists))
        return  # don't need the actual page

    if not self.url_ok(url):
        self.fetched_urls[url] = True
        return

    self.info("Reading %s", url)
    self.fetched_urls[url] = True  # prevent multiple fetch attempts
    f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
    if f is None:
        return
    self.fetched_urls[f.url] = True
    if 'html' not in f.headers.get('content-type', '').lower():
        f.close()  # not html, we can't process it
        return

    base = f.url  # handle redirects
    page = f.read()
    if not isinstance(page, str):
        # We are in Python 3 and got bytes. We want str.
        if isinstance(f, HTTPError):
            # Errors have no charset, assume latin1:
            charset = 'latin-1'
        else:
            charset = f.headers.get_param('charset') or 'latin-1'
        page = page.decode(charset, "ignore")
    f.close()
    for match in HREF.finditer(page):
        link = urljoin(base, htmldecode(match.group(1)))
        self.process_url(link)
    if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
        page = self.process_index(url, page)
[ "def", "process_url", "(", "self", ",", "url", ",", "retrieve", "=", "False", ")", ":", "if", "url", "in", "self", ".", "scanned_urls", "and", "not", "retrieve", ":", "return", "self", ".", "scanned_urls", "[", "url", "]", "=", "True", "if", "not", "URL_SCHEME", "(", "url", ")", ":", "self", ".", "process_filename", "(", "url", ")", "return", "else", ":", "dists", "=", "list", "(", "distros_for_url", "(", "url", ")", ")", "if", "dists", ":", "if", "not", "self", ".", "url_ok", "(", "url", ")", ":", "return", "self", ".", "debug", "(", "\"Found link: %s\"", ",", "url", ")", "if", "dists", "or", "not", "retrieve", "or", "url", "in", "self", ".", "fetched_urls", ":", "list", "(", "map", "(", "self", ".", "add", ",", "dists", ")", ")", "return", "# don't need the actual page", "if", "not", "self", ".", "url_ok", "(", "url", ")", ":", "self", ".", "fetched_urls", "[", "url", "]", "=", "True", "return", "self", ".", "info", "(", "\"Reading %s\"", ",", "url", ")", "self", ".", "fetched_urls", "[", "url", "]", "=", "True", "# prevent multiple fetch attempts", "f", "=", "self", ".", "open_url", "(", "url", ",", "\"Download error on %s: %%s -- Some packages may not be found!\"", "%", "url", ")", "if", "f", "is", "None", ":", "return", "self", ".", "fetched_urls", "[", "f", ".", "url", "]", "=", "True", "if", "'html'", "not", "in", "f", ".", "headers", ".", "get", "(", "'content-type'", ",", "''", ")", ".", "lower", "(", ")", ":", "f", ".", "close", "(", ")", "# not html, we can't process it", "return", "base", "=", "f", ".", "url", "# handle redirects", "page", "=", "f", ".", "read", "(", ")", "if", "not", "isinstance", "(", "page", ",", "str", ")", ":", "# We are in Python 3 and got bytes. We want str.", "if", "isinstance", "(", "f", ",", "HTTPError", ")", ":", "# Errors have no charset, assume latin1:", "charset", "=", "'latin-1'", "else", ":", "charset", "=", "f", ".", "headers", ".", "get_param", "(", "'charset'", ")", "or", "'latin-1'", "page", "=", "page", ".", "decode", "(", "charset", ",", "\"ignore\"", ")", "f", ".", "close", "(", ")", "for", "match", "in", "HREF", ".", "finditer", "(", "page", ")", ":", "link", "=", "urljoin", "(", "base", ",", "htmldecode", "(", "match", ".", "group", "(", "1", ")", ")", ")", "self", ".", "process_url", "(", "link", ")", "if", "url", ".", "startswith", "(", "self", ".", "index_url", ")", "and", "getattr", "(", "f", ",", "'code'", ",", "None", ")", "!=", "404", ":", "page", "=", "self", ".", "process_index", "(", "url", ",", "page", ")" ]
Evaluate a URL as a possible download, and maybe retrieve it
[ "Evaluate", "a", "URL", "as", "a", "possible", "download", "and", "maybe", "retrieve", "it" ]
python
test
39.425532
googleapis/google-cloud-python
logging/google/cloud/logging_v2/gapic/config_service_v2_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging_v2/gapic/config_service_v2_client.py#L88-L92
def sink_path(cls, project, sink):
    """Return a fully-qualified sink string."""
    return google.api_core.path_template.expand(
        "projects/{project}/sinks/{sink}", project=project, sink=sink
    )
[ "def", "sink_path", "(", "cls", ",", "project", ",", "sink", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/sinks/{sink}\"", ",", "project", "=", "project", ",", "sink", "=", "sink", ")" ]
Return a fully-qualified sink string.
[ "Return", "a", "fully", "-", "qualified", "sink", "string", "." ]
python
train
43.8
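For a simple template like the one above, the expansion amounts to named substitution, so the expected result can be previewed with plain str.format. This is only an illustration of the output shape, not a claim about everything google.api_core.path_template.expand does:

# Hedged illustration: preview the expanded resource name with str.format.
template = "projects/{project}/sinks/{sink}"
print(template.format(project="my-project", sink="my-sink"))
# projects/my-project/sinks/my-sink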
gmdzy2010/dingtalk_sdk_gmdzy2010
dingtalk_sdk_gmdzy2010/base_request.py
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/base_request.py#L22-L35
def set_logger(self):
    """Method to build the base logging system. By default, logging
    level is set to INFO."""
    logger = logging.getLogger(__name__)
    logger.setLevel(level=logging.INFO)
    logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
    logger_handler = logging.FileHandler(logger_file)
    logger_handler.setLevel(logging.INFO)
    logger_formatter = logging.Formatter(
        '[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
    )
    logger_handler.setFormatter(logger_formatter)
    logger.addHandler(logger_handler)
    return logger
[ "def", "set_logger", "(", "self", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "setLevel", "(", "level", "=", "logging", ".", "INFO", ")", "logger_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logs_path", ",", "'dingtalk_sdk.logs'", ")", "logger_handler", "=", "logging", ".", "FileHandler", "(", "logger_file", ")", "logger_handler", ".", "setLevel", "(", "logging", ".", "INFO", ")", "logger_formatter", "=", "logging", ".", "Formatter", "(", "'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'", ")", "logger_handler", ".", "setFormatter", "(", "logger_formatter", ")", "logger", ".", "addHandler", "(", "logger_handler", ")", "return", "logger" ]
Method to build the base logging system. By default, logging level is set to INFO.
[ "Method", "to", "build", "the", "base", "logging", "system", ".", "By", "default", "logging", "level", "is", "set", "to", "INFO", "." ]
python
train
45.142857
mohabusama/pyguacamole
guacamole/client.py
https://github.com/mohabusama/pyguacamole/blob/344dccc6cb3a9a045afeaf337677e5d0001aa83a/guacamole/client.py#L117-L122
def send(self, data):
    """
    Send encoded instructions to Guacamole guacd server.
    """
    self.logger.debug('Sending data: %s' % data)
    self.client.sendall(data.encode())
[ "def", "send", "(", "self", ",", "data", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Sending data: %s'", "%", "data", ")", "self", ".", "client", ".", "sendall", "(", "data", ".", "encode", "(", ")", ")" ]
Send encoded instructions to Guacamole guacd server.
[ "Send", "encoded", "instructions", "to", "Guacamole", "guacd", "server", "." ]
python
test
32.833333
Ehco1996/lazySpider
lazyspider/lazyheaders.py
https://github.com/Ehco1996/lazySpider/blob/6ae43fec7f784d7e515379e79bcaff06b7fd5ade/lazyspider/lazyheaders.py#L46-L60
def _stripStrList(self, raw_str, stop_strs):
    '''
    去除字符串中的所有指定字符串
    args:
        raw_str 源字符串
        stop_strs 指定字符串 列表
    return
        str 筛选后的字符串
    '''
    if type(stop_strs) == list:
        for word in stop_strs:
            raw_str = self._stripStr(raw_str, word)
        return raw_str
    else:
        raise Exception('stop_words must be list!')
[ "def", "_stripStrList", "(", "self", ",", "raw_str", ",", "stop_strs", ")", ":", "if", "type", "(", "stop_strs", ")", "==", "list", ":", "for", "word", "in", "stop_strs", ":", "raw_str", "=", "self", ".", "_stripStr", "(", "raw_str", ",", "word", ")", "return", "raw_str", "else", ":", "raise", "Exception", "(", "'stop_words must be list!'", ")" ]
去除字符串中的所有指定字符串 args: raw_str 源字符串 stop_strs 指定字符串 列表 return str 筛选后的字符串
[ "去除字符串中的所有指定字符串", "args:", "raw_str", "源字符串", "stop_strs", "指定字符串", "列表", "return", "str", "筛选后的字符串" ]
python
train
27.333333
deep-compute/deeputil
deeputil/misc.py
https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L16-L31
def generate_random_string(length=6):
    '''
    Returns a random string of a specified length.

    >>> len(generate_random_string(length=25))
    25

    Test randomness. Try N times and observe no duplicaton
    >>> N = 100
    >>> len(set(generate_random_string(10) for i in range(N))) == N
    True
    '''
    n = int(length / 2 + 1)
    x = binascii.hexlify(os.urandom(n))
    s = x[:length]
    return s.decode('utf-8')
[ "def", "generate_random_string", "(", "length", "=", "6", ")", ":", "n", "=", "int", "(", "length", "/", "2", "+", "1", ")", "x", "=", "binascii", ".", "hexlify", "(", "os", ".", "urandom", "(", "n", ")", ")", "s", "=", "x", "[", ":", "length", "]", "return", "s", ".", "decode", "(", "'utf-8'", ")" ]
Returns a random string of a specified length. >>> len(generate_random_string(length=25)) 25 Test randomness. Try N times and observe no duplicaton >>> N = 100 >>> len(set(generate_random_string(10) for i in range(N))) == N True
[ "Returns", "a", "random", "string", "of", "a", "specified", "length", "." ]
python
train
25.8125
vmlaker/coils
coils/SortedList.py
https://github.com/vmlaker/coils/blob/a3a613b3d661dec010e5879c86e62cbff2519dd0/coils/SortedList.py#L23-L26
def getCountGT(self, item):
    """Return number of elements greater than *item*."""
    index = bisect.bisect_right(self._list, item)
    return len(self._list) - index
[ "def", "getCountGT", "(", "self", ",", "item", ")", ":", "index", "=", "bisect", ".", "bisect_right", "(", "self", ".", "_list", ",", "item", ")", "return", "len", "(", "self", ".", "_list", ")", "-", "index" ]
Return number of elements greater than *item*.
[ "Return", "number", "of", "elements", "greater", "than", "*", "item", "*", "." ]
python
train
44.5
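The same bisect_right arithmetic works on any sorted plain list, which makes the method easy to sanity-check in isolation. A quick stand-alone illustration, not part of coils:

import bisect

data = [1, 3, 3, 7, 9]                        # must already be sorted
count_gt = len(data) - bisect.bisect_right(data, 3)
print(count_gt)                               # 2, i.e. the elements 7 and 9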
MediaFire/mediafire-python-open-sdk
mediafire/api.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/api.py#L430-L439
def user_set_avatar(self, action=None, quick_key=None, url=None):
    """user/set_avatar

    http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
    """
    return self.request("user/set_avatar", QueryParams({
        "action": action,
        "quick_key": quick_key,
        "url": url
    }))
[ "def", "user_set_avatar", "(", "self", ",", "action", "=", "None", ",", "quick_key", "=", "None", ",", "url", "=", "None", ")", ":", "return", "self", ".", "request", "(", "\"user/set_avatar\"", ",", "QueryParams", "(", "{", "\"action\"", ":", "action", ",", "\"quick_key\"", ":", "quick_key", ",", "\"url\"", ":", "url", "}", ")", ")" ]
user/set_avatar http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
[ "user", "/", "set_avatar" ]
python
train
33.2
Ex-Mente/auxi.0
auxi/tools/chemistry/stoichiometry.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/tools/chemistry/stoichiometry.py#L330-L344
def elements(compounds):
    """
    Determine the set of elements present in a list of chemical compounds.

    The list of elements is sorted alphabetically.

    :param compounds: List of compound formulas and phases, e.g.
      ['Fe2O3[S1]', 'Al2O3[S1]'].

    :returns: List of elements.
    """
    elementlist = [parse_compound(compound).count().keys()
                   for compound in compounds]
    return set().union(*elementlist)
[ "def", "elements", "(", "compounds", ")", ":", "elementlist", "=", "[", "parse_compound", "(", "compound", ")", ".", "count", "(", ")", ".", "keys", "(", ")", "for", "compound", "in", "compounds", "]", "return", "set", "(", ")", ".", "union", "(", "*", "elementlist", ")" ]
Determine the set of elements present in a list of chemical compounds. The list of elements is sorted alphabetically. :param compounds: List of compound formulas and phases, e.g. ['Fe2O3[S1]', 'Al2O3[S1]']. :returns: List of elements.
[ "Determine", "the", "set", "of", "elements", "present", "in", "a", "list", "of", "chemical", "compounds", "." ]
python
valid
28.6
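The return statement in elements relies on set().union(*iterables) to merge the per-compound element sets, and the idiom works directly on dict key views. A tiny stand-alone illustration in which the element counts are hard-coded stand-ins for what parse_compound(...).count() would return:

counts_fe2o3 = {'Fe': 2.0, 'O': 3.0}    # stand-in for parse_compound('Fe2O3[S1]').count()
counts_al2o3 = {'Al': 2.0, 'O': 3.0}    # stand-in for parse_compound('Al2O3[S1]').count()

elementlist = [counts_fe2o3.keys(), counts_al2o3.keys()]
print(set().union(*elementlist))        # {'Al', 'Fe', 'O'} in some order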
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py#L184-L199
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""

    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urljoin(url, htmldecode(match.group(1)))

    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos!=-1:
            match = HREF.search(page,pos)
            if match:
                yield urljoin(url, htmldecode(match.group(1)))
[ "def", "find_external_links", "(", "url", ",", "page", ")", ":", "for", "match", "in", "REL", ".", "finditer", "(", "page", ")", ":", "tag", ",", "rel", "=", "match", ".", "groups", "(", ")", "rels", "=", "set", "(", "map", "(", "str", ".", "strip", ",", "rel", ".", "lower", "(", ")", ".", "split", "(", "','", ")", ")", ")", "if", "'homepage'", "in", "rels", "or", "'download'", "in", "rels", ":", "for", "match", "in", "HREF", ".", "finditer", "(", "tag", ")", ":", "yield", "urljoin", "(", "url", ",", "htmldecode", "(", "match", ".", "group", "(", "1", ")", ")", ")", "for", "tag", "in", "(", "\"<th>Home Page\"", ",", "\"<th>Download URL\"", ")", ":", "pos", "=", "page", ".", "find", "(", "tag", ")", "if", "pos", "!=", "-", "1", ":", "match", "=", "HREF", ".", "search", "(", "page", ",", "pos", ")", "if", "match", ":", "yield", "urljoin", "(", "url", ",", "htmldecode", "(", "match", ".", "group", "(", "1", ")", ")", ")" ]
Find rel="homepage" and rel="download" links in `page`, yielding URLs
[ "Find", "rel", "=", "homepage", "and", "rel", "=", "download", "links", "in", "page", "yielding", "URLs" ]
python
test
38.9375
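find_external_links depends on module-level REL and HREF regexes that are not shown in this record. A self-contained approximation of the href-extraction half, with an illustrative pattern that is only an assumption about what setuptools actually uses:

import re
from urllib.parse import urljoin

# Illustrative pattern only; setuptools defines its own, stricter HREF regex.
HREF = re.compile(r'href\s*=\s*["\']?([^"\'> ]+)', re.I)

def iter_links(base_url, html):
    """Yield absolute URLs for every href found in an HTML fragment."""
    for match in HREF.finditer(html):
        yield urljoin(base_url, match.group(1))

page = '<a rel="download" href="/dist/pkg-1.0.tar.gz">source</a>'
print(list(iter_links("https://example.org/simple/pkg/", page)))
# ['https://example.org/dist/pkg-1.0.tar.gz']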
tensorflow/hub
examples/image_retraining/retrain.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/examples/image_retraining/retrain.py#L437-L478
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, decoded_image_tensor,
                      resized_input_tensor, bottleneck_tensor, module_name):
  """Ensures all the training, testing, and validation bottlenecks are cached.

  Because we're likely to read the same image multiple times (if there are no
  distortions applied during training) it can speed things up a lot if we
  calculate the bottleneck layer values once for each image during
  preprocessing, and then just read those cached values repeatedly during
  training. Here we go through all the images we've found, calculate those
  values, and save them off.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: OrderedDict of training images for each label.
    image_dir: Root folder string of the subfolders containing the training
    images.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: Input tensor for jpeg data from file.
    decoded_image_tensor: The output of decoding and resizing the image.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The penultimate output layer of the graph.
    module_name: The name of the image module being used.

  Returns:
    Nothing.
  """
  how_many_bottlenecks = 0
  ensure_dir_exists(bottleneck_dir)
  for label_name, label_lists in image_lists.items():
    for category in ['training', 'testing', 'validation']:
      category_list = label_lists[category]
      for index, unused_base_name in enumerate(category_list):
        get_or_create_bottleneck(
            sess, image_lists, label_name, index, image_dir, category,
            bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
            resized_input_tensor, bottleneck_tensor, module_name)

        how_many_bottlenecks += 1
        if how_many_bottlenecks % 100 == 0:
          tf.logging.info(
              str(how_many_bottlenecks) + ' bottleneck files created.')
[ "def", "cache_bottlenecks", "(", "sess", ",", "image_lists", ",", "image_dir", ",", "bottleneck_dir", ",", "jpeg_data_tensor", ",", "decoded_image_tensor", ",", "resized_input_tensor", ",", "bottleneck_tensor", ",", "module_name", ")", ":", "how_many_bottlenecks", "=", "0", "ensure_dir_exists", "(", "bottleneck_dir", ")", "for", "label_name", ",", "label_lists", "in", "image_lists", ".", "items", "(", ")", ":", "for", "category", "in", "[", "'training'", ",", "'testing'", ",", "'validation'", "]", ":", "category_list", "=", "label_lists", "[", "category", "]", "for", "index", ",", "unused_base_name", "in", "enumerate", "(", "category_list", ")", ":", "get_or_create_bottleneck", "(", "sess", ",", "image_lists", ",", "label_name", ",", "index", ",", "image_dir", ",", "category", ",", "bottleneck_dir", ",", "jpeg_data_tensor", ",", "decoded_image_tensor", ",", "resized_input_tensor", ",", "bottleneck_tensor", ",", "module_name", ")", "how_many_bottlenecks", "+=", "1", "if", "how_many_bottlenecks", "%", "100", "==", "0", ":", "tf", ".", "logging", ".", "info", "(", "str", "(", "how_many_bottlenecks", ")", "+", "' bottleneck files created.'", ")" ]
Ensures all the training, testing, and validation bottlenecks are cached. Because we're likely to read the same image multiple times (if there are no distortions applied during training) it can speed things up a lot if we calculate the bottleneck layer values once for each image during preprocessing, and then just read those cached values repeatedly during training. Here we go through all the images we've found, calculate those values, and save them off. Args: sess: The current active TensorFlow Session. image_lists: OrderedDict of training images for each label. image_dir: Root folder string of the subfolders containing the training images. bottleneck_dir: Folder string holding cached files of bottleneck values. jpeg_data_tensor: Input tensor for jpeg data from file. decoded_image_tensor: The output of decoding and resizing the image. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: The penultimate output layer of the graph. module_name: The name of the image module being used. Returns: Nothing.
[ "Ensures", "all", "the", "training", "testing", "and", "validation", "bottlenecks", "are", "cached", "." ]
python
train
47.238095
hamelsmu/ktext
ktext/preprocess.py
https://github.com/hamelsmu/ktext/blob/221f09f5b1762705075fd1bd914881c0724d5e02/ktext/preprocess.py#L254-L305
def fit(self,
        data: List[str],
        return_tokenized_data: bool = False) -> Union[None, List[List[str]]]:
    """
    TODO: update docs

    Apply cleaner and tokenzier to raw data and build vocabulary.

    Parameters
    ----------
    data : List[str]
        These are raw documents, which are a list of strings. ex:
        [["The quick brown fox"], ["jumps over the lazy dog"]]
    return_tokenized_data : bool
        Return the tokenized strings.  This is primarly used for debugging
        purposes.

    Returns
    -------
    None or List[List[str]]
        if return_tokenized_data=True then will return tokenized documents,
        otherwise will not return anything.
    """
    self.__clear_data()

    now = get_time()
    logging.warning(f'....tokenizing data')
    tokenized_data = self.parallel_process_text(data)

    if not self.padding_maxlen:
        # its not worth the overhead to parallelize document length counts
        length_counts = map(count_len, tokenized_data)
        self.document_length_histogram = Counter(length_counts)
        self.generate_doc_length_stats()

    # Learn corpus on single thread
    logging.warning(f'(1/2) done. {time_diff(now)} sec')
    logging.warning(f'....building corpus')
    now = get_time()

    self.indexer = custom_Indexer(num_words=self.keep_n)
    self.indexer.fit_on_tokenized_texts(tokenized_data)

    # Build Dictionary accounting For 0 padding, and reserve 1 for unknown and rare Words
    self.token2id = self.indexer.word_index
    self.id2token = {v: k for k, v in self.token2id.items()}
    self.n_tokens = max(self.indexer.word_index.values())

    # logging
    logging.warning(f'(2/2) done. {time_diff(now)} sec')
    logging.warning(f'Finished parsing {self.indexer.document_count:,} documents.')

    if return_tokenized_data:
        return tokenized_data
[ "def", "fit", "(", "self", ",", "data", ":", "List", "[", "str", "]", ",", "return_tokenized_data", ":", "bool", "=", "False", ")", "->", "Union", "[", "None", ",", "List", "[", "List", "[", "str", "]", "]", "]", ":", "self", ".", "__clear_data", "(", ")", "now", "=", "get_time", "(", ")", "logging", ".", "warning", "(", "f'....tokenizing data'", ")", "tokenized_data", "=", "self", ".", "parallel_process_text", "(", "data", ")", "if", "not", "self", ".", "padding_maxlen", ":", "# its not worth the overhead to parallelize document length counts", "length_counts", "=", "map", "(", "count_len", ",", "tokenized_data", ")", "self", ".", "document_length_histogram", "=", "Counter", "(", "length_counts", ")", "self", ".", "generate_doc_length_stats", "(", ")", "# Learn corpus on single thread", "logging", ".", "warning", "(", "f'(1/2) done. {time_diff(now)} sec'", ")", "logging", ".", "warning", "(", "f'....building corpus'", ")", "now", "=", "get_time", "(", ")", "self", ".", "indexer", "=", "custom_Indexer", "(", "num_words", "=", "self", ".", "keep_n", ")", "self", ".", "indexer", ".", "fit_on_tokenized_texts", "(", "tokenized_data", ")", "# Build Dictionary accounting For 0 padding, and reserve 1 for unknown and rare Words", "self", ".", "token2id", "=", "self", ".", "indexer", ".", "word_index", "self", ".", "id2token", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "self", ".", "token2id", ".", "items", "(", ")", "}", "self", ".", "n_tokens", "=", "max", "(", "self", ".", "indexer", ".", "word_index", ".", "values", "(", ")", ")", "# logging", "logging", ".", "warning", "(", "f'(2/2) done. {time_diff(now)} sec'", ")", "logging", ".", "warning", "(", "f'Finished parsing {self.indexer.document_count:,} documents.'", ")", "if", "return_tokenized_data", ":", "return", "tokenized_data" ]
TODO: update docs Apply cleaner and tokenizer to raw data and build vocabulary. Parameters ---------- data : List[str] These are raw documents, which are a list of strings. ex: [["The quick brown fox"], ["jumps over the lazy dog"]] return_tokenized_data : bool Return the tokenized strings. This is primarily used for debugging purposes. Returns ------- None or List[List[str]] if return_tokenized_data=True then will return tokenized documents, otherwise will not return anything.
[ "TODO", ":", "update", "docs" ]
python
test
38
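A hedged usage sketch for the fit() method in the record above. The class name `processor` and its constructor arguments are assumptions inferred from the attributes the method touches (keep_n, padding_maxlen) and are not confirmed by this record; check ktext's own documentation before relying on them.

# Hypothetical usage of ktext's preprocessor fit(); names below are assumptions.
from ktext.preprocess import processor

raw_docs = [
    "The quick brown fox",
    "jumps over the lazy dog",
]

proc = processor(keep_n=5000, padding_maxlen=20)            # assumed constructor
tokenized = proc.fit(raw_docs, return_tokenized_data=True)  # matches the signature above
print(tokenized)  # e.g. [['the', 'quick', 'brown', 'fox'], ['jumps', 'over', 'the', 'lazy', 'dog']]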
inveniosoftware/invenio-deposit
invenio_deposit/serializers.py
https://github.com/inveniosoftware/invenio-deposit/blob/f243ea1d01ab0a3bc92ade3262d1abdd2bc32447/invenio_deposit/serializers.py#L89-L106
def json_file_response(obj=None, pid=None, record=None, status=None): """JSON Files/File serializer. :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or a :class:`invenio_records_files.api.FilesIterator` if it's a list of files. :param pid: PID value. (not used) :param record: The record metadata. (not used) :param status: The HTTP status code. :returns: A Flask response with JSON data. :rtype: :py:class:`flask.Response`. """ from invenio_records_files.api import FilesIterator if isinstance(obj, FilesIterator): return json_files_serializer(obj, status=status) else: return json_file_serializer(obj, status=status)
[ "def", "json_file_response", "(", "obj", "=", "None", ",", "pid", "=", "None", ",", "record", "=", "None", ",", "status", "=", "None", ")", ":", "from", "invenio_records_files", ".", "api", "import", "FilesIterator", "if", "isinstance", "(", "obj", ",", "FilesIterator", ")", ":", "return", "json_files_serializer", "(", "obj", ",", "status", "=", "status", ")", "else", ":", "return", "json_file_serializer", "(", "obj", ",", "status", "=", "status", ")" ]
JSON Files/File serializer. :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or a :class:`invenio_records_files.api.FilesIterator` if it's a list of files. :param pid: PID value. (not used) :param record: The record metadata. (not used) :param status: The HTTP status code. :returns: A Flask response with JSON data. :rtype: :py:class:`flask.Response`.
[ "JSON", "Files", "/", "File", "serializer", "." ]
python
valid
39.055556
picklepete/pyicloud
pyicloud/services/findmyiphone.py
https://github.com/picklepete/pyicloud/blob/9bb6d750662ce24c8febc94807ddbdcdf3cadaa2/pyicloud/services/findmyiphone.py#L128-L144
def play_sound(self, subject='Find My iPhone Alert'): """ Send a request to the device to play a sound. It's possible to pass a custom message by changing the `subject`. """ data = json.dumps({ 'device': self.content['id'], 'subject': subject, 'clientContext': { 'fmly': True } }) self.session.post( self.sound_url, params=self.params, data=data )
[ "def", "play_sound", "(", "self", ",", "subject", "=", "'Find My iPhone Alert'", ")", ":", "data", "=", "json", ".", "dumps", "(", "{", "'device'", ":", "self", ".", "content", "[", "'id'", "]", ",", "'subject'", ":", "subject", ",", "'clientContext'", ":", "{", "'fmly'", ":", "True", "}", "}", ")", "self", ".", "session", ".", "post", "(", "self", ".", "sound_url", ",", "params", "=", "self", ".", "params", ",", "data", "=", "data", ")" ]
Send a request to the device to play a sound. It's possible to pass a custom message by changing the `subject`.
[ "Send", "a", "request", "to", "the", "device", "to", "play", "a", "sound", "." ]
python
train
28.705882
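A hedged usage sketch for play_sound() from the record above: the account setup follows pyicloud's usual PyiCloudService entry point, but the credentials and device index are placeholders, and two-factor authentication handling is omitted.

# Hypothetical usage; credentials and device selection are placeholders.
from pyicloud import PyiCloudService

api = PyiCloudService("user@example.com", "app-specific-password")
device = api.devices[0]                       # first device on the account
device.play_sound(subject="Ping from Find My iPhone")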
PyCQA/astroid
astroid/inference.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/inference.py#L526-L531
def infer_unaryop(self, context=None): """Infer what an UnaryOp should return when evaluated.""" yield from _filter_operation_errors( self, _infer_unaryop, context, util.BadUnaryOperationMessage ) return dict(node=self, context=context)
[ "def", "infer_unaryop", "(", "self", ",", "context", "=", "None", ")", ":", "yield", "from", "_filter_operation_errors", "(", "self", ",", "_infer_unaryop", ",", "context", ",", "util", ".", "BadUnaryOperationMessage", ")", "return", "dict", "(", "node", "=", "self", ",", "context", "=", "context", ")" ]
Infer what an UnaryOp should return when evaluated.
[ "Infer", "what", "an", "UnaryOp", "should", "return", "when", "evaluated", "." ]
python
train
42.5
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L5288-L5311
def parseEntityRef(self): """parse ENTITY references declarations [68] EntityRef ::= '&' Name ';' [ WFC: Entity Declared ] In a document without any DTD, a document with only an internal DTD subset which contains no parameter entity references, or a document with "standalone='yes'", the Name given in the entity reference must match that in an entity declaration, except that well-formed documents need not declare any of the following entities: amp, lt, gt, apos, quot. The declaration of a parameter entity must precede any reference to it. Similarly, the declaration of a general entity must precede any reference to it which appears in a default value in an attribute-list declaration. Note that if entities are declared in the external subset or in external parameter entities, a non-validating processor is not obligated to read and process their declarations; for such documents, the rule that an entity must be declared is a well-formedness constraint only if standalone='yes'. [ WFC: Parsed Entity ] An entity reference must not contain the name of an unparsed entity """ ret = libxml2mod.xmlParseEntityRef(self._o) if ret is None:raise parserError('xmlParseEntityRef() failed') __tmp = xmlEntity(_obj=ret) return __tmp
[ "def", "parseEntityRef", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlParseEntityRef", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "parserError", "(", "'xmlParseEntityRef() failed'", ")", "__tmp", "=", "xmlEntity", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
parse ENTITY references declarations [68] EntityRef ::= '&' Name ';' [ WFC: Entity Declared ] In a document without any DTD, a document with only an internal DTD subset which contains no parameter entity references, or a document with "standalone='yes'", the Name given in the entity reference must match that in an entity declaration, except that well-formed documents need not declare any of the following entities: amp, lt, gt, apos, quot. The declaration of a parameter entity must precede any reference to it. Similarly, the declaration of a general entity must precede any reference to it which appears in a default value in an attribute-list declaration. Note that if entities are declared in the external subset or in external parameter entities, a non-validating processor is not obligated to read and process their declarations; for such documents, the rule that an entity must be declared is a well-formedness constraint only if standalone='yes'. [ WFC: Parsed Entity ] An entity reference must not contain the name of an unparsed entity
[ "parse", "ENTITY", "references", "declarations", "[", "68", "]", "EntityRef", "::", "=", "&", "Name", ";", "[", "WFC", ":", "Entity", "Declared", "]", "In", "a", "document", "without", "any", "DTD", "a", "document", "with", "only", "an", "internal", "DTD", "subset", "which", "contains", "no", "parameter", "entity", "references", "or", "a", "document", "with", "standalone", "=", "yes", "the", "Name", "given", "in", "the", "entity", "reference", "must", "match", "that", "in", "an", "entity", "declaration", "except", "that", "well", "-", "formed", "documents", "need", "not", "declare", "any", "of", "the", "following", "entities", ":", "amp", "lt", "gt", "apos", "quot", ".", "The", "declaration", "of", "a", "parameter", "entity", "must", "precede", "any", "reference", "to", "it", ".", "Similarly", "the", "declaration", "of", "a", "general", "entity", "must", "precede", "any", "reference", "to", "it", "which", "appears", "in", "a", "default", "value", "in", "an", "attribute", "-", "list", "declaration", ".", "Note", "that", "if", "entities", "are", "declared", "in", "the", "external", "subset", "or", "in", "external", "parameter", "entities", "a", "non", "-", "validating", "processor", "is", "not", "obligated", "to", "read", "and", "process", "their", "declarations", ";", "for", "such", "documents", "the", "rule", "that", "an", "entity", "must", "be", "declared", "is", "a", "well", "-", "formedness", "constraint", "only", "if", "standalone", "=", "yes", ".", "[", "WFC", ":", "Parsed", "Entity", "]", "An", "entity", "reference", "must", "not", "contain", "the", "name", "of", "an", "unparsed", "entity" ]
python
train
59.666667
delfick/harpoon
harpoon/dockerpty/pty.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/dockerpty/pty.py#L202-L220
def resize(self, size=None): """ Resize the container's PTY. If `size` is not None, it must be a tuple of (height,width), otherwise it will be determined by the size of the current TTY. """ if not self.israw(): return size = size or tty.size(self.stdout) if size is not None: rows, cols = size try: self.client.resize(self.container, height=rows, width=cols) except IOError: # Container already exited pass
[ "def", "resize", "(", "self", ",", "size", "=", "None", ")", ":", "if", "not", "self", ".", "israw", "(", ")", ":", "return", "size", "=", "size", "or", "tty", ".", "size", "(", "self", ".", "stdout", ")", "if", "size", "is", "not", "None", ":", "rows", ",", "cols", "=", "size", "try", ":", "self", ".", "client", ".", "resize", "(", "self", ".", "container", ",", "height", "=", "rows", ",", "width", "=", "cols", ")", "except", "IOError", ":", "# Container already exited", "pass" ]
Resize the container's PTY. If `size` is not None, it must be a tuple of (height,width), otherwise it will be determined by the size of the current TTY.
[ "Resize", "the", "container", "s", "PTY", "." ]
python
train
28.210526
onnx/onnx-mxnet
onnx_mxnet/import_onnx.py
https://github.com/onnx/onnx-mxnet/blob/b602d75c5a01f5ed8f68b11150a06374f058a86b/onnx_mxnet/import_onnx.py#L187-L199
def _fix_squeeze(self, inputs, new_attr): """ MXNet doesnt have a squeeze operator. Using "split" to perform similar operation. "split" can be slower compared to "reshape". This can have performance impact. TODO: Remove this implementation once mxnet adds the support. """ axes = new_attr.get('axis') op = mx.sym.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1) for i in axes[1:]: op = mx.sym.split(op, axis=i-1, num_outputs=1, squeeze_axis=1) return op
[ "def", "_fix_squeeze", "(", "self", ",", "inputs", ",", "new_attr", ")", ":", "axes", "=", "new_attr", ".", "get", "(", "'axis'", ")", "op", "=", "mx", ".", "sym", ".", "split", "(", "inputs", "[", "0", "]", ",", "axis", "=", "axes", "[", "0", "]", ",", "num_outputs", "=", "1", ",", "squeeze_axis", "=", "1", ")", "for", "i", "in", "axes", "[", "1", ":", "]", ":", "op", "=", "mx", ".", "sym", ".", "split", "(", "op", ",", "axis", "=", "i", "-", "1", ",", "num_outputs", "=", "1", ",", "squeeze_axis", "=", "1", ")", "return", "op" ]
MXNet doesn't have a squeeze operator. Using "split" to perform a similar operation. "split" can be slower compared to "reshape". This can have a performance impact. TODO: Remove this implementation once mxnet adds the support.
[ "MXNet", "doesnt", "have", "a", "squeeze", "operator", ".", "Using", "split", "to", "perform", "similar", "operation", ".", "split", "can", "be", "slower", "compared", "to", "reshape", ".", "This", "can", "have", "performance", "impact", ".", "TODO", ":", "Remove", "this", "implementation", "once", "mxnet", "adds", "the", "support", "." ]
python
train
42.769231
IdentityPython/pysaml2
src/saml2/response.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/response.py#L914-L1030
def parse_assertion(self, keys=None): """ Parse the assertions for a saml response. :param keys: A string representing a RSA key or a list of strings containing RSA keys. :return: True if the assertions are parsed otherwise False. """ if self.context == "AuthnQuery": # can contain one or more assertions pass else: # This is a saml2int limitation try: assert ( len(self.response.assertion) == 1 or len(self.response.encrypted_assertion) == 1 or self.assertion is not None ) except AssertionError: raise Exception("No assertion part") if self.response.assertion: logger.debug("***Unencrypted assertion***") for assertion in self.response.assertion: if not self._assertion(assertion, False): return False if self.find_encrypt_data(self.response): logger.debug("***Encrypted assertion/-s***") _enc_assertions = [] resp = self.response decr_text = str(self.response) decr_text_old = None while self.find_encrypt_data(resp) and decr_text_old != decr_text: decr_text_old = decr_text try: decr_text = self.sec.decrypt_keys(decr_text, keys) except DecryptError as e: continue else: resp = samlp.response_from_string(decr_text) # check and prepare for comparison between str and unicode if type(decr_text_old) != type(decr_text): if isinstance(decr_text_old, six.binary_type): decr_text_old = decr_text_old.decode("utf-8") else: decr_text_old = decr_text_old.encode("utf-8") _enc_assertions = self.decrypt_assertions( resp.encrypted_assertion, decr_text ) decr_text_old = None while ( self.find_encrypt_data(resp) or self.find_encrypt_data_assertion_list(_enc_assertions) ) and decr_text_old != decr_text: decr_text_old = decr_text try: decr_text = self.sec.decrypt_keys(decr_text, keys) except DecryptError as e: continue else: resp = samlp.response_from_string(decr_text) _enc_assertions = self.decrypt_assertions( resp.encrypted_assertion, decr_text, verified=True ) # check and prepare for comparison between str and unicode if type(decr_text_old) != type(decr_text): if isinstance(decr_text_old, six.binary_type): decr_text_old = decr_text_old.decode("utf-8") else: decr_text_old = decr_text_old.encode("utf-8") all_assertions = _enc_assertions if resp.assertion: all_assertions = all_assertions + resp.assertion if len(all_assertions) > 0: for tmp_ass in all_assertions: if tmp_ass.advice and tmp_ass.advice.encrypted_assertion: advice_res = self.decrypt_assertions( tmp_ass.advice.encrypted_assertion, decr_text, tmp_ass.issuer) if tmp_ass.advice.assertion: tmp_ass.advice.assertion.extend(advice_res) else: tmp_ass.advice.assertion = advice_res if len(advice_res) > 0: tmp_ass.advice.encrypted_assertion = [] self.response.assertion = resp.assertion for assertion in _enc_assertions: if not self._assertion(assertion, True): return False else: self.assertions.append(assertion) self.xmlstr = decr_text if len(_enc_assertions) > 0: self.response.encrypted_assertion = [] if self.response.assertion: for assertion in self.response.assertion: self.assertions.append(assertion) if self.assertions and len(self.assertions) > 0: self.assertion = self.assertions[0] if self.context == "AuthnReq" or self.context == "AttrQuery": self.ava = self.get_identity() logger.debug("--- AVA: %s", self.ava) return True
[ "def", "parse_assertion", "(", "self", ",", "keys", "=", "None", ")", ":", "if", "self", ".", "context", "==", "\"AuthnQuery\"", ":", "# can contain one or more assertions", "pass", "else", ":", "# This is a saml2int limitation", "try", ":", "assert", "(", "len", "(", "self", ".", "response", ".", "assertion", ")", "==", "1", "or", "len", "(", "self", ".", "response", ".", "encrypted_assertion", ")", "==", "1", "or", "self", ".", "assertion", "is", "not", "None", ")", "except", "AssertionError", ":", "raise", "Exception", "(", "\"No assertion part\"", ")", "if", "self", ".", "response", ".", "assertion", ":", "logger", ".", "debug", "(", "\"***Unencrypted assertion***\"", ")", "for", "assertion", "in", "self", ".", "response", ".", "assertion", ":", "if", "not", "self", ".", "_assertion", "(", "assertion", ",", "False", ")", ":", "return", "False", "if", "self", ".", "find_encrypt_data", "(", "self", ".", "response", ")", ":", "logger", ".", "debug", "(", "\"***Encrypted assertion/-s***\"", ")", "_enc_assertions", "=", "[", "]", "resp", "=", "self", ".", "response", "decr_text", "=", "str", "(", "self", ".", "response", ")", "decr_text_old", "=", "None", "while", "self", ".", "find_encrypt_data", "(", "resp", ")", "and", "decr_text_old", "!=", "decr_text", ":", "decr_text_old", "=", "decr_text", "try", ":", "decr_text", "=", "self", ".", "sec", ".", "decrypt_keys", "(", "decr_text", ",", "keys", ")", "except", "DecryptError", "as", "e", ":", "continue", "else", ":", "resp", "=", "samlp", ".", "response_from_string", "(", "decr_text", ")", "# check and prepare for comparison between str and unicode", "if", "type", "(", "decr_text_old", ")", "!=", "type", "(", "decr_text", ")", ":", "if", "isinstance", "(", "decr_text_old", ",", "six", ".", "binary_type", ")", ":", "decr_text_old", "=", "decr_text_old", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "decr_text_old", "=", "decr_text_old", ".", "encode", "(", "\"utf-8\"", ")", "_enc_assertions", "=", "self", ".", "decrypt_assertions", "(", "resp", ".", "encrypted_assertion", ",", "decr_text", ")", "decr_text_old", "=", "None", "while", "(", "self", ".", "find_encrypt_data", "(", "resp", ")", "or", "self", ".", "find_encrypt_data_assertion_list", "(", "_enc_assertions", ")", ")", "and", "decr_text_old", "!=", "decr_text", ":", "decr_text_old", "=", "decr_text", "try", ":", "decr_text", "=", "self", ".", "sec", ".", "decrypt_keys", "(", "decr_text", ",", "keys", ")", "except", "DecryptError", "as", "e", ":", "continue", "else", ":", "resp", "=", "samlp", ".", "response_from_string", "(", "decr_text", ")", "_enc_assertions", "=", "self", ".", "decrypt_assertions", "(", "resp", ".", "encrypted_assertion", ",", "decr_text", ",", "verified", "=", "True", ")", "# check and prepare for comparison between str and unicode", "if", "type", "(", "decr_text_old", ")", "!=", "type", "(", "decr_text", ")", ":", "if", "isinstance", "(", "decr_text_old", ",", "six", ".", "binary_type", ")", ":", "decr_text_old", "=", "decr_text_old", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "decr_text_old", "=", "decr_text_old", ".", "encode", "(", "\"utf-8\"", ")", "all_assertions", "=", "_enc_assertions", "if", "resp", ".", "assertion", ":", "all_assertions", "=", "all_assertions", "+", "resp", ".", "assertion", "if", "len", "(", "all_assertions", ")", ">", "0", ":", "for", "tmp_ass", "in", "all_assertions", ":", "if", "tmp_ass", ".", "advice", "and", "tmp_ass", ".", "advice", ".", "encrypted_assertion", ":", "advice_res", "=", "self", ".", 
"decrypt_assertions", "(", "tmp_ass", ".", "advice", ".", "encrypted_assertion", ",", "decr_text", ",", "tmp_ass", ".", "issuer", ")", "if", "tmp_ass", ".", "advice", ".", "assertion", ":", "tmp_ass", ".", "advice", ".", "assertion", ".", "extend", "(", "advice_res", ")", "else", ":", "tmp_ass", ".", "advice", ".", "assertion", "=", "advice_res", "if", "len", "(", "advice_res", ")", ">", "0", ":", "tmp_ass", ".", "advice", ".", "encrypted_assertion", "=", "[", "]", "self", ".", "response", ".", "assertion", "=", "resp", ".", "assertion", "for", "assertion", "in", "_enc_assertions", ":", "if", "not", "self", ".", "_assertion", "(", "assertion", ",", "True", ")", ":", "return", "False", "else", ":", "self", ".", "assertions", ".", "append", "(", "assertion", ")", "self", ".", "xmlstr", "=", "decr_text", "if", "len", "(", "_enc_assertions", ")", ">", "0", ":", "self", ".", "response", ".", "encrypted_assertion", "=", "[", "]", "if", "self", ".", "response", ".", "assertion", ":", "for", "assertion", "in", "self", ".", "response", ".", "assertion", ":", "self", ".", "assertions", ".", "append", "(", "assertion", ")", "if", "self", ".", "assertions", "and", "len", "(", "self", ".", "assertions", ")", ">", "0", ":", "self", ".", "assertion", "=", "self", ".", "assertions", "[", "0", "]", "if", "self", ".", "context", "==", "\"AuthnReq\"", "or", "self", ".", "context", "==", "\"AttrQuery\"", ":", "self", ".", "ava", "=", "self", ".", "get_identity", "(", ")", "logger", ".", "debug", "(", "\"--- AVA: %s\"", ",", "self", ".", "ava", ")", "return", "True" ]
Parse the assertions for a saml response. :param keys: A string representing a RSA key or a list of strings containing RSA keys. :return: True if the assertions are parsed otherwise False.
[ "Parse", "the", "assertions", "for", "a", "saml", "response", "." ]
python
train
40.717949
joerick/pyinstrument
pyinstrument/util.py
https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/util.py#L18-L25
def deprecated(func, *args, **kwargs): ''' Marks a function as deprecated. ''' warnings.warn( '{} is deprecated and should no longer be used.'.format(func), DeprecationWarning, stacklevel=3 ) return func(*args, **kwargs)
[ "def", "deprecated", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "'{} is deprecated and should no longer be used.'", ".", "format", "(", "func", ")", ",", "DeprecationWarning", ",", "stacklevel", "=", "3", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Marks a function as deprecated.
[ "Marks", "a", "function", "as", "deprecated", "." ]
python
train
31.625
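Because deprecated() in the record above is a call-through helper rather than a decorator, a short sketch shows how it forwards arguments while emitting a DeprecationWarning. The wrapped function old_sum is invented, and the import path simply mirrors the record's module path.

import warnings
from pyinstrument.util import deprecated   # module path taken from the record above

def old_sum(a, b):        # invented example target
    return a + b

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = deprecated(old_sum, 2, 3)   # forwards the positional args to old_sum

print(result)              # 5
print(caught[0].category)  # <class 'DeprecationWarning'>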
reingart/pyafipws
wslpg.py
https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L1746-L1760
def AgregarCalidad(self, analisis_muestra=None, nro_boletin=None, cod_grado=None, valor_grado=None, valor_contenido_proteico=None, valor_factor=None, **kwargs): "Agrega la información sobre la calidad, al autorizar o posteriormente" self.certificacion['primaria']['calidad'] = dict( analisisMuestra=analisis_muestra, nroBoletin=nro_boletin, codGrado=cod_grado, # G1 G2 G3 F1 F2 F3 valorGrado=valor_grado or None, # opcional valorContProteico=valor_contenido_proteico, valorFactor=valor_factor, detalleMuestraAnalisis=[], # <!--1 or more repetitions:--> ) return True
[ "def", "AgregarCalidad", "(", "self", ",", "analisis_muestra", "=", "None", ",", "nro_boletin", "=", "None", ",", "cod_grado", "=", "None", ",", "valor_grado", "=", "None", ",", "valor_contenido_proteico", "=", "None", ",", "valor_factor", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "certificacion", "[", "'primaria'", "]", "[", "'calidad'", "]", "=", "dict", "(", "analisisMuestra", "=", "analisis_muestra", ",", "nroBoletin", "=", "nro_boletin", ",", "codGrado", "=", "cod_grado", ",", "# G1 G2 G3 F1 F2 F3", "valorGrado", "=", "valor_grado", "or", "None", ",", "# opcional", "valorContProteico", "=", "valor_contenido_proteico", ",", "valorFactor", "=", "valor_factor", ",", "detalleMuestraAnalisis", "=", "[", "]", ",", "# <!--1 or more repetitions:-->", ")", "return", "True" ]
Adds the quality information, at authorization time or afterwards
[ "Agrega", "la", "información", "sobre", "la", "calidad", "al", "autorizar", "o", "posteriormente" ]
python
train
55.533333
cjdrake/pyeda
pyeda/parsing/boolexpr.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/boolexpr.py#L427-L443
def _impl(lexer): """Return an Implies expression.""" p = _sumterm(lexer) tok = next(lexer) # SUMTERM '=>' IMPL if isinstance(tok, OP_rarrow): q = _impl(lexer) return ('implies', p, q) # SUMTERM '<=>' IMPL elif isinstance(tok, OP_lrarrow): q = _impl(lexer) return ('equal', p, q) # SUMTERM else: lexer.unpop_token(tok) return p
[ "def", "_impl", "(", "lexer", ")", ":", "p", "=", "_sumterm", "(", "lexer", ")", "tok", "=", "next", "(", "lexer", ")", "# SUMTERM '=>' IMPL", "if", "isinstance", "(", "tok", ",", "OP_rarrow", ")", ":", "q", "=", "_impl", "(", "lexer", ")", "return", "(", "'implies'", ",", "p", ",", "q", ")", "# SUMTERM '<=>' IMPL", "elif", "isinstance", "(", "tok", ",", "OP_lrarrow", ")", ":", "q", "=", "_impl", "(", "lexer", ")", "return", "(", "'equal'", ",", "p", ",", "q", ")", "# SUMTERM", "else", ":", "lexer", ".", "unpop_token", "(", "tok", ")", "return", "p" ]
Return an Implies expression.
[ "Return", "an", "Implies", "expression", "." ]
python
train
23.294118
City-of-Helsinki/django-helusers
helusers/authz.py
https://github.com/City-of-Helsinki/django-helusers/blob/9064979f6f990987358e2bca3c24a80fad201bdb/helusers/authz.py#L15-L29
def has_api_scopes(self, *api_scopes): """ Test if all given API scopes are authorized. :type api_scopes: list[str] :param api_scopes: The API scopes to test :rtype: bool|None :return: True or False, if the API Token has the API scopes field set, otherwise None """ if self._authorized_api_scopes is None: return None return all((x in self._authorized_api_scopes) for x in api_scopes)
[ "def", "has_api_scopes", "(", "self", ",", "*", "api_scopes", ")", ":", "if", "self", ".", "_authorized_api_scopes", "is", "None", ":", "return", "None", "return", "all", "(", "(", "x", "in", "self", ".", "_authorized_api_scopes", ")", "for", "x", "in", "api_scopes", ")" ]
Test if all given API scopes are authorized. :type api_scopes: list[str] :param api_scopes: The API scopes to test :rtype: bool|None :return: True or False, if the API Token has the API scopes field set, otherwise None
[ "Test", "if", "all", "given", "API", "scopes", "are", "authorized", "." ]
python
train
31.733333
frostming/atoml
atoml/encoder.py
https://github.com/frostming/atoml/blob/85414ef77777366887a819a05b496d5279296cd2/atoml/encoder.py#L193-L201
def dumps(obj, preserve=False): """Stringifies a dict as toml :param obj: the object to be dumped into toml :param preserve: optional flag to preserve the inline table in result """ f = StringIO() dump(obj, f, preserve) return f.getvalue()
[ "def", "dumps", "(", "obj", ",", "preserve", "=", "False", ")", ":", "f", "=", "StringIO", "(", ")", "dump", "(", "obj", ",", "f", ",", "preserve", ")", "return", "f", ".", "getvalue", "(", ")" ]
Stringifies a dict as toml :param obj: the object to be dumped into toml :param preserve: optional flag to preserve the inline table in result
[ "Stringifies", "a", "dict", "as", "toml" ]
python
train
28.888889
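A minimal sketch of calling dumps() from the record above; the document content is invented and the import targets the module path shown in the record (atoml/encoder.py) rather than assuming a top-level re-export.

from atoml.encoder import dumps   # module path taken from the record above

doc = {
    "package": {"name": "demo", "version": "0.1.0"},
    "dependencies": {"requests": ">=2.0"},
}

print(dumps(doc))                 # TOML text with [package] and [dependencies] tables
print(dumps(doc, preserve=True))  # keep inline tables, per the docstring's preserve flag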
explosion/spaCy
spacy/util.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L252-L262
def get_entry_point(key, value): """Check if registered entry point is available for a given name and load it. Otherwise, return None. key (unicode): Entry point name. value (unicode): Name of entry point to load. RETURNS: The loaded entry point or None. """ for entry_point in pkg_resources.iter_entry_points(key): if entry_point.name == value: return entry_point.load()
[ "def", "get_entry_point", "(", "key", ",", "value", ")", ":", "for", "entry_point", "in", "pkg_resources", ".", "iter_entry_points", "(", "key", ")", ":", "if", "entry_point", ".", "name", "==", "value", ":", "return", "entry_point", ".", "load", "(", ")" ]
Check if registered entry point is available for a given name and load it. Otherwise, return None. key (unicode): Entry point name. value (unicode): Name of entry point to load. RETURNS: The loaded entry point or None.
[ "Check", "if", "registered", "entry", "point", "is", "available", "for", "a", "given", "name", "and", "load", "it", ".", "Otherwise", "return", "None", "." ]
python
train
37.272727
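get_entry_point() above returns None when nothing matches, which is worth seeing explicitly. "spacy_factories" is one of spaCy's entry-point groups, but the entry-point name below is invented, so on most installs the lookup simply comes back empty.

from spacy.util import get_entry_point   # module path taken from the record above

factory = get_entry_point("spacy_factories", "my_custom_component")
print(factory)   # None unless some installed package registers that exact name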
Nagasaki45/bibo
bibo/internals.py
https://github.com/Nagasaki45/bibo/blob/e6afb28711e78eb11475834d3f9455252ac9f347/bibo/internals.py#L26-L36
def open_file(filepath): """ Open file with the default system app. Copied from https://stackoverflow.com/a/435669/1224456 """ if sys.platform.startswith('darwin'): subprocess.Popen(('open', filepath)) elif os.name == 'nt': os.startfile(filepath) elif os.name == 'posix': subprocess.Popen(('xdg-open', filepath))
[ "def", "open_file", "(", "filepath", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'darwin'", ")", ":", "subprocess", ".", "Popen", "(", "(", "'open'", ",", "filepath", ")", ")", "elif", "os", ".", "name", "==", "'nt'", ":", "os", ".", "startfile", "(", "filepath", ")", "elif", "os", ".", "name", "==", "'posix'", ":", "subprocess", ".", "Popen", "(", "(", "'xdg-open'", ",", "filepath", ")", ")" ]
Open file with the default system app. Copied from https://stackoverflow.com/a/435669/1224456
[ "Open", "file", "with", "the", "default", "system", "app", ".", "Copied", "from", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "435669", "/", "1224456" ]
python
train
32.181818
consbio/ncdjango
ncdjango/geoprocessing/params.py
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/geoprocessing/params.py#L275-L281
def clean(self, value): """Cleans and returns the given value, or raises a ParameterNotValidError exception""" if isinstance(value, six.string_types) and value.lower() == 'false': return False return bool(value)
[ "def", "clean", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "and", "value", ".", "lower", "(", ")", "==", "'false'", ":", "return", "False", "return", "bool", "(", "value", ")" ]
Cleans and returns the given value, or raises a ParameterNotValidError exception
[ "Cleans", "and", "returns", "the", "given", "value", "or", "raises", "a", "ParameterNotValidError", "exception" ]
python
train
34.714286
twilio/twilio-python
twilio/rest/autopilot/v1/assistant/task/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/task/__init__.py#L124-L150
def create(self, unique_name, friendly_name=values.unset, actions=values.unset, actions_url=values.unset): """ Create a new TaskInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode friendly_name: descriptive string that you create to describe the new resource :param dict actions: The JSON string that specifies the actions that instruct the Assistant on how to perform the task :param unicode actions_url: The URL from which the Assistant can fetch actions :returns: Newly created TaskInstance :rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance """ data = values.of({ 'UniqueName': unique_name, 'FriendlyName': friendly_name, 'Actions': serialize.object(actions), 'ActionsUrl': actions_url, }) payload = self._version.create( 'POST', self._uri, data=data, ) return TaskInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )
[ "def", "create", "(", "self", ",", "unique_name", ",", "friendly_name", "=", "values", ".", "unset", ",", "actions", "=", "values", ".", "unset", ",", "actions_url", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'UniqueName'", ":", "unique_name", ",", "'FriendlyName'", ":", "friendly_name", ",", "'Actions'", ":", "serialize", ".", "object", "(", "actions", ")", ",", "'ActionsUrl'", ":", "actions_url", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "create", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "TaskInstance", "(", "self", ".", "_version", ",", "payload", ",", "assistant_sid", "=", "self", ".", "_solution", "[", "'assistant_sid'", "]", ",", ")" ]
Create a new TaskInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode friendly_name: descriptive string that you create to describe the new resource :param dict actions: The JSON string that specifies the actions that instruct the Assistant on how to perform the task :param unicode actions_url: The URL from which the Assistant can fetch actions :returns: Newly created TaskInstance :rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
[ "Create", "a", "new", "TaskInstance" ]
python
train
41.407407
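The create() call above is normally reached through the Twilio client's resource tree; the sketch below follows the usual twilio-python pattern for Autopilot tasks, with the account SID, auth token, assistant SID, and actions payload all being placeholders.

from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")   # placeholders

task = (client.autopilot.v1
        .assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")   # placeholder assistant SID
        .tasks
        .create(
            unique_name="order-pizza",
            friendly_name="Order a pizza",
            actions={"actions": [{"say": "Your pizza is on the way."}]},
        ))

print(task.sid)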
materialsproject/pymatgen
pymatgen/apps/borg/queen.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/apps/borg/queen.py#L131-L143
def order_assimilation(args): """ Internal helper method for BorgQueen to process assimilation """ (path, drone, data, status) = args newdata = drone.assimilate(path) if newdata: data.append(json.dumps(newdata, cls=MontyEncoder)) status['count'] += 1 count = status['count'] total = status['total'] logger.info('{}/{} ({:.2f}%) done'.format(count, total, count / total * 100))
[ "def", "order_assimilation", "(", "args", ")", ":", "(", "path", ",", "drone", ",", "data", ",", "status", ")", "=", "args", "newdata", "=", "drone", ".", "assimilate", "(", "path", ")", "if", "newdata", ":", "data", ".", "append", "(", "json", ".", "dumps", "(", "newdata", ",", "cls", "=", "MontyEncoder", ")", ")", "status", "[", "'count'", "]", "+=", "1", "count", "=", "status", "[", "'count'", "]", "total", "=", "status", "[", "'total'", "]", "logger", ".", "info", "(", "'{}/{} ({:.2f}%) done'", ".", "format", "(", "count", ",", "total", ",", "count", "/", "total", "*", "100", ")", ")" ]
Internal helper method for BorgQueen to process assimilation
[ "Internal", "helper", "method", "for", "BorgQueen", "to", "process", "assimilation" ]
python
train
35.230769
andy-z/ged4py
ged4py/model.py
https://github.com/andy-z/ged4py/blob/d0e0cceaadf0a84cbf052705e3c27303b12e1757/ged4py/model.py#L295-L321
def order(self, order): """Returns name order key. Returns tuple with two strings that can be compared to other such tuple obtained from different name. Note that if you want locale-dependent ordering then you need to compare strings using locale-aware method (e.g. ``locale.strxfrm``). :param order: One of the ORDER_* constants. :returns: tuple of two strings """ given = self.given surname = self.surname if order in (ORDER_MAIDEN_GIVEN, ORDER_GIVEN_MAIDEN): surname = self.maiden or self.surname # We are collating empty names to come after non-empty, # so instead of empty we return "2" and add "1" as prefix to others given = ("1" + given) if given else "2" surname = ("1" + surname) if surname else "2" if order in (ORDER_SURNAME_GIVEN, ORDER_MAIDEN_GIVEN): return (surname, given) elif order in (ORDER_GIVEN_SURNAME, ORDER_GIVEN_MAIDEN): return (given, surname) else: raise ValueError("unexpected order: {}".format(order))
[ "def", "order", "(", "self", ",", "order", ")", ":", "given", "=", "self", ".", "given", "surname", "=", "self", ".", "surname", "if", "order", "in", "(", "ORDER_MAIDEN_GIVEN", ",", "ORDER_GIVEN_MAIDEN", ")", ":", "surname", "=", "self", ".", "maiden", "or", "self", ".", "surname", "# We are collating empty names to come after non-empty,", "# so instead of empty we return \"2\" and add \"1\" as prefix to others", "given", "=", "(", "\"1\"", "+", "given", ")", "if", "given", "else", "\"2\"", "surname", "=", "(", "\"1\"", "+", "surname", ")", "if", "surname", "else", "\"2\"", "if", "order", "in", "(", "ORDER_SURNAME_GIVEN", ",", "ORDER_MAIDEN_GIVEN", ")", ":", "return", "(", "surname", ",", "given", ")", "elif", "order", "in", "(", "ORDER_GIVEN_SURNAME", ",", "ORDER_GIVEN_MAIDEN", ")", ":", "return", "(", "given", ",", "surname", ")", "else", ":", "raise", "ValueError", "(", "\"unexpected order: {}\"", ".", "format", "(", "order", ")", ")" ]
Returns name order key. Returns tuple with two strings that can be compared to other such tuple obtained from different name. Note that if you want locale-dependent ordering then you need to compare strings using locale-aware method (e.g. ``locale.strxfrm``). :param order: One of the ORDER_* constants. :returns: tuple of two strings
[ "Returns", "name", "order", "key", "." ]
python
train
40.740741
GetmeUK/MongoFrames
snippets/comparable.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/snippets/comparable.py#L310-L323
def logged_delete(self, user): """Delete the document and log the event in the change log""" self.delete() # Log the change entry = ChangeLogEntry({ 'type': 'DELETED', 'documents': [self], 'user': user }) entry.insert() return entry
[ "def", "logged_delete", "(", "self", ",", "user", ")", ":", "self", ".", "delete", "(", ")", "# Log the change", "entry", "=", "ChangeLogEntry", "(", "{", "'type'", ":", "'DELETED'", ",", "'documents'", ":", "[", "self", "]", ",", "'user'", ":", "user", "}", ")", "entry", ".", "insert", "(", ")", "return", "entry" ]
Delete the document and log the event in the change log
[ "Delete", "the", "document", "and", "log", "the", "event", "in", "the", "change", "log" ]
python
train
22.714286
raiden-network/raiden
raiden/messages.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/messages.py#L1726-L1733
def sign(self, signer: Signer): """This method signs twice: - the `non_closing_signature` for the balance proof update - the `reward_proof_signature` for the monitoring request """ self.non_closing_signature = self.balance_proof._sign(signer) message_data = self._data_to_sign() self.signature = signer.sign(data=message_data)
[ "def", "sign", "(", "self", ",", "signer", ":", "Signer", ")", ":", "self", ".", "non_closing_signature", "=", "self", ".", "balance_proof", ".", "_sign", "(", "signer", ")", "message_data", "=", "self", ".", "_data_to_sign", "(", ")", "self", ".", "signature", "=", "signer", ".", "sign", "(", "data", "=", "message_data", ")" ]
This method signs twice: - the `non_closing_signature` for the balance proof update - the `reward_proof_signature` for the monitoring request
[ "This", "method", "signs", "twice", ":", "-", "the", "non_closing_signature", "for", "the", "balance", "proof", "update", "-", "the", "reward_proof_signature", "for", "the", "monitoring", "request" ]
python
train
47.875
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L290-L316
def get_source_id(self): """Gets the ``Resource Id`` of the source of this asset. The source is the original owner of the copyright of this asset and may differ from the creator of this asset. The source for a published book written by Margaret Mitchell would be Macmillan. The source for an unpublished painting by Arthur Goodwin would be Arthur Goodwin. An ``Asset`` is ``Sourceable`` and also contains a provider identity. The provider is the entity that makes this digital asset available in this repository but may or may not be the publisher of the contents depicted in the asset. For example, a map published by Ticknor and Fields in 1848 may have a provider of Library of Congress and a source of Ticknor and Fields. If copied from a repository at Middlebury College, the provider would be Middlebury College and a source of Ticknor and Fields. return: (osid.id.Id) - the source ``Id`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.Resource.get_avatar_id_template if not bool(self._my_map['sourceId']): raise errors.IllegalState('this Asset has no source') else: return Id(self._my_map['sourceId'])
[ "def", "get_source_id", "(", "self", ")", ":", "# Implemented from template for osid.resource.Resource.get_avatar_id_template", "if", "not", "bool", "(", "self", ".", "_my_map", "[", "'sourceId'", "]", ")", ":", "raise", "errors", ".", "IllegalState", "(", "'this Asset has no source'", ")", "else", ":", "return", "Id", "(", "self", ".", "_my_map", "[", "'sourceId'", "]", ")" ]
Gets the ``Resource Id`` of the source of this asset. The source is the original owner of the copyright of this asset and may differ from the creator of this asset. The source for a published book written by Margaret Mitchell would be Macmillan. The source for an unpublished painting by Arthur Goodwin would be Arthur Goodwin. An ``Asset`` is ``Sourceable`` and also contains a provider identity. The provider is the entity that makes this digital asset available in this repository but may or may not be the publisher of the contents depicted in the asset. For example, a map published by Ticknor and Fields in 1848 may have a provider of Library of Congress and a source of Ticknor and Fields. If copied from a repository at Middlebury College, the provider would be Middlebury College and a source of Ticknor and Fields. return: (osid.id.Id) - the source ``Id`` *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "Resource", "Id", "of", "the", "source", "of", "this", "asset", "." ]
python
train
49.333333
allenai/allennlp
allennlp/predictors/open_information_extraction.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/predictors/open_information_extraction.py#L76-L86
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool: """ Tests whether the predicate in BIO tags1 overlap with those of tags2. """ # Get predicate word indices from both predictions pred_ind1 = get_predicate_indices(tags1) pred_ind2 = get_predicate_indices(tags2) # Return if pred_ind1 pred_ind2 overlap return any(set.intersection(set(pred_ind1), set(pred_ind2)))
[ "def", "predicates_overlap", "(", "tags1", ":", "List", "[", "str", "]", ",", "tags2", ":", "List", "[", "str", "]", ")", "->", "bool", ":", "# Get predicate word indices from both predictions", "pred_ind1", "=", "get_predicate_indices", "(", "tags1", ")", "pred_ind2", "=", "get_predicate_indices", "(", "tags2", ")", "# Return if pred_ind1 pred_ind2 overlap", "return", "any", "(", "set", ".", "intersection", "(", "set", "(", "pred_ind1", ")", ",", "set", "(", "pred_ind2", ")", ")", ")" ]
Tests whether the predicate in BIO tags1 overlap with those of tags2.
[ "Tests", "whether", "the", "predicate", "in", "BIO", "tags1", "overlap", "with", "those", "of", "tags2", "." ]
python
train
36.909091
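predicates_overlap() depends on get_predicate_indices() from the same module; assuming that helper collects the positions whose BIO tag carries a verb ("V") label, two tag sequences overlap exactly when they share such a position. The tag sequences below are invented.

from allennlp.predictors.open_information_extraction import predicates_overlap

tags_a = ["B-V", "I-V", "O", "O"]   # predicate spans positions 0-1
tags_b = ["O", "B-V", "O", "O"]     # predicate at position 1, shared with tags_a
tags_c = ["O", "O", "O", "B-V"]     # predicate at position 3, disjoint from tags_a

print(predicates_overlap(tags_a, tags_b))   # expected: True
print(predicates_overlap(tags_a, tags_c))   # expected: False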
kvesteri/validators
validators/email.py
https://github.com/kvesteri/validators/blob/34d355e87168241e872b25811d245810df2bd430/validators/email.py#L26-L72
def email(value, whitelist=None): """ Validate an email address. This validator is based on `Django's email validator`_. Returns ``True`` on success and :class:`~validators.utils.ValidationFailure` when validation fails. Examples:: >>> email('[email protected]') True >>> email('bogus@@') ValidationFailure(func=email, ...) .. _Django's email validator: https://github.com/django/django/blob/master/django/core/validators.py .. versionadded:: 0.1 :param value: value to validate :param whitelist: domain names to whitelist :copyright: (c) Django Software Foundation and individual contributors. :license: BSD """ if whitelist is None: whitelist = domain_whitelist if not value or '@' not in value: return False user_part, domain_part = value.rsplit('@', 1) if not user_regex.match(user_part): return False if domain_part not in whitelist and not domain_regex.match(domain_part): # Try for possible IDN domain-part try: domain_part = domain_part.encode('idna').decode('ascii') return domain_regex.match(domain_part) except UnicodeError: return False return True
[ "def", "email", "(", "value", ",", "whitelist", "=", "None", ")", ":", "if", "whitelist", "is", "None", ":", "whitelist", "=", "domain_whitelist", "if", "not", "value", "or", "'@'", "not", "in", "value", ":", "return", "False", "user_part", ",", "domain_part", "=", "value", ".", "rsplit", "(", "'@'", ",", "1", ")", "if", "not", "user_regex", ".", "match", "(", "user_part", ")", ":", "return", "False", "if", "domain_part", "not", "in", "whitelist", "and", "not", "domain_regex", ".", "match", "(", "domain_part", ")", ":", "# Try for possible IDN domain-part", "try", ":", "domain_part", "=", "domain_part", ".", "encode", "(", "'idna'", ")", ".", "decode", "(", "'ascii'", ")", "return", "domain_regex", ".", "match", "(", "domain_part", ")", "except", "UnicodeError", ":", "return", "False", "return", "True" ]
Validate an email address. This validator is based on `Django's email validator`_. Returns ``True`` on success and :class:`~validators.utils.ValidationFailure` when validation fails. Examples:: >>> email('[email protected]') True >>> email('bogus@@') ValidationFailure(func=email, ...) .. _Django's email validator: https://github.com/django/django/blob/master/django/core/validators.py .. versionadded:: 0.1 :param value: value to validate :param whitelist: domain names to whitelist :copyright: (c) Django Software Foundation and individual contributors. :license: BSD
[ "Validate", "an", "email", "address", "." ]
python
train
26.12766
cmck/pybrowserstack-screenshots
browserstack_screenshots/__init__.py
https://github.com/cmck/pybrowserstack-screenshots/blob/598358fc5b9a41678b3f913f2c082a288011322d/browserstack_screenshots/__init__.py#L89-L95
def screenshots_done(self, jobid): """ Return true if the screenshots job is done """ resp = self.session.get(os.path.join(self.api_url, '{0}.json'.format(jobid))) resp = self._process_response(resp) return True if resp.json()['state'] == 'done' else False
[ "def", "screenshots_done", "(", "self", ",", "jobid", ")", ":", "resp", "=", "self", ".", "session", ".", "get", "(", "os", ".", "path", ".", "join", "(", "self", ".", "api_url", ",", "'{0}.json'", ".", "format", "(", "jobid", ")", ")", ")", "resp", "=", "self", ".", "_process_response", "(", "resp", ")", "return", "True", "if", "resp", ".", "json", "(", ")", "[", "'state'", "]", "==", "'done'", "else", "False" ]
Return true if the screenshots job is done
[ "Return", "true", "if", "the", "screenshots", "job", "is", "done" ]
python
train
42.571429
saltstack/salt
salt/modules/systemd_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/systemd_service.py#L311-L320
def _strip_scope(msg): ''' Strip unnecessary message about running the command with --scope from stderr so that we can raise an exception with the remaining stderr text. ''' ret = [] for line in msg.splitlines(): if not line.endswith('.scope'): ret.append(line) return '\n'.join(ret).strip()
[ "def", "_strip_scope", "(", "msg", ")", ":", "ret", "=", "[", "]", "for", "line", "in", "msg", ".", "splitlines", "(", ")", ":", "if", "not", "line", ".", "endswith", "(", "'.scope'", ")", ":", "ret", ".", "append", "(", "line", ")", "return", "'\\n'", ".", "join", "(", "ret", ")", ".", "strip", "(", ")" ]
Strip unnecessary message about running the command with --scope from stderr so that we can raise an exception with the remaining stderr text.
[ "Strip", "unnecessary", "message", "about", "running", "the", "command", "with", "--", "scope", "from", "stderr", "so", "that", "we", "can", "raise", "an", "exception", "with", "the", "remaining", "stderr", "text", "." ]
python
train
33
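_strip_scope() above is a pure string helper, so its effect is easy to show directly; the stderr sample is invented, and because the name is underscore-private the sketch assumes the function is already in scope rather than importing the salt module.

# Drop lines ending in ".scope", keep the rest (see _strip_scope above).
sample_stderr = (
    "Running scope as unit: run-r0123456789abcdef.scope\n"
    "Job for nginx.service failed because the control process exited with error code."
)
print(_strip_scope(sample_stderr))
# -> Job for nginx.service failed because the control process exited with error code.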
inveniosoftware-contrib/record-recommender
record_recommender/storage.py
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/storage.py#L152-L155
def add_user(self, uid, nodes, weights): """Add a user.""" for i, node in enumerate(nodes): self.file.write("{},{},{}\n".format(uid, node, weights[i]))
[ "def", "add_user", "(", "self", ",", "uid", ",", "nodes", ",", "weights", ")", ":", "for", "i", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "self", ".", "file", ".", "write", "(", "\"{},{},{}\\n\"", ".", "format", "(", "uid", ",", "node", ",", "weights", "[", "i", "]", ")", ")" ]
Add a user.
[ "Add", "a", "user", "." ]
python
train
44
neo4j/neo4j-python-driver
neo4j/types/temporal.py
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L80-L97
def dehydrate_time(value): """ Dehydrator for `time` values. :param value: :type value: Time :return: """ if isinstance(value, Time): nanoseconds = int(value.ticks * 1000000000) elif isinstance(value, time): nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute + 1000000000 * value.second + 1000 * value.microsecond) else: raise TypeError("Value must be a neotime.Time or a datetime.time") if value.tzinfo: return Structure(b"T", nanoseconds, value.tzinfo.utcoffset(value).seconds) else: return Structure(b"t", nanoseconds)
[ "def", "dehydrate_time", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Time", ")", ":", "nanoseconds", "=", "int", "(", "value", ".", "ticks", "*", "1000000000", ")", "elif", "isinstance", "(", "value", ",", "time", ")", ":", "nanoseconds", "=", "(", "3600000000000", "*", "value", ".", "hour", "+", "60000000000", "*", "value", ".", "minute", "+", "1000000000", "*", "value", ".", "second", "+", "1000", "*", "value", ".", "microsecond", ")", "else", ":", "raise", "TypeError", "(", "\"Value must be a neotime.Time or a datetime.time\"", ")", "if", "value", ".", "tzinfo", ":", "return", "Structure", "(", "b\"T\"", ",", "nanoseconds", ",", "value", ".", "tzinfo", ".", "utcoffset", "(", "value", ")", ".", "seconds", ")", "else", ":", "return", "Structure", "(", "b\"t\"", ",", "nanoseconds", ")" ]
Dehydrator for `time` values. :param value: :type value: Time :return:
[ "Dehydrator", "for", "time", "values", "." ]
python
train
34.888889
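The nanosecond arithmetic inside dehydrate_time() can be checked by hand for a plain datetime.time; the sketch below recomputes the same expression without the driver's Structure type, so it illustrates the conversion only, not the Bolt wire format.

from datetime import time

t = time(12, 34, 56, 789000)   # 12:34:56.789, no tzinfo -> the b"t" branch above
nanoseconds = (3600000000000 * t.hour
               + 60000000000 * t.minute
               + 1000000000 * t.second
               + 1000 * t.microsecond)
print(nanoseconds)   # 45296789000000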
WZBSocialScienceCenter/tmtoolkit
tmtoolkit/topicmod/visualize.py
https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/visualize.py#L235-L306
def plot_heatmap(fig, ax, data, xaxislabel=None, yaxislabel=None, xticklabels=None, yticklabels=None, title=None, grid=True, values_in_cells=True, round_values_in_cells=2, legend=False, fontsize_axislabel=None, fontsize_axisticks=None, fontsize_cell_values=None): """" helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function """ if not isinstance(data, np.ndarray): data = np.array(data) if data.ndim != 2: raise ValueError('`data` must be a 2D matrix/array') # draw basic heatmap cax = ax.matshow(data) # draw legend if legend: fig.colorbar(cax) # set title if title: ax.set_title(title, y=1.25) n_rows, n_cols = data.shape # draw values in cells if values_in_cells: textcol_thresh = data.min() + (data.max() - data.min()) / 2 x_indices, y_indices = np.meshgrid(np.arange(n_cols), np.arange(n_rows)) for x, y in zip(x_indices.flatten(), y_indices.flatten()): val = data[y, x] # lower values get white text color for better visibility textcol = 'white' if val < textcol_thresh else 'black' disp_val = round(val, round_values_in_cells) if round_values_in_cells is not None else val ax.text(x, y, disp_val, va='center', ha='center', color=textcol, fontsize=fontsize_cell_values) # customize axes if xaxislabel: ax.set_xlabel(xaxislabel) if yaxislabel: ax.set_ylabel(yaxislabel) if fontsize_axislabel: for item in (ax.xaxis.label, ax.yaxis.label): item.set_fontsize(fontsize_axislabel) ax.set_xticks(np.arange(0, n_cols)) ax.set_yticks(np.arange(0, n_rows)) if xticklabels is not None: ax.set_xticklabels(xticklabels, rotation=45, ha='left') if yticklabels is not None: ax.set_yticklabels(yticklabels) if fontsize_axisticks: for label in (ax.get_xticklabels() + ax.get_yticklabels()): label.set_fontsize(fontsize_axisticks) # gridlines based on minor ticks if grid: ax.set_xticks(np.arange(-.5, n_cols), minor=True) ax.set_yticks(np.arange(-.5, n_rows), minor=True) ax.grid(which='minor', color='w', linestyle='-', linewidth=1) return fig, ax
[ "def", "plot_heatmap", "(", "fig", ",", "ax", ",", "data", ",", "xaxislabel", "=", "None", ",", "yaxislabel", "=", "None", ",", "xticklabels", "=", "None", ",", "yticklabels", "=", "None", ",", "title", "=", "None", ",", "grid", "=", "True", ",", "values_in_cells", "=", "True", ",", "round_values_in_cells", "=", "2", ",", "legend", "=", "False", ",", "fontsize_axislabel", "=", "None", ",", "fontsize_axisticks", "=", "None", ",", "fontsize_cell_values", "=", "None", ")", ":", "if", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "data", "=", "np", ".", "array", "(", "data", ")", "if", "data", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "'`data` must be a 2D matrix/array'", ")", "# draw basic heatmap", "cax", "=", "ax", ".", "matshow", "(", "data", ")", "# draw legend", "if", "legend", ":", "fig", ".", "colorbar", "(", "cax", ")", "# set title", "if", "title", ":", "ax", ".", "set_title", "(", "title", ",", "y", "=", "1.25", ")", "n_rows", ",", "n_cols", "=", "data", ".", "shape", "# draw values in cells", "if", "values_in_cells", ":", "textcol_thresh", "=", "data", ".", "min", "(", ")", "+", "(", "data", ".", "max", "(", ")", "-", "data", ".", "min", "(", ")", ")", "/", "2", "x_indices", ",", "y_indices", "=", "np", ".", "meshgrid", "(", "np", ".", "arange", "(", "n_cols", ")", ",", "np", ".", "arange", "(", "n_rows", ")", ")", "for", "x", ",", "y", "in", "zip", "(", "x_indices", ".", "flatten", "(", ")", ",", "y_indices", ".", "flatten", "(", ")", ")", ":", "val", "=", "data", "[", "y", ",", "x", "]", "# lower values get white text color for better visibility", "textcol", "=", "'white'", "if", "val", "<", "textcol_thresh", "else", "'black'", "disp_val", "=", "round", "(", "val", ",", "round_values_in_cells", ")", "if", "round_values_in_cells", "is", "not", "None", "else", "val", "ax", ".", "text", "(", "x", ",", "y", ",", "disp_val", ",", "va", "=", "'center'", ",", "ha", "=", "'center'", ",", "color", "=", "textcol", ",", "fontsize", "=", "fontsize_cell_values", ")", "# customize axes", "if", "xaxislabel", ":", "ax", ".", "set_xlabel", "(", "xaxislabel", ")", "if", "yaxislabel", ":", "ax", ".", "set_ylabel", "(", "yaxislabel", ")", "if", "fontsize_axislabel", ":", "for", "item", "in", "(", "ax", ".", "xaxis", ".", "label", ",", "ax", ".", "yaxis", ".", "label", ")", ":", "item", ".", "set_fontsize", "(", "fontsize_axislabel", ")", "ax", ".", "set_xticks", "(", "np", ".", "arange", "(", "0", ",", "n_cols", ")", ")", "ax", ".", "set_yticks", "(", "np", ".", "arange", "(", "0", ",", "n_rows", ")", ")", "if", "xticklabels", "is", "not", "None", ":", "ax", ".", "set_xticklabels", "(", "xticklabels", ",", "rotation", "=", "45", ",", "ha", "=", "'left'", ")", "if", "yticklabels", "is", "not", "None", ":", "ax", ".", "set_yticklabels", "(", "yticklabels", ")", "if", "fontsize_axisticks", ":", "for", "label", "in", "(", "ax", ".", "get_xticklabels", "(", ")", "+", "ax", ".", "get_yticklabels", "(", ")", ")", ":", "label", ".", "set_fontsize", "(", "fontsize_axisticks", ")", "# gridlines based on minor ticks", "if", "grid", ":", "ax", ".", "set_xticks", "(", "np", ".", "arange", "(", "-", ".5", ",", "n_cols", ")", ",", "minor", "=", "True", ")", "ax", ".", "set_yticks", "(", "np", ".", "arange", "(", "-", ".5", ",", "n_rows", ")", ",", "minor", "=", "True", ")", "ax", ".", "grid", "(", "which", "=", "'minor'", ",", "color", "=", "'w'", ",", "linestyle", "=", "'-'", ",", "linewidth", "=", "1", ")", "return", "fig", ",", "ax" ]
helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function
[ "helper", "function", "to", "plot", "a", "heatmap", "for", "a", "2D", "matrix", "data", "using", "matplotlib", "s", "matshow", "function" ]
python
train
32.958333
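plot_heatmap() draws onto an existing figure/axes pair rather than creating its own; a short matplotlib sketch shows the expected call shape. The data, labels, and title are invented, and the import path is the one given in the record.

import numpy as np
import matplotlib.pyplot as plt
from tmtoolkit.topicmod.visualize import plot_heatmap   # path from the record above

data = np.random.rand(4, 3)   # made-up 4 topics x 3 documents weight matrix

fig, ax = plt.subplots()
plot_heatmap(
    fig, ax, data,
    xaxislabel="document", yaxislabel="topic",
    xticklabels=["doc1", "doc2", "doc3"],
    yticklabels=["topic1", "topic2", "topic3", "topic4"],
    title="topic-document weights",
    legend=True,
)
fig.tight_layout()
plt.show()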
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/cmdline.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/cmdline.py#L474-L487
def help(self, error=None, topic=None, parser=None): """Display an error message, or the named topic.""" assert error or topic or parser if error: print(error) print("Use 'coverage help' for help.") elif parser: print(parser.format_help().strip()) else: help_msg = HELP_TOPICS.get(topic, '').strip() if help_msg: print(help_msg % self.covpkg.__dict__) else: print("Don't know topic %r" % topic)
[ "def", "help", "(", "self", ",", "error", "=", "None", ",", "topic", "=", "None", ",", "parser", "=", "None", ")", ":", "assert", "error", "or", "topic", "or", "parser", "if", "error", ":", "print", "(", "error", ")", "print", "(", "\"Use 'coverage help' for help.\"", ")", "elif", "parser", ":", "print", "(", "parser", ".", "format_help", "(", ")", ".", "strip", "(", ")", ")", "else", ":", "help_msg", "=", "HELP_TOPICS", ".", "get", "(", "topic", ",", "''", ")", ".", "strip", "(", ")", "if", "help_msg", ":", "print", "(", "help_msg", "%", "self", ".", "covpkg", ".", "__dict__", ")", "else", ":", "print", "(", "\"Don't know topic %r\"", "%", "topic", ")" ]
Display an error message, or the named topic.
[ "Display", "an", "error", "message", "or", "the", "named", "topic", "." ]
python
test
37.5
Riminder/python-riminder-api
riminder/webhook.py
https://github.com/Riminder/python-riminder-api/blob/01279f0ece08cf3d1dd45f76de6d9edf7fafec90/riminder/webhook.py#L59-L65
def setHandler(self, event_name, callback): """Set an handler for given event.""" if event_name not in self.handlers: raise ValueError('{} is not a valid event'.format(event_name)) if callable(event_name): raise TypeError('{} is not callable'.format(callback)) self.handlers[event_name] = callback
[ "def", "setHandler", "(", "self", ",", "event_name", ",", "callback", ")", ":", "if", "event_name", "not", "in", "self", ".", "handlers", ":", "raise", "ValueError", "(", "'{} is not a valid event'", ".", "format", "(", "event_name", ")", ")", "if", "callable", "(", "event_name", ")", ":", "raise", "TypeError", "(", "'{} is not callable'", ".", "format", "(", "callback", ")", ")", "self", ".", "handlers", "[", "event_name", "]", "=", "callback" ]
Set an handler for given event.
[ "Set", "an", "handler", "for", "given", "event", "." ]
python
train
49.571429
limpyd/redis-limpyd
limpyd/fields.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/fields.py#L640-L647
def _del(self, command, *args, **kwargs): """ Shortcut for commands that remove all values of the field. All will be deindexed. """ if self.indexable: self.deindex() return self._traverse_command(command, *args, **kwargs)
[ "def", "_del", "(", "self", ",", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "indexable", ":", "self", ".", "deindex", "(", ")", "return", "self", ".", "_traverse_command", "(", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Shortcut for commands that remove all values of the field. All will be deindexed.
[ "Shortcut", "for", "commands", "that", "remove", "all", "values", "of", "the", "field", ".", "All", "will", "be", "deindexed", "." ]
python
train
34.25
richardchien/nonebot
nonebot/__init__.py
https://github.com/richardchien/nonebot/blob/13ed9e4e87d9824b61592520aabda6d2737c8848/nonebot/__init__.py#L102-L105
def run(host: Optional[str] = None, port: Optional[int] = None, *args, **kwargs) -> None: """Run the NoneBot instance.""" get_bot().run(host=host, port=port, *args, **kwargs)
[ "def", "run", "(", "host", ":", "Optional", "[", "str", "]", "=", "None", ",", "port", ":", "Optional", "[", "int", "]", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "None", ":", "get_bot", "(", ")", ".", "run", "(", "host", "=", "host", ",", "port", "=", "port", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Run the NoneBot instance.
[ "Run", "the", "NoneBot", "instance", "." ]
python
train
46.75
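A hedged sketch of running a bot with the run() wrapper above. The init() call is the library's usual setup step and is assumed here rather than taken from this record.

import nonebot

nonebot.init()                            # assumed setup step, loads the default config
nonebot.run(host='127.0.0.1', port=8080)  # delegates to get_bot().run(...) as shown above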
juju/charm-helpers
charmhelpers/contrib/hardening/audits/__init__.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hardening/audits/__init__.py#L36-L54
def _take_action(self): """Determines whether to perform the action or not. Checks whether or not an action should be taken. This is determined by the truthy value for the unless parameter. If unless is a callback method, it will be invoked with no parameters in order to determine whether or not the action should be taken. Otherwise, the truthy value of the unless attribute will determine if the action should be performed. """ # Do the action if there isn't an unless override. if self.unless is None: return True # Invoke the callback if there is one. if hasattr(self.unless, '__call__'): return not self.unless() return not self.unless
[ "def", "_take_action", "(", "self", ")", ":", "# Do the action if there isn't an unless override.", "if", "self", ".", "unless", "is", "None", ":", "return", "True", "# Invoke the callback if there is one.", "if", "hasattr", "(", "self", ".", "unless", ",", "'__call__'", ")", ":", "return", "not", "self", ".", "unless", "(", ")", "return", "not", "self", ".", "unless" ]
Determines whether to perform the action or not. Checks whether or not an action should be taken. This is determined by the truthy value for the unless parameter. If unless is a callback method, it will be invoked with no parameters in order to determine whether or not the action should be taken. Otherwise, the truthy value of the unless attribute will determine if the action should be performed.
[ "Determines", "whether", "to", "perform", "the", "action", "or", "not", "." ]
python
train
39.684211
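The unless semantics in _take_action are easier to see in isolation, so here is a standalone sketch of the same decision rules rather than the charm-helpers class itself.

class Action:
    """Minimal stand-in that copies the decision rules from the record above."""

    def __init__(self, unless=None):
        self.unless = unless

    def _take_action(self):
        if self.unless is None:          # no override: always act
            return True
        if hasattr(self.unless, '__call__'):
            return not self.unless()     # callback decides
        return not self.unless           # plain truthy value decides

print(Action()._take_action())                      # True
print(Action(unless=True)._take_action())           # False, truthy override skips the action
print(Action(unless=lambda: False)._take_action())  # True, callback returned a falsy value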
odlgroup/odl
odl/util/vectorization.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/vectorization.py#L22-L29
def is_valid_input_array(x, ndim=None): """Test if ``x`` is a correctly shaped point array in R^d.""" x = np.asarray(x) if ndim is None or ndim == 1: return x.ndim == 1 and x.size > 1 or x.ndim == 2 and x.shape[0] == 1 else: return x.ndim == 2 and x.shape[0] == ndim
[ "def", "is_valid_input_array", "(", "x", ",", "ndim", "=", "None", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "if", "ndim", "is", "None", "or", "ndim", "==", "1", ":", "return", "x", ".", "ndim", "==", "1", "and", "x", ".", "size", ">", "1", "or", "x", ".", "ndim", "==", "2", "and", "x", ".", "shape", "[", "0", "]", "==", "1", "else", ":", "return", "x", ".", "ndim", "==", "2", "and", "x", ".", "shape", "[", "0", "]", "==", "ndim" ]
Test if ``x`` is a correctly shaped point array in R^d.
[ "Test", "if", "x", "is", "a", "correctly", "shaped", "point", "array", "in", "R^d", "." ]
python
train
36.5
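Example calls for is_valid_input_array; the expected results are read off the boolean expression in the record.

from odl.util.vectorization import is_valid_input_array  # module path taken from this record

print(is_valid_input_array([1.0, 2.0, 3.0]))                 # True, 1-d array with more than one point
print(is_valid_input_array([[1.0, 2.0, 3.0]]))               # True, shape (1, 3) is accepted when ndim is None
print(is_valid_input_array([[1, 2, 3], [4, 5, 6]], ndim=2))  # True, shape (2, 3) with ndim == 2
print(is_valid_input_array([1.0], ndim=1))                   # False, a single point is rejected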
aamalev/aiohttp_apiset
aiohttp_apiset/compat.py
https://github.com/aamalev/aiohttp_apiset/blob/ba3492ce929e39be1325d506b727a8bfb34e7b33/aiohttp_apiset/compat.py#L364-L368
def add_put(self, *args, **kwargs): """ Shortcut for add_route with method PUT """ return self.add_route(hdrs.METH_PUT, *args, **kwargs)
[ "def", "add_put", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "add_route", "(", "hdrs", ".", "METH_PUT", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Shortcut for add_route with method PUT
[ "Shortcut", "for", "add_route", "with", "method", "PUT" ]
python
train
32.8
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2380-L2406
def do_levmarq_all_particle_groups(s, region_size=40, max_iter=2, damping=1.0, decrease_damp_factor=10., run_length=4, collect_stats=False, **kwargs): """ Levenberg-Marquardt optimization for every particle in the state. Convenience wrapper for LMParticleGroupCollection. Same keyword args, but I've set the defaults to what I've found to be useful values for optimizing particles. See LMParticleGroupCollection for documentation. See Also -------- do_levmarq_particles : Levenberg-Marquardt optimization of a specified set of particles. do_levmarq : Levenberg-Marquardt optimization of the entire state; useful for optimizing global parameters. LMParticleGroupCollection : The workhorse of do_levmarq. LMEngine : Engine superclass for all the optimizers. """ lp = LMParticleGroupCollection(s, region_size=region_size, damping=damping, run_length=run_length, decrease_damp_factor=decrease_damp_factor, get_cos=collect_stats, max_iter=max_iter, **kwargs) lp.do_run_2() if collect_stats: return lp.stats
[ "def", "do_levmarq_all_particle_groups", "(", "s", ",", "region_size", "=", "40", ",", "max_iter", "=", "2", ",", "damping", "=", "1.0", ",", "decrease_damp_factor", "=", "10.", ",", "run_length", "=", "4", ",", "collect_stats", "=", "False", ",", "*", "*", "kwargs", ")", ":", "lp", "=", "LMParticleGroupCollection", "(", "s", ",", "region_size", "=", "region_size", ",", "damping", "=", "damping", ",", "run_length", "=", "run_length", ",", "decrease_damp_factor", "=", "decrease_damp_factor", ",", "get_cos", "=", "collect_stats", ",", "max_iter", "=", "max_iter", ",", "*", "*", "kwargs", ")", "lp", ".", "do_run_2", "(", ")", "if", "collect_stats", ":", "return", "lp", ".", "stats" ]
Levenberg-Marquardt optimization for every particle in the state. Convenience wrapper for LMParticleGroupCollection. Same keyword args, but I've set the defaults to what I've found to be useful values for optimizing particles. See LMParticleGroupCollection for documentation. See Also -------- do_levmarq_particles : Levenberg-Marquardt optimization of a specified set of particles. do_levmarq : Levenberg-Marquardt optimization of the entire state; useful for optimizing global parameters. LMParticleGroupCollection : The workhorse of do_levmarq. LMEngine : Engine superclass for all the optimizers.
[ "Levenberg", "-", "Marquardt", "optimization", "for", "every", "particle", "in", "the", "state", "." ]
python
valid
41.481481
FulcrumTechnologies/pyconfluence
pyconfluence/actions.py
https://github.com/FulcrumTechnologies/pyconfluence/blob/a999726dbc1cbdd3d9062234698eeae799ce84ce/pyconfluence/actions.py#L151-L162
def get_page_id(name, space): """Return id of a page based on passed page name and space. Parameters: - name: name of a Confluence page. - space: space the Confluence page is in. """ data = _json.loads(_api.rest("?title=" + name.replace(" ", "%20") + "&" "spaceKey=" + space + "&expand=history")) try: return data["results"][0]["id"] except: return ("Page not found!")
[ "def", "get_page_id", "(", "name", ",", "space", ")", ":", "data", "=", "_json", ".", "loads", "(", "_api", ".", "rest", "(", "\"?title=\"", "+", "name", ".", "replace", "(", "\" \"", ",", "\"%20\"", ")", "+", "\"&\"", "\"spaceKey=\"", "+", "space", "+", "\"&expand=history\"", ")", ")", "try", ":", "return", "data", "[", "\"results\"", "]", "[", "0", "]", "[", "\"id\"", "]", "except", ":", "return", "(", "\"Page not found!\"", ")" ]
Return id of a page based on passed page name and space. Parameters: - name: name of a Confluence page. - space: space the Confluence page is in.
[ "Return", "id", "of", "a", "page", "based", "on", "passed", "page", "name", "and", "space", ".", "Parameters", ":", "-", "name", ":", "name", "of", "a", "Confluence", "page", ".", "-", "space", ":", "space", "the", "Confluence", "page", "is", "in", "." ]
python
train
35.666667
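A usage sketch for get_page_id. The Confluence credentials that pyconfluence reads from its environment are assumed to be configured already, and the page and space names are made up.

from pyconfluence.actions import get_page_id  # import path inferred from this record's file path

page_id = get_page_id('Release Notes', 'DOCS')
print(page_id)  # the page id on success, or the "Page not found!" string otherwise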
shoebot/shoebot
shoebot/grammar/bot.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/grammar/bot.py#L324-L360
def snapshot(self, target=None, defer=None, autonumber=False): '''Save the contents of current surface into a file or cairo surface/context :param filename: Can be a filename or a Cairo surface. :param defer: If true, buffering/threading may be employed however output will not be immediate. :param autonumber: If true then a number will be appended to the filename. ''' if autonumber: file_number = self._frame else: file_number = None if isinstance(target, cairo.Surface): # snapshot to Cairo surface if defer is None: self._canvas.snapshot(surface, defer) defer = False ctx = cairo.Context(target) # this used to be self._canvas.snapshot, but I couldn't make it work. # self._canvas.snapshot(target, defer) # TODO: check if this breaks when taking more than 1 snapshot self._canvas._drawqueue.render(ctx) return elif target is None: # If nothing specified, use a default filename from the script name script_file = self._namespace.get('__file__') if script_file: target = os.path.splitext(script_file)[0] + '.svg' file_number = True if target: # snapshot to file, target is a filename if defer is None: defer = True self._canvas.snapshot(target, defer=defer, file_number=file_number) else: raise ShoebotError('No image saved')
[ "def", "snapshot", "(", "self", ",", "target", "=", "None", ",", "defer", "=", "None", ",", "autonumber", "=", "False", ")", ":", "if", "autonumber", ":", "file_number", "=", "self", ".", "_frame", "else", ":", "file_number", "=", "None", "if", "isinstance", "(", "target", ",", "cairo", ".", "Surface", ")", ":", "# snapshot to Cairo surface", "if", "defer", "is", "None", ":", "self", ".", "_canvas", ".", "snapshot", "(", "surface", ",", "defer", ")", "defer", "=", "False", "ctx", "=", "cairo", ".", "Context", "(", "target", ")", "# this used to be self._canvas.snapshot, but I couldn't make it work.", "# self._canvas.snapshot(target, defer)", "# TODO: check if this breaks when taking more than 1 snapshot", "self", ".", "_canvas", ".", "_drawqueue", ".", "render", "(", "ctx", ")", "return", "elif", "target", "is", "None", ":", "# If nothing specified, use a default filename from the script name", "script_file", "=", "self", ".", "_namespace", ".", "get", "(", "'__file__'", ")", "if", "script_file", ":", "target", "=", "os", ".", "path", ".", "splitext", "(", "script_file", ")", "[", "0", "]", "+", "'.svg'", "file_number", "=", "True", "if", "target", ":", "# snapshot to file, target is a filename", "if", "defer", "is", "None", ":", "defer", "=", "True", "self", ".", "_canvas", ".", "snapshot", "(", "target", ",", "defer", "=", "defer", ",", "file_number", "=", "file_number", ")", "else", ":", "raise", "ShoebotError", "(", "'No image saved'", ")" ]
Save the contents of current surface into a file or cairo surface/context :param filename: Can be a filename or a Cairo surface. :param defer: If true, buffering/threading may be employed however output will not be immediate. :param autonumber: If true then a number will be appended to the filename.
[ "Save", "the", "contents", "of", "current", "surface", "into", "a", "file", "or", "cairo", "surface", "/", "context" ]
python
valid
42.378378
SBRG/ssbio
ssbio/protein/sequence/properties/scratch.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/scratch.py#L222-L244
def read_accpro20(infile): """Read the accpro20 output (.acc20) and return the parsed FASTA records. Keeps the spaces between the accessibility numbers. Args: infile: Path to .acc20 file Returns: dict: Dictionary of accessibilities with keys as the ID """ with open(infile) as f: records = f.read().splitlines() accpro20_dict = {} for i, r in enumerate(records): if i % 2 == 0: # TODO: Double check how to parse FASTA IDs (can they have a space because that is what i split by) # Key was originally records[i][1:] accpro20_dict[records[i].split(' ')[0][1:]] = [int(x) for x in records[i + 1].split(' ')] return accpro20_dict
[ "def", "read_accpro20", "(", "infile", ")", ":", "with", "open", "(", "infile", ")", "as", "f", ":", "records", "=", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", "accpro20_dict", "=", "{", "}", "for", "i", ",", "r", "in", "enumerate", "(", "records", ")", ":", "if", "i", "%", "2", "==", "0", ":", "# TODO: Double check how to parse FASTA IDs (can they have a space because that is what i split by)", "# Key was originally records[i][1:]", "accpro20_dict", "[", "records", "[", "i", "]", ".", "split", "(", "' '", ")", "[", "0", "]", "[", "1", ":", "]", "]", "=", "[", "int", "(", "x", ")", "for", "x", "in", "records", "[", "i", "+", "1", "]", ".", "split", "(", "' '", ")", "]", "return", "accpro20_dict" ]
Read the accpro20 output (.acc20) and return the parsed FASTA records. Keeps the spaces between the accessibility numbers. Args: infile: Path to .acc20 file Returns: dict: Dictionary of accessibilities with keys as the ID
[ "Read", "the", "accpro20", "output", "(", ".", "acc20", ")", "and", "return", "the", "parsed", "FASTA", "records", "." ]
python
train
30.956522
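A self-contained sketch of the two-line .acc20 layout that read_accpro20 expects, derived only from the parsing code in the record.

from ssbio.protein.sequence.properties.scratch import read_accpro20  # path from this record

# build a tiny fake .acc20 file: an ID line followed by space-separated accessibility values
with open('example.acc20', 'w') as f:
    f.write('>prot1\n')
    f.write('5 10 95 100\n')

print(read_accpro20('example.acc20'))  # {'prot1': [5, 10, 95, 100]}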
googleapis/google-cloud-python
spanner/google/cloud/spanner_v1/gapic/spanner_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/spanner/google/cloud/spanner_v1/gapic/spanner_client.py#L94-L102
def session_path(cls, project, instance, database, session): """Return a fully-qualified session string.""" return google.api_core.path_template.expand( "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}", project=project, instance=instance, database=database, session=session, )
[ "def", "session_path", "(", "cls", ",", "project", ",", "instance", ",", "database", ",", "session", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/instances/{instance}/databases/{database}/sessions/{session}\"", ",", "project", "=", "project", ",", "instance", "=", "instance", ",", "database", "=", "database", ",", "session", "=", "session", ",", ")" ]
Return a fully-qualified session string.
[ "Return", "a", "fully", "-", "qualified", "session", "string", "." ]
python
train
42.777778
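session_path is a pure string helper, so it can be called on the class without any credentials; the output follows directly from the template in the record.

from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient  # module path from this record

path = SpannerClient.session_path('my-project', 'my-instance', 'my-db', 'my-session')
print(path)
# projects/my-project/instances/my-instance/databases/my-db/sessions/my-session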
henrysher/kotocore
kotocore/session.py
https://github.com/henrysher/kotocore/blob/c52d2f3878b924ceabca07f61c91abcb1b230ecc/kotocore/session.py#L97-L131
def get_resource(self, service_name, resource_name, base_class=None): """ Returns a ``Resource`` **class** for a given service. :param service_name: A string that specifies the name of the desired service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param resource_name: A string that specifies the name of the desired class. Ex. ``Queue``, ``Notification``, ``Table``, etc. :type resource_name: string :param base_class: (Optional) The base class of the object. Prevents "magically" loading the wrong class (one with a different base). :type base_class: class :rtype: <kotocore.resources.Resource subclass> """ try: return self.cache.get_resource( service_name, resource_name, base_class=base_class ) except NotCached: pass # We didn't find it. Construct it. new_class = self.resource_factory.construct_for( service_name, resource_name, base_class=base_class ) self.cache.set_resource(service_name, resource_name, new_class) return new_class
[ "def", "get_resource", "(", "self", ",", "service_name", ",", "resource_name", ",", "base_class", "=", "None", ")", ":", "try", ":", "return", "self", ".", "cache", ".", "get_resource", "(", "service_name", ",", "resource_name", ",", "base_class", "=", "base_class", ")", "except", "NotCached", ":", "pass", "# We didn't find it. Construct it.", "new_class", "=", "self", ".", "resource_factory", ".", "construct_for", "(", "service_name", ",", "resource_name", ",", "base_class", "=", "base_class", ")", "self", ".", "cache", ".", "set_resource", "(", "service_name", ",", "resource_name", ",", "new_class", ")", "return", "new_class" ]
Returns a ``Resource`` **class** for a given service. :param service_name: A string that specifies the name of the desired service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param resource_name: A string that specifies the name of the desired class. Ex. ``Queue``, ``Notification``, ``Table``, etc. :type resource_name: string :param base_class: (Optional) The base class of the object. Prevents "magically" loading the wrong class (one with a different base). :type base_class: class :rtype: <kotocore.resources.Resource subclass>
[ "Returns", "a", "Resource", "**", "class", "**", "for", "a", "given", "service", "." ]
python
train
35.142857
craffel/mir_eval
mir_eval/util.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L13-L52
def index_labels(labels, case_sensitive=False): """Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]`` """ label_to_index = {} index_to_label = {} # If we're not case-sensitive, if not case_sensitive: labels = [str(s).lower() for s in labels] # First, build the unique label mapping for index, s in enumerate(sorted(set(labels))): label_to_index[s] = index index_to_label[index] = s # Remap the labels to indices indices = [label_to_index[s] for s in labels] # Return the converted labels, and the inverse mapping return indices, index_to_label
[ "def", "index_labels", "(", "labels", ",", "case_sensitive", "=", "False", ")", ":", "label_to_index", "=", "{", "}", "index_to_label", "=", "{", "}", "# If we're not case-sensitive,", "if", "not", "case_sensitive", ":", "labels", "=", "[", "str", "(", "s", ")", ".", "lower", "(", ")", "for", "s", "in", "labels", "]", "# First, build the unique label mapping", "for", "index", ",", "s", "in", "enumerate", "(", "sorted", "(", "set", "(", "labels", ")", ")", ")", ":", "label_to_index", "[", "s", "]", "=", "index", "index_to_label", "[", "index", "]", "=", "s", "# Remap the labels to indices", "indices", "=", "[", "label_to_index", "[", "s", "]", "for", "s", "in", "labels", "]", "# Return the converted labels, and the inverse mapping", "return", "indices", ",", "index_to_label" ]
Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]``
[ "Convert", "a", "list", "of", "string", "identifiers", "into", "numerical", "indices", "." ]
python
train
27.975
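Example of index_labels. The results below follow from the code in the record; note the labels are lower-cased first because case_sensitive defaults to False.

from mir_eval.util import index_labels

indices, index_to_label = index_labels(['Verse', 'Chorus', 'Verse', 'Bridge'])
print(indices)         # [2, 1, 2, 0], since the sorted unique labels are ['bridge', 'chorus', 'verse']
print(index_to_label)  # {0: 'bridge', 1: 'chorus', 2: 'verse'}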
obriencj/python-javatools
javatools/report.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/report.py#L388-L398
def _indent(stream, indent, *msgs): """ write a message to a text stream, with indentation. Also ensures that the output encoding of the messages is safe for writing. """ for x in range(0, indent): stream.write(" ") for x in msgs: # Any nicer way? In Py2 x can be 'str' or 'unicode'. stream.write(x.encode("ascii", "backslashreplace").decode("ascii")) stream.write("\n")
[ "def", "_indent", "(", "stream", ",", "indent", ",", "*", "msgs", ")", ":", "for", "x", "in", "range", "(", "0", ",", "indent", ")", ":", "stream", ".", "write", "(", "\" \"", ")", "for", "x", "in", "msgs", ":", "# Any nicer way? In Py2 x can be 'str' or 'unicode'.", "stream", ".", "write", "(", "x", ".", "encode", "(", "\"ascii\"", ",", "\"backslashreplace\"", ")", ".", "decode", "(", "\"ascii\"", ")", ")", "stream", ".", "write", "(", "\"\\n\"", ")" ]
write a message to a text stream, with indentation. Also ensures that the output encoding of the messages is safe for writing.
[ "write", "a", "message", "to", "a", "text", "stream", "with", "indentation", ".", "Also", "ensures", "that", "the", "output", "encoding", "of", "the", "messages", "is", "safe", "for", "writing", "." ]
python
train
37
inasafe/inasafe
safe/gui/tools/wizard/step_kw40_classify.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_kw40_classify.py#L148-L277
def set_widgets(self): """Set widgets on the Classify tab.""" purpose = self.parent.step_kw_purpose.selected_purpose() subcategory = self.parent.step_kw_subcategory.selected_subcategory() classification = self.parent.step_kw_classification.\ selected_classification() classification_name = classification['name'] if is_raster_layer(self.parent.layer): self.lblClassify.setText(classify_raster_question % ( subcategory['name'], purpose['name'], classification_name)) dataset = gdal.Open(self.parent.layer.source(), GA_ReadOnly) active_band = self.parent.step_kw_band_selector.selected_band() unique_values = numpy.unique(numpy.array( dataset.GetRasterBand(active_band).ReadAsArray())) field_type = 0 # Convert datatype to a json serializable type if numpy.issubdtype(unique_values.dtype, float): unique_values = [float(i) for i in unique_values] else: unique_values = [int(i) for i in unique_values] else: field = self.parent.step_kw_field.selected_fields() field_index = self.parent.layer.fields().indexFromName(field) field_type = self.parent.layer.fields()[field_index].type() self.lblClassify.setText(classify_vector_question % ( subcategory['name'], purpose['name'], classification_name, field.upper())) unique_values = self.parent.layer.uniqueValues(field_index) clean_unique_values = [] for unique_value in unique_values: if (unique_value is None or (hasattr(unique_value, 'isNull') and unique_value.isNull())): # Don't classify features with NULL value continue clean_unique_values.append(unique_value) # get default classes default_classes = deepcopy(classification['classes']) if classification['key'] == data_driven_classes['key']: for unique_value in clean_unique_values: name = str(unique_value).upper().replace('_', ' ') default_class = {'key': unique_value, 'name': name, # 'description': tr('Settlement'), 'string_defaults': [name]} default_classes.append(default_class) # Assign unique values to classes (according to default) unassigned_values = list() assigned_values = dict() for default_class in default_classes: assigned_values[default_class['key']] = list() for unique_value in clean_unique_values: # Capitalization of the value and removing '_' (raw OSM data). value_as_string = str(unique_value).upper().replace('_', ' ') assigned = False for default_class in default_classes: if 'string_defaults' in default_class: # To make it case insensitive upper_string_defaults = [ c.upper() for c in default_class['string_defaults']] in_string_default = ( value_as_string in upper_string_defaults) condition_1 = field_type > 9 and in_string_default else: condition_1 = False condition_2 = ( field_type < 10 and 'numeric_default_min' in default_class and 'numeric_default_max' in default_class and (default_class['numeric_default_min'] <= unique_value <= default_class['numeric_default_max'])) if condition_1 or condition_2: assigned_values[default_class['key']] += [unique_value] assigned = True if not assigned: # add to unassigned values list otherwise unassigned_values += [unique_value] self.populate_classified_values( unassigned_values, assigned_values, default_classes) # Overwrite assigned values according to existing keyword (if present). # Note the default_classes and unique_values are already loaded! 
value_map = self.parent.get_existing_keyword('value_map') value_map_classification_name = self.parent.get_existing_keyword( 'classification') # Do not continue if there is no value_map in existing keywords if (value_map is None or value_map_classification_name != classification['key']): return # Do not continue if user selected different field field_keyword = self.parent.field_keyword_for_the_layer() field = self.parent.get_existing_keyword('inasafe_fields').get( field_keyword) if (not is_raster_layer(self.parent.layer) and field != self.parent.step_kw_field.selected_fields()): return unassigned_values = list() assigned_values = dict() for default_class in default_classes: assigned_values[default_class['key']] = list() if isinstance(value_map, str): try: value_map = json.loads(value_map) except ValueError: return for unique_value in clean_unique_values: # check in value map assigned = False for key, value_list in list(value_map.items()): if unique_value in value_list and key in assigned_values: assigned_values[key] += [unique_value] assigned = True if not assigned: unassigned_values += [unique_value] self.populate_classified_values( unassigned_values, assigned_values, default_classes)
[ "def", "set_widgets", "(", "self", ")", ":", "purpose", "=", "self", ".", "parent", ".", "step_kw_purpose", ".", "selected_purpose", "(", ")", "subcategory", "=", "self", ".", "parent", ".", "step_kw_subcategory", ".", "selected_subcategory", "(", ")", "classification", "=", "self", ".", "parent", ".", "step_kw_classification", ".", "selected_classification", "(", ")", "classification_name", "=", "classification", "[", "'name'", "]", "if", "is_raster_layer", "(", "self", ".", "parent", ".", "layer", ")", ":", "self", ".", "lblClassify", ".", "setText", "(", "classify_raster_question", "%", "(", "subcategory", "[", "'name'", "]", ",", "purpose", "[", "'name'", "]", ",", "classification_name", ")", ")", "dataset", "=", "gdal", ".", "Open", "(", "self", ".", "parent", ".", "layer", ".", "source", "(", ")", ",", "GA_ReadOnly", ")", "active_band", "=", "self", ".", "parent", ".", "step_kw_band_selector", ".", "selected_band", "(", ")", "unique_values", "=", "numpy", ".", "unique", "(", "numpy", ".", "array", "(", "dataset", ".", "GetRasterBand", "(", "active_band", ")", ".", "ReadAsArray", "(", ")", ")", ")", "field_type", "=", "0", "# Convert datatype to a json serializable type", "if", "numpy", ".", "issubdtype", "(", "unique_values", ".", "dtype", ",", "float", ")", ":", "unique_values", "=", "[", "float", "(", "i", ")", "for", "i", "in", "unique_values", "]", "else", ":", "unique_values", "=", "[", "int", "(", "i", ")", "for", "i", "in", "unique_values", "]", "else", ":", "field", "=", "self", ".", "parent", ".", "step_kw_field", ".", "selected_fields", "(", ")", "field_index", "=", "self", ".", "parent", ".", "layer", ".", "fields", "(", ")", ".", "indexFromName", "(", "field", ")", "field_type", "=", "self", ".", "parent", ".", "layer", ".", "fields", "(", ")", "[", "field_index", "]", ".", "type", "(", ")", "self", ".", "lblClassify", ".", "setText", "(", "classify_vector_question", "%", "(", "subcategory", "[", "'name'", "]", ",", "purpose", "[", "'name'", "]", ",", "classification_name", ",", "field", ".", "upper", "(", ")", ")", ")", "unique_values", "=", "self", ".", "parent", ".", "layer", ".", "uniqueValues", "(", "field_index", ")", "clean_unique_values", "=", "[", "]", "for", "unique_value", "in", "unique_values", ":", "if", "(", "unique_value", "is", "None", "or", "(", "hasattr", "(", "unique_value", ",", "'isNull'", ")", "and", "unique_value", ".", "isNull", "(", ")", ")", ")", ":", "# Don't classify features with NULL value", "continue", "clean_unique_values", ".", "append", "(", "unique_value", ")", "# get default classes", "default_classes", "=", "deepcopy", "(", "classification", "[", "'classes'", "]", ")", "if", "classification", "[", "'key'", "]", "==", "data_driven_classes", "[", "'key'", "]", ":", "for", "unique_value", "in", "clean_unique_values", ":", "name", "=", "str", "(", "unique_value", ")", ".", "upper", "(", ")", ".", "replace", "(", "'_'", ",", "' '", ")", "default_class", "=", "{", "'key'", ":", "unique_value", ",", "'name'", ":", "name", ",", "# 'description': tr('Settlement'),", "'string_defaults'", ":", "[", "name", "]", "}", "default_classes", ".", "append", "(", "default_class", ")", "# Assign unique values to classes (according to default)", "unassigned_values", "=", "list", "(", ")", "assigned_values", "=", "dict", "(", ")", "for", "default_class", "in", "default_classes", ":", "assigned_values", "[", "default_class", "[", "'key'", "]", "]", "=", "list", "(", ")", "for", "unique_value", "in", "clean_unique_values", ":", "# Capitalization of the value 
and removing '_' (raw OSM data).", "value_as_string", "=", "str", "(", "unique_value", ")", ".", "upper", "(", ")", ".", "replace", "(", "'_'", ",", "' '", ")", "assigned", "=", "False", "for", "default_class", "in", "default_classes", ":", "if", "'string_defaults'", "in", "default_class", ":", "# To make it case insensitive", "upper_string_defaults", "=", "[", "c", ".", "upper", "(", ")", "for", "c", "in", "default_class", "[", "'string_defaults'", "]", "]", "in_string_default", "=", "(", "value_as_string", "in", "upper_string_defaults", ")", "condition_1", "=", "field_type", ">", "9", "and", "in_string_default", "else", ":", "condition_1", "=", "False", "condition_2", "=", "(", "field_type", "<", "10", "and", "'numeric_default_min'", "in", "default_class", "and", "'numeric_default_max'", "in", "default_class", "and", "(", "default_class", "[", "'numeric_default_min'", "]", "<=", "unique_value", "<=", "default_class", "[", "'numeric_default_max'", "]", ")", ")", "if", "condition_1", "or", "condition_2", ":", "assigned_values", "[", "default_class", "[", "'key'", "]", "]", "+=", "[", "unique_value", "]", "assigned", "=", "True", "if", "not", "assigned", ":", "# add to unassigned values list otherwise", "unassigned_values", "+=", "[", "unique_value", "]", "self", ".", "populate_classified_values", "(", "unassigned_values", ",", "assigned_values", ",", "default_classes", ")", "# Overwrite assigned values according to existing keyword (if present).", "# Note the default_classes and unique_values are already loaded!", "value_map", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'value_map'", ")", "value_map_classification_name", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'classification'", ")", "# Do not continue if there is no value_map in existing keywords", "if", "(", "value_map", "is", "None", "or", "value_map_classification_name", "!=", "classification", "[", "'key'", "]", ")", ":", "return", "# Do not continue if user selected different field", "field_keyword", "=", "self", ".", "parent", ".", "field_keyword_for_the_layer", "(", ")", "field", "=", "self", ".", "parent", ".", "get_existing_keyword", "(", "'inasafe_fields'", ")", ".", "get", "(", "field_keyword", ")", "if", "(", "not", "is_raster_layer", "(", "self", ".", "parent", ".", "layer", ")", "and", "field", "!=", "self", ".", "parent", ".", "step_kw_field", ".", "selected_fields", "(", ")", ")", ":", "return", "unassigned_values", "=", "list", "(", ")", "assigned_values", "=", "dict", "(", ")", "for", "default_class", "in", "default_classes", ":", "assigned_values", "[", "default_class", "[", "'key'", "]", "]", "=", "list", "(", ")", "if", "isinstance", "(", "value_map", ",", "str", ")", ":", "try", ":", "value_map", "=", "json", ".", "loads", "(", "value_map", ")", "except", "ValueError", ":", "return", "for", "unique_value", "in", "clean_unique_values", ":", "# check in value map", "assigned", "=", "False", "for", "key", ",", "value_list", "in", "list", "(", "value_map", ".", "items", "(", ")", ")", ":", "if", "unique_value", "in", "value_list", "and", "key", "in", "assigned_values", ":", "assigned_values", "[", "key", "]", "+=", "[", "unique_value", "]", "assigned", "=", "True", "if", "not", "assigned", ":", "unassigned_values", "+=", "[", "unique_value", "]", "self", ".", "populate_classified_values", "(", "unassigned_values", ",", "assigned_values", ",", "default_classes", ")" ]
Set widgets on the Classify tab.
[ "Set", "widgets", "on", "the", "Classify", "tab", "." ]
python
train
45.307692
timgabets/pynblock
pynblock/tools.py
https://github.com/timgabets/pynblock/blob/dbdb6d06bd7741e1138bed09d874b47b23d8d200/pynblock/tools.py#L149-L167
def get_clear_pin(pinblock, account_number): """ Calculate the clear PIN from provided PIN block and account_number, which is the 12 right-most digits of card account number, excluding check digit """ raw_pinblock = bytes.fromhex(pinblock.decode('utf-8')) raw_acct_num = bytes.fromhex((b'0000' + account_number).decode('utf-8')) pin_str = xor(raw2B(raw_pinblock), raw2B(raw_acct_num)).decode('utf-8') pin_length = int(pin_str[:2], 16) if pin_length >= 4 and pin_length < 9: pin = pin_str[2:2+pin_length] try: int(pin) except ValueError: raise ValueError('PIN contains non-numeric characters') return bytes(pin, 'utf-8') else: raise ValueError('Incorrect PIN length: {}'.format(pin_length))
[ "def", "get_clear_pin", "(", "pinblock", ",", "account_number", ")", ":", "raw_pinblock", "=", "bytes", ".", "fromhex", "(", "pinblock", ".", "decode", "(", "'utf-8'", ")", ")", "raw_acct_num", "=", "bytes", ".", "fromhex", "(", "(", "b'0000'", "+", "account_number", ")", ".", "decode", "(", "'utf-8'", ")", ")", "pin_str", "=", "xor", "(", "raw2B", "(", "raw_pinblock", ")", ",", "raw2B", "(", "raw_acct_num", ")", ")", ".", "decode", "(", "'utf-8'", ")", "pin_length", "=", "int", "(", "pin_str", "[", ":", "2", "]", ",", "16", ")", "if", "pin_length", ">=", "4", "and", "pin_length", "<", "9", ":", "pin", "=", "pin_str", "[", "2", ":", "2", "+", "pin_length", "]", "try", ":", "int", "(", "pin", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'PIN contains non-numeric characters'", ")", "return", "bytes", "(", "pin", ",", "'utf-8'", ")", "else", ":", "raise", "ValueError", "(", "'Incorrect PIN length: {}'", ".", "format", "(", "pin_length", ")", ")" ]
Calculate the clear PIN from provided PIN block and account_number, which is the 12 right-most digits of card account number, excluding check digit
[ "Calculate", "the", "clear", "PIN", "from", "provided", "PIN", "block", "and", "account_number", "which", "is", "the", "12", "right", "-", "most", "digits", "of", "card", "account", "number", "excluding", "check", "digit" ]
python
train
41.947368
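A worked example for get_clear_pin with made-up card data. The PIN block below is the ISO format-0 encoding of PIN 1234 for the account digits 400000123456, so XOR-ing it with 0000 plus the account number should recover the PIN field 041234FFFFFFFFFF and therefore b'1234'; the values are illustrative only.

from pynblock.tools import get_clear_pin  # module path from this record

pin_block = b'041274FFFFEDCBA9'  # ISO-0 block for PIN 1234 and the account below
account = b'400000123456'        # 12 right-most digits of the PAN, check digit excluded

print(get_clear_pin(pin_block, account))  # b'1234'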
Kane610/axis
axis/port_cgi.py
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/port_cgi.py#L42-L58
def process_raw(self, raw: dict) -> None: """Pre-process raw dict. Prepare parameters to work with APIItems. """ raw_ports = {} for param in raw: port_index = REGEX_PORT_INDEX.search(param).group(0) if port_index not in raw_ports: raw_ports[port_index] = {} name = param.replace(IOPORT + '.I' + port_index + '.', '') raw_ports[port_index][name] = raw[param] super().process_raw(raw_ports)
[ "def", "process_raw", "(", "self", ",", "raw", ":", "dict", ")", "->", "None", ":", "raw_ports", "=", "{", "}", "for", "param", "in", "raw", ":", "port_index", "=", "REGEX_PORT_INDEX", ".", "search", "(", "param", ")", ".", "group", "(", "0", ")", "if", "port_index", "not", "in", "raw_ports", ":", "raw_ports", "[", "port_index", "]", "=", "{", "}", "name", "=", "param", ".", "replace", "(", "IOPORT", "+", "'.I'", "+", "port_index", "+", "'.'", ",", "''", ")", "raw_ports", "[", "port_index", "]", "[", "name", "]", "=", "raw", "[", "param", "]", "super", "(", ")", ".", "process_raw", "(", "raw_ports", ")" ]
Pre-process raw dict. Prepare parameters to work with APIItems.
[ "Pre", "-", "process", "raw", "dict", "." ]
python
train
28.764706
globocom/GloboNetworkAPI-client-python
networkapiclient/ClientFactory.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ClientFactory.py#L181-L188
def create_api_ipv4(self): """Get an instance of Api IPv4 services facade.""" return ApiIPv4( self.networkapi_url, self.user, self.password, self.user_ldap)
[ "def", "create_api_ipv4", "(", "self", ")", ":", "return", "ApiIPv4", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Get an instance of Api IPv4 services facade.
[ "Get", "an", "instance", "of", "Api", "IPv4", "services", "facade", "." ]
python
train
26.75
jay-johnson/spylunking
spylunking/wait_for_exit.py
https://github.com/jay-johnson/spylunking/blob/95cc86776f04ec5935cf04e291cf18798345d6cb/spylunking/wait_for_exit.py#L6-L70
def wait_for_exit( log, debug=False): """wait_for_exit Sleep to allow the thread to pick up final messages before exiting and stopping the Splunk HTTP publisher. You can decrease this delay (in seconds) by reducing the splunk_sleep_interval or by exporting the env var: export SPLUNK_SLEEP_INTERVAL=0.5 If you set the timer to 0 then it will be a blocking HTTP POST sent to Splunk for each log message. This creates a blocking logger in your application that will wait until each log's HTTP POST was received before continuing. Note: Reducing this Splunk sleep timer could result in losing messages that were stuck in the queue when the parent process exits. The multiprocessing Splunk Publisher was built to do this, but will not work in certain frameworks like Celery as it requires access to spawn daemon processes to prevent this 'message loss' case during exiting. Applications using this library should ensure there's no critical log messages stuck in a queue when stopping a long-running process. :param log: created logger :param debug: bool to debug with prints """ debug = SPLUNK_DEBUG for i in log.root.handlers: handler_class_name = i.__class__.__name__.lower() if debug: print(( ' - wait_for_exit handler={}').format( handler_class_name)) if ('splunkpublisher' == handler_class_name or 'mpsplunkpublisher' == handler_class_name): if hasattr(i, 'sleep_interval'): total_sleep = i.sleep_interval + 2.0 if os.getenv( 'PUBLISHER_EXIT_DELAY', False): total_sleep = float(os.getenv( 'PUBLISHER_EXIT_DELAY', total_sleep)) if debug: print(( ' - wait_for_exit ' 'handler={} wait={}s').format( handler_class_name, total_sleep)) time.sleep(total_sleep) if debug: print(( 'done waiting for exit')) return else: print(( ' - wait_for_exit handler={} has no' 'sleep_interval').format( handler_class_name))
[ "def", "wait_for_exit", "(", "log", ",", "debug", "=", "False", ")", ":", "debug", "=", "SPLUNK_DEBUG", "for", "i", "in", "log", ".", "root", ".", "handlers", ":", "handler_class_name", "=", "i", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", "if", "debug", ":", "print", "(", "(", "' - wait_for_exit handler={}'", ")", ".", "format", "(", "handler_class_name", ")", ")", "if", "(", "'splunkpublisher'", "==", "handler_class_name", "or", "'mpsplunkpublisher'", "==", "handler_class_name", ")", ":", "if", "hasattr", "(", "i", ",", "'sleep_interval'", ")", ":", "total_sleep", "=", "i", ".", "sleep_interval", "+", "2.0", "if", "os", ".", "getenv", "(", "'PUBLISHER_EXIT_DELAY'", ",", "False", ")", ":", "total_sleep", "=", "float", "(", "os", ".", "getenv", "(", "'PUBLISHER_EXIT_DELAY'", ",", "total_sleep", ")", ")", "if", "debug", ":", "print", "(", "(", "' - wait_for_exit '", "'handler={} wait={}s'", ")", ".", "format", "(", "handler_class_name", ",", "total_sleep", ")", ")", "time", ".", "sleep", "(", "total_sleep", ")", "if", "debug", ":", "print", "(", "(", "'done waiting for exit'", ")", ")", "return", "else", ":", "print", "(", "(", "' - wait_for_exit handler={} has no'", "'sleep_interval'", ")", ".", "format", "(", "handler_class_name", ")", ")" ]
wait_for_exit Sleep to allow the thread to pick up final messages before exiting and stopping the Splunk HTTP publisher. You can decrease this delay (in seconds) by reducing the splunk_sleep_interval or by exporting the env var: export SPLUNK_SLEEP_INTERVAL=0.5 If you set the timer to 0 then it will be a blocking HTTP POST sent to Splunk for each log message. This creates a blocking logger in your application that will wait until each log's HTTP POST was received before continuing. Note: Reducing this Splunk sleep timer could result in losing messages that were stuck in the queue when the parent process exits. The multiprocessing Splunk Publisher was built to do this, but will not work in certain frameworks like Celery as it requires access to spawn daemon processes to prevent this 'message loss' case during exiting. Applications using this library should ensure there's no critical log messages stuck in a queue when stopping a long-running process. :param log: created logger :param debug: bool to debug with prints
[ "wait_for_exit" ]
python
train
38.261538
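A minimal sketch of calling wait_for_exit with whatever logger the application built. Plain stdlib logging is used here for brevity; in real use the logger would carry one of spylunking's Splunk publisher handlers, which is what the function looks for.

import logging

from spylunking.wait_for_exit import wait_for_exit  # module path from this record

log = logging.getLogger('my-app')
# ... application work and logging happens here ...
wait_for_exit(log)  # gives a Splunk publisher handler, if attached, time to flush queued messages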
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L1335-L1359
def _role_remove(name, user=None, host=None, port=None, maintenance_db=None, password=None, runas=None): ''' Removes a role from the Postgres Server ''' # check if user exists if not user_exists(name, user, host, port, maintenance_db, password=password, runas=runas): log.info('User \'%s\' does not exist', name) return False # user exists, proceed sub_cmd = 'DROP ROLE "{0}"'.format(name) _psql_prepare_and_run( ['-c', sub_cmd], runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password) if not user_exists(name, user, host, port, maintenance_db, password=password, runas=runas): return True else: log.info('Failed to delete user \'%s\'.', name) return False
[ "def", "_role_remove", "(", "name", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "runas", "=", "None", ")", ":", "# check if user exists", "if", "not", "user_exists", "(", "name", ",", "user", ",", "host", ",", "port", ",", "maintenance_db", ",", "password", "=", "password", ",", "runas", "=", "runas", ")", ":", "log", ".", "info", "(", "'User \\'%s\\' does not exist'", ",", "name", ")", "return", "False", "# user exists, proceed", "sub_cmd", "=", "'DROP ROLE \"{0}\"'", ".", "format", "(", "name", ")", "_psql_prepare_and_run", "(", "[", "'-c'", ",", "sub_cmd", "]", ",", "runas", "=", "runas", ",", "host", "=", "host", ",", "user", "=", "user", ",", "port", "=", "port", ",", "maintenance_db", "=", "maintenance_db", ",", "password", "=", "password", ")", "if", "not", "user_exists", "(", "name", ",", "user", ",", "host", ",", "port", ",", "maintenance_db", ",", "password", "=", "password", ",", "runas", "=", "runas", ")", ":", "return", "True", "else", ":", "log", ".", "info", "(", "'Failed to delete user \\'%s\\'.'", ",", "name", ")", "return", "False" ]
Removes a role from the Postgres Server
[ "Removes", "a", "role", "from", "the", "Postgres", "Server" ]
python
train
33.68
rcbops/osa_differ
osa_differ/osa_differ.py
https://github.com/rcbops/osa_differ/blob/b3452436655ba3db8cc6602390fd7fdf4ef30f01/osa_differ/osa_differ.py#L399-L405
def prepare_storage_dir(storage_directory): """Prepare the storage directory.""" storage_directory = os.path.expanduser(storage_directory) if not os.path.exists(storage_directory): os.mkdir(storage_directory) return storage_directory
[ "def", "prepare_storage_dir", "(", "storage_directory", ")", ":", "storage_directory", "=", "os", ".", "path", ".", "expanduser", "(", "storage_directory", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "storage_directory", ")", ":", "os", ".", "mkdir", "(", "storage_directory", ")", "return", "storage_directory" ]
Prepare the storage directory.
[ "Prepare", "the", "storage", "directory", "." ]
python
train
36
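prepare_storage_dir only expands the user path and creates the directory if needed; a quick sketch with a made-up path.

from osa_differ.osa_differ import prepare_storage_dir  # module path from this record

storage = prepare_storage_dir('~/.osa_differ_cache')
print(storage)  # expanded path such as /home/user/.osa_differ_cache, created if it did not exist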
last-partizan/pytils
pytils/typo.py
https://github.com/last-partizan/pytils/blob/1c570a32b15e564bc68587b8207e32d464e61d08/pytils/typo.py#L26-L44
def rl_cleanspaces(x): """ Clean double spaces, trailing spaces, heading spaces, spaces before punctuations """ patterns = ( # arguments for re.sub: pattern and repl # удаляем пробел перед знаками препинания (r' +([\.,?!\)]+)', r'\1'), # добавляем пробел после знака препинания, если только за ним нет другого (r'([\.,?!\)]+)([^\.!,?\)]+)', r'\1 \2'), # убираем пробел после открывающей скобки (r'(\S+)\s*(\()\s*(\S+)', r'\1 (\3'), ) # удаляем двойные, начальные и конечные пробелы return os.linesep.join( ' '.join(part for part in line.split(' ') if part) for line in _sub_patterns(patterns, x).split(os.linesep) )
[ "def", "rl_cleanspaces", "(", "x", ")", ":", "patterns", "=", "(", "# arguments for re.sub: pattern and repl", "# удаляем пробел перед знаками препинания", "(", "r' +([\\.,?!\\)]+)'", ",", "r'\\1'", ")", ",", "# добавляем пробел после знака препинания, если только за ним нет другого", "(", "r'([\\.,?!\\)]+)([^\\.!,?\\)]+)'", ",", "r'\\1 \\2'", ")", ",", "# убираем пробел после открывающей скобки", "(", "r'(\\S+)\\s*(\\()\\s*(\\S+)'", ",", "r'\\1 (\\3'", ")", ",", ")", "# удаляем двойные, начальные и конечные пробелы", "return", "os", ".", "linesep", ".", "join", "(", "' '", ".", "join", "(", "part", "for", "part", "in", "line", ".", "split", "(", "' '", ")", "if", "part", ")", "for", "line", "in", "_sub_patterns", "(", "patterns", ",", "x", ")", ".", "split", "(", "os", ".", "linesep", ")", ")" ]
Clean double spaces, trailing spaces, heading spaces, spaces before punctuations
[ "Clean", "double", "spaces", "trailing", "spaces", "heading", "spaces", "spaces", "before", "punctuations" ]
python
train
37.105263
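Example input and output for rl_cleanspaces, worked through the three regex passes and the final space-collapsing step shown in the record.

from pytils.typo import rl_cleanspaces  # module path from this record

print(rl_cleanspaces('Hello ,  world !'))  # 'Hello, world!'
print(rl_cleanspaces('see( note )'))       # 'see (note)'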
radjkarl/fancyTools
fancytools/math/line.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/line.py#L212-L221
def resize(line, factor): """ factor: relative length (1->no change, 2-> double, 0.5:half) """ a = angle(line) mx, my = middle(line) d = length(line) * factor * 0.5 dx = cos(a) * d dy = sin(a) * d return mx - dx, my - dy, mx + dx, my + dy
[ "def", "resize", "(", "line", ",", "factor", ")", ":", "a", "=", "angle", "(", "line", ")", "mx", ",", "my", "=", "middle", "(", "line", ")", "d", "=", "length", "(", "line", ")", "*", "factor", "*", "0.5", "dx", "=", "cos", "(", "a", ")", "*", "d", "dy", "=", "sin", "(", "a", ")", "*", "d", "return", "mx", "-", "dx", ",", "my", "-", "dy", ",", "mx", "+", "dx", ",", "my", "+", "dy" ]
factor: relative length (1->no change, 2-> double, 0.5:half)
[ "factor", ":", "relative", "length", "(", "1", "-", ">", "no", "change", "2", "-", ">", "double", "0", ".", "5", ":", "half", ")" ]
python
train
26.5
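A quick numeric check of resize() for a horizontal line, using the (x1, y1, x2, y2) line convention implied by the angle/middle/length helpers in the same module; the expected tuple assumes angle() returns 0 for a horizontal line.

from fancytools.math.line import resize  # module path from this record

# a 10-unit horizontal line from (0, 0) to (10, 0), stretched by a factor of 2 about its middle
print(resize((0, 0, 10, 0), 2))  # (-5.0, 0.0, 15.0, 0.0)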
materialsproject/pymatgen
pymatgen/io/adf.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/adf.py#L305-L325
def has_option(self, option): """ Return True if the option is included in this key. Parameters ---------- option : str The option. Returns ------- has : bool True if the option can be found. Otherwise False will be returned. """ if len(self.options) == 0: return False for op in self.options: if (self._sized_op and op[0] == option) or (op == option): return True return False
[ "def", "has_option", "(", "self", ",", "option", ")", ":", "if", "len", "(", "self", ".", "options", ")", "==", "0", ":", "return", "False", "for", "op", "in", "self", ".", "options", ":", "if", "(", "self", ".", "_sized_op", "and", "op", "[", "0", "]", "==", "option", ")", "or", "(", "op", "==", "option", ")", ":", "return", "True", "return", "False" ]
Return True if the option is included in this key. Parameters ---------- option : str The option. Returns ------- has : bool True if the option can be found. Otherwise False will be returned.
[ "Return", "True", "if", "the", "option", "is", "included", "in", "this", "key", "." ]
python
train
24.761905
valhallasw/flask-mwoauth
flask_mwoauth/__init__.py
https://github.com/valhallasw/flask-mwoauth/blob/216aa9c1ead07d99a9e11deb7642e33a70aa59d7/flask_mwoauth/__init__.py#L124-L158
def request(self, api_query, url=None): """ e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required function returns a python dict that resembles the api's json response """ api_query['format'] = 'json' if url is not None: api_url = url + "/api.php" else: api_url = self.api_url size = sum([sys.getsizeof(v) for k, v in iteritems(api_query)]) if size > (1024 * 8): # if request is bigger than 8 kB (the limit is somewhat arbitrary, # see https://www.mediawiki.org/wiki/API:Edit#Large_texts) then # transmit as multipart message req = self._prepare_long_request(url=api_url, api_query=api_query) req.send() if self.return_json: return req.response.json() else: return req.response.text else: auth1 = OAuth1( self.consumer_token.key, client_secret=self.consumer_token.secret, resource_owner_key=session['mwoauth_access_token']['key'], resource_owner_secret=session['mwoauth_access_token']['secret']) if self.return_json: return requests.post(api_url, data=api_query, auth=auth1).json() else: return requests.post(api_url, data=api_query, auth=auth1).text
[ "def", "request", "(", "self", ",", "api_query", ",", "url", "=", "None", ")", ":", "api_query", "[", "'format'", "]", "=", "'json'", "if", "url", "is", "not", "None", ":", "api_url", "=", "url", "+", "\"/api.php\"", "else", ":", "api_url", "=", "self", ".", "api_url", "size", "=", "sum", "(", "[", "sys", ".", "getsizeof", "(", "v", ")", "for", "k", ",", "v", "in", "iteritems", "(", "api_query", ")", "]", ")", "if", "size", ">", "(", "1024", "*", "8", ")", ":", "# if request is bigger than 8 kB (the limit is somewhat arbitrary,", "# see https://www.mediawiki.org/wiki/API:Edit#Large_texts) then", "# transmit as multipart message", "req", "=", "self", ".", "_prepare_long_request", "(", "url", "=", "api_url", ",", "api_query", "=", "api_query", ")", "req", ".", "send", "(", ")", "if", "self", ".", "return_json", ":", "return", "req", ".", "response", ".", "json", "(", ")", "else", ":", "return", "req", ".", "response", ".", "text", "else", ":", "auth1", "=", "OAuth1", "(", "self", ".", "consumer_token", ".", "key", ",", "client_secret", "=", "self", ".", "consumer_token", ".", "secret", ",", "resource_owner_key", "=", "session", "[", "'mwoauth_access_token'", "]", "[", "'key'", "]", ",", "resource_owner_secret", "=", "session", "[", "'mwoauth_access_token'", "]", "[", "'secret'", "]", ")", "if", "self", ".", "return_json", ":", "return", "requests", ".", "post", "(", "api_url", ",", "data", "=", "api_query", ",", "auth", "=", "auth1", ")", ".", "json", "(", ")", "else", ":", "return", "requests", ".", "post", "(", "api_url", ",", "data", "=", "api_query", ",", "auth", "=", "auth1", ")", ".", "text" ]
e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required function returns a python dict that resembles the api's json response
[ "e", ".", "g", ".", "{", "action", ":", "query", "meta", ":", "userinfo", "}", ".", "format", "=", "json", "not", "required", "function", "returns", "a", "python", "dict", "that", "resembles", "the", "api", "s", "json", "response" ]
python
train
40.942857
andreafioraldi/angrdbg
angrdbg/brk.py
https://github.com/andreafioraldi/angrdbg/blob/939b20fb9b341aee695d2db12142b1eddc5b555a/angrdbg/brk.py#L11-L52
def get_dbg_brk_linux64(): ''' Return the current brk value in the debugged process (only x86_64 Linux) ''' # TODO this method is so weird, find a unused address to inject code not # the base address debugger = get_debugger() code = b'\x0f\x05' # syscall rax = debugger.get_reg("rax") rdi = debugger.get_reg("rdi") rip = debugger.get_reg("rip") efl = debugger.get_reg("efl") debugger.set_reg("rax", 12) # sys_brk debugger.set_reg("rdi", 0) base = debugger.image_base() inj = base save = debugger.get_bytes(inj, len(code)) debugger.put_bytes(inj, code) debugger.set_reg("rip", inj) debugger.step_into() debugger.wait_ready() brk_res = debugger.get_reg("rax") debugger.set_reg("rax", rax) debugger.set_reg("rdi", rdi) debugger.set_reg("rip", rip) debugger.set_reg("efl", efl) debugger.put_bytes(inj, save) return brk_res
[ "def", "get_dbg_brk_linux64", "(", ")", ":", "# TODO this method is so weird, find a unused address to inject code not", "# the base address", "debugger", "=", "get_debugger", "(", ")", "code", "=", "b'\\x0f\\x05'", "# syscall", "rax", "=", "debugger", ".", "get_reg", "(", "\"rax\"", ")", "rdi", "=", "debugger", ".", "get_reg", "(", "\"rdi\"", ")", "rip", "=", "debugger", ".", "get_reg", "(", "\"rip\"", ")", "efl", "=", "debugger", ".", "get_reg", "(", "\"efl\"", ")", "debugger", ".", "set_reg", "(", "\"rax\"", ",", "12", ")", "# sys_brk", "debugger", ".", "set_reg", "(", "\"rdi\"", ",", "0", ")", "base", "=", "debugger", ".", "image_base", "(", ")", "inj", "=", "base", "save", "=", "debugger", ".", "get_bytes", "(", "inj", ",", "len", "(", "code", ")", ")", "debugger", ".", "put_bytes", "(", "inj", ",", "code", ")", "debugger", ".", "set_reg", "(", "\"rip\"", ",", "inj", ")", "debugger", ".", "step_into", "(", ")", "debugger", ".", "wait_ready", "(", ")", "brk_res", "=", "debugger", ".", "get_reg", "(", "\"rax\"", ")", "debugger", ".", "set_reg", "(", "\"rax\"", ",", "rax", ")", "debugger", ".", "set_reg", "(", "\"rdi\"", ",", "rdi", ")", "debugger", ".", "set_reg", "(", "\"rip\"", ",", "rip", ")", "debugger", ".", "set_reg", "(", "\"efl\"", ",", "efl", ")", "debugger", ".", "put_bytes", "(", "inj", ",", "save", ")", "return", "brk_res" ]
Return the current brk value in the debugged process (only x86_64 Linux)
[ "Return", "the", "current", "brk", "value", "in", "the", "debugged", "process", "(", "only", "x86_64", "Linux", ")" ]
python
train
21.452381
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L3050-L3069
def psubscribe(self, *args, **kwargs): """ Subscribe to channel patterns. Patterns supplied as keyword arguments expect a pattern name as the key and a callable as the value. A pattern's callable will be invoked automatically when a message is received on that pattern rather than producing a message via ``listen()``. """ if args: args = list_or_args(args[0], args[1:]) new_patterns = dict.fromkeys(args) new_patterns.update(kwargs) ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns)) # update the patterns dict AFTER we send the command. we don't want to # subscribe twice to these patterns, once for the command and again # for the reconnection. new_patterns = self._normalize_keys(new_patterns) self.patterns.update(new_patterns) self.pending_unsubscribe_patterns.difference_update(new_patterns) return ret_val
[ "def", "psubscribe", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", ":", "args", "=", "list_or_args", "(", "args", "[", "0", "]", ",", "args", "[", "1", ":", "]", ")", "new_patterns", "=", "dict", ".", "fromkeys", "(", "args", ")", "new_patterns", ".", "update", "(", "kwargs", ")", "ret_val", "=", "self", ".", "execute_command", "(", "'PSUBSCRIBE'", ",", "*", "iterkeys", "(", "new_patterns", ")", ")", "# update the patterns dict AFTER we send the command. we don't want to", "# subscribe twice to these patterns, once for the command and again", "# for the reconnection.", "new_patterns", "=", "self", ".", "_normalize_keys", "(", "new_patterns", ")", "self", ".", "patterns", ".", "update", "(", "new_patterns", ")", "self", ".", "pending_unsubscribe_patterns", ".", "difference_update", "(", "new_patterns", ")", "return", "ret_val" ]
Subscribe to channel patterns. Patterns supplied as keyword arguments expect a pattern name as the key and a callable as the value. A pattern's callable will be invoked automatically when a message is received on that pattern rather than producing a message via ``listen()``.
[ "Subscribe", "to", "channel", "patterns", ".", "Patterns", "supplied", "as", "keyword", "arguments", "expect", "a", "pattern", "name", "as", "the", "key", "and", "a", "callable", "as", "the", "value", ".", "A", "pattern", "s", "callable", "will", "be", "invoked", "automatically", "when", "a", "message", "is", "received", "on", "that", "pattern", "rather", "than", "producing", "a", "message", "via", "listen", "()", "." ]
python
train
48.45
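A hedged usage sketch of the pattern-subscription behaviour described in the psubscribe docstring above. It assumes the redis-py package is installed and a Redis server is reachable on localhost; the channel patterns and handler are made up.

import redis  # assumes redis-py and a local Redis server

def log_handler(message):
    # called automatically for messages matching 'log.*', instead of being
    # delivered through listen(), as the docstring above describes
    print(message['channel'], message['data'])

p = redis.Redis().pubsub()
# positional arguments are plain patterns; keyword-style entries map a pattern
# to a callable (dots make them invalid identifiers, hence the **{} form)
p.psubscribe('news.*', **{'log.*': log_handler})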
Genida/archan
src/archan/logging.py
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/logging.py#L31-L56
def get_logger(name, level=None, fmt=':%(lineno)d: %(message)s'): """ Return a logger. Args: name (str): name to pass to the logging module. level (int): level of logging. fmt (str): format string. Returns: logging.Logger: logger from ``logging.getLogger``. """ if name not in Logger.loggers: if Logger.level is None and level is None: Logger.level = level = logging.ERROR elif Logger.level is None: Logger.level = level elif level is None: level = Logger.level logger = logging.getLogger(name) logger_handler = logging.StreamHandler() logger_handler.setFormatter(LoggingFormatter(fmt=name + fmt)) logger.addHandler(logger_handler) logger.setLevel(level) Logger.loggers[name] = logger return Logger.loggers[name]
[ "def", "get_logger", "(", "name", ",", "level", "=", "None", ",", "fmt", "=", "':%(lineno)d: %(message)s'", ")", ":", "if", "name", "not", "in", "Logger", ".", "loggers", ":", "if", "Logger", ".", "level", "is", "None", "and", "level", "is", "None", ":", "Logger", ".", "level", "=", "level", "=", "logging", ".", "ERROR", "elif", "Logger", ".", "level", "is", "None", ":", "Logger", ".", "level", "=", "level", "elif", "level", "is", "None", ":", "level", "=", "Logger", ".", "level", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger_handler", "=", "logging", ".", "StreamHandler", "(", ")", "logger_handler", ".", "setFormatter", "(", "LoggingFormatter", "(", "fmt", "=", "name", "+", "fmt", ")", ")", "logger", ".", "addHandler", "(", "logger_handler", ")", "logger", ".", "setLevel", "(", "level", ")", "Logger", ".", "loggers", "[", "name", "]", "=", "logger", "return", "Logger", ".", "loggers", "[", "name", "]" ]
Return a logger. Args: name (str): name to pass to the logging module. level (int): level of logging. fmt (str): format string. Returns: logging.Logger: logger from ``logging.getLogger``.
[ "Return", "a", "logger", "." ]
python
train
36.5
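A standalone sketch of the level-defaulting rule implemented in get_logger above, with the class-level cache reduced to a module variable. The names here are illustrative, not from the source.

import logging

shared_level = None  # stands in for Logger.level in the code above

def resolve_level(level):
    global shared_level
    if shared_level is None and level is None:
        shared_level = level = logging.ERROR   # nothing set anywhere: default to ERROR
    elif shared_level is None:
        shared_level = level                   # first explicit level becomes the shared one
    elif level is None:
        level = shared_level                   # later callers inherit the shared level
    return level

print(resolve_level(None))           # 40 (ERROR): default applied and remembered
print(resolve_level(None))           # 40: inherited from the shared level
print(resolve_level(logging.DEBUG))  # 10: an explicit level still wins for that call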
ronniedada/tabula
tabula/section.py
https://github.com/ronniedada/tabula/blob/ba18bb2f7db75972256b950711415031dc5421c7/tabula/section.py#L109-L138
def align(self, arr): """ Align columns, including column headers """ if arr is None: return arr c_hdrs = self._get_col_hdrs() if self.show_col_hdr_in_cell: for hdr in c_hdrs: arr[hdr] = map(lambda col: ":".join([hdr, str(col)]), arr[hdr]) if self.show_col_hdrs: widths = [max(len(str(col)) for col in arr[hdr].tolist() + [hdr]) for hdr in c_hdrs] else: widths = [max(len(str(col)) for col in arr[hdr].tolist()) for hdr in c_hdrs] # align column headers c_hdrs = map(lambda (c_hdr, width): c_hdr.ljust(width), zip(c_hdrs, widths)) # align data for n_row in range(len(arr)): arr[n_row] = tuple(map(lambda (col, width): col.ljust(width), zip(arr[n_row], widths))) return arr, c_hdrs, widths
[ "def", "align", "(", "self", ",", "arr", ")", ":", "if", "arr", "is", "None", ":", "return", "arr", "c_hdrs", "=", "self", ".", "_get_col_hdrs", "(", ")", "if", "self", ".", "show_col_hdr_in_cell", ":", "for", "hdr", "in", "c_hdrs", ":", "arr", "[", "hdr", "]", "=", "map", "(", "lambda", "col", ":", "\":\"", ".", "join", "(", "[", "hdr", ",", "str", "(", "col", ")", "]", ")", ",", "arr", "[", "hdr", "]", ")", "if", "self", ".", "show_col_hdrs", ":", "widths", "=", "[", "max", "(", "len", "(", "str", "(", "col", ")", ")", "for", "col", "in", "arr", "[", "hdr", "]", ".", "tolist", "(", ")", "+", "[", "hdr", "]", ")", "for", "hdr", "in", "c_hdrs", "]", "else", ":", "widths", "=", "[", "max", "(", "len", "(", "str", "(", "col", ")", ")", "for", "col", "in", "arr", "[", "hdr", "]", ".", "tolist", "(", ")", ")", "for", "hdr", "in", "c_hdrs", "]", "# align column headers", "c_hdrs", "=", "map", "(", "lambda", "(", "c_hdr", ",", "width", ")", ":", "c_hdr", ".", "ljust", "(", "width", ")", ",", "zip", "(", "c_hdrs", ",", "widths", ")", ")", "# align data", "for", "n_row", "in", "range", "(", "len", "(", "arr", ")", ")", ":", "arr", "[", "n_row", "]", "=", "tuple", "(", "map", "(", "lambda", "(", "col", ",", "width", ")", ":", "col", ".", "ljust", "(", "width", ")", ",", "zip", "(", "arr", "[", "n_row", "]", ",", "widths", ")", ")", ")", "return", "arr", ",", "c_hdrs", ",", "widths" ]
Align columns, including column headers
[ "Align", "columns", "including", "column", "headers" ]
python
train
31.5
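The align() method above relies on Python 2 tuple-unpacking lambdas, so it does not run on Python 3 as written. Below is a minimal Python 3 sketch of the same width computation and ljust-based alignment, using made-up rows instead of the numpy record array the original receives.

rows = [("cpu", "12", "ok"), ("memory", "2048", "warning")]   # made-up data
hdrs = ["metric", "value", "status"]

# column width = widest of the header and every cell in that column
widths = [max(len(str(v)) for v in col + (hdr,))
          for hdr, col in zip(hdrs, zip(*rows))]
aligned_hdrs = [h.ljust(w) for h, w in zip(hdrs, widths)]
aligned_rows = [tuple(str(v).ljust(w) for v, w in zip(row, widths))
                for row in rows]

print(" ".join(aligned_hdrs))
for row in aligned_rows:
    print(" ".join(row))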
adaptive-learning/proso-apps
proso_models/models.py
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L539-L575
def translate_identifiers(self, identifiers, language): """ Translate a list of identifiers to item ids. Identifier is a string of the following form: <model_prefix>/<model_identifier> where <model_prefix> is any suffix of database table of the given model which uniquely specifies the table, and <model_identifier> is identifier of the object. Args: identifiers (list[str]): list of identifiers language (str): language used for further filtering (some objects for different languages share the same item Returns: dict: identifier -> item id """ result = {} identifiers = set(identifiers) item_types = ItemType.objects.get_all_types() for item_type_id, type_identifiers in proso.list.group_by(identifiers, by=lambda identifier: self.get_item_type_id_from_identifier(identifier, item_types)).items(): to_find = {} for identifier in type_identifiers: identifier_split = identifier.split('/') to_find[identifier_split[1]] = identifier kwargs = {'identifier__in': list(to_find.keys())} item_type = ItemType.objects.get_all_types()[item_type_id] model = ItemType.objects.get_model(item_type_id) if 'language' in item_type: kwargs[item_type['language']] = language for identifier, item_id in model.objects.filter(**kwargs).values_list('identifier', item_type['foreign_key']): result[to_find[identifier]] = item_id if len(result) != len(identifiers): raise HttpError(404, "Can't translate the following identifiers: {}".format(set(identifiers) - set(result.keys())), 'identifier_not_found') return result
[ "def", "translate_identifiers", "(", "self", ",", "identifiers", ",", "language", ")", ":", "result", "=", "{", "}", "identifiers", "=", "set", "(", "identifiers", ")", "item_types", "=", "ItemType", ".", "objects", ".", "get_all_types", "(", ")", "for", "item_type_id", ",", "type_identifiers", "in", "proso", ".", "list", ".", "group_by", "(", "identifiers", ",", "by", "=", "lambda", "identifier", ":", "self", ".", "get_item_type_id_from_identifier", "(", "identifier", ",", "item_types", ")", ")", ".", "items", "(", ")", ":", "to_find", "=", "{", "}", "for", "identifier", "in", "type_identifiers", ":", "identifier_split", "=", "identifier", ".", "split", "(", "'/'", ")", "to_find", "[", "identifier_split", "[", "1", "]", "]", "=", "identifier", "kwargs", "=", "{", "'identifier__in'", ":", "list", "(", "to_find", ".", "keys", "(", ")", ")", "}", "item_type", "=", "ItemType", ".", "objects", ".", "get_all_types", "(", ")", "[", "item_type_id", "]", "model", "=", "ItemType", ".", "objects", ".", "get_model", "(", "item_type_id", ")", "if", "'language'", "in", "item_type", ":", "kwargs", "[", "item_type", "[", "'language'", "]", "]", "=", "language", "for", "identifier", ",", "item_id", "in", "model", ".", "objects", ".", "filter", "(", "*", "*", "kwargs", ")", ".", "values_list", "(", "'identifier'", ",", "item_type", "[", "'foreign_key'", "]", ")", ":", "result", "[", "to_find", "[", "identifier", "]", "]", "=", "item_id", "if", "len", "(", "result", ")", "!=", "len", "(", "identifiers", ")", ":", "raise", "HttpError", "(", "404", ",", "\"Can't translate the following identifiers: {}\"", ".", "format", "(", "set", "(", "identifiers", ")", "-", "set", "(", "result", ".", "keys", "(", ")", ")", ")", ",", "'identifier_not_found'", ")", "return", "result" ]
Translate a list of identifiers to item ids. Identifier is a string of the following form: <model_prefix>/<model_identifier> where <model_prefix> is any suffix of the database table of the given model which uniquely specifies the table, and <model_identifier> is the identifier of the object. Args: identifiers (list[str]): list of identifiers language (str): language used for further filtering (some objects for different languages share the same item) Returns: dict: identifier -> item id
[ "Translate", "a", "list", "of", "identifiers", "to", "item", "ids", ".", "Identifier", "is", "a", "string", "of", "the", "following", "form", ":" ]
python
train
48.810811
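A standalone sketch of the identifier format and the prefix grouping described above ('<model_prefix>/<model_identifier>'). The prefixes and identifiers are invented, and the database lookup that resolves each group to item ids is omitted.

from itertools import groupby

identifiers = ['flashcard/world-capitals', 'flashcard/rivers', 'category/geography']

def prefix(ident):
    return ident.split('/')[0]

# group by model prefix first, so one query per model can resolve the rest
by_prefix = {
    key: [ident.split('/')[1] for ident in group]
    for key, group in groupby(sorted(identifiers, key=prefix), key=prefix)
}
print(by_prefix)  # {'category': ['geography'], 'flashcard': ['world-capitals', 'rivers']}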
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_firmware_rpc/firmware_download/input/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_firmware_rpc/firmware_download/input/__init__.py#L167-L188
def _set_sftp(self, v, load=False): """ Setter method for sftp, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/sftp (container) If this variable is read-only (config: false) in the source YANG file, then _set_sftp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sftp() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=sftp.sftp, is_container='container', presence=False, yang_name="sftp", rest_name="sftp", parent=self, choice=(u'protocol-type', u'sftp-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sftp must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=sftp.sftp, is_container='container', presence=False, yang_name="sftp", rest_name="sftp", parent=self, choice=(u'protocol-type', u'sftp-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""", }) self.__sftp = t if hasattr(self, '_set'): self._set()
[ "def", "_set_sftp", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "sftp", ".", "sftp", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"sftp\"", ",", "rest_name", "=", "\"sftp\"", ",", "parent", "=", "self", ",", "choice", "=", "(", "u'protocol-type'", ",", "u'sftp-protocol'", ")", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "None", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-firmware'", ",", "defining_module", "=", "'brocade-firmware'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"sftp must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=sftp.sftp, is_container='container', presence=False, yang_name=\"sftp\", rest_name=\"sftp\", parent=self, choice=(u'protocol-type', u'sftp-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__sftp", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for sftp, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/sftp (container) If this variable is read-only (config: false) in the source YANG file, then _set_sftp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sftp() directly.
[ "Setter", "method", "for", "sftp", "mapped", "from", "YANG", "variable", "/", "brocade_firmware_rpc", "/", "firmware_download", "/", "input", "/", "sftp", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_sftp", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_sftp", "()", "directly", "." ]
python
train
69.590909
CivicSpleen/ambry
ambry/bundle/bundle.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L3029-L3034
def is_installed(self): """Return True if the bundle is installed.""" r = self.library.resolve(self.identity.vid) return r is not None
[ "def", "is_installed", "(", "self", ")", ":", "r", "=", "self", ".", "library", ".", "resolve", "(", "self", ".", "identity", ".", "vid", ")", "return", "r", "is", "not", "None" ]
Return True if the bundle is installed.
[ "Return", "True", "if", "the", "bundle", "is", "installed", "." ]
python
train
25.833333
google/prettytensor
prettytensor/recurrent_networks.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/recurrent_networks.py#L250-L322
def gru_cell(input_layer, state, num_units, bias=tf.zeros_initializer(), weights=None, phase=prettytensor.Phase.train, parameter_modifier=parameters.identity): """Gated recurrent unit memory cell (GRU). Args: input_layer: The input layer. state: The current state of the network. For GRUs, this is a list with one element (tensor) of shape [batch, num_units]. num_units: How big is the hidden state. bias: An initializer for the bias or a Tensor. No bias if set to None. weights: An initializer for weights or a Tensor. phase: The phase of graph construction. See `pt.Phase`. parameter_modifier: A function to modify parameters that is applied after creation and before use. Returns: A RecurrentResult. """ # As a compound op, it needs to respect whether or not this is a sequential # builder. if input_layer.is_sequential_builder(): layer = input_layer.as_layer() else: layer = input_layer # We start with bias of 1.0 to not reset and not udpate. # NB We compute activation_input and activation_state in two different ops, # instead of concatenating them, followed by one matrix multiplication. The # reason is that input has size [batch_size x input_size], while state has # [ ? x state_size ], where the first dimension is 1 initially and will be # batch_size only after the first RNN computation. We thus cannot concatenate # input and state, and instead add the results of two fully connected ops, # which works thanks to broadcasting, independent of state's batch size. state = state[0] state_pt = prettytensor.wrap(state, layer.bookkeeper) activation_input = layer.fully_connected( 2 * num_units, bias=None if bias is None else tf.constant_initializer(1.0), activation_fn=None, weights=weights, phase=phase, parameter_modifier=parameter_modifier) activation_state = state_pt.fully_connected( 2 * num_units, bias=None, activation_fn=None, weights=weights, phase=phase, parameter_modifier=parameter_modifier) # adds batch_size x (2 * num_units) + ? x (2 * num_inputs) activation = activation_input + activation_state activation = activation.sigmoid() split = activation.split(1, 2) r = split[0] u = split[1] c = layer.concat(1, [r * state]).fully_connected( num_units, bias=bias, activation_fn=None, weights=weights, phase=phase, parameter_modifier=parameter_modifier).apply(tf.tanh) new_h = u * state + (1 - u) * c if input_layer.is_sequential_builder(): new_h = input_layer.set_head(input_layer) return RecurrentResult(new_h, [new_h])
[ "def", "gru_cell", "(", "input_layer", ",", "state", ",", "num_units", ",", "bias", "=", "tf", ".", "zeros_initializer", "(", ")", ",", "weights", "=", "None", ",", "phase", "=", "prettytensor", ".", "Phase", ".", "train", ",", "parameter_modifier", "=", "parameters", ".", "identity", ")", ":", "# As a compound op, it needs to respect whether or not this is a sequential", "# builder.", "if", "input_layer", ".", "is_sequential_builder", "(", ")", ":", "layer", "=", "input_layer", ".", "as_layer", "(", ")", "else", ":", "layer", "=", "input_layer", "# We start with bias of 1.0 to not reset and not udpate.", "# NB We compute activation_input and activation_state in two different ops,", "# instead of concatenating them, followed by one matrix multiplication. The", "# reason is that input has size [batch_size x input_size], while state has", "# [ ? x state_size ], where the first dimension is 1 initially and will be", "# batch_size only after the first RNN computation. We thus cannot concatenate", "# input and state, and instead add the results of two fully connected ops,", "# which works thanks to broadcasting, independent of state's batch size.", "state", "=", "state", "[", "0", "]", "state_pt", "=", "prettytensor", ".", "wrap", "(", "state", ",", "layer", ".", "bookkeeper", ")", "activation_input", "=", "layer", ".", "fully_connected", "(", "2", "*", "num_units", ",", "bias", "=", "None", "if", "bias", "is", "None", "else", "tf", ".", "constant_initializer", "(", "1.0", ")", ",", "activation_fn", "=", "None", ",", "weights", "=", "weights", ",", "phase", "=", "phase", ",", "parameter_modifier", "=", "parameter_modifier", ")", "activation_state", "=", "state_pt", ".", "fully_connected", "(", "2", "*", "num_units", ",", "bias", "=", "None", ",", "activation_fn", "=", "None", ",", "weights", "=", "weights", ",", "phase", "=", "phase", ",", "parameter_modifier", "=", "parameter_modifier", ")", "# adds batch_size x (2 * num_units) + ? x (2 * num_inputs)", "activation", "=", "activation_input", "+", "activation_state", "activation", "=", "activation", ".", "sigmoid", "(", ")", "split", "=", "activation", ".", "split", "(", "1", ",", "2", ")", "r", "=", "split", "[", "0", "]", "u", "=", "split", "[", "1", "]", "c", "=", "layer", ".", "concat", "(", "1", ",", "[", "r", "*", "state", "]", ")", ".", "fully_connected", "(", "num_units", ",", "bias", "=", "bias", ",", "activation_fn", "=", "None", ",", "weights", "=", "weights", ",", "phase", "=", "phase", ",", "parameter_modifier", "=", "parameter_modifier", ")", ".", "apply", "(", "tf", ".", "tanh", ")", "new_h", "=", "u", "*", "state", "+", "(", "1", "-", "u", ")", "*", "c", "if", "input_layer", ".", "is_sequential_builder", "(", ")", ":", "new_h", "=", "input_layer", ".", "set_head", "(", "input_layer", ")", "return", "RecurrentResult", "(", "new_h", ",", "[", "new_h", "]", ")" ]
Gated recurrent unit memory cell (GRU). Args: input_layer: The input layer. state: The current state of the network. For GRUs, this is a list with one element (tensor) of shape [batch, num_units]. num_units: How big is the hidden state. bias: An initializer for the bias or a Tensor. No bias if set to None. weights: An initializer for weights or a Tensor. phase: The phase of graph construction. See `pt.Phase`. parameter_modifier: A function to modify parameters that is applied after creation and before use. Returns: A RecurrentResult.
[ "Gated", "recurrent", "unit", "memory", "cell", "(", "GRU", ")", "." ]
python
train
36.671233
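A minimal NumPy sketch of the arithmetic gru_cell above performs: sigmoid reset/update gates with an initial bias of 1.0, a tanh candidate over the input concatenated with r * state, and the convex combination for the new state. The weight matrices are random stand-ins for the learned fully_connected parameters, so this is an illustration of the math, not the prettytensor API.

import numpy as np

def gru_step(x, h, W_x, W_h, W_c):
    gates = 1.0 / (1.0 + np.exp(-(x @ W_x + h @ W_h + 1.0)))  # bias starts at 1.0, as above
    r, u = np.split(gates, 2, axis=1)                          # reset and update gates
    c = np.tanh(np.concatenate([x, r * h], axis=1) @ W_c)      # candidate state
    return u * h + (1.0 - u) * c                               # new hidden state

rng = np.random.default_rng(0)
batch, n_in, n_units = 4, 3, 5
x, h = rng.normal(size=(batch, n_in)), np.zeros((batch, n_units))
W_x = rng.normal(size=(n_in, 2 * n_units))
W_h = rng.normal(size=(n_units, 2 * n_units))
W_c = rng.normal(size=(n_in + n_units, n_units))
print(gru_step(x, h, W_x, W_h, W_c).shape)  # (4, 5)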
pandas-dev/pandas
pandas/core/sparse/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L809-L817
def transpose(self, *args, **kwargs): """ Returns a DataFrame with the rows/columns switched. """ nv.validate_transpose(args, kwargs) return self._constructor( self.values.T, index=self.columns, columns=self.index, default_fill_value=self._default_fill_value, default_kind=self._default_kind).__finalize__(self)
[ "def", "transpose", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_transpose", "(", "args", ",", "kwargs", ")", "return", "self", ".", "_constructor", "(", "self", ".", "values", ".", "T", ",", "index", "=", "self", ".", "columns", ",", "columns", "=", "self", ".", "index", ",", "default_fill_value", "=", "self", ".", "_default_fill_value", ",", "default_kind", "=", "self", ".", "_default_kind", ")", ".", "__finalize__", "(", "self", ")" ]
Returns a DataFrame with the rows/columns switched.
[ "Returns", "a", "DataFrame", "with", "the", "rows", "/", "columns", "switched", "." ]
python
train
42.111111
wandb/client
wandb/vendor/prompt_toolkit/eventloop/asyncio_posix.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/eventloop/asyncio_posix.py#L28-L86
def run_as_coroutine(self, stdin, callbacks): """ The input 'event loop'. """ assert isinstance(callbacks, EventLoopCallbacks) # Create reader class. stdin_reader = PosixStdinReader(stdin.fileno()) if self.closed: raise Exception('Event loop already closed.') inputstream = InputStream(callbacks.feed_key) try: # Create a new Future every time. self._stopped_f = asyncio.Future(loop=self.loop) # Handle input timouts def timeout_handler(): """ When no input has been received for INPUT_TIMEOUT seconds, flush the input stream and fire the timeout event. """ inputstream.flush() callbacks.input_timeout() timeout = AsyncioTimeout(INPUT_TIMEOUT, timeout_handler, self.loop) # Catch sigwinch def received_winch(): self.call_from_executor(callbacks.terminal_size_changed) self.loop.add_signal_handler(signal.SIGWINCH, received_winch) # Read input data. def stdin_ready(): data = stdin_reader.read() inputstream.feed(data) timeout.reset() # Quit when the input stream was closed. if stdin_reader.closed: self.stop() self.loop.add_reader(stdin.fileno(), stdin_ready) # Block this coroutine until stop() has been called. for f in self._stopped_f: yield f finally: # Clean up. self.loop.remove_reader(stdin.fileno()) self.loop.remove_signal_handler(signal.SIGWINCH) # Don't trigger any timeout events anymore. timeout.stop()
[ "def", "run_as_coroutine", "(", "self", ",", "stdin", ",", "callbacks", ")", ":", "assert", "isinstance", "(", "callbacks", ",", "EventLoopCallbacks", ")", "# Create reader class.", "stdin_reader", "=", "PosixStdinReader", "(", "stdin", ".", "fileno", "(", ")", ")", "if", "self", ".", "closed", ":", "raise", "Exception", "(", "'Event loop already closed.'", ")", "inputstream", "=", "InputStream", "(", "callbacks", ".", "feed_key", ")", "try", ":", "# Create a new Future every time.", "self", ".", "_stopped_f", "=", "asyncio", ".", "Future", "(", "loop", "=", "self", ".", "loop", ")", "# Handle input timouts", "def", "timeout_handler", "(", ")", ":", "\"\"\"\n When no input has been received for INPUT_TIMEOUT seconds,\n flush the input stream and fire the timeout event.\n \"\"\"", "inputstream", ".", "flush", "(", ")", "callbacks", ".", "input_timeout", "(", ")", "timeout", "=", "AsyncioTimeout", "(", "INPUT_TIMEOUT", ",", "timeout_handler", ",", "self", ".", "loop", ")", "# Catch sigwinch", "def", "received_winch", "(", ")", ":", "self", ".", "call_from_executor", "(", "callbacks", ".", "terminal_size_changed", ")", "self", ".", "loop", ".", "add_signal_handler", "(", "signal", ".", "SIGWINCH", ",", "received_winch", ")", "# Read input data.", "def", "stdin_ready", "(", ")", ":", "data", "=", "stdin_reader", ".", "read", "(", ")", "inputstream", ".", "feed", "(", "data", ")", "timeout", ".", "reset", "(", ")", "# Quit when the input stream was closed.", "if", "stdin_reader", ".", "closed", ":", "self", ".", "stop", "(", ")", "self", ".", "loop", ".", "add_reader", "(", "stdin", ".", "fileno", "(", ")", ",", "stdin_ready", ")", "# Block this coroutine until stop() has been called.", "for", "f", "in", "self", ".", "_stopped_f", ":", "yield", "f", "finally", ":", "# Clean up.", "self", ".", "loop", ".", "remove_reader", "(", "stdin", ".", "fileno", "(", ")", ")", "self", ".", "loop", ".", "remove_signal_handler", "(", "signal", ".", "SIGWINCH", ")", "# Don't trigger any timeout events anymore.", "timeout", ".", "stop", "(", ")" ]
The input 'event loop'.
[ "The", "input", "event", "loop", "." ]
python
train
30.661017
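A small standalone sketch of the loop.add_reader pattern the coroutine above builds on: register a file descriptor with the asyncio loop and get a callback whenever it becomes readable. It uses a pipe instead of stdin and runs on POSIX with the default selector event loop; nothing here is prompt_toolkit code.

import asyncio
import os

async def main():
    loop = asyncio.get_running_loop()
    read_fd, write_fd = os.pipe()

    def on_readable():
        # fires whenever read_fd has data, like the stdin_ready callback above
        print('got', os.read(read_fd, 1024))
        loop.remove_reader(read_fd)

    loop.add_reader(read_fd, on_readable)
    os.write(write_fd, b'hello')
    await asyncio.sleep(0.1)   # give the loop a chance to dispatch the callback

asyncio.run(main())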
pmacosta/peng
peng/functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/functions.py#L804-L1051
def pprint_vector(vector, limit=False, width=None, indent=0, eng=False, frac_length=3): r""" Format a list of numbers (vector) or a Numpy vector for printing. If the argument **vector** is :code:`None` the string :code:`'None'` is returned :param vector: Vector to pretty print or None :type vector: list of integers or floats, Numpy vector or None :param limit: Flag that indicates whether at most 6 vector items are printed (all vector items if its length is equal or less than 6, first and last 3 vector items if it is not) (True), or the entire vector is printed (False) :type limit: boolean :param width: Number of available characters per line. If None the vector is printed in one line :type width: integer or None :param indent: Flag that indicates whether all subsequent lines after the first one are indented (True) or not (False). Only relevant if **width** is not None :type indent: boolean :param eng: Flag that indicates whether engineering notation is used (True) or not (False) :type eng: boolean :param frac_length: Number of digits of fractional part (only applicable if **eng** is True) :type frac_length: integer :raises: ValueError (Argument \`width\` is too small) :rtype: string For example: >>> from __future__ import print_function >>> import peng >>> header = 'Vector: ' >>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9] >>> print( ... header+peng.pprint_vector( ... data, ... width=30, ... eng=True, ... frac_length=1, ... limit=True, ... indent=len(header) ... ) ... ) Vector: [ 1.0m, 20.0u, 300.0M, ... 700.0 , 8.0 , 9.0 ] >>> print( ... header+peng.pprint_vector( ... data, ... width=30, ... eng=True, ... frac_length=0, ... indent=len(header) ... ) ... ) Vector: [ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ] >>> print(peng.pprint_vector(data, eng=True, frac_length=0)) [ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ] >>> print(peng.pprint_vector(data, limit=True)) [ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ] """ # pylint: disable=R0912,R0913 num_digits = 12 approx = lambda x: float(x) if "." not in x else round(float(x), num_digits) def limstr(value): str1 = str(value) iscomplex = isinstance(value, complex) str1 = str1.lstrip("(").rstrip(")") if "." not in str1: return str1 if iscomplex: sign = "+" if value.imag >= 0 else "-" regexp = re.compile( r"(.*(?:[Ee][\+-]\d+)?)" + (r"\+" if sign == "+" else "-") + r"(.*(?:[Ee][\+-]\d+)?j)" ) rvalue, ivalue = regexp.match(str1).groups() return ( str(complex(approx(rvalue), approx(sign + ivalue.strip("j")))) .lstrip("(") .rstrip(")") ) str2 = str(round(value, num_digits)) return str2 if len(str1) > len(str2) else str1 def _str(*args): """ Convert numbers to string, optionally represented in engineering notation. 
Numbers may be integers, float or complex """ ret = [ (limstr(element) if not eng else peng(element, frac_length, True)) if not isinstance(element, complex) else ( limstr(element) if not eng else "{real}{sign}{imag}j".format( real=peng(element.real, frac_length, True), imag=peng(abs(element.imag), frac_length, True), sign="+" if element.imag >= 0 else "-", ) ) for element in args ] return ret[0] if len(ret) == 1 else ret if vector is None: return "None" lvector = len(vector) if (not limit) or (limit and (lvector < 7)): items = _str(*vector) uret = "[ {0} ]".format(", ".join(items)) else: items = _str(*(vector[:3] + vector[-3:])) uret = "[ {0}, ..., {1} ]".format(", ".join(items[:3]), ", ".join(items[-3:])) if (width is None) or (len(uret) < width): return uret # -4 comes from the fact that an opening '[ ' and a closing ' ]' # are added to the multi-line vector string if any([len(item) > width - 4 for item in items]): raise ValueError("Argument `width` is too small") # Text needs to be wrapped in multiple lines # Figure out how long the first line needs to be wobj = textwrap.TextWrapper(initial_indent="[ ", width=width) # uret[2:] -> do not include initial '[ ' as this is specified as # the initial indent to the text wrapper rlist = wobj.wrap(uret[2:]) first_line = rlist[0] first_line_elements = first_line.count(",") # Reconstruct string representation of vector excluding first line # Remove ... from text to be wrapped because it is placed in a single # line centered with the content uret_left = (",".join(uret.split(",")[first_line_elements:])).replace("...,", "") wobj = textwrap.TextWrapper(width=width - 2) wrapped_text = wobj.wrap(uret_left.lstrip()) # Construct candidate wrapped and indented list of vector elements rlist = [first_line] + [ (" " * (indent + 2)) + item.rstrip() for item in wrapped_text ] last_line = rlist[-1] last_line_elements = last_line.count(",") + 1 # "Manually" format limit output so that it is either 3 lines, first and # last line with 3 elements and the middle with '...' or 7 lines, each with # 1 element and the middle with '...' # If numbers are not to be aligned at commas (variable width) then use the # existing results of the wrap() function if limit and (lvector > 6): if (first_line_elements < 3) or ( (first_line_elements == 3) and (last_line_elements < 3) ): rlist = [ "[ {0},".format(_str(vector[0])), _str(vector[1]), _str(vector[2]), "...", _str(vector[-3]), _str(vector[-2]), "{0} ]".format(_str(vector[-1])), ] first_line_elements = 1 else: rlist = [ "[ {0},".format(", ".join(_str(*vector[:3]))), "...", "{0} ]".format(", ".join(_str(*vector[-3:]))), ] first_line = rlist[0] elif limit: rlist = [item.lstrip() for item in rlist] first_comma_index = first_line.find(",") actual_width = len(first_line) - 2 if not eng: if not limit: return "\n".join(rlist) num_elements = len(rlist) return "\n".join( [ "{spaces}{line}{comma}".format( spaces=(" " * (indent + 2)) if num > 0 else "", line=( line.center(actual_width).rstrip() if line.strip() == "..." 
else line ), comma=( "," if ( (num < num_elements - 1) and (not line.endswith(",")) and (line.strip() != "...") ) else "" ), ) if num > 0 else line for num, line in enumerate(rlist) ] ) # Align elements across multiple lines if limit: remainder_list = [line.lstrip() for line in rlist[1:]] else: remainder_list = _split_every( text=uret[len(first_line) :], sep=",", count=first_line_elements, lstrip=True, ) new_wrapped_lines_list = [first_line] for line in remainder_list[:-1]: new_wrapped_lines_list.append( "{0},".format(line).rjust(actual_width) if line != "..." else line.center(actual_width).rstrip() ) # Align last line on fist comma (if it exists) or # on length of field if does not if remainder_list[-1].find(",") == -1: marker = len(remainder_list[-1]) - 2 else: marker = remainder_list[-1].find(",") new_wrapped_lines_list.append( "{0}{1}".format((first_comma_index - marker - 2) * " ", remainder_list[-1]) ) return "\n".join( [ "{spaces}{line}".format(spaces=" " * (indent + 2), line=line) if num > 0 else line for num, line in enumerate(new_wrapped_lines_list) ] )
[ "def", "pprint_vector", "(", "vector", ",", "limit", "=", "False", ",", "width", "=", "None", ",", "indent", "=", "0", ",", "eng", "=", "False", ",", "frac_length", "=", "3", ")", ":", "# pylint: disable=R0912,R0913", "num_digits", "=", "12", "approx", "=", "lambda", "x", ":", "float", "(", "x", ")", "if", "\".\"", "not", "in", "x", "else", "round", "(", "float", "(", "x", ")", ",", "num_digits", ")", "def", "limstr", "(", "value", ")", ":", "str1", "=", "str", "(", "value", ")", "iscomplex", "=", "isinstance", "(", "value", ",", "complex", ")", "str1", "=", "str1", ".", "lstrip", "(", "\"(\"", ")", ".", "rstrip", "(", "\")\"", ")", "if", "\".\"", "not", "in", "str1", ":", "return", "str1", "if", "iscomplex", ":", "sign", "=", "\"+\"", "if", "value", ".", "imag", ">=", "0", "else", "\"-\"", "regexp", "=", "re", ".", "compile", "(", "r\"(.*(?:[Ee][\\+-]\\d+)?)\"", "+", "(", "r\"\\+\"", "if", "sign", "==", "\"+\"", "else", "\"-\"", ")", "+", "r\"(.*(?:[Ee][\\+-]\\d+)?j)\"", ")", "rvalue", ",", "ivalue", "=", "regexp", ".", "match", "(", "str1", ")", ".", "groups", "(", ")", "return", "(", "str", "(", "complex", "(", "approx", "(", "rvalue", ")", ",", "approx", "(", "sign", "+", "ivalue", ".", "strip", "(", "\"j\"", ")", ")", ")", ")", ".", "lstrip", "(", "\"(\"", ")", ".", "rstrip", "(", "\")\"", ")", ")", "str2", "=", "str", "(", "round", "(", "value", ",", "num_digits", ")", ")", "return", "str2", "if", "len", "(", "str1", ")", ">", "len", "(", "str2", ")", "else", "str1", "def", "_str", "(", "*", "args", ")", ":", "\"\"\"\n Convert numbers to string, optionally represented in engineering notation.\n\n Numbers may be integers, float or complex\n \"\"\"", "ret", "=", "[", "(", "limstr", "(", "element", ")", "if", "not", "eng", "else", "peng", "(", "element", ",", "frac_length", ",", "True", ")", ")", "if", "not", "isinstance", "(", "element", ",", "complex", ")", "else", "(", "limstr", "(", "element", ")", "if", "not", "eng", "else", "\"{real}{sign}{imag}j\"", ".", "format", "(", "real", "=", "peng", "(", "element", ".", "real", ",", "frac_length", ",", "True", ")", ",", "imag", "=", "peng", "(", "abs", "(", "element", ".", "imag", ")", ",", "frac_length", ",", "True", ")", ",", "sign", "=", "\"+\"", "if", "element", ".", "imag", ">=", "0", "else", "\"-\"", ",", ")", ")", "for", "element", "in", "args", "]", "return", "ret", "[", "0", "]", "if", "len", "(", "ret", ")", "==", "1", "else", "ret", "if", "vector", "is", "None", ":", "return", "\"None\"", "lvector", "=", "len", "(", "vector", ")", "if", "(", "not", "limit", ")", "or", "(", "limit", "and", "(", "lvector", "<", "7", ")", ")", ":", "items", "=", "_str", "(", "*", "vector", ")", "uret", "=", "\"[ {0} ]\"", ".", "format", "(", "\", \"", ".", "join", "(", "items", ")", ")", "else", ":", "items", "=", "_str", "(", "*", "(", "vector", "[", ":", "3", "]", "+", "vector", "[", "-", "3", ":", "]", ")", ")", "uret", "=", "\"[ {0}, ..., {1} ]\"", ".", "format", "(", "\", \"", ".", "join", "(", "items", "[", ":", "3", "]", ")", ",", "\", \"", ".", "join", "(", "items", "[", "-", "3", ":", "]", ")", ")", "if", "(", "width", "is", "None", ")", "or", "(", "len", "(", "uret", ")", "<", "width", ")", ":", "return", "uret", "# -4 comes from the fact that an opening '[ ' and a closing ' ]'", "# are added to the multi-line vector string", "if", "any", "(", "[", "len", "(", "item", ")", ">", "width", "-", "4", "for", "item", "in", "items", "]", ")", ":", "raise", "ValueError", "(", "\"Argument `width` is too small\"", ")", "# Text needs to be wrapped 
in multiple lines", "# Figure out how long the first line needs to be", "wobj", "=", "textwrap", ".", "TextWrapper", "(", "initial_indent", "=", "\"[ \"", ",", "width", "=", "width", ")", "# uret[2:] -> do not include initial '[ ' as this is specified as", "# the initial indent to the text wrapper", "rlist", "=", "wobj", ".", "wrap", "(", "uret", "[", "2", ":", "]", ")", "first_line", "=", "rlist", "[", "0", "]", "first_line_elements", "=", "first_line", ".", "count", "(", "\",\"", ")", "# Reconstruct string representation of vector excluding first line", "# Remove ... from text to be wrapped because it is placed in a single", "# line centered with the content", "uret_left", "=", "(", "\",\"", ".", "join", "(", "uret", ".", "split", "(", "\",\"", ")", "[", "first_line_elements", ":", "]", ")", ")", ".", "replace", "(", "\"...,\"", ",", "\"\"", ")", "wobj", "=", "textwrap", ".", "TextWrapper", "(", "width", "=", "width", "-", "2", ")", "wrapped_text", "=", "wobj", ".", "wrap", "(", "uret_left", ".", "lstrip", "(", ")", ")", "# Construct candidate wrapped and indented list of vector elements", "rlist", "=", "[", "first_line", "]", "+", "[", "(", "\" \"", "*", "(", "indent", "+", "2", ")", ")", "+", "item", ".", "rstrip", "(", ")", "for", "item", "in", "wrapped_text", "]", "last_line", "=", "rlist", "[", "-", "1", "]", "last_line_elements", "=", "last_line", ".", "count", "(", "\",\"", ")", "+", "1", "# \"Manually\" format limit output so that it is either 3 lines, first and", "# last line with 3 elements and the middle with '...' or 7 lines, each with", "# 1 element and the middle with '...'", "# If numbers are not to be aligned at commas (variable width) then use the", "# existing results of the wrap() function", "if", "limit", "and", "(", "lvector", ">", "6", ")", ":", "if", "(", "first_line_elements", "<", "3", ")", "or", "(", "(", "first_line_elements", "==", "3", ")", "and", "(", "last_line_elements", "<", "3", ")", ")", ":", "rlist", "=", "[", "\"[ {0},\"", ".", "format", "(", "_str", "(", "vector", "[", "0", "]", ")", ")", ",", "_str", "(", "vector", "[", "1", "]", ")", ",", "_str", "(", "vector", "[", "2", "]", ")", ",", "\"...\"", ",", "_str", "(", "vector", "[", "-", "3", "]", ")", ",", "_str", "(", "vector", "[", "-", "2", "]", ")", ",", "\"{0} ]\"", ".", "format", "(", "_str", "(", "vector", "[", "-", "1", "]", ")", ")", ",", "]", "first_line_elements", "=", "1", "else", ":", "rlist", "=", "[", "\"[ {0},\"", ".", "format", "(", "\", \"", ".", "join", "(", "_str", "(", "*", "vector", "[", ":", "3", "]", ")", ")", ")", ",", "\"...\"", ",", "\"{0} ]\"", ".", "format", "(", "\", \"", ".", "join", "(", "_str", "(", "*", "vector", "[", "-", "3", ":", "]", ")", ")", ")", ",", "]", "first_line", "=", "rlist", "[", "0", "]", "elif", "limit", ":", "rlist", "=", "[", "item", ".", "lstrip", "(", ")", "for", "item", "in", "rlist", "]", "first_comma_index", "=", "first_line", ".", "find", "(", "\",\"", ")", "actual_width", "=", "len", "(", "first_line", ")", "-", "2", "if", "not", "eng", ":", "if", "not", "limit", ":", "return", "\"\\n\"", ".", "join", "(", "rlist", ")", "num_elements", "=", "len", "(", "rlist", ")", "return", "\"\\n\"", ".", "join", "(", "[", "\"{spaces}{line}{comma}\"", ".", "format", "(", "spaces", "=", "(", "\" \"", "*", "(", "indent", "+", "2", ")", ")", "if", "num", ">", "0", "else", "\"\"", ",", "line", "=", "(", "line", ".", "center", "(", "actual_width", ")", ".", "rstrip", "(", ")", "if", "line", ".", "strip", "(", ")", "==", "\"...\"", "else", "line", ")", ",", 
"comma", "=", "(", "\",\"", "if", "(", "(", "num", "<", "num_elements", "-", "1", ")", "and", "(", "not", "line", ".", "endswith", "(", "\",\"", ")", ")", "and", "(", "line", ".", "strip", "(", ")", "!=", "\"...\"", ")", ")", "else", "\"\"", ")", ",", ")", "if", "num", ">", "0", "else", "line", "for", "num", ",", "line", "in", "enumerate", "(", "rlist", ")", "]", ")", "# Align elements across multiple lines", "if", "limit", ":", "remainder_list", "=", "[", "line", ".", "lstrip", "(", ")", "for", "line", "in", "rlist", "[", "1", ":", "]", "]", "else", ":", "remainder_list", "=", "_split_every", "(", "text", "=", "uret", "[", "len", "(", "first_line", ")", ":", "]", ",", "sep", "=", "\",\"", ",", "count", "=", "first_line_elements", ",", "lstrip", "=", "True", ",", ")", "new_wrapped_lines_list", "=", "[", "first_line", "]", "for", "line", "in", "remainder_list", "[", ":", "-", "1", "]", ":", "new_wrapped_lines_list", ".", "append", "(", "\"{0},\"", ".", "format", "(", "line", ")", ".", "rjust", "(", "actual_width", ")", "if", "line", "!=", "\"...\"", "else", "line", ".", "center", "(", "actual_width", ")", ".", "rstrip", "(", ")", ")", "# Align last line on fist comma (if it exists) or", "# on length of field if does not", "if", "remainder_list", "[", "-", "1", "]", ".", "find", "(", "\",\"", ")", "==", "-", "1", ":", "marker", "=", "len", "(", "remainder_list", "[", "-", "1", "]", ")", "-", "2", "else", ":", "marker", "=", "remainder_list", "[", "-", "1", "]", ".", "find", "(", "\",\"", ")", "new_wrapped_lines_list", ".", "append", "(", "\"{0}{1}\"", ".", "format", "(", "(", "first_comma_index", "-", "marker", "-", "2", ")", "*", "\" \"", ",", "remainder_list", "[", "-", "1", "]", ")", ")", "return", "\"\\n\"", ".", "join", "(", "[", "\"{spaces}{line}\"", ".", "format", "(", "spaces", "=", "\" \"", "*", "(", "indent", "+", "2", ")", ",", "line", "=", "line", ")", "if", "num", ">", "0", "else", "line", "for", "num", ",", "line", "in", "enumerate", "(", "new_wrapped_lines_list", ")", "]", ")" ]
r""" Format a list of numbers (vector) or a Numpy vector for printing. If the argument **vector** is :code:`None` the string :code:`'None'` is returned :param vector: Vector to pretty print or None :type vector: list of integers or floats, Numpy vector or None :param limit: Flag that indicates whether at most 6 vector items are printed (all vector items if its length is equal or less than 6, first and last 3 vector items if it is not) (True), or the entire vector is printed (False) :type limit: boolean :param width: Number of available characters per line. If None the vector is printed in one line :type width: integer or None :param indent: Flag that indicates whether all subsequent lines after the first one are indented (True) or not (False). Only relevant if **width** is not None :type indent: boolean :param eng: Flag that indicates whether engineering notation is used (True) or not (False) :type eng: boolean :param frac_length: Number of digits of fractional part (only applicable if **eng** is True) :type frac_length: integer :raises: ValueError (Argument \`width\` is too small) :rtype: string For example: >>> from __future__ import print_function >>> import peng >>> header = 'Vector: ' >>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9] >>> print( ... header+peng.pprint_vector( ... data, ... width=30, ... eng=True, ... frac_length=1, ... limit=True, ... indent=len(header) ... ) ... ) Vector: [ 1.0m, 20.0u, 300.0M, ... 700.0 , 8.0 , 9.0 ] >>> print( ... header+peng.pprint_vector( ... data, ... width=30, ... eng=True, ... frac_length=0, ... indent=len(header) ... ) ... ) Vector: [ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ] >>> print(peng.pprint_vector(data, eng=True, frac_length=0)) [ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ] >>> print(peng.pprint_vector(data, limit=True)) [ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ]
[ "r", "Format", "a", "list", "of", "numbers", "(", "vector", ")", "or", "a", "Numpy", "vector", "for", "printing", "." ]
python
test
36.447581
NoviceLive/intellicoder
intellicoder/converters.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/converters.py#L64-L69
def from_section(cls, stream, section_name='.pic'): """Construct a Converter object from the specified section of the specified binary stream.""" binary = Executable(stream) section_data = binary.get_section_data(section_name) return cls(section_data, binary.system)
[ "def", "from_section", "(", "cls", ",", "stream", ",", "section_name", "=", "'.pic'", ")", ":", "binary", "=", "Executable", "(", "stream", ")", "section_data", "=", "binary", ".", "get_section_data", "(", "section_name", ")", "return", "cls", "(", "section_data", ",", "binary", ".", "system", ")" ]
Construct a Converter object from the specified section of the specified binary stream.
[ "Construct", "a", "Converter", "object", "from", "the", "specified", "section", "of", "the", "specified", "binary", "stream", "." ]
python
train
50.166667
tensorpack/tensorpack
tensorpack/tfutils/tower.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/tower.py#L429-L442
def get_variable(self, name): """ Get a variable used in this tower. The name should not contain the variable scope prefix of the tower. When the tower has the same variable scope and name scope, this is equivalent to :meth:`get_tensor`. """ name = get_op_tensor_name(name)[1] if len(self.vs_name): name_with_vs = self.vs_name + "/" + name else: name_with_vs = name return get_op_or_tensor_by_name(name_with_vs)
[ "def", "get_variable", "(", "self", ",", "name", ")", ":", "name", "=", "get_op_tensor_name", "(", "name", ")", "[", "1", "]", "if", "len", "(", "self", ".", "vs_name", ")", ":", "name_with_vs", "=", "self", ".", "vs_name", "+", "\"/\"", "+", "name", "else", ":", "name_with_vs", "=", "name", "return", "get_op_or_tensor_by_name", "(", "name_with_vs", ")" ]
Get a variable used in this tower. The name should not contain the variable scope prefix of the tower. When the tower has the same variable scope and name scope, this is equivalent to :meth:`get_tensor`.
[ "Get", "a", "variable", "used", "in", "this", "tower", ".", "The", "name", "should", "not", "contain", "the", "variable", "scope", "prefix", "of", "the", "tower", "." ]
python
train
35.928571
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L1525-L1557
def run_delete_sm(self, tenant_id, fw_dict, is_fw_virt): """Runs the delete State Machine. Goes through every state function until the end or when one state returns failure. """ # Read the current state from the DB ret = True serv_obj = self.get_service_obj(tenant_id) state = serv_obj.get_state() # Preserve the ordering of the next lines till while new_state = serv_obj.fixup_state(fw_const.FW_DEL_OP, state) serv_obj.store_local_final_result(fw_const.RESULT_FW_DELETE_INIT) if state != new_state: state = new_state serv_obj.store_state(state) while ret: try: ret = self.fabric_fsm[state][1](tenant_id, fw_dict, is_fw_virt=is_fw_virt) except Exception as exc: LOG.error("Exception %(exc)s for state %(state)s", {'exc': str(exc), 'state': fw_const.fw_state_fn_del_dict.get(state)}) ret = False if ret: LOG.info("State %s return successfully", fw_const.fw_state_fn_del_dict.get(state)) if state == fw_const.INIT_STATE: break state = self.get_next_state(state, ret, fw_const.FW_DEL_OP) serv_obj.store_state(state) return ret
[ "def", "run_delete_sm", "(", "self", ",", "tenant_id", ",", "fw_dict", ",", "is_fw_virt", ")", ":", "# Read the current state from the DB", "ret", "=", "True", "serv_obj", "=", "self", ".", "get_service_obj", "(", "tenant_id", ")", "state", "=", "serv_obj", ".", "get_state", "(", ")", "# Preserve the ordering of the next lines till while", "new_state", "=", "serv_obj", ".", "fixup_state", "(", "fw_const", ".", "FW_DEL_OP", ",", "state", ")", "serv_obj", ".", "store_local_final_result", "(", "fw_const", ".", "RESULT_FW_DELETE_INIT", ")", "if", "state", "!=", "new_state", ":", "state", "=", "new_state", "serv_obj", ".", "store_state", "(", "state", ")", "while", "ret", ":", "try", ":", "ret", "=", "self", ".", "fabric_fsm", "[", "state", "]", "[", "1", "]", "(", "tenant_id", ",", "fw_dict", ",", "is_fw_virt", "=", "is_fw_virt", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "error", "(", "\"Exception %(exc)s for state %(state)s\"", ",", "{", "'exc'", ":", "str", "(", "exc", ")", ",", "'state'", ":", "fw_const", ".", "fw_state_fn_del_dict", ".", "get", "(", "state", ")", "}", ")", "ret", "=", "False", "if", "ret", ":", "LOG", ".", "info", "(", "\"State %s return successfully\"", ",", "fw_const", ".", "fw_state_fn_del_dict", ".", "get", "(", "state", ")", ")", "if", "state", "==", "fw_const", ".", "INIT_STATE", ":", "break", "state", "=", "self", ".", "get_next_state", "(", "state", ",", "ret", ",", "fw_const", ".", "FW_DEL_OP", ")", "serv_obj", ".", "store_state", "(", "state", ")", "return", "ret" ]
Runs the delete State Machine. Goes through every state function until the end or when one state returns failure.
[ "Runs", "the", "delete", "State", "Machine", "." ]
python
train
42.606061
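A toy standalone version of the drive loop in run_delete_sm above: run each state's handler, stop when a handler fails or when the initial state completes. The state names, handlers, and transition table are invented for illustration.

INIT, DETACH, CLEANUP = 'INIT', 'DETACH', 'CLEANUP'
handlers = {CLEANUP: lambda: True, DETACH: lambda: True, INIT: lambda: True}
next_state = {CLEANUP: DETACH, DETACH: INIT}

state, ok = CLEANUP, True
while ok:
    ok = handlers[state]()        # run the current state's work
    if ok and state == INIT:
        break                     # reached the terminal state successfully
    state = next_state.get(state, INIT)
print('finished cleanly' if ok else 'stopped in state ' + state)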
michael-lazar/rtv
rtv/packages/praw/__init__.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L2199-L2207
def leave_moderator(self, subreddit): """Abdicate moderator status in a subreddit. Use with care. :param subreddit: The name of the subreddit to leave `status` from. :returns: the json response from the server. """ self.evict(self.config['my_mod_subreddits']) return self._leave_status(subreddit, self.config['leavemoderator'])
[ "def", "leave_moderator", "(", "self", ",", "subreddit", ")", ":", "self", ".", "evict", "(", "self", ".", "config", "[", "'my_mod_subreddits'", "]", ")", "return", "self", ".", "_leave_status", "(", "subreddit", ",", "self", ".", "config", "[", "'leavemoderator'", "]", ")" ]
Abdicate moderator status in a subreddit. Use with care. :param subreddit: The name of the subreddit to leave `status` from. :returns: the json response from the server.
[ "Abdicate", "moderator", "status", "in", "a", "subreddit", ".", "Use", "with", "care", "." ]
python
train
41
project-rig/rig
rig/machine_control/machine_controller.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1127-L1170
def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required, app_id=Required, clear=False): """Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like object <.MemoryIO>` which allows safe reading and writing to the block that is allocated. Returns ------- :py:class:`.MemoryIO` File-like object which allows accessing the newly allocated region of memory. For example:: >>> # Read, write and seek through the allocated memory just >>> # like a file >>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP >>> mem.write(b"Hello, world") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(5) # doctest: +SKIP b"Hello" >>> mem.read(7) # doctest: +SKIP b", world" >>> # Reads and writes are truncated to the allocated region, >>> # preventing accidental clobbering/access of memory. >>> mem.seek(0) # doctest: +SKIP >>> mem.write(b"How are you today?") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(100) # doctest: +SKIP b"How are you " See the :py:class:`.MemoryIO` class for details of other features of these file-like views of SpiNNaker's memory. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or the tag is already taken or invalid. """ # Perform the malloc start_address = self.sdram_alloc(size, tag, x, y, app_id, clear) return MemoryIO(self, x, y, start_address, start_address + size)
[ "def", "sdram_alloc_as_filelike", "(", "self", ",", "size", ",", "tag", "=", "0", ",", "x", "=", "Required", ",", "y", "=", "Required", ",", "app_id", "=", "Required", ",", "clear", "=", "False", ")", ":", "# Perform the malloc", "start_address", "=", "self", ".", "sdram_alloc", "(", "size", ",", "tag", ",", "x", ",", "y", ",", "app_id", ",", "clear", ")", "return", "MemoryIO", "(", "self", ",", "x", ",", "y", ",", "start_address", ",", "start_address", "+", "size", ")" ]
Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like object <.MemoryIO>` which allows safe reading and writing to the block that is allocated. Returns ------- :py:class:`.MemoryIO` File-like object which allows accessing the newly allocated region of memory. For example:: >>> # Read, write and seek through the allocated memory just >>> # like a file >>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP >>> mem.write(b"Hello, world") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(5) # doctest: +SKIP b"Hello" >>> mem.read(7) # doctest: +SKIP b", world" >>> # Reads and writes are truncated to the allocated region, >>> # preventing accidental clobbering/access of memory. >>> mem.seek(0) # doctest: +SKIP >>> mem.write(b"How are you today?") # doctest: +SKIP 12 >>> mem.seek(0) # doctest: +SKIP >>> mem.read(100) # doctest: +SKIP b"How are you " See the :py:class:`.MemoryIO` class for details of other features of these file-like views of SpiNNaker's memory. Raises ------ rig.machine_control.machine_controller.SpiNNakerMemoryError If the memory cannot be allocated, or the tag is already taken or invalid.
[ "Like", ":", "py", ":", "meth", ":", ".", "sdram_alloc", "but", "returns", "a", ":", "py", ":", "class", ":", "file", "-", "like", "object", "<", ".", "MemoryIO", ">", "which", "allows", "safe", "reading", "and", "writing", "to", "the", "block", "that", "is", "allocated", "." ]
python
train
46.25
bslatkin/dpxdt
dpxdt/client/workers.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/client/workers.py#L553-L559
def get_coordinator(): """Creates a coordinator and returns it.""" workflow_queue = Queue.Queue() complete_queue = Queue.Queue() coordinator = WorkflowThread(workflow_queue, complete_queue) coordinator.register(WorkflowItem, workflow_queue) return coordinator
[ "def", "get_coordinator", "(", ")", ":", "workflow_queue", "=", "Queue", ".", "Queue", "(", ")", "complete_queue", "=", "Queue", ".", "Queue", "(", ")", "coordinator", "=", "WorkflowThread", "(", "workflow_queue", ",", "complete_queue", ")", "coordinator", ".", "register", "(", "WorkflowItem", ",", "workflow_queue", ")", "return", "coordinator" ]
Creates a coordinator and returns it.
[ "Creates", "a", "coordinator", "and", "returns", "it", "." ]
python
train
39.571429
eaton-lab/toytree
toytree/Drawing.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/Drawing.py#L90-L132
def add_tip_labels_to_axes(self): """ Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting. """ # get tip-coords and replace if using fixed_order xpos = self.ttree.get_tip_coordinates('x') ypos = self.ttree.get_tip_coordinates('y') if self.style.orient in ("up", "down"): if self.ttree._fixed_order: xpos = list(range(self.ttree.ntips)) ypos = ypos[self.ttree._fixed_idx] if self.style.tip_labels_align: ypos = np.zeros(self.ttree.ntips) if self.style.orient in ("right", "left"): if self.ttree._fixed_order: xpos = xpos[self.ttree._fixed_idx] ypos = list(range(self.ttree.ntips)) if self.style.tip_labels_align: xpos = np.zeros(self.ttree.ntips) # pop fill from color dict if using color tstyle = deepcopy(self.style.tip_labels_style) if self.style.tip_labels_colors: tstyle.pop("fill") # add tip names to coordinates calculated above self.axes.text( xpos, ypos, self.tip_labels, angle=(0 if self.style.orient in ("right", "left") else -90), style=tstyle, color=self.style.tip_labels_colors, ) # get stroke-width for aligned tip-label lines (optional) # copy stroke-width from the edge_style unless user set it if not self.style.edge_align_style.get("stroke-width"): self.style.edge_align_style["stroke-width"] = ( self.style.edge_style["stroke-width"])
[ "def", "add_tip_labels_to_axes", "(", "self", ")", ":", "# get tip-coords and replace if using fixed_order", "xpos", "=", "self", ".", "ttree", ".", "get_tip_coordinates", "(", "'x'", ")", "ypos", "=", "self", ".", "ttree", ".", "get_tip_coordinates", "(", "'y'", ")", "if", "self", ".", "style", ".", "orient", "in", "(", "\"up\"", ",", "\"down\"", ")", ":", "if", "self", ".", "ttree", ".", "_fixed_order", ":", "xpos", "=", "list", "(", "range", "(", "self", ".", "ttree", ".", "ntips", ")", ")", "ypos", "=", "ypos", "[", "self", ".", "ttree", ".", "_fixed_idx", "]", "if", "self", ".", "style", ".", "tip_labels_align", ":", "ypos", "=", "np", ".", "zeros", "(", "self", ".", "ttree", ".", "ntips", ")", "if", "self", ".", "style", ".", "orient", "in", "(", "\"right\"", ",", "\"left\"", ")", ":", "if", "self", ".", "ttree", ".", "_fixed_order", ":", "xpos", "=", "xpos", "[", "self", ".", "ttree", ".", "_fixed_idx", "]", "ypos", "=", "list", "(", "range", "(", "self", ".", "ttree", ".", "ntips", ")", ")", "if", "self", ".", "style", ".", "tip_labels_align", ":", "xpos", "=", "np", ".", "zeros", "(", "self", ".", "ttree", ".", "ntips", ")", "# pop fill from color dict if using color", "tstyle", "=", "deepcopy", "(", "self", ".", "style", ".", "tip_labels_style", ")", "if", "self", ".", "style", ".", "tip_labels_colors", ":", "tstyle", ".", "pop", "(", "\"fill\"", ")", "# add tip names to coordinates calculated above", "self", ".", "axes", ".", "text", "(", "xpos", ",", "ypos", ",", "self", ".", "tip_labels", ",", "angle", "=", "(", "0", "if", "self", ".", "style", ".", "orient", "in", "(", "\"right\"", ",", "\"left\"", ")", "else", "-", "90", ")", ",", "style", "=", "tstyle", ",", "color", "=", "self", ".", "style", ".", "tip_labels_colors", ",", ")", "# get stroke-width for aligned tip-label lines (optional)", "# copy stroke-width from the edge_style unless user set it", "if", "not", "self", ".", "style", ".", "edge_align_style", ".", "get", "(", "\"stroke-width\"", ")", ":", "self", ".", "style", ".", "edge_align_style", "[", "\"stroke-width\"", "]", "=", "(", "self", ".", "style", ".", "edge_style", "[", "\"stroke-width\"", "]", ")" ]
Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting.
[ "Add", "text", "offset", "from", "tips", "of", "tree", "with", "correction", "for", "orientation", "and", "fixed_order", "which", "is", "usually", "used", "in", "multitree", "plotting", "." ]
python
train
39.55814
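A standalone sketch of the coordinate handling performed by add_tip_labels_to_axes above for an "up"/"down" oriented tree; the tip count, heights, and fixed-order index are illustrative values, not taken from toytree.

import numpy as np

ntips = 4
xpos = list(range(ntips))                 # fixed_order case: tips sit at 0..ntips-1 on x
ypos = np.array([1.0, 0.5, 2.0, 0.0])     # stand-in for get_tip_coordinates('y')
fixed_idx = np.array([2, 0, 3, 1])        # hypothetical fixed-order index
ypos = ypos[fixed_idx]                    # reorder heights to match the fixed tip order

# With tip_labels_align=True the labels are pinned to the axis baseline instead:
ypos_aligned = np.zeros(ntips)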
keenlabs/KeenClient-Python
keen/saved_queries.py
https://github.com/keenlabs/KeenClient-Python/blob/266387c3376d1e000d117e17c45045ae3439d43f/keen/saved_queries.py#L93-L139
def update(self, query_name, saved_query_attributes): """ Given a dict of attributes to be updated, update only those attributes in the Saved Query at the resource given by 'query_name'. This will perform two HTTP requests--one to fetch the query definition, and one to set the new attributes. This method will intend to preserve any other properties on the query. Master key must be set. """ query_name_attr_name = "query_name" refresh_rate_attr_name = "refresh_rate" query_attr_name = "query" metadata_attr_name = "metadata" old_saved_query = self.get(query_name) # Create a new query def to send back. We cannot send values for attributes like 'urls', # 'last_modified_date', 'run_information', etc. new_saved_query = { query_name_attr_name: old_saved_query[query_name_attr_name], # expected refresh_rate_attr_name: old_saved_query[refresh_rate_attr_name], # expected query_attr_name: {} } # If metadata was set, preserve it. The Explorer UI currently stores information here. old_metadata = (old_saved_query[metadata_attr_name] if metadata_attr_name in old_saved_query else None) if old_metadata: new_saved_query[metadata_attr_name] = old_metadata # Preserve any non-empty properties of the existing query. We get back values like None # for 'group_by', 'interval' or 'timezone', but those aren't accepted values when updating. old_query = old_saved_query[query_attr_name] # expected # Shallow copy since we want the entire object heirarchy to start with. for (key, value) in six.iteritems(old_query): if value: new_saved_query[query_attr_name][key] = value # Now, recursively overwrite any attributes passed in. SavedQueriesInterface._deep_update(new_saved_query, saved_query_attributes) return self.create(query_name, new_saved_query)
[ "def", "update", "(", "self", ",", "query_name", ",", "saved_query_attributes", ")", ":", "query_name_attr_name", "=", "\"query_name\"", "refresh_rate_attr_name", "=", "\"refresh_rate\"", "query_attr_name", "=", "\"query\"", "metadata_attr_name", "=", "\"metadata\"", "old_saved_query", "=", "self", ".", "get", "(", "query_name", ")", "# Create a new query def to send back. We cannot send values for attributes like 'urls',", "# 'last_modified_date', 'run_information', etc.", "new_saved_query", "=", "{", "query_name_attr_name", ":", "old_saved_query", "[", "query_name_attr_name", "]", ",", "# expected", "refresh_rate_attr_name", ":", "old_saved_query", "[", "refresh_rate_attr_name", "]", ",", "# expected", "query_attr_name", ":", "{", "}", "}", "# If metadata was set, preserve it. The Explorer UI currently stores information here.", "old_metadata", "=", "(", "old_saved_query", "[", "metadata_attr_name", "]", "if", "metadata_attr_name", "in", "old_saved_query", "else", "None", ")", "if", "old_metadata", ":", "new_saved_query", "[", "metadata_attr_name", "]", "=", "old_metadata", "# Preserve any non-empty properties of the existing query. We get back values like None", "# for 'group_by', 'interval' or 'timezone', but those aren't accepted values when updating.", "old_query", "=", "old_saved_query", "[", "query_attr_name", "]", "# expected", "# Shallow copy since we want the entire object heirarchy to start with.", "for", "(", "key", ",", "value", ")", "in", "six", ".", "iteritems", "(", "old_query", ")", ":", "if", "value", ":", "new_saved_query", "[", "query_attr_name", "]", "[", "key", "]", "=", "value", "# Now, recursively overwrite any attributes passed in.", "SavedQueriesInterface", ".", "_deep_update", "(", "new_saved_query", ",", "saved_query_attributes", ")", "return", "self", ".", "create", "(", "query_name", ",", "new_saved_query", ")" ]
Given a dict of attributes to be updated, update only those attributes in the Saved Query at the resource given by 'query_name'. This will perform two HTTP requests--one to fetch the query definition, and one to set the new attributes. This method will intend to preserve any other properties on the query. Master key must be set.
[ "Given", "a", "dict", "of", "attributes", "to", "be", "updated", "update", "only", "those", "attributes", "in", "the", "Saved", "Query", "at", "the", "resource", "given", "by", "query_name", ".", "This", "will", "perform", "two", "HTTP", "requests", "--", "one", "to", "fetch", "the", "query", "definition", "and", "one", "to", "set", "the", "new", "attributes", ".", "This", "method", "will", "intend", "to", "preserve", "any", "other", "properties", "on", "the", "query", "." ]
python
train
43.617021
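A hedged usage sketch for the update method above; saved_queries is assumed to be an instance of this interface configured with a master key, and the query name and attribute values are invented for illustration.

# Partially update a saved query, preserving every other stored property.
new_attributes = {"refresh_rate": 14400}  # e.g. refresh every 4 hours (assumed value)
result = saved_queries.update("daily-pageviews", new_attributes)

# Nested attributes are merged recursively via _deep_update, so a single
# sub-field of the stored query definition can be changed in isolation:
saved_queries.update("daily-pageviews", {"query": {"timezone": "US/Pacific"}})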