Dataset columns: docstring (string, lengths 52–499) · function (string, lengths 67–35.2k) · __index_level_0__ (int64, values 52.6k–1.16M)
Calculate ISBN checksum. Args: isbn (str): SBN, ISBN-10 or ISBN-13 Returns: ``str``: Checksum for given ISBN or SBN
def calculate_checksum(isbn):
    isbn = [int(i) for i in _isbn_cleanse(isbn, checksum=False)]
    if len(isbn) == 9:
        products = [x * y for x, y in zip(isbn, range(1, 10))]
        check = sum(products) % 11
        if check == 10:
            check = 'X'
    else:
        # Once Python 2.4 support is dropped this can become:
        # [(isbn[i] if i % 2 == 0 else isbn[i] * 3) for i in range(12)]
        products = []
        for i in range(12):
            if i % 2 == 0:
                products.append(isbn[i])
            else:
                products.append(isbn[i] * 3)
        check = 10 - sum(products) % 10
        if check == 10:
            check = 0
    return str(check)
997,668
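A quick numeric check of the two branches above (a sketch; it assumes `_isbn_cleanse` accepts bare 9- and 12-digit bodies):

    print(calculate_checksum('316148410'))     # ISBN-10 body: sum(i * digit) = 131; 131 % 11 == 10 -> 'X'
    print(calculate_checksum('978316148410'))  # ISBN-13 body: alternating 1/3 weights sum to 100 -> '0'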
Convert ISBNs between ISBN-10 and ISBN-13. Note: No attempt to hyphenate converted ISBNs is made, because the specification requires that *any* hyphenation must be correct but allows ISBNs without hyphenation. Args: isbn (str): SBN, ISBN-10 or ISBN-13 code (str): EAN Bookland code Returns: ``str``: Converted ISBN-10 or ISBN-13 Raises: IsbnError: When ISBN-13 isn't convertible to an ISBN-10
def convert(isbn, code='978'):
    isbn = _isbn_cleanse(isbn)
    if len(isbn) == 10:
        isbn = code + isbn[:-1]
        return isbn + calculate_checksum(isbn)
    else:
        if isbn.startswith('978'):
            return isbn[3:-1] + calculate_checksum(isbn[3:-1])
        else:
            raise IsbnError('Only ISBN-13s with 978 Bookland code can be '
                            'converted to ISBN-10.')
997,669
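Round-tripping the same book through both directions (a usage sketch of the functions above):

    print(convert('316148410X'))     # ISBN-10 -> '9783161484100'
    print(convert('9783161484100'))  # ISBN-13 with 978 prefix -> '316148410X'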
Initialise a new ``Isbn`` object. Args: isbn (str): ISBN string
def __init__(self, isbn):
    super(Isbn, self).__init__()
    self._isbn = isbn
    if len(isbn) in (9, 12):
        self.isbn = _isbn_cleanse(isbn, False)
    else:
        self.isbn = _isbn_cleanse(isbn)
997,670
Extended pretty printing for ISBN strings. Args: format_spec (str): Extended format to use Returns: ``str``: Human readable string representation of ``Isbn`` object Raises: ValueError: Unknown value for ``format_spec``
def __format__(self, format_spec=None):
    if not format_spec:  # default format calls set format_spec to ''
        return str(self)
    elif format_spec == 'url':
        return self.to_url()
    elif format_spec.startswith('url:'):
        parts = format_spec.split(':')[1:]
        site = parts[0]
        if len(parts) > 1:
            country = parts[1]
        else:
            country = 'us'
        return self.to_url(site, country)
    elif format_spec == 'urn':
        return self.to_urn()
    else:
        raise ValueError('Unknown format_spec %r' % format_spec)
997,671
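A usage sketch of the extended format specs (hedged: `to_urn()` and `to_url()` are defined elsewhere on the class, so the exact output depends on them):

    isbn = Isbn('9783161484100')
    print('{0}'.format(isbn))                # default -> str(isbn)
    print('{0:urn}'.format(isbn))            # URN form via to_urn()
    print('{0:url:amazon:uk}'.format(isbn))  # site + country link via to_url()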
Generate a link to an online book site. Args: site (str): Site to create link to country (str): Country specific version of ``site`` Returns: ``str``: URL on ``site`` for book Raises: SiteError: Unknown site value CountryError: Unknown country value
def to_url(self, site='amazon', country='us'):
    try:
        try:
            url, tlds = URL_MAP[site]
        except ValueError:
            # the URL_MAP entry is a bare URL with no TLD table
            tlds = None
            url = URL_MAP[site]
    except KeyError:
        raise SiteError(site)
    inject = {'isbn': self._isbn}
    if tlds:
        if country not in tlds:
            raise CountryError(country)
        tld = tlds[country]
        if not tld:
            tld = country
        inject['tld'] = tld
    return url % inject
997,673
Initialise a new ``Sbn`` object. Args: sbn (str): SBN string
def __init__(self, sbn):
    isbn = '0' + sbn
    super(Sbn, self).__init__(isbn)
997,674
Create a definition dictionary for the TELEX input method. Args: w_shorthand (optional): allow a stand-alone w to be interpreted as an ư. Defaults to True. brackets_shorthand (optional): allow typing ][ as shorthand for ươ. Defaults to True. Returns a dictionary to be passed into process_key().
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
    telex = {
        "a": "a^",
        "o": "o^",
        "e": "e^",
        "w": ["u*", "o*", "a+"],
        "d": "d-",
        "f": "\\",
        "s": "/",
        "r": "?",
        "x": "~",
        "j": ".",
    }
    if w_shorthand:
        telex["w"].append('<ư')
    if brackets_shorthand:
        telex.update({
            "]": "<ư",
            "[": "<ơ",
            "}": "<Ư",
            "{": "<Ơ",
        })
    return telex
998,144
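A minimal check of the two shorthand flags (using the definition above):

    rules = get_telex_definition(w_shorthand=False, brackets_shorthand=False)
    print(rules["w"])    # ['u*', 'o*', 'a+'] -- no stand-alone '<ư' appended
    print("[" in rules)  # False -- bracket shorthands were not installed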
Convert a key sequence into a Vietnamese string with diacritical marks. Args: rules (optional): see docstring for process_key(). skip_non_vietnamese (optional): see docstring for process_key(). It even supports continuous key sequences connected by separators, e.g. process_sequence('con meof.ddieen') should work.
def process_sequence(sequence, rules=None, skip_non_vietnamese=True):
    result = ""
    raw = result
    result_parts = []
    if rules is None:
        rules = get_telex_definition()
    accepted_chars = _accepted_chars(rules)
    for key in sequence:
        if key not in accepted_chars:
            result_parts.append(result)
            result_parts.append(key)
            result = ""
            raw = ""
        else:
            result, raw = process_key(
                string=result,
                key=key,
                fallback_sequence=raw,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)
    result_parts.append(result)
    return ''.join(result_parts)
998,146
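A hedged usage sketch, assuming the default TELEX rules above (the expected strings follow standard TELEX typing; separators such as '.' and spaces pass through untouched):

    print(process_sequence('ddieen'))           # -> 'điên' (dd -> đ, ee -> ê)
    print(process_sequence('con meof.ddieen'))  # -> 'con mèo.điên'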
Check pagination limits and raise an exception if needed. Args: pageNum (int): Page number itemsPerPage (int): Number of items per page Raises: ErrPaginationLimits: If the arguments are out of limits
def checkAndRaise(pageNum, itemsPerPage):
    if pageNum < 1:
        raise ErrPaginationLimits(ErrPaginationLimits.ERR_PAGE_NUM)
    if itemsPerPage < Settings.itemsPerPageMin or itemsPerPage > Settings.itemsPerPageMax:
        raise ErrPaginationLimits(ErrPaginationLimits.ERR_ITEMS_PER_PAGE)
998,197
Publish the package to PyPI. Args: msg (str, optional): Checkpoint message used when pushing.
def publish(msg="checkpoint: publish package"):
    test = check()
    if test.succeeded:
        # clean()
        # push(msg)
        sdist = local("python setup.py sdist")
        if sdist.succeeded:
            build = local(
                'python setup.py build && python setup.py bdist_egg')
            if build.succeeded:
                upload = local("twine upload dist/*")
                if upload.succeeded:
                    tag()
999,307
Initialise formatting settings, optionally overriding the defaults. Args: options (dict, optional): Settings configuration object.
def __init__(self, options=None):
    # avoid a mutable default argument
    options = options or {}
    settings = {
        'currency': {
            'symbol': "$",
            'format': "%s%v",
            'decimal': ".",
            'thousand': ",",
            'precision': 2,
            'grouping': 3
        },
        'number': {
            'precision': 0,
            'grouping': 3,
            'thousand': ",",
            'decimal': "."
        }
    }
    if options:
        settings.update(options)
    self.settings = settings
1,000,033
Check and normalise the currency format. Args: format (str or callable, optional): A format string containing '%v', or a callable returning one. Returns: dict: Mapping with 'pos', 'neg' and 'zero' format strings (or the updated settings when falling back to the defaults).
def _check_currency_format(self, format=None):
    defaults = self.settings['currency']['format']
    if hasattr(format, '__call__'):
        format = format()
    # search, not match: '%v' may appear anywhere in the format string
    if is_str(format) and re.search('%v', format):
        # Create and return positive, negative and zero formats:
        return {
            'pos': format,
            'neg': format.replace("-", "").replace("%v", "-%v"),
            'zero': format
        }
    elif not format or not format['pos'] or not re.search('%v', format['pos']):
        self.settings['currency']['format'] = {
            'pos': defaults,
            'neg': defaults.replace("%v", "-%v"),
            'zero': defaults
        }
        return self.settings
    return format
1,000,034
Check and normalise the value of precision (must be a positive integer). Args: val (int): Value to normalise; must be a positive integer. base (int): Fallback value used when val is not a number. Returns: int: The normalised precision.
def _change_precision(self, val, base=0):
    if not isinstance(val, int):
        raise TypeError('The first argument must be an integer.')
    val = round(abs(val))
    # fall back to base when val is not a usable number
    val = val if is_num(val) else base
    return val
1,000,035
Replace special values in the decisions array. Args: decisions (array of array of str): Standard decision array format. Raises: ValueError: A row element doesn't have a parent value. Returns: New decision array with updated values.
def __replaceSpecialValues(self, decisions):
    error = []
    for row, line in enumerate(decisions):
        if '.' in line:
            for i, element in enumerate(line):
                if row == 0:
                    error.append("Row: {} Column: {} ==> doesn't have a parent value".format(
                        str(row).ljust(4), str(i).ljust(4)))
                if element == self.__parentSymbol:
                    if decisions[row - 1][i] == '.':
                        error.append("Row: {} Column: {} ==> doesn't have a parent value".format(
                            str(row).ljust(4), str(i).ljust(4)))
                    decisions[row][i] = decisions[row - 1][i]
    if error:
        view.Tli.showErrors('ReplaceSpecialValuesError', error)
    else:
        return decisions
1,000,053
Replace dict values with string values. Args: values (dict): Dictionary of values Returns: Updated values dict
def __toString(self, values):
    for key in values:
        if not isinstance(values[key], str):
            values[key] = str(values[key])
    return values
1,000,054
This is a helper function that matches decision values to row indexes as represented in the header index. Args: values (dict): Normally a dict of header names and values from the decision. Returns: dict mapping each header's index in the header array to values[headerName].
def __valueKeyWithHeaderIndex(self, values):
    matchingIndexes = {}
    for index, name in enumerate(self.header):
        if name in values:
            matchingIndexes[index] = values[name]
    return matchingIndexes
1,000,055
Check decision parameters and collect an error message for anything wrong. Args: result (array of str): See public decision methods **values (dict of str): See public decision methods Returns: Array of error messages, or None when everything is valid.
def __checkDecisionParameters(self, result, **values):
    error = []
    if not result:
        error.append('Function parameter (result array) should contain one or more header string!')
    if not values:
        error.append('Function parameter (values variables) should contain one or more variable')
    for header in result:
        if header not in self.header:
            error.append('String (' + header + ') in result is not in header!')
    for header in values:
        if header not in self.header:
            error.append('Variable (' + header + ') in values is not in header!')
        elif not values[header].split():
            error.append('Variable (' + header + ') in values is empty string')
    if error:
        return error
1,000,056
The main method for decision picking. Args: result (array of str): What values you want to get in the returned array. multiple (boolean, optional): Whether to return multiple results when many matching decisions are found. **values (dict): What the finder should look for (headerString: value). Returns: Mapped result values with the found elements in the row(s).
def __getDecision(self, result, multiple=False, **values):
    values = self.__toString(values)
    __valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)
    errors = self.__checkDecisionParameters(result, **values)
    if errors:
        view.Tli.showErrors('ParametersError', errors)
    matchingData = {}
    for line in self.decisions:
        match = True
        for index in __valueKeyWithHeaderIndex:
            if line[index] != __valueKeyWithHeaderIndex[index]:
                if line[index] != self.__wildcardSymbol:
                    match = False
                    break
        if match:
            if multiple:
                for header in result:
                    if header not in matchingData:
                        matchingData[header] = [line[self.header.index(header)]]
                    else:
                        matchingData[header].append(line[self.header.index(header)])
            else:
                for header in result:
                    matchingData[header] = line[self.header.index(header)]
                return matchingData
    if multiple:
        if matchingData:
            return matchingData
    # Return None values if nothing was found (not a string, so a
    # not-found value can be recognised)
    return dict((key, None) for key in result)
1,000,057
Extract a domain name from the url (without subdomain). Args: url (str): Url. Returns: str: Domain name. Raises: WrongUrlError: If url is wrong. Examples: >>> get_domain_name('https://vod.tvp.pl/video/') 'tvp.pl' >>> get_domain_name('https://vod') Traceback (most recent call last): ... rtv.exceptions.WrongUrlError: Couldn't match domain name of this url: https://vod
def get_domain_name(url):
    if not validate_url(url):
        raise WrongUrlError(f'Couldn\'t match domain name of this url: {url}')
    ext = tldextract.extract(url)
    return f'{ext.domain}.{ext.suffix}'
1,000,113
Clean video data: -> cleans title -> ... Args: _data (dict): Information about the video. Returns: dict: Refined video data.
def clean_video_data(_data):
    data = _data.copy()  # TODO: fix this ugliness
    title = data.get('title')
    if title:
        data['title'] = clean_title(title)
    return data
1,000,114
Clean title -> remove dates, remove duplicated spaces and strip title. Args: title (str): Title. Returns: str: Clean title without dates, duplicated, trailing and leading spaces.
def clean_title(title):
    date_pattern = re.compile(r'\W*'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'\d{1,2}'
                              r'[/\-.]'
                              r'(?=\d*)(?:.{4}|.{2})'
                              r'\W*')
    title = date_pattern.sub(' ', title)
    title = re.sub(r'\s{2,}', ' ', title)
    title = title.strip()
    return title
1,000,115
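A quick illustration of the date pattern above (both 2- and 4-digit years are matched):

    print(clean_title('News 12.03.2018 evening edition'))  # -> 'News evening edition'
    print(clean_title('News 1/2/18'))                      # -> 'News'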
Extract an extension from the url. Args: url (str): String representation of a url. Returns: str: Filename extension from a url (without a dot), '' if extension is not present.
def get_ext(url):
    parsed = urllib.parse.urlparse(url)
    root, ext = os.path.splitext(parsed.path)
    return ext.lstrip('.')
1,000,116
Remove duplicates from an iterable, preserving the order. Args: seq: Iterable of various type. Returns: list: List of unique objects.
def delete_duplicates(seq):
    seen = set()
    seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]
1,000,117
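Usage, plus the reason for the `seen_add` binding: caching the bound method avoids a repeated attribute lookup on every element, a common micro-optimisation for this dedup idiom.

    print(delete_duplicates([3, 1, 3, 2, 1]))  # -> [3, 1, 2]
    print(delete_duplicates('abracadabra'))    # -> ['a', 'b', 'r', 'c', 'd']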
Load an ABF and make its stats and sweeps easily available. Arguments: fname - filename of an ABF object createFolder - if True, the ./swhlab/ folder will be created
def __init__(self, fname, createFolder=False):
    logging.basicConfig(format=swhlab.logFormat,
                        datefmt=swhlab.logDateFormat,
                        level=swhlab.loglevel)
    self.log = logging.getLogger("swhlab ABF")
    self.log.setLevel(swhlab.loglevel)
    if "ABF object" in str(fname):
        self.log.debug("reusing same ABF object")
        for item in sorted(dir(fname)):
            try:
                setattr(self, item, getattr(fname, item))
            except:
                pass
        return
    self.log.debug("_" * 60)
    self.log.info("SWHLab (%s) loading ABF [%s]", swhlab.__version__, str(fname))
    if not os.path.exists(str(fname)):
        self.log.error("path doesn't exist!")
        return
    # load the ABF and populate properties
    self.ABFreader = io.AxonIO(filename=fname)
    self.ABFblock = self.ABFreader.read_block(lazy=False, cascade=True)
    self.header = self.ABFreader.read_header()
    self.protocomment = abfProtocol(fname)  # get ABF file comment
    self.ID = abfIDfromFname(fname)  # filename without extension
    self.filename = os.path.abspath(fname)  # full path to file on disk
    self.fileID = os.path.abspath(os.path.splitext(self.filename)[0])  # no extension
    self.outFolder = os.path.abspath(os.path.dirname(fname) + "/swhlab/")  # save stuff here
    self.outPre = os.path.join(self.outFolder, self.ID) + '_'  # save files prefixed this
    self.sweeps = self.ABFblock.size["segments"]  # number of sweeps in ABF
    self.timestamp = self.ABFblock.rec_datetime  # when the ABF recording started
    # these I still have to read directly out of the header
    self.holding = self.header['listDACInfo'][0]['fDACHoldingLevel']  # clamp current or voltage
    # we've pulled what we can out of the header, now proceed with advanced stuff
    self.derivative = False  # whether or not to use the first derivative
    self.setsweep()  # run setsweep to populate sweep properties
    self.comments_load()  # populate comments
    self.kernel = None  # variable which may be set for convolution
    if createFolder:
        self.output_touch()  # make sure output folder exists
    # TODO: detect if invalid or corrupted ABF
    self.log.debug("ABF loaded. (protocol: %s)" % self.protocomment)
1,000,121
Import a blosc array into a numpy array. Arguments: data: A blosc packed numpy array Returns: A numpy array with data from a blosc compressed array
def to_array(data):
    try:
        numpy_data = blosc.unpack_array(data)
    except Exception as e:
        raise ValueError("Could not load numpy data. {}".format(e))
    return numpy_data
1,000,209
Export a numpy array to a blosc array. Arguments: array: The numpy array to compress to blosc array Returns: Bytes/String. A blosc compressed array
def from_array(array):
    try:
        raw_data = blosc.pack_array(array)
    except Exception as e:
        raise ValueError("Could not compress data from array. {}".format(e))
    return raw_data
1,000,210
Converts an array to its voxel list. Arguments: array (numpy.ndarray): A numpy nd array. This must be boolean! Returns: A list of n-tuples
def to_voxels(array):
    if type(array) is not numpy.ndarray:
        raise ValueError("array argument must be of type numpy.ndarray")
    return numpy.argwhere(array)
1,000,354
Converts a voxel list to an ndarray. Arguments: voxels (tuple[]): A list of coordinates indicating coordinates of populated voxels in an ndarray. Returns: numpy.ndarray The result of the transformation.
def from_voxels(voxels):
    dimensions = len(voxels[0])
    # bounding size along each dimension (+1 since coordinates are zero-indexed)
    size = []
    for d in range(dimensions):
        size.append(max(i[d] for i in voxels) + 1)
    result = numpy.zeros(size)
    for v in voxels:
        result[tuple(v)] = 1
    return result
1,000,355
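A round-trip sketch of the two converters above; note that `from_voxels` (as repaired here) returns the bounding box of the populated voxels, not the original array shape:

    import numpy

    arr = numpy.zeros((2, 2, 2))
    arr[0, 1, 1] = 1
    vox = to_voxels(arr.astype(bool))  # -> array([[0, 1, 1]])
    restored = from_voxels([tuple(v) for v in vox])
    assert restored.shape == (1, 2, 2) and restored[0, 1, 1] == 1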
Import a png file into a numpy array. Arguments: png_filename (str): A string filename of a png datafile Returns: A numpy array with data from the png file
def load(png_filename):
    # Expand filename to be absolute
    png_filename = os.path.expanduser(png_filename)
    try:
        img = Image.open(png_filename)
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion."
                         .format(png_filename))
    return numpy.array(img)
1,000,437
Export a numpy array to a set of png files, with each Z-index 2D array as its own 2D file. Arguments: png_filename_base: A filename template, such as "my-image-*.png" which will lead to a collection of files named "my-image-0.png", "my-image-1.png", etc. numpy_data: The numpy array data to save to png. Returns: Array. A list of expanded filenames that hold png data.
def save_collection(png_filename_base, numpy_data, start_layers_at=1):
    file_ext = png_filename_base.split('.')[-1]
    if file_ext in ['png']:
        # Filename is "name*.ext", set file_base to "name*".
        file_base = '.'.join(png_filename_base.split('.')[:-1])
        file_ext = "." + file_ext
    else:
        # Filename is "name*", set file_base to "name*".
        # That is, extension wasn't included.
        file_base = png_filename_base
        file_ext = ".png"
    file_base_array = file_base.split('*')
    # The array of filenames to return
    output_files = []
    # Filename 0-padding
    i = start_layers_at
    for layer in numpy_data:
        layer_filename = (str(i).zfill(6)).join(file_base_array) + file_ext
        output_files.append(save(layer_filename, layer))
        i += 1
    return output_files
1,000,439
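A usage sketch of the layer-naming scheme above (the '*' marks where the zero-padded index goes; this assumes the module's `save()` writes a single PNG and returns its filename):

    import numpy

    stack = numpy.zeros((3, 16, 16), dtype='uint8')
    files = save_collection('my-image-*.png', stack)
    # -> ['my-image-000001.png', 'my-image-000002.png', 'my-image-000003.png']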
Initialize a new `RAMONOrganelle`. Organelles take all of the arguments of their parent `RAMONVolume` class. Arguments: organelle_class (int: 0): The organelle class to which this object belongs. For more details, see the online ndstore documentation
def __init__(self, organelle_class=0, xyz_offset=(0, 0, 0), resolution=0,
             cutout=None, voxels=None, id=DEFAULT_ID,
             confidence=DEFAULT_CONFIDENCE, kvpairs=DEFAULT_DYNAMIC_METADATA,
             status=DEFAULT_STATUS, author=DEFAULT_AUTHOR):
    self.organelle_class = organelle_class
    RAMONVolume.__init__(self, xyz_offset=xyz_offset, resolution=resolution,
                         cutout=cutout, voxels=voxels, id=id,
                         confidence=confidence, kvpairs=kvpairs,
                         status=status, author=author)
1,000,597
Gets the block-size for a given token at a given resolution. Arguments: token (str): The token to inspect resolution (int : None): The resolution at which to inspect data. If none is specified, uses the minimum available. Returns: int[3]: The xyz blocksize.
def get_block_size(self, token, resolution=None):
    cdims = self.get_metadata(token)['dataset']['cube_dimension']
    if resolution is None:
        resolution = min(cdims.keys())
    return cdims[str(resolution)]
1,000,604
Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload.
def post_cutout(self, token, channel, x_start, y_start, z_start, data,
                resolution=0):
    datatype = self.get_proj_info(token)['channels'][channel]['datatype']
    if data.dtype.name != datatype:
        data = data.astype(datatype)
    data = numpy.rollaxis(data, 1)
    data = numpy.rollaxis(data, 2)
    if six.PY3 or data.nbytes > 1.5e9:
        ul_func = self._post_cutout_no_chunking_npz
    else:
        ul_func = self._post_cutout_no_chunking_blosc
    if data.size < self._chunk_threshold:
        return ul_func(token, channel, x_start, y_start, z_start, data,
                       resolution)
    return self._post_cutout_with_chunking(token, channel, x_start, y_start,
                                           z_start, data, resolution, ul_func)
1,000,610
Import a TIFF file into a numpy array. Arguments: tiff_filename: A string filename of a TIFF datafile Returns: A numpy array with data from the TIFF file
def load(tiff_filename):
    # Expand filename to be absolute
    tiff_filename = os.path.expanduser(tiff_filename)
    try:
        img = tiff.imread(tiff_filename)
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion."
                         .format(tiff_filename))
    return numpy.array(img)
1,000,645
Export a numpy array to a TIFF file. Arguments: tiff_filename: A filename to which to save the TIFF data numpy_data: The numpy array to save to TIFF Returns: String. The expanded filename that now holds the TIFF data
def save(tiff_filename, numpy_data):
    # Expand filename to be absolute
    tiff_filename = os.path.expanduser(tiff_filename)
    if type(numpy_data) is str:
        # raw bytes were passed instead of an array; write them verbatim
        fp = open(tiff_filename, "wb")
        fp.write(numpy_data)
        fp.close()
        return tiff_filename
    try:
        tiff.imsave(tiff_filename, numpy_data)
    except Exception as e:
        raise ValueError("Could not save TIFF file {0}.".format(tiff_filename))
    return tiff_filename
1,000,646
Load a multipage tiff into a single variable in x,y,z format. Arguments: tiff_filename: Filename of source data dtype: data type to use for the returned tensor Returns: Array containing contents from input tiff file in xyz order
def load_tiff_multipage(tiff_filename, dtype='float32'):
    if not os.path.isfile(tiff_filename):
        raise RuntimeError('could not find file "%s"' % tiff_filename)
    # load the data from multi-layer TIF files
    data = tiff.imread(tiff_filename)
    im = []
    while True:
        Xi = numpy.array(data, dtype=dtype)
        if Xi.ndim == 2:
            Xi = Xi[numpy.newaxis, ...]  # add slice dimension
        im.append(Xi)
        try:
            # advance to the next page (expects a PIL-style multipage object)
            data.seek(data.tell() + 1)
        except EOFError:
            break  # this just means hit end of file (not really an error)
    im = numpy.concatenate(im, axis=0)  # list of 2d -> tensor
    im = numpy.rollaxis(im, 1)
    im = numpy.rollaxis(im, 2)
    return im
1,000,647
Initialize a new `RAMONSegment`. Takes all arguments of `RAMONVolume`. Arguments: segmentclass (int: 0): The type of segment this is. See the online ndstore documentation for more details. neuron (int: 0): The neuron that this segment belongs to. synapses (int[]: []): List of synapses that fall in this segment organelles (int[]: []): List of organelles that fall in this segment
def __init__(self, segmentclass=0, neuron=0, synapses=None, organelles=None,
             xyz_offset=(0, 0, 0), resolution=0, cutout=None, voxels=None,
             id=DEFAULT_ID, confidence=DEFAULT_CONFIDENCE,
             kvpairs=DEFAULT_DYNAMIC_METADATA, status=DEFAULT_STATUS,
             author=DEFAULT_AUTHOR):
    self.segmentclass = segmentclass
    self.neuron = neuron
    # avoid shared mutable default arguments
    self.synapses = synapses if synapses is not None else []
    self.organelles = organelles if organelles is not None else []
    RAMONVolume.__init__(self, xyz_offset=xyz_offset, resolution=resolution,
                         cutout=cutout, voxels=voxels, id=id,
                         confidence=confidence, kvpairs=kvpairs,
                         status=status, author=author)
1,000,778
Import a nifti file into a numpy array. TODO: Currently only transfers raw data for compatibility with annotation and ND formats Arguments: nifti_filename (str): A string filename of a nifti datafile Returns: A numpy array with data from the nifti file
def load(nifti_filename):
    # Expand filename to be absolute
    nifti_filename = os.path.expanduser(nifti_filename)
    try:
        data = nib.load(nifti_filename)
        img = data.get_data()
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion."
                         .format(nifti_filename))
    return img
1,000,873
Export a numpy array to a nifti file. TODO: currently using dummy headers and identity matrix affine transform. This can be expanded. Arguments: nifti_filename (str): A filename to which to save the nifti data numpy_data (numpy.ndarray): The numpy array to save to nifti Returns: String. The expanded filename that now holds the nifti data
def save(nifti_filename, numpy_data):
    # Expand filename to be absolute
    nifti_filename = os.path.expanduser(nifti_filename)
    try:
        nifti_img = nib.Nifti1Image(numpy_data, numpy.eye(4))
        nib.save(nifti_img, nifti_filename)
    except Exception as e:
        raise ValueError("Could not save file {0}.".format(nifti_filename))
    return nifti_filename
1,000,874
Return the status-code of the API (estimated using the public-tokens lookup page). Arguments: suffix (str : 'public_tokens/'): The url endpoint to check Returns: int: status code
def ping(self, suffix='public_tokens/'):
    return self.remote_utils.ping(super(neuroRemote, self).url(), suffix)
1,000,912
Return a constructed URL, appending an optional suffix (uri path). Arguments: suffix (str : ""): The suffix to append to the end of the URL Returns: str: The complete URL
def url(self, suffix=""):
    return super(neuroRemote, self).url('{}/'.format(self._ext) + suffix)
1,000,913
Return a string representation that can be used to reproduce this instance. `eval(repr(this))` should return an identical copy. Arguments: None Returns: str: Representation of reproducible instance.
def __repr__(self):
    # include every constructor argument so eval(repr(x)) reproduces x
    return "ndio.remote.neuroRemote('{}', '{}', '{}', '{}')".format(
        self.hostname,
        self.protocol,
        self.meta_url,
        self.meta_protocol
    )
1,000,914
Requests a list of next-available-IDs from the server. Arguments: token (str): The token under which to reserve IDs channel (str): The channel under which to reserve IDs quantity (int): The number of IDs to reserve Returns: int[quantity]: List of IDs you've been granted
def reserve_ids(self, token, channel, quantity):
    quantity = str(quantity)
    url = self.url("{}/{}/reserve/{}/".format(token, channel, quantity))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Invalid req: ' + str(req.status_code))
    out = req.json()
    return [out[0] + i for i in range(out[1])]
1,000,915
Call the restful endpoint to merge two RAMON objects into one. Arguments: token (str): The token to inspect channel (str): The channel to inspect ids (int[]): the list of the IDs to merge delete (bool : False): Whether to delete after merging. Returns: bool: True on success
def merge_ids(self, token, channel, ids, delete=False):
    url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids]))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Could not merge ids {}'.format(
            ','.join([str(i) for i in ids])))
    if delete:
        self.delete_ramon(token, channel, ids[1:])
    return True
1,000,916
Creates channels given a dictionary in 'new_channels_data', a 'dataset' name, and a 'token' (project) name. Arguments: token (str): Token to identify project dataset (str): Dataset name to identify dataset to download from new_channels_data (dict): New channel data to upload into new channels Returns: bool: Whether the process completed successfully
def create_channels(self, dataset, token, new_channels_data):
    channels = {}
    for channel_new in new_channels_data:
        self._check_channel(channel_new.name)
        if channel_new.channel_type not in ['image', 'annotation']:
            raise ValueError('Channel type must be ' +
                             'neuroRemote.IMAGE or ' +
                             'neuroRemote.ANNOTATION.')
        if channel_new.readonly * 1 not in [0, 1]:
            raise ValueError("readonly must be 0 (False) or 1 (True).")
        channels[channel_new.name] = {
            "channel_name": channel_new.name,
            "channel_type": channel_new.channel_type,
            "datatype": channel_new.dtype,
            "readonly": channel_new.readonly * 1
        }
    req = requests.post(self.url("/{}/project/".format(dataset) +
                                 "{}".format(token)),
                        json={"channels": channels},
                        verify=False)
    if req.status_code != 201:
        raise RemoteDataUploadError('Could not upload {}'.format(req.text))
    else:
        return True
1,000,917
Kick off the propagate function on the remote server. Arguments: token (str): The token to propagate channel (str): The channel to propagate Returns: boolean: Success
def propagate(self, token, channel):
    if self.get_propagate_status(token, channel) != u'0':
        return
    url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Propagate fail: {}'.format(req.text))
    return True
1,000,918
Get the propagate status for a token/channel pair. Arguments: token (str): The token to check channel (str): The channel to check Returns: str: The status code
def get_propagate_status(self, token, channel):
    url = self.url('sd/{}/{}/getPropagate/'.format(token, channel))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise ValueError('Bad pair: {}/{}'.format(token, channel))
    return req.text
1,000,919
Lists a set of projects related to a dataset. Arguments: dataset_name (str): Dataset name to search projects for Returns: dict: Projects found based on dataset query
def list_projects(self, dataset_name):
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/"
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
1,000,958
Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if token created, false if not created.
def create_token(self, token_name, project_name, dataset_name, is_public):
    url = self.url() + '/nd/resource/dataset/{}'.format(dataset_name) \
        + '/project/{}'.format(project_name) \
        + '/token/{}/'.format(token_name)
    json = {
        "token_name": token_name,
        "public": is_public
    }
    req = self.remote_utils.post_url(url, json=json)
    if req.status_code != 201:
        raise RemoteDataUploadError('Could not upload {}:'.format(req.text))
    if req.content == "" or req.content == b'':
        return True
    else:
        return False
1,000,959
Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info
def get_token(self, token_name, project_name, dataset_name):
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/{}".format(project_name) \
        + "/token/{}/".format(token_name)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
1,000,960
Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: bool: True if token deleted, false if not deleted.
def delete_token(self, token_name, project_name, dataset_name):
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/{}".format(project_name) \
        + "/token/{}/".format(token_name)
    req = self.remote_utils.delete_url(url)
    if req.status_code != 204:
        raise RemoteDataUploadError("Could not delete {}".format(req.text))
    if req.content == "" or req.content == b'':
        return True
    else:
        return False
1,000,961
Lists the set of tokens that are public in Neurodata. Arguments: None Returns: dict: Public tokens found in Neurodata
def list_tokens(self):
    url = self.url() + "/nd/resource/public/token/"
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
1,000,962
Lists datasets in resources. Setting 'get_global_public' to 'True' will retrieve all public datasets in cloud. 'False' will get user's public datasets. Arguments: get_global_public (bool): True if user wants all public datasets in cloud. False if user wants only their public datasets. Returns: dict: Returns datasets in JSON format
def list_datasets(self, get_global_public):
    appending = ""
    if get_global_public:
        appending = "public"
    url = self.url() + "/resource/{}dataset/".format(appending)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
1,000,965
Gets info about a channel given its name, the name of its project, and the name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info
def get_channel(self, channel_name, project_name, dataset_name):
    url = self.url() + "/nd/resource/dataset/{}".format(dataset_name) \
        + "/project/{}".format(project_name) \
        + "/channel/{}/".format(channel_name)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
    else:
        return req.json()
1,000,969
Get the base URL of the Remote. Arguments: None Returns: `str` base URL
def url(self, endpoint=''):
    if not endpoint.startswith('/'):
        endpoint = "/" + endpoint
    return self.protocol + "://" + self.hostname + endpoint
1,001,008
Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code)
def ping(self, endpoint=''):
    r = requests.get(self.url() + "/" + endpoint)
    return r.status_code
1,001,009
Create a VideoDownloader for a given video. Args: video (Video): Video object. quality (str): Quality of the video (best/worst). Audio quality defaults to best. download_dir (str): Destination directory for the downloaded video. templates (dict): Dictionary of templates needed to generate a download path.
def __init__(self, video, quality=None, download_dir=None,
             templates=None) -> None:
    self.video = video
    self.quality = quality or DEFAULT_OPTIONS['quality']
    self.download_dir = download_dir or DEFAULT_OPTIONS['download_dir']
    self.templates = templates or DEFAULT_OPTIONS['templates']
    if self.quality not in ('worst', 'best'):
        raise WrongQualityError
1,001,212
Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
def export_dae(filename, cutout, level=0):
    if ".dae" not in filename:
        filename = filename + ".dae"
    vs, fs = mcubes.marching_cubes(cutout, level)
    mcubes.export_mesh(vs, fs, filename, "ndioexport")
1,001,310
Converts a dense annotation to a obj, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
def export_obj(filename, cutout, level=0):
    if ".obj" not in filename:
        filename = filename + ".obj"
    vs, fs = mcubes.marching_cubes(cutout, level)
    mcubes.export_obj(vs, fs, filename)
1,001,311
Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success
def export_ply(filename, cutout, level=0):
    if ".ply" not in filename:
        filename = filename + ".ply"
    vs, fs = mcubes.marching_cubes(cutout, level)
    with open(filename, 'w') as fh:
        lines = [
            "ply",
            "format ascii 1.0",
            "comment generated by ndio",
            "element vertex " + str(len(vs)),
            "property float32 x",
            "property float32 y",
            "property float32 z",
            "element face " + str(len(fs)),
            "property list uint8 int32 vertex_index",
            "end_header",
        ]
        # join explicitly; writelines() does not add newlines itself
        fh.write("\n".join(lines) + "\n")
        for v in vs:
            fh.write("{} {} {}\n".format(v[0], v[1], v[2]))
        for f in fs:
            fh.write("3 {} {} {}\n".format(f[0], f[1], f[2]))
1,001,312
Guess the appropriate data type from file extension. Arguments: ext: The file extension (period optional) Returns: String. The format (without leading period), or False if none was found or couldn't be guessed
def _guess_format_from_extension(ext):
    ext = ext.strip('.')
    # We look through FILE_FORMATS for this extension.
    # - If it appears zero times, return False. We can't guess.
    # - If it appears once, we can simply return that format.
    # - If it appears more than once, we can't guess (it's ambiguous,
    #   e.g .m = RAMON or MATLAB)
    formats = []
    for fmt in FILE_FORMATS:
        if ext in FILE_FORMATS[fmt]:
            formats.append(fmt)
    if formats == [] or len(formats) > 1:
        return False
    return formats[0]
1,001,373
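A sketch of the lookup behaviour; the FILE_FORMATS mapping shown here is hypothetical (the real table lives elsewhere in the module):

    FILE_FORMATS = {'tiff': ['tif', 'tiff'], 'matlab': ['m'], 'ramon': ['m']}

    print(_guess_format_from_extension('.tiff'))  # -> 'tiff'
    print(_guess_format_from_extension('m'))      # -> False (ambiguous)
    print(_guess_format_from_extension('xyz'))    # -> False (unknown)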
Reads in a file from disk. Arguments: in_file: The name of the file to read in in_fmt: The format of in_file, if you want to be explicit Returns: numpy.ndarray
def open(in_file, in_fmt=None):
    fmt = in_file.split('.')[-1]
    if in_fmt:
        fmt = in_fmt
    fmt = fmt.lower()
    if fmt in ['png', 'jpg', 'tiff', 'tif', 'jpeg']:
        return Image.open(in_file)
    else:
        raise NotImplementedError("Cannot open file of type {fmt}".format(fmt=fmt))
1,001,374
Converts in_file to out_file, guessing datatype in the absence of in_fmt and out_fmt. Arguments: in_file: The name of the (existing) datafile to read out_file: The name of the file to create with converted data in_fmt: Optional. The format of incoming data, if not guessable out_fmt: Optional. The format of outgoing data, if not guessable Returns: String. Output filename
def convert(in_file, out_file, in_fmt="", out_fmt=""):
    # First verify that in_file exists.
    in_file = os.path.expanduser(in_file)
    out_file = os.path.expanduser(out_file)
    if not os.path.exists(in_file):
        raise IOError("Input file {0} does not exist, stopping..."
                      .format(in_file))
    # Get formats, either by explicitly naming them or by guessing.
    # TODO: It'd be neat to check here if an explicit fmt matches the guess.
    in_fmt = in_fmt.lower() or _guess_format_from_extension(
        in_file.split('.')[-1].lower())
    out_fmt = out_fmt.lower() or _guess_format_from_extension(
        out_file.split('.')[-1].lower())
    if not in_fmt or not out_fmt:
        raise ValueError("Cannot determine conversion formats.")
    if in_fmt == out_fmt:
        # This is the case when this module (intended for LONI) is used
        # indiscriminately to 'funnel' data into one format.
        shutil.copyfile(in_file, out_file)
        return out_file
    # Import
    if in_fmt == 'hdf5':
        from . import hdf5
        data = hdf5.load(in_file)
    elif in_fmt == 'tiff':
        from . import tiff
        data = tiff.load(in_file)
    elif in_fmt == 'png':
        from . import png
        data = png.load(in_file)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)
    # Export
    if out_fmt == 'hdf5':
        from . import hdf5
        return hdf5.save(out_file, data)
    elif out_fmt == 'tiff':
        from . import tiff
        return tiff.save(out_file, data)
    elif out_fmt == 'png':
        from . import png
        return png.export_png(out_file, data)
    else:
        return _fail_pair_conversion(in_fmt, out_fmt)
1,001,375
Initialize a new grute remote. Arguments: hostname (str: "openconnecto.me"): The hostname where grute lives protocol (str: "http"): The protocol over which to access grute email (str: ""): The email to which completion notifications should be sent (unless overridden in individual calls). Note that the completion URLs are also accessible via this Python API.
def __init__(self, hostname=DEFAULT_HOSTNAME, protocol=DEFAULT_PROTOCOL,
             email=DEFAULT_EMAIL):
    super(grute, self).__init__(hostname, protocol)
    self.email = email
1,001,390
Bind a listener to a particular event. Args: event (str): The name of the event to listen for. This may be any string value. listener (def or async def): The callback to execute when the event fires. This may be a sync or async function.
def add_listener(self, event, listener):
    self.emit('new_listener', event, listener)
    self._listeners[event].append(listener)
    self._check_limit(event)
    return self
1,001,612
Get the number of listeners for the event. Args: event (str): The event for which to count all listeners. The resulting count is a combination of listeners added using 'on'/'add_listener' and 'once'.
def count(self, event):
    return len(self._listeners[event]) + len(self._once[event])
1,001,620
Converts a RAMON object list to a JSON-style dictionary. Useful for going from an array of RAMONs to a dictionary, indexed by ID. Arguments: ramons (RAMON[]): A list of RAMON objects flatten (boolean: False): Not implemented Returns: dict: A python dictionary of RAMON objects.
def to_dict(ramons, flatten=False):
    if type(ramons) is not list:
        ramons = [ramons]
    out_ramons = {}
    for r in ramons:
        out_ramons[r.id] = {
            "id": r.id,
            "type": _reverse_ramon_types[type(r)],
            "metadata": vars(r)
        }
    return out_ramons
1,001,622
Exports a RAMON object to an HDF5 file object. Arguments: ramon (RAMON): A subclass of RAMONBase hdf5 (file, optional): An open file object to export into; a temporary file is created when omitted Returns: hdf5 file handle Raises: InvalidRAMONError: if you pass a non-RAMON object
def to_hdf5(ramon, hdf5=None):
    if issubclass(type(ramon), RAMONBase) is False:
        raise InvalidRAMONError("Invalid RAMON supplied to ramon.to_hdf5.")
    import h5py
    import numpy
    if hdf5 is None:
        tmpfile = tempfile.NamedTemporaryFile(delete=False)
    else:
        tmpfile = hdf5
    with h5py.File(tmpfile.name, "a") as hdf5:
        # First we'll export things that all RAMON objects have in
        # common, starting with the Group that encompasses each ID:
        grp = hdf5.create_group(str(ramon.id))
        grp.create_dataset("ANNOTATION_TYPE", (1,), numpy.uint32,
                           data=AnnotationType.get_int(type(ramon)))
        if hasattr(ramon, 'cutout'):
            if ramon.cutout is not None:
                grp.create_dataset('CUTOUT', ramon.cutout.shape,
                                   ramon.cutout.dtype, data=ramon.cutout)
                grp.create_dataset('RESOLUTION', (1,), numpy.uint32,
                                   data=ramon.resolution)
                grp.create_dataset('XYZOFFSET', (3,), numpy.uint32,
                                   data=ramon.xyz_offset)
        # Next, add general metadata.
        metadata = grp.create_group('METADATA')
        metadata.create_dataset('AUTHOR', (1,),
                                dtype=h5py.special_dtype(vlen=str),
                                data=ramon.author)
        fstring = StringIO()
        csvw = csv.writer(fstring, delimiter=',')
        csvw.writerows([r for r in six.iteritems(ramon.kvpairs)])
        metadata.create_dataset('KVPAIRS', (1,),
                                dtype=h5py.special_dtype(vlen=str),
                                data=fstring.getvalue())
        metadata.create_dataset('CONFIDENCE', (1,), numpy.float,
                                data=ramon.confidence)
        metadata.create_dataset('STATUS', (1,), numpy.uint32,
                                data=ramon.status)
        # Finally, add type-specific metadata:
        if hasattr(ramon, 'segments'):
            metadata.create_dataset('SEGMENTS',
                                    data=numpy.asarray(ramon.segments,
                                                       dtype=numpy.uint32))
        if hasattr(ramon, 'synapse_type'):
            metadata.create_dataset('SYNAPSE_TYPE', (1,), numpy.uint32,
                                    data=ramon.synapse_type)
        if hasattr(ramon, 'weight'):
            metadata.create_dataset('WEIGHT', (1,), numpy.float,
                                    data=ramon.weight)
        if hasattr(ramon, 'neuron'):
            metadata.create_dataset('NEURON', (1,), numpy.uint32,
                                    data=ramon.neuron)
        if hasattr(ramon, 'segmentclass'):
            metadata.create_dataset('SEGMENTCLASS', (1,), numpy.uint32,
                                    data=ramon.segmentclass)
        if hasattr(ramon, 'synapses'):
            metadata.create_dataset('SYNAPSES', (len(ramon.synapses),),
                                    numpy.uint32, data=ramon.synapses)
        if hasattr(ramon, 'organelles'):
            metadata.create_dataset('ORGANELLES', (len(ramon.organelles),),
                                    numpy.uint32, data=ramon.organelles)
        if hasattr(ramon, 'organelle_class'):
            metadata.create_dataset('ORGANELLECLASS', (1,), numpy.uint32,
                                    data=ramon.organelle_class)
        hdf5.flush()
    tmpfile.seek(0)
    return tmpfile
1,001,626
Post a cutout to the server. Arguments: token (str) channel (str) x_start (int) y_start (int) z_start (int) data (numpy.ndarray): A numpy array of data. Pass in (x, y, z) resolution (int : 0): Resolution at which to insert the data Returns: bool: True on success Raises: RemoteDataUploadError: if there's an issue during upload.
def post_cutout(self, token, channel, x_start, y_start, z_start, data,
                resolution=0):
    return self.data.post_cutout(token, channel, x_start, y_start,
                                 z_start, data, resolution)
1,001,632
Creates a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name is_public (int): 1 is public. 0 is not public Returns: bool: True if token created, false if not created.
def create_token(self, token_name, project_name, dataset_name, is_public):
    return self.resources.create_token(token_name, project_name,
                                       dataset_name, is_public)
1,001,634
Get a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: dict: Token info
def get_token(self, token_name, project_name, dataset_name):
    return self.resources.get_token(token_name, project_name, dataset_name)
1,001,635
Delete a token with the given parameters. Arguments: project_name (str): Project name dataset_name (str): Dataset name project is based on token_name (str): Token name Returns: bool: True if token deleted, false if not deleted.
def delete_token(self, token_name, project_name, dataset_name):
    return self.resources.delete_token(token_name, project_name,
                                       dataset_name)
1,001,636
Gets info about a channel given its name, the name of its project, and the name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: dict: Channel info
def get_channel(self, channel_name, project_name, dataset_name):
    return self.resources.get_channel(channel_name, project_name,
                                      dataset_name)
1,001,639
Deletes a channel given its name, the name of its project, and the name of its dataset. Arguments: channel_name (str): Channel name project_name (str): Project name dataset_name (str): Dataset name Returns: bool: True if channel deleted, False if not
def delete_channel(self, channel_name, project_name, dataset_name):
    return self.resources.delete_channel(channel_name, project_name,
                                         dataset_name)
1,001,640
Get video urls from a link to the onetab shared page. Args: onetab (str): Link to a onetab shared page. Returns: list: List of links to the videos.
def get_urls_from_onetab(onetab):
    html = requests.get(onetab).text
    soup = BeautifulSoup(html, 'lxml')
    divs = soup.findAll('div', {'style': 'padding-left: 24px; '
                                         'padding-top: 8px; '
                                         'position: relative; '
                                         'font-size: 13px;'})
    return [div.find('a').attrs['href'] for div in divs]
1,001,939
Get a list of public tokens available on this server. Arguments: None Returns: str[]: list of public tokens
def get_public_tokens(self):
    r = self.remote_utils.get_url(self.url() + "public_tokens/")
    return r.json()
1,001,950
NOTE: VERY SLOW! Get a dictionary relating key:dataset to value:[tokens] that rely on that dataset. Arguments: None Returns: dict: relating key:dataset to value:[tokens]
def get_public_datasets_and_tokens(self):
    datasets = {}
    tokens = self.get_public_tokens()
    for t in tokens:
        dataset = self.get_token_dataset(t)
        if dataset in datasets:
            datasets[dataset].append(t)
        else:
            datasets[dataset] = [t]
    return datasets
1,001,951
Return the project info for a given token. Arguments: token (str): Token to return information for Returns: JSON: representation of proj_info
def get_proj_info(self, token):
    r = self.remote_utils.get_url(self.url() + "{}/info/".format(token))
    return r.json()
1,001,952
Insert new metadata into the OCP metadata database. Arguments: token (str): Token of the datum to set data (str): A dictionary to insert as metadata. Include `secret`. Returns: json: Info of the inserted ID (convenience) or an error message. Raises: RemoteDataUploadError: If the token is already populated, or if there is an issue with your specified `secret` key.
def set_metadata(self, token, data):
    req = requests.post(self.meta_url("metadata/ocp/set/" + token),
                        json=data, verify=False)
    if req.status_code != 200:
        raise RemoteDataUploadError(
            "Could not upload metadata: " + req.json()['message']
        )
    return req.json()
1,001,954
Initialize a new `RAMONNeuron`. Neurons take all of the arguments of `RAMONBase`. Arguments: segments (int[]: None): A list of RAMON IDs that make up the neuron
def __init__(self, segments=None, id=DEFAULT_ID,
             confidence=DEFAULT_CONFIDENCE, kvpairs=DEFAULT_DYNAMIC_METADATA,
             status=DEFAULT_STATUS, author=DEFAULT_AUTHOR):
    self.segments = segments
    RAMONBase.__init__(self, id=id, confidence=confidence,
                       kvpairs=kvpairs, status=status, author=author)
1,001,979
Get a response object for a given url. Arguments: url (str): The url to make a GET request to; the stored user token is sent as the Authorization header Returns: obj: The response object
def get_url(self, url):
    try:
        req = requests.get(url, headers={
            'Authorization': 'Token {}'.format(self._user_token)
        }, verify=False)
        if req.status_code == 403:
            raise ValueError("Access Denied")
        else:
            return req
    except requests.exceptions.ConnectionError as e:
        if str(e) == '403 Client Error: Forbidden':
            raise ValueError('Access Denied')
        else:
            raise e
1,002,007
Returns a post request object, taking in a url, user token, and optional json information. Arguments: url (str): The url to make the post to token (str): The authentication token json (dict): json info to send Returns: obj: Post request object
def post_url(self, url, token='', json=None, data=None, headers=None):
    if token == '':
        token = self._user_token
    if headers:
        headers.update({'Authorization': 'Token {}'.format(token)})
    else:
        headers = {'Authorization': 'Token {}'.format(token)}
    if json:
        return requests.post(url, headers=headers, json=json, verify=False)
    if data:
        return requests.post(url, headers=headers, data=data, verify=False)
    return requests.post(url, headers=headers, verify=False)
1,002,008
Returns a delete request object, taking in a url and user token. Arguments: url (str): The url to make the delete request to token (str): The authentication token Returns: obj: Delete request object
def delete_url(self, url, token=''):
    if token == '':
        token = self._user_token
    return requests.delete(url,
                           headers={'Authorization': 'Token {}'.format(token)},
                           verify=False)
1,002,009
Ping the server to make sure that you can access the base URL. Arguments: None Returns: `boolean` Successful access of server (or status code)
def ping(self, url, endpoint=''):
    r = self.get_url(url + "/" + endpoint)
    return r.status_code
1,002,010
Import a HDF5 file into a numpy array. Arguments: hdf5_filename: A string filename of a HDF5 datafile Returns: A numpy array with data from the HDF5 file
def load(hdf5_filename):
    # Expand filename to be absolute
    hdf5_filename = os.path.expanduser(hdf5_filename)
    try:
        f = h5py.File(hdf5_filename, "r")
        # neurodata stores data inside the 'CUTOUT' h5 dataset
        data_layers = f.get('image').get('CUTOUT')
    except Exception as e:
        raise ValueError("Could not load file {0} for conversion. {1}".format(
            hdf5_filename, e))
    return numpy.array(data_layers)
1,002,115
Export a numpy array to a HDF5 file. Arguments: hdf5_filename (str): A filename to which to save the HDF5 data array (numpy.ndarray): The numpy array to save to HDF5 Returns: String. The expanded filename that now holds the HDF5 data
def save(hdf5_filename, array):
    # Expand filename to be absolute
    hdf5_filename = os.path.expanduser(hdf5_filename)
    try:
        h = h5py.File(hdf5_filename, "w")
        h.create_dataset('CUTOUT', data=array)
        h.close()
    except Exception as e:
        raise ValueError("Could not save HDF5 file {0}.".format(hdf5_filename))
    return hdf5_filename
1,002,116
Combine noise with wd noise. Combines noise and white dwarf background noise based on greater amplitude value at each noise curve step. Args: f_n (float array): Frequencies of noise curve. amp_n (float array): Amplitude values of noise curve. f_n_wd (float array): Frequencies of wd noise. amp_n_wd (float array): Amplitude values of wd noise. Returns: (tuple of float arrays): Amplitude values of combined noise curve.
def combine_with_wd_noise(f_n, amp_n, f_n_wd, amp_n_wd):
    # interpolate wd noise
    amp_n_wd_interp = interpolate.interp1d(f_n_wd, amp_n_wd,
                                           bounds_error=False,
                                           fill_value=1e-30)
    # find points of wd noise amplitude at noise curve frequencies
    amp_n_wd = amp_n_wd_interp(f_n)
    # keep the greater value at each frequency
    amp_n = amp_n*(amp_n >= amp_n_wd) + amp_n_wd*(amp_n < amp_n_wd)
    return f_n, amp_n
1,002,120
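The masking line above is just an element-wise maximum; a small numeric sketch:

    import numpy as np

    f = np.array([1e-4, 1e-3, 1e-2])
    amp = np.array([1e-19, 1e-20, 1e-20])
    f_wd = np.array([1e-4, 1e-3, 1e-2])
    amp_wd = np.array([1e-20, 5e-20, 1e-21])

    f_out, amp_out = combine_with_wd_noise(f, amp, f_wd, amp_wd)
    print(amp_out)  # [1e-19, 5e-20, 1e-20] -- the louder curve wins at each bin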
List available sensitivity curves This function lists the available sensitivity curve strings in noise_curves folder. Args: return_curves (bool, optional): If True, return a list of curve options. print_curves (bool, optional): If True, print each curve option. Returns: (optional list of str): List of curve options. Raises: ValueError: Both args are False.
def show_available_noise_curves(return_curves=True, print_curves=False):
    if return_curves is False and print_curves is False:
        raise ValueError("Both return_curves and print_curves are False."
                         + " You will not see the options")
    cfd = os.path.dirname(os.path.abspath(__file__))
    curves = [curve.split('.')[0]
              for curve in os.listdir(cfd + '/noise_curves/')]
    if print_curves:
        for f in curves:
            print(f)
    if return_curves:
        return curves
    return
1,002,121
Whether a given method exists in the known API. Arguments: method (:py:class:`str`): The name of the method. Returns: :py:class:`bool`: Whether the method is in the known API.
def method_exists(cls, method):
    methods = cls.API_METHODS
    for key in method.split('.'):
        methods = methods.get(key)
        if methods is None:
            break
    if isinstance(methods, str):
        logger.debug('%r: %r', method, methods)
        return True
    return False
1,002,245
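A standalone sketch of the dotted-key walk performed above; the API_METHODS nesting shown is hypothetical (namespaces as dicts, docstrings as string leaves):

    API_METHODS = {'chat': {'postMessage': 'Sends a message to a channel.'}}

    def method_exists(method):
        # walk one key at a time; a string leaf means the method is known
        methods = API_METHODS
        for key in method.split('.'):
            methods = methods.get(key)
            if methods is None:
                break
        return isinstance(methods, str)

    print(method_exists('chat.postMessage'))  # True
    print(method_exists('chat.delete'))       # False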
Join the real-time messaging service. Arguments: filters (:py:class:`dict`, optional): Dictionary mapping message filters to the functions they should dispatch to. Use a :py:class:`collections.OrderedDict` if precedence is important; only one filter, the first match, will be applied to each message.
async def join_rtm(self, filters=None):
    if filters is None:
        filters = [cls(self) for cls in self.MESSAGE_FILTERS]
    url = await self._get_socket_url()
    logger.debug('Connecting to %r', url)
    async with ws_connect(url) as socket:
        first_msg = await socket.receive()
        self._validate_first_message(first_msg)
        self.socket = socket
        async for message in socket:
            if message.tp == MsgType.text:
                await self.handle_message(message, filters)
            elif message.tp in (MsgType.closed, MsgType.error):
                if not socket.closed:
                    await socket.close()
                self.socket = None
                break
    logger.info('Left real-time messaging.')
1,002,398
Handle an incoming message appropriately. Arguments: message (:py:class:`aiohttp.websocket.Message`): The incoming message to handle. filters (:py:class:`list`): The filters to apply to incoming messages.
async def handle_message(self, message, filters):
    data = self._unpack_message(message)
    logger.debug(data)
    if data.get('type') == 'error':
        raise SlackApiError(
            data.get('error', {}).get('msg', str(data))
        )
    elif self.message_is_to_me(data):
        text = data['text'][len(self.address_as):].strip()
        if text == 'help':
            return self._respond(
                channel=data['channel'],
                text=self._instruction_list(filters),
            )
        elif text == 'version':
            return self._respond(
                channel=data['channel'],
                text=self.VERSION,
            )
    for _filter in filters:
        if _filter.matches(data):
            logger.debug('Response triggered')
            async for response in _filter:
                self._respond(channel=data['channel'], text=response)
1,002,399
Create a new instance from the API token. Arguments: token (:py:class:`str`, optional): The bot's API token (defaults to ``None``, which means looking in the environment). api_cls (:py:class:`type`, optional): The class to create as the ``api`` argument for API access (defaults to :py:class:`aslack.slack_api.SlackBotApi`). Returns: :py:class:`SlackBot`: The new instance.
async def from_api_token(cls, token=None, api_cls=SlackBotApi):
    api = api_cls.from_env() if token is None else api_cls(api_token=token)
    data = await api.execute_method(cls.API_AUTH_ENDPOINT)
    return cls(data['user_id'], data['user'], api)
1,002,401
Format an outgoing message for transmission. Note: Adds the message type (``'message'``) and incremental ID. Arguments: channel (:py:class:`str`): The channel to send to. text (:py:class:`str`): The message text to send. Returns: :py:class:`str`: The JSON string of the message.
def _format_message(self, channel, text):
    payload = {'type': 'message', 'id': next(self._msg_ids)}
    payload.update(channel=channel, text=text)
    return json.dumps(payload)
1,002,402
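A sketch of the payload produced, assuming `_msg_ids` is an `itertools.count`-style counter (an assumption; the source only shows `next(self._msg_ids)`):

    import itertools
    import json

    _msg_ids = itertools.count(1)  # hypothetical counter
    payload = {'type': 'message', 'id': next(_msg_ids)}
    payload.update(channel='C012345', text='hi')
    print(json.dumps(payload))
    # {"type": "message", "id": 1, "channel": "C012345", "text": "hi"}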
Respond to a message on the current socket. Args: channel (:py:class:`str`): The channel to send to. text (:py:class:`str`): The message text to send.
def _respond(self, channel, text):
    result = self._format_message(channel, text)
    if result is not None:
        logger.info(
            'Sending message: %r',
            truncate(result, max_len=50),
        )
        self.socket.send_str(result)
1,002,405
Check the first message matches the expected handshake. Note: The handshake is provided as :py:attr:`RTM_HANDSHAKE`. Arguments: msg (:py:class:`aiohttp.Message`): The message to validate. Raises: :py:class:`SlackApiError`: If the data doesn't match the expected handshake.
def _validate_first_message(cls, msg):
    data = cls._unpack_message(msg)
    logger.debug(data)
    if data != cls.RTM_HANDSHAKE:
        raise SlackApiError('Unexpected response: {!r}'.format(data))
    logger.info('Joined real-time messaging.')
1,002,406
Rules for minifying output. Arguments: drop_semi Drop semicolons whenever possible. Note that if Dedent and OptionalNewline have handlers defined, they will stop final break statements from being resolved, due to reliance on normalized resolution.
def minify(drop_semi=True):
    layout_handlers = {
        OpenBlock: layout_handler_openbrace,
        CloseBlock: layout_handler_closebrace,
        EndStatement: layout_handler_semicolon,
        Space: layout_handler_space_minimum,
        OptionalSpace: layout_handler_space_minimum,
        RequiredSpace: layout_handler_space_imply,
        (Space, OpenBlock): layout_handler_openbrace,
        (Space, EndStatement): layout_handler_semicolon,
        (OptionalSpace, EndStatement): layout_handler_semicolon,
    }
    if drop_semi:
        # if these are defined, they should be dropped; should really
        # provide these as a flag.
        # layout_handlers.update({
        #     OptionalNewline: None,
        #     Dedent: None,
        # })
        layout_handlers.update({
            EndStatement: layout_handler_semicolon_optional,
            # these two rules rely on the normalized resolution
            (OptionalSpace, EndStatement): layout_handler_semicolon_optional,
            (EndStatement, CloseBlock): layout_handler_closebrace,
            # this is a fallback rule for when Dedent is defined by
            # some other rule, which won't neuter all optional
            # semicolons.
            (EndStatement, Dedent): rule_handler_noop,
            ((OptionalSpace, EndStatement), CloseBlock):
                layout_handler_closebrace,
        })

    def minify_rule():
        return {
            'layout_handlers': layout_handlers,
            'deferrable_handlers': {
                Literal: deferrable_handler_literal_continuation,
            },
        }
    return minify_rule
1,002,548
A complete, standalone indent ruleset. Arguments: indent_str The string used for indentation. Defaults to None, which will defer the value used to the one provided by the Dispatcher.
def indent(indent_str=None):
    def indentation_rule():
        inst = Indentator(indent_str)
        return {'layout_handlers': {
            OpenBlock: layout_handler_openbrace,
            CloseBlock: layout_handler_closebrace,
            EndStatement: layout_handler_semicolon,
            Space: layout_handler_space_imply,
            OptionalSpace: layout_handler_space_optional_pretty,
            RequiredSpace: layout_handler_space_imply,
            Indent: inst.layout_handler_indent,
            Dedent: inst.layout_handler_dedent,
            Newline: inst.layout_handler_newline,
            OptionalNewline: inst.layout_handler_newline_optional,
            (Space, OpenBlock): NotImplemented,
            (Space, EndStatement): layout_handler_semicolon,
            (OptionalSpace, EndStatement): layout_handler_semicolon,
            (Indent, Newline, Dedent): rule_handler_noop,
        }}
    return indentation_rule
1,002,549
Update model parameters and re-compute likelihoods. This method is the **only** acceptable way to update model parameters. The likelihood is re-computed as needed by this method. Args: `newvalues` (dict) A dictionary keyed by param name and with value as new value to set. Each parameter name must be a valid model parameter (i.e. in `model.freeparams`).
def updateParams(self, newvalues):
    for (param, value) in newvalues.items():
        if param not in self.model.freeparams:
            raise RuntimeError("Can't handle param: {0}".format(param))
    if newvalues:
        self.model.updateParams(newvalues)
        self._updateInternals()
        self._paramsarray = None
1,002,674
Check if the context could be accepted by the oracle. Args: context: a sequence of the same type as the oracle data Returns: bAccepted: whether the sequence is accepted or not _next: the state where the sequence is accepted
def accept(self, context):
    _next = 0
    for _s in context:
        _data = [self.data[j] for j in self.trn[_next]]
        if _s in _data:
            _next = self.trn[_next][_data.index(_s)]
        else:
            return 0, _next
    return 1, _next
1,002,847
Private method for setting labels. Args: which (str): The indicator of which part of the plots to adjust. This currently handles `xlabel`/`ylabel`, and `title`. label (str): The label to be added. fontsize (int, optional): Fontsize for the associated label. Defaults to 18 when not given.
def _set_label(self, which, label, **kwargs):
    prop_default = {
        'fontsize': 18,
    }
    for prop, default in prop_default.items():
        kwargs[prop] = kwargs.get(prop, default)
    setattr(self.label, which, label)
    setattr(self.label, which + '_kwargs', kwargs)
    return
1,002,958