Columns: docstring (string, lengths 52–499) · function (string, lengths 67–35.2k) · __index_level_0__ (int64, 52.6k–1.16M)
Fetches a single instance which has value `keyval` for the attribute `key`.

Args:
    keyval: The value of the attribute.
    key (str, optional): The attribute to search by. By default, it is 'id'.

Returns:
    A model instance if found. Else None.

Examples:

    >>> User.get(35)
    [email protected]
    >>> User.get('[email protected]', key='email')
    [email protected]

def get(cls, keyval, key='id', user_id=None):
    if keyval is None:
        return None
    if (key in cls.__table__.columns and
            cls.__table__.columns[key].primary_key):
        # if user_id and hasattr(cls, 'user_id'):
        #     return cls.query.filter_by(id=keyval, user_id=user_id).first()
        return cls.query.get(keyval)
    else:
        result = cls.query.filter(getattr(cls, key) == keyval)
        # if user_id and hasattr(cls, 'user_id'):
        #     result = result.filter(cls.user_id == user_id)
        return result.first()
842,738
Initializes a new instance, adds it to the db and commits the transaction.

Args:
    **kwargs: The keyword arguments for the init constructor.

Examples:

    >>> user = User.create(name="Vicky", email="[email protected]")
    >>> user.id
    35

def create(cls, **kwargs):
    try:
        return cls.add(cls.new(**kwargs))
    except:
        cls.session.rollback()
        raise
842,740
Batch method for creating a list of instances.

Args:
    list_of_kwargs (list of dicts): A list of dicts where each dict
        denotes the keyword args that you would pass to the create
        method separately.

Examples:

    >>> Customer.create_all([
    ...     {'name': 'Vicky', 'age': 34, 'user_id': 1},
    ...     {'name': 'Ron', 'age': 40, 'user_id': 1, 'gender': 'Male'}])

def create_all(cls, list_of_kwargs):
    try:
        return cls.add_all([
            cls.new(**kwargs) if kwargs is not None else None
            for kwargs in list_of_kwargs])
    except:
        cls.session.rollback()
        raise
842,747
Similar to create, but the transaction is not committed.

Args:
    **kwargs: The keyword arguments for the constructor.

Returns:
    A model instance which has been added to the db session, but whose
    transaction has not yet been committed.

def build(cls, **kwargs):
    return cls.add(cls.new(**kwargs), commit=False)
842,750
Checks if an instance already exists in the db with these kwargs;
otherwise returns a new, saved instance of the service's model class.

Args:
    **kwargs: instance parameters

def find_or_build(cls, **kwargs):
    keys = kwargs.pop('keys') if 'keys' in kwargs else []
    return cls.first(**subdict(kwargs, keys)) or cls.build(**kwargs)
842,751
Batch method for updating all instances obeying the criterion.

Args:
    *criterion: SQLAlchemy query criterion for filtering what instances
        to update.
    **kwargs: The parameters to be updated.

Examples:

    >>> User.update_all(active=True)
    >>> Customer.update_all(Customer.country == 'India', active=True)

The second example sets active=True for all customers with country India.

def update_all(cls, *criterion, **kwargs):
    try:
        r = cls.query.filter(*criterion).update(kwargs, 'fetch')
        cls.session.commit()
        return r
    except:
        cls.session.rollback()
        raise
842,756
Returns an updated instance of the service's model class.

Args:
    id: The id of the model to update.
    **kwargs: update parameters

def get_and_update(cls, id, **kwargs):
    model = cls.get(id)
    for k, v in cls._preprocess_params(kwargs).items():
        setattr(model, k, v)
    cls.session.commit()
    return model
842,757
Returns an updated instance of the service's model class. Unlike
get_and_update, the session is not committed.

Args:
    id: The id of the model to update.
    **kwargs: update parameters

def get_and_setattr(cls, id, **kwargs):
    model = cls.get(id)
    for k, v in cls._preprocess_params(kwargs).items():
        setattr(model, k, v)
    return model
842,758
Create new_browser_window_is_opened object.

Args:
    selenium (:py:class:`~selenium.webdriver.remote.webdriver.WebDriver`):
        Firefox WebDriver object.
    handles (:obj:`list` of str): List of current Firefox window handles.

def __init__(self, selenium, handles):
    self.selenium = selenium
    self.handles = handles
842,847
Ask a question and get the input values.

This method will validate the input values.

Args:
    field_name (string): Field name used to ask for input value.
    pattern (tuple): Pattern to validate the input value.
    is_required (bool): Boolean value if the input value is required.
    password (bool): Boolean value to get input password with mask.

Returns:
    input_value (string): Input value validated.

def ask_question(self, field_name, pattern=NAME_PATTERN, is_required=False,
                 password=False):
    input_value = ""
    question = ("Insert the field using the pattern below:"
                "\n{}\n{}: ".format(pattern[0], field_name))
    while not input_value:
        input_value = getpass(question) if password else input(question)

        if not (input_value or is_required):
            break

        if password:
            confirm_password = getpass('Confirm your password: ')
            if confirm_password != input_value:
                print("Password does not match")
                input_value = ""

        if not self.valid_attribute(input_value, pattern[1]):
            error_message = "The content must fit the pattern: {}\n"
            print(error_message.format(pattern[0]))
            input_value = ""

    return input_value
842,869
Create a BaseWindow object.

Args:
    selenium (:py:class:`~selenium.webdriver.remote.webdriver.WebDriver`):
        Firefox WebDriver object.
    handle (str): WebDriver Firefox window handle.

def __init__(self, selenium, handle):
    self.selenium = selenium
    self.handle = handle
    self.wait = WebDriverWait(self.selenium, timeout=10)
842,872
Set info about NApp.

Args:
    user (str): NApps Server username.
    napp (str): NApp name.
    version (str): NApp version.

def set_napp(self, user, napp, version=None):
    self.user = user
    self.napp = napp
    self.version = version or 'latest'
842,921
Get napp_dependencies from an installed NApp.

Args:
    user (string): A username.
    napp (string): A NApp name.

Returns:
    napps (list): List of tuples with username and NApp name,
        e.g. [('kytos', 'of_core'), ('kytos', 'of_l2ls')].

def dependencies(self, user=None, napp=None):
    napps = self._get_napp_key('napp_dependencies', user, napp)
    return [tuple(napp.split('/')) for napp in napps]
842,924
Return a value from kytos.json.

Args:
    user (string): A username.
    napp (string): A NApp name.
    key (string): Key used to get the value within kytos.json.

Returns:
    meta (object): Value stored in kytos.json.

def _get_napp_key(self, key, user=None, napp=None):
    if user is None:
        user = self.user
    if napp is None:
        napp = self.napp
    kytos_json = self._installed / user / napp / 'kytos.json'
    try:
        with kytos_json.open() as file_descriptor:
            meta = json.load(file_descriptor)
            return meta[key]
    except (FileNotFoundError, json.JSONDecodeError, KeyError):
        return ''
842,925
Search all server NApps matching pattern.

Args:
    pattern (str): Python regular expression.

def search(pattern):
    def match(napp):
        # WARNING: This will change for future versions, when 'author' will
        # be removed.
        username = napp.get('username', napp.get('author'))
        strings = ['{}/{}'.format(username, napp.get('name')),
                   napp.get('description')] + napp.get('tags')
        return any(pattern.match(string) for string in strings)

    napps = NAppsClient().get_napps()
    return [napp for napp in napps if match(napp)]
842,930
Return local NApp root folder.

Search for kytos.json in _./_ folder and _./user/napp_.

Args:
    root (pathlib.Path): Where to begin searching.

Return:
    pathlib.Path: NApp root folder.

Raises:
    FileNotFoundError: If there is no such local NApp.

def _get_local_folder(self, root=None):
    if root is None:
        root = Path()
    for folders in ['.'], [self.user, self.napp]:
        kytos_json = root / Path(*folders) / 'kytos.json'
        if kytos_json.exists():
            with kytos_json.open() as file_descriptor:
                meta = json.load(file_descriptor)
                # WARNING: This will change in future versions, when
                # 'author' will be removed.
                username = meta.get('username', meta.get('author'))
                if username == self.user and meta.get('name') == self.napp:
                    return kytos_json.parent
    raise FileNotFoundError('kytos.json not found.')
842,932
Create module folder with empty __init__.py if it doesn't exist.

Args:
    folder (pathlib.Path): Module path.

def _check_module(folder):
    if not folder.exists():
        folder.mkdir(parents=True, exist_ok=True, mode=0o755)
        (folder / '__init__.py').touch()
842,938
Build the .napp file to be sent to the napps server.

Args:
    napp_name (str): Name of the NApp package (the ``<napp_name>.napp``
        file is created in the current directory).

Return:
    file_payload (binary): The binary representation of the napp package
        that will be POSTed to the napp server.

def build_napp_package(napp_name):
    ignored_extensions = ['swp', 'pyc', 'napp']
    ignored_dirs = ['__pycache__', '.git', '.tox']

    # Build the file list up front: removing items from a list while
    # iterating over it skips elements. Extensions are compared without
    # the leading dot, since rsplit('.', 1)[1] does not include it.
    files = [
        filename for filename in os.listdir()
        if not (os.path.isfile(filename) and '.' in filename and
                filename.rsplit('.', 1)[1] in ignored_extensions)
        and not (os.path.isdir(filename) and filename in ignored_dirs)
    ]

    # Create the '.napp' package
    napp_file = tarfile.open(napp_name + '.napp', 'x:xz')
    for local_f in files:
        napp_file.add(local_f)
    napp_file.close()

    # Get the binary payload of the package
    file_payload = open(napp_name + '.napp', 'rb')

    # Remove the created package from the filesystem
    os.remove(napp_name + '.napp')

    return file_payload
842,939
Reload a NApp or all NApps.

Args:
    napps (list): NApp list to be reloaded.

Raises:
    requests.HTTPError: When there's a server error.

def reload(self, napps=None):
    client = NAppsClient(self._config)
    client.reload_napps(napps)
842,945
Create a Region object.

Args:
    window (:py:class:`BaseWindow`): Window object this region appears in.
    root (:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
        WebDriver element object that serves as the root for the region.

def __init__(self, window, root):
    self.root = root
    self.selenium = window.selenium
    self.wait = window.wait
    self.window = window
843,118
Remove the pairs identified by the given indices into _pairs.

Removes the pair, and updates the _key_ids mapping to be accurate.
Removing the ids from the _key_ids is your own responsibility.

Params:
    ids_to_remove -- The indices to remove. MUST be sorted.

def _remove_pairs(self, ids_to_remove):
    # Remove them.
    for i in reversed(ids_to_remove):
        del self._pairs[i]

    # We use the bisect to tell us how many spots the given index is
    # shifting down in the list. (values() replaces the Python 2-only
    # itervalues().)
    for ids in self._key_ids.values():
        for i, id in enumerate(ids):
            ids[i] -= bisect(ids_to_remove, id)
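A worked example of the bisect bookkeeping may help: for a stored index and a sorted list of removed indices, `bisect` counts how many removed slots precede it, which is exactly the shift. A standalone sketch, not tied to the class:

>>> from bisect import bisect
>>> ids_to_remove = [1, 3]
>>> 5 - bisect(ids_to_remove, 5)   # two removed slots precede index 5
3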
843,279
Insert some new pairs, and keep the _key_ids updated.

Params:
    ids_and_pairs -- A list of (index, (key, value)) tuples.

def _insert_pairs(self, ids_and_pairs):
    ids_to_insert = [x[0] for x in ids_and_pairs]

    # We use the bisect to tell us how many spots the given index is
    # shifting up in the list. (values() replaces the Python 2-only
    # itervalues().)
    for ids in self._key_ids.values():
        for i, id in enumerate(ids):
            ids[i] += bisect(ids_to_insert, id)

    # Do the actual insertion
    for i, pair in ids_and_pairs:
        self._pairs.insert(i, pair)
843,280
Wait for the specified notification to be displayed.

Args:
    notification_class (:py:class:`BaseNotification`, optional):
        The notification class to wait for. If `None` is specified it
        will wait for any notification to be closed.
        Defaults to `BaseNotification`.

Returns:
    :py:class:`BaseNotification`: Firefox notification.

def wait_for_notification(self, notification_class=BaseNotification):
    if notification_class:
        if notification_class is BaseNotification:
            message = "No notification was shown."
        else:
            message = "{0} was not shown.".format(notification_class.__name__)
        self.wait.until(
            lambda _: isinstance(self.notification, notification_class),
            message=message,
        )
        return self.notification
    else:
        self.wait.until(
            lambda _: self.notification is None,
            message="Unexpected notification shown.",
        )
843,326
Open a new browser window.

Args:
    private (bool): Optional parameter to open a private browsing window.
        Defaults to False.

Returns:
    :py:class:`BrowserWindow`: Opened window.

def open_window(self, private=False):
    handles_before = self.selenium.window_handles
    self.switch_to()

    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        # Opens private or non-private window
        self.selenium.find_element(*self._file_menu_button_locator).click()
        if private:
            self.selenium.find_element(
                *self._file_menu_private_window_locator
            ).click()
        else:
            self.selenium.find_element(
                *self._file_menu_new_window_button_locator
            ).click()

    return self.wait.until(
        expected.new_browser_window_is_opened(self.selenium, handles_before),
        message="No new browser window opened",
    )
843,328
Converts an instance to a dictionary with only the specified attributes
as keys.

Args:
    *args (list): The attributes to serialize.

Examples:

    >>> customer = Customer.create(name="James Bond", email="[email protected]",
    ...                            phone="007", city="London")
    >>> customer.serialize_attrs('name', 'email')
    {'name': u'James Bond', 'email': u'[email protected]'}

def serialize_attrs(self, *args):
    cls = type(self)
    result = {}
    for a in args:
        if hasattr(cls, a) and a not in cls.attrs_forbidden_for_serialization():
            val = getattr(self, a)
            if is_list_like(val):
                result[a] = list(val)
            else:
                result[a] = val
    return result
843,346
Convert a napp_id into a tuple with username, NApp name and version.

Args:
    napp_id: String with the form 'username/napp[:version]' (version is
        optional). If no version is found, it will be None.

Returns:
    tuple: A tuple with (username, napp, version).

Raises:
    KytosException: If a NApp does not have the form _username/name_.

def parse_napp(napp_id):
    # `napp_id` regex, composed by two mandatory parts (username, napp_name)
    # and one optional (version).
    # username and napp_name need to start with a letter, are composed of
    # letters, numbers and underscores and must have at least three
    # characters. They are separated by a slash.
    # version is optional and can take any format. It is separated by a
    # colon, if a version is defined.
    regex = r'([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?'
    compiled_regex = re.compile(regex)

    matched = compiled_regex.fullmatch(napp_id)
    if not matched:
        msg = '"{}" NApp does not have the form username/napp_name[:version].'
        raise KytosException(msg.format(napp_id))

    return matched.groups()
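A usage sketch in the doctest style used elsewhere in this file (assuming `re` is imported and `KytosException` is defined):

>>> parse_napp('kytos/of_core:1.2.3')
('kytos', 'of_core', '1.2.3')
>>> parse_napp('kytos/of_core')
('kytos', 'of_core', None)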
843,352
Create FoxPuppet object.

Args:
    selenium (:py:class:`~selenium.webdriver.remote.webdriver.WebDriver`):
        Firefox WebDriver object.

def __init__(self, selenium):
    self.selenium = selenium
    self.window_manager = WindowManager(selenium)

    # Need to ensure the first window is a browser window
    self.browser = self.window_manager.windows[0]
843,520
Renders a string from a path template using the provided bindings.

Args:
    bindings (dict): A dictionary of var names to binding strings.

Returns:
    str: The rendered instantiation of this path template.

Raises:
    ValidationException: If a key isn't provided or if a sub-template
        can't be parsed.

def render(self, bindings):
    out = []
    binding = False
    for segment in self.segments:
        if segment.kind == _BINDING:
            if segment.literal not in bindings:
                raise ValidationException(
                    ('rendering error: value for key \'{}\' '
                     'not provided').format(segment.literal))
            out.extend(PathTemplate(bindings[segment.literal]).segments)
            binding = True
        elif segment.kind == _END_BINDING:
            binding = False
        else:
            if binding:
                continue
            out.append(segment)
    path = _format(out)
    self.match(path)
    return path
843,579
Matches a fully qualified path template string.

Args:
    path (str): A fully qualified path template string.

Returns:
    dict: Var names to matched binding values.

Raises:
    ValidationException: If path can't be matched to the template.

def match(self, path):
    this = self.segments
    that = path.split('/')
    current_var = None
    bindings = {}
    segment_count = self.segment_count
    j = 0
    for i in range(0, len(this)):
        if j >= len(that):
            break
        if this[i].kind == _TERMINAL:
            if this[i].literal == '*':
                bindings[current_var] = that[j]
                j += 1
            elif this[i].literal == '**':
                until = j + len(that) - segment_count + 1
                segment_count += len(that) - segment_count
                bindings[current_var] = '/'.join(that[j:until])
                j = until
            elif this[i].literal != that[j]:
                raise ValidationException(
                    'mismatched literal: \'%s\' != \'%s\'' % (
                        this[i].literal, that[j]))
            else:
                j += 1
        elif this[i].kind == _BINDING:
            current_var = this[i].literal
    if j != len(that) or j != segment_count:
        raise ValidationException(
            'match error: could not render from the path template: {}'
            .format(path))
    return bindings
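A hedged usage sketch, assuming the parser turns a `{var}` segment into a binding around a `*` terminal (as in google-gax path templates):

>>> template = PathTemplate('shelves/{shelf}/books/{book}')
>>> template.match('shelves/s1/books/b2')
{'shelf': 's1', 'book': 'b2'}

A path with the wrong number of segments raises ValidationException.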
843,580
Returns a list of path template segments parsed from data.

Args:
    data: A path template string.

Returns:
    A list of _Segment.

def parse(self, data):
    self.binding_var_count = 0
    self.segment_count = 0

    segments = self.parser.parse(data)

    # Validation step: checks that there is at most one path wildcard.
    path_wildcard = False
    for segment in segments:
        if segment.kind == _TERMINAL and segment.literal == '**':
            if path_wildcard:
                raise ValidationException(
                    'validation error: path template cannot contain more '
                    'than one path wildcard')
            path_wildcard = True

    return segments
843,582
Create a notification object.

Args:
    window (:py:class:`BrowserWindow`): Window object this region
        appears in.
    root (:py:class:`~selenium.webdriver.remote.webelement.WebElement`):
        WebDriver element object that serves as the root for the
        notification.

Returns:
    :py:class:`BaseNotification`: Firefox notification.

def create(window, root):
    notifications = {}
    _id = root.get_property("id")

    from foxpuppet.windows.browser.notifications import addons
    notifications.update(addons.NOTIFICATIONS)

    return notifications.get(_id, BaseNotification)(window, root)
843,597
Instantiate an OpenAPI object.

Args:
    napp_path (string): NApp directory.
    tpl_path (string): Path to the folder that holds the template.

def __init__(self, napp_path, tpl_path):
    self._napp_path = napp_path
    self._template = tpl_path / 'openapi.yml.template'
    self._api_file = napp_path / 'openapi.yml'

    metadata = napp_path / 'kytos.json'
    self._napp = NApp.create_from_json(metadata)

    # Data for a path
    self._summary = None
    self._description = None

    # Part of template context
    self._paths = {}
843,671
Gets to a certain marker position in the BED file.

Args:
    n (int): The index of the marker to seek to.

def seek(self, n):
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")

    if 0 <= n < self._nb_markers:
        self._n = n
        self._bed.seek(self._get_seek_position(n))
    else:
        # Invalid seek value
        raise ValueError("invalid position in BED: {}".format(n))
843,806
Iterates over genotypes for a list of markers.

Args:
    markers (list): The list of markers to iterate over.
    return_index (bool): Whether to return the marker's index or not.

Returns:
    tuple: The name of the marker as a string, and its genotypes as a
    :py:class:`numpy.ndarray` (additive format).

def iter_geno_marker(self, markers, return_index=False):
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")

    # If string, we change to list
    if isinstance(markers, str):
        markers = [markers]

    # Iterating over all markers
    if return_index:
        for marker in markers:
            geno, seek = self.get_geno_marker(marker, return_index=True)
            yield marker, geno, seek
    else:
        for marker in markers:
            yield marker, self.get_geno_marker(marker)
843,812
Iterates over genotypes for a list of markers (ACGT format).

Args:
    markers (list): The list of markers to iterate over.

Returns:
    tuple: The name of the marker as a string, and its genotypes as a
    :py:class:`numpy.ndarray` (ACGT format).

def iter_acgt_geno_marker(self, markers):
    # We iterate over the markers
    for snp, geno, s in self.iter_geno_marker(markers, return_index=True):
        # Getting the SNP position and converting to ACGT
        yield snp, self._allele_encoding[s][geno]
843,813
Gets the genotypes for a given marker.

Args:
    marker (str): The name of the marker.
    return_index (bool): Whether to return the marker's index or not.

Returns:
    numpy.ndarray: The genotypes of the marker (additive format).

def get_geno_marker(self, marker, return_index=False):
    if self._mode != "r":
        raise UnsupportedOperation("not available in 'w' mode")

    # Check if the marker exists
    if marker not in self._bim.index:
        raise ValueError("{}: marker not in BIM".format(marker))

    # Seeking to the correct position
    seek_index = self._bim.loc[marker, "i"]
    self.seek(seek_index)

    if return_index:
        return self._read_current_marker(), seek_index
    return self._read_current_marker()
843,814
Gets the genotypes for a given marker (ACGT format).

Args:
    marker (str): The name of the marker.

Returns:
    numpy.ndarray: The genotypes of the marker (ACGT format).

def get_acgt_geno_marker(self, marker):
    # Getting the marker's genotypes
    geno, snp_position = self.get_geno_marker(marker, return_index=True)

    # Returning the ACGT's format of the genotypes
    return self._allele_encoding[snp_position][geno]
843,815
Write genotypes to binary file.

Args:
    genotypes (numpy.ndarray): The genotypes to write in the BED file.

def write_genotypes(self, genotypes):
    if self._mode != "w":
        raise UnsupportedOperation("not available in 'r' mode")

    # Initializing the number of samples if required
    if self._nb_values is None:
        self._nb_values = len(genotypes)

    # Checking the expected number of samples
    if self._nb_values != len(genotypes):
        raise ValueError("{:,d} samples expected, got {:,d}".format(
            self._nb_values,
            len(genotypes),
        ))

    # Writing to file
    byte_array = [
        g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6) for g in
        self._grouper((_byte_recode[geno] for geno in genotypes), 4)
    ]
    self._bed.write(bytearray(byte_array))
843,816
Collect data into fixed-length chunks or blocks.

Args:
    iterable (iterable): The data to group.
    n (int): The size of the chunk.
    fillvalue (int): The fill value.

Returns:
    iterator: An iterator over the chunks.

def _grouper(iterable, n, fillvalue=0):
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)
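A quick usage sketch (treating `_grouper` as a standalone function; inside the class it is presumably a staticmethod):

>>> from itertools import zip_longest
>>> list(_grouper([1, 2, 3, 4, 5], 4))
[(1, 2, 3, 4), (5, 0, 0, 0)]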
843,817
Retrieve the raster in the GRASS ASCII Grid format.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.

Returns:
    str: GRASS ASCII string.

def getAsGrassAsciiGrid(self, session):
    # Make sure the raster field is valid
    if self.raster is not None:
        converter = RasterConverter(sqlAlchemyEngineOrSession=session)
        return converter.getAsGrassAsciiRaster(
            tableName=self.tableName,
            rasterIdFieldName='id',
            rasterId=self.id,
            rasterFieldName=self.rasterColumnName)
843,909
Bulk ``register_item``.

Args:
    items (iterable[Tree]): Sequence of nodes to be registered as children.

def register_items(self, items):
    for item in items:
        item.set_parent(self)
    self.items.extend(items)
844,032
Fetch items from the database matching the arguments.

Args:
    page (int): which page will be sliced; slice size is ``self.per_page``.
    order_by (str): a field name to order query by.
    filters (dict): a ``filter name``: ``value`` dict.

Returns:
    tuple with: items, sliced by page*self.per_page, and the total of
    items without the slice.

def get_items(self, page=1, order_by=None, filters=None):
    start = (page - 1) * self.per_page
    query = self.get_query()
    if order_by is not None:
        query = query.order_by(self._get_field(order_by))
    if filters is not None:
        query = self._filter(query, filters)
    return query.offset(start).limit(self.per_page), self.count(query)
844,296
Retrieve the geometry in KML format.

This method is a veneer for an SQL query that calls the ``ST_AsKml()``
function on the geometry column.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.

Returns:
    str: KML string representation of geometry.

def getAsKml(self, session):
    # Assumed query: per the docstring, call ST_AsKml() on the geometry
    # column.
    statement = """
                SELECT ST_AsKml({0}) AS kml
                FROM {1}
                WHERE id = {2};
                """.format(self.geometryColumnName, self.tableName, self.id)

    result = session.execute(statement)

    for row in result:
        return row.kml
844,489
Retrieve the geometry in Well Known Text format.

This method is a veneer for an SQL query that calls the ``ST_AsText()``
function on the geometry column.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.

Returns:
    str: Well Known Text string representation of geometry.

def getAsWkt(self, session):
    # Assumed query: per the docstring, call ST_AsText() on the geometry
    # column.
    statement = """
                SELECT ST_AsText({0}) AS wkt
                FROM {1}
                WHERE id = {2};
                """.format(self.geometryColumnName, self.tableName, self.id)

    result = session.execute(statement)

    for row in result:
        return row.wkt
844,490
Retrieve the geometry in GeoJSON format.

This method is a veneer for an SQL query that calls the
``ST_AsGeoJSON()`` function on the geometry column.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.

Returns:
    str: GeoJSON string representation of geometry.

def getAsGeoJson(self, session):
    # Assumed query: per the docstring, call ST_AsGeoJSON() on the
    # geometry column.
    statement = """
                SELECT ST_AsGeoJSON({0}) AS json
                FROM {1}
                WHERE id = {2};
                """.format(self.geometryColumnName, self.tableName, self.id)

    result = session.execute(statement)

    for row in result:
        return row.json
844,491
Retrieve the spatial reference id by which the geometry column is
registered.

This method is a veneer for an SQL query that calls the ``ST_SRID()``
function on the geometry column.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.

Returns:
    str: PostGIS spatial reference ID.

def getSpatialReferenceId(self, session):
    # Assumed query: per the docstring, call ST_SRID() on the geometry
    # column.
    statement = """
                SELECT ST_SRID({0}) AS srid
                FROM {1}
                WHERE id = {2};
                """.format(self.geometryColumnName, self.tableName, self.id)

    result = session.execute(statement)

    for row in result:
        return row.srid
844,492
Append directory to relative paths in project file.

By default, the project file paths are read and written as relative
paths. Use this method to prepend a directory to all the paths in the
project file.

Args:
    directory (str): Directory path to prepend to file paths in project
        file.
    projectFilePath (str): Path to project file that will be modified.

def appendDirectory(self, directory, projectFilePath):
    lines = []
    with open(projectFilePath, 'r') as original:
        for l in original:
            lines.append(l)

    with open(projectFilePath, 'w') as new:
        for line in lines:
            card = {}
            try:
                card = self._extractCard(line)
            except Exception:
                card = self._extractDirectoryCard(line)

            # Determine number of spaces between card and value for nice
            # alignment
            numSpaces = max(2, 25 - len(card['name']))

            if card['value'] is None:
                rewriteLine = '%s\n' % (card['name'])
            else:
                if card['name'] == 'WMS':
                    rewriteLine = '%s %s\n' % (card['name'], card['value'])
                elif card['name'] == 'PROJECT_PATH':
                    filePath = '"%s"' % os.path.normpath(directory)
                    rewriteLine = '%s%s%s\n' % (card['name'],
                                                ' ' * numSpaces,
                                                filePath)
                elif '"' in card['value']:
                    filename = card['value'].strip('"')
                    filePath = '"%s"' % os.path.join(directory, filename)
                    rewriteLine = '%s%s%s\n' % (card['name'],
                                                ' ' * numSpaces,
                                                filePath)
                else:
                    rewriteLine = '%s%s%s\n' % (card['name'],
                                                ' ' * numSpaces,
                                                card['value'])

            new.write(rewriteLine)
844,634
Retrieve card object for given card name.

Args:
    name (str): Name of card to be retrieved.

Returns:
    :class:`.ProjectCard` or None: Project card object. Will return None
    if the card is not available.

def getCard(self, name):
    cards = self.projectCards

    for card in cards:
        if card.name.upper() == name.upper():
            return card

    return None
844,646
Adds/updates card for GSSHA project file.

Args:
    name (str): Name of card to be updated/added.
    value (str): Value to attach to the card.
    add_quotes (Optional[bool]): If True, will add quotes around string.
        Default is False.

def setCard(self, name, value, add_quotes=False):
    gssha_card = self.getCard(name)

    if add_quotes:
        value = '"{0}"'.format(value)

    if gssha_card is None:
        # add new card
        new_card = ProjectCard(name=name, value=value)
        new_card.projectFile = self
    else:
        gssha_card.value = value
844,647
Retrieve a Well Known Text representation of the model. Includes
polygonized mask map and vector stream network.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.
    withStreamNetwork (bool, optional): Include stream network. Defaults
        to True.
    withNodes (bool, optional): Include nodes. Defaults to False.

Returns:
    str: Well Known Text string.

def getModelSummaryAsWkt(self, session, withStreamNetwork=True,
                         withNodes=False):
    # Get mask map
    watershedMaskCard = self.getCard('WATERSHED_MASK')
    maskFilename = watershedMaskCard.value
    maskExtension = maskFilename.strip('"').split('.')[1]
    maskMap = session.query(RasterMapFile).\
        filter(RasterMapFile.projectFile == self).\
        filter(RasterMapFile.fileExtension == maskExtension).\
        one()

    # Get mask map as a Well Known Text polygon. Assumed query:
    # polygonize the raster column and return it as WKT.
    statement = """
                SELECT ST_AsText(ST_Polygon({0})) AS polygon
                FROM {1}
                WHERE id = {2};
                """.format('raster', maskMap.tableName, maskMap.id)

    result = session.execute(statement)

    maskMapTextPolygon = ''
    for row in result:
        maskMapTextPolygon = row.polygon

    # Default WKT model representation string is a geometry collection
    # with the mask map polygon
    wktString = 'GEOMCOLLECTION ({0})'.format(maskMapTextPolygon)

    if withStreamNetwork:
        # Get the channel input file for the stream network
        channelInputFile = self.channelInputFile

        # Some models may not have streams enabled
        if channelInputFile is not None:
            # Use the existing method on the channel input file to
            # generate the stream network WKT
            wktStreamNetwork = channelInputFile.getStreamNetworkAsWkt(
                session=session, withNodes=withNodes)

            # Strip off the "GEOMCOLLECTION" identifier; the trailing ')'
            # is kept and closes the new collection below
            wktStreamNetwork = wktStreamNetwork.replace('GEOMCOLLECTION (', '')

            # Replace the WKT model representation string with a geometry
            # collection with mask map and all stream network components
            wktString = 'GEOMCOLLECTION ({0}, {1}'.format(maskMapTextPolygon,
                                                          wktStreamNetwork)

    return wktString
844,650
Retrieve a GeoJSON representation of the model. Includes vectorized mask
map and stream network.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.
    withStreamNetwork (bool, optional): Include stream network. Defaults
        to True.
    withNodes (bool, optional): Include nodes. Defaults to False.

Returns:
    str: GeoJSON string.

def getModelSummaryAsGeoJson(self, session, withStreamNetwork=True,
                             withNodes=False):
    # Get mask map
    watershedMaskCard = self.getCard('WATERSHED_MASK')
    maskFilename = watershedMaskCard.value
    maskExtension = maskFilename.strip('"').split('.')[1]
    maskMap = session.query(RasterMapFile).\
        filter(RasterMapFile.projectFile == self).\
        filter(RasterMapFile.fileExtension == maskExtension).\
        one()

    # Get mask map as a GeoJSON polygon. Assumed query: polygonize the
    # raster column and return it as GeoJSON.
    statement = """
                SELECT ST_AsGeoJSON(ST_Polygon({0})) AS polygon
                FROM {1}
                WHERE id = {2};
                """.format('raster', maskMap.tableName, maskMap.id)

    result = session.execute(statement)

    maskMapJsonPolygon = ''
    for row in result:
        maskMapJsonPolygon = row.polygon

    jsonString = maskMapJsonPolygon

    if withStreamNetwork:
        # Get the channel input file for the stream network
        channelInputFile = self.channelInputFile

        if channelInputFile is not None:
            # Use the existing method on the channel input file to
            # generate the stream network GeoJson
            jsonStreamNetwork = channelInputFile.getStreamNetworkAsGeoJson(
                session=session, withNodes=withNodes)

            # Convert to json Python objects
            featureCollection = json.loads(jsonStreamNetwork)
            jsonMaskMapObjects = json.loads(maskMapJsonPolygon)

            # Create a mask feature
            maskFeature = {"type": "Feature",
                           "geometry": jsonMaskMapObjects,
                           "properties": {},
                           "id": maskMap.id}

            # Add mask map to feature collection
            tempFeatures = featureCollection['features']
            tempFeatures.append(maskFeature)
            featureCollection['features'] = tempFeatures

            # Dump to string
            jsonString = json.dumps(featureCollection)

    return jsonString
844,651
Sets the outlet grid cell information in the project file.

Parameters:
    col (float): 1-based column index.
    row (float): 1-based row index.
    outslope (Optional[float]): River slope at outlet.

def setOutlet(self, col, row, outslope=None):
    # OUTROW, OUTCOL, OUTSLOPE
    gssha_grid = self.getGrid()
    # col, row = gssha_grid.lonlat2pixel(longitude, latitude)
    # add 1 to row & col because GSSHA is 1-based
    self.setCard(name='OUTROW', value=str(row))
    self.setCard(name='OUTCOL', value=str(col))
    if outslope is None:
        self.calculateOutletSlope()
    else:
        self.setCard(name='OUTSLOPE', value=str(outslope))
844,657
Write project card to string.

Args:
    originalPrefix (str): Original name to give to files that follow the
        project naming convention (e.g: prefix.gag).
    newPrefix (str, optional): If new prefix is desired, pass in this
        parameter. Defaults to None.

Returns:
    str: Card and value as they would be written to the project file.

def write(self, originalPrefix, newPrefix=None):
    # Determine number of spaces between card and value for nice alignment
    numSpaces = max(2, 25 - len(self.name))

    # Handle special case of booleans
    if self.value is None:
        line = '%s\n' % self.name
    else:
        if self.name == 'WMS':
            line = '%s %s\n' % (self.name, self.value)
        elif newPrefix is None:
            line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value)
        elif originalPrefix in self.value:
            line = '%s%s%s\n' % (self.name, ' ' * numSpaces,
                                 self.value.replace(originalPrefix, newPrefix))
        else:
            line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value)
    return line
844,679
Look up spatial reference system using the projection file.

Args:
    directory (str): Directory containing the projection file.
    filename (str): Name of the projection file.

Return:
    int: Spatial Reference ID

def lookupSpatialReferenceID(cls, directory, filename):
    path = os.path.join(directory, filename)

    with open(path, 'r') as f:
        srid = lookupSpatialReferenceID(f.read())

    return srid
844,853
Log events to the console.

Args:
    status (bool, Optional, Default=True): whether logging to console
        should be turned on (True) or off (False).
    level (string, Optional, Default=None): level of logging; whichever
        level is chosen all higher levels will be logged.
        See: https://docs.python.org/2/library/logging.html#levels

def log_to_console(status=True, level=None):
    if status:
        if level is not None:
            logger.setLevel(level)

        console_handler = logging.StreamHandler()

        # create formatter
        formatter = logging.Formatter('%(levelname)s-%(name)s: %(message)s')

        # add formatter to handler
        console_handler.setFormatter(formatter)

        logger.addHandler(console_handler)
        logger.info("GSSHApy {0}".format(version()))
    else:
        for h in logger.handlers:
            if type(h).__name__ == 'StreamHandler':
                logger.removeHandler(h)
844,856
Log events to a file.

Args:
    status (bool, Optional, Default=True): whether logging to file should
        be turned on (True) or off (False).
    filename (string, Optional, Default=None): path of file to log to.
    level (string, Optional, Default=None): level of logging; whichever
        level is chosen all higher levels will be logged.
        See: https://docs.python.org/2/library/logging.html#levels

def log_to_file(status=True, filename=default_log_file, level=None):
    if status:
        if level is not None:
            logger.setLevel(level)

        try:
            os.mkdir(os.path.dirname(filename))
        except OSError:
            pass

        file_handler = logging.FileHandler(filename)

        # create formatter
        formatter = logging.Formatter('%(asctime)s - %(levelname)s-%(name)s: '
                                      '%(message)s')

        # add formatter to handler
        file_handler.setFormatter(formatter)

        logger.addHandler(file_handler)
        logger.info("GSSHApy {0}".format(version()))
    else:
        for h in logger.handlers:
            if type(h).__name__ == 'FileHandler':
                logger.removeHandler(h)
844,857
Apply global pre-processing to values during reading throughout the
project.

Args:
    valueString (str): String representing the value to be preprocessed.
    replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance
        of the replace param file. Required if replacement variables are
        included in the project.

Returns:
    str: Processed value as a string.

def valueReadPreprocessor(valueString, replaceParamsFile=None):
    if type(valueString) is bool:
        log.warning("Only numerical variable types can be handled by the "
                    "valueReadPreprocessor function.")
        return valueString

    # Default
    processedValue = valueString

    # Check for replacement variables
    if replaceParamsFile is not None and valueString is not None:
        if '[' in valueString or ']' in valueString:
            # Set default value
            processedValue = '{0}'.format(REPLACE_NO_VALUE)

            # Find the matching parameter and return the negative of the id
            for targetParam in replaceParamsFile.targetParameters:
                if targetParam.targetVariable == valueString:
                    processedValue = '{0}'.format(-1 * targetParam.id)
                    break

    return processedValue
844,943
Look up variable name in replace param file for the negative id given and
return it.

Args:
    valueString (str): String representing the value to be preprocessed.
    replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance
        of the replace param file. Required if replacement variables are
        included in the project.

Returns:
    str: Processed value as a string.

def valueWritePreprocessor(valueString, replaceParamsFile=None):
    if type(valueString) is bool:
        log.warning("Only numerical variable types can be handled by the "
                    "valueWritePreprocessor function.")
        return valueString

    # Default
    variableString = valueString

    # Check for replacement variables
    if replaceParamsFile is not None:
        # Set Default
        if variableString == REPLACE_NO_VALUE:
            variableString = '[NO_VARIABLE]'
        else:
            try:
                number = int(valueString)
                if number < 0:
                    parameterID = number * -1

                    # Find the matching parameter
                    for targetParam in replaceParamsFile.targetParameters:
                        if targetParam.id == parameterID:
                            variableString = targetParam.targetVariable
                            break
            except (TypeError, ValueError):
                pass

    return variableString
844,944
Executes all the tests for pyplink.

Args:
    verbosity (int): The verbosity level for :py:mod:`unittest`.

Just set ``verbosity`` to an integer higher than ``1`` to have more
information about the tests.

def test(verbosity=1):  # pragma: no cover
    import unittest
    from .tests import test_suite

    # Testing
    unittest.TextTestRunner(verbosity=verbosity).run(test_suite)
845,187
Retrieve the links in the order of the link number.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.

Returns:
    list: A list of :class:`.StreamLink` objects.

def getOrderedLinks(self, session):
    streamLinks = session.query(StreamLink).\
        filter(StreamLink.channelInputFile == self).\
        order_by(StreamLink.linkNumber).\
        all()

    return streamLinks
845,345
Retrieve the stream network geometry in Well Known Text format.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.
    withNodes (bool, optional): Include nodes. Defaults to True.

Returns:
    str: Well Known Text string.

def getStreamNetworkAsWkt(self, session, withNodes=True):
    wkt_list = []

    for link in self.streamLinks:
        wkt_link = link.getAsWkt(session)

        if wkt_link:
            wkt_list.append(wkt_link)

        if withNodes:
            for node in link.nodes:
                wkt_node = node.getAsWkt(session)

                if wkt_node:
                    wkt_list.append(wkt_node)

    return 'GEOMCOLLECTION ({0})'.format(', '.join(wkt_list))
845,347
Retrieve the stream network geometry in GeoJSON format.

Args:
    session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        object bound to PostGIS enabled database.
    withNodes (bool, optional): Include nodes. Defaults to True.

Returns:
    str: GeoJSON string.

def getStreamNetworkAsGeoJson(self, session, withNodes=True):
    features_list = []

    # Assemble link features
    for link in self.streamLinks:
        link_geoJson = link.getAsGeoJson(session)

        if link_geoJson:
            # Reuse the value fetched above instead of querying again
            link_geometry = json.loads(link_geoJson)

            link_properties = {"link_number": link.linkNumber,
                               "type": link.type,
                               "num_elements": link.numElements,
                               "dx": link.dx,
                               "erode": link.erode,
                               "subsurface": link.subsurface}

            link_feature = {"type": "Feature",
                            "geometry": link_geometry,
                            "properties": link_properties,
                            "id": link.id}

            features_list.append(link_feature)

        # Assemble node features
        if withNodes:
            for node in link.nodes:
                node_geoJson = node.getAsGeoJson(session)

                if node_geoJson:
                    node_geometry = json.loads(node_geoJson)

                    node_properties = {"link_number": link.linkNumber,
                                       "node_number": node.nodeNumber,
                                       "elevation": node.elevation}

                    node_feature = {"type": "Feature",
                                    "geometry": node_geometry,
                                    "properties": node_properties,
                                    "id": node.id}

                    features_list.append(node_feature)

    feature_collection = {"type": "FeatureCollection",
                          "features": features_list}

    return json.dumps(feature_collection)
845,348
A Basic View with template.

Args:
    view_name (str): The name of the view, used to create a custom
        template name.
    success_url (str): The url returned by ``post`` if form is valid.

def __init__(self, view_name, success_url=None):
    if success_url is not None:
        self.success_url = success_url
    self.view_name = view_name
845,455
A simple landing view; the template may be overridden to customize.

Args:
    parent (Group): ``Group`` host of ``self``.

def __init__(self, parent, *args, **kwargs):
    self.parent = parent
    super().__init__(*args, **kwargs)
845,457
Concat URLs.

Args:
    *urls (str): URL fragments to merge.

Returns:
    str: urls starting and ending with / merged with /

def concat_urls(*urls):
    normalized_urls = filter(bool, [url.strip('/') for url in urls])
    joined_urls = '/'.join(normalized_urls)
    if not joined_urls:
        return '/'
    return '/{}/'.format(joined_urls)
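A usage sketch in doctest style (no extra imports needed):

>>> concat_urls('/api/', 'v1', '', 'users/')
'/api/v1/users/'
>>> concat_urls()
'/'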
845,468
Add the genes for a variant.

Get the hgnc symbols from all transcripts and add them to the variant.

Args:
    variant (dict): A variant dictionary

Returns:
    genes (list): A list of Genes

def _get_genes(self, variant):
    transcripts = variant['transcripts']
    ensembl_ids = [transcript['ensembl_id'] for transcript in transcripts
                   if transcript['ensembl_id']]
    hgnc_symbols = [transcript['hgnc_symbol'] for transcript in transcripts
                    if transcript['hgnc_symbol']]
    genes = get_gene_info(ensembl_ids, hgnc_symbols)
    return genes
845,511
Authenticate the gmusicapi Mobileclient instance.

Parameters:
    username (Optional[str]): Your Google Music username. Will be
        prompted if not given.
    password (Optional[str]): Your Google Music password. Will be
        prompted if not given.
    android_id (Optional[str]): The 16 hex digits from an Android device
        ID. Default: Use gmusicapi.Mobileclient.FROM_MAC_ADDRESS to
        create ID from computer's MAC address.

Returns:
    ``True`` on successful login or ``False`` on unsuccessful login.

def login(self, username=None, password=None, android_id=None):
    cls_name = type(self).__name__

    if username is None:
        username = input("Enter your Google username or email address: ")

    if password is None:
        password = getpass.getpass("Enter your Google Music password: ")

    if android_id is None:
        android_id = Mobileclient.FROM_MAC_ADDRESS

    try:
        self.api.login(username, password, android_id)
    except OSError:
        logger.exception("{} authentication failed.".format(cls_name))

    if not self.is_authenticated:
        logger.warning("{} authentication failed.".format(cls_name))
        return False

    logger.info("{} authentication succeeded.\n".format(cls_name))
    return True
845,519
Get playlist information of a user-generated Google Music playlist.

Parameters:
    playlist (str): Name or ID of Google Music playlist. Names are
        case-sensitive. Google allows multiple playlists with the same
        name. If multiple playlists have the same name, the first one
        encountered is used.

Returns:
    dict: The playlist dict as returned by
    Mobileclient.get_all_user_playlist_contents.

def get_google_playlist(self, playlist):
    logger.info("Loading playlist {0}".format(playlist))

    for google_playlist in self.api.get_all_user_playlist_contents():
        if google_playlist['name'] == playlist or google_playlist['id'] == playlist:
            return google_playlist
    else:
        logger.warning("Playlist {0} does not exist.".format(playlist))
        return {}
845,521
Load a case with individuals.

Args:
    case_obj (puzzle.models.Case): initialized case model

def add_case(self, case_obj, vtype='snv', mode='vcf', ped_svg=None):
    new_case = Case(case_id=case_obj.case_id,
                    name=case_obj.name,
                    variant_source=case_obj.variant_source,
                    variant_type=vtype,
                    variant_mode=mode,
                    pedigree=ped_svg,
                    compressed=case_obj.compressed,
                    tabix_index=case_obj.tabix_index)

    # build individuals
    inds = [Individual(
        ind_id=ind.ind_id,
        name=ind.name,
        mother=ind.mother,
        father=ind.father,
        sex=ind.sex,
        phenotype=ind.phenotype,
        ind_index=ind.ind_index,
        variant_source=ind.variant_source,
        bam_path=ind.bam_path,
    ) for ind in case_obj.individuals]

    new_case.individuals = inds

    if self.case(new_case.case_id):
        logger.warning("Case already exists in database!")
    else:
        self.session.add(new_case)
        self.save()
    return new_case
845,531
Delete a case from the database.

Args:
    case_obj (puzzle.models.Case): initialized case model

def delete_case(self, case_obj):
    for ind_obj in case_obj.individuals:
        self.delete_individual(ind_obj)

    logger.info("Deleting case {0} from database".format(case_obj.case_id))
    self.session.delete(case_obj)
    self.save()
    return case_obj
845,532
Delete an individual from the database.

Args:
    ind_obj (puzzle.models.Individual): initialized individual model

def delete_individual(self, ind_obj):
    logger.info("Deleting individual {0} from database"
                .format(ind_obj.ind_id))
    self.session.delete(ind_obj)
    self.save()
    return ind_obj
845,533
Add the thousand genomes frequency.

Args:
    variant_obj (puzzle.models.Variant)
    info_dict (dict): An info dictionary

def _add_thousand_g(self, variant_obj, info_dict):
    thousand_g = info_dict.get('1000GAF')
    if thousand_g:
        logger.debug("Updating thousand_g to: {0}".format(thousand_g))
        variant_obj.thousand_g = float(thousand_g)
        variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))
845,629
Add the gmaf frequency.

Args:
    variant_obj (puzzle.models.Variant)
    info_dict (dict): An info dictionary

def _add_gmaf(self, variant_obj, info_dict):
    # TODO: search for max freq in info dict
    for transcript in variant_obj.transcripts:
        gmaf_raw = transcript.GMAF
        if gmaf_raw:
            gmaf = float(gmaf_raw.split(':')[-1])
            variant_obj.add_frequency('GMAF', gmaf)
            if not variant_obj.thousand_g:
                variant_obj.thousand_g = gmaf
845,630
Add the ExAC frequency.

Args:
    variant_obj (puzzle.models.Variant)
    info_dict (dict): An info dictionary

def _add_exac(self, variant_obj, info_dict):
    exac = None
    exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']
    for key in exac_keys:
        if info_dict.get(key):
            exac = float(info_dict[key])

    # If not found in vcf, search transcripts
    if not exac:
        for transcript in variant_obj.transcripts:
            exac_raw = transcript.ExAC_MAF
            if exac_raw:
                exac = float(exac_raw.split(':')[-1])

    if exac:
        variant_obj.add_frequency('ExAC', exac)
845,631
Add the genotypes for a variant for all individuals.

Args:
    variant_obj (puzzle.models.Variant)
    gemini_variant (GeminiQueryRow): The gemini variant
    case_id (str): related case id
    individual_objs (list(dict)): A list of Individuals

def _add_genotypes(self, variant_obj, gemini_variant, case_id,
                   individual_objs):
    for ind in individual_objs:
        index = ind.ind_index
        variant_obj.add_individual(Genotype(
            sample_id=ind.ind_id,
            genotype=gemini_variant['gts'][index],
            case_id=case_id,
            phenotype=ind.phenotype,
            ref_depth=gemini_variant['gt_ref_depths'][index],
            alt_depth=gemini_variant['gt_alt_depths'][index],
            depth=gemini_variant['gt_depths'][index],
            genotype_quality=gemini_variant['gt_quals'][index]
        ))
845,641
Recognizes and claims MuTect VCFs from the set of all input VCFs.

Each defined caller has a chance to evaluate and claim all the incoming
files as something that it can process.

Args:
    file_readers: the collection of currently unclaimed files.

Returns:
    A tuple of unclaimed readers and MuTectVcfReaders.

def claim(self, file_readers):
    unclaimed_readers = []
    vcf_readers = []
    for file_reader in file_readers:
        if self._is_mutect_vcf(file_reader):
            vcf_reader = vcf.VcfReader(file_reader)
            vcf_readers.append(_MutectVcfReader(vcf_reader))
        else:
            unclaimed_readers.append(file_reader)
    return (unclaimed_readers, vcf_readers)
845,658
Add the gmaf frequency (max alternative allele frequency).

Args:
    variant_obj (puzzle.models.Variant)
    gemini_variant (GeminiQueryRow)

def _add_gmaf(self, variant_obj, gemini_variant):
    max_af = gemini_variant['max_aaf_all']
    if max_af:
        max_af = float(max_af)
        if max_af != -1.0:
            variant_obj.set_max_freq(max_af)
845,708
Add the ExAC frequency.

Args:
    variant_obj (puzzle.models.Variant)
    gemini_variant (GeminiQueryRow)

def _add_exac(self, variant_obj, gemini_variant):
    exac = gemini_variant['aaf_exac_all']
    if exac:
        exac = float(exac)
        variant_obj.add_frequency('ExAC', exac)
        logger.debug("Updating ExAC to: {0}".format(exac))
845,709
Return the genes info based on the transcripts found.

Args:
    ensembl_ids (Optional[list]): list of Ensembl gene ids
    hgnc_symbols (Optional[list]): list of HGNC gene symbols

Returns:
    iterable: an iterable with `Gene` objects

def get_gene_info(ensembl_ids=None, hgnc_symbols=None):
    uniq_ensembl_ids = set(ensembl_id for ensembl_id in (ensembl_ids or []))
    uniq_hgnc_symbols = set(hgnc_symbol for hgnc_symbol
                            in (hgnc_symbols or []))

    genes = []
    gene_data = []

    if uniq_ensembl_ids:
        for ensembl_id in uniq_ensembl_ids:
            for res in query_gene(ensembl_id=ensembl_id):
                gene_data.append(res)
    elif uniq_hgnc_symbols:
        for hgnc_symbol in uniq_hgnc_symbols:
            query_res = query_gene(hgnc_symbol=hgnc_symbol)
            if query_res:
                for res in query_res:
                    gene_data.append(res)
            else:
                # If no result we add just the symbol
                gene_data.append({
                    'hgnc_symbol': hgnc_symbol,
                    'hgnc_id': None,
                    'ensembl_id': None,
                    'description': None,
                    'chrom': 'unknown',
                    'start': 0,
                    'stop': 0,
                    'hi_score': None,
                    'constraint_score': None,
                })

    for gene in gene_data:
        genes.append(Gene(
            symbol=gene['hgnc_symbol'],
            hgnc_id=gene['hgnc_id'],
            ensembl_id=gene['ensembl_id'],
            description=gene['description'],
            chrom=gene['chrom'],
            start=gene['start'],
            stop=gene['stop'],
            location=get_cytoband_coord(gene['chrom'], gene['start']),
            hi_score=gene['hi_score'],
            constraint_score=gene['constraint_score'],
            omim_number=get_omim_number(gene['hgnc_symbol'])
        ))

    return genes
845,712
Get the most severe consequence.

Go through all transcripts and get the most severe consequence.

Args:
    transcripts (list): A list of transcripts to evaluate

Returns:
    most_severe_consequence (str): The most severe consequence

def get_most_severe_consequence(transcripts):
    most_severe_consequence = None
    most_severe_score = None

    for transcript in transcripts:
        for consequence in transcript['consequence'].split('&'):
            logger.debug("Checking severity score for consequence: "
                         "{0}".format(consequence))
            severity_score = SEVERITY_DICT.get(consequence)
            logger.debug("Severity score found: {0}".format(severity_score))
            if severity_score is not None:
                # Compare against None explicitly so a score of 0 (the
                # most severe) is not treated as "no score yet".
                if (most_severe_score is None or
                        severity_score < most_severe_score):
                    most_severe_consequence = consequence
                    most_severe_score = severity_score

    return most_severe_consequence
845,713
Get the cytoband coordinate for a position.

Args:
    chrom (str): A chromosome
    pos (int): The position

Returns:
    cytoband

def get_cytoband_coord(chrom, pos):
    # Remove only a leading 'chr' prefix; strip('chr') would remove any of
    # the characters c/h/r from both ends of the name.
    if chrom.startswith('chr'):
        chrom = chrom[3:]
    pos = int(pos)
    result = None
    logger.debug("Finding Cytoband for chrom:{0} pos:{1}".format(chrom, pos))
    if chrom in CYTOBANDS:
        for interval in CYTOBANDS[chrom][pos]:
            result = "{0}{1}".format(chrom, interval.data)

    return result
845,714
Create cases and populate them with individuals.

Args:
    variant_source (str): Path to vcf files
    case_lines (Iterable): Ped like lines
    case_type (str): Format of case lines

Returns:
    case_objs (list(puzzle.models.Case))

def get_cases(variant_source, case_lines=None, case_type='ped',
              variant_type='snv', variant_mode='vcf'):
    individuals = get_individuals(
        variant_source=variant_source,
        case_lines=case_lines,
        case_type=case_type,
        variant_mode=variant_mode
    )
    case_objs = []
    case_ids = set()
    compressed = False
    tabix_index = False

    # If no individuals we still need to have a case id
    if variant_source.endswith('.gz'):
        logger.debug("Found compressed variant source")
        compressed = True
        tabix_file = '.'.join([variant_source, 'tbi'])
        if os.path.exists(tabix_file):
            logger.debug("Found index file")
            tabix_index = True

    if len(individuals) > 0:
        for individual in individuals:
            case_ids.add(individual.case_id)
    else:
        case_ids = [os.path.basename(variant_source)]

    for case_id in case_ids:
        logger.info("Found case {0}".format(case_id))
        case = Case(
            case_id=case_id,
            name=case_id,
            variant_source=variant_source,
            variant_type=variant_type,
            variant_mode=variant_mode,
            compressed=compressed,
            tabix_index=tabix_index
        )

        # Add the individuals to the correct case
        for individual in individuals:
            if individual.case_id == case_id:
                logger.info("Adding ind {0} to case {1}".format(
                    individual.name, individual.case_id
                ))
                case.add_individual(individual)

        case_objs.append(case)

    return case_objs
845,743
Get the individuals from a vcf file, gemini database, and/or a ped file.

Args:
    variant_source (str): Path to a variant source
    case_lines (Iterable): Ped like lines
    case_type (str): Format of ped lines

Returns:
    individuals (list): list of Individuals

def get_individuals(variant_source, case_lines=None, case_type='ped',
                    variant_mode='vcf'):
    individuals = []
    ind_dict = {}

    if variant_mode == 'vcf':
        head = get_header(variant_source)
        # Dictionary with ind_id:index where index shows where in the vcf
        # the ind info is
        for index, ind in enumerate(head.individuals):
            ind_dict[ind] = index

        if case_lines:
            # read individuals from ped file
            family_parser = FamilyParser(case_lines, family_type=case_type)
            families = family_parser.families
            logger.debug("Found families {0}".format(
                ','.join(list(families.keys()))))
            if len(families) != 1:
                logger.error("Only one family can be used with vcf adapter")
                raise IOError

            case_id = list(families.keys())[0]
            logger.debug("Family used in analysis: {0}".format(case_id))

            for ind_id in family_parser.individuals:
                ind = family_parser.individuals[ind_id]
                logger.info("Found individual {0}".format(ind.individual_id))
                try:
                    individual = Individual(
                        ind_id=ind_id,
                        case_id=case_id,
                        mother=ind.mother,
                        father=ind.father,
                        sex=str(ind.sex),
                        phenotype=str(ind.phenotype),
                        variant_source=variant_source,
                        ind_index=ind_dict[ind_id],
                    )
                    individuals.append(individual)
                except KeyError as err:
                    # This is the case when individuals in ped do not
                    # exist in vcf
                    raise PedigreeError(
                        family_id=case_id,
                        individual_id=ind_id,
                        message="Individual {0} exists in ped file but not in vcf".format(ind_id)
                    )
        else:
            case_id = os.path.basename(variant_source)
            for ind in ind_dict:
                individual = Individual(
                    ind_id=ind,
                    case_id=case_id,
                    variant_source=variant_source,
                    ind_index=ind_dict[ind]
                )
                individuals.append(individual)
                logger.debug("Found individual {0} in {1}".format(
                    ind, variant_source))

    elif variant_mode == 'gemini':
        gq = GeminiQuery(variant_source)
        # Dictionary with sample to index in the gemini database
        ind_dict = gq.sample_to_idx

        query = "SELECT * from samples"
        gq.run(query)

        for individual in gq:
            logger.debug("Found individual {0} with family id {1}".format(
                individual['name'], individual['family_id']))
            individuals.append(
                Individual(
                    ind_id=individual['name'],
                    case_id=individual['family_id'],
                    mother=individual['maternal_id'],
                    father=individual['paternal_id'],
                    sex=individual['sex'],
                    phenotype=individual['phenotype'],
                    ind_index=ind_dict.get(individual['name']),
                    variant_source=variant_source,
                    bam_path=None)
            )

    return individuals
845,744
Add the cadd score to the variant.

Args:
    variant_obj (puzzle.models.Variant)
    info_dict (dict): An info dictionary

def _add_cadd_score(self, variant_obj, info_dict):
    cadd_score = info_dict.get('CADD')
    if cadd_score:
        logger.debug("Updating cadd_score to: {0}".format(cadd_score))
        variant_obj.cadd_score = float(cadd_score)
845,754
Add the genetic models found.

Args:
    variant_obj (puzzle.models.Variant)
    info_dict (dict): An info dictionary

def _add_genetic_models(self, variant_obj, info_dict):
    genetic_models_entry = info_dict.get('GeneticModels')
    if genetic_models_entry:
        genetic_models = []
        for family_annotation in genetic_models_entry.split(','):
            for genetic_model in family_annotation.split(':')[-1].split('|'):
                genetic_models.append(genetic_model)
        logger.debug("Updating genetic models to: {0}".format(
            ', '.join(genetic_models)))

        variant_obj.genetic_models = genetic_models
845,755
Add the rank score if found.

Args:
    variant_obj (puzzle.models.Variant)
    info_dict (dict): An info dictionary

def _add_rank_score(self, variant_obj, info_dict):
    rank_score_entry = info_dict.get('RankScore')
    if rank_score_entry:
        for family_annotation in rank_score_entry.split(','):
            rank_score = family_annotation.split(':')[-1]
        logger.debug("Updating rank_score to: {0}".format(rank_score))

        variant_obj.rank_score = float(rank_score)
845,756
Configure connection to a SQL database.

Args:
    db_uri (str): path/URI to the database to connect to
    debug (Optional[bool]): whether to output logging information

def connect(self, db_uri, debug=False):
    kwargs = {'echo': debug, 'convert_unicode': True}

    # connect to the SQL database
    if 'mysql' in db_uri:
        kwargs['pool_recycle'] = 3600
    elif '://' not in db_uri:
        logger.debug("detected sqlite path URI: {}".format(db_uri))
        db_path = os.path.abspath(os.path.expanduser(db_uri))
        db_uri = "sqlite:///{}".format(db_path)

    self.engine = create_engine(db_uri, **kwargs)
    logger.debug('connection established successfully')

    # make sure the same engine is propagated to the BASE classes
    BASE.metadata.bind = self.engine

    # start a session
    self.session = scoped_session(sessionmaker(bind=self.engine))

    # shortcut to query method
    self.query = self.session.query
    return self
845,766
Parses a file or string and returns a list of AudioClipSpec.

Arguments:
    specsFileOrString (str): specifications' file or string.

Examples:

    >>> SpecsParser.parse('23.4 34.1\n40.2 79.65 Hello World!')
    [<AudioClipSpec start:23.40, end:34.10, text:''>,
     <AudioClipSpec start:40.20, end:79.65, text:'Hello World!'>]

Returns:
    list(AudioClipSpec) or None

def parse(cls, specsFileOrString):
    stringToParse = None

    # Read the contents of the file if specsFileOrString is not a string
    if os.path.isfile(specsFileOrString):
        with open(specsFileOrString, 'r') as f:
            stringToParse = f.read()
    else:
        stringToParse = specsFileOrString

    # Audacity uses \r for newlines
    lines = [x.strip() for x in re.split(r'[\r\n]+', stringToParse)]

    clips = []
    for line in lines:
        if line != '':
            clips.append(cls._parseLine(line))

    return clips
845,835
Return comments for a case or variant.

Args:
    case_id (str): id for a related case
    variant_id (Optional[str]): id for a related variant

def comments(self, case_id=None, variant_id=None, username=None):
    logger.debug("Looking for comments")
    comment_objs = self.query(Comment)
    if case_id:
        comment_objs = comment_objs.filter_by(case_id=case_id)
    if variant_id:
        comment_objs = comment_objs.filter_by(variant_id=variant_id)
    elif case_id:
        comment_objs = comment_objs.filter_by(variant_id=None)
    return comment_objs
845,919
Add the consequences found for a variant.

Args:
    variant_obj (puzzle.models.Variant)
    raw_variant_line (str): A raw vcf variant line

def _add_consequences(self, variant_obj, raw_variant_line):
    consequences = []
    for consequence in SO_TERMS:
        if consequence in raw_variant_line:
            consequences.append(consequence)

    variant_obj.consequences = consequences
845,922
Add the most severe consequence.

Args:
    variant_obj (puzzle.models.Variant)

def _add_most_severe_consequence(self, variant_obj):
    most_severe_consequence = None
    most_severe_score = None

    for consequence in variant_obj.consequences:
        logger.debug("Checking severity score for consequence: {0}".format(
            consequence))
        severity_score = SEVERITY_DICT.get(consequence)
        if severity_score is not None:
            # Compare against None explicitly so a score of 0 (the most
            # severe) is not treated as "no score yet".
            if (most_severe_score is None or
                    severity_score < most_severe_score):
                most_severe_consequence = consequence
                most_severe_score = severity_score

    variant_obj.most_severe_consequence = most_severe_consequence
845,923
Add the impact severity for the most severe consequence.

Args:
    variant_obj (puzzle.models.Variant)

def _add_impact_severity(self, variant_obj):
    if variant_obj.most_severe_consequence:
        variant_obj.impact_severity = IMPACT_SEVERITIES.get(
            variant_obj.most_severe_consequence
        )
845,924
Add an individual to the adapter.

Args:
    ind_obj (puzzle.models.Individual)

def _add_individual(self, ind_obj):
    logger.debug("Adding individual {0} to plugin".format(ind_obj.ind_id))
    self.individual_objs.append(ind_obj)
846,151
Add a case obj with individuals to the adapter.

Args:
    case_obj (puzzle.models.Case)

def add_case(self, case_obj):
    for ind_obj in case_obj.individuals:
        self._add_individual(ind_obj)
    logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
    self.case_objs.append(case_obj)
846,152
Return a Case object.

If no case_id is given, return one case.

Args:
    case_id (str): A case id

Returns:
    case (Case): A Case object

def case(self, case_id=None):
    cases = self.cases()
    if case_id:
        for case in cases:
            if case.case_id == case_id:
                return case
    else:
        if cases:
            return cases[0]

    return None
846,153
Return an individual object.

Args:
    ind_id (str): An individual id

Returns:
    individual (puzzle.models.individual)

def individual(self, ind_id=None):
    for ind_obj in self.individuals:
        if ind_obj.ind_id == ind_id:
            return ind_obj

    return None
846,154
Allows each caller to claim incoming files as they are recognized.

Args:
    unclaimed_file_readers: Usually, all files in the input dir.

Returns:
    A tuple of unclaimed file readers and claimed VcfReaders. The
    presence of any unclaimed file readers could indicate stray files in
    the input dir.

def claim(self, unclaimed_file_readers):
    claimed_vcf_readers = []
    for caller in self._callers:
        (unclaimed_file_readers,
         translated_vcf_readers) = caller.claim(unclaimed_file_readers)
        claimed_vcf_readers.extend(translated_vcf_readers)
    return unclaimed_file_readers, claimed_vcf_readers
846,252
Flask app factory function.

Args:
    config (Optional[path]): path to a Python module config file
    config_obj (Optional[class]): Python config object

def create_app(config=None, config_obj=None):
    app = Flask(__name__)

    # configure application from external configs
    configure_app(app, config=config, config_obj=config_obj)

    # register different parts of the application
    register_blueprints(app)

    # setup extensions
    bind_extensions(app)
    return app
846,315
Configure application instance.

Args:
    app (Flask): initialized Flask app instance
    config (Optional[path]): path to a Python module config file
    config_obj (Optional[class]): Python config object

def configure_app(app, config=None, config_obj=None):
    app.config.from_object(config_obj or BaseConfig)
    if config is not None:
        app.config.from_pyfile(config)
846,316
Configure extensions.

Args:
    app (Flask): initialized Flask app instance

def bind_extensions(app):
    # bind plugin to app object
    app.db = app.config['PUZZLE_BACKEND']
    app.db.init_app(app)

    # bind bootstrap blueprints
    bootstrap.init_app(app)
    markdown(app)

    @app.template_filter('islist')
    def islist(object):
        return isinstance(object, (tuple, list))
846,317