def set_number_of_atoms( self, n, selected_sites=None ): self.number_of_atoms = n self.atoms = species.Species( self.lattice.populate_sites( self.number_of_atoms, selected_sites=selected_sites ) )
Set the number of atoms for the simulation, and populate the simulation lattice. Args: n (Int): Number of atoms for this simulation. selected_sites (:obj:(List|Set|String), optional): Selects a subset of site types to be populated with atoms. Defaults to None. Returns: None
def define_lattice_from_file( self, filename, cell_lengths ): self.lattice = init_lattice.lattice_from_sites_file( filename, cell_lengths = cell_lengths )
Set up the simulation lattice from a file containing site data. Uses `init_lattice.lattice_from_sites_file`, which defines the site file spec. Args: filename (Str): sites file filename. cell_lengths (List(x,y,z)): cell lengths for the simulation cell. Returns: None
def is_initialised( self ): if not self.lattice: raise AttributeError('Running a simulation needs the lattice to be initialised') if not self.atoms: raise AttributeError('Running a simulation needs the atoms to be initialised') if not self.number_of_jumps and not self.for_time: raise AttributeError('Running a simulation needs number_of_jumps or for_time to be set')
Check whether the simulation has been initialised. Args: None Returns: None
def run( self, for_time=None ): self.for_time = for_time try: self.is_initialised() except AttributeError: raise if self.number_of_equilibration_jumps > 0: for step in range( self.number_of_equilibration_jumps ): self.lattice.jump() self.reset() if self.for_time: self.number_of_jumps = 0 while self.lattice.time < self.for_time: self.lattice.jump() self.number_of_jumps += 1 else: for step in range( self.number_of_jumps ): self.lattice.jump() self.has_run = True
Run the simulation. Args: for_time (:obj:Float, optional): If `for_time` is set, then run the simulation until a set amount of time has passed. Otherwise, run the simulation for a set number of jumps. Defaults to None. Returns: None
def old_tracer_correlation( self ): if self.has_run: return self.atoms.sum_dr_squared() / float( self.number_of_jumps ) else: return None
Deprecated tracer correlation factor for this simulation. Args: None Returns: (Float): The tracer correlation factor, f. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.tracer_correlation`.
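A minimal numerical sketch of the normalisation note above; the jump distance and the `simulation` object are hypothetical, introduced only for illustration.

a = 2.5                                       # assumed jump distance (not unity)
f_raw = simulation.old_tracer_correlation()   # hypothetical Simulation instance
f = f_raw / a**2                              # corrected tracer correlation factor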
def tracer_diffusion_coefficient( self ): if self.has_run: return self.atoms.sum_dr_squared() / ( 6.0 * float( self.number_of_atoms ) * self.lattice.time ) else: return None
Tracer diffusion coefficient, D*. Args: None Returns: (Float): The tracer diffusion coefficient, D*.
def old_collective_correlation( self ): if self.has_run: return self.atoms.collective_dr_squared() / float( self.number_of_jumps ) else: return None
Returns the collective correlation factor, f_I. Args: None Returns: (Float): The collective correlation factor, f_I. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.collective_correlation`.
def collective_diffusion_coefficient( self ): if self.has_run: return self.atoms.collective_dr_squared() / ( 6.0 * self.lattice.time ) else: return None
Returns the collective or "jump" diffusion coefficient, D_J. Args: None Returns: (Float): The collective diffusion coefficient, D_J.
def setup_lookup_table( self, hamiltonian='nearest-neighbour' ): expected_hamiltonian_values = [ 'nearest-neighbour', 'coordination_number' ] if hamiltonian not in expected_hamiltonian_values: raise ValueError self.lattice.jump_lookup_table = lookup_table.LookupTable( self.lattice, hamiltonian )
Create a jump-probability look-up table corresponding to the appropriate Hamiltonian. Args: hamiltonian (Str, optional): String specifying the simulation Hamiltonian. Valid values are 'nearest-neighbour' (default) and 'coordination_number'. Returns: None
def enforce_periodic_boundary_conditions( self ): for s in self.sites: for i in range(3): if s.r[i] < 0.0: s.r[i] += self.cell_lengths[i] if s.r[i] > self.cell_lengths[i]: s.r[i] -= self.cell_lengths[i]
Ensure that all lattice sites are within the central periodic image of the simulation cell. Sites that are outside the central simulation cell are mapped back into this cell. Args: None Returns: None
def initialise_site_lookup_table( self ): self.site_lookup = {} for site in self.sites: self.site_lookup[ site.number ] = site
Create a lookup table allowing sites in this lattice to be queried using `self.site_lookup[n]` where `n` is the identifying site number. Args: None Returns: None
def potential_jumps( self ): jumps = [] if self.number_of_occupied_sites <= self.number_of_sites / 2: for occupied_site in self.occupied_sites(): unoccupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in occupied_site.neighbours ] if not site.is_occupied ] for vacant_site in unoccupied_neighbours: jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) ) else: for vacant_site in self.vacant_sites(): occupied_neighbours = [ site for site in [ self.site_with_id( n ) for n in vacant_site.neighbours ] if site.is_occupied ] for occupied_site in occupied_neighbours: jumps.append( jump.Jump( occupied_site, vacant_site, self.nn_energy, self.cn_energies, self.jump_lookup_table ) ) return jumps
All nearest-neighbour jumps not blocked by volume exclusion (i.e. from occupied to neighbouring unoccupied sites). Args: None Returns: (List(Jump)): List of possible jumps.
def update( self, jump ): atom = jump.initial_site.atom dr = jump.dr( self.cell_lengths ) #print( "atom {} jumped from site {} to site {}".format( atom.number, jump.initial_site.number, jump.final_site.number ) ) jump.final_site.occupation = atom.number jump.final_site.atom = atom jump.final_site.is_occupied = True jump.initial_site.occupation = 0 jump.initial_site.atom = None jump.initial_site.is_occupied = False # TODO: updating atom counters could be contained in an atom.move_to( site ) method atom.site = jump.final_site atom.number_of_hops += 1 atom.dr += dr atom.summed_dr2 += np.dot( dr, dr )
Update the lattice state by accepting a specific jump. Args: jump (Jump): The jump that has been accepted. Returns: None.
def populate_sites( self, number_of_atoms, selected_sites=None ): if number_of_atoms > self.number_of_sites: raise ValueError if selected_sites: atoms = [ atom.Atom( initial_site = site ) for site in random.sample( [ s for s in self.sites if s.label in selected_sites ], number_of_atoms ) ] else: atoms = [ atom.Atom( initial_site = site ) for site in random.sample( self.sites, number_of_atoms ) ] self.number_of_occupied_sites = number_of_atoms return atoms
Populate the lattice sites with a specific number of atoms. Args: number_of_atoms (Int): The number of atoms to populate the lattice sites with. selected_sites (:obj:List, optional): List of site labels if only some sites are to be occupied. Defaults to None. Returns: (List(Atom)): The list of atoms populating the lattice.
def jump( self ): potential_jumps = self.potential_jumps() if not potential_jumps: raise BlockedLatticeError('No moves are possible in this lattice') all_transitions = transitions.Transitions( potential_jumps ) random_jump = all_transitions.random() delta_t = all_transitions.time_to_jump() self.time += delta_t self.update_site_occupation_times( delta_t ) self.update( random_jump ) return delta_t
Select a jump at random from all potential jumps, then update the lattice state. Args: None Returns: (Float): The time increment delta_t for the accepted jump.
def site_occupation_statistics( self ): if self.time == 0.0: return None occupation_stats = { label : 0.0 for label in self.site_labels } for site in self.sites: occupation_stats[ site.label ] += site.time_occupied for label in self.site_labels: occupation_stats[ label ] /= self.time return occupation_stats
Average site occupation for each site type. Args: None Returns: (Dict(Str:Float)): Dictionary of occupation statistics, e.g.:: { 'A' : 2.5, 'B' : 25.3 }
def set_site_energies( self, energies ): self.site_energies = energies for site_label in energies: for site in self.sites: if site.label == site_label: site.energy = energies[ site_label ]
Set the energies for every site in the lattice according to the site labels. Args: energies (Dict(Str:Float)): Dictionary of energies for each site label, e.g.:: { 'A' : 1.0, 'B' : 0.0 } Returns: None
def set_cn_energies( self, cn_energies ): for site in self.sites: site.set_cn_occupation_energies( cn_energies[ site.label ] ) self.cn_energies = cn_energies
Set the coordination number dependent energies for this lattice. Args: cn_energies (Dict(Str:Dict(Int:Float))): Dictionary of dictionaries specifying the coordination number dependent energies for each site type. e.g.:: { 'A' : { 0 : 0.0, 1 : 1.0, 2 : 2.0 }, 'B' : { 0 : 0.0, 1 : 2.0 } } Returns: None
def site_coordination_numbers( self ): coordination_numbers = {} for l in self.site_labels: coordination_numbers[ l ] = set( [ len( site.neighbours ) for site in self.sites if site.label == l ] ) return coordination_numbers
Returns a dictionary of the coordination numbers for each site label. e.g.:: { 'A' : { 4 }, 'B' : { 2, 4 } } Args: None Returns: coordination_numbers (Dict(Str:Set(Int))): Dictionary of coordination numbers for each site label.
def max_site_coordination_numbers( self ): return { l : max( c ) for l, c in self.site_coordination_numbers().items() }
Returns a dictionary of the maximum coordination number for each site label. e.g.:: { 'A' : 4, 'B' : 4 } Args: None Returns: max_coordination_numbers (Dict(Str:Int)): Dictionary of maximum coordination number for each site label.
def site_specific_coordination_numbers( self ): specific_coordination_numbers = {} for site in self.sites: specific_coordination_numbers[ site.label ] = site.site_specific_neighbours() return specific_coordination_numbers
Returns a dictionary of coordination numbers for each site type. Args: None Returns: (Dict(Str:List(Int))) : Dictionary of coordination numbers for each site type, e.g.:: { 'A' : [ 2, 4 ], 'B' : [ 2 ] }
def connected_site_pairs( self ): site_connections = {} for initial_site in self.sites: if not initial_site.label in site_connections: site_connections[ initial_site.label ] = [] for final_site in initial_site.p_neighbours: if final_site.label not in site_connections[ initial_site.label ]: site_connections[ initial_site.label ].append( final_site.label ) return site_connections
Returns a dictionary of all connections between pairs of sites (by site label). e.g. for a linear lattice A-B-C will return:: { 'A' : [ 'B' ], 'B' : [ 'A', 'C' ], 'C' : [ 'B' ] } Args: None Returns: site_connections (Dict(Str:List(Str))): A dictionary of neighbouring site types in the lattice.
def transmute_sites( self, old_site_label, new_site_label, n_sites_to_change ): selected_sites = self.select_sites( old_site_label ) for site in random.sample( selected_sites, n_sites_to_change ): site.label = new_site_label self.site_labels = set( [ site.label for site in self.sites ] )
Selects a random subset of sites with a specific label and gives them a different label. Args: old_site_label (String or List(String)): Site label(s) of the sites to be modified. new_site_label (String): Site label to be applied to the modified sites. n_sites_to_change (Int): Number of sites to modify. Returns: None
def connected_sites( self, site_labels=None ): if site_labels: selected_sites = self.select_sites( site_labels ) else: selected_sites = self.sites initial_clusters = [ cluster.Cluster( [ site ] ) for site in selected_sites ] if site_labels: blocking_sites = self.site_labels - set( site_labels ) for c in initial_clusters: c.remove_sites_from_neighbours( blocking_sites ) final_clusters = [] while initial_clusters: # loop until initial_clusters is empty this_cluster = initial_clusters.pop(0) while this_cluster.neighbours: neighbouring_clusters = [ c for c in initial_clusters if this_cluster.is_neighbouring( c ) ] for nc in neighbouring_clusters: initial_clusters.remove( nc ) this_cluster = this_cluster.merge( nc ) final_clusters.append( this_cluster ) return final_clusters
Searches the lattice to find sets of sites that are contiguously neighbouring. Mutually exclusive sets of contiguous sites are returned as Cluster objects. Args: site_labels (:obj:(List(Str)|Set(Str)|Str), optional): Labels for sites to be considered in the search. This can be a list:: [ 'A', 'B' ] a set:: { 'A', 'B' } or a string:: 'A'. Returns: (List(Cluster)): List of Cluster objects for groups of contiguous sites.
def select_sites( self, site_labels ): if type( site_labels ) in ( list, set ): selected_sites = [ s for s in self.sites if s.label in site_labels ] elif type( site_labels ) is str: selected_sites = [ s for s in self.sites if s.label == site_labels ] else: raise ValueError( str( site_labels ) ) return selected_sites
Selects sites in the lattice with specified labels. Args: site_labels (List(Str)|Set(Str)|Str): Labels of sites to select. This can be a List [ 'A', 'B' ], a Set { 'A', 'B' }, or a String 'A'. Returns: (List(Site)): List of sites with labels given by `site_labels`.
def detached_sites( self, site_labels=None ): clusters = self.connected_sites( site_labels=site_labels ) island_clusters = [ c for c in clusters if not any( c.is_periodically_contiguous() ) ] return list( itertools.chain.from_iterable( ( c.sites for c in island_clusters ) ) )
Returns all sites in the lattice (optionally from the set of sites with specific labels) that are not part of a percolating network. This is determined from clusters of connected sites that do not wrap round to themselves through a periodic boundary. Args: site_labels (String or List(String)): Labels of sites to be considered. Returns: (List(Site)): List of sites not in a periodic percolating network.
def merge( self, other_cluster ): new_cluster = Cluster( self.sites | other_cluster.sites ) new_cluster.neighbours = ( self.neighbours | other_cluster.neighbours ).difference( new_cluster.sites ) return new_cluster
Combine two clusters into a single cluster. Args: other_cluster (Cluster): The second cluster to combine. Returns: (Cluster): The combination of both clusters.
def sites_at_edges( self ): min_x = min( [ s.r[0] for s in self.sites ] ) max_x = max( [ s.r[0] for s in self.sites ] ) min_y = min( [ s.r[1] for s in self.sites ] ) max_y = max( [ s.r[1] for s in self.sites ] ) min_z = min( [ s.r[2] for s in self.sites ] ) max_z = max( [ s.r[2] for s in self.sites ] ) x_max = [ s for s in self.sites if s.r[0] == max_x ] x_min = [ s for s in self.sites if s.r[0] == min_x ] y_max = [ s for s in self.sites if s.r[1] == max_y ] y_min = [ s for s in self.sites if s.r[1] == min_y ] z_max = [ s for s in self.sites if s.r[2] == max_z ] z_min = [ s for s in self.sites if s.r[2] == min_z ] return ( x_max, x_min, y_max, y_min, z_max, z_min )
Finds the sites at the edges of the cluster: those with the maximum and minimum coordinates along x, y, and z. Args: None Returns: (Tuple(List(Site))): Six lists of edge sites, in the order [ +x, -x, +y, -y, +z, -z ]
def is_periodically_contiguous( self ): edges = self.sites_at_edges() is_contiguous = [ False, False, False ] along_x = any( [ s2 in s1.p_neighbours for s1 in edges[0] for s2 in edges[1] ] ) along_y = any( [ s2 in s1.p_neighbours for s1 in edges[2] for s2 in edges[3] ] ) along_z = any( [ s2 in s1.p_neighbours for s1 in edges[4] for s2 in edges[5] ] ) return ( along_x, along_y, along_z )
Logical check of whether a cluster connects with itself across the simulation periodic boundary conditions. Args: None Returns: (Bool, Bool, Bool): Contiguity along the x, y, and z coordinate axes.
def remove_sites_from_neighbours( self, remove_labels ): if type( remove_labels ) is str: remove_labels = [ remove_labels ] self.neighbours = set( n for n in self.neighbours if n.label not in remove_labels )
Removes sites from the set of neighbouring sites if these have labels in remove_labels. Args: remove_labels (List(Str)|Str): Site labels to be removed from the cluster neighbour set. Returns: None
def cumulative_probabilities( self ): partition_function = np.sum( self.p ) return np.cumsum( self.p ) / partition_function
Cumulative sum of the relative probabilities for all possible jumps. Args: None Returns: (np.array): Cumulative sum of relative jump probabilities.
def random( self ): j = np.searchsorted( self.cumulative_probabilities(), random.random() ) return self.jumps[ j ]
Select a jump at random with appropriate relative probabilities. Args: None Returns: (Jump): The randomly selected Jump.
def time_to_jump( self ): k_tot = rate_prefactor * np.sum( self.p ) return -( 1.0 / k_tot ) * math.log( random.random() )
The timestep until the next jump. Args: None Returns: (Float): The timestep until the next jump.
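A hedged numerical sketch of the residence-time step above; the attempt frequency (`rate_prefactor`) and summed probability are assumed values, not taken from the source.

import math
import random

rate_prefactor = 1.0e13                    # assumed attempt frequency in s^-1
summed_p = 2.0                             # assumed sum of relative jump probabilities
k_tot = rate_prefactor * summed_p
delta_t = -(1.0 / k_tot) * math.log(random.random())
# Waiting times are exponentially distributed with mean 1 / k_tot = 5e-14 s.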
def locate(self): stored_location = self._get_stored_location() if not stored_location: ip_range = self._get_ip_range() stored_location = self._get_corresponding_location(ip_range) return stored_location
Determine the user's location (either from their IP address or a cookie). :return: :ref:`Custom location model <location_model>`
def _get_real_ip(self): try: # Trying to work with most common proxy headers real_ip = self.request.META['HTTP_X_FORWARDED_FOR'] return real_ip.split(',')[0] except KeyError: return self.request.META['REMOTE_ADDR'] except Exception: # Unknown IP return None
Get IP from request. :param request: A usual request object :type request: HttpRequest :return: ipv4 string or None
def _get_ip_range(self): ip = self._get_real_ip() try: geobase_entry = IpRange.objects.by_ip(ip) except IpRange.DoesNotExist: geobase_entry = None return geobase_entry
Fetches an IpRange instance if the request IP is found in the database. :param request: A usual request object :type request: HttpRequest :return: IpRange object or None
def _get_stored_location(self): location_storage = storage_class(request=self.request, response=None) return location_storage.get()
Get location from cookie. :param request: A usual request object :type request: HttpRequest :return: Custom location model
def lazy_translations(cls): return { cloudfiles.errors.NoSuchContainer: errors.NoContainerException, cloudfiles.errors.NoSuchObject: errors.NoObjectException, }
Lazy translations.
def from_info(cls, container, info_obj): create_fn = cls.from_subdir if 'subdir' in info_obj \ else cls.from_file_info return create_fn(container, info_obj)
Create from subdirectory or file info object.
def from_subdir(cls, container, info_obj): return cls(container, info_obj['subdir'], obj_type=cls.type_cls.SUBDIR)
Create from subdirectory info object.
def choose_type(cls, content_type): return cls.type_cls.SUBDIR if content_type in cls.subdir_types \ else cls.type_cls.FILE
Choose object type from content type.
def from_file_info(cls, container, info_obj): # ISO 8601: 2010-04-15T01:52:13.919070 return cls(container, name=info_obj['name'], size=info_obj['bytes'], content_type=info_obj['content_type'], last_modified=dt_from_header(info_obj['last_modified']), obj_type=cls.choose_type(info_obj['content_type']))
Create from regular info object.
def from_obj(cls, container, file_obj): # RFC 1123: Thu, 07 Jun 2007 18:57:07 GMT return cls(container, name=file_obj.name, size=file_obj.size, content_type=file_obj.content_type, last_modified=dt_from_header(file_obj.last_modified), obj_type=cls.choose_type(file_obj.content_type))
Create from native file object.
def get_objects(self, path, marker=None, limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT): object_infos, full_query = self._get_object_infos(path, marker, limit) if full_query and len(object_infos) < limit: # The underlying query returned a full result set, but we # truncated it to under limit. Re-run at twice the limit and then # slice back. object_infos, _ = self._get_object_infos(path, marker, 2 * limit) object_infos = object_infos[:limit] return [self.obj_cls.from_info(self, x) for x in object_infos]
Get objects. **Pseudo-directory Notes**: Rackspace has two approaches to pseudo- directories within the (really) flat storage object namespace: 1. Dummy directory storage objects. These are real storage objects of type "application/directory" and must be manually uploaded by the client. 2. Implied subdirectories using the `path` API query parameter. Both serve the same purpose, but the latter is much preferred because there is no independent maintenance of extra dummy objects, and the `path` approach is always correct (for the existing storage objects). This package uses the latter `path` approach, but gets into an ambiguous situation where there is both a dummy directory storage object and an implied subdirectory. To remedy this situation, we only show information for the dummy directory object in results if present, and ignore the implied subdirectory. But, under the hood this means that our `limit` parameter may end up with less than the desired number of objects. So, we use the heuristic that if we **do** have "application/directory" objects, we end up doing an extra query of double the limit size to ensure we can get up to the limit amount of objects. This double query approach is inefficient, but as using dummy objects should now be deprecated, the second query should only rarely occur.
def _get_object_infos(self, path, marker=None, limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT): # Adjust limit to +1 to handle marker object as first result. # We can get in to this situation for a marker of "foo", that will # still return a 'subdir' object of "foo/" because of the extra # slash. orig_limit = limit limit += 1 # Enforce maximum object size. if limit > RS_MAX_LIST_OBJECTS_LIMIT: raise errors.CloudException("Object limit must be less than %s" % RS_MAX_LIST_OBJECTS_LIMIT) def _collapse(infos): """Remove duplicate dummy / implied objects.""" name = None for info in infos: name = info.get('name', name) subdir = info.get('subdir', '').strip(SEP) if not name or subdir != name: yield info path = path + SEP if path else '' object_infos = self.native_container.list_objects_info( limit=limit, delimiter=SEP, prefix=path, marker=marker) full_query = len(object_infos) == limit if object_infos: # Check first object for marker match and truncate if so. if (marker and object_infos[0].get('subdir', '').strip(SEP) == marker): object_infos = object_infos[1:] # Collapse subdirs and dummy objects. object_infos = list(_collapse(object_infos)) # Adjust to original limit. if len(object_infos) > orig_limit: object_infos = object_infos[:orig_limit] return object_infos, full_query
Get raw object infos (single-shot).
def get_object(self, path): obj = self.native_container.get_object(path) return self.obj_cls.from_obj(self, obj)
Get single object.
def _get_connection(self): kwargs = { 'username': self.account, 'api_key': self.secret_key, } # Only add kwarg for servicenet if True because user could set # environment variable 'RACKSPACE_SERVICENET' separately. if self.servicenet: kwargs['servicenet'] = True if self.authurl: kwargs['authurl'] = self.authurl return cloudfiles.get_connection(**kwargs)
Return native connection object.
def _get_containers(self): infos = self.native_conn.list_containers_info() return [self.cont_cls(self, i['name'], i['count'], i['bytes']) for i in infos]
Return available containers.
def _get_container(self, path): cont = self.native_conn.get_container(path) return self.cont_cls(self, cont.name, cont.object_count, cont.size_used)
Return single container.
def delta_E( self ): site_delta_E = self.final_site.energy - self.initial_site.energy if self.nearest_neighbour_energy: site_delta_E += self.nearest_neighbour_delta_E() if self.coordination_number_energy: site_delta_E += self.coordination_number_delta_E() return site_delta_E
The change in system energy if this jump were accepted. Args: None Returns: (Float): delta E
def nearest_neighbour_delta_E( self ): delta_nn = self.final_site.nn_occupation() - self.initial_site.nn_occupation() - 1 # -1 because the hopping ion is not counted in the final site occupation number return ( delta_nn * self.nearest_neighbour_energy )
Nearest-neighbour interaction contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (nearest-neighbour)
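A worked illustration of the -1 correction noted in the code above; the occupation numbers and interaction energy are assumed values.

E_nn = 0.25            # assumed nearest-neighbour interaction energy
initial_nn = 2         # occupied neighbours of the initial site
final_nn = 3           # occupied neighbours of the final site, which still counts the hopping ion on the initial site
delta_nn = final_nn - initial_nn - 1   # = 0 once the hopping ion is discounted
delta_E_nn = delta_nn * E_nn           # = 0.0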
def coordination_number_delta_E( self ): initial_site_neighbours = [ s for s in self.initial_site.p_neighbours if s.is_occupied ] # excludes final site, since this is always unoccupied final_site_neighbours = [ s for s in self.final_site.p_neighbours if s.is_occupied and s is not self.initial_site ] # excludes initial site initial_cn_occupation_energy = ( self.initial_site.cn_occupation_energy() + sum( [ site.cn_occupation_energy() for site in initial_site_neighbours ] ) + sum( [ site.cn_occupation_energy() for site in final_site_neighbours ] ) ) final_cn_occupation_energy = ( self.final_site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } ) + sum( [ site.cn_occupation_energy( delta_occupation = { self.initial_site.label : -1 } ) for site in initial_site_neighbours ] ) + sum( [ site.cn_occupation_energy( delta_occupation = { self.final_site.label : +1 } ) for site in final_site_neighbours ] ) ) return ( final_cn_occupation_energy - initial_cn_occupation_energy )
Coordination-number dependent energy contribution to the change in system energy if this jump were accepted. Args: None Returns: (Float): delta E (coordination-number)
def dr( self, cell_lengths ): half_cell_lengths = cell_lengths / 2.0 this_dr = self.final_site.r - self.initial_site.r for i in range( 3 ): if this_dr[ i ] > half_cell_lengths[ i ]: this_dr[ i ] -= cell_lengths[ i ] if this_dr[ i ] < -half_cell_lengths[ i ]: this_dr[ i ] += cell_lengths[ i ] return this_dr
Particle displacement vector for this jump. Args: cell_lengths (np.array(x,y,z)): Cell lengths for the orthogonal simulation cell. Returns: (np.array(x,y,z)): dr
def relative_probability_from_lookup_table( self, jump_lookup_table ): l1 = self.initial_site.label l2 = self.final_site.label c1 = self.initial_site.nn_occupation() c2 = self.final_site.nn_occupation() return jump_lookup_table.jump_probability[ l1 ][ l2 ][ c1 ][ c2 ]
Relative probability of accepting this jump from a lookup-table. Args: jump_lookup_table (LookupTable): the lookup table to be used for this jump. Returns: (Float): relative probability of accepting this jump.
def module_cache_get(cache, module): if getattr(cache, "config", False): config_file = module[:-2] + "yaml" if config_file not in cache.config_files and os.path.exists(config_file): try: config = yaml_safe_load(config_file, type=dict) except TypeError as e: tangelo.log_warning("TANGELO", "Bad configuration in file %s: %s" % (config_file, e)) raise except IOError: tangelo.log_warning("TANGELO", "Could not open config file %s" % (config_file)) raise except ValueError as e: tangelo.log_warning("TANGELO", "Error reading config file %s: %s" % (config_file, e)) raise cache.config_files[config_file] = True else: config = {} cherrypy.config["module-config"][module] = config cherrypy.config["module-store"].setdefault(module, {}) # If two threads are importing the same module nearly concurrently, we # could load it twice unless we use the import lock. imp.acquire_lock() try: if module not in cache.modules: name = module[:-3] # load the module. service = imp.load_source(name, module) cache.modules[module] = service else: service = cache.modules[module] finally: imp.release_lock() return service
Import a module with an optional yaml config file, but only if we haven't imported it already. :param cache: object which holds information on which modules and config files have been loaded and whether config files should be loaded. :param module: the path of the module to load. :returns: the loaded module.
def make_virtual_offset(block_start_offset, within_block_offset): if within_block_offset < 0 or within_block_offset >= 65536: raise ValueError("Require 0 <= within_block_offset < 2**16, got %i" % within_block_offset) if block_start_offset < 0 or block_start_offset >= 281474976710656: raise ValueError("Require 0 <= block_start_offset < 2**48, got %i" % block_start_offset) return (block_start_offset << 16) | within_block_offset
Compute a BGZF virtual offset from block start and within block offsets. The BAM indexing scheme records read positions using a 64 bit 'virtual offset', comprising in C terms: block_start_offset << 16 | within_block_offset Here block_start_offset is the file offset of the BGZF block start (unsigned integer using up to 64-16 = 48 bits), and within_block_offset within the (decompressed) block (unsigned 16 bit integer). >>> make_virtual_offset(0, 0) 0 >>> make_virtual_offset(0, 1) 1 >>> make_virtual_offset(0, 2**16 - 1) 65535 >>> make_virtual_offset(0, 2**16) Traceback (most recent call last): ... ValueError: Require 0 <= within_block_offset < 2**16, got 65536 >>> 65536 == make_virtual_offset(1, 0) True >>> 65537 == make_virtual_offset(1, 1) True >>> 131071 == make_virtual_offset(1, 2**16 - 1) True >>> 6553600000 == make_virtual_offset(100000, 0) True >>> 6553600001 == make_virtual_offset(100000, 1) True >>> 6553600010 == make_virtual_offset(100000, 10) True >>> make_virtual_offset(2**48, 0) Traceback (most recent call last): ... ValueError: Require 0 <= block_start_offset < 2**48, got 281474976710656
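The inverse mapping follows directly from the bit layout above; this sketch of the reverse arithmetic is illustrative and is not necessarily the module's own API.

def split_virtual_offset(virtual_offset):
    """Recover (block_start_offset, within_block_offset) from a virtual offset."""
    block_start_offset = virtual_offset >> 16       # upper 48 bits
    within_block_offset = virtual_offset & 0xFFFF   # lower 16 bits
    return block_start_offset, within_block_offset

assert split_virtual_offset(6553600010) == (100000, 10)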
def close(self): if self._buffer: self.flush() self._handle.write(_bgzf_eof) self._handle.flush() self._handle.close()
Flush data, write the 28 byte BGZF EOF marker, and close the BGZF file. samtools will look for a magic EOF marker, just a 28 byte empty BGZF block, and if it is missing warns that the BAM file may be truncated. samtools writes this block, and so does bgzip, so this implementation does too.
def ensure_secret(): home_dir = os.environ['HOME'] file_name = home_dir + "/.ipcamweb" if os.path.exists(file_name): with open(file_name, "r") as s_file: secret = s_file.readline() else: secret = os.urandom(24) with open(file_name, "w") as s_file: s_file.write(secret+"\n") return secret
Check whether the secret key used to encrypt sessions exists; generate it otherwise.
def list_snapshots_days(path, cam_id): screenshoots_path = path + "/" + str(cam_id) if os.path.exists(screenshoots_path): days = [] for day_dir in os.listdir(screenshoots_path): date = datetime.datetime.strptime(day_dir, "%d%m%Y").strftime('%d/%m/%y') days.append((date, day_dir)) return days else: return []
Returns a list of (date, dir) tuples for the days in which snapshots are present
def list_snapshots_hours(path, cam_id, day): screenshoots_path = path+"/"+str(cam_id)+"/"+day if os.path.exists(screenshoots_path): hours = [] for hour_dir in sorted(os.listdir(screenshoots_path)): hrm = datetime.datetime.strptime(hour_dir, "%H%M").strftime('%H:%M') hours.append((hrm, hour_dir)) return hours else: return []
Returns a list of (hour:minute, dir) tuples for which snapshots are present
def list_snapshots_for_a_minute(path, cam_id, day, hourm): screenshoots_path = path+"/"+str(cam_id)+"/"+day+"/"+hourm if os.path.exists(screenshoots_path): screenshots = [scr for scr in sorted(os.listdir(screenshoots_path))] return screenshots else: return []
Returns a list of screenshots
def is_snv(self): return len(self.REF) == 1 and all(a.type == "SNV" for a in self.ALT)
Return ``True`` if it is a SNV
def affected_start(self): types = {alt.type for alt in self.ALT} # set! BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS: # Only insertions, return 0-based position right of first base return self.POS # right of first base else: # Return 0-based start position of first REF base return self.POS - 1
Return affected start position in 0-based coordinates. For SNVs, MNVs, and deletions, this is the start position. In the case of insertions, the position behind the insert position is returned, yielding a 0-length interval together with :py:meth:`~Record.affected_end`.
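A plain-arithmetic illustration of the coordinate convention described above, without using the library's Record class.

pos = 100                        # 1-based POS from the VCF line
snv_affected = (pos - 1, pos)    # SNV/MNV/deletion: [99, 100) covers the first REF base
ins_affected = (pos, pos)        # pure insertion: zero-length interval right of the first base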
def add_filter(self, label): if label not in self.FILTER: if "PASS" in self.FILTER: self.FILTER = [f for f in self.FILTER if f != "PASS"] self.FILTER.append(label)
Add label to FILTER if not set yet, removing ``PASS`` entry if present
def add_format(self, key, value=None): if key in self.FORMAT: return self.FORMAT.append(key) if value is not None: for call in self: call.data.setdefault(key, value)
Add an entry to format The record's calls ``data[key]`` will be set to ``value`` if not yet set and value is not ``None``. If key is already in FORMAT then nothing is done.
def gt_bases(self): result = [] for a in self.gt_alleles: if a is None: result.append(None) elif a == 0: result.append(self.site.REF) else: result.append(self.site.ALT[a - 1].value) return tuple(result)
Return the actual genotype bases, e.g. if VCF genotype is 0/1, could return ('A', 'T')
def gt_type(self): if not self.called: return None # not called elif all(a == 0 for a in self.gt_alleles): return HOM_REF elif len(set(self.gt_alleles)) == 1: return HOM_ALT else: return HET
The type of genotype, returns one of ``HOM_REF``, ``HOM_ALT``, and ``HET``.
def is_filtered(self, require=None, ignore=None): ignore = ignore or ["PASS"] if "FT" not in self.data or not self.data["FT"]: return False for ft in self.data["FT"]: if ft in ignore: continue # skip if not require: return True elif ft in require: return True return False
Return ``True`` for filtered calls :param iterable ignore: if set, the filters to ignore, make sure to include 'PASS', when setting, default is ``['PASS']`` :param iterable require: if set, the filters to require for returning ``True``
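A hedged usage sketch of the behaviour described above; `call` is a hypothetical Call object whose data dict carries the per-sample FT field.

call.data["FT"] = ["PASS", "LowQual"]
call.is_filtered()                             # True: 'LowQual' is not in the default ignore list
call.is_filtered(ignore=["PASS", "LowQual"])   # False: every set filter is ignored
call.is_filtered(require=["q10"])              # False: the required filter is not present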
def serialize(self): if self.mate_chrom is None: remote_tag = "." else: if self.within_main_assembly: mate_chrom = self.mate_chrom else: mate_chrom = "<{}>".format(self.mate_chrom) tpl = {FORWARD: "[{}:{}[", REVERSE: "]{}:{}]"}[self.mate_orientation] remote_tag = tpl.format(mate_chrom, self.mate_pos) if self.orientation == FORWARD: return remote_tag + self.sequence else: return self.sequence + remote_tag
Return string representation for VCF
def trend(self, order=LINEAR): '''Override Series.trend() to return a TimeSeries instance.''' coefficients = self.trend_coefficients(order) x = self.timestamps trend_y = LazyImport.numpy().polyval(coefficients, x) return TimeSeries(zip(x, trend_y))
Override Series.trend() to return a TimeSeries instance.
def trend_coefficients(self, order=LINEAR): '''Calculate trend coefficients for the specified order.''' if not len(self.points): raise ArithmeticError('Cannot calculate the trend of an empty series') return LazyImport.numpy().polyfit(self.timestamps, self.values, order)
Calculate trend coefficients for the specified order.
def moving_average(self, window, method=SIMPLE): '''Calculate a moving average using the specified method and window''' if len(self.points) < window: raise ArithmeticError('Not enough points for moving average') numpy = LazyImport.numpy() if method == TimeSeries.SIMPLE: weights = numpy.ones(window) / float(window) ma_x = self.timestamps[window-1:] ma_y = numpy.convolve(self.values, weights)[window-1:-(window-1)].tolist() return TimeSeries(zip(ma_x, ma_y))
Calculate a moving average using the specified method and window
def plot(self, label=None, colour='g', style='-'): # pragma: no cover '''Plot the time series.''' pylab = LazyImport.pylab() pylab.plot(self.dates, self.values, '%s%s' % (colour, style), label=label) if label is not None: pylab.legend() pylab.show()
Plot the time series.
def table_output(data): '''Get a table representation of a dictionary.''' if type(data) == DictType: data = data.items() headings = [ item[0] for item in data ] rows = [ item[1] for item in data ] columns = zip(*rows) if len(columns): widths = [ max([ len(str(y)) for y in row ]) for row in rows ] else: widths = [ 0 for c in headings ] for c, heading in enumerate(headings): widths[c] = max(widths[c], len(heading)) column_count = range(len(rows)) table = [ ' '.join([ headings[c].ljust(widths[c]) for c in column_count ]) ] table.append(' '.join([ '=' * widths[c] for c in column_count ])) for column in columns: table.append(' '.join([ str(column[c]).ljust(widths[c]) for c in column_count ])) return '\n'.join(table)
Get a table representation of a dictionary.
def to_datetime(time): '''Convert `time` to a datetime.''' if type(time) == IntType or type(time) == LongType: time = datetime.fromtimestamp(time // 1000) return time
Convert `time` to a datetime.
def spellCheckTextgrid(tg, targetTierName, newTierName, isleDict, printEntries=False): ''' Spell check words by using the praatio spellcheck function Incorrect items are noted in a new tier and optionally printed to the screen ''' def checkFunc(word): try: isleDict.lookup(word) except isletool.WordNotInISLE: returnVal = False else: returnVal = True return returnVal tg = praatio_scripts.spellCheckEntries(tg, targetTierName, newTierName, checkFunc, printEntries) return tg
Spell check words by using the praatio spellcheck function. Incorrect items are noted in a new tier and optionally printed to the screen.
def main(argv=None): parser = argparse.ArgumentParser(description="Parser benchmark") parser.add_argument("--debug", default=False, action="store_true", help="Enable debugging") parser.add_argument("--repetitions", type=int, default=10, help="Number of repetitions") parser.add_argument("--line-count", type=int, default=5000, help="Number of lines to parse") args = parser.parse_args(argv) run(args)
Main program entry point for parsing command line arguments
def run_pyvcf(args): # open VCF reader reader = vcf.Reader(filename=args.input_vcf) # optionally, open VCF writer writer = None # read through input VCF file, optionally also writing out start = time.clock() num = 0 for num, r in enumerate(reader): if num % 10000 == 0: print(num, "".join(map(str, [r.CHROM, ":", r.POS])), sep="\t", file=sys.stderr) if writer: writer.write_record(r) if args.max_records and num >= args.max_records: break end = time.clock() print("Read {} records in {} seconds".format(num, (end - start)), file=sys.stderr)
Main program entry point after parsing arguments
def main(argv=None): parser = argparse.ArgumentParser(description="Benchmark driver") parser.add_argument("--max-records", type=int, default=100 * 1000) parser.add_argument("--engine", type=str, choices=("vcfpy", "pyvcf"), default="vcfpy") parser.add_argument("--input-vcf", type=str, required=True, help="Path to VCF file to read") parser.add_argument( "--output-vcf", type=str, required=False, help="Path to VCF file to write if given" ) args = parser.parse_args(argv) if args.engine == "vcfpy": VCFPyRunner(args).run() else: PyVCFRunner(args).run()
Main program entry point for parsing command line arguments
def run(self): start = time.clock() it = iter(self.reader) if self.args.max_records: it = itertools.islice(it, self.args.max_records) num = self.work(it) end = time.clock() print("Read {} records in {} seconds".format(num, (end - start)), file=sys.stderr)
Main program entry point after parsing arguments
def _crc8(self, buffer): polynomial = 0x31; crc = 0xFF; index = 0 for index in range(0, len(buffer)): crc ^= buffer[index] for i in range(8, 0, -1): if crc & 0x80: crc = (crc << 1) ^ polynomial else: crc = (crc << 1) return crc & 0xFF
CRC-8 checksum, polynomial 0x31 (x^8 + x^5 + x^4 + 1).
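A standalone sketch of the same checksum, useful for checking values by hand; the function name is hypothetical, and the parameters (polynomial 0x31, initial value 0xFF, no reflection, no final XOR) follow the code above.

def crc8(buffer):
    crc = 0xFF
    for byte in buffer:
        crc ^= byte
        for _ in range(8):
            # Shift left; XOR in the polynomial whenever the top bit was set.
            crc = ((crc << 1) ^ 0x31) & 0xFF if crc & 0x80 else (crc << 1) & 0xFF
    return crc

assert crc8([0xBE, 0xEF]) == 0x92   # worked two-byte example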
def split_mapping(pair_str): orig_key, value = pair_str.split("=", 1) key = orig_key.strip() if key != orig_key: warnings.warn( "Mapping key {} has leading or trailing space".format(repr(orig_key)), LeadingTrailingSpaceInKey, ) return key, value
Split the ``str`` in ``pair_str`` at ``'='`` Warn if key needs to be stripped
def parse_mapping(value): if not value.startswith("<") or not value.endswith(">"): raise exceptions.InvalidHeaderException( "Header mapping value was not wrapped in angular brackets" ) # split the comma-separated list into pairs, ignoring commas in quotes pairs = split_quoted_string(value[1:-1], delim=",", quote='"') # split these pairs into key/value pairs, converting flags to mappings # to True key_values = [] for pair in pairs: if "=" in pair: key, value = split_mapping(pair) if value.startswith('"') and value.endswith('"'): value = ast.literal_eval(value) elif value.startswith("[") and value.endswith("]"): value = [v.strip() for v in value[1:-1].split(",")] else: key, value = pair, True key_values.append((key, value)) # return completely parsed mapping as OrderedDict return OrderedDict(key_values)
Parse the given VCF header line mapping. Such a mapping consists of "key=value" pairs, separated by commas and wrapped into angular brackets ("<...>"). Strings are usually quoted; for certain known keys, exceptions are made, depending on the tag key. This, however, only becomes important when serializing. :raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if there was a problem parsing the file
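A hedged usage sketch of the mapping parser described above, using a typical INFO header value.

value = '<ID=DP,Number=1,Type=Integer,Description="Total Depth">'
mapping = parse_mapping(value)
# Expected, given the rules above, an OrderedDict along the lines of:
# OrderedDict([('ID', 'DP'), ('Number', '1'), ('Type', 'Integer'), ('Description', 'Total Depth')])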
def build_header_parsers(): result = { "ALT": MappingHeaderLineParser(header.AltAlleleHeaderLine), "contig": MappingHeaderLineParser(header.ContigHeaderLine), "FILTER": MappingHeaderLineParser(header.FilterHeaderLine), "FORMAT": MappingHeaderLineParser(header.FormatHeaderLine), "INFO": MappingHeaderLineParser(header.InfoHeaderLine), "META": MappingHeaderLineParser(header.MetaHeaderLine), "PEDIGREE": MappingHeaderLineParser(header.PedigreeHeaderLine), "SAMPLE": MappingHeaderLineParser(header.SampleHeaderLine), "__default__": StupidHeaderLineParser(), # fallback } return result
Return mapping for parsers to use for each VCF header type. Inject the WarningHelper into the parsers.
def convert_field_value(type_, value): if value == ".": return None elif type_ in ("Character", "String"): if "%" in value: for k, v in record.UNESCAPE_MAPPING: value = value.replace(k, v) return value else: try: return _CONVERTERS[type_](value) except ValueError: warnings.warn( ("{} cannot be converted to {}, keeping as " "string.").format(value, type_), CannotConvertValue, ) return value
Convert atomic field value according to the type
def parse_field_value(field_info, value): if field_info.id == "FT": return [x for x in value.split(";") if x != "."] elif field_info.type == "Flag": return True elif field_info.number == 1: return convert_field_value(field_info.type, value) else: if value == ".": return [] else: return [convert_field_value(field_info.type, x) for x in value.split(",")]
Parse ``value`` according to ``field_info``
def parse_breakend(alt_str): arr = BREAKEND_PATTERN.split(alt_str) mate_chrom, mate_pos = arr[1].split(":", 1) mate_pos = int(mate_pos) if mate_chrom[0] == "<": mate_chrom = mate_chrom[1:-1] within_main_assembly = False else: within_main_assembly = True FWD_REV = {True: record.FORWARD, False: record.REVERSE} orientation = FWD_REV[alt_str[0] == "[" or alt_str[0] == "]"] mate_orientation = FWD_REV["[" in alt_str] if orientation == record.FORWARD: sequence = arr[2] else: sequence = arr[0] return (mate_chrom, mate_pos, orientation, mate_orientation, sequence, within_main_assembly)
Parse breakend and return tuple with results, parameters for BreakEnd constructor
def process_sub_grow(ref, alt_str): if len(alt_str) == 0: raise exceptions.InvalidRecordException("Invalid VCF, empty ALT") elif len(alt_str) == 1: if ref[0] == alt_str[0]: return record.Substitution(record.DEL, alt_str) else: return record.Substitution(record.INDEL, alt_str) else: return record.Substitution(record.INDEL, alt_str)
Process substitution where the string grows
def process_sub_shrink(ref, alt_str): if len(ref) == 0: raise exceptions.InvalidRecordException("Invalid VCF, empty REF") elif len(ref) == 1: if ref[0] == alt_str[0]: return record.Substitution(record.INS, alt_str) else: return record.Substitution(record.INDEL, alt_str) else: return record.Substitution(record.INDEL, alt_str)
Process substitution where the string shrinks
def process_sub(ref, alt_str): if len(ref) == len(alt_str): if len(ref) == 1: return record.Substitution(record.SNV, alt_str) else: return record.Substitution(record.MNV, alt_str) elif len(ref) > len(alt_str): return process_sub_grow(ref, alt_str) else: # len(ref) < len(alt_str): return process_sub_shrink(ref, alt_str)
Process substitution
def process_alt(header, ref, alt_str): # pylint: disable=W0613 # By its nature, this function contains a large number of case distinctions if "]" in alt_str or "[" in alt_str: return record.BreakEnd(*parse_breakend(alt_str)) elif alt_str[0] == "." and len(alt_str) > 0: return record.SingleBreakEnd(record.FORWARD, alt_str[1:]) elif alt_str[-1] == "." and len(alt_str) > 0: return record.SingleBreakEnd(record.REVERSE, alt_str[:-1]) elif alt_str[0] == "<" and alt_str[-1] == ">": inner = alt_str[1:-1] return record.SymbolicAllele(inner) else: # substitution return process_sub(ref, alt_str)
Process alternative value using Header in ``header``
def run(self, s): begins, ends = [0], [] # transition table DISPATCH = { self.NORMAL: self._handle_normal, self.QUOTED: self._handle_quoted, self.ARRAY: self._handle_array, self.DELIM: self._handle_delim, self.ESCAPED: self._handle_escaped, } # run state automaton state = self.NORMAL for pos, c in enumerate(s): state = DISPATCH[state](c, pos, begins, ends) ends.append(len(s)) assert len(begins) == len(ends) # Build resulting list return [s[start:end] for start, end in zip(begins, ends)]
Split string ``s`` at delimiter, correctly interpreting quotes Further, interprets arrays wrapped in one level of ``[]``. No recursive brackets are interpreted (as this would make the grammar non-regular and currently this complexity is not needed). Currently, quoting inside of braces is not supported either. This is just to support the example from VCF v4.3.
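A hedged usage sketch; the class name and constructor arguments are assumed from context, and the expected result simply restates the splitting rules described above.

splitter = QuotedStringSplitter(delim=",", quote='"')   # hypothetical construction
parts = splitter.run('ID=DP,Description="Total Depth, raw",Values=[a, b]')
# Expected: ['ID=DP', 'Description="Total Depth, raw"', 'Values=[a, b]']
# Commas inside quotes and inside one level of [] do not act as delimiters.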
def parse_line(self, line): if not line or not line.startswith("##"): raise exceptions.InvalidHeaderException( 'Invalid VCF header line (must start with "##") {}'.format(line) ) if "=" not in line: raise exceptions.InvalidHeaderException( 'Invalid VCF header line (must contain "=") {}'.format(line) ) line = line[len("##") :].rstrip() # trim '^##' and trailing whitespace # split key/value pair at "=" key, value = split_mapping(line) sub_parser = self.sub_parsers.get(key, self.sub_parsers["__default__"]) return sub_parser.parse_key_value(key, value)
Parse VCF header ``line`` (trailing '\r\n' or '\n' is ignored) :param str line: ``str`` with line to parse :param dict sub_parsers: ``dict`` mapping header line types to appropriate parser objects :returns: appropriate :py:class:`HeaderLine` parsed from ``line`` :raises: :py:class:`vcfpy.exceptions.InvalidHeaderException` if there was a problem parsing the file
def parse_line(self, line_str): line_str = line_str.rstrip() if not line_str: return None # empty line, EOF arr = self._split_line(line_str) # CHROM chrom = arr[0] # POS pos = int(arr[1]) # IDS if arr[2] == ".": ids = [] else: ids = arr[2].split(";") # REF ref = arr[3] # ALT alts = [] if arr[4] != ".": for alt in arr[4].split(","): alts.append(process_alt(self.header, ref, alt)) # QUAL if arr[5] == ".": qual = None else: try: qual = int(arr[5]) except ValueError: # try as float qual = float(arr[5]) # FILTER if arr[6] == ".": filt = [] else: filt = arr[6].split(";") self._check_filters(filt, "FILTER") # INFO info = self._parse_info(arr[7], len(alts)) if len(arr) == 9: raise exceptions.IncorrectVCFFormat("Expected 8 or 10+ columns, got 9!") elif len(arr) == 8: format_ = None calls = None else: # FORMAT format_ = arr[8].split(":") # sample/call columns calls = self._handle_calls(alts, format_, arr[8], arr) return record.Record(chrom, pos, ids, ref, alts, qual, filt, info, format_, calls)
Parse line from file (including trailing line break) and return resulting Record
def _handle_calls(self, alts, format_, format_str, arr): if format_str not in self._format_cache: self._format_cache[format_str] = list(map(self.header.get_format_field_info, format_)) # per-sample calls calls = [] for sample, raw_data in zip(self.samples.names, arr[9:]): if self.samples.is_parsed(sample): data = self._parse_calls_data(format_, self._format_cache[format_str], raw_data) call = record.Call(sample, data) self._format_checker.run(call, len(alts)) self._check_filters(call.data.get("FT"), "FORMAT/FT", call.sample) calls.append(call) else: calls.append(record.UnparsedCall(sample, raw_data)) return calls
Handle FORMAT and calls columns, factored out of parse_line
def _split_line(self, line_str): arr = line_str.rstrip().split("\t") if len(arr) != self.expected_fields: raise exceptions.InvalidRecordException( ( "The line contains an invalid number of fields. Was " "{} but expected {}\n{}".format(len(arr), 9 + len(self.samples.names), line_str) ) ) return arr
Split line and check number of columns
def _parse_info(self, info_str, num_alts): result = OrderedDict() if info_str == ".": return result # The standard is very nice to parsers, we can simply split at # semicolon characters, although I (Manuel) don't know how strict # programs follow this for entry in info_str.split(";"): if "=" not in entry: # flag key = entry result[key] = parse_field_value(self.header.get_info_field_info(key), True) else: key, value = split_mapping(entry) result[key] = parse_field_value(self.header.get_info_field_info(key), value) self._info_checker.run(key, result[key], num_alts) return result
Parse INFO column from string
def _parse_calls_data(klass, format_, infos, gt_str): data = OrderedDict() # The standard is very nice to parsers, we can simply split at # colon characters, although I (Manuel) don't know how strict # programs follow this for key, info, value in zip(format_, infos, gt_str.split(":")): data[key] = parse_field_value(info, value) return data
Parse genotype call information from arrays using format array :param list format: List of strings with format names :param str gt_str: String with genotype information values
def _check_header_lines(self, header_lines): if not header_lines: raise exceptions.InvalidHeaderException( "The VCF file did not contain any header lines!" ) first = header_lines[0] if first.key != "fileformat": raise exceptions.InvalidHeaderException("The VCF file did not start with ##fileformat") if first.value not in SUPPORTED_VCF_VERSIONS: warnings.warn("Unknown VCF version {}".format(first.value), UnknownVCFVersion)
Check header lines, in particular for starting file "##fileformat"
def run(self, key, value, num_alts): field_info = self.header.get_info_field_info(key) if not isinstance(value, list): return TABLE = { ".": len(value), "A": num_alts, "R": num_alts + 1, "G": binomial(num_alts + 1, 2), # diploid only at the moment } expected = TABLE.get(field_info.number, field_info.number) if len(value) != expected: tpl = "Number of elements for INFO field {} is {} instead of {}" warnings.warn( tpl.format(key, len(value), field_info.number), exceptions.IncorrectListLength )
Check value in INFO[key] of record Currently, only checks for consistent counts are implemented :param str key: key of INFO entry to check :param value: value to check :param int alts: list of alternative alleles, for length
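A worked example of the expected-count table above for a record with two ALT alleles; `math.comb` stands in for the `binomial` helper used in the checker.

from math import comb   # assumed equivalent to binomial() above

num_alts = 2
expected = {
    "A": num_alts,                # 2: one value per ALT allele
    "R": num_alts + 1,            # 3: REF plus each ALT allele
    "G": comb(num_alts + 1, 2),   # 3: following the checker's diploid-only table
}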