Columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, values 52.6k-1.16M)
Get a reduced structure. Args: reduction_algo (str): The lattice reduction algorithm to use. Currently supported options are "niggli" or "LLL".
def get_reduced_structure(self, reduction_algo="niggli"):
    if reduction_algo == "niggli":
        reduced_latt = self._lattice.get_niggli_reduced_lattice()
    elif reduction_algo == "LLL":
        reduced_latt = self._lattice.get_lll_reduced_lattice()
    else:
        raise ValueError("Invalid reduction algo : {}".format(reduction_algo))

    if reduced_latt != self.lattice:
        return self.__class__(reduced_latt, self.species_and_occu, self.cart_coords,
                              coords_are_cartesian=True, to_unit_cell=True,
                              site_properties=self.site_properties,
                              charge=self._charge)
    else:
        return self.copy()
139,084
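A minimal usage sketch for get_reduced_structure above; the CsCl-type cell is illustrative, and the pymatgen.core import path assumes a reasonably recent pymatgen release.

from pymatgen.core import Lattice, Structure

# Illustrative CsCl-type cell; the cubic cell is already reduced, so the call is a no-op here.
struct = Structure(Lattice.cubic(4.2), ["Cs", "Cl"],
                   [[0, 0, 0], [0.5, 0.5, 0.5]])
reduced = struct.get_reduced_structure(reduction_algo="LLL")
print(reduced.lattice.abc)  # (4.2, 4.2, 4.2)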
Reconstitute a Structure object from a dict representation of Structure created using as_dict(). Args: d (dict): Dict representation of structure. Returns: Structure object
def from_dict(cls, d, fmt=None): if fmt == "abivars": from pymatgen.io.abinit.abiobjects import structure_from_abivars return structure_from_abivars(cls=cls, **d) lattice = Lattice.from_dict(d["lattice"]) sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]] charge = d.get("charge", None) return cls.from_sites(sites, charge=charge)
139,092
Convenience constructor to make a Molecule from a list of sites. Args: sites ([Site]): Sequence of Sites. charge (int): Charge of molecule. Defaults to 0. spin_multiplicity (int): Spin multiplicity. Defaults to None, in which case it is determined automatically. validate_proximity (bool): Whether to check that sites are not too close to each other.
def from_sites(cls, sites, charge=0, spin_multiplicity=None, validate_proximity=False): props = collections.defaultdict(list) for site in sites: for k, v in site.properties.items(): props[k].append(v) return cls([site.species for site in sites], [site.coords for site in sites], charge=charge, spin_multiplicity=spin_multiplicity, validate_proximity=validate_proximity, site_properties=props)
139,098
Determines the covalent bonds in a molecule. Args: tol (float): The tol to determine bonds in a structure. See CovalentBond.is_bonded. Returns: List of bonds
def get_covalent_bonds(self, tol=0.2):
    bonds = []
    for site1, site2 in itertools.combinations(self._sites, 2):
        if CovalentBond.is_bonded(site1, site2, tol):
            bonds.append(CovalentBond(site1, site2))
    return bonds
139,100
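A short sketch of get_covalent_bonds on a water molecule; the geometry below is approximate and only meant to illustrate the call.

from pymatgen.core import Molecule

mol = Molecule(["O", "H", "H"],
               [[0.000, 0.000, 0.000],
                [0.757, 0.586, 0.000],
                [-0.757, 0.586, 0.000]])
for bond in mol.get_covalent_bonds():
    print(bond)  # the two O-H bonds (~0.96 A); the H-H pair is too far apart to be counted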
Reconstitute a Molecule object from a dict representation created using as_dict(). Args: d (dict): dict representation of Molecule. Returns: Molecule object
def from_dict(cls, d): sites = [Site.from_dict(sd) for sd in d["sites"]] charge = d.get("charge", 0) spin_multiplicity = d.get("spin_multiplicity") return cls.from_sites(sites, charge=charge, spin_multiplicity=spin_multiplicity)
139,105
Find all sites within a sphere from a point. Args: pt (3x1 array): Cartesian coordinates of center of sphere. r (float): Radius of sphere. Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance.
def get_sites_in_sphere(self, pt, r):
    neighbors = []
    for site in self._sites:
        dist = site.distance_from_point(pt)
        if dist <= r:
            neighbors.append((site, dist))
    return neighbors
139,106
Get all neighbors to a site within a sphere of radius r. Excludes the site itself. Args: site (Site): Site at the center of the sphere. r (float): Radius of sphere. Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance.
def get_neighbors(self, site, r):
    nn = self.get_sites_in_sphere(site.coords, r)
    return [(s, dist) for (s, dist) in nn if site != s]
139,107
Returns all sites in a shell centered on origin (coords) between radii r-dr and r+dr. Args: origin (3x1 array): Cartesian coordinates of center of shell. r (float): Radius of the shell's midpoint. dr (float): Half-width of the shell, i.e., sites at distances in (r-dr, r+dr] are returned. Returns: [(site, dist) ...] since most of the time, subsequent processing requires the distance.
def get_neighbors_in_shell(self, origin, r, dr):
    outer = self.get_sites_in_sphere(origin, r + dr)
    inner = r - dr
    return [(site, dist) for (site, dist) in outer if dist > inner]
139,108
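The three neighbor finders above can be exercised on a small molecule; the methane geometry below is approximate and purely illustrative.

from pymatgen.core import Molecule

d = 0.629  # gives a C-H bond length of ~1.09 A along the cube diagonals
mol = Molecule(["C", "H", "H", "H", "H"],
               [[0, 0, 0], [d, d, d], [-d, -d, d], [-d, d, -d], [d, -d, -d]])
carbon = mol[0]
print(len(mol.get_sites_in_sphere(carbon.coords, 1.2)))          # 5: the carbon itself plus 4 H
print(len(mol.get_neighbors(carbon, 1.2)))                       # 4: the central site is excluded
print(len(mol.get_neighbors_in_shell(carbon.coords, 1.1, 0.2)))  # 4: shell covers (0.9, 1.3]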
Reads a molecule from a file. Supported formats include xyz, Gaussian input (gjf|g03|g09|com|inp), Gaussian output (out|lis|log) and pymatgen's JSON-serialized molecules. Using OpenBabel, many more extensions are supported, but OpenBabel must be installed. Args: filename (str): The filename to read from. Returns: Molecule
def from_file(cls, filename): filename = str(filename) from pymatgen.io.gaussian import GaussianOutput with zopen(filename) as f: contents = f.read() fname = filename.lower() if fnmatch(fname, "*.xyz*"): return cls.from_str(contents, fmt="xyz") elif any([fnmatch(fname.lower(), "*.{}*".format(r)) for r in ["gjf", "g03", "g09", "com", "inp"]]): return cls.from_str(contents, fmt="g09") elif any([fnmatch(fname.lower(), "*.{}*".format(r)) for r in ["out", "lis", "log"]]): return GaussianOutput(filename).final_structure elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"): return cls.from_str(contents, fmt="json") elif fnmatch(fname, "*.yaml*"): return cls.from_str(contents, fmt="yaml") else: from pymatgen.io.babel import BabelMolAdaptor m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)", filename.lower()) if m: new = BabelMolAdaptor.from_file(filename, m.group(1)).pymatgen_mol new.__class__ = cls return new raise ValueError("Unrecognized file extension!")
139,113
Remove all occurrences of several species from a structure. Args: species: Sequence of species to remove, e.g., ["Li", "Na"].
def remove_species(self, species): new_sites = [] species = [get_el_sp(s) for s in species] for site in self._sites: new_sp_occu = {sp: amt for sp, amt in site.species.items() if sp not in species} if len(new_sp_occu) > 0: new_sites.append(PeriodicSite( new_sp_occu, site.frac_coords, self._lattice, properties=site.properties)) self._sites = new_sites
139,121
Delete sites at the given indices. Args: indices: Sequence of indices of sites to delete.
def remove_sites(self, indices):
    self._sites = [s for i, s in enumerate(self._sites) if i not in indices]
139,122
Apply a symmetry operation to the structure and return the new structure. The lattice is operated by the rotation matrix only. Coords are operated in full and then transformed to the new lattice. Args: symmop (SymmOp): Symmetry operation to apply. fractional (bool): Whether the symmetry operation is applied in fractional space. Defaults to False, i.e., symmetry operation is applied in cartesian coordinates.
def apply_operation(self, symmop, fractional=False): if not fractional: self._lattice = Lattice([symmop.apply_rotation_only(row) for row in self._lattice.matrix]) def operate_site(site): new_cart = symmop.operate(site.coords) new_frac = self._lattice.get_fractional_coords(new_cart) return PeriodicSite(site.species, new_frac, self._lattice, properties=site.properties) else: new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix) self._lattice = Lattice(new_latt) def operate_site(site): return PeriodicSite(site.species, symmop.operate(site.frac_coords), self._lattice, properties=site.properties) self._sites = [operate_site(s) for s in self._sites]
139,123
Modify the lattice of the structure. Mainly used for changing the basis. Args: new_lattice (Lattice): New lattice
def modify_lattice(self, new_lattice): self._lattice = new_lattice for site in self._sites: site.lattice = new_lattice
139,124
Apply a strain to the lattice. Args: strain (float or list): Amount of strain to apply. Can be a float, or a sequence of 3 numbers. E.g., 0.01 means all lattice vectors are increased by 1%. This is equivalent to calling modify_lattice with a lattice with lattice parameters that are 1% larger.
def apply_strain(self, strain):
    s = (1 + np.array(strain)) * np.eye(3)
    self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
139,125
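A sketch of apply_strain on a toy cubic cell; both the scalar and the per-axis forms follow directly from the (1 + strain) scaling in the code. The Fe cell is illustrative.

from pymatgen.core import Lattice, Structure

struct = Structure(Lattice.cubic(3.0), ["Fe"], [[0, 0, 0]])
struct.apply_strain(0.01)               # isotropic: all lattice vectors grow by 1%
print(round(struct.lattice.a, 3))       # 3.03
struct.apply_strain([0.0, 0.0, 0.05])   # anisotropic: only the c axis grows by a further 5%
print(round(struct.lattice.c, 4))       # ~3.1815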
Translate specific sites by some vector, keeping the sites within the unit cell. Args: indices: Integer or List of site indices on which to perform the translation. vector: Translation vector for sites. frac_coords (bool): Whether the vector corresponds to fractional or cartesian coordinates. to_unit_cell (bool): Whether new sites are transformed to unit cell
def translate_sites(self, indices, vector, frac_coords=True, to_unit_cell=True): if not isinstance(indices, collections.abc.Iterable): indices = [indices] for i in indices: site = self._sites[i] if frac_coords: fcoords = site.frac_coords + vector else: fcoords = self._lattice.get_fractional_coords( site.coords + vector) if to_unit_cell: fcoords = np.mod(fcoords, 1) self._sites[i].frac_coords = fcoords
139,127
Rotate specific sites by some angle around a vector at an anchor point. Args: indices (list): List of site indices on which to perform the rotation. theta (float): Angle in radians. axis (3x1 array): Rotation axis vector. anchor (3x1 array): Point of rotation. to_unit_cell (bool): Whether new sites are transformed to unit cell
def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None, to_unit_cell=True): from numpy.linalg import norm from numpy import cross, eye from scipy.linalg import expm if indices is None: indices = range(len(self)) if axis is None: axis = [0, 0, 1] if anchor is None: anchor = [0, 0, 0] anchor = np.array(anchor) axis = np.array(axis) theta %= 2 * np.pi rm = expm(cross(eye(3), axis / norm(axis)) * theta) for i in indices: site = self._sites[i] coords = ((np.dot(rm, np.array(site.coords - anchor).T)).T + anchor).ravel() new_site = PeriodicSite( site.species, coords, self._lattice, to_unit_cell=to_unit_cell, coords_are_cartesian=True, properties=site.properties) self._sites[i] = new_site
139,128
Performs a random perturbation of the sites in a structure to break symmetries. Args: distance (float): Distance in angstroms by which to perturb each site.
def perturb(self, distance):
    def get_rand_vec():
        # deals with zero vectors.
        vector = np.random.randn(3)
        vnorm = np.linalg.norm(vector)
        return vector / vnorm * distance if vnorm != 0 else get_rand_vec()

    for i in range(len(self._sites)):
        self.translate_sites([i], get_rand_vec(), frac_coords=False)
139,129
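A sketch of perturb showing that every site moves by exactly the requested distance, with only the direction randomized. The CsCl-type cell is illustrative; PeriodicSite.distance is used so periodic wrapping does not distort the check.

import numpy as np
from pymatgen.core import Lattice, Structure

struct = Structure(Lattice.cubic(4.2), ["Cs", "Cl"],
                   [[0, 0, 0], [0.5, 0.5, 0.5]])
orig = struct.copy()
struct.perturb(0.05)
print([round(old.distance(new), 3) for old, new in zip(orig, struct)])  # [0.05, 0.05]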
Merges sites (adding occupancies) within tol of each other. Removes site properties. Args: tol (float): Tolerance for distance to merge sites. mode (str): Three modes supported. "delete" means duplicate sites are deleted. "sum" means the occupancies are summed for the sites. "average" means that the site is deleted but the properties are averaged. Only the first letter of mode is considered.
def merge_sites(self, tol=0.01, mode="sum"): mode = mode.lower()[0] from scipy.spatial.distance import squareform from scipy.cluster.hierarchy import fcluster, linkage d = self.distance_matrix np.fill_diagonal(d, 0) clusters = fcluster(linkage(squareform((d + d.T) / 2)), tol, 'distance') sites = [] for c in np.unique(clusters): inds = np.where(clusters == c)[0] species = self[inds[0]].species coords = self[inds[0]].frac_coords props = self[inds[0]].properties for n, i in enumerate(inds[1:]): sp = self[i].species if mode == "s": species += sp offset = self[i].frac_coords - coords coords = coords + ((offset - np.round(offset)) / (n + 2)).astype( coords.dtype) for key in props.keys(): if props[key] is not None and self[i].properties[key] != props[key]: if mode == 'a' and isinstance(props[key], float): # update a running total props[key] = props[key]*(n+1)/(n+2) + self[i].properties[key]/(n+2) else: props[key] = None warnings.warn("Sites with different site property %s are merged. " "So property is set to none" % key) sites.append(PeriodicSite(species, coords, self.lattice, properties=props)) self._sites = sites
139,131
Appends a site to the molecule. Args: species: Species of inserted site coords: Coordinates of inserted site validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to True. properties (dict): A dict of properties for the Site. Returns: New molecule with inserted site.
def append(self, species, coords, validate_proximity=True, properties=None):
    return self.insert(len(self), species, coords,
                       validate_proximity=validate_proximity,
                       properties=properties)
139,134
Set the charge and spin multiplicity. Args: charge (int): Charge for the molecule. Defaults to 0. spin_multiplicity (int): Spin multiplicity for molecule. Defaults to None, which means that the spin multiplicity is set to 1 if the molecule has no unpaired electrons and to 2 if there are unpaired electrons.
def set_charge_and_spin(self, charge, spin_multiplicity=None): self._charge = charge nelectrons = 0 for site in self._sites: for sp, amt in site.species.items(): if not isinstance(sp, DummySpecie): nelectrons += sp.Z * amt nelectrons -= charge self._nelectrons = nelectrons if spin_multiplicity: if (nelectrons + spin_multiplicity) % 2 != 1: raise ValueError( "Charge of {} and spin multiplicity of {} is" " not possible for this molecule".format( self._charge, spin_multiplicity)) self._spin_multiplicity = spin_multiplicity else: self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
139,135
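A sketch of set_charge_and_spin on a water cation; the electron counting follows the code above (10 electrons for neutral H2O, 9 after removing one). The geometry is approximate.

from pymatgen.core import Molecule

mol = Molecule(["O", "H", "H"],
               [[0.0, 0.0, 0.0], [0.757, 0.586, 0.0], [-0.757, 0.586, 0.0]])
mol.set_charge_and_spin(charge=1)        # 9 electrons left, so one unpaired electron
print(mol.spin_multiplicity)             # 2
mol.set_charge_and_spin(charge=1, spin_multiplicity=4)  # also allowed: (9 + 4) % 2 == 1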
Insert a site to the molecule. Args: i (int): Index to insert site. species: Species of inserted site. coords (3x1 array): Coordinates of inserted site. validate_proximity (bool): Whether to check if inserted site is too close to an existing site. Defaults to False. properties (dict): Dict of properties for the Site. Returns: New molecule with inserted site.
def insert(self, i, species, coords, validate_proximity=False, properties=None): new_site = Site(species, coords, properties=properties) if validate_proximity: for site in self: if site.distance(new_site) < self.DISTANCE_TOLERANCE: raise ValueError("New site is too close to an existing " "site!") self._sites.insert(i, new_site)
139,136
Remove all occurrences of a species from a molecule. Args: species: Species to remove.
def remove_species(self, species): new_sites = [] species = [get_el_sp(sp) for sp in species] for site in self._sites: new_sp_occu = {sp: amt for sp, amt in site.species.items() if sp not in species} if len(new_sp_occu) > 0: new_sites.append(Site(new_sp_occu, site.coords, properties=site.properties)) self._sites = new_sites
139,137
Delete sites at the given indices. Args: indices: Sequence of indices of sites to delete.
def remove_sites(self, indices):
    self._sites = [self._sites[i] for i in range(len(self._sites))
                   if i not in indices]
139,138
Translate specific sites by some vector. Args: indices (list): List of site indices on which to perform the translation. Defaults to all sites. vector (3x1 array): Translation vector for sites.
def translate_sites(self, indices=None, vector=None):
    if indices is None:
        indices = range(len(self))
    if vector is None:
        vector = [0, 0, 0]  # default to a null translation
    for i in indices:
        site = self._sites[i]
        new_site = Site(site.species, site.coords + vector,
                        properties=site.properties)
        self._sites[i] = new_site
139,139
Apply a symmetry operation to the molecule. Args: symmop (SymmOp): Symmetry operation to apply.
def apply_operation(self, symmop): def operate_site(site): new_cart = symmop.operate(site.coords) return Site(site.species, new_cart, properties=site.properties) self._sites = [operate_site(s) for s in self._sites]
139,140
Create the polymer from the monomer. Args: monomer (Molecule): the monomer molecule. mon_vector (numpy.array): molecule vector that points from the start atom index to the end atom index.
def _create(self, monomer, mon_vector): while self.length != (self.n_units-1): if self.linear_chain: move_direction = np.array(mon_vector) / np.linalg.norm(mon_vector) else: move_direction = self._next_move_direction() self._add_monomer(monomer.copy(), mon_vector, move_direction)
139,145
Rotate the monomer so that it is aligned along the move direction. Args: monomer (Molecule): the monomer molecule. mon_vector (numpy.array): molecule vector that points from the start atom index to the end atom index. move_direction (numpy.array): the direction of the polymer chain extension.
def _align_monomer(self, monomer, mon_vector, move_direction): axis = np.cross(mon_vector, move_direction) origin = monomer[self.start].coords angle = get_angle(mon_vector, move_direction) op = SymmOp.from_origin_axis_angle(origin, axis, angle) monomer.apply_operation(op)
139,147
Extend the polymer molecule by adding a monomer along the mon_vector direction. Args: monomer (Molecule): monomer molecule. mon_vector (numpy.array): monomer vector that points from head to tail. move_direction (numpy.array): direction along which the monomer will be positioned.
def _add_monomer(self, monomer, mon_vector, move_direction): translate_by = self.molecule.cart_coords[self.end] + \ self.link_distance * move_direction monomer.translate_sites(range(len(monomer)), translate_by) if not self.linear_chain: self._align_monomer(monomer, mon_vector, move_direction) # add monomer if there are no crossings does_cross = False for i, site in enumerate(monomer): try: self.molecule.append(site.specie, site.coords, properties=site.properties) except: does_cross = True polymer_length = len(self.molecule) self.molecule.remove_sites( range(polymer_length - i, polymer_length)) break if not does_cross: self.length += 1 self.end += len(self.monomer)
139,148
Internal method to format values in the packmol parameter dictionaries Args: param_val: Some object to turn into String Returns: string representation of the object
def _format_param_val(self, param_val): if isinstance(param_val, list): return ' '.join(str(x) for x in param_val) else: return str(param_val)
139,150
Write the packmol input file to the input directory. Args: input_dir (string): path to the input directory
def _write_input(self, input_dir="."): with open(os.path.join(input_dir, self.input_file), 'wt', encoding="utf-8") as inp: for k, v in self.control_params.items(): inp.write('{} {}\n'.format(k, self._format_param_val(v))) # write the structures of the constituent molecules to file and set # the molecule id and the corresponding filename in the packmol # input file. for idx, mol in enumerate(self.mols): filename = os.path.join( input_dir, '{}.{}'.format( idx, self.control_params["filetype"])).encode("ascii") # pdb if self.control_params["filetype"] == "pdb": self.write_pdb(mol, filename, num=idx+1) # all other filetypes else: a = BabelMolAdaptor(mol) pm = pb.Molecule(a.openbabel_mol) pm.write(self.control_params["filetype"], filename=filename, overwrite=True) inp.write("\n") inp.write( "structure {}.{}\n".format( os.path.join(input_dir, str(idx)), self.control_params["filetype"])) for k, v in self.param_list[idx].items(): inp.write(' {} {}\n'.format(k, self._format_param_val(v))) inp.write('end structure\n')
139,152
Write the input file to the scratch directory, run packmol and return the packed molecule. Args: copy_to_current_on_exit (bool): Whether or not to copy the packmol input/output files from the scratch directory to the current directory. site_property (str): if set, the specified site property for the final packed molecule will be restored. Returns: Molecule object
def run(self, copy_to_current_on_exit=False, site_property=None): scratch = tempfile.gettempdir() with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir: self._write_input(input_dir=scratch_dir) packmol_input = open(os.path.join(scratch_dir, self.input_file), 'r') p = Popen(self.packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE) (stdout, stderr) = p.communicate() output_file = os.path.join(scratch_dir, self.control_params["output"]) if os.path.isfile(output_file): packed_mol = BabelMolAdaptor.from_file(output_file, self.control_params["filetype"]) packed_mol = packed_mol.pymatgen_mol print("packed molecule written to {}".format( self.control_params["output"])) if site_property: packed_mol = self.restore_site_properties(site_property=site_property, filename=output_file) return packed_mol else: print("Packmol execution failed") print(stdout, stderr) return None
139,153
Convert a list of openbabel atoms to a Molecule. Args: atoms ([OBAtom]): list of OBAtom objects. residue_name (str): the key in self.map_residue_to_mol. Used to restore the site properties in the final packed molecule. site_property (str): the site property to be restored. Returns: Molecule object
def convert_obatoms_to_molecule(self, atoms, residue_name=None, site_property="ff_map"): restore_site_props = True if residue_name is not None else False if restore_site_props and not hasattr(self, "map_residue_to_mol"): self._set_residue_map() coords = [] zs = [] for atm in atoms: coords.append(list(atm.coords)) zs.append(atm.atomicnum) mol = Molecule(zs, coords) if restore_site_props: props = [] ref = self.map_residue_to_mol[residue_name].copy() # sanity check assert len(mol) == len(ref) assert ref.formula == mol.formula # the packed molecules have the atoms in the same order..sigh! for i, site in enumerate(mol): assert site.specie.symbol == ref[i].specie.symbol props.append(getattr(ref[i], site_property)) mol.add_site_property(site_property, props) return mol
139,156
Restore the site properties for the final packed molecule. Args: site_property (str): the site property to be restored. filename (str): path to the final packed molecule. Returns: Molecule
def restore_site_properties(self, site_property="ff_map", filename=None): # only for pdb if not self.control_params["filetype"] == "pdb": raise ValueError() filename = filename or self.control_params["output"] bma = BabelMolAdaptor.from_file(filename, "pdb") pbm = pb.Molecule(bma._obmol) assert len(pbm.residues) == sum([x["number"] for x in self.param_list]) packed_mol = self.convert_obatoms_to_molecule( pbm.residues[0].atoms, residue_name=pbm.residues[0].name, site_property=site_property) for resid in pbm.residues[1:]: mol = self.convert_obatoms_to_molecule( resid.atoms, residue_name=resid.name, site_property=site_property) for site in mol: packed_mol.append(site.species, site.coords, properties=site.properties) return packed_mol
139,157
LAMMPS wrapper Args: input_filename (string): input file name bin (string): command to run, excluding the input file name
def __init__(self, input_filename="lammps.in", bin="lammps"): self.lammps_bin = bin.split() if not which(self.lammps_bin[-1]): raise RuntimeError( "LammpsRunner requires the executable {} to be in the path. " "Please download and install LAMMPS from " \ "http://lammps.sandia.gov. " "Don't forget to add the binary to your path".format(self.lammps_bin[-1])) self.input_filename = input_filename
139,158
Get an element from an atomic number. Args: z (int): Atomic number Returns: Element with atomic number z.
def from_Z(z: int):
    for sym, data in _pt_data.items():
        if data["Atomic no"] == z:
            return Element(sym)
    raise ValueError("No element with this atomic number %s" % z)
139,175
Returns an element from a row and group number. Args: row (int): Row number group (int): Group number .. note:: The 18 group number system is used, i.e., Noble gases are group 18.
def from_row_and_group(row: int, group: int):
    for sym in _pt_data.keys():
        el = Element(sym)
        if el.row == row and el.group == group:
            return el
    raise ValueError("No element with this row and group!")
139,176
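Both element lookups above are static-style constructors; a quick sketch using standard periodic-table facts:

from pymatgen.core.periodic_table import Element

print(Element.from_Z(26))                 # Fe
print(Element.from_row_and_group(4, 11))  # Cu: period 4, group 11 in the 18-group scheme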
A pretty ASCII printer for the periodic table, based on some filter_function. Args: filter_function: A filtering function taking an Element as input and returning a boolean. For example, setting filter_function = lambda el: el.X > 2 will print a periodic table containing only elements with electronegativity > 2.
def print_periodic_table(filter_function: callable = None): for row in range(1, 10): rowstr = [] for group in range(1, 19): try: el = Element.from_row_and_group(row, group) except ValueError: el = None if el and ((not filter_function) or filter_function(el)): rowstr.append("{:3s}".format(el.symbol)) else: rowstr.append(" ") print(" ".join(rowstr))
139,184
Returns a Specie from a string representation. Args: species_string (str): A typical string representation of a species, e.g., "Mn2+", "Fe3+", "O2-". Returns: A Specie object. Raises: ValueError if species_string cannot be interpreted.
def from_string(species_string: str): m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", species_string) if m: sym = m.group(1) oxi = 1 if m.group(2) == "" else float(m.group(2)) oxi = -oxi if m.group(3) == "-" else oxi properties = None if m.group(4): toks = m.group(4).replace(",", "").split("=") properties = {toks[0]: float(toks[1])} return Specie(sym, oxi, properties) else: raise ValueError("Invalid Species String")
139,190
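A sketch of Specie.from_string; note that newer pymatgen releases spell the class Species (with Specie kept as an alias) and prefer from_str, so the names below assume an older API.

from pymatgen.core.periodic_table import Specie

sp = Specie.from_string("Fe3+")
print(sp.symbol, sp.oxi_state)              # Fe 3.0
print(Specie.from_string("O2-").oxi_state)  # -2.0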
Gets the nuclear electric quadrupole moment in units of e*millibarns. Args: isotope (str): the isotope to get the quadrupole moment for. Defaults to None, which uses the lowest-mass isotope.
def get_nmr_quadrupole_moment(self, isotope=None): quad_mom = self._el.nmr_quadrupole_moment if len(quad_mom) == 0: return 0.0 if isotope is None: isotopes = list(quad_mom.keys()) isotopes.sort(key=lambda x: int(x.split("-")[1]), reverse=False) return quad_mom.get(isotopes[0], 0.0) else: if isotope not in quad_mom: raise ValueError("No quadrupole moment for isotope {}".format( isotope)) return quad_mom.get(isotope, 0.0)
139,192
Returns a DummySpecie from a string representation. Args: species_string (str): A string representation of a dummy species, e.g., "X2+", "X3+". Returns: A DummySpecie object. Raises: ValueError if species_string cannot be interpreted.
def from_string(species_string: str): m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-]*)(.*)", species_string) if m: sym = m.group(1) if m.group(2) == "" and m.group(3) == "": oxi = 0 else: oxi = 1 if m.group(2) == "" else float(m.group(2)) oxi = -oxi if m.group(3) == "-" else oxi properties = None if m.group(4): toks = m.group(4).split("=") properties = {toks[0]: float(toks[1])} return DummySpecie(sym, oxi, properties) raise ValueError("Invalid DummySpecies String")
139,199
Find the symmetry operations of the reciprocal lattice, to be used for hkl transformations. Args: structure (Structure): conventional unit cell. symprec (float): symmetry precision. Defaults to 0.01.
def get_recp_symmetry_operation(structure, symprec=0.01): recp_lattice = structure.lattice.reciprocal_lattice_crystallographic # get symmetry operations from input conventional unit cell # Need to make sure recp lattice is big enough, otherwise symmetry # determination will fail. We set the overall volume to 1. recp_lattice = recp_lattice.scale(1) recp = Structure(recp_lattice, ["H"], [[0, 0, 0]]) # Creates a function that uses the symmetry operations in the # structure to find Miller indices that might give repetitive slabs analyzer = SpacegroupAnalyzer(recp, symprec=symprec) recp_symmops = analyzer.get_symmetry_operations() return recp_symmops
139,213
Returns all symmetrically distinct indices below a certain max-index for a given structure. Analysis is based on the symmetry of the reciprocal lattice of the structure. Args: structure (Structure): input structure. max_index (int): The maximum index. For example, a max_index of 1 means that (100), (110), and (111) are returned for the cubic structure. All other indices are equivalent to one of these.
def get_symmetrically_distinct_miller_indices(structure, max_index): r = list(range(-max_index, max_index + 1)) r.reverse() # First we get a list of all hkls for conventional (including equivalent) conv_hkl_list = [miller for miller in itertools.product(r, r, r) if any([i != 0 for i in miller])] sg = SpacegroupAnalyzer(structure) # Get distinct hkl planes from the rhombohedral setting if trigonal if sg.get_crystal_system() == "trigonal": transf = sg.get_conventional_to_primitive_transformation_matrix() miller_list = [hkl_transformation(transf, hkl) for hkl in conv_hkl_list] prim_structure = SpacegroupAnalyzer(structure).get_primitive_standard_structure() symm_ops = get_recp_symmetry_operation(prim_structure) else: miller_list = conv_hkl_list symm_ops = get_recp_symmetry_operation(structure) unique_millers, unique_millers_conv = [], [] def is_already_analyzed(miller_index): for op in symm_ops: if in_coord_list(unique_millers, op.operate(miller_index)): return True return False for i, miller in enumerate(miller_list): d = abs(reduce(gcd, miller)) miller = tuple([int(i / d) for i in miller]) if not is_already_analyzed(miller): if sg.get_crystal_system() == "trigonal": # Now we find the distinct primitive hkls using # the primitive symmetry operations and their # corresponding hkls in the conventional setting unique_millers.append(miller) d = abs(reduce(gcd, conv_hkl_list[i])) cmiller = tuple([int(i / d) for i in conv_hkl_list[i]]) unique_millers_conv.append(cmiller) else: unique_millers.append(miller) unique_millers_conv.append(miller) return unique_millers_conv
139,214
Returns the Miller index from setting A to B using a transformation matrix Args: transf (3x3 array): The transformation matrix that transforms a lattice of A to B miller_index ([h, k, l]): Miller index to transform to setting B
def hkl_transformation(transf, miller_index): # Get a matrix of whole numbers (ints) lcm = lambda a, b: a * b // math.gcd(a, b) reduced_transf = reduce(lcm, [int(1 / i) for i in itertools.chain(*transf) if i != 0]) * transf reduced_transf = reduced_transf.astype(int) # perform the transformation t_hkl = np.dot(reduced_transf, miller_index) d = abs(reduce(gcd, t_hkl)) t_hkl = np.array([int(i / d) for i in t_hkl]) # get mostly positive oriented Miller index if len([i for i in t_hkl if i < 0]) > 1: t_hkl *= -1 return tuple(t_hkl)
139,215
Function to get the ranges of the slab regions. Useful for discerning where the slab ends and vacuum begins if the slab is not fully within the cell Args: slab (Structure): Structure object modelling the surface blength (float, Ang): The bondlength between atoms. You generally want this value to be larger than the actual bondlengths in order to find atoms that are part of the slab
def get_slab_regions(slab, blength=3.5): fcoords, indices, all_indices = [], [], [] for site in slab: # find sites with c < 0 (noncontiguous) neighbors = slab.get_neighbors(site, blength, include_index=True, include_image=True) for nn in neighbors: if nn[0].frac_coords[2] < 0: # sites are noncontiguous within cell fcoords.append(nn[0].frac_coords[2]) indices.append(nn[-2]) if nn[-2] not in all_indices: all_indices.append(nn[-2]) if fcoords: # If slab is noncontiguous, locate the lowest # site within the upper region of the slab while fcoords: last_fcoords = copy.copy(fcoords) last_indices = copy.copy(indices) site = slab[indices[fcoords.index(min(fcoords))]] neighbors = slab.get_neighbors(site, blength, include_index=True, include_image=True) fcoords, indices = [], [] for nn in neighbors: if 1 > nn[0].frac_coords[2] > 0 and \ nn[0].frac_coords[2] < site.frac_coords[2]: # sites are noncontiguous within cell fcoords.append(nn[0].frac_coords[2]) indices.append(nn[-2]) if nn[-2] not in all_indices: all_indices.append(nn[-2]) # Now locate the highest site within the lower region of the slab upper_fcoords = [] for site in slab: if all([nn[-1] not in all_indices for nn in slab.get_neighbors(site, blength, include_index=True)]): upper_fcoords.append(site.frac_coords[2]) coords = copy.copy(last_fcoords) if not fcoords else copy.copy(fcoords) min_top = slab[last_indices[coords.index(min(coords))]].frac_coords[2] ranges = [[0, max(upper_fcoords)], [min_top, 1]] else: # If the entire slab region is within the slab cell, just # set the range as the highest and lowest site in the slab sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2]) ranges = [[sorted_sites[0].frac_coords[2], sorted_sites[-1].frac_coords[2]]] return ranges
139,217
Checks if slab is symmetric, i.e., contains inversion symmetry. Args: symprec (float): Symmetry precision used for SpaceGroup analyzer. Returns: (bool) Whether slab contains inversion symmetry.
def is_symmetric(self, symprec=0.1):
    sg = SpacegroupAnalyzer(self, symprec=symprec)
    return sg.is_laue()
139,224
Adds a single adsorbate atom to the slab (a slab structure from the Slab class, oriented along [0, 0, 1]). The adsorbed atom is displaced along the surface normal relative to the center of the given sites. Args: indices ([int]): Indices of sites on which to put the adsorbate. specie (Specie/Element/str): adsorbed atom species. distance (float): distance in Angstroms between the center of the adsorbed atom and the center of the given sites.
def add_adsorbate_atom(self, indices, specie, distance):
    # Let's do the work in cartesian coords
    center = np.sum([self[i].coords for i in indices], axis=0) / len(indices)
    coords = center + self.normal * distance / np.linalg.norm(self.normal)
    self.append(specie, coords, coords_are_cartesian=True)
139,232
Creates an Incar object. Args: params (dict): A set of input parameters as a dictionary.
def __init__(self, params=None): super().__init__() if params: # if Incar contains vector-like magmoms given as a list # of floats, convert to a list of lists if (params.get("MAGMOM") and isinstance(params["MAGMOM"][0], (int, float))) \ and (params.get("LSORBIT") or params.get("LNONCOLLINEAR")): val = [] for i in range(len(params["MAGMOM"])//3): val.append(params["MAGMOM"][i*3:(i+1)*3]) params["MAGMOM"] = val self.update(params)
139,263
Returns a string representation of the INCAR. The reason why this method is different from the __str__ method is to provide options for pretty printing. Args: sort_keys (bool): Set to True to sort the INCAR parameters alphabetically. Defaults to False. pretty (bool): Set to True for pretty aligned output. Defaults to False.
def get_string(self, sort_keys=False, pretty=False): keys = self.keys() if sort_keys: keys = sorted(keys) lines = [] for k in keys: if k == "MAGMOM" and isinstance(self[k], list): value = [] if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \ (self.get("LSORBIT") or self.get("LNONCOLLINEAR")): value.append(" ".join(str(i) for j in self[k] for i in j)) elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"): for m, g in itertools.groupby(self[k]): value.append("3*{}*{}".format(len(tuple(g)), m)) else: # float() to ensure backwards compatibility between # float magmoms and Magmom objects for m, g in itertools.groupby(self[k], lambda x: float(x)): value.append("{}*{}".format(len(tuple(g)), m)) lines.append([k, " ".join(value)]) elif isinstance(self[k], list): lines.append([k, " ".join([str(i) for i in self[k]])]) else: lines.append([k, self[k]]) if pretty: return str(tabulate([[l[0], "=", l[1]] for l in lines], tablefmt="plain")) else: return str_delimited(lines, None, " = ") + "\n"
139,267
Reads an Incar object from a string. Args: string (str): Incar string Returns: Incar object
def from_string(string):
    lines = list(clean_lines(string.splitlines()))
    params = {}
    for line in lines:
        for sline in line.split(';'):
            m = re.match(r'(\w+)\s*=\s*(.*)', sline.strip())
            if m:
                key = m.group(1).strip()
                val = m.group(2).strip()
                val = Incar.proc_val(key, val)
                params[key] = val
    return Incar(params)
139,268
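A sketch of Incar.from_string showing how proc_val types each value; the tags below are standard VASP tags and the values are arbitrary.

from pymatgen.io.vasp.inputs import Incar

incar = Incar.from_string("ENCUT = 520\nEDIFF = 1e-5\nLDAU = .TRUE.\nMAGMOM = 2*5 3")
print(incar["ENCUT"], incar["EDIFF"], incar["LDAU"], incar["MAGMOM"])
# 520 1e-05 True [5, 5, 3]: int, float, bool and expanded list respectively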
Static helper method to convert INCAR parameters to proper types, e.g., integers, floats, lists, etc. Args: key: INCAR parameter key val: Actual value of INCAR parameter.
def proc_val(key, val): list_keys = ("LDAUU", "LDAUL", "LDAUJ", "MAGMOM", "DIPOL", "LANGEVIN_GAMMA", "QUAD_EFG", "EINT") bool_keys = ("LDAU", "LWAVE", "LSCALU", "LCHARG", "LPLANE", "LUSE_VDW", "LHFCALC", "ADDGRID", "LSORBIT", "LNONCOLLINEAR") float_keys = ("EDIFF", "SIGMA", "TIME", "ENCUTFOCK", "HFSCREEN", "POTIM", "EDIFFG", "AGGAC", "PARAM1", "PARAM2") int_keys = ("NSW", "NBANDS", "NELMIN", "ISIF", "IBRION", "ISPIN", "ICHARG", "NELM", "ISMEAR", "NPAR", "LDAUPRINT", "LMAXMIX", "ENCUT", "NSIM", "NKRED", "NUPDOWN", "ISPIND", "LDAUTYPE", "IVDW") def smart_int_or_float(numstr): if numstr.find(".") != -1 or numstr.lower().find("e") != -1: return float(numstr) else: return int(numstr) try: if key in list_keys: output = [] toks = re.findall( r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?\*?(-?\d+\.?\d*)?", val) for tok in toks: if tok[2] and "3" in tok[0]: output.extend( [smart_int_or_float(tok[2])] * int(tok[0]) * int(tok[1])) elif tok[1]: output.extend([smart_int_or_float(tok[1])] * int(tok[0])) else: output.append(smart_int_or_float(tok[0])) return output if key in bool_keys: m = re.match(r"^\.?([T|F|t|f])[A-Za-z]*\.?", val) if m: if m.group(1) == "T" or m.group(1) == "t": return True else: return False raise ValueError(key + " should be a boolean type!") if key in float_keys: return float(re.search(r"^-?\d*\.?\d*[e|E]?-?\d*", val).group(0)) if key in int_keys: return int(re.match(r"^-?[0-9]+", val).group(0)) except ValueError: pass # Not in standard keys. We will try a hierarchy of conversions. try: val = int(val) return val except ValueError: pass try: val = float(val) return val except ValueError: pass if "true" in val.lower(): return True if "false" in val.lower(): return False return val.strip().capitalize()
139,269
Convenient static constructor for an automatic Gamma centered Kpoint grid. Args: kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors. Defaults to (1,1,1) shift: Shift to be applied to the kpoints. Defaults to (0,0,0). Returns: Kpoints object
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
    return Kpoints("Automatic kpoint scheme", 0,
                   Kpoints.supported_modes.Gamma, kpts=[kpts],
                   kpts_shift=shift)
139,275
Convenient static constructor for an automatic Monkhorst pack Kpoint grid. Args: kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors. Defaults to (2,2,2) shift: Shift to be applied to the kpoints. Defaults to (0,0,0). Returns: Kpoints object
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
    return Kpoints("Automatic kpoint scheme", 0,
                   Kpoints.supported_modes.Monkhorst, kpts=[kpts],
                   kpts_shift=shift)
139,276
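A sketch of the two automatic-grid constructors above; printing a Kpoints object yields the text of a ready-to-write KPOINTS file. The grid sizes are arbitrary.

from pymatgen.io.vasp.inputs import Kpoints

gamma = Kpoints.gamma_automatic(kpts=(4, 4, 4))
mp = Kpoints.monkhorst_automatic(kpts=(6, 6, 6), shift=(0.5, 0.5, 0.5))
print(gamma)  # "Automatic kpoint scheme" header, Gamma style, 4 4 4 grid, zero shift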
Returns an automatic Kpoint object based on a structure and a kpoint density. Uses Gamma centered meshes always. For GW. Algorithm: Uses a simple approach scaling the number of divisions along each reciprocal lattice vector proportional to its length. Args: structure: Input structure kppa: Grid density
def automatic_gamma_density(structure, kppa): latt = structure.lattice lengths = latt.abc ngrid = kppa / structure.num_sites mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3) num_div = [int(round(mult / l)) for l in lengths] # ensure that numDiv[i] > 0 num_div = [i if i > 0 else 1 for i in num_div] # VASP documentation recommends to use even grids for n <= 8 and odd # grids for n > 8. num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div] style = Kpoints.supported_modes.Gamma comment = "pymatgen 4.7.6+ generated KPOINTS with grid density = " + \ "{} / atom".format(kppa) num_kpts = 0 return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
139,278
Returns an automatic Kpoint object based on a structure and a kpoint density per inverse Angstrom^3 of reciprocal cell. Algorithm: Same as automatic_density() Args: structure (Structure): Input structure kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell force_gamma (bool): Force a gamma centered mesh Returns: Kpoints
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
    vol = structure.lattice.reciprocal_lattice.volume
    kppa = kppvol * vol * structure.num_sites
    return Kpoints.automatic_density(structure, kppa,
                                     force_gamma=force_gamma)
139,279
Convenient static constructor for a KPOINTS object in line mode, typically used for band-structure calculations. Args: divisions: Parameter determining the number of k-points along each high-symmetry line. ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure). Returns: Kpoints object
def automatic_linemode(divisions, ibz): kpoints = list() labels = list() for path in ibz.kpath["path"]: kpoints.append(ibz.kpath["kpoints"][path[0]]) labels.append(path[0]) for i in range(1, len(path) - 1): kpoints.append(ibz.kpath["kpoints"][path[i]]) labels.append(path[i]) kpoints.append(ibz.kpath["kpoints"][path[i]]) labels.append(path[i]) kpoints.append(ibz.kpath["kpoints"][path[-1]]) labels.append(path[-1]) return Kpoints("Line_mode KPOINTS file", style=Kpoints.supported_modes.Line_mode, coord_type="Reciprocal", kpts=kpoints, labels=labels, num_kpts=int(divisions))
139,280
Reads a Kpoints object from a KPOINTS string. Args: string (str): KPOINTS string. Returns: Kpoints object
def from_string(string): lines = [line.strip() for line in string.splitlines()] comment = lines[0] num_kpts = int(lines[1].split()[0].strip()) style = lines[2].lower()[0] # Fully automatic KPOINTS if style == "a": return Kpoints.automatic(int(lines[3])) coord_pattern = re.compile(r'^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+' r'([\d+.\-Ee]+)') # Automatic gamma and Monk KPOINTS, with optional shift if style == "g" or style == "m": kpts = [int(i) for i in lines[3].split()] kpts_shift = (0, 0, 0) if len(lines) > 4 and coord_pattern.match(lines[4]): try: kpts_shift = [float(i) for i in lines[4].split()] except ValueError: pass return Kpoints.gamma_automatic(kpts, kpts_shift) if style == "g" \ else Kpoints.monkhorst_automatic(kpts, kpts_shift) # Automatic kpoints with basis if num_kpts <= 0: style = Kpoints.supported_modes.Cartesian if style in "ck" \ else Kpoints.supported_modes.Reciprocal kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)] kpts_shift = [float(i) for i in lines[6].split()] return Kpoints(comment=comment, num_kpts=num_kpts, style=style, kpts=kpts, kpts_shift=kpts_shift) # Line-mode KPOINTS, usually used with band structures if style == "l": coord_type = "Cartesian" if lines[3].lower()[0] in "ck" \ else "Reciprocal" style = Kpoints.supported_modes.Line_mode kpts = [] labels = [] patt = re.compile(r'([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)' r'\s*!*\s*(.*)') for i in range(4, len(lines)): line = lines[i] m = patt.match(line) if m: kpts.append([float(m.group(1)), float(m.group(2)), float(m.group(3))]) labels.append(m.group(4).strip()) return Kpoints(comment=comment, num_kpts=num_kpts, style=style, kpts=kpts, coord_type=coord_type, labels=labels) # Assume explicit KPOINTS if all else fails. style = Kpoints.supported_modes.Cartesian if style in "ck" \ else Kpoints.supported_modes.Reciprocal kpts = [] kpts_weights = [] labels = [] tet_number = 0 tet_weight = 0 tet_connections = None for i in range(3, 3 + num_kpts): toks = lines[i].split() kpts.append([float(j) for j in toks[0:3]]) kpts_weights.append(float(toks[3])) if len(toks) > 4: labels.append(toks[4]) else: labels.append(None) try: # Deal with tetrahedron method if lines[3 + num_kpts].strip().lower()[0] == "t": toks = lines[4 + num_kpts].split() tet_number = int(toks[0]) tet_weight = float(toks[1]) tet_connections = [] for i in range(5 + num_kpts, 5 + num_kpts + tet_number): toks = lines[i].split() tet_connections.append((int(toks[0]), [int(toks[j]) for j in range(1, 5)])) except IndexError: pass return Kpoints(comment=comment, num_kpts=num_kpts, style=Kpoints.supported_modes[str(style)], kpts=kpts, kpts_weights=kpts_weights, tet_number=tet_number, tet_weight=tet_weight, tet_connections=tet_connections, labels=labels)
139,281
Write VASP input to a directory. Args: output_dir (str): Directory to write to. Defaults to current directory ("."). make_dir_if_not_present (bool): Create the directory if not present. Defaults to True.
def write_input(self, output_dir=".", make_dir_if_not_present=True): if make_dir_if_not_present and not os.path.exists(output_dir): os.makedirs(output_dir) for k, v in self.items(): with zopen(os.path.join(output_dir, k), "wt") as f: f.write(v.__str__())
139,299
Read in a set of VASP input from a directory. Note that only the standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless optional_filenames is specified. Args: input_dir (str): Directory to read VASP input from. optional_files (dict): Optional files to read in as well as a dict of {filename: Object type}. Object type must have a static method from_file.
def from_directory(input_dir, optional_files=None): sub_d = {} for fname, ftype in [("INCAR", Incar), ("KPOINTS", Kpoints), ("POSCAR", Poscar), ("POTCAR", Potcar)]: fullzpath = zpath(os.path.join(input_dir, fname)) sub_d[fname.lower()] = ftype.from_file(fullzpath) sub_d["optional_files"] = {} if optional_files is not None: for fname, ftype in optional_files.items(): sub_d["optional_files"][fname] = \ ftype.from_file(os.path.join(input_dir, fname)) return VaspInput(**sub_d)
139,300
Get the simplex facets for the Convex hull. Args: qhull_data (np.ndarray): The data from which to construct the convex hull as a Nxd array (N being number of data points and d being the dimension) joggle (boolean): Whether to joggle the input to avoid precision errors. Returns: List of simplices of the Convex Hull.
def get_facets(qhull_data, joggle=False):
    if joggle:
        return ConvexHull(qhull_data, qhull_options="QJ i").simplices
    else:
        return ConvexHull(qhull_data, qhull_options="Qt i").simplices
139,303
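get_facets is a thin wrapper around scipy's ConvexHull; the standalone sketch below calls ConvexHull with the same options to show what the returned simplices look like. The points are arbitrary.

import numpy as np
from scipy.spatial import ConvexHull

pts = np.array([[0, 0], [1, 0], [0, 1], [0.25, 0.25]])
facets = ConvexHull(pts, qhull_options="Qt i").simplices
print(facets)  # three edges built from indices 0, 1 and 2; the interior point 3 never appears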
Given all the facets, convert them into a set of unique lines. Specifically used for converting convex hull facets into line pairs of coordinates. Args: q: A 2-dim sequence, where each row represents a facet. E.g., [[1,2,3],[3,6,7],...] Returns: setoflines: A set of tuples of lines. E.g., ((1,2), (1,3), (2,3), ....)
def uniquelines(q):
    setoflines = set()
    for facets in q:
        for line in itertools.combinations(facets, 2):
            setoflines.add(tuple(sorted(line)))
    return setoflines
139,304
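uniquelines is a pure function, so its behavior is easy to check; the same logic is inlined below to keep the snippet standalone.

import itertools

def unique_edges(q):  # same logic as uniquelines above
    return {tuple(sorted(line)) for facet in q for line in itertools.combinations(facet, 2)}

print(unique_edges([[1, 2, 3], [2, 3, 4]]))
# five unique edges: (1, 2), (1, 3), (2, 3), (2, 4), (3, 4); the shared edge (2, 3) is kept only once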
Convert a 2D coordinate into a triangle-based coordinate system for a prettier phase diagram. Args: coordinate: coordinate used in the convex hull computation. Returns: coordinates in a triangular-based coordinate system.
def triangular_coord(coord):
    unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
    result = np.dot(np.array(coord), unitvec)
    return result.transpose()
139,305
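triangular_coord is a simple matrix product; the arithmetic below reproduces it for one point so the mapping is concrete.

import math
import numpy as np

unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
coord = [0.5, 0.5]
print(np.dot(np.array(coord), unitvec))  # ~[0.75, 0.433]: the point mapped into the triangle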
Standard constructor for phase diagram. Args: entries ([PDEntry]): A list of PDEntry-like objects having an energy, energy_per_atom and composition. elements ([Element]): Optional list of elements in the phase diagram. If set to None, the elements are determined from the entries themselves.
def __init__(self, entries, elements=None): if elements is None: elements = set() for entry in entries: elements.update(entry.composition.elements) elements = list(elements) dim = len(elements) get_reduced_comp = lambda e: e.composition.reduced_composition entries = sorted(entries, key=get_reduced_comp) el_refs = {} min_entries = [] all_entries = [] for c, g in itertools.groupby(entries, key=get_reduced_comp): g = list(g) min_entry = min(g, key=lambda e: e.energy_per_atom) if c.is_element: el_refs[c.elements[0]] = min_entry min_entries.append(min_entry) all_entries.extend(g) if len(el_refs) != dim: raise PhaseDiagramError( "There are no entries associated with a terminal element!.") data = np.array([ [e.composition.get_atomic_fraction(el) for el in elements] + [ e.energy_per_atom] for e in min_entries ]) # Use only entries with negative formation energy vec = [el_refs[el].energy_per_atom for el in elements] + [-1] form_e = -np.dot(data, vec) inds = np.where(form_e < -self.formation_energy_tol)[0].tolist() # Add the elemental references inds.extend([min_entries.index(el) for el in el_refs.values()]) qhull_entries = [min_entries[i] for i in inds] qhull_data = data[inds][:, 1:] # Add an extra point to enforce full dimensionality. # This point will be present in all upper hull facets. extra_point = np.zeros(dim) + 1 / dim extra_point[-1] = np.max(qhull_data) + 1 qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0) if dim == 1: self.facets = [qhull_data.argmin(axis=0)] else: facets = get_facets(qhull_data) finalfacets = [] for facet in facets: # Skip facets that include the extra point if max(facet) == len(qhull_data) - 1: continue m = qhull_data[facet] m[:, -1] = 1 if abs(np.linalg.det(m)) > 1e-14: finalfacets.append(facet) self.facets = finalfacets self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets] self.all_entries = all_entries self.qhull_data = qhull_data self.dim = dim self.el_refs = el_refs self.elements = elements self.qhull_entries = qhull_entries self._stable_entries = set(self.qhull_entries[i] for i in set(itertools.chain(*self.facets)))
139,320
Returns the formation energy for an entry (NOT normalized) from the elemental references. Args: entry: A PDEntry-like object. Returns: Formation energy from the elemental references.
def get_form_energy(self, entry): c = entry.composition return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom for el in c.elements])
139,323
Calculates the chemical potentials for each element within a facet. Args: facet: Facet of the phase diagram. Returns: { element: chempot } for all elements in the phase diagram.
def _get_facet_chempots(self, facet): complist = [self.qhull_entries[i].composition for i in facet] energylist = [self.qhull_entries[i].energy_per_atom for i in facet] m = [[c.get_atomic_fraction(e) for e in self.elements] for c in complist] chempots = np.linalg.solve(m, energylist) return dict(zip(self.elements, chempots))
139,327
Provides the decomposition at a particular composition. Args: comp: A composition Returns: Decomposition as a dict of {Entry: amount}
def get_decomposition(self, comp): facet, simplex = self._get_facet_and_simplex(comp) decomp_amts = simplex.bary_coords(self.pd_coords(comp)) return {self.qhull_entries[f]: amt for f, amt in zip(facet, decomp_amts) if abs(amt) > PhaseDiagram.numerical_tol}
139,328
Provides the reaction energy of a stable entry from the neighboring equilibrium stable entries (also known as the inverse distance to hull). Args: entry: A PDEntry like object Returns: Equilibrium reaction energy of entry. Stable entries should have equilibrium reaction energy <= 0.
def get_equilibrium_reaction_energy(self, entry): if entry not in self.stable_entries: raise ValueError("Equilibrium reaction energy is available only " "for stable entries.") if entry.is_element: return 0 entries = [e for e in self.stable_entries if e != entry] modpd = PhaseDiagram(entries, self.elements) return modpd.get_decomp_and_e_above_hull(entry, allow_negative=True)[1]
139,331
Get the critical chemical potentials for an element in the Phase Diagram. Args: element: An element. Has to be in the PD in the first place. Returns: A sorted sequence of critical chemical potentials, from less negative to more negative.
def get_transition_chempots(self, element): if element not in self.elements: raise ValueError("get_transition_chempots can only be called with " "elements in the phase diagram.") critical_chempots = [] for facet in self.facets: chempots = self._get_facet_chempots(facet) critical_chempots.append(chempots[element]) clean_pots = [] for c in sorted(critical_chempots): if len(clean_pots) == 0: clean_pots.append(c) else: if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol: clean_pots.append(c) clean_pots.reverse() return tuple(clean_pots)
139,334
Get the critical compositions along the tieline between two compositions. I.e. where the decomposition products change. The endpoints are also returned. Args: comp1, comp2 (Composition): compositions that define the tieline Returns: [(Composition)]: list of critical compositions. All are of the form x * comp1 + (1-x) * comp2
def get_critical_compositions(self, comp1, comp2): n1 = comp1.num_atoms n2 = comp2.num_atoms pd_els = self.elements # the reduced dimensionality Simplexes don't use the # first element in the PD c1 = self.pd_coords(comp1) c2 = self.pd_coords(comp2) # none of the projections work if c1 == c2, so just return *copies* # of the inputs if np.all(c1 == c2): return [comp1.copy(), comp2.copy()] intersections = [c1, c2] for sc in self.simplexes: intersections.extend(sc.line_intersection(c1, c2)) intersections = np.array(intersections) # find position along line l = (c2 - c1) l /= np.sum(l ** 2) ** 0.5 proj = np.dot(intersections - c1, l) # only take compositions between endpoints proj = proj[np.logical_and(proj > -self.numerical_tol, proj < proj[1] + self.numerical_tol)] proj.sort() # only unique compositions valid = np.ones(len(proj), dtype=np.bool) valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol proj = proj[valid] ints = c1 + l * proj[:, None] # reconstruct full-dimensional composition array cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T, ints], axis=-1) # mixing fraction when compositions are normalized x = proj / np.dot(c2 - c1, l) # mixing fraction when compositions are not normalized x_unnormalized = x * n1 / (n2 + x * (n1 - n2)) num_atoms = n1 + (n2 - n1) * x_unnormalized cs *= num_atoms[:, None] return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
139,335
Writes the phase diagram to an image in a stream. Args: stream: stream to write to. Can be a file stream or a StringIO stream. image_format (str): format for image. Can be any of the matplotlib-supported formats. Defaults to svg for best results with vector graphics. **kwargs: Passed through to the get_plot function.
def write_image(self, stream, image_format="svg", **kwargs): plt = self.get_plot(**kwargs) f = plt.gcf() f.set_size_inches((12, 10)) plt.savefig(stream, format=image_format)
139,356
Writes a set of FEFF input to a directory. Args: output_dir: Directory to output the FEFF input files make_dir_if_not_present: Set to True if you want the directory ( and the whole path) to be created if it is not present.
def write_input(self, output_dir=".", make_dir_if_not_present=True): if make_dir_if_not_present and not os.path.exists(output_dir): os.makedirs(output_dir) feff = self.all_input() feff_input = "\n\n".join(str(feff[k]) for k in ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"] if k in feff) for k, v in feff.items(): with open(os.path.join(output_dir, k), "w") as f: f.write(str(v)) with open(os.path.join(output_dir, "feff.inp"), "w") as f: f.write(feff_input) # write the structure to cif file if "ATOMS" not in feff: self.atoms.struct.to(fmt="cif", filename=os.path.join( output_dir, feff["PARAMETERS"]["CIF"]))
139,361
Method that creates two quarter-ellipse functions based on points xx and yy. The ellipses are assumed to be aligned with the axes, and both quarter-ellipses pass through the two points xx and yy. Args: xx: First point. yy: Second point. Returns: A dictionary with the lower and upper quarter-ellipse functions.
def quarter_ellipsis_functions(xx, yy): npxx = np.array(xx) npyy = np.array(yy) if np.any(npxx == npyy): raise RuntimeError('Invalid points for quarter_ellipsis_functions') if np.all(npxx < npyy) or np.all(npxx > npyy): if npxx[0] < npyy[0]: p1 = npxx p2 = npyy else: p1 = npyy p2 = npxx c_lower = np.array([p1[0], p2[1]]) c_upper = np.array([p2[0], p1[1]]) b2 = (p2[1] - p1[1]) ** 2 else: if npxx[0] < npyy[0]: p1 = npxx p2 = npyy else: p1 = npyy p2 = npxx c_lower = np.array([p2[0], p1[1]]) c_upper = np.array([p1[0], p2[1]]) b2 = (p1[1] - p2[1]) ** 2 b2overa2 = b2 / (p2[0] - p1[0]) ** 2 def lower(x): return c_lower[1] - np.sqrt(b2 - b2overa2 * (x - c_lower[0]) ** 2) def upper(x): return c_upper[1] + np.sqrt(b2 - b2overa2 * (x - c_upper[0]) ** 2) return {'lower': lower, 'upper': upper}
139,371
Method that creates two (upper and lower) spline functions based on points lower_points and upper_points. Args: lower_points: Points defining the lower function. upper_points: Points defining the upper function. degree: Degree for the spline function Returns: A dictionary with the lower and upper spline functions.
def spline_functions(lower_points, upper_points, degree=3): lower_xx = np.array([pp[0] for pp in lower_points]) lower_yy = np.array([pp[1] for pp in lower_points]) upper_xx = np.array([pp[0] for pp in upper_points]) upper_yy = np.array([pp[1] for pp in upper_points]) lower_spline = UnivariateSpline(lower_xx, lower_yy, k=degree, s=0) upper_spline = UnivariateSpline(upper_xx, upper_yy, k=degree, s=0) def lower(x): return lower_spline(x) def upper(x): return upper_spline(x) return {'lower': lower, 'upper': upper}
139,372
Helper method to calculate the solid angle of a set of coords from the center. Args: center: Center to measure solid angle from. coords: List of coords to determine solid angle. Returns: The solid angle.
def my_solid_angle(center, coords): o = np.array(center) r = [np.array(c) - o for c in coords] r.append(r[0]) n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)] n.append(np.cross(r[1], r[0])) phi = 0.0 for i in range(len(n) - 1): try: value = math.acos(-np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))) except ValueError: mycos = -np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1])) if 0.999999999999 < mycos < 1.000000000001: value = math.acos(1.0) elif -0.999999999999 > mycos > -1.000000000001: value = math.acos(-1.0) else: raise SolidAngleError(mycos) phi += value return phi + (3 - len(r)) * math.pi
139,375
Adds two COHP together. Checks that energy scales are the same. Otherwise, it raises a ValueError. It also adds ICOHP if present. If ICOHP is only present in one object, it displays a warning and will not add ICOHP. Args: other: Another COHP object. Returns: Sum of the two COHPs as a COHP object.
def __add__(self, other):
    if not all(np.equal(self.energies, other.energies)):
        raise ValueError("Energies of both COHP are not compatible.")
    populations = {spin: self.populations[spin] + other.populations[spin]
                   for spin in self.cohp}
    if self.icohp is not None and other.icohp is not None:
        int_pop = {spin: self.icohp[spin] + other.icohp[spin]
                   for spin in self.icohp}
    else:
        if self.icohp is not None or other.icohp is not None:
            warnings.warn("One of the COHP objects does not contain "
                          "ICOHPs. Setting ICOHP to None.")
        int_pop = None
    return Cohp(self.efermi, self.energies, populations, icohp=int_pop)
139,418
Returns the COHP or ICOHP for a particular spin.

    Args:
        spin: Spin. Can be parsed as spin object, integer (-1/1)
            or str ("up"/"down")
        integrated: Return COHP (False) or ICOHP (True)

    Returns:
        Returns the COHP or ICOHP for the input spin. If Spin is None
        and both spins are present, both spins will be returned as a
        dictionary.
def get_cohp(self, spin=None, integrated=False):
    if not integrated:
        populations = self.cohp
    else:
        populations = self.icohp

    if populations is None:
        return None
    elif spin is None:
        return populations
    else:
        if isinstance(spin, int):
            spin = Spin(spin)
        elif isinstance(spin, str):
            s = {"up": 1, "down": -1}[spin.lower()]
            spin = Spin(s)
        return {spin: populations[spin]}
139,421
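A small usage sketch for get_cohp above, assuming the Cohp constructor as it is used elsewhere in these methods (efermi, energies, cohp dict, optional icohp) and toy data:

import numpy as np
from pymatgen.electronic_structure.cohp import Cohp
from pymatgen.electronic_structure.core import Spin

energies = np.linspace(-10.0, 5.0, 6)
pops = {Spin.up: -0.1 * np.ones(6), Spin.down: -0.2 * np.ones(6)}
cohp = Cohp(0.0, energies, pops)

print(cohp.get_cohp())                 # both spin channels as a dict
print(cohp.get_cohp(spin="up"))        # {Spin.up: ...}, string spin is accepted
print(cohp.get_cohp(spin=-1))          # {Spin.down: ...}, integer spin is accepted
print(cohp.get_cohp(integrated=True))  # None, since no ICOHP was supplied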
Returns the COHP for a particular energy.

    Args:
        energy: Energy to return the COHP value for.
        integrated: Return COHP (False) or ICOHP (True).
def get_interpolated_value(self, energy, integrated=False):
    inter = {}
    for spin in self.cohp:
        if not integrated:
            inter[spin] = get_linear_interpolated_value(self.energies,
                                                        self.cohp[spin],
                                                        energy)
        elif self.icohp is not None:
            inter[spin] = get_linear_interpolated_value(self.energies,
                                                        self.icohp[spin],
                                                        energy)
        else:
            raise ValueError("ICOHP is empty.")
    return inter
139,422
Get specific COHP object.

    Args:
        label: string (for newer Lobster versions: a number)

    Returns:
        Returns the COHP object to simplify plotting
def get_cohp_by_label(self, label):
    if label.lower() == "average":
        return Cohp(efermi=self.efermi, energies=self.energies,
                    cohp=self.cohp, are_coops=self.are_coops,
                    icohp=self.icohp)
    else:
        try:
            return Cohp(efermi=self.efermi, energies=self.energies,
                        cohp=self.all_cohps[label].get_cohp(spin=None,
                                                            integrated=False),
                        are_coops=self.are_coops,
                        icohp=self.all_cohps[label].get_icohp(spin=None))
        except KeyError:
            print("The label does not exist")
139,428
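A usage sketch for get_cohp_by_label above, assuming a LOBSTER calculation whose COHPCAR.lobster and POSCAR sit in the working directory and the usual CompleteCohp.from_file constructor; the file names and the bond label "1" are placeholders:

from pymatgen.electronic_structure.cohp import CompleteCohp

complete_cohp = CompleteCohp.from_file("LOBSTER",
                                       filename="COHPCAR.lobster",
                                       structure_file="POSCAR")
avg = complete_cohp.get_cohp_by_label("average")  # averaged COHP of the run
bond1 = complete_cohp.get_cohp_by_label("1")      # COHP of the first labelled bond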
Returns a COHP object that includes a summed COHP divided by divisor

    Args:
        label_list: list of labels for the COHP that should be included
            in the summed cohp
        divisor: float/int, the summed cohp will be divided by this divisor

    Returns:
        Returns a COHP object including a summed COHP
def get_summed_cohp_by_label_list(self, label_list, divisor=1):
    # check if cohps are spinpolarized or not
    first_cohpobject = self.get_cohp_by_label(label_list[0])
    summed_cohp = first_cohpobject.cohp.copy()
    summed_icohp = first_cohpobject.icohp.copy()
    for label in label_list[1:]:
        cohp_here = self.get_cohp_by_label(label)
        summed_cohp[Spin.up] = np.sum([summed_cohp[Spin.up],
                                       cohp_here.cohp[Spin.up]], axis=0)
        if Spin.down in summed_cohp:
            summed_cohp[Spin.down] = np.sum([summed_cohp[Spin.down],
                                             cohp_here.cohp[Spin.down]], axis=0)
        summed_icohp[Spin.up] = np.sum([summed_icohp[Spin.up],
                                        cohp_here.icohp[Spin.up]], axis=0)
        if Spin.down in summed_icohp:
            summed_icohp[Spin.down] = np.sum([summed_icohp[Spin.down],
                                              cohp_here.icohp[Spin.down]], axis=0)
    divided_cohp = {}
    divided_icohp = {}
    divided_cohp[Spin.up] = np.divide(summed_cohp[Spin.up], divisor)
    divided_icohp[Spin.up] = np.divide(summed_icohp[Spin.up], divisor)
    if Spin.down in summed_cohp:
        divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)
        divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)

    return Cohp(efermi=first_cohpobject.efermi,
                energies=first_cohpobject.energies,
                cohp=divided_cohp,
                are_coops=first_cohpobject.are_coops,
                icohp=divided_icohp)
139,429
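Continuing the previous sketch (same assumptions and placeholder labels), summing and averaging two labelled bonds:

from pymatgen.electronic_structure.core import Spin

avg_of_two = complete_cohp.get_summed_cohp_by_label_list(["1", "2"], divisor=2)
print(avg_of_two.get_cohp(spin=Spin.up))  # averaged COHP of bonds "1" and "2"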
Returns a COHP object that includes a summed COHP divided by divisor

    Args:
        label_list: list of labels for the COHP that should be included
            in the summed cohp
        orbital_list: list of orbitals for the COHPs that should be
            included in the summed cohp (same order as label_list)
        divisor: float/int, the summed cohp will be divided by this divisor

    Returns:
        Returns a COHP object including a summed COHP
def get_summed_cohp_by_label_and_orbital_list(self, label_list, orbital_list,
                                              divisor=1):
    # check if cohps are spinpolarized or not
    first_cohpobject = self.get_orbital_resolved_cohp(label_list[0],
                                                      orbital_list[0])
    summed_cohp = first_cohpobject.cohp.copy()
    summed_icohp = first_cohpobject.icohp.copy()
    for ilabel, label in enumerate(label_list[1:], 1):
        cohp_here = self.get_orbital_resolved_cohp(label, orbital_list[ilabel])
        summed_cohp[Spin.up] = np.sum([summed_cohp[Spin.up],
                                       cohp_here.cohp.copy()[Spin.up]], axis=0)
        if Spin.down in summed_cohp:
            summed_cohp[Spin.down] = np.sum([summed_cohp[Spin.down],
                                             cohp_here.cohp.copy()[Spin.down]], axis=0)
        summed_icohp[Spin.up] = np.sum([summed_icohp[Spin.up],
                                        cohp_here.icohp.copy()[Spin.up]], axis=0)
        if Spin.down in summed_icohp:
            summed_icohp[Spin.down] = np.sum([summed_icohp[Spin.down],
                                              cohp_here.icohp.copy()[Spin.down]], axis=0)
    divided_cohp = {}
    divided_icohp = {}
    divided_cohp[Spin.up] = np.divide(summed_cohp[Spin.up], divisor)
    divided_icohp[Spin.up] = np.divide(summed_icohp[Spin.up], divisor)
    if Spin.down in summed_cohp:
        divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)
        divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)

    return Cohp(efermi=first_cohpobject.efermi,
                energies=first_cohpobject.energies,
                cohp=divided_cohp,
                are_coops=first_cohpobject.are_coops,
                icohp=divided_icohp)
139,430
get a dict of IcohpValues corresponding to certain bond lengths

    Args:
        minbondlength: defines the minimum of the bond lengths of the bonds
        maxbondlength: defines the maximum of the bond lengths of the bonds

    Returns:
        dict of IcohpValues, the keys correspond to the values from the
        initial list_labels
def get_icohp_dict_by_bondlengths(self, minbondlength=0.0, maxbondlength=8.0):
    newicohp_dict = {}
    for value in self._icohplist.values():
        if minbondlength <= value._length <= maxbondlength:
            newicohp_dict[value._label] = value
    return newicohp_dict
139,442
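A usage sketch for get_icohp_dict_by_bondlengths above, assuming a LOBSTER ICOHPLIST.lobster in the working directory and the Icohplist parser exposing an icohpcollection attribute:

from pymatgen.io.lobster import Icohplist

icohpcollection = Icohplist(filename="ICOHPLIST.lobster").icohpcollection
short_bonds = icohpcollection.get_icohp_dict_by_bondlengths(minbondlength=0.0,
                                                            maxbondlength=2.5)
print(list(short_bonds))  # labels of all bonds no longer than 2.5 Angstrom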
get ICOHP/ICOOP of strongest bond

    Args:
        summed_spin_channels: Boolean to indicate whether the ICOHPs/ICOOPs
            of both spin channels should be summed
        spin: if summed_spin_channels is equal to False, this spin indicates
            which spin channel should be returned

    Returns:
        lowest ICOHP/largest ICOOP value (i.e. ICOHP/ICOOP value of
        strongest bond)
def extremum_icohpvalue(self, summed_spin_channels=True, spin=Spin.up):
    if not self._are_coops:
        extremum = sys.float_info.max
    else:
        extremum = -sys.float_info.max

    if not self._is_spin_polarized:
        if spin == Spin.down:
            warnings.warn("This spin channel does not exist. "
                          "I am switching to Spin.up")
        spin = Spin.up

    for value in self._icohplist.values():
        if not value.is_spin_polarized or not summed_spin_channels:
            if not self._are_coops:
                if value.icohpvalue(spin) < extremum:
                    extremum = value.icohpvalue(spin)
            else:
                if value.icohpvalue(spin) > extremum:
                    extremum = value.icohpvalue(spin)
        else:
            if not self._are_coops:
                if value.summed_icohp < extremum:
                    extremum = value.summed_icohp
            else:
                if value.summed_icohp > extremum:
                    extremum = value.summed_icohp
    return extremum
139,444
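Continuing the previous sketch, the strongest interaction in the same collection is the most negative ICOHP (or the largest ICOOP if the collection holds COOPs):

print(icohpcollection.extremum_icohpvalue(summed_spin_channels=True))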
Get distance between two sites.

    Args:
        other: Other site.

    Returns:
        Distance (float)
def distance(self, other):
    return np.linalg.norm(other.coords - self.coords)
139,448
Returns distance between the site and a point in space.

    Args:
        pt: Cartesian coordinates of point.

    Returns:
        Distance (float)
def distance_from_point(self, pt):
    return np.linalg.norm(np.array(pt) - self.coords)
139,449
Returns True if sites are periodic images of each other.

    Args:
        other (PeriodicSite): Other site
        tolerance (float): Tolerance to compare fractional coordinates
        check_lattice (bool): Whether to check if the two sites have the
            same lattice.

    Returns:
        bool: True if sites are periodic images of each other.
def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):
    if check_lattice and self.lattice != other.lattice:
        return False
    if self.species != other.species:
        return False

    frac_diff = pbc_diff(self.frac_coords, other.frac_coords)
    return np.allclose(frac_diff, [0, 0, 0], atol=tolerance)
139,467
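A usage sketch for is_periodic_image above, assuming the usual pymatgen core imports:

from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite

lattice = Lattice.cubic(4.0)
s1 = PeriodicSite("Fe", [0.0, 0.0, 0.0], lattice)
s2 = PeriodicSite("Fe", [1.0, 0.0, 0.0], lattice)  # same point shifted by one lattice vector
s3 = PeriodicSite("Fe", [0.5, 0.0, 0.0], lattice)
print(s1.is_periodic_image(s2))  # True
print(s1.is_periodic_image(s3))  # False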
Json-serializable dict representation of PeriodicSite.

    Args:
        verbosity (int): Verbosity level. Default of 0 only includes the
            matrix representation. Set to 1 for more details such as
            cartesian coordinates, etc.
def as_dict(self, verbosity=0):
    species_list = []
    for spec, occu in self._species.items():
        d = spec.as_dict()
        del d["@module"]
        del d["@class"]
        d["occu"] = occu
        species_list.append(d)

    d = {"species": species_list,
         "abc": [float(c) for c in self._frac_coords],
         "lattice": self._lattice.as_dict(verbosity=verbosity),
         "@module": self.__class__.__module__,
         "@class": self.__class__.__name__}

    if verbosity > 0:
        d["xyz"] = [float(c) for c in self.coords]
        d["label"] = self.species_string

    d["properties"] = self.properties
    return d
139,470
Finds all symmetrically equivalent sites for a particular site

    Args:
        site (PeriodicSite): A site in the structure

    Returns:
        ([PeriodicSite]): List of all symmetrically equivalent sites.
def find_equivalent_sites(self, site):
    for sites in self.equivalent_sites:
        if site in sites:
            return sites

    raise ValueError("Site not in structure")
139,480
Read a FiestaInput from a string. Currently tested to work with
files generated from this class itself.

    Args:
        string_input: string_input to parse.

    Returns:
        FiestaInput object
def from_string(cls, string_input):
    correlation_grid = {}
    Exc_DFT_option = {}
    COHSEX_options = {}
    GW_options = {}
    BSE_TDDFT_options = {}

    lines = string_input.strip().split("\n")

    # number of atoms and species
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    nat = toks[0]
    nsp = toks[1]
    # number of valence bands
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    nvbands = toks[0]

    # correlation_grid
    # number of points and spacing in eV for correlation grid
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    correlation_grid['n_grid'] = toks[0]
    correlation_grid['dE_grid'] = toks[1]

    # Exc DFT
    # reread=1 or recompute=0 Exc DFT
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    Exc_DFT_option['rdVxcpsi'] = toks[0]

    # COHSEX
    # number of COHSEX corrected occp and unoccp bands: C=COHSEX H=HF
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    COHSEX_options['nv_cohsex'] = toks[0]
    COHSEX_options['nc_cohsex'] = toks[1]
    COHSEX_options['eigMethod'] = toks[2]
    # number of COHSEX iter, scf on wfns, mixing coeff; V=RI-V I=RI-D
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    COHSEX_options['nit_cohsex'] = toks[0]
    COHSEX_options['resMethod'] = toks[1]
    COHSEX_options['scf_cohsex_wf'] = toks[2]
    COHSEX_options['mix_cohsex'] = toks[3]

    # GW
    # number of GW corrected occp and unoccp bands
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    GW_options['nv_corr'] = toks[0]
    GW_options['nc_corr'] = toks[1]
    # number of GW iterations
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    GW_options['nit_gw'] = toks[0]

    # BSE
    # dumping for BSE and TDDFT
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    BSE_TDDFT_options['do_bse'] = toks[0]
    BSE_TDDFT_options['do_tddft'] = toks[1]
    # number of occp. and virtual bands for BSE: nocore and up to 40 eVs
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    BSE_TDDFT_options['nv_bse'] = toks[0]
    BSE_TDDFT_options['nc_bse'] = toks[1]
    # number of excitations needed and number of iterations
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    BSE_TDDFT_options['npsi_bse'] = toks[0]
    BSE_TDDFT_options['nit_bse'] = toks[1]

    # Molecule
    # list of symbols in order
    lines.pop(0)
    atname = []
    i = int(nsp)
    while i != 0:
        l = lines.pop(0).strip()
        toks = l.split()
        atname.append(toks[0])
        i -= 1

    # scaling factor
    lines.pop(0)
    l = lines.pop(0).strip()
    toks = l.split()
    scale = toks[0]
    # atoms x,y,z cartesian .. will be multiplied by scale
    lines.pop(0)
    # Parse geometry
    species = []
    coords = []
    i = int(nat)
    while i != 0:
        l = lines.pop(0).strip()
        toks = l.split()
        coords.append([float(j) for j in toks[0:3]])
        species.append(atname[int(toks[3]) - 1])
        i -= 1

    mol = Molecule(species, coords)

    return FiestaInput(mol=mol, correlation_grid=correlation_grid,
                       Exc_DFT_option=Exc_DFT_option,
                       COHSEX_options=COHSEX_options,
                       GW_options=GW_options,
                       BSE_TDDFT_options=BSE_TDDFT_options)
139,504
Read a Fiesta input from a file. Currently tested to work with
files generated from this class itself.

    Args:
        filename: Filename to parse.

    Returns:
        FiestaInput object
def from_file(cls, filename):
    with zopen(filename) as f:
        return cls.from_string(f.read())
139,505
Initializes a transformed structure from a structure.

    Args:
        structure (Structure): Input structure
        transformations ([Transformations]): List of transformations to
            apply.
        history (list): Previous history.
        other_parameters (dict): Additional parameters to be added.
def __init__(self, structure, transformations=None, history=None,
             other_parameters=None):
    self.final_structure = structure
    self.history = history or []
    self.other_parameters = other_parameters or {}
    self._undone = []

    transformations = transformations or []
    for t in transformations:
        self.append_transformation(t)
139,510
Adds a filter.

    Args:
        structure_filter (StructureFilter): A filter implementing the
            AbstractStructureFilter API. Tells transmuter what structures
            to retain.
def append_filter(self, structure_filter):
    hdict = structure_filter.as_dict()
    hdict["input_structure"] = self.final_structure.as_dict()
    self.history.append(hdict)
139,515
Extends a sequence of transformations to the TransformedStructure.

    Args:
        transformations: Sequence of Transformations
        return_alternatives: Whether to return alternative
            TransformedStructures for one-to-many transformations.
            return_alternatives can be a number, which stipulates the
            total number of structures to return.
def extend_transformations(self, transformations,
                           return_alternatives=False):
    for t in transformations:
        self.append_transformation(t,
                                   return_alternatives=return_alternatives)
139,516
Returns VASP input as a dict of vasp objects.

    Args:
        vasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set
            to create vasp input files from structures
def get_vasp_input(self, vasp_input_set=MPRelaxSet, **kwargs):
    d = vasp_input_set(self.final_structure, **kwargs).get_vasp_input()
    d["transformations.json"] = json.dumps(self.as_dict())
    return d
139,517
Writes VASP input to an output_dir.

    Args:
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet like object
            that creates vasp input files from structures
        output_dir: Directory to output files
        create_directory: Create the directory if not present. Defaults
            to True.
        \*\*kwargs: All keyword args supported by the VASP input set.
def write_vasp_input(self, vasp_input_set=MPRelaxSet, output_dir=".",
                     create_directory=True, **kwargs):
    vasp_input_set(self.final_structure, **kwargs).write_input(
        output_dir, make_dir_if_not_present=create_directory)
    with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
        json.dump(self.as_dict(), fp)
139,518
Generates TransformedStructure from a poscar string.

    Args:
        poscar_string (str): Input POSCAR string.
        transformations ([Transformations]): Sequence of transformations
            to be applied to the input structure.
def from_poscar_string(poscar_string, transformations=None):
    p = Poscar.from_string(poscar_string)
    if not p.true_names:
        raise ValueError("Transformation can be created only from POSCAR "
                         "strings with proper VASP5 element symbols.")
    raw_string = re.sub(r"'", "\"", poscar_string)
    s = p.structure
    source_info = {"source": "POSCAR",
                   "datetime": str(datetime.datetime.now()),
                   "original_file": raw_string}
    return TransformedStructure(s, transformations, history=[source_info])
139,522
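A usage sketch for from_poscar_string above, with a minimal hand-written VASP5 POSCAR (placeholder lattice constant and coordinates):

from pymatgen.alchemy.materials import TransformedStructure

poscar_string = """Si2
1.0
3.84 0.00 0.00
0.00 3.84 0.00
0.00 0.00 3.84
Si
2
direct
0.00 0.00 0.00 Si
0.50 0.50 0.50 Si
"""
ts = TransformedStructure.from_poscar_string(poscar_string)
print(ts.final_structure.composition)  # Si2
print(ts.history[0]["source"])         # "POSCAR"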
Create TransformedStructure from SNL.

    Args:
        snl (StructureNL): Starting snl

    Returns:
        TransformedStructure
def from_snl(cls, snl):
    hist = []
    for h in snl.history:
        d = h.description
        d['_snl'] = {'url': h.url, 'name': h.name}
        hist.append(d)
    return cls(snl.structure, history=hist)
139,526
Creates LDos object from raw Feff ldos files by assuming they are
numbered consecutively, i.e. ldos01.dat ldos02.dat...

    Args:
        feff_inp_file (str): input file of run to obtain structure
        ldos_file (str): output ldos file of run to obtain dos info, etc.
def from_file(feff_inp_file='feff.inp', ldos_file='ldos'):
    header_str = Header.header_string_from_file(feff_inp_file)
    header = Header.from_string(header_str)
    structure = header.struct
    nsites = structure.num_sites
    parameters = Tags.from_file(feff_inp_file)

    if "RECIPROCAL" in parameters:
        pot_dict = dict()
        pot_readstart = re.compile('.*iz.*lmaxsc.*xnatph.*xion.*folp.*')
        pot_readend = re.compile('.*ExternalPot.*switch.*')
        pot_inp = re.sub(r'feff.inp', r'pot.inp', feff_inp_file)
        dos_index = 1
        begin = 0

        with zopen(pot_inp, "r") as potfile:
            for line in potfile:
                if len(pot_readend.findall(line)) > 0:
                    break

                if begin == 1:
                    begin += 1
                    continue

                if begin == 2:
                    z_number = int(line.strip().split()[0])
                    ele_name = Element.from_Z(z_number).name
                    if ele_name not in pot_dict:
                        pot_dict[ele_name] = dos_index
                    else:
                        pot_dict[ele_name] = min(dos_index, pot_dict[ele_name])
                    dos_index += 1

                if len(pot_readstart.findall(line)) > 0:
                    begin = 1
    else:
        pot_string = Potential.pot_string_from_file(feff_inp_file)
        dicts = Potential.pot_dict_from_string(pot_string)
        pot_dict = dicts[0]

    with zopen(ldos_file + "00.dat", "r") as fobject:
        f = fobject.readlines()
        efermi = float(f[0].split()[4])

    dos_energies = []
    ldos = {}

    for i in range(1, len(pot_dict) + 1):
        if len(str(i)) == 1:
            ldos[i] = np.loadtxt("{}0{}.dat".format(ldos_file, i))
        else:
            ldos[i] = np.loadtxt("{}{}.dat".format(ldos_file, i))

    for i in range(0, len(ldos[1])):
        dos_energies.append(ldos[1][i][0])

    all_pdos = []
    vorb = {"s": Orbital.s, "p": Orbital.py,
            "d": Orbital.dxy, "f": Orbital.f0}
    forb = {"s": 0, "p": 1, "d": 2, "f": 3}

    dlength = len(ldos[1])

    for i in range(nsites):
        pot_index = pot_dict[structure.species[i].symbol]
        all_pdos.append(defaultdict(dict))
        for k, v in vorb.items():
            density = [ldos[pot_index][j][forb[k] + 1]
                       for j in range(dlength)]
            updos = density
            downdos = None
            if downdos:
                all_pdos[-1][v] = {Spin.up: updos, Spin.down: downdos}
            else:
                all_pdos[-1][v] = {Spin.up: updos}

    pdos = all_pdos
    vorb2 = {0: Orbital.s, 1: Orbital.py, 2: Orbital.dxy, 3: Orbital.f0}
    pdoss = {structure[i]: {v: pdos[i][v] for v in vorb2.values()}
             for i in range(len(pdos))}

    forb = {"s": 0, "p": 1, "d": 2, "f": 3}

    tdos = [0] * dlength
    for i in range(nsites):
        pot_index = pot_dict[structure.species[i].symbol]
        for v in forb.values():
            density = [ldos[pot_index][j][v + 1] for j in range(dlength)]
            for j in range(dlength):
                tdos[j] = tdos[j] + density[j]
    tdos = {Spin.up: tdos}

    dos = Dos(efermi, dos_energies, tdos)
    complete_dos = CompleteDos(structure, dos, pdoss)
    charge_transfer = LDos.charge_transfer_from_file(feff_inp_file,
                                                     ldos_file)
    return LDos(complete_dos, charge_transfer)
139,528
Get charge transfer from file.

    Args:
        feff_inp_file (str): name of feff.inp file for run
        ldos_file (str): ldos filename for run, assume consecutive order,
            i.e., ldos01.dat, ldos02.dat....

    Returns:
        dictionary of dictionaries in order of potential sites
        ({"p": 0.154, "s": 0.078, "d": 0.0, "tot": 0.232}, ...)
def charge_transfer_from_file(feff_inp_file, ldos_file):
    cht = OrderedDict()
    parameters = Tags.from_file(feff_inp_file)

    if 'RECIPROCAL' in parameters:
        dicts = [dict()]
        pot_dict = dict()
        dos_index = 1
        begin = 0
        pot_inp = re.sub(r'feff.inp', r'pot.inp', feff_inp_file)
        pot_readstart = re.compile('.*iz.*lmaxsc.*xnatph.*xion.*folp.*')
        pot_readend = re.compile('.*ExternalPot.*switch.*')
        with zopen(pot_inp, "r") as potfile:
            for line in potfile:
                if len(pot_readend.findall(line)) > 0:
                    break

                if begin == 1:
                    z_number = int(line.strip().split()[0])
                    ele_name = Element.from_Z(z_number).name
                    if len(pot_dict) == 0:
                        pot_dict[0] = ele_name
                    elif len(pot_dict) > 0:
                        pot_dict[max(pot_dict.keys()) + 1] = ele_name
                    begin += 1
                    continue

                if begin == 2:
                    z_number = int(line.strip().split()[0])
                    ele_name = Element.from_Z(z_number).name
                    dicts[0][ele_name] = dos_index
                    dos_index += 1
                    if len(pot_dict) == 0:
                        pot_dict[0] = ele_name
                    elif len(pot_dict) > 0:
                        pot_dict[max(pot_dict.keys()) + 1] = ele_name

                if len(pot_readstart.findall(line)) > 0:
                    begin = 1
    else:
        pot_string = Potential.pot_string_from_file(feff_inp_file)
        dicts = Potential.pot_dict_from_string(pot_string)
        pot_dict = dicts[1]

    for i in range(0, len(dicts[0]) + 1):
        if len(str(i)) == 1:
            with zopen("{}0{}.dat".format(ldos_file, i), "rt") as fobject:
                f = fobject.readlines()
                s = float(f[3].split()[2])
                p = float(f[4].split()[2])
                d = float(f[5].split()[2])
                f1 = float(f[6].split()[2])
                tot = float(f[1].split()[4])
                cht[str(i)] = {pot_dict[i]: {'s': s, 'p': p, 'd': d,
                                             'f': f1, 'tot': tot}}
        else:
            with zopen(ldos_file + str(i) + ".dat", "rt") as fid:
                f = fid.readlines()
                s = float(f[3].split()[2])
                p = float(f[4].split()[2])
                d = float(f[5].split()[2])
                f1 = float(f[6].split()[2])
                tot = float(f[1].split()[4])
                cht[str(i)] = {pot_dict[i]: {'s': s, 'p': p, 'd': d,
                                             'f': f1, 'tot': tot}}

    return cht
139,529
Get Xmu from file.

    Args:
        xmu_dat_file (str): filename and path for xmu.dat
        feff_inp_file (str): filename and path of feff.inp input file

    Returns:
        Xmu object
def from_file(xmu_dat_file="xmu.dat", feff_inp_file="feff.inp"):
    data = np.loadtxt(xmu_dat_file)
    header = Header.from_file(feff_inp_file)
    parameters = Tags.from_file(feff_inp_file)
    pots = Potential.pot_string_from_file(feff_inp_file)
    # site index (Note: in feff it starts from 1)
    if "RECIPROCAL" in parameters:
        absorbing_atom = parameters["TARGET"]
    # species symbol
    else:
        absorbing_atom = pots.splitlines()[3].split()[2]
    return Xmu(header, parameters, absorbing_atom, data)
139,532
Returns a list of unique symmetrically equivalent k-points.

    Args:
        kpoint (1x3 array): coordinate of the k-point
        cartesian (bool): kpoint is in cartesian or fractional coordinates
        tol (float): tolerance below which coordinates are considered equal

    Returns:
        ([1x3 array] or None): if structure is not available returns None
def get_sym_eq_kpoints(self, kpoint, cartesian=False, tol=1e-2):
    if not self.structure:
        return None
    sg = SpacegroupAnalyzer(self.structure)
    symmops = sg.get_point_group_operations(cartesian=cartesian)
    points = np.dot(kpoint, [m.rotation_matrix for m in symmops])
    rm_list = []
    # identify and remove duplicates from the list of equivalent k-points:
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            # pass tol as the absolute tolerance; as a bare positional
            # argument it would be taken as rtol and ignored against [0, 0, 0]
            if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0],
                           atol=tol):
                rm_list.append(i)
                break
    return np.delete(points, rm_list, axis=0)
139,570
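get_sym_eq_kpoints above needs a band structure that carries its Structure; as an illustration of what it computes, the same symmetry expansion can be sketched directly with SpacegroupAnalyzer (a standalone rewrite of the method body under a toy simple-cubic structure, not the public API):

import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

structure = Structure(Lattice.cubic(3.0), ["Po"], [[0.0, 0.0, 0.0]])
symmops = SpacegroupAnalyzer(structure).get_point_group_operations(cartesian=False)
points = np.dot([0.25, 0.0, 0.0], [m.rotation_matrix for m in symmops])
unique = np.unique(np.round(points, 6), axis=0)
print(len(unique))  # 6 symmetry-equivalent k-points for a simple cubic cell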
Returns degeneracy of a given k-point based on structure symmetry

    Args:
        kpoint (1x3 array): coordinate of the k-point
        cartesian (bool): kpoint is in cartesian or fractional coordinates
        tol (float): tolerance below which coordinates are considered equal

    Returns:
        (int or None): degeneracy or None if structure is not available
def get_kpoint_degeneracy(self, kpoint, cartesian=False, tol=1e-2):
    all_kpts = self.get_sym_eq_kpoints(kpoint, cartesian, tol=tol)
    if all_kpts is not None:
        return len(all_kpts)
139,571