Dataset columns: docstring (string, lengths 52 to 499 chars), function (string, lengths 67 to 35.2k chars), __index_level_0__ (int64, values 52.6k to 1.16M).
Initializes a covalent bond between two sites. Args: site1 (Site): First site. site2 (Site): Second site.
def __init__(self, site1, site2):
    self.site1 = site1
    self.site2 = site2
140,576
Sends an e-mail with unix sendmail. Args: subject: String with the subject of the mail. text: String with the body of the mail. mailto: String or list of strings with the recipients. sender: String with the sender address. If sender is None, username@hostname is used. Returns: Exit status
def sendmail(subject, text, mailto, sender=None):
    def user_at_host():
        from socket import gethostname
        return os.getlogin() + "@" + gethostname()

    # Body of the message.
    try:
        sender = user_at_host() if sender is None else sender
    except OSError:
        sender = 'abipyscheduler@youknowwhere'

    if is_string(mailto):
        mailto = [mailto]

    from email.mime.text import MIMEText
    mail = MIMEText(text)
    mail["Subject"] = subject
    mail["From"] = sender
    mail["To"] = ", ".join(mailto)

    msg = mail.as_string()

    # sendmail works much better than the python interface.
    # Note that sendmail is available only on Unix-like OS.
    from subprocess import Popen, PIPE
    import sys

    sendmail = which("sendmail")
    if sendmail is None:
        return -1
    if sys.version_info[0] < 3:
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
    else:
        # msg is string not bytes so must use universal_newlines
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE, universal_newlines=True)

    outdata, errdata = p.communicate(msg)
    return len(errdata)
140,581
Initialize the object. Args: flow: :class:`Flow` object max_njobs_inqueue: The launcher will stop submitting jobs when the number of jobs in the queue is >= this value.
def __init__(self, flow, **kwargs):
    self.flow = flow
    self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200)
140,588
Keeps submitting `Tasks` until we are out of jobs or no job is ready to run. Args: max_nlaunch: Maximum number of launches. default: no limit. max_loops: Maximum number of loops sleep_time: seconds to sleep between rapidfire loop iterations Returns: The number of tasks launched.
def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
    num_launched, do_exit, launched = 0, False, []

    for count in range(max_loops):
        if do_exit:
            break
        if count > 0:
            time.sleep(sleep_time)

        tasks = self.fetch_tasks_to_run()

        # I don't know why but we receive duplicated tasks.
        if any(task in launched for task in tasks):
            logger.critical("numtasks %d already in launched list:\n%s" % (len(tasks), launched))

        # Preventive test.
        tasks = [t for t in tasks if t not in launched]
        if not tasks:
            continue

        for task in tasks:
            fired = task.start()
            if fired:
                launched.append(task)
                num_launched += 1

            if num_launched >= max_nlaunch > 0:
                logger.info('num_launched >= max_nlaunch, going back to sleep')
                do_exit = True
                break

    # Update the database.
    self.flow.pickle_dump()

    return num_launched
140,590
Loads the object from a pickle file. Args: filepath: Filename or directory name. If filepath is a directory, we scan the directory tree starting from filepath and we read the first pickle database. Raise RuntimeError if multiple databases are found.
def pickle_load(cls, filepath):
    if os.path.isdir(filepath):
        # Walk through each directory inside path and find the pickle database.
        for dirpath, dirnames, filenames in os.walk(filepath):
            fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
            if fnames:
                if len(fnames) == 1:
                    filepath = os.path.join(dirpath, fnames[0])
                    break  # Exit os.walk
                else:
                    err_msg = "Found multiple databases:\n %s" % str(fnames)
                    raise RuntimeError(err_msg)
        else:
            err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
            raise ValueError(err_msg)

    with open(filepath, "rb") as fh:
        new = pickle.load(fh)

    # new.flows is a list of strings with the workdir of the flows (see __getstate__).
    # Here we read the Flow from the pickle file so that we have
    # an up-to-date version and we set the flow in visitor_mode
    from .flows import Flow
    flow_workdirs, new.flows = new.flows, []
    for flow in map(Flow.pickle_load, flow_workdirs):
        new.add_flow(flow)

    return new
140,611
Creates a CTRL file object from an existing file. Args: filename: The name of the CTRL file. Defaults to 'CTRL'. Returns: An LMTOCtrl object.
def from_file(cls, filename="CTRL", **kwargs):
    with zopen(filename, "rt") as f:
        contents = f.read()
    return LMTOCtrl.from_string(contents, **kwargs)
140,622
Creates a CTRL file object from a string. This will mostly be used to read an LMTOCtrl object from a CTRL file. Empty spheres are ignored. Args: data: String representation of the CTRL file. Returns: An LMTOCtrl object.
def from_string(cls, data, sigfigs=8):
    lines = data.split("\n")[:-1]
    struc_lines = {"HEADER": [], "VERS": [], "SYMGRP": [],
                   "STRUC": [], "CLASS": [], "SITE": []}

    for line in lines:
        if line != "" and not line.isspace():
            if not line[0].isspace():
                cat = line.split()[0]
            if cat in struc_lines:
                struc_lines[cat].append(line)
            else:
                pass

    for cat in struc_lines:
        struc_lines[cat] = " ".join(struc_lines[cat]).replace("= ", "=")

    structure_tokens = {"ALAT": None, "PLAT": [], "CLASS": [], "SITE": []}

    for cat in ["STRUC", "CLASS", "SITE"]:
        fields = struc_lines[cat].split("=")
        for f, field in enumerate(fields):
            token = field.split()[-1]
            if token == "ALAT":
                alat = round(float(fields[f + 1].split()[0]), sigfigs)
                structure_tokens["ALAT"] = alat
            elif token == "ATOM":
                atom = fields[f + 1].split()[0]
                if not bool(re.match("E[0-9]*$", atom)):
                    if cat == "CLASS":
                        structure_tokens["CLASS"].append(atom)
                    else:
                        structure_tokens["SITE"].append({"ATOM": atom})
                else:
                    pass
            elif token in ["PLAT", "POS"]:
                try:
                    arr = np.array([round(float(i), sigfigs)
                                    for i in fields[f + 1].split()])
                except ValueError:
                    arr = np.array([round(float(i), sigfigs)
                                    for i in fields[f + 1].split()[:-1]])
                if token == "PLAT":
                    structure_tokens["PLAT"] = arr.reshape([3, 3])
                elif not bool(re.match("E[0-9]*$", atom)):
                    structure_tokens["SITE"][-1]["POS"] = arr
                else:
                    pass
            else:
                pass

    try:
        spcgrp_index = struc_lines["SYMGRP"].index("SPCGRP")
        spcgrp = struc_lines["SYMGRP"][spcgrp_index:spcgrp_index + 12]
        structure_tokens["SPCGRP"] = spcgrp.split("=")[1].split()[0]
    except ValueError:
        pass

    for token in ["HEADER", "VERS"]:
        try:
            value = re.split(token + r"\s*", struc_lines[token])[1]
            structure_tokens[token] = value.strip()
        except IndexError:
            pass

    return LMTOCtrl.from_dict(structure_tokens)
140,623
Subroutine to extract bond label, site indices, and length from a COPL header line. The site indices are zero-based, so they can be easily used with a Structure object. Example header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang. Args: line: line in the COHPCAR header describing the bond. Returns: The bond label, the bond length and a tuple of the site indices.
def _get_bond_data(line):
    line = line.split()
    length = float(line[2])

    # Replacing "/" with "-" makes splitting easier
    sites = line[0].replace("/", "-").split("-")
    site_indices = tuple(int(ind) - 1 for ind in sites[1:4:2])
    species = tuple(re.split(r"\d+", spec)[0] for spec in sites[0:3:2])
    label = "%s%d-%s%d" % (species[0], site_indices[0] + 1,
                           species[1], site_indices[1] + 1)
    return label, length, site_indices
140,626
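A quick standalone check of the parsing logic above, run on the example header line from the docstring (plain `re` and slicing only; no pymatgen import needed):

import re

line = "Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.".split()
length = float(line[2])  # 2.482
sites = line[0].replace("/", "-").split("-")
site_indices = tuple(int(i) - 1 for i in sites[1:4:2])         # (0, 0)
species = tuple(re.split(r"\d+", s)[0] for s in sites[0:3:2])  # ('Fe', 'Fe')
label = "%s%d-%s%d" % (species[0], site_indices[0] + 1,
                       species[1], site_indices[1] + 1)
print(label, length, site_indices)  # Fe1-Fe1 2.482 (0, 0)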
Given a structure, returns the predicted volume. Args: structure (Structure): structure w/unknown volume ref_structure (Structure): A reference structure with a similar structure but different species. Returns: a float value of the predicted volume
def predict(self, structure, ref_structure):
    if self.check_isostructural:
        m = StructureMatcher()
        mapping = m.get_best_electronegativity_anonymous_mapping(
            structure, ref_structure)
        if mapping is None:
            raise ValueError("Input structures do not match!")

    if "ionic" in self.radii_type:
        try:
            # Use BV analyzer to determine oxidation states only if the
            # oxidation states are not already specified in the structure
            # and use_bv is true.
            if (not is_ox(structure)) and self.use_bv:
                a = BVAnalyzer()
                structure = a.get_oxi_state_decorated_structure(structure)
            if (not is_ox(ref_structure)) and self.use_bv:
                a = BVAnalyzer()
                ref_structure = a.get_oxi_state_decorated_structure(
                    ref_structure)

            comp = structure.composition
            ref_comp = ref_structure.composition

            # Check if all the associated ionic radii are available.
            if any([k.ionic_radius is None for k in list(comp.keys())]) or \
                    any([k.ionic_radius is None for k in list(ref_comp.keys())]):
                raise ValueError("Not all the ionic radii are available!")

            numerator = 0
            denominator = 0
            # Here, the 1/3 factor on the composition accounts for atomic
            # packing. We want the number per unit length.
            for k, v in comp.items():
                numerator += k.ionic_radius * v ** (1 / 3)
            for k, v in ref_comp.items():
                denominator += k.ionic_radius * v ** (1 / 3)

            return ref_structure.volume * (numerator / denominator) ** 3
        except Exception as ex:
            warnings.warn("Exception occurred. Will attempt atomic radii.")
            # If an error occurs during use of the ionic radii scheme, pass
            # and see if we can resolve it using atomic radii.
            pass

    if "atomic" in self.radii_type:
        comp = structure.composition
        ref_comp = ref_structure.composition
        # Here, the 1/3 factor on the composition accounts for atomic
        # packing. We want the number per unit length.
        numerator = 0
        denominator = 0
        for k, v in comp.items():
            numerator += k.atomic_radius * v ** (1 / 3)
        for k, v in ref_comp.items():
            denominator += k.atomic_radius * v ** (1 / 3)
        return ref_structure.volume * (numerator / denominator) ** 3

    raise ValueError("Cannot find volume scaling based on radii choices "
                     "specified!")
140,643
Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume ref_structure (Structure): A reference structure with a similar structure but different species. Returns: a Structure object with predicted volume
def get_predicted_structure(self, structure, ref_structure):
    new_structure = structure.copy()
    new_structure.scale_lattice(self.predict(structure, ref_structure))
    return new_structure
140,644
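The prediction above reduces to the scaling relation V_pred = V_ref * (sum_i r_i n_i^(1/3) / sum_j r_j n_j^(1/3))^3. A standalone numeric sketch using Shannon ionic radii for Na+, K+ and Cl-; the reference volume is invented for illustration:

# target and reference compositions as {species: (ionic radius in A, count)}
comp = {"Na+": (1.02, 4), "Cl-": (1.81, 4)}
ref_comp = {"K+": (1.38, 4), "Cl-": (1.81, 4)}
ref_volume = 252.0  # hypothetical reference cell volume in A^3

num = sum(r * n ** (1 / 3) for r, n in comp.values())
den = sum(r * n ** (1 / 3) for r, n in ref_comp.values())
print(ref_volume * (num / den) ** 3)  # ~176 A^3: Na+ is smaller than K+, so the cell shrinks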
Given a structure, returns the predicted volume. Args: structure (Structure) : a crystal structure with an unknown volume. icsd_vol (bool) : True if the input structure's volume comes from ICSD. Returns: a float value of the predicted volume.
def predict(self, structure, icsd_vol=False):
    # Get standard deviation of electronegativity in the structure.
    std_x = np.std([site.specie.X for site in structure])
    # Sites that have atomic radii
    sub_sites = []
    # Record the "DLS estimated radius" from bond_params.
    bp_dict = {}

    for sp in list(structure.composition.keys()):
        if sp.atomic_radius:
            sub_sites.extend([site for site in structure
                              if site.specie == sp])
        else:
            warnings.warn("VolumePredictor: no atomic radius data for "
                          "{}".format(sp))

        if sp.symbol not in bond_params:
            warnings.warn("VolumePredictor: bond parameters not found, "
                          "used atomic radii for {}".format(sp))
        else:
            r, k = bond_params[sp.symbol]["r"], bond_params[sp.symbol]["k"]
            bp_dict[sp] = float(r) + float(k) * std_x

    # Structure object that includes only sites with known atomic radii.
    reduced_structure = Structure.from_sites(sub_sites)
    smallest_ratio = None

    for site1 in reduced_structure:
        sp1 = site1.specie
        neighbors = reduced_structure.get_neighbors(
            site1, sp1.atomic_radius + self.cutoff)

        for site2, dist in neighbors:
            sp2 = site2.specie

            if sp1 in bp_dict and sp2 in bp_dict:
                expected_dist = bp_dict[sp1] + bp_dict[sp2]
            else:
                expected_dist = sp1.atomic_radius + sp2.atomic_radius

            if not smallest_ratio or dist / expected_dist < smallest_ratio:
                smallest_ratio = dist / expected_dist

    if not smallest_ratio:
        raise ValueError("Could not find any bonds within the given cutoff "
                         "in this structure.")

    volume_factor = (1 / smallest_ratio) ** 3

    # icsd volume fudge factor
    if icsd_vol:
        volume_factor *= 1.05

    if self.min_scaling:
        volume_factor = max(self.min_scaling, volume_factor)
    if self.max_scaling:
        volume_factor = min(self.max_scaling, volume_factor)

    return structure.volume * volume_factor
140,646
Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume Returns: a Structure object with predicted volume
def get_predicted_structure(self, structure, icsd_vol=False):
    new_structure = structure.copy()
    new_structure.scale_lattice(self.predict(structure, icsd_vol=icsd_vol))
    return new_structure
140,647
Get the inchi canonical labels of the heavy atoms in the molecule Args: mol: The molecule. OpenBabel OBMol object Returns: The label mappings: a tuple of canonical labels (one per heavy atom), a list of groups of equivalent atoms, and the InChI string.
def _inchi_labels(mol):
    obconv = ob.OBConversion()
    obconv.SetOutFormat(str("inchi"))
    obconv.AddOption(str("a"), ob.OBConversion.OUTOPTIONS)
    obconv.AddOption(str("X"), ob.OBConversion.OUTOPTIONS, str("DoNotAddH"))
    inchi_text = obconv.WriteString(mol)
    match = re.search(r"InChI=(?P<inchi>.+)\nAuxInfo=.+"
                      r"/N:(?P<labels>[0-9,;]+)/(E:(?P<eq_atoms>[0-9,"
                      r";\(\)]*)/)?", inchi_text)
    inchi = match.group("inchi")
    label_text = match.group("labels")
    eq_atom_text = match.group("eq_atoms")
    heavy_atom_labels = tuple([int(i) for i in label_text.replace(
        ';', ',').split(',')])
    eq_atoms = []
    if eq_atom_text is not None:
        eq_tokens = re.findall(r'\(((?:[0-9]+,)+[0-9]+)\)',
                               eq_atom_text.replace(';', ','))
        eq_atoms = tuple([tuple([int(i) for i in t.split(',')])
                          for t in eq_tokens])
    return heavy_atom_labels, eq_atoms, inchi
140,652
Calculate the centroid of a group of atoms indexed by the labels of inchi Args: mol: The molecule. OpenBabel OBMol object ilabels: inchi label map group_atoms: labels of the atoms in the group Returns: Centroid. Tuple (x, y, z)
def _group_centroid(mol, ilabels, group_atoms):
    c1x, c1y, c1z = 0.0, 0.0, 0.0
    for i in group_atoms:
        orig_idx = ilabels[i - 1]
        oa1 = mol.GetAtom(orig_idx)
        c1x += float(oa1.x())
        c1y += float(oa1.y())
        c1z += float(oa1.z())
    num_atoms = len(group_atoms)
    c1x /= num_atoms
    c1y /= num_atoms
    c1z /= num_atoms
    return c1x, c1y, c1z
140,653
Create a virtual molecule from the unique atoms and the centroids of the equivalent atoms Args: mol: The molecule. OpenBabel OBMol object ilabels: inchi label map eq_atoms: equivalent atom labels Return: The virtual molecule
def _virtual_molecule(self, mol, ilabels, eq_atoms):
    vmol = ob.OBMol()

    non_unique_atoms = set([a for g in eq_atoms for a in g])
    all_atoms = set(range(1, len(ilabels) + 1))
    unique_atom_labels = sorted(all_atoms - non_unique_atoms)

    # try to align molecules using unique atoms
    for i in unique_atom_labels:
        orig_idx = ilabels[i - 1]
        oa1 = mol.GetAtom(orig_idx)
        a1 = vmol.NewAtom()
        a1.SetAtomicNum(oa1.GetAtomicNum())
        a1.SetVector(oa1.GetVector())

    # try to align using centroids of the equivalent atoms
    if vmol.NumAtoms() < 3:
        for symm in eq_atoms:
            c1x, c1y, c1z = self._group_centroid(mol, ilabels, symm)
            min_distance = float("inf")
            for i in range(1, vmol.NumAtoms() + 1):
                va = vmol.GetAtom(i)
                distance = math.sqrt((c1x - va.x()) ** 2 +
                                     (c1y - va.y()) ** 2 +
                                     (c1z - va.z()) ** 2)
                if distance < min_distance:
                    min_distance = distance
            if min_distance > 0.2:
                a1 = vmol.NewAtom()
                a1.SetAtomicNum(9)
                a1.SetVector(c1x, c1y, c1z)

    return vmol
140,654
Align the labels of topologically identical atoms of the second molecule to the first molecule Args: mol1: First molecule. OpenBabel OBMol object mol2: Second molecule. OpenBabel OBMol object heavy_indices1: inchi label map of the first molecule heavy_indices2: label map of the second molecule Return: corrected label map of all atoms of the second molecule
def _align_hydrogen_atoms(mol1, mol2, heavy_indices1, heavy_indices2):
    num_atoms = mol2.NumAtoms()
    all_atom = set(range(1, num_atoms + 1))
    hydrogen_atoms1 = all_atom - set(heavy_indices1)
    hydrogen_atoms2 = all_atom - set(heavy_indices2)
    label1 = heavy_indices1 + tuple(hydrogen_atoms1)
    label2 = heavy_indices2 + tuple(hydrogen_atoms2)

    cmol1 = ob.OBMol()
    for i in label1:
        oa1 = mol1.GetAtom(i)
        a1 = cmol1.NewAtom()
        a1.SetAtomicNum(oa1.GetAtomicNum())
        a1.SetVector(oa1.GetVector())
    cmol2 = ob.OBMol()
    for i in label2:
        oa2 = mol2.GetAtom(i)
        a2 = cmol2.NewAtom()
        a2.SetAtomicNum(oa2.GetAtomicNum())
        a2.SetVector(oa2.GetVector())

    aligner = ob.OBAlign(False, False)
    aligner.SetRefMol(cmol1)
    aligner.SetTargetMol(cmol2)
    aligner.Align()
    aligner.UpdateCoords(cmol2)

    hydrogen_label2 = []
    hydrogen_label1 = list(range(len(heavy_indices1) + 1, num_atoms + 1))
    for h2 in range(len(heavy_indices2) + 1, num_atoms + 1):
        distance = 99999.0
        idx = hydrogen_label1[0]
        a2 = cmol2.GetAtom(h2)
        for h1 in hydrogen_label1:
            a1 = cmol1.GetAtom(h1)
            d = a1.GetDistance(a2)
            if d < distance:
                distance = d
                idx = h1
        hydrogen_label2.append(idx)
        hydrogen_label1.remove(idx)

    hydrogen_orig_idx2 = label2[len(heavy_indices2):]
    hydrogen_canon_orig_map2 = [(canon, orig) for canon, orig
                                in zip(hydrogen_label2, hydrogen_orig_idx2)]
    hydrogen_canon_orig_map2.sort(key=lambda m: m[0])
    hydrogen_canon_indices2 = [x[1] for x in hydrogen_canon_orig_map2]

    canon_label1 = label1
    canon_label2 = heavy_indices2 + tuple(hydrogen_canon_indices2)

    return canon_label1, canon_label2
140,656
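The hydrogen-pairing step above is a greedy nearest-neighbor assignment; a standalone numpy sketch of the same idea on made-up coordinates:

import numpy as np

ref = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
tgt = np.array([[1.02, 0.0, 0.0], [0.01, 0.98, 0.0], [0.0, 0.05, 0.0]])

unmatched = list(range(len(ref)))
pairs = []
for j, p in enumerate(tgt):
    # pick the closest still-unmatched reference atom, then retire it
    i = min(unmatched, key=lambda k: np.linalg.norm(ref[k] - p))
    pairs.append((i, j))
    unmatched.remove(i)
print(pairs)  # [(1, 0), (2, 1), (0, 2)]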
Get the elements of the atoms in the specified order Args: mol: The molecule. OpenBabel OBMol object. label: The atom indices. List of integers. Returns: Elements. List of integers.
def _get_elements(mol, label):
    elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]
    return elements
140,657
Determine whether the molecule is linear Args: mol: The molecule. OpenBabel OBMol object. Returns: Boolean value.
def _is_molecule_linear(self, mol):
    if mol.NumAtoms() < 3:
        return True
    a1 = mol.GetAtom(1)
    a2 = mol.GetAtom(2)
    for i in range(3, mol.NumAtoms() + 1):
        angle = float(mol.GetAtom(i).GetAngle(a2, a1))
        if angle < 0.0:
            angle = -angle
        if angle > 90.0:
            angle = 180.0 - angle
        if angle > self._angle_tolerance:
            return False
    return True
140,658
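The linearity test above folds each angle into [0, 90] degrees so that both 0 and 180 count as "on the line". A standalone sketch of the same check with plain numpy (points instead of OBAtom objects):

import numpy as np

def bend_angle(p, vertex, q):
    # angle p-vertex-q in degrees, folded so 0 means collinear
    u, v = p - vertex, q - vertex
    cos_t = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    angle = np.degrees(np.arccos(np.clip(cos_t, -1.0, 1.0)))
    return 180.0 - angle if angle > 90.0 else angle

pts = np.array([[0., 0., 0.], [1., 0., 0.], [2.05, 0.01, 0.]])
print(bend_angle(pts[2], pts[1], pts[0]))  # ~0.55 deg, effectively linear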
Fit two molecules. Args: mol1: First molecule. OpenBabel OBMol or pymatgen Molecule object mol2: Second molecule. OpenBabel OBMol or pymatgen Molecule object Returns: A boolean value indicating whether the two molecules are the same.
def fit(self, mol1, mol2):
    return self.get_rmsd(mol1, mol2) < self._tolerance
140,662
Calculate the RMSD. Args: mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule object mol2: The second molecule. OpenBabel OBMol or pymatgen Molecule object clabel1: The atom indices that can reorder the first molecule to uniform atom order clabel2: The atom indices that can reorder the second molecule to uniform atom order Returns: The RMSD.
def _calc_rms(mol1, mol2, clabel1, clabel2):
    obmol1 = BabelMolAdaptor(mol1).openbabel_mol
    obmol2 = BabelMolAdaptor(mol2).openbabel_mol

    cmol1 = ob.OBMol()
    for i in clabel1:
        oa1 = obmol1.GetAtom(i)
        a1 = cmol1.NewAtom()
        a1.SetAtomicNum(oa1.GetAtomicNum())
        a1.SetVector(oa1.GetVector())
    cmol2 = ob.OBMol()
    for i in clabel2:
        oa2 = obmol2.GetAtom(i)
        a2 = cmol2.NewAtom()
        a2.SetAtomicNum(oa2.GetAtomicNum())
        a2.SetVector(oa2.GetVector())

    aligner = ob.OBAlign(True, False)
    aligner.SetRefMol(cmol1)
    aligner.SetTargetMol(cmol2)
    aligner.Align()
    return aligner.GetRMSD()
140,664
Group molecules by structural equality. Args: mol_list: List of OpenBabel OBMol or pymatgen objects Returns: A list of lists of matched molecules Assumption: if s1=s2 and s2=s3, then s1=s3 This may not be true for small tolerances.
def group_molecules(self, mol_list):
    mol_hash = [(i, self._mapper.get_molecule_hash(m))
                for i, m in enumerate(mol_list)]
    mol_hash.sort(key=lambda x: x[1])

    # Use molecular hash to pre-group molecules.
    raw_groups = tuple([tuple([m[0] for m in g]) for k, g
                        in itertools.groupby(mol_hash, key=lambda x: x[1])])

    group_indices = []
    for rg in raw_groups:
        mol_eq_test = [(p[0], p[1], self.fit(mol_list[p[0]], mol_list[p[1]]))
                       for p in itertools.combinations(sorted(rg), 2)]
        mol_eq = set([(p[0], p[1]) for p in mol_eq_test if p[2]])
        not_alone_mols = set(itertools.chain.from_iterable(mol_eq))
        alone_mols = set(rg) - not_alone_mols
        group_indices.extend([[m] for m in alone_mols])
        while len(not_alone_mols) > 0:
            current_group = {not_alone_mols.pop()}
            while len(not_alone_mols) > 0:
                candidate_pairs = set(
                    [tuple(sorted(p)) for p
                     in itertools.product(current_group, not_alone_mols)])
                mutual_pairs = candidate_pairs & mol_eq
                if len(mutual_pairs) == 0:
                    break
                mutual_mols = set(itertools.chain
                                  .from_iterable(mutual_pairs))
                current_group |= mutual_mols
                not_alone_mols -= mutual_mols
            group_indices.append(sorted(current_group))

    group_indices.sort(key=lambda x: (len(x), -x[0]), reverse=True)
    all_groups = [[mol_list[i] for i in g] for g in group_indices]
    return all_groups
140,665
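The inner while loops above grow connected components from the set of pairwise matches. The same transitive-closure idea, as a standalone sketch (hypothetical indices, not tied to any molecule list):

def components(n, eq_pairs):
    # group indices 0..n-1 into connected components given equivalent pairs
    groups = []
    remaining = set(range(n))
    while remaining:
        comp = {remaining.pop()}
        grew = True
        while grew:
            grew = False
            for a, b in eq_pairs:
                if (a in comp) != (b in comp):
                    comp |= {a, b}
                    remaining.discard(a)
                    remaining.discard(b)
                    grew = True
        groups.append(sorted(comp))
    return groups

print(components(5, {(0, 2), (2, 4)}))  # [[0, 2, 4], [1], [3]] (group order may vary)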
Remove all the configurations that do not satisfy the given condition. Args: condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax key: Selects the sub-dictionary on which condition is applied, e.g. key="vars" if we have to filter the configurations depending on the values in vars
def select_with_condition(self, condition, key=None):
    condition = Condition.as_condition(condition)
    new_confs = []

    for conf in self:
        # Select the object on which condition is applied
        obj = conf if key is None else AttrDict(conf[key])
        add_it = condition(obj=obj)
        #if key is "vars": print("conf", conf, "added:", add_it)
        if add_it:
            new_confs.append(conf)

    self._confs = new_confs
140,675
Given a list of parallel configurations, pconfs, this method selects an `optimal` configuration according to some criterion as well as the :class:`QueueAdapter` to use. Args: pconfs: :class:`ParalHints` object with the list of parallel configurations Returns: :class:`ParallelConf` object with the `optimal` configuration.
def select_qadapter(self, pconfs):
    # Order the list of configurations according to policy.
    policy, max_ncpus = self.policy, self.max_cores
    pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)

    if policy.precedence == "qadapter":
        # Try to run on the qadapter with the highest priority.
        for qadpos, qad in enumerate(self.qads):
            possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]

            if qad.allocation == "nodes":
                #if qad.allocation in ["nodes", "force_nodes"]:
                # Select the configuration divisible by nodes if possible.
                for pconf in possible_pconfs:
                    if pconf.num_cores % qad.hw.cores_per_node == 0:
                        return self._use_qadpos_pconf(qadpos, pconf)

            # Here we select the first one.
            if possible_pconfs:
                return self._use_qadpos_pconf(qadpos, possible_pconfs[0])

    elif policy.precedence == "autoparal_conf":
        # Try to run on the first pconf irrespectively of the priority of the qadapter.
        for pconf in pconfs:
            for qadpos, qad in enumerate(self.qads):
                if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
                    continue  # Ignore it. not very clean
                if qad.can_run_pconf(pconf):
                    return self._use_qadpos_pconf(qadpos, pconf)

    else:
        raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)

    # No qadapter could be found
    raise RuntimeError("Cannot find qadapter for this run!")
140,692
Build the input files and submit the task via the :class:`Qadapter` Args: task: :class:`TaskObject` Returns: Process object.
def launch(self, task, **kwargs):
    if task.status == task.S_LOCKED:
        raise ValueError("You shall not submit a locked task!")

    # Build the task
    task.build()

    # Pass information on the time limit to Abinit (we always assume ndtset == 1)
    if isinstance(task, AbinitTask):
        args = kwargs.get("exec_args", [])
        if args is None:
            args = []
        args = args[:]
        args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
        kwargs["exec_args"] = args

    # Write the submission script
    script_file = self.write_jobfile(task, **kwargs)

    # Submit the task and save the queue id.
    try:
        qjob, process = self.qadapter.submit_to_queue(script_file)
        task.set_status(task.S_SUB, msg='Submitted to queue')
        task.set_qjob(qjob)
        return process
    except self.qadapter.MaxNumLaunchesError as exc:
        # TODO: Here we should try to switch to another qadapter
        # 1) Find a new parallel configuration in those stored in task.pconfs
        # 2) Change the input file.
        # 3) Regenerate the submission script
        # 4) Relaunch
        task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
        raise
140,696
Set and return the status of the task. Args: status: Status object or string representation of the status msg: string with human-readable message used in the case of errors.
def set_status(self, status, msg):
    # truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
    if len(msg) > 2000:
        msg = msg[:2000]
        msg += "\n... snip ...\n"

    # Locked files must be explicitly unlocked
    if self.status == self.S_LOCKED or status == self.S_LOCKED:
        err_msg = (
            "Locked files must be explicitly unlocked before calling set_status but\n"
            "task.status = %s, input status = %s" % (self.status, status))
        raise RuntimeError(err_msg)

    status = Status.as_status(status)

    changed = True
    if hasattr(self, "_status"):
        changed = (status != self._status)

    self._status = status

    if status == self.S_RUN:
        # Set datetimes.start when the task enters S_RUN
        if self.datetimes.start is None:
            self.datetimes.start = datetime.datetime.now()

    # Add new entry to history only if the status has changed.
    if changed:
        if status == self.S_SUB:
            self.datetimes.submission = datetime.datetime.now()
            self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
                self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
        elif status == self.S_OK:
            self.history.info("Task completed %s", msg)
        elif status == self.S_ABICRITICAL:
            self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)
        else:
            self.history.info("Status changed to %s. msg: %s", status, msg)

    #######################################################
    # The section below contains callbacks that should not
    # be executed if we are in spectator_mode
    #######################################################
    if status == self.S_DONE:
        # Execute the callback
        self._on_done()

    if status == self.S_OK:
        # Finalize the task.
        if not self.finalized:
            self._on_ok()

            # here we remove the output files of the task and of its parents.
            if self.gc is not None and self.gc.policy == "task":
                self.clean_output_files()

        if self.status == self.S_OK:
            # Because _on_ok might have changed the status.
            self.send_signal(self.S_OK)

    return status
140,730
Analyzes the main logfile of the calculation for possible Errors or Warnings. If the ABINIT abort file is found, the errors found in this file are added to the output report. Args: source: "output" for the main output file, "log" for the log file. Returns: :class:`EventReport` instance or None if the source file does not exist.
def get_event_report(self, source="log"):
    # By default, we inspect the main log file.
    ofile = {
        "output": self.output_file,
        "log": self.log_file}[source]

    parser = events.EventsParser()

    if not ofile.exists:
        if not self.mpiabort_file.exists:
            return None
        else:
            # ABINIT abort file without log!
            abort_report = parser.parse(self.mpiabort_file.path)
            return abort_report

    try:
        report = parser.parse(ofile.path)
        #self._prev_reports[source] = report

        # Add events found in the ABI_MPIABORTFILE.
        if self.mpiabort_file.exists:
            logger.critical("Found ABI_MPIABORTFILE!!!!!")
            abort_report = parser.parse(self.mpiabort_file.path)
            if len(abort_report) != 1:
                logger.critical("Found more than one event in ABI_MPIABORTFILE")

            # Weird case: empty abort file, let's skip the part
            # below and hope that the log file contains the error message.
            #if not len(abort_report): return report

            # Add it to the initial report only if it differs
            # from the last one found in the main log file.
            last_abort_event = abort_report[-1]
            if report and last_abort_event != report[-1]:
                report.append(last_abort_event)
            else:
                report.append(last_abort_event)

        return report

    #except parser.Error as exc:
    except Exception as exc:
        # Return a report with an error entry with info on the exception.
        msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc))
        self.set_status(self.S_ABICRITICAL, msg=msg)
        return parser.report_exception(ofile.path, exc)
140,735
This method is called when the task reaches S_OK. It removes all the output files produced by the task that are not needed by its children as well as the output files produced by its parents if no other node needs them. Args: follow_parents: If true, the output files of the parents nodes will be removed if possible. Return: list with the absolute paths of the files that have been removed.
def clean_output_files(self, follow_parents=True):
    paths = []
    if self.status != self.S_OK:
        logger.warning("Calling task.clean_output_files on a task whose status != S_OK")

    # Remove all files in tmpdir.
    self.tmpdir.clean()

    # Find the file extensions that should be preserved since these files are still
    # needed by the children who haven't reached S_OK
    except_exts = set()
    for child in self.get_children():
        if child.status == self.S_OK:
            continue
        # Find the position of self in child.deps and add the extensions.
        i = [dep.node for dep in child.deps].index(self)
        except_exts.update(child.deps[i].exts)

    # Remove the files in the outdir of the task but keep except_exts.
    exts = self.gc.exts.difference(except_exts)
    #print("Will remove its extensions: ", exts)
    paths += self.outdir.remove_exts(exts)
    if not follow_parents:
        return paths

    # Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
    for parent in self.get_parents():
        # Here we build a dictionary file extension --> list of child nodes requiring this file from parent
        # e.g {"WFK": [node1, node2]}
        ext2nodes = collections.defaultdict(list)
        for child in parent.get_children():
            if child.status == child.S_OK:
                continue
            i = [d.node for d in child.deps].index(parent)
            for ext in child.deps[i].exts:
                ext2nodes[ext].append(child)

        # Remove extension only if no node depends on it!
        except_exts = [k for k, lst in ext2nodes.items() if lst]
        exts = self.gc.exts.difference(except_exts)
        #print("%s removes extensions %s from parent node %s" % (self, exts, parent))
        paths += parent.outdir.remove_exts(exts)

    self.history.info("Removed files: %s" % paths)
    return paths
140,740
Create an instance of `AbinitTask` from an ABINIT input. Args: input: `AbinitInput` object. workdir: Path to the working directory. manager: :class:`TaskManager` object.
def from_input(cls, input, workdir=None, manager=None):
    return cls(input, workdir=workdir, manager=manager)
140,744
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc. Mainly used for invoking Abinit to get important parameters needed to prepare the real task. Args: mpi_procs: Number of MPI processes to use.
def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
    # Build a simple manager to run the job in a shell subprocess
    import tempfile
    workdir = tempfile.mkdtemp() if workdir is None else workdir
    if manager is None:
        manager = TaskManager.from_user_config()

    # Construct the task and run it
    task = cls.from_input(inp, workdir=workdir,
                          manager=manager.to_shell_manager(mpi_procs=mpi_procs))
    task.set_name('temp_shell_task')
    return task
140,745
Helper function used to select the files of a task. Args: what: string with the list of characters selecting the file type Possible choices: i ==> input_file, o ==> output_file, f ==> files_file, j ==> job_file, l ==> log_file, e ==> stderr_file, q ==> qout_file, all ==> all files.
def select_files(self, what="o"):
    choices = collections.OrderedDict([
        ("i", self.input_file),
        ("o", self.output_file),
        ("f", self.files_file),
        ("j", self.job_file),
        ("l", self.log_file),
        ("e", self.stderr_file),
        ("q", self.qout_file),
    ])

    if what == "all":
        return [getattr(v, "path") for v in choices.values()]

    selected = []
    for c in what:
        try:
            selected.append(getattr(choices[c], "path"))
        except KeyError:
            logger.warning("Wrong keyword %s" % c)

    return selected
140,753
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via the shell with 1 MPI proc. Mainly used for post-processing the DDB files. Args: mpi_procs: Number of MPI processes to use. anaddb_input: string with the anaddb variables. ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath. See `AnaddbInit` for the meaning of the other arguments.
def temp_shell_task(cls, inp, ddb_node, mpi_procs=1,
                    gkk_node=None, md_node=None, ddk_node=None,
                    workdir=None, manager=None):
    # Build a simple manager to run the job in a shell subprocess
    import tempfile
    workdir = tempfile.mkdtemp() if workdir is None else workdir
    if manager is None:
        manager = TaskManager.from_user_config()

    # Construct the task and run it
    return cls(inp, ddb_node,
               gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
               workdir=workdir,
               manager=manager.to_shell_manager(mpi_procs=mpi_procs))
140,796
Reads and returns a pymatgen structure from a NetCDF file containing crystallographic data in the ETSF-IO format. Args: ncdata: filename or NetcdfReader instance. site_properties: Dictionary with site properties. cls: The Structure class to instantiate.
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):
    ncdata, closeit = as_ncreader(ncdata)

    # TODO check whether atomic units are used
    lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang")

    red_coords = ncdata.read_value("reduced_atom_positions")
    natom = len(red_coords)

    znucl_type = ncdata.read_value("atomic_numbers")

    # type_atom[0:natom] --> index Between 1 and number of atom species
    type_atom = ncdata.read_value("atom_species")

    # Fortran to C index and float --> int conversion.
    species = natom * [None]
    for atom in range(natom):
        type_idx = type_atom[atom] - 1
        species[atom] = int(znucl_type[type_idx])

    d = {}
    if site_properties is not None:
        for prop in site_properties:
            d[prop] = ncdata.read_value(prop)

    structure = cls(lattice, species, red_coords, site_properties=d)

    # Quick and dirty hack.
    # I need an abipy structure since I need to_abivars and other methods.
    try:
        from abipy.core.structure import Structure as AbipyStructure
        structure.__class__ = AbipyStructure
    except ImportError:
        pass

    if closeit:
        ncdata.close()

    return structure
140,806
Returns the value of a dimension. Args: dimname: Name of the variable path: path to the group. default: return `default` if `dimname` is not present and `default` is not `NO_DEFAULT` else raise self.Error.
def read_dimvalue(self, dimname, path="/", default=NO_DEFAULT):
    try:
        dim = self._read_dimensions(dimname, path=path)[0]
        return len(dim)
    except self.Error:
        if default is NO_DEFAULT:
            raise
        return default
140,810
String representation. kwargs are passed to `pprint.pformat`. Args: verbose: Verbosity level title: Title string.
def to_string(self, verbose=0, title=None, **kwargs):
    from pprint import pformat
    s = pformat(self, **kwargs)
    if title is not None:
        return "\n".join([marquee(title, mark="="), s])
    return s
140,819
Add an electrode to the plot. Args: electrode: An electrode. All electrodes satisfying the AbstractElectrode interface should work. label: A label for the electrode. If None, defaults to a counting system, i.e. 'Electrode 1', 'Electrode 2', ...
def add_electrode(self, electrode, label=None):
    if not label:
        label = "Electrode {}".format(len(self._electrodes) + 1)
    self._electrodes[label] = electrode
140,820
Returns a plot object. Args: width: Width of the plot. Defaults to 8 in. height: Height of the plot. Defaults to 8 in. Returns: A matplotlib plot object.
def get_plot(self, width=8, height=8):
    plt = pretty_plot(width, height)
    for label, electrode in self._electrodes.items():
        (x, y) = self.get_plot_data(electrode)
        plt.plot(x, y, '-', linewidth=2, label=label)

    plt.legend()
    if self.xaxis == "capacity":
        plt.xlabel('Capacity (mAh/g)')
    else:
        plt.xlabel('Fraction')
    plt.ylabel('Voltage (V)')
    plt.tight_layout()
    return plt
140,822
Save the plot to an image file. Args: filename: Filename to save to. image_format: Format to save to. Defaults to eps.
def save(self, filename, image_format="eps", width=8, height=6):
    self.get_plot(width, height).savefig(filename, format=image_format)
140,823
Writes a set of VASP input to a directory. Args: output_dir (str): Directory to output the VASP input files make_dir_if_not_present (bool): Set to True if you want the directory (and the whole path) to be created if it is not present. include_cif (bool): Whether to write a CIF file in the output directory for easier opening by VESTA.
def write_input(self, output_dir,
                make_dir_if_not_present=True, include_cif=False):
    vinput = self.get_vasp_input()
    vinput.write_input(
        output_dir, make_dir_if_not_present=make_dir_if_not_present)
    if include_cif:
        s = vinput["POSCAR"].structure
        fname = Path(output_dir) / ("%s.cif" % re.sub(r'\s', "", s.formula))
        s.to(filename=fname)
140,831
Create a Tensor object. Note that the constructor uses __new__ rather than __init__ according to the standard method of subclassing numpy ndarrays. Args: input_array: (array-like with shape 3^N): array-like representing a tensor quantity in standard (i.e. non-voigt) notation vscale: (N x M array-like): a matrix corresponding to the coefficients of the voigt-notation tensor
def __new__(cls, input_array, vscale=None, check_rank=None):
    obj = np.asarray(input_array).view(cls)
    obj.rank = len(obj.shape)

    if check_rank and check_rank != obj.rank:
        raise ValueError("{} input must be rank {}".format(
            obj.__class__.__name__, check_rank))

    vshape = tuple([3] * (obj.rank % 2) + [6] * (obj.rank // 2))
    obj._vscale = np.ones(vshape)
    if vscale is not None:
        obj._vscale = vscale
    if obj._vscale.shape != vshape:
        raise ValueError("Voigt scaling matrix must be the shape of the "
                         "voigt notation matrix or vector.")
    if not all([i == 3 for i in obj.shape]):
        raise ValueError("Pymatgen only supports 3-dimensional tensors, "
                         "and default tensor constructor uses standard "
                         "notation. To construct from voigt notation, use"
                         " {}.from_voigt".format(obj.__class__.__name__))

    return obj
140,879
Applies a rotation directly, and tests input matrix to ensure a valid rotation. Args: matrix (3x3 array-like): rotation matrix to be applied to tensor tol (float): tolerance for testing rotation matrix validity
def rotate(self, matrix, tol=1e-3):
    matrix = SquareTensor(matrix)
    if not matrix.is_rotation(tol):
        raise ValueError("Rotation matrix is not valid.")
    sop = SymmOp.from_rotation_and_translation(matrix, [0., 0., 0.])
    return self.transform(sop)
140,883
Convenience method for projection of a tensor into a vector. Returns the tensor dotted into a unit vector along the input n. Args: n (3x1 array-like): direction to project onto Returns (float): scalar value corresponding to the projection of the tensor into the vector
def project(self, n):
    n = get_uvec(n)
    return self.einsum_sequence([n] * self.rank)
140,885
Method for averaging the tensor projection over the unit sphere with an option for custom quadrature. Args: quad (dict): quadrature for integration, should be dictionary with "points" and "weights" keys; defaults to quadpy.sphere.Lebedev(19) as read from file Returns: Average of tensor projected into vectors on the unit sphere
def average_over_unit_sphere(self, quad=None):
    quad = quad or DEFAULT_QUAD
    weights, points = quad['weights'], quad['points']
    return sum([w * self.project(n) for w, n in zip(weights, points)])
140,886
Wrapper around numpy.round to ensure object of same type is returned Args: decimals: Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns (Tensor): rounded tensor of same type
def round(self, decimals=0):
    return self.__class__(np.round(self, decimals=decimals))
140,889
Returns a tensor that is invariant with respect to symmetry operations corresponding to a structure Args: structure (Structure): structure from which to generate symmetry operations symprec (float): symmetry tolerance for the Spacegroup Analyzer used to generate the symmetry operations
def fit_to_structure(self, structure, symprec=0.1):
    sga = SpacegroupAnalyzer(structure, symprec)
    symm_ops = sga.get_symmetry_operations(cartesian=True)
    return sum([self.transform(symm_op)
                for symm_op in symm_ops]) / len(symm_ops)
140,892
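Group-averaging as used above, sketched standalone for a rank-2 tensor and a hypothetical two-element group (identity plus a 180-degree rotation about z); the rank-2 transform is T'_ij = R_ia R_jb T_ab:

import numpy as np

T = np.array([[1.0, 0.3, 0.2],
              [0.3, 2.0, 0.1],
              [0.2, 0.1, 3.0]])
ops = [np.eye(3), np.diag([-1.0, -1.0, 1.0])]  # identity and C2 about z

T_avg = sum(np.einsum("ia,jb,ab->ij", R, R, T) for R in ops) / len(ops)
print(T_avg)  # the xz and yz components average to zero under this group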
Tests whether a tensor is invariant with respect to the symmetry operations of a particular structure by testing whether the residual of the symmetric portion is below a tolerance Args: structure (Structure): structure to be fit to tol (float): tolerance for symmetry testing
def is_fit_to_structure(self, structure, tol=1e-2):
    return (self - self.fit_to_structure(structure) < tol).all()
140,893
Returns a dictionary that maps indices in the tensor to those in a voigt representation based on input rank Args: rank (int): Tensor rank to generate the voigt map
def get_voigt_dict(rank):
    vdict = {}
    for ind in itertools.product(*[range(3)] * rank):
        v_ind = ind[:rank % 2]
        for j in range(rank // 2):
            pos = rank % 2 + 2 * j
            v_ind += (reverse_voigt_map[ind[pos:pos + 2]],)
        vdict[ind] = v_ind
    return vdict
140,896
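For rank 2 the mapping above reduces to the usual 3x3 -> 6 Voigt contraction. A standalone check, assuming the common reverse map for the xx, yy, zz, yz, xz, xy ordering:

import itertools
import numpy as np

reverse_voigt_map = np.array([[0, 5, 4],
                              [5, 1, 3],
                              [4, 3, 2]])

vdict = {ind: (reverse_voigt_map[ind],)
         for ind in itertools.product(range(3), repeat=2)}
print(vdict[(0, 0)], vdict[(1, 2)], vdict[(0, 1)])  # (0,) (3,) (5,)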
Constructor based on the voigt notation vector or matrix. Args: voigt_input (array-like): voigt input for a given tensor
def from_voigt(cls, voigt_input):
    voigt_input = np.array(voigt_input)
    rank = sum(voigt_input.shape) // 3
    t = cls(np.zeros([3] * rank))
    if voigt_input.shape != t._vscale.shape:
        raise ValueError("Invalid shape for voigt matrix")
    voigt_input = voigt_input / t._vscale
    this_voigt_map = t.get_voigt_dict(rank)
    for ind in this_voigt_map:
        t[ind] = voigt_input[this_voigt_map[ind]]
    return cls(t)
140,897
Given a structure associated with a tensor, determines the rotation matrix for IEEE conversion according to the 1987 IEEE standards. Args: structure (Structure): a structure associated with the tensor to be converted to the IEEE standard refine_rotation (bool): whether to refine the rotation using SquareTensor.refine_rotation
def get_ieee_rotation(structure, refine_rotation=True):
    # Check conventional setting:
    sga = SpacegroupAnalyzer(structure)
    dataset = sga.get_symmetry_dataset()
    trans_mat = dataset['transformation_matrix']
    conv_latt = Lattice(np.transpose(np.dot(np.transpose(
        structure.lattice.matrix), np.linalg.inv(trans_mat))))
    xtal_sys = sga.get_crystal_system()

    vecs = conv_latt.matrix
    lengths = np.array(conv_latt.abc)
    angles = np.array(conv_latt.angles)
    rotation = np.zeros((3, 3))

    # IEEE rules: a,b,c || x1,x2,x3
    if xtal_sys == "cubic":
        rotation = [vecs[i] / lengths[i] for i in range(3)]

    # IEEE rules: a=b in length; c,a || x3, x1
    elif xtal_sys == "tetragonal":
        rotation = np.array([vec / mag for (mag, vec) in
                             sorted(zip(lengths, vecs),
                                    key=lambda x: x[0])])
        if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]):
            rotation[0], rotation[2] = rotation[2], rotation[0].copy()
        rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))

    # IEEE rules: c<a<b; c,a || x3,x1
    elif xtal_sys == "orthorhombic":
        rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
        rotation = np.roll(rotation, 2, axis=0)

    # IEEE rules: c,a || x3,x1, c is threefold axis
    # Note this also includes rhombohedral crystal systems
    elif xtal_sys in ("trigonal", "hexagonal"):
        # find threefold axis:
        tf_index = np.argmin(abs(angles - 120.))
        non_tf_mask = np.logical_not(angles == angles[tf_index])
        rotation[2] = get_uvec(vecs[tf_index])
        rotation[0] = get_uvec(vecs[non_tf_mask][0])
        rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))

    # IEEE rules: b,c || x2,x3; alpha=beta=90, c<a
    elif xtal_sys == "monoclinic":
        # Find unique axis
        u_index = np.argmax(abs(angles - 90.))
        n_umask = np.logical_not(angles == angles[u_index])
        rotation[1] = get_uvec(vecs[u_index])
        # Shorter of remaining lattice vectors for c axis
        c = [vec / mag for (mag, vec) in
             sorted(zip(lengths[n_umask], vecs[n_umask]))][0]
        rotation[2] = np.array(c)
        rotation[0] = np.cross(rotation[1], rotation[2])

    # IEEE rules: c || x3, x2 normal to ac plane
    elif xtal_sys == "triclinic":
        rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
        rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
        rotation[0] = np.cross(rotation[1], rotation[2])

    rotation = SquareTensor(rotation)
    if refine_rotation:
        rotation = rotation.refine_rotation()

    return rotation
140,898
Serializes the tensor object Args: voigt (bool): flag for whether to store entries in voigt-notation. Defaults to false, as information may be lost in conversion. Returns (Dict): serialized format tensor object
def as_dict(self, voigt=False):
    input_array = self.voigt if voigt else self
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__,
         "input_array": input_array.tolist()}
    if voigt:
        d.update({"voigt": voigt})
    return d
140,903
Helper method for refining a rotation matrix by ensuring that the second and third rows are perpendicular to the first. Gets the new y vector by removing the component of y along x, and the new z vector from the cross product of the new x and y. Returns: new rotation matrix
def refine_rotation(self):
    new_x, y = get_uvec(self[0]), get_uvec(self[1])
    # Remove the component of y along new_x (one Gram-Schmidt step)
    new_y = y - np.dot(new_x, y) * new_x
    new_z = np.cross(new_x, new_y)
    return SquareTensor([new_x, new_y, new_z])
140,918
Initialize a TensorMapping Args: tensor_list ([Tensor]): list of tensors value_list ([]): list of values to be associated with tensors tol (float): an absolute tolerance for getting and setting items in the mapping
def __init__(self, tensors=None, values=None, tol=1e-5):
    self._tensor_list = tensors or []
    self._value_list = values or []
    if not len(self._tensor_list) == len(self._value_list):
        raise ValueError("TensorMapping must be initialized with tensors "
                         "and values of equivalent length")
    self.tol = tol
140,919
Calculates the average oxidation state of a site Args: site: Site to compute average oxidation state Returns: Average oxidation state of site.
def compute_average_oxidation_state(site):
    try:
        avg_oxi = sum([sp.oxi_state * occu
                       for sp, occu in site.species.items()
                       if sp is not None])
        return avg_oxi
    except AttributeError:
        pass
    try:
        return site.charge
    except AttributeError:
        raise ValueError("Ewald summation can only be performed on structures "
                         "that are either oxidation state decorated or have "
                         "site charges.")
140,924
Gives total ewald energy for a substructure in the same lattice. The sub_structure must be a subset of the original structure, with possibly different charges. Args: sub_structure (Structure): Substructure to compute Ewald sum for. tol (float): Tolerance for site matching in fractional coordinates. Returns: Ewald sum of substructure.
def compute_sub_structure(self, sub_structure, tol=1e-3):
    total_energy_matrix = self.total_energy_matrix.copy()

    def find_match(site):
        for test_site in sub_structure:
            frac_diff = abs(np.array(site.frac_coords) -
                            np.array(test_site.frac_coords)) % 1
            frac_diff = [abs(a) < tol or abs(a) > 1 - tol
                         for a in frac_diff]
            if all(frac_diff):
                return test_site
        return None

    matches = []
    for i, site in enumerate(self._s):
        matching_site = find_match(site)
        if matching_site:
            new_charge = compute_average_oxidation_state(matching_site)
            old_charge = self._oxi_states[i]
            scaling_factor = new_charge / old_charge
            matches.append(matching_site)
        else:
            scaling_factor = 0
        total_energy_matrix[i, :] *= scaling_factor
        total_energy_matrix[:, i] *= scaling_factor

    if len(matches) != len(sub_structure):
        output = ["Missing sites."]
        for site in sub_structure:
            if site not in matches:
                output.append("unmatched = {}".format(site))
        raise ValueError("\n".join(output))

    return sum(sum(total_energy_matrix))
140,927
Compute the energy for a single site in the structure Args: site_index (int): Index of site Returns: (float) - Energy of that site
def get_site_energy(self, site_index):
    if self._charged:
        warn('Per atom energies for charged structures not supported in EwaldSummation')
    return np.sum(self._recip[:, site_index]) + np.sum(self._real[:, site_index]) \
        + self._point[site_index]
140,930
Computes a best case given a matrix and manipulation list. Args: matrix: the current matrix (with some permutations already performed) m_list: [(multiplication fraction, number_of_indices, indices, species)] describing the manipulation indices: Set of indices which haven't had a permutation performed on them.
def best_case(self, matrix, m_list, indices_left):
    m_indices = []
    fraction_list = []
    for m in m_list:
        m_indices.extend(m[2])
        fraction_list.extend([m[0]] * m[1])

    indices = list(indices_left.intersection(m_indices))

    interaction_matrix = matrix[indices, :][:, indices]

    fractions = np.zeros(len(interaction_matrix)) + 1
    fractions[:len(fraction_list)] = fraction_list
    fractions = np.sort(fractions)

    # Sum associated with each index (disregarding interactions between
    # indices)
    sums = 2 * np.sum(matrix[indices], axis=1)
    sums = np.sort(sums)

    # Interaction corrections. Can be reduced to (1-x)(1-y) for x,y in
    # fractions each element in a column gets multiplied by (1-x), and then
    # the sum of the columns gets multiplied by (1-y) since fractions are
    # less than 1, there is no effect of one choice on the other
    step1 = np.sort(interaction_matrix) * (1 - fractions)
    step2 = np.sort(np.sum(step1, axis=1))
    step3 = step2 * (1 - fractions)
    interaction_correction = np.sum(step3)

    if self._algo == self.ALGO_TIME_LIMIT:
        elapsed_time = datetime.utcnow() - self._start_time
        speedup_parameter = elapsed_time.total_seconds() / 1800
        avg_int = np.sum(interaction_matrix, axis=None)
        avg_frac = np.average(np.outer(1 - fractions, 1 - fractions))
        average_correction = avg_int * avg_frac

        interaction_correction = average_correction * speedup_parameter \
            + interaction_correction * (1 - speedup_parameter)

    best_case = np.sum(matrix) + np.inner(sums[::-1], fractions - 1) \
        + interaction_correction

    return best_case
140,937
This method recursively finds the minimal permutations using a binary tree search strategy. Args: matrix: The current matrix (with some permutations already performed). m_list: The list of permutations still to be performed indices: Set of indices which haven't had a permutation performed on them.
def _recurse(self, matrix, m_list, indices, output_m_list=[]):
    # check to see if we've found all the solutions that we need
    if self._finished:
        return

    # if we're done with the current manipulation, pop it off.
    while m_list[-1][1] == 0:
        m_list = copy(m_list)
        m_list.pop()
        # if there are no more manipulations left to do check the value
        if not m_list:
            matrix_sum = np.sum(matrix)
            if matrix_sum < self._current_minimum:
                self.add_m_list(matrix_sum, output_m_list)
            return

    # if we won't have enough indices left, return
    if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
        return

    if len(m_list) == 1 or m_list[-1][1] > 1:
        if self.best_case(matrix, m_list, indices) > self._current_minimum:
            return

    index = self.get_next_index(matrix, m_list[-1], indices)

    m_list[-1][2].remove(index)

    # Make the matrix and new m_list where we do the manipulation to the
    # index that we just got
    matrix2 = np.copy(matrix)
    m_list2 = deepcopy(m_list)
    output_m_list2 = copy(output_m_list)

    matrix2[index, :] *= m_list[-1][0]
    matrix2[:, index] *= m_list[-1][0]
    output_m_list2.append([index, m_list[-1][3]])

    indices2 = copy(indices)
    indices2.remove(index)
    m_list2[-1][1] -= 1

    # recurse through both the modified and unmodified matrices
    self._recurse(matrix2, m_list2, indices2, output_m_list2)
    self._recurse(matrix, m_list, indices, output_m_list)
140,939
Calculates the ensemble averaged Voronoi coordination numbers of a list of Structures using VoronoiNN. Typically used for analyzing the output of a Molecular Dynamics run. Args: structures (list): list of Structures. freq (int): sampling frequency of coordination number [every freq steps]. Returns: Dictionary of elements as keys and average coordination numbers as values.
def average_coordination_number(structures, freq=10):
    coordination_numbers = {}
    for spec in structures[0].composition.as_dict().keys():
        coordination_numbers[spec] = 0.0
    count = 0
    for t in range(len(structures)):
        if t % freq != 0:
            continue
        count += 1
        vnn = VoronoiNN()
        for atom in range(len(structures[0])):
            cn = vnn.get_cn(structures[t], atom, use_weights=True)
            coordination_numbers[structures[t][atom].species_string] += cn
    elements = structures[0].composition.as_dict()
    for el in coordination_numbers:
        coordination_numbers[el] = coordination_numbers[el] / elements[el] / count
    return coordination_numbers
140,940
Helper method to calculate the solid angle of a set of coords from the center. Args: center (3x1 array): Center to measure solid angle from. coords (Nx3 array): List of coords to determine solid angle. Returns: The solid angle.
def solid_angle(center, coords):
    o = np.array(center)
    r = [np.array(c) - o for c in coords]
    r.append(r[0])
    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
    n.append(np.cross(r[1], r[0]))
    vals = []
    for i in range(len(n) - 1):
        v = -np.dot(n[i], n[i + 1]) \
            / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
        vals.append(acos(abs_cap(v)))
    phi = sum(vals)
    return phi + (3 - len(r)) * pi
140,941
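A standalone run of the routine above, with a local `abs_cap` clamp standing in for the helper pymatgen normally provides. One face of a side-2 cube seen from its center subtends one sixth of the sphere, 4*pi/6 ~ 2.0944 sr:

import numpy as np
from math import acos, pi

def abs_cap(val, max_abs_val=1.0):
    # clamp to [-1, 1] to guard acos against floating-point drift
    return max(min(val, max_abs_val), -max_abs_val)

face = [(1, 1, 1), (-1, 1, 1), (-1, -1, 1), (1, -1, 1)]
print(solid_angle((0, 0, 0), face))  # ~2.0944, using solid_angle as defined above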
Provides max bond length estimates for a structure based on the JMol table and algorithms. Args: structure: (structure) el_radius_updates: (dict) symbol->float to update atomic radii Returns: (dict) - (Element1, Element2) -> float. The two elements are ordered by Z.
def get_max_bond_lengths(structure, el_radius_updates=None):
    #jmc = JMolCoordFinder(el_radius_updates)
    jmnn = JmolNN(el_radius_updates=el_radius_updates)

    bonds_lens = {}
    els = sorted(structure.composition.elements, key=lambda x: x.Z)

    for i1 in range(len(els)):
        for i2 in range(len(els) - i1):
            bonds_lens[els[i1], els[i1 + i2]] = jmnn.get_max_bond_distance(
                els[i1].symbol, els[i1 + i2].symbol)

    return bonds_lens
140,942
Determines if a structure contains peroxide anions. Args: structure (Structure): Input structure. relative_cutoff: The peroxide bond distance is 1.49 Angstrom. Relative_cutoff * 1.49 stipulates the maximum distance two O atoms must be to each other to be considered a peroxide. Returns: Boolean indicating if structure contains a peroxide anion.
def contains_peroxide(structure, relative_cutoff=1.1):
    ox_type = oxide_type(structure, relative_cutoff)
    if ox_type == "peroxide":
        return True
    else:
        return False
140,944
Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide Args: structure (Structure): Input structure. relative_cutoff (float): Relative_cutoff * act. cutoff stipulates the max distance two O atoms must be from each other. return_nbonds (bool): Should number of bonds be requested?
def oxide_type(structure, relative_cutoff=1.1, return_nbonds=False):
    ox_obj = OxideType(structure, relative_cutoff)
    if return_nbonds:
        return ox_obj.oxide_type, ox_obj.nbonds
    else:
        return ox_obj.oxide_type
140,945
Determines if a structure is a sulfide/polysulfide Args: structure (Structure): Input structure. Returns: (str) sulfide/polysulfide/sulfate
def sulfide_type(structure):
    structure = structure.copy()
    structure.remove_oxidation_states()
    s = Element("S")
    comp = structure.composition
    if comp.is_element or s not in comp:
        return None

    finder = SpacegroupAnalyzer(structure, symprec=0.1)
    symm_structure = finder.get_symmetrized_structure()
    s_sites = [sites[0] for sites in symm_structure.equivalent_sites
               if sites[0].specie == s]

    def process_site(site):
        # in an exceptionally rare number of structures, the search
        # radius needs to be increased to find a neighbor atom
        search_radius = 4
        neighbors = []
        while len(neighbors) == 0:
            neighbors = structure.get_neighbors(site, search_radius)
            search_radius *= 2
            if search_radius > max(structure.lattice.abc) * 2:
                break

        neighbors = sorted(neighbors, key=lambda n: n[1])
        nn, dist = neighbors[0]
        coord_elements = [site.specie for site, d in neighbors
                          if d < dist + 0.4][:4]
        avg_electroneg = np.mean([e.X for e in coord_elements])
        if avg_electroneg > s.X:
            return "sulfate"
        elif avg_electroneg == s.X and s in coord_elements:
            return "polysulfide"
        else:
            return "sulfide"

    types = set([process_site(site) for site in s_sites])
    if "sulfate" in types:
        return None
    elif "polysulfide" in types:
        return "polysulfide"
    else:
        return "sulfide"
140,946
Performs Voronoi analysis and returns the polyhedra around atom n in Schlaefli notation. Args: structure (Structure): structure to analyze n (int): index of the center atom in structure Returns: voronoi index of n: <c3,c4,c5,c6,c7,c8,c9,c10> where c_i denotes the number of facets with i vertices.
def analyze(self, structure, n=0):
    center = structure[n]
    neighbors = structure.get_sites_in_sphere(center.coords, self.cutoff)
    neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]
    qvoronoi_input = np.array([s.coords for s in neighbors])
    voro = Voronoi(qvoronoi_input, qhull_options=self.qhull_options)
    vor_index = np.array([0, 0, 0, 0, 0, 0, 0, 0])

    for key in voro.ridge_dict:
        if 0 in key:  # This means if the center atom is in key
            if -1 in key:  # This means if an infinity point is in key
                raise ValueError("Cutoff too short.")
            else:
                try:
                    vor_index[len(voro.ridge_dict[key]) - 3] += 1
                except IndexError:
                    # If a facet has more than 10 edges, it's skipped here.
                    pass
    return vor_index
140,948
Please note that the input and final structures should have the same ordering of sites. This is typically the case for most computational codes. Args: initial_structure (Structure): Initial input structure to calculation. final_structure (Structure): Final output structure from calculation.
def __init__(self, initial_structure, final_structure):
    if final_structure.formula != initial_structure.formula:
        raise ValueError("Initial and final structures have different "
                         "formulas!")
    self.initial = initial_structure
    self.final = final_structure
140,951
Assuming there is some value in the connectivity array at indices (1, 3, 12). sitei can be obtained directly from the input structure (structure[1]). sitej can be obtained by passing 3, 12 to this function Args: site_index (int): index of the site (3 in the example) image_index (int): index of the image (12 in the example)
def get_sitej(self, site_index, image_index):
    atoms_n_occu = self.s[site_index].species
    lattice = self.s.lattice
    coords = self.s[site_index].frac_coords + self.offsets[image_index]
    return PeriodicSite(atoms_n_occu, coords, lattice)
140,958
Finds stress corresponding to zero strain state in stress-strain list Args: strains (Nx3x3 array-like): array corresponding to strains stresses (Nx3x3 array-like): array corresponding to stresses tol (float): tolerance to find zero strain state
def find_eq_stress(strains, stresses, tol=1e-10):
    stress_array = np.array(stresses)
    strain_array = np.array(strains)
    eq_stress = stress_array[np.all(abs(strain_array) < tol, axis=(1, 2))]

    if eq_stress.size != 0:
        all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all()
        if len(eq_stress) > 1 and not all_same:
            raise ValueError("Multiple stresses found for equilibrium strain"
                             " state, please specify equilibrium stress or "
                             "remove extraneous stresses.")
        eq_stress = eq_stress[0]
    else:
        warnings.warn("No eq state found, returning zero voigt stress")
        eq_stress = Stress(np.zeros((3, 3)))
    return eq_stress
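A minimal sketch of the matching behavior (the import path is an assumption and varies by pymatgen version):

import numpy as np
from pymatgen.analysis.elasticity.elastic import find_eq_stress

strains = [np.zeros((3, 3)), 0.01 * np.eye(3)]
stresses = [1e-3 * np.eye(3), 2.5 * np.eye(3)]
# the all-zero strain matches within tol, so the first stress is returned
print(find_eq_stress(strains, stresses))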
140,970
Helper function to find difference coefficients of a derivative on an arbitrary mesh. Args: hvec (1D array-like): sampling stencil n (int): degree of derivative to find
def get_diff_coeff(hvec, n=1):
    # np.float is removed in recent numpy; the builtin float is equivalent
    hvec = np.array(hvec, dtype=float)
    acc = len(hvec)
    exp = np.column_stack([np.arange(acc)] * acc)
    a = np.vstack([hvec] * acc) ** exp
    b = np.zeros(acc)
    b[n] = factorial(n)
    return np.linalg.solve(a, b)
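As a quick check, the familiar three-point central-difference stencil falls out directly (the import path is an assumption):

from pymatgen.analysis.elasticity.elastic import get_diff_coeff

# first derivative on the stencil {x-h, x, x+h}, h = 1:
# f'(x) ~ -0.5*f(x-h) + 0*f(x) + 0.5*f(x+h)
print(get_diff_coeff([-1, 0, 1], n=1))  # [-0.5  0.   0.5]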
140,974
Calculates a given elastic tensor's contribution to the stress using Einstein summation. Args: strain (3x3 array-like): matrix corresponding to strain
def calculate_stress(self, strain):
    strain = np.array(strain)
    if strain.shape == (6,):
        strain = Strain.from_voigt(strain)
    assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
    stress_matrix = self.einsum_sequence([strain] * (self.order - 1)) \
        / factorial(self.order - 1)
    return Stress(stress_matrix)
140,976
Calculates the Poisson ratio for a specific direction relative to a second, orthogonal direction. Args: n (3-d vector): principal direction m (3-d vector): secondary direction orthogonal to n tol (float): tolerance for testing of orthogonality
def directional_poisson_ratio(self, n, m, tol=1e-8):
    n, m = get_uvec(n), get_uvec(m)
    if not np.abs(np.dot(n, m)) < tol:
        raise ValueError("n and m must be orthogonal")
    v = self.compliance_tensor.einsum_sequence([n] * 2 + [m] * 2)
    v *= -1 / self.compliance_tensor.einsum_sequence([n] * 4)
    return v
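A sanity-check sketch on a synthetic isotropic tensor, for which the Poisson ratio is c12/(c11 + c12) = 0.375 in every orthogonal direction pair (the ElasticTensor class and module path assume the pymatgen API; the stiffness values are illustrative):

import numpy as np
from pymatgen.analysis.elasticity.elastic import ElasticTensor

# isotropic stiffness (GPa): c44 = (c11 - c12) / 2
c11, c12, c44 = 250.0, 150.0, 50.0
c_voigt = np.array([[c11, c12, c12, 0, 0, 0],
                    [c12, c11, c12, 0, 0, 0],
                    [c12, c12, c11, 0, 0, 0],
                    [0, 0, 0, c44, 0, 0],
                    [0, 0, 0, 0, c44, 0],
                    [0, 0, 0, 0, 0, c44]])
et = ElasticTensor.from_voigt(c_voigt)
print(et.directional_poisson_ratio([1, 0, 0], [0, 1, 0]))  # ~0.375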
140,983
Calculates transverse sound velocity (in SI units) using the Voigt-Reuss-Hill average shear modulus. Args: structure: pymatgen structure object Returns: transverse sound velocity (in SI units)
def trans_v(self, structure):
    nsites = structure.num_sites
    volume = structure.volume
    natoms = structure.composition.num_atoms
    weight = float(structure.composition.weight)
    mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
    if self.g_vrh < 0:
        raise ValueError("g_vrh is negative, sound velocity is undefined")
    return (1e9 * self.g_vrh / mass_density) ** 0.5
140,984
Calculates Snyder's acoustic sound velocity (in SI units) Args: structure: pymatgen structure object Returns: Snyder's acoustic sound velocity (in SI units)
def snyder_ac(self, structure):
    nsites = structure.num_sites
    volume = structure.volume
    natoms = structure.composition.num_atoms
    num_density = 1e30 * nsites / volume
    tot_mass = sum([e.atomic_mass for e in structure.species])
    avg_mass = 1.6605e-27 * tot_mass / natoms
    return 0.38483 * avg_mass * \
        ((self.long_v(structure) + 2. * self.trans_v(structure)) / 3.) ** 3. \
        / (300. * num_density ** (-2. / 3.) * nsites ** (1. / 3.))
140,985
Calculates Snyder's optical sound velocity (in SI units) Args: structure: pymatgen structure object Returns: Snyder's optical sound velocity (in SI units)
def snyder_opt(self, structure):
    nsites = structure.num_sites
    volume = structure.volume
    num_density = 1e30 * nsites / volume
    return 1.66914e-23 * \
        (self.long_v(structure) + 2. * self.trans_v(structure)) / 3. \
        / num_density ** (-2. / 3.) * (1 - nsites ** (-1. / 3.))
140,986
Calculates Clarke's thermal conductivity (in SI units) Args: structure: pymatgen structure object Returns: Clarke's thermal conductivity (in SI units)
def clarke_thermalcond(self, structure):
    nsites = structure.num_sites
    volume = structure.volume
    tot_mass = sum([e.atomic_mass for e in structure.species])
    natoms = structure.composition.num_atoms
    weight = float(structure.composition.weight)
    avg_mass = 1.6605e-27 * tot_mass / natoms
    mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
    return 0.87 * 1.3806e-23 * avg_mass ** (-2. / 3.) \
        * mass_density ** (1. / 6.) * self.y_mod ** 0.5
140,987
Estimates the Debye temperature from longitudinal and transverse sound velocities. Args: structure: pymatgen structure object Returns: Debye temperature (in SI units)
def debye_temperature(self, structure):
    v0 = structure.volume * 1e-30 / structure.num_sites
    vl, vt = self.long_v(structure), self.trans_v(structure)
    vm = 3 ** (1. / 3.) * (1 / vl ** 3 + 2 / vt ** 3) ** (-1. / 3.)
    td = 1.05457e-34 / 1.38065e-23 * vm * (6 * np.pi ** 2 / v0) ** (1. / 3.)
    return td
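A small end-to-end sketch tying the sound velocities and Debye temperature together; the structure and the isotropic stiffness values are illustrative, not fitted data, and the pymatgen API is assumed:

import numpy as np
from pymatgen.core import Lattice, Structure
from pymatgen.analysis.elasticity.elastic import ElasticTensor

c_voigt = np.zeros((6, 6))
c_voigt[:3, :3] = 150.0  # c12 block
np.fill_diagonal(c_voigt, [250.0] * 3 + [50.0] * 3)  # c11, c44 diagonal
et = ElasticTensor.from_voigt(c_voigt)

mgo = Structure.from_spacegroup("Fm-3m", Lattice.cubic(4.21),
                                ["Mg", "O"], [[0, 0, 0], [0.5, 0.5, 0.5]])
print(et.trans_v(mgo), et.long_v(mgo))  # m/s
print(et.debye_temperature(mgo))        # K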
140,988
Returns a dictionary of properties derived from the elastic tensor and an associated structure. Args: structure (Structure): structure object for which to calculate associated properties include_base_props (bool): whether to include base properties, like k_vrh, etc. ignore_errors (bool): if set to True, will set properties that depend on a physically valid tensor to None; defaults to False
def get_structure_property_dict(self, structure, include_base_props=True,
                                ignore_errors=False):
    s_props = ["trans_v", "long_v", "snyder_ac", "snyder_opt",
               "snyder_total", "clarke_thermalcond", "cahill_thermalcond",
               "debye_temperature"]
    if ignore_errors and (self.k_vrh < 0 or self.g_vrh < 0):
        sp_dict = {prop: None for prop in s_props}
    else:
        sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
    sp_dict["structure"] = structure
    if include_base_props:
        sp_dict.update(self.property_dict)
    return sp_dict
140,991
Class method to fit an elastic tensor from stress/strain data. Method uses Moore-Penrose pseudoinverse to invert the s = C*e equation with elastic tensor, stress, and strain in voigt notation Args: stresses (Nx3x3 array-like): list or array of stresses strains (Nx3x3 array-like): list or array of strains
def from_pseudoinverse(cls, strains, stresses):
    # convert the stress/strain to Nx6 arrays of voigt notation
    warnings.warn("Pseudoinverse fitting of Strain/Stress lists may yield "
                  "questionable results from vasp data, use with caution.")
    stresses = np.array([Stress(stress).voigt for stress in stresses])
    with warnings.catch_warnings(record=True):
        strains = np.array([Strain(strain).voigt for strain in strains])

    voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))
    return cls.from_voigt(voigt_fit)
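A round-trip sketch under stated assumptions (pymatgen's Strain/Stress classes and module paths; the stiffness is synthetic, so the pseudoinverse fit should recover it exactly):

import numpy as np
from pymatgen.analysis.elasticity.elastic import ElasticTensor
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.analysis.elasticity.stress import Stress

c = np.zeros((6, 6))
c[:3, :3] = 150.0
np.fill_diagonal(c, [250.0] * 3 + [50.0] * 3)

# one small strain along each Voigt component, stresses from s = C*e
strains = [Strain.from_voigt(0.01 * ei) for ei in np.eye(6)]
stresses = [Stress.from_voigt(c @ s.voigt) for s in strains]

et = ElasticTensor.from_pseudoinverse(strains, stresses)
print(np.allclose(et.voigt, c, atol=1e-8))  # True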
140,992
Initialization method for ElasticTensorExpansion Args: c_list (list or tuple): sequence of Tensor inputs or tensors from which the elastic tensor expansion is constructed.
def __init__(self, c_list):
    c_list = [NthOrderElasticTensor(c, check_rank=4 + i * 2)
              for i, c in enumerate(c_list)]
    super().__init__(c_list)
140,995
Gets the Generalized Gruneisen tensor for a given third-order elastic tensor expansion. Args: n (3x1 array-like): normal mode direction u (3x1 array-like): polarization direction
def get_ggt(self, n, u):
    gk = self[0].einsum_sequence([n, u, n, u])
    result = -(2 * gk * np.outer(u, u) + self[0].einsum_sequence([n, n])
               + self[1].einsum_sequence([n, u, n, u])) / (2 * gk)
    return result
140,998
Finds directional frequency contribution to the heat capacity from direction and polarization Args: structure (Structure): Structure to be used in directional heat capacity determination n (3x1 array-like): direction for Cv determination u (3x1 array-like): polarization direction, note that no attempt for verification of eigenvectors is made
def omega(self, structure, n, u):
    l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)
    l0 *= 1e-10  # in m (converted from angstroms)
    weight = float(structure.composition.weight) * 1.66054e-27  # in kg
    vol = structure.volume * 1e-30  # in m^3
    vel = (1e9 * self[0].einsum_sequence([n, u, n, u])
           / (weight / vol)) ** 0.5
    return vel / l0
141,002
Returns the effective elastic constants from the elastic tensor expansion. Args: strain (Strain or 3x3 array-like): strain condition under which to calculate the effective constants order (int): order of the ecs to be returned
def get_effective_ecs(self, strain, order=2):
    ec_sum = 0
    for n, ecs in enumerate(self[order - 2:]):
        ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)
    return ec_sum
141,006
Gets the Wallace Tensor for determining yield strength criteria. Args: tau (3x3 array-like): stress at which to evaluate the wallace tensor
def get_wallace_tensor(self, tau):
    b = 0.5 * (np.einsum("ml,kn->klmn", tau, np.eye(3))
               + np.einsum("km,ln->klmn", tau, np.eye(3))
               + np.einsum("nl,km->klmn", tau, np.eye(3))
               + np.einsum("kn,lm->klmn", tau, np.eye(3))
               - 2 * np.einsum("kl,mn->klmn", tau, np.eye(3)))
    strain = self.get_strain_from_stress(tau)
    b += self.get_effective_ecs(strain)
    return b
141,007
Gets the symmetrized wallace tensor for determining yield strength criteria. Args: tau (3x3 array-like): stress at which to evaluate the wallace tensor.
def get_symmetric_wallace_tensor(self, tau):
    wallace = self.get_wallace_tensor(tau)
    return Tensor(0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1])))
141,008
Gets the stability criteria from the symmetric Wallace tensor from an input vector and stress value. Args: s (float): Stress value at which to evaluate the stability criteria n (3x1 array-like): direction of the applied stress
def get_stability_criteria(self, s, n):
    n = get_uvec(n)
    stress = s * np.outer(n, n)
    sym_wallace = self.get_symmetric_wallace_tensor(stress)
    return np.linalg.det(sym_wallace.voigt)
141,009
Gets the yield stress for a given direction Args: n (3x1 array-like): direction for which to find the yield stress
def get_yield_stress(self, n):
    # TODO: root finding could be more robust
    comp = root(self.get_stability_criteria, -1, args=n)
    tens = root(self.get_stability_criteria, 1, args=n)
    return (comp.x, tens.x)
141,010
Adds the skeleton of the Wigner-Seitz cell of the lattice to a matplotlib Axes Args: lattice: Lattice object ax: matplotlib :class:`Axes` or None if a new figure should be created. kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to black and linewidth to 1. Returns: matplotlib figure and matplotlib ax
def plot_wigner_seitz(lattice, ax=None, **kwargs):
    ax, fig, plt = get_ax3d_fig_plt(ax)

    if "color" not in kwargs:
        kwargs["color"] = "k"
    if "linewidth" not in kwargs:
        kwargs["linewidth"] = 1

    bz = lattice.get_wigner_seitz_cell()
    for iface in range(len(bz)):
        for line in itertools.combinations(bz[iface], 2):
            for jface in range(len(bz)):
                if (iface < jface
                        and any(np.all(line[0] == x) for x in bz[jface])
                        and any(np.all(line[1] == x) for x in bz[jface])):
                    ax.plot(*zip(line[0], line[1]), **kwargs)

    return fig, ax
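A short usage sketch (module path assumed); the Wigner-Seitz cell of a reciprocal lattice is the first Brillouin zone:

from pymatgen.core import Lattice
from pymatgen.electronic_structure.plotter import plot_wigner_seitz

rec = Lattice.cubic(3.0).reciprocal_lattice
fig, ax = plot_wigner_seitz(rec, color="b", linewidth=2)
fig.show()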
141,065
Adds the basis vectors of the lattice provided to a matplotlib Axes Args: lattice: Lattice object ax: matplotlib :class:`Axes` or None if a new figure should be created. kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to green and linewidth to 3. Returns: matplotlib figure and matplotlib ax
def plot_lattice_vectors(lattice, ax=None, **kwargs):
    ax, fig, plt = get_ax3d_fig_plt(ax)

    if "color" not in kwargs:
        kwargs["color"] = "g"
    if "linewidth" not in kwargs:
        kwargs["linewidth"] = 3

    vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0])
    vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0])
    ax.plot(*zip(vertex1, vertex2), **kwargs)
    vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0])
    ax.plot(*zip(vertex1, vertex2), **kwargs)
    vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0])
    ax.plot(*zip(vertex1, vertex2), **kwargs)

    return fig, ax
141,066
Folds a point with coordinates p inside the first Brillouin zone of the lattice. Args: p: coordinates of one point lattice: Lattice object used to convert from reciprocal to cartesian coordinates coords_are_cartesian: Set to True if you are providing coordinates in cartesian coordinates. Defaults to False. Returns: The cartesian coordinates folded inside the first Brillouin zone
def fold_point(p, lattice, coords_are_cartesian=False):
    if coords_are_cartesian:
        p = lattice.get_fractional_coords(p)
    else:
        p = np.array(p)
    p = np.mod(p + 0.5 - 1e-10, 1) - 0.5 + 1e-10
    p = lattice.get_cartesian_coords(p)

    closest_lattice_point = None
    smallest_distance = 10000
    for i in (-1, 0, 1):
        for j in (-1, 0, 1):
            for k in (-1, 0, 1):
                lattice_point = np.dot((i, j, k), lattice.matrix)
                dist = np.linalg.norm(p - lattice_point)
                if closest_lattice_point is None or dist < smallest_distance:
                    closest_lattice_point = lattice_point
                    smallest_distance = dist

    if not np.allclose(closest_lattice_point, (0, 0, 0)):
        p = p - closest_lattice_point

    return p
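A small sketch of the folding (module path is an assumption): a point at fractional coordinate 0.75 along a cubic axis folds back to -0.25.

import numpy as np
from pymatgen.core import Lattice
from pymatgen.electronic_structure.plotter import fold_point

lat = Lattice.cubic(1.0)
folded = fold_point([0.75, 0.0, 0.0], lat)
print(np.round(folded, 3))  # [-0.25  0.    0.  ]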
141,069
Gives the plot (as a matplotlib object) of the symmetry line path in the Brillouin Zone. Args: kpath (HighSymmKpath): a HighSymmKPath object ax: matplotlib :class:`Axes` or None if a new figure should be created. **kwargs: provided by add_fig_kwargs decorator Returns: matplotlib figure
def plot_brillouin_zone_from_kpath(kpath, ax=None, **kwargs):
    lines = [[kpath.kpath['kpoints'][k] for k in p]
             for p in kpath.kpath['path']]
    return plot_brillouin_zone(bz_lattice=kpath.prim_rec, lines=lines, ax=ax,
                               labels=kpath.kpath['kpoints'], **kwargs)
141,071
Adds a dos for plotting. Args: label: label for the DOS. Must be unique. dos: Dos object
def add_dos(self, label, dos):
    energies = dos.energies - dos.efermi if self.zero_at_efermi \
        else dos.energies
    densities = dos.get_smeared_densities(self.sigma) if self.sigma \
        else dos.densities
    efermi = dos.efermi
    self._doses[label] = {'energies': energies, 'densities': densities,
                          'efermi': efermi}
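A typical usage sketch, assuming a completed VASP DOS run ("vasprun.xml" is a placeholder path):

from pymatgen.io.vasp import Vasprun
from pymatgen.electronic_structure.plotter import DosPlotter

vr = Vasprun("vasprun.xml")  # placeholder path
plotter = DosPlotter(sigma=0.05)  # 0.05 eV Gaussian smearing
plotter.add_dos("Total DOS", vr.complete_dos)
plotter.get_plot(xlim=(-5, 5)).show()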
141,075
Get a matplotlib plot showing the DOS. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits.
def get_plot(self, xlim=None, ylim=None):
    ncolors = max(3, len(self._doses))
    ncolors = min(9, ncolors)

    import palettable
    colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors

    y = None
    alldensities = []
    allenergies = []
    plt = pretty_plot(12, 8)

    # Note that this complicated processing of energies is to allow for
    # stacked plots in matplotlib.
    for key, dos in self._doses.items():
        energies = dos['energies']
        densities = dos['densities']
        if not y:
            y = {Spin.up: np.zeros(energies.shape),
                 Spin.down: np.zeros(energies.shape)}
        newdens = {}
        for spin in [Spin.up, Spin.down]:
            if spin in densities:
                if self.stack:
                    y[spin] += densities[spin]
                    newdens[spin] = y[spin].copy()
                else:
                    newdens[spin] = densities[spin]
        allenergies.append(energies)
        alldensities.append(newdens)

    keys = list(self._doses.keys())
    keys.reverse()
    alldensities.reverse()
    allenergies.reverse()
    allpts = []
    for i, key in enumerate(keys):
        x = []
        y = []
        for spin in [Spin.up, Spin.down]:
            if spin in alldensities[i]:
                densities = list(int(spin) * alldensities[i][spin])
                energies = list(allenergies[i])
                if spin == Spin.down:
                    energies.reverse()
                    densities.reverse()
                x.extend(energies)
                y.extend(densities)
        allpts.extend(list(zip(x, y)))
        if self.stack:
            plt.fill(x, y, color=colors[i % ncolors], label=str(key))
        else:
            plt.plot(x, y, color=colors[i % ncolors], label=str(key),
                     linewidth=3)
        if not self.zero_at_efermi:
            ylim = plt.ylim()
            plt.plot([self._doses[key]['efermi'],
                      self._doses[key]['efermi']], ylim,
                     color=colors[i % ncolors], linestyle='--', linewidth=2)

    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    else:
        xlim = plt.xlim()
        relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]]
        plt.ylim((min(relevanty), max(relevanty)))

    if self.zero_at_efermi:
        ylim = plt.ylim()
        plt.plot([0, 0], ylim, 'k--', linewidth=2)

    plt.xlabel('Energies (eV)')
    plt.ylabel('Density of states')
    plt.legend()
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()  # all the text.Text instances in the legend
    plt.setp(ltext, fontsize=30)
    plt.tight_layout()
    return plt
141,076
Save matplotlib plot to a file. Args: filename: Filename to write to. img_format: Image format to use. Defaults to EPS. ylim: Specifies the y-axis limits.
def save_plot(self, filename, img_format="eps", ylim=None,
              zero_to_efermi=True, smooth=False):
    plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
                        smooth=smooth)
    plt.savefig(filename, format=img_format)
    plt.close()
141,080
Plots two band structures for comparison: the first plotter's bands in blue/red (spin up/down), the other plotter's in cyan/magenta. The two band structures need to be defined on the same symmetry lines, and the distances between symmetry lines are those of the band structure used to build the BSPlotter. Args: other_plotter: another BSPlotter object defined along the same symmetry lines legend (bool): whether to add a legend distinguishing the two band structures Returns: a matplotlib object with both band structures
def plot_compare(self, other_plotter, legend=True):
    # TODO: add exception if the band structures are not compatible
    import matplotlib.lines as mlines

    plt = self.get_plot()
    data_orig = self.bs_plot_data()
    data = other_plotter.bs_plot_data()
    band_linewidth = 1
    for i in range(other_plotter._nb_bands):
        for d in range(len(data_orig['distances'])):
            plt.plot(data_orig['distances'][d],
                     [e[str(Spin.up)][i] for e in data['energy']][d],
                     'c-', linewidth=band_linewidth)
            if other_plotter._bs.is_spin_polarized:
                plt.plot(data_orig['distances'][d],
                         [e[str(Spin.down)][i] for e in data['energy']][d],
                         'm--', linewidth=band_linewidth)

    if legend:
        handles = [mlines.Line2D([], [], linewidth=2, color='b',
                                 label='bs 1 up'),
                   mlines.Line2D([], [], linewidth=2, color='r',
                                 label='bs 1 down', linestyle="--"),
                   mlines.Line2D([], [], linewidth=2, color='c',
                                 label='bs 2 up'),
                   mlines.Line2D([], [], linewidth=2, color='m',
                                 linestyle="--", label='bs 2 down')]
        plt.legend(handles=handles)

    return plt
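A usage sketch, assuming two calculations on the same k-path ("run1" and "run2" are placeholder paths):

from pymatgen.io.vasp import Vasprun
from pymatgen.electronic_structure.plotter import BSPlotter

bs1 = Vasprun("run1/vasprun.xml").get_band_structure(line_mode=True)
bs2 = Vasprun("run2/vasprun.xml").get_band_structure(line_mode=True)
BSPlotter(bs1).plot_compare(BSPlotter(bs2)).show()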
141,081
Get a matplotlib plot object. Args: bs (BandStructureSymmLine): the bandstructure to plot. Projection data must exist for projected plots. dos (Dos): the Dos to plot. Projection data must exist (i.e., CompleteDos) for projected plots. Returns: matplotlib.pyplot object on which you can call commands like show() and savefig()
def get_plot(self, bs, dos=None):
    import matplotlib.lines as mlines
    from matplotlib.gridspec import GridSpec
    import matplotlib.pyplot as mplt

    # make sure the user-specified band structure projection is valid
    bs_projection = self.bs_projection
    if dos:
        elements = [e.symbol for e in dos.structure.composition.elements]
    elif bs_projection and bs.structure:
        elements = [e.symbol for e in bs.structure.composition.elements]
    else:
        elements = []

    rgb_legend = self.rgb_legend and bs_projection and \
        bs_projection.lower() == "elements" and len(elements) in [2, 3]

    if bs_projection and bs_projection.lower() == "elements" and \
            (len(elements) not in [2, 3] or
             not bs.get_projection_on_elements()):
        warnings.warn(
            "Cannot get element projected data; either the projection data "
            "doesn't exist, or you don't have a compound with exactly 2 "
            "or 3 unique elements.")
        bs_projection = None

    # specify energy range of plot
    emin = -self.vb_energy_range
    emax = self.cb_energy_range if self.fixed_cb_energy else \
        self.cb_energy_range + bs.get_band_gap()["energy"]

    # initialize all the k-point labels and k-point x-distances for bs plot
    xlabels = []  # all symmetry point labels on x-axis
    xlabel_distances = []  # positions of symmetry point x-labels
    x_distances = []  # x positions of kpoint data
    prev_right_klabel = None  # determines which branches need a midline separator

    for idx, l in enumerate(bs.branches):
        # get left and right kpoint labels of this branch
        left_k, right_k = l["name"].split("-")

        # add $ notation for LaTeX kpoint labels
        if left_k[0] == "\\" or "_" in left_k:
            left_k = "$" + left_k + "$"
        if right_k[0] == "\\" or "_" in right_k:
            right_k = "$" + right_k + "$"

        # add left k label to list of labels
        if prev_right_klabel is None:
            xlabels.append(left_k)
            xlabel_distances.append(0)
        elif prev_right_klabel != left_k:  # used for pipe separator
            xlabels[-1] = xlabels[-1] + "$\\mid$ " + left_k

        # add right k label to list of labels
        xlabels.append(right_k)
        prev_right_klabel = right_k

        # add x-coordinates for labels
        left_kpoint = bs.kpoints[l["start_index"]].cart_coords
        right_kpoint = bs.kpoints[l["end_index"]].cart_coords
        distance = np.linalg.norm(right_kpoint - left_kpoint)
        xlabel_distances.append(xlabel_distances[-1] + distance)

        # add x-coordinates for kpoint data
        npts = l["end_index"] - l["start_index"]
        distance_interval = distance / npts
        x_distances.append(xlabel_distances[-2])
        for i in range(npts):
            x_distances.append(x_distances[-1] + distance_interval)

    # set up bs and dos plot
    gs = GridSpec(1, 2, width_ratios=[2, 1]) if dos else GridSpec(1, 1)

    fig = mplt.figure(figsize=self.fig_size)
    fig.patch.set_facecolor('white')
    bs_ax = mplt.subplot(gs[0])
    if dos:
        dos_ax = mplt.subplot(gs[1])

    # set basic axes limits for the plot
    bs_ax.set_xlim(0, x_distances[-1])
    bs_ax.set_ylim(emin, emax)
    if dos:
        dos_ax.set_ylim(emin, emax)

    # add BS xticks, labels, etc.
    bs_ax.set_xticks(xlabel_distances)
    bs_ax.set_xticklabels(xlabels, size=self.tick_fontsize)
    bs_ax.set_xlabel('Wavevector $k$', fontsize=self.axis_fontsize,
                     family=self.font)
    bs_ax.set_ylabel('$E-E_F$ / eV', fontsize=self.axis_fontsize,
                     family=self.font)

    # add BS fermi level line at E=0 and gridlines
    bs_ax.hlines(y=0, xmin=0, xmax=x_distances[-1], color="k", lw=2)
    bs_ax.set_yticks(np.arange(emin, emax + 1E-5, self.egrid_interval))
    bs_ax.set_yticklabels(np.arange(emin, emax + 1E-5, self.egrid_interval),
                          size=self.tick_fontsize)
    bs_ax.set_axisbelow(True)
    bs_ax.grid(color=[0.5, 0.5, 0.5], linestyle='dotted', linewidth=1)
    if dos:
        dos_ax.set_yticks(np.arange(emin, emax + 1E-5, self.egrid_interval))
        dos_ax.set_yticklabels([])
        dos_ax.grid(color=[0.5, 0.5, 0.5], linestyle='dotted', linewidth=1)

    # renormalize the band energy to the Fermi level
    band_energies = {}
    for spin in (Spin.up, Spin.down):
        if spin in bs.bands:
            band_energies[spin] = []
            for band in bs.bands[spin]:
                band_energies[spin].append([e - bs.efermi for e in band])

    # renormalize the DOS energies to Fermi level
    if dos:
        dos_energies = [e - dos.efermi for e in dos.energies]

    # get the projection data to set colors for the band structure
    colordata = self._get_colordata(bs, elements, bs_projection)

    # plot the colored band structure lines
    for spin in (Spin.up, Spin.down):
        if spin in band_energies:
            linestyles = "solid" if spin == Spin.up else "dotted"
            for band_idx, band in enumerate(band_energies[spin]):
                self._rgbline(bs_ax, x_distances, band,
                              colordata[spin][band_idx, :, 0],
                              colordata[spin][band_idx, :, 1],
                              colordata[spin][band_idx, :, 2],
                              linestyles=linestyles)

    if dos:
        # Plot the DOS and projected DOS
        for spin in (Spin.up, Spin.down):
            if spin in dos.densities:
                # plot the total DOS
                dos_densities = dos.densities[spin] * int(spin)
                label = "total" if spin == Spin.up else None
                dos_ax.plot(dos_densities, dos_energies,
                            color=(0.6, 0.6, 0.6), label=label)
                dos_ax.fill_betweenx(dos_energies, 0, dos_densities,
                                     color=(0.7, 0.7, 0.7),
                                     facecolor=(0.7, 0.7, 0.7))

                if self.dos_projection is None:
                    pass
                elif self.dos_projection.lower() == "elements":
                    # plot the atom-projected DOS
                    colors = ['b', 'r', 'g', 'm', 'y', 'c', 'k', 'w']
                    el_dos = dos.get_element_dos()
                    for idx, el in enumerate(elements):
                        dos_densities = el_dos[Element(el)].densities[spin] \
                            * int(spin)
                        label = el if spin == Spin.up else None
                        dos_ax.plot(dos_densities, dos_energies,
                                    color=colors[idx], label=label)
                elif self.dos_projection.lower() == "orbitals":
                    # plot each of the atomic projected DOS
                    colors = ['b', 'r', 'g', 'm']
                    spd_dos = dos.get_spd_dos()
                    for idx, orb in enumerate([OrbitalType.s, OrbitalType.p,
                                               OrbitalType.d,
                                               OrbitalType.f]):
                        if orb in spd_dos:
                            dos_densities = spd_dos[orb].densities[spin] \
                                * int(spin)
                            label = orb if spin == Spin.up else None
                            dos_ax.plot(dos_densities, dos_energies,
                                        color=colors[idx], label=label)

        # get index of lowest and highest energy being plotted,
        # used to help auto-scale DOS x-axis
        emin_idx = next(x[0] for x in enumerate(dos_energies)
                        if x[1] >= emin)
        emax_idx = len(dos_energies) - \
            next(x[0] for x in enumerate(reversed(dos_energies))
                 if x[1] <= emax)

        # determine DOS x-axis range
        dos_xmin = 0 if Spin.down not in dos.densities else -max(
            dos.densities[Spin.down][emin_idx:emax_idx + 1] * 1.05)
        dos_xmax = max([max(dos.densities[Spin.up][emin_idx:emax_idx]) *
                        1.05, abs(dos_xmin)])

        # set up the DOS x-axis and add Fermi level line
        dos_ax.set_xlim(dos_xmin, dos_xmax)
        dos_ax.set_xticklabels([])
        dos_ax.hlines(y=0, xmin=dos_xmin, xmax=dos_xmax, color="k", lw=2)
        dos_ax.set_xlabel('DOS', fontsize=self.axis_fontsize,
                          family=self.font)

    # add legend for band structure
    if self.bs_legend and not rgb_legend:
        handles = []
        if bs_projection is None:
            handles = [mlines.Line2D([], [], linewidth=2, color='k',
                                     label='spin up'),
                       mlines.Line2D([], [], linewidth=2, color='b',
                                     linestyle="dotted",
                                     label='spin down')]
        elif bs_projection.lower() == "elements":
            colors = ['b', 'r', 'g']
            for idx, el in enumerate(elements):
                handles.append(mlines.Line2D([], [], linewidth=2,
                                             color=colors[idx], label=el))
        bs_ax.legend(handles=handles, fancybox=True,
                     prop={'size': self.legend_fontsize,
                           'family': self.font}, loc=self.bs_legend)
    elif self.bs_legend and rgb_legend:
        if len(elements) == 2:
            self._rb_line(bs_ax, elements[1], elements[0],
                          loc=self.bs_legend)
        elif len(elements) == 3:
            self._rgb_triangle(bs_ax, elements[1], elements[2], elements[0],
                               loc=self.bs_legend)

    # add legend for DOS
    if dos and self.dos_legend:
        dos_ax.legend(fancybox=True,
                      prop={'size': self.legend_fontsize,
                            'family': self.font}, loc=self.dos_legend)

    mplt.subplots_adjust(wspace=0.1)
    return mplt
141,095
An RGB colored line for plotting. Creation of segments based on: http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb Args: ax: matplotlib axis k: x-axis data (k-points) e: y-axis data (energies) red: red data green: green data blue: blue data alpha: alpha values data linestyles: linestyle for plot (e.g., "solid" or "dotted")
def _rgbline(ax, k, e, red, green, blue, alpha=1, linestyles="solid"):
    from matplotlib.collections import LineCollection

    pts = np.array([k, e]).T.reshape(-1, 1, 2)
    seg = np.concatenate([pts[:-1], pts[1:]], axis=1)

    nseg = len(k) - 1
    r = [0.5 * (red[i] + red[i + 1]) for i in range(nseg)]
    g = [0.5 * (green[i] + green[i + 1]) for i in range(nseg)]
    b = [0.5 * (blue[i] + blue[i + 1]) for i in range(nseg)]
    # np.float is removed in recent numpy; the builtin float is equivalent
    a = np.ones(nseg, float) * alpha
    lc = LineCollection(seg, colors=list(zip(r, g, b, a)),
                        linewidth=2, linestyles=linestyles)
    ax.add_collection(lc)
141,096
Get color data, including projected band structures. Args: bs: Bandstructure object elements: elements (in desired order) for setting to blue, red, green bs_projection: None for no projection, "elements" for element projection Returns: a dictionary of {spin: array of shape (n_bands, n_kpoints, 3)} with rgb values for each band and k-point
def _get_colordata(bs, elements, bs_projection):
    contribs = {}
    if bs_projection and bs_projection.lower() == "elements":
        projections = bs.get_projection_on_elements()

    for spin in (Spin.up, Spin.down):
        if spin in bs.bands:
            contribs[spin] = []
            for band_idx in range(bs.nb_bands):
                colors = []
                for k_idx in range(len(bs.kpoints)):
                    if bs_projection and \
                            bs_projection.lower() == "elements":
                        c = [0, 0, 0]
                        projs = projections[spin][band_idx][k_idx]
                        # note: squared color interpolations are smoother
                        # see: https://youtu.be/LKnqECcg6Gw
                        projs = dict([(k, v ** 2)
                                      for k, v in projs.items()])
                        total = sum(projs.values())
                        if total > 0:
                            for idx, e in enumerate(elements):
                                c[idx] = math.sqrt(projs[e] / total)
                        # prefer blue, then red, then green
                        c = [c[1], c[2], c[0]]
                    else:
                        # black for spin up, blue for spin down
                        c = [0, 0, 0] if spin == Spin.up else [0, 0, 1]
                    colors.append(c)
                contribs[spin].append(colors)
            contribs[spin] = np.array(contribs[spin])

    return contribs
141,097
Plot the Seebeck coefficient as a function of Fermi level. Args: temp: the temperature output: 'eig' for the three eigenvalues (default), 'average' for the average xlim: a list of min and max fermi energy; defaults to a window spanning (0, band gap) Returns: a matplotlib object
def plot_seebeck_mu(self, temp=600, output='eig', xlim=None):
    import matplotlib.pyplot as plt
    plt.figure(figsize=(9, 7))
    seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[temp]
    plt.plot(self._bz.mu_steps, seebeck, linewidth=3.0)

    self._plot_bg_limits()
    self._plot_doping(temp)
    if output == 'eig':
        plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])
    if xlim is None:
        plt.xlim(-0.5, self._bz.gap + 0.5)
    else:
        plt.xlim(xlim[0], xlim[1])
    plt.ylabel("Seebeck \n coefficient ($\\mu$V/K)", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.tight_layout()
    return plt
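A usage sketch for this plotter family, assuming BoltzTraP output files in a placeholder directory (class and module paths assume the pymatgen API):

from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer
from pymatgen.electronic_structure.plotter import BoltztrapPlotter

an = BoltztrapAnalyzer.from_files("boltztrap_output")  # placeholder dir
plotter = BoltztrapPlotter(an)
plotter.plot_seebeck_mu(temp=600).show()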
141,104
Plot the conductivity as a function of Fermi level. Semi-log plot. Args: temp: the temperature relaxation_time: constant relaxation time in s (defaults to 1e-14); the result scales with the relaxation time xlim: a list of min and max fermi energy; defaults to a window spanning (0, band gap) Returns: a matplotlib object
def plot_conductivity_mu(self, temp=600, output='eig',
                         relaxation_time=1e-14, xlim=None):
    import matplotlib.pyplot as plt
    cond = self._bz.get_conductivity(relaxation_time=relaxation_time,
                                     output=output,
                                     doping_levels=False)[temp]
    plt.figure(figsize=(9, 7))
    plt.semilogy(self._bz.mu_steps, cond, linewidth=3.0)
    self._plot_bg_limits()
    self._plot_doping(temp)
    if output == 'eig':
        plt.legend(['$\\Sigma_1$', '$\\Sigma_2$', '$\\Sigma_3$'])
    if xlim is None:
        plt.xlim(-0.5, self._bz.gap + 0.5)
    else:
        plt.xlim(xlim)
    plt.ylim([1e13 * relaxation_time, 1e20 * relaxation_time])
    plt.ylabel("conductivity,\n $\\Sigma$ (1/($\\Omega$ m))", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.tight_layout()
    return plt
141,105
Plot the power factor as a function of Fermi level. Semi-log plot. Args: temp: the temperature relaxation_time: constant relaxation time in s (defaults to 1e-14); the result scales with the relaxation time xlim: a list of min and max fermi energy; defaults to a window spanning (0, band gap) Returns: a matplotlib object
def plot_power_factor_mu(self, temp=600, output='eig',
                         relaxation_time=1e-14, xlim=None):
    import matplotlib.pyplot as plt
    plt.figure(figsize=(9, 7))
    pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
                                   output=output,
                                   doping_levels=False)[temp]
    plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)
    self._plot_bg_limits()
    self._plot_doping(temp)
    if output == 'eig':
        plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])
    if xlim is None:
        plt.xlim(-0.5, self._bz.gap + 0.5)
    else:
        plt.xlim(xlim)
    plt.ylabel("Power factor, ($\\mu$W/(mK$^2$))", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.tight_layout()
    return plt
141,106
Plot the ZT as a function of Fermi level. Args: temp: the temperature relaxation_time: constant relaxation time in s (defaults to 1e-14) xlim: a list of min and max fermi energy; defaults to a window spanning (0, band gap) Returns: a matplotlib object
def plot_zt_mu(self, temp=600, output='eig', relaxation_time=1e-14,
               xlim=None):
    import matplotlib.pyplot as plt
    plt.figure(figsize=(9, 7))
    zt = self._bz.get_zt(relaxation_time=relaxation_time, output=output,
                         doping_levels=False)[temp]
    plt.plot(self._bz.mu_steps, zt, linewidth=3.0)
    self._plot_bg_limits()
    self._plot_doping(temp)
    if output == 'eig':
        plt.legend(['ZT$_1$', 'ZT$_2$', 'ZT$_3$'])
    if xlim is None:
        plt.xlim(-0.5, self._bz.gap + 0.5)
    else:
        plt.xlim(xlim)
    plt.ylabel("ZT", fontsize=30.0)
    plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.tight_layout()
    return plt
141,107
Plot the Seebeck coefficient as a function of temperature for different doping levels. Args: doping: the default 'all' plots all the doping levels in the analyzer; specify a list of doping levels to plot only some output: with 'average' you get an average of the three directions, with 'eigs' you get all three directions Returns: a matplotlib object
def plot_seebeck_temp(self, doping='all', output='average'):
    import matplotlib.pyplot as plt
    if output == 'average':
        sbk = self._bz.get_seebeck(output='average')
    elif output == 'eigs':
        sbk = self._bz.get_seebeck(output='eigs')

    plt.figure(figsize=(22, 14))
    tlist = sorted(sbk['n'].keys())
    doping = self._bz.doping['n'] if doping == 'all' else doping
    for i, dt in enumerate(['n', 'p']):
        plt.subplot(121 + i)
        for dop in doping:
            d = self._bz.doping[dt].index(dop)
            sbk_temp = []
            for temp in tlist:
                sbk_temp.append(sbk[dt][temp][d])
            if output == 'average':
                plt.plot(tlist, sbk_temp, marker='s',
                         label=str(dop) + ' $cm^{-3}$')
            elif output == 'eigs':
                for xyz in range(3):
                    # zip objects are not subscriptable in python 3,
                    # so materialize the transposed list first
                    plt.plot(tlist, list(zip(*sbk_temp))[xyz], marker='s',
                             label=str(xyz) + ' ' + str(dop) + ' $cm^{-3}$')
        plt.title(dt + '-type', fontsize=20)
        if i == 0:
            plt.ylabel("Seebeck \n coefficient ($\\mu$V/K)", fontsize=30.0)
        plt.xlabel('Temperature (K)', fontsize=30.0)
        # 'best' replaces an empty loc string, which matplotlib rejects
        p = 'lower right' if i == 0 else 'best'
        plt.legend(loc=p, fontsize=15)
        plt.grid()
        plt.xticks(fontsize=25)
        plt.yticks(fontsize=25)

    plt.tight_layout()
    return plt
141,108