docstring | function | __index_level_0__
---|---|---|
Returns the positions of the extrema along the MEP. Both local
minima and maxima are returned.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
Returns:
(min_extrema, max_extrema), where the extrema are given as
[(x1, y1), (x2, y2), ...]. | def get_extrema(self, normalize_rxn_coordinate=True):
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
min_extrema = []
max_extrema = []
for i in range(1, len(x) - 1):
if y[i] < y[i-1] and y[i] < y[i+1]:
min_extrema.append((x[i] * scale, y[i]))
elif y[i] > y[i-1] and y[i] > y[i+1]:
max_extrema.append((x[i] * scale, y[i]))
return min_extrema, max_extrema | 140,042 |
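A minimal self-contained sketch of the same strict-neighbor extrema scan on a synthetic profile (the profile, names, and units below are illustrative, not part of the original API):

```python
import numpy as np

# Synthetic MEP-like profile with two maxima and one interior minimum.
x = np.linspace(0.0, 1.0, 501)
y = 100.0 * np.sin(2 * np.pi * x) ** 2  # energies in meV

minima, maxima = [], []
for i in range(1, len(x) - 1):
    if y[i] < y[i - 1] and y[i] < y[i + 1]:
        minima.append((x[i], y[i]))
    elif y[i] > y[i - 1] and y[i] > y[i + 1]:
        maxima.append((x[i], y[i]))

print("minima:", minima)  # expect x ~ 0.5
print("maxima:", maxima)  # expect x ~ 0.25 and x ~ 0.75
```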
Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object. | def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
plt = pretty_plot(12, 8)
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
relative_energies = self.energies - self.energies[0]
plt.plot(self.r * scale, relative_energies * 1000, 'ro',
x * scale, y, 'k-', linewidth=2, markersize=10)
plt.xlabel("Reaction coordinate")
plt.ylabel("Energy (meV)")
plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
if label_barrier:
data = zip(x * scale, y)
barrier = max(data, key=lambda d: d[1])
plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
plt.annotate('%.0f meV' % (np.max(y) - np.min(y)),
xy=(barrier[0] / 2, barrier[1] * 1.02),
xytext=(barrier[0] / 2, barrier[1] * 1.02),
horizontalalignment='center')
plt.tight_layout()
return plt | 140,043 |
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status | def check_status(self, **kwargs):
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs) | 140,075 |
Print a short summary with the status of the flow and a counter task_status --> number_of_tasks
Args:
stream: File-like object, Default: sys.stdout
Example:
Status Count
--------- -------
Completed 10
<Flow, node_id=27163, workdir=flow_gwconv_ecuteps>, num_tasks=10, all_ok=True | def show_summary(self, **kwargs):
stream = kwargs.pop("stream", sys.stdout)
stream.write("\n")
table = list(self.status_counter.items())
s = tabulate(table, headers=["Status", "Count"])
stream.write(s + "\n")
stream.write("\n")
stream.write("%s, num_tasks=%s, all_ok=%s\n" % (str(self), self.num_tasks, self.all_ok))
stream.write("\n") | 140,079 |
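A standalone sketch of the tabulate-based summary, assuming the third-party `tabulate` package that the method above relies on, with a plain Counter standing in for `self.status_counter`:

```python
from collections import Counter
from tabulate import tabulate  # third-party package used by show_summary

# Hypothetical status counter standing in for self.status_counter.
status_counter = Counter({"Completed": 10, "Running": 2})
print(tabulate(list(status_counter.items()), headers=["Status", "Count"]))
```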
Report the status of the works and the status of the different tasks on the specified stream.
Args:
stream: File-like object, Default: sys.stdout
nids: List of node identifiers. By default all nodes are shown.
wslice: Slice object used to select works.
verbose: Verbosity level (default 0). Use verbose > 0 to show finalized works as well. | def show_status(self, **kwargs):
stream = kwargs.pop("stream", sys.stdout)
nids = as_set(kwargs.pop("nids", None))
wslice = kwargs.pop("wslice", None)
verbose = kwargs.pop("verbose", 0)
wlist = None
if wslice is not None:
# Convert range to list of work indices.
wlist = list(range(wslice.start, wslice.stop, wslice.step))
#has_colours = stream_has_colours(stream)
has_colours = True
red = "red" if has_colours else None
for i, work in enumerate(self):
if nids and work.node_id not in nids: continue
print("", file=stream)
cprint_map("Work #%d: %s, Finalized=%s" % (i, work, work.finalized), cmap={"True": "green"}, file=stream)
if wlist is not None and i not in wlist: continue
if verbose == 0 and work.finalized:
print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream)
continue
headers = ["Task", "Status", "Queue", "MPI|Omp|Gb",
"Warn|Com", "Class", "Sub|Rest|Corr", "Time",
"Node_ID"]
table = []
tot_num_errors = 0
for task in work:
if nids and task.node_id not in nids: continue
task_name = os.path.basename(task.name)
# FIXME: This should not be done here.
# get_event_report should be called only in check_status
# Parse the events in the main output.
report = task.get_event_report()
# Get time info (run-time or time in queue or None)
stime = None
timedelta = task.datetimes.get_runtime()
if timedelta is not None:
stime = str(timedelta) + "R"
else:
timedelta = task.datetimes.get_time_inqueue()
if timedelta is not None:
stime = str(timedelta) + "Q"
events = "|".join(2*["NA"])
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (
report.num_warnings, report.num_comments)))
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
qinfo = "None"
if task.queue_id is not None:
qname = str(task.qname)
if not verbose:
qname = qname[:min(5, len(qname))]
qinfo = str(task.queue_id) + "@" + qname
if task.status.is_critical:
tot_num_errors += 1
task_name = colored(task_name, red)
if has_colours:
table.append([task_name, task.status.colored, qinfo,
para_info, events] + task_info)
else:
table.append([task_name, str(task.status), qinfo, para_info,
events] + task_info)
# Print table and write colorized line with the total number of errors.
print(tabulate(table, headers=headers, tablefmt="grid"), file=stream)
if tot_num_errors:
cprint("Total number of errors: %d" % tot_num_errors, "red", file=stream)
print("", file=stream)
if self.all_ok:
cprint("\nall_ok reached\n", "green", file=stream) | 140,080 |
Print the Abinit events (ERRORS, WARNINGS, COMMENTS) to stdout
Args:
status: if not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks. | def show_events(self, status=None, nids=None):
nrows, ncols = get_terminal_size()
for task in self.iflat_tasks(status=status, nids=nids):
report = task.get_event_report()
if report:
print(make_banner(str(task), width=ncols, mark="="))
print(report) | 140,081 |
Show the corrections applied to the flow at run-time.
Args:
status: if not None, only the tasks with this status are selected.
nids: optional list of node identifiers used to filter the tasks.
Return: The number of corrections found. | def show_corrections(self, status=None, nids=None):
nrows, ncols = get_terminal_size()
count = 0
for task in self.iflat_tasks(status=status, nids=nids):
if task.num_corrections == 0: continue
count += 1
print(make_banner(str(task), width=ncols, mark="="))
for corr in task.corrections:
pprint(corr)
if not count: print("No correction found.")
return count | 140,082 |
Print the history of the flow to stdout.
Args:
status: if not None, only the tasks with this status are selected
full_history: Print full info set, including nodes with an empty history.
nids: optional list of node identifiers used to filter the tasks.
metadata: print history metadata (experimental) | def show_history(self, status=None, nids=None, full_history=False, metadata=False):
nrows, ncols = get_terminal_size()
works_done = []
# Loop over the tasks and show the history of each work if it is not in works_done
for task in self.iflat_tasks(status=status, nids=nids):
work = task.work
if work not in works_done:
works_done.append(work)
if work.history or full_history:
cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts)
print(work.history.to_string(metadata=metadata))
if task.history or full_history:
cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts)
print(task.history.to_string(metadata=metadata))
# Print the history of the flow.
if self.history or full_history:
cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts)
print(self.history.to_string(metadata=metadata)) | 140,083 |
Print the input of the tasks to the given stream.
Args:
varnames:
List of Abinit variables. If not None, only the variable in varnames
are selected and printed.
nids:
List of node identifiers. By default all nodes are shown
wslice:
Slice object used to select works.
stream:
File-like object, Default: sys.stdout | def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
if varnames is not None:
# Build dictionary varname --> [(task1, value), (task2, value), ...]
varnames = [s.strip() for s in list_strings(varnames)]
dlist = collections.defaultdict(list)
for task in self.select_tasks(nids=nids, wslice=wslice):
dstruct = task.input.structure.as_dict(fmt="abivars")
for vname in varnames:
value = task.input.get(vname, None)
if value is None: # maybe in structure?
value = dstruct.get(vname, None)
if value is not None:
dlist[vname].append((task, value))
for vname in varnames:
tv_list = dlist[vname]
if not tv_list:
stream.write("[%s]: Found 0 tasks with this variable\n" % vname)
else:
stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list)))
for i, (task, value) in enumerate(tv_list):
stream.write(" %s --> %s\n" % (str(value), task))
stream.write("\n")
else:
lines = []
for task in self.select_tasks(nids=nids, wslice=wslice):
s = task.make_input(with_header=True)
# Add info on dependencies.
if task.deps:
s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps)
else:
s += "\n\nDependencies: None"
lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n")
stream.writelines(lines) | 140,084 |
Return a list with a subset of tasks.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
.. note::
nids and wslice are mutually exclusive.
If no argument is provided, the full list of tasks is returned. | def select_tasks(self, nids=None, wslice=None, task_class=None):
if nids is not None:
assert wslice is None
tasks = self.tasks_from_nids(nids)
elif wslice is not None:
tasks = []
for work in self[wslice]:
tasks.extend([t for t in work])
else:
# All tasks selected if no option is provided.
tasks = list(self.iflat_tasks())
# Filter by task class
if task_class is not None:
tasks = [t for t in tasks if t.isinstance(task_class)]
return tasks | 140,086 |
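A standalone sketch of the selection logic, with plain lists standing in for works and tasks (illustrative stand-ins, not the abipy classes):

```python
# Works are lists of task names here; select_tasks mirrors the wslice branch.
works = [["t0", "t1"], ["t2"], ["t3", "t4"]]

def select_tasks(wslice=None):
    if wslice is not None:
        tasks = []
        for work in works[wslice]:
            tasks.extend(work)
        return tasks
    # No option provided: return every task.
    return [t for work in works for t in work]

print(select_tasks(slice(0, 2)))  # ['t0', 't1', 't2']
print(select_tasks())             # all five tasks
```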
Return a list of (task, scf_cycle) tuples for all tasks in the flow that use an SCF algorithm,
e.g. electronic GS-SCF iterations, DFPT-SCF iterations etc.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
task_class: String or class used to select tasks. Ignored if None.
exclude_ok_tasks: True if only running tasks should be considered.
Returns:
List of (task, scf_cycle) tuples, where scf_cycle is a `ScfCycle` subclass instance. | def get_task_scfcycles(self, nids=None, wslice=None, task_class=None, exclude_ok_tasks=False):
select_status = [self.S_RUN] if exclude_ok_tasks else [self.S_RUN, self.S_OK]
tasks_cycles = []
for task in self.select_tasks(nids=nids, wslice=wslice):
# Filter by status and cycle class.
if task.status not in select_status or task.cycle_class is None:
continue
if task_class is not None and not task.isinstance(task_class):
continue
try:
cycle = task.cycle_class.from_file(task.output_file.path)
if cycle is not None:
tasks_cycles.append((task, cycle))
except Exception:
# This is intentionally ignored because from_file can fail for several reasons.
pass
return tasks_cycles | 140,087 |
Print list of tricky tasks i.e. tasks that have been restarted or
launched more than once or tasks with corrections.
Args:
verbose: Verbosity level. If > 0, task history and corrections (if any) are printed. | def show_tricky_tasks(self, verbose=0):
nids, tasks = [], []
for task in self.iflat_tasks():
if task.num_launches > 1 or any(n > 0 for n in (task.num_restarts, task.num_corrections)):
nids.append(task.node_id)
tasks.append(task)
if not nids:
cprint("Everything's fine, no tricky tasks found", color="green")
else:
self.show_status(nids=nids)
if not verbose:
print("Use --verbose to print task history.")
return
for nid, task in zip(nids, tasks):
cprint(repr(task), **task.status.color_opts)
self.show_history(nids=[nid], full_history=False, metadata=False)
#if task.num_restarts:
# self.show_restarts(nids=[nid])
if task.num_corrections:
self.show_corrections(nids=[nid]) | 140,088 |
Inspect the tasks (SCF iterations, Structural relaxation ...) and
produce matplotlib plots.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
kwargs: keyword arguments passed to `task.inspect` method.
.. note::
nids and wslice are mutually exclusive.
If nids and wslice are both None, all tasks in self are inspected.
Returns:
List of `matplotlib` figures. | def inspect(self, nids=None, wslice=None, **kwargs):
figs = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if hasattr(task, "inspect"):
fig = task.inspect(**kwargs)
if fig is None:
cprint("Cannot inspect Task %s" % task, color="blue")
else:
figs.append(fig)
else:
cprint("Task %s does not provide an inspect method" % task, color="blue")
return figs | 140,089 |
Parse the timer data in the main output file(s) of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Args:
nids: optional list of node identifiers used to filter the tasks.
Return: :class:`AbinitTimerParser` instance, None if error. | def parse_timing(self, nids=None):
# Get the list of output files according to nids.
paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]
# Parse data.
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(paths)
if read_ok:
return parser
return None | 140,096 |
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout | def show_abierrors(self, nids=None, stream=sys.stdout):
lines = []
app = lines.append
for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
app(header)
report = task.get_event_report()
if report is not None:
app("num_errors: %s, num_warnings: %s, num_comments: %s" % (
report.num_errors, report.num_warnings, report.num_comments))
app("*** ERRORS ***")
app("\n".join(str(e) for e in report.errors))
app("*** BUGS ***")
app("\n".join(str(b) for b in report.bugs))
else:
app("get_envent_report returned None!")
app("=" * len(header) + 2*"\n")
return stream.writelines(lines) | 140,097 |
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout | def show_qouts(self, nids=None, stream=sys.stdout):
lines = []
for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
lines.append(header)
if task.qout_file.exists:
with open(task.qout_file.path, "rt") as fh:
lines += fh.readlines()
else:
lines.append("File does not exist!")
lines.append("=" * len(header) + 2*"\n")
return stream.writelines(lines) | 140,098 |
This method is usually used when the flow didn't complete successfully.
It analyzes the files produced by the tasks to facilitate debugging.
Info is printed to stdout.
Args:
status: If not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks. | def debug(self, status=None, nids=None):
nrows, ncols = get_terminal_size()
# Test for scheduler exceptions first.
sched_excfile = os.path.join(self.workdir, "_exceptions")
if os.path.exists(sched_excfile):
with open(sched_excfile, "r") as fh:
cprint("Found exceptions raised by the scheduler", "red")
cprint(fh.read(), color="red")
return
if status is not None:
tasks = list(self.iflat_tasks(status=status, nids=nids))
else:
errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))
qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))
abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))
tasks = errors + qcriticals + abicriticals
# For each task selected:
# 1) Check the error files of the task. If not empty, print the content to stdout and we are done.
# 2) If error files are empty, look at the master log file for possible errors
# 3) If this check also fails, scan all the process log files.
# TODO: This check is not needed if we introduce a new __abinit_error__ file
# that is created by the first MPI process that invokes MPI abort!
#
ntasks = 0
for task in tasks:
print(make_banner(str(task), width=ncols, mark="="))
ntasks += 1
# Start with error files.
for efname in ["qerr_file", "stderr_file",]:
err_file = getattr(task, efname)
if err_file.exists:
s = err_file.read()
if not s: continue
print(make_banner(str(err_file), width=ncols, mark="="))
cprint(s, color="red")
#count += 1
# Check main log file.
try:
report = task.get_event_report()
if report and report.num_errors:
print(make_banner(os.path.basename(report.filename), width=ncols, mark="="))
s = "\n".join(str(e) for e in report.errors)
else:
s = None
except Exception as exc:
s = str(exc)
count = 0 # count > 0 means we found some useful info that could explain the failures.
if s is not None:
cprint(s, color="red")
count += 1
if not count:
# Inspect all log files produced by the other nodes.
log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*")
if not log_files:
cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs", color="magenta")
for log_file in log_files:
try:
report = EventsParser().parse(log_file)
if report.errors:
print(report)
count += 1
break
except Exception as exc:
cprint(str(exc), color="red")
count += 1
break
if not count:
cprint("Houston, we could not find any error message that can explain the problem", color="magenta")
print("Number of tasks analyzed: %d" % ntasks) | 140,099 |
Build dirs and files of the `Flow` and save the object in pickle format.
Returns 0 if success
Args:
abivalidate: If True, all the input files are validated by calling
the abinit parser. If the validation fails, ValueError is raised. | def build_and_pickle_dump(self, abivalidate=False):
self.build()
if not abivalidate: return self.pickle_dump()
# Validation with Abinit.
isok, errors = self.abivalidate_inputs()
if isok: return self.pickle_dump()
errlines = []
for i, e in enumerate(errors):
errlines.append("[%d] %s" % (i, e))
raise ValueError("\n".join(errlines)) | 140,103 |
Allocate the `Flow` i.e. assign the `workdir` and (optionally)
the :class:`TaskManager` to the different tasks in the Flow.
Args:
workdir: Working directory of the flow. Must be specified here
if we haven't initialized the workdir in the __init__.
use_smartio: True to activate smart-io mode in the tasks (calls self.use_smartio()).
Return:
self | def allocate(self, workdir=None, use_smartio=False):
if workdir is not None:
# We set the workdir of the flow here
self.set_workdir(workdir)
for i, work in enumerate(self):
work.set_workdir(os.path.join(self.workdir, "w" + str(i)))
if not hasattr(self, "workdir"):
raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__")
for work in self:
# Each work has a reference to its flow.
work.allocate(manager=self.manager)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
self.check_dependencies()
if not hasattr(self, "_allocated"): self._allocated = 0
self._allocated += 1
if use_smartio:
self.use_smartio()
return self | 140,109 |
Use :class:`PyLauncher` to submit tasks in rapidfire mode.
kwargs contains the options passed to the launcher.
Args:
check_status: True to check the status of the flow before submitting the tasks.
max_nlaunch: Maximum number of launches. default: no limit.
max_loops: Maximum number of loops
sleep_time: seconds to sleep between rapidfire loop iterations
Return:
Number of tasks submitted. | def rapidfire(self, check_status=True, max_nlaunch=-1, max_loops=1, sleep_time=5, **kwargs):
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).rapidfire(max_nlaunch=max_nlaunch, max_loops=max_loops, sleep_time=sleep_time) | 140,119 |
Build and return a :class:`PyFlowScheduler` to run the flow.
Args:
kwargs: if empty we use the user configuration file.
if `filepath` in kwargs we init the scheduler from filepath.
else pass **kwargs to :class:`PyFlowScheduler` __init__ method. | def make_scheduler(self, **kwargs):
from .launcher import PyFlowScheduler
if not kwargs:
# User config if kwargs is empty
sched = PyFlowScheduler.from_user_config()
else:
# Use from_file if filepath if present, else call __init__
filepath = kwargs.pop("filepath", None)
if filepath is not None:
assert not kwargs
sched = PyFlowScheduler.from_file(filepath)
else:
sched = PyFlowScheduler(**kwargs)
sched.add_flow(self)
return sched | 140,121 |
Run the flow in batch mode, return exit status of the job script.
Requires a manager.yml file and a batch_adapter adapter.
Args:
timelimit: Time limit (int with seconds or string with time given in the Slurm convention:
"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the
`batch_adapter` entry of `manager.yml` is used. | def batch(self, timelimit=None):
from .launcher import BatchLauncher
# Create a batch dir from the flow.workdir.
prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])
prev_dir = os.path.join(os.path.sep, prev_dir)
workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit) | 140,122 |
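For absolute POSIX workdirs, the two-step `prev_dir` construction above reduces to `os.path.dirname`; a quick check with an illustrative path:

```python
import os

workdir = "/tmp/flow_test"
prev_dir = os.path.join(*workdir.split(os.path.sep)[:-1])
prev_dir = os.path.join(os.path.sep, prev_dir)
assert prev_dir == os.path.dirname(workdir)
# Batch dir sits next to the flow workdir with a "_batch" suffix.
print(os.path.join(prev_dir, os.path.basename(workdir) + "_batch"))  # /tmp/flow_test_batch
```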
Generate flow graph in the DOT language.
Args:
engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
graph_attr: Mapping of (attribute, value) pairs for the graph.
node_attr: Mapping of (attribute, value) pairs set for all nodes.
edge_attr: Mapping of (attribute, value) pairs set for all edges.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph> | def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None):
self.allocate()
from graphviz import Digraph
fg = Digraph("flow", #filename="flow_%s.gv" % os.path.basename(self.relworkdir),
engine="fdp" if engine == "automatic" else engine)
# Set graph attributes.
# https://www.graphviz.org/doc/info/
#fg.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))
fg.attr(label=repr(self))
#fg.attr(fontcolor="white", bgcolor='purple:pink')
fg.attr(rankdir="LR", pagedir="BL")
#fg.attr(constraint="false", pack="true", packMode="clust")
fg.node_attr.update(color='lightblue2', style='filled')
#fg.node_attr.update(ranksep='equally')
# Add input attributes.
if graph_attr is not None:
fg.graph_attr.update(**graph_attr)
if node_attr is not None:
fg.node_attr.update(**node_attr)
if edge_attr is not None:
fg.edge_attr.update(**edge_attr)
def node_kwargs(node):
return dict(
#shape="circle",
color=node.color_hex,
fontsize="8.0",
label=(str(node) if not hasattr(node, "pos_str") else
node.pos_str + "\n" + node.__class__.__name__),
)
edge_kwargs = dict(arrowType="vee", style="solid")
cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
for work in self:
# Build cluster with tasks.
cluster_name = "cluster%s" % work.name
with fg.subgraph(name=cluster_name) as wg:
wg.attr(**cluster_kwargs)
wg.attr(label="%s (%s)" % (work.__class__.__name__, work.name))
#wg.attr(label=repr(work))
#wg.attr(label="%s (%s)\n%s (%s)" % (
# work.__class__.__name__, work.name, work.relworkdir, work.node_id))
for task in work:
wg.node(task.name, **node_kwargs(task))
# Connect children to task.
for child in task.get_children():
# Find file extensions required by this task
i = [dep.node for dep in child.deps].index(task)
edge_label = "+".join(child.deps[i].exts)
fg.edge(task.name, child.name, label=edge_label, color=task.color_hex,
**edge_kwargs)
# Treat the case in which we have a work producing output for other tasks.
for work in self:
children = work.get_children()
if not children: continue
cluster_name = "cluster%s" % work.name
seen = set()
for child in children:
# This is not needed, too confusing
#fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs)
# Find file extensions required by work
i = [dep.node for dep in child.deps].index(work)
for ext in child.deps[i].exts:
out = "%s (%s)" % (ext, work.name)
fg.node(out)
fg.edge(out, child.name, **edge_kwargs)
key = (cluster_name, out)
if key not in seen:
seen.add(key)
fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs)
# Treat the case in which we have a task that depends on external files.
seen = set()
for task in self.iflat_tasks():
#print(task.get_parents())
for node in (p for p in task.get_parents() if p.is_file):
#print("parent file node", node)
#infile = "%s (%s)" % (ext, work.name)
infile = node.filepath
if infile not in seen:
seen.add(infile)
fg.node(infile, **node_kwargs(node))
fg.edge(infile, task.name, color=node.color_hex, **edge_kwargs)
return fg | 140,125 |
Generate flow graph in the DOT language and plot it with matplotlib.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
figsize: matplotlib figure size (None to use default)
dpi: DPI value.
fmt: Select format for output image
Return: matplotlib Figure | def graphviz_imshow(self, ax=None, figsize=None, dpi=300, fmt="png", **kwargs):
graph = self.get_graphviz(**kwargs)
graph.format = fmt
graph.attr(dpi=str(dpi))
#print(graph)
_, tmpname = tempfile.mkstemp()
path = graph.render(tmpname, view=False, cleanup=True)
ax, fig, _ = get_ax_fig_plt(ax=ax, figsize=figsize, dpi=dpi)
import matplotlib.image as mpimg
ax.imshow(mpimg.imread(path, format="png")) #, interpolation="none")
ax.axis("off")
return fig | 140,126 |
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula. | def get_structure_from_mp(formula):
m = MPRester()
entries = m.get_entries(formula, inc_structure="final")
if len(entries) == 0:
raise ValueError("No structure with formula %s in Materials Project!" %
formula)
elif len(entries) > 1:
warnings.warn("%d structures with formula %s found in Materials "
"Project. The lowest energy structure will be returned." %
(len(entries), formula))
return min(entries, key=lambda e: e.energy_per_atom).structure | 140,157 |
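Hypothetical usage of the function above; it requires network access and a valid Materials Project API key in `PMG_MAPI_KEY`, and the formula is illustrative:

```python
# Hypothetical usage; get_structure_from_mp is the function documented above.
structure = get_structure_from_mp("Fe2O3")
print(structure.composition.reduced_formula, structure.get_space_group_info())
```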
Convenience method to perform quick loading of data from a filename. The
type of object returned depends on the file type.
Args:
fname (string): A filename.
Returns:
Note that fname is matched using unix-style, i.e., fnmatch.
(Structure) if *POSCAR*/*CONTCAR*/*.cif
(Vasprun) *vasprun*
(obj) if *json* (passthrough to monty.serialization.loadfn) | def loadfn(fname):
if (fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or
".cif" in fname.lower()) or fnmatch(fname, "*.vasp"):
return Structure.from_file(fname)
elif fnmatch(fname, "*vasprun*"):
from pymatgen.io.vasp import Vasprun
return Vasprun(fname)
elif fnmatch(fname, "*.json*"):
from monty.serialization import loadfn
return loadfn(fname) | 140,158 |
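A toy illustration of the same unix-style filename dispatch, using only the standard-library `fnmatch` (the filenames below are made up):

```python
from fnmatch import fnmatch

for fname in ["POSCAR", "my_vasprun.xml", "data.json.gz", "SiO2.cif"]:
    if fnmatch(fname, "*POSCAR*") or fname.lower().endswith(".cif"):
        kind = "Structure"
    elif fnmatch(fname, "*vasprun*"):
        kind = "Vasprun"
    elif fnmatch(fname, "*.json*"):
        kind = "monty loadfn"
    print("%-14s -> %s" % (fname, kind))
```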
Save the assimilated data to a file.
Args:
filename (str): filename to save the assimilated data to. Note
that if the filename ends with gz or bz2, the relevant gzip
or bz2 compression will be applied. | def save_data(self, filename):
with zopen(filename, "wt") as f:
json.dump(list(self._data), f, cls=MontyEncoder) | 140,163 |
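A minimal sketch of the compression-aware write, assuming the `monty` package that the method above imports from; the filename and data are illustrative:

```python
import json
from monty.io import zopen          # compression-aware open
from monty.json import MontyEncoder

# The .gz suffix makes zopen apply gzip compression transparently.
data = [{"energy": -1.23, "formula": "NaCl"}]
with zopen("assimilated.json.gz", "wt") as f:
    json.dump(data, f, cls=MontyEncoder)
```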
Returns the nac_frequencies for the given direction (not necessarily a versor).
None if the direction is not present or nac_frequencies has not been calculated.
Args:
direction: the direction as a list of 3 elements
Returns:
the frequencies as a numpy array of shape (3*len(structure),).
None if not found. | def get_nac_frequencies_along_dir(self, direction):
versor = [i / np.linalg.norm(direction) for i in direction]
for d, f in self.nac_frequencies:
if np.allclose(versor, d):
return f
return None | 140,169 |
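A standalone sketch of the direction matching: normalize the input to a versor, then compare against the stored directions with `np.allclose` (the stored data below is illustrative):

```python
import numpy as np

# Toy (direction, frequencies) pairs standing in for self.nac_frequencies.
nac_frequencies = [(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.2, 3.4]))]

def lookup(direction):
    versor = np.asarray(direction, dtype=float) / np.linalg.norm(direction)
    for d, f in nac_frequencies:
        if np.allclose(versor, d):
            return f
    return None

print(lookup([2, 0, 0]))  # same direction, different norm -> frequencies
print(lookup([0, 1, 0]))  # direction not stored -> None
```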
Returns the nac_eigendisplacements for the given direction (not necessarily a versor).
None if the direction is not present or nac_eigendisplacements has not been calculated.
Args:
direction: the direction as a list of 3 elements
Returns:
the eigendisplacements as a numpy array of complex numbers with shape
(3*len(structure), len(structure), 3). None if not found. | def get_nac_eigendisplacements_along_dir(self, direction):
versor = [i / np.linalg.norm(direction) for i in direction]
for d, e in self.nac_eigendisplacements:
if np.allclose(versor, d):
return e
return None | 140,170 |
Returns the list of qpoint indices equivalent (meaning they are the
same frac coords) to the given one.
Args:
index: the qpoint index
Returns:
a list of equivalent indices
TODO: this currently uses the label; we might want to use coordinates instead
(in case there was a mislabel) | def get_equivalent_qpoints(self, index):
#if the qpoint has no label it can't have a repetition along the band
#structure line object
if self.qpoints[index].label is None:
return [index]
list_index_qpoints = []
for i in range(self.nb_qpoints):
if self.qpoints[i].label == self.qpoints[index].label:
list_index_qpoints.append(i)
return list_index_qpoints | 140,175 |
Returns in which branch(es) the qpoint lies. There can be several
branches.
Args:
index: the qpoint index
Returns:
A list of dictionaries [{"name","start_index","end_index","index"}]
indicating all branches in which the qpoint is. It takes into
account the fact that one qpoint (e.g., \\Gamma) can be in several
branches | def get_branch(self, index):
to_return = []
for i in self.get_equivalent_qpoints(index):
for b in self.branches:
if b["start_index"] <= i <= b["end_index"]:
to_return.append({"name": b["name"],
"start_index": b["start_index"],
"end_index": b["end_index"],
"index": i})
return to_return | 140,176 |
Applies a structure_filter to the list of TransformedStructures
in the transmuter.
Args:
structure_filter: StructureFilter to apply. | def apply_filter(self, structure_filter):
def test_transformed_structure(ts):
return structure_filter.test(ts.final_structure)
self.transformed_structures = list(filter(test_transformed_structure,
self.transformed_structures))
for ts in self.transformed_structures:
ts.append_filter(structure_filter) | 140,186 |
Add parameters to the transmuter. Additional parameters are stored in
the as_dict() output.
Args:
key: The key for the parameter.
value: The value for the parameter. | def set_parameter(self, key, value):
for x in self.transformed_structures:
x.other_parameters[key] = value | 140,187 |
Method is overloaded to accept either a list of transformed structures
or a transmuter, in which case it appends the second transmuter's
structures.
Args:
tstructs_or_transmuter: A list of transformed structures or a
transmuter. | def append_transformed_structures(self, tstructs_or_transmuter):
if isinstance(tstructs_or_transmuter, self.__class__):
self.transformed_structures.extend(tstructs_or_transmuter
.transformed_structures)
else:
for ts in tstructs_or_transmuter:
assert isinstance(ts, TransformedStructure)
self.transformed_structures.extend(tstructs_or_transmuter) | 140,189 |
Generates a TransformedStructureCollection from a cif, possibly
containing multiple structures.
Args:
filenames: List of strings of the cif files
transformations: New transformations to be applied to all
structures
primitive: Same meaning as in __init__.
extend_collection: Same meaning as in __init__. | def from_filenames(filenames, transformations=None, primitive=True,
extend_collection=False):
allcifs = []
for fname in filenames:
with open(fname, "r") as f:
allcifs.append(f.read())
return CifTransmuter("\n".join(allcifs), transformations,
primitive=primitive,
extend_collection=extend_collection) | 140,192 |
Convenience constructor to generate a POSCAR transmuter from a list of
POSCAR filenames.
Args:
poscar_filenames: List of POSCAR filenames
transformations: New transformations to be applied to all
structures.
extend_collection:
Same meaning as in __init__. | def from_filenames(poscar_filenames, transformations=None,
extend_collection=False):
tstructs = []
for filename in poscar_filenames:
with open(filename, "r") as f:
tstructs.append(TransformedStructure
.from_poscar_string(f.read(), []))
return StandardTransmuter(tstructs, transformations,
extend_collection=extend_collection) | 140,194 |
Compare multiple cycles on a grid: one subplot per quantity,
all cycles on the same subplot.
Args:
fontsize: Legend fontsize. | def combiplot(self, fontsize=8, **kwargs):
ax_list = None
for i, (label, cycle) in enumerate(self.items()):
fig = cycle.plot(ax_list=ax_list, label=label, fontsize=fontsize,
lw=2.0, marker="o", linestyle="-", show=False)
ax_list = fig.axes
return fig | 140,202 |
Uses matplotlib to show the evolution of the structural relaxation as a
slideshow: one figure per relaxation step.
Args:
kwargs: keyword arguments passed to cycle.plot (e.g. tight_layout, show). | def slideshow(self, **kwargs):
for i, cycle in enumerate(self.cycles):
cycle.plot(title="Relaxation step %s" % (i + 1),
tight_layout=kwargs.pop("tight_layout", True),
show=kwargs.pop("show", True)) | 140,208 |
Plot relaxation history i.e. the results of the last iteration of each SCF cycle.
Args:
ax_list: List of axes. If None a new figure is produced.
fontsize: legend fontsize.
kwargs: keyword arguments are passed to ax.plot
Returns: matplotlib figure | def plot(self, ax_list=None, fontsize=12, **kwargs):
history = self.history
# Build grid of plots.
num_plots, ncols, nrows = len(history), 1, 1
if num_plots > 1:
ncols = 2
nrows = num_plots // ncols + num_plots % ncols
ax_list, fig, plot = get_axarray_fig_plt(ax_list, nrows=nrows, ncols=ncols,
sharex=True, sharey=False, squeeze=False)
ax_list = np.array(ax_list).ravel()
iter_num = np.array(list(range(self.num_iterations))) + 1
label = kwargs.pop("label", None)
for i, ((key, values), ax) in enumerate(zip(history.items(), ax_list)):
ax.grid(True)
ax.set_xlabel('Relaxation Step')
ax.set_xticks(iter_num, minor=False)
ax.set_ylabel(key)
xx, yy = iter_num, values
if not kwargs and label is None:
ax.plot(xx, yy, "-o", lw=2.0)
else:
ax.plot(xx, yy, label=label if i == 0 else None, **kwargs)
if key in _VARS_SUPPORTING_LOGSCALE and np.all(yy > 1e-22):
ax.set_yscale("log")
if key in _VARS_WITH_YRANGE:
ymin, ymax = _VARS_WITH_YRANGE[key]
val_min, val_max = np.min(yy), np.max(yy)
if abs(val_max - val_min) > abs(ymax - ymin):
ax.set_ylim(ymin, ymax)
if label is not None:
ax.legend(loc="best", fontsize=fontsize, shadow=True)
# Get around a bug in matplotlib.
if num_plots % ncols != 0:
ax_list[-1].plot(xx, yy, lw=0.0)
ax_list[-1].axis('off')
return fig | 140,209 |
Merge GKK files, return the absolute path of the new database.
Args:
gswfk_file: Ground-state WFK filename
dfpt_files: List of 1WFK files to merge.
gkk_files: List of GKK files to merge.
out_gkk: Name of the output GKK file
binascii: Integer flag. 0 --> binary output, 1 --> ascii formatted output | def merge(self, workdir, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0):
raise NotImplementedError("This method should be tested")
#out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)
# We work with absolute paths.
gswfk_file = os.path.abspath(gswfk_file)
dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]
print("Will merge %d 1WF files, %d GKK file in output %s" %
(len(dfpt_files), len(gkk_files), out_gkk))
if self.verbose:
for i, f in enumerate(dfpt_files): print(" [%d] 1WF %s" % (i, f))
for i, f in enumerate(gkk_files): print(" [%d] GKK %s" % (i, f))
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [workdir], ["mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr"])
inp = StringIO()
inp.write(out_gkk + "\n") # Name of the output file
inp.write(str(binascii) + "\n") # Integer flag: 0 --> binary output, 1 --> ascii formatted output
inp.write(gswfk_file + "\n") # Name of the ground state wavefunction file WF
#dims = len(dfpt_files, gkk_files, ?)
dims = " ".join([str(d) for d in dims]) # FIXME: dims is never defined; this code is unreachable until the NotImplementedError above is removed.
inp.write(dims + "\n") # Number of 1WF files, of GKK files, and number of 1WF files in all the GKK files
# Names of the 1WF files...
for fname in dfpt_files:
inp.write(fname + "\n")
# Names of the GKK files...
for fname in gkk_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
# Force OS to write data to disk.
fh.flush()
os.fsync(fh.fileno())
self.execute(workdir)
return out_gkk | 140,222 |
Merge POT files containing the 1st-order DFPT potentials and
return the absolute path of the new database in workdir.
Args:
delete_source: True if POT1 files should be removed after (successful) merge. | def merge(self, workdir, pot_files, out_dvdb, delete_source=True):
# We work with absolute paths.
pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]
if not os.path.isabs(out_dvdb):
out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))
if self.verbose:
print("Will merge %d files into output DVDB %s" % (len(pot_files), out_dvdb))
for i, f in enumerate(pot_files):
print(" [%d] %s" % (i, f))
# Handle the case of a single file since mrgddb uses 1 to denote GS files!
if len(pot_files) == 1:
with open(pot_files[0], "r") as inh, open(out_dvdb, "w") as out:
for line in inh:
out.write(line)
return out_dvdb
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [os.path.abspath(workdir)], ["mrgdvdb.stdin", "mrgdvdb.stdout", "mrgdvdb.stderr"])
inp = StringIO()
inp.write(out_dvdb + "\n") # Name of the output file.
inp.write(str(len(pot_files)) + "\n") # Number of input POT files.
# Names of the POT files.
for fname in pot_files:
inp.write(fname + "\n")
self.stdin_data = [s for s in inp.getvalue()]
with open(self.stdin_fname, "wt") as fh:
fh.writelines(self.stdin_data)
# Force OS to write data to disk.
fh.flush()
os.fsync(fh.fileno())
retcode = self.execute(workdir)
if retcode == 0 and delete_source:
# Remove pot files.
for f in pot_files:
try:
os.remove(f)
except IOError:
pass
return out_dvdb | 140,224 |
Runs cut3d with a Cut3DInput
Args:
cut3d_input: a Cut3DInput object.
workdir: directory where cut3d is executed.
Returns:
(string) absolute path to the standard output of the cut3d execution.
(string) absolute path to the output filepath. None if no output file is required. | def cut3d(self, cut3d_input, workdir):
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [os.path.abspath(workdir)], ["cut3d.stdin", "cut3d.stdout", "cut3d.stderr"])
cut3d_input.write(self.stdin_fname)
retcode = self._execute(workdir, with_mpirun=False)
if retcode != 0:
raise RuntimeError("Error while running cut3d in %s." % workdir)
output_filepath = cut3d_input.output_filepath
if output_filepath is not None:
if not os.path.isabs(output_filepath):
output_filepath = os.path.abspath(os.path.join(workdir, output_filepath))
if not os.path.isfile(output_filepath):
raise RuntimeError("The file was not converted correctly in %s." % workdir)
return self.stdout_fname, output_filepath | 140,225 |
Remove duplicate structures based on the structure matcher
and symmetry (if symprec is given).
Args:
structure_matcher: Provides a structure matcher to be used for
structure comparison.
symprec: The precision in the symmetry finder algorithm. If None
(default), no symmetry check is performed and only the structure
matcher is used. A recommended value is 1e-5. | def __init__(self, structure_matcher=StructureMatcher(
comparator=ElementComparator()), symprec=None):
self.symprec = symprec
self.structure_list = defaultdict(list)
if isinstance(structure_matcher, dict):
self.structure_matcher = StructureMatcher.from_dict(structure_matcher)
else:
self.structure_matcher = structure_matcher | 140,234 |
Calculates the BV sum of a site.
Args:
site:
The site
nn_list:
List of nearest neighbors in the format [(nn_site, dist), ...].
scale_factor:
A scale factor to be applied. This is useful for scaling distances,
especially in the case of calculation-relaxed structures, which may tend
to underbind (GGA) or overbind (LDA). | def calculate_bv_sum(site, nn_list, scale_factor=1.0):
el1 = Element(site.specie.symbol)
bvsum = 0
for (nn, dist) in nn_list:
el2 = Element(nn.specie.symbol)
if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:
r1 = BV_PARAMS[el1]["r"]
r2 = BV_PARAMS[el2]["r"]
c1 = BV_PARAMS[el1]["c"]
c2 = BV_PARAMS[el2]["c"]
R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
(c1 * r1 + c2 * r2)
vij = exp((R - dist * scale_factor) / 0.31)
bvsum += vij * (1 if el1.X < el2.X else -1)
return bvsum | 140,256 |
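A one-bond worked example of the bond-valence expression used above, v_ij = exp((R0 - d)/0.31); the r/c parameters below are hypothetical stand-ins, not values from the real BV_PARAMS table:

```python
from math import exp, sqrt

# Illustrative parameters for a single cation-anion bond
# (hypothetical values, not taken from the actual BV_PARAMS table).
r1, c1 = 1.20, 1.5   # "cation" r and c parameters
r2, c2 = 1.40, 3.0   # "anion" parameters
dist = 2.05          # bond length in Angstrom

R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / (c1 * r1 + c2 * r2)
vij = exp((R - dist) / 0.31)
print("R0 = %.3f A, v_ij = %.3f" % (R, vij))
```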
Calculates the BV sum of a site for unordered structures.
Args:
site:
The site
nn_list:
List of nearest neighbors in the format [(nn_site, dist), ...].
scale_factor:
A scale factor to be applied. This is useful for scaling distances,
especially in the case of calculation-relaxed structures, which may tend
to underbind (GGA) or overbind (LDA). | def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):
# If the site "site" has N partial occupations as : f_{site}_0,
# f_{site}_1, ... f_{site}_N of elements
# X_{site}_0, X_{site}_1, ... X_{site}_N, and each neighbors nn_i in nn
# has N_{nn_i} partial occupations as :
# f_{nn_i}_0, f_{nn_i}_1, ..., f_{nn_i}_{N_{nn_i}}, then the bv sum of
# site "site" is obtained as :
# \sum_{nn} \sum_j^N \sum_k^{N_{nn}} f_{site}_j f_{nn_i}_k vij_full
# where vij_full is the valence bond of the fully occupied bond
bvsum = 0
for specie1, occu1 in site.species.items():
el1 = Element(specie1.symbol)
for (nn, dist) in nn_list:
for specie2, occu2 in nn.species.items():
el2 = Element(specie2.symbol)
if (el1 in ELECTRONEG or el2 in ELECTRONEG) and el1 != el2:
r1 = BV_PARAMS[el1]["r"]
r2 = BV_PARAMS[el2]["r"]
c1 = BV_PARAMS[el1]["c"]
c2 = BV_PARAMS[el2]["c"]
R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
(c1 * r1 + c2 * r2)
vij = exp((R - dist * scale_factor) / 0.31)
bvsum += occu1 * occu2 * vij * (1 if el1.X < el2.X else -1)
return bvsum | 140,257 |
Add oxidation states to a structure by fractional site.
Args:
oxidation_states (list): List of list of oxidation states for each
site fraction for each site.
E.g., [[2, 4], [3], [-2], [-2], [-2]] | def add_oxidation_state_by_site_fraction(structure, oxidation_states):
try:
for i, site in enumerate(structure):
new_sp = collections.defaultdict(float)
for j, (el, occu) in enumerate(get_z_ordered_elmap(site
.species)):
specie = Specie(el.symbol, oxidation_states[i][j])
new_sp[specie] += occu
structure[i] = new_sp
return structure
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the list.") | 140,258 |
Get an oxidation state decorated structure. This handles both ordered
structures and disordered structures (by site fraction).
Args:
structure: Structure to analyze
Returns:
A modified structure that is oxidation state decorated.
Raises:
ValueError if the valences cannot be determined. | def get_oxi_state_decorated_structure(self, structure):
s = structure.copy()
if s.is_ordered:
valences = self.get_valences(s)
s.add_oxidation_state_by_site(valences)
else:
valences = self.get_valences(s)
s = add_oxidation_state_by_site_fraction(s, valences)
return s | 140,262 |
Concatenate another trajectory
Args:
trajectory (Trajectory): Trajectory to add | def extend(self, trajectory):
if self.time_step != trajectory.time_step:
raise ValueError('Trajectory not extended: time steps of the trajectories are incompatible')
if self.species != trajectory.species:
raise ValueError('Trajectory not extended: species in trajectory do not match')
self.to_positions()
trajectory.to_positions()
self.frac_coords = np.concatenate((self.frac_coords, trajectory.frac_coords), axis=0)
self.lattice, self.constant_lattice = self._combine_attribute(self.lattice, trajectory.lattice,
self.frac_coords.shape[0],
trajectory.frac_coords.shape[0])
self.site_properties = self._combine_attribute(self.site_properties, trajectory.site_properties,
self.frac_coords.shape[0], trajectory.frac_coords.shape[0]) | 140,266 |
Gets a subset of the trajectory: a Trajectory if a slice is given, a Structure if an int is given
Args:
frames (int, slice): int or slice of trajectory to return
Return:
(Trajectory, Structure) Subset of trajectory | def __getitem__(self, frames):
if isinstance(frames, int) and frames < self.frac_coords.shape[0]:
lattice = self.lattice if self.constant_lattice else self.lattice[frames]
site_properties = self.site_properties[frames] if self.site_properties else None
return Structure(Lattice(lattice), self.species, self.frac_coords[frames], site_properties=site_properties,
to_unit_cell=True)
if isinstance(frames, slice):
frames = np.arange(frames.start, frames.stop, frames.step)
elif not (isinstance(frames, list) or isinstance(frames, np.ndarray)):
try:
frames = np.asarray(frames)
except Exception:
raise Exception('Given accessor is not of type int, slice, tuple, list, or array')
if (isinstance(frames, list) or isinstance(frames, np.ndarray)) and \
(np.asarray([frames]) < self.frac_coords.shape[0]).all():
if self.constant_lattice:
lattice = self.lattice
else:
lattice = self.lattice[frames, :]
return Trajectory(lattice, self.species, self.frac_coords[frames, :], self.time_step,
self.site_properties)
else:
warnings.warn('Some or all selected frames exceed trajectory length')
return | 140,267 |
Convenience constructor to obtain trajectory from a list of structures.
Note: Assumes no atoms are removed during the simulation
Args:
structures (list): list of pymatgen Structure objects.
constant_lattice (bool): Whether the lattice is constant during the simulation
(False for e.g. an NPT MD simulation). If True, the lattice of the first structure is used for all frames.
Returns:
(Trajectory) | def from_structures(cls, structures, constant_lattice=True, **kwargs):
frac_coords = [structure.frac_coords for structure in structures]
if constant_lattice:
lattice = structures[0].lattice.matrix
else:
lattice = [structure.lattice.matrix for structure in structures]
site_properties = [structure.site_properties for structure in structures]
return cls(lattice, structures[0].species, frac_coords, site_properties=site_properties,
constant_lattice=constant_lattice, **kwargs) | 140,269 |
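A minimal usage sketch, assuming pymatgen is installed and that `Trajectory` (the class documented above) is importable from `pymatgen.core.trajectory`, as in recent versions; the structures are illustrative:

```python
from pymatgen.core import Lattice, Structure
from pymatgen.core.trajectory import Trajectory

# Three toy frames of a rocksalt-like cell with a slowly moving Cl site.
structures = [
    Structure(Lattice.cubic(3.0), ["Na", "Cl"],
              [[0, 0, 0], [0.5 + 0.01 * i, 0.5, 0.5]])
    for i in range(3)
]
traj = Trajectory.from_structures(structures, constant_lattice=True)
print(traj[0])  # indexing with an int gives back a single Structure frame
```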
Convenience constructor to obtain trajectory from XDATCAR or vasprun.xml file
Args:
filename (str): The filename to read from.
constant_lattice (bool): Whether the lattice is constant during the simulation
(False for e.g. an NPT MD simulation). If True, a single lattice is used for all frames.
Returns:
(Trajectory) | def from_file(cls, filename, constant_lattice=True, **kwargs):
# TODO: Support other filetypes
fname = os.path.basename(filename)
if fnmatch(fname, "*XDATCAR*"):
structures = Xdatcar(filename).structures
elif fnmatch(fname, "vasprun*.xml*"):
structures = Vasprun(filename).structures
else:
raise ValueError("Unsupported file")
return cls.from_structures(structures, constant_lattice=constant_lattice, **kwargs) | 140,270 |
Return a new instance of the appropriate subclass.
Args:
qtype: String specifying the Resource manager type.
queue_id: Job identifier.
qname: Name of the queue (optional). | def from_qtype_and_id(qtype, queue_id, qname=None):
for cls in all_subclasses(QueueJob):
if cls.QTYPE == qtype: break
else:
logger.critical("Cannot find QueueJob subclass registered for qtype %s" % qtype)
cls = QueueJob
return cls(queue_id, qname=qname) | 140,282 |
Create a new InsertionElectrode.
Args:
entries: A list of ComputedStructureEntries (or subclasses)
representing the different topotactic states of the battery,
e.g. TiO2 and LiTiO2.
working_ion_entry: A single ComputedEntry or PDEntry
representing the element that carries charge across the
battery, e.g. Li. | def __init__(self, entries, working_ion_entry):
self._entries = entries
self._working_ion = working_ion_entry.composition.elements[0]
self._working_ion_entry = working_ion_entry
# Prepare to make phase diagram: determine elements and set their energy
# to be very high
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
# Set an artificial energy for each element for convex hull generation
element_energy = max([entry.energy_per_atom for entry in entries]) + 10
pdentries = []
pdentries.extend(entries)
pdentries.extend([PDEntry(Composition({el: 1}), element_energy)
for el in elements])
# Make phase diagram to determine which entries are stable vs. unstable
pd = PhaseDiagram(pdentries)
lifrac = lambda e: e.composition.get_atomic_fraction(self._working_ion)
# stable entries ordered by amount of Li asc
self._stable_entries = tuple(sorted([e for e in pd.stable_entries
if e in entries], key=lifrac))
# unstable entries ordered by amount of Li asc
self._unstable_entries = tuple(sorted([e for e in pd.unstable_entries
if e in entries], key=lifrac))
# create voltage pairs
self._vpairs = tuple([InsertionVoltagePair(self._stable_entries[i],
self._stable_entries[i + 1],
working_ion_entry)
for i in range(len(self._stable_entries) - 1)]) | 140,292 |
Get the stable entries.
Args:
charge_to_discharge: Order from most charged to most discharged
state? Defaults to True.
Returns:
A list of stable entries in the electrode, ordered by amount of the
working ion. | def get_stable_entries(self, charge_to_discharge=True):
list_copy = list(self._stable_entries)
if not charge_to_discharge:
list_copy.reverse()
return list_copy | 140,293 |
Returns the unstable entries for the electrode.
Args:
charge_to_discharge: Order from most charged to most discharged
state? Defaults to True.
Returns:
A list of unstable entries in the electrode, ordered by amount of
the working ion. | def get_unstable_entries(self, charge_to_discharge=True):
list_copy = list(self._unstable_entries)
if not charge_to_discharge:
list_copy.reverse()
return list_copy | 140,294 |
Return all entries input for the electrode.
Args:
charge_to_discharge: Order from most charged to most discharged
state? Defaults to True.
Returns:
A list of all entries in the electrode (both stable and unstable),
ordered by amount of the working ion. | def get_all_entries(self, charge_to_discharge=True):
all_entries = list(self.get_stable_entries())
all_entries.extend(self.get_unstable_entries())
# sort all entries by amount of working ion ASC
fsrt = lambda e: e.composition.get_atomic_fraction(self.working_ion)
all_entries = sorted(all_entries, key=fsrt)
if not charge_to_discharge:
all_entries.reverse()
return all_entries | 140,295 |
The maximum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments) | def get_max_instability(self, min_voltage=None, max_voltage=None):
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return max(data) if len(data) > 0 else None | 140,296 |
The minimum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Minimum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments) | def get_min_instability(self, min_voltage=None, max_voltage=None):
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return min(data) if len(data) > 0 else None | 140,297 |
Maximum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum critical oxygen chemical potential of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments). | def get_max_muO2(self, min_voltage=None, max_voltage=None):
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.muO2_discharge is not None:
data.extend([d['chempot'] for d in pair.muO2_discharge])
if pair.muO2_charge is not None:
data.extend([d['chempot'] for d in pair.muO2_charge])
return max(data) if len(data) > 0 else None | 140,298 |
Minimum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage for a given step
max_voltage: The maximum allowable voltage allowable for a given
step
Returns:
Minimum critical oxygen chemical potential of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments). | def get_min_muO2(self, min_voltage=None, max_voltage=None):
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.muO2_discharge is not None:
data.extend([d['chempot'] for d in pair.muO2_discharge])
if pair.muO2_charge is not None:
data.extend([d['chempot'] for d in pair.muO2_charge])
return min(data) if len(data) > 0 else None | 140,299 |
Generate a summary dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
A summary of this electrode's properties in dict format. | def as_dict_summary(self, print_subelectrodes=True):
chg_comp = self.fully_charged_entry.composition
dischg_comp = self.fully_discharged_entry.composition
ion = self.working_ion
d = {"average_voltage": self.get_average_voltage(),
"max_voltage": self.max_voltage,
"min_voltage": self.min_voltage,
"max_delta_volume": self.max_delta_volume,
"max_voltage_step": self.max_voltage_step,
"capacity_grav": self.get_capacity_grav(),
"capacity_vol": self.get_capacity_vol(),
"energy_grav": self.get_specific_energy(),
"energy_vol": self.get_energy_density(),
"working_ion": self._working_ion.symbol,
"nsteps": self.num_steps,
"framework": self._vpairs[0].framework.to_data_dict,
"formula_charge": chg_comp.reduced_formula,
"id_charge": self.fully_charged_entry.entry_id,
"formula_discharge": dischg_comp.reduced_formula,
"id_discharge": self.fully_discharged_entry.entry_id,
"fracA_charge": chg_comp.get_atomic_fraction(ion),
"fracA_discharge": dischg_comp.get_atomic_fraction(ion),
"max_instability": self.get_max_instability(),
"min_instability": self.get_min_instability(),
"material_ids" : [itr_ent.entry_id for itr_ent in self._entries],
"stable_material_ids" : [itr_ent.entry_id for itr_ent in self.get_stable_entries()],
"unstable_material_ids": [itr_ent.entry_id for itr_ent in self.get_unstable_entries()],
}
if all(['decomposition_energy' in itr_ent.data for itr_ent in self._entries]):
d.update({"stability_charge": self.fully_charged_entry.data['decomposition_energy'],
"stability_discharge": self.fully_discharged_entry.data['decomposition_energy'],
"stability_data":{itr_ent.entry_id: itr_ent.data['decomposition_energy'] for itr_ent in self._entries},
})
if all(['muO2' in itr_ent.data for itr_ent in self._entries]):
d.update({"muO2_data" : {itr_ent.entry_id: itr_ent.data['muO2'] for itr_ent in self._entries}})
if print_subelectrodes:
f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False)
d["adj_pairs"] = list(map(f_dict,
self.get_sub_electrodes(adjacent_only=True)))
d["all_pairs"] = list(map(f_dict,
self.get_sub_electrodes(adjacent_only=False)))
return d | 140,301 |
Given coords and a site, find the closest site to coords.
Args:
coords (3x1 array): cartesian coords of center of sphere
site: site to find closest to coords
r: radius of sphere. Defaults to diagonal of unit cell
Returns:
Closest site and distance. | def get_nearest_site(self, coords, site, r=None):
index = self.index(site)
if r is None:
r = np.linalg.norm(np.sum(self.lattice.matrix, axis=0))
ns = self.get_sites_in_sphere(coords, r, include_index=True)
# Get sites with identical index to site
ns = [n for n in ns if n[2] == index]
# Sort by distance to coords
ns.sort(key=lambda x: x[1])
# Return PeriodicSite and distance of closest image
return ns[0][0:2] | 140,310 |
Process a single entry with the chosen Corrections.
Args:
entry: A ComputedEntry object.
Returns:
An adjusted entry if entry is compatible, otherwise None is
returned. | def process_entry(self, entry):
try:
corrections = self.get_corrections_dict(entry)
except CompatibilityError:
return None
entry.correction = sum(corrections.values())
return entry | 140,328 |
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value}) | def get_corrections_dict(self, entry):
corrections = {}
for c in self.corrections:
val = c.get_correction(entry)
if val != 0:
corrections[str(c)] = val
return corrections | 140,329 |
Prints an explanation of the corrections that are being applied for a
given compatibility scheme. Inspired by the "explain" methods in many
database methodologies.
Args:
entry: A ComputedEntry. | def explain(self, entry):
d = self.get_explanation_dict(entry)
print("The uncorrected value of the energy of %s is %f eV" %
(entry.composition, d["uncorrected_energy"]))
print("The following corrections / screening are applied for %s:\n" %
d["compatibility"])
for c in d["corrections"]:
print("%s correction: %s\n" % (c["name"],
c["description"]))
print("For the entry, this correction has the value %f eV." % c[
"value"])
print("-" * 30)
print("The final energy after corrections is %f" % d[
"corrected_energy"]) | 140,331 |
Reads a string representation to a Cssr object.
Args:
string (str): A string representation of a CSSR.
Returns:
Cssr object. | def from_string(string):
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(i) for i in toks]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
latt = Lattice.from_lengths_and_angles(lengths, angles)
sp = []
coords = []
for l in lines[4:]:
m = re.match(
r"\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)",
l.strip())
if m:
sp.append(m.group(1))
coords.append([float(m.group(i)) for i in range(2, 5)])
return Cssr(Structure(latt, sp, coords)) | 140,334 |
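A small round-trip sketch, hedged: the import path is assumed, and the header layout (lengths, then angles, two filler lines, then numbered sites) follows the parser above:

from pymatgen.io.cssr import Cssr  # assumed import path

cssr_text = """4.00 4.00 4.00
90.00 90.00 90.00 SPGR = 1 P 1
2 0
0 generated
1 Na 0.0000 0.0000 0.0000
2 Cl 0.5000 0.5000 0.5000"""
cssr = Cssr.from_string(cssr_text)
print(cssr.structure.formula)  # Na1 Cl1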
Adds a dos for plotting.
Args:
label: label for the DOS. Must be unique.
dos: PhononDos object | def add_dos(self, label, dos):
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities} | 140,337 |
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys. | def add_dos_dict(self, dos_dict, key_sort_func=None):
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label]) | 140,338 |
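For instance (a hedged sketch: `plotter` is an object carrying the two methods above, and `total_dos`/`partial_dos` are assumed PhononDos instances):

plotter.add_dos_dict({"Total": total_dos, "O partial": partial_dos},
                     key_sort_func=lambda label: label.lower())
plotter.get_plot(units="cm-1").show()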
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1. | def get_plot(self, xlim=None, ylim=None, units="thz"):
u = freq_units(units)
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos['frequencies'] * u.factor
densities = dos['densities']
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors],
label=str(key))
else:
plt.plot(frequencies, densities, color=colors[i % ncolors],
label=str(key), linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel(r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label))
plt.ylabel(r'$\mathrm{Density\ of\ states}$')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt | 140,339 |
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1. | def show(self, xlim=None, ylim=None, units="thz"):
plt = self.get_plot(xlim, ylim, units=units)
plt.show() | 140,340 |
Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None lets
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1. | def get_plot(self, ylim=None, units="thz"):
u = freq_units(units)
plt = pretty_plot(12, 8)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['frequency'][d][i][j] * u.factor
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
self._maketicks(plt)
# plot y=0 line
plt.axhline(0, linewidth=1, color='k')
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label)
plt.ylabel(ylabel, fontsize=30)
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt | 140,344 |
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1. | def save_plot(self, filename, img_format="eps", ylim=None, units="thz"):
plt = self.get_plot(ylim=ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close() | 140,345 |
Plot two band structures for comparison. One is in red, the other in blue.
The two band structures need to be defined on the same symmetry lines,
and the distance between symmetry lines is
that of the band structure used to build the PhononBSPlotter.
Args:
other_plotter: another PhononBSPlotter object defined along the same symmetry lines
Returns:
a matplotlib object with both band structures | def plot_compare(self, other_plotter):
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig['distances']) != len(data['distances']):
raise ValueError('The two objects are not compatible.')
plt = self.get_plot()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[i] for e in data['frequency']][d],
'r-', linewidth=band_linewidth)
return plt | 140,347 |
Plots the constant volume specific heat C_v in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure | def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$C_v$ (J/K/mol)"
else:
ylabel = r"$C_v$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig | 140,351 |
Plots the vibrational entropy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure | def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$S$ (J/K/mol)"
else:
ylabel = r"$S$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig | 140,352 |
Plots the vibrational internal energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure | def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta E$ (kJ/mol)"
else:
ylabel = r"$\Delta E$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig | 140,353 |
Plots the vibrational contribution to the Helmholtz free energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure | def plot_helmholtz_free_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta F$ (kJ/mol)"
else:
ylabel = r"$\Delta F$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig | 140,354 |
Plots all the thermodynamic properties in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure | def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs):
temperatures = np.linspace(tmin, tmax, ntemp)
mol = "" if self.structure else "-c"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel="Thermodynamic properties", ylim=ylim,
label=r"$C_v$ (J/K/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0],
label=r"$S$ (J/K/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.internal_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,
label=r"$\Delta E$ (kJ/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,
label=r"$\Delta F$ (kJ/mol{})".format(mol), **kwargs)
fig.axes[0].legend(loc="best")
return fig | 140,355 |
Load a PWInput object from a dictionary.
Args:
pwinput_dict (dict): dictionary with PWInput data
Returns:
PWInput object | def from_dict(cls, pwinput_dict):
pwinput = cls(structure=Structure.from_dict(pwinput_dict['structure']),
pseudo=pwinput_dict['pseudo'],
control=pwinput_dict['sections']['control'],
system=pwinput_dict['sections']['system'],
electrons=pwinput_dict['sections']['electrons'],
ions=pwinput_dict['sections']['ions'],
cell=pwinput_dict['sections']['cell'],
kpoints_mode=pwinput_dict['kpoints_mode'],
kpoints_grid=pwinput_dict['kpoints_grid'],
kpoints_shift=pwinput_dict['kpoints_shift'])
return pwinput | 140,359 |
Write the PWSCF input file.
Args:
filename (str): The string filename to output to. | def write_file(self, filename):
with open(filename, "w") as f:
f.write(str(self)) | 140,360
Reads an PWInput object from a string.
Args:
string (str): PWInput string
Returns:
PWInput object | def from_string(string):
lines = list(clean_lines(string.splitlines()))
def input_mode(line):
if line[0] == "&":
return ("sections", line[1:].lower())
elif "ATOMIC_SPECIES" in line:
return ("pseudo", )
elif "K_POINTS" in line:
return ("kpoints", line.split("{")[1][:-1])
elif "CELL_PARAMETERS" in line or "ATOMIC_POSITIONS" in line:
return ("structure", line.split("{")[1][:-1])
elif line == "/":
return None
else:
return mode
sections = {"control": {}, "system": {}, "electrons": {},
"ions": {}, "cell":{}}
pseudo = {}
pseudo_index = 0
lattice = []
species = []
coords = []
structure = None
site_properties = {"pseudo":[]}
mode = None
for line in lines:
mode = input_mode(line)
if mode is None:
pass
elif mode[0] == "sections":
section = mode[1]
m = re.match(r'(\w+)\(?(\d*?)\)?\s*=\s*(.*)', line)
if m:
key = m.group(1).strip()
key_ = m.group(2).strip()
val = m.group(3).strip()
if key_ != "":
if sections[section].get(key, None) is None:
val_ = [0.0]*20 # MAX NTYP DEFINITION
val_[int(key_)-1] = PWInput.proc_val(key, val)
sections[section][key] = val_
site_properties[key] = []
else:
sections[section][key][int(key_)-1] = PWInput.proc_val(key, val)
else:
sections[section][key] = PWInput.proc_val(key, val)
elif mode[0] == "pseudo":
m = re.match(r'(\w+)\s+(\d*\.\d*)\s+(.*)', line)
if m:
pseudo[m.group(1).strip()] = {}
pseudo[m.group(1).strip()]["index"] = pseudo_index
pseudo[m.group(1).strip()]["pseudopot"] = m.group(3).strip()
pseudo_index += 1
elif mode[0] == "kpoints":
m = re.match(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)', line)
if m:
kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))
else:
kpoints_mode = mode[1]
elif mode[0] == "structure":
m_l = re.match(r'(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
m_p = re.match(r'(\w+)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
if m_l:
lattice += [float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3))]
elif m_p:
site_properties["pseudo"].append(pseudo[m_p.group(1)]["pseudopot"])
species += [pseudo[m_p.group(1)]["pseudopot"].split(".")[0]]
coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]
for k, v in site_properties.items():
if k != "pseudo":
site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)]["index"]])
if mode[1] == "angstrom":
coords_are_cartesian = True
elif mode[1] == "crystal":
coords_are_cartesian = False
structure = Structure(Lattice(lattice), species, coords,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
return PWInput(structure=structure, control=sections["control"],
system=sections["system"], electrons=sections["electrons"],
ions=sections["ions"], cell=sections["cell"], kpoints_mode=kpoints_mode,
kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift) | 140,361 |
Static helper method to convert PWINPUT parameters to proper type, e.g.,
integers, floats, etc.
Args:
key: PWINPUT parameter key
val: Actual value of PWINPUT parameter. | def proc_val(key, val):
float_keys = ('etot_conv_thr','forc_conv_thr','conv_thr','Hubbard_U','Hubbard_J0','degauss',
'starting_magnetization',)
int_keys = ('nstep','iprint','nberrycyc','gdir','nppstr','ibrav','nat','ntyp','nbnd','nr1',
'nr2','nr3','nr1s','nr2s','nr3s','nspin','nqx1','nqx2','nqx3','lda_plus_u_kind',
'edir','report','esm_nfit','space_group','origin_choice','electron_maxstep',
'mixing_ndim','mixing_fixed_ns','ortho_para','diago_cg_maxiter','diago_david_ndim',
'nraise','bfgs_ndim','if_pos','nks','nk1','nk2','nk3','sk1','sk2','sk3','nconstr')
bool_keys = ('wf_collect','tstress','tprnfor','lkpoint_dir','tefield','dipfield','lelfield',
'lorbm','lberry','lfcpopt','monopole','nosym','nosym_evc','noinv','no_t_rev',
'force_symmorphic','use_all_frac','one_atom_occupations','starting_spin_angle',
'noncolin','x_gamma_extrapolation','lda_plus_u','lspinorb','london',
'ts_vdw_isolated','xdm','uniqueb','rhombohedral','realxz','block',
'scf_must_converge','adaptive_thr','diago_full_acc','tqr','remove_rigid_rot',
'refold_pos')
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in bool_keys:
if val.lower() == ".true.":
return True
elif val.lower() == ".false.":
return False
else:
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*d?-?\d*", val.lower()).group(0).replace("d", "e"))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
try:
val = val.replace("d","e")
return smart_int_or_float(val)
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
m = re.match(r"^[\"|'](.+)[\"|']$", val)
if m:
return m.group(1) | 140,362 |
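A few illustrative conversions that follow directly from the rules above (parameter names taken from the key lists in proc_val):

PWInput.proc_val("nbnd", "16")            # int key -> 16
PWInput.proc_val("conv_thr", "1.0d-8")    # float key; Fortran 'd' exponent -> 1e-08
PWInput.proc_val("tstress", ".true.")     # bool key -> True
PWInput.proc_val("calculation", "'scf'")  # quoted string -> 'scf'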
Returns the number of jobs in the queue, probably by using subprocess or shutil to
call a command like 'qstat'. Returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect) | def get_njobs_in_queue(self, username=None):
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue. '
'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs | 140,394 |
Plot pie charts of the different timers.
Args:
key: Keyword used to extract data from timers.
minfract: Don't show sections whose relative weight is less than minfract.
Returns:
`matplotlib` figure | def plot_pie(self, key="wall_time", minfract=0.05, **kwargs):
timers = self.timers()
n = len(timers)
# Make square figures and axes
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
fig = plt.gcf()
gspec = GridSpec(n, 1)
for idx, timer in enumerate(timers):
ax = plt.subplot(gspec[idx, 0])
ax.set_title(str(timer))
timer.pie(ax=ax, key=key, minfract=minfract, show=False)
return fig | 140,430 |
Plot stacked histogram of the different timers.
Args:
key: Keyword used to extract data from the timers. Only the first `nmax`
sections with the largest values are shown.
nmax: Maximum number of sections to show. Other entries are grouped together
in the `others` section.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure | def plot_stacked_hist(self, key="wall_time", nmax=5, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
mpi_rank = "0"
timers = self.timers(mpi_rank=mpi_rank)
n = len(timers)
names, values = [], []
rest = np.zeros(n)
for idx, sname in enumerate(self.section_names(ordkey=key)):
sections = self.get_sections(sname)
svals = np.asarray([s.__dict__[key] for s in sections])
if idx < nmax:
names.append(sname)
values.append(svals)
else:
rest += svals
names.append("others (nmax=%d)" % nmax)
values.append(rest)
# The dataset is stored in values. Now create the stacked histogram.
ind = np.arange(n) # the locations for the groups
width = 0.35 # the width of the bars
colors = nmax * ['r', 'g', 'b', 'c', 'k', 'y', 'm']
bars = []
bottom = np.zeros(n)
for idx, vals in enumerate(values):
color = colors[idx]
bar = ax.bar(ind, vals, width, color=color, bottom=bottom)
bars.append(bar)
bottom += vals
ax.set_ylabel(key)
ax.set_title("Stacked histogram with the %d most important sections" % nmax)
ticks = ind + width / 2.0
labels = ["MPI=%d, OMP=%d" % (t.mpi_nprocs, t.omp_nthreads) for t in timers]
ax.set_xticks(ticks)
ax.set_xticklabels(labels, rotation=15)
# Add legend.
ax.legend([bar[0] for bar in bars], names, loc="best")
return fig | 140,431 |
Plot pie chart for this timer.
Args:
key: Keyword used to extract data from the timer.
minfract: Don't show sections whose relative weight is less than minfract.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure | def pie(self, key="wall_time", minfract=0.05, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Set aspect ratio to be equal so that pie is drawn as a circle.
ax.axis("equal")
# Don't show sections whose value is less than minfract
labels, vals = self.names_and_values(key, minfract=minfract)
ax.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)
return fig | 140,451 |
Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
**kwargs: other keyword args to pass into the ASE Atoms constructor
Returns:
ASE Atoms object | def get_atoms(structure, **kwargs):
if not structure.is_ordered:
raise ValueError("ASE Atoms only supports ordered structures")
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True,
cell=cell, **kwargs) | 140,453 |
Returns pymatgen structure from ASE Atoms.
Args:
atoms: ASE Atoms object
cls: The Structure class to instantiate (defaults to pymatgen structure)
Returns:
Equivalent pymatgen.core.structure.Structure | def get_structure(atoms, cls=None):
symbols = atoms.get_chemical_symbols()
positions = atoms.get_positions()
lattice = atoms.get_cell()
cls = Structure if cls is None else cls
return cls(lattice, symbols, positions,
coords_are_cartesian=True) | 140,454 |
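A quick round-trip sketch using the two adaptor functions above (hedged: requires ASE; in pymatgen these are typically exposed as staticmethods of AseAtomsAdaptor):

from pymatgen.core import Lattice, Structure

fe = Structure(Lattice.cubic(2.87), ["Fe", "Fe"],
               [[0, 0, 0], [0.5, 0.5, 0.5]])
atoms = get_atoms(fe)        # pymatgen Structure -> ASE Atoms
fe2 = get_structure(atoms)   # ASE Atoms -> pymatgen Structure
assert fe2.composition == fe.composition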
Returns the densities, but with a Gaussian smearing of
std dev sigma applied.
Args:
sigma: Std dev of Gaussian smearing function.
Returns:
Gaussian-smeared densities. | def get_smeared_densities(self, sigma):
from scipy.ndimage import gaussian_filter1d
diff = [self.frequencies[i + 1] - self.frequencies[i]
for i in range(len(self.frequencies) - 1)]
avgdiff = sum(diff) / len(diff)
smeared_dens = gaussian_filter1d(self.densities, sigma / avgdiff)
return smeared_dens | 140,456 |
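A sketch comparing raw and smeared densities (hedged: `dos` is an assumed PhononDos instance; sigma is expressed in the same units as the frequency axis):

import matplotlib.pyplot as plt

smeared = dos.get_smeared_densities(sigma=0.1)
plt.plot(dos.frequencies, dos.densities, label="raw")
plt.plot(dos.frequencies, smeared, label="sigma = 0.1")
plt.legend()
plt.show()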
Adds two DOS together. Checks that frequency scales are the same.
Otherwise, a ValueError is thrown.
Args:
other: Another DOS object.
Returns:
Sum of the two DOSs. | def __add__(self, other):
if not all(np.equal(self.frequencies, other.frequencies)):
raise ValueError("Frequencies of both DOS are not compatible!")
densities = self.densities + other.densities
return PhononDos(self.frequencies, densities) | 140,457 |
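Usage is then simply (hedged: `acoustic_dos` and `optical_dos` are assumed PhononDos objects on the same frequency grid):

total_dos = acoustic_dos + optical_dos  # raises ValueError if the grids differ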
Sorts a dict by value.
Args:
d: Input dictionary
key: Function which takes a tuple (key, object) and returns a value to
compare and sort by. By default, the function compares the values
of the dict, i.e. key = lambda t: t[1]
reverse: If True, reverses the sort order.
Returns:
OrderedDict object whose keys are ordered according to their value. | def sort_dict(d, key=None, reverse=False):
kv_items = list(d.items())
# Sort kv_items according to key.
if key is None:
kv_items.sort(key=lambda t: t[1], reverse=reverse)
else:
kv_items.sort(key=key, reverse=reverse)
# Build ordered dict.
return collections.OrderedDict(kv_items) | 140,470 |
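For example, using sort_dict as defined above:

d = {"b": 3, "a": 1, "c": 2}
sort_dict(d)                      # OrderedDict: a=1, c=2, b=3 (sorted by value)
sort_dict(d, key=lambda t: t[0])  # sorted by key: a, b, c
sort_dict(d, reverse=True)        # by value, descending: b, c, a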
Create a Stress object. Note that the constructor uses __new__
rather than __init__ according to the standard method of
subclassing numpy ndarrays.
Args:
stress_matrix (3x3 array-like): the 3x3 array-like
representing the stress | def __new__(cls, stress_matrix):
obj = super().__new__(cls, stress_matrix)
return obj.view(cls) | 140,507 |
Calculates the first Piola-Kirchhoff stress.
Args:
def_grad (3x3 array-like): deformation gradient tensor | def piola_kirchoff_1(self, def_grad):
if not self.is_symmetric:
raise ValueError("The stress tensor is not symmetric, \
PK stress is based on a symmetric stress tensor.")
def_grad = SquareTensor(def_grad)
return def_grad.det*np.dot(self, def_grad.inv.trans) | 140,509 |
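The return line computes P = det(F) * sigma * F^(-T). A minimal sanity check (hedged sketch, assuming the Stress class above): for F = I the first Piola-Kirchhoff stress must reduce to the Cauchy stress.

import numpy as np

sigma = Stress(np.diag([1.0, 2.0, 3.0]))
p1 = sigma.piola_kirchoff_1(np.eye(3))  # det(I) * sigma * I = sigma
assert np.allclose(p1, sigma)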
Pass in a structure for analysis
Arguments:
struc_oxid - a Structure object; oxidation states *must* be assigned for this structure; disordered structures should be OK
cation - a String symbol or Element for the cation. It must be positively charged, but can be 1+/2+/3+ etc. | def __init__(self, struc_oxid, cation='Li'):
for site in struc_oxid:
if not hasattr(site.specie, 'oxi_state'):
raise ValueError('BatteryAnalyzer requires oxidation states assigned to structure!')
self.struc_oxid = struc_oxid
self.comp = self.struc_oxid.composition # shortcut for later
# always set self.cation, whether a symbol string or an Element was passed
self.cation = cation if isinstance(cation, Element) else Element(cation)
self.cation_charge = self.cation.max_oxidation_state | 140,514
Give max capacity in mAh/g for inserting and removing a charged cation
Note that the weight is normalized to the most lithiated state,
thus removal of 1 Li from LiFePO4 gives the same capacity as insertion of 1 Li into FePO4.
Args:
remove: (bool) whether to allow cation removal
insert: (bool) whether to allow cation insertion
Returns:
max grav capacity in mAh/g | def get_max_capgrav(self, remove=True, insert=True):
weight = self.comp.weight
if insert:
weight += self.max_cation_insertion * self.cation.atomic_mass
return self._get_max_cap_ah(remove, insert) / (weight / 1000) | 140,518 |
Give max capacity in mAh/cc for inserting and removing a charged cation into base structure.
Args:
remove: (bool) whether to allow cation removal
insert: (bool) whether to allow cation insertion
volume: (float) volume to use for normalization (default=volume of initial structure)
Returns:
max vol capacity in mAh/cc | def get_max_capvol(self, remove=True, insert=True, volume=None):
vol = volume if volume else self.struc_oxid.volume
return self._get_max_cap_ah(remove, insert) * 1000 * 1E24 / (vol * const.N_A) | 140,519 |
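The unit bookkeeping here is compact; a hedged restatement (assuming _get_max_cap_ah returns Ah per mole of formula units, which the N_A in the denominator implies):

# cap [Ah/mol] / N_A [1/mol]      -> Ah per cell
# * 1E24 [A^3/cm^3] / vol [A^3]   -> Ah per cm^3  (1 cm^3 = 1e24 Angstrom^3)
# * 1000 [mAh/Ah]                 -> mAh per cm^3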
This is a helper method for get_removals_int_oxid!
Args:
spec_amts_oxi - a dict of species to their amounts in the structure
oxid_el - the element to oxidize
oxid_els - the full list of elements that might be oxidized
numa - a running set of numbers of A cation at integer oxidation steps
Returns:
a set of numbers A; steps for oxidizing oxid_el first, then the other oxid_els in this list | def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa):
# If Mn is the oxid_el, we have a mixture of Mn2+ and Mn3+; determine the minimum oxidation state for Mn.
# This is the state we want to oxidize!
oxid_old = min([spec.oxi_state for spec in spec_amts_oxi if spec.symbol == oxid_el.symbol])
oxid_new = math.floor(oxid_old + 1)
# if this is not a valid solution, break out of here and don't add anything to the list
if oxid_new > oxid_el.max_oxidation_state:
return numa
# update the spec_amts_oxi map to reflect that the oxidation took place
spec_old = Specie(oxid_el.symbol, oxid_old)
spec_new = Specie(oxid_el.symbol, oxid_new)
specamt = spec_amts_oxi[spec_old]
spec_amts_oxi = {sp: amt for sp, amt in spec_amts_oxi.items() if sp != spec_old}
spec_amts_oxi[spec_new] = specamt
spec_amts_oxi = Composition(spec_amts_oxi)
# determine the amount of cation A in the structure needed for charge balance and add it to the list
oxi_noA = sum([spec.oxi_state * spec_amts_oxi[spec] for spec in spec_amts_oxi if
spec.symbol != self.cation.symbol])
a = max(0, -oxi_noA / self.cation_charge)
numa = numa.union({a})
# recursively try the other oxidation states
if a == 0:
return numa
else:
for oxid_el in oxid_els:
numa = numa.union(
self._get_int_removals_helper(spec_amts_oxi.copy(), oxid_el, oxid_els, numa))
return numa | 140,521 |
Calculates the energy of a composition.
Args:
composition (Composition): input composition
strict (bool): Whether all potentials must be specified | def get_energy(self, composition, strict=True):
missing = set(composition.keys()) - set(self.keys())
if strict and missing:
raise ValueError("Potentials not specified for {}".format(missing))
return sum(self.get(k, 0) * v for k, v in composition.items()) | 140,526 |
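A toy usage sketch (hedged: the name ChemicalPotential for the dict subclass holding this method is assumed, and the potentials are made-up values):

from pymatgen.core import Composition, Element

pots = ChemicalPotential({Element("Li"): -1.9, Element("O"): -4.9})  # eV/atom, illustrative
pots.get_energy(Composition("Li2O"))  # 2*(-1.9) + 1*(-4.9) = -8.7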
Create a PiezoTensor object. The constructor throws an error if
the shape of the input_array argument is not 3x3x3, i.e. in true
tensor notation. Note that the constructor uses __new__ rather than
__init__ according to the standard method of subclassing numpy
ndarrays.
Args:
input_array (3x3x3 array-like): the 3x3x3 array-like
representing the piezo tensor | def __new__(cls, input_array, tol=1e-3):
obj = super().__new__(cls, input_array, check_rank=3)
if not (np.abs(obj - np.transpose(obj, (0, 2, 1))) < tol).all():
warnings.warn("Input piezo tensor does "
"not satisfy standard symmetries")
return obj.view(cls) | 140,527 |
Obtain bond lengths for all bond orders from bond length database
Args:
sp1 (Specie): First specie.
sp2 (Specie): Second specie.
default_bl: If a particular type of bond does not exist, use this
bond length as a default value (bond order = 1).
If None, a ValueError will be thrown.
Returns:
A dict mapping bond order to bond length in angstrom | def obtain_all_bond_lengths(sp1, sp2, default_bl=None):
if isinstance(sp1, Element):
sp1 = sp1.symbol
if isinstance(sp2, Element):
sp2 = sp2.symbol
syms = tuple(sorted([sp1, sp2]))
if syms in bond_lengths:
return bond_lengths[syms].copy()
elif default_bl is not None:
return {1: default_bl}
else:
raise ValueError("No bond data for elements {} - {}".format(*syms)) | 140,573 |