body_hash (string, length 64) | body (string, length 23 to 109k) | docstring (string, length 1 to 57k) | path (string, length 4 to 198) | name (string, length 1 to 115) | repository_name (string, length 7 to 111) | repository_stars (float64, 0 to 191k) | lang (string, 1 distinct value: python) | body_without_docstring (string, length 14 to 108k) | unified (string, length 45 to 133k) |
---|---|---|---|---|---|---|---|---|---|
7a8fbcc4b5df7aea417793e352ccd156ec00fad91f1a8ded0ac5b9480ff2f3f4 | def disk_usage(path):
'Return the number of bytes used by a file/folder and any descendents.'
total = os.path.getsize(path)
if os.path.isdir(path):
for filename in os.listdir(path):
child_path = os.path.join(path, filename)
total += disk_usage(child_path)
print('{0:<7}'.format(total), path)
return total | Return the number of bytes used by a file/folder and any descendents. | recursion/fs.py | disk_usage | bestgopher/dsa | 0 | python | def disk_usage(path):
total = os.path.getsize(path)
if os.path.isdir(path):
for filename in os.listdir(path):
child_path = os.path.join(path, filename)
total += disk_usage(child_path)
print('{0:<7}'.format(total), path)
return total | def disk_usage(path):
total = os.path.getsize(path)
if os.path.isdir(path):
for filename in os.listdir(path):
child_path = os.path.join(path, filename)
total += disk_usage(child_path)
print('{0:<7}'.format(total), path)
return total<|docstring|>Return the number of bytes used by a file/folder and any descendents.<|endoftext|> |
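As a quick, hedged illustration (not part of the dataset row above), the sketch below builds a throwaway directory tree and passes it to the disk_usage function from this record, which is assumed to be defined in scope.

```python
import os
import tempfile

# Illustrative call of the disk_usage helper from the row above (assumed in scope):
# create a tiny tree, write 1 KiB into it, and sum the sizes recursively.
with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, "sub"))
    with open(os.path.join(root, "sub", "data.bin"), "wb") as fh:
        fh.write(b"\x00" * 1024)
    total = disk_usage(root)       # prints each child path before its parent
    print("total bytes:", total)   # 1024 plus the sizes of the two directory entries
```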
481fa0d1872fc5a1e0d368fc920a2883cb6877a40ce21aecb78e381f6370b382 | def accuracy(output, target, topk=(1, 5)):
'Computes the precision@k for the specified values of k'
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res | Computes the precision@k for the specified values of k | core/video_utils.py | accuracy | Bhaskers-Blu-Org1/bLVNet-TAM | 62 | python | def accuracy(output, target, topk=(1, 5)):
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res | def accuracy(output, target, topk=(1, 5)):
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
(_, pred) = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view((- 1)).float().sum(0, keepdim=True)
res.append(correct_k.mul_((100.0 / batch_size)))
return res<|docstring|>Computes the precision@k for the specified values of k<|endoftext|> |
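For the accuracy record above, a minimal usage sketch (an illustration, not part of the record) is shown below; it assumes the accuracy function is in scope and only requests precision@1.

```python
import torch

# Hedged sanity check of the accuracy helper above (assumed in scope):
# 8 random samples over 10 classes, requesting only precision@1.
torch.manual_seed(0)
logits = torch.randn(8, 10)
labels = torch.randint(0, 10, (8,))
(top1,) = accuracy(logits, labels, topk=(1,))
print(float(top1))  # a percentage in [0, 100]
```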
1652a6fb20004ec12ecdd0350da6cb2170c7bc1698bb5b91257a8a545d5d9c83 | def probabilities_on_graph(cell_or_clust, results_df, rsrc_loc, clust=True, root_label=None, p_thresh=0.0):
"\n cell_or_clust\n The name of a cell or cluster for which to plot the probabilities of it \n being each cell type in the Cell Ontology.\n results_df\n A DataFrame storing CellO's output probabilities in which rows correspond \n to cells and columns to cell types.\n rsrc_loc\n The location of the CellO resources directory.\n clust: default True\n If True, `cell_or_clust` is the ID of a cluster.\n If False, `cell_or_clust` is the ID of a cell.\n root_label: default None\n Cell type name or ID. Only plot the subgraph of the Cell Ontology rooted \n at this cell type.\n p_thresh: default 0.0\n A probabilitiy value. Only plot the subgraph of the Cell Ontology spanning\n cell types for which the output probability exceeds the given probability.\n "
label_graph = cello._retrieve_label_graph(rsrc_loc)
is_term_ids = ('CL:' in results_df.columns[0])
cell = cell_or_clust
span_labels = set([label for (label, prob) in zip(results_df.columns, results_df.loc[cell]) if (prob > p_thresh)])
if root_label:
if (not is_term_ids):
root_id = ou.get_term_id(root_label)
else:
root_id = root_label
span_labels &= label_graph._downstream_nodes(root_label, label_graph.source_to_targets)
label_graph = graph.subgraph_spanning_nodes(label_graph, span_labels)
label_to_prob = {label: prob for (label, prob) in zip(results_df.columns, results_df.loc[cell]) if (label in label_graph.source_to_targets)}
if is_term_ids:
label_to_name = {label: '{}\n{:.2f}'.format(ou.get_term_name(label), prob) for (label, prob) in label_to_prob.items()}
else:
label_to_name = {label: '{}\n{:.2f}'.format(label, prob) for (label, prob) in label_to_prob.items()}
g = _render_graph(label_graph.source_to_targets, label_to_name, 'Probabilities for {}'.format(cell), label_to_prob)
return g | cell_or_clust
The name of a cell or cluster for which to plot the probabilities of it
being each cell type in the Cell Ontology.
results_df
A DataFrame storing CellO's output probabilities in which rows correspond
to cells and columns to cell types.
rsrc_loc
The location of the CellO resources directory.
clust: default True
If True, `cell_or_clust` is the ID of a cluster.
If False, `cell_or_clust` is the ID of a cell.
root_label: default None
Cell type name or ID. Only plot the subgraph of the Cell Ontology rooted
at this cell type.
p_thresh: default 0.0
A probabilitiy value. Only plot the subgraph of the Cell Ontology spanning
cell types for which the output probability exceeds the given probability. | cello/plot_annotations.py | probabilities_on_graph | Ann-Holmes/CellO | 42 | python | def probabilities_on_graph(cell_or_clust, results_df, rsrc_loc, clust=True, root_label=None, p_thresh=0.0):
"\n cell_or_clust\n The name of a cell or cluster for which to plot the probabilities of it \n being each cell type in the Cell Ontology.\n results_df\n A DataFrame storing CellO's output probabilities in which rows correspond \n to cells and columns to cell types.\n rsrc_loc\n The location of the CellO resources directory.\n clust: default True\n If True, `cell_or_clust` is the ID of a cluster.\n If False, `cell_or_clust` is the ID of a cell.\n root_label: default None\n Cell type name or ID. Only plot the subgraph of the Cell Ontology rooted \n at this cell type.\n p_thresh: default 0.0\n A probabilitiy value. Only plot the subgraph of the Cell Ontology spanning\n cell types for which the output probability exceeds the given probability.\n "
label_graph = cello._retrieve_label_graph(rsrc_loc)
is_term_ids = ('CL:' in results_df.columns[0])
cell = cell_or_clust
span_labels = set([label for (label, prob) in zip(results_df.columns, results_df.loc[cell]) if (prob > p_thresh)])
if root_label:
if (not is_term_ids):
root_id = ou.get_term_id(root_label)
else:
root_id = root_label
span_labels &= label_graph._downstream_nodes(root_label, label_graph.source_to_targets)
label_graph = graph.subgraph_spanning_nodes(label_graph, span_labels)
label_to_prob = {label: prob for (label, prob) in zip(results_df.columns, results_df.loc[cell]) if (label in label_graph.source_to_targets)}
if is_term_ids:
label_to_name = {label: '{}\n{:.2f}'.format(ou.get_term_name(label), prob) for (label, prob) in label_to_prob.items()}
else:
label_to_name = {label: '{}\n{:.2f}'.format(label, prob) for (label, prob) in label_to_prob.items()}
g = _render_graph(label_graph.source_to_targets, label_to_name, 'Probabilities for {}'.format(cell), label_to_prob)
return g | def probabilities_on_graph(cell_or_clust, results_df, rsrc_loc, clust=True, root_label=None, p_thresh=0.0):
"\n cell_or_clust\n The name of a cell or cluster for which to plot the probabilities of it \n being each cell type in the Cell Ontology.\n results_df\n A DataFrame storing CellO's output probabilities in which rows correspond \n to cells and columns to cell types.\n rsrc_loc\n The location of the CellO resources directory.\n clust: default True\n If True, `cell_or_clust` is the ID of a cluster.\n If False, `cell_or_clust` is the ID of a cell.\n root_label: default None\n Cell type name or ID. Only plot the subgraph of the Cell Ontology rooted \n at this cell type.\n p_thresh: default 0.0\n A probabilitiy value. Only plot the subgraph of the Cell Ontology spanning\n cell types for which the output probability exceeds the given probability.\n "
label_graph = cello._retrieve_label_graph(rsrc_loc)
is_term_ids = ('CL:' in results_df.columns[0])
cell = cell_or_clust
span_labels = set([label for (label, prob) in zip(results_df.columns, results_df.loc[cell]) if (prob > p_thresh)])
if root_label:
if (not is_term_ids):
root_id = ou.get_term_id(root_label)
else:
root_id = root_label
span_labels &= label_graph._downstream_nodes(root_label, label_graph.source_to_targets)
label_graph = graph.subgraph_spanning_nodes(label_graph, span_labels)
label_to_prob = {label: prob for (label, prob) in zip(results_df.columns, results_df.loc[cell]) if (label in label_graph.source_to_targets)}
if is_term_ids:
label_to_name = {label: '{}\n{:.2f}'.format(ou.get_term_name(label), prob) for (label, prob) in label_to_prob.items()}
else:
label_to_name = {label: '{}\n{:.2f}'.format(label, prob) for (label, prob) in label_to_prob.items()}
g = _render_graph(label_graph.source_to_targets, label_to_name, 'Probabilities for {}'.format(cell), label_to_prob)
return g<|docstring|>cell_or_clust
The name of a cell or cluster for which to plot the probabilities of it
being each cell type in the Cell Ontology.
results_df
A DataFrame storing CellO's output probabilities in which rows correspond
to cells and columns to cell types.
rsrc_loc
The location of the CellO resources directory.
clust: default True
If True, `cell_or_clust` is the ID of a cluster.
If False, `cell_or_clust` is the ID of a cell.
root_label: default None
Cell type name or ID. Only plot the subgraph of the Cell Ontology rooted
at this cell type.
p_thresh: default 0.0
A probabilitiy value. Only plot the subgraph of the Cell Ontology spanning
cell types for which the output probability exceeds the given probability.<|endoftext|> |
b2a9a58c89c03a9682113d9b6358e5f261fa5fc84d8795ca8fb7efd7f6a02f3d | def findTargetSumWays(self, nums: List[int], S: int) -> int:
'\n TLE Solution\n '
if (not nums):
return 0
cur_sum = S
start_index = 0
return self.dfs(nums, S, start_index) | TLE Solution | Python/lc_494_target_sum.py | findTargetSumWays | cmattey/leetcode_problems | 6 | python | def findTargetSumWays(self, nums: List[int], S: int) -> int:
'\n \n '
if (not nums):
return 0
cur_sum = S
start_index = 0
return self.dfs(nums, S, start_index) | def findTargetSumWays(self, nums: List[int], S: int) -> int:
'\n \n '
if (not nums):
return 0
cur_sum = S
start_index = 0
return self.dfs(nums, S, start_index)<|docstring|>TLE Solution<|endoftext|> |
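The body of this findTargetSumWays record calls a self.dfs helper that is not included in the row. Purely as a hedged sketch of the same brute-force idea (not the original author's helper), a standalone version could look like this:

```python
from typing import List

# Standalone brute-force sketch of LeetCode 494 (Target Sum): for every element,
# branch on assigning it a '+' or a '-' sign and count paths that hit the target.
def target_sum_ways(nums: List[int], target: int) -> int:
    def dfs(index: int, remaining: int) -> int:
        if index == len(nums):
            return 1 if remaining == 0 else 0
        return (dfs(index + 1, remaining - nums[index]) +
                dfs(index + 1, remaining + nums[index]))
    return dfs(0, target)

print(target_sum_ways([1, 1, 1, 1, 1], 3))  # 5, the classic example for this problem
```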
a1d3fde11899ff087b21de4cfa7b901d1aa0dbc51cb13ccf40a304211af99af7 | def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=(- float('Inf'))):
' Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (batch size x vocabulary size)\n top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\n From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n '
top_k = min(top_k, logits.size((- 1)))
if (top_k > 0):
indices_to_remove = (logits < torch.topk(logits, top_k)[0][(..., (- 1), None)])
logits[indices_to_remove] = filter_value
if (top_p > 0.0):
(sorted_logits, sorted_indices) = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=(- 1)), dim=(- 1))
sorted_indices_to_remove = (cumulative_probs > top_p)
sorted_indices_to_remove[(..., 1:)] = sorted_indices_to_remove[(..., :(- 1))].clone()
sorted_indices_to_remove[(..., 0)] = 0
indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits | Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size x vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 | gpt2generator.py | top_k_top_p_filtering | Acidburn0zzz/Clover-Edition | 1 | python | def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=(- float('Inf'))):
' Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (batch size x vocabulary size)\n top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\n From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n '
top_k = min(top_k, logits.size((- 1)))
if (top_k > 0):
indices_to_remove = (logits < torch.topk(logits, top_k)[0][(..., (- 1), None)])
logits[indices_to_remove] = filter_value
if (top_p > 0.0):
(sorted_logits, sorted_indices) = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=(- 1)), dim=(- 1))
sorted_indices_to_remove = (cumulative_probs > top_p)
sorted_indices_to_remove[(..., 1:)] = sorted_indices_to_remove[(..., :(- 1))].clone()
sorted_indices_to_remove[(..., 0)] = 0
indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits | def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=(- float('Inf'))):
' Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (batch size x vocabulary size)\n top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\n From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n '
top_k = min(top_k, logits.size((- 1)))
if (top_k > 0):
indices_to_remove = (logits < torch.topk(logits, top_k)[0][(..., (- 1), None)])
logits[indices_to_remove] = filter_value
if (top_p > 0.0):
(sorted_logits, sorted_indices) = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=(- 1)), dim=(- 1))
sorted_indices_to_remove = (cumulative_probs > top_p)
sorted_indices_to_remove[(..., 1:)] = sorted_indices_to_remove[(..., :(- 1))].clone()
sorted_indices_to_remove[(..., 0)] = 0
indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits<|docstring|>Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size x vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317<|endoftext|> |
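As a hedged usage sketch for the top_k_top_p_filtering record above (illustrative only; it assumes the function and PyTorch are available), the snippet below filters a random logit row and samples one token from what remains.

```python
import torch
import torch.nn.functional as F

# Illustrative sampling step using the filtering helper above (assumed in scope):
# keep the 5 highest logits, then the 0.9 probability nucleus, and draw one token.
torch.manual_seed(0)
logits = torch.randn(1, 50)                                  # (batch size x vocabulary size)
filtered = top_k_top_p_filtering(logits.clone(), top_k=5, top_p=0.9)  # clone: it edits in place
probs = F.softmax(filtered, dim=-1)                          # -inf entries become probability 0
next_token = torch.multinomial(probs, num_samples=1)
print(next_token.item())
```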
fef1e018e8a7eb949c5e5c07be9cf540f5aa9312d897b2bf23944af3b35a694d | def truncate_multiple_sequences(seqs, max_len=100):
'Truncate multiple sequences, longest first, removing first.'
while (sum((len(s) for s in seqs)) > max_len):
longest = sorted(seqs, key=len, reverse=True)[0]
longest.pop(0) | Truncate multiple sequences, longest first, removing first. | gpt2generator.py | truncate_multiple_sequences | Acidburn0zzz/Clover-Edition | 1 | python | def truncate_multiple_sequences(seqs, max_len=100):
while (sum((len(s) for s in seqs)) > max_len):
longest = sorted(seqs, key=len, reverse=True)[0]
longest.pop(0) | def truncate_multiple_sequences(seqs, max_len=100):
while (sum((len(s) for s in seqs)) > max_len):
longest = sorted(seqs, key=len, reverse=True)[0]
longest.pop(0)<|docstring|>Truncate multiple sequences, longest first, removing first.<|endoftext|> |
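A short, hedged illustration of the truncate_multiple_sequences record above (assuming the function is in scope): the longest sequence loses items from its front, in place, until the combined length fits.

```python
# Illustration of truncate_multiple_sequences (assumed in scope from the row above).
story = list(range(80))
prompt = list(range(40))
truncate_multiple_sequences([story, prompt], max_len=100)
print(len(story), len(prompt))  # 60 40 -> the 80-item list was trimmed from the front
```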
3e89304a9407a6746a2bbf01c865901284c4b7f973bf027f27910473eed74faa | @app.route('/')
def control_panel():
'Route that render the main template with current GPIOs status.'
for GPIO_number in GPIOs:
GPIOs[GPIO_number]['status'] = GPIO.input(GPIO_number)
data_for_template = {'pins': GPIOs, 'temp': temp}
return render_template('panel.html', **data_for_template) | Route that render the main template with current GPIOs status. | start-web-opi.py | control_panel | arthur-bryan/web-opi | 2 | python | @app.route('/')
def control_panel():
for GPIO_number in GPIOs:
GPIOs[GPIO_number]['status'] = GPIO.input(GPIO_number)
data_for_template = {'pins': GPIOs, 'temp': temp}
return render_template('panel.html', **data_for_template) | @app.route('/')
def control_panel():
for GPIO_number in GPIOs:
GPIOs[GPIO_number]['status'] = GPIO.input(GPIO_number)
data_for_template = {'pins': GPIOs, 'temp': temp}
return render_template('panel.html', **data_for_template)<|docstring|>Route that render the main template with current GPIOs status.<|endoftext|> |
51692b707237a37c12ec5e9bd4e3f0b33a228a4238674bc35fda20e29a6fa68f | def change_gpio(gpio_num, value):
"Changes the current value of the GPIO.\n\n Args:\n gpio_num (int): the GPIO number to be controlled\n value (str): 'on' to power on the pin, 'off' to power off\n "
if (gpio_num in list(GPIOs.keys())):
status = {'on': True, 'off': False}.get(value)
GPIO.output(gpio_num, status) | Changes the current value of the GPIO.
Args:
gpio_num (int): the GPIO number to be controlled
value (str): 'on' to power on the pin, 'off' to power off | start-web-opi.py | change_gpio | arthur-bryan/web-opi | 2 | python | def change_gpio(gpio_num, value):
"Changes the current value of the GPIO.\n\n Args:\n gpio_num (int): the GPIO number to be controlled\n value (str): 'on' to power on the pin, 'off' to power off\n "
if (gpio_num in list(GPIOs.keys())):
status = {'on': True, 'off': False}.get(value)
GPIO.output(gpio_num, status) | def change_gpio(gpio_num, value):
"Changes the current value of the GPIO.\n\n Args:\n gpio_num (int): the GPIO number to be controlled\n value (str): 'on' to power on the pin, 'off' to power off\n "
if (gpio_num in list(GPIOs.keys())):
status = {'on': True, 'off': False}.get(value)
GPIO.output(gpio_num, status)<|docstring|>Changes the current value of the GPIO.
Args:
gpio_num (int): the GPIO number to be controlled
value (str): 'on' to power on the pin, 'off' to power off<|endoftext|> |
a5239559da7af03a2e263cfa794a3844b65261120ae747450652646ff760c5a9 | def speak(pin_number, status):
'Uses the mpg123 program to play an audio based on the taken action'
os.system(('mpg123 ' + os.path.abspath('static/audio/{}-{}.mp3'.format(pin_number, status)))) | Uses the mpg123 program to play an audio based on the taken action | start-web-opi.py | speak | arthur-bryan/web-opi | 2 | python | def speak(pin_number, status):
os.system(('mpg123 ' + os.path.abspath('static/audio/{}-{}.mp3'.format(pin_number, status)))) | def speak(pin_number, status):
os.system(('mpg123 ' + os.path.abspath('static/audio/{}-{}.mp3'.format(pin_number, status))))<|docstring|>Uses the mpg123 program to play an audio based on the taken action<|endoftext|> |
8ee33f3a68c108c37e8728de9fe4bc4836c08e145fefdefa3d189d0b682fa081 | @app.route('/<pin_number>/<status>')
def send_action(pin_number, status):
"Route that render the updated GPIO's status after an taken action\n On button press, two threads starts: one for speaking the action, other\n for changing the GPIO status.\n "
f1 = threading.Thread(target=speak, args=[int(pin_number), status])
f2 = threading.Thread(target=change_gpio, args=[int(pin_number), status])
f1.start()
f2.start()
for GPIO_number in GPIOs:
GPIOs[GPIO_number]['status'] = GPIO.input(GPIO_number)
data_for_template = {'pins': GPIOs, 'temp': temp}
return render_template('panel.html', **data_for_template) | Route that render the updated GPIO's status after an taken action
On button press, two threads starts: one for speaking the action, other
for changing the GPIO status. | start-web-opi.py | send_action | arthur-bryan/web-opi | 2 | python | @app.route('/<pin_number>/<status>')
def send_action(pin_number, status):
"Route that render the updated GPIO's status after an taken action\n On button press, two threads starts: one for speaking the action, other\n for changing the GPIO status.\n "
f1 = threading.Thread(target=speak, args=[int(pin_number), status])
f2 = threading.Thread(target=change_gpio, args=[int(pin_number), status])
f1.start()
f2.start()
for GPIO_number in GPIOs:
GPIOs[GPIO_number]['status'] = GPIO.input(GPIO_number)
data_for_template = {'pins': GPIOs, 'temp': temp}
return render_template('panel.html', **data_for_template) | @app.route('/<pin_number>/<status>')
def send_action(pin_number, status):
"Route that render the updated GPIO's status after an taken action\n On button press, two threads starts: one for speaking the action, other\n for changing the GPIO status.\n "
f1 = threading.Thread(target=speak, args=[int(pin_number), status])
f2 = threading.Thread(target=change_gpio, args=[int(pin_number), status])
f1.start()
f2.start()
for GPIO_number in GPIOs:
GPIOs[GPIO_number]['status'] = GPIO.input(GPIO_number)
data_for_template = {'pins': GPIOs, 'temp': temp}
return render_template('panel.html', **data_for_template)<|docstring|>Route that render the updated GPIO's status after an taken action
On button press, two threads starts: one for speaking the action, other
for changing the GPIO status.<|endoftext|> |
1987e3137e225f547663a207cf107404b01adaaec1f99cf8c822493f51423d49 | def process_entry(self, defect_entry):
'\n Process a given Defect entry with qualifiers given from initialization of class.\n Order of processing is:\n 1) perform all possible defect corrections with information given\n 2) consider delocalization analyses based on qualifier metrics\n given initialization of class. If delocalized, flag entry as delocalized\n 3) update corrections to defect entry and flag as del\n\n\n Corrections are applied based on:\n i) if free charges are more than free_chg_cutoff then will not apply charge correction,\n because it no longer is applicable\n ii) use charge correction set by preferred_cc\n iii) only use BandFilling correction if use_bandfilling is set to True\n iv) only use BandEdgeShift correction if use_bandedgeshift is set to True\n '
self.perform_all_corrections(defect_entry)
self.delocalization_analysis(defect_entry)
corrections = {}
skip_charge_corrections = False
if ('num_hole_vbm' in defect_entry.parameters.keys()):
if ((self.free_chg_cutoff < defect_entry.parameters['num_hole_vbm']) or (self.free_chg_cutoff < defect_entry.parameters['num_elec_cbm'])):
print('Will not use charge correction because too many free charges')
skip_charge_corrections = True
if skip_charge_corrections:
corrections.update({'charge_correction': 0.0})
elif (('freysoldt' in self.preferred_cc.lower()) and ('freysoldt_meta' in defect_entry.parameters.keys())):
frey_meta = defect_entry.parameters['freysoldt_meta']
frey_corr = (frey_meta['freysoldt_electrostatic'] + frey_meta['freysoldt_potential_alignment_correction'])
corrections.update({'charge_correction': frey_corr})
elif ('kumagai_meta' in defect_entry.parameters.keys()):
kumagai_meta = defect_entry.parameters['kumagai_meta']
kumagai_corr = (kumagai_meta['kumagai_electrostatic'] + kumagai_meta['kumagai_potential_alignment_correction'])
corrections.update({'charge_correction': kumagai_corr})
else:
print('Could not use any charge correction because insufficient metadata was supplied.')
if self.use_bandfilling:
if ('bandfilling_meta' in defect_entry.parameters.keys()):
bfc_corr = defect_entry.parameters['bandfilling_meta']['bandfilling_correction']
corrections.update({'bandfilling_correction': bfc_corr})
else:
print('Could not use band filling correction because insufficient metadata was supplied.')
else:
corrections.update({'bandfilling_correction': 0.0})
if self.use_bandedgeshift:
if ('bandshift_meta' in defect_entry.parameters.keys()):
bandfill_meta = defect_entry.parameters['bandshift_meta']
bes_corr = ((bandfill_meta['vbm_shift_correction'] + bandfill_meta['hole_vbm_shift_correction']) + bandfill_meta['elec_cbm_shift_correction'])
corrections.update({'bandedgeshifting_correction': bes_corr})
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['hybrid_vbm'], 'gap': (defect_entry.parameters['hybrid_cbm'] - defect_entry.parameters['hybrid_vbm'])}})
else:
print('Could not use band edge shifting correction because insufficient metadata was supplied.')
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['vbm'], 'gap': (defect_entry.parameters['cbm'] - defect_entry.parameters['vbm'])}})
else:
corrections.update({'bandedgeshifting_correction': 0.0})
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['vbm'], 'gap': (defect_entry.parameters['cbm'] - defect_entry.parameters['vbm'])}})
defect_entry.corrections.update(corrections)
return defect_entry | Process a given Defect entry with qualifiers given from initialization of class.
Order of processing is:
1) perform all possible defect corrections with information given
2) consider delocalization analyses based on qualifier metrics
given initialization of class. If delocalized, flag entry as delocalized
3) update corrections to defect entry and flag as del
Corrections are applied based on:
i) if free charges are more than free_chg_cutoff then will not apply charge correction,
because it no longer is applicable
ii) use charge correction set by preferred_cc
iii) only use BandFilling correction if use_bandfilling is set to True
iv) only use BandEdgeShift correction if use_bandedgeshift is set to True | pymatgen/analysis/defects/defect_compatibility.py | process_entry | anjlip/pymatgen | 2 | python | def process_entry(self, defect_entry):
'\n Process a given Defect entry with qualifiers given from initialization of class.\n Order of processing is:\n 1) perform all possible defect corrections with information given\n 2) consider delocalization analyses based on qualifier metrics\n given initialization of class. If delocalized, flag entry as delocalized\n 3) update corrections to defect entry and flag as del\n\n\n Corrections are applied based on:\n i) if free charges are more than free_chg_cutoff then will not apply charge correction,\n because it no longer is applicable\n ii) use charge correction set by preferred_cc\n iii) only use BandFilling correction if use_bandfilling is set to True\n iv) only use BandEdgeShift correction if use_bandedgeshift is set to True\n '
self.perform_all_corrections(defect_entry)
self.delocalization_analysis(defect_entry)
corrections = {}
skip_charge_corrections = False
if ('num_hole_vbm' in defect_entry.parameters.keys()):
if ((self.free_chg_cutoff < defect_entry.parameters['num_hole_vbm']) or (self.free_chg_cutoff < defect_entry.parameters['num_elec_cbm'])):
print('Will not use charge correction because too many free charges')
skip_charge_corrections = True
if skip_charge_corrections:
corrections.update({'charge_correction': 0.0})
elif (('freysoldt' in self.preferred_cc.lower()) and ('freysoldt_meta' in defect_entry.parameters.keys())):
frey_meta = defect_entry.parameters['freysoldt_meta']
frey_corr = (frey_meta['freysoldt_electrostatic'] + frey_meta['freysoldt_potential_alignment_correction'])
corrections.update({'charge_correction': frey_corr})
elif ('kumagai_meta' in defect_entry.parameters.keys()):
kumagai_meta = defect_entry.parameters['kumagai_meta']
kumagai_corr = (kumagai_meta['kumagai_electrostatic'] + kumagai_meta['kumagai_potential_alignment_correction'])
corrections.update({'charge_correction': kumagai_corr})
else:
print('Could not use any charge correction because insufficient metadata was supplied.')
if self.use_bandfilling:
if ('bandfilling_meta' in defect_entry.parameters.keys()):
bfc_corr = defect_entry.parameters['bandfilling_meta']['bandfilling_correction']
corrections.update({'bandfilling_correction': bfc_corr})
else:
print('Could not use band filling correction because insufficient metadata was supplied.')
else:
corrections.update({'bandfilling_correction': 0.0})
if self.use_bandedgeshift:
if ('bandshift_meta' in defect_entry.parameters.keys()):
bandfill_meta = defect_entry.parameters['bandshift_meta']
bes_corr = ((bandfill_meta['vbm_shift_correction'] + bandfill_meta['hole_vbm_shift_correction']) + bandfill_meta['elec_cbm_shift_correction'])
corrections.update({'bandedgeshifting_correction': bes_corr})
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['hybrid_vbm'], 'gap': (defect_entry.parameters['hybrid_cbm'] - defect_entry.parameters['hybrid_vbm'])}})
else:
print('Could not use band edge shifting correction because insufficient metadata was supplied.')
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['vbm'], 'gap': (defect_entry.parameters['cbm'] - defect_entry.parameters['vbm'])}})
else:
corrections.update({'bandedgeshifting_correction': 0.0})
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['vbm'], 'gap': (defect_entry.parameters['cbm'] - defect_entry.parameters['vbm'])}})
defect_entry.corrections.update(corrections)
return defect_entry | def process_entry(self, defect_entry):
'\n Process a given Defect entry with qualifiers given from initialization of class.\n Order of processing is:\n 1) perform all possible defect corrections with information given\n 2) consider delocalization analyses based on qualifier metrics\n given initialization of class. If delocalized, flag entry as delocalized\n 3) update corrections to defect entry and flag as del\n\n\n Corrections are applied based on:\n i) if free charges are more than free_chg_cutoff then will not apply charge correction,\n because it no longer is applicable\n ii) use charge correction set by preferred_cc\n iii) only use BandFilling correction if use_bandfilling is set to True\n iv) only use BandEdgeShift correction if use_bandedgeshift is set to True\n '
self.perform_all_corrections(defect_entry)
self.delocalization_analysis(defect_entry)
corrections = {}
skip_charge_corrections = False
if ('num_hole_vbm' in defect_entry.parameters.keys()):
if ((self.free_chg_cutoff < defect_entry.parameters['num_hole_vbm']) or (self.free_chg_cutoff < defect_entry.parameters['num_elec_cbm'])):
print('Will not use charge correction because too many free charges')
skip_charge_corrections = True
if skip_charge_corrections:
corrections.update({'charge_correction': 0.0})
elif (('freysoldt' in self.preferred_cc.lower()) and ('freysoldt_meta' in defect_entry.parameters.keys())):
frey_meta = defect_entry.parameters['freysoldt_meta']
frey_corr = (frey_meta['freysoldt_electrostatic'] + frey_meta['freysoldt_potential_alignment_correction'])
corrections.update({'charge_correction': frey_corr})
elif ('kumagai_meta' in defect_entry.parameters.keys()):
kumagai_meta = defect_entry.parameters['kumagai_meta']
kumagai_corr = (kumagai_meta['kumagai_electrostatic'] + kumagai_meta['kumagai_potential_alignment_correction'])
corrections.update({'charge_correction': kumagai_corr})
else:
print('Could not use any charge correction because insufficient metadata was supplied.')
if self.use_bandfilling:
if ('bandfilling_meta' in defect_entry.parameters.keys()):
bfc_corr = defect_entry.parameters['bandfilling_meta']['bandfilling_correction']
corrections.update({'bandfilling_correction': bfc_corr})
else:
print('Could not use band filling correction because insufficient metadata was supplied.')
else:
corrections.update({'bandfilling_correction': 0.0})
if self.use_bandedgeshift:
if ('bandshift_meta' in defect_entry.parameters.keys()):
bandfill_meta = defect_entry.parameters['bandshift_meta']
bes_corr = ((bandfill_meta['vbm_shift_correction'] + bandfill_meta['hole_vbm_shift_correction']) + bandfill_meta['elec_cbm_shift_correction'])
corrections.update({'bandedgeshifting_correction': bes_corr})
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['hybrid_vbm'], 'gap': (defect_entry.parameters['hybrid_cbm'] - defect_entry.parameters['hybrid_vbm'])}})
else:
print('Could not use band edge shifting correction because insufficient metadata was supplied.')
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['vbm'], 'gap': (defect_entry.parameters['cbm'] - defect_entry.parameters['vbm'])}})
else:
corrections.update({'bandedgeshifting_correction': 0.0})
defect_entry.parameters.update({'phasediagram_meta': {'vbm': defect_entry.parameters['vbm'], 'gap': (defect_entry.parameters['cbm'] - defect_entry.parameters['vbm'])}})
defect_entry.corrections.update(corrections)
return defect_entry<|docstring|>Process a given Defect entry with qualifiers given from initialization of class.
Order of processing is:
1) perform all possible defect corrections with information given
2) consider delocalization analyses based on qualifier metrics
given initialization of class. If delocalized, flag entry as delocalized
3) update corrections to defect entry and flag as del
Corrections are applied based on:
i) if free charges are more than free_chg_cutoff then will not apply charge correction,
because it no longer is applicable
ii) use charge correction set by preferred_cc
iii) only use BandFilling correction if use_bandfilling is set to True
iv) only use BandEdgeShift correction if use_bandedgeshift is set to True<|endoftext|> |
ba8536027c781cb7bcc63860a4a70a087ba1e2da75465c5e7e2835bd6e1717a9 | def delocalization_analysis(self, defect_entry):
'\n Do delocalization analysis. To do this, one considers:\n i) sampling region of planar averaged electrostatic potential (freysoldt approach)\n ii) sampling region of atomic site averaged potentials (kumagai approach)\n iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz radius)\n iv) if defect is not a vacancy type -> track to see how much the defect has moved\n\n calculations that fail delocalization get "is_compatibile" set to False in parameters\n also parameters recieves a "delocalization_meta" with following dict:\n plnr_avg = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n atomic_site = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n structure_relax = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n defectsite_relax = {\'is_compatible\': True/False, \'metadata\': metadata used for determing this}\n '
defect_entry.parameters.update({'is_compatible': True})
if ('freysoldt_meta' in defect_entry.parameters.keys()):
defect_entry = self.is_freysoldt_delocalized(defect_entry)
else:
print('Insufficient information provided for performing Freysoldt correction delocalization analysis.\nCannot perform planar averaged electrostatic potential compatibility analysis.')
if ('kumagai_meta' in defect_entry.parameters.keys()):
defect_entry = self.is_kumagai_delocalized(defect_entry)
else:
print('Insufficient information provided for performing Kumagai correction delocalization analysis.\nCannot perform atomic site averaged electrostatic potential compatibility analysis.')
if (('final_defect_structure' in defect_entry.parameters.keys()) and ('initial_defect_structure' in defect_entry.parameters.keys()) and ('sampling_radius' in defect_entry.parameters.keys())):
defect_entry = self.is_final_relaxed_structure_delocalized(defect_entry)
else:
print('Insufficient information provided in defect_entry.parameters. Cannot perform full structure site relaxation compatibility analysis.')
return defect_entry | Do delocalization analysis. To do this, one considers:
i) sampling region of planar averaged electrostatic potential (freysoldt approach)
ii) sampling region of atomic site averaged potentials (kumagai approach)
iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz radius)
iv) if defect is not a vacancy type -> track to see how much the defect has moved
calculations that fail delocalization get "is_compatibile" set to False in parameters
also parameters recieves a "delocalization_meta" with following dict:
plnr_avg = {'is_compatible': True/False, 'metadata': metadata used for determining this}
atomic_site = {'is_compatible': True/False, 'metadata': metadata used for determining this}
structure_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
defectsite_relax = {'is_compatible': True/False, 'metadata': metadata used for determing this} | pymatgen/analysis/defects/defect_compatibility.py | delocalization_analysis | anjlip/pymatgen | 2 | python | def delocalization_analysis(self, defect_entry):
'\n Do delocalization analysis. To do this, one considers:\n i) sampling region of planar averaged electrostatic potential (freysoldt approach)\n ii) sampling region of atomic site averaged potentials (kumagai approach)\n iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz radius)\n iv) if defect is not a vacancy type -> track to see how much the defect has moved\n\n calculations that fail delocalization get "is_compatibile" set to False in parameters\n also parameters recieves a "delocalization_meta" with following dict:\n plnr_avg = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n atomic_site = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n structure_relax = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n defectsite_relax = {\'is_compatible\': True/False, \'metadata\': metadata used for determing this}\n '
defect_entry.parameters.update({'is_compatible': True})
if ('freysoldt_meta' in defect_entry.parameters.keys()):
defect_entry = self.is_freysoldt_delocalized(defect_entry)
else:
print('Insufficient information provided for performing Freysoldt correction delocalization analysis.\nCannot perform planar averaged electrostatic potential compatibility analysis.')
if ('kumagai_meta' in defect_entry.parameters.keys()):
defect_entry = self.is_kumagai_delocalized(defect_entry)
else:
print('Insufficient information provided for performing Kumagai correction delocalization analysis.\nCannot perform atomic site averaged electrostatic potential compatibility analysis.')
if (('final_defect_structure' in defect_entry.parameters.keys()) and ('initial_defect_structure' in defect_entry.parameters.keys()) and ('sampling_radius' in defect_entry.parameters.keys())):
defect_entry = self.is_final_relaxed_structure_delocalized(defect_entry)
else:
print('Insufficient information provided in defect_entry.parameters. Cannot perform full structure site relaxation compatibility analysis.')
return defect_entry | def delocalization_analysis(self, defect_entry):
'\n Do delocalization analysis. To do this, one considers:\n i) sampling region of planar averaged electrostatic potential (freysoldt approach)\n ii) sampling region of atomic site averaged potentials (kumagai approach)\n iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz radius)\n iv) if defect is not a vacancy type -> track to see how much the defect has moved\n\n calculations that fail delocalization get "is_compatibile" set to False in parameters\n also parameters recieves a "delocalization_meta" with following dict:\n plnr_avg = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n atomic_site = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n structure_relax = {\'is_compatible\': True/False, \'metadata\': metadata used for determining this}\n defectsite_relax = {\'is_compatible\': True/False, \'metadata\': metadata used for determing this}\n '
defect_entry.parameters.update({'is_compatible': True})
if ('freysoldt_meta' in defect_entry.parameters.keys()):
defect_entry = self.is_freysoldt_delocalized(defect_entry)
else:
print('Insufficient information provided for performing Freysoldt correction delocalization analysis.\nCannot perform planar averaged electrostatic potential compatibility analysis.')
if ('kumagai_meta' in defect_entry.parameters.keys()):
defect_entry = self.is_kumagai_delocalized(defect_entry)
else:
print('Insufficient information provided for performing Kumagai correction delocalization analysis.\nCannot perform atomic site averaged electrostatic potential compatibility analysis.')
if (('final_defect_structure' in defect_entry.parameters.keys()) and ('initial_defect_structure' in defect_entry.parameters.keys()) and ('sampling_radius' in defect_entry.parameters.keys())):
defect_entry = self.is_final_relaxed_structure_delocalized(defect_entry)
else:
print('Insufficient information provided in defect_entry.parameters. Cannot perform full structure site relaxation compatibility analysis.')
return defect_entry<|docstring|>Do delocalization analysis. To do this, one considers:
i) sampling region of planar averaged electrostatic potential (freysoldt approach)
ii) sampling region of atomic site averaged potentials (kumagai approach)
iii) structural relaxation amount outside of radius considered in kumagai approach (default is wigner seitz radius)
iv) if defect is not a vacancy type -> track to see how much the defect has moved
calculations that fail delocalization get "is_compatibile" set to False in parameters
also parameters recieves a "delocalization_meta" with following dict:
plnr_avg = {'is_compatible': True/False, 'metadata': metadata used for determining this}
atomic_site = {'is_compatible': True/False, 'metadata': metadata used for determining this}
structure_relax = {'is_compatible': True/False, 'metadata': metadata used for determining this}
defectsite_relax = {'is_compatible': True/False, 'metadata': metadata used for determing this}<|endoftext|> |
4a797421f50968ccbf52524fe59c518861ad4e788a761e258c56a0bfdc6e1ab4 | def __init__(self, root_dir, options, build_config, run_tracker, reporting, target_roots=None, daemon_graph_helper=None, exiter=sys.exit):
'\n :param str root_dir: The root directory of the pants workspace (aka the "build root").\n :param Options options: The global, pre-initialized Options instance.\n :param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.\n :param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.\n :param Reporting reporting: The global, pre-initialized Reporting instance.\n :param TargetRoots target_roots: A pre-existing `TargetRoots` object, if available.\n :param LegacyGraphHelper daemon_graph_helper: A LegacyGraphHelper instance for graph reuse. (Optional)\n :param func exiter: A function that accepts an exit code value and exits. (for tests, Optional)\n '
self._root_dir = root_dir
self._options = options
self._build_config = build_config
self._run_tracker = run_tracker
self._reporting = reporting
self._target_roots = target_roots
self._daemon_graph_helper = daemon_graph_helper
self._exiter = exiter
self._requested_goals = self._options.goals
self._help_request = self._options.help_request
self._build_file_parser = BuildFileParser(self._build_config, self._root_dir)
self._build_graph = None
self._address_mapper = None
self._global_options = options.for_global_scope()
self._tag = self._global_options.tag
self._fail_fast = self._global_options.fail_fast
self._explain = self._global_options.explain
self._kill_nailguns = self._global_options.kill_nailguns | :param str root_dir: The root directory of the pants workspace (aka the "build root").
:param Options options: The global, pre-initialized Options instance.
:param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.
:param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param Reporting reporting: The global, pre-initialized Reporting instance.
:param TargetRoots target_roots: A pre-existing `TargetRoots` object, if available.
:param LegacyGraphHelper daemon_graph_helper: A LegacyGraphHelper instance for graph reuse. (Optional)
:param func exiter: A function that accepts an exit code value and exits. (for tests, Optional) | src/python/pants/bin/goal_runner.py | __init__ | foursquare/pants | 1 | python | def __init__(self, root_dir, options, build_config, run_tracker, reporting, target_roots=None, daemon_graph_helper=None, exiter=sys.exit):
'\n :param str root_dir: The root directory of the pants workspace (aka the "build root").\n :param Options options: The global, pre-initialized Options instance.\n :param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.\n :param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.\n :param Reporting reporting: The global, pre-initialized Reporting instance.\n :param TargetRoots target_roots: A pre-existing `TargetRoots` object, if available.\n :param LegacyGraphHelper daemon_graph_helper: A LegacyGraphHelper instance for graph reuse. (Optional)\n :param func exiter: A function that accepts an exit code value and exits. (for tests, Optional)\n '
self._root_dir = root_dir
self._options = options
self._build_config = build_config
self._run_tracker = run_tracker
self._reporting = reporting
self._target_roots = target_roots
self._daemon_graph_helper = daemon_graph_helper
self._exiter = exiter
self._requested_goals = self._options.goals
self._help_request = self._options.help_request
self._build_file_parser = BuildFileParser(self._build_config, self._root_dir)
self._build_graph = None
self._address_mapper = None
self._global_options = options.for_global_scope()
self._tag = self._global_options.tag
self._fail_fast = self._global_options.fail_fast
self._explain = self._global_options.explain
self._kill_nailguns = self._global_options.kill_nailguns | def __init__(self, root_dir, options, build_config, run_tracker, reporting, target_roots=None, daemon_graph_helper=None, exiter=sys.exit):
'\n :param str root_dir: The root directory of the pants workspace (aka the "build root").\n :param Options options: The global, pre-initialized Options instance.\n :param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.\n :param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.\n :param Reporting reporting: The global, pre-initialized Reporting instance.\n :param TargetRoots target_roots: A pre-existing `TargetRoots` object, if available.\n :param LegacyGraphHelper daemon_graph_helper: A LegacyGraphHelper instance for graph reuse. (Optional)\n :param func exiter: A function that accepts an exit code value and exits. (for tests, Optional)\n '
self._root_dir = root_dir
self._options = options
self._build_config = build_config
self._run_tracker = run_tracker
self._reporting = reporting
self._target_roots = target_roots
self._daemon_graph_helper = daemon_graph_helper
self._exiter = exiter
self._requested_goals = self._options.goals
self._help_request = self._options.help_request
self._build_file_parser = BuildFileParser(self._build_config, self._root_dir)
self._build_graph = None
self._address_mapper = None
self._global_options = options.for_global_scope()
self._tag = self._global_options.tag
self._fail_fast = self._global_options.fail_fast
self._explain = self._global_options.explain
self._kill_nailguns = self._global_options.kill_nailguns<|docstring|>:param str root_dir: The root directory of the pants workspace (aka the "build root").
:param Options options: The global, pre-initialized Options instance.
:param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.
:param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param Reporting reporting: The global, pre-initialized Reporting instance.
:param TargetRoots target_roots: A pre-existing `TargetRoots` object, if available.
:param LegacyGraphHelper daemon_graph_helper: A LegacyGraphHelper instance for graph reuse. (Optional)
:param func exiter: A function that accepts an exit code value and exits. (for tests, Optional)<|endoftext|> |
7bc8685e8e0a1fcb7b95b58b849f4c28150c787809d3f5ddd2414cbaaa7f99a4 | def _handle_help(self, help_request):
'Handle requests for `help` information.'
if help_request:
help_printer = HelpPrinter(self._options)
result = help_printer.print_help()
self._exiter(result) | Handle requests for `help` information. | src/python/pants/bin/goal_runner.py | _handle_help | foursquare/pants | 1 | python | def _handle_help(self, help_request):
if help_request:
help_printer = HelpPrinter(self._options)
result = help_printer.print_help()
self._exiter(result) | def _handle_help(self, help_request):
if help_request:
help_printer = HelpPrinter(self._options)
result = help_printer.print_help()
self._exiter(result)<|docstring|>Handle requests for `help` information.<|endoftext|> |
a949bbf04fba17b467b70ffa6dd664d1e9d1ee29578797a7926724041581c809 | def _init_graph(self, pants_ignore_patterns, build_ignore_patterns, exclude_target_regexps, target_specs, target_roots, workdir, graph_helper, subproject_build_roots):
"Determine the BuildGraph, AddressMapper and spec_roots for a given run.\n\n :param list pants_ignore_patterns: The pants ignore patterns from '--pants-ignore'.\n :param list build_ignore_patterns: The build ignore patterns from '--build-ignore',\n applied during BUILD file searching.\n :param str workdir: The pants workdir.\n :param list exclude_target_regexps: Regular expressions for targets to be excluded.\n :param list target_specs: The original target specs.\n :param TargetRoots target_roots: The existing `TargetRoots` object, if any.\n :param LegacyGraphHelper graph_helper: A LegacyGraphHelper to use for graph construction,\n if available. This would usually come from the daemon.\n :returns: A tuple of (BuildGraph, AddressMapper, opt Scheduler, TargetRoots).\n "
if (not graph_helper):
native = Native.create(self._global_options)
native.set_panic_handler()
graph_helper = EngineInitializer.setup_legacy_graph(pants_ignore_patterns, workdir, self._global_options.build_file_imports, native=native, build_file_aliases=self._build_config.registered_aliases(), rules=self._build_config.rules(), build_ignore_patterns=build_ignore_patterns, exclude_target_regexps=exclude_target_regexps, subproject_roots=subproject_build_roots, include_trace_on_error=self._options.for_global_scope().print_exception_stacktrace)
target_roots = (target_roots or TargetRootsCalculator.create(options=self._options, build_root=self._root_dir, change_calculator=graph_helper.change_calculator))
(graph, address_mapper) = graph_helper.create_build_graph(target_roots, self._root_dir)
return (graph, address_mapper, graph_helper.scheduler, target_roots) | Determine the BuildGraph, AddressMapper and spec_roots for a given run.
:param list pants_ignore_patterns: The pants ignore patterns from '--pants-ignore'.
:param list build_ignore_patterns: The build ignore patterns from '--build-ignore',
applied during BUILD file searching.
:param str workdir: The pants workdir.
:param list exclude_target_regexps: Regular expressions for targets to be excluded.
:param list target_specs: The original target specs.
:param TargetRoots target_roots: The existing `TargetRoots` object, if any.
:param LegacyGraphHelper graph_helper: A LegacyGraphHelper to use for graph construction,
if available. This would usually come from the daemon.
:returns: A tuple of (BuildGraph, AddressMapper, opt Scheduler, TargetRoots). | src/python/pants/bin/goal_runner.py | _init_graph | foursquare/pants | 1 | python | def _init_graph(self, pants_ignore_patterns, build_ignore_patterns, exclude_target_regexps, target_specs, target_roots, workdir, graph_helper, subproject_build_roots):
"Determine the BuildGraph, AddressMapper and spec_roots for a given run.\n\n :param list pants_ignore_patterns: The pants ignore patterns from '--pants-ignore'.\n :param list build_ignore_patterns: The build ignore patterns from '--build-ignore',\n applied during BUILD file searching.\n :param str workdir: The pants workdir.\n :param list exclude_target_regexps: Regular expressions for targets to be excluded.\n :param list target_specs: The original target specs.\n :param TargetRoots target_roots: The existing `TargetRoots` object, if any.\n :param LegacyGraphHelper graph_helper: A LegacyGraphHelper to use for graph construction,\n if available. This would usually come from the daemon.\n :returns: A tuple of (BuildGraph, AddressMapper, opt Scheduler, TargetRoots).\n "
if (not graph_helper):
native = Native.create(self._global_options)
native.set_panic_handler()
graph_helper = EngineInitializer.setup_legacy_graph(pants_ignore_patterns, workdir, self._global_options.build_file_imports, native=native, build_file_aliases=self._build_config.registered_aliases(), rules=self._build_config.rules(), build_ignore_patterns=build_ignore_patterns, exclude_target_regexps=exclude_target_regexps, subproject_roots=subproject_build_roots, include_trace_on_error=self._options.for_global_scope().print_exception_stacktrace)
target_roots = (target_roots or TargetRootsCalculator.create(options=self._options, build_root=self._root_dir, change_calculator=graph_helper.change_calculator))
(graph, address_mapper) = graph_helper.create_build_graph(target_roots, self._root_dir)
return (graph, address_mapper, graph_helper.scheduler, target_roots) | def _init_graph(self, pants_ignore_patterns, build_ignore_patterns, exclude_target_regexps, target_specs, target_roots, workdir, graph_helper, subproject_build_roots):
"Determine the BuildGraph, AddressMapper and spec_roots for a given run.\n\n :param list pants_ignore_patterns: The pants ignore patterns from '--pants-ignore'.\n :param list build_ignore_patterns: The build ignore patterns from '--build-ignore',\n applied during BUILD file searching.\n :param str workdir: The pants workdir.\n :param list exclude_target_regexps: Regular expressions for targets to be excluded.\n :param list target_specs: The original target specs.\n :param TargetRoots target_roots: The existing `TargetRoots` object, if any.\n :param LegacyGraphHelper graph_helper: A LegacyGraphHelper to use for graph construction,\n if available. This would usually come from the daemon.\n :returns: A tuple of (BuildGraph, AddressMapper, opt Scheduler, TargetRoots).\n "
if (not graph_helper):
native = Native.create(self._global_options)
native.set_panic_handler()
graph_helper = EngineInitializer.setup_legacy_graph(pants_ignore_patterns, workdir, self._global_options.build_file_imports, native=native, build_file_aliases=self._build_config.registered_aliases(), rules=self._build_config.rules(), build_ignore_patterns=build_ignore_patterns, exclude_target_regexps=exclude_target_regexps, subproject_roots=subproject_build_roots, include_trace_on_error=self._options.for_global_scope().print_exception_stacktrace)
target_roots = (target_roots or TargetRootsCalculator.create(options=self._options, build_root=self._root_dir, change_calculator=graph_helper.change_calculator))
(graph, address_mapper) = graph_helper.create_build_graph(target_roots, self._root_dir)
return (graph, address_mapper, graph_helper.scheduler, target_roots)<|docstring|>Determine the BuildGraph, AddressMapper and spec_roots for a given run.
:param list pants_ignore_patterns: The pants ignore patterns from '--pants-ignore'.
:param list build_ignore_patterns: The build ignore patterns from '--build-ignore',
applied during BUILD file searching.
:param str workdir: The pants workdir.
:param list exclude_target_regexps: Regular expressions for targets to be excluded.
:param list target_specs: The original target specs.
:param TargetRoots target_roots: The existing `TargetRoots` object, if any.
:param LegacyGraphHelper graph_helper: A LegacyGraphHelper to use for graph construction,
if available. This would usually come from the daemon.
:returns: A tuple of (BuildGraph, AddressMapper, opt Scheduler, TargetRoots).<|endoftext|> |
28dba97c5abdf524d098e0709b4973140080b8bd626f633f54620e00ae7725ba | def _determine_goals(self, requested_goals):
'Check and populate the requested goals for a given run.'
spec_parser = CmdLineSpecParser(self._root_dir)
for goal in requested_goals:
if self._address_mapper.is_valid_single_address(spec_parser.parse_spec(goal)):
logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
goals = [Goal.by_name(goal) for goal in requested_goals]
return goals | Check and populate the requested goals for a given run. | src/python/pants/bin/goal_runner.py | _determine_goals | foursquare/pants | 1 | python | def _determine_goals(self, requested_goals):
spec_parser = CmdLineSpecParser(self._root_dir)
for goal in requested_goals:
if self._address_mapper.is_valid_single_address(spec_parser.parse_spec(goal)):
logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
goals = [Goal.by_name(goal) for goal in requested_goals]
return goals | def _determine_goals(self, requested_goals):
spec_parser = CmdLineSpecParser(self._root_dir)
for goal in requested_goals:
if self._address_mapper.is_valid_single_address(spec_parser.parse_spec(goal)):
logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
goals = [Goal.by_name(goal) for goal in requested_goals]
return goals<|docstring|>Check and populate the requested goals for a given run.<|endoftext|> |
7fa0d8dee1800e43892c4db422b02f4d673b2bfc7682313a4aff15ec8de571d3 | def _roots_to_targets(self, target_roots):
'Populate the BuildGraph and target list from a set of input TargetRoots.'
with self._run_tracker.new_workunit(name='parse', labels=[WorkUnitLabel.SETUP]):
def filter_for_tag(tag):
return (lambda target: (tag in map(str, target.tags)))
tag_filter = wrap_filters(create_filters(self._tag, filter_for_tag))
def generate_targets():
for address in self._build_graph.inject_roots_closure(target_roots, self._fail_fast):
target = self._build_graph.get_target(address)
if tag_filter(target):
(yield target)
return list(generate_targets()) | Populate the BuildGraph and target list from a set of input TargetRoots. | src/python/pants/bin/goal_runner.py | _roots_to_targets | foursquare/pants | 1 | python | def _roots_to_targets(self, target_roots):
with self._run_tracker.new_workunit(name='parse', labels=[WorkUnitLabel.SETUP]):
def filter_for_tag(tag):
return (lambda target: (tag in map(str, target.tags)))
tag_filter = wrap_filters(create_filters(self._tag, filter_for_tag))
def generate_targets():
for address in self._build_graph.inject_roots_closure(target_roots, self._fail_fast):
target = self._build_graph.get_target(address)
if tag_filter(target):
(yield target)
return list(generate_targets()) | def _roots_to_targets(self, target_roots):
with self._run_tracker.new_workunit(name='parse', labels=[WorkUnitLabel.SETUP]):
def filter_for_tag(tag):
return (lambda target: (tag in map(str, target.tags)))
tag_filter = wrap_filters(create_filters(self._tag, filter_for_tag))
def generate_targets():
for address in self._build_graph.inject_roots_closure(target_roots, self._fail_fast):
target = self._build_graph.get_target(address)
if tag_filter(target):
(yield target)
return list(generate_targets())<|docstring|>Populate the BuildGraph and target list from a set of input TargetRoots.<|endoftext|> |
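The tag filtering above keeps only the targets whose tags pass the filters built from self._tag. A standalone sketch of the same idea, with the pants helpers wrap_filters/create_filters approximated by a plain predicate (an illustration, not the library's actual semantics):

    def make_tag_filter(requested_tags):
        # Keep a target when every requested tag appears among the target's tags
        def tag_filter(target_tags):
            tags = set(map(str, target_tags))
            return all(tag in tags for tag in requested_tags)
        return tag_filter

    keep = make_tag_filter(['integration'])
    print(keep({'integration', 'slow'}))  # True
    print(keep({'unit'}))                 # False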
f59e7732893464b2a1a58e236ce02ef374621b193445bf3464b0e37314649703 | def __init__(self, context, goals, run_tracker, kill_nailguns, exiter=sys.exit):
'\n :param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.\n :param list[Goal] goals: The list of goals to act on.\n :param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.\n :param bool kill_nailguns: Whether or not to kill nailguns after the run.\n :param func exiter: A function that accepts an exit code value and exits (for tests, Optional).\n '
self._context = context
self._goals = goals
self._run_tracker = run_tracker
self._kill_nailguns = kill_nailguns
self._exiter = exiter | :param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.
:param list[Goal] goals: The list of goals to act on.
:param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param bool kill_nailguns: Whether or not to kill nailguns after the run.
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional). | src/python/pants/bin/goal_runner.py | __init__ | foursquare/pants | 1 | python | def __init__(self, context, goals, run_tracker, kill_nailguns, exiter=sys.exit):
'\n :param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.\n :param list[Goal] goals: The list of goals to act on.\n :param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.\n :param bool kill_nailguns: Whether or not to kill nailguns after the run.\n :param func exiter: A function that accepts an exit code value and exits (for tests, Optional).\n '
self._context = context
self._goals = goals
self._run_tracker = run_tracker
self._kill_nailguns = kill_nailguns
self._exiter = exiter | def __init__(self, context, goals, run_tracker, kill_nailguns, exiter=sys.exit):
'\n :param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.\n :param list[Goal] goals: The list of goals to act on.\n :param Runtracker run_tracker: The global, pre-initialized/running RunTracker instance.\n :param bool kill_nailguns: Whether or not to kill nailguns after the run.\n :param func exiter: A function that accepts an exit code value and exits (for tests, Optional).\n '
self._context = context
self._goals = goals
self._run_tracker = run_tracker
self._kill_nailguns = kill_nailguns
self._exiter = exiter<|docstring|>:param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.
:param list[Goal] goals: The list of goals to act on.
:param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param bool kill_nailguns: Whether or not to kill nailguns after the run.
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional).<|endoftext|> |
cab58d9ce7925f4b2dc79b5dc81141d6eb6fce1a07fdb9512ef76f219e8ad2d3 | @classmethod
def subsystems(cls):
'Subsystems used outside of any task.'
return {SourceRootConfig, Reporting, Reproducer, RunTracker, Changed, BinaryUtilPrivate.Factory, Subprocess.Factory} | Subsystems used outside of any task. | src/python/pants/bin/goal_runner.py | subsystems | foursquare/pants | 1 | python | @classmethod
def subsystems(cls):
return {SourceRootConfig, Reporting, Reproducer, RunTracker, Changed, BinaryUtilPrivate.Factory, Subprocess.Factory} | @classmethod
def subsystems(cls):
return {SourceRootConfig, Reporting, Reproducer, RunTracker, Changed, BinaryUtilPrivate.Factory, Subprocess.Factory}<|docstring|>Subsystems used outside of any task.<|endoftext|> |
0263d0a2fda6ddadff09c1e6d82dc98db626e6a52e568d45f093b114a63968b8 | def test001_uptime():
'TC395\n check ubuntu uptime\n\n **Test Scenario**\n #. Check uptime from system file located at /proc/uptime\n #. Compare it with tested method ubuntu.uptime()\n #. Both uptime from system file and from method are almost equal\n '
info('verifying uptime method')
with open('/proc/uptime') as f:
data = f.read()
(uptime, _) = data.split(' ')
assert isclose(float(uptime), j.sal.ubuntu.uptime(), abs_tol=2) | TC395
check ubuntu uptime
**Test Scenario**
#. Check uptime from system file located at /proc/uptime
#. Compare it with tested method ubuntu.uptime()
#. Both uptime from system file and from method are almost equal | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test001_uptime | grimpy/jumpscaleX_libs | 0 | python | def test001_uptime():
'TC395\n check ubuntu uptime\n\n **Test Scenario**\n #. Check uptime from system file located at /proc/uptime\n #. Compare it with tested method ubuntu.uptime()\n #. Both uptime from system file and from method are almost equal\n '
info('verifying uptime method')
with open('/proc/uptime') as f:
data = f.read()
(uptime, _) = data.split(' ')
assert isclose(float(uptime), j.sal.ubuntu.uptime(), abs_tol=2) | def test001_uptime():
'TC395\n check ubuntu uptime\n\n **Test Scenario**\n #. Check uptime from system file located at /proc/uptime\n #. Compare it with tested method ubuntu.uptime()\n #. Both uptime from system file and from method are almost equal\n '
info('verifying uptime method')
with open('/proc/uptime') as f:
data = f.read()
(uptime, _) = data.split(' ')
assert isclose(float(uptime), j.sal.ubuntu.uptime(), abs_tol=2)<|docstring|>TC395
check ubuntu uptime
**Test Scenario**
#. Check uptime from system file located at /proc/uptime
#. Compare it with tested method ubuntu.uptime()
#. Both uptime from system file and from method are almost equal<|endoftext|> |
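A minimal standalone sketch of the uptime comparison described above, using only the standard library; the first whitespace-separated field of /proc/uptime is the uptime in seconds, and the 2-second tolerance is carried over from the test:

    import math

    def read_proc_uptime():
        # /proc/uptime holds '<uptime seconds> <idle seconds>'
        with open('/proc/uptime') as f:
            return float(f.read().split()[0])

    first = read_proc_uptime()
    second = read_proc_uptime()  # compare against any other uptime source
    assert math.isclose(first, second, abs_tol=2)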
7ce4c85ff6a0165270d203de19173993a4c991d1e4fa6db77eee8c11e7f9a2b0 | def test002_service_install():
'TC396\n service_install is not a package install which is mean only create a config file in /etc/init/ dir\n\n **Test Scenario**\n #. Let take a zdb as out tested service , check the zdb config file existing\n #. Check if the service config file is exist, then we need to uninstall service to verify tested method service install works well\n #. Install zdb service by tested method\n #. Verify that config file existing after enable the service\n #. Uninstall service to return to origin state\n #. if the service config file was exist then we need install service again to return to origin state\n '
mysys = None
zdb_service_file = False
info('installing zdb for testing')
j.builders.db.zdb.install()
info('checking system is systemd or not ')
mysys = _check_init_process()
if (mysys == 'my_init'):
info('system is init system')
zdb_service_file = os.path.exists('/etc/service/zdb/run')
elif (mysys == 'systemd'):
info('system is init systemd')
zdb_service_file = os.path.exists('/etc/systemd/system/zdb.service')
else:
info('something unexpected occurred while checking system type')
assert (mysys in ['systemd', 'my_init']), 'system not supported '
info('checking zdb file existing ')
if (zdb_service_file is True):
info('zdb file is exist ,service_uninstall to zdb service ')
j.sal.ubuntu.service_uninstall('zdb')
info('service_install to zdb service ')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/bin'))
info('Verify config file existing after using service_install')
if (mysys == 'my_init'):
assert os.path.exists('/etc/service/zdb/run')
else:
assert os.path.exists('/etc/systemd/system/zdb.service')
info('zdb service uninstall to return to origin state')
j.sal.ubuntu.service_uninstall('zdb')
if (zdb_service_file is True):
info('zdb service install to return to origin state')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/zdb')) | TC396
service_install is not a package install; it only creates a config file in the /etc/init/ dir
**Test Scenario**
#. Let's take zdb as our tested service and check whether the zdb config file exists
#. If the service config file exists, uninstall the service first so the tested service_install method can be verified
#. Install the zdb service by the tested method
#. Verify that the config file exists after enabling the service
#. Uninstall the service to return to the original state
#. If the service config file existed before the test, install the service again to return to the original state | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test002_service_install | grimpy/jumpscaleX_libs | 0 | python | def test002_service_install():
'TC396\n service_install is not a package install which is mean only create a config file in /etc/init/ dir\n\n **Test Scenario**\n #. Let take a zdb as out tested service , check the zdb config file existing\n #. Check if the service config file is exist, then we need to uninstall service to verify tested method service install works well\n #. Install zdb service by tested method\n #. Verify that config file existing after enable the service\n #. Uninstall service to return to origin state\n #. if the service config file was exist then we need install service again to return to origin state\n '
mysys = None
zdb_service_file = False
info('installing zdb for testing')
j.builders.db.zdb.install()
info('checking system is systemd or not ')
mysys = _check_init_process()
if (mysys == 'my_init'):
info('system is init system')
zdb_service_file = os.path.exists('/etc/service/zdb/run')
elif (mysys == 'systemd'):
info('system is init systemd')
zdb_service_file = os.path.exists('/etc/systemd/system/zdb.service')
else:
info('something unexpected occurred while checking system type')
assert (mysys in ['systemd', 'my_init']), 'system not supported '
info('checking zdb file existing ')
if (zdb_service_file is True):
info('zdb file is exist ,service_uninstall to zdb service ')
j.sal.ubuntu.service_uninstall('zdb')
info('service_install to zdb service ')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/bin'))
info('Verify config file existing after using service_install')
if (mysys == 'my_init'):
assert os.path.exists('/etc/service/zdb/run')
else:
assert os.path.exists('/etc/systemd/system/zdb.service')
info('zdb service uninstall to return to origin state')
j.sal.ubuntu.service_uninstall('zdb')
if (zdb_service_file is True):
info('zdb service install to return to origin state')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/zdb')) | def test002_service_install():
'TC396\n service_install is not a package install which is mean only create a config file in /etc/init/ dir\n\n **Test Scenario**\n #. Let take a zdb as out tested service , check the zdb config file existing\n #. Check if the service config file is exist, then we need to uninstall service to verify tested method service install works well\n #. Install zdb service by tested method\n #. Verify that config file existing after enable the service\n #. Uninstall service to return to origin state\n #. if the service config file was exist then we need install service again to return to origin state\n '
mysys = None
zdb_service_file = False
info('installing zdb for testing')
j.builders.db.zdb.install()
info('checking system is systemd or not ')
mysys = _check_init_process()
if (mysys == 'my_init'):
info('system is init system')
zdb_service_file = os.path.exists('/etc/service/zdb/run')
elif (mysys == 'systemd'):
info('system is init systemd')
zdb_service_file = os.path.exists('/etc/systemd/system/zdb.service')
else:
info('something unexpected occurred while checking system type')
assert (mysys in ['systemd', 'my_init']), 'system not supported '
info('checking zdb file existing ')
if (zdb_service_file is True):
info('zdb file is exist ,service_uninstall to zdb service ')
j.sal.ubuntu.service_uninstall('zdb')
info('service_install to zdb service ')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/bin'))
info('Verify config file existing after using service_install')
if (mysys == 'my_init'):
assert os.path.exists('/etc/service/zdb/run')
else:
assert os.path.exists('/etc/systemd/system/zdb.service')
info('zdb service uninstall to return to origin state')
j.sal.ubuntu.service_uninstall('zdb')
if (zdb_service_file is True):
info('zdb service install to return to origin state')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/zdb'))<|docstring|>TC396
service_install is not a package install; it only creates a config file in the /etc/init/ dir
**Test Scenario**
#. Let's take zdb as our tested service and check whether the zdb config file exists
#. If the service config file exists, uninstall the service first so the tested service_install method can be verified
#. Install the zdb service by the tested method
#. Verify that the config file exists after enabling the service
#. Uninstall the service to return to the original state
#. If the service config file existed before the test, install the service again to return to the original state<|endoftext|>
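The scenario above turns on where the service definition file lives for each init system. A small helper that mirrors the two paths asserted in the test (the helper name is an assumption; zdb and the paths come from the test):

    import os

    def service_file_path(name, init_system):
        if init_system == 'my_init':
            return '/etc/service/{}/run'.format(name)  # my_init/runit-style layout
        if init_system == 'systemd':
            return '/etc/systemd/system/{}.service'.format(name)
        raise ValueError('unsupported init system: {}'.format(init_system))

    print(os.path.exists(service_file_path('zdb', 'systemd')))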
2cc868be0c66fbe7ee42046f576bf3cf30cc1f6970bea2deb5dbd17856d76ef6 | def test003_version_get():
'TC398\n Check the ubuntu version\n\n **Test Scenario**\n #. Check Ubuntu version using tested method ubuntu.version_get\n #. Verify step1 output include keyword Ubuntu\n '
info('checking ubuntu version ')
assert ('Ubuntu' in j.sal.ubuntu.version_get()) | TC398
Check the ubuntu version
**Test Scenario**
#. Check Ubuntu version using tested method ubuntu.version_get
#. Verify step1 output include keyword Ubuntu | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test003_version_get | grimpy/jumpscaleX_libs | 0 | python | def test003_version_get():
'TC398\n Check the ubuntu version\n\n **Test Scenario**\n #. Check Ubuntu version using tested method ubuntu.version_get\n #. Verify step1 output include keyword Ubuntu\n '
info('checking ubuntu version ')
assert ('Ubuntu' in j.sal.ubuntu.version_get()) | def test003_version_get():
'TC398\n Check the ubuntu version\n\n **Test Scenario**\n #. Check Ubuntu version using tested method ubuntu.version_get\n #. Verify step1 output include keyword Ubuntu\n '
info('checking ubuntu version ')
assert ('Ubuntu' in j.sal.ubuntu.version_get())<|docstring|>TC398
Check the ubuntu version
**Test Scenario**
#. Check Ubuntu version using tested method ubuntu.version_get
#. Verify step1 output include keyword Ubuntu<|endoftext|> |
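An equivalent version check without the SAL, reading /etc/os-release (a sketch; the exact string returned by version_get() may be formatted differently):

    def os_release_name(path='/etc/os-release'):
        with open(path) as f:
            for line in f:
                if line.startswith('NAME='):
                    return line.split('=', 1)[1].strip().strip('"')
        return ''

    assert 'Ubuntu' in os_release_name()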
573eb7a9a5fc1cca339aefca4e1c7ec2bfbbb2679b07daba845dd2556e530acc | def test004_apt_install_check():
'TC399\n check if an ubuntu package is installed or not installed will install it\n\n **Test Scenario**\n #. Just run method and if it fails, it will raise an error\n '
info('checking ping is installed or not ')
j.sal.ubuntu.apt_install_check('iputils-ping', 'ping')
try:
j.sal.ubuntu.apt_install_check('iputils-ping', 'elfankosh')
info('A RuntimeError exception is expected because elfankosh is not a command')
except Exception as myexcept:
assert ("Could not execute: 'which elfankosh'" in myexcept.exception.args[0]) | TC399
check that an ubuntu package is installed; if it is not installed, the method will install it
**Test Scenario**
#. Just run method and if it fails, it will raise an error | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test004_apt_install_check | grimpy/jumpscaleX_libs | 0 | python | def test004_apt_install_check():
'TC399\n check if an ubuntu package is installed or not installed will install it\n\n **Test Scenario**\n #. Just run method and if it fails, it will raise an error\n '
info('checking ping is installed or not ')
j.sal.ubuntu.apt_install_check('iputils-ping', 'ping')
try:
j.sal.ubuntu.apt_install_check('iputils-ping', 'elfankosh')
info('A RuntimeError exception is expected because elfankosh is not a command')
except Exception as myexcept:
assert ("Could not execute: 'which elfankosh'" in myexcept.exception.args[0]) | def test004_apt_install_check():
'TC399\n check if an ubuntu package is installed or not installed will install it\n\n **Test Scenario**\n #. Just run method and if it fails, it will raise an error\n '
info('checking ping is installed or not ')
j.sal.ubuntu.apt_install_check('iputils-ping', 'ping')
try:
j.sal.ubuntu.apt_install_check('iputils-ping', 'elfankosh')
info('A RuntimeError exception is expected because elfankosh is not a command')
except Exception as myexcept:
assert ("Could not execute: 'which elfankosh'" in myexcept.exception.args[0])<|docstring|>TC399
check that an ubuntu package is installed; if it is not installed, the method will install it
**Test Scenario**
#. Just run method and if it fails, it will raise an error<|endoftext|> |
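The check above boils down to 'is this command on PATH, and if not, install the package that should provide it'. A hedged sketch with the standard library and apt-get, not the SAL's actual implementation:

    import shutil
    import subprocess

    def ensure_command(package, command):
        if shutil.which(command) is None:
            subprocess.run(['apt-get', 'install', '-y', package], check=True)
        if shutil.which(command) is None:
            raise RuntimeError("could not find '{}' after installing {}".format(command, package))

    ensure_command('iputils-ping', 'ping')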
494084b30b6d85de06968d9eb8a5e51117ea0a7dbefd39585062523d766075bd | def test005_apt_install_version():
'TC400\n Install a specific version of an ubuntu package.\n\n **Test Scenario**\n #. Install wget package using apt_install_version method\n #. check version of wget after installing it\n #. step1 and step2 should be identical\n :return:\n '
wget_installed = False
wget_installed = j.sal.ubuntu.is_pkg_installed('wget')
info('print wget install var is {}'.format(wget_installed))
if (wget_installed is True):
info('uninstall wget to test install method ')
info('installing wget with version 1.19.4')
j.sal.ubuntu.apt_install_version('wget', '1.19.4-1ubuntu2')
info('checking installed wget version ')
(rc, out, err) = j.sal.process.execute('wget -V', useShell=True)
info('verifying installed wget version is 1.19.4')
assert ('1.19.4' in out)
info('removing wget to get back to origin state')
j.sal.process.execute('apt remove -y wget')
if (wget_installed is True):
info('uninstall wget and install default version from ubuntu repo')
j.sal.process.execute('apt install -y wget') | TC400
Install a specific version of an ubuntu package.
**Test Scenario**
#. Install wget package using apt_install_version method
#. check version of wget after installing it
#. step1 and step2 should be identical
:return: | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test005_apt_install_version | grimpy/jumpscaleX_libs | 0 | python | def test005_apt_install_version():
'TC400\n Install a specific version of an ubuntu package.\n\n **Test Scenario**\n #. Install wget package using apt_install_version method\n #. check version of wget after installing it\n #. step1 and step2 should be identical\n :return:\n '
wget_installed = False
wget_installed = j.sal.ubuntu.is_pkg_installed('wget')
info('print wget install var is {}'.format(wget_installed))
if (wget_installed is True):
info('uninstall wget to test install method ')
info('installing wget with version 1.19.4')
j.sal.ubuntu.apt_install_version('wget', '1.19.4-1ubuntu2')
info('checking installed wget version ')
(rc, out, err) = j.sal.process.execute('wget -V', useShell=True)
info('verifying installed wget version is 1.19.4')
assert ('1.19.4' in out)
info('removing wget to get back to origin state')
j.sal.process.execute('apt remove -y wget')
if (wget_installed is True):
info('uninstall wget and install default version from ubuntu repo')
j.sal.process.execute('apt install -y wget') | def test005_apt_install_version():
'TC400\n Install a specific version of an ubuntu package.\n\n **Test Scenario**\n #. Install wget package using apt_install_version method\n #. check version of wget after installing it\n #. step1 and step2 should be identical\n :return:\n '
wget_installed = False
wget_installed = j.sal.ubuntu.is_pkg_installed('wget')
info('print wget install var is {}'.format(wget_installed))
if (wget_installed is True):
info('uninstall wget to test install method ')
info('installing wget with version 1.19.4')
j.sal.ubuntu.apt_install_version('wget', '1.19.4-1ubuntu2')
info('checking installed wget version ')
(rc, out, err) = j.sal.process.execute('wget -V', useShell=True)
info('verifying installed wget version is 1.19.4')
assert ('1.19.4' in out)
info('removing wget to get back to origin state')
j.sal.process.execute('apt remove -y wget')
if (wget_installed is True):
info('uninstall wget and install default version from ubuntu repo')
j.sal.process.execute('apt install -y wget')<|docstring|>TC400
Install a specific version of an ubuntu package.
**Test Scenario**
#. Install wget package using apt_install_version method
#. check version of wget after installing it
#. step1 and step2 should be identical
:return:<|endoftext|> |
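Version pinning with apt uses the package=version syntax; a sketch of the same steps via subprocess (the wget version string is the one used in the test and is only available on matching Ubuntu releases):

    import subprocess

    def apt_install_version(package, version):
        # apt accepts an exact version as 'package=version'
        subprocess.run(['apt-get', 'install', '-y', '{}={}'.format(package, version)], check=True)

    apt_install_version('wget', '1.19.4-1ubuntu2')
    out = subprocess.run(['wget', '-V'], capture_output=True, text=True).stdout
    assert '1.19.4' in out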
343f085443399081c7bf1c99358ca0513c2d1c72e5bd99506df46cc729f26a86 | def test006_deb_install():
'TC402\n Install a debian package.\n\n **Test Scenario**\n #. Download python-tmuxp debian package\n #. Install downloaded debian package by deb_install method\n #. Get the installed package status by dpkg command\n #. Installed package python-tmuxp should be install ok\n '
info('Downloading python-tmuxp debian package')
j.sal.process.execute('curl http://security.ubuntu.com/ubuntu/pool/universe/t/tmuxp/python-tmuxp_1.5.0a-1_all.deb > python-tmuxp_1.5.0a-1_all.deb')
info('Install downloaded debian package by deb_install method')
j.sal.ubuntu.deb_install(path='python-tmuxp_1.5.0a-1_all.deb')
info('Get the installed package status by dpkg command')
(rc, out, err) = j.sal.process.execute('dpkg -s python-tmuxp | grep Status', die=False)
info('Installed package python-tmuxp should be install ok')
assert ('install ok' in out) | TC402
Install a debian package.
**Test Scenario**
#. Download python-tmuxp debian package
#. Install downloaded debian package by deb_install method
#. Get the installed package status by dpkg command
#. Installed package python-tmuxp should be install ok | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test006_deb_install | grimpy/jumpscaleX_libs | 0 | python | def test006_deb_install():
'TC402\n Install a debian package.\n\n **Test Scenario**\n #. Download python-tmuxp debian package\n #. Install downloaded debian package by deb_install method\n #. Get the installed package status by dpkg command\n #. Installed package python-tmuxp should be install ok\n '
info('Downloading python-tmuxp debian package')
j.sal.process.execute('curl http://security.ubuntu.com/ubuntu/pool/universe/t/tmuxp/python-tmuxp_1.5.0a-1_all.deb > python-tmuxp_1.5.0a-1_all.deb')
info('Install downloaded debian package by deb_install method')
j.sal.ubuntu.deb_install(path='python-tmuxp_1.5.0a-1_all.deb')
info('Get the installed package status by dpkg command')
(rc, out, err) = j.sal.process.execute('dpkg -s python-tmuxp | grep Status', die=False)
info('Installed package python-tmuxp should be install ok')
assert ('install ok' in out) | def test006_deb_install():
'TC402\n Install a debian package.\n\n **Test Scenario**\n #. Download python-tmuxp debian package\n #. Install downloaded debian package by deb_install method\n #. Get the installed package status by dpkg command\n #. Installed package python-tmuxp should be install ok\n '
info('Downloading python-tmuxp debian package')
j.sal.process.execute('curl http://security.ubuntu.com/ubuntu/pool/universe/t/tmuxp/python-tmuxp_1.5.0a-1_all.deb > python-tmuxp_1.5.0a-1_all.deb')
info('Install downloaded debian package by deb_install method')
j.sal.ubuntu.deb_install(path='python-tmuxp_1.5.0a-1_all.deb')
info('Get the installed package status by dpkg command')
(rc, out, err) = j.sal.process.execute('dpkg -s python-tmuxp | grep Status', die=False)
info('Installed package python-tmuxp should be install ok')
assert ('install ok' in out)<|docstring|>TC402
Install a debian package.
**Test Scenario**
#. Download python-tmuxp debian package
#. Install downloaded debian package by deb_install method
#. Get the installed package status by dpkg command
#. Installed package python-tmuxp should be install ok<|endoftext|> |
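Installing an already-downloaded .deb outside the SAL is typically dpkg -i followed by a status check; a sketch using the package from the test above:

    import subprocess

    deb = 'python-tmuxp_1.5.0a-1_all.deb'
    subprocess.run(['dpkg', '-i', deb], check=True)
    status = subprocess.run(['dpkg', '-s', 'python-tmuxp'], capture_output=True, text=True).stdout
    assert 'Status: install ok installed' in status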
d8950d16a3154a11102e4c529bf8e1013830691b736806302671a6e102cbeebb | def test007_pkg_list():
'TC403\n list files of dpkg.\n\n **Test Scenario**\n # . no package called ping so output len should equal zero the correct package name is iputils-ping\n '
info('verifying that pkg_list equal zero as no dpkg called ping, it should be iputils-ping')
assert (len(j.sal.ubuntu.pkg_list('ping')) == 0)
assert (len(j.sal.ubuntu.pkg_list('iputils-ping')) >= 1) | TC403
list files of dpkg.
**Test Scenario**
#. There is no package called ping, so the output length should equal zero; the correct package name is iputils-ping | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test007_pkg_list | grimpy/jumpscaleX_libs | 0 | python | def test007_pkg_list():
'TC403\n list files of dpkg.\n\n **Test Scenario**\n # . no package called ping so output len should equal zero the correct package name is iputils-ping\n '
info('verifying that pkg_list equal zero as no dpkg called ping, it should be iputils-ping')
assert (len(j.sal.ubuntu.pkg_list('ping')) == 0)
assert (len(j.sal.ubuntu.pkg_list('iputils-ping')) >= 1) | def test007_pkg_list():
'TC403\n list files of dpkg.\n\n **Test Scenario**\n # . no package called ping so output len should equal zero the correct package name is iputils-ping\n '
info('verifying that pkg_list equal zero as no dpkg called ping, it should be iputils-ping')
assert (len(j.sal.ubuntu.pkg_list('ping')) == 0)
assert (len(j.sal.ubuntu.pkg_list('iputils-ping')) >= 1)<|docstring|>TC403
list files of dpkg.
**Test Scenario**
#. There is no package called ping, so the output length should equal zero; the correct package name is iputils-ping<|endoftext|>
8e323c568f1cf10e65e62d7f7c3e2a0c65f7357b38d46f6a04eba49e966cd14d | def test008_service_start():
'TC404\n start an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_start method\n #. If status of cron is running then stop cron service so we can test service_start method\n #. Start cron service using start_service method\n #. Check the corn status by service_status method\n #. As it was running before test,starting cron service after finishing testing by service_start method\n '
cront_status = False
info('check cron status before testing service_start method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is True):
info('stopping cron service so we can test service_start method')
j.sal.ubuntu.service_stop('cron')
info('Start cron service using start_service method ')
j.sal.ubuntu.service_start('cron')
info('check the cron status by service_status method')
info('status of service is {} '.format(j.sal.ubuntu.service_status('cron')))
assert j.sal.ubuntu.service_status('cron') | TC404
start an ubuntu service.
**Test Scenario**
#. Check cron status before testing service_start method
#. If status of cron is running then stop cron service so we can test service_start method
#. Start cron service using the service_start method
#. Check the cron status by the service_status method
#. As it was running before the test, start the cron service again after finishing the test by the service_start method | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test008_service_start | grimpy/jumpscaleX_libs | 0 | python | def test008_service_start():
'TC404\n start an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_start method\n #. If status of cron is running then stop cron service so we can test service_start method\n #. Start cron service using start_service method\n #. Check the corn status by service_status method\n #. As it was running before test,starting cron service after finishing testing by service_start method\n '
cront_status = False
info('check cron status before testing service_start method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is True):
info('stopping cron service so we can test service_start method')
j.sal.ubuntu.service_stop('cron')
info('Start cron service using start_service method ')
j.sal.ubuntu.service_start('cron')
info('check the cron status by service_status method')
info('status of service is {} '.format(j.sal.ubuntu.service_status('cron')))
assert j.sal.ubuntu.service_status('cron') | def test008_service_start():
'TC404\n start an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_start method\n #. If status of cron is running then stop cron service so we can test service_start method\n #. Start cron service using start_service method\n #. Check the corn status by service_status method\n #. As it was running before test,starting cron service after finishing testing by service_start method\n '
cront_status = False
info('check cron status before testing service_start method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is True):
info('stopping cron service so we can test service_start method')
j.sal.ubuntu.service_stop('cron')
info('Start cron service using start_service method ')
j.sal.ubuntu.service_start('cron')
info('check the cron status by service_status method')
info('status of service is {} '.format(j.sal.ubuntu.service_status('cron')))
assert j.sal.ubuntu.service_status('cron')<|docstring|>TC404
start an ubuntu service.
**Test Scenario**
#. Check cron status before testing service_start method
#. If status of cron is running then stop cron service so we can test service_start method
#. Start cron service using the service_start method
#. Check the cron status by the service_status method
#. As it was running before the test, start the cron service again after finishing the test by the service_start method<|endoftext|>
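On a systemd machine the same start-and-verify cycle can be expressed with systemctl; a sketch of the equivalent steps (cron as in the test; on a my_init container the commands differ):

    import subprocess

    def systemd_is_active(name):
        # 'systemctl is-active --quiet' exits 0 only when the unit is running
        return subprocess.run(['systemctl', 'is-active', '--quiet', name]).returncode == 0

    subprocess.run(['systemctl', 'start', 'cron'], check=True)
    assert systemd_is_active('cron')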
7f6d20bcf293d0be09040981af3f9382d048d79f3777748bf2c44f3c76b50ee4 | def test009_service_stop():
'TC405\n stop an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_stop method\n #. If status of cron is not running then start before test service_stop method\n #. Service should be running, stopping cron service using tested method service_stop\n #. Get the service status by service_status method should be False\n #. Retrun cron service status as origin state to be running\n #. Stop cron service to be as origin state\n '
cront_status = False
info('check cron status before testing service_stop method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is False):
info('status was stopped before test method we need to start it now and stop it after finish test')
j.sal.ubuntu.service_start('cron')
info('service should be running, stopping cron service using tested method service_stop')
j.sal.ubuntu.service_stop('cron')
info('Get the service status by service_status method should be False ')
assert (j.sal.ubuntu.service_status('cron') is False)
info('Return cron service status as origin state to be running ')
j.sal.ubuntu.service_start('cron')
if (cront_status is False):
info('stop cron service to be as origin state')
j.sal.ubuntu.service_stop('cron') | TC405
stop an ubuntu service.
**Test Scenario**
#. Check cron status before testing service_stop method
#. If status of cron is not running then start before test service_stop method
#. Service should be running, stopping cron service using tested method service_stop
#. Get the service status by service_status method should be False
#. Return cron service status to the original state (running)
#. Stop cron service to be as origin state | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test009_service_stop | grimpy/jumpscaleX_libs | 0 | python | def test009_service_stop():
'TC405\n stop an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_stop method\n #. If status of cron is not running then start before test service_stop method\n #. Service should be running, stopping cron service using tested method service_stop\n #. Get the service status by service_status method should be False\n #. Retrun cron service status as origin state to be running\n #. Stop cron service to be as origin state\n '
cront_status = False
info('check cron status before testing service_stop method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is False):
info('status was stopped before test method we need to start it now and stop it after finish test')
j.sal.ubuntu.service_start('cron')
info('service should be running, stopping cron service using tested method service_stop')
j.sal.ubuntu.service_stop('cron')
info('Get the service status by service_status method should be False ')
assert (j.sal.ubuntu.service_status('cron') is False)
info('Return cron service status as origin state to be running ')
j.sal.ubuntu.service_start('cron')
if (cront_status is False):
info('stop cron service to be as origin state')
j.sal.ubuntu.service_stop('cron') | def test009_service_stop():
'TC405\n stop an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_stop method\n #. If status of cron is not running then start before test service_stop method\n #. Service should be running, stopping cron service using tested method service_stop\n #. Get the service status by service_status method should be False\n #. Retrun cron service status as origin state to be running\n #. Stop cron service to be as origin state\n '
cront_status = False
info('check cron status before testing service_stop method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is False):
info('status was stopped before test method we need to start it now and stop it after finish test')
j.sal.ubuntu.service_start('cron')
info('service should be running, stopping cron service using tested method service_stop')
j.sal.ubuntu.service_stop('cron')
info('Get the service status by service_status method should be False ')
assert (j.sal.ubuntu.service_status('cron') is False)
info('Return cron service status as origin state to be running ')
j.sal.ubuntu.service_start('cron')
if (cront_status is False):
info('stop cron service to be as origin state')
j.sal.ubuntu.service_stop('cron')<|docstring|>TC405
stop an ubuntu service.
**Test Scenario**
#. Check cron status before testing service_stop method
#. If status of cron is not running then start before test service_stop method
#. Service should be running, stopping cron service using tested method service_stop
#. Get the service status by service_status method should be False
#. Return cron service status to the original state (running)
#. Stop cron service to be as origin state<|endoftext|> |
63f296a5bb9ba05197e98cb9c58402476bc925dcbf80f177bac95588bc6b4200 | def test010_service_restart():
'TC406\n restart an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_start method\n #. If status of cron is running then stop cron service so we can test service_start method\n #. Restart cron service using start_service method\n #. Check the corn status by service_status method\n #. As it was running before test,starting cron service after finishing testing by service_start method\n '
cront_status = False
info('check cron status before testing service_start method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is True):
info('stopping cron service so we can test service_start method')
j.sal.ubuntu.service_stop('cron')
info('restart cron service using start_service method ')
j.sal.ubuntu.service_restart('cron')
info('check the cron status by service command')
assert j.sal.ubuntu.service_status('cron') | TC406
restart an ubuntu service.
**Test Scenario**
#. Check cron status before testing service_start method
#. If status of cron is running then stop cron service so we can test service_start method
#. Restart cron service using the service_restart method
#. Check the cron status by the service_status method
#. As it was running before the test, start the cron service again after finishing the test by the service_start method | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test010_service_restart | grimpy/jumpscaleX_libs | 0 | python | def test010_service_restart():
'TC406\n restart an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_start method\n #. If status of cron is running then stop cron service so we can test service_start method\n #. Restart cron service using start_service method\n #. Check the corn status by service_status method\n #. As it was running before test,starting cron service after finishing testing by service_start method\n '
cront_status = False
info('check cron status before testing service_start method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is True):
info('stopping cron service so we can test service_start method')
j.sal.ubuntu.service_stop('cron')
info('restart cron service using start_service method ')
j.sal.ubuntu.service_restart('cron')
info('check the cron status by service command')
assert j.sal.ubuntu.service_status('cron') | def test010_service_restart():
'TC406\n restart an ubuntu service.\n\n **Test Scenario**\n #. Check cron status before testing service_start method\n #. If status of cron is running then stop cron service so we can test service_start method\n #. Restart cron service using start_service method\n #. Check the corn status by service_status method\n #. As it was running before test,starting cron service after finishing testing by service_start method\n '
cront_status = False
info('check cron status before testing service_start method ')
cront_status = j.sal.ubuntu.service_status('cron')
if (cront_status is True):
info('stopping cron service so we can test service_start method')
j.sal.ubuntu.service_stop('cron')
info('restart cron service using start_service method ')
j.sal.ubuntu.service_restart('cron')
info('check the cron status by service command')
assert j.sal.ubuntu.service_status('cron')<|docstring|>TC406
restart an ubuntu service.
**Test Scenario**
#. Check cron status before testing service_start method
#. If status of cron is running then stop cron service so we can test service_start method
#. Restart cron service using the service_restart method
#. Check the cron status by the service_status method
#. As it was running before the test, start the cron service again after finishing the test by the service_start method<|endoftext|>
ef60f6d1a86bbe9a01313b64fd086a1ebb7a081b7bc5bfa1fcaf59b7046d72e4 | def test011_service_status():
'TC407\n check service status\n\n **Test Scenario**\n #. Get service status\n #. if service is not running, verifying tested method return False\n #. else service is running, should return True\n '
info('Get service status')
state = j.sal.ubuntu.service_status('cron')
if (state is False):
info('service is not running, verifying tested method return False')
assert (j.sal.ubuntu.service_status('cron') is False)
else:
info('service is running, verifying tested method should return True')
assert j.sal.ubuntu.service_status('cron') | TC407
check service status
**Test Scenario**
#. Get service status
#. if service is not running, verifying tested method return False
#. else service is running, should return True | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test011_service_status | grimpy/jumpscaleX_libs | 0 | python | def test011_service_status():
'TC407\n check service status\n\n **Test Scenario**\n #. Get service status\n #. if service is not running, verifying tested method return False\n #. else service is running, should return True\n '
info('Get service status')
state = j.sal.ubuntu.service_status('cron')
if (state is False):
info('service is not running, verifying tested method return False')
assert (j.sal.ubuntu.service_status('cron') is False)
else:
info('service is running, verifying tested method should return True')
assert j.sal.ubuntu.service_status('cron') | def test011_service_status():
'TC407\n check service status\n\n **Test Scenario**\n #. Get service status\n #. if service is not running, verifying tested method return False\n #. else service is running, should return True\n '
info('Get service status')
state = j.sal.ubuntu.service_status('cron')
if (state is False):
info('service is not running, verifying tested method return False')
assert (j.sal.ubuntu.service_status('cron') is False)
else:
info('service is running, verifying tested method should return True')
assert j.sal.ubuntu.service_status('cron')<|docstring|>TC407
check service status
**Test Scenario**
#. Get service status
#. if service is not running, verifying tested method return False
#. else service is running, should return True<|endoftext|> |
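The service tests above share one pattern: record the original state, exercise the method, then restore the state. A condensed sketch using the same SAL calls shown in the tests (assuming j is the usual Jumpscale entry point):

    was_running = j.sal.ubuntu.service_status('cron')   # remember the original state
    j.sal.ubuntu.service_restart('cron')
    assert j.sal.ubuntu.service_status('cron')
    if not was_running:
        j.sal.ubuntu.service_stop('cron')                # restore the original state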
b64f39cd32dbfe0109c0a6e364fba0d4e54c367e5622210675e16273da30c3aa | def test012_apt_find_all():
"TC408\n find all packages match with the package_name, this mean must not be installed\n\n **Test Scenario**\n #. alot if packages are containing wget like 'python3-wget', 'wget'\n "
info('verifying all available packages have a keyword wget')
assert ('wget' in j.sal.ubuntu.apt_find_all('wget')) | TC408
find all packages matching the package_name; the packages do not have to be installed
**Test Scenario**
#. a lot of packages contain wget in their name, like 'python3-wget' and 'wget' | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test012_apt_find_all | grimpy/jumpscaleX_libs | 0 | python | def test012_apt_find_all():
"TC408\n find all packages match with the package_name, this mean must not be installed\n\n **Test Scenario**\n #. alot if packages are containing wget like 'python3-wget', 'wget'\n "
info('verifying all available packages have a keyword wget')
assert ('wget' in j.sal.ubuntu.apt_find_all('wget')) | def test012_apt_find_all():
"TC408\n find all packages match with the package_name, this mean must not be installed\n\n **Test Scenario**\n #. alot if packages are containing wget like 'python3-wget', 'wget'\n "
info('verifying all available packages have a keyword wget')
assert ('wget' in j.sal.ubuntu.apt_find_all('wget'))<|docstring|>TC408
find all packages matching the package_name; the packages do not have to be installed
**Test Scenario**
#. a lot of packages contain wget in their name, like 'python3-wget' and 'wget'<|endoftext|>
ff94aee5b4687118fc5f5c8dc26820dde4ce9440b7d5ea107b2373f030727c61 | def test013_is_pkg_installed():
'TC409\n check if the package is installed or not\n\n **Test Scenario**\n #. make sure wget installed successfully\n #. Install it if does not installed\n #. Verifying tested pkg_installed should return True as wget is installed\n #. Remove it to return to origin state\n '
wget_is_installed = False
info('make sure wget installed')
(rc1, out, err) = j.sal.process.execute('dpkg -s wget|grep Status')
if ('deinstall ok' in out):
info('install wget as it does not installed')
j.sal.process.execute('apt install -y wget')
info('verifying tested pkg_installed should return True as wget is installed')
wget_is_installed = j.sal.ubuntu.is_pkg_installed('wget')
info(' wget_is_installed is {} '.format(wget_is_installed))
assert wget_is_installed
if ('install ok' not in out):
info('Remove it to return to origin state')
j.sal.process.execute('apt remove -y wget') | TC409
check if the package is installed or not
**Test Scenario**
#. make sure wget installed successfully
#. Install it if it is not installed
#. Verifying tested pkg_installed should return True as wget is installed
#. Remove it to return to origin state | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test013_is_pkg_installed | grimpy/jumpscaleX_libs | 0 | python | def test013_is_pkg_installed():
'TC409\n check if the package is installed or not\n\n **Test Scenario**\n #. make sure wget installed successfully\n #. Install it if does not installed\n #. Verifying tested pkg_installed should return True as wget is installed\n #. Remove it to return to origin state\n '
wget_is_installed = False
info('make sure wget installed')
(rc1, out, err) = j.sal.process.execute('dpkg -s wget|grep Status')
if ('deinstall ok' in out):
info('install wget as it does not installed')
j.sal.process.execute('apt install -y wget')
info('verifying tested pkg_installed should return True as wget is installed')
wget_is_installed = j.sal.ubuntu.is_pkg_installed('wget')
info(' wget_is_installed is {} '.format(wget_is_installed))
assert wget_is_installed
if ('install ok' not in out):
info('Remove it to return to origin state')
j.sal.process.execute('apt remove -y wget') | def test013_is_pkg_installed():
'TC409\n check if the package is installed or not\n\n **Test Scenario**\n #. make sure wget installed successfully\n #. Install it if does not installed\n #. Verifying tested pkg_installed should return True as wget is installed\n #. Remove it to return to origin state\n '
wget_is_installed = False
info('make sure wget installed')
(rc1, out, err) = j.sal.process.execute('dpkg -s wget|grep Status')
if ('deinstall ok' in out):
info('install wget as it does not installed')
j.sal.process.execute('apt install -y wget')
info('verifying tested pkg_installed should return True as wget is installed')
wget_is_installed = j.sal.ubuntu.is_pkg_installed('wget')
info(' wget_is_installed is {} '.format(wget_is_installed))
assert wget_is_installed
if ('install ok' not in out):
info('Remove it to return to origin state')
j.sal.process.execute('apt remove -y wget')<|docstring|>TC409
check if the package is installed or not
**Test Scenario**
#. make sure wget installed successfully
#. Install it if it is not installed
#. Verifying tested pkg_installed should return True as wget is installed
#. Remove it to return to origin state<|endoftext|> |
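Checking install state without the SAL comes down to parsing dpkg's Status field, the same field the test greps for; a sketch:

    import subprocess

    def is_pkg_installed(name):
        proc = subprocess.run(['dpkg', '-s', name], capture_output=True, text=True)
        return proc.returncode == 0 and 'Status: install ok installed' in proc.stdout

    print(is_pkg_installed('wget'))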
7eb00b79ed9cf9f845163f73abd4be8028dc788774333795f00777cb685552d2 | def test014_sshkey_generate():
'TC410\n generate a new ssh key\n\n **Test Scenario**\n #. Generate sshkey in path /tmp/id_rsa\n #. verify that there is a files, their names contain id_rsa\n '
info('Generate sshkey in path /tmp/id_rsa')
j.sal.ubuntu.sshkey_generate(path='/tmp/id_rsa')
info('verify that there is a files, their names contain id_rsa')
(rc, out, err) = j.sal.process.execute('ls /tmp | grep id_rsa')
assert ('id_rsa' in out) | TC410
generate a new ssh key
**Test Scenario**
#. Generate sshkey in path /tmp/id_rsa
#. verify that there are files whose names contain id_rsa | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test014_sshkey_generate | grimpy/jumpscaleX_libs | 0 | python | def test014_sshkey_generate():
'TC410\n generate a new ssh key\n\n **Test Scenario**\n #. Generate sshkey in path /tmp/id_rsa\n #. verify that there is a files, their names contain id_rsa\n '
info('Generate sshkey in path /tmp/id_rsa')
j.sal.ubuntu.sshkey_generate(path='/tmp/id_rsa')
info('verify that there is a files, their names contain id_rsa')
(rc, out, err) = j.sal.process.execute('ls /tmp | grep id_rsa')
assert ('id_rsa' in out) | def test014_sshkey_generate():
'TC410\n generate a new ssh key\n\n **Test Scenario**\n #. Generate sshkey in path /tmp/id_rsa\n #. verify that there is a files, their names contain id_rsa\n '
info('Generate sshkey in path /tmp/id_rsa')
j.sal.ubuntu.sshkey_generate(path='/tmp/id_rsa')
info('verify that there is a files, their names contain id_rsa')
(rc, out, err) = j.sal.process.execute('ls /tmp | grep id_rsa')
assert ('id_rsa' in out)<|docstring|>TC410
generate a new ssh key
**Test Scenario**
#. Generate sshkey in path /tmp/id_rsa
#. verify that there are files whose names contain id_rsa<|endoftext|>
1fef9c83a97b626e640f746f0678b35c64a9b405de856c3bf35e74475bd4708f | def test015_apt_get_cache_keys():
'TC411\n get all cached packages of ubuntu\n\n **Test Scenario**\n #. Get all cached keys by our tested method apt_get_cache_keys\n #. Get a one package from cached packages by apt-cache command\n #. Compare the package name of step2 should be included in keys from step 1\n '
info('Get all cached keys by our tested method apt_get_cache_keys')
cache_list = j.sal.ubuntu.apt_get_cache_keys()
info(' Get a one package from cached packages by apt-cache command')
(rc1, pkg_name, err1) = j.sal.process.execute("apt-cache search 'Network' | head -1| awk '{print $1}'")
name = pkg_name.strip()
info('verify one package of the cached packages from the apt-cache command exists in the tested method output')
assert (name in cache_list) | TC411
get all cached packages of ubuntu
**Test Scenario**
#. Get all cached keys by our tested method apt_get_cache_keys
#. Get a one package from cached packages by apt-cache command
#. Compare the package name of step2 should be included in keys from step 1 | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test015_apt_get_cache_keys | grimpy/jumpscaleX_libs | 0 | python | def test015_apt_get_cache_keys():
'TC411\n get all cached packages of ubuntu\n\n **Test Scenario**\n #. Get all cached keys by our tested method apt_get_cache_keys\n #. Get a one package from cached packages by apt-cache command\n #. Compare the package name of step2 should be included in keys from step 1\n '
info('Get all cached keys by our tested method apt_get_cache_keys')
cache_list = j.sal.ubuntu.apt_get_cache_keys()
info(' Get a one package from cached packages by apt-cache command')
(rc1, pkg_name, err1) = j.sal.process.execute("apt-cache search 'Network' | head -1| awk '{print $1}'")
name = pkg_name.strip()
info('verify one package of the cached packages from the apt-cache command exists in the tested method output')
assert (name in cache_list) | def test015_apt_get_cache_keys():
'TC411\n get all cached packages of ubuntu\n\n **Test Scenario**\n #. Get all cached keys by our tested method apt_get_cache_keys\n #. Get a one package from cached packages by apt-cache command\n #. Compare the package name of step2 should be included in keys from step 1\n '
info('Get all cached keys by our tested method apt_get_cache_keys')
cache_list = j.sal.ubuntu.apt_get_cache_keys()
info(' Get a one package from cached packages by apt-cache command')
(rc1, pkg_name, err1) = j.sal.process.execute("apt-cache search 'Network' | head -1| awk '{print $1}'")
name = pkg_name.strip()
info('verify one package of the cached packages from the apt-cache command exists in the tested method output')
assert (name in cache_list)<|docstring|>TC411
get all cached packages of ubuntu
**Test Scenario**
#. Get all cached keys by our tested method apt_get_cache_keys
#. Get a one package from cached packages by apt-cache command
#. Compare the package name of step2 should be included in keys from step 1<|endoftext|> |
690de65d1d2a7f94286e6c18850698918d55bbf5457cf2ba9d33b8d65f1c5915 | def test016_apt_get_installed():
'TC412\n Get all the installed packages.\n\n **Test Scenario**\n #. Get length of installed packages from apt list command\n #. Get length of installed packages from tested method\n #. Compare step 1 and 2 should be equal installed packages by tested method and apt list command should be the same\n '
sal_count = 0
info('Get length of installed packages from apt list command ')
(rc1, os_count, err1) = j.sal.process.execute("apt list --installed |grep -v 'Listing...'| wc -l")
os_int_count = int(os_count.strip())
info('Get length of installed packages from tested method')
sal_count = len(j.sal.ubuntu.apt_get_installed())
info('Verifying installed packages by tested method and apt list command should be the same')
assert (sal_count == os_int_count) | TC412
Get all the installed packages.
**Test Scenario**
#. Get length of installed packages from apt list command
#. Get length of installed packages from tested method
#. Compare step 1 and step 2; the package counts from the tested method and from the apt list command should be the same | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test016_apt_get_installed | grimpy/jumpscaleX_libs | 0 | python | def test016_apt_get_installed():
'TC412\n Get all the installed packages.\n\n **Test Scenario**\n #. Get length of installed packages from apt list command\n #. Get length of installed packages from tested method\n #. Compare step 1 and 2 should be equal installed packages by tested method and apt list command should be the same\n '
sal_count = 0
info('Get length of installed packages from apt list command ')
(rc1, os_count, err1) = j.sal.process.execute("apt list --installed |grep -v 'Listing...'| wc -l")
os_int_count = int(os_count.strip())
info('Get length of installed packages from tested method')
sal_count = len(j.sal.ubuntu.apt_get_installed())
info('Verifying installed packages by tested method and apt list command should be the same')
assert (sal_count == os_int_count) | def test016_apt_get_installed():
'TC412\n Get all the installed packages.\n\n **Test Scenario**\n #. Get length of installed packages from apt list command\n #. Get length of installed packages from tested method\n #. Compare step 1 and 2 should be equal installed packages by tested method and apt list command should be the same\n '
sal_count = 0
info('Get length of installed packages from apt list command ')
(rc1, os_count, err1) = j.sal.process.execute("apt list --installed |grep -v 'Listing...'| wc -l")
os_int_count = int(os_count.strip())
info('Get length of installed packages from tested method')
sal_count = len(j.sal.ubuntu.apt_get_installed())
info('Verifying installed packages by tested method and apt list command should be the same')
assert (sal_count == os_int_count)<|docstring|>TC412
Get all the installed packages.
**Test Scenario**
#. Get length of installed packages from apt list command
#. Get length of installed packages from tested method
#. Compare step 1 and step 2; the package counts from the tested method and from the apt list command should be the same<|endoftext|>
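The count comparison in the scenario above can be reproduced directly with the same two sources used by the test (assuming j is the usual Jumpscale entry point):

    sal_count = len(j.sal.ubuntu.apt_get_installed())
    _, out, _ = j.sal.process.execute("apt list --installed | grep -v 'Listing...' | wc -l")
    assert sal_count == int(out.strip())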
439be9569754aaddb941b6cdd698154c4f341d8cff1029588faa4856fc4cac8a | def test017_apt_install():
'TC413\n install a specific ubuntu package.\n\n **Test Scenario**\n #. Check if speedtest-cli is installed or not\n #. if installed, remove it and use tested method to install it and verify that is installed\n #. else we install speedtest-cli by tested method\n #. verify that is installed successfully\n #. remove it to be as origin status\n '
info('Check if speedtest-cli is installed or not')
speedtest_installed = j.sal.ubuntu.is_pkg_installed('speedtest-cli')
if speedtest_installed:
info('remove speedtest-cli package')
j.sal.process.execute('apt remove -y speedtest-cli')
info('install speedtest-cli package')
j.sal.ubuntu.apt_install('speedtest-cli')
info('verify that speedtest-cli is installed')
(rc1, out1, err1) = j.sal.process.execute('dpkg -s speedtest-cli|grep Status')
assert ('install ok' in out1)
if (not speedtest_installed):
info('remove it speedtest-cli to be as origin status')
j.sal.process.execute('apt remove -y speedtest-cli') | TC413
install a specific ubuntu package.
**Test Scenario**
#. Check if speedtest-cli is installed or not
#. if installed, remove it, then use the tested method to install it and verify that it is installed
#. else, install speedtest-cli by the tested method
#. verify that it is installed successfully
#. remove it to be as origin status | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test017_apt_install | grimpy/jumpscaleX_libs | 0 | python | def test017_apt_install():
'TC413\n install a specific ubuntu package.\n\n **Test Scenario**\n #. Check if speedtest-cli is installed or not\n #. if installed, remove it and use tested method to install it and verify that is installed\n #. else we install speedtest-cli by tested method\n #. verify that is installed successfully\n #. remove it to be as origin status\n '
info('Check if speedtest-cli is installed or not')
speedtest_installed = j.sal.ubuntu.is_pkg_installed('speedtest-cli')
if speedtest_installed:
info('remove speedtest-cli package')
j.sal.process.execute('apt remove -y speedtest-cli')
info('install speedtest-cli package')
j.sal.ubuntu.apt_install('speedtest-cli')
info('verify that speedtest-cli is installed')
(rc1, out1, err1) = j.sal.process.execute('dpkg -s speedtest-cli|grep Status')
assert ('install ok' in out1)
if (not speedtest_installed):
info('remove it speedtest-cli to be as origin status')
j.sal.process.execute('apt remove -y speedtest-cli') | def test017_apt_install():
'TC413\n install a specific ubuntu package.\n\n **Test Scenario**\n #. Check if speedtest-cli is installed or not\n #. if installed, remove it and use tested method to install it and verify that is installed\n #. else we install speedtest-cli by tested method\n #. verify that is installed successfully\n #. remove it to be as origin status\n '
info('Check if speedtest-cli is installed or not')
speedtest_installed = j.sal.ubuntu.is_pkg_installed('speedtest-cli')
if speedtest_installed:
info('remove speedtest-cli package')
j.sal.process.execute('apt remove -y speedtest-cli')
info('install speedtest-cli package')
j.sal.ubuntu.apt_install('speedtest-cli')
info('verify that speedtest-cli is installed')
(rc1, out1, err1) = j.sal.process.execute('dpkg -s speedtest-cli|grep Status')
assert ('install ok' in out1)
if (not speedtest_installed):
info('remove it speedtest-cli to be as origin status')
j.sal.process.execute('apt remove -y speedtest-cli')<|docstring|>TC413
install a specific ubuntu package.
**Test Scenario**
#. Check if speedtest-cli is installed or not
#. if installed, remove it, then use the tested method to install it and verify that it is installed
#. else, install speedtest-cli by the tested method
#. verify that it is installed successfully
#. remove it to be as origin status<|endoftext|> |
96dbbc823ceebd7d978d056ab8cf0873381279651e605553a9162931cf0a7238 | def test018_apt_sources_list():
'TC414\n represents the full sources.list + sources.list.d file\n\n **Test Scenario**\n #. Get all listed apt sources by tested method apt_sources_list\n #. Get the first line in apt sources list\n #. Verify first item should contains a keyword deb\n '
info('Get all listed apt sources by tested method apt_sources_list')
apt_src_list = j.sal.ubuntu.apt_sources_list()
info('Get the first line in apt sources list')
first_src = apt_src_list[0]
info('Verify first item should contains a keyword deb')
assert ('deb' in first_src) | TC414
represents the full sources.list + sources.list.d file
**Test Scenario**
#. Get all listed apt sources by tested method apt_sources_list
#. Get the first line in apt sources list
#. Verify the first item contains the keyword deb | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test018_apt_sources_list | grimpy/jumpscaleX_libs | 0 | python | def test018_apt_sources_list():
'TC414\n represents the full sources.list + sources.list.d file\n\n **Test Scenario**\n #. Get all listed apt sources by tested method apt_sources_list\n #. Get the first line in apt sources list\n #. Verify first item should contains a keyword deb\n '
info('Get all listed apt sources by tested method apt_sources_list')
apt_src_list = j.sal.ubuntu.apt_sources_list()
info('Get the first line in apt sources list')
first_src = apt_src_list[0]
info('Verify first item should contains a keyword deb')
assert ('deb' in first_src) | def test018_apt_sources_list():
'TC414\n represents the full sources.list + sources.list.d file\n\n **Test Scenario**\n #. Get all listed apt sources by tested method apt_sources_list\n #. Get the first line in apt sources list\n #. Verify first item should contains a keyword deb\n '
info('Get all listed apt sources by tested method apt_sources_list')
apt_src_list = j.sal.ubuntu.apt_sources_list()
info('Get the first line in apt sources list')
first_src = apt_src_list[0]
info('Verify first item should contains a keyword deb')
assert ('deb' in first_src)<|docstring|>TC414
represents the full sources.list + sources.list.d file
**Test Scenario**
#. Get all listed apt sources by tested method apt_sources_list
#. Get the first line in apt sources list
#. Verify the first item contains the keyword deb<|endoftext|>
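Since apt_sources_list() simply returns the configured source lines, a hedged sketch of how a caller might inspect the result (import line assumed):

```python
from Jumpscale import j

sources = j.sal.ubuntu.apt_sources_list()
print(len(sources), "apt source entries")
if sources:
    # Active entries are deb/deb-src lines, e.g. "deb http://archive.ubuntu.com/ubuntu bionic main".
    assert "deb" in sources[0]
```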
96ba217018527068e136c645e23253ad46952e19ecd1584e026964d3583358cc | def test019_apt_sources_uri_add():
'TC415\n add a new apt source url.\n\n **Test Scenario**\n #. Check if the source link file that am gonna add it exist or not\n #. file exist move it a /tmp dir\n #. Adding new url to apt sources\n #. Check contents of added file under /etc/apt/sources.list.d\n #. Verify file contents are contains deb keyword\n #. Remove created file by tested method\n #. if file was exist in step 1 , move the backup file from /tmp to origin path\n '
info('check if the source link file that am gonna add it exist or not')
file_exist = os.path.exists('/etc/apt/sources.list.d/archive.getdeb.net.list')
if file_exist:
info('file exist move it a /tmp dir')
j.sal.process.execute('mv /etc/apt/sources.list.d/archive.getdeb.net.list /tmp')
info('adding new url to apt sources ')
j.sal.ubuntu.apt_sources_uri_add('http://archive.getdeb.net/ubuntu wily-getdeb games')
info('check contents of added file under /etc/apt/sources.list.d')
(rc1, os_apt_sources, err1) = j.sal.process.execute("grep 'ubuntu wily-getdeb games' /etc/apt/sources.list.d/archive.getdeb.net.list")
info('verify file contents are contains deb keyword')
assert ('deb' in os_apt_sources)
info('remove created file by tested method')
j.sal.process.execute('rm /etc/apt/sources.list.d/archive.getdeb.net.list')
if file_exist:
info('move the backuped file from /tmp to origin path')
j.sal.process.execute('mv /tmp/archive.getdeb.net.list /etc/apt/sources.list.d/') | TC415
add a new apt source url.
**Test Scenario**
#. Check whether the source list file to be added already exists
#. If the file exists, move it to the /tmp dir
#. Add the new url to the apt sources
#. Check the contents of the added file under /etc/apt/sources.list.d
#. Verify the file contents contain the keyword deb
#. Remove the created file by the tested method
#. If the file existed in step 1, move the backup file from /tmp back to its origin path | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test019_apt_sources_uri_add | grimpy/jumpscaleX_libs | 0 | python | def test019_apt_sources_uri_add():
'TC415\n add a new apt source url.\n\n **Test Scenario**\n #. Check if the source link file that am gonna add it exist or not\n #. file exist move it a /tmp dir\n #. Adding new url to apt sources\n #. Check contents of added file under /etc/apt/sources.list.d\n #. Verify file contents are contains deb keyword\n #. Remove created file by tested method\n #. if file was exist in step 1 , move the backup file from /tmp to origin path\n '
info('check if the source link file that am gonna add it exist or not')
file_exist = os.path.exists('/etc/apt/sources.list.d/archive.getdeb.net.list')
if file_exist:
info('file exist move it a /tmp dir')
j.sal.process.execute('mv /etc/apt/sources.list.d/archive.getdeb.net.list /tmp')
info('adding new url to apt sources ')
j.sal.ubuntu.apt_sources_uri_add('http://archive.getdeb.net/ubuntu wily-getdeb games')
info('check contents of added file under /etc/apt/sources.list.d')
(rc1, os_apt_sources, err1) = j.sal.process.execute("grep 'ubuntu wily-getdeb games' /etc/apt/sources.list.d/archive.getdeb.net.list")
info('verify file contents are contains deb keyword')
assert ('deb' in os_apt_sources)
info('remove created file by tested method')
j.sal.process.execute('rm /etc/apt/sources.list.d/archive.getdeb.net.list')
if file_exist:
info('move the backuped file from /tmp to origin path')
j.sal.process.execute('mv /tmp/archive.getdeb.net.list /etc/apt/sources.list.d/') | def test019_apt_sources_uri_add():
'TC415\n add a new apt source url.\n\n **Test Scenario**\n #. Check if the source link file that am gonna add it exist or not\n #. file exist move it a /tmp dir\n #. Adding new url to apt sources\n #. Check contents of added file under /etc/apt/sources.list.d\n #. Verify file contents are contains deb keyword\n #. Remove created file by tested method\n #. if file was exist in step 1 , move the backup file from /tmp to origin path\n '
info('check if the source link file that am gonna add it exist or not')
file_exist = os.path.exists('/etc/apt/sources.list.d/archive.getdeb.net.list')
if file_exist:
info('file exist move it a /tmp dir')
j.sal.process.execute('mv /etc/apt/sources.list.d/archive.getdeb.net.list /tmp')
info('adding new url to apt sources ')
j.sal.ubuntu.apt_sources_uri_add('http://archive.getdeb.net/ubuntu wily-getdeb games')
info('check contents of added file under /etc/apt/sources.list.d')
(rc1, os_apt_sources, err1) = j.sal.process.execute("grep 'ubuntu wily-getdeb games' /etc/apt/sources.list.d/archive.getdeb.net.list")
info('verify file contents are contains deb keyword')
assert ('deb' in os_apt_sources)
info('remove created file by tested method')
j.sal.process.execute('rm /etc/apt/sources.list.d/archive.getdeb.net.list')
if file_exist:
info('move the backuped file from /tmp to origin path')
j.sal.process.execute('mv /tmp/archive.getdeb.net.list /etc/apt/sources.list.d/')<|docstring|>TC415
add a new apt source url.
**Test Scenario**
#. Check whether the source list file to be added already exists
#. If the file exists, move it to the /tmp dir
#. Add the new url to the apt sources
#. Check the contents of the added file under /etc/apt/sources.list.d
#. Verify the file contents contain the keyword deb
#. Remove the created file by the tested method
#. If the file existed in step 1, move the backup file from /tmp back to its origin path<|endoftext|>
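The add/verify/clean-up cycle described above can be written out compactly. The URL and the generated file name under /etc/apt/sources.list.d are taken from the test; everything else is a sketch rather than the canonical usage:

```python
from Jumpscale import j

# Register an extra repository; the sal layer writes it under /etc/apt/sources.list.d/.
j.sal.ubuntu.apt_sources_uri_add('http://archive.getdeb.net/ubuntu wily-getdeb games')

# The written file should now contain a matching deb line.
rc, out, err = j.sal.process.execute(
    "grep 'ubuntu wily-getdeb games' /etc/apt/sources.list.d/archive.getdeb.net.list")
assert 'deb' in out

# Remove the file again so the apt configuration is left unchanged.
j.sal.process.execute('rm /etc/apt/sources.list.d/archive.getdeb.net.list')
```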
2dbf82480aa782e2129f50755fc8706149e6b3eaffd8a697ebb735b87c000a76 | def test020_apt_upgrade():
'TC416\n upgrade is used to install the newest versions of all packages currently installed on the system\n\n **Test Scenario**\n #. Get number of packages that need to be upgraded\n #. Run tested method to upgrade packages\n #. Get number of packages that need to be upgraded again after upgrade\n #. if upgrade runs successfully then number in step 1 should be greater than one in step3\n #. comparing the count of packages need to be upgraded before and after upgarde\n #. if all packages are already upgraded before run our tested method and no need to upgrade any packages they should be equal so i used GreaterEqual\n '
info('Get number of packages that need to be upgraded')
(rc1, upgradable_pack_before_upgrade, err1) = j.sal.process.execute("apt list --upgradable | grep -v 'Listing...'| wc -l")
upgradable_pack_count_before_upgrade = int(upgradable_pack_before_upgrade.strip())
info('Run tested method to upgrade packages')
j.sal.ubuntu.apt_upgrade()
info('Get number of packages that need to be upgraded again after upgrade')
(rc2, upgradable_pack_after_upgrade, err2) = j.sal.process.execute("apt list --upgradable | grep -v 'Listing...'| wc -l")
upgradable_pack_count_after_upgrade = int(upgradable_pack_after_upgrade.strip())
info('comparing the count of packages need to be upgraded before and after upgarde ')
assert (upgradable_pack_count_before_upgrade >= upgradable_pack_count_after_upgrade) | TC416
upgrade is used to install the newest versions of all packages currently installed on the system
**Test Scenario**
#. Get number of packages that need to be upgraded
#. Run tested method to upgrade packages
#. Get number of packages that need to be upgraded again after upgrade
#. if the upgrade runs successfully, the number in step 1 should be greater than or equal to the one in step 3
#. comparing the count of packages that need to be upgraded before and after the upgrade
#. if all packages were already upgraded before running the tested method, no packages need upgrading and the two counts are equal, so a greater-or-equal comparison is used | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test020_apt_upgrade | grimpy/jumpscaleX_libs | 0 | python | def test020_apt_upgrade():
'TC416\n upgrade is used to install the newest versions of all packages currently installed on the system\n\n **Test Scenario**\n #. Get number of packages that need to be upgraded\n #. Run tested method to upgrade packages\n #. Get number of packages that need to be upgraded again after upgrade\n #. if upgrade runs successfully then number in step 1 should be greater than one in step3\n #. comparing the count of packages need to be upgraded before and after upgarde\n #. if all packages are already upgraded before run our tested method and no need to upgrade any packages they should be equal so i used GreaterEqual\n '
info('Get number of packages that need to be upgraded')
(rc1, upgradable_pack_before_upgrade, err1) = j.sal.process.execute("apt list --upgradable | grep -v 'Listing...'| wc -l")
upgradable_pack_count_before_upgrade = int(upgradable_pack_before_upgrade.strip())
info('Run tested method to upgrade packages')
j.sal.ubuntu.apt_upgrade()
info('Get number of packages that need to be upgraded again after upgrade')
(rc2, upgradable_pack_after_upgrade, err2) = j.sal.process.execute("apt list --upgradable | grep -v 'Listing...'| wc -l")
upgradable_pack_count_after_upgrade = int(upgradable_pack_after_upgrade.strip())
info('comparing the count of packages need to be upgraded before and after upgarde ')
assert (upgradable_pack_count_before_upgrade >= upgradable_pack_count_after_upgrade) | def test020_apt_upgrade():
'TC416\n upgrade is used to install the newest versions of all packages currently installed on the system\n\n **Test Scenario**\n #. Get number of packages that need to be upgraded\n #. Run tested method to upgrade packages\n #. Get number of packages that need to be upgraded again after upgrade\n #. if upgrade runs successfully then number in step 1 should be greater than one in step3\n #. comparing the count of packages need to be upgraded before and after upgarde\n #. if all packages are already upgraded before run our tested method and no need to upgrade any packages they should be equal so i used GreaterEqual\n '
info('Get number of packages that need to be upgraded')
(rc1, upgradable_pack_before_upgrade, err1) = j.sal.process.execute("apt list --upgradable | grep -v 'Listing...'| wc -l")
upgradable_pack_count_before_upgrade = int(upgradable_pack_before_upgrade.strip())
info('Run tested method to upgrade packages')
j.sal.ubuntu.apt_upgrade()
info('Get number of packages that need to be upgraded again after upgrade')
(rc2, upgradable_pack_after_upgrade, err2) = j.sal.process.execute("apt list --upgradable | grep -v 'Listing...'| wc -l")
upgradable_pack_count_after_upgrade = int(upgradable_pack_after_upgrade.strip())
info('comparing the count of packages need to be upgraded before and after upgarde ')
assert (upgradable_pack_count_before_upgrade >= upgradable_pack_count_after_upgrade)<|docstring|>TC416
upgrade is used to install the newest versions of all packages currently installed on the system
**Test Scenario**
#. Get number of packages that need to be upgraded
#. Run tested method to upgrade packages
#. Get number of packages that need to be upgraded again after upgrade
#. if the upgrade runs successfully, the number in step 1 should be greater than or equal to the one in step 3
#. comparing the count of packages that need to be upgraded before and after the upgrade
#. if all packages were already upgraded before running the tested method, no packages need upgrading and the two counts are equal, so a greater-or-equal comparison is used<|endoftext|>
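The before/after comparison is easiest to read as a small helper; since a fully up-to-date system leaves both counts equal, the check is greater-or-equal rather than strictly greater (sketch, import assumed):

```python
from Jumpscale import j

def upgradable_count():
    # Number of packages apt still wants to upgrade.
    rc, out, err = j.sal.process.execute("apt list --upgradable | grep -v 'Listing...' | wc -l")
    return int(out.strip())

before = upgradable_count()
j.sal.ubuntu.apt_upgrade()   # install the newest versions of everything installed
after = upgradable_count()
assert before >= after       # equal when the system was already up to date
```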
6578889ded0c1c12513eec6207bb391a982f52ea65b3e9cbb3862cbd5c549a5d | def test021_check_os():
'TC417\n check is True when the destribution is ubunut or linuxmint\n\n **Test Scenario**\n #. Get os name by lsb_release command\n #. Get release number (version) by lsb_release command\n #. Check OS name should be between "Ubuntu", "LinuxMint"\n #. if OS is Ubuntu or LinuxMint, checking version should be greater than 14\n #. if OS is not Ubuntu or LinuxMint, exceptions RuntimeError gonna appear as Only Ubuntu/Mint supported\n #. if OS version (number) is greater than 14, verifying tested method should return True\n #. if OS version (number) is less than 14, RuntimeError gonna appear as Only ubuntu version 14+ supported\n '
info('Get os name by lsb_release command')
(rc1, distro_name, err1) = j.sal.process.execute("lsb_release -i | awk '{print $3}'")
distro1 = distro_name.strip()
info('Get release number (version) by lsb_release command')
(rc2, out2, err2) = j.sal.process.execute("lsb_release -r|awk '{print $2}'")
distrbo_num = out2.strip()
release_num = float(distrbo_num)
info('Check OS name should be between Ubuntu or LinuxMint')
if (distro1 in ('Ubuntu', 'LinuxMint')):
info('OS is Ubuntu or LinuxMint, checking version should be greater than 14')
if (release_num > 14):
info('verifying tested method should return True')
assert j.sal.ubuntu.check()
else:
try:
j.sal.ubuntu.check()
info('There is exceptions RuntimeError as Only ubuntu version 14+ supported')
except j.exceptions.RuntimeError as myexcept:
assert ('Only ubuntu version 14+ supported' in myexcept.exception.args[0])
else:
try:
j.sal.ubuntu.check()
info('There is exceptions RuntimeError as the OS is not between Ubuntu or LinuxMint')
except j.exceptions.RuntimeError as e:
assert ('Only Ubuntu/Mint supported' in e.exception.args[0]) | TC417
check is True when the distribution is Ubuntu or LinuxMint
**Test Scenario**
#. Get os name by lsb_release command
#. Get release number (version) by lsb_release command
#. Check OS name should be between "Ubuntu", "LinuxMint"
#. if OS is Ubuntu or LinuxMint, checking version should be greater than 14
#. if OS is not Ubuntu or LinuxMint, a RuntimeError is raised saying Only Ubuntu/Mint supported
#. if OS version (number) is greater than 14, verifying tested method should return True
#. if OS version (number) is less than 14, a RuntimeError is raised saying Only ubuntu version 14+ supported | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test021_check_os | grimpy/jumpscaleX_libs | 0 | python | def test021_check_os():
'TC417\n check is True when the destribution is ubunut or linuxmint\n\n **Test Scenario**\n #. Get os name by lsb_release command\n #. Get release number (version) by lsb_release command\n #. Check OS name should be between "Ubuntu", "LinuxMint"\n #. if OS is Ubuntu or LinuxMint, checking version should be greater than 14\n #. if OS is not Ubuntu or LinuxMint, exceptions RuntimeError gonna appear as Only Ubuntu/Mint supported\n #. if OS version (number) is greater than 14, verifying tested method should return True\n #. if OS version (number) is less than 14, RuntimeError gonna appear as Only ubuntu version 14+ supported\n '
info('Get os name by lsb_release command')
(rc1, distro_name, err1) = j.sal.process.execute("lsb_release -i | awk '{print $3}'")
distro1 = distro_name.strip()
info('Get release number (version) by lsb_release command')
(rc2, out2, err2) = j.sal.process.execute("lsb_release -r|awk '{print $2}'")
distrbo_num = out2.strip()
release_num = float(distrbo_num)
info('Check OS name should be between Ubuntu or LinuxMint')
if (distro1 in ('Ubuntu', 'LinuxMint')):
info('OS is Ubuntu or LinuxMint, checking version should be greater than 14')
if (release_num > 14):
info('verifying tested method should return True')
assert j.sal.ubuntu.check()
else:
try:
j.sal.ubuntu.check()
info('There is exceptions RuntimeError as Only ubuntu version 14+ supported')
except j.exceptions.RuntimeError as myexcept:
assert ('Only ubuntu version 14+ supported' in myexcept.exception.args[0])
else:
try:
j.sal.ubuntu.check()
info('There is exceptions RuntimeError as the OS is not between Ubuntu or LinuxMint')
except j.exceptions.RuntimeError as e:
assert ('Only Ubuntu/Mint supported' in e.exception.args[0]) | def test021_check_os():
'TC417\n check is True when the destribution is ubunut or linuxmint\n\n **Test Scenario**\n #. Get os name by lsb_release command\n #. Get release number (version) by lsb_release command\n #. Check OS name should be between "Ubuntu", "LinuxMint"\n #. if OS is Ubuntu or LinuxMint, checking version should be greater than 14\n #. if OS is not Ubuntu or LinuxMint, exceptions RuntimeError gonna appear as Only Ubuntu/Mint supported\n #. if OS version (number) is greater than 14, verifying tested method should return True\n #. if OS version (number) is less than 14, RuntimeError gonna appear as Only ubuntu version 14+ supported\n '
info('Get os name by lsb_release command')
(rc1, distro_name, err1) = j.sal.process.execute("lsb_release -i | awk '{print $3}'")
distro1 = distro_name.strip()
info('Get release number (version) by lsb_release command')
(rc2, out2, err2) = j.sal.process.execute("lsb_release -r|awk '{print $2}'")
distrbo_num = out2.strip()
release_num = float(distrbo_num)
info('Check OS name should be between Ubuntu or LinuxMint')
if (distro1 in ('Ubuntu', 'LinuxMint')):
info('OS is Ubuntu or LinuxMint, checking version should be greater than 14')
if (release_num > 14):
info('verifying tested method should return True')
assert j.sal.ubuntu.check()
else:
try:
j.sal.ubuntu.check()
info('There is exceptions RuntimeError as Only ubuntu version 14+ supported')
except j.exceptions.RuntimeError as myexcept:
assert ('Only ubuntu version 14+ supported' in myexcept.exception.args[0])
else:
try:
j.sal.ubuntu.check()
info('There is exceptions RuntimeError as the OS is not between Ubuntu or LinuxMint')
except j.exceptions.RuntimeError as e:
assert ('Only Ubuntu/Mint supported' in e.exception.args[0])<|docstring|>TC417
check is True when the distribution is Ubuntu or LinuxMint
**Test Scenario**
#. Get os name by lsb_release command
#. Get release number (version) by lsb_release command
#. Check OS name should be between "Ubuntu", "LinuxMint"
#. if OS is Ubuntu or LinuxMint, checking version should be greater than 14
#. if OS is not Ubuntu or LinuxMint, a RuntimeError is raised saying Only Ubuntu/Mint supported
#. if OS version (number) is greater than 14, verifying tested method should return True
#. if OS version (number) is less than 14, a RuntimeError is raised saying Only ubuntu version 14+ supported<|endoftext|>
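Seen from the calling side, check() either returns True on a supported platform or raises; a short sketch of handling both outcomes, with the error strings taken from the assertions above (import assumed):

```python
from Jumpscale import j

try:
    # True on Ubuntu/LinuxMint with a release newer than 14.
    print("supported platform:", j.sal.ubuntu.check())
except Exception as err:
    # Either "Only Ubuntu/Mint supported" or "Only ubuntu version 14+ supported".
    print("unsupported platform:", err)
```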
16c4b25696233ae21cad4ed049f83b76d8c5c459c8b3c9ca0951fd428e14fbbe | def test022_deb_download_install():
'TC418\n check download and install the package\n\n **Test Scenario**\n #. Check status of nano is installed or not\n #. If nano installed remove it by apt remove before install it\n #. Installed it again by tested method\n #. Get nano status should be installed successfully\n #. Verify that nano installed successfully\n #. Remove nano to return to origin state\n #. Install nano to return to origin state as we remove it before testing\n '
info('Check status of nano: is installed or not')
nano_installed = j.sal.ubuntu.is_pkg_installed('nano')
if nano_installed:
info('nano is installed, removing it')
j.sal.process.execute('apt remove -y nano')
info('installed nano again by tested method')
j.sal.ubuntu.deb_download_install('http://archive.ubuntu.com/ubuntu/pool/main/n/nano/nano_2.9.3-2_amd64.deb', remove_downloaded=True)
info('Get nano status should be installed successfully ')
(rc2, out2, err2) = j.sal.process.execute('dpkg -s nano|grep Status')
info('verify that nano installed successfully')
assert ('install ok' in out2)
info('remove nano to return to origin state')
j.sal.process.execute('apt remove -y nano')
if nano_installed:
info('install nano to return to origin state as we remove it before testing ')
j.sal.process.execute('apt install -y nano') | TC418
check download and install the package
**Test Scenario**
#. Check status of nano is installed or not
#. If nano is installed, remove it with apt remove before installing it
#. Installed it again by tested method
#. Get nano status should be installed successfully
#. Verify that nano installed successfully
#. Remove nano to return to origin state
#. Install nano to return to origin state as we remove it before testing | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test022_deb_download_install | grimpy/jumpscaleX_libs | 0 | python | def test022_deb_download_install():
'TC418\n check download and install the package\n\n **Test Scenario**\n #. Check status of nano is installed or not\n #. If nano installed remove it by apt remove before install it\n #. Installed it again by tested method\n #. Get nano status should be installed successfully\n #. Verify that nano installed successfully\n #. Remove nano to return to origin state\n #. Install nano to return to origin state as we remove it before testing\n '
info('Check status of nano: is installed or not')
nano_installed = j.sal.ubuntu.is_pkg_installed('nano')
if nano_installed:
info('nano is installed, removing it')
j.sal.process.execute('apt remove -y nano')
info('installed nano again by tested method')
j.sal.ubuntu.deb_download_install('http://archive.ubuntu.com/ubuntu/pool/main/n/nano/nano_2.9.3-2_amd64.deb', remove_downloaded=True)
info('Get nano status should be installed successfully ')
(rc2, out2, err2) = j.sal.process.execute('dpkg -s nano|grep Status')
info('verify that nano installed successfully')
assert ('install ok' in out2)
info('remove nano to return to origin state')
j.sal.process.execute('apt remove -y nano')
if nano_installed:
info('install nano to return to origin state as we remove it before testing ')
j.sal.process.execute('apt install -y nano') | def test022_deb_download_install():
'TC418\n check download and install the package\n\n **Test Scenario**\n #. Check status of nano is installed or not\n #. If nano installed remove it by apt remove before install it\n #. Installed it again by tested method\n #. Get nano status should be installed successfully\n #. Verify that nano installed successfully\n #. Remove nano to return to origin state\n #. Install nano to return to origin state as we remove it before testing\n '
info('Check status of nano: is installed or not')
nano_installed = j.sal.ubuntu.is_pkg_installed('nano')
if nano_installed:
info('nano is installed, removing it')
j.sal.process.execute('apt remove -y nano')
info('installed nano again by tested method')
j.sal.ubuntu.deb_download_install('http://archive.ubuntu.com/ubuntu/pool/main/n/nano/nano_2.9.3-2_amd64.deb', remove_downloaded=True)
info('Get nano status should be installed successfully ')
(rc2, out2, err2) = j.sal.process.execute('dpkg -s nano|grep Status')
info('verify that nano installed successfully')
assert ('install ok' in out2)
info('remove nano to return to origin state')
j.sal.process.execute('apt remove -y nano')
if nano_installed:
info('install nano to return to origin state as we remove it before testing ')
j.sal.process.execute('apt install -y nano')<|docstring|>TC418
check download and install the package
**Test Scenario**
#. Check status of nano is installed or not
#. If nano is installed, remove it with apt remove before installing it
#. Installed it again by tested method
#. Get nano status should be installed successfully
#. Verify that nano installed successfully
#. Remove nano to return to origin state
#. Install nano to return to origin state as we remove it before testing<|endoftext|> |
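The same fetch-and-install round trip as a sketch; the nano .deb URL and the dpkg status check come from the test, while the import line is an assumption:

```python
from Jumpscale import j

url = 'http://archive.ubuntu.com/ubuntu/pool/main/n/nano/nano_2.9.3-2_amd64.deb'

# Download the package, install it and drop the downloaded file afterwards.
j.sal.ubuntu.deb_download_install(url, remove_downloaded=True)

# dpkg should now report the package as installed.
rc, out, err = j.sal.process.execute('dpkg -s nano | grep Status')
assert 'install ok' in out
```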
2f784a2511d37a17bc47f53cf665418a81bcdf18fd36101feabce4bf70e12c57 | def test023_pkg_remove():
'TC419\n remove an ubuntu package.\n\n **Test Scenario**\n #. Check the tcpdummp is installed or not\n #. If tcpdump not installed, install it manually\n #. Remove tcpdump by tested method pkg_remove\n #. Verify package has been removed by tested method\n #. Remove tcpdump to return to origin state\n '
info('Check the tcpdump is installed or not')
tcpdump_already_installed = j.sal.ubuntu.is_pkg_installed('tcpdump')
if (not tcpdump_already_installed):
info('tcpdump not installed, installing it ')
j.sal.process.execute('apt install -y tcpdump')
info('remove tcpdump by tested method pkg_remove')
j.sal.ubuntu.pkg_remove('tcpdump')
info('verify package has been removed by tested method')
assert (j.sal.ubuntu.is_pkg_installed('tcpdump') is False)
if (not tcpdump_already_installed):
info('remove tcpdump to return to origin state')
j.sal.process.execute('apt remove -y tcpdump') | TC419
remove an ubuntu package.
**Test Scenario**
#. Check whether tcpdump is installed or not
#. If tcpdump not installed, install it manually
#. Remove tcpdump by tested method pkg_remove
#. Verify package has been removed by tested method
#. Remove tcpdump to return to origin state | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test023_pkg_remove | grimpy/jumpscaleX_libs | 0 | python | def test023_pkg_remove():
'TC419\n remove an ubuntu package.\n\n **Test Scenario**\n #. Check the tcpdummp is installed or not\n #. If tcpdump not installed, install it manually\n #. Remove tcpdump by tested method pkg_remove\n #. Verify package has been removed by tested method\n #. Remove tcpdump to return to origin state\n '
info('Check the tcpdump is installed or not')
tcpdump_already_installed = j.sal.ubuntu.is_pkg_installed('tcpdump')
if (not tcpdump_already_installed):
info('tcpdump not installed, installing it ')
j.sal.process.execute('apt install -y tcpdump')
info('remove tcpdump by tested method pkg_remove')
j.sal.ubuntu.pkg_remove('tcpdump')
info('verify package has been removed by tested method')
assert (j.sal.ubuntu.is_pkg_installed('tcpdump') is False)
if (not tcpdump_already_installed):
info('remove tcpdump to return to origin state')
j.sal.process.execute('apt remove -y tcpdump') | def test023_pkg_remove():
'TC419\n remove an ubuntu package.\n\n **Test Scenario**\n #. Check the tcpdummp is installed or not\n #. If tcpdump not installed, install it manually\n #. Remove tcpdump by tested method pkg_remove\n #. Verify package has been removed by tested method\n #. Remove tcpdump to return to origin state\n '
info('Check the tcpdump is installed or not')
tcpdump_already_installed = j.sal.ubuntu.is_pkg_installed('tcpdump')
if (not tcpdump_already_installed):
info('tcpdump not installed, installing it ')
j.sal.process.execute('apt install -y tcpdump')
info('remove tcpdump by tested method pkg_remove')
j.sal.ubuntu.pkg_remove('tcpdump')
info('verify package has been removed by tested method')
assert (j.sal.ubuntu.is_pkg_installed('tcpdump') is False)
if (not tcpdump_already_installed):
info('remove tcpdump to return to origin state')
j.sal.process.execute('apt remove -y tcpdump')<|docstring|>TC419
remove an ubuntu package.
**Test Scenario**
#. Check whether tcpdump is installed or not
#. If tcpdump not installed, install it manually
#. Remove tcpdump by tested method pkg_remove
#. Verify package has been removed by tested method
#. Remove tcpdump to return to origin state<|endoftext|> |
fef78b8ab01e35bb58eff0b71bdaba99810a418aa56deb9b5e13ae9245f8bade | def test024_service_disable_start_boot():
'TC420\n remove all links are named as /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.\n\n **Test Scenario**\n #. Check cron file link exist or not\n #. If file does not exist, enable service so file will created\n #. Disable cron service by using tested method service_disable_start_boot\n #. Verify that file does not exist after disable cron service\n #. Enable cron service to create service file to return as origin state\n #. Disable cron service as cron service does not exist before testing to return back to origin state\n '
info('check cron file link exist or not ')
cron_file_exist = os.path.exists('/etc/rc5.d/S01cron')
if (not cron_file_exist):
info('file does not exist, enable service so file will created')
j.sal.ubuntu.service_enable_start_boot('cron')
info('disable cron service by using tested method service_disable_start_boot ')
j.sal.ubuntu.service_disable_start_boot('cron')
info('verify that file does not exist after disable cron service')
assert (os.path.exists('/etc/rc5.d/S01cron') is False)
info('enable cron service to create service file to return as origin state')
j.sal.ubuntu.service_enable_start_boot('cron')
if (not cron_file_exist):
info('disable cron service as cron service does not exist before testing to return back to origin state')
j.sal.ubuntu.service_disable_start_boot('cron') | TC420
remove all links are named as /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.
**Test Scenario**
#. Check whether the cron file link exists or not
#. If the file does not exist, enable the service so the file will be created
#. Disable cron service by using tested method service_disable_start_boot
#. Verify that file does not exist after disable cron service
#. Enable cron service to create service file to return as origin state
#. Disable cron service as cron service does not exist before testing to return back to origin state | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test024_service_disable_start_boot | grimpy/jumpscaleX_libs | 0 | python | def test024_service_disable_start_boot():
'TC420\n remove all links are named as /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.\n\n **Test Scenario**\n #. Check cron file link exist or not\n #. If file does not exist, enable service so file will created\n #. Disable cron service by using tested method service_disable_start_boot\n #. Verify that file does not exist after disable cron service\n #. Enable cron service to create service file to return as origin state\n #. Disable cron service as cron service does not exist before testing to return back to origin state\n '
info('check cron file link exist or not ')
cron_file_exist = os.path.exists('/etc/rc5.d/S01cron')
if (not cron_file_exist):
info('file does not exist, enable service so file will created')
j.sal.ubuntu.service_enable_start_boot('cron')
info('disable cron service by using tested method service_disable_start_boot ')
j.sal.ubuntu.service_disable_start_boot('cron')
info('verify that file does not exist after disable cron service')
assert (os.path.exists('/etc/rc5.d/S01cron') is False)
info('enable cron service to create service file to return as origin state')
j.sal.ubuntu.service_enable_start_boot('cron')
if (not cron_file_exist):
info('disable cron service as cron service does not exist before testing to return back to origin state')
j.sal.ubuntu.service_disable_start_boot('cron') | def test024_service_disable_start_boot():
'TC420\n remove all links are named as /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.\n\n **Test Scenario**\n #. Check cron file link exist or not\n #. If file does not exist, enable service so file will created\n #. Disable cron service by using tested method service_disable_start_boot\n #. Verify that file does not exist after disable cron service\n #. Enable cron service to create service file to return as origin state\n #. Disable cron service as cron service does not exist before testing to return back to origin state\n '
info('check cron file link exist or not ')
cron_file_exist = os.path.exists('/etc/rc5.d/S01cron')
if (not cron_file_exist):
info('file does not exist, enable service so file will created')
j.sal.ubuntu.service_enable_start_boot('cron')
info('disable cron service by using tested method service_disable_start_boot ')
j.sal.ubuntu.service_disable_start_boot('cron')
info('verify that file does not exist after disable cron service')
assert (os.path.exists('/etc/rc5.d/S01cron') is False)
info('enable cron service to create service file to return as origin state')
j.sal.ubuntu.service_enable_start_boot('cron')
if (not cron_file_exist):
info('disable cron service as cron service does not exist before testing to return back to origin state')
j.sal.ubuntu.service_disable_start_boot('cron')<|docstring|>TC420
remove all links are named as /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.
**Test Scenario**
#. Check whether the cron file link exists or not
#. If the file does not exist, enable the service so the file will be created
#. Disable cron service by using tested method service_disable_start_boot
#. Verify that file does not exist after disable cron service
#. Enable cron service to create service file to return as origin state
#. Disable cron service as cron service does not exist before testing to return back to origin state<|endoftext|> |
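Both boot-time helpers manipulate the SysV rc links of a service; a sketch of toggling them for cron and watching the /etc/rc5.d link disappear and reappear (paths as used in the test, import assumed):

```python
import os
from Jumpscale import j

# Disabling removes the start link for the runlevel...
j.sal.ubuntu.service_disable_start_boot('cron')
assert not os.path.exists('/etc/rc5.d/S01cron')

# ...and enabling recreates it.
j.sal.ubuntu.service_enable_start_boot('cron')
assert os.path.exists('/etc/rc5.d/S01cron')
```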
095ffaa07eab63fec190846dd60bd14d8e95fa2d8b6e71035e6432d9d2ebc923 | def test025_service_enable_start_boot():
'TC421\n it makes links named /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.\n\n **Test Scenario**\n #. Check cron file link exist or not\n #. If file exist,backup service file to /tmp before disabling it\n #. Disable service at boot\n #. Verify that file does not eixst after disabling service\n #. Enable service at boot again to check tested method\n #. Verify cron file is exist after enabling service\n #. Return back the backup file to origin path\n '
info('check cron file link exist or not ')
cron_file_exist = os.path.exists('/etc/rc5.d/S01cron')
if cron_file_exist:
info('file exist,backup service file to /tmp before disabling it')
j.sal.process.execute('cp /etc/rc5.d/S01cron /tmp')
info('disable service at boot')
j.sal.ubuntu.service_disable_start_boot('cron')
info('Verify that file does not eixst after disabling service ')
assert (os.path.exists('/etc/rc5.d/S01cron') is False)
info('enable service at boot again to check tested method ')
j.sal.ubuntu.service_enable_start_boot('cron')
info('Verify cron file is exist after enabling service')
assert os.path.exists('/etc/rc5.d/S01cron')
if cron_file_exist:
info('retrun back the backup file to origin path')
j.sal.process.execute('cp /tmp/S01cron /etc/rc5.d/S01cron ') | TC421
it makes links named /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.
**Test Scenario**
#. Check cron file link exist or not
#. If file exist,backup service file to /tmp before disabling it
#. Disable service at boot
#. Verify that the file does not exist after disabling the service
#. Enable service at boot again to check tested method
#. Verify cron file is exist after enabling service
#. Return back the backup file to origin path | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test025_service_enable_start_boot | grimpy/jumpscaleX_libs | 0 | python | def test025_service_enable_start_boot():
'TC421\n it makes links named /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.\n\n **Test Scenario**\n #. Check cron file link exist or not\n #. If file exist,backup service file to /tmp before disabling it\n #. Disable service at boot\n #. Verify that file does not eixst after disabling service\n #. Enable service at boot again to check tested method\n #. Verify cron file is exist after enabling service\n #. Return back the backup file to origin path\n '
info('check cron file link exist or not ')
cron_file_exist = os.path.exists('/etc/rc5.d/S01cron')
if cron_file_exist:
info('file exist,backup service file to /tmp before disabling it')
j.sal.process.execute('cp /etc/rc5.d/S01cron /tmp')
info('disable service at boot')
j.sal.ubuntu.service_disable_start_boot('cron')
info('Verify that file does not eixst after disabling service ')
assert (os.path.exists('/etc/rc5.d/S01cron') is False)
info('enable service at boot again to check tested method ')
j.sal.ubuntu.service_enable_start_boot('cron')
info('Verify cron file is exist after enabling service')
assert os.path.exists('/etc/rc5.d/S01cron')
if cron_file_exist:
info('retrun back the backup file to origin path')
j.sal.process.execute('cp /tmp/S01cron /etc/rc5.d/S01cron ') | def test025_service_enable_start_boot():
'TC421\n it makes links named /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.\n\n **Test Scenario**\n #. Check cron file link exist or not\n #. If file exist,backup service file to /tmp before disabling it\n #. Disable service at boot\n #. Verify that file does not eixst after disabling service\n #. Enable service at boot again to check tested method\n #. Verify cron file is exist after enabling service\n #. Return back the backup file to origin path\n '
info('check cron file link exist or not ')
cron_file_exist = os.path.exists('/etc/rc5.d/S01cron')
if cron_file_exist:
info('file exist,backup service file to /tmp before disabling it')
j.sal.process.execute('cp /etc/rc5.d/S01cron /tmp')
info('disable service at boot')
j.sal.ubuntu.service_disable_start_boot('cron')
info('Verify that file does not eixst after disabling service ')
assert (os.path.exists('/etc/rc5.d/S01cron') is False)
info('enable service at boot again to check tested method ')
j.sal.ubuntu.service_enable_start_boot('cron')
info('Verify cron file is exist after enabling service')
assert os.path.exists('/etc/rc5.d/S01cron')
if cron_file_exist:
info('retrun back the backup file to origin path')
j.sal.process.execute('cp /tmp/S01cron /etc/rc5.d/S01cron ')<|docstring|>TC421
it makes links named /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.
**Test Scenario**
#. Check cron file link exist or not
#. If file exist,backup service file to /tmp before disabling it
#. Disable service at boot
#. Verify that the file does not exist after disabling the service
#. Enable service at boot again to check tested method
#. Verify cron file is exist after enabling service
#. Return back the backup file to origin path<|endoftext|> |
ddcdce168cfb6da6eac743885fa4c805b4616faeedd2f980ba7e28b5f320401d | def test026_service_uninstall():
'TC422\n remove an ubuntu service.\n\n **Test Scenario**\n #. Check cron service config file existing under /etc/init\n #. If ron service file config does not exist in /etc/ini, install service so config file will created\n #. Backup the config file to /tmp before testing\n #. Uninstall service to test tested method service_uninstall\n #. Verify the cron config file does not exist after uninstalling service\n #. Return back backup file to orgin path after testing\n #. If file was not exist, remove service config file to return back to origin state\n '
mysys = None
zdb_service_file = False
info('installing zdb from builder')
j.builders.db.zdb.install()
info('checking system is systemd or not ')
mysys = _check_init_process()
if (mysys == 'my_init'):
info('system is init system')
zdb_service_file = os.path.exists('/etc/service/zdb/run')
elif (mysys == 'systemd'):
info('system is init systemd')
zdb_service_file = os.path.exists('/etc/systemd/system/zdb.service')
else:
info('something unexpected occurred while checking system type')
assert (mysys in ['systemd', 'my_init']), 'system not supported '
if (zdb_service_file is False):
info('zdb service file config does not exist, install service so config file will created ')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/bin'))
info('backup the config file to /tmp before testing ')
if (mysys == 'my_init'):
j.sal.process.execute('cp /etc/service/zdb/run /tmp/run_zdb')
else:
j.sal.process.execute('cp /etc/systemd/system/zdb.service /tmp')
info('uninstall service to test tested method service_uninstall')
j.sal.ubuntu.service_uninstall('zdb')
info('Verify the zdb config file does not exist after uninstalling service ')
if (mysys == 'my_init'):
assert (os.path.exists('/etc/service/zdb/run') is False)
else:
assert (os.path.exists('/etc/systemd/system/zdb.service') is False)
info('return back backup file to orgin path after testing ')
if (mysys == 'my_init'):
j.sal.process.execute('cp /tmp/run_zdb /etc/service/zdb/run ')
else:
j.sal.process.execute('cp /tmp/zdb.service /etc/systemd/system/zdb.service ')
if (zdb_service_file is False):
info('remove service config file to return back to origin state')
if (mysys == 'my_init'):
j.sal.process.execute('rm /etc/service/zdb/run')
else:
j.sal.process.execute('rm /etc/systemd/system/zdb.service') | TC422
remove an ubuntu service.
**Test Scenario**
#. Check cron service config file existing under /etc/init
#. If the cron service config file does not exist in /etc/init, install the service so the config file will be created
#. Backup the config file to /tmp before testing
#. Uninstall service to test tested method service_uninstall
#. Verify the cron config file does not exist after uninstalling service
#. Return the backup file to its origin path after testing
#. If the file did not exist, remove the service config file to return back to the origin state | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test026_service_uninstall | grimpy/jumpscaleX_libs | 0 | python | def test026_service_uninstall():
'TC422\n remove an ubuntu service.\n\n **Test Scenario**\n #. Check cron service config file existing under /etc/init\n #. If ron service file config does not exist in /etc/ini, install service so config file will created\n #. Backup the config file to /tmp before testing\n #. Uninstall service to test tested method service_uninstall\n #. Verify the cron config file does not exist after uninstalling service\n #. Return back backup file to orgin path after testing\n #. If file was not exist, remove service config file to return back to origin state\n '
mysys = None
zdb_service_file = False
info('installing zdb from builder')
j.builders.db.zdb.install()
info('checking system is systemd or not ')
mysys = _check_init_process()
if (mysys == 'my_init'):
info('system is init system')
zdb_service_file = os.path.exists('/etc/service/zdb/run')
elif (mysys == 'systemd'):
info('system is init systemd')
zdb_service_file = os.path.exists('/etc/systemd/system/zdb.service')
else:
info('something unexpected occurred while checking system type')
assert (mysys in ['systemd', 'my_init']), 'system not supported '
if (zdb_service_file is False):
info('zdb service file config does not exist, install service so config file will created ')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/bin'))
info('backup the config file to /tmp before testing ')
if (mysys == 'my_init'):
j.sal.process.execute('cp /etc/service/zdb/run /tmp/run_zdb')
else:
j.sal.process.execute('cp /etc/systemd/system/zdb.service /tmp')
info('uninstall service to test tested method service_uninstall')
j.sal.ubuntu.service_uninstall('zdb')
info('Verify the zdb config file does not exist after uninstalling service ')
if (mysys == 'my_init'):
assert (os.path.exists('/etc/service/zdb/run') is False)
else:
assert (os.path.exists('/etc/systemd/system/zdb.service') is False)
info('return back backup file to orgin path after testing ')
if (mysys == 'my_init'):
j.sal.process.execute('cp /tmp/run_zdb /etc/service/zdb/run ')
else:
j.sal.process.execute('cp /tmp/zdb.service /etc/systemd/system/zdb.service ')
if (zdb_service_file is False):
info('remove service config file to return back to origin state')
if (mysys == 'my_init'):
j.sal.process.execute('rm /etc/service/zdb/run')
else:
j.sal.process.execute('rm /etc/systemd/system/zdb.service') | def test026_service_uninstall():
'TC422\n remove an ubuntu service.\n\n **Test Scenario**\n #. Check cron service config file existing under /etc/init\n #. If ron service file config does not exist in /etc/ini, install service so config file will created\n #. Backup the config file to /tmp before testing\n #. Uninstall service to test tested method service_uninstall\n #. Verify the cron config file does not exist after uninstalling service\n #. Return back backup file to orgin path after testing\n #. If file was not exist, remove service config file to return back to origin state\n '
mysys = None
zdb_service_file = False
info('installing zdb from builder')
j.builders.db.zdb.install()
info('checking system is systemd or not ')
mysys = _check_init_process()
if (mysys == 'my_init'):
info('system is init system')
zdb_service_file = os.path.exists('/etc/service/zdb/run')
elif (mysys == 'systemd'):
info('system is init systemd')
zdb_service_file = os.path.exists('/etc/systemd/system/zdb.service')
else:
info('something unexpected occurred while checking system type')
assert (mysys in ['systemd', 'my_init']), 'system not supported '
if (zdb_service_file is False):
info('zdb service file config does not exist, install service so config file will created ')
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/bin'))
info('backup the config file to /tmp before testing ')
if (mysys == 'my_init'):
j.sal.process.execute('cp /etc/service/zdb/run /tmp/run_zdb')
else:
j.sal.process.execute('cp /etc/systemd/system/zdb.service /tmp')
info('uninstall service to test tested method service_uninstall')
j.sal.ubuntu.service_uninstall('zdb')
info('Verify the zdb config file does not exist after uninstalling service ')
if (mysys == 'my_init'):
assert (os.path.exists('/etc/service/zdb/run') is False)
else:
assert (os.path.exists('/etc/systemd/system/zdb.service') is False)
info('return back backup file to orgin path after testing ')
if (mysys == 'my_init'):
j.sal.process.execute('cp /tmp/run_zdb /etc/service/zdb/run ')
else:
j.sal.process.execute('cp /tmp/zdb.service /etc/systemd/system/zdb.service ')
if (zdb_service_file is False):
info('remove service config file to return back to origin state')
if (mysys == 'my_init'):
j.sal.process.execute('rm /etc/service/zdb/run')
else:
j.sal.process.execute('rm /etc/systemd/system/zdb.service')<|docstring|>TC422
remove an ubuntu service.
**Test Scenario**
#. Check cron service config file existing under /etc/init
#. If the cron service config file does not exist in /etc/init, install the service so the config file will be created
#. Backup the config file to /tmp before testing
#. Uninstall service to test tested method service_uninstall
#. Verify the cron config file does not exist after uninstalling service
#. Return the backup file to its origin path after testing
#. If the file did not exist, remove the service config file to return back to the origin state<|endoftext|>
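service_install and service_uninstall write or remove the init configuration, and the file they touch depends on whether the host runs my_init or systemd; a sketch of the pairing, with the zdb example and the two config paths taken from the test above:

```python
import os
from Jumpscale import j

# Create an init entry for zdb whose binary lives under {DIR_BASE}/bin.
j.sal.ubuntu.service_install('zdb', j.core.tools.text_replace('{DIR_BASE}/bin'))

# Depending on the init system, one of these config files now exists.
assert os.path.exists('/etc/service/zdb/run') or os.path.exists('/etc/systemd/system/zdb.service')

# Uninstalling removes that same file again.
j.sal.ubuntu.service_uninstall('zdb')
```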
d4b0121657bd6af1e9b1bb588b6e9294f32a183ef027dca86aa4b49267c305d9 | def test027_whoami():
'TC397\n check current login user\n\n **Test Scenario**\n #. Check whoami method output\n #. Check os current user by using command whoami\n #. Comapre step1 and step2, should be identical\n\n '
info('checking whoami method output')
sal_user = j.sal.ubuntu.whoami()
info('checking OS whoami command output')
(rc2, os_user, err2) = j.sal.process.execute('whoami')
info('comparing whoami method output vs OS whoami command output')
assert (os_user.strip() == sal_user) | TC397
check current login user
**Test Scenario**
#. Check whoami method output
#. Check os current user by using command whoami
#. Compare steps 1 and 2; they should be identical | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | test027_whoami | grimpy/jumpscaleX_libs | 0 | python | def test027_whoami():
'TC397\n check current login user\n\n **Test Scenario**\n #. Check whoami method output\n #. Check os current user by using command whoami\n #. Comapre step1 and step2, should be identical\n\n '
info('checking whoami method output')
sal_user = j.sal.ubuntu.whoami()
info('checking OS whoami command output')
(rc2, os_user, err2) = j.sal.process.execute('whoami')
info('comparing whoami method output vs OS whoami command output')
assert (os_user.strip() == sal_user) | def test027_whoami():
'TC397\n check current login user\n\n **Test Scenario**\n #. Check whoami method output\n #. Check os current user by using command whoami\n #. Comapre step1 and step2, should be identical\n\n '
info('checking whoami method output')
sal_user = j.sal.ubuntu.whoami()
info('checking OS whoami command output')
(rc2, os_user, err2) = j.sal.process.execute('whoami')
info('comparing whoami method output vs OS whoami command output')
assert (os_user.strip() == sal_user)<|docstring|>TC397
check current login user
**Test Scenario**
#. Check whoami method output
#. Check os current user by using command whoami
#. Compare steps 1 and 2; they should be identical<|endoftext|>
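whoami() is a thin wrapper around the system command, so the whole scenario reduces to two lines (sketch, import assumed):

```python
from Jumpscale import j

rc, os_user, err = j.sal.process.execute('whoami')
# The wrapper should agree with the system command.
assert j.sal.ubuntu.whoami() == os_user.strip()
```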
4490538c9a0460608ba82e9f5789fd2a9bf9f433e0b8b2f4b765a6d9e27de14e | def main():
'\n to run:\n kosmos \'j.sal.ubuntu.test(name="ubuntu")\'\n '
before()
test001_uptime()
test002_service_install()
test003_version_get()
test004_apt_install_check()
test005_apt_install_version()
test006_deb_install()
test007_pkg_list()
test008_service_start()
test009_service_stop()
test010_service_restart()
test011_service_status()
test012_apt_find_all()
test013_is_pkg_installed()
test014_sshkey_generate()
test015_apt_get_cache_keys()
test016_apt_get_installed()
test017_apt_install()
test018_apt_sources_list()
test019_apt_sources_uri_add()
test020_apt_upgrade()
test021_check_os()
test022_deb_download_install()
test023_pkg_remove()
test024_service_disable_start_boot()
test025_service_enable_start_boot()
test026_service_uninstall()
test027_whoami() | to run:
kosmos 'j.sal.ubuntu.test(name="ubuntu")' | JumpscaleLibs/sal/ubuntu/tests/test_ubuntu.py | main | grimpy/jumpscaleX_libs | 0 | python | def main():
'\n to run:\n kosmos \'j.sal.ubuntu.test(name="ubuntu")\'\n '
before()
test001_uptime()
test002_service_install()
test003_version_get()
test004_apt_install_check()
test005_apt_install_version()
test006_deb_install()
test007_pkg_list()
test008_service_start()
test009_service_stop()
test010_service_restart()
test011_service_status()
test012_apt_find_all()
test013_is_pkg_installed()
test014_sshkey_generate()
test015_apt_get_cache_keys()
test016_apt_get_installed()
test017_apt_install()
test018_apt_sources_list()
test019_apt_sources_uri_add()
test020_apt_upgrade()
test021_check_os()
test022_deb_download_install()
test023_pkg_remove()
test024_service_disable_start_boot()
test025_service_enable_start_boot()
test026_service_uninstall()
test027_whoami() | def main():
'\n to run:\n kosmos \'j.sal.ubuntu.test(name="ubuntu")\'\n '
before()
test001_uptime()
test002_service_install()
test003_version_get()
test004_apt_install_check()
test005_apt_install_version()
test006_deb_install()
test007_pkg_list()
test008_service_start()
test009_service_stop()
test010_service_restart()
test011_service_status()
test012_apt_find_all()
test013_is_pkg_installed()
test014_sshkey_generate()
test015_apt_get_cache_keys()
test016_apt_get_installed()
test017_apt_install()
test018_apt_sources_list()
test019_apt_sources_uri_add()
test020_apt_upgrade()
test021_check_os()
test022_deb_download_install()
test023_pkg_remove()
test024_service_disable_start_boot()
test025_service_enable_start_boot()
test026_service_uninstall()
test027_whoami()<|docstring|>to run:
kosmos 'j.sal.ubuntu.test(name="ubuntu")'<|endoftext|> |
a962523a97b6de0310877378bbb0d7050562ffd4292ffa8528f525b75fe0d697 | def dictstatus(node_list, reports_dict, status_dict, sort=True, sortby=None, asc=False, get_status='all', puppet_run_time=PUPPET_RUN_INTERVAL):
'\n :param node_list: dict\n :param status_dict: dict\n :param sortby: Takes a field name to sort by \'certname\', \'latestCatalog\', \'latestReport\', \'latestFacts\', \'success\', \'noop\', \'failure\', \'skipped\'\n :param get_status: Status type to return. all, changed, failed, unreported, noops\n :return: tuple(tuple,tuple)\n\n node_dict input:\n {\n \'certname\': {\n "name": <string>,\n "deactivated": <timestamp>,\n "catalog_timestamp": <timestamp>,\n "facts_timestamp": <timestamp>,\n "report_timestamp": <timestamp>\n },\n }\n --------------------------------\n status_dict input:\n {\n \'certname\': {\n "subject-type": "certname",\n "subject": { "title": "foo.local" },\n "failures": 0,\n "successes": 2,\n "noops": 0,\n "skips": 1\n },\n }\n '
def check_failed_compile(report_timestamp, fact_timestamp, catalog_timestamp, puppet_run_interval=puppet_run_time):
'\n :param report_timestamp: str\n :param fact_timestamp: str\n :param catalog_timestamp: str\n :return: Bool\n Returns False if the compiled run has not failed\n Returns True if the compiled run has failed\n '
if ((report_timestamp is None) or (catalog_timestamp is None) or (fact_timestamp is None)):
return True
report_time = json_to_datetime(report_timestamp)
fact_time = json_to_datetime(fact_timestamp)
catalog_time = json_to_datetime(catalog_timestamp)
diffs = dict()
diffs['catalog_fact'] = (catalog_time - fact_time)
diffs['fact_catalog'] = (fact_time - catalog_time)
diffs['report_fact'] = (report_time - fact_time)
diffs['fact_report'] = (fact_time - report_time)
diffs['report_catalog'] = (report_time - catalog_time)
diffs['catalog_report'] = (catalog_time - report_time)
for (key, value) in diffs.items():
if (value > timedelta(minutes=(puppet_run_interval / 2))):
return True
return False
def append_list(n_data, s_data, m_list, r_status):
if ((type(n_data) is not dict) or ((type(s_data) is not dict) and (type(m_list) is not list) and (not r_status))):
raise ValueError('Incorrect type given as input. Expects n_data, s_data as dict and m_list as list.')
m_list.append((n_data['certname'], (filters.date(localtime(json_to_datetime(n_data['catalog_timestamp'])), 'Y-m-d H:i:s') if (n_data['catalog_timestamp'] is not None) else ''), (filters.date(localtime(json_to_datetime(n_data['report_timestamp'])), 'Y-m-d H:i:s') if (n_data['report_timestamp'] is not None) else ''), (filters.date(localtime(json_to_datetime(n_data['facts_timestamp'])), 'Y-m-d H:i:s') if (n_data['facts_timestamp'] is not None) else ''), s_data.get('successes', 0), s_data.get('noops', 0), s_data.get('failures', 0), s_data.get('skips', 0), r_status))
return m_list
sortables = {'certname': 0, 'catalog_timestamp': 1, 'report_timestamp': 2, 'facts_timestamp': 3, 'successes': 4, 'noops': 5, 'failures': 6, 'skips': 7}
if sortby:
sortbycol = sortables.get(sortby, 2)
else:
sortbycol = 2
merged_list = []
failed_list = []
unreported_list = []
changed_list = []
pending_list = []
mismatch_list = []
if (get_status != 'all'):
for node in node_list:
node_is_unreported = False
node_has_mismatching_timestamps = False
if is_unreported(node['report_timestamp']):
node_is_unreported = True
if check_failed_compile(report_timestamp=node.get('report_timestamp', None), fact_timestamp=node.get('facts_timestamp', None), catalog_timestamp=node.get('catalog_timestamp', None)):
node_has_mismatching_timestamps = True
if (node['certname'] in reports_dict):
report_status = reports_dict[node['certname']]['status']
"\n Can be used later but right now we just utilize the event-counts response.\n # Dictify the metrics for the report.\n metrics_data = {item['category'] + '-' + item['name']: item for item in\n reports_dict[node_name]['metrics']['data']}\n "
if (node['certname'] in status_dict):
if ((report_status == 'unchanged') and (status_dict[node['certname']]['noops'] > 0)):
report_status = 'pending'
else:
status_dict[node['certname']] = {}
if (node_is_unreported is True):
unreported_list = append_list(node, status_dict[node['certname']], unreported_list, report_status)
if (node_has_mismatching_timestamps is True):
mismatch_list = append_list(node, status_dict[node['certname']], mismatch_list, report_status)
if (report_status == 'changed'):
changed_list = append_list(node, status_dict[node['certname']], changed_list, report_status)
elif (report_status == 'failed'):
failed_list = append_list(node, status_dict[node['certname']], failed_list, report_status)
elif (report_status == 'pending'):
pending_list = append_list(node, status_dict[node['certname']], pending_list, report_status)
elif ((sortbycol <= 3) and (get_status == 'all')):
for node in node_list:
if (node['certname'] in reports_dict):
report_status = reports_dict[node['certname']]['status']
"\n Can be used later but right now we just utilize the event-counts response.\n # Dictify the metrics for the report.\n metrics_data = {item['category'] + '-' + item['name']: item for item in\n reports_dict[node_name]['metrics']['data']}\n "
if (node['certname'] in status_dict):
if status_dict[node['certname']]:
if ((report_status == 'unchanged') and (status_dict[node['certname']]['noops'] > 0)):
report_status = 'pending'
else:
status_dict[node['certname']] = {}
merged_list = append_list(node, status_dict[node['certname']], merged_list, report_status)
elif ((sortbycol >= 4) and (get_status == 'all')):
sort = True
node_dict = {item['certname']: item for item in node_list}
for (status, value) in status_dict.items():
if (value['subject']['title'] in reports_dict):
report_status = reports_dict[value['subject']['title']]['status']
if ((value['subject']['title'] in node_dict) and report_status):
merged_list = append_list(node_dict[value['subject']['title']], value, merged_list, report_status)
if (sort and (get_status == 'all')):
return sort_table(merged_list, order=asc, col=sortbycol)
elif (sort and (get_status != 'all')):
sorted_unreported_list = sort_table(unreported_list, order=asc, col=sortbycol)
sorted_changed_list = sort_table(changed_list, order=asc, col=sortbycol)
sorted_failed_list = sort_table(failed_list, order=asc, col=sortbycol)
sorted_mismatch_list = sort_table(mismatch_list, order=asc, col=sortbycol)
sorted_pending_list = sort_table(pending_list, order=asc, col=sortbycol)
return (sorted_failed_list, sorted_changed_list, sorted_unreported_list, sorted_mismatch_list, sorted_pending_list)
if (get_status == 'all'):
return merged_list
else:
return (failed_list, changed_list, unreported_list, mismatch_list, pending_list) | :param node_list: dict
:param status_dict: dict
:param sortby: Takes a field name to sort by 'certname', 'latestCatalog', 'latestReport', 'latestFacts', 'success', 'noop', 'failure', 'skipped'
:param get_status: Status type to return. all, changed, failed, unreported, noops
:return: tuple(tuple,tuple)
node_dict input:
{
'certname': {
"name": <string>,
"deactivated": <timestamp>,
"catalog_timestamp": <timestamp>,
"facts_timestamp": <timestamp>,
"report_timestamp": <timestamp>
},
}
--------------------------------
status_dict input:
{
'certname': {
"subject-type": "certname",
"subject": { "title": "foo.local" },
"failures": 0,
"successes": 2,
"noops": 0,
"skips": 1
},
} | pano/methods/dictfuncs.py | dictstatus | jeroenzeegers/panopuppet | 0 | python | def dictstatus(node_list, reports_dict, status_dict, sort=True, sortby=None, asc=False, get_status='all', puppet_run_time=PUPPET_RUN_INTERVAL):
'\n :param node_list: dict\n :param status_dict: dict\n :param sortby: Takes a field name to sort by \'certname\', \'latestCatalog\', \'latestReport\', \'latestFacts\', \'success\', \'noop\', \'failure\', \'skipped\'\n :param get_status: Status type to return. all, changed, failed, unreported, noops\n :return: tuple(tuple,tuple)\n\n node_dict input:\n {\n \'certname\': {\n "name": <string>,\n "deactivated": <timestamp>,\n "catalog_timestamp": <timestamp>,\n "facts_timestamp": <timestamp>,\n "report_timestamp": <timestamp>\n },\n }\n --------------------------------\n status_dict input:\n {\n \'certname\': {\n "subject-type": "certname",\n "subject": { "title": "foo.local" },\n "failures": 0,\n "successes": 2,\n "noops": 0,\n "skips": 1\n },\n }\n '
def check_failed_compile(report_timestamp, fact_timestamp, catalog_timestamp, puppet_run_interval=puppet_run_time):
'\n :param report_timestamp: str\n :param fact_timestamp: str\n :param catalog_timestamp: str\n :return: Bool\n Returns False if the compiled run has not failed\n Returns True if the compiled run has failed\n '
if ((report_timestamp is None) or (catalog_timestamp is None) or (fact_timestamp is None)):
return True
report_time = json_to_datetime(report_timestamp)
fact_time = json_to_datetime(fact_timestamp)
catalog_time = json_to_datetime(catalog_timestamp)
diffs = dict()
diffs['catalog_fact'] = (catalog_time - fact_time)
diffs['fact_catalog'] = (fact_time - catalog_time)
diffs['report_fact'] = (report_time - fact_time)
diffs['fact_report'] = (fact_time - report_time)
diffs['report_catalog'] = (report_time - catalog_time)
diffs['catalog_report'] = (catalog_time - report_time)
for (key, value) in diffs.items():
if (value > timedelta(minutes=(puppet_run_interval / 2))):
return True
return False
def append_list(n_data, s_data, m_list, r_status):
if ((type(n_data) is not dict) or ((type(s_data) is not dict) and (type(m_list) is not list) and (not r_status))):
raise ValueError('Incorrect type given as input. Expects n_data, s_data as dict and m_list as list.')
m_list.append((n_data['certname'], (filters.date(localtime(json_to_datetime(n_data['catalog_timestamp'])), 'Y-m-d H:i:s') if (n_data['catalog_timestamp'] is not None) else ), (filters.date(localtime(json_to_datetime(n_data['report_timestamp'])), 'Y-m-d H:i:s') if (n_data['report_timestamp'] is not None) else ), (filters.date(localtime(json_to_datetime(n_data['facts_timestamp'])), 'Y-m-d H:i:s') if (n_data['facts_timestamp'] is not None) else ), s_data.get('successes', 0), s_data.get('noops', 0), s_data.get('failures', 0), s_data.get('skips', 0), r_status))
return m_list
sortables = {'certname': 0, 'catalog_timestamp': 1, 'report_timestamp': 2, 'facts_timestamp': 3, 'successes': 4, 'noops': 5, 'failures': 6, 'skips': 7}
if sortby:
sortbycol = sortables.get(sortby, 2)
else:
sortbycol = 2
merged_list = []
failed_list = []
unreported_list = []
changed_list = []
pending_list = []
mismatch_list = []
if (get_status != 'all'):
for node in node_list:
node_is_unreported = False
node_has_mismatching_timestamps = False
if is_unreported(node['report_timestamp']):
node_is_unreported = True
if check_failed_compile(report_timestamp=node.get('report_timestamp', None), fact_timestamp=node.get('facts_timestamp', None), catalog_timestamp=node.get('catalog_timestamp', None)):
node_has_mismatching_timestamps = True
if (node['certname'] in reports_dict):
report_status = reports_dict[node['certname']]['status']
"\n Can be used later but right now we just utilize the event-counts response.\n # Dictify the metrics for the report.\n metrics_data = {item['category'] + '-' + item['name']: item for item in\n reports_dict[node_name]['metrics']['data']}\n "
if (node['certname'] in status_dict):
if ((report_status == 'unchanged') and (status_dict[node['certname']]['noops'] > 0)):
report_status = 'pending'
else:
status_dict[node['certname']] = {}
if (node_is_unreported is True):
unreported_list = append_list(node, status_dict[node['certname']], unreported_list, report_status)
if (node_has_mismatching_timestamps is True):
mismatch_list = append_list(node, status_dict[node['certname']], mismatch_list, report_status)
if (report_status == 'changed'):
changed_list = append_list(node, status_dict[node['certname']], changed_list, report_status)
elif (report_status == 'failed'):
failed_list = append_list(node, status_dict[node['certname']], failed_list, report_status)
elif (report_status == 'pending'):
pending_list = append_list(node, status_dict[node['certname']], pending_list, report_status)
elif ((sortbycol <= 3) and (get_status == 'all')):
for node in node_list:
if (node['certname'] in reports_dict):
report_status = reports_dict[node['certname']]['status']
"\n Can be used later but right now we just utilize the event-counts response.\n # Dictify the metrics for the report.\n metrics_data = {item['category'] + '-' + item['name']: item for item in\n reports_dict[node_name]['metrics']['data']}\n "
if (node['certname'] in status_dict):
if status_dict[node['certname']]:
if ((report_status == 'unchanged') and (status_dict[node['certname']]['noops'] > 0)):
report_status = 'pending'
else:
status_dict[node['certname']] = {}
merged_list = append_list(node, status_dict[node['certname']], merged_list, report_status)
elif ((sortbycol >= 4) and (get_status == 'all')):
sort = True
node_dict = {item['certname']: item for item in node_list}
for (status, value) in status_dict.items():
if (value['subject']['title'] in reports_dict):
report_status = reports_dict[value['subject']['title']]['status']
if ((value['subject']['title'] in node_dict) and report_status):
merged_list = append_list(node_dict[value['subject']['title']], value, merged_list, report_status)
if (sort and (get_status == 'all')):
return sort_table(merged_list, order=asc, col=sortbycol)
elif (sort and (get_status != 'all')):
sorted_unreported_list = sort_table(unreported_list, order=asc, col=sortbycol)
sorted_changed_list = sort_table(changed_list, order=asc, col=sortbycol)
sorted_failed_list = sort_table(failed_list, order=asc, col=sortbycol)
sorted_mismatch_list = sort_table(mismatch_list, order=asc, col=sortbycol)
sorted_pending_list = sort_table(pending_list, order=asc, col=sortbycol)
return (sorted_failed_list, sorted_changed_list, sorted_unreported_list, sorted_mismatch_list, sorted_pending_list)
if (get_status == 'all'):
return merged_list
else:
return (failed_list, changed_list, unreported_list, mismatch_list, pending_list) | def dictstatus(node_list, reports_dict, status_dict, sort=True, sortby=None, asc=False, get_status='all', puppet_run_time=PUPPET_RUN_INTERVAL):
'\n :param node_list: dict\n :param status_dict: dict\n :param sortby: Takes a field name to sort by \'certname\', \'latestCatalog\', \'latestReport\', \'latestFacts\', \'success\', \'noop\', \'failure\', \'skipped\'\n :param get_status: Status type to return. all, changed, failed, unreported, noops\n :return: tuple(tuple,tuple)\n\n node_dict input:\n {\n \'certname\': {\n "name": <string>,\n "deactivated": <timestamp>,\n "catalog_timestamp": <timestamp>,\n "facts_timestamp": <timestamp>,\n "report_timestamp": <timestamp>\n },\n }\n --------------------------------\n status_dict input:\n {\n \'certname\': {\n "subject-type": "certname",\n "subject": { "title": "foo.local" },\n "failures": 0,\n "successes": 2,\n "noops": 0,\n "skips": 1\n },\n }\n '
def check_failed_compile(report_timestamp, fact_timestamp, catalog_timestamp, puppet_run_interval=puppet_run_time):
'\n :param report_timestamp: str\n :param fact_timestamp: str\n :param catalog_timestamp: str\n :return: Bool\n Returns False if the compiled run has not failed\n Returns True if the compiled run has failed\n '
if ((report_timestamp is None) or (catalog_timestamp is None) or (fact_timestamp is None)):
return True
report_time = json_to_datetime(report_timestamp)
fact_time = json_to_datetime(fact_timestamp)
catalog_time = json_to_datetime(catalog_timestamp)
diffs = dict()
diffs['catalog_fact'] = (catalog_time - fact_time)
diffs['fact_catalog'] = (fact_time - catalog_time)
diffs['report_fact'] = (report_time - fact_time)
diffs['fact_report'] = (fact_time - report_time)
diffs['report_catalog'] = (report_time - catalog_time)
diffs['catalog_report'] = (catalog_time - report_time)
for (key, value) in diffs.items():
if (value > timedelta(minutes=(puppet_run_interval / 2))):
return True
return False
def append_list(n_data, s_data, m_list, r_status):
if ((type(n_data) is not dict) or ((type(s_data) is not dict) and (type(m_list) is not list) and (not r_status))):
raise ValueError('Incorrect type given as input. Expects n_data, s_data as dict and m_list as list.')
m_list.append((n_data['certname'], (filters.date(localtime(json_to_datetime(n_data['catalog_timestamp'])), 'Y-m-d H:i:s') if (n_data['catalog_timestamp'] is not None) else ), (filters.date(localtime(json_to_datetime(n_data['report_timestamp'])), 'Y-m-d H:i:s') if (n_data['report_timestamp'] is not None) else ), (filters.date(localtime(json_to_datetime(n_data['facts_timestamp'])), 'Y-m-d H:i:s') if (n_data['facts_timestamp'] is not None) else ), s_data.get('successes', 0), s_data.get('noops', 0), s_data.get('failures', 0), s_data.get('skips', 0), r_status))
return m_list
sortables = {'certname': 0, 'catalog_timestamp': 1, 'report_timestamp': 2, 'facts_timestamp': 3, 'successes': 4, 'noops': 5, 'failures': 6, 'skips': 7}
if sortby:
sortbycol = sortables.get(sortby, 2)
else:
sortbycol = 2
merged_list = []
failed_list = []
unreported_list = []
changed_list = []
pending_list = []
mismatch_list = []
if (get_status != 'all'):
for node in node_list:
node_is_unreported = False
node_has_mismatching_timestamps = False
if is_unreported(node['report_timestamp']):
node_is_unreported = True
if check_failed_compile(report_timestamp=node.get('report_timestamp', None), fact_timestamp=node.get('facts_timestamp', None), catalog_timestamp=node.get('catalog_timestamp', None)):
node_has_mismatching_timestamps = True
if (node['certname'] in reports_dict):
report_status = reports_dict[node['certname']]['status']
"\n Can be used later but right now we just utilize the event-counts response.\n # Dictify the metrics for the report.\n metrics_data = {item['category'] + '-' + item['name']: item for item in\n reports_dict[node_name]['metrics']['data']}\n "
if (node['certname'] in status_dict):
if ((report_status == 'unchanged') and (status_dict[node['certname']]['noops'] > 0)):
report_status = 'pending'
else:
status_dict[node['certname']] = {}
if (node_is_unreported is True):
unreported_list = append_list(node, status_dict[node['certname']], unreported_list, report_status)
if (node_has_mismatching_timestamps is True):
mismatch_list = append_list(node, status_dict[node['certname']], mismatch_list, report_status)
if (report_status == 'changed'):
changed_list = append_list(node, status_dict[node['certname']], changed_list, report_status)
elif (report_status == 'failed'):
failed_list = append_list(node, status_dict[node['certname']], failed_list, report_status)
elif (report_status == 'pending'):
pending_list = append_list(node, status_dict[node['certname']], pending_list, report_status)
elif ((sortbycol <= 3) and (get_status == 'all')):
for node in node_list:
if (node['certname'] in reports_dict):
report_status = reports_dict[node['certname']]['status']
"\n Can be used later but right now we just utilize the event-counts response.\n # Dictify the metrics for the report.\n metrics_data = {item['category'] + '-' + item['name']: item for item in\n reports_dict[node_name]['metrics']['data']}\n "
if (node['certname'] in status_dict):
if status_dict[node['certname']]:
if ((report_status == 'unchanged') and (status_dict[node['certname']]['noops'] > 0)):
report_status = 'pending'
else:
status_dict[node['certname']] = {}
merged_list = append_list(node, status_dict[node['certname']], merged_list, report_status)
elif ((sortbycol >= 4) and (get_status == 'all')):
sort = True
node_dict = {item['certname']: item for item in node_list}
for (status, value) in status_dict.items():
if (value['subject']['title'] in reports_dict):
report_status = reports_dict[value['subject']['title']]['status']
if ((value['subject']['title'] in node_dict) and report_status):
merged_list = append_list(node_dict[value['subject']['title']], value, merged_list, report_status)
if (sort and (get_status == 'all')):
return sort_table(merged_list, order=asc, col=sortbycol)
elif (sort and (get_status != 'all')):
sorted_unreported_list = sort_table(unreported_list, order=asc, col=sortbycol)
sorted_changed_list = sort_table(changed_list, order=asc, col=sortbycol)
sorted_failed_list = sort_table(failed_list, order=asc, col=sortbycol)
sorted_mismatch_list = sort_table(mismatch_list, order=asc, col=sortbycol)
sorted_pending_list = sort_table(pending_list, order=asc, col=sortbycol)
return (sorted_failed_list, sorted_changed_list, sorted_unreported_list, sorted_mismatch_list, sorted_pending_list)
if (get_status == 'all'):
return merged_list
else:
return (failed_list, changed_list, unreported_list, mismatch_list, pending_list)<|docstring|>:param node_list: dict
:param status_dict: dict
:param sortby: Takes a field name to sort by 'certname', 'latestCatalog', 'latestReport', 'latestFacts', 'success', 'noop', 'failure', 'skipped'
:param get_status: Status type to return. all, changed, failed, unreported, noops
:return: tuple(tuple,tuple)
node_dict input:
{
'certname': {
"name": <string>,
"deactivated": <timestamp>,
"catalog_timestamp": <timestamp>,
"facts_timestamp": <timestamp>,
"report_timestamp": <timestamp>
},
}
--------------------------------
status_dict input:
{
'certname': {
"subject-type": "certname",
"subject": { "title": "foo.local" },
"failures": 0,
"successes": 2,
"noops": 0,
"skips": 1
},
}<|endoftext|> |
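A minimal calling sketch for dictstatus, assuming the pano module and its Django dependencies are installed and configured; the node, report, and event-count structures below are hypothetical examples shaped like the ones described in the docstring.

from pano.methods.dictfuncs import dictstatus # assumed import path

nodes = [{'certname': 'foo.local',
'catalog_timestamp': '2015-06-01T10:00:02.000Z',
'facts_timestamp': '2015-06-01T10:00:00.000Z',
'report_timestamp': '2015-06-01T10:00:05.000Z'}]
reports = {'foo.local': {'status': 'changed'}}
events = {'foo.local': {'subject': {'title': 'foo.local'},
'successes': 2, 'failures': 0, 'noops': 0, 'skips': 1}}

# get_status='all' returns a single sorted table of
# (certname, catalog, report, facts, successes, noops, failures, skips, status) rows.
all_rows = dictstatus(nodes, reports, events, sortby='report_timestamp', get_status='all')

# Any other get_status value returns the five per-category tables instead.
failed, changed, unreported, mismatch, pending = dictstatus(nodes, reports, events, get_status='changed')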
b9088c28b5c2adadbbab63812a499d026d3bfc7a8c76186958339a0e1ef53045 | def check_failed_compile(report_timestamp, fact_timestamp, catalog_timestamp, puppet_run_interval=puppet_run_time):
'\n :param report_timestamp: str\n :param fact_timestamp: str\n :param catalog_timestamp: str\n :return: Bool\n Returns False if the compiled run has not failed\n Returns True if the compiled run has failed\n '
if ((report_timestamp is None) or (catalog_timestamp is None) or (fact_timestamp is None)):
return True
report_time = json_to_datetime(report_timestamp)
fact_time = json_to_datetime(fact_timestamp)
catalog_time = json_to_datetime(catalog_timestamp)
diffs = dict()
diffs['catalog_fact'] = (catalog_time - fact_time)
diffs['fact_catalog'] = (fact_time - catalog_time)
diffs['report_fact'] = (report_time - fact_time)
diffs['fact_report'] = (fact_time - report_time)
diffs['report_catalog'] = (report_time - catalog_time)
diffs['catalog_report'] = (catalog_time - report_time)
for (key, value) in diffs.items():
if (value > timedelta(minutes=(puppet_run_interval / 2))):
return True
return False | :param report_timestamp: str
:param fact_timestamp: str
:param catalog_timestamp: str
:return: Bool
Returns False if the compiled run has not failed
Returns True if the compiled run has failed | pano/methods/dictfuncs.py | check_failed_compile | jeroenzeegers/panopuppet | 0 | python | def check_failed_compile(report_timestamp, fact_timestamp, catalog_timestamp, puppet_run_interval=puppet_run_time):
'\n :param report_timestamp: str\n :param fact_timestamp: str\n :param catalog_timestamp: str\n :return: Bool\n Returns False if the compiled run has not failed\n Returns True if the compiled run has failed\n '
if ((report_timestamp is None) or (catalog_timestamp is None) or (fact_timestamp is None)):
return True
report_time = json_to_datetime(report_timestamp)
fact_time = json_to_datetime(fact_timestamp)
catalog_time = json_to_datetime(catalog_timestamp)
diffs = dict()
diffs['catalog_fact'] = (catalog_time - fact_time)
diffs['fact_catalog'] = (fact_time - catalog_time)
diffs['report_fact'] = (report_time - fact_time)
diffs['fact_report'] = (fact_time - report_time)
diffs['report_catalog'] = (report_time - catalog_time)
diffs['catalog_report'] = (catalog_time - report_time)
for (key, value) in diffs.items():
if (value > timedelta(minutes=(puppet_run_interval / 2))):
return True
return False | def check_failed_compile(report_timestamp, fact_timestamp, catalog_timestamp, puppet_run_interval=puppet_run_time):
'\n :param report_timestamp: str\n :param fact_timestamp: str\n :param catalog_timestamp: str\n :return: Bool\n Returns False if the compiled run has not failed\n Returns True if the compiled run has failed\n '
if ((report_timestamp is None) or (catalog_timestamp is None) or (fact_timestamp is None)):
return True
report_time = json_to_datetime(report_timestamp)
fact_time = json_to_datetime(fact_timestamp)
catalog_time = json_to_datetime(catalog_timestamp)
diffs = dict()
diffs['catalog_fact'] = (catalog_time - fact_time)
diffs['fact_catalog'] = (fact_time - catalog_time)
diffs['report_fact'] = (report_time - fact_time)
diffs['fact_report'] = (fact_time - report_time)
diffs['report_catalog'] = (report_time - catalog_time)
diffs['catalog_report'] = (catalog_time - report_time)
for (key, value) in diffs.items():
if (value > timedelta(minutes=(puppet_run_interval / 2))):
return True
return False<|docstring|>:param report_timestamp: str
:param fact_timestamp: str
:param catalog_timestamp: str
:return: Bool
Returns False if the compiled run has not failed
Returns True if the compiled run has failed<|endoftext|> |
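The timestamp check above flags a node whenever any two of its report, facts, and catalog timestamps drift more than half a run interval apart, or any of them is missing. A small illustration calling the standalone definition shown above, with made-up PuppetDB-style timestamps (json_to_datetime is assumed to parse them):

# With a 30-minute run interval, anything more than 15 minutes apart is a mismatch.
ok = check_failed_compile('2015-06-01T10:00:05.000Z',
'2015-06-01T10:00:00.000Z',
'2015-06-01T10:00:02.000Z',
puppet_run_interval=30) # False: all within seconds
drifted = check_failed_compile('2015-06-01T10:20:00.000Z',
'2015-06-01T10:00:00.000Z',
'2015-06-01T10:00:02.000Z',
puppet_run_interval=30) # True: report 20 minutes after facts
missing = check_failed_compile(None, '2015-06-01T10:00:00.000Z', None,
puppet_run_interval=30) # True: missing timestamps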
d7439c8f3ccf9568ddf7cc8e09e42c25895f15f2579ad4d3a7b1015953c68966 | def claims(request):
'\n Show all current claims.\n '
claims = Claim.objects.all()
paginator = Paginator(claims, 10)
page = request.GET.get('page')
try:
claims = paginator.page(page)
except PageNotAnInteger:
claims = paginator.page(1)
except EmptyPage:
claims = paginator.page(paginator.num_pages)
return render(request, 'list_claims.html', {'claims': claims}) | Show all current claims. | django/search/views.py | claims | arunchaganty/odd-nails | 0 | python | def claims(request):
'\n \n '
claims = Claim.objects.all()
paginator = Paginator(claims, 10)
page = request.GET.get('page')
try:
claims = paginator.page(page)
except PageNotAnInteger:
claims = paginator.page(1)
except EmptyPage:
claims = paginator.page(paginator.num_pages)
return render(request, 'list_claims.html', {'claims': claims}) | def claims(request):
'\n \n '
claims = Claim.objects.all()
paginator = Paginator(claims, 10)
page = request.GET.get('page')
try:
claims = paginator.page(page)
except PageNotAnInteger:
claims = paginator.page(1)
except EmptyPage:
claims = paginator.page(paginator.num_pages)
return render(request, 'list_claims.html', {'claims': claims})<|docstring|>Show all current claims.<|endoftext|> |
f7fda0206a061d24cfaa16ee25d4745a1bdd97dc541ddd020e0d61ccc4d37aa5 | def build_mathematica(target, source, env):
'\n Build targets with a Mathematica command\n \n This function executes a Mathematica function to build objects \n specified by target using the objects specified by source.\n It requires Mathematica to be callable from the command line \n via `math` (or `MathKernel` for OS X).\n '
builder_attributes = {'name': 'Mathematica', 'valid_extensions': ['.m'], 'exec_opts': '-script'}
builder = MathematicaBuilder(target, source, env, **builder_attributes)
builder.execute_system_call()
return None | Build targets with a Mathematica command
This function executes a Mathematica function to build objects
specified by target using the objects specified by source.
It requires Mathematica to be callable from the command line
via `math` (or `MathKernel` for OS X). | gslab_scons/builders/build_mathematica.py | build_mathematica | gslab-econ/gslab_python | 12 | python | def build_mathematica(target, source, env):
'\n Build targets with a Mathematica command\n \n This function executes a Mathematica function to build objects \n specified by target using the objects specified by source.\n It requires Mathematica to be callable from the command line \n via `math` (or `MathKernel` for OS X).\n '
builder_attributes = {'name': 'Mathematica', 'valid_extensions': ['.m'], 'exec_opts': '-script'}
builder = MathematicaBuilder(target, source, env, **builder_attributes)
builder.execute_system_call()
return None | def build_mathematica(target, source, env):
'\n Build targets with a Mathematica command\n \n This function executes a Mathematica function to build objects \n specified by target using the objects specified by source.\n It requires Mathematica to be callable from the command line \n via `math` (or `MathKernel` for OS X).\n '
builder_attributes = {'name': 'Mathematica', 'valid_extensions': ['.m'], 'exec_opts': '-script'}
builder = MathematicaBuilder(target, source, env, **builder_attributes)
builder.execute_system_call()
return None<|docstring|>Build targets with a Mathematica command
This function executes a Mathematica function to build objects
specified by target using the objects specified by source.
It requires Mathematica to be callable from the command line
via `math` (or `MathKernel` for OS X).<|endoftext|> |
93d039bdaf07e7117907f6737498291acce34fef257803ccc5f0cb2780b5c417 | def is_downloaded(folder):
' Returns whether data has been downloaded '
return (os.path.isfile(get_data_path(folder)) and os.path.isfile(get_data_path(folder))) | Returns whether data has been downloaded | datasets/motor.py | is_downloaded | DaniUPC/tf_dataio | 2 | python | def is_downloaded(folder):
' '
return (os.path.isfile(get_data_path(folder)) and os.path.isfile(get_data_path(folder))) | def is_downloaded(folder):
' '
return (os.path.isfile(get_data_path(folder)) and os.path.isfile(get_data_path(folder)))<|docstring|>Returns whether data has been downloaded<|endoftext|> |
425d1a9f25297befba3e90eaf3778f51c63674b953b3e96aa3e3787229922563 | def __init__(self, data_path):
' See base class '
super(MotorSerialize, self).__init__(data_path)
create_dir(data_path)
if (not is_downloaded(data_path)):
logger.info('Downloading Motor dataset ...')
urllib.request.urlretrieve(DATA_URL, get_data_path(data_path)) | See base class | datasets/motor.py | __init__ | DaniUPC/tf_dataio | 2 | python | def __init__(self, data_path):
' '
super(MotorSerialize, self).__init__(data_path)
create_dir(data_path)
if (not is_downloaded(data_path)):
logger.info('Downloading Motor dataset ...')
urllib.request.urlretrieve(DATA_URL, get_data_path(data_path)) | def __init__(self, data_path):
' '
super(MotorSerialize, self).__init__(data_path)
create_dir(data_path)
if (not is_downloaded(data_path)):
logger.info('Downloading Motor dataset ...')
urllib.request.urlretrieve(DATA_URL, get_data_path(data_path))<|docstring|>See base class<|endoftext|> |
899c87f9633db109b65eda6b46e89b5fcc09c3679c452b3abf0a3932c62ea947 | def construct(identifier, uco_object, *args):
'Constructs property bundles based on the given identifier.\n\n Args:\n identifier: The unique identifier to associate to a property bundle to create.\n uco_object: The uco_object to place the property bundles in.\n *args: Extra arguments used by the given property bundle constructor.\n '
if (identifier in registry):
registry[identifier](uco_object, *args) | Constructs property bundles based on the given identifier.
Args:
identifier: The unique identifier to associate to a property bundle to create.
uco_object: The uco_object to place the property bundles in.
*args: Extra arguments used by the given property bundle constructor. | case_plaso/file_relationships.py | construct | casework/CASE-Implementation-Plaso | 1 | python | def construct(identifier, uco_object, *args):
'Constructs property bundles based on the given identifier.\n\n Args:\n identifier: The unique identifier to associate to a property bundle to create.\n uco_object: The uco_object to place the property bundles in.\n *args: Extra arguments used by the given property bundle constructor.\n '
if (identifier in registry):
registry[identifier](uco_object, *args) | def construct(identifier, uco_object, *args):
'Constructs property bundles based on the given identifier.\n\n Args:\n identifier: The unique identifier to associate to a property bundle to create.\n uco_object: The uco_object to place the property bundles in.\n *args: Extra arguments used by the given property bundle constructor.\n '
if (identifier in registry):
registry[identifier](uco_object, *args)<|docstring|>Constructs property bundles based on the given identifier.
Args:
identifier: The unique identifier to associate to a property bundle to create.
uco_object: The uco_object to place the property bundles in.
*args: Extra arguments used by the given property bundle constructor.<|endoftext|> |
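A self-contained sketch of the registry-and-dispatch pattern that construct() relies on; the decorator, identifier, and constructor below are illustrative stand-ins, not the actual case_plaso registrations.

registry = {}

def register(identifier):
    # Hypothetical helper that maps an identifier to a property-bundle constructor.
    def _wrap(func):
        registry[identifier] = func
        return func
    return _wrap

@register('file_stat')
def add_file_stat(uco_object, event):
    # Illustrative constructor: attach a property bundle to the object.
    uco_object.setdefault('propertyBundle', []).append({'@type': 'FileStat', 'size': event['size']})

def construct(identifier, uco_object, *args):
    # Mirrors the dispatch above: unknown identifiers are silently ignored.
    if identifier in registry:
        registry[identifier](uco_object, *args)

uco_object = {}
construct('file_stat', uco_object, {'size': 1024})  # adds the bundle
construct('unknown_parser', uco_object)             # no-op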
07a73ff33d05aa099f0bf1d2cb8c239eeb2000bac6ef1fc99da04714262ea231 | def __init__(self, model, Q, R):
'\n Constructs stage cost\n\n Parameters\n ---------\n model : object of nmpccodegen.models.model or nmpccodegen.models.model_continious \n Q : quadratic cost on the state\n R : quadratic cost on the input\n '
self._Q = Q
self._R = R
self._model = model | Constructs stage cost
Parameters
---------
model : object of nmpccodegen.models.model or nmpccodegen.models.model_continious
Q : quadratic cost on the state
R : quadratic cost on the input | old_code/src_python/nmpccodegen/controller/stage_costs.py | __init__ | kul-forbes/nmpc-codegen | 24 | python | def __init__(self, model, Q, R):
'\n Constructs stage cost\n\n Parameters\n ---------\n model : object of nmpccodegen.models.model or nmpccodegen.models.model_continious \n Q : quadratic cost on the state\n R : quadratic cost on the input\n '
self._Q = Q
self._R = R
self._model = model | def __init__(self, model, Q, R):
'\n Constructs stage cost\n\n Parameters\n ---------\n model : object of nmpccodegen.models.model or nmpccodegen.models.model_continious \n Q : quadratic cost on the state\n R : quadratic cost on the input\n '
self._Q = Q
self._R = R
self._model = model<|docstring|>Constructs stage cost
Parameters
---------
model : object of nmpccodegen.models.model or nmpccodegen.models.model_continious
Q : quadratic cost on the state
R : quadratic cost on the input<|endoftext|> |
6b4ea8203d7b7c15e2dc2232f45c4d45fea178d2ec9973a72f8d5370538028dc | def evaluate_cost(self, state, input, iteration_index, state_reference, input_reference):
" \n Calculate stage cost \n\n Parameters\n ---------\n state : current state of the system\n input : current input of the system\n iteration_index : step index of the discrete system\n state_reference : wanted state of the system\n input_reference : wanted input of the system\n\n Returns\n ------\n Stage Cost (x'Qx + u'Ru)\n "
stage_cost = 0
for i_col in range(0, self._model.number_of_states):
for i_row in range(0, self._model.number_of_states):
stage_cost += (((state[i_col] - state_reference[i_col]) * self._Q[(i_col, i_row)]) * (state[i_row] - state_reference[i_row]))
for i_col in range(0, self._model.number_of_inputs):
for i_row in range(0, self._model.number_of_inputs):
stage_cost += (((input[i_col] - input_reference[i_col]) * self._R[(i_col, i_row)]) * (input[i_row] - input_reference[i_row]))
return stage_cost | Calculate stage cost
Parameters
---------
state : current state of the system
input : current input of the system
iteration_index : step index of the discrete system
state_reference : wanted state of the system
input_reference : wanted input of the system
Returns
------
Stage Cost (x'Qx + u'Ru) | old_code/src_python/nmpccodegen/controller/stage_costs.py | evaluate_cost | kul-forbes/nmpc-codegen | 24 | python | def evaluate_cost(self, state, input, iteration_index, state_reference, input_reference):
" \n Calculate stage cost \n\n Parameters\n ---------\n state : current state of the system\n input : current input of the system\n iteration_index : step index of the discrete system\n state_reference : wanted state of the system\n input_reference : wanted input of the system\n\n Returns\n ------\n Stage Cost (x'Qx + u'Ru)\n "
stage_cost = 0
for i_col in range(0, self._model.number_of_states):
for i_row in range(0, self._model.number_of_states):
stage_cost += (((state[i_col] - state_reference[i_col]) * self._Q[(i_col, i_row)]) * (state[i_row] - state_reference[i_row]))
for i_col in range(0, self._model.number_of_inputs):
for i_row in range(0, self._model.number_of_inputs):
stage_cost += (((input[i_col] - input_reference[i_col]) * self._R[(i_col, i_row)]) * (input[i_row] - input_reference[i_row]))
return stage_cost | def evaluate_cost(self, state, input, iteration_index, state_reference, input_reference):
" \n Calculate stage cost \n\n Parameters\n ---------\n state : current state of the system\n input : current input of the system\n iteration_index : step index of the discrete system\n state_reference : wanted state of the system\n input_reference : wanted input of the system\n\n Returns\n ------\n Stage Cost (x'Qx + u'Ru)\n "
stage_cost = 0
for i_col in range(0, self._model.number_of_states):
for i_row in range(0, self._model.number_of_states):
stage_cost += (((state[i_col] - state_reference[i_col]) * self._Q[(i_col, i_row)]) * (state[i_row] - state_reference[i_row]))
for i_col in range(0, self._model.number_of_inputs):
for i_row in range(0, self._model.number_of_inputs):
stage_cost += (((input[i_col] - input_reference[i_col]) * self._R[(i_col, i_row)]) * (input[i_row] - input_reference[i_row]))
return stage_cost<|docstring|>Calculate stage cost
Parameters
---------
state : current state of the system
input : current input of the system
iteration_index : step index of the discrete system
state_reference : wanted state of the system
input_reference : wanted input of the system
Returns
------
Stage Cost (x'Qx + u'Ru)<|endoftext|> |
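The two nested loops above accumulate the standard quadratic stage cost (x - x_ref)'Q(x - x_ref) + (u - u_ref)'R(u - u_ref); an equivalent NumPy computation, with made-up weights and references, is:

import numpy as np

Q = np.diag([1.0, 0.5])                        # hypothetical state weights
R = np.diag([0.1])                             # hypothetical input weight
x, x_ref = np.array([0.4, -0.2]), np.zeros(2)
u, u_ref = np.array([0.3]), np.zeros(1)

dx, du = x - x_ref, u - u_ref
stage_cost = float(dx @ Q @ dx + du @ R @ du)  # same value the loops would accumulate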
63dda0155c8c97163d2354941872707f3e7622299a2ca8c22ecd074f6af675c9 | def initialize_schema(self):
'Create every necessary object (like tables or indices) in the\n backend.\n\n This is executed when the ``cliquet migrate`` command is run.\n '
raise NotImplementedError | Create every necessary object (like tables or indices) in the
backend.
This is executed when the ``cliquet migrate`` command is run. | cliquet/cache/__init__.py | initialize_schema | ravitejavalluri/cliquet | 89 | python | def initialize_schema(self):
'Create every necessary object (like tables or indices) in the\n backend.\n\n This is executed when the ``cliquet migrate`` command is run.\n '
raise NotImplementedError | def initialize_schema(self):
'Create every necessary object (like tables or indices) in the\n backend.\n\n This is executed when the ``cliquet migrate`` command is run.\n '
raise NotImplementedError<|docstring|>Create every necessary object (like tables or indices) in the
backend.
This is executed when the ``cliquet migrate`` command is run.<|endoftext|>
c267ea5dd835bbb742b9ddb78c33572872f7a2c8bd7c9bff492255f204215462 | def flush(self):
'Delete every value.'
raise NotImplementedError | Delete every value. | cliquet/cache/__init__.py | flush | ravitejavalluri/cliquet | 89 | python | def flush(self):
raise NotImplementedError | def flush(self):
raise NotImplementedError<|docstring|>Delete every value.<|endoftext|>
9f37b7ecefaf076b93e24a1b8e24fce537b206637f9ef1166023da4100efb3db | def ttl(self, key):
'Obtain the expiration value of the specified `key`.\n\n :param str key: key\n :returns: number of seconds or negative if no TTL.\n :rtype: float\n '
raise NotImplementedError | Obtain the expiration value of the specified `key`.
:param str key: key
:returns: number of seconds or negative if no TTL.
:rtype: float | cliquet/cache/__init__.py | ttl | ravitejavalluri/cliquet | 89 | python | def ttl(self, key):
'Obtain the expiration value of the specified `key`.\n\n :param str key: key\n :returns: number of seconds or negative if no TTL.\n :rtype: float\n '
raise NotImplementedError | def ttl(self, key):
'Obtain the expiration value of the specified `key`.\n\n :param str key: key\n :returns: number of seconds or negative if no TTL.\n :rtype: float\n '
raise NotImplementedError<|docstring|>Obtain the expiration value of the specified `key`.
:param str key: key
:returns: number of seconds or negative if no TTL.
:rtype: float<|endoftext|> |
b93c49281ade0fa93a3bd5c03367d56a66051ae24b8fe29c8e2fa1e8166d928a | def expire(self, key, ttl):
'Set the expiration value `ttl` for the specified `key`.\n\n :param str key: key\n :param float ttl: number of seconds\n '
raise NotImplementedError | Set the expiration value `ttl` for the specified `key`.
:param str key: key
:param float ttl: number of seconds | cliquet/cache/__init__.py | expire | ravitejavalluri/cliquet | 89 | python | def expire(self, key, ttl):
'Set the expiration value `ttl` for the specified `key`.\n\n :param str key: key\n :param float ttl: number of seconds\n '
raise NotImplementedError | def expire(self, key, ttl):
'Set the expiration value `ttl` for the specified `key`.\n\n :param str key: key\n :param float ttl: number of seconds\n '
raise NotImplementedError<|docstring|>Set the expiration value `ttl` for the specified `key`.
:param str key: key
:param float ttl: number of seconds<|endoftext|> |
6ab7ee58c0d6bf74f51ae27a4ba0ecb437bbe14a8278b52b7053d078f8d13ea1 | def set(self, key, value, ttl=None):
'Store a value with the specified `key`. If `ttl` is provided,\n set an expiration value.\n\n :param str key: key\n :param str value: value to store\n :param float ttl: expire after number of seconds\n '
raise NotImplementedError | Store a value with the specified `key`. If `ttl` is provided,
set an expiration value.
:param str key: key
:param str value: value to store
:param float ttl: expire after number of seconds | cliquet/cache/__init__.py | set | ravitejavalluri/cliquet | 89 | python | def set(self, key, value, ttl=None):
'Store a value with the specified `key`. If `ttl` is provided,\n set an expiration value.\n\n :param str key: key\n :param str value: value to store\n :param float ttl: expire after number of seconds\n '
raise NotImplementedError | def set(self, key, value, ttl=None):
'Store a value with the specified `key`. If `ttl` is provided,\n set an expiration value.\n\n :param str key: key\n :param str value: value to store\n :param float ttl: expire after number of seconds\n '
raise NotImplementedError<|docstring|>Store a value with the specified `key`. If `ttl` is provided,
set an expiration value.
:param str key: key
:param str value: value to store
:param float ttl: expire after number of seconds<|endoftext|> |
e7fe6353d54c0909cf298e75102c15b587ebb0261df71da0301d98913d319ffd | def get(self, key):
'Obtain the value of the specified `key`.\n\n :param str key: key\n :returns: the stored value or None if missing.\n :rtype: str\n '
raise NotImplementedError | Obtain the value of the specified `key`.
:param str key: key
:returns: the stored value or None if missing.
:rtype: str | cliquet/cache/__init__.py | get | ravitejavalluri/cliquet | 89 | python | def get(self, key):
'Obtain the value of the specified `key`.\n\n :param str key: key\n :returns: the stored value or None if missing.\n :rtype: str\n '
raise NotImplementedError | def get(self, key):
'Obtain the value of the specified `key`.\n\n :param str key: key\n :returns: the stored value or None if missing.\n :rtype: str\n '
raise NotImplementedError<|docstring|>Obtain the value of the specified `key`.
:param str key: key
:returns: the stored value or None if missing.
:rtype: str<|endoftext|> |
5d581adabfda22516216dd014f9d957c82fc9e1ba63429fdd6c99b12fc02044b | def delete(self, key):
'Delete the value of the specified `key`.\n\n :param str key: key\n '
raise NotImplementedError | Delete the value of the specified `key`.
:param str key: key | cliquet/cache/__init__.py | delete | ravitejavalluri/cliquet | 89 | python | def delete(self, key):
'Delete the value of the specified `key`.\n\n :param str key: key\n '
raise NotImplementedError | def delete(self, key):
'Delete the value of the specified `key`.\n\n :param str key: key\n '
raise NotImplementedError<|docstring|>Delete the value of the specified `key`.
:param str key: key<|endoftext|> |
ca913ca87f2a6e9feb95af81414d1104b0e32ef3dcc970a3411551e905583e8c | def ping(request):
'Test that cache backend is operational.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` if everything is ok, ``False`` otherwise.\n :rtype: bool\n '
try:
if (random.random() < _HEARTBEAT_DELETE_RATE):
backend.delete(_HEARTBEAT_KEY)
else:
backend.set(_HEARTBEAT_KEY, 'alive', _HEARTBEAT_TTL_SECONDS)
return True
except:
logger.exception('Heartbeat Failure')
return False | Test that cache backend is operational.
:param request: current request object
:type request: :class:`~pyramid:pyramid.request.Request`
:returns: ``True`` if everything is ok, ``False`` otherwise.
:rtype: bool | cliquet/cache/__init__.py | ping | ravitejavalluri/cliquet | 89 | python | def ping(request):
'Test that cache backend is operational.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` if everything is ok, ``False`` otherwise.\n :rtype: bool\n '
try:
if (random.random() < _HEARTBEAT_DELETE_RATE):
backend.delete(_HEARTBEAT_KEY)
else:
backend.set(_HEARTBEAT_KEY, 'alive', _HEARTBEAT_TTL_SECONDS)
return True
except:
logger.exception('Heartbeat Failure')
return False | def ping(request):
'Test that cache backend is operational.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` if everything is ok, ``False`` otherwise.\n :rtype: bool\n '
try:
if (random.random() < _HEARTBEAT_DELETE_RATE):
backend.delete(_HEARTBEAT_KEY)
else:
backend.set(_HEARTBEAT_KEY, 'alive', _HEARTBEAT_TTL_SECONDS)
return True
except:
logger.exception('Heartbeat Failure')
return False<|docstring|>Test that cache backend is operational.
:param request: current request object
:type request: :class:`~pyramid:pyramid.request.Request`
:returns: ``True`` if everything is ok, ``False`` otherwise.
:rtype: bool<|endoftext|> |
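A minimal in-memory sketch of the cache interface documented above (this is not the backend cliquet ships; expirations are tracked with wall-clock timestamps and checked lazily on get):

import time

class MemoryCache(object):
    # Illustrative stand-in implementing the same method contract.
    def __init__(self):
        self._store, self._expires = {}, {}

    def initialize_schema(self):
        pass  # nothing to create for a plain dict

    def flush(self):
        self._store.clear()
        self._expires.clear()

    def ttl(self, key):
        expires = self._expires.get(key)
        return (expires - time.time()) if expires is not None else -1

    def expire(self, key, ttl):
        self._expires[key] = time.time() + ttl

    def set(self, key, value, ttl=None):
        self._store[key] = value
        if ttl is not None:
            self.expire(key, ttl)

    def get(self, key):
        expires = self._expires.get(key)
        if expires is not None and expires < time.time():
            self.delete(key)
            return None
        return self._store.get(key)

    def delete(self, key):
        self._expires.pop(key, None)
        self._store.pop(key, None)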
14e2efa685bbd5865d8543dc78b8e2e58a5afa347c67a0364e49c15323c77ae3 | def fmu_qss_gen():
'Generate an FMU-QSS from an FMU-ME'
parser = argparse.ArgumentParser()
parser.add_argument('ME', help='FMU-ME fmu or xml file', default='modelDescription.xml')
parser.add_argument('--qss', help='QSS method (x)(LI)QSS(1|2|3) [QSS2]', default='QSS2')
parser.add_argument('--rTol', help='relative tolerance [FMU]', type=float)
parser.add_argument('--aTol', help='absolute tolerance [1e-6]', type=float, default=1e-06)
parser.add_argument('--tEnd', help='simulation end time [FMU]', type=float)
args = parser.parse_args()
args.qss = args.qss.upper()
if (args.qss not in ('QSS1', 'QSS2', 'QSS3', 'LIQSS1', 'LIQSS2', 'LIQSS3', 'xQSS1', 'xQSS2', 'xQSS3')):
print((('\nUnsupported QSS method: ' + args.qss) + ': Must be one of QSS1, QSS2, QSS3, LIQSS1, LIQSS2, LIQSS3, xQSS1, xQSS2, xQSS3'))
sys.exit(1)
if ((args.rTol is not None) and (args.rTol < 0.0)):
print(('\nNegative rTol: ' + '{:.16f}'.format(args.rTol)))
sys.exit(1)
if (args.aTol <= 0.0):
print(('\nNonpositive aTol: ' + '{:.16f}'.format(args.aTol)))
sys.exit(1)
if ((args.tEnd is not None) and (args.tEnd < 0.0)):
print(('\nNegative tEnd: ' + '{:.16f}'.format(args.tEnd)))
sys.exit(1)
ME_lower = args.ME.lower()
if ME_lower.endswith('.xml'):
me_fmu_name = me_name = None
me_xml_name = args.ME
elif ME_lower.endswith('.fmu'):
me_fmu_name = args.ME
me_name = os.path.splitext(os.path.basename(me_fmu_name))[0]
me_xml_name = 'modelDescription.xml'
else:
print(('\nFMU-ME input is not a .fmu or .xml file: ' + args.ME))
sys.exit(1)
if me_fmu_name:
try:
zip_file = ZipFile(me_fmu_name)
zip_file.extract('modelDescription.xml')
zip_file.close()
except:
print('\nExtracting modelDescription.xml from FMU-ME fmu failed')
sys.exit(1)
try:
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse(me_xml_name, parser)
root = tree.getroot()
except:
print(('\nFMU-ME XML open and parse failed: ' + me_xml_name))
sys.exit(1)
if (root.tag != 'fmiModelDescription'):
print(('\nRoot is not fmiModelDescription in FMU-ME XML: ' + me_xml_name))
sys.exit(1)
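# Rewrite the top-level attributes: append _QSS to the model name, zero the event indicators, and drop in a GUID placeholder to be replaced once the final XML is hashed.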
fmiModelDescription = root
if ('modelName' in fmiModelDescription.attrib):
fmiModelDescription.attrib['modelName'] = (fmiModelDescription.attrib['modelName'] + '_QSS')
if ('numberOfEventIndicators' in fmiModelDescription.attrib):
fmiModelDescription.attrib['numberOfEventIndicators'] = '0'
guid_placeholder = '@FMU-QSS_GUID@'
fmiModelDescription.attrib['guid'] = guid_placeholder
ModelExchange = root.find('ModelExchange')
if (ModelExchange is None):
print(('\nModelExchange not found in ' + me_xml_name))
sys.exit(1)
if ('modelIdentifier' in ModelExchange.attrib):
ModelExchange.attrib['modelIdentifier'] = (ModelExchange.attrib['modelIdentifier'] + '_QSS')
ModelVariables = root.find('ModelVariables')
if (ModelVariables is None):
print(('\nModelVariables not found in ' + me_xml_name))
sys.exit(1)
VendorAnnotations = root.find('VendorAnnotations')
if (VendorAnnotations is None):
VendorAnnotations = etree.Element('VendorAnnotations')
ModelVariables.addprevious(VendorAnnotations)
QSS = etree.SubElement(VendorAnnotations, 'Tool', attrib={'name': 'QSS'})
Annotations = etree.SubElement(QSS, 'Annotations')
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'qss'), ('value', args.qss)]))
if (args.rTol is not None):
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'rTol'), ('value', '{:.16f}'.format(args.rTol))]))
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'aTol'), ('value', '{:.16f}'.format(args.aTol))]))
if (args.tEnd is not None):
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'tEnd'), ('value', '{:.16f}'.format(args.tEnd))]))
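# Emit FMU_QSS_options.hh so the wrapper is compiled with the chosen QSS method, tolerances, and end time (negative values mean unspecified).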
try:
QSS_option_name = 'FMU_QSS_options.hh'
if (sys.version_info >= (3, 0)):
QSS_option_file = open(QSS_option_name, 'w', newline='\n')
else:
QSS_option_file = open(QSS_option_name, 'wb')
except:
print(('\nQSS options header open failed: ' + QSS_option_name))
sys.exit(1)
try:
QSS_option_file.write('#ifndef FMU_QSS_options_hh_INCLUDED\n')
QSS_option_file.write('#define FMU_QSS_options_hh_INCLUDED\n')
QSS_option_file.write((('QSS::options::QSS const fmu_qss_qss( QSS::options::QSS::' + args.qss) + ' );\n'))
if (args.rTol is not None):
QSS_option_file.write((('double const fmu_qss_rTol( ' + '{:.16f}'.format(args.rTol)) + ' );\n'))
else:
QSS_option_file.write('double const fmu_qss_rTol( -1.0 ); // Negative => Unspecified\n')
QSS_option_file.write((('double const fmu_qss_aTol( ' + '{:.16f}'.format(args.aTol)) + ' );\n'))
if (args.tEnd is not None):
QSS_option_file.write((('double const fmu_qss_tEnd( ' + '{:.16f}'.format(args.tEnd)) + ' );\n'))
else:
QSS_option_file.write('double const fmu_qss_tEnd( -1.0 ); // Negative => Unspecified\n')
QSS_option_file.write('#endif\n')
QSS_option_file.close()
except Exception as err:
print(((('\nQSS options header write failed: ' + QSS_option_name) + ': ') + str(err)))
sys.exit(1)
ScalarVariables = ModelVariables.findall('ScalarVariable')
try:
ModelStructure = root.find('ModelStructure')
Derivatives = ModelStructure.find('Derivatives')
Unknowns = Derivatives.findall('Unknown')
except:
Unknowns = []
derivatives_indexes = set()
for Unknown in Unknowns:
try:
derivatives_indexes.add(int(Unknown.attrib['index']))
except:
pass
is_state = {i: False for i in range(1, (len(ScalarVariables) + 1))}
for i in range(len(ScalarVariables)):
v = ScalarVariables[i]
Real = v.find('Real')
if (Real is not None):
derivative_of = (Real.attrib['derivative'] if ('derivative' in Real.attrib) else None)
if (derivative_of is not None):
try:
derivative_of_index = int(derivative_of)
if ((i + 1) in derivatives_indexes):
is_state[derivative_of_index] = True
except Exception as err:
name = (v.attrib['name'] if ('name' in v.attrib) else '')
print(((('Non-integer derivative in ' + name) + ': ') + str(derivative_of)))
io = {}
i = o = 0
outputs = []
n_real = n_integer = n_boolean = n_string = 0
n_input_real = n_output_real = 0
try:
n_input_real_max_order = n_output_real_max_order = int(args.qss[(- 1)])
except Exception as err:
print(('\nFMU-QSS XML generation failed: QSS method order not identified from last character of qss argument: ' + str(args.qss)))
sys.exit(1)
for v in ScalarVariables:
i += 1
a = v.attrib
name = (a['name'] if ('name' in a) else '')
causality = (a['causality'] if ('causality' in a) else 'local')
variability = (a['variability'] if ('variability' in a) else 'continuous')
previous = v.getprevious()
comment = (previous if ((previous is not None) and (previous.tag is etree.Comment) and str(previous).startswith(('<!-- Variable with index #', '<!-- Index for next variable = '))) else None)
if ((causality in ('input', 'output')) and (not ((causality == 'output') and name.startswith('__zc_')))):
o += 1
io[i] = o
Real = v.find('Real')
Integer = v.find('Integer')
Boolean = v.find('Boolean')
String = v.find('String')
if (Real is not None):
n_real += 1
elif (Integer is not None):
n_integer += 1
elif (Boolean is not None):
n_boolean += 1
elif (String is not None):
n_string += 1
if (causality == 'output'):
outputs.append(o)
if (Real is not None):
n_output_real += 1
elif (Real is not None):
n_input_real += 1
set_comment = True
elif ((causality == 'local') and (variability == 'continuous') and is_state[i]):
a['causality'] = 'output'
o += 1
io[i] = o
outputs.append(o)
if ('initial' in a):
del a['initial']
set_comment = True
Real = v.find('Real')
if (Real is not None):
n_real += 1
n_output_real += 1
if ('start' in Real.attrib):
del Real.attrib['start']
else:
print(('\nFMU-ME (continuous) state variable is not Real: ' + name))
sys.exit(1)
else:
ModelVariables.remove(v)
if (comment is not None):
ModelVariables.remove(comment)
set_comment = False
if set_comment:
if (comment is not None):
comment.text = ((((' Variable with index #' + str(o)) + ' (') + str(i)) + ') ')
else:
v.addprevious(etree.Comment(((((' Variable with index #' + str(o)) + ' (') + str(i)) + ') ')))
ScalarVariables = ModelVariables.findall('ScalarVariable')
for v in ScalarVariables:
Real = v.find('Real')
if (Real is not None):
derivative = (Real.attrib['derivative'] if ('derivative' in Real.attrib) else None)
if (derivative is not None):
try:
derivative_index = int(derivative)
try:
Real.attrib['derivative'] = str(io[derivative_index])
except Exception as err:
print(((('Derivative re-indexing failed for ' + (v.attrib['name'] if ('name' in v.attrib) else '')) + ': ') + str(err)))
except:
pass
ModelStructure = root.find('ModelStructure')
if (ModelStructure is None):
ModelStructure = etree.Element('ModelStructure')
ModelVariables.addnext(ModelStructure)
for g in ('Derivatives', 'DiscreteStates', 'InitialUnknowns'):
e = ModelStructure.find(g)
if (e is not None):
ModelStructure.remove(e)
Outputs = ModelStructure.find('Outputs')
if (Outputs is None):
Outputs = etree.SubElement(ModelStructure, 'Outputs')
Unknowns = Outputs.findall('Unknown')
for u in Unknowns:
Outputs.remove(u)
for o in outputs:
etree.SubElement(Outputs, 'Unknown', attrib=OrderedDict([('index', str(o)), ('dependencies', '')]))
try:
qss_xml_name = ('FMU-QSS_' + os.path.basename(me_xml_name))
tree.write(qss_xml_name, encoding='UTF-8', xml_declaration=True, pretty_print=True)
except Exception as err:
print(((('\nFMU-QSS XML write failed: ' + qss_xml_name) + ': ') + str(err)))
sys.exit(1)
try:
subprocess.call(['fmu-uuid', qss_xml_name, guid_placeholder, qss_xml_name, 'FMU_QSS_GUID.hh', 'FMU_QSS_GUID'])
except OSError as e:
if (e.errno == errno.ENOENT):
print('\nFMU-QSS XML GUID computation failed: fmu-uuid program not in PATH')
else:
print(('\nFMU-QSS XML GUID computation failed: ' + str(e)))
print('Generic no-check GUID header generated')
try:
guid_name = 'FMU_QSS_GUID.hh'
if (sys.version_info >= (3, 0)):
guid_file = open(guid_name, 'w', newline='\n')
else:
guid_file = open(guid_name, 'wb')
except:
print(('\nGUID header open failed: ' + guid_name))
sys.exit(1)
try:
guid_file.write('#ifndef FMU_QSS_GUID\n')
guid_file.write('#define FMU_QSS_GUID "FMU-QSS_GUID" // No-check value\n')
guid_file.write('#endif\n')
guid_file.close()
except:
print(('\nGUID header write failed: ' + guid_name))
sys.exit(1)
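# Emit the FMU_QSS_defines.hh sizing header with the variable and order counts gathered above.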
try:
sizing_name = 'FMU_QSS_defines.hh'
if (sys.version_info >= (3, 0)):
sizing_file = open(sizing_name, 'w', newline='\n')
else:
sizing_file = open(sizing_name, 'wb')
except:
print(('\nSizing header open failed: ' + sizing_name))
sys.exit(1)
try:
sizing_file.write('#ifndef FMU_QSS_defines_hh_INCLUDED\n')
sizing_file.write('#define FMU_QSS_defines_hh_INCLUDED\n')
sizing_file.write('// Note: Sizes are >=1 to avoid illegal 0-sized arrays\n')
sizing_file.write('#define BUFFER 1024\n')
sizing_file.write((('#define N_REAL ' + str(max(n_real, 1))) + '\n'))
sizing_file.write((('#define N_INTEGER ' + str(max(n_integer, 1))) + '\n'))
sizing_file.write((('#define N_BOOLEAN ' + str(max(n_boolean, 1))) + '\n'))
sizing_file.write((('#define N_STRING ' + str(max(n_string, 1))) + '\n'))
sizing_file.write((('#define N_INPUT_REAL ' + str(max(n_input_real, 1))) + '\n'))
sizing_file.write((('#define N_INPUT_REAL_MAX_ORDER ' + str(max(n_input_real_max_order, 1))) + '\n'))
sizing_file.write((('#define N_OUTPUT_REAL ' + str(max(n_output_real, 1))) + '\n'))
sizing_file.write((('#define N_OUTPUT_REAL_MAX_ORDER ' + str(max(n_output_real_max_order, 1))) + '\n'))
sizing_file.write('#endif\n')
sizing_file.close()
except Exception as err:
print(((('\nSizing header write failed: ' + sizing_name) + ': ') + str(err)))
sys.exit(1)
if me_fmu_name:
try:
qss_name = (me_name + '_QSS')
if os.path.exists(qss_name):
if os.path.isdir(qss_name):
shutil.rmtree(qss_name)
elif os.path.isfile(qss_name):
os.remove(qss_name)
os.mkdir(qss_name)
os.mkdir(os.path.join(qss_name, 'binaries'))
if (not platform.machine().endswith('64')):
print('\nFMU-QSS generation only supports 64-bit OS at this time')
sys.exit(1)
if sys.platform.startswith('linux'):
binaries_dir = (qss_name + '/binaries/linux64')
elif sys.platform.startswith('win'):
binaries_dir = (qss_name + '\\binaries\\win64')
else:
print('\nPlatform is not supported for FMU-QSS generation')
sys.exit(1)
os.mkdir(binaries_dir)
os.mkdir(((qss_name + os.sep) + 'resources'))
except Exception as err:
print(('\nFMU-QSS directory tree setup failed: ' + str(err)))
sys.exit(1)
try:
QSS = os.environ.get('QSS')
QSS_bin = os.environ.get('QSS_bin')
PlatformCompiler = os.environ.get('PlatformCompiler')
if (QSS and QSS_bin and PlatformCompiler):
QSS_src = os.path.join(QSS, 'src', 'QSS')
fmu_src = os.path.join(QSS_src, 'fmu')
if os.path.exists('src'):
if os.path.isdir('src'):
shutil.rmtree('src')
elif os.path.isfile('src'):
os.remove('src')
src_bld = 'src'
fmu_bld = os.path.join('src', 'QSS', 'fmu')
os.makedirs(fmu_bld)
shutil.copy('FMU_QSS_defines.hh', fmu_bld)
shutil.copy('FMU_QSS_GUID.hh', fmu_bld)
shutil.copy(os.path.join(fmu_src, PlatformCompiler, 'GNUmakefile'), src_bld)
cwd = os.getcwd()
os.chdir(src_bld)
with open('GNUmakefile', 'r') as sources:
lines = sources.readlines()
with open('GNUmakefile', 'w') as sources:
for line in lines:
if line.startswith(('DLB := $(BIN_PATH)' + os.sep)):
sources.write(line.replace(('$(BIN_PATH)' + os.sep), ''))
else:
sources.write(line)
try:
import psutil
n_processors = psutil.cpu_count()
except:
print('\nNon-parallel make used: psutil processor count lookup failed')
n_processors = 1
try:
subprocess.call(['make', '-j', str(n_processors)])
try:
if sys.platform.startswith('linux'):
qss_lib = os.path.join(cwd, binaries_dir, (qss_name + '.so'))
if os.path.isfile(qss_lib):
os.remove(qss_lib)
os.rename('libFMU-QSS.so', qss_lib)
elif sys.platform.startswith('win'):
qss_lib = os.path.join(cwd, binaries_dir, (qss_name + '.dll'))
if os.path.isfile(qss_lib):
os.remove(qss_lib)
os.rename('libFMU-QSS.dll', qss_lib)
except Exception as err:
print(('\nFMU-QSS library move into staging directory failed: ' + str(err)))
except Exception as err:
print(('\nFMU-QSS library make failed: ' + str(err)))
os.chdir(cwd)
shutil.rmtree(src_bld)
else:
print("\nFMU-QSS library can't be built: QSS, QSS_bin, and PlatformCompiler environment variables are not all set")
except Exception as err:
print(('\nFMU-QSS library build failed: ' + str(err)))
try:
shutil.copyfile(qss_xml_name, ((qss_name + os.sep) + 'modelDescription.xml'))
shutil.copy(me_fmu_name, ((qss_name + os.sep) + 'resources'))
except Exception as err:
print(('\nFMU-QSS file setup failed: ' + str(err)))
sys.exit(1)
try:
qss_fmu_name = (qss_name + '.fmu')
if os.path.exists(qss_fmu_name):
if os.path.isfile(qss_fmu_name):
os.remove(qss_fmu_name)
elif os.path.isdir(qss_fmu_name):
shutil.rmtree(qss_fmu_name)
zip_file = ZipFile(qss_fmu_name, mode='w')
os.chdir(qss_name)
for (root, dirs, files) in os.walk('.'):
dirs.sort()
for dir in dirs:
zip_file.write(os.path.join(root, dir))
files.sort()
for file in files:
zip_file.write(os.path.join(root, file))
os.chdir('..')
zip_file.close()
except Exception as err:
print(('\nFMU-QSS zip into .fmu failed: ' + str(err)))
sys.exit(1) | Generate an FMU-QSS from an FMU-ME | bin/FMU-QSS.gen.py | fmu_qss_gen | NREL/SOEP-QSS | 13 | python | def fmu_qss_gen():
parser = argparse.ArgumentParser()
parser.add_argument('ME', help='FMU-ME fmu or xml file', default='modelDescription.xml')
parser.add_argument('--qss', help='QSS method (x)(LI)QSS(1|2|3) [QSS2]', default='QSS2')
parser.add_argument('--rTol', help='relative tolerance [FMU]', type=float)
parser.add_argument('--aTol', help='absolute tolerance [1e-6]', type=float, default=1e-06)
parser.add_argument('--tEnd', help='simulation end time [FMU]', type=float)
args = parser.parse_args()
args.qss = args.qss.upper()
if (args.qss not in ('QSS1', 'QSS2', 'QSS3', 'LIQSS1', 'LIQSS2', 'LIQSS3', 'xQSS1', 'xQSS2', 'xQSS3')):
print((('\nUnsupported QSS method: ' + args.qss) + ': Must be one of QSS1, QSS2, QSS3, LIQSS1, LIQSS2, LIQSS3, xQSS1, xQSS2, xQSS3'))
sys.exit(1)
if ((args.rTol is not None) and (args.rTol < 0.0)):
print(('\nNegative rTol: ' + '{:.16f}'.format(args.rTol)))
sys.exit(1)
if (args.aTol <= 0.0):
print(('\nNonpositive aTol: ' + '{:.16f}'.format(args.aTol)))
sys.exit(1)
if ((args.tEnd is not None) and (args.tEnd < 0.0)):
print(('\nNegative tEnd: ' + '{:.16f}'.format(args.tEnd)))
sys.exit(1)
ME_lower = args.ME.lower()
if ME_lower.endswith('.xml'):
me_fmu_name = me_name = None
me_xml_name = args.ME
elif ME_lower.endswith('.fmu'):
me_fmu_name = args.ME
me_name = os.path.splitext(os.path.basename(me_fmu_name))[0]
me_xml_name = 'modelDescription.xml'
else:
print(('\nFMU-ME input is not a .fmu or .xml file: ' + args.ME))
sys.exit(1)
if me_fmu_name:
try:
zip_file = ZipFile(me_fmu_name)
zip_file.extract('modelDescription.xml')
zip_file.close()
except:
print('\nExtracting modelDescription.xml from FMU-ME fmu failed')
sys.exit(1)
try:
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse(me_xml_name, parser)
root = tree.getroot()
except:
print(('\nFMU-ME XML open and parse failed: ' + me_xml_name))
sys.exit(1)
if (root.tag != 'fmiModelDescription'):
print(('\nRoot is not fmiModelDescription in FMU-ME XML: ' + me_xml_name))
sys.exit(1)
fmiModelDescription = root
if ('modelName' in fmiModelDescription.attrib):
fmiModelDescription.attrib['modelName'] = (fmiModelDescription.attrib['modelName'] + '_QSS')
if ('numberOfEventIndicators' in fmiModelDescription.attrib):
fmiModelDescription.attrib['numberOfEventIndicators'] = '0'
guid_placeholder = '@FMU-QSS_GUID@'
fmiModelDescription.attrib['guid'] = guid_placeholder
ModelExchange = root.find('ModelExchange')
if (ModelExchange is None):
print(('\nModelExchange not found in ' + me_xml_name))
sys.exit(1)
if ('modelIdentifier' in ModelExchange.attrib):
ModelExchange.attrib['modelIdentifier'] = (ModelExchange.attrib['modelIdentifier'] + '_QSS')
ModelVariables = root.find('ModelVariables')
if (ModelVariables is None):
print(('\nModelVariables not found in ' + me_xml_name))
sys.exit(1)
VendorAnnotations = root.find('VendorAnnotations')
if (VendorAnnotations is None):
VendorAnnotations = etree.Element('VendorAnnotations')
ModelVariables.addprevious(VendorAnnotations)
QSS = etree.SubElement(VendorAnnotations, 'Tool', attrib={'name': 'QSS'})
Annotations = etree.SubElement(QSS, 'Annotations')
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'qss'), ('value', args.qss)]))
if (args.rTol is not None):
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'rTol'), ('value', '{:.16f}'.format(args.rTol))]))
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'aTol'), ('value', '{:.16f}'.format(args.aTol))]))
if (args.tEnd is not None):
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'tEnd'), ('value', '{:.16f}'.format(args.tEnd))]))
try:
QSS_option_name = 'FMU_QSS_options.hh'
if (sys.version_info >= (3, 0)):
QSS_option_file = open(QSS_option_name, 'w', newline='\n')
else:
QSS_option_file = open(QSS_option_name, 'wb')
except:
print(('\nQSS options header open failed: ' + QSS_option_name))
sys.exit(1)
try:
QSS_option_file.write('#ifndef FMU_QSS_options_hh_INCLUDED\n')
QSS_option_file.write('#define FMU_QSS_options_hh_INCLUDED\n')
QSS_option_file.write((('QSS::options::QSS const fmu_qss_qss( QSS::options::QSS::' + args.qss) + ' );\n'))
if (args.rTol is not None):
QSS_option_file.write((('double const fmu_qss_rTol( ' + '{:.16f}'.format(args.rTol)) + ' );\n'))
else:
QSS_option_file.write('double const fmu_qss_rTol( -1.0 ); // Negative => Unspecified\n')
QSS_option_file.write((('double const fmu_qss_aTol( ' + '{:.16f}'.format(args.aTol)) + ' );\n'))
if (args.tEnd is not None):
QSS_option_file.write((('double const fmu_qss_tEnd( ' + '{:.16f}'.format(args.tEnd)) + ' );\n'))
else:
QSS_option_file.write('double const fmu_qss_tEnd( -1.0 ); // Negative => Unspecified\n')
QSS_option_file.write('#endif\n')
QSS_option_file.close()
except Exception as err:
print(((('\nQSS options header write failed: ' + QSS_option_name) + ': ') + str(err)))
sys.exit(1)
ScalarVariables = ModelVariables.findall('ScalarVariable')
try:
ModelStructure = root.find('ModelStructure')
Derivatives = ModelStructure.find('Derivatives')
Unknowns = Derivatives.findall('Unknown')
except:
Unknowns = []
derivatives_indexes = set()
for Unknown in Unknowns:
try:
derivatives_indexes.add(int(Unknown.attrib['index']))
except:
pass
is_state = {i: False for i in range(1, (len(ScalarVariables) + 1))}
for i in range(len(ScalarVariables)):
v = ScalarVariables[i]
Real = v.find('Real')
if (Real is not None):
derivative_of = (Real.attrib['derivative'] if ('derivative' in Real.attrib) else None)
if (derivative_of is not None):
try:
derivative_of_index = int(derivative_of)
if ((i + 1) in derivatives_indexes):
is_state[derivative_of_index] = True
except Exception as err:
name = (v.attrib['name'] if ('name' in v.attrib) else '')
print(((('Non-integer derivative in ' + name) + ': ') + str(derivative_of)))
io = {}
i = o = 0
outputs = []
n_real = n_integer = n_boolean = n_string = 0
n_input_real = n_output_real = 0
try:
n_input_real_max_order = n_output_real_max_order = int(args.qss[(- 1)])
except Exception as err:
print(('\nFMU-QSS XML generation failed: QSS method order not identified from last character of qss argument: ' + str(args.qss)))
sys.exit(1)
for v in ScalarVariables:
i += 1
a = v.attrib
name = (a['name'] if ('name' in a) else '')
causality = (a['causality'] if ('causality' in a) else 'local')
variability = (a['variability'] if ('variability' in a) else 'continuous')
previous = v.getprevious()
comment = (previous if ((previous is not None) and (previous.tag is etree.Comment) and str(previous).startswith(('<!-- Variable with index #', '<!-- Index for next variable = '))) else None)
if ((causality in ('input', 'output')) and (not ((causality == 'output') and name.startswith('__zc_')))):
o += 1
io[i] = o
Real = v.find('Real')
Integer = v.find('Integer')
Boolean = v.find('Boolean')
String = v.find('String')
if (Real is not None):
n_real += 1
elif (Integer is not None):
n_integer += 1
elif (Boolean is not None):
n_boolean += 1
elif (String is not None):
n_string += 1
if (causality == 'output'):
outputs.append(o)
if (Real is not None):
n_output_real += 1
elif (Real is not None):
n_input_real += 1
set_comment = True
elif ((causality == 'local') and (variability == 'continuous') and is_state[i]):
a['causality'] = 'output'
o += 1
io[i] = o
outputs.append(o)
if ('initial' in a):
del a['initial']
set_comment = True
Real = v.find('Real')
if (Real is not None):
n_real += 1
n_output_real += 1
if ('start' in Real.attrib):
del Real.attrib['start']
else:
print(('\nFMU-ME (continuous) state variable is not Real: ' + name))
sys.exit(1)
else:
ModelVariables.remove(v)
if (comment is not None):
ModelVariables.remove(comment)
set_comment = False
if set_comment:
if (comment is not None):
comment.text = ((((' Variable with index #' + str(o)) + ' (') + str(i)) + ') ')
else:
v.addprevious(etree.Comment(((((' Variable with index #' + str(o)) + ' (') + str(i)) + ') ')))
ScalarVariables = ModelVariables.findall('ScalarVariable')
for v in ScalarVariables:
Real = v.find('Real')
if (Real is not None):
derivative = (Real.attrib['derivative'] if ('derivative' in Real.attrib) else None)
if (derivative is not None):
try:
derivative_index = int(derivative)
try:
Real.attrib['derivative'] = str(io[derivative_index])
except Exception as err:
print(((('Derivative re-indexing failed for ' + (v.attrib['name'] if ('name' in v.attrib) else '')) + ': ') + str(err)))
except:
pass
ModelStructure = root.find('ModelStructure')
if (ModelStructure is None):
ModelStructure = etree.Element('ModelStructure')
ModelVariables.addnext(ModelStructure)
for g in ('Derivatives', 'DiscreteStates', 'InitialUnknowns'):
e = ModelStructure.find(g)
if (e is not None):
ModelStructure.remove(e)
Outputs = ModelStructure.find('Outputs')
if (Outputs is None):
Outputs = etree.SubElement(ModelStructure, 'Outputs')
Unknowns = Outputs.findall('Unknown')
for u in Unknowns:
Outputs.remove(u)
for o in outputs:
etree.SubElement(Outputs, 'Unknown', attrib=OrderedDict([('index', str(o)), ('dependencies', '')]))
try:
qss_xml_name = ('FMU-QSS_' + os.path.basename(me_xml_name))
tree.write(qss_xml_name, encoding='UTF-8', xml_declaration=True, pretty_print=True)
except Exception as err:
print(((('\nFMU-QSS XML write failed: ' + qss_xml_name) + ': ') + str(err)))
sys.exit(1)
try:
subprocess.call(['fmu-uuid', qss_xml_name, guid_placeholder, qss_xml_name, 'FMU_QSS_GUID.hh', 'FMU_QSS_GUID'])
except OSError as e:
if (e.errno == errno.ENOENT):
print('\nFMU-QSS XML GUID computation failed: fmu-uuid program not in PATH')
else:
print(('\nFMU-QSS XML GUID computation failed: ' + str(e)))
print('Generic no-check GUID header generated')
try:
guid_name = 'FMU_QSS_GUID.hh'
if (sys.version_info >= (3, 0)):
guid_file = open(guid_name, 'w', newline='\n')
else:
guid_file = open(guid_name, 'wb')
except:
print(('\nGUID header open failed: ' + guid_name))
sys.exit(1)
try:
guid_file.write('#ifndef FMU_QSS_GUID\n')
guid_file.write('#define FMU_QSS_GUID "FMU-QSS_GUID" // No-check value\n')
guid_file.write('#endif\n')
guid_file.close()
except:
print(('\nGUID header write failed: ' + guid_name))
sys.exit(1)
try:
sizing_name = 'FMU_QSS_defines.hh'
if (sys.version_info >= (3, 0)):
sizing_file = open(sizing_name, 'w', newline='\n')
else:
sizing_file = open(sizing_name, 'wb')
except:
print(('\nSizing header open failed: ' + sizing_name))
sys.exit(1)
try:
sizing_file.write('#ifndef FMU_QSS_defines_hh_INCLUDED\n')
sizing_file.write('#define FMU_QSS_defines_hh_INCLUDED\n')
sizing_file.write('// Note: Sizes are >=1 to avoid illegal 0-sized arrays\n')
sizing_file.write('#define BUFFER 1024\n')
sizing_file.write((('#define N_REAL ' + str(max(n_real, 1))) + '\n'))
sizing_file.write((('#define N_INTEGER ' + str(max(n_integer, 1))) + '\n'))
sizing_file.write((('#define N_BOOLEAN ' + str(max(n_boolean, 1))) + '\n'))
sizing_file.write((('#define N_STRING ' + str(max(n_string, 1))) + '\n'))
sizing_file.write((('#define N_INPUT_REAL ' + str(max(n_input_real, 1))) + '\n'))
sizing_file.write((('#define N_INPUT_REAL_MAX_ORDER ' + str(max(n_input_real_max_order, 1))) + '\n'))
sizing_file.write((('#define N_OUTPUT_REAL ' + str(max(n_output_real, 1))) + '\n'))
sizing_file.write((('#define N_OUTPUT_REAL_MAX_ORDER ' + str(max(n_output_real_max_order, 1))) + '\n'))
sizing_file.write('#endif\n')
sizing_file.close()
except Exception as err:
print(((('\nSizing header write failed: ' + sizing_name) + ': ') + str(err)))
sys.exit(1)
if me_fmu_name:
try:
qss_name = (me_name + '_QSS')
if os.path.exists(qss_name):
if os.path.isdir(qss_name):
shutil.rmtree(qss_name)
elif os.path.isfile(qss_name):
os.remove(qss_name)
os.mkdir(qss_name)
os.mkdir(os.path.join(qss_name, 'binaries'))
if (not platform.machine().endswith('64')):
print('\nFMU-QSS generation only supports 64-bit OS at this time')
sys.exit(1)
if sys.platform.startswith('linux'):
binaries_dir = (qss_name + '/binaries/linux64')
elif sys.platform.startswith('win'):
binaries_dir = (qss_name + '\\binaries\\win64')
else:
print('\nPlatform is not supported for FMU-QSS generation')
sys.exit(1)
os.mkdir(binaries_dir)
os.mkdir(((qss_name + os.sep) + 'resources'))
except Exception as err:
print(('\nFMU-QSS directory tree setup failed: ' + str(err)))
sys.exit(1)
try:
QSS = os.environ.get('QSS')
QSS_bin = os.environ.get('QSS_bin')
PlatformCompiler = os.environ.get('PlatformCompiler')
if (QSS and QSS_bin and PlatformCompiler):
QSS_src = os.path.join(QSS, 'src', 'QSS')
fmu_src = os.path.join(QSS_src, 'fmu')
if os.path.exists('src'):
if os.path.isdir('src'):
shutil.rmtree('src')
elif os.path.isfile('src'):
os.remove('src')
src_bld = 'src'
fmu_bld = os.path.join('src', 'QSS', 'fmu')
os.makedirs(fmu_bld)
shutil.copy('FMU_QSS_defines.hh', fmu_bld)
shutil.copy('FMU_QSS_GUID.hh', fmu_bld)
shutil.copy(os.path.join(fmu_src, PlatformCompiler, 'GNUmakefile'), src_bld)
cwd = os.getcwd()
os.chdir(src_bld)
with open('GNUmakefile', 'r') as sources:
lines = sources.readlines()
with open('GNUmakefile', 'w') as sources:
for line in lines:
if line.startswith(('DLB := $(BIN_PATH)' + os.sep)):
sources.write(line.replace(('$(BIN_PATH)' + os.sep), ''))
else:
sources.write(line)
try:
import psutil
n_processors = psutil.cpu_count()
except:
print('\nNon-parallel make used: psutil processor count lookup failed')
n_processors = 1
try:
subprocess.call(['make', '-j', str(n_processors)])
try:
if sys.platform.startswith('linux'):
qss_lib = os.path.join(cwd, binaries_dir, (qss_name + '.so'))
if os.path.isfile(qss_lib):
os.remove(qss_lib)
os.rename('libFMU-QSS.so', qss_lib)
elif sys.platform.startswith('win'):
qss_lib = os.path.join(cwd, binaries_dir, (qss_name + '.dll'))
if os.path.isfile(qss_lib):
os.remove(qss_lib)
os.rename('libFMU-QSS.dll', qss_lib)
except Exception as err:
print(('\nFMU-QSS library move into staging directory failed: ' + str(err)))
except Exception as err:
print(('\nFMU-QSS library make failed: ' + str(err)))
os.chdir(cwd)
shutil.rmtree(src_bld)
else:
print("\nFMU-QSS library can't be built: QSS, QSS_bin, and PlatformCompiler environment variables are not all set")
except Exception as err:
print(('\nFMU-QSS library build failed: ' + str(err)))
try:
shutil.copyfile(qss_xml_name, ((qss_name + os.sep) + 'modelDescription.xml'))
shutil.copy(me_fmu_name, ((qss_name + os.sep) + 'resources'))
except Exception as err:
print(('\nFMU-QSS file setup failed: ' + str(err)))
sys.exit(1)
try:
qss_fmu_name = (qss_name + '.fmu')
if os.path.exists(qss_fmu_name):
if os.path.isfile(qss_fmu_name):
os.remove(qss_fmu_name)
elif os.path.isdir(qss_fmu_name):
shutil.rmtree(qss_fmu_name)
zip_file = ZipFile(qss_fmu_name, mode='w')
os.chdir(qss_name)
for (root, dirs, files) in os.walk('.'):
dirs.sort()
for dir in dirs:
zip_file.write(os.path.join(root, dir))
files.sort()
for file in files:
zip_file.write(os.path.join(root, file))
os.chdir('..')
zip_file.close()
except Exception as err:
print(('\nFMU-QSS zip into .fmu failed: ' + str(err)))
sys.exit(1) | def fmu_qss_gen():
parser = argparse.ArgumentParser()
parser.add_argument('ME', help='FMU-ME fmu or xml file', default='modelDescription.xml')
parser.add_argument('--qss', help='QSS method (x)(LI)QSS(1|2|3) [QSS2]', default='QSS2')
parser.add_argument('--rTol', help='relative tolerance [FMU]', type=float)
parser.add_argument('--aTol', help='absolute tolerance [1e-6]', type=float, default=1e-06)
parser.add_argument('--tEnd', help='simulation end time [FMU]', type=float)
args = parser.parse_args()
args.qss = args.qss.upper()
if (args.qss not in ('QSS1', 'QSS2', 'QSS3', 'LIQSS1', 'LIQSS2', 'LIQSS3', 'xQSS1', 'xQSS2', 'xQSS3')):
print((('\nUnsupported QSS method: ' + args.qss) + ': Must be one of QSS1, QSS2, QSS3, LIQSS1, LIQSS2, LIQSS3, xQSS1, xQSS2, xQSS3'))
sys.exit(1)
if ((args.rTol is not None) and (args.rTol < 0.0)):
print(('\nNegative rTol: ' + '{:.16f}'.format(args.rTol)))
sys.exit(1)
if (args.aTol <= 0.0):
print(('\nNonpositive aTol: ' + '{:.16f}'.format(args.aTol)))
sys.exit(1)
if ((args.tEnd is not None) and (args.tEnd < 0.0)):
print(('\nNegative tEnd: ' + '{:.16f}'.format(args.tEnd)))
sys.exit(1)
ME_lower = args.ME.lower()
if ME_lower.endswith('.xml'):
me_fmu_name = me_name = None
me_xml_name = args.ME
elif ME_lower.endswith('.fmu'):
me_fmu_name = args.ME
me_name = os.path.splitext(os.path.basename(me_fmu_name))[0]
me_xml_name = 'modelDescription.xml'
else:
print(('\nFMU-ME input is not a .fmu or .xml file: ' + args.ME))
sys.exit(1)
if me_fmu_name:
try:
zip_file = ZipFile(me_fmu_name)
zip_file.extract('modelDescription.xml')
zip_file.close()
except:
print('\nExtracting modelDescription.xml from FMU-ME fmu failed')
sys.exit(1)
try:
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse(me_xml_name, parser)
root = tree.getroot()
except:
print(('\nFMU-ME XML open and parse failed: ' + me_xml_name))
sys.exit(1)
if (root.tag != 'fmiModelDescription'):
print(('\nRoot is not fmiModelDescription in FMU-ME XML: ' + me_xml_name))
sys.exit(1)
fmiModelDescription = root
if ('modelName' in fmiModelDescription.attrib):
fmiModelDescription.attrib['modelName'] = (fmiModelDescription.attrib['modelName'] + '_QSS')
if ('numberOfEventIndicators' in fmiModelDescription.attrib):
fmiModelDescription.attrib['numberOfEventIndicators'] = '0'
guid_placeholder = '@FMU-QSS_GUID@'
fmiModelDescription.attrib['guid'] = guid_placeholder
ModelExchange = root.find('ModelExchange')
if (ModelExchange is None):
print(('\nModelExchange not found in ' + me_xml_name))
sys.exit(1)
if ('modelIdentifier' in ModelExchange.attrib):
ModelExchange.attrib['modelIdentifier'] = (ModelExchange.attrib['modelIdentifier'] + '_QSS')
ModelVariables = root.find('ModelVariables')
if (ModelVariables is None):
print(('\nModelVariables not found in ' + me_xml_name))
sys.exit(1)
VendorAnnotations = root.find('VendorAnnotations')
if (VendorAnnotations is None):
VendorAnnotations = etree.Element('VendorAnnotations')
ModelVariables.addprevious(VendorAnnotations)
QSS = etree.SubElement(VendorAnnotations, 'Tool', attrib={'name': 'QSS'})
Annotations = etree.SubElement(QSS, 'Annotations')
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'qss'), ('value', args.qss)]))
if (args.rTol is not None):
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'rTol'), ('value', '{:.16f}'.format(args.rTol))]))
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'aTol'), ('value', '{:.16f}'.format(args.aTol))]))
if (args.tEnd is not None):
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'tEnd'), ('value', '{:.16f}'.format(args.tEnd))]))
try:
QSS_option_name = 'FMU_QSS_options.hh'
if (sys.version_info >= (3, 0)):
QSS_option_file = open(QSS_option_name, 'w', newline='\n')
else:
QSS_option_file = open(QSS_option_name, 'wb')
except:
print(('\nQSS options header open failed: ' + QSS_option_name))
sys.exit(1)
try:
QSS_option_file.write('#ifndef FMU_QSS_options_hh_INCLUDED\n')
QSS_option_file.write('#define FMU_QSS_options_hh_INCLUDED\n')
QSS_option_file.write((('QSS::options::QSS const fmu_qss_qss( QSS::options::QSS::' + args.qss) + ' );\n'))
if (args.rTol is not None):
QSS_option_file.write((('double const fmu_qss_rTol( ' + '{:.16f}'.format(args.rTol)) + ' );\n'))
else:
QSS_option_file.write('double const fmu_qss_rTol( -1.0 ); // Negative => Unspecified\n')
QSS_option_file.write((('double const fmu_qss_aTol( ' + '{:.16f}'.format(args.aTol)) + ' );\n'))
if (args.tEnd is not None):
QSS_option_file.write((('double const fmu_qss_tEnd( ' + '{:.16f}'.format(args.tEnd)) + ' );\n'))
else:
QSS_option_file.write('double const fmu_qss_tEnd( -1.0 ); // Negative => Unspecified\n')
QSS_option_file.write('#endif\n')
QSS_option_file.close()
except Exception as err:
print(((('\nQSS options header write failed: ' + QSS_option_name) + ': ') + str(err)))
sys.exit(1)
ScalarVariables = ModelVariables.findall('ScalarVariable')
try:
ModelStructure = root.find('ModelStructure')
Derivatives = ModelStructure.find('Derivatives')
Unknowns = Derivatives.findall('Unknown')
except:
Unknowns = []
derivatives_indexes = set()
for Unknown in Unknowns:
try:
derivatives_indexes.add(int(Unknown.attrib['index']))
except:
pass
is_state = {i: False for i in range(1, (len(ScalarVariables) + 1))}
for i in range(len(ScalarVariables)):
v = ScalarVariables[i]
Real = v.find('Real')
if (Real is not None):
derivative_of = (Real.attrib['derivative'] if ('derivative' in Real.attrib) else None)
if (derivative_of is not None):
try:
derivative_of_index = int(derivative_of)
if ((i + 1) in derivatives_indexes):
is_state[derivative_of_index] = True
except Exception as err:
name = (v.attrib['name'] if ('name' in v.attrib) else '')
print(((('Non-integer derivative in ' + name) + ': ') + str(derivative_of)))
io = {}
i = o = 0
outputs = []
n_real = n_integer = n_boolean = n_string = 0
n_input_real = n_output_real = 0
try:
n_input_real_max_order = n_output_real_max_order = int(args.qss[(- 1)])
except Exception as err:
print(('\nFMU-QSS XML generation failed: QSS method order not identified from last character of qss argument: ' + str(args.qss)))
sys.exit(1)
for v in ScalarVariables:
i += 1
a = v.attrib
name = (a['name'] if ('name' in a) else '')
causality = (a['causality'] if ('causality' in a) else 'local')
variability = (a['variability'] if ('variability' in a) else 'continuous')
previous = v.getprevious()
comment = (previous if ((previous is not None) and (previous.tag is etree.Comment) and str(previous).startswith(('<!-- Variable with index #', '<!-- Index for next variable = '))) else None)
if ((causality in ('input', 'output')) and (not ((causality == 'output') and name.startswith('__zc_')))):
o += 1
io[i] = o
Real = v.find('Real')
Integer = v.find('Integer')
Boolean = v.find('Boolean')
String = v.find('String')
if (Real is not None):
n_real += 1
elif (Integer is not None):
n_integer += 1
elif (Boolean is not None):
n_boolean += 1
elif (String is not None):
n_string += 1
if (causality == 'output'):
outputs.append(o)
if (Real is not None):
n_output_real += 1
elif (Real is not None):
n_input_real += 1
set_comment = True
elif ((causality == 'local') and (variability == 'continuous') and is_state[i]):
a['causality'] = 'output'
o += 1
io[i] = o
outputs.append(o)
if ('initial' in a):
del a['initial']
set_comment = True
Real = v.find('Real')
if (Real is not None):
n_real += 1
n_output_real += 1
if ('start' in Real.attrib):
del Real.attrib['start']
else:
print(('\nFMU-ME (continuous) state variable is not Real: ' + name))
sys.exit(1)
else:
ModelVariables.remove(v)
if (comment is not None):
ModelVariables.remove(comment)
set_comment = False
if set_comment:
if (comment is not None):
comment.text = ((((' Variable with index #' + str(o)) + ' (') + str(i)) + ') ')
else:
v.addprevious(etree.Comment(((((' Variable with index #' + str(o)) + ' (') + str(i)) + ') ')))
ScalarVariables = ModelVariables.findall('ScalarVariable')
for v in ScalarVariables:
Real = v.find('Real')
if (Real is not None):
derivative = (Real.attrib['derivative'] if ('derivative' in Real.attrib) else None)
if (derivative is not None):
try:
derivative_index = int(derivative)
try:
Real.attrib['derivative'] = str(io[derivative_index])
except Exception as err:
print(((('Derivative re-indexing failed for ' + (v.attrib['name'] if ('name' in v.attrib) else '')) + ': ') + str(err)))
except:
pass
ModelStructure = root.find('ModelStructure')
if (ModelStructure is None):
ModelStructure = etree.Element('ModelStructure')
ModelVariables.addnext(ModelStructure)
for g in ('Derivatives', 'DiscreteStates', 'InitialUnknowns'):
e = ModelStructure.find(g)
if (e is not None):
ModelStructure.remove(e)
Outputs = ModelStructure.find('Outputs')
if (Outputs is None):
Outputs = etree.SubElement(ModelStructure, 'Outputs')
Unknowns = Outputs.findall('Unknown')
for u in Unknowns:
Outputs.remove(u)
for o in outputs:
etree.SubElement(Outputs, 'Unknown', attrib=OrderedDict([('index', str(o)), ('dependencies', '')]))
try:
qss_xml_name = ('FMU-QSS_' + os.path.basename(me_xml_name))
tree.write(qss_xml_name, encoding='UTF-8', xml_declaration=True, pretty_print=True)
except Exception as err:
print(((('\nFMU-QSS XML write failed: ' + qss_xml_name) + ': ') + str(err)))
sys.exit(1)
try:
subprocess.call(['fmu-uuid', qss_xml_name, guid_placeholder, qss_xml_name, 'FMU_QSS_GUID.hh', 'FMU_QSS_GUID'])
except OSError as e:
if (e.errno == errno.ENOENT):
print('\nFMU-QSS XML GUID computation failed: fmu-uuid program not in PATH')
else:
print(('\nFMU-QSS XML GUID computation failed: ' + str(e)))
print('Generic no-check GUID header generated')
try:
guid_name = 'FMU_QSS_GUID.hh'
if (sys.version_info >= (3, 0)):
guid_file = open(guid_name, 'w', newline='\n')
else:
guid_file = open(guid_name, 'wb')
except:
print(('\nGUID header open failed: ' + guid_name))
sys.exit(1)
try:
guid_file.write('#ifndef FMU_QSS_GUID\n')
guid_file.write('#define FMU_QSS_GUID "FMU-QSS_GUID" // No-check value\n')
guid_file.write('#endif\n')
guid_file.close()
except:
print(('\nGUID header write failed: ' + guid_name))
sys.exit(1)
try:
sizing_name = 'FMU_QSS_defines.hh'
if (sys.version_info >= (3, 0)):
sizing_file = open(sizing_name, 'w', newline='\n')
else:
sizing_file = open(sizing_name, 'wb')
except:
print(('\nSizing header open failed: ' + sizing_name))
sys.exit(1)
try:
sizing_file.write('#ifndef FMU_QSS_defines_hh_INCLUDED\n')
sizing_file.write('#define FMU_QSS_defines_hh_INCLUDED\n')
sizing_file.write('// Note: Sizes are >=1 to avoid illegal 0-sized arrays\n')
sizing_file.write('#define BUFFER 1024\n')
sizing_file.write((('#define N_REAL ' + str(max(n_real, 1))) + '\n'))
sizing_file.write((('#define N_INTEGER ' + str(max(n_integer, 1))) + '\n'))
sizing_file.write((('#define N_BOOLEAN ' + str(max(n_boolean, 1))) + '\n'))
sizing_file.write((('#define N_STRING ' + str(max(n_string, 1))) + '\n'))
sizing_file.write((('#define N_INPUT_REAL ' + str(max(n_input_real, 1))) + '\n'))
sizing_file.write((('#define N_INPUT_REAL_MAX_ORDER ' + str(max(n_input_real_max_order, 1))) + '\n'))
sizing_file.write((('#define N_OUTPUT_REAL ' + str(max(n_output_real, 1))) + '\n'))
sizing_file.write((('#define N_OUTPUT_REAL_MAX_ORDER ' + str(max(n_output_real_max_order, 1))) + '\n'))
sizing_file.write('#endif\n')
sizing_file.close()
except Exception as err:
print(((('\nSizing header write failed: ' + sizing_name) + ': ') + str(err)))
sys.exit(1)
if me_fmu_name:
try:
qss_name = (me_name + '_QSS')
if os.path.exists(qss_name):
if os.path.isdir(qss_name):
shutil.rmtree(qss_name)
elif os.path.isfile(qss_name):
os.remove(qss_name)
os.mkdir(qss_name)
os.mkdir(os.path.join(qss_name, 'binaries'))
if (not platform.machine().endswith('64')):
print('\nFMU-QSS generation only supports 64-bit OS at this time')
sys.exit(1)
if sys.platform.startswith('linux'):
binaries_dir = (qss_name + '/binaries/linux64')
elif sys.platform.startswith('win'):
binaries_dir = (qss_name + '\\binaries\\win64')
else:
print('\nPlatform is not supported for FMU-QSS generation')
sys.exit(1)
os.mkdir(binaries_dir)
os.mkdir(((qss_name + os.sep) + 'resources'))
except Exception as err:
print(('\nFMU-QSS directory tree setup failed: ' + str(err)))
sys.exit(1)
try:
QSS = os.environ.get('QSS')
QSS_bin = os.environ.get('QSS_bin')
PlatformCompiler = os.environ.get('PlatformCompiler')
if (QSS and QSS_bin and PlatformCompiler):
QSS_src = os.path.join(QSS, 'src', 'QSS')
fmu_src = os.path.join(QSS_src, 'fmu')
if os.path.exists('src'):
if os.path.isdir('src'):
shutil.rmtree('src')
elif os.path.isfile('src'):
os.remove('src')
src_bld = 'src'
fmu_bld = os.path.join('src', 'QSS', 'fmu')
os.makedirs(fmu_bld)
shutil.copy('FMU_QSS_defines.hh', fmu_bld)
shutil.copy('FMU_QSS_GUID.hh', fmu_bld)
shutil.copy(os.path.join(fmu_src, PlatformCompiler, 'GNUmakefile'), src_bld)
cwd = os.getcwd()
os.chdir(src_bld)
with open('GNUmakefile', 'r') as sources:
lines = sources.readlines()
with open('GNUmakefile', 'w') as sources:
for line in lines:
if line.startswith(('DLB := $(BIN_PATH)' + os.sep)):
sources.write(line.replace(('$(BIN_PATH)' + os.sep), ''))
else:
sources.write(line)
try:
import psutil
n_processors = psutil.cpu_count()
except:
print('\nNon-parallel make used: psutil processor count lookup failed')
n_processors = 1
try:
subprocess.call(['make', '-j', str(n_processors)])
try:
if sys.platform.startswith('linux'):
qss_lib = os.path.join(cwd, binaries_dir, (qss_name + '.so'))
if os.path.isfile(qss_lib):
os.remove(qss_lib)
os.rename('libFMU-QSS.so', qss_lib)
elif sys.platform.startswith('win'):
qss_lib = os.path.join(cwd, binaries_dir, (qss_name + '.dll'))
if os.path.isfile(qss_lib):
os.remove(qss_lib)
os.rename('libFMU-QSS.dll', qss_lib)
except Exception as err:
print(('\nFMU-QSS library move into staging directory failed: ' + str(err)))
except Exception as err:
print(('\nFMU-QSS library make failed: ' + str(err)))
os.chdir(cwd)
shutil.rmtree(src_bld)
else:
print("\nFMU-QSS library can't be built: QSS, QSS_bin, and PlatformCompiler environment variables are not all set")
except Exception as err:
print(('\nFMU-QSS library build failed: ' + str(err)))
try:
shutil.copyfile(qss_xml_name, ((qss_name + os.sep) + 'modelDescription.xml'))
shutil.copy(me_fmu_name, ((qss_name + os.sep) + 'resources'))
except Exception as err:
print(('\nFMU-QSS file setup failed: ' + str(err)))
sys.exit(1)
try:
qss_fmu_name = (qss_name + '.fmu')
if os.path.exists(qss_fmu_name):
if os.path.isfile(qss_fmu_name):
os.remove(qss_fmu_name)
elif os.path.isdir(qss_fmu_name):
shutil.rmtree(qss_fmu_name)
zip_file = ZipFile(qss_fmu_name, mode='w')
os.chdir(qss_name)
for (root, dirs, files) in os.walk('.'):
dirs.sort()
for dir in dirs:
zip_file.write(os.path.join(root, dir))
files.sort()
for file in files:
zip_file.write(os.path.join(root, file))
os.chdir('..')
zip_file.close()
except Exception as err:
print(('\nFMU-QSS zip into .fmu failed: ' + str(err)))
sys.exit(1)<|docstring|>Generate an FMU-QSS from an FMU-ME<|endoftext|> |
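Editor's note: the `fmu_qss_gen` row above attaches QSS solver settings to the FMU-QSS `modelDescription.xml` through `etree.SubElement` calls on a `VendorAnnotations` element. The following is a minimal sketch, not part of the dataset row, that rebuilds just that annotation subtree in isolation; the values `QSS2` and `1e-06` are the argparse defaults used above, taken here as example inputs.

``` python
# Sketch only: the VendorAnnotations subtree fmu_qss_gen writes, with example values.
from collections import OrderedDict
from lxml import etree

VendorAnnotations = etree.Element('VendorAnnotations')
Tool = etree.SubElement(VendorAnnotations, 'Tool', attrib={'name': 'QSS'})
Annotations = etree.SubElement(Tool, 'Annotations')
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'qss'), ('value', 'QSS2')]))
etree.SubElement(Annotations, 'Annotation', attrib=OrderedDict([('name', 'aTol'), ('value', '{:.16f}'.format(1e-06))]))
print(etree.tostring(VendorAnnotations, pretty_print=True).decode())
# <VendorAnnotations>
#   <Tool name="QSS">
#     <Annotations>
#       <Annotation name="qss" value="QSS2"/>
#       <Annotation name="aTol" value="0.0000010000000000"/>
#     </Annotations>
#   </Tool>
# </VendorAnnotations>
```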
32f76d4ad855507dda06b64afe7370a398f1e75b1b18efefcbc19ea0e3d74fa0 | def generate_states(start: int=0, stop: int=14, n_states: int=100, parity: Union[(str, int)]='both'):
'\n Generate correct string for input to `kshell_ui.py` when asked for\n which states to calculate. Copy the string generated by this\n function and paste it into `kshell_ui.py` when it prompts for\n states.\n\n DEPRECATED: RANGE FUNCTIONALITY WAS ADDED IN kshell_ui.py MAKING\n THIS FUNCTION OBSOLETE. WILL BE REMOVED.\n\n Parameters\n ----------\n start : int\n The lowest spin value.\n\n stop : int\n The largest spin value.\n\n n_states : int\n The number of states per spin value.\n\n parity : Union[str, int]\n The parity of the states. Allowed values are: 1, -1, \'both\',\n \'positive\', \'negative\', \'pos\', \'neg\', \'+\', \'-\'.\n\n Examples\n --------\n ``` python\n >>> import kshell_utilities as ksutil\n >>> ksutil.generate_states(start=0, stop=3, n_states=100, parity="both")\n 0+100, 0.5+100, 1+100, 1.5+100, 2+100, 2.5+100, 3+100, 0-100, 0.5-100, 1-100, 1.5-100, 2-100, 2.5-100, 3-100,\n ```\n '
allowed_positive_parity_inputs = ['positive', 'pos', '+', '1', '+1', 1, 'both']
allowed_negative_parity_inputs = ['negative', 'neg', '-', '-1', (- 1), 'both']
def correct_syntax(lst):
for elem in lst:
print(elem, end=', ')
if (parity in allowed_positive_parity_inputs):
positive = [f"{i:g}{'+'}{n_states}" for i in np.arange(start, (stop + 0.5), 0.5)]
correct_syntax(positive)
if (parity in allowed_negative_parity_inputs):
negative = [f"{i:g}{'-'}{n_states}" for i in np.arange(start, (stop + 0.5), 0.5)]
correct_syntax(negative) | Generate correct string for input to `kshell_ui.py` when asked for
which states to calculate. Copy the string generated by this
function and paste it into `kshell_ui.py` when it prompts for
states.
DEPRECATED: RANGE FUNCTIONALITY WAS ADDED IN kshell_ui.py MAKING
THIS FUNCTION OBSOLETE. WILL BE REMOVED.
Parameters
----------
start : int
The lowest spin value.
stop : int
The largest spin value.
n_states : int
The number of states per spin value.
parity : Union[str, int]
The parity of the states. Allowed values are: 1, -1, 'both',
'positive', 'negative', 'pos', 'neg', '+', '-'.
Examples
--------
``` python
>>> import kshell_utilities as ksutil
>>> ksutil.generate_states(start=0, stop=3, n_states=100, parity="both")
0+100, 0.5+100, 1+100, 1.5+100, 2+100, 2.5+100, 3+100, 0-100, 0.5-100, 1-100, 1.5-100, 2-100, 2.5-100, 3-100,
``` | kshell_utilities/kshell_utilities.py | generate_states | GaffaSnobb/kshell_utilities | 0 | python | def generate_states(start: int=0, stop: int=14, n_states: int=100, parity: Union[(str, int)]='both'):
'\n Generate correct string for input to `kshell_ui.py` when asked for\n which states to calculate. Copy the string generated by this\n function and paste it into `kshell_ui.py` when it prompts for\n states.\n\n DEPRECATED: RANGE FUNCTIONALITY WAS ADDED IN kshell_ui.py MAKING\n THIS FUNCTION OBSOLETE. WILL BE REMOVED.\n\n Parameters\n ----------\n start : int\n The lowest spin value.\n\n stop : int\n The largest spin value.\n\n n_states : int\n The number of states per spin value.\n\n parity : Union[str, int]\n The parity of the states. Allowed values are: 1, -1, \'both\',\n \'positive\', \'negative\', \'pos\', \'neg\', \'+\', \'-\'.\n\n Examples\n --------\n ``` python\n >>> import kshell_utilities as ksutil\n >>> ksutil.generate_states(start=0, stop=3, n_states=100, parity="both")\n 0+100, 0.5+100, 1+100, 1.5+100, 2+100, 2.5+100, 3+100, 0-100, 0.5-100, 1-100, 1.5-100, 2-100, 2.5-100, 3-100,\n ```\n '
allowed_positive_parity_inputs = ['positive', 'pos', '+', '1', '+1', 1, 'both']
allowed_negative_parity_inputs = ['negative', 'neg', '-', '-1', (- 1), 'both']
def correct_syntax(lst):
for elem in lst:
print(elem, end=', ')
if (parity in allowed_positive_parity_inputs):
positive = [f"{i:g}{'+'}{n_states}" for i in np.arange(start, (stop + 0.5), 0.5)]
correct_syntax(positive)
if (parity in allowed_negative_parity_inputs):
negative = [f"{i:g}{'-'}{n_states}" for i in np.arange(start, (stop + 0.5), 0.5)]
correct_syntax(negative) | def generate_states(start: int=0, stop: int=14, n_states: int=100, parity: Union[(str, int)]='both'):
'\n Generate correct string for input to `kshell_ui.py` when asked for\n which states to calculate. Copy the string generated by this\n function and paste it into `kshell_ui.py` when it prompts for\n states.\n\n DEPRECATED: RANGE FUNCTIONALITY WAS ADDED IN kshell_ui.py MAKING\n THIS FUNCTION OBSOLETE. WILL BE REMOVED.\n\n Parameters\n ----------\n start : int\n The lowest spin value.\n\n stop : int\n The largest spin value.\n\n n_states : int\n The number of states per spin value.\n\n parity : Union[str, int]\n The parity of the states. Allowed values are: 1, -1, \'both\',\n \'positive\', \'negative\', \'pos\', \'neg\', \'+\', \'-\'.\n\n Examples\n --------\n ``` python\n >>> import kshell_utilities as ksutil\n >>> ksutil.generate_states(start=0, stop=3, n_states=100, parity="both")\n 0+100, 0.5+100, 1+100, 1.5+100, 2+100, 2.5+100, 3+100, 0-100, 0.5-100, 1-100, 1.5-100, 2-100, 2.5-100, 3-100,\n ```\n '
allowed_positive_parity_inputs = ['positive', 'pos', '+', '1', '+1', 1, 'both']
allowed_negative_parity_inputs = ['negative', 'neg', '-', '-1', (- 1), 'both']
def correct_syntax(lst):
for elem in lst:
print(elem, end=', ')
if (parity in allowed_positive_parity_inputs):
positive = [f"{i:g}{'+'}{n_states}" for i in np.arange(start, (stop + 0.5), 0.5)]
correct_syntax(positive)
if (parity in allowed_negative_parity_inputs):
negative = [f"{i:g}{'-'}{n_states}" for i in np.arange(start, (stop + 0.5), 0.5)]
correct_syntax(negative)<|docstring|>Generate correct string for input to `kshell_ui.py` when asked for
which states to calculate. Copy the string generated by this
function and paste it into `kshell_ui.py` when it prompts for
states.
DEPRECATED: RANGE FUNCTIONALITY WAS ADDED IN kshell_ui.py MAKING
THIS FUNCTION OBSOLETE. WILL BE REMOVED.
Parameters
----------
start : int
The lowest spin value.
stop : int
The largest spin value.
n_states : int
The number of states per spin value.
parity : Union[str, int]
The parity of the states. Allowed values are: 1, -1, 'both',
'positive', 'negative', 'pos', 'neg', '+', '-'.
Examples
--------
``` python
>>> import kshell_utilities as ksutil
>>> ksutil.generate_states(start=0, stop=3, n_states=100, parity="both")
0+100, 0.5+100, 1+100, 1.5+100, 2+100, 2.5+100, 3+100, 0-100, 0.5-100, 1-100, 1.5-100, 2-100, 2.5-100, 3-100,
```<|endoftext|> |
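Editor's note: each state token in the `generate_states` row above comes from one f-string over a half-integer spin range. A minimal sketch, not part of the dataset row, showing that token format for positive parity with `n_states = 100`:

``` python
# Sketch only: mirrors the token construction used in generate_states.
import numpy as np

n_states = 100
for i in np.arange(0, 2 + 0.5, 0.5):
    print(f"{i:g}{'+'}{n_states}", end=', ')
# Prints: 0+100, 0.5+100, 1+100, 1.5+100, 2+100,
```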
9a77251b4d9d088cd34d5811c164a3dff0e7a76a2804763d8148e0b6a82cb4f7 | def _generate_unique_identifier(path: str) -> str:
'\n Generate a unique identifier based on the shell script and the\n save_input file from KSHELL.\n\n Parameters\n ----------\n path : str\n The path to a summary file or a directory with a summary file.\n '
shell_file_content = ''
save_input_content = ''
msg = 'Not able to generate unique identifier!'
if os.path.isfile(path):
'\n If a file is specified, extract the directory from the path.\n '
directory = path.rsplit('/', 1)[0]
if (directory == path):
"\n Example: path is 'summary.txt'\n "
directory = '.'
for elem in os.listdir(directory):
'\n Loop over all elements in the directory and find the shell\n script and save_input file.\n '
if elem.endswith('.sh'):
with open(f'{directory}/{elem}', 'r') as infile:
shell_file_content += infile.read()
elif ('save_input_ui.txt' in elem):
with open(f'{directory}/{elem}', 'r') as infile:
save_input_content += infile.read()
else:
print(msg)
if ((shell_file_content == '') and (save_input_content == '')):
print(msg)
return hashlib.sha1((shell_file_content + save_input_content).encode()).hexdigest() | Generate a unique identifier based on the shell script and the
save_input file from KSHELL.
Parameters
----------
path : str
The path to a summary file or a directory with a summary file. | kshell_utilities/kshell_utilities.py | _generate_unique_identifier | GaffaSnobb/kshell_utilities | 0 | python | def _generate_unique_identifier(path: str) -> str:
'\n Generate a unique identifier based on the shell script and the\n save_input file from KSHELL.\n\n Parameters\n ----------\n path : str\n The path to a summary file or a directory with a summary file.\n '
shell_file_content = ''
save_input_content = ''
msg = 'Not able to generate unique identifier!'
if os.path.isfile(path):
'\n If a file is specified, extract the directory from the path.\n '
directory = path.rsplit('/', 1)[0]
if (directory == path):
"\n Example: path is 'summary.txt'\n "
directory = '.'
for elem in os.listdir(directory):
'\n Loop over all elements in the directory and find the shell\n script and save_input file.\n '
if elem.endswith('.sh'):
with open(f'{directory}/{elem}', 'r') as infile:
shell_file_content += infile.read()
elif ('save_input_ui.txt' in elem):
with open(f'{directory}/{elem}', 'r') as infile:
save_input_content += infile.read()
else:
print(msg)
if ((shell_file_content == '') and (save_input_content == '')):
print(msg)
return hashlib.sha1((shell_file_content + save_input_content).encode()).hexdigest() | def _generate_unique_identifier(path: str) -> str:
'\n Generate a unique identifier based on the shell script and the\n save_input file from KSHELL.\n\n Parameters\n ----------\n path : str\n The path to a summary file or a directory with a summary file.\n '
shell_file_content = ''
save_input_content = ''
msg = 'Not able to generate unique identifier!'
if os.path.isfile(path):
'\n If a file is specified, extract the directory from the path.\n '
directory = path.rsplit('/', 1)[0]
if (directory == path):
"\n Example: path is 'summary.txt'\n "
directory = '.'
for elem in os.listdir(directory):
'\n Loop over all elements in the directory and find the shell\n script and save_input file.\n '
if elem.endswith('.sh'):
with open(f'{directory}/{elem}', 'r') as infile:
shell_file_content += infile.read()
elif ('save_input_ui.txt' in elem):
with open(f'{directory}/{elem}', 'r') as infile:
save_input_content += infile.read()
else:
print(msg)
if ((shell_file_content == '') and (save_input_content == '')):
print(msg)
return hashlib.sha1((shell_file_content + save_input_content).encode()).hexdigest()<|docstring|>Generate a unique identifier based on the shell script and the
save_input file from KSHELL.
Parameters
----------
path : str
The path to a summary file or a directory with a summary file.<|endoftext|> |
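Editor's note: the `_generate_unique_identifier` row above reduces to a SHA-1 hex digest over the concatenated shell-script and `save_input_ui.txt` contents. A minimal sketch, not part of the dataset row; the file contents below are made-up placeholders, not taken from the source.

``` python
# Sketch only: identical inputs always give the same 40-character hex digest.
import hashlib

shell_file_content = "#!/bin/sh\n./kshell.exe O19_0.input\n"  # example text, not from the source
save_input_content = "O19 sdpf-mu 0+100\n"                    # example text, not from the source
identifier = hashlib.sha1((shell_file_content + save_input_content).encode()).hexdigest()
print(identifier)  # stable identifier for this pair of inputs
```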
62220fd102b860389ff4d69a5b57e221451f2c944ae5842db8f44ba1eb155e6b | def _load_energy_levels(infile):
'\n Load excitation energy, spin and parity into a list of structure:\n levels = [[energy, spin, parity], ...].\n Example\n -------\n Energy levels\n\n N J prty N_Jp T E(MeV) Ex(MeV) log-file\n\n 1 5/2 + 1 3/2 -16.565 0.000 log_O19_sdpf-mu_m1p.txt \n 2 3/2 + 1 3/2 -15.977 0.588 log_O19_sdpf-mu_m1p.txt \n 3 1/2 + 1 3/2 -15.192 1.374 log_O19_sdpf-mu_m1p.txt \n 4 9/2 + 1 3/2 -13.650 2.915 log_O19_sdpf-mu_m1p.txt \n 5 7/2 + 1 3/2 -13.267 3.298 log_O19_sdpf-mu_m1p.txt \n 6 5/2 + 2 3/2 -13.074 3.491 log_O19_sdpf-mu_m1p.txt\n '
levels = []
negative_spin_counts = 0
for _ in range(3):
infile.readline()
for line in infile:
try:
tmp = line.split()
if (tmp[1] == '-1'):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
parity = (1 if (tmp[2] == '+') else (- 1))
energy = float(tmp[5])
spin = (2 * float(Fraction(tmp[1])))
idx = int(tmp[3])
levels.append([energy, spin, parity, idx])
except IndexError:
'\n End of energies.\n '
break
return (levels, negative_spin_counts) | Load excitation energy, spin and parity into a list of structure:
levels = [[energy, spin, parity], ...].
Example
-------
Energy levels
N J prty N_Jp T E(MeV) Ex(MeV) log-file
1 5/2 + 1 3/2 -16.565 0.000 log_O19_sdpf-mu_m1p.txt
2 3/2 + 1 3/2 -15.977 0.588 log_O19_sdpf-mu_m1p.txt
3 1/2 + 1 3/2 -15.192 1.374 log_O19_sdpf-mu_m1p.txt
4 9/2 + 1 3/2 -13.650 2.915 log_O19_sdpf-mu_m1p.txt
5 7/2 + 1 3/2 -13.267 3.298 log_O19_sdpf-mu_m1p.txt
6 5/2 + 2 3/2 -13.074 3.491 log_O19_sdpf-mu_m1p.txt | kshell_utilities/kshell_utilities.py | _load_energy_levels | GaffaSnobb/kshell_utilities | 0 | python | def _load_energy_levels(infile):
'\n Load excitation energy, spin and parity into a list of structure:\n levels = [[energy, spin, parity], ...].\n Example\n -------\n Energy levels\n\n N J prty N_Jp T E(MeV) Ex(MeV) log-file\n\n 1 5/2 + 1 3/2 -16.565 0.000 log_O19_sdpf-mu_m1p.txt \n 2 3/2 + 1 3/2 -15.977 0.588 log_O19_sdpf-mu_m1p.txt \n 3 1/2 + 1 3/2 -15.192 1.374 log_O19_sdpf-mu_m1p.txt \n 4 9/2 + 1 3/2 -13.650 2.915 log_O19_sdpf-mu_m1p.txt \n 5 7/2 + 1 3/2 -13.267 3.298 log_O19_sdpf-mu_m1p.txt \n 6 5/2 + 2 3/2 -13.074 3.491 log_O19_sdpf-mu_m1p.txt\n '
levels = []
negative_spin_counts = 0
for _ in range(3):
infile.readline()
for line in infile:
try:
tmp = line.split()
if (tmp[1] == '-1'):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
parity = (1 if (tmp[2] == '+') else (- 1))
energy = float(tmp[5])
spin = (2 * float(Fraction(tmp[1])))
idx = int(tmp[3])
levels.append([energy, spin, parity, idx])
except IndexError:
'\n End of energies.\n '
break
return (levels, negative_spin_counts) | def _load_energy_levels(infile):
'\n Load excitation energy, spin and parity into a list of structure:\n levels = [[energy, spin, parity], ...].\n Example\n -------\n Energy levels\n\n N J prty N_Jp T E(MeV) Ex(MeV) log-file\n\n 1 5/2 + 1 3/2 -16.565 0.000 log_O19_sdpf-mu_m1p.txt \n 2 3/2 + 1 3/2 -15.977 0.588 log_O19_sdpf-mu_m1p.txt \n 3 1/2 + 1 3/2 -15.192 1.374 log_O19_sdpf-mu_m1p.txt \n 4 9/2 + 1 3/2 -13.650 2.915 log_O19_sdpf-mu_m1p.txt \n 5 7/2 + 1 3/2 -13.267 3.298 log_O19_sdpf-mu_m1p.txt \n 6 5/2 + 2 3/2 -13.074 3.491 log_O19_sdpf-mu_m1p.txt\n '
levels = []
negative_spin_counts = 0
for _ in range(3):
infile.readline()
for line in infile:
try:
tmp = line.split()
if (tmp[1] == '-1'):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
parity = (1 if (tmp[2] == '+') else (- 1))
energy = float(tmp[5])
spin = (2 * float(Fraction(tmp[1])))
idx = int(tmp[3])
levels.append([energy, spin, parity, idx])
except IndexError:
'\n End of energies.\n '
break
return (levels, negative_spin_counts)<|docstring|>Load excitation energy, spin and parity into a list of structure:
levels = [[energy, spin, parity], ...].
Example
-------
Energy levels
N J prty N_Jp T E(MeV) Ex(MeV) log-file
1 5/2 + 1 3/2 -16.565 0.000 log_O19_sdpf-mu_m1p.txt
2 3/2 + 1 3/2 -15.977 0.588 log_O19_sdpf-mu_m1p.txt
3 1/2 + 1 3/2 -15.192 1.374 log_O19_sdpf-mu_m1p.txt
4 9/2 + 1 3/2 -13.650 2.915 log_O19_sdpf-mu_m1p.txt
5 7/2 + 1 3/2 -13.267 3.298 log_O19_sdpf-mu_m1p.txt
6 5/2 + 2 3/2 -13.074 3.491 log_O19_sdpf-mu_m1p.txt<|endoftext|> |
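Editor's note: the `_load_energy_levels` row above splits each data line of the "Energy levels" block on whitespace and stores `[energy, spin, parity, idx]` with the spin doubled. A minimal sketch, not part of the dataset row, parsing the first example line from its docstring the same way:

``` python
# Sketch only: parses one "Energy levels" line as _load_energy_levels does.
from fractions import Fraction

line = "1     5/2 +     1     3/2    -16.565     0.000   log_O19_sdpf-mu_m1p.txt"
tmp = line.split()
parity = 1 if tmp[2] == '+' else -1   # '+' -> 1
energy = float(tmp[5])                # -16.565
spin = 2 * float(Fraction(tmp[1]))    # 5/2 -> 5.0 (stored as 2*J)
idx = int(tmp[3])                     # 1
print([energy, spin, parity, idx])    # [-16.565, 5.0, 1, 1]
```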
79f07b4651aa580e2b258bb6bf953de3bdd9885717d36601bf54c3d64d70fd44 | def _load_transition_probabilities_old(infile):
'\n For summary files with old syntax (pre 2021-11-24).\n Parameters\n ----------\n infile:\n The KSHELL summary file.\n '
reduced_transition_prob_decay_list = []
negative_spin_counts = 0
for _ in range(2):
infile.readline()
for line in infile:
try:
"\n Example of possible lines in file:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n 3/2+( 1) 0.072 5/2+( 1) 0.000 0.071 0.127( 0.07) 0.084( 0.05)\n 2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)\n 3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)\n 1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)\n 5.0+(60) 32.170 4.0+(100) 31.734 0.436 0.198( 0.11) 0.242( 0.14)\n 4.0-( 3) 3.191 3.0+(10) 3.137 0.054 0.0( 0.0) 0.0( 0.0)\n 0.0+(46)', '47.248', '1.0+(97)', '45.384', '1.864', '23.973(13.39)', '7.991(', '4.46)\n "
tmp = line.split()
len_tmp = len(tmp)
case_ = None
parity_idx = (tmp[0].index('(') - 1)
parity_initial = (1 if (tmp[0][parity_idx] == '+') else (- 1))
parity_initial_symbol = tmp[0][parity_idx]
spin_initial = float(Fraction(tmp[0][:parity_idx]))
if ((tmp[1][(- 1)] != ')') and (tmp[3][(- 1)] != ')') and (len_tmp == 9)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n 5.0+(60) 32.170 4.0+(100) 31.734 0.436 0.198( 0.11) 0.242( 0.14)\n '
case_ = 0
E_gamma = float(tmp[4])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[5][:(- 1)])
reduced_transition_prob_excite = float(tmp[7][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[3])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[2].split('(')[1].split(')')[0])
elif ((tmp[1][(- 1)] != ')') and (tmp[3][(- 1)] == ')') and (len_tmp == 10)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)\n '
case_ = 1
E_gamma = float(tmp[5])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[6][:(- 1)])
reduced_transition_prob_excite = float(tmp[8][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[4])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[3][0])
elif ((tmp[1][(- 1)] == ')') and (tmp[4][(- 1)] != ')') and (len_tmp == 10)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)\n 1.0+( 1) 5.357 0.0+(103) 0.000 5.357 0.002( 0.00) 0.007( 0.00)\n 4.0-( 3) 3.191 3.0+(10) 3.137 0.054 0.0( 0.0) 0.0( 0.0)\n '
case_ = 2
E_gamma = float(tmp[5])
Ex_initial = float(tmp[2])
reduced_transition_prob_decay = float(tmp[6][:(- 1)])
reduced_transition_prob_excite = float(tmp[8][:(- 1)])
parity_final_symbol = tmp[3].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
Ex_final = float(tmp[4])
idx_initial = int(tmp[1][0])
idx_final = int(tmp[3].split('(')[1].split(')')[0])
elif ((tmp[1][(- 1)] == ')') and (tmp[4][(- 1)] == ')') and (len_tmp == 11)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)\n '
case_ = 3
E_gamma = float(tmp[6])
Ex_initial = float(tmp[2])
reduced_transition_prob_decay = float(tmp[7][:(- 1)])
reduced_transition_prob_excite = float(tmp[9][:(- 1)])
parity_final_symbol = tmp[3].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
Ex_final = float(tmp[5])
idx_initial = int(tmp[1][0])
idx_final = int(tmp[4][0])
elif ((tmp[5][(- 1)] == ')') and (tmp[2][(- 1)] == ')') and (len_tmp == 8)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 0.0+(46) 47.248 1.0+(97) 45.384 1.864 23.973(13.39) 7.991( 4.46)\n '
case_ = 4
E_gamma = float(tmp[4])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[5].split('(')[0])
reduced_transition_prob_excite = float(tmp[6][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[3])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[2].split('(')[1].split(')')[0])
else:
msg = 'ERROR: Structure not accounted for!'
msg += f'''
line={line!r}'''
raise KshellDataStructureError(msg)
if (parity_final_symbol == '+'):
parity_final = 1
elif (parity_final_symbol == '-'):
parity_final = (- 1)
else:
msg = f'Could not properly read the final parity! case_={case_!r}'
raise KshellDataStructureError(msg)
if ((spin_final == (- 1)) or (spin_initial == (- 1))):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
reduced_transition_prob_decay_list.append([(2 * spin_initial), parity_initial, idx_initial, Ex_initial, (2 * spin_final), parity_final, idx_final, Ex_final, E_gamma, reduced_transition_prob_decay, reduced_transition_prob_excite])
except ValueError as err:
'\n One of the float conversions failed indicating that\n the structure of the line is not accounted for.\n '
msg = ((('\n' + err.__str__()) + f'''
case_={case_!r}''') + f'''
line={line!r}''')
raise KshellDataStructureError(msg)
except IndexError:
'\n End of probabilities.\n '
break
return (reduced_transition_prob_decay_list, negative_spin_counts) | For summary files with old syntax (pre 2021-11-24).
Parameters
----------
infile:
The KSHELL summary file. | kshell_utilities/kshell_utilities.py | _load_transition_probabilities_old | GaffaSnobb/kshell_utilities | 0 | python | def _load_transition_probabilities_old(infile):
'\n For summary files with old syntax (pre 2021-11-24).\n Parameters\n ----------\n infile:\n The KSHELL summary file.\n '
reduced_transition_prob_decay_list = []
negative_spin_counts = 0
for _ in range(2):
infile.readline()
for line in infile:
try:
"\n Example of possible lines in file:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n 3/2+( 1) 0.072 5/2+( 1) 0.000 0.071 0.127( 0.07) 0.084( 0.05)\n 2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)\n 3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)\n 1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)\n 5.0+(60) 32.170 4.0+(100) 31.734 0.436 0.198( 0.11) 0.242( 0.14)\n 4.0-( 3) 3.191 3.0+(10) 3.137 0.054 0.0( 0.0) 0.0( 0.0)\n 0.0+(46)', '47.248', '1.0+(97)', '45.384', '1.864', '23.973(13.39)', '7.991(', '4.46)\n "
tmp = line.split()
len_tmp = len(tmp)
case_ = None
parity_idx = (tmp[0].index('(') - 1)
parity_initial = (1 if (tmp[0][parity_idx] == '+') else (- 1))
parity_initial_symbol = tmp[0][parity_idx]
spin_initial = float(Fraction(tmp[0][:parity_idx]))
if ((tmp[1][(- 1)] != ')') and (tmp[3][(- 1)] != ')') and (len_tmp == 9)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n 5.0+(60) 32.170 4.0+(100) 31.734 0.436 0.198( 0.11) 0.242( 0.14)\n '
case_ = 0
E_gamma = float(tmp[4])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[5][:(- 1)])
reduced_transition_prob_excite = float(tmp[7][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[3])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[2].split('(')[1].split(')')[0])
elif ((tmp[1][(- 1)] != ')') and (tmp[3][(- 1)] == ')') and (len_tmp == 10)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)\n '
case_ = 1
E_gamma = float(tmp[5])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[6][:(- 1)])
reduced_transition_prob_excite = float(tmp[8][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[4])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[3][0])
elif ((tmp[1][(- 1)] == ')') and (tmp[4][(- 1)] != ')') and (len_tmp == 10)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)\n 1.0+( 1) 5.357 0.0+(103) 0.000 5.357 0.002( 0.00) 0.007( 0.00)\n 4.0-( 3) 3.191 3.0+(10) 3.137 0.054 0.0( 0.0) 0.0( 0.0)\n '
case_ = 2
E_gamma = float(tmp[5])
Ex_initial = float(tmp[2])
reduced_transition_prob_decay = float(tmp[6][:(- 1)])
reduced_transition_prob_excite = float(tmp[8][:(- 1)])
parity_final_symbol = tmp[3].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
Ex_final = float(tmp[4])
idx_initial = int(tmp[1][0])
idx_final = int(tmp[3].split('(')[1].split(')')[0])
elif ((tmp[1][(- 1)] == ')') and (tmp[4][(- 1)] == ')') and (len_tmp == 11)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)\n '
case_ = 3
E_gamma = float(tmp[6])
Ex_initial = float(tmp[2])
reduced_transition_prob_decay = float(tmp[7][:(- 1)])
reduced_transition_prob_excite = float(tmp[9][:(- 1)])
parity_final_symbol = tmp[3].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
Ex_final = float(tmp[5])
idx_initial = int(tmp[1][0])
idx_final = int(tmp[4][0])
elif ((tmp[5][(- 1)] == ')') and (tmp[2][(- 1)] == ')') and (len_tmp == 8)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 0.0+(46) 47.248 1.0+(97) 45.384 1.864 23.973(13.39) 7.991( 4.46)\n '
case_ = 4
E_gamma = float(tmp[4])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[5].split('(')[0])
reduced_transition_prob_excite = float(tmp[6][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[3])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[2].split('(')[1].split(')')[0])
else:
msg = 'ERROR: Structure not accounted for!'
msg += f'''
line={line!r}'''
raise KshellDataStructureError(msg)
if (parity_final_symbol == '+'):
parity_final = 1
elif (parity_final_symbol == '-'):
parity_final = (- 1)
else:
msg = f'Could not properly read the final parity! case_={case_!r}'
raise KshellDataStructureError(msg)
if ((spin_final == (- 1)) or (spin_initial == (- 1))):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
reduced_transition_prob_decay_list.append([(2 * spin_initial), parity_initial, idx_initial, Ex_initial, (2 * spin_final), parity_final, idx_final, Ex_final, E_gamma, reduced_transition_prob_decay, reduced_transition_prob_excite])
except ValueError as err:
'\n One of the float conversions failed indicating that\n the structure of the line is not accounted for.\n '
msg = ((('\n' + err.__str__()) + f'\ncase_={case_!r}') + f'\nline={line!r}')
raise KshellDataStructureError(msg)
except IndexError:
'\n End of probabilities.\n '
break
return (reduced_transition_prob_decay_list, negative_spin_counts) | def _load_transition_probabilities_old(infile):
'\n For summary files with old syntax (pre 2021-11-24).\n Parameters\n ----------\n infile:\n The KSHELL summary file.\n '
reduced_transition_prob_decay_list = []
negative_spin_counts = 0
for _ in range(2):
infile.readline()
for line in infile:
try:
"\n Example of possible lines in file:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n 3/2+( 1) 0.072 5/2+( 1) 0.000 0.071 0.127( 0.07) 0.084( 0.05)\n 2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)\n 3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)\n 1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)\n 5.0+(60) 32.170 4.0+(100) 31.734 0.436 0.198( 0.11) 0.242( 0.14)\n 4.0-( 3) 3.191 3.0+(10) 3.137 0.054 0.0( 0.0) 0.0( 0.0)\n 0.0+(46)', '47.248', '1.0+(97)', '45.384', '1.864', '23.973(13.39)', '7.991(', '4.46)\n "
tmp = line.split()
len_tmp = len(tmp)
case_ = None
parity_idx = (tmp[0].index('(') - 1)
parity_initial = (1 if (tmp[0][parity_idx] == '+') else (- 1))
parity_initial_symbol = tmp[0][parity_idx]
spin_initial = float(Fraction(tmp[0][:parity_idx]))
if ((tmp[1][(- 1)] != ')') and (tmp[3][(- 1)] != ')') and (len_tmp == 9)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n 5.0+(60) 32.170 4.0+(100) 31.734 0.436 0.198( 0.11) 0.242( 0.14)\n '
case_ = 0
E_gamma = float(tmp[4])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[5][:(- 1)])
reduced_transition_prob_excite = float(tmp[7][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[3])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[2].split('(')[1].split(')')[0])
elif ((tmp[1][(- 1)] != ')') and (tmp[3][(- 1)] == ')') and (len_tmp == 10)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(10) 17.791 2+( 1) 5.172 12.619 0.006( 0.00) 0.006( 0.00)\n '
case_ = 1
E_gamma = float(tmp[5])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[6][:(- 1)])
reduced_transition_prob_excite = float(tmp[8][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[4])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[3][0])
elif ((tmp[1][(- 1)] == ')') and (tmp[4][(- 1)] != ')') and (len_tmp == 10)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 3+( 8) 19.503 2+(11) 18.393 1.111 0.000( 0.00) 0.000( 0.00)\n 1.0+( 1) 5.357 0.0+(103) 0.000 5.357 0.002( 0.00) 0.007( 0.00)\n 4.0-( 3) 3.191 3.0+(10) 3.137 0.054 0.0( 0.0) 0.0( 0.0)\n '
case_ = 2
E_gamma = float(tmp[5])
Ex_initial = float(tmp[2])
reduced_transition_prob_decay = float(tmp[6][:(- 1)])
reduced_transition_prob_excite = float(tmp[8][:(- 1)])
parity_final_symbol = tmp[3].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
Ex_final = float(tmp[4])
idx_initial = int(tmp[1][0])
idx_final = int(tmp[3].split('(')[1].split(')')[0])
elif ((tmp[1][(- 1)] == ')') and (tmp[4][(- 1)] == ')') and (len_tmp == 11)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 1+( 7) 19.408 2+( 9) 16.111 3.297 0.005( 0.00) 0.003( 0.00)\n '
case_ = 3
E_gamma = float(tmp[6])
Ex_initial = float(tmp[2])
reduced_transition_prob_decay = float(tmp[7][:(- 1)])
reduced_transition_prob_excite = float(tmp[9][:(- 1)])
parity_final_symbol = tmp[3].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[3].split(parity_final_symbol)[0]))
Ex_final = float(tmp[5])
idx_initial = int(tmp[1][0])
idx_final = int(tmp[4][0])
elif ((tmp[5][(- 1)] == ')') and (tmp[2][(- 1)] == ')') and (len_tmp == 8)):
'\n Example:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 0.0+(46) 47.248 1.0+(97) 45.384 1.864 23.973(13.39) 7.991( 4.46)\n '
case_ = 4
E_gamma = float(tmp[4])
Ex_initial = float(tmp[1])
reduced_transition_prob_decay = float(tmp[5].split('(')[0])
reduced_transition_prob_excite = float(tmp[6][:(- 1)])
parity_final_symbol = tmp[2].split('(')[0][(- 1)]
spin_final = float(Fraction(tmp[2].split(parity_final_symbol)[0]))
Ex_final = float(tmp[3])
idx_initial = int(tmp[0].split('(')[1].split(')')[0])
idx_final = int(tmp[2].split('(')[1].split(')')[0])
else:
msg = 'ERROR: Structure not accounted for!'
msg += f'\nline={line!r}'
raise KshellDataStructureError(msg)
if (parity_final_symbol == '+'):
parity_final = 1
elif (parity_final_symbol == '-'):
parity_final = (- 1)
else:
msg = f'Could not properly read the final parity! case_={case_!r}'
raise KshellDataStructureError(msg)
if ((spin_final == (- 1)) or (spin_initial == (- 1))):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
reduced_transition_prob_decay_list.append([(2 * spin_initial), parity_initial, idx_initial, Ex_initial, (2 * spin_final), parity_final, idx_final, Ex_final, E_gamma, reduced_transition_prob_decay, reduced_transition_prob_excite])
except ValueError as err:
'\n One of the float conversions failed indicating that\n the structure of the line is not accounted for.\n '
msg = ((('\n' + err.__str__()) + f'\ncase_={case_!r}') + f'\nline={line!r}')
raise KshellDataStructureError(msg)
except IndexError:
'\n End of probabilities.\n '
break
return (reduced_transition_prob_decay_list, negative_spin_counts)<|docstring|>For summary files with old syntax (pre 2021-11-24).
Parameters
----------
infile:
The KSHELL summary file.<|endoftext|> |
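The old-syntax parser above relies on fractions.Fraction to read half-integer spins and on string slicing to separate the spin, parity and level index inside a token such as '3/2+( 1)'. A minimal, self-contained sketch of that idea, using a token taken from the example lines in the docstring:

from fractions import Fraction

token = '3/2+( 1)'                            # spin/parity/index token from an old-syntax line
parity_idx = token.index('(') - 1             # position of the parity symbol
parity = 1 if token[parity_idx] == '+' else -1
spin = float(Fraction(token[:parity_idx]))    # Fraction('3/2') -> 1.5
idx = int(token.split('(')[1].split(')')[0])  # 1
print(spin, parity, idx)                      # 1.5 1 1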
f200a2798c834db6422a71a78d59bf19843c3fe2680b90b2959aff4f2b99e599 | def _load_transition_probabilities(infile):
'\n Example structure:\n B(E2) ( > -0.0 W.u.) mass = 50 1 W.u. = 10.9 e^2 fm^4\n e^2 fm^4 (W.u.)\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n 4 + 1 0.074 6 + 1 0.000 0.074 47.20641983 4.31409897 32.68136758 2.98668391\n Parameters\n ----------\n infile:\n The KSHELL summary file.\n '
reduced_transition_prob_decay_list = []
negative_spin_counts = 0
for _ in range(2):
infile.readline()
for line in infile:
line_split = line.split()
if (not line_split):
break
spin_initial = float(Fraction(line_split[0]))
parity_initial = parity_string_to_integer(line_split[1])
idx_initial = int(line_split[2])
Ex_initial = float(line_split[3])
spin_final = float(Fraction(line_split[4]))
parity_final = parity_string_to_integer(line_split[5])
idx_final = int(line_split[6])
Ex_final = float(line_split[7])
E_gamma = float(line_split[8])
reduced_transition_prob_decay = float(line_split[9])
reduced_transition_prob_excite = float(line_split[11])
if ((spin_final < 0) or (spin_initial < 0)):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
reduced_transition_prob_decay_list.append([(2 * spin_initial), parity_initial, idx_initial, Ex_initial, (2 * spin_final), parity_final, idx_final, Ex_final, E_gamma, reduced_transition_prob_decay, reduced_transition_prob_excite])
return (reduced_transition_prob_decay_list, negative_spin_counts) | Example structure:
B(E2) ( > -0.0 W.u.) mass = 50 1 W.u. = 10.9 e^2 fm^4
e^2 fm^4 (W.u.)
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
4 + 1 0.074 6 + 1 0.000 0.074 47.20641983 4.31409897 32.68136758 2.98668391
Parameters
----------
infile:
The KSHELL summary file. | kshell_utilities/kshell_utilities.py | _load_transition_probabilities | GaffaSnobb/kshell_utilities | 0 | python | def _load_transition_probabilities(infile):
'\n Example structure:\n B(E2) ( > -0.0 W.u.) mass = 50 1 W.u. = 10.9 e^2 fm^4\n e^2 fm^4 (W.u.)\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n 4 + 1 0.074 6 + 1 0.000 0.074 47.20641983 4.31409897 32.68136758 2.98668391\n Parameters\n ----------\n infile:\n The KSHELL summary file.\n '
reduced_transition_prob_decay_list = []
negative_spin_counts = 0
for _ in range(2):
infile.readline()
for line in infile:
line_split = line.split()
if (not line_split):
break
spin_initial = float(Fraction(line_split[0]))
parity_initial = parity_string_to_integer(line_split[1])
idx_initial = int(line_split[2])
Ex_initial = float(line_split[3])
spin_final = float(Fraction(line_split[4]))
parity_final = parity_string_to_integer(line_split[5])
idx_final = int(line_split[6])
Ex_final = float(line_split[7])
E_gamma = float(line_split[8])
reduced_transition_prob_decay = float(line_split[9])
reduced_transition_prob_excite = float(line_split[11])
if ((spin_final < 0) or (spin_initial < 0)):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
reduced_transition_prob_decay_list.append([(2 * spin_initial), parity_initial, idx_initial, Ex_initial, (2 * spin_final), parity_final, idx_final, Ex_final, E_gamma, reduced_transition_prob_decay, reduced_transition_prob_excite])
return (reduced_transition_prob_decay_list, negative_spin_counts) | def _load_transition_probabilities(infile):
'\n Example structure:\n B(E2) ( > -0.0 W.u.) mass = 50 1 W.u. = 10.9 e^2 fm^4\n e^2 fm^4 (W.u.)\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n 4 + 1 0.074 6 + 1 0.000 0.074 47.20641983 4.31409897 32.68136758 2.98668391\n Parameters\n ----------\n infile:\n The KSHELL summary file.\n '
reduced_transition_prob_decay_list = []
negative_spin_counts = 0
for _ in range(2):
infile.readline()
for line in infile:
line_split = line.split()
if (not line_split):
break
spin_initial = float(Fraction(line_split[0]))
parity_initial = parity_string_to_integer(line_split[1])
idx_initial = int(line_split[2])
Ex_initial = float(line_split[3])
spin_final = float(Fraction(line_split[4]))
parity_final = parity_string_to_integer(line_split[5])
idx_final = int(line_split[6])
Ex_final = float(line_split[7])
E_gamma = float(line_split[8])
reduced_transition_prob_decay = float(line_split[9])
reduced_transition_prob_excite = float(line_split[11])
if ((spin_final < 0) or (spin_initial < 0)):
'\n -1 spin states in the KSHELL data file indicates\n bad states which should not be included.\n '
negative_spin_counts += 1
continue
reduced_transition_prob_decay_list.append([(2 * spin_initial), parity_initial, idx_initial, Ex_initial, (2 * spin_final), parity_final, idx_final, Ex_final, E_gamma, reduced_transition_prob_decay, reduced_transition_prob_excite])
return (reduced_transition_prob_decay_list, negative_spin_counts)<|docstring|>Example structure:
B(E2) ( > -0.0 W.u.) mass = 50 1 W.u. = 10.9 e^2 fm^4
e^2 fm^4 (W.u.)
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
4 + 1 0.074 6 + 1 0.000 0.074 47.20641983 4.31409897 32.68136758 2.98668391
Parameters
----------
infile:
The KSHELL summary file.<|endoftext|> |
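In the new-syntax loader the transition line is a plain whitespace-separated table, so every quantity is read by column index; note in particular that idx_f sits in column 6 (column 2 is idx_i). A short sketch using the first example row from the docstring above:

from fractions import Fraction

line = '5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066'
cols = line.split()
# J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
spin_initial = float(Fraction(cols[0]))  # 5.0
idx_initial = int(cols[2])               # 1
idx_final = int(cols[6])                 # 1
Ex_final = float(cols[7])                # 0.0
B_decay = float(cols[9])                 # 70.43477980
B_excite = float(cols[11])               # 59.59865983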
843dc061447c6dbfed7caab751dfb08f2204456a5fb443283ff5bce1bf49548c | def _load_parallel(arg_list):
'\n For parallel data loads.\n [self.fname_summary, "Energy", self._load_energy_levels, None]\n '
(fname, condition, loader, thread_idx) = arg_list
print(f'Thread {thread_idx} loading {condition} values...')
load_time = time.perf_counter()
with open(fname, 'r') as infile:
for line in infile:
if (condition in line):
ans = loader(infile)
load_time = (time.perf_counter() - load_time)
print(f'Thread {thread_idx} finished loading {condition} values in {load_time:.2f} s')
return ans | For parallel data loads.
[self.fname_summary, "Energy", self._load_energy_levels, None] | kshell_utilities/kshell_utilities.py | _load_parallel | GaffaSnobb/kshell_utilities | 0 | python | def _load_parallel(arg_list):
'\n For parallel data loads.\n [self.fname_summary, "Energy", self._load_energy_levels, None]\n '
(fname, condition, loader, thread_idx) = arg_list
print(f'Thread {thread_idx} loading {condition} values...')
load_time = time.perf_counter()
with open(fname, 'r') as infile:
for line in infile:
if (condition in line):
ans = loader(infile)
load_time = (time.perf_counter() - load_time)
print(f'Thread {thread_idx} finished loading {condition} values in {load_time:.2f} s')
return ans | def _load_parallel(arg_list):
'\n For parallel data loads.\n [self.fname_summary, "Energy", self._load_energy_levels, None]\n '
(fname, condition, loader, thread_idx) = arg_list
print(f'Thread {thread_idx} loading {condition} values...')
load_time = time.perf_counter()
with open(fname, 'r') as infile:
for line in infile:
if (condition in line):
ans = loader(infile)
load_time = (time.perf_counter() - load_time)
print(f'Thread {thread_idx} finished loading {condition} values in {load_time:.2f} s')
return ans<|docstring|>For parallel data loads.
[self.fname_summary, "Energy", self._load_energy_levels, None]<|endoftext|> |
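_load_parallel expects one argument list per quantity and is intended to be dispatched to several worker processes, mirroring the pool.map pattern used in loadtxt below. A hedged sketch of such a dispatch; the summary filename and the condition strings are placeholders only:

import multiprocessing

arg_lists = [
    ['summary_test.txt', 'B(M1)', _load_transition_probabilities, 0],
    ['summary_test.txt', 'B(E2)', _load_transition_probabilities, 1],
]
with multiprocessing.Pool() as pool:
    results = pool.map(_load_parallel, arg_lists)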
3406c905b322eba9453d5444590a1bc142efb80a448c136371d9017b31e63354 | def _process_kshell_output_in_parallel(args):
'\n Simple wrapper for parallelizing loading of KSHELL files.\n '
(filepath, load_and_save_to_file, old_or_new) = args
print(filepath)
return ReadKshellOutput(filepath, load_and_save_to_file, old_or_new) | Simple wrapper for parallelizing loading of KSHELL files. | kshell_utilities/kshell_utilities.py | _process_kshell_output_in_parallel | GaffaSnobb/kshell_utilities | 0 | python | def _process_kshell_output_in_parallel(args):
'\n \n '
(filepath, load_and_save_to_file, old_or_new) = args
print(filepath)
return ReadKshellOutput(filepath, load_and_save_to_file, old_or_new) | def _process_kshell_output_in_parallel(args):
'\n \n '
(filepath, load_and_save_to_file, old_or_new) = args
print(filepath)
return ReadKshellOutput(filepath, load_and_save_to_file, old_or_new)<|docstring|>Simple wrapper for parallelizing loading of KSHELL files.<|endoftext|> |
182f25e6df1b45ce7da2a5a679e4adf030d6ec0c36398b90c9e7e59f3b857bc4 | def loadtxt(path: str, is_directory: bool=False, filter_: Union[(None, str)]=None, load_and_save_to_file: Union[(bool, str)]=True, old_or_new='new') -> list:
"\n Wrapper for using ReadKshellOutput class as a function.\n TODO: Consider changing 'path' to 'fname' to be the same as\n np.loadtxt.\n\n Parameters\n ----------\n path : str\n Filename (and path) of `KSHELL` output data file, or path to\n directory containing sub-directories with `KSHELL` output data.\n \n is_directory : bool\n If True, and 'path' is a directory containing sub-directories\n with `KSHELL` data files, the contents of 'path' will be scanned\n for `KSHELL` data files. Currently supports only summary files.\n\n filter_ : Union[None, str]\n NOTE: Shouldnt the type be list, not str?\n\n load_and_save_to_file : Union[bool, str]\n Toggle saving data as `.npy` files on / off. If 'overwrite',\n saved `.npy` files are overwritten.\n\n old_or_new : str\n Choose between old and new summary file syntax. All summary\n files generated pre 2021-11-24 use old style.\n New:\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n Old:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n\n Returns\n -------\n data : list\n List of instances with data from `KSHELL` data file as\n attributes.\n "
loadtxt_time = time.perf_counter()
all_fnames = None
data = []
if (old_or_new not in (old_or_new_allowed := ['old', 'new'])):
msg = f"'old_or_new' argument must be in {old_or_new_allowed}!"
msg += f" Got '{old_or_new}'."
raise ValueError(msg)
if (is_directory and (not os.path.isdir(path))):
msg = f'{path} is not a directory'
raise NotADirectoryError(msg)
elif ((not is_directory) and (not os.path.isfile(path))):
msg = f'{path} is not a file'
raise FileNotFoundError(msg)
elif (is_directory and os.path.isdir(path)):
msg = "The 'is_directory' option is not properly tested and is"
msg += ' deprecated at the moment. Might return in the future.'
raise DeprecationWarning(msg)
all_fnames = {}
for element in sorted(os.listdir(path)):
'\n List all content in path.\n '
if os.path.isdir((path + element)):
'\n If element is a directory, enter it to find data files.\n '
all_fnames[element] = []
for isotope in os.listdir((path + element)):
'\n List all content in the element directory.\n '
if (isotope.startswith('summary') and isotope.endswith('.txt')):
'\n Extract summary data files.\n '
try:
'\n Example: O16.\n '
n_neutrons = int(isotope[9:11])
except ValueError:
'\n Example: Ne20.\n '
n_neutrons = int(isotope[10:12])
n_neutrons -= atomic_numbers[element.split('_')[1]]
all_fnames[element].append([((element + '/') + isotope), n_neutrons])
pool = multiprocessing.Pool()
for key in all_fnames:
"\n Sort each list in the dict by the number of neutrons. Loop\n over all directories in 'all_fnames' and extract KSHELL data\n and append to a list.\n "
if (filter_ is not None):
if (key.split('_')[1] not in filter_):
'\n Skip elements not in filter_.\n '
continue
all_fnames[key].sort(key=(lambda tup: tup[1]))
sub_fnames = all_fnames[key]
arg_list = [((path + i[0]), load_and_save_to_file, old_or_new) for i in sub_fnames]
data += pool.map(_process_kshell_output_in_parallel, arg_list)
else:
'\n Only a single KSHELL data file.\n '
data.append(ReadKshellOutput(path, load_and_save_to_file, old_or_new))
if (not data):
msg = 'No KSHELL data loaded. Most likely error is that the given'
msg += f' directory has no KSHELL data files. path={path!r}'
raise RuntimeError(msg)
loadtxt_time = (time.perf_counter() - loadtxt_time)
if flags['debug']:
print(f'loadtxt_time = {loadtxt_time!r} s')
return data | Wrapper for using ReadKshellOutput class as a function.
TODO: Consider changing 'path' to 'fname' to be the same as
np.loadtxt.
Parameters
----------
path : str
Filename (and path) of `KSHELL` output data file, or path to
directory containing sub-directories with `KSHELL` output data.
is_directory : bool
If True, and 'path' is a directory containing sub-directories
with `KSHELL` data files, the contents of 'path' will be scanned
for `KSHELL` data files. Currently supports only summary files.
filter_ : Union[None, str]
NOTE: Shouldn't the type be list, not str?
load_and_save_to_file : Union[bool, str]
Toggle saving data as `.npy` files on / off. If 'overwrite',
saved `.npy` files are overwritten.
old_or_new : str
Choose between old and new summary file syntax. All summary
files generated pre 2021-11-24 use old style.
New:
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
Old:
J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<-
2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)
Returns
-------
data : list
List of instances with data from `KSHELL` data file as
attributes. | kshell_utilities/kshell_utilities.py | loadtxt | GaffaSnobb/kshell_utilities | 0 | python | def loadtxt(path: str, is_directory: bool=False, filter_: Union[(None, str)]=None, load_and_save_to_file: Union[(bool, str)]=True, old_or_new='new') -> list:
"\n Wrapper for using ReadKshellOutput class as a function.\n TODO: Consider changing 'path' to 'fname' to be the same as\n np.loadtxt.\n\n Parameters\n ----------\n path : str\n Filename (and path) of `KSHELL` output data file, or path to\n directory containing sub-directories with `KSHELL` output data.\n \n is_directory : bool\n If True, and 'path' is a directory containing sub-directories\n with `KSHELL` data files, the contents of 'path' will be scanned\n for `KSHELL` data files. Currently supports only summary files.\n\n filter_ : Union[None, str]\n NOTE: Shouldnt the type be list, not str?\n\n load_and_save_to_file : Union[bool, str]\n Toggle saving data as `.npy` files on / off. If 'overwrite',\n saved `.npy` files are overwritten.\n\n old_or_new : str\n Choose between old and new summary file syntax. All summary\n files generated pre 2021-11-24 use old style.\n New:\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n Old:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n\n Returns\n -------\n data : list\n List of instances with data from `KSHELL` data file as\n attributes.\n "
loadtxt_time = time.perf_counter()
all_fnames = None
data = []
if (old_or_new not in (old_or_new_allowed := ['old', 'new'])):
msg = f"'old_or_new' argument must be in {old_or_new_allowed}!"
msg += f" Got '{old_or_new}'."
raise ValueError(msg)
if (is_directory and (not os.path.isdir(path))):
msg = f'{path} is not a directory'
raise NotADirectoryError(msg)
elif ((not is_directory) and (not os.path.isfile(path))):
msg = f'{path} is not a file'
raise FileNotFoundError(msg)
elif (is_directory and os.path.isdir(path)):
msg = "The 'is_directory' option is not properly tested and is"
msg += ' deprecated at the moment. Might return in the future.'
raise DeprecationWarning(msg)
all_fnames = {}
for element in sorted(os.listdir(path)):
'\n List all content in path.\n '
if os.path.isdir((path + element)):
'\n If element is a directory, enter it to find data files.\n '
all_fnames[element] = []
for isotope in os.listdir((path + element)):
'\n List all content in the element directory.\n '
if (isotope.startswith('summary') and isotope.endswith('.txt')):
'\n Extract summary data files.\n '
try:
'\n Example: O16.\n '
n_neutrons = int(isotope[9:11])
except ValueError:
'\n Example: Ne20.\n '
n_neutrons = int(isotope[10:12])
n_neutrons -= atomic_numbers[element.split('_')[1]]
all_fnames[element].append([((element + '/') + isotope), n_neutrons])
pool = multiprocessing.Pool()
for key in all_fnames:
"\n Sort each list in the dict by the number of neutrons. Loop\n over all directories in 'all_fnames' and extract KSHELL data\n and append to a list.\n "
if (filter_ is not None):
if (key.split('_')[1] not in filter_):
'\n Skip elements not in filter_.\n '
continue
all_fnames[key].sort(key=(lambda tup: tup[1]))
sub_fnames = all_fnames[key]
arg_list = [((path + i[0]), load_and_save_to_file, old_or_new) for i in sub_fnames]
data += pool.map(_process_kshell_output_in_parallel, arg_list)
else:
'\n Only a single KSHELL data file.\n '
data.append(ReadKshellOutput(path, load_and_save_to_file, old_or_new))
if (not data):
msg = 'No KSHELL data loaded. Most likely error is that the given'
msg += f' directory has no KSHELL data files. path={path!r}'
raise RuntimeError(msg)
loadtxt_time = (time.perf_counter() - loadtxt_time)
if flags['debug']:
print(f'loadtxt_time = {loadtxt_time!r} s')
return data | def loadtxt(path: str, is_directory: bool=False, filter_: Union[(None, str)]=None, load_and_save_to_file: Union[(bool, str)]=True, old_or_new='new') -> list:
"\n Wrapper for using ReadKshellOutput class as a function.\n TODO: Consider changing 'path' to 'fname' to be the same as\n np.loadtxt.\n\n Parameters\n ----------\n path : str\n Filename (and path) of `KSHELL` output data file, or path to\n directory containing sub-directories with `KSHELL` output data.\n \n is_directory : bool\n If True, and 'path' is a directory containing sub-directories\n with `KSHELL` data files, the contents of 'path' will be scanned\n for `KSHELL` data files. Currently supports only summary files.\n\n filter_ : Union[None, str]\n NOTE: Shouldnt the type be list, not str?\n\n load_and_save_to_file : Union[bool, str]\n Toggle saving data as `.npy` files on / off. If 'overwrite',\n saved `.npy` files are overwritten.\n\n old_or_new : str\n Choose between old and new summary file syntax. All summary\n files generated pre 2021-11-24 use old style.\n New:\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n Old:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n\n Returns\n -------\n data : list\n List of instances with data from `KSHELL` data file as\n attributes.\n "
loadtxt_time = time.perf_counter()
all_fnames = None
data = []
if (old_or_new not in (old_or_new_allowed := ['old', 'new'])):
msg = f"'old_or_new' argument must be in {old_or_new_allowed}!"
msg += f" Got '{old_or_new}'."
raise ValueError(msg)
if (is_directory and (not os.path.isdir(path))):
msg = f'{path} is not a directory'
raise NotADirectoryError(msg)
elif ((not is_directory) and (not os.path.isfile(path))):
msg = f'{path} is not a file'
raise FileNotFoundError(msg)
elif (is_directory and os.path.isdir(path)):
msg = "The 'is_directory' option is not properly tested and is"
msg += ' deprecated at the moment. Might return in the future.'
raise DeprecationWarning(msg)
all_fnames = {}
for element in sorted(os.listdir(path)):
'\n List all content in path.\n '
if os.path.isdir((path + element)):
'\n If element is a directory, enter it to find data files.\n '
all_fnames[element] = []
for isotope in os.listdir((path + element)):
'\n List all content in the element directory.\n '
if (isotope.startswith('summary') and isotope.endswith('.txt')):
'\n Extract summary data files.\n '
try:
'\n Example: O16.\n '
n_neutrons = int(isotope[9:11])
except ValueError:
'\n Example: Ne20.\n '
n_neutrons = int(isotope[10:12])
n_neutrons -= atomic_numbers[element.split('_')[1]]
all_fnames[element].append([((element + '/') + isotope), n_neutrons])
pool = multiprocessing.Pool()
for key in all_fnames:
"\n Sort each list in the dict by the number of neutrons. Loop\n over all directories in 'all_fnames' and extract KSHELL data\n and append to a list.\n "
if (filter_ is not None):
if (key.split('_')[1] not in filter_):
'\n Skip elements not in filter_.\n '
continue
all_fnames[key].sort(key=(lambda tup: tup[1]))
sub_fnames = all_fnames[key]
arg_list = [((path + i[0]), load_and_save_to_file, old_or_new) for i in sub_fnames]
data += pool.map(_process_kshell_output_in_parallel, arg_list)
else:
'\n Only a single KSHELL data file.\n '
data.append(ReadKshellOutput(path, load_and_save_to_file, old_or_new))
if (not data):
msg = 'No KSHELL data loaded. Most likely error is that the given'
msg += f' directory has no KSHELL data files. path={path!r}'
raise RuntimeError(msg)
loadtxt_time = (time.perf_counter() - loadtxt_time)
if flags['debug']:
print(f'loadtxt_time = {loadtxt_time!r} s')
return data<|docstring|>Wrapper for using ReadKshellOutput class as a function.
TODO: Consider changing 'path' to 'fname' to be the same as
np.loadtxt.
Parameters
----------
path : str
Filename (and path) of `KSHELL` output data file, or path to
directory containing sub-directories with `KSHELL` output data.
is_directory : bool
If True, and 'path' is a directory containing sub-directories
with `KSHELL` data files, the contents of 'path' will be scanned
for `KSHELL` data files. Currently supports only summary files.
filter_ : Union[None, str]
NOTE: Shouldn't the type be list, not str?
load_and_save_to_file : Union[bool, str]
Toggle saving data as `.npy` files on / off. If 'overwrite',
saved `.npy` files are overwritten.
old_or_new : str
Choose between old and new summary file syntax. All summary
files generated pre 2021-11-24 use old style.
New:
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
Old:
J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<-
2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)
Returns
-------
data : list
List of instances with data from `KSHELL` data file as
attributes.<|endoftext|> |
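A minimal usage sketch for loadtxt, assuming the module is importable as kshell_utilities, that loadtxt is exposed at package level, and that a summary file named 'summary_Ne20_usda.txt' exists in the working directory (the filename is an example only):

import kshell_utilities as ksutil

res = ksutil.loadtxt(path='summary_Ne20_usda.txt', load_and_save_to_file=True)[0]
print(res.levels)            # energy levels read from the summary file
print(res.transitions_BM1)   # reduced M1 transition probabilities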
cb4b60a2eb3c2c1ac66cf0043f6a6b71ec3d4bd0acdf2a170aa55f8afdedbc41 | def _get_timing_data(path: str):
'\n Get timing data from KSHELL log files.\n\n Parameters\n ----------\n path : str\n Path to log file.\n\n Examples\n --------\n Last 10 lines of log_Ar30_usda_m0p.txt:\n ```\n total 20.899 2 10.44928 1.0000\n pre-process 0.029 1 0.02866 0.0014\n operate 3.202 1007 0.00318 0.1532\n re-orthog. 11.354 707 0.01606 0.5433\n thick-restart 0.214 12 0.01781 0.0102\n diag tri-mat 3.880 707 0.00549 0.1857\n misc 2.220 0.1062\n\n tmp 0.002 101 0.00002 0.0001\n ```\n '
if ('log' not in path):
msg = f"Unknown log file name! Got '{path}'"
raise KshellDataStructureError(msg)
if (not os.path.isfile(path)):
raise FileNotFoundError(path)
res = os.popen(f'tail -n 20 {path}').read()
res = res.split('\n')
total = None
if ('_tr_' not in path):
'\n KSHELL log.\n '
for elem in res:
tmp = elem.split()
try:
if (tmp[0] == 'total'):
total = float(tmp[1])
break
except IndexError:
continue
elif ('_tr_' in path):
'\n Transit log.\n '
for elem in res:
tmp = elem.split()
try:
if (tmp[0] == 'total'):
total = float(tmp[3])
break
except IndexError:
continue
if (total is None):
msg = f"Not able to extract timing data from '{path}'!"
raise KshellDataStructureError(msg)
return total | Get timing data from KSHELL log files.
Parameters
----------
path : str
Path to log file.
Examples
--------
Last 10 lines of log_Ar30_usda_m0p.txt:
```
total 20.899 2 10.44928 1.0000
pre-process 0.029 1 0.02866 0.0014
operate 3.202 1007 0.00318 0.1532
re-orthog. 11.354 707 0.01606 0.5433
thick-restart 0.214 12 0.01781 0.0102
diag tri-mat 3.880 707 0.00549 0.1857
misc 2.220 0.1062
tmp 0.002 101 0.00002 0.0001
``` | kshell_utilities/kshell_utilities.py | _get_timing_data | GaffaSnobb/kshell_utilities | 0 | python | def _get_timing_data(path: str):
'\n Get timing data from KSHELL log files.\n\n Parameters\n ----------\n path : str\n Path to log file.\n\n Examples\n --------\n Last 10 lines of log_Ar30_usda_m0p.txt:\n ```\n total 20.899 2 10.44928 1.0000\n pre-process 0.029 1 0.02866 0.0014\n operate 3.202 1007 0.00318 0.1532\n re-orthog. 11.354 707 0.01606 0.5433\n thick-restart 0.214 12 0.01781 0.0102\n diag tri-mat 3.880 707 0.00549 0.1857\n misc 2.220 0.1062\n\n tmp 0.002 101 0.00002 0.0001\n ```\n '
if ('log' not in path):
msg = f"Unknown log file name! Got '{path}'"
raise KshellDataStructureError(msg)
if (not os.path.isfile(path)):
raise FileNotFoundError(path)
res = os.popen(f'tail -n 20 {path}').read()
res = res.split('\n')
total = None
if ('_tr_' not in path):
'\n KSHELL log.\n '
for elem in res:
tmp = elem.split()
try:
if (tmp[0] == 'total'):
total = float(tmp[1])
break
except IndexError:
continue
elif ('_tr_' in path):
'\n Transit log.\n '
for elem in res:
tmp = elem.split()
try:
if (tmp[0] == 'total'):
total = float(tmp[3])
break
except IndexError:
continue
if (total is None):
msg = f"Not able to extract timing data from '{path}'!"
raise KshellDataStructureError(msg)
return total | def _get_timing_data(path: str):
'\n Get timing data from KSHELL log files.\n\n Parameters\n ----------\n path : str\n Path to log file.\n\n Examples\n --------\n Last 10 lines of log_Ar30_usda_m0p.txt:\n ```\n total 20.899 2 10.44928 1.0000\n pre-process 0.029 1 0.02866 0.0014\n operate 3.202 1007 0.00318 0.1532\n re-orthog. 11.354 707 0.01606 0.5433\n thick-restart 0.214 12 0.01781 0.0102\n diag tri-mat 3.880 707 0.00549 0.1857\n misc 2.220 0.1062\n\n tmp 0.002 101 0.00002 0.0001\n ```\n '
if ('log' not in path):
msg = f"Unknown log file name! Got '{path}'"
raise KshellDataStructureError(msg)
if (not os.path.isfile(path)):
raise FileNotFoundError(path)
res = os.popen(f'tail -n 20 {path}').read()
res = res.split('\n')
total = None
if ('_tr_' not in path):
'\n KSHELL log.\n '
for elem in res:
tmp = elem.split()
try:
if (tmp[0] == 'total'):
total = float(tmp[1])
break
except IndexError:
continue
elif ('_tr_' in path):
'\n Transit log.\n '
for elem in res:
tmp = elem.split()
try:
if (tmp[0] == 'total'):
total = float(tmp[3])
break
except IndexError:
continue
if (total is None):
msg = f"Not able to extract timing data from '{path}'!"
raise KshellDataStructureError(msg)
return total<|docstring|>Get timing data from KSHELL log files.
Parameters
----------
path : str
Path to log file.
Examples
--------
Last 10 lines of log_Ar30_usda_m0p.txt:
```
total 20.899 2 10.44928 1.0000
pre-process 0.029 1 0.02866 0.0014
operate 3.202 1007 0.00318 0.1532
re-orthog. 11.354 707 0.01606 0.5433
thick-restart 0.214 12 0.01781 0.0102
diag tri-mat 3.880 707 0.00549 0.1857
misc 2.220 0.1062
tmp 0.002 101 0.00002 0.0001
```<|endoftext|> |
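The timing extraction above is purely positional: in a KSHELL log the wall time sits in the second column of the 'total' line, and in a transit log in the fourth. A tiny sketch using the 'total' row from the example log tail in the docstring:

line = 'total 20.899 2 10.44928 1.0000'
tmp = line.split()
if tmp[0] == 'total':
    total_seconds = float(tmp[1])   # 20.899 s for a KSHELL (non-transit) log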
6b8dc4df1920697583beda577262f9a1be8530ffa1e8716b34b89072080a8a66 | def _get_memory_usage(path: str) -> Union[(float, None)]:
'\n Get memory usage from KSHELL log files.\n\n Parameters\n ----------\n path : str\n Path to a single log file.\n\n Returns\n -------\n total : float, None\n Memory usage in GB or None if memory usage could not be read.\n '
total = None
if ('tr' not in path):
'\n KSHELL log.\n '
with open(path, 'r') as infile:
for line in infile:
if line.startswith('Total Memory for Lanczos vectors:'):
try:
total = float(line.split()[(- 2)])
except ValueError:
msg = f"Error reading memory usage from '{path}'."
msg += f" Got '{line.split()[(- 2)]}'."
raise KshellDataStructureError(msg)
break
elif ('tr' in path):
'\n Transit log. NOTE: Not yet implemented.\n '
return 0
if (total is None):
msg = f"Not able to extract memory data from '{path.split('/')[(- 1)]}'!"
raise KshellDataStructureError(msg)
return total | Get memory usage from KSHELL log files.
Parameters
----------
path : str
Path to a single log file.
Returns
-------
total : float, None
Memory usage in GB or None if memory usage could not be read. | kshell_utilities/kshell_utilities.py | _get_memory_usage | GaffaSnobb/kshell_utilities | 0 | python | def _get_memory_usage(path: str) -> Union[(float, None)]:
'\n Get memory usage from KSHELL log files.\n\n Parameters\n ----------\n path : str\n Path to a single log file.\n\n Returns\n -------\n total : float, None\n Memory usage in GB or None if memory usage could not be read.\n '
total = None
if ('tr' not in path):
'\n KSHELL log.\n '
with open(path, 'r') as infile:
for line in infile:
if line.startswith('Total Memory for Lanczos vectors:'):
try:
total = float(line.split()[(- 2)])
except ValueError:
msg = f"Error reading memory usage from '{path}'."
msg += f" Got '{line.split()[(- 2)]}'."
raise KshellDataStructureError(msg)
break
elif ('tr' in path):
'\n Transit log. NOTE: Not yet implemented.\n '
return 0
if (total is None):
msg = f"Not able to extract memory data from '{path.split('/')[(- 1)]}'!"
raise KshellDataStructureError(msg)
return total | def _get_memory_usage(path: str) -> Union[(float, None)]:
'\n Get memory usage from KSHELL log files.\n\n Parameters\n ----------\n path : str\n Path to a single log file.\n\n Returns\n -------\n total : float, None\n Memory usage in GB or None if memory usage could not be read.\n '
total = None
if ('tr' not in path):
'\n KSHELL log.\n '
with open(path, 'r') as infile:
for line in infile:
if line.startswith('Total Memory for Lanczos vectors:'):
try:
total = float(line.split()[(- 2)])
except ValueError:
msg = f"Error reading memory usage from '{path}'."
msg += f" Got '{line.split()[(- 2)]}'."
raise KshellDataStructureError(msg)
break
elif ('tr' in path):
'\n Transit log. NOTE: Not yet implemented.\n '
return 0
if (total is None):
msg = f"Not able to extract memory data from '{path.split('/')[(- 1)]}'!"
raise KshellDataStructureError(msg)
return total<|docstring|>Get memory usage from KSHELL log files.
Parameters
----------
path : str
Path to a single log file.
Returns
-------
total : float, None
Memory usage in GB or None if memory usage could not be read.<|endoftext|> |
8ea1cb6a69e9161ec44e3a810d071847786c426834150cded5eb18fe15767fd4 | def _sortkey(filename):
"\n Key for sorting filenames based on angular momentum and parity.\n Example filename: 'log_Sc44_GCLSTsdpfsdgix5pn_j0n.txt'\n (angular momentum = 0). \n "
tmp = filename.split('_')[(- 1)]
tmp = tmp.split('.')[0]
spin = int(tmp[1:(- 1)])
return spin | Key for sorting filenames based on angular momentum and parity.
Example filename: 'log_Sc44_GCLSTsdpfsdgix5pn_j0n.txt'
(angular momentum = 0). | kshell_utilities/kshell_utilities.py | _sortkey | GaffaSnobb/kshell_utilities | 0 | python | def _sortkey(filename):
"\n Key for sorting filenames based on angular momentum and parity.\n Example filename: 'log_Sc44_GCLSTsdpfsdgix5pn_j0n.txt'\n (angular momentum = 0). \n "
tmp = filename.split('_')[(- 1)]
tmp = tmp.split('.')[0]
spin = int(tmp[1:(- 1)])
return spin | def _sortkey(filename):
"\n Key for sorting filenames based on angular momentum and parity.\n Example filename: 'log_Sc44_GCLSTsdpfsdgix5pn_j0n.txt'\n (angular momentum = 0). \n "
tmp = filename.split('_')[(- 1)]
tmp = tmp.split('.')[0]
spin = int(tmp[1:(- 1)])
return spin<|docstring|>Key for sorting filenames based on angular momentum and parity.
Example filename: 'log_Sc44_GCLSTsdpfsdgix5pn_j0n.txt'
(angular momentum = 0).<|endoftext|> |
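_sortkey only inspects the trailing 'j<2J><parity>' part of the filename, so the returned integer is the doubled angular momentum (it is divided by 2 when used as a plot label in _get_data_general below). For example (the second filename is hypothetical):

print(_sortkey('log_Sc44_GCLSTsdpfsdgix5pn_j0n.txt'))  # 0, the docstring example
print(_sortkey('log_Sc44_GCLSTsdpfsdgix5pn_j8p.txt'))  # 8, i.e. 2*J = 8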
ba8242dfd9500e601685a84440118dfd564d12e998e4acafcfefa102faf114df | def _get_data_general(path: str, func: Callable, plot: bool):
'\n General input handling for timing data and memory data.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n func : Callable\n _get_timing_data or _get_memory_usage.\n '
total_negative = []
total_positive = []
filenames_negative = []
filenames_positive = []
if os.path.isfile(path):
return func(path)
elif os.path.isdir(path):
for elem in os.listdir(path):
'\n Select only log files in path.\n '
tmp = elem.split('_')
try:
if (((tmp[0] == 'log') or (tmp[1] == 'log')) and elem.endswith('.txt')):
tmp = tmp[(- 1)].split('.')
parity = tmp[0][(- 1)]
if (parity == 'n'):
filenames_negative.append(elem)
elif (parity == 'p'):
filenames_positive.append(elem)
except IndexError:
continue
filenames_negative.sort(key=_sortkey)
filenames_positive.sort(key=_sortkey)
for elem in filenames_negative:
total_negative.append(func(f'{path}/{elem}'))
for elem in filenames_positive:
total_positive.append(func(f'{path}/{elem}'))
if plot:
xticks_negative = (['sum'] + [str(Fraction((_sortkey(i) / 2))) for i in filenames_negative])
xticks_positive = (['sum'] + [str(Fraction((_sortkey(i) / 2))) for i in filenames_positive])
sum_total_negative = sum(total_negative)
sum_total_positive = sum(total_positive)
(fig0, ax0) = plt.subplots(ncols=1, nrows=2)
(fig1, ax1) = plt.subplots(ncols=1, nrows=2)
bars = ax0[0].bar(xticks_negative, ([((sum_total_negative / 60) / 60)] + [((i / 60) / 60) for i in total_negative]), color='black')
ax0[0].set_title('negative')
for rect in bars:
height = rect.get_height()
ax0[0].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax1[0].bar(xticks_negative, ([(sum_total_negative / sum_total_negative)] + [(i / sum_total_negative) for i in total_negative]), color='black')
ax1[0].set_title('negative')
for rect in bars:
height = rect.get_height()
ax1[0].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax0[1].bar(xticks_positive, ([((sum_total_positive / 60) / 60)] + [((i / 60) / 60) for i in total_positive]), color='black')
ax0[1].set_title('positive')
for rect in bars:
height = rect.get_height()
ax0[1].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax1[1].bar(xticks_positive, ([(sum_total_positive / sum_total_positive)] + [(i / sum_total_positive) for i in total_positive]), color='black')
ax1[1].set_title('positive')
for rect in bars:
height = rect.get_height()
ax1[1].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
fig0.text(x=0.02, y=0.5, s='Time [h]', rotation='vertical')
fig0.text(x=0.5, y=0.02, s='Angular momentum')
fig1.text(x=0.02, y=0.5, s='Norm. time', rotation='vertical')
fig1.text(x=0.5, y=0.02, s='Angular momentum')
plt.show()
return (sum(total_negative) + sum(total_positive))
else:
msg = f"'{path}' is neither a file nor a directory!"
raise FileNotFoundError(msg) | General input handling for timing data and memory data.
Parameters
----------
path : str
Path to a single log file or path to a directory of log files.
func : Callable
_get_timing_data or _get_memory_usage. | kshell_utilities/kshell_utilities.py | _get_data_general | GaffaSnobb/kshell_utilities | 0 | python | def _get_data_general(path: str, func: Callable, plot: bool):
'\n General input handling for timing data and memory data.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n func : Callable\n _get_timing_data or _get_memory_usage.\n '
total_negative = []
total_positive = []
filenames_negative = []
filenames_positive = []
if os.path.isfile(path):
return func(path)
elif os.path.isdir(path):
for elem in os.listdir(path):
'\n Select only log files in path.\n '
tmp = elem.split('_')
try:
if (((tmp[0] == 'log') or (tmp[1] == 'log')) and elem.endswith('.txt')):
tmp = tmp[(- 1)].split('.')
parity = tmp[0][(- 1)]
if (parity == 'n'):
filenames_negative.append(elem)
elif (parity == 'p'):
filenames_positive.append(elem)
except IndexError:
continue
filenames_negative.sort(key=_sortkey)
filenames_positive.sort(key=_sortkey)
for elem in filenames_negative:
total_negative.append(func(f'{path}/{elem}'))
for elem in filenames_positive:
total_positive.append(func(f'{path}/{elem}'))
if plot:
xticks_negative = (['sum'] + [str(Fraction((_sortkey(i) / 2))) for i in filenames_negative])
xticks_positive = (['sum'] + [str(Fraction((_sortkey(i) / 2))) for i in filenames_positive])
sum_total_negative = sum(total_negative)
sum_total_positive = sum(total_positive)
(fig0, ax0) = plt.subplots(ncols=1, nrows=2)
(fig1, ax1) = plt.subplots(ncols=1, nrows=2)
bars = ax0[0].bar(xticks_negative, ([((sum_total_negative / 60) / 60)] + [((i / 60) / 60) for i in total_negative]), color='black')
ax0[0].set_title('negative')
for rect in bars:
height = rect.get_height()
ax0[0].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax1[0].bar(xticks_negative, ([(sum_total_negative / sum_total_negative)] + [(i / sum_total_negative) for i in total_negative]), color='black')
ax1[0].set_title('negative')
for rect in bars:
height = rect.get_height()
ax1[0].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax0[1].bar(xticks_positive, ([((sum_total_positive / 60) / 60)] + [((i / 60) / 60) for i in total_positive]), color='black')
ax0[1].set_title('positive')
for rect in bars:
height = rect.get_height()
ax0[1].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax1[1].bar(xticks_positive, ([(sum_total_positive / sum_total_positive)] + [(i / sum_total_positive) for i in total_positive]), color='black')
ax1[1].set_title('positive')
for rect in bars:
height = rect.get_height()
ax1[1].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
fig0.text(x=0.02, y=0.5, s='Time [h]', rotation='vertical')
fig0.text(x=0.5, y=0.02, s='Angular momentum')
fig1.text(x=0.02, y=0.5, s='Norm. time', rotation='vertical')
fig1.text(x=0.5, y=0.02, s='Angular momentum')
plt.show()
return (sum(total_negative) + sum(total_positive))
else:
msg = f"'{path}' is neither a file nor a directory!"
raise FileNotFoundError(msg) | def _get_data_general(path: str, func: Callable, plot: bool):
'\n General input handling for timing data and memory data.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n func : Callable\n _get_timing_data or _get_memory_usage.\n '
total_negative = []
total_positive = []
filenames_negative = []
filenames_positive = []
if os.path.isfile(path):
return func(path)
elif os.path.isdir(path):
for elem in os.listdir(path):
'\n Select only log files in path.\n '
tmp = elem.split('_')
try:
if (((tmp[0] == 'log') or (tmp[1] == 'log')) and elem.endswith('.txt')):
tmp = tmp[(- 1)].split('.')
parity = tmp[0][(- 1)]
if (parity == 'n'):
filenames_negative.append(elem)
elif (parity == 'p'):
filenames_positive.append(elem)
except IndexError:
continue
filenames_negative.sort(key=_sortkey)
filenames_positive.sort(key=_sortkey)
for elem in filenames_negative:
total_negative.append(func(f'{path}/{elem}'))
for elem in filenames_positive:
total_positive.append(func(f'{path}/{elem}'))
if plot:
xticks_negative = (['sum'] + [str(Fraction((_sortkey(i) / 2))) for i in filenames_negative])
xticks_positive = (['sum'] + [str(Fraction((_sortkey(i) / 2))) for i in filenames_positive])
sum_total_negative = sum(total_negative)
sum_total_positive = sum(total_positive)
(fig0, ax0) = plt.subplots(ncols=1, nrows=2)
(fig1, ax1) = plt.subplots(ncols=1, nrows=2)
bars = ax0[0].bar(xticks_negative, ([((sum_total_negative / 60) / 60)] + [((i / 60) / 60) for i in total_negative]), color='black')
ax0[0].set_title('negative')
for rect in bars:
height = rect.get_height()
ax0[0].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax1[0].bar(xticks_negative, ([(sum_total_negative / sum_total_negative)] + [(i / sum_total_negative) for i in total_negative]), color='black')
ax1[0].set_title('negative')
for rect in bars:
height = rect.get_height()
ax1[0].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax0[1].bar(xticks_positive, ([((sum_total_positive / 60) / 60)] + [((i / 60) / 60) for i in total_positive]), color='black')
ax0[1].set_title('positive')
for rect in bars:
height = rect.get_height()
ax0[1].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
bars = ax1[1].bar(xticks_positive, ([(sum_total_positive / sum_total_positive)] + [(i / sum_total_positive) for i in total_positive]), color='black')
ax1[1].set_title('positive')
for rect in bars:
height = rect.get_height()
ax1[1].text(x=(rect.get_x() + (rect.get_width() / 2.0)), y=height, s=f'{height:.3f}', ha='center', va='bottom')
fig0.text(x=0.02, y=0.5, s='Time [h]', rotation='vertical')
fig0.text(x=0.5, y=0.02, s='Angular momentum')
fig1.text(x=0.02, y=0.5, s='Norm. time', rotation='vertical')
fig1.text(x=0.5, y=0.02, s='Angular momentum')
plt.show()
return (sum(total_negative) + sum(total_positive))
else:
msg = f"'{path}' is neither a file nor a directory!"
raise FileNotFoundError(msg)<|docstring|>General input handling for timing data and memory data.
Parameters
----------
path : str
Path to a single log file or path to a directory of log files.
func : Callable
_get_timing_data or _get_memory_usage.<|endoftext|> |
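_get_data_general groups the log files by the parity letter just before '.txt' and sorts each group with _sortkey before summing. A stripped-down sketch of that classification, using filenames of the form shown in the docstrings (the list itself is illustrative):

filenames = ['log_Ar30_usda_m0p.txt', 'log_Sc44_GCLSTsdpfsdgix5pn_j0n.txt']
negative, positive = [], []
for name in filenames:
    parity = name.split('_')[-1].split('.')[0][-1]   # 'n' or 'p'
    (negative if parity == 'n' else positive).append(name)
negative.sort(key=_sortkey)
positive.sort(key=_sortkey)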
fbb2a920d8588f3ea9552a22b2f3414a4aa22707df026c2f2ba462add385fa17 | def get_timing_data(path: str, plot: bool=False) -> float:
'\n Wrapper for _get_timing_data. Input a single log filename and get\n the timing data. Input a path to a directory several log files and\n get the summed timing data. In units of seconds.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n Returns\n -------\n : float\n The summed times for all input log files.\n '
return _get_data_general(path, _get_timing_data, plot) | Wrapper for _get_timing_data. Input a single log filename and get
the timing data. Input a path to a directory of several log files and
get the summed timing data. In units of seconds.
Parameters
----------
path : str
Path to a single log file or path to a directory of log files.
Returns
-------
: float
The summed times for all input log files. | kshell_utilities/kshell_utilities.py | get_timing_data | GaffaSnobb/kshell_utilities | 0 | python | def get_timing_data(path: str, plot: bool=False) -> float:
'\n Wrapper for _get_timing_data. Input a single log filename and get\n the timing data. Input a path to a directory several log files and\n get the summed timing data. In units of seconds.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n Returns\n -------\n : float\n The summed times for all input log files.\n '
return _get_data_general(path, _get_timing_data, plot) | def get_timing_data(path: str, plot: bool=False) -> float:
'\n Wrapper for _get_timing_data. Input a single log filename and get\n the timing data. Input a path to a directory several log files and\n get the summed timing data. In units of seconds.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n Returns\n -------\n : float\n The summed times for all input log files.\n '
return _get_data_general(path, _get_timing_data, plot)<|docstring|>Wrapper for _get_timing_data. Input a single log filename and get
the timing data. Input a path to a directory of several log files and
get the summed timing data. In units of seconds.
Parameters
----------
path : str
Path to a single log file or path to a directory of log files.
Returns
-------
: float
The summed times for all input log files.<|endoftext|> |
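Typical use of the public wrapper, assuming a directory containing KSHELL log files (the directory name is an example only):

total_seconds = get_timing_data('Ar30_usda_logs', plot=False)
print(f'total wall time: {total_seconds / 3600:.2f} h')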
c04a3f8375c4587a4eff2776c61ebdd23741a5b10782b180dcbd4af8dfb8a189 | def get_memory_usage(path: str) -> float:
'\n Wrapper for _get_memory_usage. Input a single log filename and get\n the memory data. Input a path to a directory several log files and\n get the summed memory data. In units of GB.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n Returns\n -------\n : float\n The summed memory usage for all input log files.\n '
return _get_data_general(path, _get_memory_usage, False) | Wrapper for _get_memory_usage. Input a single log filename and get
the memory data. Input a path to a directory of several log files and
get the summed memory data. In units of GB.
Parameters
----------
path : str
Path to a single log file or path to a directory of log files.
Returns
-------
: float
The summed memory usage for all input log files. | kshell_utilities/kshell_utilities.py | get_memory_usage | GaffaSnobb/kshell_utilities | 0 | python | def get_memory_usage(path: str) -> float:
'\n Wrapper for _get_memory_usage. Input a single log filename and get\n the memory data. Input a path to a directory several log files and\n get the summed memory data. In units of GB.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n Returns\n -------\n : float\n The summed memory usage for all input log files.\n '
return _get_data_general(path, _get_memory_usage, False) | def get_memory_usage(path: str) -> float:
'\n Wrapper for _get_memory_usage. Input a single log filename and get\n the memory data. Input a path to a directory several log files and\n get the summed memory data. In units of GB.\n\n Parameters\n ----------\n path : str\n Path to a single log file or path to a directory of log files.\n\n Returns\n -------\n : float\n The summed memory usage for all input log files.\n '
return _get_data_general(path, _get_memory_usage, False)<|docstring|>Wrapper for _get_memory_usage. Input a single log filename and get
the memory data. Input a path to a directory of several log files and
get the summed memory data. In units of GB.
Parameters
----------
path : str
Path to a single log file or path to a directory of log files.
Returns
-------
: float
The summed memory usage for all input log files.<|endoftext|> |
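The memory wrapper is used the same way and returns GB (same hypothetical directory as above):

total_gb = get_memory_usage('Ar30_usda_logs')
print(f'Lanczos vector memory: {total_gb:.2f} GB')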
f5086febe6eeed1703cfaff6e280e1d7b8bf32fac04793e80f38732e7bb8daaf | def get_parameters(path: str, verbose: bool=True) -> dict:
'\n Extract the parameters which are fed to KSHELL throught the shell\n script.\n\n Parameters\n ----------\n path : str\n Path to a KSHELL work directory.\n\n Returns\n -------\n res : dict\n A dictionary where the keys are the parameter names and the\n values are the corresponding values.\n '
res = {}
shell_filename = None
if os.path.isdir(path):
for elem in os.listdir(path):
if elem.endswith('.sh'):
shell_filename = f'{path}/{elem}'
break
else:
print('Directly specifying path to .sh file not yet implemented!')
if (shell_filename is None):
if verbose:
msg = f"No .sh file found in path '{path}'!"
print(msg)
return res
with open(shell_filename, 'r') as infile:
for line in infile:
if line.startswith('&input'):
break
for line in infile:
if line.startswith('&end'):
'\n End of parameters.\n '
break
tmp = line.split('=')
key = tmp[0].strip()
value = tmp[1].strip()
try:
value = ast.literal_eval(value)
except ValueError:
'\n Cannot convert strings. Keep them as strings.\n '
pass
except SyntaxError:
'\n Cannot convert Fortran booleans (.true., .false.). Keep\n them as strings.\n '
pass
res[key] = value
return res | Extract the parameters which are fed to KSHELL through the shell
script.
Parameters
----------
path : str
Path to a KSHELL work directory.
Returns
-------
res : dict
A dictionary where the keys are the parameter names and the
values are the corresponding values. | kshell_utilities/kshell_utilities.py | get_parameters | GaffaSnobb/kshell_utilities | 0 | python | def get_parameters(path: str, verbose: bool=True) -> dict:
'\n Extract the parameters which are fed to KSHELL throught the shell\n script.\n\n Parameters\n ----------\n path : str\n Path to a KSHELL work directory.\n\n Returns\n -------\n res : dict\n A dictionary where the keys are the parameter names and the\n values are the corresponding values.\n '
res = {}
shell_filename = None
if os.path.isdir(path):
for elem in os.listdir(path):
if elem.endswith('.sh'):
shell_filename = f'{path}/{elem}'
break
else:
print('Directly specifying path to .sh file not yet implemented!')
if (shell_filename is None):
if verbose:
msg = f"No .sh file found in path '{path}'!"
print(msg)
return res
with open(shell_filename, 'r') as infile:
for line in infile:
if line.startswith('&input'):
break
for line in infile:
if line.startswith('&end'):
'\n End of parameters.\n '
break
tmp = line.split('=')
key = tmp[0].strip()
value = tmp[1].strip()
try:
value = ast.literal_eval(value)
except ValueError:
'\n Cannot convert strings. Keep them as strings.\n '
pass
except SyntaxError:
'\n Cannot convert Fortran booleans (.true., .false.). Keep\n them as strings.\n '
pass
res[key] = value
return res | def get_parameters(path: str, verbose: bool=True) -> dict:
'\n Extract the parameters which are fed to KSHELL throught the shell\n script.\n\n Parameters\n ----------\n path : str\n Path to a KSHELL work directory.\n\n Returns\n -------\n res : dict\n A dictionary where the keys are the parameter names and the\n values are the corresponding values.\n '
res = {}
shell_filename = None
if os.path.isdir(path):
for elem in os.listdir(path):
if elem.endswith('.sh'):
shell_filename = f'{path}/{elem}'
break
else:
print('Directly specifying path to .sh file not yet implemented!')
if (shell_filename is None):
if verbose:
msg = f"No .sh file found in path '{path}'!"
print(msg)
return res
with open(shell_filename, 'r') as infile:
for line in infile:
if line.startswith('&input'):
break
for line in infile:
if line.startswith('&end'):
'\n End of parameters.\n '
break
tmp = line.split('=')
key = tmp[0].strip()
value = tmp[1].strip()
try:
value = ast.literal_eval(value)
except ValueError:
'\n Cannot convert strings. Keep them as strings.\n '
pass
except SyntaxError:
'\n Cannot convert Fortran booleans (.true., .false.). Keep\n them as strings.\n '
pass
res[key] = value
return res<|docstring|>Extract the parameters which are fed to KSHELL through the shell
script.
Parameters
----------
path : str
Path to a KSHELL work directory.
Returns
-------
res : dict
A dictionary where the keys are the parameter names and the
values are the corresponding values.<|endoftext|> |
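get_parameters returns the '&input ... &end' block of the run script as a dict; a usage sketch, assuming a KSHELL work directory containing a single .sh file (the directory name is an example, and the available keys depend on the shell script):

params = get_parameters('Ar30_usda_work')
for key, value in params.items():
    print(key, value, type(value))
# Numeric values come back as int/float via ast.literal_eval, while
# Fortran booleans such as '.true.' are kept as strings.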
6e78a97d5f9eab3e7e197dd81e6f50c74ff4b9040d9a9dc82ff52f1ee61840bc | def __init__(self, path: str, load_and_save_to_file: bool, old_or_new: str):
'\n Parameters\n ----------\n path : string\n Path of `KSHELL` output file directory, or path to a\n specific `KSHELL` data file.\n\n load_and_save_to_file : bool\n Toggle saving data as `.npy` files on / off. If `overwrite`,\n saved `.npy` files are overwritten.\n\n old_or_new : str\n Choose between old and new summary file syntax. All summary\n files generated pre 2021-11-24 use old style.\n New:\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n Old:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n '
self.path = path
self.load_and_save_to_file = load_and_save_to_file
self.old_or_new = old_or_new
self.fname_summary = None
self.fname_ptn = None
self.nucleus = None
self.model_space = None
self.proton_partition = None
self.neutron_partition = None
self.levels = None
self.transitions_BM1 = [None]
self.transitions_BE2 = [None]
self.transitions_BE1 = [None]
self.truncation = None
self.negative_spin_counts = np.array([0, 0, 0, 0])
if (isinstance(self.load_and_save_to_file, str) and (self.load_and_save_to_file != 'overwrite')):
msg = "Allowed values for 'load_and_save_to_file' are: 'True', 'False', 'overwrite'."
msg += f" Got '{self.load_and_save_to_file}'."
raise ValueError(msg)
if os.path.isdir(path):
"\n If input 'path' is a directory containing KSHELL files,\n extract info from both summary and .ptn file.\n "
for elem in os.listdir(path):
if elem.startswith('summary'):
self.fname_summary = f'{path}/{elem}'
self._extract_info_from_summary_fname()
self._read_summary()
elif elem.endswith('.ptn'):
self.fname_ptn = f'{path}/{elem}'
self._extract_info_from_ptn_fname()
self._read_ptn()
else:
"\n 'path' is a single file, not a directory.\n "
fname = path.split('/')[(- 1)]
if fname.startswith('summary'):
self.fname_summary = path
self._extract_info_from_summary_fname()
self._read_summary()
elif fname.endswith('.ptn'):
self.fname_ptn = path
self._extract_info_from_ptn_fname()
self._read_ptn()
else:
msg = f'Handling for file {fname} is not implemented.'
raise KshellDataStructureError(msg) | Parameters
----------
path : string
Path of `KSHELL` output file directory, or path to a
specific `KSHELL` data file.
load_and_save_to_file : bool
Toggle saving data as `.npy` files on / off. If `overwrite`,
saved `.npy` files are overwritten.
old_or_new : str
Choose between old and new summary file syntax. All summary
files generated pre 2021-11-24 use old style.
New:
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
Old:
J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<-
2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0) | kshell_utilities/kshell_utilities.py | __init__ | GaffaSnobb/kshell_utilities | 0 | python | def __init__(self, path: str, load_and_save_to_file: bool, old_or_new: str):
'\n Parameters\n ----------\n path : string\n Path of `KSHELL` output file directory, or path to a\n specific `KSHELL` data file.\n\n load_and_save_to_file : bool\n Toggle saving data as `.npy` files on / off. If `overwrite`,\n saved `.npy` files are overwritten.\n\n old_or_new : str\n Choose between old and new summary file syntax. All summary\n files generated pre 2021-11-24 use old style.\n New:\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n Old:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n '
self.path = path
self.load_and_save_to_file = load_and_save_to_file
self.old_or_new = old_or_new
self.fname_summary = None
self.fname_ptn = None
self.nucleus = None
self.model_space = None
self.proton_partition = None
self.neutron_partition = None
self.levels = None
self.transitions_BM1 = [None]
self.transitions_BE2 = [None]
self.transitions_BE1 = [None]
self.truncation = None
self.negative_spin_counts = np.array([0, 0, 0, 0])
if (isinstance(self.load_and_save_to_file, str) and (self.load_and_save_to_file != 'overwrite')):
msg = "Allowed values for 'load_and_save_to_file' are: 'True', 'False', 'overwrite'."
msg += f" Got '{self.load_and_save_to_file}'."
raise ValueError(msg)
if os.path.isdir(path):
"\n If input 'path' is a directory containing KSHELL files,\n extract info from both summary and .ptn file.\n "
for elem in os.listdir(path):
if elem.startswith('summary'):
self.fname_summary = f'{path}/{elem}'
self._extract_info_from_summary_fname()
self._read_summary()
elif elem.endswith('.ptn'):
self.fname_ptn = f'{path}/{elem}'
self._extract_info_from_ptn_fname()
self._read_ptn()
else:
"\n 'path' is a single file, not a directory.\n "
fname = path.split('/')[(- 1)]
if fname.startswith('summary'):
self.fname_summary = path
self._extract_info_from_summary_fname()
self._read_summary()
elif fname.endswith('.ptn'):
self.fname_ptn = path
self._extract_info_from_ptn_fname()
self._read_ptn()
else:
msg = f'Handling for file {fname} is not implemented.'
raise KshellDataStructureError(msg) | def __init__(self, path: str, load_and_save_to_file: bool, old_or_new: str):
'\n Parameters\n ----------\n path : string\n Path of `KSHELL` output file directory, or path to a\n specific `KSHELL` data file.\n\n load_and_save_to_file : bool\n Toggle saving data as `.npy` files on / off. If `overwrite`,\n saved `.npy` files are overwritten.\n\n old_or_new : str\n Choose between old and new summary file syntax. All summary\n files generated pre 2021-11-24 use old style.\n New:\n J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]\n 5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066\n Old:\n J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<- \n 2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)\n '
self.path = path
self.load_and_save_to_file = load_and_save_to_file
self.old_or_new = old_or_new
self.fname_summary = None
self.fname_ptn = None
self.nucleus = None
self.model_space = None
self.proton_partition = None
self.neutron_partition = None
self.levels = None
self.transitions_BM1 = [None]
self.transitions_BE2 = [None]
self.transitions_BE1 = [None]
self.truncation = None
self.negative_spin_counts = np.array([0, 0, 0, 0])
if (isinstance(self.load_and_save_to_file, str) and (self.load_and_save_to_file != 'overwrite')):
msg = "Allowed values for 'load_and_save_to_file' are: 'True', 'False', 'overwrite'."
msg += f" Got '{self.load_and_save_to_file}'."
raise ValueError(msg)
if os.path.isdir(path):
"\n If input 'path' is a directory containing KSHELL files,\n extract info from both summary and .ptn file.\n "
for elem in os.listdir(path):
if elem.startswith('summary'):
self.fname_summary = f'{path}/{elem}'
self._extract_info_from_summary_fname()
self._read_summary()
elif elem.endswith('.ptn'):
self.fname_ptn = f'{path}/{elem}'
self._extract_info_from_ptn_fname()
self._read_ptn()
else:
"\n 'path' is a single file, not a directory.\n "
fname = path.split('/')[(- 1)]
if fname.startswith('summary'):
self.fname_summary = path
self._extract_info_from_summary_fname()
self._read_summary()
elif fname.endswith('.ptn'):
self.fname_ptn = path
self._extract_info_from_ptn_fname()
self._read_ptn()
else:
msg = f'Handling for file {fname} is not implemented.'
raise KshellDataStructureError(msg)<|docstring|>Parameters
----------
path : string
Path of `KSHELL` output file directory, or path to a
specific `KSHELL` data file.
load_and_save_to_file : bool
Toggle saving data as `.npy` files on / off. If `overwrite`,
saved `.npy` files are overwritten.
old_or_new : str
Choose between old and new summary file syntax. All summary
files generated pre 2021-11-24 use old style.
New:
J_i pi_i idx_i Ex_i J_f pi_f idx_f Ex_f dE B(E2)-> B(E2)->[wu] B(E2)<- B(E2)<-[wu]
5 + 1 0.036 6 + 1 0.000 0.036 70.43477980 6.43689168 59.59865983 5.44660066
Old:
J_i Ex_i J_f Ex_f dE B(M1)-> B(M1)<-
2+(11) 18.393 2+(10) 17.791 0.602 0.1( 0.0) 0.1( 0.0)<|endoftext|> |
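The record above is a constructor, but the owning class is not named in it; the sketch below therefore uses a stand-in class name and a placeholder path purely to illustrate the three arguments.

from kshell_utilities.kshell_utilities import ReadKshellOutput  # class name assumed, not shown in the record

res = ReadKshellOutput(
    path="runs/V50_gxpf1a",        # directory with a summary*.txt and a *.ptn file, or a single file
    load_and_save_to_file=True,    # cache parsed arrays as .npy; "overwrite" forces re-parsing
    old_or_new="new",              # summary syntax for files generated after 2021-11-24
)
print(res.nucleus, res.model_space)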
e6096f3b027338be86453547e75ed97fcf382db5bfe88a83580b440398d17a30 | def _extract_info_from_ptn_fname(self):
'\n Extract nucleus and model space name.\n '
fname_split = self.fname_ptn.split('/')[(- 1)]
fname_split = fname_split.split('_')
self.nucleus = fname_split[0]
self.model_space = fname_split[1] | Extract nucleus and model space name. | kshell_utilities/kshell_utilities.py | _extract_info_from_ptn_fname | GaffaSnobb/kshell_utilities | 0 | python | def _extract_info_from_ptn_fname(self):
'\n \n '
fname_split = self.fname_ptn.split('/')[(- 1)]
fname_split = fname_split.split('_')
self.nucleus = fname_split[0]
self.model_space = fname_split[1] | def _extract_info_from_ptn_fname(self):
'\n \n '
fname_split = self.fname_ptn.split('/')[(- 1)]
fname_split = fname_split.split('_')
self.nucleus = fname_split[0]
self.model_space = fname_split[1]<|docstring|>Extract nucleus and model space name.<|endoftext|> |
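The extraction is a plain filename split; the standalone sketch below applies the same two steps to an assumed example partition filename of the form <nucleus>_<model_space>_<suffix>.ptn.

fname_ptn = "runs/Ni56_gxpf1a_p.ptn"          # hypothetical example filename
fname_split = fname_ptn.split('/')[-1].split('_')
nucleus = fname_split[0]                       # "Ni56"
model_space = fname_split[1]                   # "gxpf1a"
print(nucleus, model_space)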
91e286a4c19e78a392c427af0d7250743b267001157b6fcd32ef30fc3882f2cc | def _read_ptn(self):
'\n Read `KSHELL` partition file (.ptn) and extract proton\n partition, neutron partition, and particle-hole truncation data.\n Save as instance attributes.\n '
line_number = 0
line_number_inner = 0
self.truncation = []
with open(self.fname_ptn, 'r') as infile:
for line in infile:
line_number += 1
if line.startswith('# proton partition'):
for line_inner in infile:
"\n Read until next '#'.\n "
line_number_inner += 1
if line_inner.startswith('#'):
line = line_inner
break
self.proton_partition = np.loadtxt(fname=self.fname_ptn, skiprows=line_number, max_rows=line_number_inner)
line_number += line_number_inner
line_number_inner = 0
if line.startswith('# neutron partition'):
for line_inner in infile:
"\n Read until next '#'.\n "
line_number_inner += 1
if line_inner.startswith('#'):
line = line_inner
break
self.neutron_partition = np.loadtxt(fname=self.fname_ptn, skiprows=line_number, max_rows=line_number_inner)
line_number += line_number_inner
line_number_inner = 0
if line.startswith('# particle-hole truncation'):
for line_inner in infile:
'\n Loop over all particle-hole truncation lines.\n '
line_number += 1
line_inner_split = line_inner.split()
if (len(line_inner_split) < 2):
'\n Condition will probably not get fulfilled.\n Safety precaution due to indexing in this\n loop.\n '
break
if line_inner_split[1].startswith('['):
"\n '[' indicates that 'line_inner' is still\n containing truncation information.\n "
for (colon_index, elem) in enumerate(line_inner_split):
"\n Find the index of the colon ':' to\n decide the orbit numbers and occupation\n numbers.\n "
if (elem == ':'):
break
occupation = [int(occ) for occ in line_inner_split[(colon_index + 1):]]
orbit_numbers = ''.join(line_inner_split[1:colon_index])
orbit_numbers = orbit_numbers.replace('[', '')
orbit_numbers = orbit_numbers.replace(']', '')
orbit_numbers = orbit_numbers.replace(' ', '')
orbit_numbers = orbit_numbers.split(',')
orbit_numbers = [int(orbit) for orbit in orbit_numbers]
for orbit in orbit_numbers:
self.truncation.append((orbit, occupation))
else:
"\n Line does not contain '[' and thus does not\n contain truncation information.\n "
break | Read `KSHELL` partition file (.ptn) and extract proton
partition, neutron partition, and particle-hole truncation data.
Save as instance attributes. | kshell_utilities/kshell_utilities.py | _read_ptn | GaffaSnobb/kshell_utilities | 0 | python | def _read_ptn(self):
'\n Read `KSHELL` partition file (.ptn) and extract proton\n partition, neutron partition, and particle-hole truncation data.\n Save as instance attributes.\n '
line_number = 0
line_number_inner = 0
self.truncation = []
with open(self.fname_ptn, 'r') as infile:
for line in infile:
line_number += 1
if line.startswith('# proton partition'):
for line_inner in infile:
"\n Read until next '#'.\n "
line_number_inner += 1
if line_inner.startswith('#'):
line = line_inner
break
self.proton_partition = np.loadtxt(fname=self.fname_ptn, skiprows=line_number, max_rows=line_number_inner)
line_number += line_number_inner
line_number_inner = 0
if line.startswith('# neutron partition'):
for line_inner in infile:
"\n Read until next '#'.\n "
line_number_inner += 1
if line_inner.startswith('#'):
line = line_inner
break
self.neutron_partition = np.loadtxt(fname=self.fname_ptn, skiprows=line_number, max_rows=line_number_inner)
line_number += line_number_inner
line_number_inner = 0
if line.startswith('# particle-hole truncation'):
for line_inner in infile:
'\n Loop over all particle-hole truncation lines.\n '
line_number += 1
line_inner_split = line_inner.split()
if (len(line_inner_split) < 2):
'\n Condition will probably not get fulfilled.\n Safety precaution due to indexing in this\n loop.\n '
break
if line_inner_split[1].startswith('['):
"\n '[' indicates that 'line_inner' is still\n containing truncation information.\n "
for (colon_index, elem) in enumerate(line_inner_split):
"\n Find the index of the colon ':' to\n decide the orbit numbers and occupation\n numbers.\n "
if (elem == ':'):
break
occupation = [int(occ) for occ in line_inner_split[(colon_index + 1):]]
orbit_numbers = ''.join(line_inner_split[1:colon_index])
orbit_numbers = orbit_numbers.replace('[', '')
orbit_numbers = orbit_numbers.replace(']', '')
orbit_numbers = orbit_numbers.replace(' ', '')
orbit_numbers = orbit_numbers.split(',')
orbit_numbers = [int(orbit) for orbit in orbit_numbers]
for orbit in orbit_numbers:
self.truncation.append((orbit, occupation))
else:
"\n Line does not contain '[' and thus does not\n contain truncation information.\n "
break | def _read_ptn(self):
'\n Read `KSHELL` partition file (.ptn) and extract proton\n partition, neutron partition, and particle-hole truncation data.\n Save as instance attributes.\n '
line_number = 0
line_number_inner = 0
self.truncation = []
with open(self.fname_ptn, 'r') as infile:
for line in infile:
line_number += 1
if line.startswith('# proton partition'):
for line_inner in infile:
"\n Read until next '#'.\n "
line_number_inner += 1
if line_inner.startswith('#'):
line = line_inner
break
self.proton_partition = np.loadtxt(fname=self.fname_ptn, skiprows=line_number, max_rows=line_number_inner)
line_number += line_number_inner
line_number_inner = 0
if line.startswith('# neutron partition'):
for line_inner in infile:
"\n Read until next '#'.\n "
line_number_inner += 1
if line_inner.startswith('#'):
line = line_inner
break
self.neutron_partition = np.loadtxt(fname=self.fname_ptn, skiprows=line_number, max_rows=line_number_inner)
line_number += line_number_inner
line_number_inner = 0
if line.startswith('# particle-hole truncation'):
for line_inner in infile:
'\n Loop over all particle-hole truncation lines.\n '
line_number += 1
line_inner_split = line_inner.split()
if (len(line_inner_split) < 2):
'\n Condition will probably not get fulfilled.\n Safety precaution due to indexing in this\n loop.\n '
break
if line_inner_split[1].startswith('['):
"\n '[' indicates that 'line_inner' is still\n containing truncation information.\n "
for (colon_index, elem) in enumerate(line_inner_split):
"\n Find the index of the colon ':' to\n decide the orbit numbers and occupation\n numbers.\n "
if (elem == ':'):
break
occupation = [int(occ) for occ in line_inner_split[(colon_index + 1):]]
orbit_numbers = ''.join(line_inner_split[1:colon_index])
orbit_numbers = orbit_numbers.replace('[', '')
orbit_numbers = orbit_numbers.replace(']', '')
orbit_numbers = orbit_numbers.replace(' ', '')
orbit_numbers = orbit_numbers.split(',')
orbit_numbers = [int(orbit) for orbit in orbit_numbers]
for orbit in orbit_numbers:
self.truncation.append((orbit, occupation))
else:
"\n Line does not contain '[' and thus does not\n contain truncation information.\n "
break<|docstring|>Read `KSHELL` partition file (.ptn) and extract proton
partition, neutron partition, and particle-hole truncation data.
Save as instance attributes.<|endoftext|> |
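A standalone sketch of the particle-hole truncation parsing done above, applied to one assumed example line; it condenses the colon search and the bracket and comma cleanup into list operations with the same result.

line = " 1   [1, 2, 3]  :  0 6"                             # hypothetical truncation line from a .ptn file
parts = line.split()
colon_index = parts.index(':')
occupation = [int(occ) for occ in parts[colon_index + 1:]]  # [0, 6]
orbit_numbers = ''.join(parts[1:colon_index]).strip('[]').split(',')
orbit_numbers = [int(orbit) for orbit in orbit_numbers]     # [1, 2, 3]
truncation = [(orbit, occupation) for orbit in orbit_numbers]
print(truncation)                                           # [(1, [0, 6]), (2, [0, 6]), (3, [0, 6])]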
1f8ec1e02e50617a2a510b3f9bba7723c79eec83c96ef03b1a61e2863bee863c | def _extract_info_from_summary_fname(self):
'\n Extract nucleus and model space name.\n '
fname_split = self.fname_summary.split('/')[(- 1)]
fname_split = fname_split.split('_')
self.nucleus = fname_split[1]
self.model_space = fname_split[2][:(- 4)] | Extract nucleus and model space name. | kshell_utilities/kshell_utilities.py | _extract_info_from_summary_fname | GaffaSnobb/kshell_utilities | 0 | python | def _extract_info_from_summary_fname(self):
'\n \n '
fname_split = self.fname_summary.split('/')[(- 1)]
fname_split = fname_split.split('_')
self.nucleus = fname_split[1]
self.model_space = fname_split[2][:(- 4)] | def _extract_info_from_summary_fname(self):
'\n \n '
fname_split = self.fname_summary.split('/')[(- 1)]
fname_split = fname_split.split('_')
self.nucleus = fname_split[1]
self.model_space = fname_split[2][:(- 4)]<|docstring|>Extract nucleus and model space name.<|endoftext|> |
a5063e4cd9c4d92e88aa286a60d41586d8e9b67729f2a3a3a11bb880d4b3d25e | def _read_summary(self):
'\n Read energy level data, transition probabilities and transition\n strengths from `KSHELL` output files.\n\n Raises\n ------\n KshellDataStructureError\n If the `KSHELL` file has unexpected structure / syntax.\n '
npy_path = 'tmp'
base_fname = self.path.split('/')[(- 1)][:(- 4)]
try:
os.mkdir(npy_path)
except FileExistsError:
pass
with open(f'{npy_path}/README.txt', 'w') as outfile:
msg = 'This directory contains binary numpy data of KSHELL summary data.'
msg += ' The purpose is to speed up subsequent runs which use the same summary data.'
msg += ' It is safe to delete this entire directory if you have the original summary text file, '
msg += 'though at the cost of having to read the summary text file over again which may take some time.'
msg += " The ksutil.loadtxt parameter load_and_save_to_file = 'overwrite' will force a re-write of the binary numpy data."
outfile.write(msg)
unique_id = _generate_unique_identifier(self.path)
levels_fname = f'{npy_path}/{base_fname}_levels_{unique_id}.npy'
transitions_BM1_fname = f'{npy_path}/{base_fname}_transitions_BM1_{unique_id}.npy'
transitions_BE2_fname = f'{npy_path}/{base_fname}_transitions_BE2_{unique_id}.npy'
transitions_BE1_fname = f'{npy_path}/{base_fname}_transitions_BE1_{unique_id}.npy'
debug_fname = f'{npy_path}/{base_fname}_debug_{unique_id}.npy'
fnames = [levels_fname, transitions_BE2_fname, transitions_BM1_fname, transitions_BE1_fname, debug_fname]
if (self.load_and_save_to_file != 'overwrite'):
'\n Do not load files if overwrite parameter has been passed.\n '
if (all([os.path.isfile(fname) for fname in fnames]) and self.load_and_save_to_file):
'\n If all files exist, load them. If any of the files do\n not exist, all will be generated.\n '
self.levels = np.load(file=levels_fname, allow_pickle=True)
self.transitions_BM1 = np.load(file=transitions_BM1_fname, allow_pickle=True)
self.transitions_BE2 = np.load(file=transitions_BE2_fname, allow_pickle=True)
self.transitions_BE1 = np.load(file=transitions_BE1_fname, allow_pickle=True)
self.debug = np.load(file=debug_fname, allow_pickle=True)
msg = 'Summary data loaded from .npy!'
msg += " Use loadtxt parameter load_and_save_to_file = 'overwrite'"
msg += ' to re-read data from the summary file.'
print(msg)
return
if (self.old_or_new == 'new'):
parallel_args = [[self.fname_summary, 'Energy', _load_energy_levels, 0], [self.fname_summary, 'B(M1)', _load_transition_probabilities, 1], [self.fname_summary, 'B(E2)', _load_transition_probabilities, 2], [self.fname_summary, 'B(E1)', _load_transition_probabilities, 3]]
elif (self.old_or_new == 'old'):
parallel_args = [[self.fname_summary, 'Energy', _load_energy_levels, 0], [self.fname_summary, 'B(M1)', _load_transition_probabilities_old, 1], [self.fname_summary, 'B(E2)', _load_transition_probabilities_old, 2], [self.fname_summary, 'B(E1)', _load_transition_probabilities_old, 3]]
pool = multiprocessing.Pool()
pool_res = pool.map(_load_parallel, parallel_args)
try:
(self.levels, self.negative_spin_counts[0]) = pool_res[0]
except TypeError:
'\n If no energy values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BM1, self.negative_spin_counts[1]) = pool_res[1]
except (TypeError, IndexError):
'\n If no BM1 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BE2, self.negative_spin_counts[2]) = pool_res[2]
except (TypeError, IndexError):
'\n If no BE2 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BE1, self.negative_spin_counts[3]) = pool_res[3]
except (TypeError, IndexError):
'\n If no BE1 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
self.levels = np.array(self.levels)
self.transitions_BM1 = np.array(self.transitions_BM1)
self.transitions_BE2 = np.array(self.transitions_BE2)
self.transitions_BE1 = np.array(self.transitions_BE1)
self.debug = 'DEBUG\n'
self.debug += f'''skipped -1 states in levels: {self.negative_spin_counts[0]}
'''
self.debug += f'''skipped -1 states in BM1: {self.negative_spin_counts[1]}
'''
self.debug += f'''skipped -1 states in BE2: {self.negative_spin_counts[2]}
'''
self.debug += f'''skipped -1 states in BE1: {self.negative_spin_counts[3]}
'''
self.debug = np.array(self.debug)
if self.load_and_save_to_file:
np.save(file=levels_fname, arr=self.levels, allow_pickle=True)
np.save(file=transitions_BM1_fname, arr=self.transitions_BM1, allow_pickle=True)
np.save(file=transitions_BE2_fname, arr=self.transitions_BE2, allow_pickle=True)
np.save(file=transitions_BE1_fname, arr=self.transitions_BE1, allow_pickle=True)
np.save(file=debug_fname, arr=self.debug, allow_pickle=True) | Read energy level data, transition probabilities and transition
strengths from `KSHELL` output files.
Raises
------
KshellDataStructureError
If the `KSHELL` file has unexpected structure / syntax. | kshell_utilities/kshell_utilities.py | _read_summary | GaffaSnobb/kshell_utilities | 0 | python | def _read_summary(self):
'\n Read energy level data, transition probabilities and transition\n strengths from `KSHELL` output files.\n\n Raises\n ------\n KshellDataStructureError\n If the `KSHELL` file has unexpected structure / syntax.\n '
npy_path = 'tmp'
base_fname = self.path.split('/')[(- 1)][:(- 4)]
try:
os.mkdir(npy_path)
except FileExistsError:
pass
with open(f'{npy_path}/README.txt', 'w') as outfile:
msg = 'This directory contains binary numpy data of KSHELL summary data.'
msg += ' The purpose is to speed up subsequent runs which use the same summary data.'
msg += ' It is safe to delete this entire directory if you have the original summary text file, '
msg += 'though at the cost of having to read the summary text file over again which may take some time.'
msg += " The ksutil.loadtxt parameter load_and_save_to_file = 'overwrite' will force a re-write of the binary numpy data."
outfile.write(msg)
unique_id = _generate_unique_identifier(self.path)
levels_fname = f'{npy_path}/{base_fname}_levels_{unique_id}.npy'
transitions_BM1_fname = f'{npy_path}/{base_fname}_transitions_BM1_{unique_id}.npy'
transitions_BE2_fname = f'{npy_path}/{base_fname}_transitions_BE2_{unique_id}.npy'
transitions_BE1_fname = f'{npy_path}/{base_fname}_transitions_BE1_{unique_id}.npy'
debug_fname = f'{npy_path}/{base_fname}_debug_{unique_id}.npy'
fnames = [levels_fname, transitions_BE2_fname, transitions_BM1_fname, transitions_BE1_fname, debug_fname]
if (self.load_and_save_to_file != 'overwrite'):
'\n Do not load files if overwrite parameter has been passed.\n '
if (all([os.path.isfile(fname) for fname in fnames]) and self.load_and_save_to_file):
'\n If all files exist, load them. If any of the files do\n not exist, all will be generated.\n '
self.levels = np.load(file=levels_fname, allow_pickle=True)
self.transitions_BM1 = np.load(file=transitions_BM1_fname, allow_pickle=True)
self.transitions_BE2 = np.load(file=transitions_BE2_fname, allow_pickle=True)
self.transitions_BE1 = np.load(file=transitions_BE1_fname, allow_pickle=True)
self.debug = np.load(file=debug_fname, allow_pickle=True)
msg = 'Summary data loaded from .npy!'
msg += " Use loadtxt parameter load_and_save_to_file = 'overwrite'"
msg += ' to re-read data from the summary file.'
print(msg)
return
if (self.old_or_new == 'new'):
parallel_args = [[self.fname_summary, 'Energy', _load_energy_levels, 0], [self.fname_summary, 'B(M1)', _load_transition_probabilities, 1], [self.fname_summary, 'B(E2)', _load_transition_probabilities, 2], [self.fname_summary, 'B(E1)', _load_transition_probabilities, 3]]
elif (self.old_or_new == 'old'):
parallel_args = [[self.fname_summary, 'Energy', _load_energy_levels, 0], [self.fname_summary, 'B(M1)', _load_transition_probabilities_old, 1], [self.fname_summary, 'B(E2)', _load_transition_probabilities_old, 2], [self.fname_summary, 'B(E1)', _load_transition_probabilities_old, 3]]
pool = multiprocessing.Pool()
pool_res = pool.map(_load_parallel, parallel_args)
try:
(self.levels, self.negative_spin_counts[0]) = pool_res[0]
except TypeError:
'\n If no energy values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BM1, self.negative_spin_counts[1]) = pool_res[1]
except (TypeError, IndexError):
'\n If no BM1 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BE2, self.negative_spin_counts[2]) = pool_res[2]
except (TypeError, IndexError):
'\n If no BE2 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BE1, self.negative_spin_counts[3]) = pool_res[3]
except (TypeError, IndexError):
'\n If no BE1 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
self.levels = np.array(self.levels)
self.transitions_BM1 = np.array(self.transitions_BM1)
self.transitions_BE2 = np.array(self.transitions_BE2)
self.transitions_BE1 = np.array(self.transitions_BE1)
self.debug = 'DEBUG\n'
self.debug += f'''skipped -1 states in levels: {self.negative_spin_counts[0]}
'''
self.debug += f'''skipped -1 states in BM1: {self.negative_spin_counts[1]}
'''
self.debug += f'''skipped -1 states in BE2: {self.negative_spin_counts[2]}
'''
self.debug += f'''skipped -1 states in BE1: {self.negative_spin_counts[3]}
'''
self.debug = np.array(self.debug)
if self.load_and_save_to_file:
np.save(file=levels_fname, arr=self.levels, allow_pickle=True)
np.save(file=transitions_BM1_fname, arr=self.transitions_BM1, allow_pickle=True)
np.save(file=transitions_BE2_fname, arr=self.transitions_BE2, allow_pickle=True)
np.save(file=transitions_BE1_fname, arr=self.transitions_BE1, allow_pickle=True)
np.save(file=debug_fname, arr=self.debug, allow_pickle=True) | def _read_summary(self):
'\n Read energy level data, transition probabilities and transition\n strengths from `KSHELL` output files.\n\n Raises\n ------\n KshellDataStructureError\n If the `KSHELL` file has unexpected structure / syntax.\n '
npy_path = 'tmp'
base_fname = self.path.split('/')[(- 1)][:(- 4)]
try:
os.mkdir(npy_path)
except FileExistsError:
pass
with open(f'{npy_path}/README.txt', 'w') as outfile:
msg = 'This directory contains binary numpy data of KSHELL summary data.'
msg += ' The purpose is to speed up subsequent runs which use the same summary data.'
msg += ' It is safe to delete this entire directory if you have the original summary text file, '
msg += 'though at the cost of having to read the summary text file over again which may take some time.'
msg += " The ksutil.loadtxt parameter load_and_save_to_file = 'overwrite' will force a re-write of the binary numpy data."
outfile.write(msg)
unique_id = _generate_unique_identifier(self.path)
levels_fname = f'{npy_path}/{base_fname}_levels_{unique_id}.npy'
transitions_BM1_fname = f'{npy_path}/{base_fname}_transitions_BM1_{unique_id}.npy'
transitions_BE2_fname = f'{npy_path}/{base_fname}_transitions_BE2_{unique_id}.npy'
transitions_BE1_fname = f'{npy_path}/{base_fname}_transitions_BE1_{unique_id}.npy'
debug_fname = f'{npy_path}/{base_fname}_debug_{unique_id}.npy'
fnames = [levels_fname, transitions_BE2_fname, transitions_BM1_fname, transitions_BE1_fname, debug_fname]
if (self.load_and_save_to_file != 'overwrite'):
'\n Do not load files if overwrite parameter has been passed.\n '
if (all([os.path.isfile(fname) for fname in fnames]) and self.load_and_save_to_file):
'\n If all files exist, load them. If any of the files do\n not exist, all will be generated.\n '
self.levels = np.load(file=levels_fname, allow_pickle=True)
self.transitions_BM1 = np.load(file=transitions_BM1_fname, allow_pickle=True)
self.transitions_BE2 = np.load(file=transitions_BE2_fname, allow_pickle=True)
self.transitions_BE1 = np.load(file=transitions_BE1_fname, allow_pickle=True)
self.debug = np.load(file=debug_fname, allow_pickle=True)
msg = 'Summary data loaded from .npy!'
msg += " Use loadtxt parameter load_and_save_to_file = 'overwrite'"
msg += ' to re-read data from the summary file.'
print(msg)
return
if (self.old_or_new == 'new'):
parallel_args = [[self.fname_summary, 'Energy', _load_energy_levels, 0], [self.fname_summary, 'B(M1)', _load_transition_probabilities, 1], [self.fname_summary, 'B(E2)', _load_transition_probabilities, 2], [self.fname_summary, 'B(E1)', _load_transition_probabilities, 3]]
elif (self.old_or_new == 'old'):
parallel_args = [[self.fname_summary, 'Energy', _load_energy_levels, 0], [self.fname_summary, 'B(M1)', _load_transition_probabilities_old, 1], [self.fname_summary, 'B(E2)', _load_transition_probabilities_old, 2], [self.fname_summary, 'B(E1)', _load_transition_probabilities_old, 3]]
pool = multiprocessing.Pool()
pool_res = pool.map(_load_parallel, parallel_args)
try:
(self.levels, self.negative_spin_counts[0]) = pool_res[0]
except TypeError:
'\n If no energy values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BM1, self.negative_spin_counts[1]) = pool_res[1]
except (TypeError, IndexError):
'\n If no BM1 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BE2, self.negative_spin_counts[2]) = pool_res[2]
except (TypeError, IndexError):
'\n If no BE2 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
try:
(self.transitions_BE1, self.negative_spin_counts[3]) = pool_res[3]
except (TypeError, IndexError):
'\n If no BE1 values are found in the file:\n TypeError: cannot unpack non-iterable NoneType object\n '
pass
self.levels = np.array(self.levels)
self.transitions_BM1 = np.array(self.transitions_BM1)
self.transitions_BE2 = np.array(self.transitions_BE2)
self.transitions_BE1 = np.array(self.transitions_BE1)
self.debug = 'DEBUG\n'
self.debug += f'''skipped -1 states in levels: {self.negative_spin_counts[0]}
'''
self.debug += f'''skipped -1 states in BM1: {self.negative_spin_counts[1]}
'''
self.debug += f'''skipped -1 states in BE2: {self.negative_spin_counts[2]}
'''
self.debug += f'''skipped -1 states in BE1: {self.negative_spin_counts[3]}
'''
self.debug = np.array(self.debug)
if self.load_and_save_to_file:
np.save(file=levels_fname, arr=self.levels, allow_pickle=True)
np.save(file=transitions_BM1_fname, arr=self.transitions_BM1, allow_pickle=True)
np.save(file=transitions_BE2_fname, arr=self.transitions_BE2, allow_pickle=True)
np.save(file=transitions_BE1_fname, arr=self.transitions_BE1, allow_pickle=True)
np.save(file=debug_fname, arr=self.debug, allow_pickle=True)<|docstring|>Read energy level data, transition probabilities and transition
strengths from `KSHELL` output files.
Raises
------
KshellDataStructureError
If the `KSHELL` file has unexpected structure / syntax.<|endoftext|> |
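A reduced sketch of the caching pattern used above: parsed arrays are written to .npy files and later runs load them instead of re-reading the summary text file. The file name and the parser stand-in are placeholders.

import os
import numpy as np

def load_or_parse(npy_fname, parse_func):
    # Cache hit: load the binary numpy data saved by an earlier run.
    if os.path.isfile(npy_fname):
        return np.load(file=npy_fname, allow_pickle=True)
    # Cache miss: do the slow text parse once, then save for the next run.
    data = np.array(parse_func())
    np.save(file=npy_fname, arr=data, allow_pickle=True)
    return data

levels = load_or_parse("example_levels.npy", lambda: [[0.0, 0, 1], [1.2, 2, 1]])
print(levels)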
029af502c307e76ff52da0b2c0f448023378f5c8e5f1c87e68806f0b4176b47f | def level_plot(self, max_spin_states: int=1000, filter_spins: Union[(None, list)]=None):
'\n Wrapper method to include level plot as an attribute to this\n class. Generate a level plot for a single isotope. Spin on the x\n axis, energy on the y axis.\n\n Parameters\n ----------\n max_spin_states : int\n The maximum amount of states to plot for each spin. Default\n set to a large number to indicate ≈ no limit.\n\n filter_spins : Union[None, list]\n Which spins to include in the plot. If `None`, all spins are\n plotted. Defaults to `None`\n '
level_plot(levels=self.levels, max_spin_states=max_spin_states, filter_spins=filter_spins) | Wrapper method to include level plot as an attribute to this
class. Generate a level plot for a single isotope. Spin on the x
axis, energy on the y axis.
Parameters
----------
max_spin_states : int
The maximum amount of states to plot for each spin. Default
set to a large number to indicate ≈ no limit.
filter_spins : Union[None, list]
Which spins to include in the plot. If `None`, all spins are
plotted. Defaults to `None` | kshell_utilities/kshell_utilities.py | level_plot | GaffaSnobb/kshell_utilities | 0 | python | def level_plot(self, max_spin_states: int=1000, filter_spins: Union[(None, list)]=None):
'\n Wrapper method to include level plot as an attribute to this\n class. Generate a level plot for a single isotope. Spin on the x\n axis, energy on the y axis.\n\n Parameters\n ----------\n max_spin_states : int\n The maximum amount of states to plot for each spin. Default\n set to a large number to indicate ≈ no limit.\n\n filter_spins : Union[None, list]\n Which spins to include in the plot. If `None`, all spins are\n plotted. Defaults to `None`\n '
level_plot(levels=self.levels, max_spin_states=max_spin_states, filter_spins=filter_spins) | def level_plot(self, max_spin_states: int=1000, filter_spins: Union[(None, list)]=None):
'\n Wrapper method to include level plot as an attribute to this\n class. Generate a level plot for a single isotope. Spin on the x\n axis, energy on the y axis.\n\n Parameters\n ----------\n max_spin_states : int\n The maximum amount of states to plot for each spin. Default\n set to a large number to indicate ≈ no limit.\n\n filter_spins : Union[None, list]\n Which spins to include in the plot. If `None`, all spins are\n plotted. Defaults to `None`\n '
level_plot(levels=self.levels, max_spin_states=max_spin_states, filter_spins=filter_spins)<|docstring|>Wrapper method to include level plot as an attribute to this
class. Generate a level plot for a single isotope. Spin on the x
axis, energy on the y axis.
Parameters
----------
max_spin_states : int
The maximum amount of states to plot for each spin. Default
set to a large number to indicate ≈ no limit.
filter_spins : Union[None, list]
Which spins to include in the plot. If `None`, all spins are
plotted. Defaults to `None`<|endoftext|> |
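A usage sketch assuming the data object is obtained through ksutil.loadtxt, which the messages in the preceding record refer to; the indexing of the return value and the path are assumptions.

import kshell_utilities as ksutil     # import name taken from the "ksutil.loadtxt" messages above

res = ksutil.loadtxt(path="runs/V50_gxpf1a")[0]   # return structure assumed to be indexable
res.level_plot(max_spin_states=20, filter_spins=[2, 4, 6])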
8c725164d7792ec362239bd7182d699e0e40d08ae2b5718bdcd86e66c2dfccd8 | def level_density_plot(self, bin_width: Union[(int, float)]=0.2, include_n_states: Union[(None, int)]=None, plot: bool=True, save_plot: bool=False):
'\n Wrapper method to include level density plotting as\n an attribute to this class. Generate the level density with the\n input bin size.\n\n Parameters\n ----------\n See level_density in general_utilities.py for parameter\n information.\n '
(bins, density) = level_density(levels=self.levels, bin_width=bin_width, include_n_states=include_n_states, plot=plot, save_plot=save_plot)
return (bins, density) | Wrapper method to include level density plotting as
an attribute to this class. Generate the level density with the
input bin size.
Parameters
----------
See level_density in general_utilities.py for parameter
information. | kshell_utilities/kshell_utilities.py | level_density_plot | GaffaSnobb/kshell_utilities | 0 | python | def level_density_plot(self, bin_width: Union[(int, float)]=0.2, include_n_states: Union[(None, int)]=None, plot: bool=True, save_plot: bool=False):
'\n Wrapper method to include level density plotting as\n an attribute to this class. Generate the level density with the\n input bin size.\n\n Parameters\n ----------\n See level_density in general_utilities.py for parameter\n information.\n '
(bins, density) = level_density(levels=self.levels, bin_width=bin_width, include_n_states=include_n_states, plot=plot, save_plot=save_plot)
return (bins, density) | def level_density_plot(self, bin_width: Union[(int, float)]=0.2, include_n_states: Union[(None, int)]=None, plot: bool=True, save_plot: bool=False):
'\n Wrapper method to include level density plotting as\n an attribute to this class. Generate the level density with the\n input bin size.\n\n Parameters\n ----------\n See level_density in general_utilities.py for parameter\n information.\n '
(bins, density) = level_density(levels=self.levels, bin_width=bin_width, include_n_states=include_n_states, plot=plot, save_plot=save_plot)
return (bins, density)<|docstring|>Wrapper method to include level density plotting as
an attribute to this class. Generate the level density with the
input bin size.
Parameters
----------
See level_density in general_utilities.py for parameter
information.<|endoftext|> |
ae6468fbcf49f11cb2dedfbcec47fcac68f255f4493bd7cf542375fe345d1d3f | def nld(self, bin_width: Union[(int, float)]=0.2, include_n_states: Union[(None, int)]=None, plot: bool=True, save_plot: bool=False):
'\n Wrapper method to level_density_plot.\n '
return self.level_density_plot(bin_width=bin_width, include_n_states=include_n_states, plot=plot, save_plot=save_plot) | Wrapper method to level_density_plot. | kshell_utilities/kshell_utilities.py | nld | GaffaSnobb/kshell_utilities | 0 | python | def nld(self, bin_width: Union[(int, float)]=0.2, include_n_states: Union[(None, int)]=None, plot: bool=True, save_plot: bool=False):
'\n \n '
return self.level_density_plot(bin_width=bin_width, include_n_states=include_n_states, plot=plot, save_plot=save_plot) | def nld(self, bin_width: Union[(int, float)]=0.2, include_n_states: Union[(None, int)]=None, plot: bool=True, save_plot: bool=False):
'\n \n '
return self.level_density_plot(bin_width=bin_width, include_n_states=include_n_states, plot=plot, save_plot=save_plot)<|docstring|>Wrapper method to level_density_plot.<|endoftext|> |
0be06deaa7572b333dd87244352f67737377ab23c5b640f9ca330f2ce47ea70a | def gamma_strength_function_average_plot(self, bin_width: Union[(float, int)]=0.2, Ex_min: Union[(float, int)]=5, Ex_max: Union[(float, int)]=50, multipole_type: str='M1', prefactor_E1: Union[(None, float)]=None, prefactor_M1: Union[(None, float)]=None, prefactor_E2: Union[(None, float)]=None, initial_or_final: str='initial', partial_or_total: str='partial', include_only_nonzero_in_average: bool=True, include_n_states: Union[(None, int)]=None, filter_spins: Union[(None, list)]=None, filter_parities: str='both', porter_thomas: bool=False, plot: bool=True, save_plot: bool=False):
'\n Wrapper method to include gamma ray strength function\n calculations as an attribute to this class.\n\n Parameters\n ----------\n See gamma_strength_function_average in general_utilities.py\n for parameter descriptions.\n '
transitions_dict = {'M1': self.transitions_BM1, 'E2': self.transitions_BE2, 'E1': self.transitions_BE1}
return gamma_strength_function_average(levels=self.levels, transitions=transitions_dict[multipole_type], bin_width=bin_width, Ex_min=Ex_min, Ex_max=Ex_max, multipole_type=multipole_type, prefactor_E1=prefactor_E1, prefactor_M1=prefactor_M1, prefactor_E2=prefactor_E2, initial_or_final=initial_or_final, partial_or_total=partial_or_total, include_only_nonzero_in_average=include_only_nonzero_in_average, include_n_states=include_n_states, filter_spins=filter_spins, filter_parities=filter_parities, porter_thomas=porter_thomas, plot=plot, save_plot=save_plot) | Wrapper method to include gamma ray strength function
calculations as an attribute to this class.
Parameters
----------
See gamma_strength_function_average in general_utilities.py
for parameter descriptions. | kshell_utilities/kshell_utilities.py | gamma_strength_function_average_plot | GaffaSnobb/kshell_utilities | 0 | python | def gamma_strength_function_average_plot(self, bin_width: Union[(float, int)]=0.2, Ex_min: Union[(float, int)]=5, Ex_max: Union[(float, int)]=50, multipole_type: str='M1', prefactor_E1: Union[(None, float)]=None, prefactor_M1: Union[(None, float)]=None, prefactor_E2: Union[(None, float)]=None, initial_or_final: str='initial', partial_or_total: str='partial', include_only_nonzero_in_average: bool=True, include_n_states: Union[(None, int)]=None, filter_spins: Union[(None, list)]=None, filter_parities: str='both', porter_thomas: bool=False, plot: bool=True, save_plot: bool=False):
'\n Wrapper method to include gamma ray strength function\n calculations as an attribute to this class.\n\n Parameters\n ----------\n See gamma_strength_function_average in general_utilities.py\n for parameter descriptions.\n '
transitions_dict = {'M1': self.transitions_BM1, 'E2': self.transitions_BE2, 'E1': self.transitions_BE1}
return gamma_strength_function_average(levels=self.levels, transitions=transitions_dict[multipole_type], bin_width=bin_width, Ex_min=Ex_min, Ex_max=Ex_max, multipole_type=multipole_type, prefactor_E1=prefactor_E1, prefactor_M1=prefactor_M1, prefactor_E2=prefactor_E2, initial_or_final=initial_or_final, partial_or_total=partial_or_total, include_only_nonzero_in_average=include_only_nonzero_in_average, include_n_states=include_n_states, filter_spins=filter_spins, filter_parities=filter_parities, porter_thomas=porter_thomas, plot=plot, save_plot=save_plot) | def gamma_strength_function_average_plot(self, bin_width: Union[(float, int)]=0.2, Ex_min: Union[(float, int)]=5, Ex_max: Union[(float, int)]=50, multipole_type: str='M1', prefactor_E1: Union[(None, float)]=None, prefactor_M1: Union[(None, float)]=None, prefactor_E2: Union[(None, float)]=None, initial_or_final: str='initial', partial_or_total: str='partial', include_only_nonzero_in_average: bool=True, include_n_states: Union[(None, int)]=None, filter_spins: Union[(None, list)]=None, filter_parities: str='both', porter_thomas: bool=False, plot: bool=True, save_plot: bool=False):
'\n Wrapper method to include gamma ray strength function\n calculations as an attribute to this class.\n\n Parameters\n ----------\n See gamma_strength_function_average in general_utilities.py\n for parameter descriptions.\n '
transitions_dict = {'M1': self.transitions_BM1, 'E2': self.transitions_BE2, 'E1': self.transitions_BE1}
return gamma_strength_function_average(levels=self.levels, transitions=transitions_dict[multipole_type], bin_width=bin_width, Ex_min=Ex_min, Ex_max=Ex_max, multipole_type=multipole_type, prefactor_E1=prefactor_E1, prefactor_M1=prefactor_M1, prefactor_E2=prefactor_E2, initial_or_final=initial_or_final, partial_or_total=partial_or_total, include_only_nonzero_in_average=include_only_nonzero_in_average, include_n_states=include_n_states, filter_spins=filter_spins, filter_parities=filter_parities, porter_thomas=porter_thomas, plot=plot, save_plot=save_plot)<|docstring|>Wrapper method to include gamma ray strength function
calculations as an attribute to this class.
Parameters
----------
See gamma_strength_function_average in general_utilities.py
for parameter descriptions.<|endoftext|> |
f26ad7c09bd63e92790bd03b7c44e54dcd3a09cbd426f1caf67e48ac45ca426a | def gsf(self, bin_width: Union[(float, int)]=0.2, Ex_min: Union[(float, int)]=5, Ex_max: Union[(float, int)]=50, multipole_type: str='M1', prefactor_E1: Union[(None, float)]=None, prefactor_M1: Union[(None, float)]=None, prefactor_E2: Union[(None, float)]=None, initial_or_final: str='initial', partial_or_total: str='partial', include_only_nonzero_in_average: bool=True, include_n_states: Union[(None, int)]=None, filter_spins: Union[(None, list)]=None, filter_parities: str='both', porter_thomas: bool=False, plot: bool=True, save_plot: bool=False):
'\n Alias for gamma_strength_function_average_plot. See that\n docstring for details.\n '
return self.gamma_strength_function_average_plot(bin_width=bin_width, Ex_min=Ex_min, Ex_max=Ex_max, multipole_type=multipole_type, prefactor_E1=prefactor_E1, prefactor_M1=prefactor_M1, prefactor_E2=prefactor_E2, initial_or_final=initial_or_final, partial_or_total=partial_or_total, include_only_nonzero_in_average=include_only_nonzero_in_average, include_n_states=include_n_states, filter_spins=filter_spins, filter_parities=filter_parities, porter_thomas=porter_thomas, plot=plot, save_plot=save_plot) | Alias for gamma_strength_function_average_plot. See that
docstring for details. | kshell_utilities/kshell_utilities.py | gsf | GaffaSnobb/kshell_utilities | 0 | python | def gsf(self, bin_width: Union[(float, int)]=0.2, Ex_min: Union[(float, int)]=5, Ex_max: Union[(float, int)]=50, multipole_type: str='M1', prefactor_E1: Union[(None, float)]=None, prefactor_M1: Union[(None, float)]=None, prefactor_E2: Union[(None, float)]=None, initial_or_final: str='initial', partial_or_total: str='partial', include_only_nonzero_in_average: bool=True, include_n_states: Union[(None, int)]=None, filter_spins: Union[(None, list)]=None, filter_parities: str='both', porter_thomas: bool=False, plot: bool=True, save_plot: bool=False):
'\n Alias for gamma_strength_function_average_plot. See that\n docstring for details.\n '
return self.gamma_strength_function_average_plot(bin_width=bin_width, Ex_min=Ex_min, Ex_max=Ex_max, multipole_type=multipole_type, prefactor_E1=prefactor_E1, prefactor_M1=prefactor_M1, prefactor_E2=prefactor_E2, initial_or_final=initial_or_final, partial_or_total=partial_or_total, include_only_nonzero_in_average=include_only_nonzero_in_average, include_n_states=include_n_states, filter_spins=filter_spins, filter_parities=filter_parities, porter_thomas=porter_thomas, plot=plot, save_plot=save_plot) | def gsf(self, bin_width: Union[(float, int)]=0.2, Ex_min: Union[(float, int)]=5, Ex_max: Union[(float, int)]=50, multipole_type: str='M1', prefactor_E1: Union[(None, float)]=None, prefactor_M1: Union[(None, float)]=None, prefactor_E2: Union[(None, float)]=None, initial_or_final: str='initial', partial_or_total: str='partial', include_only_nonzero_in_average: bool=True, include_n_states: Union[(None, int)]=None, filter_spins: Union[(None, list)]=None, filter_parities: str='both', porter_thomas: bool=False, plot: bool=True, save_plot: bool=False):
'\n Alias for gamma_strength_function_average_plot. See that\n docstring for details.\n '
return self.gamma_strength_function_average_plot(bin_width=bin_width, Ex_min=Ex_min, Ex_max=Ex_max, multipole_type=multipole_type, prefactor_E1=prefactor_E1, prefactor_M1=prefactor_M1, prefactor_E2=prefactor_E2, initial_or_final=initial_or_final, partial_or_total=partial_or_total, include_only_nonzero_in_average=include_only_nonzero_in_average, include_n_states=include_n_states, filter_spins=filter_spins, filter_parities=filter_parities, porter_thomas=porter_thomas, plot=plot, save_plot=save_plot)<|docstring|>Alias for gamma_strength_function_average_plot. See that
docstring for details.<|endoftext|> |
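A sketch combining the nld and gsf wrappers with a few keyword arguments taken from the signatures above; the path, the indexing of the loadtxt return value, and the parameter values are placeholders.

import kshell_utilities as ksutil

res = ksutil.loadtxt(path="runs/V50_gxpf1a")[0]   # return structure assumed to be indexable
bins, density = res.nld(bin_width=0.2, plot=False)
gsf_result = res.gsf(bin_width=0.2, Ex_min=5, Ex_max=10, multipole_type="M1", plot=False)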
15f526c6f74d3a959ddc70e521fef07c5843c6eac949053fc470ed2682e22d7a | @property
def help(self):
'\n Generate a list of instance attributes without magic and private\n methods.\n\n Returns\n -------\n help_list : list\n A list of non-magic instance attributes.\n '
help_list = []
for elem in dir(self):
if (not elem.startswith('_')):
help_list.append(elem)
return help_list | Generate a list of instance attributes without magic and private
methods.
Returns
-------
help_list : list
A list of non-magic instance attributes. | kshell_utilities/kshell_utilities.py | help | GaffaSnobb/kshell_utilities | 0 | python | @property
def help(self):
'\n Generate a list of instance attributes without magic and private\n methods.\n\n Returns\n -------\n help_list : list\n A list of non-magic instance attributes.\n '
help_list = []
for elem in dir(self):
if (not elem.startswith('_')):
help_list.append(elem)
return help_list | @property
def help(self):
'\n Generate a list of instance attributes without magic and private\n methods.\n\n Returns\n -------\n help_list : list\n A list of non-magic instance attributes.\n '
help_list = []
for elem in dir(self):
if (not elem.startswith('_')):
help_list.append(elem)
return help_list<|docstring|>Generate a list of instance attributes without magic and private
methods.
Returns
-------
help_list : list
A list of non-magic instance attributes.<|endoftext|> |
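The property amounts to filtering dir() for names without a leading underscore; a tiny self-contained illustration of the same pattern:

class Example:
    def __init__(self):
        self.levels = None
    def gsf(self):
        pass
    @property
    def help(self):
        return [attr for attr in dir(self) if not attr.startswith('_')]

print(Example().help)   # ['gsf', 'help', 'levels']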
3b20d5932a57043daa215b87c230dcf8d19f57d35789ddd986c6bec62610c1e9 | @property
def parameters(self) -> dict:
'\n Get the KSHELL parameters from the shell file.\n\n Returns\n -------\n : dict\n A dictionary of KSHELL parameters.\n '
path = self.path
if os.path.isfile(path):
path = path.rsplit('/', 1)[0]
return get_parameters(path) | Get the KSHELL parameters from the shell file.
Returns
-------
: dict
A dictionary of KSHELL parameters. | kshell_utilities/kshell_utilities.py | parameters | GaffaSnobb/kshell_utilities | 0 | python | @property
def parameters(self) -> dict:
'\n Get the KSHELL parameters from the shell file.\n\n Returns\n -------\n : dict\n A dictionary of KSHELL parameters.\n '
path = self.path
if os.path.isfile(path):
path = path.rsplit('/', 1)[0]
return get_parameters(path) | @property
def parameters(self) -> dict:
'\n Get the KSHELL parameters from the shell file.\n\n Returns\n -------\n : dict\n A dictionary of KSHELL parameters.\n '
path = self.path
if os.path.isfile(path):
path = path.rsplit('/', 1)[0]
return get_parameters(path)<|docstring|>Get the KSHELL parameters from the shell file.
Returns
-------
: dict
A dictionary of KSHELL parameters.<|endoftext|> |
113f5fea67db5980ba7bd031b3abd4d0698a4492dcf6f7d6067594f5b5e2e536 | def makeRegistry(doc, configBaseType=Config):
'A convenience function to create a new registry.\n\n The returned value is an instance of a trivial subclass of Registry whose only purpose is to\n customize its doc string and set attrList.\n '
cls = type('Registry', (Registry,), {'__doc__': doc})
return cls(configBaseType=configBaseType) | A convenience function to create a new registry.
The returned value is an instance of a trivial subclass of Registry whose only purpose is to
customize its doc string and set attrList. | gempy/library/config/registry.py | makeRegistry | astrochun/DRAGONS | 19 | python | def makeRegistry(doc, configBaseType=Config):
'A convenience function to create a new registry.\n\n The returned value is an instance of a trivial subclass of Registry whose only purpose is to\n customize its doc string and set attrList.\n '
cls = type('Registry', (Registry,), {'__doc__': doc})
return cls(configBaseType=configBaseType) | def makeRegistry(doc, configBaseType=Config):
'A convenience function to create a new registry.\n\n The returned value is an instance of a trivial subclass of Registry whose only purpose is to\n customize its doc string and set attrList.\n '
cls = type('Registry', (Registry,), {'__doc__': doc})
return cls(configBaseType=configBaseType)<|docstring|>A convenience function to create a new registry.
The returned value is an instance of a trivial subclass of Registry whose only purpose is to
customize its doc string and set attrList.<|endoftext|> |
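A usage sketch for makeRegistry together with Registry.register, assuming the package is importable under the path shown above (gempy/library/config) and exposes pex_config-style Config and Field; the algorithm and config names are hypothetical.

from gempy.library.config import Config, Field                 # assumed exports
from gempy.library.config.registry import makeRegistry

FITTER_REGISTRY = makeRegistry("Registry of line-fitting algorithms.")

class GaussianFitConfig(Config):
    fwhm = Field("Initial FWHM guess in pixels.", float, default=3.0)

def gaussian_fit(config):
    return f"gaussian fit with fwhm={config.fwhm}"

# Passing ConfigClass wraps the plain function so the registry knows which
# Config to instantiate for it.
FITTER_REGISTRY.register("gaussian", target=gaussian_fit, ConfigClass=GaussianFitConfig)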
16ebbbd6b8d4546592feaa9dd5463f0a47bb03fed7f01dd178ef047e92c4c111 | def registerConfigurable(name, registry, ConfigClass=None):
"A decorator that adds a class as a configurable in a Registry.\n\n If the 'ConfigClass' argument is None, the class's ConfigClass attribute will be used.\n "
def decorate(cls):
registry.register(name, target=cls, ConfigClass=ConfigClass)
return cls
return decorate | A decorator that adds a class as a configurable in a Registry.
If the 'ConfigClass' argument is None, the class's ConfigClass attribute will be used. | gempy/library/config/registry.py | registerConfigurable | astrochun/DRAGONS | 19 | python | def registerConfigurable(name, registry, ConfigClass=None):
"A decorator that adds a class as a configurable in a Registry.\n\n If the 'ConfigClass' argument is None, the class's ConfigClass attribute will be used.\n "
def decorate(cls):
registry.register(name, target=cls, ConfigClass=ConfigClass)
return cls
return decorate | def registerConfigurable(name, registry, ConfigClass=None):
"A decorator that adds a class as a configurable in a Registry.\n\n If the 'ConfigClass' argument is None, the class's ConfigClass attribute will be used.\n "
def decorate(cls):
registry.register(name, target=cls, ConfigClass=ConfigClass)
return cls
return decorate<|docstring|>A decorator that adds a class as a configurable in a Registry.
If the 'ConfigClass' argument is None, the class's ConfigClass attribute will be used.<|endoftext|> |
dc5cafc8c4f8666cafc26dcef4fe5a6e08983f845f1c8b3788fac44b79605580 | def registerConfig(name, registry, target):
'A decorator that adds a class as a ConfigClass in a Registry, and associates it with the given\n configurable.\n '
def decorate(cls):
registry.register(name, target=target, ConfigClass=cls)
return cls
return decorate | A decorator that adds a class as a ConfigClass in a Registry, and associates it with the given
configurable. | gempy/library/config/registry.py | registerConfig | astrochun/DRAGONS | 19 | python | def registerConfig(name, registry, target):
'A decorator that adds a class as a ConfigClass in a Registry, and associates it with the given\n configurable.\n '
def decorate(cls):
registry.register(name, target=target, ConfigClass=cls)
return cls
return decorate | def registerConfig(name, registry, target):
'A decorator that adds a class as a ConfigClass in a Registry, and associates it with the given\n configurable.\n '
def decorate(cls):
registry.register(name, target=target, ConfigClass=cls)
return cls
return decorate<|docstring|>A decorator that adds a class as a ConfigClass in a Registry, and associates it with the given
configurable.<|endoftext|> |
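The two decorators above are thin wrappers around Registry.register; the sketch below uses the same assumptions as the previous example (pex_config-style Config and Field, hypothetical names) and re-creates the registry so it stands alone.

from gempy.library.config import Config, Field                 # assumed exports
from gempy.library.config.registry import makeRegistry, registerConfigurable, registerConfig

FITTER_REGISTRY = makeRegistry("Registry of line-fitting algorithms.")

class ChebyshevFitConfig(Config):
    order = Field("Polynomial order.", int, default=3)

# registerConfigurable: decorate the configurable itself.
@registerConfigurable("chebyshev", FITTER_REGISTRY, ConfigClass=ChebyshevFitConfig)
def chebyshev_fit(config):
    return f"chebyshev fit of order {config.order}"

# registerConfig: decorate the Config class and point it at an existing target.
def spline_fit(config):
    return f"spline fit with {config.knots} knots"

@registerConfig("spline", FITTER_REGISTRY, target=spline_fit)
class SplineFitConfig(Config):
    knots = Field("Number of spline knots.", int, default=10)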
d04f2a2b18748e078a3a15f01704f1654fe6878a9c106b0046ce4220fed211c9 | def __init__(self, configBaseType=Config):
'Construct a registry of name: configurables\n\n @param configBaseType: base class for config classes in registry\n '
if (not issubclass(configBaseType, Config)):
raise TypeError(('configBaseType=%s must be a subclass of Config' % _typeStr(configBaseType)))
self._configBaseType = configBaseType
self._dict = {} | Construct a registry of name: configurables
@param configBaseType: base class for config classes in registry | gempy/library/config/registry.py | __init__ | astrochun/DRAGONS | 19 | python | def __init__(self, configBaseType=Config):
'Construct a registry of name: configurables\n\n @param configBaseType: base class for config classes in registry\n '
if (not issubclass(configBaseType, Config)):
raise TypeError(('configBaseType=%s must be a subclass of Config' % _typeStr(configBaseType)))
self._configBaseType = configBaseType
self._dict = {} | def __init__(self, configBaseType=Config):
'Construct a registry of name: configurables\n\n @param configBaseType: base class for config classes in registry\n '
if (not issubclass(configBaseType, Config)):
raise TypeError(('configBaseType=%s must be a subclass of Config' % _typeStr(configBaseType)))
self._configBaseType = configBaseType
self._dict = {}<|docstring|>Construct a registry of name: configurables
@param configBaseType: base class for config classes in registry<|endoftext|> |
ac0b2256fcd3da8fa2b5290a896184d8b074ea1c6a86da6f87efafbb73f40331 | def register(self, name, target, ConfigClass=None):
"Add a new item to the registry.\n\n @param target A callable 'object that takes a Config instance as its first argument.\n This may be a Python type, but is not required to be.\n @param ConfigClass A subclass of pex_config Config used to configure the configurable;\n if None then configurable.ConfigClass is used.\n\n @note: If ConfigClass is provided then then 'target' is wrapped in a new object that forwards\n function calls to it. Otherwise the original 'target' is stored.\n\n @raise AttributeError if ConfigClass is None and target does not have attribute ConfigClass\n "
if (name in self._dict):
raise RuntimeError(('An item with name %r already exists' % name))
if (ConfigClass is None):
wrapper = target
else:
wrapper = ConfigurableWrapper(target, ConfigClass)
if (not issubclass(wrapper.ConfigClass, self._configBaseType)):
raise TypeError(('ConfigClass=%s is not a subclass of %r' % (_typeStr(wrapper.ConfigClass), _typeStr(self._configBaseType))))
self._dict[name] = wrapper | Add a new item to the registry.
@param target A callable 'object that takes a Config instance as its first argument.
This may be a Python type, but is not required to be.
@param ConfigClass A subclass of pex_config Config used to configure the configurable;
if None then configurable.ConfigClass is used.
@note: If ConfigClass is provided then 'target' is wrapped in a new object that forwards
function calls to it. Otherwise the original 'target' is stored.
@raise AttributeError if ConfigClass is None and target does not have attribute ConfigClass | gempy/library/config/registry.py | register | astrochun/DRAGONS | 19 | python | def register(self, name, target, ConfigClass=None):
"Add a new item to the registry.\n\n @param target A callable 'object that takes a Config instance as its first argument.\n This may be a Python type, but is not required to be.\n @param ConfigClass A subclass of pex_config Config used to configure the configurable;\n if None then configurable.ConfigClass is used.\n\n @note: If ConfigClass is provided then then 'target' is wrapped in a new object that forwards\n function calls to it. Otherwise the original 'target' is stored.\n\n @raise AttributeError if ConfigClass is None and target does not have attribute ConfigClass\n "
if (name in self._dict):
raise RuntimeError(('An item with name %r already exists' % name))
if (ConfigClass is None):
wrapper = target
else:
wrapper = ConfigurableWrapper(target, ConfigClass)
if (not issubclass(wrapper.ConfigClass, self._configBaseType)):
raise TypeError(('ConfigClass=%s is not a subclass of %r' % (_typeStr(wrapper.ConfigClass), _typeStr(self._configBaseType))))
self._dict[name] = wrapper | def register(self, name, target, ConfigClass=None):
"Add a new item to the registry.\n\n @param target A callable 'object that takes a Config instance as its first argument.\n This may be a Python type, but is not required to be.\n @param ConfigClass A subclass of pex_config Config used to configure the configurable;\n if None then configurable.ConfigClass is used.\n\n @note: If ConfigClass is provided then then 'target' is wrapped in a new object that forwards\n function calls to it. Otherwise the original 'target' is stored.\n\n @raise AttributeError if ConfigClass is None and target does not have attribute ConfigClass\n "
if (name in self._dict):
raise RuntimeError(('An item with name %r already exists' % name))
if (ConfigClass is None):
wrapper = target
else:
wrapper = ConfigurableWrapper(target, ConfigClass)
if (not issubclass(wrapper.ConfigClass, self._configBaseType)):
raise TypeError(('ConfigClass=%s is not a subclass of %r' % (_typeStr(wrapper.ConfigClass), _typeStr(self._configBaseType))))
self._dict[name] = wrapper<|docstring|>Add a new item to the registry.
@param target A callable object that takes a Config instance as its first argument.
This may be a Python type, but is not required to be.
@param ConfigClass A subclass of pex_config Config used to configure the configurable;
if None then configurable.ConfigClass is used.
@note: If ConfigClass is provided then 'target' is wrapped in a new object that forwards
function calls to it. Otherwise the original 'target' is stored.
@raise AttributeError if ConfigClass is None and target does not have attribute ConfigClass<|endoftext|> |
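Taken together, the two registry.py records above (the __init__ and register bodies) describe a small plug-in pattern: a Registry maps names to callables that accept a Config, optionally wrapping each one with its own config class. The sketch below is a hedged illustration only; the GaussianFitConfig class, the fit_gaussian function, and the Field import path are assumptions made for the example, not part of the DRAGONS source.

# Minimal usage sketch, assuming Config and Field are importable from
# gempy.library.config and Registry from the registry module shown above.
from gempy.library.config import Config, Field
from gempy.library.config.registry import Registry


class GaussianFitConfig(Config):
    """Hypothetical per-entry configuration."""
    fwhm = Field(doc="Initial FWHM guess in pixels", dtype=float, default=3.5)


def fit_gaussian(config, data):
    """Hypothetical target: a callable taking a Config instance first."""
    return {"fwhm": config.fwhm, "npoints": len(data)}


registry = Registry(configBaseType=Config)

# ConfigClass is given explicitly, so fit_gaussian is stored wrapped in a
# ConfigurableWrapper under the name "gaussian".
registry.register("gaussian", fit_gaussian, ConfigClass=GaussianFitConfig)

# Registering the same name twice raises RuntimeError, and a ConfigClass that
# is not a subclass of the registry's configBaseType raises TypeError.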
9d0b83e99dca24153a05dede7d60960d49eea79cc07420966b97f7540882394d | def apply(self, *args, **kw):
'Call the active target(s) with the active config as a keyword arg\n\n If this is a multi-selection field, return a list obtained by calling\n each active target with its corresponding active config.\n\n Additional arguments will be passed on to the configurable target(s)\n '
if (self.active is None):
msg = ('No selection has been made. Options: %s' % ' '.join(list(self._field.typemap.registry.keys())))
raise FieldValidationError(self._field, self._config, msg)
if self._field.multi:
retvals = []
for c in self._selection:
retvals.append(self._field.typemap.registry[c](*args, config=self[c], **kw))
return retvals
else:
return self._field.typemap.registry[self.name](*args, config=self[self.name], **kw) | Call the active target(s) with the active config as a keyword arg
If this is a multi-selection field, return a list obtained by calling
each active target with its corresponding active config.
Additional arguments will be passed on to the configurable target(s) | gempy/library/config/registry.py | apply | astrochun/DRAGONS | 19 | python | def apply(self, *args, **kw):
'Call the active target(s) with the active config as a keyword arg\n\n If this is a multi-selection field, return a list obtained by calling\n each active target with its corresponding active config.\n\n Additional arguments will be passed on to the configurable target(s)\n '
if (self.active is None):
msg = ('No selection has been made. Options: %s' % ' '.join(list(self._field.typemap.registry.keys())))
raise FieldValidationError(self._field, self._config, msg)
if self._field.multi:
retvals = []
for c in self._selection:
retvals.append(self._field.typemap.registry[c](*args, config=self[c], **kw))
return retvals
else:
return self._field.typemap.registry[self.name](*args, config=self[self.name], **kw) | def apply(self, *args, **kw):
'Call the active target(s) with the active config as a keyword arg\n\n If this is a multi-selection field, return a list obtained by calling\n each active target with its corresponding active config.\n\n Additional arguments will be passed on to the configurable target(s)\n '
if (self.active is None):
msg = ('No selection has been made. Options: %s' % ' '.join(list(self._field.typemap.registry.keys())))
raise FieldValidationError(self._field, self._config, msg)
if self._field.multi:
retvals = []
for c in self._selection:
retvals.append(self._field.typemap.registry[c](*args, config=self[c], **kw))
return retvals
else:
return self._field.typemap.registry[self.name](*args, config=self[self.name], **kw)<|docstring|>Call the active target(s) with the active config as a keyword arg
If this is a multi-selection field, return a list obtained by calling
each active target with its corresponding active config.
Additional arguments will be passed on to the configurable target(s)<|endoftext|> |
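The apply record belongs to the instance-side proxy that a registry-backed field creates on a Config object: it looks up the active name (or names, when multi is True) and calls the registered target with that entry's config passed as a keyword argument. The continuation below reuses the hypothetical registry and "gaussian" entry from the previous sketch; RegistryField and its (doc, registry, default, optional, multi) signature are inferred assumptions, not a documented API.

# Hedged continuation of the sketch above; RegistryField is an assumed export.
from gempy.library.config.registry import RegistryField


class FitTaskConfig(Config):
    fitter = RegistryField(doc="Which fitter to run", registry=registry,
                           default="gaussian", multi=False)


cfg = FitTaskConfig()
cfg.fitter.name = "gaussian"         # single selection: becomes the active entry
cfg.fitter["gaussian"].fwhm = 5.0    # adjust that entry's own config

# apply() forwards to registry["gaussian"](data=..., config=cfg.fitter["gaussian"]);
# with multi=True the selection is a list and apply() returns one result per name.
result = cfg.fitter.apply(data=[1.2, 3.4, 5.6])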
7bb945ffe54deef31be000f4ff76c0ce8b199a77b8be281301527458e289bac3 | def __deepcopy__(self, memo):
'Customize deep-copying, want a reference to the original registry.\n WARNING: this must be overridden by subclasses if they change the\n constructor signature!\n '
other = type(self)(doc=self.doc, registry=self.registry, default=copy.deepcopy(self.default), optional=self.optional, multi=self.multi)
other.source = self.source
return other | Customize deep-copying, want a reference to the original registry.
WARNING: this must be overridden by subclasses if they change the
constructor signature! | gempy/library/config/registry.py | __deepcopy__ | astrochun/DRAGONS | 19 | python | def __deepcopy__(self, memo):
'Customize deep-copying, want a reference to the original registry.\n WARNING: this must be overridden by subclasses if they change the\n constructor signature!\n '
other = type(self)(doc=self.doc, registry=self.registry, default=copy.deepcopy(self.default), optional=self.optional, multi=self.multi)
other.source = self.source
return other | def __deepcopy__(self, memo):
'Customize deep-copying, want a reference to the original registry.\n WARNING: this must be overridden by subclasses if they change the\n constructor signature!\n '
other = type(self)(doc=self.doc, registry=self.registry, default=copy.deepcopy(self.default), optional=self.optional, multi=self.multi)
other.source = self.source
return other<|docstring|>Customize deep-copying, want a reference to the original registry.
WARNING: this must be overridden by subclasses if they change the
constructor signature!<|endoftext|> |
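The __deepcopy__ record pins down one subtle contract: copying a registry-backed field deep-copies its default but deliberately keeps a reference to the same registry object. A short check, again using the assumed RegistryField signature and the hypothetical registry from the sketches above:

import copy

# Standalone field instance built only to exercise the documented copy contract.
field = RegistryField(doc="fitter choice", registry=registry, default="gaussian")
field_copy = copy.deepcopy(field)

assert field_copy.registry is field.registry   # registry object is shared by design
assert field_copy.default == field.default     # default value is copied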
805670c7152eff4df5111d07caa3052e25d2b82a325548f22f64aaa625b875fa | def condense_coords(matches):
'restructure point match dictionary structure to Nx2 array\n\n Parameters\n ----------\n matches : list of dict\n list of match dictionaries in Render format\n\n Returns\n -------\n coords : numpy.ndarray\n Nx2 array representing matches\n '
x = []
y = []
for m in matches:
x += m['matches']['p'][0]
x += m['matches']['q'][0]
y += m['matches']['p'][1]
y += m['matches']['q'][1]
coords = np.transpose(np.vstack((np.array(x), np.array(y))))
return coords | restructure point match dictionary structure to Nx2 array
Parameters
----------
matches : list of dict
list of match dictionaries in Render format
Returns
-------
coords : numpy.ndarray
Nx2 array representing matches | em_stitch/lens_correction/mesh_and_solve_transform.py | condense_coords | AllenInstitute/em_stitch | 2 | python | def condense_coords(matches):
'restructure point match dictionary structure to Nx2 array\n\n Parameters\n ----------\n matches : list of dict\n list of match dictionaries in Render format\n\n Returns\n -------\n coords : numpy.ndarray\n Nx2 array representing matches\n '
x = []
y = []
for m in matches:
x += m['matches']['p'][0]
x += m['matches']['q'][0]
y += m['matches']['p'][1]
y += m['matches']['q'][1]
coords = np.transpose(np.vstack((np.array(x), np.array(y))))
return coords | def condense_coords(matches):
'restructure point match dictionary structure to Nx2 array\n\n Parameters\n ----------\n matches : list of dict\n list of match dictionaries in Render format\n\n Returns\n -------\n coords : numpy.ndarray\n Nx2 array representing matches\n '
x = []
y = []
for m in matches:
x += m['matches']['p'][0]
x += m['matches']['q'][0]
y += m['matches']['p'][1]
y += m['matches']['q'][1]
coords = np.transpose(np.vstack((np.array(x), np.array(y))))
return coords<|docstring|>restructure point match dictionary structure to Nx2 array
Parameters
----------
matches : list of dict
list of match dictionaries in Render format
Returns
-------
coords : numpy.ndarray
Nx2 array representing matches<|endoftext|> |
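The final record switches to the em_stitch repository; condense_coords is a pure reshaping helper that pools the 'p' and 'q' point lists of every match dictionary into one Nx2 coordinate array. A small, self-contained illustration with made-up coordinates (assuming em_stitch is installed):

from em_stitch.lens_correction.mesh_and_solve_transform import condense_coords

# Two point matches between a tile pair; each side stores [x_list, y_list].
matches = [{
    "matches": {
        "p": [[0.0, 100.0], [10.0, 110.0]],   # p-side x values, then y values
        "q": [[0.4, 100.4], [10.3, 110.2]],   # q-side x values, then y values
    }
}]

coords = condense_coords(matches)
print(coords.shape)   # (4, 2): both sides of both matches stacked row-wise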