{
"source": "jergosh/pyranges",
"score": 3
}
#### File: pyranges/explore/k_nearest.py
```python
import numpy as np
import pandas as pd
# from pyranges.methods.join import _both_dfs
np.random.seed(0)
def sort_one_by_one(d, col1, col2):
"""
Equivalent to d.sort_values(by=[col1, col2]), but faster: sort on col2 first,
then run a stable mergesort on col1 so ties keep the col2 order.
"""
d = d.sort_values(by=[col2])
return d.sort_values(by=[col1], kind='mergesort')
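# Worked example (comment only; the toy frame is not from the original file):
#   toy = pd.DataFrame({"Start": [5, 1, 5], "End": [9, 3, 7]})
#   sort_one_by_one(toy, "Start", "End")
# The mergesort on col1 is stable, so the earlier sort on col2 survives as the
# tie-breaker and the rows come back ordered (1, 3), (5, 7), (5, 9).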
def _insert_distance(ocdf, dist, suffix):
if "Distance" not in ocdf:
distance_column_name = "Distance"
elif "Distance" + suffix not in ocdf:
distance_column_name = "Distance" + suffix
else:
i = 1
while "Distance" + str(i) in ocdf:
i += 1
distance_column_name = "Distance" + str(i)
ocdf.insert(ocdf.shape[1], distance_column_name,
pd.Series(dist, index=ocdf.index).fillna(-1).astype(int))
return ocdf
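# Note (comment added for clarity): the helper never overwrites an existing
# column; if "Distance" is taken it falls back to "Distance" + suffix, then to
# "Distance1", "Distance2", ... Missing distances are encoded as -1.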
large = True
if large:
size_chr1 = 248956422
n = int(1e6)
s1 = np.random.randint(size_chr1, size=n)
e1 = s1 + np.random.randint(10000, size=n)
s2 = np.random.randint(size_chr1, size=n)
e2 = s2 + np.random.randint(10000, size=n)
else:
n = 10
n2 = n * 2
s1 = np.random.randint(100, size=n2)
e1 = s1 + 10
s2 = np.random.randint(100, size=n)
e2 = s2 + 10
d1 = pd.DataFrame({"Start": s1, "End": e1})
d2 = pd.DataFrame({"Start": s2, "End": e2})
def nearest_next_idx(d1, d2, k=1):
"""Return k indexes from d1, d2 and their distance.
d1.End <= d2.Start, i.e. next in forward direction.
dist negative means invalid, i.e. there were less than k nearest intervals."""
d1e = d1.End.sort_values()
d2s = d2.Start.sort_values()
ix = np.searchsorted(d2s, d1e, side="left")
if k != 1:
new_ix = np.ones(len(ix) * (k), dtype=d1e.dtype) * -1
new_ix[::k] = ix
for _k in range(1, k):
_k_m1 = _k - 1
new_ix[_k::k] = new_ix[_k_m1::k] + 1
ix = new_ix
ix[ix >= len(d2s)] = len(d2s) - 1
d1_idx = d1e.index
if k != 1:
r = np.repeat(np.arange(len(d1e)), k)
d1e = d1e.iloc[r]
d1_idx = d1e.index
d2_idx = d2s.iloc[ix].index
d2s = d2s[d2_idx].values
dist = d2s - d1e
return pd.DataFrame({"D1X": d1_idx, "D2X": d2_idx, "Dist": dist})
def nearest_next(d1, d2, k, suffix):
x = nearest_next_idx(d1, d2, k)
d1 = d1.loc[x.D1X]
d1.index = range(len(d1))
d2 = d2.loc[x.D2X]
d2 = _insert_distance(d2, x.Dist, suffix)
d2.index = range(len(d2))
return d1.join(d2, rsuffix=suffix)
def nearest_previous_idx(d1, d2, k=1):
"""Like nearest_next_idx, but searching backwards: for each interval in d1,
return the k nearest intervals in d2 that end at or before d1.Start."""
d1s = d1.Start.sort_values()
d2e = d2.End.sort_values()
ix = np.searchsorted(d2e, d1s, side="right") - 1
ix[ix < 0] = 0
d2_idx = d2e.iloc[ix].index
if k != 1:
new_ix = np.ones(len(ix) * (k), dtype=d1s.dtype) * -1
new_ix[::k] = ix
for _k in range(1, k):
_k_m1 = _k - 1
new_ix[_k::k] = new_ix[_k_m1::k] - 1
ix = new_ix
ix[ix < 0] = 0
d1_idx = d1s.index
if k != 1:
r = np.repeat(np.arange(len(d1s)), k)
d1s = d1s.iloc[r]
d1_idx = d1s.index
d2_idx = d2e.iloc[ix].index
dist = d1s - d2e[d2_idx].values
return pd.DataFrame({"D1X": d1_idx, "D2X": d2_idx, "Dist": dist})
def nearest_idx(d1, d2, k=1):
n = nearest_next_idx(d1, d2, k)
p = nearest_previous_idx(d1, d2, k)
df = pd.concat([n, p])
df = df[df.Dist >= 0]
df = sort_one_by_one(df, "D1X", "Dist")
df = df.groupby("D1X", sort=False).head(k)
return df
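# Usage sketch (comment only, not in the original script): with the frames
# built above,
#   hits = nearest_idx(d1, d2, k=2)
# hits.D1X indexes into d1, hits.D2X into d2 and hits.Dist is the gap between
# them; at most k rows are kept per d1 interval, nearest first.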
n = nearest_idx(d1, d2, k=1)
# print(n)
# print(d1.loc[n.D1X].head())
# print(d2.loc[n.D2X].head())
# print(n.Dist.head())
# def _overlapping_for_nearest(scdf, ocdf, suffix):
# nearest_df = pd.DataFrame(columns="Chromosome Start End Strand".split())
# scdf2, ocdf2 = _both_dfs(scdf, ocdf, how="first")
# if not ocdf2.empty:
# original_idx = scdf.index
# idxs = scdf2.index
# original_idx = scdf.index.copy(deep=True)
# missing_idxs = ~original_idx.isin(idxs)
# missing_overlap = scdf.index[missing_idxs]
# df_to_find_nearest_in = scdf.reindex(missing_overlap)
# odf = ocdf.reindex(ocdf2.index)
# odf.index = idxs
# sdf = scdf.reindex(idxs)
# nearest_df = sdf.join(odf, rsuffix=suffix)
# nearest_df = _insert_distance(nearest_df, 0, suffix)
# else:
# df_to_find_nearest_in = scdf
# return nearest_df, df_to_find_nearest_in
# def _nearest(scdf, ocdf, kwargs):
# if scdf.empty or ocdf.empty:
# return None
# overlap = kwargs["overlap"]
# how = kwargs["how"]
# suffix = kwargs["suffix"]
# k = kwargs["k"]
# if how == "upstream":
# strand = scdf.Strand.iloc[0]
# how = {"+": "previous", "-": "next"}[strand]
# elif how == "downstream":
# strand = scdf.Strand.iloc[0]
# how = {"+": "next", "-": "previous"}[strand]
# if overlap:
# nearest_df, df_to_find_nearest_in = _overlapping_for_nearest(
# scdf, ocdf, suffix)
# else:
# df_to_find_nearest_in = scdf
# if not df_to_find_nearest_in.empty:
# if how == "next":
# df = nearest_next(scdf, df_to_find_nearest_in, k, suffix)
# print(df)
# raise
# print(_nearest(d1, d2, {"overlap": False, "suffix": "hooo", "k": 1, "how": "next"}))
# d1x, d2x, dist = nearest_next(d1, d2, k=2)
# d1x, d2x, dist = nearest_previous(d1, d2, k=2)
# print("d1")
# print(d1)
# print("d2")
# print(d2)
# print("d1 right order")
# print(d1.loc[d1x])
# print("d2 right order")
# print(d2.loc[d2x])
# print(dist)
# print(len(d1))
# print(d1)
# print(len(res))
# print(res)
# a1 = np.sort(a1)
# # CPU times: user 9.78 s, sys: 648 ms, total: 10.4 s
# # Wall time: 951 ms
# a2_s = np.sort(a2)
# # CPU times: user 9.82 s, sys: 544 ms, total: 10.4 s
# # Wall time: 956 ms
# d1s = sort_one_by_one(d1, "Start", "End")
# # CPU times: user 48.2 s, sys: 3.88 s, total: 52.1 s
# # Wall time: 4.22 s
# # time d1.sort_values(["Start", "End"])
# # CPU times: user 1min, sys: 3.92 s, total: 1min 4s
# # Wall time: 25.3 s
# d2s = sort_one_by_one(d2, "Start", "End")
# r = np.searchsorted(d1s.Start, d2s.End)
```
#### File: pyranges/methods/cluster.py
```python
from sorted_nearest import annotate_clusters, cluster_by
def _cluster(df, kwargs):
if df.empty:
return None
slack = kwargs.get("slack", 0)
count = kwargs.get("count", False)
cdf = df.sort_values("Start")
ids = annotate_clusters(cdf.Start.values, cdf.End.values, slack)
cdf.insert(df.shape[1], "Cluster", ids)
if count:
_count = cdf.groupby("Cluster").Cluster.count()
_count.name = "Count"
cdf = cdf.merge(_count, on="Cluster")
return cdf
def _cluster_by(df, kwargs):
if df.empty:
return None
slack = kwargs.get("slack", 0)
count = kwargs.get("count", False)
by = kwargs["by"]
cdf = df.sort_values(by)
new_ids = (cdf[by] != cdf[by].shift()).cumsum()
cdf.insert(cdf.shape[1], "ClusterBy", new_ids)
cdf = cdf.sort_values(["ClusterBy", "Start"])
ids = cluster_by(cdf.Start.values, cdf.End.values, cdf.ClusterBy.values,
slack)
cdf = cdf.drop("ClusterBy", axis=1)
cdf.insert(cdf.shape[1], "Cluster", ids)
if count:
_count = cdf.groupby("Cluster").Cluster.count()
_count.name = "Count"
cdf = cdf.merge(_count, on="Cluster")
return cdf
```
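A minimal usage sketch (not from the repository) for `_cluster`, assuming `sorted_nearest` is installed, that the function is importable from the module path shown above, and that the integer dtype of the toy frame matches what `annotate_clusters` expects on the installed build:
```python
import pandas as pd

from pyranges.methods.cluster import _cluster

# Hypothetical toy frame: two overlapping intervals and one isolated interval.
toy = pd.DataFrame({"Start": [1, 4, 20], "End": [5, 10, 30]})
clustered = _cluster(toy, {"slack": 0})
# [1, 5) and [4, 10) should share a Cluster id; [20, 30) should get its own.
print(clustered)
```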
#### File: pyranges/methods/drop_duplicates.py
```python
def _drop_duplicate_positions(df, kwargs):
strand = kwargs.get("strand")
columns = ["Start", "End"]
if strand:
columns.append("Strand")
return df.drop_duplicates(columns)
```
#### File: pyranges/methods/summary.py
```python
from collections import OrderedDict
import pandas as pd
from tabulate import tabulate
def _summary(self):
lengths = OrderedDict()
lengths["pyrange"] = self.lengths(as_dict=True)
if self.stranded:
c = self.merge(strand=True)
lengths["coverage_stranded"] = c.lengths(as_dict=True)
c = self.merge(strand=False)
lengths["coverage_unstranded"] = c.lengths(as_dict=True)
summaries = OrderedDict()
for summary, d in lengths.items():
summaries[summary] = pd.concat(d.values()).describe()
summary = pd.concat(summaries.values(), axis=1)
summary.columns = list(summaries)
str_repr = tabulate(summary, headers=summary.columns, tablefmt='psql')
print(str_repr)
```
#### File: pyranges/methods/windows.py
```python
from sorted_nearest import makewindows
from sorted_nearest import maketiles
import numpy as np
def _windows(df, kwargs):
window_size = kwargs["window_size"]
idxs, starts, ends = makewindows(df.index.values, df.Start.values,
df.End.values, window_size)
df = df.reindex(idxs)
df.loc[:, "Start"] = starts
df.loc[:, "End"] = ends
return df
def _intersect_tile(df):
overlap = np.minimum(df.End, df.__End__) - np.maximum(df.Start, df.__Start__)
df.insert(df.shape[1], "TileOverlap", overlap)
return df
def _tiles(df, kwargs):
overlap = kwargs.get("overlap")
if overlap:
df = df.copy()
df.insert(df.shape[1], "__Start__", df.Start)
df.insert(df.shape[1], "__End__", df.End)
window_size = kwargs["tile_size"]
idxs, starts, ends = maketiles(df.index.values, df.Start.values,
df.End.values, window_size)
df = df.reindex(idxs)
df.loc[:, "Start"] = starts
df.loc[:, "End"] = ends
if overlap:
df = _intersect_tile(df)
df = df.drop(["__Start__", "__End__"], axis=1)
return df
```
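A small sketch (not from the repository) of `_windows`, under the same assumptions as above: `sorted_nearest` is installed, the module path matches, and the integer dtypes suit the installed `makewindows` build:
```python
import pandas as pd

from pyranges.methods.windows import _windows

# Hypothetical single interval split into fixed-size windows.
toy = pd.DataFrame({"Start": [0], "End": [250]})
print(_windows(toy, {"window_size": 100}))
# Expect rows roughly covering [0, 100), [100, 200) and [200, 250).
```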
{
"source": "jer-green/triage-bandit-sandbox",
"score": 2
}
#### File: triage_bandit_sandbox/inference/models.py
```python
from abc import ABC
from collections import defaultdict
from typing import Dict, List, Mapping, NamedTuple, Optional, Set, Union
import attr
import pandas as pd
from babylon_pgm.constants import LabelType
from babylon_pgm.models.nodes import Node
from babylon_pgm.models.diagnosis import Differential
from dataenforce import Dataset
Priors = Mapping[str, float]
RelativeRisks = Mapping[str, Mapping[str, float]] # {Disease: {Risk: value}}
SymptomDiseaseMarginals = Mapping[
str, Mapping[str, float]
] # {Symptom: {Disease: values}}
SpecialSymptomLeaks = Mapping[str, float]
EvidenceMap = Mapping[str, str] # concept_id: state
class ConceptGroup(NamedTuple):
id: str
name: str
exclusive: bool
exhaustive: bool
concept_ids: List[str]
class ConceptHierarchy(NamedTuple):
concept_id: str
groups: List[ConceptGroup]
class Differentials:
def __init__(self, list_of_differentials: List[Differential]):
self.df_of_differentials = None
self.dict_of_differentials = {p.node.id: p for p in list_of_differentials}
def to_df(self) -> Dataset:
if self.df_of_differentials is not None:
return self.df_of_differentials
node_list = [attr.asdict(p.node) for p in self.dict_of_differentials.values()]
node_table = pd.DataFrame.from_records(
node_list, columns=[f.name for f in attr.fields(Node)]
)
probability_field_name = attr.fields(Differential)[1].name
node_table[probability_field_name] = [
p.probability for p in self.dict_of_differentials.values()
]
self.df_of_differentials = node_table.sort_values(
by=probability_field_name, ascending=False
)
return self.df_of_differentials
def to_dict(self) -> Dict[str, Differential]:
return self.dict_of_differentials
def __repr__(self):
# Try to print an instance of this class to see the magic of this method.
differentials = ", ".join(f"{c!s}" for c in self.dict_of_differentials.values())
return f"{self.__class__.__name__}({differentials})"
def __getitem__(self, key: str):
# This either returns a single item, or a list of items (depending on match)
try:
return self.dict_of_differentials[key]
except KeyError:
return self.fuzzy_search(key, self.dict_of_differentials)
@staticmethod
def fuzzy_search(key: str, dict_to_search: Dict[str, Differential]) -> Optional[Union[Differential, List[Differential]]]:
# TODO: This incurs a O(N) cost per search, we can optimise if that is an issue.
# TODO: Should we move this to a utils module?
relevant_keys = [rel_key for rel_key in dict_to_search.keys() if key in rel_key]
if len(relevant_keys) == 1:
return dict_to_search[relevant_keys[0]]
elif len(relevant_keys) > 1:
return [dict_to_search[rel_key] for rel_key in relevant_keys]
else:
return None
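# Illustrative behaviour (comment only, not in the original module): indexing a
# Differentials instance with an id that is not an exact key falls back to
# fuzzy_search, which returns the single matching Differential, a list when
# several ids contain the key, or None when nothing matches.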
@attr.s(auto_attribs=True)
class InferenceModel(ABC):
nodes: List[Node]
concept_hierarchies: List[ConceptHierarchy]
priors: Priors
symptom_disease_marginals: SymptomDiseaseMarginals
relative_risks: RelativeRisks
special_symptom_leaks: SpecialSymptomLeaks
def __attrs_post_init__(self):
self._build_mappings()
def _build_mappings(self):
disease_to_risk_factors = {
disease_id: set(relative_risks.keys())
for disease_id, relative_risks in self.relative_risks.items()
}
symptom_to_diseases = {
symptom_id: set(marginals.keys())
for symptom_id, marginals in self.symptom_disease_marginals.items()
}
# Invert mappings
risk_factor_to_diseases = defaultdict(set)
for disease_id, risk_factor_ids in disease_to_risk_factors.items():
for risk_factor_id in risk_factor_ids:
risk_factor_to_diseases[risk_factor_id].add(disease_id)
disease_to_symptoms = defaultdict(set)
for symptom_id, disease_ids in symptom_to_diseases.items():
for disease_id in disease_ids:
disease_to_symptoms[disease_id].add(symptom_id)
# Concept hierarchies
self._group_id_to_parent_concept_id = {}
self._concept_id_to_group_id = {}
self._group_id_to_group = {}
for concept_hierarchy in self.concept_hierarchies:
for group in concept_hierarchy.groups:
self._group_id_to_parent_concept_id[
group.id
] = concept_hierarchy.concept_id
self._group_id_to_group[group.id] = group
for concept_id in group.concept_ids:
self._concept_id_to_group_id[concept_id] = group.id
self._parent_concept_id_to_concept_hierarchy = {
concept_hierarchy.concept_id: concept_hierarchy
for concept_hierarchy in self.concept_hierarchies
}
self._id_to_node = {node.id: node for node in self.nodes}
self._id_to_parents = {**disease_to_risk_factors, **symptom_to_diseases}
self._id_to_children = {**risk_factor_to_diseases, **disease_to_symptoms}
def get_parents(self, node_id: str) -> Set[Node]:
return {
self.get_node(parent_id)
for parent_id in self._id_to_parents.get(node_id, {})
if self.get_node(parent_id)
}
def get_children(self, node_id: str) -> Set[Node]:
return {
self.get_node(child_id)
for child_id in self._id_to_children.get(node_id, {})
if self.get_node(child_id)
}
def get_nodes_by_label(self, label: LabelType) -> List[Node]:
return [node for node in self.nodes if node.label == label]
def get_node(self, node_id: str) -> Optional[Node]:
return self._id_to_node.get(node_id)
def get_prior(self, node_id) -> float:
return self.priors[node_id]
def get_symptom_disease_marginal(
self,
*,
symptom_id: str,
disease_id: str,
) -> Optional[float]:
return self.symptom_disease_marginals.get(symptom_id, {}).get(disease_id)
def get_symptom_disease_marginals(self, *, symptom_id: str) -> Mapping[str, float]:
return self.symptom_disease_marginals.get(symptom_id, {})
def get_relative_risk(
self, *, risk_factor_id: str, disease_id: str
) -> Optional[float]:
return self.relative_risks.get(disease_id, {}).get(risk_factor_id)
def get_concept_hierarchy(self, concept_id: str) -> Optional[ConceptHierarchy]:
return self._parent_concept_id_to_concept_hierarchy.get(concept_id)
def get_group_parent_concept_id(self, group_id: str) -> str:
return self._group_id_to_parent_concept_id[group_id]
def get_group_id(self, concept_id: str) -> Optional[str]:
return self._concept_id_to_group_id.get(concept_id)
def get_group(self, group_id: str) -> ConceptGroup:
return self._group_id_to_group[group_id]
def get_special_symptom_leak(self, node_id) -> Optional[float]:
return self.special_symptom_leaks.get(node_id)
```
#### File: triage_models_sandbox/triage_data_transformers/utility_model_transformer.py
```python
from typing import List, Dict, Optional
import torch
from babylon_pgm.triage_models.triage_decisions import (
TRIAGE_MODEL_DECISION_TYPE,
TriageModelDecisionDefault,
)
from babylon_pgm.models.nodes import Node
from babylon_pgm.models.diagnosis import Differential
from babylon_pgm.constants import DOSI, DOSI_NODE_ID, LabelType
from babylon_pgm.exceptions import ModelError
from ..interfaces import (
TriageDataset,
PreparedData,
TriageDataTransformer,
)
DOSI_TO_LEAK_TRIAGE = {
DOSI.MINUTES: TriageModelDecisionDefault.SELF_CARE,
DOSI.HOURS: TriageModelDecisionDefault.SELF_CARE,
DOSI.DAYS: TriageModelDecisionDefault.GP,
DOSI.WEEKS: TriageModelDecisionDefault.GP,
DOSI.MONTHS: TriageModelDecisionDefault.GP,
}
UNKNOWN_DOSI_LEAK_TRIAGE = TriageModelDecisionDefault.GP
class UtilityModelTransformer(TriageDataTransformer):
"""
Transform a TriageDataset object into a PreparedData object,
which is then passed to the utility model.
"""
def __init__(
self,
triage_decisions: Optional[TRIAGE_MODEL_DECISION_TYPE] = TriageModelDecisionDefault,
max_diseases: Optional[int] = 15,
dosi_to_leak_triage: Optional[Dict] = DOSI_TO_LEAK_TRIAGE,
unknown_dosi_leak_triage: Optional[TriageModelDecisionDefault] = UNKNOWN_DOSI_LEAK_TRIAGE,
):
"""
:param triage_decisions: Triage decisions to be used (according to world region).
:param max_diseases: Maximum number of disease differentials to use as features.
:param dosi_to_leak_triage: Mapping from DOSI value to the triage decision used for the leak term.
:param unknown_dosi_leak_triage: Fallback triage decision for the leak term when no DOSI evidence is available.
"""
self._triage_decisions = triage_decisions
self.max_diseases = max_diseases
self.dosi_to_leak_triage = dosi_to_leak_triage
self.unknown_dosi_leak_triage = unknown_dosi_leak_triage
def _get_node_decision(self, node: Node) -> TRIAGE_MODEL_DECISION_TYPE:
if not node.triage:
raise ModelError(f"Missing triage for node {node.id}")
return self._triage_decisions[node.triage]
def _encode_decisions(self, decisions_list):
# Encode a list of decisions into a long tensor
return torch.LongTensor([decision.index for decision in decisions_list])
def _extract_disease_differentials(
self,
differentials_per_case: List[Differential]
) -> List[Differential]:
return [
diff for diff in differentials_per_case
if diff.node.label == LabelType.disease
]
def fit(self, dataset: TriageDataset):
pass
def transform(self, dataset: TriageDataset, train: bool) -> PreparedData:
"""
Transform the data.
:param dataset: Data to be transformed.
:param train: Whether the data are used to train the model.
:return: PreparedData object, suited to be fed to the utility model.
"""
lists_of_differentials = []
lists_of_evidence_sets = []
correct_decisions = []
list_of_ages = []
for case_card_data in dataset:
disease_differentials = self._extract_disease_differentials(case_card_data.differentials)
if train:
if case_card_data.doctor_outcomes:
for outcome in case_card_data.doctor_outcomes:
if outcome.triage:
lists_of_differentials.append(disease_differentials)
lists_of_evidence_sets.append(case_card_data.evidence)
correct_decisions.append(
self._triage_decisions[outcome.triage]
)
list_of_ages.append(case_card_data.age)
elif case_card_data.judgements:
for judg in case_card_data.judgements:
lists_of_differentials.append(disease_differentials)
lists_of_evidence_sets.append(case_card_data.evidence)
correct_decisions.append(
self._triage_decisions[judg.ideal_triage]
)
list_of_ages.append(case_card_data.age)
else:
lists_of_differentials.append(disease_differentials)
lists_of_evidence_sets.append(case_card_data.evidence)
list_of_ages.append(case_card_data.age)
# Preallocate tensors
num_differentials = len(lists_of_differentials)
num_decisions = len(self._triage_decisions)
likelihoods = torch.Tensor(num_differentials, self.max_diseases).zero_()
disease_triages = torch.LongTensor(num_differentials, self.max_diseases).zero_()
for i, differential in enumerate(lists_of_differentials):
# Reorder by probability
sorted_differential = sorted(
differential, key=lambda disease: disease.probability, reverse=True
)
for j, disease in enumerate(sorted_differential[:self.max_diseases]):
disease_decision = self._get_node_decision(disease.node)
likelihoods[i, j] = disease.probability
disease_triages[i, j] = disease_decision.index
# Get leaks
leak_triages = []
for evidence_set in lists_of_evidence_sets:
if evidence_set is None:
leak_triages.append(self.unknown_dosi_leak_triage)
else:
dosi_state = next(
(e.state for e in evidence_set if e.node.id == DOSI_NODE_ID), None
)
if dosi_state:
dosi_value = DOSI(dosi_state)
leak_triage = self.dosi_to_leak_triage[dosi_value]
else:
leak_triage = self.unknown_dosi_leak_triage
leak_triages.append(leak_triage)
# Add leak term
leak_probabilities = 1 - likelihoods.sum(1)
likelihoods = torch.cat((likelihoods, leak_probabilities.unsqueeze(1)), 1)
leak_triage_indices = [leak_triage.index for leak_triage in leak_triages]
disease_triages = torch.cat(
(disease_triages, torch.LongTensor(leak_triage_indices).unsqueeze(1)), 1
)
decision_mask = torch.Tensor(
num_differentials, self.max_diseases + 1, num_decisions,
).zero_()
for k, decision in enumerate(self._triage_decisions):
decision_mask[:, :, k] = (decision.index < disease_triages).float()
if train:
correct_decisions = (
self._encode_decisions(correct_decisions) if correct_decisions else None
)
return PreparedData(
likelihoods=likelihoods,
disease_triages=disease_triages,
decision_mask=decision_mask,
correct_decisions=correct_decisions,
)
def fit_transform(self, dataset: TriageDataset, train: bool = True) -> PreparedData:
"""
Transform the data.
:param dataset: Data to be transformed.
:param train: Whether the data are used to train the model.
:return: PreparedData object, suited to be fed to the utility model.
"""
return self.transform(dataset, train=train)
```
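The least obvious step above is the decision mask built from `decision.index < disease_triages`. Below is a self-contained sketch of just that comparison with hypothetical numbers (plain `torch`, no babylon_pgm imports):
```python
import torch

# One row per case, one column per retained disease; values are triage indices.
disease_triages = torch.LongTensor([[2, 1, 0], [3, 3, 1]])
num_decisions = 4
mask = torch.zeros(disease_triages.shape[0], disease_triages.shape[1], num_decisions)
for k in range(num_decisions):
    # 1 wherever the disease's triage index is larger than candidate decision k.
    mask[:, :, k] = (k < disease_triages).float()
print(mask[0, 0])  # disease with triage 2 -> tensor([1., 1., 0., 0.])
```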
#### File: triage_bandit_sandbox/utils/constants.py
```python
from typing import Tuple
from babylon_pgm.constants import LabelType, AGE_MIN, AGE_MAX
CORTEX_LABEL_MAPPING = {
"DISEASE": LabelType.disease,
"RISK_FACTOR": LabelType.risk,
"SYMPTOM": LabelType.symptom,
"SUPER_NODE": LabelType.super_,
}
AGE_RANGES = [
(15, 24),
(25, 39),
(40, 59),
(60, 74),
(75, 100),
]
def _get_age_range(age: int) -> Tuple:
for r in AGE_RANGES:
if r[0] <= age <= r[1]:
return r
if age < AGE_MIN:
return AGE_RANGES[0]
if age > AGE_MAX:
return AGE_RANGES[-1]
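# Illustrative values (comments only, not in the original module):
#   _get_age_range(30) -> (25, 39)
#   _get_age_range(80) -> (75, 100)
# Ages below AGE_MIN fall back to the first range and ages above AGE_MAX to the
# last; an age inside [AGE_MIN, AGE_MAX] but outside every range returns None.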
```
{
"source": "Jerhaad/purple_lamp",
"score": 3
}
#### File: Jerhaad/purple_lamp/openhab_lib.py
```python
import logging
# Extended Python #
from openhab import OpenHAB
# Constants #
SERVER_FORMAT = "http://{ip_addr}:{port}/rest".format
COLOR_NAME_ATTR = "colorName"
log_ = logging.getLogger(__name__)
class OpenHABServer:
"""Get a handle to the OpenHAB Server"""
def __init__(self, ip_addr, port, light_base):
"""
:param ip_addr: <string>
:param port: <number>
:param light_base: <string> base code for OpenHAB Thing that all of its items share. # TODO: Trim out the obv
"""
self._raw_base = light_base
self._openhab = None
self._is_open = None
self._light_base = _base_transform(light_base)
self._light_color_name = self._light_base + COLOR_NAME_ATTR
self.color_name_item = None
self.ip_addr = ip_addr
self.port = port
def __enter__(self):
"""
Dunder for context management open
:return self:
"""
self.open()
return self
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
"""
Dunder for context management close
"""
self.close(exc_type, exc_value, traceback)
def open(self):
"""
Open the connection.
# TODO: Not sure how long it can stay open yet.
"""
if self._is_open:
log_.warning("Already Open!")
else:
log_.info("Opening connection to the OpenHAB Server.")
log_.debug(" at %s:%s", self.ip_addr, self.port)
server_path = SERVER_FORMAT(ip_addr=self.ip_addr, port=self.port)
self._openhab = OpenHAB(server_path)
self.color_name_item = self._openhab.get_item(self._light_color_name)
self._is_open = True
def close(self, exc_type=None, exc_value=None, traceback=None):
"""
Close the connection down.
"""
if self._is_open:
log_.debug("Closing connection to the OpenHAB Server")
self._is_open = False
else:
log_.warning("Already Closed!")
log_.error(
"Passing exception data: %s, %s, %s.", exc_type, exc_value, traceback
)
def change_light_color(self, color):
"""
Post an update to the light.
:param color: <string>
"""
# TODO: Hysteresis? Do we need this?
log_.info("Posting update to Light")
log_.debug(" color: %s", color)
self.color_name_item.command(color)
def _base_transform(raw):
"""
OpenHAB's Thing format needs to be transformed for the python API to get Items:
: -> _
- -> _
:param raw: <string>
:return treated: <string>
"""
log_.debug("Transforming Raw Thing Base.")
log_.debug(" raw: %s", raw)
if not raw.endswith(":"):
raw = raw + ":"
treated = raw.replace(":", "_").replace("-", "_")
log_.debug("Sending back treated: %s", treated)
return treated
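# Worked example (comment only; the Thing UID below is hypothetical):
#   _base_transform("hue:0210:bridge:bulb1") -> "hue_0210_bridge_bulb1_"
# and the colour item handle then becomes "hue_0210_bridge_bulb1_colorName".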
```
{
"source": "jerheff/scikit-advance",
"score": 3
}
#### File: tests/test_model_selection/test_lowcomplexityselector.py
```python
import pytest
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier
from skadvance.model_selection import (
LowComplexitySelector,
SingleColumnComplexity,
)
from skadvance.distributions import uniform
def test_lowcomplexityselector():
X, y = make_classification(n_samples=1000, random_state=42)
hps = {"min_samples_split": uniform(0, 1)}
complexity_hp = "min_samples_split"
selected_hp_values = []
for complexity_direction in [True, False]:
complexity = SingleColumnComplexity(
complexity_hp, larger_more_complex=complexity_direction
)
print(f"Complexity fn: {complexity}")
selector = LowComplexitySelector(complexity)
gs = RandomizedSearchCV(
DecisionTreeClassifier(random_state=42),
param_distributions=hps,
n_iter=10,
scoring="roc_auc",
refit=selector,
random_state=42,
)
gs.fit(X, y)
print(f"Best test score: {np.max(gs.cv_results_['mean_test_score'])}")
print(
f"Selected score: {gs.cv_results_['mean_test_score'][gs.best_index_]}, index: {gs.best_index_}, params: {gs.best_params_}"
)
selected_hp_values.append(gs.best_params_[complexity_hp])
assert selected_hp_values[0] < selected_hp_values[1]
```
{
"source": "jericbryledy/summer-framework",
"score": 2
}
#### File: jericbryledy/summer-framework/conanfile.py
```python
from conans import ConanFile, CMake
class SummerFrameworkConan(ConanFile):
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
```
{
"source": "JerichoKain/tkinter_projects",
"score": 4
}
#### File: tkinter_projects/block_jump/main.py
```python
from tkinter import *
import time
import random
class Sprite:
def __init__(self, canvas):
self.pos = None
self.canvas = canvas
self.sprite = None
self.vx = 0
self.vy = 0
def update(self):
if self.canvas is not None and self.sprite is not None:
self.canvas.move(self.sprite, self.vx, self.vy)
self.pos = self.canvas.coords(self.sprite)
return self
def create(self, coords):
self.sprite = self.canvas.create_polygon(coords)
self.pos = self.canvas.coords(self.sprite)
def destroy(self):
self.canvas.delete(self.sprite)
def alive(self):
return True
class Square(Sprite):
def __init__(self, canvas):
super().__init__(canvas)
def makeCoords(self, x1,y1,l):
return (x1,y1, x1+l,y1, x1+l,y1+l, x1,y1+l)
def create(self, x1, y1):
super().create(self.makeCoords(x1, y1, 30))
def left(self):
return self.pos[0]
def bottom(self):
return self.pos[1]
def top(self):
return self.pos[5]
def right(self):
return self.pos[4]
def collision(self, square):
return ((self.pos[4] >= square.pos[0] and self.pos[4] <= square.pos[2])
and (self.pos[5] >= square.pos[1] or self.pos[7] >= square.pos[3]))
class ObstacleSquare(Square):
def __init__(self, canvas):
print('ObstacleSquare Created')
super().__init__(canvas)
self.vx = -3
self.create(505,150)
random_color = random.choice(['red', 'orange', 'yellow', 'green', 'blue'])
self.canvas.itemconfig(self.sprite, fill=random_color)
def alive(self):
if self.right() > 0:
return True
else:
self.canvas.delete(self.sprite)
return False
class PlayerSquare(Square):
def __init__(self, canvas):
super().__init__(canvas)
self.floor = 150
self.ceil = 75
self.create(50,150)
self.canvas.bind_all('<space>', self.jump)
def jump(self, event):
print('jump')
if self.bottom() >= self.floor:
self.vy = -4
def update(self):
if self.top() <= self.ceil and self.vy == -4:
self.vy = 4
if self.bottom() >= self.floor and self.vy == 4:
self.vy = 0
super().update()
def alive(self, sprites):
return len([s for s in sprites if self.collision(s)]) == 0
class Game:
def __init__(self):
self.tk = Tk()
self.tk.title("InfiRun")
self.tk.resizable(0, 0)
self.canvas = Canvas(self.tk, width=500, height=250)
self.canvas.pack()
self.player = PlayerSquare(self.canvas)
self.boxes = None
self.canvas.bind_all('<Return>', self.reset)
self.splash = self.canvas.create_text(250, 130, text='press <enter> to begin', fill='black', font=('Helvetica', 20))
self.mainloop()
def reset(self, event):
if self.boxes is not None:
[self.canvas.delete(b.sprite) for b in self.boxes]
self.boxes = [ObstacleSquare(self.canvas)]
self.canvas.itemconfig(self.splash, text='')
def mainloop(self):
while True:
#rest
while self.boxes is None:
self.tk.update()
time.sleep(.01)
#play
while self.player.alive(self.boxes):
if ((random.randint(0, 100) > 99 and self.boxes[-1].right() <= 400)
or (len(self.boxes) == 1 and self.boxes[0].left() < 150)):
self.boxes.append(ObstacleSquare(self.canvas))
print(len(self.boxes))
self.tk.update()
self.boxes = [b.update() for b in self.boxes if b.alive()]
self.player.update()
time.sleep(0.01)
#reset
[self.canvas.delete(b.sprite) for b in self.boxes]
self.boxes = None
self.canvas.itemconfig(self.splash, text='GAME OVER\npress <enter> to begin')
if __name__ == '__main__':
Game()
```
{
"source": "jerichooconnell/lcse_tools",
"score": 2
}
#### File: lcse_tools/src/dpp_server.py
```python
__version__ = 5.1
# TODO: Capture Ctrl+C (nompi) http://snakesthatbite.blogspot.com/2010/09/cpython-threading-interrupting.html
# TODO: Make default options process everything
# TODO: Simplify command line options (--vars)
# TODO: Processing status report (dump: processed, hvs)
import argparse
import gzip
import hashlib
import io
import os
import threading
import time
import logging
import lockfile
import shutil
import signal
import subprocess
import sys
import tempfile
import Queue
log = logging.getLogger(__name__)
log.propagate = False
ch = logging.StreamHandler()
log.addHandler(ch)
log.setLevel(logging.INFO)
#log.setLevel(logging.DEBUG)
try:
import lcse
hvr_enabled = True
except ImportError:
hvr_enabled = False
error = "Warning: LCSE Python library not found, HVR rendering disabled"
log.error(error)
from lcse_tools import ppm_plots
tag_ready = 1
tag_dump = 2
variable_list = [
dict(name='fv', original='FV-hires-01', dir='FV-hiret-01', file='FV-hiret01', ext='bob8aaa', resolution=2),
dict(name='vort', original='Lg10Vort-01', dir='Lg10Voru-01'),
dict(name='enuc', original='Lg10ENUCbyP', dir='Lg10ENVCbyP'),
dict(name='uy', original='TanhUY--001', dir='TanhUY-0001'),
dict(name='divu', original='TanhDivU-01', dir='TanhDivV-01')
]
var_map = dict((k['name'], k) for k in variable_list)
#var_map = dict((k['dir'], k) for k in variable_list)
variables_original = [k['original'] for k in variable_list]
dir_t = '{dir:s}/{dump:04d}/{var:s}'
dir_var_t = '{dir:s}/{var:s}'
dir_var2_t = '{dir:s}/{var:s}-{suffix:s}'
filename_t = '{var:s}-{dump:04d}.{ext:s}'
path_t = '{dir:s}/{var:s}/{var:s}-{dump:04d}.{ext:s}'
path2_t = '{dir:s}/{var:s}-{suffix:s}/{var:s}-{suffix:s}-{dump:04d}.{ext:s}'
# 1010/FV-hiret-01/FV-hiret01-1010.bob8aaa
# 1010/Lg10Voru-01/Lg10Voru-01-1010.bobaaa
# 1010/TanhUY-0001/TanhUY-0001-1010.bobaaa
# 1010/TanhDivV-01/TanhDivV-01-1010.bobaaa
# 1010/Lg10ENVCbyP/Lg10ENVCbyP-1010.bobaaa
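# Template expansion example (comment only, values hypothetical):
#   path_t.format(dir='./hvs', var='FV-hiret01', dump=1010, ext='hv')
#   -> './hvs/FV-hiret01/FV-hiret01-1010.hv'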
#TODO: Remove this garbage
class keyfile_reader(object):
#TODO: Change image with / height
def __init__(self, filename, hv_dir, img_dir, image_size):
self.key_name = os.path.basename(filename)[:-4]
self.hv_dir = os.path.normpath(hv_dir) if hv_dir else None
self.img_dir = os.path.normpath(img_dir) if img_dir else None
self.image_size = image_size
self.key = lcse.hvr_key(filename)
hvfile = self.key.get('hvfile')
self.var = os.path.basename(hvfile)[:-8]
if hv_dir:
self.key._data['hvfile'] = os.path.join(hv_dir, os.path.basename(hvfile))
last_sigint = None
def sig_int_handler(signal, frame):
global last_sigint
log.info("Received signal %s last %s" % (signal, last_sigint))
t = time.time()
if last_sigint is not None and (t - last_sigint) < 5.0:
log.info("Received second Ctrl+C within 5 seconds, exiting")
sys.exit(0)
last_sigint = t
def setup_logging(args):
if not args.mpi:
try:
from colorlog import ColoredFormatter
formatter = ColoredFormatter("%(log_color)s %(asctime)s - %(levelname)s : %(white)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
},
secondary_log_colors={},
style='%'
)
ch.setFormatter(formatter)
return
except:
pass
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
if args.log:
if not os.path.exists(args.logs_dir):
os.makedirs(args.logs_dir)
fh = logging.FileHandler(os.path.join(args.logs_dir, args.log))
fh.setFormatter(formatter)
log.addHandler(fh)
def execute(rank, thread, cmd, cwd=None):
if cmd[0].startswith('.'):
cmd[0] = os.path.abspath(cmd[0])
print cmd[0]
cmd_line = ' '.join(cmd)
log.info("rank %i.%i : exec '%s'" % (rank, thread, cmd_line))
a = time.time()
try:
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, shell=True)
p.communicate()
ret_val = p.wait()
except Exception as e:
log.warning("execute() caught an exception %s" % e)
ret_val = -1
b = time.time()
duration = b - a
log.info("rank %i.%i : exec '%s' finished with code %i in %f.2" % (rank, thread, cmd_line, ret_val, duration))
return (ret_val, duration)
def get_dumps(args):
if args.auto:
dumps_contents = [d for d in os.listdir(args.dumps_dir) if os.path.isdir(os.path.join(args.dumps_dir, d))]
dumps = [int(c) for c in dumps_contents if c.isdigit()]
if not dumps and args.render:
dumps = list(set([int(hv[-7:-3]) for hv in os.listdir(args.hvs_dir) if hv.endswith('.hv')]))
else:
# Should be able to run for 1 or a range of dumps
dumps = args.dumps if len(args.dumps) == 1 else range(args.dumps[0], args.dumps[1]+1)
dumps.sort()
if args.start_dump:
log.info("Starting from dump %i" % args.start_dump)
dumps = [d for d in dumps if d > args.start_dump]
log.debug("Dumps are: %s" % dumps)
return dumps
def get_keys(args):
return
def remove_processed(args, vars, dumps):
#TODO: Write this list as a shell script
# for creating paths
hv_contents = os.listdir(args.hvs_dir)
# Generate list of all expected hvr filenames
hvr_dict = dict(ext='hvr', var='', dump='')
names = []
for var in vars:
for dump in dumps:
names.append(filename_t.format(dump=dump, var=var, ext='hvr'))
return names
#
def archive_dump(args, thread_id, dump):
"""
We take files like ./0340/FV-hires-01/FV-hires01-0340.bob8abd and compress them into
./dump-archive/FV-hires-01/FV-hires-01-0340.gz/FV-hires01-0340.bob8abd.gz
"""
results = {}
log.info("rank %i.%i : started archiving dump %i" % (args.rank, thread_id, dump))
output_dir = args.archive_dir
gzip_path_t = '{dir:s}/{var:s}/{var:s}-{dump:04d}.gz/'
gzip_filename = '{filename:s}.gz'
#unpack_command = 'gzip -d {gzip:s};\nmv {file:s} {dest:s}'
for var_info in variable_list:
var_dir = var_info['dir']
bob_dir = dir_t.format(dir=args.dumps_dir, dump=dump, var=var_dir)
bob_files = os.listdir(bob_dir)
bob_files.sort()
dir_out = gzip_path_t.format(dir=args.archive_dir, var=var_dir, dump=dump)
if not os.path.exists(dir_out):
os.makedirs(dir_out)
hashes_path = os.path.join(dir_out, 'hashes.md5')
moveum_path = os.path.join(dir_out, 'uncompressum.sh')
verifyum_path = os.path.join(dir_out, 'verifyum.sh')
if os.path.exists(hashes_path):
log.warning("rank %i.%i : hashes file %s already exists skipping this dump/variable" % (args.rank, thread_id, hashes_path))
continue
hashes = []
file_pairs = []
for filename in bob_files:
path_in = os.path.join(bob_dir, filename)
filename_gz = gzip_filename.format(filename=filename)
path_out = os.path.join(dir_out, filename_gz)
file_pairs.append((filename, filename_gz))
if os.path.exists(path_out):
# log.info("rank %i.%i : gzip file %s exists, skipping" % (args.rank, thread_id, path_out))
log.warn("rank %i.%i : gzip file %s exists, not skipping (for safety)" % (args.rank, thread_id, path_out))
#TODO: Timing information
with open(path_in, 'rb') as f_in, gzip.open(path_out, 'wb') as f_out:
data_in = f_in.read()
# Gzip save the data
f_out.write(data_in)
# Compute MD5 sum
m = hashlib.md5()
m.update(data_in)
hash = m.hexdigest()
hashes.append((filename, hash))
log.info("rank %i.%i : gziping file %s (md5sum: %s) to %s" % (args.rank, thread_id, path_in,
hash, path_out))
if not hashes:
continue
# Write out the uncompressed hashes file
with io.open(hashes_path, 'wb') as f:
f.writelines('%s *%s\n' % (hash, filename) for filename, hash in hashes)
log.debug("rank %i.%i : wrote hashes to %s" % (args.rank, thread_id, hashes_path))
out_path = '../../uncompressed-dumps/%04d/%s' % (dump, var_dir)
# Write out the moveum file
with io.open(moveum_path, 'wb') as f:
f.write("mkdir -p %s\n" % out_path)
f.writelines('gzip -cd %s > %s/%s\n' % (filename_gz, out_path, filename) for filename, filename_gz in file_pairs)
log.debug("rank %i.%i : wrote moveum to %s" % (args.rank, thread_id, hashes_path))
# Write out hashes file
with io.open(verifyum_path, 'wb') as f:
for filename, hash in hashes:
f.write('''a=`gzip -cd %s.gz | md5sum | awk '{print $1}'`; b="%s"; \n''' % (filename, hash))
f.write('''if [ "$a" == "$b" ]; then echo "%s ok"; else echo "%s fail"; fi; unset a; unset b; \n\n''' % (filename, filename));
log.debug("rank %i.%i : wrote verifyum to %s" % (args.rank, thread_id, hashes_path))
log.info("rank %i.%i : finished archiving dump %i" % (args.rank, thread_id, dump))
return results
def process_dump(args, dump, thread_id):
log.info("rank %i.%i : started processing dump %i" % (args.rank, thread_id, dump))
results = {}
# Process the dumps
rank = args.rank
vars = []
existing = []
to_process = []
for var in args.vars:
var_dir = var_map[var]['dir']
hv_dir = dir_var_t.format(dir=args.hvs_dir, var=var_map[var].get('file', var_dir))
hv_file = path_t.format(dir=args.hvs_dir, var=var_map[var].get('file', var_dir), dump=dump, ext='hv')
if not os.path.exists(hv_file):
vars.append(var)
to_process.append((var, hv_dir))
else:
existing.append(var)
# Check if we are duplicating something
# TODO: Could reuse the paths here
results['existing'] = existing
xreformat_path = args.xreformat_exec
bob2hv_path = args.bob2hv_exec
# Clean up any stale old files
rm_cmd = []
if args.cleanup or args.reformat:
rm_cmd.extend(['rm', '-rf'])
rm_cmd.extend([dir_t.format(dir=args.dumps_dir, dump=dump, var=var_map[var]['dir']) for var in args.vars])
if vars and args.reformat:
dump_str = '%04i' % dump
# Remove old and stale stuff before running reformat
execute(args.rank, thread_id, rm_cmd)
cwd = args.dumps_dir if args.multidir else dir_t.format(dir=args.dumps_dir, dump=dump, var=var_map[to_process[0][0]]['original'])
log.warning("CWD is " + cwd)
reformat_cmd = [xreformat_path, dump_str, dump_str, ">> ./logs/process.%s.log 2>&1" % dump_str]
ret_val, t = execute(args.rank, thread_id, reformat_cmd, cwd=cwd)
results['reformat'] = dict(ret=ret_val, duration=t)
if args.bob2hv:
results['bob2hv'] = {}
results['mv_hv'] = {}
for var, hv_dir in to_process:
var_info = var_map[var]
var_dir = var_info['dir']
res = var_info.get('resolution', 1)
ext = var_info.get('ext', 'bobaaa')
file_prefix = var_info.get('file', var_dir)
dump_dir = dir_t.format(dir=args.dumps_dir, dump=dump, var=var_dir)
# Run bob2hv
if args.bob2hv:
if not os.path.exists(dump_dir):
log.info("rank %i.%i : skipping %4i:%s because %s does not exist" % (args.rank, thread_id,
dump, var, dump_dir))
continue
size = [str(res * i) for i in args.tile_size]
teams = [str(t) for t in args.tiles]
bob_filename = filename_t.format(dump=dump, var=file_prefix, ext=ext)
bob2hv_cmd = [bob2hv_path, str(size[0]), str(size[1]), str(size[2]),
bob_filename, '-t', teams[0], teams[1], teams[2], '-s', '128',
">> ../../logs/process.%04i.log 2>&1" % dump]
ret_val, duration = execute(args.rank, thread_id, bob2hv_cmd, cwd=dump_dir)
results['bob2hv'][var] = dict(ret=ret_val, duration=duration)
if not os.path.exists(hv_dir):
os.makedirs(hv_dir)
hv_file_path = os.path.join(dump_dir, filename_t.format(dump=dump, var=file_prefix, ext='hv'))
if not os.path.exists(hv_file_path):
log.warning("rank %i.%i : hv %s not created for dump %4i" % (args.rank, thread_id,
hv_file_path, dump))
continue
mv_cmd = ['mv', hv_file_path, hv_dir]
ret_val, duration = execute(args.rank, thread_id, mv_cmd)
results['mv_hv'][var] = dict(ret=ret_val, duration=duration)
results['success'] = True
if args.cleanup:
execute(args.rank, thread_id, rm_cmd)
log.info("rank %i.%i : finished processing dump %i" % (args.rank, thread_id, dump))
return results
def render_dump(args, thread_id, dump):
log.info("rank %i.%i : started rendering dump %4i" % (args.rank, thread_id, dump))
hvr_path = args.hvr_exec
keys = args.keys
results = {'success': False}
for key in keys:
for var in args.specified_vars:
if var not in args.lut_map:
log.info("rank %i.%i : skipping dump %4i variable %s because no lut was provided" % (args.rank, thread_id, dump, var))
continue
size_str = 'x'.join(str(d) for d in key.key.get('imagesize'))
hv_name = var_map[var].get('file', var_map[var]['dir'])
hv_file = path_t.format(dir=args.hvs_dir, var=hv_name, dump=dump, ext='hv')
if not os.path.exists(hv_file):
log.warning("rank %i.%i : skipping dump %4i because %s does not exist" % (args.rank, thread_id,
dump, hv_file))
continue
var_name = var_map[var]['original']
luts = args.lut_map[var]
for lut in luts:
#suffix = '%s-%s' % (lut.name, key.key_name)
suffix = '%s-%s' % (lut.name, key.key_name) if len(luts) > 1 else key.key_name
# Have to remap the annoying Fv-hires thing
raw_file = path2_t.format(dir=args.images_dir, var=var_name, dump=dump, suffix=suffix, ext='raw')
png_file = path2_t.format(dir=args.images_dir, var=var_name, dump=dump, suffix=suffix, ext='png')
var_dir = dir_var2_t.format(dir=args.images_dir, var=var_name, suffix=suffix)
if not os.path.exists(var_dir):
os.makedirs(var_dir)
if os.path.exists(png_file):
log.info("rank %i.%i : skipping dump %4i because %s exists" % (args.rank, thread_id, dump,
png_file))
# results['success'] = True
continue
if not os.path.exists(raw_file):
key.key._data['hvfile'] = hv_file
key.key._data['imagefile'] = raw_file
key.key.lut = lut
# key_filename = os.path.join(args.temp_dir, "%s_tmp_%s.key" % (var, key.key_name))
# key.key.save(key_filename)
_, key_filename = tempfile.mkstemp(prefix='%s-%s' % (var_name, suffix), dir=args.temp_dir)
with open(key_filename, 'wb') as key_file:
key_file.write(str(key.key))
log.debug('rank %i.%i : calling hvr for %s, temp %s' % (args.rank, thread_id, hv_file, key_filename))
hvr_cmd = [hvr_path, '<', key_filename]
# hvr_cmd = [hvr_path, '<', key_filename, ">> ./logs/render.%04i.log 2>&1" % dump]
ret,_ = execute(args.rank, thread_id, hvr_cmd)
# log.debug('rank %i.%i : calling hvr for %s' % (args.rank, thread_id, hv_file))
# key_string = str(key)
# p = subprocess.Popen(hvr_path, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# output = p.communicate(key_string)
# ret = p.wait()
log.debug('rank %i.%i : hvr exit with code %i' % (args.rank, thread_id, ret))
if os.path.exists(raw_file):
convert_cmd = ['convert','-depth','8','-flip', '-size', size_str, 'rgb:%s' % raw_file, png_file]
ret,_ = execute(args.rank, thread_id, convert_cmd)
# Delete .raw file only if we converted it to png successfuly
if ret == 0:
rm_raw_cmd = ['rm', '-f', raw_file]
ret, _ = execute(args.rank, thread_id, rm_raw_cmd)
results['success'] = True
log.info("rank %i.%i : finished rendering dump %4i, success %s" % (args.rank, thread_id, dump, results['success']))
return results
def render_keyfile(args, thread_id, keyfile):
hvr_path = args.hvr_exec
key = keyfile_reader(keyfile, args.hvs_dir, args.images_dir, args.image_size)
raw_file = os.path.join(args.images_dir, '%s.raw' % key.key_name)
png_file = os.path.join(args.images_dir, '%s.png' % key.key_name)
results = {}
log.info("rank %i.%i : started rendering keyfile %s" % (args.rank, thread_id, keyfile))
if os.path.exists(png_file):
log.info("rank %i.%i : skipping keyfile %s because %s exists" % (args.rank, thread_id, keyfile, png_file))
return results
key.key.set('imagefile', raw_file)
key_filename = "%s_%04i_%04i_tmp.key" % (key.key_name, args.rank, thread_id)
key.save(key_filename)
hvr_cmd = [hvr_path, '<', key_filename, ">> ./logs/render.%s.log 2>&1" % key.key_name]
ret,_ = execute(args.rank, thread_id, hvr_cmd)
if os.path.exists(raw_file):
size = key.key.get('imagesize')
convert_cmd = ['convert','-depth','8','-flip', '-size', 'x'.join(size), 'rgb:%s' % raw_file, png_file]
ret, duration = execute(args.rank, thread_id, convert_cmd)
if ret == 0:
rm_raw_cmd = ['rm', '-f', raw_file]
ret, _ = execute(args.rank, thread_id, rm_raw_cmd)
log.info("rank %i.%i : finished rendering keyfile %s" % (args.rank, thread_id, keyfile))
return results
def work_thread_main(args, in_queue, out_queue, thread_id):
'''Process data from in_queue and write it to out_queue'''
# TODO: Make the lock more generic so it locks anything we need .lock.<work_type>
# TODO: The message payload should indicate the type of work we are doing
processing = args.bob2hv or args.cleanup or args.reformat
rendering = args.render and hvr_enabled
log.info("rank %i.%i : work_thread_main" % (args.rank, thread_id))
while True:
work = in_queue.get()
if isinstance(work, dict) and work.get('exit'):
log.info("rank %i.%i : worker commanded to exit" % (args.rank, thread_id))
break
results = {'work':work}
try:
if not work and work != 0:
log.info("rank %i.%i : worker is out of work, exiting. (work %s)" % (args.rank, thread_id, str(work)))
break
if args.plot:
results.update(plot_dump(args, thread_id, work))
if args.render_keys:
keyfile = work
results.update(render_keyfile(args, thread_id, keyfile))
elif processing or rendering:
dump = work
log.debug("rank %i.%i : worker start processing unit %s" % (args.rank, thread_id, work))
if processing:
lock_path = dir_t.format(dir=args.dumps_dir, dump=dump, var='.dpp%s' % args.lock_name)
lock = lockfile.LockFile(lock_path)
if lock.is_locked():
lock_age = time.time() - os.path.getctime(lock.lock_file)
# Anything over 60 minutes is stale
if lock_age > 3600.0:
log.info("rank %i.%i : found stale lock (%i s) on %4i" % (args.rank, thread_id,
int(lock_age), dump))
lock.break_lock()
try:
lock.acquire(timeout=-1)
results.update(process_dump(args, dump, thread_id))
lock.release()
except lockfile.AlreadyLocked:
log.warning("rank %i.%i : dump %4i is locked, skipping" % (args.rank, thread_id, dump))
if rendering:
results.update(render_dump(args, thread_id, work))
elif args.archive:
results.update(archive_dump(args, thread_id, work))
except IndexError as e:
# except Exception as e:
log.error("rank %i.%i : failed processing work %s with Exception: %s" % (args.rank,
thread_id, work,e))
in_queue.task_done()
out_queue.put(results)
def monitor_thread_main(args, work_queue, start_dumps):
sleep_time = 60
known = list(start_dumps)
thread_id = 0
while True:
dumps = get_dumps(args)
new_dumps = [d for d in dumps if d not in known]
new_dumps.sort()
# Last dumps is sort of invalid anyway
new_dumps = new_dumps[:-1]
if new_dumps:
log.info("rank %i.%i : monitor found new dumps %s" % (args.rank, thread_id, new_dumps))
for d in new_dumps:
work_queue.put(d)
known.append(d)
time.sleep(sleep_time)
def plot_dump(args, thread_id, dump):
""" Plot all of the things """
rp = args.rp_set.get_file(dump)
fig = args.fig
plot_path = args.plot_dir
dump = rp.get('dump')
log.info("rank %i.%i : plotting dump %04i" % (args.rank, thread_id, dump))
fig.clear()
ppm_plots.plot_ceul_mach_global(rp, fig, path=plot_path)
fig.clear()
ppm_plots.plot_ekr_ekt_entropy(rp, fig, path=plot_path)
# Linear plots
for var in ["enuc", "fnuc"]:
fig.clear()
ppm_plots.plot_var_one_bucket(rp, var, fig, path=plot_path)
# Log plots
for var in ["enuc", "fnuc", "fv_hi"]:
fig.clear()
ppm_plots.plot_log_var_one_bucket(rp, var, fig, path=plot_path)
# Process per-dump things
for bucket in range(1, rp.get('nbuckets') + 1):
for var in ["enuc", "fv_hi", "p"]:
fig.clear()
ppm_plots.plot_log_var_one_bucket(rp, var, fig, path=plot_path, bucket=bucket)
fig.clear()
ppm_plots.plot_var_one_bucket(rp, var, fig, path=plot_path, bucket=bucket)
return {}
def worker_thread_manager(args, in_queue, out_queue):
threads = []
nthreads = args.threads
for thread_id in range(1, nthreads+1):
log.info("rank %i.%i : worker_thread_manager spawning thread %i " % (args.rank, 0, thread_id))
t = threading.Thread(target=work_thread_main, args=(args, in_queue, out_queue, thread_id))
t.daemon = True
t.start()
threads.append(t)
return threads
def mpi_worker_main(comm, args):
'''Query server for new data and process it'''
processing = args.bob2hv
process_q = Queue.Queue()
ready_q = Queue.Queue()
nthreads = args.threads
log.debug("rank %i.%i : mpi_worker_main asking for work" % (args.rank, 0))
comm.send({'ready':True, 'units':nthreads}, dest=0, tag=1)
data = comm.recv(source=0, tag=2)
work_units = data.get('work')
threads = worker_thread_manager(args, process_q, ready_q)
while work_units:
for w in work_units:
process_q.put(w)
log.info("rank %i.%i : Waiting for item" % (args.rank, 0))
results = ready_q.get()
log.info("rank %i.%i : Got item %s " % (args.rank, 0, results))
comm.send({'ready':True, 'results':results}, dest=0, tag=1)
log.info("rank %i.%i : sent %s to leader" % (args.rank, 0, results))
data = comm.recv(source=0, tag=2)
log.info("rank %i.%i : recv %s from leader" % (args.rank, 0, data))
work_units = data.get('work')
# Wait for all items to finish
log.info("rank %i.%i : mpi_worker_main waiting for all items to finish" % (args.rank, 0))
process_q.join()
for i in range(nthreads):
process_q.put({'exit':True})
results = []
log.info("rank %i.%i : mpi_worker_main collecting final results" % (args.rank, 0))
while not ready_q.empty():
results.append(ready_q.get())
log.info("rank %i.%i : mpi_worker_main reporting final results" % (args.rank, 0))
comm.send({'ready':False, 'results':results}, dest=0, tag=1)
def run_el_presidente(args):
'''Main routine for work distribution rank'''
rendering = (args.render_keys or args.render) and hvr_enabled
log.info("El Presidente version %s: starting %s " % (str(__version__), "rendering" if (args.render_keys) else "processing"))
# Make directories
dirs_to_make = [args.logs_dir]
if args.bob2hv: dirs_to_make.append(args.hvs_dir)
if rendering: dirs_to_make.extend([args.images_dir])
if args.plot: dirs_to_make.append(args.plots_dir)
for d in dirs_to_make:
if not os.path.exists(d):
os.makedirs(d)
process_q = Queue.Queue()
ready_q = Queue.Queue()
work_units = []
processed = []
if args.render_keys:
keys_dir = args.render_keys
work_units = [dict(keyfile=os.path.join(keys_dir, f)) for f in os.listdir(keys_dir) if f.endswith('.key')]
else:
work_units = get_dumps(args)
for w in work_units:
process_q.put(w)
log.info("El Presidente: starting worker-thread manager")
threads = worker_thread_manager(args, process_q, ready_q)
if args.monitor:
#TODO: This should be able to monitor a directory for keys?
t = threading.Thread(target=monitor_thread_main, args=(args, process_q, work_units))
t.daemon = True
t.start()
if args.mpi:
# Start MPI things that will be handling workers
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
ranks = range(1,size)
status = MPI.Status()
wait_q = Queue.Queue()
while True:
msg_in = comm.recv(source=MPI.ANY_SOURCE, status=status, tag=tag_ready)
log.debug("El Presidente: Received message %s" % msg_in)
if status is None:
log.error("el_presidente: Received status None")
continue
source = status.Get_source()
units = msg_in.get('units', 1)
log.info("El Presidente: Recieved request for %i units for %i " % (units, source))
msg_out = {}
work_out = []
# Here if we are a monitor we will block on this and wait indefinitely for more
# queue elements. If we are not a monitor we should terminate when the queue
# is empty.
try:
for i in range(units):
work_out.append(process_q.get(args.monitor))
except Queue.Empty:
log.info("El Presidente: out of work")
break
log.info("El Presidente: sending %s work to %i " % (work_out, source))
comm.send({'work':work_out}, dest=source, tag=tag_dump)
# Clean up
log.info("Cleaning up %s " % ranks)
for source in ranks:
log.info("Cleaning up rank %i" % source)
msg_in = comm.recv(source=source, status=status, tag=tag_ready)
comm.send({'work':[]}, dest=source, tag=tag_dump)
log.info("El Presidente: Waiting for Processing Queue to be complete")
process_q.join()
log.info("El Presidente: Sending Quit messages")
for i in range(args.threads):
process_q.put({'exit':True})
log.info("El Presidente: Exiting")
def build_argparse():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dumps', nargs='+',type=int, help='Manually specify dump number or dump range (e.g. `--dumps 355` or `--dumps 800 900)`')
parser.add_argument('--start-dump', type=int, help='Start processing from this dump')
parser.add_argument('--threads', type=int, default=1, help='Number of threads')
parser.add_argument('--local', dest='mpi', action='store_false', help='Run without MPI')
parser.add_argument('--process', action='store_true', help='reformat, generate HVs, move, cleanup')
parser.add_argument('--render', action='store_true', help='Render dump after processing')
parser.add_argument('--archive', action='store_true', help='Create a gzip archive of the dumps')
parser.add_argument('--plot', action='store_true', help='Plot RProfiles')
parser.add_argument('--plot-dir', default='./plots', help='')
parser.add_argument('--archive-dir', default='./dump-archive', help='Directory containing dump files')
parser.add_argument('--dumps-dir', default='.', help='Directory containing dump files')
parser.add_argument('--hvs-dir', default='./hvs', help='Directory to move HV files')
parser.add_argument('--logs-dir', default='./logs', help='Logs directory')
parser.add_argument('--plots-dir', default='./plots', help='Directory for plots')
parser.add_argument('--temp-dir', help='Temp directory')
parser.add_argument('--lock-name', default='', help='Hack: Unique name for this lock (e.g. "bob" or "bob8")')
parser.add_argument('--log', help='Log filename')
p_group = parser.add_argument_group('Processing Options')
p_group.add_argument('--reformat', action='store_true', help='Reformat bobs')
p_group.add_argument('--bob2hv', action='store_true', help='Generate HV files')
p_group.add_argument('--cleanup', action='store_true', help='Remove reformated bobs')
p_group.add_argument('--monitor', action='store_true', help='Keep checking for new dumps to appear')
p_group.add_argument('-t', '--tiles', nargs='+', type=int, help='Number of tiles (1 or 3 numbers)')
p_group.add_argument('-g', '--tile-size', nargs='+', type=int, help='Size of a single tile (1 or 3 numbers)')
p_group.add_argument('--bob2hv-exec', default='/u/sciteam/sandalsk/tools/bob2hv', help='')
p_group.add_argument('--hvr-exec', default='/u/sciteam/sandalsk/lcse_serv/hvr_cl.exe', help='')
p_group.add_argument('--xreformat-exec', default='./xreformat', help='')
p_group.add_argument('--no-multidir', dest='multidir', action='store_false', default=True)
r_group = parser.add_argument_group('Rendering Options')
r_group.add_argument('--image-size', nargs='+', type=int, help='Image size as two space separated numbers (e.g. 1280 768). By default it uses the information from they key file')
r_group.add_argument('--render-keys', help='Render key files in the given directory')
r_group.add_argument('--keys', dest='key_files', nargs='+', help='A directory of key files or a list of key files to render for the selected dumps')
r_group.add_argument('--images-dir', default='./images', help='Directory to store images')
# Variable specific color maps that are used instead of the stuff inside the .lut
r_group.add_argument('--lut-fv', nargs='+', help='')
r_group.add_argument('--lut-vort', nargs='+', help='')
r_group.add_argument('--lut-uy', nargs='+', help='')
r_group.add_argument('--lut-enuc', nargs='+', help='')
r_group.add_argument('--lut-divu', nargs='+', help='')
v_group = parser.add_argument_group('Variables')
v_group.add_argument('--all-vars', action='store_true', help='Process all things (fv, enuc, vort, divu, uy) (default)')
v_group.add_argument('--fv', action='append_const', const='fv', dest='vars', help='FV High-Res (FV-hires-01)')
v_group.add_argument('--vort', action='append_const', const='vort', dest='vars', help='Vorticity (Lg10Vort-01)')
v_group.add_argument('--uy', action='append_const', const='uy', dest='vars', help='(TanhUY--001)')
v_group.add_argument('--enuc', action='append_const', const='enuc', dest='vars', help='Process Enuc files (Lg10ENUCbyP)')
v_group.add_argument('--divu', action='append_const', const='divu', dest='vars', help='(TanhDivU-01)')
# v_group.add_argument('--fv', action='append_const', const='FV-hiret-01', dest='vars', help='FV High-Res (FV-hires-01)')
# v_group.add_argument('--vort', action='append_const', const='Lg10Voru-01', dest='vars', help='Vorticity (Lg10Vort-01)')
# v_group.add_argument('--uy', action='append_const', const='TanhUY-0001', dest='vars', help='(TanhUY--001)')
# v_group.add_argument('--enuc', action='append_const', const='Lg10ENVCbyP', dest='vars', help='Process Enuc files (Lg10ENUCbyP)')
# v_group.add_argument('--divu', action='append_const', const='TanhDivV-01', dest='vars', help='(TanhDivU-01')
return parser
def main():
# Available variables
parser = build_argparse()
args = parser.parse_args()
setup_logging(args)
if not args.render_keys:
args.auto = not (args.dumps and len(args.dumps) <= 2)
args.rank = 0
# If the grid is symmetric we want to be able to specify only one size
if args.tile_size:
args.tile_size = args.tile_size if len(args.tile_size) == 3 else 3 * [args.tile_size[0]]
if args.tiles:
args.tiles = args.tiles if len(args.tiles) == 3 else 3 * [args.tiles[0]]
if args.process:
args.reformat = True
args.bob2hv = True
args.cleanup = True
if args.plot:
args.rp_set = lcse.rprofile_set(path="./RProfile-01", lazy=False, logging=True)
args.fig = ppm_plots.new_figure()
if args.vars:
args.specified_vars = args.vars
else:
args.vars = var_map.keys()
args.specified_vars = []
if args.bob2hv and not (args.tile_size and args.tiles):
reformat_file = 'Reformat-mPPM.F'
if os.path.exists(reformat_file):
defines = dict(l.split()[1:] for l in open(reformat_file).readlines() if l.startswith('#define'))
defines_expanded = dict([(k, defines.get(v, v)) for k,v in defines.items()])
args.tiles = [int(defines_expanded[v]) for v in ['nnxteams', 'nnyteams', 'nnzteams']]
args.tile_size = [int(defines_expanded['nnnn%s' % v]) * int(defines_expanded['nnt%sbricks' % v]) for v in ['x', 'y', 'z']]
log.info("Parsing %s indicates %s tiles of size %s" % (reformat_file, args.tiles, args.tile_size))
else:
log.error("Tile-size and number of tiles are required for bob2hv. Specify -t and -g options or a `Reformat-mPPM.F` file.")
return 1
# Here we go over the key files and create key objects
args.keys = []
if args.key_files:
if len(args.key_files) == 1 and os.path.isdir(args.key_files[0]):
keys_dir = args.key_files[0]
args.key_files = [os.path.join(keys_dir, f) for f in os.listdir(keys_dir) if f.endswith('.key')]
for k in args.key_files:
log.debug("main() generating key for %s" % (k))
key = keyfile_reader(k, args.hvs_dir, args.images_dir, args.image_size)
args.keys.append(key)
lut_file_map = {}
# HACK:
if args.lut_fv:
lut_file_map['fv'] = args.lut_fv
if args.lut_vort:
lut_file_map['vort'] = args.lut_vort
if args.lut_enuc:
lut_file_map['enuc'] = args.lut_enuc
if args.lut_divu:
lut_file_map['divu'] = args.lut_divu
if args.lut_uy:
lut_file_map['uy'] = args.lut_uy
args.lut_map = {}
if lut_file_map:
for k, v in lut_file_map.items():
args.lut_map[k] = [lcse.lut(filename=f) for f in v]
# Register the signal handler
# signal.signal(signal.SIGINT, sig_int_handler)
# signal.pause()
if args.mpi:
from mpi4py import MPI
comm = MPI.COMM_WORLD
args.rank = comm.Get_rank()
size = comm.Get_size()
args.mpi = size > 0
log.info("We are running in MPI mode, I am %s of %s" % (args.rank, size))
if args.rank > 0:
mpi_worker_main(comm, args)
return
# Either way we run El Presidente
run_el_presidente(args)
if __name__ == '__main__':
main()
```
#### File: lcse_tools/src/movie_menu.py
```python
import pygtk
pygtk.require('2.0')
import gtk
from subprocess import call
# Init our movie data
path_1 = "/home/user/converted-movies"
path_2 = "/mnt/scratch/low-z/1536/movies/new"
font_size = 25000
wall_player_path='/home/user/converted-movies/wall_player'
movies = [
dict(name='ICF-10K-FV-dump25-27-fly27', path=path_1, files=['ICF-10K-FV-dump25-27-fly27-panel-1.avi', 'ICF-10K-FV-dump25-27-fly27-panel-3.avi', 'ICF-10K-FV-dump25-27-fly27-panel-2.avi']),
dict(name='ICF-10K-FV-dump27_fly', path=path_1, files=['ICF-10K-FV-dump27_fly_panel-1.avi', 'ICF-10K-FV-dump27_fly_panel-3.avi', 'ICF-10K-FV-dump27_fly_panel-2.avi']),
dict(name='ICF-10K-FV-dump27', path=path_1, files=['ICF-10K-FV-dump27-panel-1.avi', 'ICF-10K-FV-dump27-panel-3.avi', 'ICF-10K-FV-dump27-panel-2.avi']),
dict(name='icfv2-12fps', path=path_1, files=['icfv2-12fps-panel-1.avi', 'icfv2-12fps-panel-2.avi', 'icfv2-12fps-pane-3.avi']),
dict(name='icfv2-18fps', path=path_1, files=['icfv2-18fps-panel-1.avi', 'icfv2-18fps-panel-3.avi', 'icfv2-18fps-panel-2.avi']),
dict(name='phlin5pw', path=path_1, files=['phlin5pw-panel-1.avi', 'phlin5pw-panel-3.avi', 'phlin5pw-panel-2.avi']),
dict(name='rgb16', path=path_1, files=['rgb16-DT-3-SCmovie-PW-panel-1.avi', 'rgb16-DT-3-SCmovie-PW-panel-3.avi', 'rgb16-DT-3-SCmovie-PW-panel-2.avi']),
dict(name='Sakurai-1536', path=path_1, files=['Sakurai-1536-Lg10Vort-part1-panel-1_0000-1359_18.avi', 'Sakurai-1536-TanhUY-part1-panel-1_0000-1358_18.avi', 'Sakurai-1536-FV-part1-panel-1_0000-1359_18.avi']),
dict(name='tp2-2d', path=path_1, files=['tp2-2d-panel-1.avi', 'tp2-2d-panel-3.avi', 'tp2-2d-panel-2.avi']),
dict(name='u05PWall1-Lvort', path=path_1, files=['u05PWall1-Lvort-panel-1.avi', 'u05PWall1-Lvort-panel-3.avi', 'u05PWall1-Lvort-panel-2.avi']),
dict(name='low-z-1536 Vort', path=path_2, files=['low-z-1536_Lg10Vort-01-slice_1_0001-1960_4k_18.avi', 'low-z-1536_Lg10Vort-01-slice_3_0001-1960_4k_18.avi', 'low-z-1536_Lg10Vort-01-back_0001-1960_4k_18.avi']),
dict(name='low-z-1536 FV', path=path_2, files=['low-z-1536_FV-hires-01-slice_1_0001-1960_4k_18.avi', 'low-z-1536_FV-hires-01-slice_3_0001-1960_4k_18.avi', 'low-z-1536_FV-hires-01-back_0001-1960_4k_18.avi']),
dict(name='low-z-1536 Vort FV TanhUY', path=path_2, files=['low-z-1536_Lg10Vort-01-back_0001-1960_4k_18.avi', 'low-z-1536_TanhUY--001-back_0001-1960_4k_18.avi', 'low-z-1536_FV-hires-01-back_0001-1960_4k_18.avi']),
dict(name='low-z-1536 Vort FV Enuc', path=path_2, files=['low-z-1536_Lg10Vort-01-back_0001-1960_4k_18.avi', 'low-z-1536_Lg10ENUCbyP-back_0001-1960_4k_18.avi', 'low-z-1536_FV-hires-01-back_0001-1960_4k_18.avi']),
]
class MoviesMenu:
def play_movie_callback(self, widget, data):
print "Playing movie" % data
cli = [wall_player_path] + data.get('files')
print "Executing ", cli
call(cli)
# another callback
def delete_event(self, widget, event, data=None):
gtk.main_quit()
return False
def __init__(self):
# Create a new window
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.resize(800, 1024)
self.window.set_title("LCSE Demo Movies")
self.window.connect("delete_event", self.delete_event)
# Sets the border width of the window.
self.window.set_border_width(10)
self.box = gtk.VBox(True, 10)
self.window.add(self.box)
for movie in movies:
#label = gtk.Label()
#label.set_markup('<span size="38000">%s</span>' % movie.get('name'))
#label.show()
button = gtk.Button(movie.get('name'))
label = button.get_child()
label.set_markup('<span size="%i">%s</span>' % (font_size, movie.get('name')))
#button.get_label().set_use_markup(gtk.TRUE)
button.connect("clicked", self.play_movie_callback, movie)
button.show()
self.box.pack_start(button, True, True, 0)
self.box.show()
self.window.show()
def main():
gtk.main()
if __name__ == "__main__":
hello = MoviesMenu()
main()
```
|
{
"source": "jerichosiahaya/depression-detection-naive-bayes",
"score": 3
}
|
#### File: jerichosiahaya/depression-detection-naive-bayes/traintesting.py
```python
import pandas as pd
import string
import nltk
import re
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from sklearn.metrics import confusion_matrix
import numpy as np
from sklearn.metrics import classification_report
#import dataset
dataset_columns = ["target", "ids", "date", "flag", "user", "text"]
dataset_encode = "ISO-8859-1"
data = pd.read_csv("training.1600000.processed.noemoticon.csv", encoding = dataset_encode, names = dataset_columns)
data.drop(['ids','date','flag','user'],axis = 1,inplace = True)
#remove punctuation
def remove_punctuation(text):
no_punct=[words for words in text if words not in string.punctuation]
words_wo_punct=''.join(no_punct)
return words_wo_punct
data['clean_text']=data['text'].apply(lambda x: remove_punctuation(x))
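# Quick sanity check of the helper above (illustrative):
# remove_punctuation("can't stop!!") returns "cant stop"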
#remove hyperlink
data['clean_text'] = data['clean_text'].str.replace(r"http\S+", "")
#remove emoji
data['clean_text'] = data['clean_text'].str.replace('[^\w\s#@/:%.,_-]', '', flags=re.UNICODE)
#convert all words to lowercase
data['clean_text'] = data['clean_text'].str.lower()
#tokenization
nltk.download('punkt')
def tokenize(text):
split=re.split("\W+",text)
return split
data['clean_text_tokenize']=data['clean_text'].apply(lambda x: tokenize(x.lower()))
#stopwords
nltk.download('stopwords')
stopword = nltk.corpus.stopwords.words('english')
def remove_stopwords(text):
text=[word for word in text if word not in stopword]
return text
data['clean_text_tokenize_stopwords'] = data['clean_text_tokenize'].apply(lambda x: remove_stopwords(x))
# store label and text into new dataframe
new_data = pd.DataFrame()
new_data['text'] = data['clean_text']
new_data['label'] = data['target']
new_data['label'] = new_data['label'].replace(4,1) # 1 for positive, 0 for negative
#split training and testing
X = new_data['text']
y = new_data['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=42)
#### Modelling
# model pipeline tfidf -> multinomialnb
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
#fitting train data to model
model.fit(X_train,y_train)
#predict with testing data
validation = model.predict(X_test)
from sklearn.metrics import accuracy_score
#accuracy score
print(accuracy_score(y_test, validation))
#classification report
print(classification_report(y_test, validation))
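# The fitted pipeline can also be used on new, unseen text (illustrative usage,
# not part of the original script): model.predict(["some new tweet"]) returns
# an array of 0/1 labels, where 0 is negative and 1 is positive.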
```
|
{
"source": "jerichosu/YOLO_V1_Pytorch",
"score": 3
}
|
#### File: jerichosu/YOLO_V1_Pytorch/dataset.py
```python
import torch
import os
import pandas as pd
from PIL import Image
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from utils import (
non_max_suppression,
mean_average_precision,
intersection_over_union,
cellboxes_to_boxes,
get_bboxes,
plot_image,
save_checkpoint,
load_checkpoint,
)
class VOCDataset(torch.utils.data.Dataset):
def __init__(
self, csv_file, img_dir, label_dir, S=7, B=2, C=20, transform=None, # csv_file: either train.csv or test.csv on archive
):
self.annotations = pd.read_csv(csv_file) #[000001.jpg, 000001.txt] stuff like that
self.img_dir = img_dir
self.label_dir = label_dir
self.transform = transform
self.S = S
self.B = B
self.C = C
def __len__(self):
return len(self.annotations)
def __getitem__(self, index):
label_path = os.path.join(self.label_dir, self.annotations.iloc[index, 1]) # labels
# print(label_path) # ../../../archive/labels/2011_000763.txt ....
boxes = []
with open(label_path) as f:
for label in f.readlines():
class_label, x, y, width, height = [
float(x) if float(x) != int(float(x)) else int(x)
for x in label.replace("\n", "").split()
]
boxes.append([class_label, x, y, width, height])
img_path = os.path.join(self.img_dir, self.annotations.iloc[index, 0]) #images
image = Image.open(img_path)
boxes = torch.tensor(boxes)
# print(image)
# print(image.shape)
# print(boxes.shape) # torch.size([N,5]), each row contains 1 bbox (and class) in the image
if self.transform:
# image = self.transform(image)
image, boxes = self.transform(image, boxes)
# put all boxes and their corresponding classes into the cube,
# so that each label.txt-->7x7x30 cube
# Convert To Cells
# create an empty tensor (7x7x30)
label_matrix = torch.zeros((self.S, self.S, self.C + 5 * self.B)) # 7 x 7 x (20+5*2)
for box in boxes:
class_label, x, y, width, height = box.tolist()
class_label = int(class_label) # convert class_label in box.tolist() from str to int
# i,j represents the cell row and cell column
i, j = int(self.S * y), int(self.S * x)
x_cell, y_cell = self.S * x - j, self.S * y - i # position relative to the (i,j) cell,
# for example: from computing i,j we know the cell (i,j) is responsible for predicting this bbox
# x_cell, y_cell is the centroid of bbox in this (i,j) cell
"""
Calculating the width and height of the bounding box,
relative to the cell, is done by the following, with
width as the example:
width_pixels = (width*self.image_width)
cell_pixels = (self.image_width)
Then to find the width relative to the cell is simply:
width_pixels/cell_pixels, simplification leads to the
formulas below.
"""
# determines the height and width of the bbox on the (i,j) cell
width_cell, height_cell = (
width * self.S,
height * self.S,
)
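# Worked example (illustrative): with S=7 and a box centred at x=0.5, y=0.5
# with width=0.2, we get i = j = int(7 * 0.5) = 3, x_cell = y_cell = 3.5 - 3 = 0.5
# and width_cell = 0.2 * 7 = 1.4, i.e. the box is 1.4 cells wide and its centre
# falls in the middle of cell (3, 3).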
# If no object already found for specific cell i,j
# Note: This means we restrict to ONE object
# per cell!
if label_matrix[i, j, 20] == 0: #label_matrix: 7x7x30 cube initialized previously
# NOTE: HERE WE ONLY CARE (i,j) position, NOT ALL POSITION!!!!
# Set that there exists an object
label_matrix[i, j, 20] = 1
# Box coordinates
box_coordinates = torch.tensor(
[x_cell, y_cell, width_cell, height_cell]
)
label_matrix[i, j, 21:25] = box_coordinates
# Set one hot encoding for class_label
label_matrix[i, j, class_label] = 1 # set the class_label_th number to be 1
# meaning something like 14th class is [0,0,0...1 (14th position), 0, 0, 0,|l_obj(20th), box_coord(21~25), zeros(25:end)]
# print(image) #normalized already
# print(image.shape)
return image, label_matrix #note image here is NOT tensor yet!!!!!
if __name__ == '__main__':
path = '../../../archive/train.csv'
IMG_DIR = '../../../archive/images'
LABEL_DIR = '../../../archive/labels'
BATCH_SIZE = 1
NUM_WORKERS = 8
DEVICE = 'cuda'
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, bboxes):
for t in self.transforms:
img, bboxes = t(img), bboxes
return img, bboxes
transform = Compose([transforms.Resize((448, 448)), transforms.ToTensor(),])
# when you use ToTensor() class, PyTorch automatically converts all images into [0,1].
train_dataset = VOCDataset(
path, transform=transform, img_dir=IMG_DIR, label_dir=LABEL_DIR,
)
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS,
pin_memory=True, shuffle=True, drop_last=True,
)
for x, y in train_loader:
# x = x.to(DEVICE)
# for idx in range(8):
# bboxes = cellboxes_to_boxes(model(x))
# bboxes = non_max_suppression(bboxes[idx], iou_threshold=0.5, threshold=0.4, box_format="midpoint")
# plot_image(x[idx].permute(1,2,0).to("cpu"), bboxes)
print(x.shape)
print(y.shape)
# print(1)
break
```
|
{
"source": "JericHunter/CS-1.3-Core-Data-Structures",
"score": 4
}
|
#### File: CS-1.3-Core-Data-Structures/Code/jumble.py
```python
class Jumble(object):
def __init__(self):
self.dict = self.create_dict()
def create_dict(self):
f = open('/usr/share/dict/words', 'r')
sortedict = {}
for word in f:
word = word.strip().lower()
sort = ''.join(sorted(word))
sortedict[sort] = word
return sortedict
def create_word_solver(self,word):
sorted_input = ''.join(sorted(word))
if sorted_input in self.dict:
return self.dict[sorted_input]
def create_solver(self, words):
# List to add solved words to
solved = []
# loop through the given words
for word in words:
solved_words = self.create_word_solver(word)
# check to make sure there are words
if solved_words:
if len(solved_words) == 1:
solved.insert(0, solved_words)
else:
solved.append(solved_words)
# return the word list
return solved
if __name__ == "__main__":
jumble = Jumble()
words = ['shast', 'doore', 'ditnic', 'bureek']
solved_words = jumble.create_solver(words)
print(solved_words)
```
#### File: CS-1.3-Core-Data-Structures/Code/sets.py
```python
from hashtable import HashTable
class HashSet(object):
"""Initialize the new empty set, and add each element if the sequence is given"""
def __init__(self, elements=None):
self.ht = HashTable()
if elements is not None:
for element in elements:
self.add(element)
def size(self):
"""This property tracks the number of elements in constant time"""
return self.ht.size
def contains(self, element):
"""Returns a boolean to indicate whether an element is in this set"""
return self.ht.contains(element)
def add(self, element):
"""Add an element to this set if it is not already present"""
if self.contains(element):
return False
else:
self.ht.set(element,1)
return True
def remove(self, element):
"""Remove the element from this set"""
self.ht.delete(element)
def union(self, other_set):
new_set = HashSet()
for item in self.ht.keys():
new_set.add(item)
for item in other_set.ht.keys():
new_set.add(item)
return new_set
def intersection(self, other_set):
"""Returns a new set that is the intersection of this set and other_set"""
new_set = HashSet()
for item in self.ht.keys():
if other_set.contains(item):
new_set.add(item)
for item in other_set.ht.keys():
if self.contains(item):
new_set.add(item)
return new_set
def difference(self, other_set):
"""Returns a new set that is the difference of this set and other_set"""
new_set = HashSet()
for item in self.ht.keys():
if other_set.contains(item) == False:
new_set.add(item)
return new_set
def is_subset(self, other_set):
"""Returns a boolean indicating whether other_set is a subset of this set"""
for item in other_set.ht.keys():
if self.contains(item) == False:
return False
return True
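# Illustrative usage (assumes the HashTable class from hashtable.py):
# a = HashSet([1, 2, 3]); b = HashSet([2, 3, 4])
# a.union(b) contains 1, 2, 3 and 4; a.intersection(b) contains 2 and 3;
# a.difference(b) contains only 1.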
```
|
{
"source": "JericHunter/SPD-2.31-Testing-and-Architecture",
"score": 4
}
|
#### File: lab/refactoring/remove_control_flag2.py
```python
n = 16
file = 'foobar.file'
def readfile(file, n):
with open(file, 'rb') as fp:
while True:
chunk = fp.read(n)
if chunk == b'': # end of file, stop running.
return
print(chunk)
# process(chunk)
readfile(file, n)
```
#### File: lab/refactoring/split_temp_variable.py
```python
class Burger:
PATTY = 70
PICKLE = 20
TOMATO = 25
LETTUCE = 15
BUN = 95
def __init__(self, name):
self.name = name
def calc_weight(self):
return 2 * self.PATTY + 4 * self.PICKLE + 3 * self.TOMATO + 2 * self.LETTUCE + 2 * self.BUN
def get_info(self):
print(f'{self.name}: {self.calc_weight()} grams')
class SeoulBurger(Burger):
KIMCHI = 30
MAYO = 5
def __init__(self, name):
super().__init__(name)
def calc_weight(self):
return super().calc_weight() + self.KIMCHI + self.MAYO
ny_burger = Burger('NY Burger').get_info()
seoul_burger = SeoulBurger('Seoul Kimchi Burger').get_info()
```
|
{
"source": "jericks/geoscript-py",
"score": 2
}
|
#### File: geoscript/geom/bounds.py
```python
from org.geotools.geometry.jts import ReferencedEnvelope
from geoscript.util import deprecated
from geoscript import core, proj
class Bounds(ReferencedEnvelope):
"""
A two dimensional bounding box.
"""
def __init__(self, west=None, south=None, east=None, north=None, prj=None, env=None):
if prj:
prj = proj.Projection(prj)
if env:
if prj:
ReferencedEnvelope.__init__(self, env, prj._crs)
elif hasattr(env, 'crs') and env.crs():
ReferencedEnvelope.__init__(self, env, env.crs())
else:
ReferencedEnvelope.__init__(self, env, None)
else:
if west != None:
ReferencedEnvelope.__init__(self, west, east, south, north,
prj._crs if prj else None)
elif prj:
ReferencedEnvelope.__init__(self, prj._crs)
else:
ReferencedEnvelope.__init__(self)
def getwest(self):
return self.minX()
west = property(getwest,None,None,'The leftmost/westmost coordinate of the bounds.')
@deprecated
def get_l(self):
return self.west
l = property(get_l, None, None, "Use west.")
def getsouth(self):
return self.minY()
south = property(getsouth,None,None,'The bottommost/southmost coordinate of the bounds.')
@deprecated
def get_b(self):
return self.south
b = property(get_b, None, None, "Use south.")
def geteast(self):
return self.maxX()
east = property(geteast,None,None,'The rightmost/eastmost coordinate of the bounds.')
@deprecated
def get_r(self):
return self.east
r = property(get_r, None, None, 'Use east.')
def getnorth(self):
return self.maxY()
north = property(getnorth,None,None,'The topmost/northmost coordinate of the bounds.')
@deprecated
def get_t(self):
return self.north
t = property(get_t, None, None, 'Use north.')
def getproj(self):
crs = self.coordinateReferenceSystem
if crs:
return proj.Projection(crs)
proj = property(getproj,None,None,'The :class:`Projection <geoscript.proj.Projection>` of the bounds. ``None`` if the projection is unknown.')
def get_aspect(self):
return self.width / self.height
aspect = property(get_aspect,None,None,'Ratio of width to height for this bounds.')
def reproject(self, prj):
"""
Reprojects the bounding box.
*prj* is the destination :class:`Projection <geoscript.proj.Projection>`
"""
if not self.proj:
raise Exception('No projection set on bounds, unable to reproject')
prj = proj.Projection(prj)
return Bounds(env=self.transform(prj._crs, True), prj=prj)
def scale(self, factor):
"""
Scales the bounds by a specified factor.
*factor* is the scale factor. The scale factor must be greater than 0. A
value greater than 1 will grow the bounds whereas a value of less than 1
will shrink the bounds.
This method returns a new :class:`Bounds <geoscript.geom.bounds.Bounds>`
object.
>>> b = Bounds(0, 0, 1, 1)
>>> b.scale(1.5)
(-0.25, -0.25, 1.25, 1.25)
"""
w = self.width * (factor - 1) / 2
h = self.height * (factor - 1) / 2
return Bounds(self.west - w, self.south - h, self.east + w, self.north + h,
self.proj)
def expand(self, other):
"""
Expands this bounds to include another.
"""
self.expandToInclude(other)
return self
def toPolygon(self):
"""
Converts the bounding box to a :class:`Polygon <geoscript.geom.polygon.Polygon>`.
"""
from geoscript.geom import Polygon
return Polygon([(self.west,self.south), (self.west,self.north),
(self.east,self.north), (self.east,self.south), (self.west,self.south)])
def tile(self, res):
"""
Partitions the bounding box into a set of smaller bounding boxes.
The ``res`` argument is the resolution to tile at and should be in the range
(0,1].
"""
dx = self.width * res
dy = self.height * res
y = self.south
while y < self.north:
x = self.west
while x < self.east:
yield Bounds(x,y,min(x+dx,self.east),min(y+dy,self.north),self.proj)
x += dx
y += dy
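# Illustrative example: Bounds(0, 0, 4, 4).tile(0.5) yields four 2x2 tiles;
# when the resolution does not divide the bounds evenly, the tiles in the last
# row/column are clipped to the east/north edges by the min() calls above.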
def __add__(self, other):
b = Bounds(env=self)
if self.proj and other.proj and other.proj != self.proj:
other = other.reproject(self.proj)
b.expandToInclude(other)
return b
def __repr__(self):
s = '(%s, %s, %s, %s' % (self.west, self.south, self.east, self.north)
if self.proj:
s = '%s, %s' % (s, self.proj.id)
return '%s)' % s
core.registerTypeMapping(ReferencedEnvelope, Bounds, lambda x: Bounds(env=x))
```
#### File: geom/io/wkt.py
```python
from com.vividsolutions.jts.io import WKTReader, WKTWriter
from geoscript.util import deprecated
def readWKT(wkt):
"""
Constructs a geometry from Well Known Text.
*wkt* is the Well Known Text string representing the geometry as described by http://en.wikipedia.org/wiki/Well-known_text.
>>> readWKT('POINT (1 2)')
POINT (1 2)
"""
return WKTReader().read(wkt)
@deprecated
def fromWKT(wkt):
"""Use :func:`readWKT`"""
return readWKT(wkt)
def writeWKT(g):
"""
Writes a geometry as Well Known Text.
*g* is the geometry to serialize.
>>> from geoscript.geom import Point
>>> str(writeWKT(Point(1,2)))
'POINT (1 2)'
"""
return WKTWriter().write(g)
```
#### File: geoscript/geom/linestring.py
```python
from com.vividsolutions.jts.geom import Coordinate
from com.vividsolutions.jts.geom import LineString as _LineString
from geoscript import core
import geom
class LineString(_LineString):
"""
A LineString geometry.
*coords* is a variable list of ``list``/``tuple`` arguments.
>>> LineString([1,2], [3,4])
LINESTRING (1 2, 3 4)
"""
def __init__(self, *coords):
if len(coords) == 1 and isinstance(coords[0], _LineString):
ls = coords[0]
else:
l = []
for c in coords:
l.append( Coordinate(c[0],c[1]) )
if len(c) > 2:
l[-1].z = c[2]
ls = geom._factory.createLineString(l)
_LineString.__init__(self, ls.coordinateSequence, geom._factory)
geom._enhance(LineString)
core.registerTypeMapping(_LineString, LineString)
```
#### File: geoscript/layer/cursor.py
```python
from org.geotools.feature import FeatureCollection
from geoscript import core
from geoscript.feature import Feature
from geoscript.filter import Filter
class Cursor(object):
"""
A cursor or iterator over :class:`Feature <geoscript.feature.feature.Feature>` objects.
"""
def __init__(self, fcol, layer=None):
self._fcol = fcol
self._reader = None
self.layer = layer
def next(self):
"""
Returns the next feature. Raises `StopIteration` if no more features are available.
"""
if not self._reader:
self._reader = self._fcol.features()
if not self._reader.hasNext():
self._reader.close()
raise StopIteration
return Feature(schema=self.layer.schema if self.layer else None, f=self._reader.next())
def read(self, n):
"""
Reads n features into a `list`. If less than n features are available the resulting list will have a size less than n.
*n* is the number of features to read.
"""
features = []
for i in range(n):
try:
features.append(self.next())
except StopIteration:
break
return features
def close(self):
"""
Closes the cursor. This function should *always* be called by client code after the cursor is no longer needed or has been exhausted.
"""
if self._reader:
self._reader.close()
def __iter__(self):
return self
core.registerTypeMapping(FeatureCollection, Cursor)
core.registerTypeUnmapping(Cursor, FeatureCollection, lambda x: x._fcol)
```
#### File: geoscript/workspace/__init__.py
```python
from workspace import Workspace
from memory import Memory
def _import(mod, clas):
try:
m = __import__(mod, globals(), locals(), [clas])
return getattr(m, clas)
except ImportError, (errmsg):
print 'Error importing module %s: %s' % (mod, errmsg)
PostGIS = _import('postgis', 'PostGIS')
H2 = _import('h2', 'H2')
Directory = _import('directory', 'Directory')
Spatialite = _import('spatialite', 'SpatiaLite')
MySQL = _import('mysql', 'MySQL')
Teradata = _import('teradata', 'Teradata')
Property = _import('property', 'Property')
```
#### File: geoscript/workspace/postgis.py
```python
from java.lang.System import getProperty as sysprop
from geoscript.workspace import Workspace
from org.geotools.data.postgis import PostgisNGDataStoreFactory
class PostGIS(Workspace):
"""
A subclass of :class:`Workspace <geoscript.workspace.workspace.Workspace>` for a PostGIS database. Layers of the workspace correspond to tables in the database.
*db* is the name of the database.
*host* is the optional host name. Defaults to 'localhost'.
*port* is the optional port the database is listening on as an ``int``. Defaults to 5432.
*schema* is the optional database schema to connect to. Defaults to 'public'.
*user* is the optional username to connect as. Defaults to the current user.
*passwd* is the optional password to connect with.
*estimated_extent* is an optional flag that controls whether to use the PostGIS
``estimated_extent`` function when calculating bounds.
"""
def __init__(self, db, host='localhost', port=5432, schema='public',
user=sysprop('user.name'), passwd=None, estimated_extent=False):
params = {'host': host, 'port': port, 'schema': schema, 'database': db,
'user':user, 'passwd': <PASSWORD>, 'dbtype': 'postgis',
'Estimated extends': estimated_extent}
Workspace.__init__(self, PostgisNGDataStoreFactory(), params)
```
#### File: tests/style/test_font.py
```python
import java
import unittest
from geoscript.style import Font
class Style_Test:
def testSimple(self):
f = Font('italic bold 12px/30px Georgia, serif;')
```
|
{
"source": "j-erickson/aws-security-hub-automated-response-and-remediation",
"score": 2
}
|
#### File: LambdaLayers/test/test_api_cached_client.py
```python
from botocore.stub import Stubber, ANY
import pytest
from awsapi_cached_client import AWSCachedClient
AWS = AWSCachedClient('us-east-1')
def test_create_client():
AWS.get_connection('sns') # in us-east-1
my_account = AWS.account
assert my_account
assert 'sns' in AWS.client
assert 'us-east-1' in AWS.client['sns']
AWS.get_connection('ec2')
assert 'ec2' in AWS.client
assert 'us-east-1' in AWS.client['ec2']
AWS.get_connection('iam','ap-northeast-1')
assert 'iam' in AWS.client
assert 'ap-northeast-1' in AWS.client['iam']
```
#### File: LambdaLayers/test/test_sechub_findings.py
```python
import json
import boto3
from botocore.stub import Stubber
import pytest
from pytest_mock import mocker
import sechub_findings as findings
from logger import Logger
from applogger import LogHandler
import utils
from awsapi_cached_client import AWSCachedClient
log_level = 'info'
logger = Logger(loglevel=log_level)
test_data = 'test/test_json_data/'
stubber = Stubber(findings.securityhub)
my_session = boto3.session.Session()
my_region = my_session.region_name
AWS = AWSCachedClient(my_region)
ssmclient = AWS.get_connection('ssm')
stubbed_ssm_client = Stubber(ssmclient)
#------------------------------------------------------------------------------
# CIS v1.2.0
#------------------------------------------------------------------------------
def test_parse_cis_v120(mocker):
test_data_in = open(test_data + 'CIS-1.3.json')
event = json.loads(test_data_in.read())
test_data_in.close()
stubbed_ssm_client.add_response(
'get_parameter',
{
"Parameter": {
"Name": "/Solutions/SO0111/cis-aws-foundations-benchmark/shortname",
"Type": "String",
"Value": "CIS",
"Version": 1,
"LastModifiedDate": "2021-04-23T08:11:30.658000-04:00",
"ARN": f'arn:aws:ssm:{my_region}:111111111111:parameter/Solutions/SO0111/cis-aws-foundations-benchmark/shortname',
"DataType": "text"
}
})
stubbed_ssm_client.add_client_error(
'get_parameter','ParameterNotFound','The requested parameter does not exist'
)
stubbed_ssm_client.add_response(
'get_parameter',
{
"Parameter": {
"Name": "/Solutions/SO0111/cis-aws-foundations-benchmark/1.2.0",
"Type": "String",
"Value": "enabled",
"Version": 1,
"LastModifiedDate": "2021-04-23T08:12:13.893000-04:00",
"ARN": f'arn:aws:ssm:{my_region}:111111111111:parameter/Solutions/SO0111/cis-aws-foundations-benchmark/version',
"DataType": "text"
}
})
stubbed_ssm_client.activate()
mocker.patch('sechub_findings.get_ssm_connection', return_value=ssmclient)
finding = findings.Finding(event['detail']['findings'][0])
assert finding.details.get('Id') == event['detail']['findings'][0]['Id']
assert finding.generator_id == 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0/rule/1.3'
assert finding.account_id == '111111111111'
assert finding.standard_name == 'cis-aws-foundations-benchmark'
assert finding.standard_shortname == 'CIS'
assert finding.standard_version == '1.2.0'
assert finding.standard_control == '1.3'
assert finding.standard_version_supported == 'True'
stubbed_ssm_client.deactivate()
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def test_parse_bad_imported():
test_file = open(test_data + 'CIS-bad.json')
event = json.loads(test_file.read())
test_file.close()
with pytest.raises(findings.InvalidFindingJson):
finding = findings.Finding(event['detail']['findings'][0])
#------------------------------------------------------------------------------
# CIS v1.7.0 finding should show unsupported
#------------------------------------------------------------------------------
def test_parse_unsupported_version(mocker):
test_data_in = open(test_data + 'CIS_unsupversion.json')
event = json.loads(test_data_in.read())
test_data_in.close()
# ssmclient = AWS.get_connection('ssm')
stubbed_ssm_client = Stubber(ssmclient)
stubbed_ssm_client.add_response(
'get_parameter',
{
"Parameter": {
"Name": "/Solutions/SO0111/cis-aws-foundations-benchmark/shortname",
"Type": "String",
"Value": "CIS",
"Version": 1,
"LastModifiedDate": "2021-04-23T08:11:30.658000-04:00",
"ARN": f'arn:aws:ssm:{my_region}:111111111111:parameter/Solutions/SO0111/cis-aws-foundations-benchmark/shortname',
"DataType": "text"
}
})
stubbed_ssm_client.add_client_error(
'get_parameter','ParameterNotFound','The requested parameter does not exist'
)
stubbed_ssm_client.activate()
mocker.patch('sechub_findings.get_ssm_connection', return_value=ssmclient)
finding = findings.Finding(event['detail']['findings'][0])
assert finding.details.get('Id') == event['detail']['findings'][0]['Id']
assert finding.generator_id == 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.7.0/rule/1.6'
assert finding.account_id == '111111111111'
assert finding.standard_name == 'cis-aws-foundations-benchmark'
assert finding.standard_shortname == 'CIS'
assert finding.standard_version == '1.7.0'
assert finding.standard_control == '1.6'
assert finding.standard_version_supported == 'False'
stubbed_ssm_client.deactivate()
#------------------------------------------------------------------------------
# AFSBP v1.0.0
#------------------------------------------------------------------------------
def test_parse_afsbp_v100(mocker):
test_data_in = open(test_data + 'afsbp-ec2.7.json')
event = json.loads(test_data_in.read())
test_data_in.close()
# ssmclient = AWS.get_connection('ssm')
stubbed_ssm_client = Stubber(ssmclient)
stubbed_ssm_client.add_response(
'get_parameter',
{
"Parameter": {
"Name": "/Solutions/SO0111/aws-foundational-security-best-practices/shortname",
"Type": "String",
"Value": "AFSBP",
"Version": 1,
"LastModifiedDate": "2021-04-23T08:11:30.658000-04:00",
"ARN": f'arn:aws:ssm:{my_region}:111111111111:parameter/Solutions/SO0111/aws-foundational-security-best-practices/shortname',
"DataType": "text"
}
})
stubbed_ssm_client.add_client_error(
'get_parameter','ParameterNotFound','The requested parameter does not exist'
)
stubbed_ssm_client.add_response(
'get_parameter',
{
"Parameter": {
"Name": "/Solutions/SO0111/aws-foundational-security-best-practices/1.0.0",
"Type": "String",
"Value": "enabled",
"Version": 1,
"LastModifiedDate": "2021-04-23T08:12:13.893000-04:00",
"ARN": f'arn:aws:ssm:us-{my_region}-1:111111111111:parameter/Solutions/SO0111/aws-foundational-security-best-practices/version',
"DataType": "text"
}
})
stubbed_ssm_client.activate()
mocker.patch('sechub_findings.get_ssm_connection', return_value=ssmclient)
finding = findings.Finding(event['detail']['findings'][0])
assert finding.details.get('Id') == event['detail']['findings'][0]['Id']
assert finding.account_id == '111111111111'
assert finding.standard_name == 'aws-foundational-security-best-practices'
assert finding.standard_shortname == 'AFSBP'
assert finding.standard_version == '1.0.0'
assert finding.standard_control == 'EC2.7'
assert finding.standard_version_supported == 'True'
stubbed_ssm_client.deactivate()
#------------------------------------------------------------------------------
# Security Standard not found
#------------------------------------------------------------------------------
def test_undefined_security_standard(mocker):
test_data_in = open(test_data + 'afsbp-ec2.7.json')
event = json.loads(test_data_in.read())
test_data_in.close()
event['detail']['findings'][0]['ProductFields']['StandardsControlArn'] = \
"arn:aws:securityhub:::standards/aws-invalid-security-standard/v/1.2.3/ABC.1"
# ssmclient = AWS.get_connection('ssm')
stubbed_ssm_client = Stubber(ssmclient)
stubbed_ssm_client.add_client_error(
'get_parameter','ParameterNotFound','The requested parameter does not exist'
)
stubbed_ssm_client.add_client_error(
'get_parameter','ParameterNotFound','The requested parameter does not exist'
)
stubbed_ssm_client.add_client_error(
'get_parameter','ParameterNotFound','The requested parameter does not exist'
)
stubbed_ssm_client.activate()
mocker.patch('sechub_findings.get_ssm_connection', return_value=ssmclient)
finding = findings.Finding(event['detail']['findings'][0])
assert finding.details.get('Id') == event['detail']['findings'][0]['Id']
assert finding.account_id == '111111111111'
assert finding.standard_name == 'aws-invalid-security-standard'
assert finding.standard_shortname == 'error'
assert finding.security_standard == 'notfound'
assert finding.standard_version == '1.2.3'
assert finding.standard_control == 'ABC.1'
assert finding.standard_version_supported == 'False'
stubbed_ssm_client.deactivate()
```
#### File: source/LambdaLayers/utils.py
```python
import json
import re
from awsapi_cached_client import AWSCachedClient
class StepFunctionLambdaAnswer:
"""
Standardized response object returned by the Orchestrator Lambda functions to the Step Function
"""
status = 'init'
message = ''
executionid = ''
affected_object = ''
remediation_status = ''
logdata = []
securitystandard = ''
securitystandardversion = ''
standardsupported = ''
controlid = ''
accountid = ''
automationdocid = ''
remediationrole = ''
eventtype = ''
def __init__(self):
"""Set message and status - minimum required fields"""
self.status = ''
self.message = ''
self.remediation_status = ''
self.logdata = []
def __str__(self):
return json.dumps(self.__dict__)
def json(self):
return self.__dict__
def update_status(self, status):
"""Set status"""
self.status = status
def update_message(self, message):
"""Set status"""
self.message = message
def update_logdata(self, logdata):
"""Set logdata (list)"""
self.logdata = logdata
def update_executionid(self, executionid):
"""Set execution id (string)"""
self.executionid = executionid
def update_affected_object(self, affected_object):
"""Set affected_object (string)"""
self.affected_object = affected_object
def update_remediation_status(self, status):
"""Set execution id (string)"""
self.remediation_status = status
def update_securitystandard(self, value):
"""Set securitystandard (string)"""
self.securitystandard = value
def update_securitystandardversion(self, value):
"""Set securitystandardversion (string)"""
self.securitystandardversion = value
def update_standardsupported(self, value):
"""Set standardsupported (string)"""
self.standardsupported = value
def update_controlid(self, value):
"""Set controlid (string)"""
self.controlid = value
def update_accountid(self, value):
"""Set accountid (string)"""
self.accountid = value
def update_automationdocid(self, value):
"""Set automationdocid (string)"""
self.automationdocid = value
def update_remediationrole(self, value):
"""Set remediationrole (string)"""
self.remediationrole = value
def update_eventtype(self, value):
"""Set eventtype (string)"""
self.eventtype = value
def update(self, answer_data):
if "status" in answer_data:
self.update_status(answer_data['status'])
if "message" in answer_data:
self.update_message(answer_data['message'])
if "remediation_status" in answer_data:
self.update_remediation_status(answer_data['remediation_status'])
if "logdata" in answer_data:
self.update_logdata(answer_data['logdata'])
if "executionid" in answer_data:
self.update_executionid(answer_data['executionid'])
if "affected_object" in answer_data:
self.update_affected_object(answer_data['affected_object'])
if "securitystandard" in answer_data:
self.update_securitystandard(answer_data['securitystandard'])
if "securitystandardversion" in answer_data:
self.update_securitystandardversion(answer_data['securitystandardversion'])
if "standardsupported" in answer_data:
self.update_standardsupported(answer_data['standardsupported'])
if "controlid" in answer_data:
self.update_controlid(answer_data['controlid'])
if "accountid" in answer_data:
self.update_accountid(answer_data['accountid'])
if "automationdocid" in answer_data:
self.update_automationdocid(answer_data['automationdocid'])
if "remediationrole" in answer_data:
self.update_remediationrole(answer_data['remediationrole'])
if "eventtype" in answer_data:
self.update_eventtype(answer_data['eventtype'])
def resource_from_arn(arn):
"""
Strip off the leading parts of the ARN: arn:*:*:*:*:
Return what's left. If no match, return the original string.
"""
arn_pattern = re.compile(r'arn\:[\w,-]+:[\w,-]+:.*:[0-9]*:(.*)')
arn_match = arn_pattern.match(arn)
answer = arn
if arn_match:
answer = arn_match.group(1)
return answer
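# Illustrative example:
# resource_from_arn('arn:aws:ec2:us-east-1:111111111111:instance/i-0abc')
# returns 'instance/i-0abc'; a value that does not match the pattern is
# returned unchanged.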
def partition_from_region(region_name):
"""
returns the partition for a given region
Note: this should be a Boto3 function and should be deprecated once it is.
On success returns a string
On failure returns NoneType
"""
parts = region_name.split('-')
try:
if parts[0] == 'us' and parts[1] == 'gov':
return 'aws-us-gov'
elif parts[0] == 'cn':
return 'aws-cn'
else:
return 'aws'
except:
return
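# Illustrative examples: partition_from_region('us-gov-west-1') -> 'aws-us-gov',
# partition_from_region('cn-north-1') -> 'aws-cn',
# partition_from_region('eu-west-1') -> 'aws'.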
def publish_to_sns(topic_name, message, region=None):
"""
Post a message to an SNS topic
"""
AWS = AWSCachedClient(region) # cached client object
partition = None
if region:
partition = partition_from_region(region)
else:
partition = 'aws'
region = 'us-east-1'
topic_arn = 'arn:' + partition + ':sns:' + region + ':' + AWS.account + ':' + topic_name
json_message = json.dumps({"default":json.dumps(message)})
message_id = AWS.get_connection('sns', region).publish(
TopicArn=topic_arn,
Message=json_message,
MessageStructure='json'
).get('MessageId', 'error')
return message_id
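# Illustrative (hypothetical) usage:
# publish_to_sns('my-notification-topic', {'status': 'SUCCESS'}, 'us-east-1')
# publishes the JSON-wrapped message to
# arn:aws:sns:us-east-1:<caller account>:my-notification-topic and returns the MessageId.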
```
#### File: source/Orchestrator/check_ssm_doc_state.py
```python
import json
import boto3
import os
from botocore.config import Config
from botocore.exceptions import ClientError
from logger import Logger
from awsapi_cached_client import BotoSession
from sechub_findings import Finding
import utils
# Get AWS region from Lambda environment. If not present then we're not
# running under lambda, so defaulting to us-east-1
AWS_REGION = os.getenv('AWS_DEFAULT_REGION', 'us-east-1') # MUST BE SET in global variables
AWS_PARTITION = os.getenv('AWS_PARTITION', 'aws') # MUST BE SET in global variables
ORCH_ROLE_BASE_NAME = 'SO0111-SHARR-Orchestrator-Member' # role to use for cross-account
# initialise loggers
LOG_LEVEL = os.getenv('log_level', 'info')
LOGGER = Logger(loglevel=LOG_LEVEL)
def _get_ssm_client(account, role, region):
"""
Create a client for ssm
"""
sess = BotoSession(
account,
f'{role}_{region}'
)
return sess.client('ssm')
def lambda_handler(event, context):
answer = utils.StepFunctionLambdaAnswer()
LOGGER.info(event)
if "Finding" not in event or \
"EventType" not in event:
answer.update({
'status':'ERROR',
'message':'Missing required data in request'
})
LOGGER.error(answer.message)
return answer.json()
finding = Finding(event['Finding'])
answer.update({
'securitystandard': finding.standard_shortname,
'securitystandardversion': finding.standard_version,
'controlid': finding.standard_control,
'standardsupported': finding.standard_version_supported, # string True/False
'accountid': finding.account_id
})
if finding.standard_version_supported != 'True':
answer.update({
'status':'NOTENABLED',
'message': f'Security Standard is not enabled: "{finding.standard_name} version {finding.standard_version}"'
})
return answer.json()
# Connect to APIs
ssm = _get_ssm_client(finding.account_id, ORCH_ROLE_BASE_NAME, AWS_REGION)
automation_docid = f'SHARR-{finding.standard_shortname}_{finding.standard_version}_{finding.remediation_control}'
remediation_role = f'SO0111-Remediate-{finding.standard_shortname}-{finding.standard_version}-{finding.remediation_control}'
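# e.g. an AFSBP v1.0.0 AutoScaling.1 finding resolves to
# 'SHARR-AFSBP_1.0.0_AutoScaling.1' and 'SO0111-Remediate-AFSBP-1.0.0-AutoScaling.1'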
answer.update({
'automationdocid': automation_docid,
'remediationrole': remediation_role
})
# Validate input
try:
docinfo = ssm.describe_document(
Name=automation_docid
)['Document']
doctype = docinfo.get('DocumentType', 'unknown')
if doctype != "Automation":
answer.update({
'status':'ERROR',
'message':'Document Type is not "Automation": ' + str(doctype)
})
LOGGER.error(answer.message)
docstate = docinfo.get('Status', 'unknown')
if docstate != "Active":
answer.update({
'status':'NOTACTIVE',
'message':'Document Status is not "Active": ' + str(docstate)
})
LOGGER.error(answer.message)
answer.update({
'status':'ACTIVE'
})
except ClientError as ex:
exception_type = ex.response['Error']['Code']
if exception_type in "InvalidDocument":
answer.update({
'status':'NOTFOUND',
'message': f'Document {automation_docid} does not exist.'
})
LOGGER.error(answer.message)
else:
answer.update({
'status':'CLIENTERROR',
'message':'An unhandled client error occurred: ' + exception_type
})
LOGGER.error(answer.message)
except Exception as e:
answer.update({
'status':'ERROR',
'message':'An unhandled error occurred: ' + str(e)
})
LOGGER.error(answer.message)
return answer.json()
```
#### File: source/Orchestrator/exec_ssm_doc.py
```python
import json
import os
import re
import boto3
from botocore.exceptions import ClientError
from logger import Logger
from awsapi_cached_client import BotoSession
from applogger import LogHandler
import utils
# Get AWS region from Lambda environment. If not present then we're not
# running under lambda, so defaulting to us-east-1
AWS_REGION = os.getenv('AWS_DEFAULT_REGION', 'us-east-1') # MUST BE SET in global variables
AWS_PARTITION = os.getenv('AWS_PARTITION', 'aws') # MUST BE SET in global variables
SOLUTION_ID = os.getenv('SOLUTION_ID', 'SO0111')
SOLUTION_ID = re.sub(r'^DEV-', '', SOLUTION_ID)
SOLUTION_VERSION = os.getenv('SOLUTION_VERSION', 'undefined')
# initialise loggers
LOG_LEVEL = os.getenv('log_level', 'info')
LOGGER = Logger(loglevel=LOG_LEVEL)
def _get_ssm_client(accountid, role):
"""
Create a client for ssm
"""
return BotoSession(
accountid,
role
).client('ssm')
def _get_iam_client(accountid, role):
"""
Create a client for iam
"""
return BotoSession(
accountid,
role
).client('iam')
def lambda_role_exists(client, rolename):
try:
client.get_role(
RoleName=rolename
)
return True
except ClientError as ex:
exception_type = ex.response['Error']['Code']
if exception_type in "NoSuchEntity":
return False
else:
exit('An unhandled client error occurred: ' + exception_type)
except Exception as e:
exit('An unhandled error occurred: ' + str(e))
def lambda_handler(event, context):
# Expected:
# {
# Finding: {
# AwsAccountId: <aws account>,
# ControlId: string
# },
# RemediationRole: string,
# AutomationDocId: string
# }
# Returns:
# {
# status: { 'UNKNOWN'| string },
# message: { '' | string },
# executionid: { '' | string }
# }
answer = utils.StepFunctionLambdaAnswer()
automation_doc = event['AutomationDocument']
if "SecurityStandard" not in automation_doc or \
"ControlId" not in automation_doc or \
"AccountId" not in automation_doc:
answer.update({
'status':'ERROR',
'message':'Missing AutomationDocument data in request: ' + json.dumps(automation_doc)
})
LOGGER.error(answer.message)
return answer.json()
orchestrator_member_role = SOLUTION_ID + '-SHARR-Orchestrator-Member_' + AWS_REGION
standard_role = automation_doc['RemediationRole'] + '_' + AWS_REGION
# If the standard/version/control has a specific role defined then use it
# Otherwise, use the Orchestrator Member role
remediation_role = orchestrator_member_role
iam = _get_iam_client(automation_doc['AccountId'], remediation_role)
if lambda_role_exists(iam, standard_role):
remediation_role = standard_role
print(f'Using role {remediation_role} for remediation in {automation_doc["AccountId"]} document {automation_doc["AutomationDocId"]}')
remediation_role_arn = 'arn:' + AWS_PARTITION + ':iam::' + automation_doc['AccountId'] + \
':role/' + remediation_role
print(f'ARN: {remediation_role_arn}')
ssm = _get_ssm_client(automation_doc['AccountId'], remediation_role)
exec_id = ssm.start_automation_execution(
# Launch SSM Doc via Automation
DocumentName=automation_doc['AutomationDocId'],
Parameters={
"Finding": [
json.dumps(event['Finding'])
],
"AutomationAssumeRole": [
remediation_role_arn
]
}
)['AutomationExecutionId']
answer.update({
'status':'SUCCESS',
'message': automation_doc['ControlId'] +
' remediation was successfully invoked via AWS Systems Manager in account ' +
automation_doc['AccountId'] + ': ' + exec_id,
'executionid': exec_id
})
LOGGER.info(answer.message)
return answer.json()
```
#### File: Orchestrator/test/test_exec_ssm_doc.py
```python
import os
import pytest
import boto3
from botocore.stub import Stubber, ANY
from exec_ssm_doc import lambda_handler
from awsapi_cached_client import AWSCachedClient
from pytest_mock import mocker
REGION = os.getenv('AWS_DEFAULT_REGION', 'us-east-1')
def test_exec_runbook(mocker):
"""
Verifies correct operation on success
"""
step_input = {
"EventType": "Security Hub Findings - Custom Action",
"Finding": {
"SchemaVersion": "2018-10-08",
"Id": "arn:aws:securityhub:us-east-1:111111111111:subscription/aws-foundational-security-best-practices/v/1.0.0/AutoScaling.1/finding/635ceb5d-3dfd-4458-804e-48a42cd723e4",
"ProductArn": "arn:aws:securityhub:us-east-1::product/aws/securityhub",
"GeneratorId": "aws-foundational-security-best-practices/v/1.0.0/AutoScaling.1",
"AwsAccountId": "111111111111",
"Types": [
"Software and Configuration Checks/Industry and Regulatory Standards/AWS-Foundational-Security-Best-Practices"
],
"FirstObservedAt": "2020-07-24T01:34:19.369Z",
"LastObservedAt": "2021-02-18T13:45:30.638Z",
"CreatedAt": "2020-07-24T01:34:19.369Z",
"UpdatedAt": "2021-02-18T13:45:28.802Z",
"Severity": {
"Product": 0,
"Label": "INFORMATIONAL",
"Normalized": 0,
"Original": "INFORMATIONAL"
},
"Title": "AutoScaling.1 Auto scaling groups associated with a load balancer should use load balancer health checks",
"Description": "This control checks whether your Auto Scaling groups that are associated with a load balancer are using Elastic Load Balancing health checks.",
"Remediation": {
"Recommendation": {
"Text": "For directions on how to fix this issue, please consult the AWS Security Hub Foundational Security Best Practices documentation.",
"Url": "https://docs.aws.amazon.com/console/securityhub/AutoScaling.1/remediation"
}
},
"ProductFields": {
"StandardsArn": "arn:aws:securityhub:::standards/aws-foundational-security-best-practices/v/1.0.0",
"StandardsSubscriptionArn": "arn:aws:securityhub:us-east-1:111111111111:subscription/aws-foundational-security-best-practices/v/1.0.0",
"ControlId": "AutoScaling.1",
"RecommendationUrl": "https://docs.aws.amazon.com/console/securityhub/AutoScaling.1/remediation",
"RelatedAWSResources:0/name": "securityhub-autoscaling-group-elb-healthcheck-required-f986ecc9",
"RelatedAWSResources:0/type": "AWS::Config::ConfigRule",
"StandardsControlArn": "arn:aws:securityhub:us-east-1:111111111111:control/aws-foundational-security-best-practices/v/1.0.0/AutoScaling.1",
"aws/securityhub/ProductName": "Security Hub",
"aws/securityhub/CompanyName": "AWS",
"aws/securityhub/annotation": "AWS Config evaluated your resources against the rule. The rule did not apply to the AWS resources in its scope, the specified resources were deleted, or the evaluation results were deleted.",
"aws/securityhub/FindingId": "arn:aws:securityhub:us-east-1::product/aws/securityhub/arn:aws:securityhub:us-east-1:111111111111:subscription/aws-foundational-security-best-practices/v/1.0.0/AutoScaling.1/finding/635ceb5d-3dfd-4458-804e-48a42cd723e4"
},
"Resources": [
{
"Type": "AwsAccount",
"Id": "arn:aws:autoscaling:us-east-1:111111111111:autoScalingGroup:785df3481e1-cd66-435d-96de-d6ed5416defd:autoScalingGroupName/sharr-test-autoscaling-1",
"Partition": "aws",
"Region": "us-east-1"
}
],
"Compliance": {
"Status": "FAILED",
"StatusReasons": [
{
"ReasonCode": "CONFIG_EVALUATIONS_EMPTY",
"Description": "AWS Config evaluated your resources against the rule. The rule did not apply to the AWS resources in its scope, the specified resources were deleted, or the evaluation results were deleted."
}
]
},
"WorkflowState": "NEW",
"Workflow": {
"Status": "NEW"
},
"RecordState": "ACTIVE"
},
"AutomationDocument": {
"DocState": "ACTIVE",
"SecurityStandardVersion": "1.0.0",
"AccountId": "111111111111",
"Message": "Document Status is not \"Active\": unknown",
"AutomationDocId": "SHARR-AFSBP_1.0.0_AutoScaling.1",
"RemediationRole": "SO0111-Remediate-AFSBP-1.0.0-AutoScaling.1",
"ControlId": "AutoScaling.1",
"SecurityStandard": "AFSBP",
"SecurityStandardSupported": "True"
}
}
expected_result = {
'executionid': '43374019-a309-4627-b8a2-c641e0140262',
'logdata': [],
'message': 'AutoScaling.1 remediation was successfully invoked via AWS Systems Manager in account 111111111111: 43374019-a309-4627-b8a2-c641e0140262',
'remediation_status': '',
'status': 'SUCCESS'
}
AWS = AWSCachedClient(REGION)
account = AWS.get_connection('sts').get_caller_identity()['Account']
step_input['AutomationDocument']['AccountId'] = account
iam_c = AWS.get_connection('iam')
iamc_stub = Stubber(iam_c)
iamc_stub.add_client_error(
'get_role',
'NoSuchEntity'
)
iamc_stub.activate()
ssm_c = AWS.get_connection('ssm')
ssmc_stub = Stubber(ssm_c)
ssmc_stub.add_response(
'start_automation_execution',
{
'AutomationExecutionId': '43374019-a309-4627-b8a2-c641e0140262'
},
{
'DocumentName': 'SHARR-AFSBP_1.0.0_AutoScaling.1',
'Parameters': {
"Finding": [
ANY
],
"AutomationAssumeRole": [
ANY
]
}
}
)
ssmc_stub.activate()
mocker.patch('exec_ssm_doc._get_ssm_client', return_value=ssm_c)
mocker.patch('exec_ssm_doc._get_iam_client', return_value=iam_c)
response = lambda_handler(step_input, {})
assert response['executionid'] == expected_result['executionid']
assert response['remediation_status'] == expected_result['remediation_status']
assert response['status'] == expected_result['status']
ssmc_stub.deactivate()
iamc_stub.deactivate()
```
#### File: remediation_runbooks/scripts/CreateAccessLoggingBucket_createloggingbucket.py
```python
import boto3
from botocore.exceptions import ClientError
from botocore.config import Config
def connect_to_s3(boto_config):
return boto3.client('s3', config=boto_config)
def create_logging_bucket(event, context):
boto_config = Config(
retries ={
'mode': 'standard'
}
)
s3 = connect_to_s3(boto_config)
try:
kwargs = {
'Bucket': event['BucketName'],
'GrantWrite': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery',
'GrantReadACP': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery'
}
if event['AWS_REGION'] != 'us-east-1':
kwargs['CreateBucketConfiguration'] = {
'LocationConstraint': event['AWS_REGION']
}
s3.create_bucket(**kwargs)
s3.put_bucket_encryption(
Bucket=event['BucketName'],
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
}
]
}
)
return {
"output": {
"Message": f'Bucket {event["BucketName"]} created'
}
}
except ClientError as error:
if error.response['Error']['Code'] != 'BucketAlreadyExists' and \
error.response['Error']['Code'] != 'BucketAlreadyOwnedByYou':
exit(str(error))
else:
return {
"output": {
"Message": f'Bucket {event["BucketName"]} already exists'
}
}
except Exception as e:
print(e)
exit(str(e))
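# Illustrative (hypothetical) event for create_logging_bucket:
# {"BucketName": "my-access-log-bucket", "AWS_REGION": "us-west-2"}
# The bucket name and region here are placeholders; the runbook passes the real values.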
```
#### File: remediation_runbooks/scripts/EnableAWSConfig_createconfigbucket.py
```python
import json
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
from botocore.retries import bucket
boto_config = Config(
retries ={
'mode': 'standard'
}
)
def connect_to_s3(boto_config):
return boto3.client('s3', config=boto_config)
def create_bucket(bucket_name, aws_region):
s3 = connect_to_s3(boto_config)
try:
if aws_region == 'us-east-1':
s3.create_bucket(
ACL='private',
Bucket=bucket_name
)
else:
s3.create_bucket(
ACL='private',
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': aws_region
}
)
return "created"
except ClientError as ex:
exception_type = ex.response['Error']['Code']
# bucket already exists - return
if exception_type in ["BucketAlreadyExists", "BucketAlreadyOwnedByYou"]:
print('Bucket ' + bucket_name + ' already exists')
return "already exists"
else:
exit(f'ERROR creating bucket {bucket_name}: {str(ex)}')
except Exception as e:
exit(f'ERROR creating bucket {bucket_name}: {str(e)}')
def encrypt_bucket(bucket_name, kms_key):
s3 = connect_to_s3(boto_config)
try:
s3.put_bucket_encryption(
Bucket=bucket_name,
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': kms_key
}
}
]
}
)
except Exception as e:
exit(f'ERROR putting bucket encryption for {bucket_name}: {str(e)}')
def block_public_access(bucket_name):
s3 = connect_to_s3(boto_config)
try:
s3.put_public_access_block(
Bucket=bucket_name,
PublicAccessBlockConfiguration={
'BlockPublicAcls': True,
'IgnorePublicAcls': True,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': True
}
)
except Exception as e:
exit(f'ERROR setting public access block for bucket {bucket_name}: {str(e)}')
def enable_access_logging(bucket_name, logging_bucket):
s3 = connect_to_s3(boto_config)
try:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
'LoggingEnabled': {
'TargetBucket': logging_bucket,
'TargetPrefix': f'access-logs/{bucket_name}'
}
}
)
except Exception as e:
exit(f'Error setting access logging for bucket {bucket_name}: {str(e)}')
def create_bucket_policy(config_bucket, aws_partition):
s3 = connect_to_s3(boto_config)
try:
bucket_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSConfigBucketPermissionsCheck",
"Effect": "Allow",
"Principal": {
"Service": [
"config.amazonaws.com"
]
},
"Action": "s3:GetBucketAcl",
"Resource": "arn:" + aws_partition + ":s3:::" + config_bucket
},
{
"Sid": "AWSConfigBucketExistenceCheck",
"Effect": "Allow",
"Principal": {
"Service": [
"config.amazonaws.com"
]
},
"Action": "s3:ListBucket",
"Resource": "arn:" + aws_partition + ":s3:::" + config_bucket
},
{
"Sid": "AWSConfigBucketDelivery",
"Effect": "Allow",
"Principal": {
"Service": [
"config.amazonaws.com"
]
},
"Action": "s3:PutObject",
"Resource": "arn:" + aws_partition + ":s3:::" + config_bucket + "/*",
"Condition": {
"StringEquals": {
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
}
]
}
s3.put_bucket_policy(
Bucket=config_bucket,
Policy=json.dumps(bucket_policy)
)
except Exception as e:
exit(f'ERROR: PutBucketPolicy failed for {config_bucket}: {str(e)}')
def create_encrypted_bucket(event, context):
kms_key_arn = event['kms_key_arn']
aws_partition = event['partition']
aws_account = event['account']
aws_region = event['region']
logging_bucket = event['logging_bucket']
bucket_name = 'so0111-aws-config-' + aws_region + '-' + aws_account
if create_bucket(bucket_name, aws_region) == 'already exists':
return {"config_bucket": bucket_name}
encrypt_bucket(bucket_name, kms_key_arn.split('key/')[1])
block_public_access(bucket_name)
enable_access_logging(bucket_name, logging_bucket)
create_bucket_policy(bucket_name, aws_partition)
return {"config_bucket": bucket_name}
```
#### File: remediation_runbooks/scripts/EnableAWSConfig_createtopic.py
```python
import json
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
boto_config = Config(
retries ={
'mode': 'standard'
}
)
def connect_to_sns():
return boto3.client('sns', config=boto_config)
def connect_to_ssm():
return boto3.client('ssm', config=boto_config)
def create_encrypted_topic(event, context):
kms_key_arn = event['kms_key_arn']
new_topic = False
topic_arn = ''
topic_name = event['topic_name']
try:
sns = connect_to_sns()
topic_arn = sns.create_topic(
Name=topic_name,
Attributes={
'KmsMasterKeyId': kms_key_arn.split('key/')[1]
}
)['TopicArn']
new_topic = True
except ClientError as client_exception:
exception_type = client_exception.response['Error']['Code']
if exception_type == 'InvalidParameter':
print(f'Topic {topic_name} already exists. This remediation may have been run before.')
print('Ignoring exception - remediation continues.')
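            # Assumption: retrying create_topic without the KMS attribute returns the
            # ARN of the existing topic, allowing the remediation to continue.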
topic_arn = sns.create_topic(
Name=topic_name
)['TopicArn']
else:
exit(f'ERROR: Unhandled client exception: {client_exception}')
except Exception as e:
exit(f'ERROR: could not create SNS Topic {topic_name}: {str(e)}')
if new_topic:
try:
ssm = connect_to_ssm()
ssm.put_parameter(
Name='/Solutions/SO0111/SNS_Topic_Config.1',
Description='SNS Topic for AWS Config updates',
Type='String',
Overwrite=True,
Value=topic_arn
)
except Exception as e:
exit(f'ERROR: could not create SNS Topic {topic_name}: {str(e)}')
create_topic_policy(topic_arn)
return {"topic_arn": topic_arn}
def create_topic_policy(topic_arn):
sns = connect_to_sns()
try:
topic_policy = {
"Id": "Policy_ID",
"Statement": [
{
"Sid": "AWSConfigSNSPolicy",
"Effect": "Allow",
"Principal": {
"Service": "config.amazonaws.com"
},
"Action": "SNS:Publish",
"Resource": topic_arn,
}]
}
sns.set_topic_attributes(
TopicArn=topic_arn,
AttributeName='Policy',
AttributeValue=json.dumps(topic_policy)
)
except Exception as e:
exit(f'ERROR: Failed to SetTopicAttributes for {topic_arn}: {str(e)}')
```
#### File: remediation_runbooks/scripts/GetPublicEBSSnapshots.py
```python
import json
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
boto_config = Config(
retries = {
'mode': 'standard',
'max_attempts': 10
}
)
def connect_to_ec2(boto_config):
return boto3.client('ec2', config=boto_config)
def get_public_snapshots(event, context):
account_id = event['account_id']
if 'testmode' in event and event['testmode']:
return [
{
"Description": "Snapshot of idle volume before deletion",
"Encrypted": False,
"OwnerId": "111111111111",
"Progress": "100%",
"SnapshotId": "snap-12341234123412345",
"StartTime": "2021-03-11T08:23:02.785Z",
"State": "completed",
"VolumeId": "vol-12341234123412345",
"VolumeSize": 4,
"Tags": [
{
"Key": "SnapshotDate",
"Value": "2021-03-11 08:23:02.376859"
},
{
"Key": "DeleteEBSVolOnCompletion",
"Value": "False"
},
{
"Key": "SnapshotReason",
"Value": "Idle Volume"
}
]
},
{
"Description": "Snapshot of idle volume before deletion",
"Encrypted": False,
"OwnerId": "111111111111",
"Progress": "100%",
"SnapshotId": "snap-12341234123412345",
"StartTime": "2021-03-11T08:20:37.399Z",
"State": "completed",
"VolumeId": "vol-12341234123412345",
"VolumeSize": 4,
"Tags": [
{
"Key": "DeleteEBSVolOnCompletion",
"Value": "False"
},
{
"Key": "SnapshotDate",
"Value": "2021-03-11 08:20:37.224101"
},
{
"Key": "SnapshotReason",
"Value": "Idle Volume"
}
]
},
{
"Description": "Snapshot of idle volume before deletion",
"Encrypted": False,
"OwnerId": "111111111111",
"Progress": "100%",
"SnapshotId": "snap-12341234123412345",
"StartTime": "2021-03-11T08:22:48.936Z",
"State": "completed",
"VolumeId": "vol-12341234123412345",
"VolumeSize": 4,
"Tags": [
{
"Key": "SnapshotReason",
"Value": "Idle Volume"
},
{
"Key": "SnapshotDate",
"Value": "2021-03-11 08:22:48.714893"
},
{
"Key": "DeleteEBSVolOnCompletion",
"Value": "False"
}
]
},
{
"Description": "Snapshot of idle volume before deletion",
"Encrypted": False,
"OwnerId": "111111111111",
"Progress": "100%",
"SnapshotId": "snap-12341234123412345",
"StartTime": "2021-03-11T08:23:05.156Z",
"State": "completed",
"VolumeId": "vol-12341234123412345",
"VolumeSize": 4,
"Tags": [
{
"Key": "DeleteEBSVolOnCompletion",
"Value": "False"
},
{
"Key": "SnapshotReason",
"Value": "Idle Volume"
},
{
"Key": "SnapshotDate",
"Value": "2021-03-11 08:23:04.876640"
}
]
},
{
"Description": "Snapshot of idle volume before deletion",
"Encrypted": False,
"OwnerId": "111111111111",
"Progress": "100%",
"SnapshotId": "snap-12341234123412345",
"StartTime": "2021-03-11T08:22:34.850Z",
"State": "completed",
"VolumeId": "vol-12341234123412345",
"VolumeSize": 4,
"Tags": [
{
"Key": "DeleteEBSVolOnCompletion",
"Value": "False"
},
{
"Key": "SnapshotReason",
"Value": "Idle Volume"
},
{
"Key": "SnapshotDate",
"Value": "2021-03-11 08:22:34.671355"
}
]
}
]
return list_public_snapshots(account_id)
def list_public_snapshots(account_id):
ec2 = connect_to_ec2(boto_config)
control_token = 'start'
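    # 'start' is only a sentinel so the loop body runs at least once; on later
    # iterations control_token holds the NextToken from the previous page of results.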
try:
buffer = []
while control_token:
if control_token == 'start': # needed a value to start the loop. Now reset it
control_token = ''
kwargs = {
'MaxResults': 100,
'OwnerIds': [ account_id ],
'RestorableByUserIds': [ 'all' ]
}
if control_token:
kwargs['NextToken'] = control_token
response = ec2.describe_snapshots(
**kwargs
)
if 'NextToken' in response:
control_token = response['NextToken']
else:
control_token = ''
buffer += response['Snapshots']
return buffer
except Exception as e:
print(e)
exit('Failed to describe_snapshots')
```
#### File: scripts/test/test_createaccessloggingbucket.py
```python
import boto3
import json
import botocore.session
from botocore.stub import Stubber
from botocore.config import Config
import pytest
from pytest_mock import mocker
import CreateAccessLoggingBucket_createloggingbucket as script
my_session = boto3.session.Session()
my_region = my_session.region_name
def test_create_logging_bucket(mocker):
event = {
'SolutionId': 'SO0000',
'SolutionVersion': '1.2.3',
'BucketName': 'mahbukkit',
'AWS_REGION': my_region
}
BOTO_CONFIG = Config(
retries ={
'mode': 'standard'
},
region_name=my_region
)
s3 = botocore.session.get_session().create_client('s3', config=BOTO_CONFIG)
s3_stubber = Stubber(s3)
kwargs = {
'Bucket': event['BucketName'],
'GrantWrite': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery',
'GrantReadACP': 'uri=http://acs.amazonaws.com/groups/s3/LogDelivery'
}
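    # Mirror the region handling in the script under test: a us-east-1 bucket must
    # not include a CreateBucketConfiguration block.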
if event['AWS_REGION'] != 'us-east-1':
kwargs['CreateBucketConfiguration'] = {
'LocationConstraint': event['AWS_REGION']
}
s3_stubber.add_response(
'create_bucket',
{},
kwargs
)
s3_stubber.add_response(
'put_bucket_encryption',
{},
{
'Bucket': event['BucketName'],
'ServerSideEncryptionConfiguration': {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
}
]
}
}
)
s3_stubber.activate()
mocker.patch('CreateAccessLoggingBucket_createloggingbucket.connect_to_s3', return_value=s3)
script.create_logging_bucket(event, {})
s3_stubber.assert_no_pending_responses()
s3_stubber.deactivate()
def test_bucket_already_exists(mocker):
event = {
'SolutionId': 'SO0000',
'SolutionVersion': '1.2.3',
'BucketName': 'mahbukkit',
'AWS_REGION': my_region
}
BOTO_CONFIG = Config(
retries ={
'mode': 'standard'
},
region_name=my_region
)
s3 = botocore.session.get_session().create_client('s3', config=BOTO_CONFIG)
s3_stubber = Stubber(s3)
s3_stubber.add_client_error(
'create_bucket',
'BucketAlreadyExists'
)
s3_stubber.activate()
mocker.patch('CreateAccessLoggingBucket_createloggingbucket.connect_to_s3', return_value=s3)
script.create_logging_bucket(event, {})
s3_stubber.assert_no_pending_responses()
s3_stubber.deactivate()
```
#### File: scripts/test/test_enableautoscalinggroupelbhealthcheck.py
```python
import boto3
import json
import botocore.session
from botocore.stub import Stubber
from botocore.config import Config
import pytest
from pytest_mock import mocker
import EnableAutoScalingGroupELBHealthCheck_validate as validate
my_session = boto3.session.Session()
my_region = my_session.region_name
#=====================================================================================
# EnableAutoScalingGroupELBHealthCheck_remediation SUCCESS
#=====================================================================================
def test_validation_success(mocker):
event = {
'SolutionId': 'SO0000',
'SolutionVersion': '1.2.3',
'AsgName': 'my_asg',
'region': my_region
}
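    # Stubbed describe_auto_scaling_groups response with HealthCheckType 'ELB',
    # i.e. the state the remediation is expected to have produced.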
good_response = {
"AutoScalingGroups": [
{
"AutoScalingGroupName": "sharr-test-autoscaling-1",
"AutoScalingGroupARN": "arn:aws:autoscaling:us-east-1:111111111111:autoScalingGroup:785d81e1-cd66-435d-96de-d6ed5416defd:autoScalingGroupName/sharr-test-autoscaling-1",
"LaunchTemplate": {
"LaunchTemplateId": "lt-05ad2fca4f4ea7d2f",
"LaunchTemplateName": "sharrtest",
"Version": "$Default"
},
"MinSize": 0,
"MaxSize": 1,
"DesiredCapacity": 0,
"DefaultCooldown": 300,
"AvailabilityZones": [
"us-east-1b"
],
"LoadBalancerNames": [],
"TargetGroupARNs": [
"arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/WebDemoTarget/fc9a82512b92af62"
],
"HealthCheckType": "ELB",
"HealthCheckGracePeriod": 300,
"Instances": [],
"CreatedTime": "2021-01-27T14:08:16.949000+00:00",
"SuspendedProcesses": [],
"VPCZoneIdentifier": "subnet-86a594ab",
"EnabledMetrics": [],
"Tags": [],
"TerminationPolicies": [
"Default"
],
"NewInstancesProtectedFromScaleIn": False,
"ServiceLinkedRoleARN": "arn:aws:iam::111111111111:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling"
}
]
}
BOTO_CONFIG = Config(
retries ={
'mode': 'standard'
},
region_name=my_region
)
asg_client = botocore.session.get_session().create_client('autoscaling', config=BOTO_CONFIG)
asg_stubber = Stubber(asg_client)
asg_stubber.add_response(
'describe_auto_scaling_groups',
good_response
)
asg_stubber.activate()
mocker.patch('EnableAutoScalingGroupELBHealthCheck_validate.connect_to_autoscaling', return_value=asg_client)
assert validate.verify(event, {}) == {
"response": {
"message": "Autoscaling Group health check type updated to ELB",
"status": "Success"
}
}
asg_stubber.deactivate()
def test_validation_failed(mocker):
event = {
'SolutionId': 'SO0000',
'SolutionVersion': '1.2.3',
'AsgName': 'my_asg',
'region': my_region
}
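    # Same fixture as the success case except HealthCheckType is 'EC2', so
    # validation should report failure.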
bad_response = {
"AutoScalingGroups": [
{
"AutoScalingGroupName": "sharr-test-autoscaling-1",
"AutoScalingGroupARN": "arn:aws:autoscaling:us-east-1:111111111111:autoScalingGroup:785d81e1-cd66-435d-96de-d6ed5416defd:autoScalingGroupName/sharr-test-autoscaling-1",
"LaunchTemplate": {
"LaunchTemplateId": "lt-05ad2fca4f4ea7d2f",
"LaunchTemplateName": "sharrtest",
"Version": "$Default"
},
"MinSize": 0,
"MaxSize": 1,
"DesiredCapacity": 0,
"DefaultCooldown": 300,
"AvailabilityZones": [
"us-east-1b"
],
"LoadBalancerNames": [],
"TargetGroupARNs": [
"arn:aws:elasticloadbalancing:us-east-1:111111111111:targetgroup/WebDemoTarget/fc9a82512b92af62"
],
"HealthCheckType": "EC2",
"HealthCheckGracePeriod": 300,
"Instances": [],
"CreatedTime": "2021-01-27T14:08:16.949000+00:00",
"SuspendedProcesses": [],
"VPCZoneIdentifier": "subnet-86a594ab",
"EnabledMetrics": [],
"Tags": [],
"TerminationPolicies": [
"Default"
],
"NewInstancesProtectedFromScaleIn": False,
"ServiceLinkedRoleARN": "arn:aws:iam::111111111111:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling"
}
]
}
BOTO_CONFIG = Config(
retries ={
'mode': 'standard'
},
region_name=my_region
)
asg_client = botocore.session.get_session().create_client('autoscaling', config=BOTO_CONFIG)
asg_stubber = Stubber(asg_client)
asg_stubber.add_response(
'describe_auto_scaling_groups',
bad_response
)
asg_stubber.activate()
mocker.patch('EnableAutoScalingGroupELBHealthCheck_validate.connect_to_autoscaling', return_value=asg_client)
assert validate.verify(event, {}) == {
"response": {
"message": "Autoscaling Group health check type is not ELB",
"status": "Failed"
}
}
asg_stubber.deactivate()
```
|
{
"source": "jericson/feedvalidator",
"score": 2
}
|
#### File: src/feedvalidator/validators.py
```python
__author__ = "<NAME> <http://intertwingly.net/> and <NAME> <http://diveintomark.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2002 <NAME> and <NAME>"
from .base import validatorBase
from .logging import *
import re, time, datetime
from .uri import canonicalForm, urljoin
from rfc822 import AddressList, parsedate, parsedate_tz, mktime_tz
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
def implausible_822(value):
if value[0] < 1990: return True
try:
from rfc822 import parsedate_tz, mktime_tz
except:
# no time zone functions available, granularity is a day
pvalue=parsedate(value)
return value > time.gmtime(time.time()+86400) or pvalue[0]<1990
try:
pvalue=parsedate_tz(value)
zvalue=mktime_tz(pvalue)
except:
# outside of range of what parsedate supports: definitely problematic
return True
# when time zone functions are available, granularity is ten minutes
return zvalue > time.time()+600 or pvalue[0]<1990
def implausible_8601(value):
if value < '1990-01-01': return True
try:
import xml.utils.iso8601
except:
# no time zone functions available, granularity is a day
tomorrow=time.strftime("%Y-%m-%dT%H:%M:%SZ",time.gmtime(time.time()+86400))
return (value > tomorrow)
try:
zvalue = xml.utils.iso8601.parse(value)
except:
# outside of range of what parse supports: definitely problematic
return True
# when time zone functions are available, granularity is ten minutes
return zvalue > time.time() + 600
#
# Valid mime type
#
mime_re = re.compile('[^\s()<>,;:\\"/[\]?=]+/[^\s()<>,;:\\"/[\]?=]+(\s*;\s*[^\s()<>,;:\\"/[\]?=]+=("(\\"|[^"])*"|[^\s()<>,;:\\"/[\]?=]+))*$')
#
# Extensibility hook: logic varies based on type of feed
#
def any(self, name, qname, attrs):
if self.getFeedType() != TYPE_RSS1:
return eater()
else:
from .rdf import rdfExtension
return rdfExtension(qname)
#
# This class simply eats events. Useful to prevent cascading of errors
#
class eater(validatorBase):
def getExpectedAttrNames(self):
return self.attrs.getNames()
def characters(self, string):
for c in string:
if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
from .validators import BadCharacters
self.log(BadCharacters({"parent":self.parent.name, "element":self.name}))
def startElementNS(self, name, qname, attrs):
# RSS 2.0 arbitrary restriction on extensions
feedtype=self.getFeedType()
if (not qname) and feedtype and (feedtype==TYPE_RSS2) and self.name.find('_')>=0:
from .logging import NotInANamespace
self.log(NotInANamespace({"parent":self.name, "element":name, "namespace":'""'}))
# ensure element is "namespace well formed"
if name.find(':') != -1:
from .logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":name}))
# ensure all attribute namespaces are properly defined
for (namespace,attr) in attrs.keys():
if ':' in attr and not namespace:
from .logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":attr}))
for c in attrs.get((namespace,attr)):
if 0x80 <= ord(c) <= 0x9F or c == u'\ufffd':
from .validators import BadCharacters
self.log(BadCharacters({"parent":name, "element":attr}))
# eat children
self.push(self.__class__(), name, attrs)
from feedvalidator.vendor.HTMLParser import HTMLParser, HTMLParseError
class HTMLValidator(HTMLParser):
htmltags = [
"a", "abbr", "acronym", "address", "applet", "area", "article", "aside",
"audio", "b", "base", "basefont", "bdo", "big", "blockquote", "body",
"br", "button", "canvas", "caption", "center", "cite", "code", "col",
"colgroup", "command", "datagrid", "datalist", "dd", "del", "details",
"dialog", "dir", "div", "dfn", "dl", "dt", "em", "event-source",
"fieldset", "figcaption", "figure", "font", "footer", "form", "frame",
"frameset", "h1", "h2", "h3", "h4", "h5", "h6", "head", "header", "hr",
"html", "i", "iframe", "img", "input", "ins", "isindex", "kbd", "label",
"legend", "li", "link", "m", "map", "menu", "meta", "meter", "nav",
"noframes", "noscript", "object", "ol", "output", "optgroup", "option",
"p", "param", "pre", "progress", "q", "s", "samp", "script", "section",
"select", "small", "source", "span", "strike", "strong", "style", "sub",
"sup", "table", "tbody", "td", "textarea", "tfoot", "th", "thead", "time",
"title", "tr", "tt", "u", "ul", "var", "xmp", "plaintext", "embed",
"comment", "listing", "video", "wbr"]
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article',
'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas',
'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command',
'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir',
'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', "figcaption", 'figure',
'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li',
'm', 'map', 'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output',
'optgroup', 'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section',
'select', 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript', 'wbr']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autoplay', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'coords', 'data', 'datafld',
'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir',
'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'srcset',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang', 'xmlns']
acceptable_css_properties = ['azimuth', 'background', 'background-color',
'border', 'border-bottom', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-collapse', 'border-color', 'border-left',
'border-left-color', 'border-left-style', 'border-left-width',
'border-right', 'border-right-color', 'border-right-style',
'border-right-width', 'border-spacing', 'border-style', 'border-top',
'border-top-color', 'border-top-style', 'border-top-width', 'border-width',
'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float',
'font', 'font-family', 'font-size', 'font-style', 'font-variant',
'font-weight', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'margin', 'margin-bottom', 'margin-left',
'margin-right', 'margin-top', 'overflow', 'padding', 'padding-bottom',
'padding-left', 'padding-right', 'padding-top', 'pause', 'pause-after',
'pause-before', 'pitch', 'pitch-range', 'richness', 'speak',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate',
'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi',
'vertical-align', 'voice-family', 'volume', 'white-space', 'width']
# survey of common keywords found in feeds
acceptable_css_keywords = ['aqua', 'auto', 'black', 'block', 'blue', 'bold',
'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted',
'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime',
'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d?\.?\d?\d(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
# svgtiny - foreignObject + linearGradient + radialGradient + stop - image
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use']
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'style', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href',
'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base',
'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2',
'zoomAndPan']
def log(self,msg):
offset = [self.element.line + self.getpos()[0] - 1 -
self.element.dispatcher.locator.getLineNumber(),
-self.element.dispatcher.locator.getColumnNumber()]
self.element.log(msg, offset)
def __init__(self,value,element):
self.element=element
self.stack = []
self.valid = True
HTMLParser.__init__(self)
if value.lower().find('<?import ') >= 0:
self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":"?import"}))
try:
self.feed(value)
self.close()
if self.valid:
self.log(ValidHtml({"parent":self.element.parent.name, "element":self.element.name}))
except HTMLParseError as msg:
element = self.element
offset = [element.line - element.dispatcher.locator.getLineNumber(),
- element.dispatcher.locator.getColumnNumber()]
match = re.search(', at line (\d+), column (\d+)',str(msg))
if match: offset[0] += int(match.group(1))-1
element.log(NotHtml({"parent":element.parent.name, "element":element.name, "message":"Invalid HTML", "value": str(msg)}),offset)
def handle_starttag(self, tag, attributes):
if tag.lower() not in self.htmltags:
self.log(NotHtml({"parent":self.element.parent.name, "element":self.element.name,"value":tag, "message": "Non-html tag"}))
self.valid = False
elif tag.lower() not in HTMLValidator.acceptable_elements:
if not 'embed' in self.stack and not 'object' in self.stack:
self.log(SecurityRisk({"parent":self.element.parent.name, "element":self.element.name, "tag":tag}))
else:
for (name,value) in attributes:
if name.lower() == 'style':
for evil in checkStyle(value):
self.log(DangerousStyleAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":"style", "value":evil}))
elif name.lower() not in self.acceptable_attributes:
self.log(SecurityRiskAttr({"parent":self.element.parent.name, "element":self.element.name, "attr":name}))
self.stack.append(tag)
def handle_endtag(self, tag):
if tag in self.stack:
while self.stack[-1] != tag: self.stack.pop()
self.stack.pop()
def handle_charref(self, name):
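    # Flag numeric character references that decode to C1 control characters
    # (U+0080 through U+009F) or the Unicode replacement character (U+FFFD).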
if name.startswith('x'):
value = int(name[1:],16)
else:
value = int(name)
if 0x80 <= value <= 0x9F or value == 0xfffd:
self.log(BadCharacters({"parent":self.element.parent.name,
"element":self.element.name, "value":"&#" + name + ";"}))
#
# Scrub CSS properties for potentially evil intent
#
def checkStyle(style):
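  # Two quick gates: reject any style value containing characters outside a
  # conservative whitelist, or one that is not a series of 'property: value' pairs.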
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return [style]
if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style):
return [style]
unsafe = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style.lower()):
if prop not in HTMLValidator.acceptable_css_properties:
if prop not in unsafe: unsafe.append(prop)
elif prop.split('-')[0] in ['background','border','margin','padding']:
for keyword in value.split():
if keyword not in HTMLValidator.acceptable_css_keywords and \
not HTMLValidator.valid_css_values.match(keyword):
if keyword not in unsafe: unsafe.append(keyword)
return unsafe
#
# This class simply eats html events. Identifies unsafe events
#
class htmlEater(validatorBase):
def getExpectedAttrNames(self):
if self.attrs and len(self.attrs):
return self.attrs.getNames()
def textOK(self): pass
def startElementNS(self, name, qname, attrs):
for attr in attrs.getNames():
if attr[0]==None:
if attr[1].lower() == 'style':
for value in checkStyle(attrs.get(attr)):
self.log(DangerousStyleAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1], "value":value}))
elif attr[1].lower() not in HTMLValidator.acceptable_attributes:
self.log(SecurityRiskAttr({"parent":self.parent.name, "element":self.name, "attr":attr[1]}))
self.push(htmlEater(), self.name, attrs)
if name.lower() not in HTMLValidator.acceptable_elements:
self.log(SecurityRisk({"parent":self.parent.name, "element":self.name, "tag":name}))
def endElementNS(self,name,qname):
pass
#
# text: i.e., no child elements allowed (except rdf:Description).
#
class text(validatorBase):
def textOK(self): pass
def getExpectedAttrNames(self):
if self.getFeedType() == TYPE_RSS1:
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType'),
(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'datatype'),
(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource')]
else:
return []
def startElementNS(self, name, qname, attrs):
if self.getFeedType() == TYPE_RSS1:
if self.value.strip() or self.children:
if self.attrs.get((u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'parseType')) != 'Literal':
self.log(InvalidRDF({"message":"mixed content"}))
if name=="div" and qname=="http://www.w3.org/1999/xhtml":
from .content import diveater
self.push(diveater(), name, attrs)
else:
from .rdf import rdfExtension
self.push(rdfExtension(qname), name, attrs)
else:
from .base import namespaces
ns = namespaces.get(qname, '')
if name.find(':') != -1:
from .logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":name}))
else:
self.log(UndefinedElement({"parent":self.name, "element":name}))
self.push(eater(), name, attrs)
#
# noduplicates: no child elements, no duplicate siblings
#
class noduplicates(validatorBase):
def __init__(self, message=DuplicateElement):
self.message=message
validatorBase.__init__(self)
def startElementNS(self, name, qname, attrs):
pass
def characters(self, string):
pass
def prevalidate(self):
if self.name in self.parent.children:
self.log(self.message({"parent":self.parent.name, "element":self.name}))
#
# valid e-mail addr-spec
#
class addr_spec(text):
domain_re = '''(([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([A-Z0-9\-]+\.)+))([A-Z0-9][-A-Z0-9]*)'''
email_re = re.compile("([A-Z0-9_\-\+\.\']+)@" + domain_re + "$", re.I)
simple_email_re = re.compile('^[\w._%+-]+@[A-Za-z][\w.-]+$')
message = InvalidAddrSpec
def validate(self, value=None):
if not value: value=self.value
if not self.email_re.match(value):
if not self.simple_email_re.match(value):
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
try:
import socket
socket.gethostbyname(value.split('@')[-1])
except:
self.log(UnknownHost({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.log(ValidContact({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# iso639 language code
#
def iso639_validate(log,value,element,parent):
from . import iso639codes
if '-' in value:
lang, sublang = value.split('-', 1)
else:
lang = value
if unicode.lower(unicode(lang)) not in iso639codes.isoLang:
log(InvalidLanguage({"parent":parent, "element":element, "value":value}))
else:
log(ValidLanguage({"parent":parent, "element":element}))
class iso639(text):
def validate(self):
iso639_validate(self.log, self.value, self.name, self.parent.name)
#
# Encoding charset
#
class Charset(text):
def validate(self):
try:
import codecs
codecs.lookup(self.value)
except:
self.log(InvalidEncoding({'value': self.value}))
#
# Mime type
#
class MimeType(text):
def validate(self):
if not mime_re.match(self.value):
self.log(InvalidMIMEType({'attr':'type'}))
class MediaRange(MimeType):
def validate(self):
if not self.value.strip(): return
original_value = self.value
for value in original_value.split(','):
self.value = value.strip()
if value.find(';q=')>=0:
self.log(UndefinedParam({'param':'q'}))
MimeType.validate(self)
#
# iso8601 dateTime
#
class unbounded_iso8601(text):
iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
"(Z|([+-]\d\d:\d\d))?)?)?)?$")
message = InvalidISO8601DateTime
def validate(self):
if not self.iso8601_re.match(self.value):
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
work=self.value.split('T')
date=work[0].split('-')
year=int(date[0])
if len(date)>1:
month=int(date[1])
try:
if len(date)>2: datetime.date(year,month,int(date[2]))
except ValueError as e:
return self.log(self.message({"parent":self.parent.name, "element":self.name, "value":str(e)}))
if len(work) > 1:
time=work[1].split('Z')[0].split('+')[0].split('-')[0]
time=time.split(':')
if int(time[0])>23:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
if len(time)>1 and int(time[1])>60:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
if len(time)>2 and float(time[2])>60.0:
self.log(self.message({"parent":self.parent.name, "element":self.name, "value":self.value}))
return
self.log(ValidW3CDTFDate({"parent":self.parent.name, "element":self.name, "value":self.value}))
return 1
class iso8601(unbounded_iso8601):
bounded = 1
def validate(self):
if self.bounded and unbounded_iso8601.validate(self):
if implausible_8601(self.value):
self.log(ImplausibleDate({"parent":self.parent.name,
"element":self.name, "value":self.value}))
return 1
class w3cdtf(iso8601):
# The same as in iso8601, except a timezone is not optional when
# a time is present
iso8601_re = re.compile("^\d\d\d\d(-\d\d(-\d\d(T\d\d:\d\d(:\d\d(\.\d*)?)?" +
"(Z|([+-]\d\d:\d\d)))?)?)?$")
message = InvalidW3CDTFDate
class unbounded_w3cdtf(w3cdtf):
bounded = 0
class rfc3339(iso8601):
# The same as in iso8601, except that the only thing that is optional
# is the seconds
iso8601_re = re.compile("^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d*)?" +
"(Z|([+-]\d\d:\d\d))$")
message = InvalidRFC3339Date
class iso8601_date(iso8601):
date_re = re.compile("^\d\d\d\d-\d\d-\d\d$")
def validate(self):
if iso8601.validate(self):
if not self.date_re.search(self.value):
self.log(InvalidISO8601Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
iana_schemes = [ # http://www.iana.org/assignments/uri-schemes.html
"ftp", "http", "gopher", "mailto", "news", "nntp", "telnet", "wais",
"file", "prospero", "z39.50s", "z39.50r", "cid", "mid", "vemmi",
"service", "imap", "nfs", "acap", "rtsp", "tip", "pop", "data", "dav",
"opaquelocktoken", "sip", "sips", "tel", "fax", "modem", "ldap",
"https", "soap.beep", "soap.beeps", "xmlrpc.beep", "xmlrpc.beeps",
"urn", "go", "h323", "ipp", "tftp", "mupdate", "pres", "im", "mtqp",
"iris.beep", "dict", "snmp", "crid", "tag", "dns", "info",
"aaa", "aaas", "cap", "iax", "icap", "iris", "iris.xpc", "iris.xpcs",
"iris.lwz", "msrp", "msrps", "shttp", "thismessage", "tv", "xmpp"
]
#
# rfc2396 fully qualified (non-relative) uri
#
class rfc2396(text):
rfc2396_re = re.compile("([a-zA-Z][0-9a-zA-Z+\\-\\.]*:)?/{0,2}" +
"(\\[[0-9A-Fa-f:]+\\])?" +
"[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]*$")
urn_re = re.compile(r"^[Uu][Rr][Nn]:[a-zA-Z0-9][a-zA-Z0-9-]{1,31}:([a-zA-Z0-9()+,\.:=@;$_!*'\-]|%[0-9A-Fa-f]{2})+$")
uuid_re = re.compile(r"^[Uu][Rr][Nn]:[Uu][Uu][Ii][Dd]:[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
tag_re = re.compile(r"^tag:([a-z0-9\-\._]+?@)?[a-z0-9\.\-]+?,\d{4}(-\d{2}(-\d{2})?)?:[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*(#[0-9a-zA-Z;/\?:@&=+$\.\-_!~*'\(\)%,]*)?$")
urichars_re=re.compile("[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]")
def validate(self, errorClass=InvalidLink, successClass=ValidURI, extraParams={}):
success = 0
scheme=self.value.split(':')[0].lower()
if scheme=='tag':
if self.tag_re.match(self.value):
success = 1
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(ValidTAG(logparams))
else:
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(InvalidTAG(logparams))
elif scheme=="urn":
if self.value.lower().startswith('urn:uuid:') and not \
self.uuid_re.match(self.value):
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(InvalidUUID(logparams))
elif self.urn_re.match(self.value):
success = 1
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(ValidURN(logparams))
else:
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(InvalidURN(logparams))
elif not self.rfc2396_re.match(self.value):
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
for c in self.value:
if ord(c)<128 and not rfc2396.urichars_re.match(c):
logparams['char'] = repr(str(c))
logparams['value'] = self.value
self.log(InvalidUriChar(logparams))
break
else:
try:
if self.rfc2396_re.match(self.value.encode('idna')):
errorClass=UriNotIri
except:
pass
self.log(errorClass(logparams))
elif scheme in ['http','ftp']:
if not re.match('^\w+://[^/].*',self.value):
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(errorClass(logparams))
else:
success = 1
elif self.value.find(':')>=0 and scheme.isalpha() and scheme not in iana_schemes:
self.log(SchemeNotIANARegistered({"parent":self.parent.name, "element":self.name, "value":scheme}))
else:
success = 1
if success:
logparams = {"parent":self.parent.name, "element":self.name, "value":self.value}
logparams.update(extraParams)
self.log(successClass(logparams))
return success
#
# rfc3987 iri
#
class rfc3987(rfc2396):
def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
try:
if self.value: self.value = self.value.encode('idna')
except:
pass # apparently '.' produces label too long
return rfc2396.validate(self, errorClass, successClass, extraParams)
class rfc2396_full(rfc2396):
rfc2396_re = re.compile("[a-zA-Z][0-9a-zA-Z+\\-\\.]*:(//)?" +
"[0-9a-zA-Z;/?:@&=+$\\.\\-_!~*'()%,#]+$")
def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
return rfc2396.validate(self, errorClass, successClass, extraParams)
#
# URI reference resolvable relative to xml:base
#
class xmlbase(rfc3987):
def validate(self, errorClass=InvalidIRI, successClass=ValidURI, extraParams={}):
if rfc3987.validate(self, errorClass, successClass, extraParams):
if self.dispatcher.xmlBase != self.xmlBase:
docbase=canonicalForm(self.dispatcher.xmlBase).split('#')[0]
elembase=canonicalForm(self.xmlBase).split('#')[0]
value=canonicalForm(urljoin(elembase,self.value)).split('#')[0]
if (value==elembase) and (elembase.encode('idna')!=docbase):
self.log(SameDocumentReference({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# rfc822 dateTime (+Y2K extension)
#
class rfc822(text):
rfc822_re = re.compile("(((mon)|(tue)|(wed)|(thu)|(fri)|(sat)|(sun))\s*,\s*)?" +
"\d\d?\s+((jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|(aug)|(sep)|(oct)|" +
"(nov)|(dec))\s+\d\d(\d\d)?\s+\d\d:\d\d(:\d\d)?\s+(([+-]\d\d\d\d)|" +
"(ut)|(gmt)|(est)|(edt)|(cst)|(cdt)|(mst)|(mdt)|(pst)|(pdt)|[a-ik-z])?$",
re.UNICODE)
rfc2822_re = re.compile("(((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun)), )?" +
"\d\d? ((Jan)|(Feb)|(Mar)|(Apr)|(May)|(Jun)|(Jul)|(Aug)|(Sep)|(Oct)|" +
"(Nov)|(Dec)) \d\d\d\d \d\d:\d\d(:\d\d)? (([+-]?\d\d[03]0)|" +
"(UT)|(GMT)|(EST)|(EDT)|(CST)|(CDT)|(MST)|(MDT)|(PST)|(PDT)|Z)$")
def validate(self):
if self.rfc2822_re.match(self.value):
import calendar
value = parsedate(self.value)
try:
if value[0] > 1900:
dow = datetime.date(*value[:3]).strftime("%a")
if self.value.find(',')>0 and dow.lower() != self.value[:3].lower():
self.log(IncorrectDOW({"parent":self.parent.name, "element":self.name, "value":self.value[:3]}))
return
except ValueError as e:
self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":str(e)}))
return
if implausible_822(self.value):
self.log(ImplausibleDate({"parent":self.parent.name,
"element":self.name, "value":self.value}))
else:
self.log(ValidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
value1,value2 = '', self.value
value2 = re.sub(r'[\\](.)','',value2)
while value1!=value2: value1,value2=value2,re.sub('\([^(]*?\)',' ',value2)
if not self.rfc822_re.match(value2.strip().lower()):
self.log(InvalidRFC2822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.log(ProblematicalRFC822Date({"parent":self.parent.name, "element":self.name, "value":self.value}))
#
# Decode html entityrefs
#
from htmlentitydefs import name2codepoint
def decodehtml(data):
chunks=re.split('&#?(\w+);',data)
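  # re.split with a capturing group alternates literal text (even indexes) and
  # entity names (odd indexes), so only the odd-indexed chunks are decoded below.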
for i in range(1,len(chunks),2):
if chunks[i].isdigit():
# print chunks[i]
chunks[i]=unichr(int(chunks[i]))
elif chunks[i] in name2codepoint:
chunks[i]=unichr(name2codepoint[chunks[i]])
else:
chunks[i]='&' + chunks[i] +';'
# print repr(chunks)
return u"".join(map(unicode,chunks))
#
# Scan HTML for relative URLs
#
class absUrlMixin:
anchor_re = re.compile('<a\s+href=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
img_re = re.compile('<img\s+[^>]*src=(?:"(.*?)"|\'(.*?)\'|([\w-]+))[\s>]', re.IGNORECASE)
absref_re = re.compile("\w+:")
def validateAbsUrl(self,value):
refs = self.img_re.findall(self.value) + self.anchor_re.findall(self.value)
for ref in [reduce(lambda a,b: a or b, x) for x in refs]:
ref = decodehtml(ref).strip()
if not self.absref_re.match(ref):
for c in ref:
if ord(c)<128 and not rfc2396.urichars_re.match(c):
# print "Invalid character:", ref
# self.log(InvalidUriChar({'value':repr(str(c))}))
self.log(InvalidUriChar({'value':ref, 'char':repr(str(c))}))
break
else:
self.log(ContainsRelRef({"parent":self.parent.name, "element":self.name, "value": ref}))
#
# Scan HTML for 'devious' content
#
class safeHtmlMixin:
def validateSafe(self,value):
HTMLValidator(value, self)
class safeHtml(text, safeHtmlMixin, absUrlMixin):
def prevalidate(self):
self.children.append(True) # force warnings about "mixed" content
def validate(self):
self.validateSafe(self.value)
self.validateAbsUrl(self.value)
#
# Elements for which email addresses are discouraged
#
class nonemail(text):
email_re = re.compile("<" + addr_spec.email_re.pattern[:-1] + ">", re.I)
def validate(self):
if self.email_re.search(self.value):
self.log(ContainsEmail({"parent":self.parent.name, "element":self.name}))
#
# Elements for which html is discouraged, also checks for relative URLs
#
class nonhtml(text,safeHtmlMixin):#,absUrlMixin):
htmlEndTag_re = re.compile("</(\w+)>")
htmlEntity_re = re.compile("&(#?\w+)")
def start(self):
nonhtml.startline = self.__dict__['startline'] = self.line
def prevalidate(self):
self.start()
self.children.append(True) # force warnings about "mixed" content
def validate(self, message=ContainsHTML):
tags = [t for t in self.htmlEndTag_re.findall(self.value) if t.lower() in HTMLValidator.htmltags]
if tags:
self.log(message({"parent":self.parent.name, "element":self.name, "value":tags[0]}))
# experimental RSS-Profile support
elif self.htmlEntity_re.search(self.value):
for value in self.htmlEntity_re.findall(self.value):
from htmlentitydefs import name2codepoint
if value in name2codepoint or value == 'apos' or not value.isalpha():
if not hasattr(self,'startline'): self.startline=self.line
lines = self.dispatcher.rssCharData[self.startline-1:self.line]
if not [chardata for chardata in lines if chardata]:
self.log(message({"parent":self.parent.name, "element":self.name, "value":'&'+value+';'}))
# experimental RSS-Profile support
    # &#x &ent </ <a
elif self.getFeedType() == TYPE_RSS2:
if re.search('&#[x0-9]|<[/a-zA-Z]', self.value):
lines = self.dispatcher.rssCharData[self.startline-1:self.line]
if not [chardata for chardata in lines if chardata]:
rss = self.parent.parent
while rss and rss.name!='rss': rss=rss.parent
if rss.version.startswith("2."):
self.log(CharacterData({}))
#
# valid e-mail addresses
#
class email(addr_spec,nonhtml):
message = InvalidContact
def validate(self):
value=self.value
list = AddressList(value)
if len(list)==1: value=list[0][1]
nonhtml.validate(self)
addr_spec.validate(self, value)
class email_with_name(email):
def validate(self):
if self.value.startswith('mailto:'):
from urllib import unquote
self.value = unquote(self.value.split(':',1)[1])
if self.value.find('@')>0:
if not self.value.endswith(")"):
if self.value.find(' ')>0:
self.log(EmailFormat({}))
else:
self.log(MissingRealName({}))
else:
email.validate(self)
else:
email.validate(self)
class nonNegativeInteger(text):
def validate(self):
try:
t = int(self.value)
if t < 0:
raise ValueError
else:
self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidNonNegativeInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class positiveInteger(text):
max = 0
def validate(self):
try:
t = int(self.value)
if t <= 0:
raise ValueError
elif self.max and t>self.max:
self.log(IntegerOverflow({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidPositiveInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class UINT31(positiveInteger):
max = 2147483647
class Integer(text):
def validate(self):
if self.value == '': return
try:
t = int(self.value)
self.log(ValidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidInteger({"parent":self.parent.name, "element":self.name, "value":self.value}))
class Float(text):
def validate(self, name=None):
if not re.match('\d+\.?\d*$', self.value):
self.log(InvalidFloat({"attr":name or self.name, "value":self.value}))
class alphanumeric(text):
def validate(self):
if not re.match('^\s*[A-Za-z0-9]+\s*$', self.value):
self.log(InvalidAlphanum({"attr":self.name, "value":self.value}))
class percentType(text):
def validate(self):
try:
t = float(self.value)
if t < 0.0 or t > 100.0:
raise ValueError
else:
self.log(ValidPercentage({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidPercentage({"parent":self.parent.name, "element":self.name, "value":self.value}))
class latitude(text):
def validate(self):
try:
lat = float(self.value)
if lat > 90 or lat < -90:
raise ValueError
else:
self.log(ValidLatitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidLatitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
class longitude(text):
def validate(self):
try:
lon = float(self.value)
if lon > 180 or lon < -180:
raise ValueError
else:
self.log(ValidLongitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidLongitude({"parent":self.parent.name, "element":self.name, "value":self.value}))
class httpURL(text):
http_re = re.compile("(http|https)://" + addr_spec.domain_re + '(?::\d+)?' + '(/|$)', re.IGNORECASE)
def validate(self):
if not self.http_re.match(self.value):
self.log(InvalidURLAttribute({"parent":self.parent.name, "element":self.name, "value":self.value}))
elif not rfc2396_full.rfc2396_re.match(self.value):
self.log(InvalidURLAttribute({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.log(ValidURLAttribute({"parent":self.parent.name, "element":self.name, "value":self.value}))
class rdfResourceURI(rfc2396):
def getExpectedAttrNames(self):
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'resource'),
(u'http://purl.org/dc/elements/1.1/', u'title')]
def validate(self):
if (rdfNS, 'resource') in self.attrs.getNames():
self.value=self.attrs.getValue((rdfNS, 'resource'))
rfc2396.validate(self)
elif self.getFeedType() == TYPE_RSS1:
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:resource"}))
class rdfAbout(validatorBase):
def getExpectedAttrNames(self):
return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
def startElementNS(self, name, qname, attrs):
pass
def validate(self):
if (rdfNS, 'about') not in self.attrs.getNames():
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"rdf:about"}))
else:
test=rfc2396().setElement(self.name, self.attrs, self)
test.value=self.attrs.getValue((rdfNS, 'about'))
test.validate()
class nonblank(text):
def validate(self, errorClass=NotBlank, extraParams={}):
if not self.value:
logparams={"parent":self.parent.name,"element":self.name}
logparams.update(extraParams)
self.log(errorClass(logparams))
class nows(text):
def __init__(self):
self.ok = 1
text.__init__(self)
def characters(self, string):
text.characters(self, string)
if self.ok and (self.value != self.value.strip()):
self.log(UnexpectedWhitespace({"parent":self.parent.name, "element":self.name}))
self.ok = 0
class unique(nonblank):
def __init__(self, name, scope, message=DuplicateValue):
self.scope_name=name
self.scope=scope
self.message=message
nonblank.__init__(self)
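    # Seen values are tracked in a list stored on the scope object under the
    # pluralized name (e.g. a 'guid' validator appends to scope.guids).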
if not name+'s' in self.scope.__dict__:
self.scope.__dict__[name+'s']=[]
def validate(self):
nonblank.validate(self)
list=self.scope.__dict__[self.scope_name+'s']
if self.value in list:
self.log(self.message({"parent":self.parent.name, "element":self.name,"value":self.value}))
elif self.value:
list.append(self.value)
class rfc3987_full(xmlbase):
rfc2396_re = rfc2396_full.rfc2396_re
def validate(self, errorClass=InvalidFullLink, successClass=ValidURI, extraParams={}):
return rfc2396.validate(self, errorClass, successClass, extraParams)
class canonicaluri(rfc3987_full):
def validate(self):
prestrip = self.value
self.value = self.value.strip()
if rfc3987_full.validate(self):
c = canonicalForm(self.value)
if c is None or c != prestrip:
self.log(NonCanonicalURI({"parent":self.parent.name,"element":self.name,"uri":prestrip, "curi":c or 'N/A'}))
class yesno(text):
def normalizeWhitespace(self):
pass
def validate(self):
if not self.value in ['yes','no']:
self.log(InvalidYesNo({"parent":self.parent.name, "element":self.name,"value":self.value}))
class truefalse(text):
def normalizeWhitespace(self):
pass
def validate(self):
if not self.value.lower() in ['true','false']:
self.log(InvalidTrueFalse({"parent":self.parent.name, "element":self.name,"value":self.value}))
class truefalsestrict(text):
def normalizeWhitespace(self):
pass
def validate(self):
if not self.value in ['true','false']:
self.log(InvalidTrueFalse({"parent":self.parent.name, "element":self.name,"value":self.value}))
class duration(text):
duration_re = re.compile("\d+(:[0-5][0-9](:[0-5][0-9])?)?$")
def validate(self):
if not self.duration_re.match(self.value):
self.log(InvalidDuration({"parent":self.parent.name, "element":self.name
, "value":self.value}))
class lengthLimitedText(nonhtml):
def __init__(self, max):
self.max = max
text.__init__(self)
def validate(self):
if len(self.value)>self.max:
self.log(TooLong({"parent":self.parent.name, "element":self.name,
"len": len(self.value), "max": self.max}))
nonhtml.validate(self)
class keywords(text):
def validate(self):
if self.value.find(' ')>=0 and self.value.find(',')<0:
self.log(InvalidKeywords({"parent":self.parent.name, "element":self.name}))
class commaSeparatedIntegers(text):
def validate(self):
if not re.match("^\d+(,\s*\d+)*$", self.value):
self.log(InvalidCommaSeparatedIntegers({"parent":self.parent.name,
"element":self.name}))
class formname(text):
def validate(self):
if not re.match("^[a-zA-z][a-zA-z0-9:._]*", self.value):
self.log(InvalidFormComponentName({"parent":self.parent.name,
"element":self.name, "value":self.value}))
class enumeration(text):
def validate(self):
if self.value not in self.valuelist:
self.log(self.error({"parent":self.parent.name, "element":self.name,
"attr": ':'.join(self.name.split('_',1)), "value":self.value}))
class caseinsensitive_enumeration(enumeration):
def validate(self):
self.value=self.value.lower()
enumeration.validate(self)
class iso3166(enumeration):
error = InvalidCountryCode
valuelist = [
"AD", "AE", "AF", "AG", "AI", "AM", "AN", "AO", "AQ", "AR", "AS", "AT",
"AU", "AW", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ",
"BM", "BN", "BO", "BR", "BS", "BT", "BV", "BW", "BY", "BZ", "CA", "CC",
"CD", "CF", "CG", "CH", "CI", "CK", "CL", "CM", "CN", "CO", "CR", "CU",
"CV", "CX", "CY", "CZ", "DE", "DJ", "DK", "DM", "DO", "DZ", "EC", "EE",
"EG", "EH", "ER", "ES", "ET", "FI", "FJ", "FK", "FM", "FO", "FR", "GA",
"GB", "GD", "GE", "GF", "GH", "GI", "GL", "GM", "GN", "GP", "GQ", "GR",
"GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN", "HR", "HT", "HU", "ID",
"IE", "IL", "IN", "IO", "IQ", "IR", "IS", "IT", "JM", "JO", "JP", "KE",
"KG", "KH", "KI", "KM", "KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB",
"LC", "LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD",
"MG", "MH", "MK", "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT",
"MU", "MV", "MW", "MX", "MY", "MZ", "NA", "NC", "NE", "NF", "NG", "NI",
"NL", "NO", "NP", "NR", "NU", "NZ", "OM", "PA", "PE", "PF", "PG", "PH",
"PK", "PL", "PM", "PN", "PR", "PS", "PT", "PW", "PY", "QA", "RE", "RO",
"RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SH", "SI", "SJ", "SK",
"SL", "SM", "SN", "SO", "SR", "ST", "SV", "SY", "SZ", "TC", "TD", "TF",
"TG", "TH", "TJ", "TK", "TM", "TN", "TO", "TR", "TT", "TV", "TW", "TZ",
"UA", "UG", "UM", "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN",
"VU", "WF", "WS", "YE", "YT", "ZA", "ZM", "ZW"]
class iso4217(enumeration):
error = InvalidCurrencyUnit
valuelist = [
"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZM",
"BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV",
"BRL", "BSD", "BTN", "BWP", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF",
"CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CSD", "CUP", "CVE",
"CYP", "CZK", "DJF", "DKK", "DOP", "DZD", "EEK", "EGP", "ERN", "ETB",
"EUR", "FJD", "FKP", "GBP", "GEL", "GHC", "GIP", "GMD", "GNF", "GTQ",
"GWP", "GYD", "HKD", "HNL", "HRK", "HTG", "HUF", "IDR", "ILS", "INR",
"IQD", "IRR", "ISK", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF",
"KPW", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL",
"LTL", "LVL", "LYD", "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP",
"MRO", "MTL", "MUR", "MWK", "MXN", "MXV", "MYR", "MZM", "NAD", "NGN",
"NIO", "NOK", "NPR", "NZD", "OMR", "PAB", "PEN", "PGK", "PHP", "PKR",
"PLN", "PYG", "QAR", "ROL", "RON", "RUB", "RWF", "SAR", "SBD", "SCR",
"SDD", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "STD",
"SVC", "SYP", "SZL", "THB", "TJS", "TMM", "TND", "TOP", "TRL", "TRY",
"TTD", "TWD", "TZS", "UAH", "UGX", "USD", "USN", "USS", "UYU", "UZS",
"VEB", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC",
"XBD", "XCD", "XDR", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XTS",
"XXX", "YER", "ZAR", "ZMK", "ZWD"]
```
|
{
"source": "jerielizabeth/text2topics",
"score": 3
}
|
#### File: text2topics/text2topics/utilities.py
```python
from nltk.tokenize import WhitespaceTokenizer
from nltk import word_tokenize
import os
import pandas as pd
import re
import tarfile
def readfile( input_dir, filename ):
"""Reads in file from directory and file name.
Returns the content of the file.
Usage::
>>> text = readfile(input_dir, filename)
Args:
input_dir (str): Directory with the input file.
filename (str): Name of file to be read.
Returns:
str: Returns the content of the file as a string.
"""
with open(os.path.join(input_dir, filename)) as f:
return f.read()
def strip_punct( text ):
"""Remove punctuation and numbers.
    Removes select punctuation marks and numbers from a text and replaces them with a space.
Non-permanent changes for evaluation purposes only.
Uses the :mod:`re` library.
Args:
text (str): Content to be evaluated.
Returns:
str: Returns the content string without the following characters: 0-9,.!?$:;&".
"""
text_cleaned = re.sub(r"[0-9,.!?$:;&\"]", " ", text)
return text_cleaned
def tokenize_text( text, tokenizer='whitespace' ):
"""Converts file content to a list of tokens.
Uses :meth:`nltk.tokenize.regexp.WhitespaceTokenizer`.
Args:
text (str): Content to be tokenized.
        tokenizer (str): Which tokenizer to use. Current options are
            'whitespace' and 'word'.
Returns:
list: Returns a list of the tokens in the text, separated by white space.
"""
    if tokenizer == 'whitespace':
        return WhitespaceTokenizer().tokenize(text)
    elif tokenizer == 'word':
        return word_tokenize(text)
    else:
        raise ValueError('Tokenizer value {} is invalid. Must be "whitespace" or "word"'.format(tokenizer))
def to_lower( tokens ):
"""Convert all tokens to lower case.
Args:
tokens (list): List of tokens generated using a tokenizer.
Returns:
list: List of all tokens converted to lowercase.
"""
return [w.lower() for w in tokens]
def create_spelling_dictionary( directory, wordlists ):
"""Compile a spelling dictionary.
Compiles a spelling dictionary from one or multiple
wordlist files. Returns results as a set.
References:
:func:`GoH.utilities.readfile`
Args:
directory (str): Location of the wordlist files.
wordlists (list): List of filenames for the wordlist files.
Returns:
set: List of unique words in all the compiled lists.
"""
spelling_dictionary = []
for wordlist in wordlists:
words = readfile(directory, wordlist).splitlines()
word_list = [w.lower() for w in words]
for each in word_list:
spelling_dictionary.append(each)
return set(spelling_dictionary)
def get_year( page_id ):
"""Extract year information from a page ids.
Note:
File names must be structured as follows::
TITLEYYYYMMDD-V00-00-page0.txt
or::
TITLEYYYYMMDD-V00-00.pdf
Args:
page_id (str): Filename to parse, format according to the note.
Returns:
str: Returns the first four digits, which corresponds to the year of publication.
"""
split_id = page_id.split('-')
dates = re.search(r'[0-9]+', split_id[0])
return dates.group()[:4]
def get_title( page_id ):
"""Extract year information from a page ids.
Note:
File names must be structured as follows::
TITLEYYYYMMDD-V00-00-page0.txt
or::
TITLEYYYYMMDD-V00-00.pdf
Args:
page_id (str): Filename to parse, formatted according to the note.
Returns:
str: Returns the title information from the page id.
"""
split_id = page_id.split('-')
title = re.match("[A-Za-z]+", split_id[0])
return title.group()
def open_original_docs(filenames,
pdf_dir='/Users/jeriwieringa/Dissertation/text/corpus-pdf/periodicals/',
text_dir='/Users/jeriwieringa/Dissertation/text/text/2017-04-Final-Corpus/'):
"""Opens the PDF and TXT files for a list of page ids.
Used to verify the content of files that report high error rates or unusual error information.
Args:
pdf_dir (str): Path to directory with PDF files.
text_dir (str): Path to directory with TXT files.
filenames (list): List of filenames to open.
"""
print("Opened files: \n")
for filename in filenames:
base_filename = filename.split('-')[:-1]
pdf_filename = "{}.pdf".format('-'.join(base_filename))
os.system("open {}".format(os.path.join(text_dir, filename)))
os.system("open {}".format(os.path.join(pdf_dir, pdf_filename)))
print("{}\n".format(filename))
def define_directories( prev, cycle, base_dir ):
"""Helper function for iterating through document cleaning.
This function redefines the directory information for each round of cleaning.
Args:
prev (str): Name of cycle that was just completed.
cycle (str): Name of current cycle.
base_dir (str): Root directory
Returns:
dict: Dictionary with keys `prev` and `cycle` and values of the corresponding directory paths.
"""
return {'prev': os.path.join(base_dir, prev), 'cycle': os.path.join(base_dir, cycle)}
def extract_words_from_dictionary(filepath):
"""Helper function for extracting a list of tokens from the output of Gensim's id2word function.
Uses Pandas to load dictionary data as a dataframe. Returns tokens as a list.
Args:
filepath (str): Path to Gensim dictionary file (txt)
Returns:
list: List of unique tokens.
"""
with open(filepath) as f:
df = pd.read_table(f, header=None, names=['word_id', 'word', 'count'])
return df.word.unique().tolist()
def create_tar_files(corpusDir, samplePrefix, tarFullCorpusObject, selectList):
"""Creates two corpus tar files from a selection list, a sample file and a holdout file.
Note:
Numbers in filenames denote [min tokens, max error rate, percent included]
Args:
corpusDir (str): Output path for tar files
samplePrefix (str): Unique identifier for tar files
tarFullCorpusObject (tarfile.TarFile): Tar object from the full corpus
selectList (list): List of filenames (basenames) to include in sample.
Returns:
No return
"""
SampleTar = tarfile.open(os.path.join(corpusDir, '{}Sample.tar.gz'.format(samplePrefix)), 'w:gz')
HoldoutTar = tarfile.open(os.path.join(corpusDir, '{}Holdout.tar.gz'.format(samplePrefix)), 'w:gz')
#Skip first member of tar file, as it is the directory
for member in tarFullCorpusObject.getmembers()[1:]:
if os.path.basename(member.name) in selectList:
SampleTar.addfile(member, tarFullCorpusObject.extractfile(member))
else:
HoldoutTar.addfile(member, tarFullCorpusObject.extractfile(member))
SampleTar.close()
HoldoutTar.close()
```
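A minimal sketch of how these helpers might be chained for a single page; the directory, page id, and wordlist filename below are hypothetical stand-ins, not paths from the source:
```python
# Hypothetical paths and filenames for illustration only.
input_dir = 'corpus/'
page_id = 'GOH18931019-V01-01-page3.txt'

text = readfile(input_dir, page_id)
tokens = to_lower(tokenize_text(strip_punct(text), tokenizer='whitespace'))

# Flag tokens that are missing from a compiled wordlist.
dictionary = create_spelling_dictionary('wordlists/', ['base-wordlist.txt'])
errors = [t for t in tokens if t not in dictionary]

print(get_title(page_id), get_year(page_id), len(errors))
```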
|
{
"source": "jeriewang/crh-botnet",
"score": 2
}
|
#### File: docs/examples/button_controls_drive.py
```python
from crh_botnet import *
import gpiozero
robot = Robot()
robot.network.set_server_address('choate-robotics-rpi-01.local')
motor_left = gpiozero.PWMOutputDevice(17)
motor_right = gpiozero.PWMOutputDevice(18)
H_left_1 = gpiozero.DigitalOutputDevice(22)
H_left_2 = gpiozero.DigitalOutputDevice(23)
H_right_1 = gpiozero.DigitalOutputDevice(26)
H_right_2 = gpiozero.DigitalOutputDevice(20)
def setup():
H_left_1.on()
H_right_1.on()
def on_message(message):
if message == 'on':
motor_left.value = 0.75
motor_right.value = 0.75
elif message == 'off':
motor_left.value = 0
motor_right.value = 0
robot.run(globals())
```
#### File: docs/examples/h_bridge_drive.py
```python
from crh_botnet import *
from crh_botnet.drive import HBridgeDrive
robot = Robot()
driver = HBridgeDrive(20, 26, 18, 22, 23, 17, reverse_left=True)
def loop():
driver.drive(1, 1)
robot.run(globals(), offline=True)
```
|
{
"source": "jeriewang/Latin-Helper",
"score": 3
}
|
#### File: Latin-Helper/source/Adjective.py
```python
from Noun import NounFrame
try:
import tkinter as tk
import tkinter.ttk as ttk
except ImportError:
import Tkinter as tk
import ttk
def centerwindow(window,h,w):
sh = window.winfo_screenheight()
sw = window.winfo_screenwidth()
window.geometry('+' + str((sw - w) // 2) + '+' + str((sh - h) // 2))
def getwinfo(widget):
widget.update()
print('height= %s, width= %s'% (widget.winfo_height(), widget.winfo_width()))
class AdjectiveFrame(NounFrame):
def __init__(self,master=None,gender=''):
NounFrame.__init__(self,master)
tk.Label(self,text=gender,font=('Times New Romans',14,'bold')).grid(row=0,column=1,columnspan=2)
class AdjectiveAllGender(tk.Frame):
def __init__(self,master=None):
tk.Frame.__init__(self,master)
self.masculine = AdjectiveFrame(self, 'masculine')
self.feminine = AdjectiveFrame(self, 'feminine')
self.neuter = AdjectiveFrame(self, 'neuter')
self.masculine.grid(row=1, column=0)
ttk.Separator(self).grid(row=1, column=1, sticky='ns')
self.feminine.grid(row=1, column=2, padx=20)
ttk.Separator(self).grid(row=1, column=3, sticky='ns')
self.neuter.grid(row=1, column=4, padx=20)
def fill_in_the_answer(self,answers):
"answers should be a 3 elements tuple or list, goes in order of (m),(s),(n)"
self.masculine.fill_in_the_answer(answers[0])
self.feminine.fill_in_the_answer(answers[1])
self.neuter.fill_in_the_answer(answers[2])
def suffix(stem, suffix):
answer = []
for e in suffix:
answer.append(stem + e)
return answer
class Adjective(tk.Frame):
def __init__(self,master=None,window_widget=None):
tk.Frame.__init__(self, master)
self.window_widget=window_widget
padingframe=tk.Frame(self,padx=10)
lf=tk.LabelFrame(padingframe,text='Instructions',relief='ridge',font=('Sans-serif',16,'italic'))
tk.Label(lf,text='Please enter the dictionary form of an adjective.').pack()
tk.Label(lf, text='e.g. celeber, celebris, celebre').pack()
tk.Label(lf, text='However, if two or more parts are the same, they can be omitted.').pack()
exframe1=tk.Frame(lf)
tk.Label(exframe1, text='e.g.').grid(column=0,row=0)
exentry1 = tk.Entry(exframe1,width=len('brevis, breve')-4)
exentry1.grid(column=1,row=0)
exentry1.insert(0,'brevis, breve')
exentry1.config(state='readonly')
tk.Label(exframe1, text=' for brevis, brevis, breve,').grid(column=2,row=0)
exframe1.pack()
exframe2 = tk.Frame(lf)
tk.Label(exframe2, text='or').grid(column=0, row=0)
exentry2 = tk.Entry(exframe2, width=len('sapiens'))
exentry2.grid(column=1, row=0)
exentry2.insert(0, 'sapiens')
exentry2.config(state='readonly')
tk.Label(exframe2, text='for sapiens, sapiens, sapiens').grid(column=2, row=0)
exframe2.pack()
tk.Label(self,text='').pack() ## a blank line
entry=tk.Frame(self)
self.entry=tk.Entry(entry,width=30)
self.entry.bind('<Return>',self.build)
self.entry.pack(side=tk.LEFT)
self.entry.config(fg='gray')
self.entry.insert(0,'This result might not be accurate!')
self.entry.bind('<FocusIn>',lambda event: (f for f in (self.entry.delete(0,tk.END),self.entry.config(fg='black'),self.entry.unbind('<FocusIn>'))))
tk.Button(entry,text='generate',command=self.build).pack(side=tk.RIGHT)
entry.pack()
self.positive_selection=tk.BooleanVar()
self.positive_selection.set(1)
self.superlative_selection = tk.BooleanVar()
self.comparative_selection = tk.BooleanVar()
sel=tk.Frame(self)
tk.Checkbutton(sel,text='positive',variable=self.positive_selection).grid(column=0,row=0)
tk.Checkbutton(sel, text='comparative', variable=self.comparative_selection).grid(column=1,row=0)
tk.Checkbutton(sel, text='superlative', variable=self.superlative_selection).grid(column=2,row=0)
sel.pack()
lf.pack()
padingframe.pack()
def build(self,event=None):
def first(nom):
stem = nom[:-1]
answer = suffix(stem, 'a ae ae am ā a ae arum is as is ae'.split())
return answer
def second(nom):
if nom[-2:] == 'um':
stem = nom[:-2]
suf = 'um i o um o um a orum is a is a'.split()
elif nom[-1] == 'r':
if nom[-2:] == 'er':
stem = nom[:-2]+'r'
else:
stem = nom[:-1]
suf = 'r i o um o r i orum is os is i'.split()
elif nom[-3:] == 'ius':
stem = nom[:-3]
suf = ['us', 'i', 'o', 'um', 'o', '', 'i', 'orum', 'is', 'os', 'is', 'i']
else:
stem = nom[:-2]
suf = 'us i o um o e i orum is os is i'.split()
answer = suffix(stem, suf)
if nom[-1] == 'r':
answer[0] = answer[5] = nom
return answer
def third(nom, isistem, isneuter, base=None):
stem = nom[:-2] if base is None else base
if isneuter:
if isistem:
answer = suffix(stem, 'x is i x i x ia ium ibus ia ium ia'.split())
else:
answer = suffix(stem, 'x is i x e x a um ibus a ibus a'.split())
answer[0] = answer[3] = answer[5] = nom
else:
if isistem:
answer = suffix(stem, 'x is i em i x es um ibus es ibus es'.split())
else:
answer = suffix(stem, 'x is i em e x es um ibus es ibus es'.split())
answer[0] = answer[5] = nom
answer[7] = stem + 'ium' if isistem else stem + 'um'
return answer
def decide_declension(input_):
if (input_[0][-2:] == 'us' or input_[0][-3:] == 'ius' or input_[0][-1] == 'r') and input_[1][-1] == 'a' and \
input_[2][-2:] == 'um':
answerp=[]
answerc=[]
answers=[]
answerp.append(second(input_[0]))
answerp.append(first(input_[1]))
answerp.append(second(input_[2]))
answerc.append(third(input_[1][:-1]+'ior',False,False,input_[1][:-1] + 'ior'))
answerc.append(third(input_[1][:-1] + 'ior', False, False,input_[1][:-1] + 'ior'))
answerc.append(third(input_[1][:-1] + 'ius', False, True,input_[1][:-1] + 'ior'))
answers.append(second(input_[1][:-1]+'issimus' if (input_[1][-2] != 'r' and input_[1][-2] != 'l') else input_[1][:-1]+input_[1][-2]+'imus'))
answers.append(first(input_[1][:-1]+'issima' if (input_[1][-2] != 'r' and input_[1][-2] != 'l') else input_[1][:-1]+input_[1][-2]+'ima'))
answers.append(second(input_[1][:-1]+'issimum' if (input_[1][-2] != 'r' and input_[1][-2] != 'l') else input_[1][:-1]+input_[1][-2]+'imum'))
self.positive.fill_in_the_answer(answerp)
self.comparative.fill_in_the_answer(answerc)
self.superlative.fill_in_the_answer(answers)
return 'first/second declension'
else: #third declension
top=tk.Toplevel()
centerwindow(top, 124, 466)
def varify_blank(event=None):
if manual_stem.get().replace(' ','') != '' or tmpbutton.get():
top.destroy()
tk.Label(top,text='The machine detected your input is a third declension adjective. Please pro-').pack()
tk.Label(top,text='vide the positive masculine singular genitive, or let the machine decide this.').pack()
manual_stem=tk.StringVar()
tmpentry=tk.Entry(top,textvariable=manual_stem)
tmpentry.pack()
tmpentry.bind('<Return>',varify_blank)
tmpbutton=tk.BooleanVar()
tk.Checkbutton(top,text='Let the machine decide it (very likely to be inaccurate and give weird results!)',variable=tmpbutton).pack()
tk.Button(top,text='Continue',command=varify_blank).pack()
top.wm_protocol('WM_DELETE_WINDOW',varify_blank)
result_panel.withdraw()
result_panel.wait_window(top)
if not tmpbutton.get():
stem = manual_stem.get()[:-2]
else:
if input_[0]==input_[1]==input_[2]:
if input_[0][-2:]=='ox':
stem=input_[0][:-1]+'c'
elif input_[0][-2:] == 'ax':
stem = input_[0][:-1] + 'c'
elif input_[0][-2:]=='ix':
stem = input_[0][:-2] + 'ic'
elif input_[0][-2:]=='ex':
stem = input_[0][:-2] + 'ic'
elif input_[0][-1]=='s':
stem=input_[0][:-1]+'t'
else: stem=input_[0][:-2]
elif input_[0]==input_[1]:
stem = input_[1][:-2]
else:
stem = input_[1][:-2]
answerp = []
answerc = []
answers = []
answerp.append(third(input_[0], True, False, stem))
answerp.append(third(input_[1], True, False, stem))
answerp.append(third(input_[2], True, True, stem))
answerc.append(third(stem + 'ior', False, False, stem + 'ior'))
answerc.append(third(stem + 'ior', False, False, stem + 'ior'))
answerc.append(third(stem + 'ius', False, True, stem + 'ior'))
answers.append(
second(stem + 'issimus' if (stem[-1] != 'r' and stem[-1] != 'l') else stem + stem[-1]+ 'imus'))
answers.append(
first(stem + 'issima' if (stem[-1] != 'r' and stem[-1] != 'l') else stem + stem[-1]+'ima'))
answers.append(
second(stem + 'issimum' if (stem[-1] != 'r' and stem[-1] != 'l') else stem + stem[-1]+'imum'))
self.positive.fill_in_the_answer(answerp)
self.comparative.fill_in_the_answer(answerc)
self.superlative.fill_in_the_answer(answers)
result_panel.deiconify()
return 'third declension'
quit_protocol = lambda: (f for f in (self.window_widget.deiconify(), result_panel.destroy()))
self.window_widget.withdraw()
result_panel=tk.Toplevel()
tmp=0
for e in (self.positive_selection.get(),self.comparative_selection.get(),self.superlative_selection.get()):
if e:
tmp+=1
if tmp==1: centerwindow(result_panel,324,1192)
elif tmp==2: centerwindow(result_panel,583,1192)
else: centerwindow(result_panel,842,1192)
del tmp
self.positive_frame = tk.Frame(result_panel)
self.comparative_frame = tk.Frame(result_panel)
self.superlative_frame = tk.Frame(result_panel)
self.positive=AdjectiveAllGender(self.positive_frame)
self.comparative=AdjectiveAllGender(self.comparative_frame)
self.superlative=AdjectiveAllGender(self.superlative_frame)
input_ = self.entry.get().replace(' ', '').split(',')
if len(input_) == 0:
quit_protocol()
return 'break'
elif len(input_)== 1:
input_+=input_+input_
elif len(input_) == 2:
input_.insert(0,input_[0])
try:
declension = decide_declension(input_)
except IndexError as err:
print(err)
quit_protocol()
return 'break'
tk.Label(result_panel, text='Declension of %s (%s)' % (self.entry.get(), declension),font=('Times New Romans', 24, 'bold')).pack()
tk.Label(self.positive_frame,text='Positive',font=('Times New Romans',16,'bold')).pack(pady=10)
self.positive.pack()
tk.Label(self.comparative_frame, text='Comparative', font=('Times New Romans', 16, 'bold')).pack(pady=10)
self.comparative.pack()
tk.Label(self.superlative_frame, text='Superlative', font=('Times New Romans', 16, 'bold')).pack(pady=10)
self.superlative.pack()
if self.positive_selection.get():
self.positive_frame.pack()
if self.comparative_selection.get():
self.comparative_frame.pack()
if self.superlative_selection.get():
self.superlative_frame.pack()
tk.Button(result_panel, text='Back', command=quit_protocol).pack()
result_panel.wm_protocol('WM_DELETE_WINDOW',quit_protocol)
if __name__ == '__main__':
root=tk.Tk()
adjective = Adjective(root,root)
adjective.pack()
root.attributes('-topmost', True)
root.bind('<FocusIn>', lambda x: [f for f in (root.attributes('-topmost', False), root.unbind('<FocusIn>'))])
root.mainloop()
```
|
{
"source": "jerik/py-money",
"score": 3
}
|
#### File: py-money/money/utils.py
```python
import subprocess
def applescript(cmd):
"""Execute an apple script command.
Return the output of the command or raise an exception on failure.
"""
proc = subprocess.Popen(["osascript", "-"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(input=cmd.encode("utf-8"), timeout=60)
if proc.returncode != 0:
raise Exception(f'Apple Script: {err.decode("utf-8")}')
return out
```
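A small usage sketch (macOS only, since it shells out to osascript); the script text is arbitrary:
```python
# Evaluate a trivial AppleScript expression and decode the bytes result.
out = applescript('return 2 + 2')
print(out.decode("utf-8").strip())  # "4"
```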
|
{
"source": "jeriksson/graphserver",
"score": 3
}
|
#### File: ext/gtfs/pggtfsdb.py
```python
import psycopg2
class PostgresGIS_GTFSDB:
#
# initializer method for connecting to PostgreSQL database
#
def __init__(self, db_connect_string):
# store the connection string
self.db_connect_string = db_connect_string
#
# method to create a database connection
#
def create_pggtfsdb_connection(self):
# create a database connection
conn = psycopg2.connect(self.db_connect_string)
# grab a database cursor
cur = conn.cursor()
# prepare query for get_board/alight_event_data
prepare_stop_data_query = "PREPARE get_board_alight_event_data_stop_data (text) AS SELECT stop_name, stop_lat, stop_lon, parent_station FROM stops WHERE stop_id=$1"
# create prepared statement for get_board/alight_event_data
cur.execute(prepare_stop_data_query)
# prepare queries for get_board_event_data
prepare_route_data_query = "PREPARE get_board_event_data_route_data (text) AS SELECT routes.agency_id, routes.route_id, routes.route_long_name, routes.route_short_name, routes.route_type FROM routes, trips WHERE routes.route_id=trips.route_id AND trip_id=$1"
prepare_stop_headsign_query = "PREPARE get_board_event_data_stop_headsign (text, text) AS SELECT stop_headsign FROM stop_times WHERE trip_id=$1 AND stop_id=$2"
# create prepared statements for get_board_event_data
cur.execute(prepare_route_data_query)
cur.execute(prepare_stop_headsign_query)
# prepare query for get_station_vertex_from_coords
prepare_dist_query = "PREPARE get_station_vertex_from_coords (text, text) AS SELECT stop_id, ST_distance_sphere(ST_SetSRID(ST_GeomFromText($1),4326),location) as dist from stops where location && ST_SetSRID($2::box3d,4326) ORDER BY dist ASC LIMIT 1"
# create prepared statement for get_station_vertex_from_coords
cur.execute(prepare_dist_query)
# prepare query for get_coords_for_station_vertex
prepare_vertex_query = "PREPARE get_coords_for_station_vertex (text) AS SELECT ST_AsText(location) FROM stops WHERE stop_id=$1"
# create prepared statement for get_coords_for_station_vertex
cur.execute(prepare_vertex_query)
# return database connection
return conn
#
# method to create a database connection for get_transit_path_points function
#
def create_transit_path_pggtfsdb_connection(self):
# create a database connection
conn = psycopg2.connect(self.db_connect_string)
# grab a database cursor
cur = conn.cursor()
# prepare queries for get_transit_path_points
prepare_stop_data_query = "PREPARE get_transit_path_points_stop_data (text) AS SELECT stop_lat, stop_lon FROM stops WHERE stop_id=$1"
prepare_shape_pt_sequence_query = "PREPARE get_transit_path_points_shape_pt_sequence (text) AS SELECT shapes.shape_pt_sequence, ST_Distance(shapes.location, stops.location) AS distance FROM shapes, stops WHERE shapes.shape_id=$1 AND stops.stop_id=$2 ORDER BY distance ASC LIMIT 1"
# create prepared statements for get_transit_path_points
cur.execute(prepare_stop_data_query)
cur.execute(prepare_shape_pt_sequence_query)
# return database connection
return conn
#
# method to close a database connection
#
def close_pggtfsdb_connection(self, conn):
# close database connection
conn.close()
#
# method for returning the data for a transit board event
#
def get_board_event_data(self, conn, trip_id, stop_id):
# grab database cursor
cur = conn.cursor()
# execute route data prepared statement
cur.execute("EXECUTE get_board_event_data_route_data ('" + trip_id + "')")
# grab the route data
agency_id, route_id, route_long_name, route_short_name, route_type = cur.fetchone()
# execute stop data prepared statement
cur.execute("EXECUTE get_board_alight_event_data_stop_data ('" + stop_id + "')")
# grab the stop data
stop_name, stop_lat, stop_lon, parent_station = cur.fetchone()
# execute stop headsign prepared statement
cur.execute("EXECUTE get_board_event_data_stop_headsign ('" + trip_id + "','" + stop_id + "')")
# grab the stop headsign data
stop_headsign = cur.fetchone()[0]
return (agency_id, route_id, route_long_name, route_short_name, route_type, stop_name, stop_lat, stop_lon, parent_station, stop_headsign)
#
# method for returning the data for a transit alight event
#
def get_alight_event_data(self, conn, stop_id):
# grab database cursor
cur = conn.cursor()
# execute stop data prepared statement
cur.execute("EXECUTE get_board_alight_event_data_stop_data ('" + stop_id + "')")
# grab the stop data
stop_name, stop_lat, stop_lon, parent_station = cur.fetchone()
return (stop_name, stop_lat, stop_lon, parent_station)
#
# method for returning the closest station vertex to a coordinate pair
#
def get_station_vertex_from_coords(self, conn, longitude, latitude):
# grab database cursor
cur = conn.cursor()
# place coordinates in POINT GIS object
geom_point = "'POINT(" + str(longitude) + ' ' + str(latitude) + ")'"
# longitude/latitude offset
offset = 0.05
# created BOX3D object for search space
box3d_coords = "'BOX3D(" + str(longitude - offset) + ' ' + str(latitude - offset) + ',' + str(longitude + offset) + ' ' + str(latitude + offset) + ")'"
# execute the box3d-enhanced prepared statement
cur.execute("EXECUTE get_station_vertex_from_coords (" + geom_point + "," + box3d_coords + ")")
# fetch the first row from the results
first_row = cur.fetchone()
# if the first row contains no results
if (first_row is None):
# execute the non-box3d-enhanced query
cur.execute("SELECT stop_id, ST_distance_sphere(ST_SetSRID(ST_GeomFromText(" + geom_point + "),4326),location) AS dist FROM stops ORDER BY dist ASC LIMIT 1")
# fetch the first row from the results
first_row = cur.fetchone()
# return station vertex id and distance
return ('sta-' + first_row[0], first_row[1])
#
# method for returning the coordinates (lat, lon) for a station vertex
#
def get_coords_for_station_vertex(self, conn, vertex_id):
# grab database cursor
cur = conn.cursor()
# strip 'sta-' prefix from vertex_id
vertex_id = vertex_id.replace('sta-','')
# execute the prepared statement
cur.execute("EXECUTE get_coords_for_station_vertex ('" + vertex_id + "')")
# fetch the first row from the results
first_row = cur.fetchone()
# grab raw coordinates
vertex_coords = first_row[0].replace('POINT(','').replace(')','')
# return coordinates (lat, lon)
return (float(vertex_coords[vertex_coords.index(' ')+1:]), float(vertex_coords[0:vertex_coords.index(' ')]))
#
# method for returning the points along a transit path between board_stop_id and alight_stop_id
#
def get_transit_path_points(self, conn, trip_id, board_stop_id, alight_stop_id):
# grab database cursor
cur = conn.cursor()
# execute stop data prepared statement
cur.execute("EXECUTE get_transit_path_points_stop_data ('" + board_stop_id + "')")
# grab the board stop data
board_stop_lat, board_stop_lon = cur.fetchone()
# execute stop data prepared statement
cur.execute("EXECUTE get_transit_path_points_stop_data ('" + alight_stop_id + "')")
# grab the alight stop data
alight_stop_lat, alight_stop_lon = cur.fetchone()
# execute query to get trip shape id
cur.execute("select shape_id from trips where trip_id='" + str(trip_id) + "'")
# grab the shape id
shape_id = cur.fetchone()[0]
# create list for storing points along the shape between the stops
path_points = []
if (shape_id is not None):
# check the shape id
if (shape_id.strip() == ''):
return [str(board_stop_lat) + ',' + str(board_stop_lon), str(alight_stop_lat) + ',' + str(alight_stop_lon)]
# execute prepared statement to get shape point sequence value for the board stop
cur.execute("EXECUTE get_transit_path_points_shape_pt_sequence ('" + shape_id + "','" + board_stop_id + "')")
# grab the shape point sequence value for the board stop
board_shape_pt_sequence = cur.fetchone()[0]
# execute prepared statement to get shape point sequence value for the alight stop
cur.execute("EXECUTE get_transit_path_points_shape_pt_sequence ('" + shape_id + "','" + alight_stop_id + "')")
# grab the shape point sequence value for the alight stop
alight_shape_pt_sequence = cur.fetchone()[0]
# determine which point sequence value is greater
if (board_shape_pt_sequence < alight_shape_pt_sequence):
# execute query to get the list of points along the shape between the board and alight stops
cur.execute("select ST_AsText(location) from shapes where shape_id='" + shape_id + "' and shape_pt_sequence >= " + str(board_shape_pt_sequence) + " and shape_pt_sequence <= " + str(alight_shape_pt_sequence) + " order by shape_pt_sequence asc")
else:
# execute query to get the list of points along the shape between the alight and board stops
cur.execute("select ST_AsText(location) from shapes where shape_id='" + shape_id + "' and shape_pt_sequence >= " + str(alight_shape_pt_sequence) + " and shape_pt_sequence <= " + str(board_shape_pt_sequence) + " order by shape_pt_sequence desc")
# grab list of points along the shape between the stops
path_points = cur.fetchall()
# iterate through points
for i in range(len(path_points)):
mod_point = path_points[i][0].replace('POINT(','').replace(')','').replace(' ',',')
point_lat = mod_point[mod_point.index(',')+1:]
point_lon = mod_point[0:mod_point.index(',')]
path_points[i] = point_lat + ',' + point_lon
# insert board stop location to front of path points list
path_points.insert(0, str(board_stop_lat) + ',' + str(board_stop_lon))
# append alight stop location to end of path points list
path_points.append(str(alight_stop_lat) + ',' + str(alight_stop_lon))
# return transit path points
return path_points
```
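A minimal usage sketch of the class above; the connection string and coordinates are placeholders, not values from the source:
```python
# Hypothetical connection string and query point for illustration only.
gtfsdb = PostgresGIS_GTFSDB("dbname=gtfs user=postgres host=localhost")
conn = gtfsdb.create_pggtfsdb_connection()

vertex_id, dist = gtfsdb.get_station_vertex_from_coords(conn, -122.33, 47.60)
lat, lon = gtfsdb.get_coords_for_station_vertex(conn, vertex_id)
print(vertex_id, dist, lat, lon)

gtfsdb.close_pggtfsdb_connection(conn)
```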
|
{
"source": "jerilj/calculator_unittest",
"score": 4
}
|
#### File: jerilj/calculator_unittest/CalculatorTests.py
```python
import unittest
from Calculator import Calculator
from CSVReader import CSVReader
class MyTestCase(unittest.TestCase):
def setUp(self):
self.calculator = Calculator()
def test_instantiate_calculator(self):
self.assertIsInstance(self.calculator, Calculator)
def test_results_property(self):
self.assertEqual(self.calculator.result, 0)
def test_add_method(self):
add_data = CSVReader('csv/Unit Test Addition.csv')
for row in add_data.data:
self.assertEqual(self.calculator.add(int(row['Value 1']), int(row['Value 2'])), int(row['Result']))
def test_subtract_method(self):
sub_data = CSVReader('csv/Unit Test Subtraction.csv')
for row in sub_data.data:
self.assertEqual(self.calculator.subtract(int(row['Value 2']), int(row['Value 1'])), int(row['Result']))
def test_multiply_method(self):
sub_data = CSVReader('csv/Unit Test Multiplication.csv')
for row in sub_data.data:
self.assertEqual(self.calculator.multiply(int(row['Value 2']), int(row['Value 1'])), int(row['Result']))
def test_divide_method(self):
sub_data = CSVReader('csv/Unit Test Division.csv')
for row in sub_data.data:
self.assertEqual(round(self.calculator.divide(float(row['Value 2']), float(row['Value 1'])), 9),
float(row['Result']))
def test_square_method(self):
sub_data = CSVReader('csv/Unit Test Square.csv')
for row in sub_data.data:
self.assertEqual(self.calculator.square(int(row['Value 1'])), int(row['Result']))
def test_sqrt_method(self):
sub_data = CSVReader('csv/Unit Test Square Root.csv')
for row in sub_data.data:
self.assertEqual(round(self.calculator.square_root(float(row['Value 1'])), 8),
round(float(row['Result']), 8))
if __name__ == '__main__':
unittest.main()
```
#### File: jerilj/calculator_unittest/CSVReader.py
```python
import csv
class CSVReader:
def __init__(self, file_path):
self.data = []
with open(file_path) as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
for row in csv_reader:
self.data.append(row)
pass
```
|
{
"source": "jerilj/stats_calculator",
"score": 4
}
|
#### File: stats_calculator/Calculator/Addition.py
```python
def addition(a, b):
return a + b
def add_list(data):
total = 0
for i in data:
total = addition(total, i)
return total
```
#### File: stats_calculator/Calculator/Division.py
```python
def division(a, b):
if b == 0:
raise Exception("Division By Zero Is Not Allowed")
else:
return a / b
```
#### File: stats_calculator/Calculator/Proportion.py
```python
import math
class Proportion:
def proportion(CSValues):
try:
ans = []
total = sum(CSValues)
for i in CSValues:
temp = i/total
ans.append('{:.4f}'.format(temp))
return ans
except:
return 'Pay attention, I cannot divide by zero :('
#if __name__=="__main__":
# print(Proportion.proportion([5,9,10,12,6,3,4]))
```
#### File: stats_calculator/CSVReader/FileReader.py
```python
import csv
# list to hold numbers from out file
dataSet = []
def readCSV(csvFile):
with open(csvFile) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
row = row.pop(0)
row = int(row, base=10)
dataSet.append(row)
return dataSet
# you can pass in any csv file you want here
# readCSV('CSV_files/test.csv')
```
#### File: stats_calculator/CSVReader/TableReader.py
```python
import csv
class TableReader:
def __init__(self, file_path):
self.data = []
self.header = None
row_count = 0
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if row_count == 0:
self.header = row
else:
self.data.append(row)
row_count = row_count+1
pass
```
#### File: stats_calculator/Statistics/Sampling.py
```python
import random
from Calculator.Square import square
from Calculator.SquareRoot import get_square_root
from Statistics.Scores import get_t_score, get_z_score
from Calculator.Subtraction import subtraction as subtract
from Calculator.Division import division as divide
from Calculator.Multiplication import multiplicaton as multiply
from Calculator.Addition import addition as add
def seed_select_sample(data, n):
random.seed(5)
return random.sample(data, n)
def conf_inter(x, cl, s, n):
var_1 = n - 1
var_2 = subtract(1, cl)
var_3 = divide(var_2, 2)
var_4 = get_t_score(float(var_1), float(var_3))
var_5 = get_square_root(n)
var_6 = divide(s, var_5)
var_7 = multiply(float(var_4), var_6)
low = subtract(x, var_7)
high = add(x, var_7)
return [low, high]
def mar_err(z, s, n):
var_1 = get_square_root(n)
var_2 = divide(s, var_1)
return multiply(z, var_2)
def cochran(z, p, q, e):
var_1 = square(z)
var_2 = multiply(p, q)
var_3 = multiply(var_1, var_2)
var_4 = square(e)
return divide(var_3, var_4)
def sample_size_ci(z, p, w):
var_1 = divide(z, 2)
z_score = get_z_score(var_1)
e = divide(w, 2)
q = subtract(1, p)
var_2 = multiply(p, q)
var_3 = divide(float(z_score), e)
var_5 = square(var_3)
return multiply(var_2, var_5)
```
#### File: stats_calculator/Statistics/Validation.py
```python
def is_valid(data):
if len(data) == 0:
raise Exception("Data is empty")
valid = True
for i in data:
if type(i) == int or type(i) == float:
valid = True
else:
valid = False
break
return valid
```
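A couple of illustrative calls to the validator above (values chosen arbitrarily):
```python
print(is_valid([3, 4.5, 7]))      # True
print(is_valid([3, 'seven', 7]))  # False
# is_valid([]) raises Exception("Data is empty")
```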
#### File: stats_calculator/test/randoms.py
```python
import random
def rand_int(a, b):
return random.randint(a, b)
def rand_dec(a, b):
return random.uniform(a, b)
def seed_int(a, b):
random.seed(5)
return random.randint(a, b)
def seed_dec(a, b):
random.seed(5)
return random.uniform(a, b)
def seed_list_int(a, b, n):
random.seed(5)
return random.choices(range(a, b), k=n)
def seed_list_dec(a, b, n):
tmp = []
counter = 0
random.seed(5)
while counter < n:
val = random.uniform(a, b)
tmp.append(val)
counter = counter + 1
return tmp
def select_item(data):
return random.choice(data)
def seed_select_item(data):
random.seed(5)
return random.choice(data)
def select_items(data, n):
return random.choices(data, k=n)
def seed_select_items(data, n):
random.seed(5)
return random.choices(data, k=n)
```
#### File: stats_calculator/test/test_cases_module.py
```python
import math
from FileReader import readCSV
import scipy
from scipy.stats import t
import numpy
import StatisticsModule
import FileReader
dataSet = readCSV('CSV_files/test.csv')
testData = [1,2,3]
def test_csv_reader():
dataSet = readCSV('CSV_files/test.csv')
assert len(dataSet) == 19 * 2
def test_csv_reader_fail():
dataSet = readCSV('CSV_files/test.csv')
assert len(dataSet) != 19
#4 - Population Standard Deviation Tests
def test_calc_populationStandardDeviation():
from StatisticsModule import populationStandardDeviation
assert populationStandardDeviation(testData) == 0.816496580927726
def test_calc_populationStandardDeviation_fail():
from StatisticsModule import populationStandardDeviation
assert populationStandardDeviation(testData) != 0.48
#5 - Variance of Population Proportion Tests
def test_calc_variancePopulationProportion():
from StatisticsModule import variancePopulationProportion
assert variancePopulationProportion(testData) == 6
def test_calc_variancePopulationProportion_fail():
from StatisticsModule import variancePopulationProportion
assert variancePopulationProportion(testData) != 1
#6 - Z-Score Tests
def test_zScore():
from StatisticsModule import zScore
scores = zScore(testData)
assert scores == [-1.0, 0.0, 1.0]
def test_zScore_fail():
from StatisticsModule import zScore
scores = zScore(testData)
assert scores != [0.0, 2, 5]
#7 - Standardized Score Tests
#def test_calc_standardizedScore():
# from StatisticsModule import standardizedScore
# scores = standardizedScore(testData)
# assert scores == [(-1.0/(math.sqrt(2/3))), 0.0, (1.0/(math.sqrt(2/3)))]
def test_calc_standardizedScore_fail():
from StatisticsModule import standardizedScore
scores = standardizedScore(testData)
assert standardizedScore(testData) != [3.6, 9.6, 18.6]
#8 - Population Correleation Coefficient Tests
def test_calc_populationCorrelationCoefficient():
from StatisticsModule import populationCorrelationCoefficient
assert populationCorrelationCoefficient(dataSet) == -0.43371655224528094
def test_calc_populationCorrelationCoefficient_fail():
from StatisticsModule import populationCorrelationCoefficient
assert populationCorrelationCoefficient(dataSet) != 5
#9 - Confidence Interval Tests
#def test_calc_confidenceInterval():
#from StatisticsModule import confidenceInterval
#assert confidenceInterval(testData) == (4.4841377118437524, -0.48413771184375287)
#def test_calc_confidenceInterval_fail():
#from StatisticsModule import confidenceInterval
#assert confidenceInterval(testData) != 5
#10 - Population Variance Tests
def test_calc_variance():
from StatisticsModule import variance
assert variance(dataSet) == variance(dataSet)
def test_calc_variance_fail():
from StatisticsModule import variance
assert variance(dataSet) != 2
#11 - P-Value Tests
def test_calc_pValue():
from StatisticsModule import pValue
assert pValue(dataSet) == "p-value is less than alpha. Null hypothesis accepted: means are equal."
def test_calc_pValue_fail():
from StatisticsModule import pValue
assert pValue(dataSet) != "p-value is greater than alpha. Null hypothesis rejected: means are not equal."
#12 - Proportion Tests
def test_calc_proportion():
from StatisticsModule import proportion
assert proportion(testData) == ['0.1667', '0.3333', '0.5000']
def test_calc_proportion_fail():
from StatisticsModule import proportion
assert proportion(testData) != ['0.5000', '0.2000', '0.3000']
#13 - Sample Mean Tests
def test_calc_sampleMean():
from StatisticsModule import sampleMean
assert sampleMean(testData) == 2
def test_calc_sampleMean_fail():
from StatisticsModule import sampleMean
assert sampleMean(testData) != 1
#14 - Sample Standard Deviation Tests
def test_calc_std():
from StatisticsModule import standardDeviation
assert standardDeviation(dataSet) == standardDeviation(dataSet)
def test_calc_std_fail():
from StatisticsModule import standardDeviation
assert standardDeviation(dataSet) != 2
#15 - Variance of Sample Proportion Tests
def test_calc_varianceSampleProportion():
from StatisticsModule import varianceSampleProportion
assert varianceSampleProportion(testData) == 1
def test_calc_varianceSampleProportion_fail():
from StatisticsModule import varianceSampleProportion
assert varianceSampleProportion(testData) != 2
```
#### File: stats_calculator/test/test_csvreader.py
```python
import unittest
from CSVReader.CSVReader import CSVReader
class MyTestCase(unittest.TestCase):
def setUp(self):
self.csv_reader = CSVReader('csv/Unit Test Addition.csv')
def test_instantiate_calculator(self):
self.assertIsInstance(self.csv_reader, CSVReader)
if __name__ == '__main__':
unittest.main()
```
#### File: stats_calculator/test/test_statistics.py
```python
import unittest
import random
import statistics as stats
import test.randoms as rand
from Statistics.Statistics import Statistics
class MyTestCase(unittest.TestCase):
def setUp(self):
self.statistics = Statistics()
def test_instantiate_statistics(self):
self.assertIsInstance(self.statistics, Statistics)
def test_mean_method(self):
random.seed(5)
data = rand.seed_list_int(0, 10, 5)
ans = stats.mean(data)
self.assertEqual(self.statistics.mean(data), ans)
def test_mean_method_dec(self):
random.seed(5)
data = rand.seed_list_dec(0, 10, 5)
ans = stats.mean(data)
self.assertEqual(self.statistics.mean(data), ans)
def test_mean_method_empty(self):
data = []
with self.assertRaises(Exception):
self.statistics.mean(data)
def test_mean_method_str(self):
data = [1, 2, "Hello World"]
with self.assertRaises(TypeError):
self.statistics.mean(data)
def test_median_method_odd(self):
random.seed(5)
data = rand.seed_list_int(0, 10, 5)
ans = stats.median(data)
self.assertEqual(self.statistics.median(data), ans)
def test_median_method_even(self):
random.seed(5)
data = rand.seed_list_dec(0, 10, 6)
ans = stats.median(data)
self.assertEqual(self.statistics.median(data), ans)
def test_median_method_empty(self):
data = []
with self.assertRaises(Exception):
self.statistics.median(data)
def test_median_method_str(self):
data = [1, 2, "Hello World"]
with self.assertRaises(TypeError):
self.statistics.median(data)
def test_mode_method(self):
random.seed(5)
data = rand.seed_list_int(0, 10, 20)
ans = stats.mode(data)
self.assertEqual(self.statistics.mode(data), ans)
def test_mode_method_empty(self):
data = []
with self.assertRaises(Exception):
self.statistics.mode(data)
def test_mode_method_str(self):
data = [1, 2, "Hello World"]
with self.assertRaises(TypeError):
self.statistics.mode(data)
def test_simple_sampling_method(self):
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
size = 3
ans = [5, 6, 7]
self.assertEqual(self.statistics.simple_random_sample(data, size), ans)
def test_confidence_interval_method(self):
self.assertEqual(self.statistics.confidence_interval(240, 0.95, 25, 10), [222.1173198317478, 257.8826801682522])
def test_margin_of_error_method(self):
self.assertEqual(self.statistics.margin_of_err(1.645, 0.4, 900), 0.021933333333333336)
def test_cochran_method(self):
self.assertEqual(self.statistics.cochran(1.960, 0.5, 0.5, 0.05), 384.1599999999999)
def test_sample_size_ci_method(self):
self.assertEqual(self.statistics.sample_size_ci(0.95, 0.41, 0.06), 1032.536711111111)
def test_variance_method(self):
data = [600, 470, 170, 430, 300]
ans = stats.variance(data)
self.assertEqual(self.statistics.sample_variance(data), ans)
def test_standard_deviation_method(self):
data = [600, 470, 170, 430, 300]
ans = stats.stdev(data)
self.assertEqual(self.statistics.sample_standard_deviation(data), ans)
def test_z_score_method(self):
self.assertEqual(self.statistics.z_score(190, 150, 25), 1.6)
def test_z_score_list_method(self):
data = [0.7972, 0.0767, 0.4383]
ans = [1.2232121397887195, -1.2262718699883022, 0.0030597301995827185]
self.assertEqual(self.statistics.z_score_list(data), ans)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jerilseb/brainfuck-py",
"score": 4
}
|
#### File: jerilseb/brainfuck-py/brainfuck.py
```python
from sys import argv
def brainfuck(src, input):
tape_index = 0
tape = [0]
src_index = 0
src_length = len(src)
input_index = 0
while src_index < src_length:
character = src[src_index]
if character == ">":
tape_index += 1
while len(tape) <= tape_index:
tape.append(0)
elif character == "<":
if tape_index > 0:
tape_index -= 1
elif character == "+":
tape[tape_index] += 1
elif character == "-":
tape[tape_index] -= 1
elif character == ".":
print(chr(tape[tape_index]) + " (" + str(tape[tape_index]) + ")")
elif character == ",":
tape[tape_index] = ord(input[input_index])
input_index += 1
elif character == "[":
if tape[tape_index] == 0:
depth = 1
while depth > 0:
src_index += 1
if src[src_index] == "[":
depth += 1
elif src[src_index] == "]":
depth -= 1
elif character == "]":
depth = 1
while depth > 0:
src_index -= 1
if src[src_index] == "[":
depth -= 1
elif src[src_index] == "]":
depth += 1
src_index -= 1
src_index += 1
return tape
if __name__ == "__main__":
filename = argv[1]
input = argv[2]
with open(filename) as file:
src = file.read()
tape = brainfuck(src, input)
print(tape)
```
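A quick call of the interpreter above on a tiny hand-written program (the program and input are examples, not from the source):
```python
# ",+." reads one character, increments it, and prints it,
# so input "A" prints "B (66)"; the final tape is returned.
print(brainfuck(",+.", "A"))  # [66]
```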
|
{
"source": "Jerimat/MITx-6.86-MachineLearning_EdX",
"score": 3
}
|
#### File: mnist/part2-twodigit/conv.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from train_utils import batchify_data, run_epoch, train_model, Flatten
import utils_multiMNIST as U
path_to_data_dir = '../Datasets/'
use_mini_dataset = True
batch_size = 64
nb_classes = 10
nb_epoch = 30
num_classes = 10
img_rows, img_cols = 42, 28 # input image dimensions
class CNN(nn.Module):
def __init__(self, input_dimension):
super(CNN, self).__init__()
self.linear1 = nn.Linear(input_dimension, 64)
self.linear2 = nn.Linear(64, 64)
self.linear_first_digit = nn.Linear(64, 10)
self.linear_second_digit = nn.Linear(64, 10)
self.encoder = nn.Sequential(
nn.Conv2d(1, 8, (3, 3)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(8, 16, (3, 3)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
Flatten(),
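# 1x42x28 input -> conv3x3 -> 8x40x26 -> pool2 -> 8x20x13 -> conv3x3 -> 16x18x11 -> pool2 -> 16x9x5, flattened: 16*9*5 = 720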
nn.Linear(720, 128),
nn.Dropout(0.5)
)
self.first_digit_classifier = nn.Linear(128, 10)
self.second_digit_classifier = nn.Linear(128, 10)
def forward(self, x):
out = self.encoder(x)
out_first_digit = self.first_digit_classifier(out)
out_second_digit = self.second_digit_classifier(out)
return out_first_digit, out_second_digit
def main():
X_train, y_train, X_test, y_test = U.get_data(path_to_data_dir, use_mini_dataset)
# Split into train and dev
dev_split_index = int(9 * len(X_train) / 10)
X_dev = X_train[dev_split_index:]
y_dev = [y_train[0][dev_split_index:], y_train[1][dev_split_index:]]
X_train = X_train[:dev_split_index]
y_train = [y_train[0][:dev_split_index], y_train[1][:dev_split_index]]
permutation = np.array([i for i in range(len(X_train))])
np.random.shuffle(permutation)
X_train = [X_train[i] for i in permutation]
y_train = [[y_train[0][i] for i in permutation], [y_train[1][i] for i in permutation]]
# Split dataset into batches
train_batches = batchify_data(X_train, y_train, batch_size)
dev_batches = batchify_data(X_dev, y_dev, batch_size)
test_batches = batchify_data(X_test, y_test, batch_size)
# Load model
input_dimension = img_rows * img_cols
model = CNN(input_dimension)
# Train
train_model(train_batches, dev_batches, model)
## Evaluate the model on test data
loss, acc = run_epoch(test_batches, model.eval(), None)
print('Test loss1: {:.6f} accuracy1: {:.6f} loss2: {:.6f} accuracy2: {:.6f}'.format(loss[0], acc[0], loss[1], acc[1]))
if __name__ == '__main__':
# Specify seed for deterministic behavior, then shuffle. Do not change seed for official submissions to edx
np.random.seed(12321) # for reproducibility
torch.manual_seed(12321) # for reproducibility
main()
```
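A quick shape sanity check of the model above on a dummy batch (a minimal sketch; the batch size of 2 is arbitrary):
```python
model = CNN(img_rows * img_cols)
dummy = torch.zeros(2, 1, img_rows, img_cols)  # one-channel 42x28 images
out_first, out_second = model(dummy)
print(out_first.shape, out_second.shape)  # torch.Size([2, 10]) torch.Size([2, 10])
```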
|
{
"source": "Jerin111/chameleon",
"score": 2
}
|
#### File: Jerin111/chameleon/clustertools.py
```python
import numpy as np
from scipy.special import comb
def external_index(v1, v2):
TP, FN, FP, TN = confusion_index(v1, v2)
RI = (TP + TN) / (TP + FN + FP + TN)
ARI = 2 * (TP * TN - FN * FP) / ((TP + FN) * (FN + TN) + (TP + FP) * (FP + TN))
JI = TP / (TP + FN + FP)
FM = TP / np.sqrt((TP + FN) * (TP + FP))
return RI, ARI, JI, FM
def confusion_index(v1, v2):
cmatrix = contingency(v1, v2)
size = np.size(v1)
sum_rows = np.sum(cmatrix, 0)
sum_cols = np.sum(cmatrix, 1)
N = comb(size, 2)
TP = np.sum(list(map(lambda x: comb(x, 2), cmatrix)))
FN = np.sum(list(map(lambda x: comb(x, 2), sum_rows))) - TP
FP = np.sum(list(map(lambda x: comb(x, 2), sum_cols))) - TP
TN = N - TP - FN - FP
return TP, FN, FP, TN
def contingency(v1, v2):
res = np.zeros((np.max(v1), np.max(v2)))
for i in range(0, np.size(v1)):
res[v1[i] - 1, v2[i] - 1] = res[v1[i] - 1, v2[i] - 1] + 1
return res
```
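A tiny check of the indices above on two toy clusterings (contingency() expects 1-based integer labels); identical partitions should give all four indices equal to 1.0:
```python
labels_a = [1, 1, 2, 2]
labels_b = [1, 1, 2, 2]
RI, ARI, JI, FM = external_index(labels_a, labels_b)
print(RI, ARI, JI, FM)  # 1.0 1.0 1.0 1.0
```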
|
{
"source": "jerinisready/django-tenant-schema-demo",
"score": 2
}
|
#### File: django-tenant-schema-demo/root/public_urls.py
```python
import os
import debug_toolbar
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.shortcuts import render
from django.urls import path, include
from django.views.generic import TemplateView
urlpatterns = [
path('', include('apps.customer.public_urls')),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns.append(path('__debug__/', include(debug_toolbar.urls)))
urlpatterns += static(settings.STATIC_URL, document_root=os.path.join(settings.BASE_DIR, "assets"))
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
def error_404(request, exception=None):
return render(request, '404.html', status=404)
def error_500(request, exception=None):
return render(request, '500.html', status=500)
def error_400(request, exception=None):
return render(request, '400.html', status=400)
def error_403(request, exception=None):
return render(request, '403.html', status=403)
handler500 = error_500
handler403 = error_403
handler400 = error_400
handler404 = error_404
```
|
{
"source": "jerinisready/MindPower",
"score": 2
}
|
#### File: MindPower/app/models.py
```python
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from app.utils import answer_filter
class Event(models.Model):
name = models.CharField(max_length=32)
key = models.CharField(max_length=32, unique=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
starttime = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
self.key = self.key.upper()
super(Event, self).save(*args, **kwargs)
def __str__(self):
return "{0.name} ({0.key})".format(self)
class Question(models.Model):
image = models.ImageField()
answer = models.CharField(max_length=32)
event = models.ForeignKey(Event, on_delete=models.CASCADE, blank=True)
def save(self, *args, **kwargs):
self.answer = answer_filter(self.answer)
super(Question, self).save(*args, **kwargs)
class Player(models.Model):
username = models.CharField(max_length=30, unique=True)
score = models.FloatField(default=0)
def __str__(self):
return ' {"%s" : "%s"}' % (self.username, self.score)
class Input(models.Model):
user = models.ForeignKey(Player, on_delete=models.CASCADE)
word = models.CharField(max_length=32, unique=True)
def __str__(self):
return '{"%s" : "%s"}' % (self.user.username, self.word)
def save(self, *args, **kwargs):
self.word = answer_filter(self.word)
super(Input, self).save(*args, **kwargs)
```
#### File: MindPower/app/utils.py
```python
def answer_filter(answer):
return answer.replace(' ', '').replace('.', '').lower()
```
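For reference, a quick illustration of the normalisation the models above apply before saving (example string chosen arbitrarily):
```python
print(answer_filter('New York.'))  # 'newyork'
```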
|
{
"source": "jerinjohny-ktnm/Mikasa",
"score": 3
}
|
#### File: cinderella/modules/dictionary.py
```python
import requests
from telegram import Bot, Message, Update, ParseMode
from telegram.ext import CommandHandler, run_async
from cinderella import dispatcher
@run_async
def define(bot: Bot, update: Update, args):
msg = update.effective_message
word = " ".join(args)
res = requests.get(f"https://api.dictionaryapi.dev/api/v2/entries/en/{word}")
if res.status_code == 200:
info = res.json()[0].get("meaning")
if info:
meaning = ""
for count, (key, value) in enumerate(info.items(), start=1):
meaning += f"<b>{count}. {word}</b> <i>({key})</i>\n"
for i in value:
defs = i.get("definition")
meaning += f"• <i>{defs}</i>\n"
msg.reply_text(meaning, parse_mode=ParseMode.HTML)
else:
return
else:
msg.reply_text("No results found!")
__help__ = """
Ever stumbled upon a word that you didn't know of and wanted to look it up?
With this module, you can find the definitions of words without having to leave the app!
*Available commands:*
- /define <word>: returns the definition of the word.
"""
__mod_name__ = "Dictionary"
DEFINE_HANDLER = CommandHandler("define", define, pass_args=True)
dispatcher.add_handler(DEFINE_HANDLER)
```
#### File: cinderella/modules/lydia.py
```python
from time import time, sleep
from coffeehouse.lydia import LydiaAI
from coffeehouse.api import API
from coffeehouse.exception import CoffeeHouseError as CFError
from telegram import Message, Chat, User, Update, Bot
from telegram.ext import CommandHandler, MessageHandler, Filters, run_async
from cinderella import dispatcher, LYDIA_API, OWNER_ID
import cinderella.modules.sql.lydia_sql as sql
from cinderella.modules.helper_funcs.filters import CustomFilters
CoffeeHouseAPI = API(LYDIA_API)
api_client = LydiaAI(CoffeeHouseAPI)
@run_async
def add_chat(bot: Bot, update: Update):
global api_client
chat_id = update.effective_chat.id
msg = update.effective_message
is_chat = sql.is_chat(chat_id)
if not is_chat:
ses = api_client.create_session()
ses_id = str(ses.id)
expires = str(ses.expires)
sql.set_ses(chat_id, ses_id, expires)
msg.reply_text("Lydia successfully enabled for this chat!")
else:
msg.reply_text("Lydia is already enabled for this chat!")
@run_async
def remove_chat(bot: Bot, update: Update):
msg = update.effective_message
chat_id = update.effective_chat.id
is_chat = sql.is_chat(chat_id)
if not is_chat:
msg.reply_text("Lydia isn't enabled here in the first place!")
else:
sql.rem_chat(chat_id)
msg.reply_text("Lydia disabled successfully!")
def check_message(bot: Bot, message):
reply_msg = message.reply_to_message
if message.text.lower() == "mikasa":
return True
if reply_msg:
if reply_msg.from_user.id == bot.get_me().id:
return True
else:
return False
@run_async
def lydia(bot: Bot, update: Update):
global api_client
msg = update.effective_message
chat_id = update.effective_chat.id
is_chat = sql.is_chat(chat_id)
if not is_chat:
return
if msg.text and not msg.document:
if not check_message(bot, msg):
return
sesh, exp = sql.get_ses(chat_id)
query = msg.text
try:
if int(exp) < time():
ses = api_client.create_session()
ses_id = str(ses.id)
expires = str(ses.expires)
sql.set_ses(chat_id, ses_id, expires)
sesh, exp = sql.get_ses(chat_id)
except ValueError:
pass
try:
bot.send_chat_action(chat_id, action='typing')
rep = api_client.think_thought(sesh, query)
sleep(0.3)
msg.reply_text(rep, timeout=60)
except CFError as e:
bot.send_message(OWNER_ID, f"lydia error: {e} occurred in {chat_id}!")
__mod_name__ = "Lydia/Chatbot"
__help__ = """
Commands
- /elydia : Enables Lydia mode in the chat.
- /dlydia : Disables Lydia mode in the chat.
"""
ADD_CHAT_HANDLER = CommandHandler("elydia", add_chat, filters=CustomFilters.dev_filter)
REMOVE_CHAT_HANDLER = CommandHandler("dlydia", remove_chat, filters=CustomFilters.dev_filter)
LYDIA_HANDLER = MessageHandler(Filters.text & (~Filters.regex(r"^#[^\s]+") & ~Filters.regex(r"^!")
& ~Filters.regex(r"^s\/")), lydia)
# Filters for ignoring #note messages, !commands and sed.
dispatcher.add_handler(ADD_CHAT_HANDLER)
dispatcher.add_handler(REMOVE_CHAT_HANDLER)
dispatcher.add_handler(LYDIA_HANDLER)
```
#### File: cinderella/modules/music.py
```python
import html
import time
import datetime
from telegram.ext import CommandHandler, run_async, Filters
import requests, logging
from telegram.ext import Updater, MessageHandler, Filters, CommandHandler
from telegram import Message, Chat, Update, Bot, MessageEntity
from cinderella import dispatcher, OWNER_ID, SUDO_USERS, SUPPORT_USERS, WHITELIST_USERS, BAN_STICKER
from telegram import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton
from cinderella.modules.helper_funcs.chat_status import user_admin, sudo_plus
# Credit: @meanii
# Only gay will remove this lines
count = 0
@run_async
def music(bot: Bot, update: Update, args):
message = update.effective_message
global count
chatId = update.message.chat_id
video_id = ''.join(args)
if video_id.find('youtu.be') != -1:
index = video_id.rfind('/') + 1
video_id = video_id[index:][:11]
message.reply_text("Please wait...\nDownloading audio.")
elif video_id.find('youtube') != -1:
index = video_id.rfind('?v=') + 3
video_id = video_id[index:][:11]
message.reply_text("Please wait...\nDownloading audio.")
else:
message.reply_text("Please provide a YouTube link")
return
r = requests.get(f'https://api.pointmp3.com/dl/{video_id}?format=mp3')
json1_response = r.json()
if not json1_response['error']:
redirect_link = json1_response['url']
r = requests.get(redirect_link)
json2_response = r.json()
if not json2_response['error']:
payload = json2_response['payload']
info = '*{0}* \nUploaded by CINDERELLA'.format(payload['fulltitle'])
try:
bot.send_audio(chat_id=chatId, audio=json2_response['url'], parse_mode='Markdown', caption=info)
count += 1
print("\033[1m\033[96m" + "Download count: " + str(count) + "\033[0m")
except:
bot.send_message(chat_id=chatId, text="""The API we use to download music has been down for weeks...
It will be back up soon.""")
__help__ = """ Youtube audio Downloader
- /music <Youtube link> : Bot can download audio file from youtube link.
⚠️Currently not working, sorry for the inconvenience.
"""
__mod_name__ = "MP3 Downloader"
music_handler = CommandHandler('music', music, pass_args=True)
dispatcher.add_handler(music_handler)
```
|
{
"source": "jerinka/face_mask_detection",
"score": 3
}
|
#### File: face_mask_detection/Face_detect_opencv/face_det.py
```python
import numpy as np
import argparse
import cv2
import pprint
import os
class FaceDet:
def __init__(self):
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
self.path = os.path.abspath(os.path.dirname(__file__))
ap.add_argument("-i", "--image", default=self.abspath('test_image01.jpg'), help="patho to input image")
ap.add_argument("-p", "--prototxt", default=self.abspath('deploy.prototxt.txt'), help="path to Caffee 'deploy' prototxt file")
ap.add_argument("-m", "--model", default=self.abspath('res10_300x300_ssd_iter_140000.caffemodel'), help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
self.args = vars(ap.parse_args())
# load model from disk
print("[INFO] loading from model...")
self.net = cv2.dnn.readNetFromCaffe(self.args["prototxt"], self.args["model"])
def abspath(self,filename):
return os.path.join(self.path, filename)
def detect(self,image=None,show=True):
# load the input image and construct an input blob for the image and resize image to
# fixed 300x300 pixels and then normalize it
if image is None:
image = cv2.imread(self.args["image"])
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (103.93, 116.77, 123.68))
# pass the blob through the network and obtain the detections and
# predictions
print("[INFO] computing object detections...")
self.net.setInput(blob)
detections = self.net.forward()
dets={}
det_count=0
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > self.args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for the
# object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
crop = image[startY:endY, startX:endX]
#import pdb;pdb.set_trace()
dets[det_count] = {'class_id':'face', 'startX':startX,'startY':startY,'endX':endX,'endY':endY,'confidence':confidence, 'crop':crop}
# draw the bounding box of the face along with the associated
# probability
text = "{:.2f}%".format(confidence * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY),
(0, 0, 255), 2)
cv2.putText(image, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
det_count+=1
if show==True:
# show the output image
cv2.imshow("Output", image)
cv2.waitKey(500)
return dets
if __name__=='__main__':
face_obj = FaceDet()
dets = face_obj.detect(image=None)
pprint.pprint(dets)
for det_count,box in dets.items():
crop = box['crop']
cv2.imshow('crop',crop)
cv2.waitKey(0)
```
#### File: face_mask_detection/MaskClassifier/classifier.py
```python
import tensorflow as tf
import cv2
import numpy as np
from keras.preprocessing import image
from PIL import Image
import os
class MaskClassifier:
def __init__(self):
self.model=tf.keras.models.load_model(self.abspath('model.h5'))
self.img_height, self.img_width = 128, 128
self.classes = ['yesmask','nomask']
def abspath(self,filename):
self.path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(self.path, filename)
def predict_opencv_image(self, img):
# convert the color from BGR to RGB then convert to PIL array
cvt_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
im_pil = Image.fromarray(cvt_image)
# resize the array (image) then PIL image
im_resized = im_pil.resize((self.img_width, self.img_height))
img_array = image.img_to_array(im_resized)
return self.predict(img_array)
def predict(self,img_array=None,th=.5):
if img_array is None:
#sunflower_path = 'DB/mask_nomask/nomask/0_0_chenxiang_0006.jpg'
sunflower_path = 'DB/mask_nomask/mask/0_0_0 copy 4.jpg'
img = tf.keras.preprocessing.image.load_img(
sunflower_path, target_size=(self.img_height, self.img_width)
)
img_array = tf.keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = self.model.predict(img_array)
score = tf.nn.softmax(predictions)
print( score)
classid = score[0].numpy().argmax()
confidence = score[0].numpy()[classid]
print(self.classes[classid])
#import pdb;pdb.set_trace()
return self.classes[classid], confidence
if __name__ == '__main__':
class_obj = MaskClassifier()
#img = cv2.imread('DB/mask_nomask/mask/0_0_0 copy 4.jpg')
img = cv2.imread('DB/mask_nomask/nomask/0_0_chenxiang_0006.jpg')
classname,confidence = class_obj.predict_opencv_image (img)
print(classname,confidence)
```
#### File: jerinka/face_mask_detection/mask_det.py
```python
import numpy as np
import argparse
import cv2
import pprint
import os
from Face_detect_opencv.face_det import FaceDet
from Person_MobilNet_SSD_opencv.person_det import PersonDet
from MaskClassifier.classifier import MaskClassifier
class MaskDet:
def __init__(self):
self.path = os.path.abspath(os.path.dirname(__file__))
# construct the argument parse
parser = argparse.ArgumentParser(
description='Script to do mask detection')
parser.add_argument("--image", default= self.abspath("images/Trump.jpg"), help="path to image input")
self.args = parser.parse_args()
# Person detection
self.person_obj = PersonDet()
#Face detection
self.face_obj = FaceDet()
#face-mask classifier
self.mask_classifier = MaskClassifier()
def abspath(self,filename):
return os.path.join(self.path, filename)
def detect(self, frame=None):
if frame is None:
frame = cv2.imread(self.args.image)
#import pdb;pdb.set_trace()
dets1 = self.person_obj.detect(frame=frame.copy())
pprint.pprint(dets1)
detsp={}
det_countp=0
for det_countp,boxp in dets1.items():
crop = boxp['crop']
H,W,_=crop.shape
H=min(H,W)
W=H
crop = crop[0:H,0:W]
#cv2.imshow('crop1',crop)
#cv2.waitKey(100)
x1p = boxp['xLeftBottom_']
y1p = boxp['yLeftBottom_']
x2p = boxp['xRightTop_']
y2p = boxp['yRightTop_']
dets2 = self.face_obj.detect(image=crop)
pprint.pprint(dets2)
detsf={}
det_countf=0
maskstatus=True
for det_countf,boxf in dets2.items():
x1 = boxp['xLeftBottom_']+boxf['startX']
y1 = boxp['yLeftBottom_']+boxf['startY']
x2 = boxp['xLeftBottom_']+boxf['endX']
y2 = boxp['yLeftBottom_']+boxf['endY']
crop = frame[y1:y2,x1:x2]
classname,confidence = self.mask_classifier.predict_opencv_image(crop)
print('classname: ',classname)
if classname=='nomask':
maskstatus = False
#import pdb;pdb.set_trace()
detsf[det_countf] = {'class_id':'face', 'x1':x1,'y1':y1,'x2':x2,'y2':y2, 'crop':crop,'status':classname,'confidence':confidence}
det_countf+=1
if classname=='yesmask':
cv2.rectangle(frame, (x1, y1), (x2, y2),(0, 255, 0),2)
else:
cv2.rectangle(frame, (x1, y1), (x2, y2),(0, 0, 255),2)
cv2.imshow('frame4',frame)
cv2.waitKey(100)
if maskstatus==True:
cv2.rectangle(frame, (x1p, y1p), (x2p, y2p),(0, 255, 0),2)
else:
cv2.rectangle(frame, (x1p, y1p), (x2p, y2p),(0, 0, 255),2)
detsp[det_countp]={'class':'person','x1p':x1p,'y1p':y1p,'x2p':x2p,'y2p':y2p,'faces':detsf,'maskstatus':maskstatus}
cv2.imshow('frame4',frame)
cv2.waitKey(200)
return detsp
if __name__ == '__main__':
appobj=MaskDet()
detsp = appobj.detect()
pprint.pprint(detsp)
```
|
{
"source": "jerinka/image_morphing",
"score": 3
}
|
#### File: jerinka/image_morphing/face_landmarks.py
```python
import cv2
import numpy as np
import dlib
import time
class FaceLandMarkPts:
def __init__(self):
# Landmark detector
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
def get_landmark_pts(self, img):
# Face 1
img1_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = self.detector(img1_gray)
face = faces[0]
landmarks = self.predictor(img1_gray, face)
landmarks_points = []
for n in range(0, 68):
x = landmarks.part(n).x
y = landmarks.part(n).y
landmarks_points.append((x, y))
return landmarks_points
```
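A minimal usage sketch for `FaceLandMarkPts`, assuming `shape_predictor_68_face_landmarks.dat` sits in the working directory and the input image contains at least one detectable face; the file name `face.jpg` is only a placeholder.
```python
import cv2
from face_landmarks import FaceLandMarkPts

if __name__ == '__main__':
    img = cv2.imread('face.jpg')  # placeholder path, replace with a real image
    landmark_obj = FaceLandMarkPts()
    pts = landmark_obj.get_landmark_pts(img)
    # draw the 68 landmark points on the image
    for (x, y) in pts:
        cv2.circle(img, (x, y), 2, (0, 255, 0), -1)
    cv2.imshow('landmarks', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
```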
#### File: jerinka/image_morphing/morph_simple.py
```python
import cv2
import numpy as np
import time
def get_triangulation_indices(points):
"""Get indices triples for every triangle
"""
# Bounding rectangle
bounding_rect = (*points.min(axis=0), *points.max(axis=0))
# Triangulate all points
subdiv = cv2.Subdiv2D(bounding_rect)
#import pdb;pdb.set_trace()
pt_list = [(int(p[0]),int(p[1])) for p in points]
subdiv.insert(pt_list)
# Iterate over all triangles
for x1, y1, x2, y2, x3, y3 in subdiv.getTriangleList():
# Get index of all points
yield [(points==point).all(axis=1).nonzero()[0][0] for point in [(x1,y1), (x2,y2), (x3,y3)]]
def crop_to_triangle(img, triangle):
"""Crop image to triangle
"""
# Get bounding rectangle
bounding_rect = cv2.boundingRect(triangle)
# Crop image to bounding box
img_cropped = img[bounding_rect[1]:bounding_rect[1] + bounding_rect[3],
bounding_rect[0]:bounding_rect[0] + bounding_rect[2]]
# Move triangle to coordinates in cropped image
triangle_cropped = [(point[0]-bounding_rect[0], point[1]-bounding_rect[1]) for point in triangle]
return triangle_cropped, img_cropped
def transform(src_img, src_points, dst_img, dst_points):
"""Transforms source image to target image, overwriting the target image.
"""
src_points = np.array(src_points, np.int32)
dst_points = np.array(dst_points, np.int32)
for indices in get_triangulation_indices(src_points):
# Get triangles from indices
#import pdb;pdb.set_trace()
src_triangle = src_points[indices]
dst_triangle = dst_points[indices]
# Crop to triangle, to make calculations more efficient
src_triangle_cropped, src_img_cropped = crop_to_triangle(src_img, src_triangle)
dst_triangle_cropped, dst_img_cropped = crop_to_triangle(dst_img, dst_triangle)
# Calculate transform to warp from old image to new
transform = cv2.getAffineTransform(np.float32(src_triangle_cropped), np.float32(dst_triangle_cropped))
# Warp image
dst_img_warped = cv2.warpAffine(src_img_cropped, transform, (dst_img_cropped.shape[1], dst_img_cropped.shape[0]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )
# Create mask for the triangle we want to transform
mask = np.zeros(dst_img_cropped.shape, dtype = np.uint8)
cv2.fillConvexPoly(mask, np.int32(dst_triangle_cropped), (1.0, 1.0, 1.0), 16, 0);
# Delete all existing pixels at given mask
dst_img_cropped*=1-mask
# Add new pixels to masked area
dst_img_cropped+=dst_img_warped*mask
if __name__ == "__main__":
from mouse_pts import MousePtsThread
# Inputs
src_img = cv2.imread("images/bradley_cooper.jpg")
dst_img = cv2.imread("images/jim_carrey.jpg")
mouseobj1 = MousePtsThread(src_img,win='src',pts_name='pts1.npy')
mouseobj2 = MousePtsThread(dst_img,win='dst',pts_name='pts2.npy')
mouseobj1.start()
mouseobj2.start()
while True:
# Main thread logic can go here
time.sleep(1)
if not(mouseobj1.is_alive() or mouseobj2.is_alive()):
break
mouseobj1.join()
mouseobj2.join()
src_points = mouseobj1.get_pts()
dst_points = mouseobj2.get_pts()
print('Final src pts:',src_points)
print('Final dst pts:',dst_points)
#import pdb;pdb.set_trace()
# # Landmark detector
# landmark_obj = FaceLandMarkPts()
# src_points = landmark_obj.get_landmark_pts(src_img)
# dst_points = landmark_obj.get_landmark_pts(dst_img)
# Apply transformation
transform(src_img, src_points, dst_img, dst_points)
# Show result
cv2.imshow("Transformed", dst_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
#### File: jerinka/image_morphing/process_class.py
```python
import multiprocessing as mp
from multiprocessing import Queue
import cv2
import time
class Worker(mp.Process):
def __init__(self, img, q):
print ("Init")
self.img = img
mp.Process.__init__(self)
self.q = q
self.count=0
def run(self):
while True:
cv2.imshow('img',self.img)
k=cv2.waitKey(30)
if not self.q.empty():
self.q.get()
self.q.put([self.count])
self.count+=1
if k==27:
break
print("Stopping thread process")
if __name__=='__main__':
img1 = cv2.imread("images/bradley_cooper.jpg")
img2 = cv2.imread("images/jim_carrey.jpg")
q1 = Queue(maxsize=1)
q2 = Queue(maxsize=1)
p1 = Worker(img1, q1)
p2 = Worker(img2, q2)
p1.start()
p2.start()
while (p1.is_alive() or p2.is_alive()):
if not q1.empty():
print('q1:',q1.get())
if not q2.empty():
print('q2:',q2.get())
#import pdb;pdb.set_trace()
time.sleep(1)
print("Stoping main thread while loop")
p1.join()
p2.join()
```
|
{
"source": "jerinka/LSTM_Video_classifier",
"score": 3
}
|
#### File: jerinka/LSTM_Video_classifier/convLSTM.py
```python
import numpy as np, scipy.ndimage, matplotlib.pyplot as plt
from keras.models import Sequential,Model
from keras.layers import Dense, Dropout, Activation, Flatten,Input,TimeDistributed,BatchNormalization
from keras.layers import Convolution2D, ConvLSTM2D, MaxPooling2D, UpSampling2D,GlobalAveragePooling2D,AveragePooling3D,Reshape
from sklearn.metrics import accuracy_score, confusion_matrix, cohen_kappa_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
np.random.seed(123)
raw = np.arange(96).reshape(8,3,4)
data1 = scipy.ndimage.zoom(raw, zoom=(1,100,100), order=1, mode='nearest') #low res
print (data1.shape)
#(8, 300, 400)
data2 = np.arange(9)
print (data2.shape)
#(9,)
X_train = data1.reshape(1, data1.shape[0], data1.shape[1], data1.shape[2], 1)
Y_train = data2.reshape(1, data2.shape[0])
#(samples,time, rows, cols, channels)
print (X_train.shape)
print (Y_train.shape)
import pdb;pdb.set_trace()
def getmodel1():
model = Sequential()
input_shape = (data1.shape[0], data1.shape[1], data1.shape[2], 1)
#samples, time, rows, cols, channels
model.add(ConvLSTM2D(16, kernel_size=(3,3), activation='sigmoid',padding='same',input_shape=input_shape,
return_sequences=True))
model.add(ConvLSTM2D(8, kernel_size=(3,3), activation='sigmoid',padding='same'))
model.add(GlobalAveragePooling2D())
model.add(Dense(10, activation='softmax')) # output shape: (None, 10)
print (model.summary())
return model
'''
x = Input(shape=(300, 400, 8))
y = GlobalAveragePooling2D()(x)
y = Dense(10, activation='softmax')(y)
classifier = Model(inputs=x, outputs=y)
x = Input(shape=(data1.shape[0], data1.shape[1], data1.shape[2], 1))
y = ConvLSTM2D(16, kernel_size=(3, 3),
activation='sigmoid',
padding='same',
return_sequences=True)(x)
y = ConvLSTM2D(8, kernel_size=(3, 3),
activation='sigmoid',
padding='same',
return_sequences=True)(y)
y = TimeDistributed(classifier)(y) # output shape: (None, 8, 10)
model = Model(inputs=x, outputs=y)
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['accuracy'])
'''
def getmodel2():
model = Sequential()
input_shape = (data1.shape[0], data1.shape[1], data1.shape[2], 1)
model.add(ConvLSTM2D(16, kernel_size=(3, 3), activation='sigmoid', padding='same',
input_shape=input_shape,
return_sequences=True))
model.add(ConvLSTM2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same',
return_sequences=True))
model.compile(loss='mse', optimizer='adam')
return model
def getmodel3():
model = Sequential()
model.add(ConvLSTM2D(
filters=40,
kernel_size=(3, 3),
input_shape=(None, 300, 400, 1),
padding='same',
return_sequences=True))
model.add(BatchNormalization())
model.add(ConvLSTM2D(
filters=40,
kernel_size=(3, 3),
padding='same',
return_sequences=True))
model.add(BatchNormalization())
model.add(ConvLSTM2D(
filters=40,
kernel_size=(3, 3),
padding='same',
return_sequences=True))
model.add(BatchNormalization())
model.add(AveragePooling3D((1, 135, 240)))
model.add(Reshape((-1, 40)))
model.add(Dense(
units=9,
activation='sigmoid'))
model.compile(
loss='categorical_crossentropy',
optimizer='adadelta'
)
print(model.summary())
return model
model = getmodel3()
model.fit(X_train, Y_train,
batch_size=1, epochs=10, verbose=1)
model.save('model.h5')
import pdb;pdb.set_trace()
loss = model.evaluate(X_train, Y_train, verbose=0)
print (loss)
```
|
{
"source": "jerinka/ml_utils",
"score": 3
}
|
#### File: cv_basic_utils/video_utils/h265write.py
```python
import cv2
import numpy as np
import subprocess as sp
import shlex
class H265Writer:
def __init__(self, outfile,fps, width, height):
self.outfile = outfile
self.fourcc = cv2.VideoWriter_fourcc(*'H265')
self.fps = fps
self.width = width
self.height = height
# Open ffmpeg application as sub-process
# FFmpeg input PIPE: RAW images in BGR color format
# FFmpeg output MP4 file encoded with HEVC codec.
# Arguments list:
# -y Overwrite output file without asking
# -s {width}x{height} Input resolution width x height (1344x756)
# -pixel_format bgr24 Input frame color format is BGR with 8 bits per color component
# -f rawvideo Input format: raw video
# -r {fps} Frame rate: fps (25fps)
# -i pipe: ffmpeg input is a PIPE
# -vcodec libx265 Video codec: H.265 (HEVC)
# -pix_fmt yuv420p Output video color space YUV420 (saving space compared to YUV444)
# -crf 24 Constant quality encoding (lower value for higher quality and larger output file).
# {output_filename} Output file name: output_filename (output.mp4)
self.process = sp.Popen(shlex.split(f'ffmpeg -y -s {width}x{height} -pixel_format bgr24 -f rawvideo -r {fps} -i pipe: -vcodec libx265 -pix_fmt yuv420p -crf 24 {self.outfile}'), stdin=sp.PIPE)
def write(self, frame):
# Write raw video frame to input stream of ffmpeg sub-process.
self.process.stdin.write(frame.tobytes())
def close(self):
# Close and flush stdin
self.process.stdin.close()
# Wait for sub-process to finish
self.process.wait()
# Terminate the sub-process
self.process.terminate()
def __del__(self):
self.close()
if __name__=='__main__':
width, height, n_frames, fps = 1344, 756, 50, 25 # 50 frames, resolution 1344x756, and 25 fps
output_filename = 'output.mp4'
cap = H265Writer(output_filename, fps, width, height)
# Build synthetic video frames and write them to ffmpeg input stream.
for i in range(n_frames):
# Build synthetic image for testing ("render" a video frame).
img = np.full((height, width, 3), 60, np.uint8)
cv2.putText(img, str(i+1), (width//2-100*len(str(i+1)), height//2+100), cv2.FONT_HERSHEY_DUPLEX, 10, (255, 30, 30), 20) # Blue number
# Write raw video frame to input stream of ffmpeg sub-process.
cap.write(img)
cap.close()
```
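A small sanity check on the writer above, assuming ffmpeg with libx265 is on PATH and your OpenCV build can decode HEVC; it simply reads `output.mp4` back and counts the decoded frames.
```python
import cv2

cap = cv2.VideoCapture('output.mp4')
count = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    count += 1
cap.release()
print('decoded frames:', count)  # expected to match n_frames (50) from the test above
```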
|
{
"source": "jerinka/python_modules",
"score": 2
}
|
#### File: python_modules/A/mainA.py
```python
from module1.script1 import hello1
from module2.script1 import hello2
def hello():
hello1()
hello2()
if __name__=='__main__':
hello()
```
|
{
"source": "jerinka/ray_basics",
"score": 3
}
|
#### File: jerinka/ray_basics/A1_simple_task.py
```python
import ray
ray.init()
# A regular Python function.
def my_function():
return 1
# By adding the `@ray.remote` decorator, a regular Python function
# becomes a Ray remote function.
@ray.remote
def my_function():
return 1
# To invoke this remote function, use the `remote` method.
# This will immediately return an object ref (a future) and then create
# a task that will be executed on a worker process.
obj_ref = my_function.remote()
# The result can be retrieved with ``ray.get``.
assert ray.get(obj_ref) == 1
```
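A small extension of the same idea, assuming Ray is already initialized as above: launching several tasks before calling `ray.get` lets them execute in parallel on worker processes.
```python
# each .remote() call returns a future immediately
refs = [my_function.remote() for _ in range(4)]
# ray.get blocks until all four results are ready
results = ray.get(refs)
assert results == [1, 1, 1, 1]
```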
#### File: jerinka/ray_basics/A5_remote_objects.py
```python
import numpy as np
import ray
import time
ray.init()
def create_matrix1(size):
return np.random.normal(size=size)
@ray.remote
def create_matrix2(size):
return np.random.normal(size=size)
@ray.remote
def multiply_matrices(x, y):
return np.dot(x, y)
start = time.time()
x_id = create_matrix2.remote([1000, 1000])
y_id1 = create_matrix1([1000, 1000])
y_id = ray.put(y_id1)
z_id = multiply_matrices.remote(x_id, y_id)
# Get the results.
z = ray.get(z_id)
print('z:',z)
print('Time=',time.time()-start)
```
|
{
"source": "jerinka/register_net",
"score": 3
}
|
#### File: jerinka/register_net/reg_cnn.py
```python
import tensorflow as tf
import cv2
#from tensorflow import keras
import keras
from keras import layers
from datagen import DataGenerator
from keras.applications import resnet_v2
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
# your code
def get_base_Resnet(input_shape=(128,128,3)):
model1 = resnet_v2.ResNet50V2(include_top=False, weights='imagenet', input_shape=input_shape)
layer_name = 'conv2_block1_2_relu'
model2= keras.Model(inputs=model1.input, outputs=model1.get_layer(layer_name).output)
return model2
def get_base_model(input_shape=(128,128,3)):
input = layers.Input(shape=input_shape)
x = layers.Conv2D(64,5,activation='relu',strides=2,padding='valid')(input)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(128,5,activation='relu',strides=2,padding='valid')(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(256,5,activation='relu',strides=2,padding='valid')(x)
output = layers.BatchNormalization()(x)
return keras.Model(input, output,name="base_model")
def get_model(input_shape=(128,128,3)):
input1 = layers.Input(shape=input_shape)
input2 = layers.Input(shape=input_shape)
basemodel = get_base_Resnet() #get_base_model()
basemodel.trainable = False
basemodel.summary()
feat1 = basemodel(input1)
feat2 = basemodel(input2)
x = layers.Concatenate(axis=-1)([feat1, feat2])
x = layers.Conv2D(256, 3,strides=1,padding='valid',activation='relu')(x)
x = layers.Conv2D(256, 3,strides=1,padding='valid',activation='relu')(x)
x = layers.GlobalMaxPooling2D()(x)
#x = layers.Flatten()(x)
x = layers.Dense(128,activation='sigmoid')(x)
x = layers.Dense(128,activation='sigmoid')(x)
output = layers.Dense(1, activation='linear')(x)
return keras.Model([input1, input2], output)
if __name__=='__main__':
input_shape=(128,128,3)
base=get_base_model()
base.summary()
model = get_model()
model.summary()
if 1:
checkpoint_path = 'weights/'
model = tf.keras.models.load_model(checkpoint_path)
#model.build(input_shape)
datagen = DataGenerator(path = 'cat_dog/cats_and_dogs_filtered/train')
batch_x, batch_y = datagen.__getitem__(0)
print('batchx[0]',batch_x[0].shape)
print('actual batchy:',batch_y)
print('predicted y:',model.predict(batch_x))
import pdb;pdb.set_trace()
```
|
{
"source": "jerinka/RetinaNet_mask",
"score": 3
}
|
#### File: RetinaNet_mask/keras-retinanet/xml2csv.py
```python
import cv2
import os
import pandas as pd
import xml.etree.ElementTree as ET
import csv
def xml2df(xml_path):
"""Convert XML to CSV
Args:
xml_path (str): Location of annotated XML file
Returns:
pd.DataFrame: converted csv file
"""
print("xml to csv {}".format(xml_path))
xml_list = []
xml_df=pd.DataFrame()
base = os.path.split(xml_path)[0]
#try:
if 1:
tree = ET.parse(xml_path)
root = tree.getroot()
for member in root.findall('object'):
value = (os.path.join(base,root.find('filename').text),
#int(root.find('size')[0].text),
#int(root.find('size')[1].text),
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text),
member[0].text
)
xml_list.append(value)
column_name = ['image_name', 'x_min', 'y_min','x_max','y_max','class_name']
xml_df = pd.DataFrame(xml_list, columns=column_name)
#except Exception as e:
# print('xml conversion failed:{}'.format(e))
# return pd.DataFrame(columns=['filename', 'xmin', 'ymin', 'xmax', 'ymax', 'class'])
return xml_df
def df2csv(df):
with open('innovators.csv', 'w', newline='') as file:
writer = csv.writer(file)
for row in df.iterrows():
print(row)
#print( row['filename'], row['xmin'], row['ymin'], row['xmax'], row['ymax'] , row['class'])
#writer.writerow([row['filename'], row['xmin'], row['ymin'], row['xmax'], row['ymax'] , row['class']])
def xmls2csv(xml_dir):
cwd = os.getcwd()
df = pd.DataFrame()
for (root,dirs,files) in os.walk(xml_dir, topdown=True):
appen = root.split(os.sep) [-(len(root.split(os.sep))-len(cwd.split(os.sep))) :]
#import pdb;pdb.set_trace()
appen = os.path.join(*appen)
#print('appen',appen)
for file_ in files:
if file_.endswith('.xml'):
file_ = os.path.join(appen, file_)
print(file_)
xml_df= xml2df(file_)
df = df.append(xml_df, ignore_index=True)
return df
if __name__=='__main__':
xml_dir = 'maskdb/train/'
xml_dir=os.path.join(os.getcwd(),xml_dir)
df = xmls2csv(xml_dir)
df.to_csv('alldata.csv',index=False)#header=False
print('xml_df',df)
#import pdb;pdb.set_trace()
```
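For reference, `xml2df` above assumes Pascal-VOC style annotations in which `bndbox` is the fifth child of each `object` element (hence `member[4][0]`..`member[4][3]` for xmin/ymin/xmax/ymax and `member[0]` for the class name). A name-based lookup is a more robust alternative; the sketch below is only an illustration, not what the repository uses.
```python
import xml.etree.ElementTree as ET

def parse_objects_by_name(xml_path):
    """Read boxes by tag name instead of positional indexing."""
    tree = ET.parse(xml_path)
    root = tree.getroot()
    rows = []
    for member in root.findall('object'):
        bndbox = member.find('bndbox')
        rows.append((
            root.find('filename').text,
            int(bndbox.find('xmin').text),
            int(bndbox.find('ymin').text),
            int(bndbox.find('xmax').text),
            int(bndbox.find('ymax').text),
            member.find('name').text,
        ))
    return rows
```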
|
{
"source": "jerinka/traffic_sign_frcnn",
"score": 2
}
|
#### File: jerinka/traffic_sign_frcnn/cleanup.py
```python
import os
import numpy as np
import shutil
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
if 1: #(fabric case)
src_images_path = 'data/dataset_fabric_defect'
img_ext = ['.tif']
dst_image_p = 'data/allimages'
dst_label_p = 'data/alllabels'
mkdir(dst_image_p)
mkdir(dst_label_p)
cnt=0
# Cleanup mixed images and labels
for dirname, _, filenames in os.walk(src_images_path):
for filename in filenames:
filepath = os.path.join(dirname, filename)
#import pdb;pdb.set_trace()
if os.path.splitext(filepath)[1] in img_ext:
img_path = filepath
annot_path = os.path.splitext(img_path)[0]+'.xml'
if os.path.isfile(annot_path):
dst_image = os.path.splitext(filename)[0]+'_'+str(cnt).zfill(3)+'.tif'
dst_label = os.path.splitext(filename)[0]+'_'+str(cnt).zfill(3)+'.xml'
dst_image = os.path.join(dst_image_p, dst_image)
dst_label = os.path.join(dst_label_p, dst_label)
shutil.copy(img_path, dst_image)
shutil.copy(annot_path, dst_label)
print('copied :' ,dst_image)
cnt+=1
if 0:
src_images_path = 'data/dataset_fabric_defect'
img_ext = ['.tif']
# Cleanup
for dirname, _, filenames in os.walk(src_images_path):
for filename in filenames:
filepath = os.path.join(dirname, filename)
import pdb;pdb.set_trace()
if os.path.splitext(filepath)[1] in img_ext:
annot_path = os.path.splitext(img_path)[0]+'.xml'
head,tail1 = os.path.split(annot_path)
head,tail2 = os.path.split(head)
annot_path = os.path.join(head, 'annotations',tail1)
if not os.path.isfile(annot_path):
os.remove(img_path)
print('removed:' ,img_path)
```
|
{
"source": "jerinka/Trapezoid_Crop",
"score": 3
}
|
#### File: jerinka/Trapezoid_Crop/MousePts.py
```python
import cv2
import numpy as np
import copy
#events = [i for i in dir(cv2) if 'EVENT' in i]
#print (events)
def put_text(image,txt,x0=50,y0=50):
h,w=image.shape[:2]
# font
font = cv2.FONT_HERSHEY_SIMPLEX
# fontScale
fontScale = int(h/500)
# org
org = (x0, y0+2*fontScale)
# Blue color in BGR
color = (0, 255, 0)
# Line thickness of 2 px
thickness = max(1,int(h/500))
# Using cv2.putText() method
image = cv2.putText(image, txt, org, font,fontScale, color, thickness, cv2.LINE_AA)
return image
class MousePts:
def __init__(self,img=None, windowname='image'):
self.windowname = windowname
if img is not None:
self.img1 = img.copy()
self.img = self.img1.copy()
cv2.namedWindow(windowname,cv2.WINDOW_NORMAL)
cv2.imshow(windowname,img)
self.curr_pt = []
self.point = []
self.ix = -1
self.iy = -1
self.drawing = False
self.callback_flag = True
self.pts=[]
def select_point(self,event,x,y,flags,param):
if event == cv2.EVENT_LBUTTONDOWN:
self.point.append([x,y])
#print(self.point)
cv2.circle(self.img,(x,y),3,(0,255,0),2)
elif event == cv2.EVENT_MOUSEMOVE:
self.curr_pt = [x,y]
#print(self.point)
def select_roi(self,img):
'''select points in image and returns roi'''
cv2.namedWindow("Image",cv2.WINDOW_NORMAL)
cv2.setMouseCallback("Image", self.draw_rectangle_with_drag)
while True:
img_copy = img.copy()
txt="Click on TL, TR, BR, BL points"
img_copy = put_text(img_copy, txt, 50, img.shape[0]//2)
if len(self.pts) > 0:
points = np.array(self.pts, np.int32)
cv2.polylines(img_copy, [points], False, (255, 255, 255), 2)
cv2.line(img_copy, tuple(self.curr_pt),tuple(self.pts[-1]), (255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
if len(self.pts)==4:
cv2.line(img_copy, tuple(self.pts[-1]), tuple(self.pts[0]), (255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
cv2.namedWindow("Image",cv2.WINDOW_NORMAL)
cv2.imshow("Image", img_copy)
k = cv2.waitKeyEx(500)
break
cv2.namedWindow("Image",cv2.WINDOW_NORMAL)
cv2.imshow("Image", img_copy)
k = cv2.waitKeyEx(30)
if k == 27:
break
cv2.destroyWindow(self.windowname)
return points
def draw_rectangle_with_drag(self,event, x, y, flags, param):
if self.callback_flag:
#global ix, iy, drawing, img,curr_pt
if event == cv2.EVENT_LBUTTONDOWN:
self.drawing = True
self.ix = x
self.iy = y
self.pts.append([int(x), int(y)])
if event == cv2.EVENT_MOUSEMOVE:
self.curr_pt = (x,y)
def draw(self,closed=False):
"""
The function to draw a polygon on any image.
Parameters:
-----------
image: It is the image on which circle is to be drawn.
pts: Array of polygonal curves.
isClosed: Flag indicating whether the drawn polylines are closed or not.
color: It is the color of polyline to be drawn. For BGR, we pass a tuple.
thickness: It is thickness of the polyline edges.
Returns:
-----------
It returns an image with overlayed polygon
"""
if len(self.point)>0:
polys=copy.deepcopy(self.point)
polys.append(self.curr_pt)
self.img = cv2.polylines(img=self.img, pts=np.array([polys]), isClosed=closed, color=(255,0,0), thickness=2)
def drawRect(self):
"""
Draw a rectangle on any image.
"""
if len(self.point)>0:
start_point = (self.point[0][0],self.point[0][1])
end_point = (self.curr_pt[0],self.curr_pt[1])
color = (255, 0, 0)
thickness = 2
self.img = cv2.rectangle(self.img, start_point, end_point, color, thickness)
def getpt(self,count=1,img=None,plotline=True):
if img is not None:
self.img = img
self.img1 = self.img.copy()
else:
self.img = self.img1.copy()
txt = 'Click on '+str(count)+' points'
self.img1 = put_text(self.img1,txt, 0, 100)
cv2.namedWindow(self.windowname,cv2.WINDOW_NORMAL)
cv2.imshow(self.windowname,self.img)
cv2.setMouseCallback(self.windowname,self.select_point)
self.point = []
k=0
while(1):
self.img = self.img1.copy()
self.draw()
cv2.imshow(self.windowname,self.img)
k = cv2.waitKey(20) & 0xFF
if k == 27:
break
if len(self.point)>=count:
self.img = self.img1.copy()
self.draw(closed=True)
cv2.imshow(self.windowname,self.img)
cv2.waitKey(500)
break
#print(self.point)
cv2.setMouseCallback(self.windowname, lambda *args : None)
cv2.destroyWindow(self.windowname)
return self.point
def sanity_check(self, frame, startpt,endpt, x_minimum, x_maximum):
ix, iy= startpt
x, y= endpt
sanity_flag=True
if (ix < x_minimum) or (x<x_minimum) or (ix>x_maximum) or (x>x_maximum) or (iy < 0) or (y<0) or (iy>frame.shape[0]) or (y>frame.shape[0]):
sanity_flag=False
return sanity_flag
def selectRect(self,img,windowname='RectSelect', boxList=None, x_min=None, x_max=None):
"""
Function to select a rectangle portion in an image.
"""
self.img = img
self.img1 = self.img.copy()
self.windowname = windowname
txt = 'Click on corner points, Drag to select, r to reset, Enter to finish, Esc to quit'
self.img1 = put_text(self.img1, txt, 50, img.shape[0] - 80)
if boxList is not None and len(boxList)>0:
for box in boxList:
x1, y1 = box[:2]
width, height = box[2:]
x2, y2 = x1+width, y1+height
cv2.rectangle(self.img1, (x1, y1), (x2, y2), (0, 255, 0), thickness=6)
cv2.namedWindow(self.windowname,cv2.WINDOW_NORMAL)
cv2.imshow(self.windowname,self.img)
cv2.setMouseCallback(self.windowname,self.select_point)
self.point = []
p1=[]
p2=[]
k=0
while(1):
self.img = self.img1.copy()
self.drawRect()
txt="Click on TL, TR, BR, BL points"
txt='Select rectangle top left and bottom right points'
self.img1 = put_text(self.img1, [[txt]], offsetval=(50, img.shape[0] - 80))
cv2.imshow(self.windowname,self.img)
k = cv2.waitKey(20) & 0xFF
#import pdb;pdb.set_trace()
if k == 27:
return [],[]
if k == ord('r'):
self.point = []
if k == 27:
break
if len(self.point)>1 and k==13:
# if len(self.point)==4:# and k==13:
self.img = self.img1.copy()
self.drawRect()
cv2.imshow(self.windowname,self.img)
p1, p2=self.point[0], self.curr_pt
#import pdb;pdb.set_trace()
x1,y1=min(p1[0],p2[0]),min(p1[1],p2[1])
x2,y2=max(p1[0],p2[0]),max(p1[1],p2[1])
p1=(x1,y1)
p2=(x2,y2)
if x_min is not None and x_max is not None:
sanity_check_flag = self.sanity_check(self.img, p1, p2, x_min, x_max)
if p1!=p2 and sanity_check_flag==True:
break
else:
print('Please draw inside image only')
else:
break
#print(self.point)
cv2.setMouseCallback(self.windowname, lambda *args : None)
#cv2.destroyAllWindows()
return p1, p2
if __name__=='__main__':
if 0:
img = np.zeros((512,512,3), np.uint8)
windowname = 'image'
# coordinateStore = MousePts(img, windowname)
# pts,img = MousePts(img, windowname).getpt(3)
# print(pts)
# pts,img = MousePts(img, windowname).getpt(3,img)
# print(pts)
mouse_obj=MousePts(windowname=windowname)
pts1,pts2=mouse_obj.selectRect(img,windowname)
print("pts1,pts2",pts1,pts2)
cv2.imshow(windowname,img)
cv2.waitKey(0)
if 0:
image = "../../cropped_video_17.png"
img = cv2.imread(image)
windowname = 'Image'
Mouse_obj = MousePts(img, windowname)
#roi = Mouse_obj.multi_select_points(image)
#print("ROI",roi)
pts = Mouse_obj.getpt(count=5)
print("pts",pts)
if 1:
image = "corrected.png"
img = cv2.imread(image)
windowname = 'Image'
Mouse_obj = MousePts(img, windowname)
roi = Mouse_obj.select_roi(img)
print("ROI",roi)
```
#### File: jerinka/Trapezoid_Crop/trapezoid_crop_gui.py
```python
import os
import cv2
import sys
import numpy as np
from math import sqrt
import os.path as osp
import argparse
path = osp.dirname(osp.abspath(__file__))
from MousePts import MousePts
def get_euclidian_distance(pt1, pt2):
return np.sqrt((pt1[0]-pt2[0])**2 + (pt1[1]-pt2[1])**2)
def apply_transform(point_list,M):
out_point_list= (cv2.perspectiveTransform(np.array(point_list),M)).astype(int)
return out_point_list
def trapezoidHandCrop(image, ht=None, wd=None):
"""
Function that asks user to click on four corner points of trapezoidal shape and
generates rectangular transformed image of selected trapezoid. ht and wd are height and
width of final crop image, by default it will be calculated based on input points.
"""
image_copy = image.copy()
rows, cols = image.shape[:2]
if 1:
src_points = MousePts().select_roi(image_copy)
src_points = np.float32(src_points)
np.savetxt('pts.txt', src_points)
else:
src_points = np.loadtxt('pts.txt')
print('src_points:',src_points)
pt1, pt2, pt3, pt4 = src_points
w1 = get_euclidian_distance(pt2, pt1)
h1 = get_euclidian_distance(pt2, pt3)
w1 = wd if wd is not None else w1 #assign crop width as user's input
h1 = ht if ht is not None else h1 #assign crop height as user's input
x1, y1 = 0, 0
dst_points = np.float32([[x1,y1], [x1 + w1, y1], [x1 + w1, y1 + h1], [x1, y1 + h1]])
M = cv2.getPerspectiveTransform(src_points, dst_points)
crop_image = cv2.warpPerspective(image_copy, M, (int(w1), int(h1)))
pointsOut = convert_pts(src_points, M)
print('pointsOut: ', pointsOut)
return crop_image
def convert_pts(boxpoints, M):
boxpoints = np.float32(boxpoints)
warp_boxes = []
for b in boxpoints:
b = np.array(b).reshape(1, 1, 2)
w_b = apply_transform(b, M)
w_box_pt = list(w_b[0][0])
warp_boxes.append(w_box_pt)
return warp_boxes
def Get_warped_image(img, M=None):
rows, cols = img.shape[:2]
warped = cv2.warpPerspective(img, M, (cols, rows))
return warped
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p',"--path" , default='crops/18.png', help="path of image")
args = parser.parse_args()
imgPath = args.path
image = cv2.imread(imgPath)
crop_img = trapezoidHandCrop(image)
path='crops'
if not os.path.exists(path):
os.makedirs(path)
basename = os.path.basename(imgPath)
#import pdb;pdb.set_trace()
filename = os.path.splitext(basename)[0]+'.png'
cv2.imwrite(os.path.join(path, filename),crop_img)
cv2.namedWindow('Cropped', cv2.WINDOW_NORMAL)
cv2.imshow('Cropped', crop_img)
cv2.waitKey(0)
```
|
{
"source": "jerinmax/edge-ai-extension",
"score": 2
}
|
#### File: edge-ai-extension/server/__main__.py
```python
import argparse
import os
import sys
from vaserving.vaserving import VAServing
from grpc_server import GrpcServer
from http_server import HttpServer
from common import logging, constants
from common.exception_handler import log_exception
PROGRAM_NAME = "DL Streamer Edge AI Extension"
def parse_args(args=None, program_name=PROGRAM_NAME):
parser = argparse.ArgumentParser(
prog=program_name,
fromfile_prefix_chars="@",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--protocol",
type=str.lower,
choices=[constants.GRPC_PROTOCOL, constants.HTTP_PROTOCOL],
help="Extension protocol (grpc or http)",
default=os.getenv("PROTOCOL", "grpc").lower(),
)
parser.add_argument(
"--grpc-port",
action="store",
help="Port number to serve gRPC server",
type=int,
default=int(os.getenv("GRPC_PORT", constants.GRPC_PORT)),
)
parser.add_argument(
"--http-port",
action="store",
help="Port number to serve HTTP server",
type=int,
default=int(os.getenv("HTTP_PORT", constants.HTTP_PORT)),
)
parser.add_argument(
"--max-running-pipelines",
action="store",
type=int,
default=int(os.getenv("MAX_RUNNING_PIPELINES", "10")),
)
parser.add_argument(
"--log-level",
action="store",
choices=['INFO', 'DEBUG'],
default=os.getenv('EXTENSION_LOG_LEVEL', 'INFO'))
if isinstance(args, dict):
args = ["--{}={}".format(key, value)
for key, value in args.items() if value]
return parser.parse_known_args(args)
def append_default_server_args(va_serving_args, max_running_pipelines):
va_serving_args.append("--max_running_pipelines")
va_serving_args.append(str(max_running_pipelines))
return va_serving_args
if __name__ == "__main__":
args, va_serving_args = parse_args()
logging.set_default_log_level(args.log_level)
logger = logging.get_logger("Main")
server = None
try:
server_args = append_default_server_args(
va_serving_args, args.max_running_pipelines
)
try:
VAServing.start(server_args)
except Exception as error:
logger.error(error)
logger.error("Exception encountered during VAServing start")
raise
if args.protocol == constants.GRPC_PROTOCOL:
server = GrpcServer(args)
else:
server = HttpServer(args)
server.start()
except (KeyboardInterrupt, SystemExit, Exception):
log_exception()
sys.exit(-1)
finally:
if server:
server.stop()
VAServing.stop()
```
|
{
"source": "jerinmax/live-video-analytics",
"score": 3
}
|
#### File: app/nginx/lva_grpc_app.py
```python
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import io
import json
#import os
#from datetime import datetime
import requests
# Imports for the REST API
from flask import Flask, request, jsonify, Response
app = Flask(__name__)
# / routes to the default function which returns 'Hello World'
@app.route('/', methods=['GET'])
def defaultPage():
return Response(response='Hello', status=200)
@app.route('/stream/<id>')
def stream(id):
respBody = ("<html>"
"<h1>MJPEG stream</h1>"
"<img src=\"/mjpeg/" + id + "\"/>"
"</html>")
return Response(respBody, status= 200)
if __name__ == '__main__':
# Run the server
app.run(host='0.0.0.0', port=8888)
```
#### File: lvaextension/server/model_wrapper.py
```python
import os
import threading
import cv2
import numpy as np
from exception_handler import PrintGetExceptionDetails
from scipy import special
import onnxruntime
import logging
class YoloV4Model:
def __init__(self):
try:
self._lock = threading.Lock()
self._modelFileName = 'yolov4.onnx'
self._modelLabelFileName = 'coco.names'
self._anchorsFileName = 'yolov4_anchors.txt'
self._labelList = None
# Get labels
with open(self._modelLabelFileName, "r") as f:
self._labelList = [l.rstrip() for l in f]
# Get anchors
with open(self._anchorsFileName, "r") as f:
anchors = f.readline()
anchors = np.array(anchors.split(','), dtype=np.float32)
self._anchors = anchors.reshape(3, 3, 2)
# YoloV4 specific model vars.
self._strides = np.array([8, 16, 32])
self._xyscale = [1.2, 1.1, 1.05]
# Get Topology struct and create inference session
self._onnxSession = onnxruntime.InferenceSession(self._modelFileName)
self._onnxSessionOutputName = self._onnxSession.get_outputs()[0].name
self._onnxSessionInputName = self._onnxSession.get_inputs()[0].name
except:
PrintGetExceptionDetails()
raise
def Preprocess(self, cvImage):
try:
imageBlob = cv2.cvtColor(cvImage, cv2.COLOR_BGR2RGB)
imageBlob = np.array(imageBlob, dtype='float32')
imageBlob /= 255.
# imageBlob = np.transpose(imageBlob, [0, 1, 2])
imageBlob = np.expand_dims(imageBlob, 0)
return imageBlob
except:
PrintGetExceptionDetails()
raise
def PostprocessBbox(self, predBbox):
for i, pred in enumerate(predBbox):
convShape = pred.shape
outputSize = convShape[1]
convRawdxdy = pred[:, :, :, :, 0:2]
convRawdwdh = pred[:, :, :, :, 2:4]
xyGrid = np.meshgrid(np.arange(outputSize), np.arange(outputSize))
xyGrid = np.expand_dims(np.stack(xyGrid, axis=-1), axis=2)
xyGrid = np.tile(np.expand_dims(xyGrid, axis=0), [1, 1, 1, 3, 1])
xyGrid = xyGrid.astype(float)
predXY = ((special.expit(convRawdxdy) * self._xyscale[i]) - 0.5 * (self._xyscale[i] - 1) + xyGrid) * self._strides[i]
predWH = (np.exp(convRawdwdh) * self._anchors[i])
pred[:, :, :, :, 0:4] = np.concatenate([predXY, predWH], axis=-1)
predBbox = [np.reshape(x, (-1, np.shape(x)[-1])) for x in predBbox]
predBbox = np.concatenate(predBbox, axis=0)
return predBbox
def PostprocessBoxes(self, predBbox, orgImgShape, inputSize, scoreThreshold):
validScale=[0, np.inf]
predBbox = np.array(predBbox)
predXYWH = predBbox[:, 0:4]
predConf = predBbox[:, 4]
predProb = predBbox[:, 5:]
# # (1) (x, y, w, h) --> (xmin, ymin, xmax, ymax)
predCoor = np.concatenate([predXYWH[:, :2] - predXYWH[:, 2:] * 0.5,
predXYWH[:, :2] + predXYWH[:, 2:] * 0.5], axis=-1)
# # (2) (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
orgH, orgW = orgImgShape
resizeRatio = min(inputSize / orgW, inputSize / orgH)
dw = (inputSize - resizeRatio * orgW) / 2
dh = (inputSize - resizeRatio * orgH) / 2
predCoor[:, 0::2] = 1.0 * (predCoor[:, 0::2] - dw) / resizeRatio
predCoor[:, 1::2] = 1.0 * (predCoor[:, 1::2] - dh) / resizeRatio
# # (3) clip some boxes that are out of range
predCoor = np.concatenate([np.maximum(predCoor[:, :2], [0, 0]),
np.minimum(predCoor[:, 2:], [orgW - 1, orgH - 1])], axis=-1)
invalidMask = np.logical_or((predCoor[:, 0] > predCoor[:, 2]), (predCoor[:, 1] > predCoor[:, 3]))
predCoor[invalidMask] = 0
# # (4) discard some invalid boxes
bboxesScale = np.sqrt(np.multiply.reduce(predCoor[:, 2:4] - predCoor[:, 0:2], axis=-1))
scaleMask = np.logical_and((validScale[0] < bboxesScale), (bboxesScale < validScale[1]))
# # (5) discard some boxes with low scores
classes = np.argmax(predProb, axis=-1)
scores = predConf * predProb[np.arange(len(predCoor)), classes]
score_mask = scores > scoreThreshold
mask = np.logical_and(scaleMask, score_mask)
coors, scores, classes = predCoor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
def bboxesIOU(self, boxes1, boxes2):
'''calculate the Intersection Over Union value'''
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
def nms(self, bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
while len(cls_bboxes) > 0:
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
iou = self.bboxesIOU(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
def Score(self, cvImage):
try:
with self._lock:
imageBlob = self.Preprocess(cvImage)
detections = self._onnxSession.run([self._onnxSessionOutputName], {self._onnxSessionInputName: imageBlob})[0]
predBbox = self.PostprocessBbox(np.expand_dims(detections, axis=0))
originalImageSize = cvImage.shape[:2]
bboxes = self.PostprocessBoxes(predBbox, originalImageSize, 416, 0.25)
# bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.
bboxes = self.nms(bboxes, 0.213, method='nms')
return bboxes, originalImageSize
except:
PrintGetExceptionDetails()
raise
```
|
{
"source": "jerinmax/model_server",
"score": 2
}
|
#### File: tests/unit/test_classification_attributes_model.py
```python
import json
from typing import Dict
import pytest
import numpy as np
from src.api.models.classification_attributes_model import ClassificationAttributes
from src.api.models.model_config import ModelOutputConfiguration
MOCK_INFERENCE_OUTPUT = {
'color':
np.array([[
[[0.05]],
[[0.2]],
[[0.3]],
[[0.1]],
[[0.1]],
[[0.2]],
[[0.05]],
]]),
'type':
np.array([[
[[0.2]],
[[0.5]],
[[0.1]],
[[0.2]]
]])
}
@pytest.fixture
def fake_output_config() -> Dict[str, ModelOutputConfiguration]:
return {
'color': ModelOutputConfiguration(output_name='color',
classes={
"white": 0.0,
"gray": 1.0,
"yellow": 2.0,
"red": 3.0,
"green": 4.0,
"blue": 5.0,
"black": 6.0
},
is_softmax=True
),
'type': ModelOutputConfiguration(output_name='type',
classes={
"car": 0.0,
"van": 1.0,
"truck": 2.0,
"bus": 3.0
},
is_softmax=True
)
}
@pytest.mark.parametrize("inference_output,expected_response", [
(MOCK_INFERENCE_OUTPUT,
{
"inferences": [
{
"type": "classification",
"subtype": "color",
"classification": {
"tag": "yellow"
}
},
{
"type": "classification",
"subtype": "type",
"classification": {
"tag": "van"
}
}
]
}
)])
def test_postprocess_inference_output(inference_output, expected_response, fake_output_config):
model = ClassificationAttributes(endpoint=None, ovms_connector=None, input_configs=None,
output_configs=fake_output_config)
print(model.postprocess_inference_output(inference_output))
assert model.postprocess_inference_output(
inference_output) == json.dumps(expected_response)
@pytest.mark.parametrize("inference_output,expected_response,top_k", [
(MOCK_INFERENCE_OUTPUT,
{
"inferences": [
{
"type": "classification",
"subtype": "color",
"classification": {
"tag": "yellow"
}
},
{
"type": "classification",
"subtype": "type",
"classification": {
"tag": "van"
}
}
]
},
1
)])
def test_postprocess_inference_output_top_k(inference_output, expected_response, top_k, fake_output_config):
for key in fake_output_config.keys():
fake_output_config[key].top_k_results = top_k
model = ClassificationAttributes(endpoint=None, ovms_connector=None, input_configs=None,
output_configs=fake_output_config)
print(model.postprocess_inference_output(inference_output))
assert model.postprocess_inference_output(
inference_output) == json.dumps(expected_response)
@pytest.mark.parametrize("inference_output,expected_response,confidence_threshold", [
(MOCK_INFERENCE_OUTPUT,
{
"inferences": [
{
"type": "classification",
"subtype": "color",
"classification": {
"tag": "yellow"
}
},
{
"type": "classification",
"subtype": "type",
"classification": {
"tag": "van"
}
}
]
},
0.2
)])
def test_postprocess_inference_output_confidence_threshold(inference_output,
expected_response,
confidence_threshold, fake_output_config):
for key in fake_output_config.keys():
fake_output_config[key].confidence_threshold = confidence_threshold
model = ClassificationAttributes(endpoint=None, ovms_connector=None, input_configs=None,
output_configs=fake_output_config)
assert model.postprocess_inference_output(
inference_output) == json.dumps(expected_response)
```
|
{
"source": "jerinpetergeorge/django-akaunting",
"score": 2
}
|
#### File: django-akaunting/organizations/models.py
```python
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_extensions.db.models import TimeStampedModel
class Organization(TimeStampedModel):
name = models.CharField(_("Name"), max_length=100)
email = models.EmailField(_("Email"), unique=True)
class Meta:
verbose_name = _("Organization")
verbose_name_plural = _("Organizations")
db_table = "Organization"
ordering = ["name"]
def __str__(self):
return self.name
```
#### File: sales/customers/models.py
```python
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_extensions.db.models import TimeStampedModel
class Customer(TimeStampedModel):
name = models.CharField(_("Name"), max_length=100)
class Meta:
verbose_name = _("Customer")
verbose_name_plural = _("Customers")
db_table = "Customer"
def __str__(self):
return self.name
```
|
{
"source": "jerin/smart-steel-task",
"score": 2
}
|
#### File: jerin/smart-steel-task/app.py
```python
from models import LogData, TaskData
from flask import Flask, request, render_template
from flask_migrate import Migrate
from config import Config
from database.database import Base, db_session
from logs import Log
from task import TaskData as TaskDataOP
app = Flask(__name__)
app.config.update({
'SQLALCHEMY_DATABASE_URI': Config.SQLALCHEMY_DATABASE_URI
})
migrate = Migrate(app, Base)
@app.route('/')
def welcome():
return "welcome to smart steel application"
@app.route('/taskdata')
def task_data():
Log.insert_log("info", "task data requested", "task data requested")
data = TaskDataOP.get_task_data()
return render_template('task_data.html',
data=data,
title="Show Data")
@app.route('/logs')
def log_data():
data = Log.get_log()
return render_template('log_data.html',
data=data,
title="Show Log")
@app.errorhandler(404)
def page_not_found(e):
Log.insert_log("error", "invalid url", request.path)
app.logger.error('Page not found: %s', (request.path))
return render_template("404.html"), 404
@app.errorhandler(500)
def internal_server_error(error):
Log.insert_log("error", "internal server error", str(error))
app.logger.error('Server Error: %s', (error))
return render_template('500.html', error=error), 500
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
if __name__ == '__main__':
app.run(threaded=True)
```
#### File: smart-steel-task/tests/app_test.py
```python
import unittest
import pytest
from app import app
class BasicTests(unittest.TestCase):
# executed prior to each test
def setUp(self):
app.config['TESTING'] = True
self.app = app.test_client()
def tearDown(self):
pass
def test_application_root(self):
"""check that the application root is responding"""
response = self.app.get('/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertIn(b'welcome to smart steel application', response.data)
def test_task_data(self):
"""check that the task data get method is response should be 200"""
response = self.app.get('/taskdata', follow_redirects=True)
self.assertEqual(response.status_code, 200)
def test_log_data(self):
"""check that the log data get method is response should be 200"""
response = self.app.get('/logs', follow_redirects=True)
self.assertEqual(response.status_code, 200)
def test_invalid_url(self):
"""check that the response for invalid URL should be 404"""
response = self.app.get('/invalidurl', follow_redirects=True)
self.assertEqual(response.status_code, 404)
def test_internal_server_error(self):
"""check that the response for internal server error should be 500"""
with pytest.raises(Exception):
response = self.app.get('/taskdata', follow_redirects=True)
self.assertEqual(response.status_code, 500)
if __name__ == "__main__":
unittest.main()
```
#### File: smart-steel-task/tests/log_test.py
```python
from logs import Log
import pytest
import uuid
from datetime import datetime
class TestLog:
@staticmethod
@pytest.mark.usefixtures('protect_db')
def test_log():
Log.insert_log("info","test action","test message")
log = Log.get_log()
assert len(log)>0
```
|
{
"source": "JERisBRISK/archivists-pride",
"score": 2
}
|
#### File: JERisBRISK/archivists-pride/alert.py
```python
import dearpygui.dearpygui as dpg
# inspired by https://discord.com/channels/736279277242417272/876200434468016178/879776888824922122
def alert(title, message):
# guarantee these commands happen in the same frame
with dpg.mutex():
viewport_width = dpg.get_viewport_client_width()
viewport_height = dpg.get_viewport_client_height()
with dpg.window(label=title, modal=True, no_close=True) as modal_id:
dpg.add_text(message)
dpg.add_button(label="Ok", width=75, user_data=modal_id, callback=on_alert_confirmation)
# guarantee these commands happen in another frame
dpg.split_frame()
width = dpg.get_item_width(modal_id)
height = dpg.get_item_height(modal_id)
dpg.set_item_pos(modal_id, [viewport_width // 2 - width // 2, viewport_height // 2 - height // 2])
def on_alert_confirmation(sender, unused, user_data):
dpg.delete_item(user_data)
```
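A minimal sketch of wiring `alert` into an app, assuming Dear PyGui 1.x (the API level the `mutex`/`split_frame` calls above imply); the window layout is illustrative only.
```python
import dearpygui.dearpygui as dpg
from alert import alert

dpg.create_context()
dpg.create_viewport(title='alert demo', width=400, height=300)

with dpg.window(label='Main', width=380, height=260):
    # clicking the button pops the modal alert defined above
    dpg.add_button(label='Show alert',
                   callback=lambda: alert('Hello', 'This is a modal alert.'))

dpg.setup_dearpygui()
dpg.show_viewport()
dpg.start_dearpygui()
dpg.destroy_context()
```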
#### File: JERisBRISK/archivists-pride/singleton.py
```python
from threading import Lock
# ala https://en.wikipedia.org/wiki/Singleton_pattern#Python_implementation
# and https://stackoverflow.com/questions/51896862/how-to-create-singleton-class-with-arguments-in-python
class Singleton (type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
# Based on tornado.ioloop.IOLoop.instance() approach.
# See https://github.com/facebook/tornado
# Whole idea for this metaclass is taken from: https://stackoverflow.com/a/6798042/2402281
class ThreadSafeSingleton(type):
_instances = {}
_singleton_lock = Lock()
def __call__(cls, *args, **kwargs):
# double-checked locking pattern (https://en.wikipedia.org/wiki/Double-checked_locking)
if cls not in cls._instances:
with cls._singleton_lock:
if cls not in cls._instances:
cls._instances[cls] = super(ThreadSafeSingleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
```
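A short usage sketch for the metaclasses above (placed in, or importing from, the same module); the `Config` class is only an illustration. Note that constructor arguments passed after the first call are ignored, which is a property of this pattern.
```python
class Config(metaclass=Singleton):
    def __init__(self, env='dev'):
        self.env = env

a = Config(env='prod')
b = Config(env='dev')   # args ignored: the cached instance is returned
assert a is b
assert a.env == 'prod'
```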
|
{
"source": "jeristanus/OHSIHA",
"score": 2
}
|
#### File: OHSIHA/tweetsentiment/models.py
```python
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
# UserSettings model contains all the settings of users
class UserSettings(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
# The threshold for considering a tweet positive or negative
polarity_interpretation_sensitivity = models.FloatField(default=0.2)
# The last hashtag the user searched
last_hashtag_searched = models.TextField(max_length=25, default="")
# Functions to automate the creation and updating of UserSettings, when User -model is referenced and changed
@receiver(post_save, sender=User)
def create_UserSettings(sender, instance, created, **kwargs):
if created:
UserSettings.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_UserSettings(sender, instance, **kwargs):
instance.usersettings.save()
pass
# Geocode_record is used to save the geocoding results from google.
# This makes the app faster, as the geocoding doesn't need to be done multiple times for the same location
class geocode_record(models.Model):
twitter_location = models.TextField(max_length=60, primary_key=True)
geocode_state = models.TextField(max_length=30, null=True)
geocode_country = models.TextField(max_length=40, null=True)
```
#### File: OHSIHA/tweetsentiment/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .models import *
from .tools import *
from .tweetsentiment import *
def index(request):
# Index of the page
template = loader.get_template('tweetsentiment/index.html')
context = {}
return HttpResponse(template.render(context, request))
@login_required
def TweetSentiment(request):
# Let's see if we have a hashtag
if 'hashtag' not in request.GET or request.GET['hashtag'] == "" or request.GET['hashtag'].find(" ") != -1:
# No hashtag provided!
template = loader.get_template('tweetsentiment/tweetsentiment.html')
context = {
'hashtag': request.user.usersettings.last_hashtag_searched,
'user': request.user,
}
return HttpResponse(template.render(context, request))
hashtag = request.GET['hashtag'].lower()
#** Let's run a sentiment analysis and compile a dataset for the template **
# The sensitivity determines how easily a positive/negative tweet will be shown with a green/red color
# Let's save a new sensitivity the user gave, or use the previous value
try:
sensitivity = float(request.GET['polaritySensitivity'])
request.user.usersettings.polarity_interpretation_sensitivity = sensitivity
request.user.usersettings.last_hashtag_searched = hashtag
request.user.usersettings.save()
except:
sensitivity = request.user.usersettings.polarity_interpretation_sensitivity
# Let's get the tweetdata
tweetdata = get_and_sentimentanalyze_tweets(hashtag, sensitivity)
if tweetdata == TWEETSENTIMENT_ERROR:
# Error fetching the tweets! Let's return an error message
template = loader.get_template('tweetsentiment/tweetsentiment.html')
context = {
'hashtag': hashtag,
'error_message': "Error when trying to get tweets!"
}
return HttpResponse(template.render(context, request))
# Let's wait for all the threads to finish
# join_tweet_location_threads()
# Let's "unlist" all the location data
# for i in range(0, len(tweetdata)):
# tweetdata[i]['location'] = tweetdata[i]['location'][0]
# Let's get the uState passable dataset of the US sentiments
uState_data = create_US_state_average_sentiments(tweetdata)
template = loader.get_template('tweetsentiment/tweetsentiment.html')
context = {
'statuses': tweetdata,
'hashtag': hashtag,
'user': request.user,
'uState_data': uState_data,
}
return HttpResponse(template.render(context, request))
###############################
# Views for the rest API access
# Returns the <count> number of latest tweets with the hashtag <hashtag>
@api_view(['GET'])
@authentication_classes((SessionAuthentication, BasicAuthentication))
@permission_classes((IsAuthenticated,))
def api_get_tweets(request, hashtag, count):
# Let's get the user's polaritySensitivity
polaritySensitivity = request.user.usersettings.polarity_interpretation_sensitivity
print("PolaritySensitivity:", polaritySensitivity)
# API is currently limited to max 100 tweets. If the user asks for more, let's return an error
if count > 100:
return Response(status=status.HTTP_400_BAD_REQUEST)
# Location data is omitted from API calls, as those take a lot of time
tweetdata = get_and_sentimentanalyze_tweets(hashtag, sensitivity=polaritySensitivity, count=count, getlocation=False, include_original_status=False)
return Response(tweetdata)
@api_view(['PUT'])
@authentication_classes((SessionAuthentication, BasicAuthentication))
@permission_classes((IsAuthenticated,))
def api_set_sensitivity(request, polaritySensitivity):
# Let's set the user's polaritySensitivity
try:
polaritySensitivity = float(polaritySensitivity)
# Let's check if the sensitivity is within the required interval
if polaritySensitivity < 0 or 1 < polaritySensitivity:
# The sensitivity is under 0 or over 1, which is not permitted
return Response(status=status.HTTP_406_NOT_ACCEPTABLE)
# Let's set the new sensitivity
request.user.usersettings.polarity_interpretation_sensitivity = polaritySensitivity
request.user.usersettings.save()
return Response(status=status.HTTP_202_ACCEPTED)
except:
return Response(status=status.HTTP_406_NOT_ACCEPTABLE)
```
|
{
"source": "Jerit-Baiju/mysite-django",
"score": 2
}
|
#### File: mysite-django/api/views.py
```python
from django.http import HttpResponse
from base.models import AdminLog
from base.views import push
# Create your views here.
def latest_log(request):
context = AdminLog.objects.get(name='api_log').latest_log
return HttpResponse(str(context))
def log(request):
context = AdminLog.objects.get(name='api_log').log
return HttpResponse(str(context).replace('\n','<br><br>'))
def clr_admin_log(request):
log = AdminLog.objects.get(name='api_log')
log.log = ''
log.save()
push('API LOG CLEARED')
return HttpResponse('Cleared')
```
#### File: mysite-django/base/views.py
```python
from datetime import datetime
import random
from django.http import HttpResponse
import pytz
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import redirect, render
from pushbullet import Pushbullet
from .models import User, AdminLog, AdminSecret
try:
pb_key, _ = AdminSecret.objects.get_or_create(name='pushbullet')
except:
None
def push(text):
try:
text = str(text).capitalize()
pb = Pushbullet(pb_key.secret)
pb.push_note("MySite", text, pb.devices[0])
except:
None
def log(request, data):
date = datetime.now(pytz.timezone("Asia/Kolkata")).date()
time = datetime.now(pytz.timezone("Asia/Kolkata")).strftime("%H:%M")
agent = request.META['HTTP_USER_AGENT']
admin_log, _ = AdminLog.objects.get_or_create(name='api_log')
if request.user.is_authenticated:
user = request.user
body = f"{date} | {time} | {user} | {data} | {agent}"
admin_log.latest_log = body
admin_log.log = f"{body}\n{admin_log.log}"
admin_log.save()
user_log = f"{date} | {time} | {data} | {agent}\n{user.log}"
user.log = user_log
user.save()
else:
body = f"{date} | {time} | {data} | {agent}"
admin_log.latest_log = body
admin_log.log = f"{body}\n{admin_log.log}"
admin_log.save()
intro = '''Hi, I'm Jerit. I am passionate about Coding and building things. I am
particularly interested in projects that touch Artificial Intelligence,
Web Development, Chatbot, Django. If you think I can be helpful to you or would
like to meet me, please feel free to'''
def registerPage(request):
if request.method == 'POST':
name = request.POST['name']
username = str(request.POST['username']).lower().lstrip().rstrip()
mail = request.POST['mail']
password = request.POST['<PASSWORD>']
confirm = request.POST['<PASSWORD>']
name_split = name.split()
        if len(name_split) < 2:
            # first_name and last_name below require at least two words
            messages.error(request, 'Please enter your full name')
            return render(request, 'base/register.html', {'title': 'Register | <NAME>'})
        else:
            first_name = name_split[0].capitalize()
            last_name = name_split[1].capitalize()
if " " in username:
            messages.error(request, 'Username must not contain spaces')
return render(request, 'base/register.html', {'title': 'Register | <NAME>'})
elif password == confirm:
if User.objects.filter(username=username).exists():
messages.error(request, 'Username already exists.')
return render(request, 'base/register.html', {'title': 'Register | <NAME>'})
elif User.objects.filter(email=mail).exists():
messages.error(request, 'Email already exists')
return render(request, 'base/register.html', {'title': 'Register | <NAME>'})
else:
user = User.objects.create_user(
username=username, password=password, email=mail, name=name, first_name=first_name, last_name=last_name)
user.save()
login(request, user)
push(f'Registered - {name}')
try:
url = request.POST.get('next')
return redirect(url)
except:
return redirect('home')
else:
            messages.error(request, 'Passwords do not match')
return render(request, 'base/register.html', {'title': 'Register | <NAME>'})
else:
return render(request, 'base/register.html', {'title': 'Register | <NAME>'})
def loginPage(request):
if request.user.is_authenticated:
return redirect('home')
if request.method == 'POST':
username = request.POST.get('username').lower()
password = request.POST.get('password')
try:
user = User.objects.get(username=username)
except:
messages.error(request, 'User does not exist')
return render(request, 'base/login.html', {'title': 'Login | <NAME>'})
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
push(f'Login - {user.name}')
try:
url = request.POST.get('next')
return redirect(url)
except:
return redirect('home')
else:
            messages.error(request, 'Username or password does not exist')
return render(request, 'base/login.html', {'title': 'Login | <NAME>'})
return render(request, 'base/login.html', {'title': 'Login | <NAME>'})
def logoutPage(request):
push(f"Logout - {request.user.name}")
log(request, 'Logout')
logout(request)
return redirect('home')
def home(request):
if request.user.is_authenticated:
if request.user.username != 'jerit':
push(f'Visited - {request.user.name}')
else:
push(f'Visited - Unknown User')
log(request, 'Home')
quotes = [
        {'quote': 'Just turn your Passion into your Profession.', 'author': '<NAME>'},
        {'quote': 'The computer was born to solve problems that did not exist before.', 'author': '<NAME>'},
        {'quote': "People don't care about what you say, they care about what you build.", 'author': '<NAME>'},
        {'quote': "First, solve the problem. Then, write the code.", 'author': '<NAME>'},
        {'quote': 'A computer is like a mischievous genie. It will give you exactly what you ask for, but not always what you want.', 'author': '<NAME>'},
        {'quote': 'A good programmer looks both ways before crossing a one-way street.', 'author': 'Unknown'},
        {'quote': 'A person who never made a mistake never tried anything new.', 'author': '<NAME>'}
    ]
context = {
'title': '<NAME>',
'bio': ['> Junior Pythoneer.',
'> Captivating from life.',
'> Words cannot express my Passion.',
'> Currently saying Yes to New Adventures.',
'> Learner @ Web Development'],
'intro': intro,
'quote': random.choice(quotes)
}
return render(request, 'base/index.html', context)
def gallery(request):
log(request, 'Gallery')
return redirect('https://jeritbaiju.herokuapp.com')
def about(request):
log(request, 'About')
history = [
"2016 - I was 10 years old studying in 4th class, my parents bought me a laptop. I was so excited about it. I learned to control the cursor and to type easily with MS-PAINT, NOTEPAD.",
"2017 - Started to play the MS mini games like CHESS, MINESWEEPER etc. And learned MS-LOGO, created many objects and played with them.",
"2018 - I was 12. Joined for the activity PHOTOSHOP AND ANIMATION with Adobe Flash-Macromedia. Started to search every files and locations on computer in my school. Unexpectedly opened an HTML file and I thought it was programming, I asked to my teacher (Reshmi miss) about it and she taught me some basics of HTML.",
"2019 - I created my own website using HTML only. Heard about programming languages(C++, Python, JavaScript).",
"2020 - Made my own chatbot using VISUAL BASIC. I upgraded my computer windows 7 to windows 10 alone. Started to learn python, made simple command line applications. I made GUI apps for windows using Tkinter.",
"2021 - Switched to Linux. Learned Flask, CSS, JavaScript. I published my website on pythonanywhere (currently not available). Learned GIT, REACT and some DS. Made many projects like ChatBot named Clara (WEB), Weather app (CLI)",
"2022 - Learned Django and hosted my website on HEROKU. Made a Package called PYFLIT for FLASK users, and created projects Weather App (WEB), NUM GAME etc. Currently working on Django + React."
]
context = {
'title': 'About Me',
'history': history,
'intro': intro
}
return render(request, 'base/about.html', context)
def stats(request):
log(request, 'Stats')
user = request.user
jerit = User.objects.get(username='jerit')
about_me = [
{'key': 'age', 'value': '16', 'class': 'grey'},
{'key': 'current city', 'value': 'kerala, India', 'class': 'white'},
{'key': 'birthday', 'value': '10 February 2006', 'class': 'grey'},
{'key': 'started coding on', 'value': '2018', 'class': 'white'},
{'key': 'currently learning',
'value': 'web development - python', 'class': 'grey'},
{'key': 'OS', 'value': 'Ubuntu', 'class': 'white'},
]
about_web = [
{'key': 'languages', 'value': 'python, JS, HTML, CSS', 'class': 'grey'},
{'key': 'Backend', 'value': 'Django | Python', 'class': 'white'},
{'key': 'packages',
'value': 'Jinja, BS4, Random, PushBullet', 'class': 'grey'},
{'key': 'DataBase', 'value': 'SQLITE3', 'class': 'white'},
{'key': 'Hosted on', 'value': 'Heroku', 'class': 'grey'},
{'key': 'last updated at', 'value': jerit.last_login.date, 'class': 'white'}
]
    if user.is_authenticated:
about_user = [
{'key': 'name', 'value': user.name, 'class': 'grey'},
{'key': 'username', 'value': user.username, 'class': 'white'},
{'key': 'e-mail', 'value': user.email, 'class': 'grey'},
{'key': 'score', 'value': user.score, 'class': 'white'},
{'key': 'last login', 'value': user.last_login.date, 'class': 'grey'},
{'key': 'date joined', 'value': user.date_joined.date, 'class': 'white'},
]
else:
about_user = [
{'key': 'authenticated', 'value': 'not', 'class': 'grey'},
{'key': 'note', 'value': 'please login to see all the details', 'class': 'white'}
]
stats = [
{'name': 'some stats about me', 'contents': about_me},
{'name': 'some stats about this website', 'contents': about_web},
{'name': 'some stats about you', 'contents': about_user}
]
context = {
'title': 'Stats | <NAME>',
'stats': stats,
}
return render(request, 'base/stats.html', context)
def sitemap(request):
log(request, 'Sitemap')
return HttpResponse(open('sitemap.xml').read(), content_type='text/xml')
def robots(request):
log(request, 'Robots')
return HttpResponse(open('robots.txt').read(), content_type='text/plain')
def github(request):
log(request, 'GitHub')
return redirect('https://github.com/Jerit-Baiju')
def instagram(request):
log(request, 'Instagram')
return redirect('https://www.instagram.com/d__doc_46')
def whatsapp(request):
log(request, 'Whatsapp')
return redirect('http://wa.me/+918592060520?text=Hi%20Jerit%20%F0%9F%91%8B%F0%9F%8F%BB')
def vijayamatha(request):
log(request, 'Vijayamatha')
return redirect('https://vijayamathaschool.in')
```
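The views above import `User`, `AdminLog` and `AdminSecret` from `base.models` and rely on extra `name`, `score` and `log` fields on the user plus a `secret` field on `AdminSecret`, but the models file itself is not included in this dump. A minimal sketch of what `base/models.py` would need to provide, with every field type, length and default assumed:
```python
# Plausible base/models.py for the views above; the field names are taken from
# attribute accesses in the code (user.name, user.score, user.log, secret.secret,
# admin_log.latest_log), while all types, lengths and defaults are assumptions.
from django.contrib.auth.models import AbstractUser
from django.db import models

class User(AbstractUser):
    # settings.AUTH_USER_MODEL would need to point at this model ('base.User')
    name = models.CharField(max_length=100, blank=True, default='')
    score = models.IntegerField(null=True, blank=True, default=0)
    log = models.TextField(blank=True, default='')

class AdminLog(models.Model):
    name = models.CharField(max_length=50, unique=True)
    latest_log = models.TextField(blank=True, default='')
    log = models.TextField(blank=True, default='')

class AdminSecret(models.Model):
    name = models.CharField(max_length=50, unique=True)
    secret = models.CharField(max_length=200, blank=True, default='')
```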
#### File: mysite-django/projects/views.py
```python
import random
from datetime import datetime
import pytz
import requests
from base.views import log
from bs4 import BeautifulSoup
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
# Create your views here.
def projects(request):
log(request, 'Projects')
projects = [
        {'name': 'Weather App', 'info': 'A project to get the weather for any place, working by scraping data from GOOGLE WEATHER.',
'src': '/projects/weather', 'btn': 'Go'},
{'name': 'PyFlit', 'info': 'Tool for adding components and pages in FLASK. Can be used to send PYTHON variables to JAVASCRIPT.',
'src': 'https://pypi.org/project/pyflit/', 'btn': 'Pypi'},
{'name': 'Clara', 'info': 'CHAT-BOT made with ELIO-BOT-API. Simple STATIC Project.',
'src': '/projects/clara', 'btn': 'Chat'},
{'name': 'MySite', 'info': 'DYNAMIC WEBSITE made with DJANGO framework, FEATURES - Admin Panel, User Management, User-Score Handling, LOGS, etc.. ',
'src': 'https://github.com/jerit-baiju/mysite-django', 'btn': 'GitHub'},
        {'name': 'Number Game', 'info': 'Guess the number picked randomly from 0-100 by the computer, within TEN chances.',
'src': '/projects/num_game', 'btn': 'Play'},
{'name': 'Elio Bot API', 'info': 'This API provides you free commands, wikipedia support, user detection.',
'src': 'https://github.com/jerit-baiju/chat_bot_api', 'btn': 'GitHub'},
{'name': 'GitHub Activity Generator', 'info': 'Python script for creating commits, can specify number of commits, code consistency, and many more.',
'src': 'https://github.com/jerit-baiju/activity_generator', 'btn': 'GitHub'}
]
random.shuffle(projects)
context = {
'title': 'Projects | <NAME>',
'projects': projects,
'page': 'projects'
}
return render(request, 'projects/projects.html', context)
@login_required(login_url='login-page')
def clara(request):
log(request, 'Clara')
context = {
'title': 'Clara | <NAME>',
'name': request.user.first_name,
'date': str(datetime.now(pytz.timezone("Asia/Kolkata")).date()),
'time': datetime.now(pytz.timezone("Asia/Kolkata")).strftime("%H:%M")
}
return render(request, 'projects/clara.html', context)
@login_required(login_url='login-page')
def num_Game(request):
    if request.user.score is None:
score = 0
else:
score = request.user.score
if request.method == 'POST':
context = {
'score': score,
'win': True,
'dark': True,
'title': 'Number Game'
}
return render(request, 'projects/num_game.html', context)
else:
log(request, 'Num Game')
context = {
'score': score,
'title': 'Number Game',
'randint': random.randint(0, 100),
'dark': True
}
return render(request, 'projects/num_game.html', context)
@login_required(login_url='login-page')
def num_Game_add(request):
    if request.user.score is None:
score = 0
else:
score = request.user.score
request.user.score = score + 5
request.user.save()
log(request, 'scored')
return redirect('/projects/num_game')
@login_required(login_url='login-page')
def weather(request):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"
}
context = {
'title': 'Weather App',
'dark': True
}
if request.method == 'POST':
def get_weather(city):
try:
url = f"https://www.google.com/search?q=weather+in+{city}"
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
temperature = soup.find('span', attrs={'id': 'wob_ttm'}).text
status = soup.find('span', attrs={'id': 'wob_dc'}).text
location = soup.find('div', attrs={'id': 'wob_loc'}).text
temperature_op = (f"{temperature} °F \n")
status_op = (f"Status - {status} \n")
src = soup.find('img', attrs={'id': 'wob_tci'}).get('src')
day = soup.find('div', attrs={'id': 'wob_dts'}).text
context = {'tmp': temperature_op, 'loc': location,
'sts': status_op, 'day': day, 'src': src, 'dark': True}
except:
context = {'tmp': '', 'loc': 'No Location Found, Try entering your nearest place or city',
'sts': '', 'day': '', 'src': '', 'title': 'Weather App', 'dark': True}
return context
city = request.POST['city']
log(request, f'weather - {city}')
return render(request, 'projects/weather.html', get_weather(city))
log(request, 'weather')
return render(request, 'projects/weather.html', context)
```
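As with the other apps, the URL configuration is missing from the dump; the redirects in the views ('/projects/num_game', '/projects/weather') suggest a layout like the hypothetical `projects/urls.py` below, where the route names are guesses:
```python
# Hypothetical projects/urls.py; the route strings are inferred from the redirects
# used in the views above, the names are guesses.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.projects, name='projects'),
    path('clara', views.clara, name='clara'),
    path('num_game', views.num_Game, name='num-game'),
    path('num_game/add', views.num_Game_add, name='num-game-add'),
    path('weather', views.weather, name='weather'),
]
```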
|
{
"source": "Jerit-Baiju/PyFlit",
"score": 2
}
|
#### File: PyFlit/pyflit/flit.py
```python
class Page:
__index__ = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{{title}}</title>
<!-- ADD YOUR CSS & CDN HERE WHICH IS APPLICABLE TO ALL PAGES -->
<style>
/* {{style}} */
</style>
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet"
integrity="<KEY>" crossorigin="anonymous" />
</head>
<body onload=''>
{{root}}
<script>
// {{script}}
</script>
<!-- ADD YOUR JS & CDN HERE WHICH IS APPLICABLE TO ALL PAGES -->
</body>
</html>"""
def __write_index__(self):
open('index.html', 'w').write(self.__index__)
def __init__(self, name):
self.name = name.capitalize()
self.__errs__ = []
self.title = 'Document'
self.__welcome__ = f"<center><h1 class='fw-light'>PyFlit</h1></center><h2 class='fw-light'>Template : {self.name} <br> Running Successful</h2>"
self.__content__ = ""
try:
            self.__op__ = open("index.html").read()
            if "<title>{{title}}</title>" not in self.__op__:
                self.__write_index__()
            if "/* {{style}} */" not in self.__op__:
                self.__write_index__()
            if "{{root}}" not in self.__op__:
                self.__write_index__()
            if "// {{script}}" not in self.__op__:
                self.__write_index__()
except:
self.__op__ = self.__index__
open('index.html', 'w').write(self.__op__)
def __add_err__(self, err):
__err__ = err.capitalize()
self.__errs__.append(__err__)
def __add__(self, content):
try:
self.__content__ = self.__content__ + content
except:
self.__add_err__('root adding error')
def add_html(self,html):
self.__add__(html)
def add_component(self, component):
if ".html" in component:
component = component.replace('.html', '')
try:
self.__add__(open(f"components/{component}.html", 'r').read())
except:
self.__errs__.append(f'Create {component}.html in components folder')
def add_page(self, page):
if ".html" in page:
page = page.replace('.html', '')
try:
self.__add__(open(f"pages/{page}.html", 'r').read())
except:
self.__errs__.append(
f"Your page not found ({page}.html). Please create your pages in pages folder")
def add_css(self, css_file):
if ".css" in css_file:
css_file = css_file.replace('.css', '')
try:
self.__op__ = self.__op__.replace(
'/* {{style}} */', open(f'static/css/{css_file}.css', 'r').read() + "\n" + "/* {{style}} */")
except:
self.__errs__.append(
f"Your css file is not found ({css_file}.css). Please create your css file in this path => /static/css/{css_file}.css")
def add_js(self, js_file):
if ".js" in js_file:
js_file = js_file.replace('.js', '')
try:
self.__op__ = self.__op__.replace(
'// {{script}}', open(f'static/js/{js_file}.js', 'r').read() + "\n" + "// {{script}}")
except:
self.__errs__.append(f"Your JS file is not found ({js_file}.js). Please create your JS file in this path => /static/js/{js_file}.js")
def add_body_onload(self, func):
self.__op__ = self.__op__.replace("body onload=''", f"body onload='{func}()'")
def add_var(self, var, content):
if f"const {var} = ''" in self.__op__:
content = f"const {var} = '{content}'"
            self.__op__ = self.__op__.replace(f"const {var} = ''", content)
elif f'const {var} = ""' in self.__op__:
content = f'const {var} = "{content}"'
self.__op__ = self.__op__.replace(f'const {var} = ""', content)
else:
self.__errs__.append(f'please add an empty var like this : const {var} = "" ')
def check_err(self):
if self.__errs__ == []:
return self.__op__
else:
return "<h2>" + str(self.__errs__).replace(',', '<br>').replace('[', '').replace(']', '').replace("'", "") + "</h2>"
def __clear__(self):
if self.__content__ == "":
self.__content__ = self.__welcome__
self.__op__ = self.__op__.replace('{{root}}', self.__content__)
self.__op__ = self.__op__.replace('/* {{style}} */', '')
self.__op__ = self.__op__.replace('// {{script}}', '')
def export(self):
self.__op__ = self.__op__.replace('{{title}}',self.title)
self.__clear__()
return self.check_err()
```
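The projects list earlier describes PyFlit as a tool for adding components and pages in Flask, but no usage example is included in this dump. A minimal sketch, assuming the package is importable as `pyflit.flit`, built only from the `Page` methods defined above:
```python
# Minimal usage sketch for pyflit.flit.Page inside a Flask view; the route and the
# HTML content are illustrative, only Page's own methods come from the code above.
from flask import Flask
from pyflit.flit import Page  # assumes the package layout PyFlit/pyflit/flit.py

app = Flask(__name__)

@app.route('/')
def home():
    page = Page('home')                 # opens index.html, or writes the default template
    page.title = 'Home'                 # substituted into {{title}} by export()
    page.add_html('<h1 class="fw-light">Hello from PyFlit</h1>')
    # page.add_component('navbar') would pull in components/navbar.html if it exists
    return page.export()                # rendered page, or the collected error messages

if __name__ == '__main__':
    app.run(debug=True)
```
Since export() consumes the {{root}}, {{style}} and {{script}} placeholders, a fresh Page should be built on every request rather than reused.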
|
{
"source": "jeritgeorge/ros_robodk_post_processors",
"score": 2
}
|
#### File: ros_robodk_post_processors/robodk_post_processors/ABB_RAPID_IRC5.py
```python
from robodk import *
ONETAB = ' ' # one tab equals 4 spaces
# Define a custom header (variable declaration)
CUSTOM_HEADER = '''
! -------------------------------
! Define your variables here
! ...
'''
# Define your custom programs (predefined functions, not altered by RoboDK):
CUSTOM_FUNCTIONS = '''
! -------------------------------
! Define your functions here
! ...
'''
# ----------------------------------------------------
def pose_2_str(pose):
"""Prints a pose target"""
[x,y,z,q1,q2,q3,q4] = Pose_2_ABB(pose)
return ('[%.3f, %.3f, %.3f],[%.8f, %.8f, %.8f, %.8f]' % (x,y,z,q1,q2,q3,q4))
def angles_2_str(angles):
"""Prints a joint target"""
njoints = len(angles)
# extend the joint target if the robot has less than 6 degrees of freedom
if njoints < 6:
angles.extend([0]*(6-njoints))
# Generate a string like:
# [10,20,30,40,50,60]
# with up to 6 decimals
return '[%s]' % (','.join(format(ji, ".6f") for ji in angles[0:6]))
def extaxes_2_str(angles):
"""Prints the external axes, if any"""
    # return a placeholder string if there are no external axes beyond the 6 robot joints
njoints = len(angles)
if njoints <= 6:
# should print 9E9 for unset external axes
# [9E+09,9E+09,9E+09,9E+09,9E+09,9E+09]
return '[9E+09,9E+09,9E+09,9E+09,9E+09,9E+09]'
extaxes_str = (','.join(format(ji, ".6f") for ji in angles[6:njoints]))
if njoints < 12:
extaxes_str = extaxes_str + ',' + ','.join(['9E9']*(12-njoints))
# If angles is [j1,j2,j3,j4,j5,j6,10,20], it will generate a string like:
# [10,20,9E9,9E9,9E9,9E9]
# with up to 6 decimals
return '[%s]' % extaxes_str
# ----------------------------------------------------
# Object class that handles the robot instructions/syntax
class RobotPost(object):
"""Robot post object"""
MAX_LINES_X_PROG = 5000 # maximum number of lines per program. It will then generate multiple "pages (files)"
INCLUDE_SUB_PROGRAMS = True
PROG_EXT = 'mod' # set the program extension
# other variables
ROBOT_POST = 'ABB IRC5 including arc welding and 3D printing options'
ROBOT_NAME = 'unknown'
PROG_NAMES = []
PROG_FILES = []
PROG_LIST = []
PROG_CALLS = []
PROG_CALLS_LIST = []
nLines = 0
nProgs = 0
PROG = []
TAB = ''
LOG = ''
SPEEDDATA = 'rdkSpeed'
ZONEDATA = 'z1'
TOOLDATA = 'rdkTool'
WOBJDATA = 'rdkWObj'
nAxes = 6
CLAD_ON = False
CLAD_DATA = 'clad1'
ARC_ON = False
ARC_WELDDATA = 'weld1'
ARC_WEAVEDATA = 'weave1'
ARC_SEAMDATA = 'seam1'
NEW_E_LENGTH = None
def __init__(self, robotpost=None, robotname=None, robot_axes = 6, **kwargs):
self.ROBOT_POST = robotpost
self.ROBOT_NAME = robotname
self.PROG = []
self.LOG = ''
self.nAxes = robot_axes
for k,v in kwargs.items():
if k == 'lines_x_prog':
self.MAX_LINES_X_PROG = v
def ProgStart(self, progname, new_page = False):
progname_i = progname
nPages = len(self.PROG_LIST)
if new_page:
if nPages == 0:
progname_i = progname
else:
progname_i = "%s%i" % (self.PROG_NAME, nPages)
else:
self.nProgs = self.nProgs + 1
if self.nProgs == 1:# and not self.INCLUDE_SUB_PROGRAMS:
self.PROG_NAME = progname
#self.nProgs = self.nProgs + 1
if new_page or not self.INCLUDE_SUB_PROGRAMS or self.nProgs == 1:
# new file!
self.PROG_NAMES.append(progname_i)
self.TAB = ''
self.addline('%%%')
self.addline(' VERSION:1')
self.addline(' LANGUAGE:ENGLISH')
self.addline('%%%')
self.addline('')
self.addline('MODULE MOD_%s' % progname_i)
self.TAB = ONETAB
if self.nProgs == 1 and nPages == 0:
self.TAB = ONETAB
self.addline('')
self.addline('LOCAL PERS tooldata %s := [TRUE,[[0,0,0],[1,0,0,0]],[2,[0,0,15],[1,0,0,0],0,0,0.005]];' % self.TOOLDATA)
self.addline('LOCAL PERS wobjdata %s := [FALSE, TRUE, "", [[0,0,0],[1,0,0,0]],[[0,0,0],[1,0,0,0]]];' % self.WOBJDATA)
self.addline('VAR speeddata %s := [250,500,5000,1000]; ! set default speed' % self.SPEEDDATA)
self.addcode(CUSTOM_HEADER)
self.addcode(CUSTOM_FUNCTIONS)
self.TAB = ONETAB
self.addline('')
self.addline('PROC %s()' % progname_i)
self.TAB = ONETAB + ONETAB # instructions need two tabs
self.addline('ConfJ \On;')
self.addline('ConfL \Off;')
def ProgFinish(self, progname, new_page = False):
self.TAB = ONETAB
self.PROG += [ONETAB + 'ENDPROC\n']
if new_page or not self.INCLUDE_SUB_PROGRAMS:# or self.nProgs == 1:
self.PROG += ['ENDMODULE']
self.PROG_LIST.append(self.PROG)
self.PROG_CALLS_LIST.append(self.PROG_CALLS)
self.PROG = []
self.PROG_CALLS = []
self.nLines = 0
#elif self.nProgs <= 1 or self.INCLUDE_SUB_PROGRAMS:
# self.PROG += ['ENDMODULE']
def progsave(self, folder, progname, ask_user = False, show_result = False):
progname = progname + '.' + self.PROG_EXT
if ask_user or not DirExists(folder):
filesave = getSaveFile(folder, progname, 'Save program as...')
if filesave is not None:
filesave = filesave.name
else:
return
else:
filesave = folder + '/' + progname
fid = open(filesave, "w")
for line in self.PROG:
fid.write(line)
fid.write('\n') # print new line
fid.close()
print('SAVED: %s\n' % filesave) # tell RoboDK the path of the saved file
self.PROG_FILES.append(filesave)
# open file with default application
if show_result:
if type(show_result) is str:
# Open file with provided application
import subprocess
p = subprocess.Popen([show_result, filesave])
elif type(show_result) is list:
import subprocess
p = subprocess.Popen(show_result + [filesave])
else:
# open file with default application
import os
os.startfile(filesave)
if len(self.LOG) > 0:
mbox('Program generation LOG:\n\n' + self.LOG)
def ProgSave(self, folder, progname, ask_user = False, show_result = False):
if len(self.PROG_LIST) >= 1:
if self.nLines > 0:
self.PROG += ['ENDMODULE']
self.PROG_LIST.append(self.PROG)
self.PROG_CALLS_LIST.append(self.PROG_CALLS)
self.PROG = []
self.PROG_CALLS = []
self.nLines = 0
npages = len(self.PROG_LIST)
progname_main = progname + "Main"
mainprog = []
mainprog += ["MODULE MOD_%s\n" % progname_main]
mainprog += [ONETAB+"!PROC Main()"]
mainprog += [ONETAB+"PROC %s()" % progname_main]
mainprog += [ONETAB+ONETAB+"! This main program needs to be executed to run: %s\n" % progname]
for i in range(npages):
mainprog += [ONETAB+ONETAB+"%s()" % self.PROG_NAMES[i]]
mainprog += ["\n"+ONETAB+"ENDPROC\n"]
mainprog += ["ENDMODULE"]
self.PROG = mainprog
self.progsave(folder, progname_main, ask_user, show_result)
self.LOG = ''
if len(self.PROG_FILES) == 0:
# cancelled by user
return
first_file = self.PROG_FILES[0]
folder_user = getFileDir(first_file)
# progname_user = getFileName(self.FILE_SAVED)
for i in range(npages):
self.PROG = self.PROG_LIST[i]
self.PROG_CALLS = self.PROG_CALLS_LIST[i]
self.progsave(folder_user, self.PROG_NAMES[i], False, show_result)
else:
self.PROG += ['ENDMODULE'] # Very important!
self.progsave(folder, progname, ask_user, show_result)
def ProgSendRobot(self, robot_ip, remote_path, ftp_user, ftp_pass):
"""Send a program to the robot using the provided parameters. This method is executed right after ProgSave if we selected the option "Send Program to Robot".
The connection parameters must be provided in the robot connection menu of RoboDK"""
UploadFTP(self.PROG_FILES, robot_ip, remote_path, ftp_user, ftp_pass)
def MoveJ(self, pose, joints, conf_RLF=None):
"""Add a joint movement"""
self.addline('MoveAbsJ [%s,%s],%s,%s,%s,\WObj:=%s;' % (angles_2_str(joints), extaxes_2_str(joints), self.SPEEDDATA, self.ZONEDATA, self.TOOLDATA, self.WOBJDATA))
def MoveL(self, pose, joints, conf_RLF=None):
"""Add a linear movement"""
# Control turning arc movement off
if self.ARC_ON and self.NEW_E_LENGTH is None:
self.ARC_ON = False
self.addline('ArcLEnd;')
target = ''
if pose is None:
target = 'CalcRobT([%s,%s],%s,\WObj:=%s)' % (angles_2_str(joints), extaxes_2_str(joints), self.TOOLDATA, self.WOBJDATA)
else:
if conf_RLF is None:
conf_RLF = [0,0,0]
cf1 = 0
cf4 = 0
cf6 = 0
if joints is not None and len(joints) >= 6:
cf1 = math.floor(joints[0]/90.0)
cf4 = math.floor(joints[3]/90.0)
cf6 = math.floor(joints[5]/90.0)
[REAR, LOWERARM, FLIP] = conf_RLF
cfx = 4*REAR + 2*LOWERARM + FLIP
target = '[%s,[%i,%i,%i,%i],%s]' % (pose_2_str(pose), cf1, cf4, cf6,cfx, extaxes_2_str(joints))
if self.ARC_ON:
# ArcL p100, v100, seam1, weld5 \Weave:=weave1, z10, gun1;
self.addline('ArcL %s,%s,%s,%s,\Weave:=%s,%s,%s,\WObj:=%s;' % (target, self.SPEEDDATA, self.ARC_SEAMDATA, self.ARC_WELDDATA, self.ARC_WEAVEDATA, self.ZONEDATA, self.TOOLDATA, self.WOBJDATA))
elif self.CLAD_ON:
self.addline('CladL %s,%s,%s,%s,%s,\WObj:=%s;' % (target, self.SPEEDDATA, self.CLAD_DATA, self.ZONEDATA, self.TOOLDATA, self.WOBJDATA))
else:
self.addline('MoveL %s,%s,%s,%s,\WObj:=%s;' % (target, self.SPEEDDATA, self.ZONEDATA, self.TOOLDATA, self.WOBJDATA))
# Modification for Paul
self.NEW_E_LENGTH = None
def MoveC(self, pose1, joints1, pose2, joints2, conf_RLF_1=None, conf_RLF_2=None):
"""Add a circular movement"""
target1 = ''
target2 = ''
if pose1 is None:
target1 = 'CalcRobT([%s,%s], %s \WObj:=%s)' % (angles_2_str(joints1), extaxes_2_str(joints1), self.TOOLDATA, self.WOBJDATA)
else:
if conf_RLF_1 is None:
conf_RLF_1 = [0,0,0]
cf1_1 = 0
cf4_1 = 0
cf6_1 = 0
if joints1 is not None and len(joints1) >= 6:
cf1_1 = math.floor(joints1[0]/90.0)
cf4_1 = math.floor(joints1[3]/90.0)
cf6_1 = math.floor(joints1[5]/90.0)
[REAR, LOWERARM, FLIP] = conf_RLF_1
cfx_1 = 4*REAR + 2*LOWERARM + FLIP
target1 = '[%s,[%i,%i,%i,%i],%s]' % (pose_2_str(pose1), cf1_1, cf4_1, cf6_1,cfx_1, extaxes_2_str(joints1))
if pose2 is None:
target2 = 'CalcRobT([%s,%s],%s,\WObj:=%s)' % (angles_2_str(joints2), extaxes_2_str(joints2), self.TOOLDATA, self.WOBJDATA)
else:
if conf_RLF_2 is None:
conf_RLF_2 = [0,0,0]
cf1_2 = 0
cf4_2 = 0
cf6_2 = 0
if joints2 is not None and len(joints2) >= 6:
cf1_2 = math.floor(joints2[0]/90.0)
cf4_2 = math.floor(joints2[3]/90.0)
cf6_2 = math.floor(joints2[5]/90.0)
[REAR, LOWERARM, FLIP] = conf_RLF_2
cfx_2 = 4*REAR + 2*LOWERARM + FLIP
target2 = '[%s,[%i,%i,%i,%i],%s]' % (pose_2_str(pose2), cf1_2, cf4_2, cf6_2,cfx_2, extaxes_2_str(joints2))
if self.ARC_ON:
# ArcL p100, v100, seam1, weld5 \Weave:=weave1, z10, gun1;
self.addline('ArcC %s,%s,%s,%s,%s,\Weave:=%s,%s,%s,\WObj:=%s;' % (target1, target2, self.SPEEDDATA, self.ARC_SEAMDATA, self.ARC_WELDDATA, self.ARC_WEAVEDATA, self.ZONEDATA, self.TOOLDATA, self.WOBJDATA))
elif self.CLAD_ON:
self.addline('CladC %s,%s,%s,%s,%s,%s,\WObj:=%s;' % (target1, target2, self.SPEEDDATA, self.CLAD_DATA, self.ZONEDATA, self.TOOLDATA, self.WOBJDATA))
else:
self.addline('MoveC %s,%s,%s,%s,%s,\WObj:=%s;' % (target1, target2, self.SPEEDDATA, self.ZONEDATA, self.TOOLDATA, self.WOBJDATA))
def setFrame(self, pose, frame_id=None, frame_name=None):
"""Change the robot reference frame"""
#self.addline('%s := [FALSE, TRUE, "", [%s],[[0,0,0],[1,0,0,0]]];' % (self.WOBJDATA, pose_2_str(pose)))
self.addline('%s.uframe := [%s];' % (self.WOBJDATA, pose_2_str(pose)))
def setTool(self, pose, tool_id=None, tool_name=None):
"""Change the robot TCP"""
#self.addline('%s := [TRUE,[%s],[2,[0,0,15],[1,0,0,0],0,0,0.005]];' % (self.TOOLDATA, pose_2_str(pose)))
self.addline('%s.tframe := [%s];' % (self.TOOLDATA, pose_2_str(pose)))
def Pause(self, time_ms):
"""Pause the robot program"""
if time_ms <= 0:
self.addline('STOP;')
else:
self.addline('WaitTime %.3f;' % (time_ms*0.001))
def setSpeed(self, speed_mms):
"""Changes the robot speed (in mm/s)"""
#self.SPEEDDATA = 'v%i' % speed_mms
self.addline('%s := [%.2f,500,5000,1000];' % (self.SPEEDDATA, speed_mms))
def setAcceleration(self, accel_mmss):
"""Changes the robot acceleration (in mm/s2)"""
self.addlog('setAcceleration is not defined')
def setSpeedJoints(self, speed_degs):
"""Changes the robot joint speed (in deg/s)"""
self.addlog('setSpeedJoints not defined')
def setAccelerationJoints(self, accel_degss):
"""Changes the robot joint acceleration (in deg/s2)"""
self.addlog('setAccelerationJoints not defined')
def setZoneData(self, zone_mm):
"""Changes the zone data approach (makes the movement more smooth)"""
if zone_mm < 0:
self.ZONEDATA = 'fine'
else:
self.ZONEDATA = 'z%i' % zone_mm
def setDO(self, io_var, io_value):
"""Sets a variable (output) to a given value"""
if type(io_var) != str: # set default variable name if io_var is a number
io_var = 'D_OUT_%s' % str(io_var)
if type(io_value) != str: # set default variable value if io_value is a number
if io_value > 0:
io_value = '1'
else:
io_value = '0'
# at this point, io_var and io_value must be string values
self.addline('SetDO %s, %s;' % (io_var, io_value))
def waitDI(self, io_var, io_value, timeout_ms=-1):
"""Waits for an input io_var to attain a given value io_value. Optionally, a timeout can be provided."""
if type(io_var) != str: # set default variable name if io_var is a number
io_var = 'D_IN_%s' % str(io_var)
if type(io_value) != str: # set default variable value if io_value is a number
if io_value > 0:
io_value = '1'
else:
io_value = '0'
# at this point, io_var and io_value must be string values
if timeout_ms < 0:
self.addline('WaitDI %s, %s;' % (io_var, io_value))
else:
self.addline('WaitDI %s, %s, \MaxTime:=%.1f;' % (io_var, io_value, timeout_ms*0.001))
def RunCode(self, code, is_function_call = False):
"""Adds code or a function call"""
if is_function_call:
code = code.replace(' ','_')
if code.startswith('ArcLStart'):
self.ARC_ON = True
elif code.startswith('ArcLEnd'):
self.ARC_ON = False
elif code.startswith('CladLStart'):
self.CLAD_ON = True
elif code.startswith('CladLEnd'):
self.CLAD_ON = False
elif code.startswith("Extruder("):
self.addline(code + ';')
return
# if the program call is Extruder(123.56), we extract the number as a string and convert it to a number
self.NEW_E_LENGTH = float(code[9:-1]) # it needs to retrieve the extruder length from the program call
# Parse the Extruder into ArcLStart
if not self.ARC_ON:
# Generate ArcLStart if we are not welding yet
self.ARC_ON = True
self.addline('ArcLStart;')
# Do not generate the program call
return
self.addline(code + ';')
else:
if code.startswith('END') or code.startswith('ELSEIF'):
# remove tab after ENDWHILE or ENDIF
self.TAB = self.TAB[:-len(ONETAB)]
self.addline(code.replace('\t',' '))# replace each tab by 2 spaces
if code.startswith('IF ') or code.startswith('ELSEIF ') or code.startswith('WHILE '):
# add tab (one tab = two spaces)
self.TAB = self.TAB + ONETAB
def RunMessage(self, message, iscomment = False):
"""Add a joint movement"""
if iscomment:
self.addline('! ' + message)
else:
self.addline('TPWrite "%s";' % message)
# ------------------ private ----------------------
def addline(self, newline):
"""Add a program line"""
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
if self.nLines > self.MAX_LINES_X_PROG:
self.nLines = 0
self.ProgFinish(self.PROG_NAME, True)
self.ProgStart(self.PROG_NAME, True)
self.PROG += [self.TAB + newline]
self.nLines = self.nLines + 1
def addlog(self, newline):
"""Add a log message"""
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
self.LOG = self.LOG + newline + '\n'
def addcode(self, code):
"""Adds custom code, such as a custom header"""
self.PROG += [code]
# -------------------------------------------------
# ------------ For testing purposes ---------------
def Pose(xyzrpw):
[x,y,z,r,p,w] = xyzrpw
a = r*math.pi/180
b = p*math.pi/180
c = w*math.pi/180
ca = math.cos(a)
sa = math.sin(a)
cb = math.cos(b)
sb = math.sin(b)
cc = math.cos(c)
sc = math.sin(c)
return Mat([[cb*ca, ca*sc*sb - cc*sa, sc*sa + cc*ca*sb, x],[cb*sa, cc*ca + sc*sb*sa, cc*sb*sa - ca*sc, y],[-sb, cb*sc, cc*cb, z],[0,0,0,1]])
def test_post():
"""Test the post with a basic program"""
robot = RobotPost(r'ABB_RAPID_IRC5', r'ABB IRB 6700-155/2.85', 6, axes_type=['R','R','R','R','R','R'])
robot.ProgStart(r'Prog1')
robot.RunMessage(r'Program generated by RoboDK 3.1.5 for ABB IRB 6700-155/2.85 on 18/05/2017 11:02:41', True)
robot.RunMessage(r'Using nominal kinematics.', True)
robot.setFrame(Pose([0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000]),-1,r'ABB IRB 6700-155/2.85 Base')
robot.setTool(Pose([380.000000, 0.000000, 200.000000, 0.000000, 90.000000, 0.000000]),1,r'Tool 1')
robot.setSpeed(2000.000)
robot.MoveJ(Pose([2103.102861, 0.000000, 1955.294643, -180.000000, -3.591795, -180.000000]), [0.00000, 3.93969, -14.73451, 0.00000, 14.38662, -0.00000], [0.0, 0.0, 0.0])
robot.MoveJ(Pose([2065.661612, 700.455189, 1358.819971, 180.000000, -3.591795, -180.000000]), [22.50953, 5.58534, 8.15717, 67.51143, -24.42689, -64.06258], [0.0, 0.0, 1.0])
robot.Pause(500.0)
robot.setSpeed(100.000)
robot.RunCode(r'ArcLStart', True)
robot.MoveL(Pose([2065.661612, 1074.197508, 1358.819971, 149.453057, -3.094347, -178.175378]), [36.19352, 22.86988, -12.37860, 88.83085, -66.57439, -81.72795], [0.0, 0.0, 1.0])
robot.MoveC(Pose([2468.239418, 1130.614560, 1333.549802, -180.000000, -3.591795, -180.000000]), [28.37934, 35.45210, -28.96667, 85.54799, -28.41204, -83.00289], Pose([2457.128674, 797.241647, 1156.545094, 180.000000, -37.427062, -180.000000]), [18.58928, 43.77805, -40.05410, 155.58093, -37.76022, -148.70252], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0])
robot.MoveL(Pose([2457.128674, 797.241647, 1156.545094, 180.000000, -37.427062, -180.000000]), [18.58928, 43.77805, -40.05410, 155.58093, -37.76022, -148.70252], [0.0, 0.0, 1.0])
robot.MoveL(Pose([2469.684137, 397.051453, 1356.565545, -180.000000, -3.591795, -180.000000]), [10.73523, 21.17902, -10.22963, 56.13802, -12.93695, -54.77268], [0.0, 0.0, 1.0])
robot.MoveL(Pose([2494.452316, 404.343933, 1751.146172, -180.000000, -3.591795, -180.000000]), [10.80299, 25.05092, -31.54821, 132.79244, -14.76878, -133.06820], [0.0, 0.0, 1.0])
robot.MoveL(Pose([2494.452316, 834.649436, 1751.146172, -180.000000, -3.591795, -180.000000]), [21.49850, 33.45974, -43.37980, 121.21995, -25.32130, -122.42907], [0.0, 0.0, 1.0])
robot.setZoneData(5.000)
robot.MoveL(Pose([2147.781731, 834.649436, 1772.906995, -180.000000, -3.591795, -180.000000]), [25.21677, 13.65153, -17.95808, 107.03387, -26.40518, -107.19412], [0.0, 0.0, 1.0])
robot.MoveL(Pose([2147.781731, 375.769504, 1772.906995, -180.000000, -3.591795, -180.000000]), [11.97030, 5.74930, -8.96838, 119.55454, -13.76610, -119.51539], [0.0, 0.0, 1.0])
robot.MoveL(Pose([2147.781731, 61.363728, 1772.906995, -180.000000, -3.591795, -180.000000]), [1.98292, 3.75693, -6.84136, -16.54793, 6.96416, 16.55673], [0.0, 0.0, 0.0])
robot.RunCode(r'ArcLEnd', True)
robot.MoveL(Pose([2147.781731, 275.581430, 1772.906995, -180.000000, -3.591795, -180.000000]), [8.83799, 4.80606, -7.95436, 127.27676, -11.11070, -127.24243], [0.0, 0.0, 1.0])
robot.ProgFinish(r'Prog1')
for line in robot.PROG:
print(line)
#print(robot.PROG)
if len(robot.LOG) > 0:
mbox('Program generation LOG:\n\n' + robot.LOG)
input("Press Enter to close...")
if __name__ == "__main__":
"""Function to call when the module is executed by itself: test"""
test_post()
```
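`angles_2_str` and `extaxes_2_str` depend only on string formatting, so their output can be checked without RoboDK installed; the snippet below copies the two helpers verbatim from the module above and feeds them an arbitrary joint list (6 robot axes plus one external axis):
```python
# Standalone check of the RAPID joint-target formatting; the two helpers are copied
# verbatim from the module above so the block runs without robodk installed, and the
# joint values are arbitrary.
def angles_2_str(angles):
    """Prints a joint target"""
    njoints = len(angles)
    if njoints < 6:
        angles.extend([0]*(6-njoints))
    return '[%s]' % (','.join(format(ji, ".6f") for ji in angles[0:6]))

def extaxes_2_str(angles):
    """Prints the external axes, if any"""
    njoints = len(angles)
    if njoints <= 6:
        return '[9E+09,9E+09,9E+09,9E+09,9E+09,9E+09]'
    extaxes_str = (','.join(format(ji, ".6f") for ji in angles[6:njoints]))
    if njoints < 12:
        extaxes_str = extaxes_str + ',' + ','.join(['9E9']*(12-njoints))
    return '[%s]' % extaxes_str

joints = [10, 20, 30, 40, 50, 60, 125.5]
print(angles_2_str(joints))   # [10.000000,20.000000,30.000000,40.000000,50.000000,60.000000]
print(extaxes_2_str(joints))  # [125.500000,9E9,9E9,9E9,9E9,9E9]
```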
#### File: ros_robodk_post_processors/robodk_post_processors/CLOOS.py
```python
def get_safe_name(progname, max_chars = 7):
"""Get a safe program name"""
# Remove special characters
for c in r'-[]/\;,><&*:%=+@!#^()|?^':
progname = progname.replace(c,'')
# Set a program name by default:
if len(progname) <= 0:
progname = 'Program'
# Force the program to start with a letter (not a number)
if progname[0].isdigit():
progname = 'P' + progname
# Set the maximum size of a program (number of characters)
if len(progname) > max_chars:
progname = progname[:max_chars]
return progname
# ----------------------------------------------------
# Import RoboDK tools
from robodk import *
import sys
# ----------------------------------------------------
# Object class that handles the robot instructions/syntax
class RobotPost(object):
"""Robot post object defined for Motoman robots"""
# Enter the robot model:
PATH_MAKE_SP = 'C:/Program Files (x86)/CLOOS/'
ROBOT_MODEL = '310'
SERIAL_NR = '3511462'
PROG_EXT = 'txt' # set the program extension
    MAX_LINES_X_PROG = 5000 # maximum number of lines per program. It will then generate multiple "pages (files)". This can be overridden by RoboDK settings.
SPEED_MMS = 50 # Default speed in mm/s
INCLUDE_SUB_PROGRAMS = True # Generate sub programs
PULSES_ZERO = 4194304 # zero position for pulses
# Pulses per degree (provide these in the robot parameters menu: Double click the motoman robot in RoboDK, select "Parameters"
PULSES_X_DEG = [1,1,1,1,1,1]
# PROG specific variables:
LINE_COUNT = 0 # Count the number of instructions (limited by MAX_LINES_X_PROG)
P_COUNT = 0 # Count the number of P targets in one file
    C_COUNT = 0 # Count the number of C targets in one file
nProgs = 0 # Count the number of programs and sub programs
# other variables
ROBOT_POST = ''
ROBOT_NAME = ''
PROG_FILES = [] # List of Program files to be uploaded through FTP
PROG_NAMES = [] # List of PROG NAMES
PROG_LIST = [] # List of PROG
PROG_TARGETS_LIST = [] # List of PROG
PROG_NAME = 'unknown' # Original name of the current program (example: ProgA)
PROG_NAME_CURRENT = 'unknown' # Auto generated name (different from PROG_NAME if we have more than 1 page per program. Example: ProgA2)
nPages = 0 # Count the number of pages
PROG_NAMES_MAIN = [] # List of programs called by a main program due to splitting
PROG = [] # Save the program lines
PROG_TARGETS = [] # Save the program lines (targets section)
LOG = '' # Save a log
nAxes = 6 # Important: This is usually provided by RoboDK automatically. Otherwise, override the __init__ procedure.
AXES_TYPE = ['R','R','R','R','R','R'] # Important: This is usually set up by RoboDK automatically. Otherwise, override the __init__ procedure.
# 'R' for rotative axis, 'L' for linear axis, 'T' for external linear axis (linear track), 'J' for external rotative axis (turntable)
#AXES_TYPE = ['R','R','R','R','R','R','T','J','J'] #example of a robot with one external linear track axis and a turntable with 2 rotary axes
AXES_TRACK = []
AXES_TURNTABLE = []
HAS_TRACK = False
HAS_TURNTABLE = False
# Specific to ARC welding applications
SPEED_BACKUP = None
LAST_POSE = None
POSE_FRAME = eye(4)
POSE_FRAME = eye(4)
def __init__(self, robotpost=None, robotname=None, robot_axes = 6, **kwargs):
self.ROBOT_POST = robotpost
self.ROBOT_NAME = robotname
self.nAxes = robot_axes
self.PROG = []
self.LOG = ''
#for k,v in kwargs.iteritems(): # python2
for k,v in kwargs.items():
if k == 'lines_x_prog':
self.MAX_LINES_X_PROG = v
if k == 'axes_type':
self.AXES_TYPE = v
if k == 'pulses_x_deg':
self.PULSES_X_DEG = v
for i in range(len(self.AXES_TYPE)):
if self.AXES_TYPE[i] == 'T':
self.AXES_TRACK.append(i)
self.HAS_TRACK = True
elif self.AXES_TYPE[i] == 'J':
self.AXES_TURNTABLE.append(i)
self.HAS_TURNTABLE = True
def ProgStart(self, progname, new_page = False):
progname = get_safe_name(progname)
progname_i = progname
if new_page:
#nPages = len(self.PROG_LIST)
if self.nPages == 0:
if len(self.PROG_NAMES_MAIN) > 0:
print("Can't split %s: Two or more programs are split into smaller programs" % progname)
print(self.PROG_NAMES_MAIN)
raise Exception("Only one program at a time can be split into smaller programs")
                self.PROG_NAMES_MAIN.append(self.PROG_NAME) # add the first program in the list to be generated as a subprogram call
self.nPages = self.nPages + 1
self.nPages = self.nPages + 1
progname_i = "%s%i" % (self.PROG_NAME, self.nPages)
self.PROG_NAMES_MAIN.append(progname_i)
else:
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
self.PROG_NAME = progname
self.nProgs = self.nProgs + 1
self.MoveJ(None, [0]*self.nAxes)
#self.PROG_NAMES = []
self.PROG_NAME_CURRENT = progname_i
self.PROG_NAMES.append(progname_i)
def ProgFinish(self, progname, new_page = False):
progname = get_safe_name(progname)
if not new_page:
# Reset page count
self.nPages = 0
header = ''
header += 'RESTART' + '\n'
header += 'LIST 1=(4211,3,0,49,91,20,530,0,0,40,50,0,0,0,0,0,0,3,0,0,3,0)' + '\n'
header += '' + '\n'
header += 'MAIN' + '\n'
header += 'STCP (10,0,4998)' + '\n'
header += 'STOV (-1,0,-1)' + '\n'
header += '$ (1)' + '\n'
header_pkt = ''
header_pkt += '( Robot : %s )' % self.ROBOT_MODEL + '\n'
header_pkt += '( Serial Nr : %s )' % self.SERIAL_NR + '\n'
header_pkt += '( Achszahl : %i )' % self.nAxes + '\n'
header_pkt += '( Resolution: 2:2:2:2:2:2: Konfigend)'
self.PROG.insert(0, header)
self.PROG.append('\nEND')
self.PROG_TARGETS.insert(0, header_pkt)
# Save PROG in PROG_LIST
self.PROG_LIST.append(self.PROG)
self.PROG_TARGETS_LIST.append(self.PROG_TARGETS)
self.PROG = []
self.PROG_TARGETS = []
self.LINE_COUNT = 0
self.P_COUNT = 0
self.C_COUNT = 0
#RESTART
#LIST 1=(4211,3,0,49,91,20,530,0,0,40,50,0,0,0,0,0,0,3,0,0,3,0)
#
#MAIN
#$ (1)
def progsave(self, folder, progname, ask_user = False, show_result = False):
print(folder)
if not folder.endswith('/'):
folder = folder + '/'
progname = progname + '.' + self.PROG_EXT
if ask_user or not DirExists(folder):
filesave = getSaveFile(folder, progname, 'Save program as...')
if filesave is not None:
filesave = filesave.name
else:
return
else:
filesave = folder + progname
# Save TXT file
fid = open(filesave, "w")
for line in self.PROG:
fid.write(line)
fid.write('\n')
fid.close()
# Save PKT file
filesave_pkt = filesave[:-3]+'pkt'
fid2 = open(filesave_pkt, "w")
for line in self.PROG_TARGETS:
fid2.write(line)
fid2.write('\n')
fid2.close()
print('SAVED: %s\n' % filesave) # tell RoboDK the path of the saved file
self.PROG_FILES.append(filesave)
# open file with default application
if show_result:
if type(show_result) is str:
# Open file with provided application
import subprocess
p = subprocess.Popen([show_result, filesave, filesave_pkt])
elif type(show_result) is list:
import subprocess
p = subprocess.Popen(show_result + [filesave])
else:
# open file with default application
import os
os.startfile(filesave)
#if len(self.LOG) > 0:
# mbox('Program generation LOG:\n\n' + self.LOG)
# -------- build with CONVSP ---------
if FileExists(self.PATH_MAKE_SP + 'CONVSP.exe'):
filesave_S = filesave[:-4] + 'S'
filesave_P = filesave[:-4] + 'P'
print("POPUP: Compiling S file with CONVSP.exe: %s..." % progname)
sys.stdout.flush()
import subprocess
command_list = []
command_list.append([self.PATH_MAKE_SP + 'CONVSP', filesave.replace('/','\\'), filesave_S.replace('/','\\')])
command_list.append([self.PATH_MAKE_SP + 'CONVSP', filesave_pkt.replace('/','\\'), filesave_P.replace('/','\\')])
#output = subprocess.check_output(command)
#self.LOG = output.decode('utf-8')
self.LOG += 'Program generation for: ' + progname + '\n'
for command in command_list:
with subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p:
for line in p.stdout:
line_ok = line.strip()
self.LOG += line_ok + '\n'
print("POPUP: " + line_ok)
sys.stdout.flush()
self.LOG += '\n'
def ProgSave(self, folder, progname, ask_user = False, show_result = False):
progname = get_safe_name(progname)
nfiles = len(self.PROG_LIST)
if nfiles >= 1:
if self.LINE_COUNT > 0:
# Progfinish was not called!
print("Warning: ProgFinish was not called properly")
self.PROG_LIST.append(self.PROG)
self.PROG_TARGETS_LIST.append(self.PROG_TARGETS)
self.PROG_NAMES.append("Unknown")
self.PROG = []
self.PROG_TARGETS = []
self.LINE_COUNT = 0
self.P_COUNT = 0
self.C_COUNT = 0
if len(self.PROG_NAMES_MAIN) > 1:
# Warning: the program might be cut to a maximum number of chars
progname_main = "M_" + self.PROG_NAMES_MAIN[0]
self.INCLUDE_SUB_PROGRAMS = True # Force generation of main program
self.ProgStart(progname_main)
for prog_call in self.PROG_NAMES_MAIN:
self.RunCode(prog_call, True)
self.ProgFinish(progname_main)
# Save the last program added to the PROG_LIST
self.PROG = self.PROG_LIST.pop()
self.PROG_TARGETS = self.PROG_TARGETS_LIST.pop()
progname_last = self.PROG_NAMES.pop()
self.progsave(folder, progname_last, ask_user, show_result)
#-------------------------
#self.LOG = ''
if len(self.PROG_FILES) == 0:
# cancelled by user
return
first_file = self.PROG_FILES[0]
folder_user = getFileDir(first_file)
# progname_user = getFileName(self.FILE_SAVED)
# Generate each program
for i in range(len(self.PROG_LIST)):
self.PROG = self.PROG_LIST[i]
self.PROG_TARGETS = self.PROG_TARGETS_LIST[i]
self.progsave(folder_user, self.PROG_NAMES[i], False, show_result)
elif nfiles == 1:
self.PROG = self.PROG_LIST[0]
self.PROG_TARGETS = self.PROG_TARGETS_LIST[0]
self.progsave(folder, progname, ask_user, show_result)
else:
print("Warning! Program has not been properly finished")
self.progsave(folder, progname, ask_user, show_result)
if show_result and len(self.LOG) > 0:
mbox('Program generation LOG:\n\n' + self.LOG)
def ProgSendRobot(self, robot_ip, remote_path, ftp_user, ftp_pass):
"""Send a program to the robot using the provided parameters. This method is executed right after ProgSave if we selected the option "Send Program to Robot".
The connection parameters must be provided in the robot connection menu of RoboDK"""
UploadFTP(self.PROG_FILES, robot_ip, remote_path, ftp_user, ftp_pass)
def MoveJ(self, pose, joints, conf_RLF=None):
"""Add a joint movement"""
self.page_size_control() # Important to control the maximum lines per program and not save last target on new program
target_id = self.add_target_joints(joints, 1)
self.addline("GP (%i)" % (target_id))
self.LAST_POSE = pose
def MoveL(self, pose, joints, conf_RLF=None):
"""Add a linear movement"""
self.page_size_control() # Important to control the maximum lines per program and not save last target on new program
target_id = -1
if pose is None:
target_id = self.add_target_joints(joints)
else:
target_id = self.add_target_cartesian(self.POSE_FRAME*pose, joints, conf_RLF)
self.addline("GC (%i)" % (target_id))
self.LAST_POSE = pose
def MoveC(self, pose1, joints1, pose2, joints2, conf_RLF_1=None, conf_RLF_2=None):
"""Add a circular movement"""
self.page_size_control() # Important to control the maximum lines per program and not save last target on new program
target_id1 = self.add_target_cartesian(self.POSE_FRAME*pose1, joints1, conf_RLF_1)
target_id2 = self.add_target_cartesian(self.POSE_FRAME*pose2, joints2, conf_RLF_2)
self.addline("ARC (%i,%i,%i)" % (target_id1-1,target_id1,target_id2))
def setFrame(self, pose, frame_id, frame_name):
"""Change the robot reference frame"""
xyzwpr = Pose_2_Motoman(pose)
self.POSE_FRAME = pose
self.RunMessage('Using %s (targets wrt base):' % (str(frame_name)), True)
self.RunMessage('%.1f,%.1f,%.1f,%.1f,%.1f,%.1f' % (xyzwpr[0], xyzwpr[1], xyzwpr[2], xyzwpr[3], xyzwpr[4], xyzwpr[5]), True)
def setTool(self, pose, tool_id, tool_name):
"""Change the robot TCP"""
xyzwpr = Pose_2_Motoman(pose)
self.RunMessage('Tool %s should be close to:' % (str(tool_name)), True)
self.RunMessage('%.1f,%.1f,%.1f,%.1f,%.1f,%.1f' % (xyzwpr[0], xyzwpr[1], xyzwpr[2], xyzwpr[3], xyzwpr[4], xyzwpr[5]), True)
def Pause(self, time_ms):
"""Pause the robot program"""
if time_ms <= 0:
self.addline('PAUSE')
else:
self.addline('WAITM %.0f' % (time_ms))
def setSpeed(self, speed_mms):
"""Changes the robot speed (in mm/s)"""
self.SPEED_MMS = speed_mms
def setAcceleration(self, accel_mmss):
"""Changes the robot acceleration (in mm/s2)"""
self.addlog('Set acceleration not defined')
def setSpeedJoints(self, speed_degs):
"""Changes the robot joint speed (in deg/s)"""
        speedj = max(0.01, min(speed_degs, 100.0)) # Joint speed must be in %
if speedj < 100:
self.STR_VJ = "VJ=%.2f" % speedj
else:
self.STR_VJ = "VJ=%.1f" % speedj
def setAccelerationJoints(self, accel_degss):
"""Changes the robot joint acceleration (in deg/s2)"""
self.addlog('Set acceleration not defined')
def setZoneData(self, zone_mm):
"""Changes the zone data approach (makes the movement more smooth)"""
if zone_mm < 0:
self.STR_PL = ''
else:
self.STR_PL = ' PL=%i' % round(min(zone_mm/25, 4))
def setDO(self, io_var, io_value):
"""Sets a variable (output) to a given value"""
if type(io_var) != str: # set default variable name if io_var is a number
io_var = 'OT#(%s)' % str(io_var)
if type(io_value) != str: # set default variable value if io_value is a number
if io_value > 0:
io_value = 'ON'
else:
io_value = 'OFF'
# at this point, io_var and io_value must be string values
#DOUT OT#(2) ON
self.addline('DOUT %s %s' % (io_var, io_value))
def waitDI(self, io_var, io_value, timeout_ms=-1):
"""Waits for an input io_var to attain a given value io_value. Optionally, a timeout can be provided."""
if type(io_var) != str: # set default variable name if io_var is a number
io_var = 'IN#(%s)' % str(io_var)
if type(io_value) != str: # set default variable value if io_value is a number
if io_value > 0:
io_value = 'ON'
else:
io_value = 'OFF'
# at this point, io_var and io_value must be string values
if timeout_ms < 0:
#WAIT IN#(12)=ON
self.addline('WAIT %s=%s' % (io_var, io_value))
else:
#self.LBL_ID_COUNT = self.LBL_ID_COUNT + 1
self.addline('WAIT %s=%s T=%.2f' % (io_var, io_value, timeout_ms*0.001))
def RunCode(self, code, is_function_call = False):
"""Adds code or a function call"""
if is_function_call:
code = get_safe_name(code)
#if code.startswith("ArcStart"):
#return
# default program call
if code.startswith("Extrude"):
return
            code = code.replace(' ','_')
self.addline('CALL %s' % (code))
else:
#if code.endswith(';'):
#code = code[:-1]
self.addline(code)
def RunMessage(self, message, iscomment = False):
"""Add a joint movement"""
if iscomment:
self.addline("! %s" % message)
else:
self.addline('! MSG %s' % message)
# ------------------ private ----------------------
def page_size_control(self):
if self.LINE_COUNT >= self.MAX_LINES_X_PROG:
self.ProgFinish(self.PROG_NAME, True)
self.ProgStart(self.PROG_NAME, True)
def addline(self, newline, movetype = ' '):
"""Add a program line"""
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
self.page_size_control()
self.LINE_COUNT = self.LINE_COUNT + 1
self.PROG.append(newline)
def addline_targets(self, newline):
"""Add a line at the end of the program (used for targets)"""
self.PROG_TARGETS.append(newline)
def addlog(self, newline):
"""Add a log message"""
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
self.LOG = self.LOG + newline + '\n'
# ------------------ targets ----------------------
def add_target_joints(self, joints, interpolation = 0):
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
self.C_COUNT = self.C_COUNT + 1
cid = self.C_COUNT
str_pulses=[]
for i in range(len(joints)):
str_pulses.append('%010i' % round(self.PULSES_ZERO + joints[i] * self.PULSES_X_DEG[i]))
speed = self.SPEED_MMS
outputs = 0
self.addline_targets('%05i,%05i,%05i,%05i,' % (cid, speed, interpolation, outputs) + ','.join(str_pulses))
return cid
#cid,speed,0,0,
#00000,00000,00000,00000,004194304,004194304,004194304,004194304,004194304,004194304
#00001,00050,00000,00001,004294285,005054619,004652139,004286872,003928097,004116772
#00002,00050,00000,00000,004321619,005055554,004649218,004312598,003928967,004111329
#00003,00050,00000,00000,004346289,005060069,004635445,004338196,003934233,004105402
def add_target_cartesian(self, pose, joints, conf_RLF):
if self.nProgs > 1 and not self.INCLUDE_SUB_PROGRAMS:
return
return self.add_target_joints(joints)
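        # NOTE: the Cartesian branch below is unreachable because of the early return
        # above; this post currently exports every target as joint pulse records.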
xyzwpr = Pose_2_Motoman(pose)
if conf_RLF is None:
conf_RLF = [0,0,0]
turns = [0,0,0]
if len(joints) >= 6:
turnJ4 = (joints[3]+180)//360
turnJ6 = (joints[5]+180)//360
turnJ1 = (joints[0]+180)//360
turns = [turnJ4, turnJ6, turnJ1]
confdata = '%i,%i,%i,%i,%i,%i,0,0' % tuple(conf_RLF[:3] + turns[:3])
self.C_COUNT = self.C_COUNT + 1
cid = self.C_COUNT
self.addline_targets('%i,' % cid + '%.3f,%.3f,%.3f,%.2f,%.2f,%.2f' % tuple(xyzwpr))
return cid
# -------------------------------------------------
# ------------ For testing purposes ---------------
def Pose(xyzrpw):
[x,y,z,r,p,w] = xyzrpw
a = r*math.pi/180
b = p*math.pi/180
c = w*math.pi/180
ca = math.cos(a)
sa = math.sin(a)
cb = math.cos(b)
sb = math.sin(b)
cc = math.cos(c)
sc = math.sin(c)
return Mat([[cb*ca, ca*sc*sb - cc*sa, sc*sa + cc*ca*sb, x],[cb*sa, cc*ca + sc*sb*sa, cc*sb*sa - ca*sc, y],[-sb, cb*sc, cc*cb, z],[0,0,0,1]])
def test_post():
"""Test the post with a basic program"""
robot = RobotPost('CLOOS test', 'CLOOS robot robot', 6)
robot.ProgStart("Program")
robot.RunMessage("Program generated by RoboDK", True)
robot.setFrame(Pose([807.766544, -963.699898, 41.478944, 0, 0, 0]), None, 0)
robot.setTool(Pose([62.5, -108.253175, 100, -60, 90, 0]), None, 0)
robot.MoveJ(Pose([200, 200, 500, 180, 0, 180]), [-46.18419, -6.77518, -20.54925, 71.38674, 49.58727, -302.54752] )
robot.MoveL(Pose([200, 250, 348.734575, 180, 0, -150]), [-41.62707, -8.89064, -30.01809, 60.62329, 49.66749, -258.98418] )
robot.MoveL(Pose([200, 200, 262.132034, 180, 0, -150]), [-43.73892, -3.91728, -35.77935, 58.57566, 54.11615, -253.81122] )
robot.RunMessage("Setting air valve 1 on")
robot.RunCode("TCP_On", True)
robot.Pause(1000)
robot.MoveL(Pose([200, 250, 348.734575, 180, 0, -150]), [-41.62707, -8.89064, -30.01809, 60.62329, 49.66749, -258.98418] )
robot.MoveL(Pose([250, 300, 278.023897, 180, 0, -150]), [-37.52588, -6.32628, -34.59693, 53.52525, 49.24426, -251.44677] )
robot.MoveL(Pose([250, 250, 191.421356, 180, 0, -150]), [-39.75778, -1.04537, -40.37883, 52.09118, 54.15317, -246.94403] )
robot.RunMessage("Setting air valve off")
robot.RunCode("TCP_Off", True)
robot.Pause(1000)
robot.MoveL(Pose([250, 300, 278.023897, 180, 0, -150]), [-37.52588, -6.32628, -34.59693, 53.52525, 49.24426, -251.44677] )
robot.MoveL(Pose([250, 200, 278.023897, 180, 0, -150]), [-41.85389, -1.95619, -34.89154, 57.43912, 52.34162, -253.73403] )
robot.MoveL(Pose([250, 150, 191.421356, 180, 0, -150]), [-43.82111, 3.29703, -40.29493, 56.02402, 56.61169, -249.23532] )
robot.ProgFinish("Program")
# robot.ProgSave(".","Program",True)
robot.PROG = robot.PROG_LIST.pop()
print("\n\n--------------- TXT file ----------------\n")
for line in robot.PROG:
print(line)
print("\n\n--------------- PKT file ----------------\n")
robot.PROG_TARGETS = robot.PROG_TARGETS_LIST.pop()
for line in robot.PROG_TARGETS:
print(line)
if len(robot.LOG) > 0:
mbox('Program generation LOG:\n\n' + robot.LOG)
input("Press Enter to close...")
if __name__ == "__main__":
"""Function to call when the module is executed by itself: test"""
test_post()
```
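`add_target_joints` above writes each joint target into the PKT file as pulse counts offset by `PULSES_ZERO`. A standalone sketch of that encoding, using the class defaults (the pulses-per-degree values are placeholders in the source) and arbitrary joint values, speed and counters:
```python
# Standalone reproduction of the PKT record written by add_target_joints above;
# PULSES_ZERO and the '%05i'/'%010i' formats come from the class, the pulses-per-degree
# list is the placeholder default from the source, and the joint values are arbitrary.
PULSES_ZERO = 4194304
PULSES_X_DEG = [1, 1, 1, 1, 1, 1]   # placeholders; a real robot needs calibrated values

def pkt_line(cid, joints, speed_mms=50, interpolation=0, outputs=0):
    pulses = ['%010i' % round(PULSES_ZERO + joints[i] * PULSES_X_DEG[i])
              for i in range(len(joints))]
    return '%05i,%05i,%05i,%05i,' % (cid, speed_mms, interpolation, outputs) + ','.join(pulses)

print(pkt_line(1, [0, 0, 0, 0, 0, 0]))
# 00001,00050,00000,00000,0004194304,0004194304,0004194304,0004194304,0004194304,0004194304
print(pkt_line(2, [10.5, -20, 0, 0, 0, 0]))
# 00002,00050,00000,00000,0004194314,0004194284,0004194304,0004194304,0004194304,0004194304
```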
#### File: ros_robodk_post_processors/test/services_tests.py
```python
package = 'ros_robodk_post_processors'
service_base_name = "/robodk_post_processors/"
from ros_robodk_post_processors.srv import *
import geometry_msgs.msg
import rospy
import unittest
def checkService(service_name):
service_available = False
try:
rospy.wait_for_service(service_name, 1)
service_available = True
except:
rospy.logerr("Could not connect to service %s" % service_name)
return service_available
class ServicesTests(unittest.TestCase):
def testWaitForServices(self):
services = ["move_c", "move_j", "move_l", "pause", "prog_finish", "prog_save", "prog_send_robot", "prog_start", "run_code", "run_message", "set_do", "set_go", "set_frame", "set_speed", "set_speed_joints", "set_tool", "set_zone_data", "wait_di"]
for name in services:
service = service_base_name + name
self.assertEquals(checkService(service), True, "Service %s is not available!" % service)
def testFanucProgram(self):
rospy.wait_for_service(service_base_name + "prog_start")
rospy.wait_for_service(service_base_name + "set_tool")
rospy.wait_for_service(service_base_name + "set_frame")
rospy.wait_for_service(service_base_name + "move_c")
rospy.wait_for_service(service_base_name + "set_speed_joints")
rospy.wait_for_service(service_base_name + "move_j")
rospy.wait_for_service(service_base_name + "set_zone_data")
rospy.wait_for_service(service_base_name + "set_speed")
rospy.wait_for_service(service_base_name + "move_l")
rospy.wait_for_service(service_base_name + "run_message")
rospy.wait_for_service(service_base_name + "pause")
rospy.wait_for_service(service_base_name + "set_do")
rospy.wait_for_service(service_base_name + "set_go")
rospy.wait_for_service(service_base_name + "run_message")
rospy.wait_for_service(service_base_name + "wait_di")
rospy.wait_for_service(service_base_name + "run_code")
rospy.wait_for_service(service_base_name + "prog_finish")
rospy.wait_for_service(service_base_name + "prog_save")
#------prog_start-----
service = service_base_name + "prog_start"
srv = rospy.ServiceProxy(service, ProgStart)
success = False
try:
resp = srv("Fanuc_R30iA", "test", "")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_tool-----
service = service_base_name + "set_tool"
srv = rospy.ServiceProxy(service, SetTool)
success = False
try:
resp = srv(0, "tool", geometry_msgs.msg.Pose(geometry_msgs.msg.Point(0, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)))
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_frame-----
service = service_base_name + "set_frame"
srv = rospy.ServiceProxy(service, SetFrame)
success = False
try:
resp = srv(1, "frame", geometry_msgs.msg.Pose(geometry_msgs.msg.Point(0, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)))
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_c-----
# service = service_base_name + "move_c"
# srv = rospy.ServiceProxy(service, MoveC)
# success = False
# try:
# resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0],
# geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1.5, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0])
# success = True
# except rospy.ServiceException as exc:
# rospy.logerr("Service did not process request: " + str(exc))
# self.assertEquals(success, True, "Failed to call service %s" % srv)
# self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_speed_joints-----
service = service_base_name + "set_speed_joints"
srv = rospy.ServiceProxy(service, SetSpeedJoints)
success = False
try:
resp = srv(20.0) #takes in degrees/sec, inserts % speed for joint moves
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_j-----
service = service_base_name + "move_j"
srv = rospy.ServiceProxy(service, MoveJ)
success = False
try:
resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_zone_data-----
service = service_base_name + "set_zone_data"
srv = rospy.ServiceProxy(service, SetZoneData)
success = False
try:
resp = srv(2.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_speed-----
service = service_base_name + "set_speed"
srv = rospy.ServiceProxy(service, SetSpeed)
success = False
try:
resp = srv(20.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_l-----
service = service_base_name + "move_l"
srv = rospy.ServiceProxy(service, MoveL)
success = False
try:
resp = srv(geometry_msgs.msg.Pose(geometry_msgs.msg.Point(1, 0.5, 0), geometry_msgs.msg.Quaternion(0, 0, 0, 1)),
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------move_l-----joint position
service = service_base_name + "move_l"
srv = rospy.ServiceProxy(service, MoveL)
success = False
try:
resp = srv(None,
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0])
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------run_message-----
service = service_base_name + "run_message"
srv = rospy.ServiceProxy(service, RunMessage)
success = False
try:
resp = srv("A run message")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------pause-----
service = service_base_name + "pause"
srv = rospy.ServiceProxy(service, Pause)
success = False
try:
resp = srv(1.0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_do-----
service = service_base_name + "set_do"
srv = rospy.ServiceProxy(service, SetDO)
success = False
try:
resp = srv('1', True)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------set_go-----
service = service_base_name + "set_go"
srv = rospy.ServiceProxy(service, SetGO)
success = False
try:
resp = srv('12', '1')
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------wait_di-----
service = service_base_name + "wait_di"
srv = rospy.ServiceProxy(service, WaitDI)
success = False
try:
resp = srv('2', True, 0)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------run_code-----
service = service_base_name + "run_code"
srv = rospy.ServiceProxy(service, RunCode)
success = False
try:
resp = srv("MY_FUNC", False)
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------prog_finish-----
service = service_base_name + "prog_finish"
srv = rospy.ServiceProxy(service, ProgFinish)
success = False
try:
resp = srv("test")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
#------prog_save-----
service = service_base_name + "prog_save"
srv = rospy.ServiceProxy(service, ProgSave)
success = False
try:
resp = srv("test", "/home/controls/catkin_ws/src/ros_robodk_post_processors")
success = True
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
self.assertEquals(success, True, "Failed to call service %s" % srv)
self.assertEquals(len(resp.error), 0, "Service %s failed with an error: %s" % (srv, resp.error))
if __name__ == '__main__':
import rostest
rostest.rosrun(package, "services_tests", ServicesTests, sys.argv)
```
|
{
"source": "jerith/pyconza2017-surviving-a-legacy-codebase",
"score": 2
}
|
#### File: code/breaking_dependencies/seams_tasks.py
```python
@app.task(time_limit=60)
def updateServer(xenserver):
# $HL
session = getSession(
xenserver.hostname, xenserver.username, xenserver.password)
# $HL
# ... Dozens of lines of code to update a server ...
for vmref, vmobj in allvms.items():
# $HL
updateVm.delay(xenserver, vmref, vmobj)
# $HL
# ... Dozens more lines of server-related code ...
@app.task(time_limit=60)
def updateVm(xenserver, vmref, vmobj):
# ... A few lines of check and setup code ...
# $HL
session = getSession(
xenserver.hostname, xenserver.username, xenserver.password)
# $HL
# ... Dozens of lines of code to update the VM ...
```
#### File: code/example_create_vm/before.py
```python
@app.task(time_limit=120)
def create_vm(vm, xenserver, template, name, **others):
session = getSession(
xenserver.hostname, xenserver.username, xenserver.password)
storage = session.xenapi.SR.get_all()
# ... Another 180 lines of VM creation using the session ...
```
#### File: code/example_create_vm/getSession.py
```python
def getSession(hostname, username, password):
session = xenapi.Session('https://%s:443/' % (hostname))
session.xenapi.login_with_password(username, password)
return session
```
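For contrast with `before.py`, here is a minimal hedged sketch (not the talk's actual refactored code) of the seam idea the highlighted `getSession` calls point at: let the task accept an already-created session so tests can inject a fake instead of reaching a real XenServer. The `app` and `getSession` names are reused from the files above.
```python
@app.task(time_limit=120)
def create_vm(vm, xenserver, template, name, session=None, **others):
    # Hypothetical seam: only build the real session when none was injected,
    # so a test can pass a stub session object instead of talking to XenServer.
    if session is None:
        session = getSession(
            xenserver.hostname, xenserver.username, xenserver.password)
    storage = session.xenapi.SR.get_all()
    # ... the rest of the VM creation keeps using the injected session ...
```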
|
{
"source": "jerith/pyconza2020-trio-structured-concurrency-for-python",
"score": 3
}
|
#### File: pyconza2020-trio-structured-concurrency-for-python/code/2_asyncio.py
```python
import asyncio
async def print_lines(msg, n):
for i in range(n):
print(i+1, msg)
await asyncio.sleep(0.2)
async def main():
await print_lines("basically sync", 3)
t1 = asyncio.create_task(print_lines("task1", 3))
t2 = asyncio.create_task(print_lines("task2", 6))
await t1
await t2
asyncio.run(main())
```
#### File: pyconza2020-trio-structured-concurrency-for-python/code/3_cancellation.py
```python
import trio
async def print_lines(msg, n):
for i in range(n):
print(i+1, msg)
await trio.sleep(0.4)
async def timebomb(delay):
await trio.sleep(delay)
raise Exception("KABOOOM!")
async def main():
try:
async with trio.open_nursery() as n:
n.start_soon(print_lines, "child", 5)
n.start_soon(timebomb, 1)
await print_lines("body", 5)
except Exception as e:
print(f"Error: {e}")
trio.run(main)
```
#### File: pyconza2020-trio-structured-concurrency-for-python/code/3_echo_client.py
```python
import sys
import trio
async def sender(client_stream, data):
while True:
print(f"sender: sending {data!r}")
await client_stream.send_all(data)
await trio.sleep(1)
async def receiver(client_stream):
async for data in client_stream:
print(f"receiver: got data {data!r}")
print("receiver: connection closed")
sys.exit()
async def main(argv):
data = (argv + ["default message"])[1].encode("utf-8")
client_stream = await trio.open_tcp_stream("127.0.0.1", 12345)
async with client_stream:
async with trio.open_nursery() as nursery:
nursery.start_soon(sender, client_stream, data)
nursery.start_soon(receiver, client_stream)
trio.run(main, sys.argv)
```
#### File: pyconza2020-trio-structured-concurrency-for-python/code/3_nursery.py
```python
import trio
async def print_lines(msg, n):
for i in range(n):
print(i+1, msg)
await trio.sleep(0.2)
print("task finished:", msg)
async def main():
print("parent: started!")
async with trio.open_nursery() as nursery:
nursery.start_soon(print_lines, "task1", 4)
nursery.start_soon(print_lines, "task2", 2)
print("parent: waiting for tasks to finish...")
print("parent: all done!")
trio.run(main)
```
|
{
"source": "jerith/pytest-trialtemp",
"score": 2
}
|
#### File: pytest-trialtemp/pytest_trialtemp/trialtemp.py
```python
import os
import pytest
@pytest.fixture(scope="session", autouse=True)
def trial_temp(request):
# pytest monkey-patches Failure at some point, and there's an interaction
# between importing things from Twisted during plugin discovery and
# pytest-xdist that causes problems capturing error output.
# Therefore, we import these in here.
from twisted.python.filepath import FilePath
from twisted.trial.util import _unusedTestDirectory
olddir = os.getcwd()
testdir, testdir_lock = _unusedTestDirectory(FilePath("_trial_temp"))
os.chdir(testdir.path)
def teardown_tempdir():
os.chdir(olddir)
testdir_lock.unlock()
request.addfinalizer(teardown_tempdir)
```
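A hedged usage sketch (a hypothetical test module, not part of the plugin) of what the autouse fixture buys you: any file a test writes with a relative path lands inside the trial temp directory, because the fixture has already changed the working directory for the whole session.
```python
import os

def test_relative_writes_are_isolated():
    # The session-scoped autouse fixture has already chdir'd into the trial
    # temp directory, so relative paths created here don't pollute the repo.
    with open("scratch.txt", "w") as f:
        f.write("hello")
    assert os.path.basename(os.getcwd()).startswith("_trial_temp")
```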
|
{
"source": "jerivas/bikeshed",
"score": 2
}
|
#### File: bikeshed/refs/ReferenceManager.py
```python
import json
import random
import re
from collections import defaultdict
from operator import itemgetter
from .. import biblio, config, constants, datablocks
from ..h import *
from ..messages import *
from .RefSource import RefSource
from .utils import *
class ReferenceManager:
__slots__ = [
"dataFile",
"specs",
"defaultSpecs",
"ignoredSpecs",
"replacedSpecs",
"biblios",
"loadedBiblioGroups",
"biblioKeys",
"biblioNumericSuffixes",
"preferredBiblioNames",
"headings",
"defaultStatus",
"localRefs",
"anchorBlockRefs",
"foreignRefs",
"shortname",
"specLevel",
"spec",
"testing",
]
def __init__(self, defaultStatus=None, fileRequester=None, testing=False):
if fileRequester is None:
self.dataFile = config.defaultRequester
else:
self.dataFile = fileRequester
self.testing = testing
# Dict of {spec vshortname => spec data}
self.specs = dict()
# Dict of {linking text => link-defaults data}
self.defaultSpecs = defaultdict(list)
# Set of spec vshortnames to remove from consideration when there are other possible anchors
self.ignoredSpecs = set()
# Set of (obsolete spec vshortname, replacing spec vshortname), when both obsolete and replacing specs are possible anchors
self.replacedSpecs = set()
# Dict of {biblio term => biblio data}
# Sparsely populated, with more loaded on demand
self.biblios = defaultdict(list)
self.loadedBiblioGroups = set()
# Most of the biblio keys, for biblio near-miss correction
# (Excludes dated versions, and all huge "foo\d+" groups that can't usefully correct typos.)
self.biblioKeys = set()
# Dict of {suffixless key => [keys with numeric suffixes]}
# (So you can tell when it's appropriate to default a numeric-suffix ref to a suffixless one.)
self.biblioNumericSuffixes = dict()
# Dict of {base key name => preferred display name}
self.preferredBiblioNames = dict()
# Dict of {spec vshortname => headings}
# Each heading is either {#foo => heading-dict}, {/foo#bar => heading-dict} or {#foo => [page-heading-keys]}
# In the latter, it's a list of the heading keys (of the form /foo#bar) that collide for that id.
self.headings = dict()
if defaultStatus is None:
self.defaultStatus = constants.refStatus.current
else:
self.defaultStatus = constants.refStatus[defaultStatus]
self.localRefs = RefSource("local", fileRequester=fileRequester)
self.anchorBlockRefs = RefSource("anchor-block", fileRequester=fileRequester)
self.foreignRefs = RefSource(
"foreign",
specs=self.specs,
ignored=self.ignoredSpecs,
replaced=self.replacedSpecs,
fileRequester=fileRequester,
)
self.shortname = None
self.specLevel = None
self.spec = None
def initializeRefs(self, doc=None):
"""
Load up the xref data
This is oddly split up into sub-functions to make it easier to track performance.
"""
def initSpecs():
self.specs.update(json.loads(self.dataFile.fetch("specs.json", str=True)))
initSpecs()
def initMethods():
self.foreignRefs.methods.update(json.loads(self.dataFile.fetch("methods.json", str=True)))
initMethods()
def initFors():
self.foreignRefs.fors.update(json.loads(self.dataFile.fetch("fors.json", str=True)))
initFors()
if doc and doc.inputSource and doc.inputSource.hasDirectory:
datablocks.transformInfo(self.dataFile.fetch("link-defaults.infotree", str=True).split("\n"), doc)
# Get local anchor data
shouldGetLocalAnchorData = doc.md.externalInfotrees["anchors.bsdata"]
if not shouldGetLocalAnchorData and doc.inputSource.cheaplyExists("anchors.bsdata"):
warn(
"Found anchors.bsdata next to the specification without a matching\n"
+ "External Infotrees: anchors.bsdata yes\n"
+ "in the metadata. This data won't be found when building via a URL."
)
# We should remove this after giving specs time to react to the warning:
shouldGetLocalAnchorData = True
if shouldGetLocalAnchorData:
try:
datablocks.transformAnchors(doc.inputSource.relative("anchors.bsdata").read().rawLines, doc)
except OSError:
warn("anchors.bsdata not found despite being listed in the External Infotrees metadata.")
# Get local link defaults
shouldGetLocalLinkDefaults = doc.md.externalInfotrees["link-defaults.infotree"]
if not shouldGetLocalLinkDefaults and doc.inputSource.cheaplyExists("link-defaults.infotree"):
warn(
"Found link-defaults.infotree next to the specification without a matching\n"
+ "External Infotrees: link-defaults.infotree yes\n"
+ "in the metadata. This data won't be found when building via a URL."
)
# We should remove this after giving specs time to react to the warning:
shouldGetLocalLinkDefaults = True
if shouldGetLocalLinkDefaults:
try:
datablocks.transformInfo(
doc.inputSource.relative("link-defaults.infotree").read().rawLines,
doc,
)
except OSError:
warn("link-defaults.infotree not found despite being listed in the External Infotrees metadata.")
def fetchHeadings(self, spec):
if spec in self.headings:
return self.headings[spec]
with self.dataFile.fetch("headings", f"headings-{spec}.json", okayToFail=True) as fh:
try:
data = json.load(fh)
except ValueError:
# JSON couldn't be decoded, *should* only be because of empty file
return
self.headings[spec] = data
return data
def initializeBiblio(self):
self.biblioKeys.update(json.loads(self.dataFile.fetch("biblio-keys.json", str=True)))
self.biblioNumericSuffixes.update(json.loads(self.dataFile.fetch("biblio-numeric-suffixes.json", str=True)))
# Get local bibliography data
try:
storage = defaultdict(list)
with open("biblio.json", encoding="utf-8") as fh:
biblio.processSpecrefBiblioFile(fh.read(), storage, order=2)
except OSError:
# Missing file is fine
pass
for k, vs in storage.items():
self.biblioKeys.add(k)
self.biblios[k].extend(vs)
# Hardcode RFC2119, used in most spec's boilerplates,
# to avoid having to parse the entire biblio-rf.data file for this single reference.
self.biblioKeys.add("rfc2119")
self.biblios["rfc2119"].append(
{
"linkText": "rfc2119\n",
"date": "March 1997\n",
"status": "Best Current Practice\n",
"title": "Key words for use in RFCs to Indicate Requirement Levels\n",
"snapshot_url": "https://datatracker.ietf.org/doc/html/rfc2119\n",
"current_url": "\n",
"obsoletedBy": "\n",
"other": "\n",
"etAl": False,
"order": 3,
"biblioFormat": "dict",
"authors": ["<NAME>\n"],
}
)
def setSpecData(self, md):
if md.defaultRefStatus:
self.defaultStatus = md.defaultRefStatus
elif md.status in config.snapshotStatuses:
self.defaultStatus = constants.refStatus.snapshot
elif md.status in config.shortToLongStatus:
self.defaultStatus = constants.refStatus.current
self.shortname = md.shortname
self.specLevel = md.level
self.spec = md.vshortname
for term, defaults in md.linkDefaults.items():
for default in defaults:
self.defaultSpecs[term].append(default)
# Need to get a real versioned shortname,
# with the possibility of overriding the "shortname-level" pattern.
self.removeSameSpecRefs()
def removeSameSpecRefs(self):
# Kill all the non-local anchors with the same shortname as the current spec,
# so you don't end up accidentally linking to something that's been removed from the local copy.
# TODO: This is dumb.
for _, refs in self.foreignRefs.refs.items():
for ref in refs:
if ref["status"] != "local" and ref["shortname"].rstrip() == self.shortname:
ref["export"] = False
def addLocalDfns(self, dfns):
for el in dfns:
if hasClass(el, "no-ref"):
continue
try:
linkTexts = config.linkTextsFromElement(el)
except config.DuplicatedLinkText as e:
die(
f"The term '{e.offendingText}' is in both lt and local-lt of the element {outerHTML(e.el)}.",
el=e.el,
)
linkTexts = e.allTexts
for linkText in linkTexts:
linkText = unfixTypography(linkText)
linkText = re.sub(r"\s+", " ", linkText)
linkType = treeAttr(el, "data-dfn-type")
if linkType not in config.dfnTypes:
die(f"Unknown local dfn type '{linkType}':\n {outerHTML(el)}", el=el)
continue
if linkType in config.lowercaseTypes:
linkText = linkText.lower()
dfnFor = treeAttr(el, "data-dfn-for")
if dfnFor is None:
dfnFor = set()
existingRefs = self.localRefs.queryRefs(linkType=linkType, text=linkText, linkFor="/", exact=True)[
0
]
if existingRefs and existingRefs[0].el is not el:
die(f"Multiple local '{linkType}' <dfn>s have the same linking text '{linkText}'.", el=el)
continue
else:
dfnFor = set(config.splitForValues(dfnFor))
encounteredError = False
for singleFor in dfnFor:
existingRefs = self.localRefs.queryRefs(
linkType=linkType,
text=linkText,
linkFor=singleFor,
exact=True,
)[0]
if existingRefs and existingRefs[0].el is not el:
encounteredError = True
die(
f"Multiple local '{linkType}' <dfn>s for '{singleFor}' have the same linking text '{linkText}'.",
el=el,
)
break
if encounteredError:
continue
for term in dfnFor.copy():
# Saying a value is for a descriptor with @foo/bar
# should also make it for the bare descriptor bar.
match = re.match(r"@[a-zA-Z0-9-_]+/(.*)", term)
if match:
dfnFor.add(match.group(1).strip())
# convert back into a list now, for easier JSONing
dfnFor = sorted(dfnFor)
ref = {
"type": linkType,
"status": "local",
"spec": self.spec,
"shortname": self.shortname,
"level": self.specLevel,
"url": "#" + el.get("id"),
"export": True,
"for": dfnFor,
"el": el,
}
self.localRefs.refs[linkText].append(ref)
for for_ in dfnFor:
self.localRefs.fors[for_].append(linkText)
methodishStart = re.match(r"([^(]+\()[^)]", linkText)
if methodishStart:
self.localRefs.addMethodVariants(linkText, dfnFor, ref["shortname"])
def filterObsoletes(self, refs):
return filterObsoletes(
refs,
replacedSpecs=self.replacedSpecs,
ignoredSpecs=self.ignoredSpecs,
localShortname=self.shortname,
localSpec=self.spec,
)
def queryAllRefs(self, **kwargs):
r1, _ = self.localRefs.queryRefs(**kwargs)
r2, _ = self.anchorBlockRefs.queryRefs(**kwargs)
r3, _ = self.foreignRefs.queryRefs(**kwargs)
refs = r1 + r2 + r3
if kwargs.get("ignoreObsoletes") is True:
refs = self.filterObsoletes(refs)
return refs
def getRef(
self,
linkType,
text,
spec=None,
status=None,
statusHint=None,
linkFor=None,
explicitFor=False,
linkForHint=None,
error=True,
el=None,
):
# If error is False, this function just shuts up and returns a reference or None
# Otherwise, it pops out debug messages for the user.
# 'maybe' links might not link up, so it's fine for them to have no references.
        # The relevant errors are gated by this variable.
zeroRefsError = error and linkType not in ["maybe", "extended-attribute"]
text = unfixTypography(text)
if linkType in config.lowercaseTypes:
text = text.lower()
if spec is not None:
spec = spec.lower()
if statusHint is None:
statusHint = self.defaultStatus
if status not in config.linkStatuses and status is not None:
if error:
die(
f"Unknown spec status '{status}'. Status must be {config.englishFromList(config.linkStatuses)}.",
el=el,
)
return None
# Local refs always get precedence, unless you manually specified a spec.
if spec is None:
localRefs, _ = self.localRefs.queryRefs(
linkType=linkType,
text=text,
linkFor=linkFor,
linkForHint=linkForHint,
explicitFor=explicitFor,
el=el,
)
# If the autolink was for-less, it found a for-full local link,
# but there was a for-less version in a foreign spec,
            # emit a warning (unless it was suppressed).
if localRefs and linkFor is None and any(x.for_ for x in localRefs):
forlessRefs, _ = self.anchorBlockRefs.queryRefs(
linkType=linkType, text=text, linkFor="/", export=True, el=el
)
forlessRefs = self.filterObsoletes(forlessRefs)
if not forlessRefs:
forlessRefs, _ = self.foreignRefs.queryRefs(
linkType=linkType, text=text, linkFor="/", export=True, el=el
)
forlessRefs = self.filterObsoletes(forlessRefs)
if forlessRefs:
reportAmbiguousForlessLink(el, text, forlessRefs, localRefs)
return None
if len(localRefs) == 1:
return localRefs[0]
elif len(localRefs) > 1:
if self.testing:
# Generate a stable answer
chosenRef = sorted(localRefs, key=lambda x: x.url)[0]
else:
# CHAOS MODE (so you're less likely to rely on it)
chosenRef = random.choice(localRefs)
if error:
linkerror(
f"Multiple possible '{linkType}' local refs for '{text}'.\nRandomly chose one of them; other instances might get a different random choice.",
el=el,
)
return chosenRef
if status == "local":
# Already checked local refs, can early-exit now.
return
# Take defaults into account
if not spec or not status or not linkFor:
variedTexts = [v for v in linkTextVariations(text, linkType) if v in self.defaultSpecs]
if variedTexts:
for dfnSpec, dfnType, dfnStatus, dfnFor in reversed(self.defaultSpecs[variedTexts[0]]):
if not config.linkTypeIn(dfnType, linkType):
continue
if linkFor and dfnFor:
if isinstance(linkFor, str) and linkFor != dfnFor:
continue
if dfnFor not in linkFor:
continue
spec = spec or dfnSpec
status = status or dfnStatus
linkFor = linkFor or dfnFor
linkType = dfnType
break
# Then anchor-block refs get preference
blockRefs, _ = self.anchorBlockRefs.queryRefs(
linkType=linkType,
text=text,
spec=spec,
linkFor=linkFor,
linkForHint=linkForHint,
explicitFor=explicitFor,
el=el,
)
if blockRefs and linkFor is None and any(x.for_ for x in blockRefs):
forlessRefs, _ = self.foreignRefs.queryRefs(linkType=linkType, text=text, linkFor="/", export=True, el=el)
forlessRefs = self.filterObsoletes(forlessRefs)
if forlessRefs:
reportAmbiguousForlessLink(el, text, forlessRefs, blockRefs)
return None
if len(blockRefs) == 1:
return blockRefs[0]
elif len(blockRefs) > 1:
reportMultiplePossibleRefs(
simplifyPossibleRefs(blockRefs),
linkText=text,
linkType=linkType,
linkFor=linkFor,
defaultRef=blockRefs[0],
el=el,
)
return blockRefs[0]
# Get the relevant refs
if spec is None:
export = True
else:
export = None
refs, failure = self.foreignRefs.queryRefs(
text=text,
linkType=linkType,
spec=spec,
status=status,
statusHint=statusHint,
linkFor=linkFor,
linkForHint=linkForHint,
explicitFor=explicitFor,
export=export,
ignoreObsoletes=True,
)
if (
failure
and linkType in ("argument", "idl")
and linkFor is not None
and any(x.endswith("()") for x in linkFor)
):
# foo()/bar failed, because foo() is technically the wrong signature
# let's see if we can find the right signature, and it's unambiguous
for lf in linkFor:
if not lf.endswith("()"):
continue
if "/" in lf:
interfaceName, _, methodName = lf.partition("/")
else:
methodName = lf
interfaceName = None
methodSignatures = self.foreignRefs.methods.get(methodName, None)
if methodSignatures is None:
# Nope, foo() is just wrong entirely.
# Jump out and fail in a normal way
break
# Find all method signatures that contain the arg in question
# and, if interface is specified, are for that interface.
# Dedup/collect by url, so I'll get all the signatures for a given dfn.
possibleMethods = defaultdict(list)
for argfullName, metadata in methodSignatures.items():
if (
text in metadata["args"]
and (interfaceName in metadata["for"] or interfaceName is None)
and metadata["shortname"] != self.shortname
):
possibleMethods[metadata["shortname"]].append(argfullName)
possibleMethods = list(possibleMethods.values())
if not possibleMethods:
# No method signatures with this argument/interface.
# Jump out and fail in a normal way.
break
if len(possibleMethods) > 1:
# Too many to disambiguate.
linkerror(
f"The argument autolink '{text}' for '{linkFor}' has too many possible overloads to disambiguate. Please specify the full method signature this argument is for.",
el=el,
)
# Try out all the combinations of interface/status/signature
linkForPatterns = ["{i}/{m}", "{m}"]
statuses = ["local", status]
for p in linkForPatterns:
for s in statuses:
for m in possibleMethods[0]:
refs, failure = self.foreignRefs.queryRefs(
text=text,
linkType=linkType,
spec=spec,
status=s,
linkFor=p.format(i=interfaceName, m=m),
ignoreObsoletes=True,
)
if refs:
break
if refs:
break
if refs:
break
# Now we can break out and just let the normal error-handling machinery take over.
break
# Allow foo(bar) to be for'd to with just foo() if it's completely unambiguous.
methodPrefix = methodName[:-1]
candidates, _ = self.localRefs.queryRefs(linkType="functionish", linkFor=interfaceName)
methodRefs = list({c.url: c for c in candidates if c.text.startswith(methodPrefix)}.values())
if not methodRefs:
# Look for non-locals, then
c1, _ = self.anchorBlockRefs.queryRefs(
linkType="functionish",
spec=spec,
status=status,
statusHint=statusHint,
linkFor=interfaceName,
export=export,
ignoreObsoletes=True,
)
c2, _ = self.foreignRefs.queryRefs(
linkType="functionish",
spec=spec,
status=status,
statusHint=statusHint,
linkFor=interfaceName,
export=export,
ignoreObsoletes=True,
)
candidates = c1 + c2
methodRefs = list({c.url: c for c in candidates if c.text.startswith(methodPrefix)}.values())
if zeroRefsError and len(methodRefs) > 1:
# More than one possible foo() overload, can't tell which to link to
linkerror(
f"Too many possible method targets to disambiguate '{linkFor}/{text}'. Please specify the names of the required args, like 'foo(bar, baz)', in the 'for' attribute.",
el=el,
)
return
# Otherwise
if failure in ("text", "type"):
if linkType in ("property", "propdesc", "descriptor") and text.startswith("--"):
# Custom properties/descriptors aren't ever defined anywhere
return None
if zeroRefsError:
linkerror(f"No '{linkType}' refs found for '{text}'.", el=el)
return None
elif failure == "export":
if zeroRefsError:
linkerror(f"No '{linkType}' refs found for '{text}' that are marked for export.", el=el)
return None
elif failure == "spec":
if zeroRefsError:
linkerror(f"No '{linkType}' refs found for '{text}' with spec '{spec}'.", el=el)
return None
elif failure == "for":
if zeroRefsError:
if spec is None:
linkerror(f"No '{linkType}' refs found for '{text}' with for='{linkFor}'.", el=el)
else:
linkerror(f"No '{linkType}' refs found for '{text}' with for='{linkFor}' in spec '{spec}'.", el=el)
return None
elif failure == "status":
if zeroRefsError:
if spec is None:
linkerror(f"No '{linkType}' refs found for '{text}' compatible with status '{status}'.", el=el)
else:
linkerror(
f"No '{linkType}' refs found for '{text}' compatible with status '{status}' in spec '{spec}'.",
el=el,
)
return None
elif failure == "ignored-specs":
if zeroRefsError:
linkerror(f"The only '{linkType}' refs for '{text}' were in ignored specs:\n{outerHTML(el)}", el=el)
return None
elif failure:
die(f"Programming error - I'm not catching '{failure}'-type link failures. Please report!", el=el)
return None
if len(refs) == 1:
# Success!
return refs[0]
# If we hit this point, there are >1 possible refs to choose from.
# Default to linking to the first one.
defaultRef = refs[0]
if linkType == "propdesc":
# If both props and descs are possible, default to prop.
for ref in refs:
if ref.type == "property":
defaultRef = ref
break
if error:
reportMultiplePossibleRefs(
simplifyPossibleRefs(refs),
linkText=text,
linkType=linkType,
linkFor=linkFor,
defaultRef=defaultRef,
el=el,
)
return defaultRef
def getBiblioRef(
self,
text,
status=None,
generateFakeRef=False,
allowObsolete=False,
el=None,
quiet=False,
depth=0,
):
if depth > 100:
die(f"Data error in biblio files; infinitely recursing trying to find [{text}].")
return
key = text.lower()
while True:
candidates = self.bibliosFromKey(key)
if candidates:
break
# Did it fail because SpecRef *only* has the *un*versioned name?
# Aka you said [[foo-1]], but SpecRef only has [[foo]].
# "Only" is important - if there are versioned names that just don't match what you gave,
# then I shouldn't autofix;
# if you say [[foo-2]], and [[foo]] and [[foo-1]] exist,
# then [[foo-2]] is just an error.
match = re.match(r"(.+)-\d+$", key)
failFromWrongSuffix = False
if match and match.group(1) in self.biblios:
unversionedKey = match.group(1)
if unversionedKey in self.biblioNumericSuffixes:
# Nope, there are more numeric-suffixed versions,
# just not the one you asked for
failFromWrongSuffix = True
else:
# Load up the unversioned url!
candidates = self.biblios[unversionedKey]
break
# Did it fail because I only know about the spec from Shepherd?
if key in self.specs and generateFakeRef:
spec = self.specs[key]
return biblio.SpecBasedBiblioEntry(spec, preferredURL=status)
if failFromWrongSuffix and not quiet:
numericSuffixes = self.biblioNumericSuffixes[unversionedKey]
die(
f"A biblio link references {text}, but only {config.englishFromList(numericSuffixes)} exists in SpecRef."
)
return None
candidate = self._bestCandidateBiblio(candidates)
# TODO: When SpecRef definitely has all the CSS specs, turn on this code.
# if candidates[0]['order'] > 3: # 3 is SpecRef level
# warn(f"Bibliography term '{text}' wasn't found in SpecRef.\n Please find the equivalent key in SpecRef, or submit a PR to SpecRef.")
if candidate["biblioFormat"] == "string":
bib = biblio.StringBiblioEntry(**candidate)
elif candidate["biblioFormat"] == "alias":
# Follow the chain to the real candidate
bib = self.getBiblioRef(candidate["aliasOf"], status=status, el=el, quiet=True, depth=depth + 1)
if bib is None:
die(f"Biblio ref [{text}] claims to be an alias of [{candidate['aliasOf']}], which doesn't exist.")
return None
elif candidate.get("obsoletedBy", "").strip():
# Obsoleted by something. Unless otherwise indicated, follow the chain.
if allowObsolete:
bib = biblio.BiblioEntry(preferredURL=status, **candidate)
else:
bib = self.getBiblioRef(
candidate["obsoletedBy"],
status=status,
el=el,
quiet=True,
depth=depth + 1,
)
if not quiet:
die(
f"Obsolete biblio ref: [{candidate['linkText']}] is replaced by [{bib.linkText}]. Either update the reference, or use [{candidate['linkText']} obsolete] if this is an intentionally-obsolete reference."
)
else:
bib = biblio.BiblioEntry(preferredURL=status, **candidate)
# If a canonical name has been established, use it.
if bib.linkText in self.preferredBiblioNames:
bib.originalLinkText, bib.linkText = (
bib.linkText,
self.preferredBiblioNames[bib.linkText],
)
return bib
def bibliosFromKey(self, key):
# Load up the biblio data necessary to fetch the given key
# and then actually fetch it.
# If you don't call this,
# the current data might not be reliable.
if key not in self.biblios:
# Try to load the group up, if necessary
group = key[0:2]
if group not in self.loadedBiblioGroups:
with self.dataFile.fetch("biblio", f"biblio-{group}.data", okayToFail=True) as lines:
biblio.loadBiblioDataFile(lines, self.biblios)
self.loadedBiblioGroups.add(group)
return self.biblios.get(key, [])
def _bestCandidateBiblio(self, candidates):
return stripLineBreaks(sorted(candidates, key=itemgetter("order"))[0])
def getLatestBiblioRef(self, key):
# Takes a biblio reference name,
# returns the latest dated variant of that name
# (names in the form FOO-19700101)
candidates = self.bibliosFromKey(key)
if not candidates:
return None
latestDate = None
latestRefs = None
for k, biblios in self.biblios.items():
if not k.startswith(key):
continue
match = re.search(r"(\d{8})$", k)
if not match:
continue
date = match.group(1)
if latestDate is None or date > latestDate:
latestDate = date
latestRefs = biblios
if latestRefs is None:
return None
return biblio.BiblioEntry(**self._bestCandidateBiblio(latestRefs))
def vNamesFromSpecNames(self, specName):
# Takes an unversioned specName,
# returns the versioned names that Shepherd knows about.
chosenVNames = []
for vSpecName in self.specs:
if not vSpecName.startswith(specName):
continue
match = re.match(r"-?(\d+)", vSpecName[len(specName) :])
if match is None:
continue
chosenVNames.append(vSpecName)
return chosenVNames
def simplifyPossibleRefs(refs, alwaysShowFor=False):
# "Simplifies" the set of possible refs according to their 'for' value;
# returns a list of text/type/spec/for objects,
# with the for value filled in *only if necessary for disambiguation*.
forVals = defaultdict(list)
for ref in refs:
if ref.for_:
for for_ in ref.for_: # ref.for_ is a list
forVals[(ref.text, ref.type, ref.spec)].append((for_, ref.url))
else:
forVals[(ref.text, ref.type, ref.spec)].append(("/", ref.url))
retRefs = []
for (text, type, spec), fors in forVals.items():
if len(fors) >= 2 or alwaysShowFor:
# Needs for-based disambiguation
for for_, url in fors:
retRefs.append({"text": text, "type": type, "spec": spec, "for_": for_, "url": url})
else:
retRefs.append(
{
"text": text,
"type": type,
"spec": spec,
"for_": None,
"url": fors[0][1],
}
)
return retRefs
def refToText(ref):
if ref["for_"]:
return "spec:{spec}; type:{type}; for:{for_}; text:{text}".format(**ref)
else:
return "spec:{spec}; type:{type}; text:{text}".format(**ref)
def reportMultiplePossibleRefs(possibleRefs, linkText, linkType, linkFor, defaultRef, el):
# Sometimes a badly-written spec has indistinguishable dfns.
# Detect this by seeing if more than one stringify to the same thing.
allRefs = defaultdict(list)
for ref in possibleRefs:
allRefs[refToText(ref)].append(ref)
uniqueRefs = []
mergedRefs = []
for refs in allRefs.values():
if len(refs) == 1:
uniqueRefs.append(refs[0])
else:
mergedRefs.append(refs)
if linkFor:
error = f"Multiple possible '{linkText}' {linkType} refs for '{linkFor}'."
else:
error = f"Multiple possible '{linkText}' {linkType} refs."
error += f"\nArbitrarily chose {defaultRef.url}"
if uniqueRefs:
error += "\nTo auto-select one of the following refs, insert one of these lines into a <pre class=link-defaults> block:\n"
error += "\n".join(refToText(r) for r in uniqueRefs)
if mergedRefs:
error += "\nThe following refs show up multiple times in their spec, in a way that Bikeshed can't distinguish between. Either create a manual link, or ask the spec maintainer to add disambiguating attributes (usually a for='' attribute to all of them)."
for refs in mergedRefs:
error += "\n" + refToText(refs[0])
for ref in refs:
error += "\n " + ref["url"]
linkerror(error, el=el)
def reportAmbiguousForlessLink(el, text, forlessRefs, localRefs):
localRefText = "\n".join([refToText(ref) for ref in simplifyPossibleRefs(localRefs, alwaysShowFor=True)])
forlessRefText = "\n".join([refToText(ref) for ref in simplifyPossibleRefs(forlessRefs, alwaysShowFor=True)])
linkerror(
f"Ambiguous for-less link for '{text}', please see <https://tabatkins.github.io/bikeshed/#ambi-for> for instructions:\nLocal references:\n{localRefText}\nfor-less references:\n{forlessRefText}",
el=el,
)
```
|
{
"source": "jerivas/django-file-router",
"score": 2
}
|
#### File: views/colors/__init__.py
```python
from demo.models import Color
from file_router import render_str
def view(request):
colors = Color.objects.all()
return render_str(__doc__, request, {"colors": colors})
```
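`render_str(__doc__, ...)` suggests the module docstring doubles as the template for this view. A hedged sketch of how the top of the file could look; the markup and the `name` field on `Color` are assumptions, and the docstring is presumably rendered with Django's template engine.
```python
"""
<h1>Colors</h1>
<ul>
  {% for color in colors %}
  <li>{{ color.name }}</li>
  {% endfor %}
</ul>
"""
from demo.models import Color
from file_router import render_str

def view(request):
    colors = Color.objects.all()
    return render_str(__doc__, request, {"colors": colors})
```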
|
{
"source": "jerivas/mezzanine-surveys",
"score": 2
}
|
#### File: surveys/admin/surveys.py
```python
from __future__ import absolute_import, unicode_literals
from copy import deepcopy
from django.contrib import admin
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
from mezzanine.utils.admin import admin_url
from mezzy.utils.admin import LinkedInlineMixin
from ..models import SurveyPage, SurveyPurchase, SurveyPurchaseCode, Category
surveypage_fieldsets = [
(None, {
"fields": [
"title", "status", ("publish_date", "expiry_date"), "content", "in_menus",
"login_required"],
}),
(_("Purchasing"), {
"classes": ["collapse-closed"],
"fields": ["get_purchases_link", "cost", "purchase_response"],
}),
(_("Instructions"), {
"classes": ["collapse-closed"],
"fields": ["max_rating", "instructions", "completed_message"],
}),
(_("Report"), {
"classes": ["collapse-closed"],
"fields": ["report_explanation"],
}),
deepcopy(PageAdmin.fieldsets[-1]), # Meta panel
]
class SurveyPurchaseCodeInlineAdmin(TabularDynamicInlineAdmin):
model = SurveyPurchaseCode
class CategoryInlineAdmin(LinkedInlineMixin):
"""
Inline admin with links to the complete Category admin.
"""
count_field = "subcategories"
link_text = _("Edit content and subcategories")
model = Category
@admin.register(SurveyPage)
class SurveyPageAdmin(PageAdmin):
"""
Allows staff users to create and manage the available surveys.
"""
fieldsets = surveypage_fieldsets
readonly_fields = ["get_purchases_link"]
inlines = [SurveyPurchaseCodeInlineAdmin, CategoryInlineAdmin]
def get_purchases_link(self, obj):
if obj.pk is None:
return ""
return format_html(
"<a href='{}?survey__page_ptr__exact={}'>Manage {} purchase(s)</a>",
admin_url(SurveyPurchase, "changelist"),
obj.pk,
obj.purchases.count()
)
get_purchases_link.short_description = _("Purchases")
@admin.register(SurveyPurchase)
class SurveyPurchaseAdmin(admin.ModelAdmin):
"""
Allows staff users to filter and edit completed purchases.
"""
list_display = [
"purchaser", "survey", "amount", "payment_method", "transaction_id", "created"]
list_filter = ["survey"]
search_fields = ["purchaser__email", "purchaser__username", "payment_method", "transaction_id"]
date_hierarchy = "created"
fieldsets = [
(None, {
"fields": [
"purchaser", "survey", "amount", "payment_method", "transaction_id", "notes",
"created"]
}),
("Responses", {
"fields": ["get_public_link", "get_response_count", "report_generated"]
})
]
readonly_fields = ["created", "get_response_count", "get_public_link"]
def get_response_count(self, obj):
return obj.responses.count()
get_response_count.short_description = _("Responses")
def get_public_link(self, obj):
return format_html(
"<a href='{}' target='_blank'>Open public page</a>",
obj.get_response_create_url())
get_public_link.short_description = _("Public link")
```
#### File: surveys/models/questions.py
```python
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from mezzanine.core.fields import RichTextField
from mezzanine.core.models import Orderable, TimeStamped
from mezzy.utils.models import TitledInline
from ..managers import RatingDataQuerySet, QuestionResponseQuerySet
class Category(TitledInline):
"""
A Category that contains one or more Subcategories.
"""
survey = models.ForeignKey(
"surveys.SurveyPage", on_delete=models.CASCADE, related_name="categories")
description = RichTextField(_("Description"))
objects = RatingDataQuerySet.as_manager()
class Meta:
verbose_name = _("category")
verbose_name_plural = _("categories")
def get_rating_data(self, purchase):
"""
Returns a serializable object with rating data for this category.
"""
rating_responses = QuestionResponse.objects.filter(
question__subcategory__category=self,
question__field_type=Question.RATING_FIELD,
response__purchase=purchase)
count = rating_responses.count()
if not count:
return None # Don't return data if no rating responses exist
return {
"id": self.pk,
"title": self.title,
"description": self.description,
"rating": {
"count": count,
"average": rating_responses.get_average(),
"frequencies":
rating_responses.get_frequencies(purchase.survey.get_rating_choices()),
},
"subcategories": self.subcategories.get_rating_data(purchase),
}
class Subcategory(TitledInline):
"""
A Subcategory that contains one or more Questions.
"""
category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name="subcategories")
description = RichTextField(_("Description"))
objects = RatingDataQuerySet.as_manager()
class Meta:
verbose_name = _("subcategory")
verbose_name_plural = _("subcategories")
def get_rating_data(self, purchase):
"""
Returns a serializable object with rating data for this subcategory.
"""
rating_responses = QuestionResponse.objects.filter(
question__subcategory=self,
question__field_type=Question.RATING_FIELD,
response__purchase=purchase)
count = rating_responses.count()
if not count:
return None # Don't return data if no rating responses exist
return {
"id": self.pk,
"title": self.title,
"description": self.description,
"rating": {
"count": count,
"average": rating_responses.get_average(),
"frequencies":
rating_responses.get_frequencies(purchase.survey.get_rating_choices()),
},
"questions": self.questions.get_rating_data(purchase),
}
@python_2_unicode_compatible
class Question(Orderable):
"""
A question on a SurveyPage.
"""
RATING_FIELD = 1
TEXT_FIELD = 2
QUESTION_TYPES = (
(RATING_FIELD, "Rating"),
(TEXT_FIELD, "Text"),
)
subcategory = models.ForeignKey(
Subcategory, on_delete=models.CASCADE, related_name="questions")
field_type = models.IntegerField(_("Question type"), choices=QUESTION_TYPES)
prompt = models.CharField(_("Prompt"), max_length=300)
required = models.BooleanField(_("Required"), default=True)
invert_rating = models.BooleanField(_("Invert rating"), default=False)
objects = RatingDataQuerySet.as_manager()
class Meta:
verbose_name = _("question")
verbose_name_plural = _("questions")
def __str__(self):
return self.prompt
def get_rating_data(self, purchase):
"""
Returns a serializable object with rating data for this question.
"""
rating_responses = QuestionResponse.objects.filter(
question=self,
question__field_type=Question.RATING_FIELD,
response__purchase=purchase)
count = rating_responses.count()
if not count:
return None # Don't return data if no rating responses exist
return {
"id": self.pk,
"prompt": self.prompt,
"invert_rating": self.invert_rating,
"rating": {
"count": count,
"average": rating_responses.get_average(),
"frequencies":
rating_responses.get_frequencies(purchase.survey.get_rating_choices()),
},
}
@python_2_unicode_compatible
class SurveyResponse(TimeStamped):
"""
Collection of all responses related to a Purchase.
"""
purchase = models.ForeignKey(
"surveys.SurveyPurchase", related_name="responses", on_delete=models.CASCADE)
def __str__(self):
return str(self.created)
@python_2_unicode_compatible
class QuestionResponse(models.Model):
"""
Response to a single Question.
"""
response = models.ForeignKey(
SurveyResponse, related_name="responses", on_delete=models.CASCADE)
question = models.ForeignKey(Question, related_name="responses", on_delete=models.CASCADE)
rating = models.PositiveSmallIntegerField(_("Rating"), blank=True, null=True)
text_response = models.TextField(_("Text response"), blank=True)
objects = QuestionResponseQuerySet.as_manager()
def __str__(self):
if self.rating is not None:
return str(self.rating)
return self.text_response
def normalize_rating(self):
"""
Invert the rating if the question requires it.
"""
if self.rating is not None and self.question.invert_rating:
max_rating = self.question.subcategory.category.survey.max_rating
self.rating = max_rating - int(self.rating) + 1
```
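As a quick sanity check of the inversion formula above (`max_rating - rating + 1`), a hedged example with a 5-point scale:
```python
# Hypothetical values: with max_rating = 5 and invert_rating = True,
# normalize_rating maps 1 -> 5, 2 -> 4, 3 -> 3, 4 -> 2, 5 -> 1.
max_rating = 5
assert [max_rating - r + 1 for r in range(1, 6)] == [5, 4, 3, 2, 1]
```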
#### File: surveys/models/surveys.py
```python
from __future__ import unicode_literals
import json
import uuid
from builtins import range
from django.db import models
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.models import RichText, TimeStamped
from mezzanine.pages.models import Page
from ..managers import SurveyPurchaseQuerySet
class SurveyPage(Page, RichText):
"""
Survey that's available for purchase.
"""
instructions = RichTextField(_("Instructions"))
cost = models.DecimalField(_("Cost"), max_digits=7, decimal_places=2, default=0)
purchase_response = RichTextField(_("Purchase response"))
completed_message = RichTextField(
_("Completed message"),
help_text=_("Message shown to users after completing the survey"))
max_rating = models.PositiveSmallIntegerField(
_("Maximum rating"), default=5,
validators=[MinValueValidator(2), MaxValueValidator(10)],
help_text=_("For rating questions. Must be a number between 2 and 10"))
report_explanation = RichTextField(
_("Explanation"),
help_text=_("Helping content shown before the results' detail"))
def get_questions(self):
"""
Collect all questions related to this survey.
"""
from .questions import Question
return Question.objects.filter(subcategory__category__survey=self)
def get_rating_choices(self):
return range(1, self.max_rating + 1)
def get_requires_payment(self):
return self.cost > 0
class Meta:
verbose_name = _("survey page")
verbose_name_plural = _("survey pages")
@python_2_unicode_compatible
class SurveyPurchaseCode(models.Model):
"""
Code to gain access to a Survey without paying.
"""
survey = models.ForeignKey(SurveyPage, related_name="purchase_codes")
code = models.CharField(
_("Code"), max_length=20, blank=True,
help_text=_("If left blank it will be automatically generated"))
uses_remaining = models.PositiveIntegerField(_("Remaining uses"), default=0)
class Meta:
verbose_name = _("purchase code")
verbose_name_plural = _("purchase codes")
unique_together = ("survey", "code")
def __str__(self):
return self.code
def save(self, *args, **kwargs):
"""
Generate a UUID if the code hasn't been defined
"""
if not self.code:
self.code = str(uuid.uuid4()).strip("-")[4:23]
super(SurveyPurchaseCode, self).save(*args, **kwargs)
@python_2_unicode_compatible
class SurveyPurchase(TimeStamped):
"""
A record of a user purchasing a Survey.
"""
survey = models.ForeignKey(SurveyPage, on_delete=models.CASCADE, related_name="purchases")
purchaser = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="survey_purchases")
public_id = models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)
transaction_id = models.CharField(_("Transaction ID"), max_length=200, blank=True)
payment_method = models.CharField(_("Payment method"), max_length=100, blank=True)
amount = models.DecimalField(
_("Amount"), max_digits=8, decimal_places=2, blank=True, null=True)
notes = models.TextField(_("Notes"), blank=True)
report_generated = models.DateTimeField(_("Report generated"), blank=True, null=True)
report_cache = models.TextField(_("Report (cached)"), default="[]")
objects = SurveyPurchaseQuerySet.as_manager()
class Meta:
verbose_name = _("purchase")
verbose_name_plural = _("purchases")
def __str__(self):
return str(self.survey)
def get_absolute_url(self):
return reverse("surveys:purchase_detail", args=[self.public_id])
def get_response_create_url(self):
return reverse("surveys:response_create", args=[self.public_id])
def get_complete_url(self):
return reverse("surveys:response_complete", args=[self.public_id])
def get_report_url(self):
return reverse("surveys:purchase_report", args=[self.public_id])
def generate_report(self):
"""
Generate a report of all responses related to this purchase.
A cached copy will be stored in self.report_cache.
The report includes nested data in the shape of Category / Subcategory / Question.
"""
from .questions import Question, QuestionResponse
rating_responses = QuestionResponse.objects.filter(
response__purchase=self, question__field_type=Question.RATING_FIELD)
text_questions = []
for question in self.survey.get_questions().filter(field_type=Question.TEXT_FIELD):
responses = question.responses.filter(response__purchase=self)
text_questions.append({
"id": question.pk,
"prompt": question.prompt,
"responses": list(responses.values_list("text_response", flat=True)),
})
report = {
"rating": {
"count": rating_responses.count(),
"average": rating_responses.get_average(),
"frequencies": rating_responses.get_frequencies(self.survey.get_rating_choices()),
},
"categories": self.survey.categories.get_rating_data(purchase=self),
"text_questions": text_questions,
}
self.report_cache = json.dumps(report)
self.report_generated = now()
self.save()
return report
def get_report_as_json(self):
"""
Load the cached report as JSON.
"""
return json.loads(self.report_cache)
```
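Reading `generate_report` together with the `get_rating_data` methods in `questions.py`, the cached report roughly takes the nested shape sketched below. Values are illustrative only, and the exact `frequencies` payload comes from the `RatingDataQuerySet`/`QuestionResponseQuerySet` managers, which aren't shown here.
```python
# Illustrative shape of SurveyPurchase.report_cache after generate_report().
example_report = {
    "rating": {"count": 12, "average": 3.4, "frequencies": "per-choice counts"},
    "categories": [
        {
            "id": 1,
            "title": "Category A",
            "description": "<p>Category intro</p>",
            "rating": {"count": 6, "average": 3.1, "frequencies": "per-choice counts"},
            "subcategories": [
                {
                    "id": 7,
                    "title": "Subcategory A1",
                    "description": "<p>Subcategory intro</p>",
                    "rating": {"count": 6, "average": 3.1, "frequencies": "per-choice counts"},
                    "questions": [
                        {
                            "id": 42,
                            "prompt": "How clear were the instructions?",
                            "invert_rating": False,
                            "rating": {"count": 6, "average": 3.1, "frequencies": "per-choice counts"},
                        },
                    ],
                },
            ],
        },
    ],
    "text_questions": [
        {"id": 43, "prompt": "Any other comments?", "responses": ["Great", "Could be shorter"]},
    ],
}
```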
#### File: surveys/tests/test_models.py
```python
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.test import TestCase
from django_dynamic_fixture import get
from surveys.models import SurveyPage, SurveyPurchase
class BaseSurveyPageTest(TestCase):
"""
Create a SurveyPage and user as fixtures.
"""
@classmethod
def setUpTestData(cls):
super(BaseSurveyPageTest, cls).setUpTestData()
cls.USER = get(User, is_active=True, is_staff=False)
cls.SURVEY = SurveyPage.objects.create(cost=10, max_rating=4)
class SurveyPurchaseTestCase(BaseSurveyPageTest):
def test_manager(self):
kwargs = {"survey": self.SURVEY, "report_generated": None}
# Create 3 open purchases for the user
purchases = [
get(SurveyPurchase, purchaser=self.USER, **kwargs),
get(SurveyPurchase, purchaser=self.USER, **kwargs),
get(SurveyPurchase, purchaser=self.USER, **kwargs),
]
# Create some other purchases for other users
get(SurveyPurchase, **kwargs)
get(SurveyPurchase, **kwargs)
get(SurveyPurchase, **kwargs)
# Test all purchases are open before any reports are generated
self.assertEqual(SurveyPurchase.objects.open().count(), 6)
self.assertEqual(SurveyPurchase.objects.closed().count(), 0)
self.assertEqual(self.USER.survey_purchases.open().count(), 3)
self.assertEqual(self.USER.survey_purchases.closed().count(), 0)
# The first purchase should be closed once the report is generated
purchases[0].generate_report()
self.assertEqual(SurveyPurchase.objects.open().count(), 5)
self.assertEqual(SurveyPurchase.objects.closed().count(), 1)
self.assertEqual(self.USER.survey_purchases.open().count(), 2)
self.assertEqual(self.USER.survey_purchases.closed()[0], purchases[0])
```
#### File: surveys/views/surveys.py
```python
from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.db.models import F
from django.shortcuts import get_object_or_404, redirect
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from mezzy.utils.views import FormMessagesMixin, LoginRequiredMixin, UserPassesTestMixin
from ..forms.surveys import SurveyPurchaseForm, SurveyResponseForm
from ..models import SurveyPage, SurveyPurchase, SurveyPurchaseCode
class SurveyPurchaseMixin(object):
"""
    Mixin used by the generic views below to fetch SurveyPurchase instances by public ID.
"""
@cached_property
def purchase(self):
qs = SurveyPurchase.objects.all() \
.select_related("survey") \
.prefetch_related("survey__categories__subcategories__questions")
return get_object_or_404(qs, public_id=self.kwargs["public_id"])
def get_context_data(self, **kwargs):
kwargs.update({
"purchase": self.purchase,
"survey": self.purchase.survey,
})
return super(SurveyPurchaseMixin, self).get_context_data(**kwargs)
class SurveyPurchaseCreate(LoginRequiredMixin, FormMessagesMixin, generic.CreateView):
"""
Allows users to purchase surveys.
"""
form_class = SurveyPurchaseForm
template_name = "surveys/survey_purchase_create.html"
success_message = _("You have successfully purchased this survey")
error_message = _("There was a problem with the purchase process")
@cached_property
def survey(self):
return get_object_or_404(
SurveyPage.objects.published(for_user=self.request.user),
slug=self.kwargs["slug"])
def get_context_data(self, **kwargs):
kwargs.update({
"survey": self.survey,
})
return super(SurveyPurchaseCreate, self).get_context_data(**kwargs)
def form_valid(self, form):
"""
The form is valid, let's process the payment and save the purchase.
"""
form.instance.survey = self.survey
form.instance.purchaser = self.request.user
# Process via purchase code or regular payment
purchase_code = form.cleaned_data.get("purchase_code")
try:
if purchase_code:
self.process_purchase_code(purchase_code, form)
else:
self.process_payment(form)
except ValidationError as error:
form.add_error(None, error)
return self.form_invalid(form)
# Code or payment processed successfully, save and continue
return super(SurveyPurchaseCreate, self).form_valid(form)
def process_purchase_code(self, purchase_code, form):
"""
Process a purchase based on code entered by the user.
"""
try:
code = self.survey.purchase_codes.get(code=purchase_code, uses_remaining__gt=0)
except SurveyPurchaseCode.DoesNotExist:
raise ValidationError(_("The code you entered is not valid"))
code.uses_remaining = F("uses_remaining") - 1
try:
code.save()
except IntegrityError: # Raised when uses_remaining becomes negative
raise ValidationError(_("The code you entered is no longer available"))
form.instance.amount = 0
form.instance.transaction_id = purchase_code
form.instance.payment_method = "Purchase Code"
def process_payment(self, form):
"""
By default all purchases are complimentary.
Payment processors should implement their logic here and set these fields accordingly.
"""
form.instance.transaction_id = "Complimentary"
form.instance.payment_method = "Complimentary"
form.instance.amount = 0
class SurveyPurchaseDetail(UserPassesTestMixin, SurveyPurchaseMixin, generic.TemplateView):
"""
Allows users to manage a survey they've purchased.
"""
template_name = "surveys/survey_purchase_detail.html"
def test_func(self):
"""
Only allow access to the user that purchased this survey.
"""
user = self.request.user
if not user.is_authenticated():
return False
return self.purchase.purchaser == user
class SurveyResponseCreate(FormMessagesMixin, SurveyPurchaseMixin, generic.CreateView):
"""
Allows a user to answer a survey and submit it.
"""
form_class = SurveyResponseForm
template_name = "surveys/survey_response_create.html"
success_message = "Thank you! Your responses have been saved successfully"
def get_form_kwargs(self):
kwargs = super(SurveyResponseCreate, self).get_form_kwargs()
kwargs.update({
"purchase": self.purchase
})
return kwargs
def get_success_url(self):
return self.purchase.get_complete_url()
class SurveyResponseComplete(SurveyPurchaseMixin, generic.TemplateView):
"""
Displays a confirmation message after the user has completed a survey.
"""
template_name = "surveys/survey_response_complete.html"
class SurveyPurchaseReport(SurveyPurchaseDetail):
"""
Allow users to generate a report for their survey when requested via POST.
The report is stored as JSON in the SurveyPurchase and can be retrieved via GET.
"""
template_name = "surveys/survey_purchase_report.html"
def post(self, request, *args, **kwargs):
self.purchase.generate_report()
messages.success(request, _("Report generated successfully"), fail_silently=True)
return redirect(self.purchase.get_report_url())
```
|
{
"source": "jerjohste/ecpy_qcircuits",
"score": 2
}
|
#### File: drivers/visa/rohde_and_schwarz_psa.py
```python
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from inspect import cleandoc
import numpy as np
try:
from visa import ascii, single, double
except ImportError:
ascii = 2
single = 1
double = 3
from ..driver_tools import (InstrIOError, secure_communication,
instrument_property)
from ..visa_tools import VisaInstrument
FORMATTING_DICT = {'PHAS': lambda x: np.angle(x, deg=True),
'MLIN': np.abs,
'MLOG': lambda x: 10*np.log10(np.abs(x)),
'REAL': np.real,
'IMAG': np.imag}
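# Maps a trace format name to the numpy post-processing applied to the complex
# trace data (e.g. 'MLOG' turns a complex point x into 10*log10(|x|)).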
class RohdeAndSchwarzPSA(VisaInstrument):
"""
"""
_channel = 1
port = 1
caching_permissions = {'frequency': True,
'power': True,
'selected_measure': True,
'if_bandwidth': True,
'sweep_type': True,
'sweep_points': True,
'average_state': True,
'average_count': True,
'average_mode': True}
    def __init__(self, connection_info, caching_allowed=True,
                 caching_permissions={}, auto_open=True):
        super(RohdeAndSchwarzPSA, self).__init__(connection_info,
                                                 caching_allowed,
                                                 caching_permissions,
                                                 auto_open=auto_open)
def _close(self):
self.close_connection()
# def reopen_connection(self):
# """
# """
# self._pna.reopen_connection()
@secure_communication()
def read_raw_data(self, measname):
""" Read raw data for a measure.
Parameters
----------
channel : int
trace : int
Returns
-------
data : numpy.array
Array of Floating points holding the data.
"""
#stop continuous measure mode
self.write('INIT:CONT OFF')
#start new measurement and wait before continuing with the commands
self.write('INIT;*WAI')
#get sweep data
trace = measname[1]
data_request = 'TRAC? TRACE{}'.format(trace)
data = np.array(self.ask(data_request).split(','),dtype = float)
if list(data):
return data
else:
            raise InstrIOError(cleandoc('''Rohde&Schwarz PSA did not return the
data for trace {}'''.format(trace)))
@secure_communication()
def get_x_data(self, measname):
""" Read raw data for a measure.
Parameters
----------
channel : int
trace : int
Returns
-------
data : numpy.array
Array of Floating points holding the data.
"""
trace = measname[1]
data_request = 'TRAC:X? TRACE{}'.format(trace)
data = np.array(self.ask(data_request).split(','),dtype = float)
if list(data):
return data
else:
            raise InstrIOError(cleandoc('''Rohde&Schwarz PSA did not return the
x values for trace {}'''.format(trace)))
@instrument_property
@secure_communication()
def resBW(self):
rbw = self.ask(':BAND?')
return rbw
@secure_communication()
@resBW.setter
def resBW(self,value):
self.write(':BAND {}'.format(value))
if self.ask(':BAND?') != value:
            raise InstrIOError(cleandoc('''Rohde&Schwarz PSA did not set the
res BW properly'''))
@instrument_property
@secure_communication()
def videoBW(self):
vbw = self.ask(':BAND:VID?')
return vbw
@secure_communication()
@videoBW.setter
def videoBW(self,value):
self.write(':BAND:VID {}'.format(value))
if self.ask(':BAND:VID?') != value:
            raise InstrIOError(cleandoc('''Rohde&Schwarz PSA did not set the
video BW properly'''))
@instrument_property
@secure_communication()
def span(self):
span = self.ask(':FREQ:SPAN?')
return span
@secure_communication()
@span.setter
def span(self,value):
self.write(':FREQ:SPAN {}'.format(value))
if self.ask(':FREQ:SPAN?') != value:
            raise InstrIOError(cleandoc('''Rohde&Schwarz PSA did not set the
frequency span properly'''))
@secure_communication()
def get_single_freq(self,freq,reflevel,rbw,vbw,avrg_num):
# get the single frequencies with sensible PSA parameters
values = []
for i in range(avrg_num):
values.append(float(self.ask(
'SENSe:LIST:POW? {},{},0,OFF,NORM,{},{},500us,0;*OPC'.
format(freq,reflevel,rbw,vbw))))
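        # Average the readings in linear power units and convert the mean back
        # to a dB value (averaging the dB values directly would bias the
        # result).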
return 10*np.log10(np.average(10**(np.array(values)/10)))
```
#### File: tasks/instr/rf_ENA_tasks.py
```python
import numbers
from atom.api import (Unicode, Bool, set_default, Enum)
from exopy.tasks.api import (InstrumentTask, InterfaceableTaskMixin,
validators)
CONVERSION_FACTORS = {'GHz': {'Hz': 1e9, 'kHz': 1e6, 'MHz': 1e3, 'GHz': 1},
'MHz': {'Hz': 1e6, 'kHz': 1e3, 'MHz': 1, 'GHz': 1e-3},
'kHz': {'Hz': 1e3, 'kHz': 1, 'MHz': 1e-3, 'GHz': 1e-6},
'Hz': {'Hz': 1, 'kHz': 1e-3, 'MHz': 1e-6, 'GHz': 1e-9}}
CONVERSION_FACTORS_TIME = {'ns': {'s': 1e-9, 'ms': 1e-6, 'us': 1e-3, 'ns': 1},
                           'us': {'s': 1e-6, 'ms': 1e-3, 'us': 1, 'ns': 1e3},
                           'ms': {'s': 1e-3, 'ms': 1, 'us': 1e3, 'ns': 1e6},
                           's': {'s': 1, 'ms': 1e3, 'us': 1e6, 'ns': 1e9}}
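# Illustrative usage (values chosen for the example): the factors convert from
# the task unit (outer key) to the requested unit (inner key), which is what
# the convert() methods below rely on, e.g.
#   2.5 * CONVERSION_FACTORS['GHz']['MHz']   -> 2500.0  (2.5 GHz in MHz)
#   10 * CONVERSION_FACTORS_TIME['ms']['s']  -> 0.01    (10 ms in s)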
LOOP_REAL = validators.SkipLoop(types=numbers.Real)
class SetRFFrequencyTask(InterfaceableTaskMixin, InstrumentTask):
"""Set the frequency of the signal delivered by a RF source.
"""
# Target frequency (dynamically evaluated)
frequency = Unicode().tag(pref=True, feval=LOOP_REAL)
# Unit of the frequency
unit = Enum('GHz', 'MHz', 'kHz', 'Hz').tag(pref=True)
# Whether to start the source if its output is off.
auto_start = Bool(False).tag(pref=True)
database_entries = set_default({'frequency': 1.0, 'unit': 'GHz'})
def check(self, *args, **kwargs):
"""Add the unit into the database.
"""
test, traceback = super(SetRFFrequencyTask, self).check(*args,
**kwargs)
self.write_in_database('unit', self.unit)
return test, traceback
def i_perform(self, frequency=None):
"""Default interface for simple sources.
"""
if self.auto_start:
self.driver.output = 'On'
if frequency is None:
frequency = self.format_and_eval_string(self.frequency)
self.driver.frequency_unit = self.unit
self.driver.frequency = frequency
self.write_in_database('frequency', frequency)
def convert(self, frequency, unit):
""" Convert a frequency to the given unit.
Parameters
----------
frequency : float
Frequency expressed in the task unit
unit : {'Hz', 'kHz', 'MHz', 'GHz'}
Unit in which to express the result
Returns
-------
converted_frequency : float
"""
return frequency*CONVERSION_FACTORS[self.unit][unit]
class SetRFPeriodTask(InterfaceableTaskMixin, InstrumentTask):
"""Set the period of the signal delivered by a RF source.
"""
# Target period (dynamically evaluated)
period = Unicode().tag(pref=True, feval=LOOP_REAL)
# Unit of the period
unit = Enum('ns', 'us', 'ms', 's').tag(pref=True)
# Whether to start the source if its output is off.
auto_start = Bool(False).tag(pref=True)
database_entries = set_default({'period': 1.0, 'unit': 's'})
def check(self, *args, **kwargs):
"""Add the unit into the database.
"""
test, traceback = super(SetRFPeriodTask, self).check(*args,
**kwargs)
self.write_in_database('unit', self.unit)
return test, traceback
def i_perform(self, period=None):
"""Default interface for simple sources.
"""
if self.auto_start:
self.driver.output = 'On'
if period is None:
period = self.format_and_eval_string(self.period)
self.driver.period_unit = self.unit
self.driver.period = period
self.write_in_database('period', period)
def convert(self, period, unit):
""" Convert a frequency to the given unit.
Parameters
----------
frequency : float
Frequency expressed in the task unit
unit : {'Hz', 'kHz', 'MHz', 'GHz'}
Unit in which to express the result
Returns
-------
converted_frequency : float
"""
return period*CONVERSION_FACTORS_TIME[self.unit][unit]
class SetRFENAPowerTask(InterfaceableTaskMixin, InstrumentTask):
"""Set the power of the signal delivered by the source.
"""
# Target power (dynamically evaluated)
power = Unicode().tag(pref=True, feval=LOOP_REAL)
# Whether to start the source if its output is off.
auto_start = Bool(False).tag(pref=True)
database_entries = set_default({'power': -10})
def i_perform(self, power=None):
"""
"""
if self.auto_start:
self.driver.output = 'On'
if power is None:
power = self.format_and_eval_string(self.power)
self.driver.power = power
self.write_in_database('power', power)
class SetRFOnOffTask(InterfaceableTaskMixin, InstrumentTask):
"""Switch on/off the output of the source.
"""
# Desired state of the output, runtime value can be 0 or 1.
switch = Unicode('Off').tag(pref=True, feval=validators.SkipLoop())
database_entries = set_default({'output': 0})
def check(self, *args, **kwargs):
"""Validate the value of the of the switch.
"""
test, traceback = super(SetRFOnOffTask, self).check(*args, **kwargs)
if test and self.switch:
try:
switch = self.format_and_eval_string(self.switch)
except Exception:
return False, traceback
if switch not in ('Off', 'On', 0, 1):
test = False
traceback[self.get_error_path() + '-switch'] =\
'{} is not an acceptable value.'.format(self.switch)
return test, traceback
def i_perform(self, switch=None):
"""Default interface behavior.
"""
if switch is None:
switch = self.format_and_eval_string(self.switch)
if switch == 'On' or switch == 1:
self.driver.output = 'On'
self.write_in_database('output', 1)
else:
self.driver.output = 'Off'
self.write_in_database('output', 0)
class SetPulseModulationTask(InterfaceableTaskMixin, InstrumentTask):
"""Switch on/off the pulse modulation of the source.
"""
# Desired state of the output, runtime value can be 0 or 1.
switch = Unicode('Off').tag(pref=True, feval=validators.SkipLoop())
database_entries = set_default({'pm_state': 0})
def check(self, *args, **kwargs):
"""Validate the value of the switch.
"""
test, traceback = super(SetPulseModulationTask, self).check(*args,
**kwargs)
if test and self.switch:
try:
switch = self.format_and_eval_string(self.switch)
except Exception:
return False, traceback
if switch not in ('Off', 'On', 0, 1):
test = False
traceback[self.get_error_path() + '-switch'] =\
'{} is not an acceptable value.'.format(self.switch)
return test, traceback
def i_perform(self, switch=None):
"""Default interface behavior.
"""
if switch is None:
switch = self.format_and_eval_string(self.switch)
if switch == 'On' or switch == 1:
self.driver.pm_state = 'On'
self.write_in_database('pm_state', 1)
else:
self.driver.pm_state = 'Off'
self.write_in_database('pm_state', 0)
```
|
{
"source": "jerjohste/exopy",
"score": 2
}
|
#### File: app/packages/plugin.py
```python
import pkg_resources
import logging
from atom.api import List, Dict
from enaml.workbench.api import Plugin, PluginManifest
from ...utils.traceback import format_exc
logger = logging.getLogger(__name__)
class PackagesPlugin(Plugin):
"""Plugin collecting and registering all manifest contributed by extension
packages.
"""
    #: Dictionary listing the extension packages registered at startup. Each
    #: entry can contain either a dict mapping the ids of the registered
    #: manifests to a message indicating whether registering succeeded, or
    #: a message explaining why the package was not loaded.
packages = Dict()
def stop(self):
"""Unregister all manifest contributed by extension packages.
"""
# Sort to respect the given priority when unregistering.
heap = sorted(self._registered)
for manifest_id in heap:
self.workbench.unregister(manifest_id[2])
self.packages.clear()
self._registered = []
def collect_and_register(self):
"""Iter over packages and register the manifest they are providing.
"""
# Getting core plugin to signal errors.
core = self.workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.app.errors.signal'
packages = dict()
registered = []
core.invoke_command('exopy.app.errors.enter_error_gathering', {})
for ep in pkg_resources.iter_entry_points('exopy_package_extension'):
# Check that all dependencies are satisfied.
try:
ep.require()
except Exception:
msg = 'Could not load extension package %s : %s'
msg = msg % (ep.name, format_exc())
packages[ep.name] = msg
core.invoke_command(cmd, dict(kind='package', id=ep.name,
message=msg))
continue
# Get all manifests
packages[ep.name] = {}
manifests = ep.load()()
if not isinstance(manifests, list):
msg = 'Package %s entry point must return a list, not %s'
msg = msg % (ep.name, str(type(manifests)))
packages[ep.name] = msg
core.invoke_command(cmd, dict(kind='package', id=ep.name,
message=msg))
continue
if any(not issubclass(m, PluginManifest) for m in manifests):
msg = 'Package %s entry point must only return PluginManifests'
msg = msg % ep.name
packages[ep.name] = msg
core.invoke_command(cmd, dict(kind='package', id=ep.name,
message=msg))
continue
for manifest in manifests:
inst = manifest()
try:
self.workbench.register(inst)
except ValueError:
core.invoke_command(cmd,
dict(kind='registering', id=inst.id,
message=format_exc()))
continue
packages[ep.name][inst.id] = 'Successfully registered'
priority = getattr(inst, 'priority', 100)
# Keep the insertion index, to avoid comparing id when
# sorting (it would make no sense).
registered.append((priority, len(registered), inst.id))
self.packages = packages
self._registered = registered
core.invoke_command('exopy.app.errors.exit_error_gathering', {})
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
#: Private list of registered manifest used when stopping the plugin.
_registered = List()
```
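The `collect_and_register` method above discovers extension packages through the `exopy_package_extension` entry point and expects each entry point to resolve to a callable returning a list of `PluginManifest` subclasses. As a reading aid, here is a minimal sketch of how such a package could declare that entry point; the package, module and function names are hypothetical and not taken from the repository.
```python
# setup.py of a hypothetical extension package (illustrative sketch only)
from setuptools import setup

setup(
    name='exopy-my-extension',
    packages=['my_extension'],
    entry_points={
        'exopy_package_extension': [
            # list_manifests must be a callable returning a list of
            # enaml PluginManifest subclasses, as checked above.
            'my_extension = my_extension:list_manifests',
        ],
    },
)
```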
#### File: monitors/text_monitor/entry.py
```python
from atom.api import (Unicode, List)
from enaml.application import deferred_call
from exopy.utils.atom_util import HasPrefAtom
class MonitoredEntry(HasPrefAtom):
"""Entry to display by the text monitor.
"""
#: User understandable name of the monitored entry.
name = Unicode().tag(pref=True)
#: Full name of the entry as found or built from the database.
path = Unicode().tag(pref=True)
#: Formatting of the entry.
formatting = Unicode().tag(pref=True)
#: Current value that the monitor should display.
value = Unicode()
    #: List of database entries the entry depends on.
depend_on = List().tag(pref=True)
def update(self, database_vals):
""" Method updating the value of the entry given the current state of
the database.
"""
# TODO : handle evaluation delimited by $. Imply a try except
vals = {d: database_vals[d] for d in self.depend_on}
new_val = self.formatting.format(**vals)
deferred_call(setattr, self, 'value', new_val)
```
#### File: monitors/text_monitor/monitor.py
```python
import os
from ast import literal_eval
from textwrap import fill
import enaml
from atom.api import (List, Dict, ForwardTyped, Property, Value)
from ..base_monitor import BaseMonitor
from .entry import MonitoredEntry
with enaml.imports():
from enaml.stdlib.message_box import information
def import_monitor_plugin():
""" Delayed import of the plugin to avoid circular imports.
"""
from .plugin import TextMonitorPlugin
return TextMonitorPlugin
class TextMonitor(BaseMonitor):
"""Simple monitor displaying entries as text in a widget.
"""
#: List of the entries which should be displayed when a measurement is
#: running.
#: This should not be manipulated directly by user code.
displayed_entries = List(MonitoredEntry)
#: List of the entries which should not be displayed when a measurement is
#: running. This should not be manipulated directly by user code.
undisplayed_entries = List(MonitoredEntry)
    #: List of the entries which should not be displayed when a measurement is
#: running because they would be redundant with another entry. (created by
#: a rule for example.)
#: This should not be manipulated directly by user code.
hidden_entries = List(MonitoredEntry)
    #: Mapping between a database entry and a list of callables used for
#: updating a monitor entry which relies on the database entry.
updaters = Dict()
#: List of rules which should be used to build monitor entries.
rules = List()
#: List of user created monitor entries.
custom_entries = List(MonitoredEntry)
#: List of all the known database entries.
known_monitored_entries = Property()
def process_news(self, news):
"""Handle a news by calling every related entrt updater.
"""
key, value = news
values = self._database_values
values[key] = value
if key in self.updaters:
for updater in self.updaters[key]:
updater(values)
def refresh_monitored_entries(self, entries=None):
"""Rebuild entries based on the rules and database entries.
Parameters
----------
entries : dict, optional
Database entries to use when rebuilding the monitor entries.
"""
if not entries:
entries = self._database_values
else:
self._database_values = entries
# Preserve the custom entries.
custom = self.custom_entries[:]
self._clear_state()
self.custom_entries = custom
for entry, value in entries.items():
self.handle_database_entries_change(('added', entry, value))
def handle_database_entries_change(self, news):
"""Generate new entries for added values and clean removed values.
"""
# Unwrap multiple notifications.
if isinstance(news[0], tuple):
for n in news:
self.handle_database_entries_change(n)
# Handle the addition of a new entry to the database
if news[0] == 'added':
_, path, value = news
# Store the new value.
self._database_values[path] = value
# Add a default entry to the displayed monitor entries.
new_entry = self._create_default_entry(path, value)
self.add_entries('displayed', (new_entry,))
# Try to apply rules.
for rule in self.rules:
rule.try_apply(path, self)
# Check whether any custom entry is currently hidden.
hidden_custom = [e for e in self.custom_entries
if e not in self.displayed_entries and
e not in self.undisplayed_entries]
            # If there are any, check whether all their dependences are once
            # more available.
if hidden_custom:
for e in hidden_custom:
if all(d in self.monitored_entries for d in e.depend_on):
self.add_entries('displayed', (e,))
# Handle the case of a database entry being suppressed, by removing all
        # monitor entries which were depending on this entry.
elif news[0] == 'removed':
_, path = news
self.displayed_entries = [m for m in self.displayed_entries
if path not in m.depend_on]
self.undisplayed_entries = [m for m in self.undisplayed_entries
if path not in m.depend_on]
self.hidden_entries = [m for m in self.hidden_entries
if path not in m.depend_on]
if path in self.monitored_entries:
self.monitored_entries.remove(path)
if path in self.updaters:
del self.updaters[path]
if path in self._database_values:
del self._database_values[path]
# Handle the case of a database entry being renamed.
elif news[0] == 'renamed':
_, old, new, value = news
_, old_entry_name = old.rsplit('/', 1)
_, new_entry_name = new.rsplit('/', 1)
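            # The common suffix of the old and new entry names is obtained by
            # taking the common prefix of the reversed names and reversing it
            # back; what precedes that suffix is the task name that changed.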
suffix = os.path.commonprefix((old_entry_name[::-1],
new_entry_name[::-1]))[::-1]
old_task_name = old_entry_name[:-len(suffix)]
new_task_name = new_entry_name[:-len(suffix)]
for entries in ('displayed_entries', 'undisplayed_entries',
'hidden_entries'):
for entry in getattr(self, entries):
if entry.path == old:
entry.path = new
entry.name = new_entry_name
elif old_task_name in entry.name:
entry.name = (new_task_name +
entry.name[len(old_task_name):])
if old in entry.depend_on:
new_dep = entry.depend_on[:]
new_dep[new_dep.index(old)] = new
entry.depend_on = new_dep
if old in self.monitored_entries:
ind = self.monitored_entries.index(old)
new_entries = self.monitored_entries[:]
new_entries[ind] = new
self.monitored_entries = new_entries
if old in self.updaters:
self.updaters[new] = self.updaters[old]
del self.updaters[old]
if old in self._database_values:
del self._database_values[old]
self._database_values[new] = value
def handle_database_nodes_change(self, news):
"""Update the paths when a node is renamed.
"""
# Unwrap multiple notifications.
if isinstance(news[0], tuple):
for n in news:
self.handle_database_nodes_change(n)
if news[0] == 'renamed':
_, path, old, new = news
old_path = path + '/' + old
new_path = path + '/' + new
for entries in ('displayed_entries', 'undisplayed_entries',
'hidden_entries'):
for entry in getattr(self, entries):
if entry.path.startswith(old_path):
entry.path = new_path + entry.path[len(old_path):]
new_depend_on = []
for p in entry.depend_on:
if p.startswith(old_path):
new_depend_on.append(new_path + p[len(old_path):])
else:
new_depend_on.append(p)
entry.depend_on = new_depend_on
new_monitored = []
for e in self.monitored_entries:
if e.startswith(old_path):
new_monitored.append(new_path + e[len(old_path):])
else:
new_monitored.append(e)
self.monitored_entries = new_monitored
for attr in ('updaters', '_database_values'):
new_val = {}
for k, v in getattr(self, attr).items():
if k.startswith(old_path):
new_val[new_path + k[len(old_path):]] = v
else:
new_val[k] = v
setattr(self, attr, new_val)
def get_state(self):
"""Write the state of the monitor in a dictionary.
"""
prefs = self.preferences_from_members()
# Get the definitions of the custom entries.
for i, custom_entry in enumerate(self.custom_entries):
aux = 'custom_{}'.format(i)
prefs[aux] = custom_entry.preferences_from_members()
# Get the definitions of the rules.
for i, rule in enumerate(self.rules):
aux = 'rule_{}'.format(i)
if rule.id in self._plugin._rule_configs.contributions:
prefs[aux] = rule.id
else:
prefs[aux] = rule.preferences_from_members()
# Get the displayed/undisplayed status of each entry based on its path.
# If the monitor was never linked keep the values stored.
if self._state:
prefs['displayed'] = self._state['displayed']
prefs['undisplayed'] = self._state['undisplayed']
prefs['hidden'] = self._state['hidden']
else:
prefs['displayed'] = repr([e.path for e in self.displayed_entries])
prefs['undisplayed'] = repr([e.path
for e in self.undisplayed_entries])
prefs['hidden'] = repr([e.path for e in self.hidden_entries])
return prefs
def set_state(self, state):
"""Rebuild all rules and dispatch entries according to the state.
"""
# Identify all the rules.
rules_config = [conf for name, conf in state.items()
if name.startswith('rule_')]
# Rebuild all rules.
rules = []
for rule_config in rules_config:
rule = self._plugin.build_rule(rule_config)
if rule is not None:
rules.append(rule)
self.rules = rules
customs_config = [conf for name, conf in state.items()
if name.startswith('custom_')]
for custom_config in customs_config:
entry = MonitoredEntry()
entry.update_members_from_preferences(custom_config)
self.custom_entries.append(entry)
self._state = state
def link_to_measurement(self, measurement):
"""Set the entries according to the state if one is present.
"""
super(TextMonitor, self).link_to_measurement(measurement)
database = measurement.root_task.database
self.refresh_monitored_entries(database.list_all_entries(values=True))
if self._state:
m_entries = set(self.displayed_entries + self.undisplayed_entries +
self.hidden_entries + self.custom_entries)
config = self._state
del self._state
pref_disp = literal_eval(config['displayed'])
pref_undisp = literal_eval(config['undisplayed'])
pref_hidden = literal_eval(config['hidden'])
disp = [e for e in m_entries if e.path in pref_disp]
m_entries -= set(disp)
undisp = [e for e in m_entries if e.path in pref_undisp]
m_entries -= set(undisp)
hidden = [e for e in m_entries if e.path in pref_hidden]
m_entries -= set(hidden)
# TODO this should not assume the UI exists
if m_entries:
e_l = [e.name for e in m_entries]
mess = ('The following entries were not expected from the '
                        'config : {}. These entries have been added to the '
'displayed ones.')
information(parent=None,
title='Unhandled entries',
text=fill(mess.format(e_l)))
disp += list(m_entries)
self.displayed_entries = disp
self.undisplayed_entries = undisp
self.hidden_entries = hidden
def add_entries(self, section, entries):
"""Add entries to the specified section.
        The entries should not be present in another section (except hidden).
Parameters
----------
section : {'displayed', 'undisplayed', 'hidden'}
Section in which to add the entries.
        entries : iterable[MonitoredEntry]
Entries to add.
"""
name = section+'_entries'
container = getattr(self, name, None)
if container is None:
raise ValueError('Section must be one of : displayed, undisplayed,'
' hidden, not %s' % section)
copy = container[:]
copy.extend(entries)
if section == 'displayed':
for e in entries:
self._displayed_entry_added(e)
setattr(self, name, copy)
def move_entries(self, origin, destination, entries):
"""Move entries from a section to another.
Parameters
----------
origin : {'displayed', 'undisplayed', 'hidden'}
Section in which the entries currently are.
destination : {'displayed', 'undisplayed', 'hidden'}
Section in which to put the entries.
entries : iterable[MonitoredEntry]
Entries to move.
"""
o_name = origin+'_entries'
o_container = getattr(self, o_name, None)
if o_container is None:
raise ValueError('Origin must be one of : displayed, undisplayed,'
' hidden, not %s' % origin)
d_name = destination+'_entries'
d_container = getattr(self, d_name, None)
if d_container is None:
raise ValueError('Destination must be one of : displayed, '
'undisplayed, hidden, not %s' % destination)
if origin == 'displayed':
for e in entries:
self._displayed_entry_removed(e)
if destination == 'displayed':
for e in entries:
self._displayed_entry_added(e)
setattr(self, o_name, [e for e in o_container if e not in entries])
copy = d_container[:]
copy.extend(entries)
setattr(self, d_name, copy)
def remove_entries(self, section, entries):
"""Remove entries to the specified section.
The entries should not be present in another section.
Parameters
----------
section : {'displayed', 'undisplayed', 'hidden'}
Section from which to remove the entries.
        entries : iterable[MonitoredEntry]
Entries to remove.
"""
name = section+'_entries'
container = getattr(self, name, None)
if container is None:
            raise ValueError('Section must be one of : displayed, undisplayed,'
' hidden, not %s' % section)
if section == 'displayed':
for e in entries:
self._displayed_entry_removed(e)
setattr(self, name, [e for e in container if e not in entries])
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
    #: Known values of the database entries, used when recomputing an entry
    #: whose value depends on more than one database entry. During edition all
    #: values are stored, regardless of whether or not the entry needs to be
    #: observed; when the start method is called the dict is cleaned.
_database_values = Dict()
#: Reference to the monitor plugin handling the rules persistence.
_plugin = ForwardTyped(import_monitor_plugin)
#: Temporary storage of the state that is preserved till the tool is
#: linked to a measurement.
_state = Value()
@staticmethod
def _create_default_entry(entry_path, value):
""" Create a monitor entry for a database entry.
Parameters
----------
entry_path : unicode
Path of the database entries for which to create a monitor entry.
Returns
-------
entry : MonitoredEntry
Monitor entry to be added to the monitor.
"""
_, name = entry_path.rsplit('/', 1)
formatting = '{' + entry_path + '}'
entry = MonitoredEntry(name=name, path=entry_path,
formatting=formatting, depend_on=[entry_path])
entry.value = '{}'.format(value)
return entry
def _clear_state(self):
""" Clear the monitor state.
"""
with self.suppress_notifications(): # Need to clarify this
self.displayed_entries = []
self.undisplayed_entries = []
self.hidden_entries = []
self.updaters = {}
self.custom_entries = []
self.monitored_entries = []
def _displayed_entry_added(self, entry):
""" Tackle the addition of a displayed monitor entry.
        First this method will add the entry updater into the updaters dict for
        each of its dependences and, if a dependence is absent from the
        monitored_entries, it will be added.
Parameters
----------
entry : MonitoredEntry
The entry being added to the list of displayed entries of the
monitor.
"""
for dependence in entry.depend_on:
if dependence in self.updaters:
self.updaters[dependence].append(entry.update)
else:
self.updaters[dependence] = [entry.update]
if dependence not in self.monitored_entries:
self.monitored_entries.append(dependence)
def _displayed_entry_removed(self, entry):
""" Tackle the deletion of a displayed monitor entry.
        First this method will remove the entry updater for each of its
        dependences and, if no updater remains for that database entry, the
        entry will be removed from the monitored_entries.
Parameters
----------
entry : MonitoredEntry
            The entry being removed from the list of displayed entries of the
monitor.
"""
for dependence in entry.depend_on:
self.updaters[dependence].remove(entry.update)
if not self.updaters[dependence]:
del self.updaters[dependence]
self.monitored_entries.remove(dependence)
def _get_known_monitored_entries(self):
"""Getter for the known_monitored_entries property.
"""
return self._database_values.keys()
```
#### File: text_monitor/rules/std_rules.py
```python
from atom.api import (Unicode, Bool)
from ..entry import MonitoredEntry
from .base import BaseRule
class RejectRule(BaseRule):
"""Automatically remove an entry matching one of the specified suffixes.
"""
def try_apply(self, new_entry, monitor):
"""Hide an entry if it suffix match.
"""
for suffix in self.suffixes:
if new_entry.endswith(suffix):
for entry in monitor.displayed_entries:
if entry.path == new_entry:
monitor.move_entries('displayed', 'undisplayed',
(entry,))
break
class FormatRule(BaseRule):
""" Create a new entry with a special formatting if some entries exist.
    Simple entries which would be redundant with the information contained
in the new formatting can be automatically hidden.
"""
#: The format in which the new entry created by the rule should be
#: displayed
new_entry_formatting = Unicode().tag(pref=True)
#: The suffix of the new entry created by the rule.
new_entry_suffix = Unicode().tag(pref=True)
#: Whether or not to hide the entries used by the rules.
hide_entries = Bool(True).tag(pref=True)
def try_apply(self, new_entry, monitor):
"""If all suffixes are found for a single task, create a new entry
and hide the components if asked to.
"""
entries = monitor.monitored_entries
for suffix in self.suffixes:
            # Check whether the new entry matches one suffix
if new_entry.endswith(suffix):
entry_path, entry_name = new_entry.rsplit('/', 1)
# Getting the prefix of the entry (remove the found suffix)
prefix = entry_path + '/' + entry_name.replace('_' + suffix,
'_')
# Find all entries with the same prefix.
prefixed_entries = [entry for entry in entries
if entry.startswith(prefix)]
                # Check if all the entries needed to apply the rule exist.
if all(any(entry.endswith(suffix)
for entry in prefixed_entries)
for suffix in self.suffixes):
# Create the name of the entry.
name_prefix = entry_name.replace('_' + suffix, '')
name = name_prefix + '_' + self.new_entry_suffix
path = entry_path + '/' + name
# Create the right formatting by replacing the rule fields
# by the full name of the entries.
formatting = self.new_entry_formatting
for suffix in self.suffixes:
formatting = formatting.replace(suffix,
prefix + suffix)
# Create a list of all the dependencies.
depend = [prefix + suffix
for suffix in self.suffixes]
# Create the monitor entry and add it to the list of
# displayed entries.
entry = MonitoredEntry(name=name, path=path,
formatting=formatting,
depend_on=depend)
monitor.add_entries('displayed', [entry])
# If requested hide all the entries redundant with the
# one created by the rule.
if self.hide_entries:
for e in depend:
for entry in monitor.displayed_entries:
if entry.path == e:
monitor.move_entries('displayed', 'hidden',
(entry,))
break
else:
break
```
#### File: exopy/measurement/processor.py
```python
import os
import logging
from time import sleep
from threading import Thread, RLock
import enaml
from atom.api import Atom, Typed, ForwardTyped, Value, Bool
from enaml.widgets.api import Window
from enaml.layout.api import InsertTab, FloatItem
from enaml.application import deferred_call, schedule
from .engines.api import BaseEngine, ExecutionInfos
from .measurement import Measurement
from ..utils.flags import BitFlag
from ..utils.traceback import format_exc
logger = logging.getLogger(__name__)
def plugin():
"""Delayed import to avoid circular references.
"""
from .plugin import MeasurementPlugin
return MeasurementPlugin
def schedule_and_block(func, args=(), kwargs={}, priority=100):
"""Schedule a function call on the main thread and wait for it to complete.
"""
    scheduled = schedule(func, args, kwargs, priority=priority)
    while scheduled.pending():
        sleep(0.05)
class MeasurementProcessor(Atom):
"""Object reponsible for a measurement execution.
"""
#: Boolean indicating whether or not the processor is working.
active = Bool()
#: Reference to the measurement plugin.
plugin = ForwardTyped(plugin)
#: Currently run measurement or last measurement run.
running_measurement = Typed(Measurement)
#: Instance of the currently used engine.
engine = Typed(BaseEngine)
    #: Boolean indicating whether or not to process all enqueued measurements.
continuous_processing = Bool(True)
#: Monitors window
monitors_window = Typed(Window)
def start_measurement(self, measurement):
"""Start a new measurement.
"""
if self._thread and self._thread.is_alive():
self._state.set('stop_processing')
self._thread.join(5)
if self._thread.is_alive():
core = self.plugin.workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.app.errors.signal'
msg = ("Can't stop the running execution thread. Please "
"restart the application and consider reporting this "
"as a bug.")
core.invoke_command(cmd, dict(kind='error', message=msg))
return
if self.continuous_processing:
self._state.set('continuous_processing')
else:
self._state.clear('continuous_processing')
deferred_call(setattr, self, 'active', True)
self._thread = Thread(target=self._run_measurements,
args=(measurement,))
self._thread.daemon = True
self._thread.start()
def pause_measurement(self):
"""Pause the currently active measurement.
"""
logger.info('Pausing measurement %s.', self.running_measurement.name)
self.running_measurement.status = 'PAUSING'
self._state.set('pause_attempt')
if self._state.test('running_main'):
self.engine.pause()
self.engine.observe('status', self._watch_engine_state)
else:
if self._active_hook:
self._active_hook.pause()
self._active_hook.observe('paused', self._watch_hook_state)
def resume_measurement(self):
"""Resume the currently paused measurement.
"""
logger.info('Resuming measurement %s.', self.running_measurement.name)
self.running_measurement.status = 'RESUMING'
self._state.clear('paused')
self._state.set('resuming')
if self._state.test('running_main'):
self.engine.resume()
self.engine.observe('status', self._watch_engine_state)
else:
if self._active_hook:
self._active_hook.resume()
self._active_hook.observe('resumed',
self._watch_hook_state)
def stop_measurement(self, no_post_exec=False, force=False):
"""Stop the currently active measurement.
"""
if no_post_exec or force:
self._state.set('no_post_exec')
self._state.set('stop_attempt')
if self.running_measurement:
logger.info('Stopping measurement %s.',
self.running_measurement.name)
self.running_measurement.status = 'STOPPING'
if self._state.test('running_main'):
self.engine.stop(force)
else:
if self._active_hook:
self._active_hook.stop(force)
def stop_processing(self, no_post_exec=False, force=False):
"""Stop processing the enqueued measurements.
"""
if self.running_measurement:
logger.info('Stopping measurement %s.',
self.running_measurement.name)
if no_post_exec or force:
self._state.set('no_post_exec')
self._state.set('stop_attempt', 'stop_processing')
self._state.clear('processing')
if self._state.test('running_main'):
self.engine.stop(force)
else:
if self._active_hook:
self._active_hook.stop(force)
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
#: Background thread handling the measurement execution
_thread = Value()
#: Internal flags used to keep track of the execution state.
_state = Typed(BitFlag,
(('processing', 'running_pre_hooks', 'running_main',
'running_post_hooks', 'pause_attempt', 'paused',
'resuming', 'stop_attempt', 'stop_processing',
'no_post_exec', 'continuous_processing'),)
)
#: Hook currently executed. The value is meaningful only when
#: 'running_pre_hooks' or 'running_post_hooks' is set.
_active_hook = Value()
#: Lock to avoid race condition when pausing.
_lock = Value(factory=RLock)
def _run_measurements(self, measurement):
"""Run measurements (either all enqueued or only one)
This code is executed by a thread (stored in _thread)
Parameters
----------
measurement : Measurement
First measurement to run. Other measurements will be run in their
            order of appearance in the queue if the user enables continuous
processing.
"""
# If the engine does not exist, create one.
plugin = self.plugin
if not self.engine:
engine = plugin.create('engine', plugin.selected_engine)
schedule_and_block(setattr, (self, 'engine', engine))
# Mark that we started processing measurements.
self._state.set('processing')
# Process enqueued measurement as long as we are supposed to.
while not self._state.test('stop_processing'):
# Clear the internal state to start fresh.
self._clear_state()
# If we were provided with a measurement use it, otherwise find the
# next one.
if measurement:
meas = measurement
measurement = None
else:
meas = self.plugin.find_next_measurement()
# If there is a measurement register it as the running one, update
# its status and log its execution.
if meas is not None:
meas_id = meas.name + '_' + meas.id
self._set_measurement_state('RUNNING',
'The measurement is being run.',
meas)
msg = 'Starting execution of measurement %s'
                logger.info(msg % meas_id)
status, infos = self._run_measurement(meas)
# Release runtime dependencies.
meas.dependencies.release_runtimes()
# If no measurement remains stop.
else:
break
# Log the result.
mess = 'Measurement %s processed, status : %s' % (meas_id, status)
if infos:
mess += '\n' + infos
logger.info(mess)
# Update the status and infos.
self._set_measurement_state(status, infos, clear=True)
# If we are supposed to stop, stop.
if (not self._state.test('continuous_processing') or
self._state.test('stop_processing')):
break
if self.engine and self.plugin.engine_policy == 'stop':
self._stop_engine()
self._state.clear('processing')
deferred_call(setattr, self, 'active', False)
def _run_measurement(self, measurement):
"""Run a single measurement.
"""
# Switch to running state.
measurement.enter_running_state()
meas_id = measurement.name + '_' + measurement.id
# Collect runtime dependencies
res, msg, errors = measurement.dependencies.collect_runtimes()
if not res:
status = 'SKIPPED' if 'unavailable' in msg else 'FAILED'
return status, msg + '\n' + errors_to_msg(errors)
# Records that we got access to all the runtimes.
        mess = ('The use of all runtime resources has been granted to the '
'measurement %s' % meas_id)
logger.info(mess.replace('\n', ' '))
# Run checks now that we have all the runtimes.
if not measurement.forced_enqueued:
res, errors = measurement.run_checks()
if not res:
msg = 'Measurement %s failed to pass the checks :\n' % meas_id
return 'FAILED', msg + errors_to_msg(errors)
# Now that we know the measurement is going to run save it.
default_filename = meas_id + '.meas.ini'
path = os.path.join(measurement.root_task.default_path,
default_filename)
measurement.save(path)
logger.info('Starting measurement {}.'.format(meas_id))
# Execute all pre-execution hooks.
result, errors = self._run_pre_execution(measurement)
if not result:
msg = ('Measurement %s failed to run pre-execution hooks :\n' %
meas_id)
return 'FAILED', msg + errors_to_msg(errors)
result = True
errors = {}
if self._check_for_pause_or_stop():
# Connect new monitors, and start them.
logger.debug('Connecting monitors for measurement %s',
meas_id)
self._start_monitors(measurement)
# Assemble the task infos for the engine to run the main task.
deps = measurement.dependencies
infos = ExecutionInfos(
id=meas_id+'-main',
task=measurement.root_task,
build_deps=deps.get_build_dependencies().dependencies,
runtime_deps=deps.get_runtime_dependencies('main'),
observed_entries=measurement.collect_monitored_entries(),
checks=not measurement.forced_enqueued,
)
# Ask the engine to perform the main task.
logger.debug('Passing measurement %s to the engine.',
meas_id)
self._state.set('running_main')
execution_result = self.engine.perform(infos)
self._state.clear('running_main')
# Record the result and store engine return value in the
# measurement for the post execution hooks.
result &= execution_result.success
errors.update(execution_result.errors)
measurement.task_execution_result = execution_result
# Disconnect monitors.
            logger.debug('Disconnecting monitors for measurement %s',
meas_id)
self._stop_monitors(measurement)
        # Save the stop_attempt state so that we can still run the
        # post-execution hooks if we are supposed to do so.
state = self._state.test('stop_attempt')
self._state.clear('stop_attempt')
# Execute all post-execution hooks if pertinent.
if not self._state.test('no_post_exec'):
res, errors = self._run_post_execution(measurement)
result &= res
if state:
self._state.set('stop_attempt')
if self._state.test('stop_attempt'):
return ('INTERRUPTED',
'The measurement has been interrupted by the user.')
if not result:
if not execution_result.success:
msg = 'Execution of the main task failed :\n'
else:
msg = 'Some post-execution hook failed to run :\n'
return 'FAILED', msg + errors_to_msg(errors)
return 'COMPLETED', 'The measurement successfully completed.'
def _run_pre_execution(self, measurement):
"""Run pre measurement execution operations.
Returns
-------
result : bool
Boolean indicating whether or not the operations succeeded.
report : dict
Dict storing the errors (as dict) by id of the operation in which
            they occurred.
"""
result = True
full_report = {}
self._state.set('running_pre_hooks')
meas_id = measurement.name + '_' + measurement.id
for id, hook in measurement.pre_hooks.items():
if not self._check_for_pause_or_stop():
break
logger.debug('Calling pre-measurement hook %s for measurement %s',
id, meas_id)
with self._lock:
self._active_hook = hook
try:
hook.run(self.plugin.workbench, self.engine)
except Exception:
result = False
full_report[id] = format_exc()
# Prevent issues with pausing/resuming
with self._lock:
self._active_hook.unobserve('paused', self._watch_hook_state)
self._active_hook = None
self._state.clear('running_pre_hooks')
return result, full_report
def _run_post_execution(self, measurement):
"""Run post measurement operations.
Parameters
----------
measurement : Measurement
Returns
-------
result : bool
Boolean indicating whether or not the operations succeeded.
report : dict
Dict storing the errors (as dict) by id of the operation in which
            they occurred.
"""
result = True
full_report = {}
self._state.set('running_post_hooks')
meas_id = measurement.name + '_' + measurement.id
for id, hook in measurement.post_hooks.items():
if not self._check_for_pause_or_stop():
break
logger.debug('Calling post-measurement hook %s for measurement %s',
id, meas_id)
with self._lock:
self._active_hook = hook
try:
hook.run(self.plugin.workbench, self.engine)
except Exception:
result = False
full_report[id] = format_exc()
# Prevent issues with pausing/resuming
with self._lock:
self._active_hook.unobserve('paused', self._watch_hook_state)
self._active_hook = None
self._state.clear('running_post_hooks')
return result, full_report
def _start_monitors(self, measurement):
"""Start the monitors attached to a measurement and display them.
If no dedicated window exists one will be created. For monitors for
which a dockitem already exists it is re-used.
"""
def start_monitors(self, measurement):
"""Start the monitors attached to a measurement.
Called in the main thread.
"""
workbench = self.plugin.workbench
if not self.monitors_window:
with enaml.imports():
from .workspace.monitors_window import MonitorsWindow
self.monitors_window = MonitorsWindow()
else:
self.monitors_window.send_to_front()
self.monitors_window.measurement = measurement
dock_area = self.monitors_window.dock_area
anchor = ''
for dock_item in dock_area.dock_items():
if dock_item.name not in measurement.monitors:
dock_item.destroy()
elif not anchor:
anchor = dock_item.name
# We show the window now because otherwise the layout ops are not
# properly executed.
if self.plugin.auto_show_monitors:
self.monitors_window.show()
ops = []
for monitor in measurement.monitors.values():
decl = monitor.declaration
dock_item = dock_area.find(decl.id)
if dock_item is None:
try:
dock_item = decl.create_item(workbench, dock_area)
except Exception:
msg = 'Failed to create widget for monitor %s :\n %s'
logger.error(msg, decl.id, format_exc())
continue
if dock_item is not None:
if dock_item.float_default:
ops.append(FloatItem(item=decl.id))
else:
ops.append(InsertTab(item=decl.id,
target=anchor))
self.engine.observe('progress', monitor.process_news)
if dock_item:
dock_item.monitor = monitor
monitor.start()
if ops:
dock_area.update_layout(ops)
# Executed in the main thread to avoid GUI update issues.
schedule_and_block(start_monitors, (self, measurement), priority=100)
def _stop_monitors(self, measurement):
"""Disconnect the monitors from the engine and stop them.
        The monitors window is not hidden as the user may want to check it
later.
"""
def stop_monitors(engine, measurement):
"""Stop the monitors.
Executed on the main thread.
"""
if engine:
                engine.unobserve('progress')
for monitor in measurement.monitors.values():
monitor.stop()
# Executed in the main thread to avoid GUI update issues.
schedule_and_block(stop_monitors, (self.engine, measurement),
priority=100)
def _check_for_pause_or_stop(self):
"""Check if a pause or stop request is pending and process it.
Returns
-------
        should_continue : bool
            Boolean indicating whether or not the execution of the measurement
            should continue.
"""
flag = self._state
if flag.test('stop_attempt'):
return False
if flag.test('pause_attempt'):
flag.clear('pause_attempt')
self._set_measurement_state('PAUSED', 'The measurement is paused.')
flag.set('paused')
while True:
if flag.wait(0.1, 'resuming'):
flag.clear('resuming')
self._set_measurement_state('RUNNING',
'The measurement has resumed.')
return True
if flag.test('stop_attempt'):
return False
return True
    # These methods must post an update of measurement.status and remove the
    # observers.
def _watch_engine_state(self, change):
"""Observe engine state to notify that the engine paused or resumed.
"""
if change['value'] == 'Paused':
self._state.clear('pause_attempt')
self.engine.unobserve('status', self._watch_engine_state)
self._set_measurement_state('PAUSED', 'The measurement is paused.')
self._state.set('paused')
elif change['value'] == 'Running':
self._state.clear('resuming')
self.engine.unobserve('status', self._watch_engine_state)
self._set_measurement_state('RUNNING',
'The measurement has resumed.')
def _watch_hook_state(self, change):
"""Observe hook paused/resumed events to validate pausing/resuming.
"""
if change['name'] == 'paused':
self._active_hook.unobserve('status', self._watch_hook_state)
self._set_measurement_state('PAUSED', 'The measurement is paused.')
self._state.clear('pause_attempt')
self._state.set('paused')
elif change['name'] == 'resumed':
self._state.clear('resuming')
self._active_hook.unobserve('status', self._watch_hook_state)
self._set_measurement_state('RUNNING',
'The measurement has resumed.')
def _set_measurement_state(self, status, infos, measurement=None,
clear=False):
"""Set the measurement status and infos in the main thread.
"""
def set_state(processor, status, infos, meas, clear):
if meas:
processor.running_measurement = meas
measurement = processor.running_measurement
measurement.status = status
measurement.infos = infos
if clear:
processor.running_measurement = None
# Executed in the main thread to avoid GUI update issues.
schedule_and_block(set_state,
(self, status, infos, measurement, clear),
priority=100)
def _stop_engine(self):
"""Stop the engine.
"""
logger.debug('Stopping engine')
engine = self.engine
engine.shutdown()
i = 0
while engine and engine.status != 'Stopped':
sleep(0.5)
i += 1
if i > 10:
engine.shutdown(force=True)
def _clear_state(self):
"""Clear the state when starting while preserving persistent settings.
"""
flags = list(self._state.flags)
flags.remove('processing')
flags.remove('continuous_processing')
self._state.clear(*flags)
def _post_setattr_continuous_processing(self, old, new):
"""Make sure the internal bit flag does reflect the real setting.
"""
if new:
self._state.set('continuous_processing')
else:
self._state.clear('continuous_processing')
def errors_to_msg(errors):
"""Convert a dictionary of errors in a well formatted message.
"""
err = '\n'.join(('- %s : %s' % (k, v) for k, v in errors.items()))
    return 'The following errors occurred :\n' + err
```
#### File: tasks/configs/base_configs.py
```python
import random
from atom.api import (Atom, Bool, Unicode, Subclass, ForwardTyped, Typed)
from inspect import getdoc
from ..tasks.base_tasks import BaseTask
from ..utils.templates import load_template
from ..utils.building import build_task_from_config
# Circular import protection
def task_manager():
"""Delayed import of TaskManagerPlugin.
"""
from ..plugin import TaskManagerPlugin
return TaskManagerPlugin
class BaseTaskConfig(Atom):
"""Base class for task configurer.
"""
#: Future parent in the task hierarchy used to enforce name uniqueness.
future_parent = Typed(BaseTask)
#: Task manager, necessary to retrieve task implementations.
manager = ForwardTyped(task_manager)
#: Name of the task to create.
task_name = Unicode()
#: Class of the task to create.
task_class = Subclass(BaseTask)
#: Bool indicating if the build can be done.
ready = Bool(False)
#: Bool indicating if the name is valid.
name_valid = Bool(False)
def __init__(self, **kwargs):
super(BaseTaskConfig, self).__init__(**kwargs)
# Force check to ensure that the possible default value of task_name
# is tested.
self.check_parameters()
def check_parameters(self):
"""The only parameter required is a unique task name.
"""
# Checking the parameters make sense only if the manager is known
if not self.manager:
return
names = []
if self.future_parent:
names = self.future_parent.root.get_used_names()
self.name_valid = self.task_name != "" and self.task_name not in names
self.ready = self.name_valid
def build_task(self):
"""This method use the user parameters to build the task object
Returns
-------
task : BaseTask
Task object built using the user parameters. Ready to be
inserted in a task hierarchy.
"""
raise NotImplementedError()
def _post_setattr_future_parent(self, _old, _new):
"""If the object was not initialized with a future_parent, we weren't
able to perform all the checks so we perform them now
"""
self.check_parameters()
def _post_setattr_task_name(self, old, new):
"""Everytime the task name change check whether ornot it is valid.
"""
self.check_parameters()
def _default_task_name(self):
names = self.manager.auto_task_names
if names:
name = random.choice(names)
return name
else:
return ''
class PyTaskConfig(BaseTaskConfig):
""" Standard configurer for python tasks.
This configurer is suitable for most python task whose initialisation
simply requires a name.
"""
    # Docstring of the class to help people know what they are going to create.
task_doc = Unicode()
def __init__(self, **kwargs):
super(PyTaskConfig, self).__init__(**kwargs)
self.task_doc = getdoc(self.task_class).replace('\n', ' ')
def build_task(self):
return self.task_class(name=self.task_name)
class TemplateTaskConfig(BaseTaskConfig):
"""Configurer for template task.
    This configurer uses the data stored about a task hierarchy to rebuild it
from scratch.
"""
#: Path to the file storing the hierarchy.
template_path = Unicode()
#: Description of the template.
template_doc = Unicode()
def __init__(self, **kwargs):
super(TemplateTaskConfig, self).__init__(**kwargs)
if self.template_path:
_, doc = load_template(self.template_path)
self.template_doc = doc
def build_task(self):
"""Build the task stored in the selected template.
"""
config, _ = load_template(self.template_path)
built_task = build_task_from_config(config, self.manager.workbench)
return built_task
```
#### File: exopy/tasks/plugin.py
```python
import os
import logging
from collections import defaultdict
from atom.api import List, Dict, Typed, Unicode
from watchdog.observers import Observer
from .declarations import (Task, Interface, Tasks, Interfaces, TaskConfig,
TaskConfigs)
from .filters import TaskFilter
from ..utils.plugin_tools import (HasPreferencesPlugin, ExtensionsCollector,
DeclaratorsCollector)
from ..utils.watchdog import SystematicFileUpdater
TASK_EXT_POINT = 'exopy.tasks.declarations'
FILTERS_POINT = 'exopy.tasks.filters'
CONFIG_POINT = 'exopy.tasks.configs'
FOLDER_PATH = os.path.dirname(__file__)
class TaskManagerPlugin(HasPreferencesPlugin):
"""Plugin responsible for collecting and providing tasks.
"""
#: Known templates (store full path to .ini).
#: This should not be manipulated by user code.
templates = Dict()
#: List of the filters.
filters = List()
#: Path to the file in which the names for the tasks are located.
auto_task_path = Unicode(os.path.join(FOLDER_PATH,
'tasknames.txt')).tag(pref=True)
#: List of names to use when creating a new task.
auto_task_names = List()
def start(self):
"""Collect all declared tasks and start observers.
"""
super(TaskManagerPlugin, self).start()
core = self.workbench.get_plugin('enaml.workbench.core')
core.invoke_command('exopy.app.errors.enter_error_gathering')
state = core.invoke_command('exopy.app.states.get',
{'state_id': 'exopy.app.directory'})
t_dir = os.path.join(state.app_directory, 'tasks')
# Create tasks subfolder if it does not exist.
if not os.path.isdir(t_dir):
os.mkdir(t_dir)
temp_dir = os.path.join(t_dir, 'templates')
        # Create templates subfolder if it does not exist.
if not os.path.isdir(temp_dir):
os.mkdir(temp_dir)
self._template_folders = [temp_dir]
self._filters = ExtensionsCollector(workbench=self.workbench,
point=FILTERS_POINT,
ext_class=TaskFilter)
self._filters.start()
self.filters = list(self._filters.contributions)
self._configs = DeclaratorsCollector(workbench=self.workbench,
point=CONFIG_POINT,
ext_class=(TaskConfig,
TaskConfigs))
self._configs.start()
self._tasks = DeclaratorsCollector(workbench=self.workbench,
point=TASK_EXT_POINT,
ext_class=(Tasks, Task, Interfaces,
Interface)
)
self._tasks.start()
self._refresh_templates()
if self.auto_task_path:
self.load_auto_task_names()
self._bind_observers()
core.invoke_command('exopy.app.errors.exit_error_gathering')
def stop(self):
"""Discard collected tasks and remove observers.
"""
self._unbind_observers()
self._tasks.stop()
self.templates.clear()
self._filters.stop()
self._configs.stop()
def list_tasks(self, filter='All'):
"""List the known tasks using the specified filter.
Parameters
----------
filter : unicode, optional
Name of the filter to use
Returns
-------
tasks : list(unicode) or None
Task ids selected by the filter, or None if the filter does not
exist.
"""
t_filter = self._filters.contributions.get(filter)
if t_filter:
return t_filter.filter_tasks(self._tasks.contributions,
self.templates)
def get_task_infos(self, task):
"""Access a given task infos.
Parameters
----------
task : unicode
Id of the task class for which to return the actual class.
Returns
-------
infos : TaskInfos or None
Object containing all the infos about the requested task.
This object should never be manipulated directly by user code.
"""
if task not in self._tasks.contributions:
return None
return self._tasks.contributions[task]
def get_task(self, task, view=False):
"""Access a given task class.
Parameters
----------
task : unicode
Id of the task class for which to return the actual class.
view : bool, optional
            Whether or not to return the view associated with the task.
Returns
-------
task_cls : type or None
Class associated to the requested task or None if the task was not
found.
task_view : EnamlDefMeta or None, optional
Associated view if requested.
"""
infos = self.get_task_infos(task)
if infos is None:
answer = None if not view else (None, None)
return answer
return infos.cls if not view else (infos.cls, infos.view)
def get_tasks(self, tasks):
"""Access an ensemble of task classes.
Parameters
----------
tasks : list(unicode)
Ids of the task classes for which to return the actual classes.
Returns
-------
tasks_cls : dict
Dictionary mapping the requested tasks to the actual classes.
missing : list
List of classes that were not found.
"""
tasks_cls = {}
missing = []
for t in tasks:
res = self.get_task(t)
if res:
tasks_cls[t] = res
else:
missing.append(t)
return tasks_cls, missing
def get_interface_infos(self, interface):
"""Access a given interface infos.
Parameters
----------
interface : unicode
Id of the task this interface is linked to followed by the ids
of the intermediate interfaces if any and finally id of the
interface itself. All ids should be separated by ':'
ex 'exopy.LoopTask:exopy.IterableLoopInterface'
Returns
-------
infos : InterfaceInfos
Object containing all the infos about the requested interface.
            This object should never be manipulated directly by user code.
"""
lookup_dict = self._tasks.contributions
ids = interface.split(':')
interface_id = ids.pop(-1)
interface_anchor = ids
try:
for anchor in interface_anchor:
lookup_dict = lookup_dict[anchor].interfaces
except KeyError:
logger = logging.getLogger(__name__)
            msg = 'Looking for {} (anchor {}) failed to find {}'
logger.debug(msg.format(interface_id, interface_anchor,
anchor))
return None
if interface_id in lookup_dict:
return lookup_dict[interface_id]
else:
return None
def get_interface(self, interface, views=False):
"""Access a given interface class.
Parameters
----------
interface: tuple[unicode|tuple|list]
- Name of the task class for which to return the actual class.
- Name of the task to which this interface is linked and names of
the intermediate interfaces if any (going from the most general
ones to the more specialised ones).
views : bool, optional
            Whether or not to return the views associated with the interface.
Returns
-------
interface_cls : type or None
Class corresponding to the requested interface or None if the class
was not found.
views : list or None, optional
List of views associated with the interface.
"""
infos = self.get_interface_infos(interface)
if infos is not None:
return infos.cls if not views else (infos.cls, infos.views)
else:
return None if not views else (None, None)
def get_interfaces(self, interfaces):
"""Access an ensemble of interface classes.
Parameters
----------
interfaces : list[tuple[unicode|tuple|list]]
            List of pairs (name of the interface class, corresponding anchor)
for which to return the actual classes.
Returns
-------
interfaces_cls : dict
Dictionary mapping the requested interfaces to the actual classes.
missing : list
List of classes that were not found.
"""
interfaces_cls = {}
missing = []
for i in interfaces:
i_cls = self.get_interface(i)
if i_cls:
interfaces_cls[i] = i_cls
else:
missing.append(i)
return interfaces_cls, missing
def get_config(self, task_id):
"""Access the proper config for a task.
Parameters
----------
        task_id : unicode
Id of the task for which a config is required
Returns
-------
config : tuple
Tuple containing the requested config object, and its
visualisation.
"""
templates = self.templates
if task_id in templates:
            infos = self._configs.contributions['__template__']
config = infos.cls(manager=self,
template_path=templates[task_id])
return config, infos.view(config=config)
elif task_id in self._tasks.contributions:
configs = self._configs.contributions
# Look up the hierarchy of the selected task to get the appropriate
# TaskConfig
task_class = self._tasks.contributions[task_id].cls
for t_class in type.mro(task_class):
if t_class in configs:
infos = configs[t_class]
c = infos.cls(manager=self,
task_class=task_class)
return c, infos.view(config=c)
return None, None
def load_auto_task_names(self):
""" Generate a list of task names from a file.
"""
path = self.auto_task_path
if not os.path.isfile(path):
core = self.workbench.get_plugin('enaml.workbench.core')
msg = 'Path {} does not point to a real file.'.format(path)
core.invoke_command('exopy.app.errors.signal',
dict(kind='error', message=msg))
return
with open(path, encoding='utf-8') as f:
aux = f.readlines()
self.auto_task_names = [l.rstrip() for l in aux]
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
#: Dictionary storing all known tasks declarartion, using TaskInfos.
_tasks = Typed(DeclaratorsCollector)
#: Private storage keeping track of which extension declared which object.
_extensions = Typed(defaultdict, (list,))
#: Contributed task filters.
_filters = Typed(ExtensionsCollector)
#: Contributed task configs.
_configs = Typed(DeclaratorsCollector)
#: List of folders in which to search for templates.
# TODO make that list editable and part of the preferences
_template_folders = List()
#: Watchdog observer tracking changes to the templates folders.
_observer = Typed(Observer, ())
def _refresh_templates(self):
"""Refresh the list of template tasks.
"""
        # TODO rework to handle in a nicer fashion the same template appearing
        # in multiple folders
templates = {}
for path in self._template_folders:
if os.path.isdir(path):
filenames = sorted(f for f in os.listdir(path)
if f.endswith('.task.ini') and
(os.path.isfile(os.path.join(path, f))))
for filename in filenames:
template_path = os.path.join(path, filename)
                    # Beware: redundant names are overwritten
name = filename[:-len('.task.ini')]
templates[name] = template_path
else:
logger = logging.getLogger(__name__)
logger.warning('{} is not a valid directory'.format(path))
self.templates = templates
def _update_templates(self):
"""Simply refresh the templates task.
"""
self._refresh_templates()
def _update_filters(self, change):
"""Update the available list of filters.
"""
self.filters = list(change['value'].keys())
def _bind_observers(self):
"""Setup all observers.
"""
for folder in self._template_folders:
handler = SystematicFileUpdater(self._update_templates)
self._observer.schedule(handler, folder, recursive=True)
self._observer.start()
self._filters.observe('contributions', self._update_filters)
def _unbind_observers(self):
"""Remove all observers.
"""
self._filters.unobserve('contributions', self._update_filters)
self._observer.unschedule_all()
self._observer.stop()
try:
self._observer.join()
except RuntimeError:
pass
```
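The plugin above is queried through `list_tasks`, `get_task`/`get_tasks` and `get_config`. Below is a minimal usage sketch, not taken from the repository: it assumes an Enaml workbench in which the task manager manifest has been registered, and that the plugin is exposed under the id 'exopy.tasks'.
```python
def summarize_tasks(workbench):
    """Print the id and class of every task known to the manager.

    ``workbench`` is assumed to be an enaml Workbench with the task manager
    manifest registered; the plugin id 'exopy.tasks' is an assumption made
    for this sketch.
    """
    plugin = workbench.get_plugin('exopy.tasks')
    # list_tasks returns None if the requested filter does not exist.
    for task_id in plugin.list_tasks() or []:
        infos = plugin.get_task_infos(task_id)
        print(task_id, infos.cls if infos is not None else '<template>')
```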
#### File: tasks/logic/loop_task.py
```python
from atom.api import (Typed, Bool, set_default)
from timeit import default_timer
from ..base_tasks import (SimpleTask, ComplexTask)
from ..task_interface import InterfaceableTaskMixin
from ..decorators import handle_stop_pause
from .loop_exceptions import BreakException, ContinueException
class LoopTask(InterfaceableTaskMixin, ComplexTask):
"""Complex task which, at each iteration, call all its child tasks.
"""
#: Flag indicating whether or not to time the loop.
timing = Bool().tag(pref=True)
#: Task to call before other child tasks with current loop value. This task
#: is simply a convenience and can be set to None.
task = Typed(SimpleTask).tag(child=50)
database_entries = set_default({'point_number': 11, 'index': 1,
'value': 0})
def check(self, *args, **kwargs):
"""Overriden so that interface check are run before children ones.
"""
test = True
traceback = {}
if self.interface:
i_test, i_traceback = self.interface.check(*args, **kwargs)
traceback.update(i_traceback)
test &= i_test
c_test, c_traceback = super().check(*args, **kwargs)
traceback.update(c_traceback)
test &= c_test
return test, traceback
def perform_loop(self, iterable):
"""Perform the loop on the iterable calling all child tasks at each
iteration.
        This method should be called by the interface at the appropriate time.
Parameters
----------
iterable : iterable
Iterable on which the loop should be performed.
"""
if self.timing:
if self.task:
self._perform_loop_timing_task(iterable)
else:
self._perform_loop_timing(iterable)
else:
if self.task:
self._perform_loop_task(iterable)
else:
self._perform_loop(iterable)
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
def _perform_loop(self, iterable):
"""Perform the loop when there is no child and timing is not required.
"""
self.write_in_database('point_number', len(iterable))
root = self.root
for i, value in enumerate(iterable):
if handle_stop_pause(root):
return
self.write_in_database('index', i+1)
self.write_in_database('value', value)
try:
for child in self.children:
child.perform_()
except BreakException:
break
except ContinueException:
continue
def _perform_loop_task(self, iterable):
"""Perform the loop when there is a child and timing is not required.
"""
self.write_in_database('point_number', len(iterable))
root = self.root
for i, value in enumerate(iterable):
if handle_stop_pause(root):
return
self.write_in_database('index', i+1)
self.task.perform_(value)
try:
for child in self.children:
child.perform_()
except BreakException:
break
except ContinueException:
continue
def _perform_loop_timing(self, iterable):
"""Perform the loop when there is no child and timing is required.
"""
self.write_in_database('point_number', len(iterable))
root = self.root
for i, value in enumerate(iterable):
if handle_stop_pause(root):
return
self.write_in_database('index', i+1)
self.write_in_database('value', value)
tic = default_timer()
try:
for child in self.children:
child.perform_()
except BreakException:
self.write_in_database('elapsed_time', default_timer()-tic)
break
except ContinueException:
self.write_in_database('elapsed_time', default_timer()-tic)
continue
self.write_in_database('elapsed_time', default_timer()-tic)
def _perform_loop_timing_task(self, iterable):
"""Perform the loop when there is a child and timing is required.
"""
self.write_in_database('point_number', len(iterable))
root = self.root
for i, value in enumerate(iterable):
if handle_stop_pause(root):
return
self.write_in_database('index', i+1)
tic = default_timer()
self.task.perform_(value)
try:
for child in self.children:
child.perform_()
except BreakException:
self.write_in_database('elapsed_time', default_timer()-tic)
break
except ContinueException:
self.write_in_database('elapsed_time', default_timer()-tic)
continue
self.write_in_database('elapsed_time', default_timer()-tic)
def _post_setattr_task(self, old, new):
"""Keep the database entries in sync with the task member.
"""
if old:
if self.root is not None:
old.unregister_from_database()
old.root = None
old.parent = None
if new:
new.name = self.name
if self.root is not None:
new.depth = self.depth + 1
new.database = self.database
new.path = self._child_path()
                # Give it its root so that it can proceed to any child
# registration it needs to.
new.parent = self
new.root = self.root
# Ask the child to register in database
new.register_in_database()
aux = self.database_entries.copy()
if 'value' in aux:
del aux['value']
self.database_entries = aux
else:
aux = self.database_entries.copy()
aux['value'] = 1.0
self.database_entries = aux
if self.root is not None:
self.register_preferences()
def _post_setattr_timing(self, old, new):
"""Keep the database entries in sync with the timing flag.
"""
if new:
aux = self.database_entries.copy()
aux['elapsed_time'] = 1.0
self.database_entries = aux
else:
aux = self.database_entries.copy()
if 'elapsed_time' in aux:
del aux['elapsed_time']
self.database_entries = aux
def _post_setattr_name(self, old, new):
"""Sets the subtask's name to its parent's name.
"""
if self.task:
self.task.name = new
super()._post_setattr_name(old, new)
```
#### File: tasks/tasks/string_evaluation.py
```python
from textwrap import fill
from inspect import cleandoc
from math import (cos, sin, tan, acos, asin, atan, sqrt, log10,
exp, log, cosh, sinh, tanh, atan2)
from cmath import pi as Pi
import cmath as cm
try:
import numpy as np
NP_TIP = ["- numpy function are available under np"]
except ImportError: # pragma: no cover
NP_TIP = [] # pragma: no cover
FORMATTER_TOOLTIP = fill(cleandoc("""In this field you can enter a text and
include fields which will be replaced by database
entries by using the delimiters '{' and '}'."""), 80)
EVALUATER_TOOLTIP = '\n'.join([
fill(cleandoc("""In this field you can enter a text and
include fields which will be replaced by database
entries by using the delimiters '{' and '}' and
which will then be evaluated."""), 80),
"Available math functions:",
"- cos, sin, tan, acos, asin, atan, atan2",
"- exp, log, log10, cosh, sinh, tanh, sqrt",
"- complex math function are available under cm",
"- pi is available as Pi"] + NP_TIP)
def safe_eval(expr, local_var):
"""Eval expr with the given local variables.
"""
return eval(expr, globals(), local_var)
```
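A short illustration of `safe_eval`: names are resolved first from the module globals (the math helpers imported above) and then from the supplied local mapping. The import path is inferred from the file header and the variable names are placeholders invented for the example.
```python
# Import path inferred from the file header above; adjust if it differs.
from exopy.tasks.tasks.string_evaluation import safe_eval

# 'amp' and 'freq' are arbitrary placeholder names for the local namespace.
local_var = {'amp': 2.0, 'freq': 0.25}

# cos and Pi come from the module globals, amp and freq from local_var.
result = safe_eval('amp * cos(2 * Pi * freq)', local_var)
print(result)  # ~0.0 (cos(pi/2) up to floating point rounding)
```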
#### File: testing/tasks/fixtures.py
```python
from time import sleep
import pytest
import enaml
from exopy.tasks.api import RootTask
from exopy.testing.util import exit_on_err
with enaml.imports():
from enaml.workbench.core.core_manifest import CoreManifest
from exopy.app.app_manifest import AppManifest
from exopy.app.preferences.manifest import PreferencesManifest
from exopy.app.dependencies.manifest import DependenciesManifest
from exopy.app.states.manifest import StateManifest
from exopy.app.icons.manifest import IconManagerManifest
from exopy.app.errors.manifest import ErrorsManifest
from exopy.app.errors.plugin import ErrorsPlugin
from exopy.tasks.manifest import TasksManagerManifest
from exopy.tasks.tasks.base_views import RootTaskView
from ..windows import ContainerTestingWindow
pytest_plugins = str('exopy.testing.fixtures'),
@pytest.yield_fixture
def task_workbench(workbench, monkeypatch, app_dir):
"""Setup the workbench in such a way that the task manager can be tested.
"""
monkeypatch.setattr(ErrorsPlugin, 'exit_error_gathering', exit_on_err)
workbench.register(CoreManifest())
workbench.register(AppManifest())
workbench.register(PreferencesManifest())
workbench.register(IconManagerManifest())
workbench.register(ErrorsManifest())
workbench.register(StateManifest())
workbench.register(DependenciesManifest())
workbench.register(TasksManagerManifest())
yield workbench
for m_id in ('exopy.tasks', 'exopy.app.dependencies', 'exopy.app.errors',
'exopy.app.icons', 'exopy.app.preferences', 'exopy.app'):
try:
workbench.unregister(m_id)
except Exception:
pass
    # Give some time to the OS to release resources linked to file
# monitoring.
sleep(0.1)
@pytest.fixture
def root_view(task_workbench):
"""Initialize a root view.
"""
c = task_workbench.get_plugin('enaml.workbench.core')
task = RootTask()
view = RootTaskView(task=task, core=c)
w = ContainerTestingWindow(workbench=task_workbench)
view.set_parent(w)
return view
```
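A hedged sketch of how these fixtures might be consumed from a test module, assuming they are made available to pytest (for instance through a ``pytest_plugins`` declaration in a conftest); the assertion is illustrative only.
```python
from exopy.tasks.api import RootTask

def test_root_view_wraps_a_root_task(root_view):
    """Example test relying on the root_view fixture defined above."""
    # root_view is built around a freshly created RootTask instance.
    assert isinstance(root_view.task, RootTask)
```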
#### File: exopy/utils/configobj_ops.py
```python
from configobj import Section
def include_configobj(new_parent, config):
""" Make a ConfigObj part of another one and preserves the depth.
This function will copy all entries from config.
Parameters
----------
new_parent : configobj.Section
Section in which information should be added.
config : configobj.Section
Section to merge into the new_parent.
"""
for key, val in config.items():
if isinstance(val, Section):
new_parent[key] = {}
include_configobj(new_parent[key], val)
else:
new_parent[key] = val
def traverse_config(config, depth=-1):
"""Traverse a ConfigObj object by yielding all sections.
Parameters
----------
depth : int
        How deep should we explore the tree of sections. When this number
        reaches zero, deeper sections are not explored but simply
        yielded.
"""
yield config
if depth == 0:
for s in config.sections:
yield config[s]
else:
for s in config.sections:
for c in traverse_config(config[s], depth - 1):
yield c
```
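A small, self-contained example of the two helpers above; the keys used are arbitrary and the import path is inferred from the file header.
```python
from configobj import ConfigObj
from exopy.utils.configobj_ops import include_configobj, traverse_config

# Build a small nested configuration.
source = ConfigObj()
source['general'] = {'name': 'demo'}
source['general']['sub'] = {'depth': '2'}

# Copy every entry of source into target, preserving the nesting.
target = ConfigObj()
include_configobj(target, source)
assert target['general']['sub']['depth'] == '2'

# Walk the root and every nested section (depth=-1 explores the full tree).
for section in traverse_config(target):
    print(list(section.keys()))
```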
#### File: exopy/utils/declarator.py
```python
import re
from importlib import import_module
from atom.api import Unicode, Bool
from enaml.core.api import Declarative, d_
from .traceback import format_exc
class Declarator(Declarative):
"""Base class for extension object which uses a visitor pattern.
"""
#: Flag indicating whether the declarator has been successfully registered
is_registered = Bool()
def get_path(self):
"""Query from parent the path to use for this declarator.
Returns
-------
path : unicode or None
Path declared by the parent. This can be None if no path is
declared.
"""
if isinstance(self.parent, Declarator):
return self.parent.get_path()
def get_group(self):
"""Get the group defined by the closest parent.
"""
if not isinstance(self.parent, Declarator):
return
group = getattr(self.parent, 'group', None)
if group:
return group
return self.parent.get_group()
def register(self, collector, traceback):
"""Add the contribution of this extension to the plugin.
Parameters
----------
collector : DeclaratorCollector
            Collector in charge of handling the registration of declarators.
            Contributions should be added to the contributions member (Dict).
            If a declarator cannot be registered because another one needs to
            be registered first, it should add itself to the _delayed member
            (List).
traceback : dict
            Dictionary in which any issue occurring during registration should
be recorded.
"""
raise NotImplementedError()
def unregister(self, plugin):
"""Remove the contribution of this extension to the plugin.
Parameters
----------
        plugin : DeclaratorCollector
            Collector in charge of handling the registration of declarators.
"""
raise NotImplementedError()
def __str__(self):
"""Provide a nice string representation of the object.
"""
raise NotImplementedError()
PATH_VALIDATOR = re.compile(r'^(\.?\w+)*$')
class GroupDeclarator(Declarator):
"""Declarator used to group an ensemble of declarator.
"""
#: Prefix path to use for all children Declarator. Path should be dot
#: separated.
path = d_(Unicode())
#: Id of the group common to all children Declarator. It is the
    #: responsibility of the children to mention they are part of a group.
group = d_(Unicode())
def get_path(self):
"""Overriden method to walk all parents.
"""
paths = []
if isinstance(self.parent, GroupDeclarator):
parent_path = self.parent.get_path()
if parent_path:
paths.append(parent_path)
if self.path:
paths.append(self.path)
if paths:
return '.'.join(paths)
def register(self, plugin, traceback):
"""Register all children Declarator.
"""
if not PATH_VALIDATOR.match(self.path):
msg = 'Invalid path {} in {} (path {}, group {})'
traceback['Error %s' % len(traceback)] = msg.format(self.path,
type(self),
self.path,
self.group)
return
for ch in self.children:
if not isinstance(ch, Declarator):
msg = 'All children of GroupDeclarator must be Declarator, got'
                traceback['Error %s' % len(traceback)] = msg + ' %s' % type(ch)
continue
ch.register(plugin, traceback)
self.is_registered = True
def unregister(self, plugin):
"""Unregister all children Declarator.
"""
if self.is_registered:
for ch in self.children:
if isinstance(ch, Declarator):
ch.unregister(plugin)
self.is_registered = False
def __str__(self):
"""Identify the declarator by its path and group.
"""
st = '{} whose path is "{}" and group is "{}" declaring :\n{}'
return st.format(type(self).__name__, self.path, self.group,
'\n'.join(' - {}'.format(c) for c in self.children))
def import_and_get(path, name, traceback, id):
"""Function importing a module and retrieving an object from it.
    This function provides a common pattern for declarators.
"""
import enaml
try:
with enaml.imports():
mod = import_module(path)
except Exception:
msg = 'Failed to import {} :\n{}'
traceback[id] = msg.format(path, format_exc())
return
try:
return getattr(mod, name)
except AttributeError:
msg = '{} has no attribute {}:\n{}'
traceback[id] = msg.format(path, name, format_exc())
return
```
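`import_and_get` is easiest to see on a standard library module; the example below is not from the repository, only requires enaml to be installed (the function wraps the import in ``enaml.imports()``), and uses an import path inferred from the file header.
```python
from exopy.utils.declarator import import_and_get

errors = {}
# A successful lookup returns the attribute and leaves the traceback empty.
sqrt = import_and_get('math', 'sqrt', errors, 'math.sqrt')
assert sqrt(4) == 2.0 and not errors

# A missing attribute (or a failed import) is recorded under the given id.
missing = import_and_get('math', 'does_not_exist', errors, 'missing')
assert missing is None and 'missing' in errors
```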
#### File: exopy/utils/priority_heap.py
```python
import heapq
#: Sentinel used to invalidate an object in the heap.
_REMOVED = object()
class PriorityHeap(object):
"""A priority heap implementation based on a heapq.
"""
__slots__ = ('_heap', '_map', '_counter')
def __init__(self):
super(PriorityHeap, self).__init__()
self._heap = []
self._map = {}
self._counter = 0
def push(self, priority, obj):
"""Push a task with a given priority on the queue.
Parameters
----------
priority : int
Priority associated with the object to push.
obj :
Object to push on the heap.
"""
task = [priority, self._counter, obj]
heapq.heappush(self._heap, task)
self._map[obj] = task
self._counter += 1
def pop(self):
"""Pop a task from the queue.
"""
while True:
_, _, obj = heapq.heappop(self._heap)
if obj is not _REMOVED:
del self._map[obj]
break
if not self._heap:
self._counter = 0
return obj
def remove(self, obj):
"""Mark a task as being outdated.
This is the only way to remove an object from a heap without messing
with the sorting.
"""
if obj in self._map:
heapobj = self._map[obj]
heapobj[2] = _REMOVED
del self._map[obj]
def __iter__(self):
"""Allow to use this object as an iterator.
"""
return self
def __len__(self):
"""Return the length of the underlying list.
"""
return len([t for t in self._heap if t[2] is not _REMOVED])
def __next__(self):
"""Iterate over the heap by poping object.
Iterating over the heap will destroy it.
"""
try:
return self.pop()
except IndexError:
raise StopIteration
```
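A quick usage sketch of `PriorityHeap`, grounded in the implementation above: lower priorities pop first, equal priorities keep insertion order, and removal only marks the entry so popping stays cheap. The import path is inferred from the file header.
```python
from exopy.utils.priority_heap import PriorityHeap

heap = PriorityHeap()
heap.push(2, 'low priority')
heap.push(0, 'urgent')
heap.push(0, 'also urgent')

# remove() replaces the stored object with the _REMOVED sentinel.
heap.remove('low priority')
assert len(heap) == 2

# Iterating pops the heap, so it is emptied in priority order.
assert [obj for obj in heap] == ['urgent', 'also urgent']
```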
#### File: utils/widgets/qt_autoscroll_html.py
```python
from atom.api import Unicode
from enaml.core.declarative import d_
from enaml.qt import QtGui, QtWidgets
from enaml.widgets.api import RawWidget
class QtAutoscrollHtml(RawWidget):
""" Custom Html display which scrolls down to the last line on update.
Carriage returns are automatically converted to '<br>' so that there
is no issue in the Html rendering.
"""
#: Text displayed by the widget. Any Html mark up will be rendered.
text = d_(Unicode())
hug_width = 'ignore'
hug_height = 'ignore'
def create_widget(self, parent):
"""Finishes initializing the editor by creating the underlying toolkit
widget.
"""
widget = QtWidgets.QTextEdit(parent)
widget.setReadOnly(True)
widget.setHtml(self.text)
return widget
def _post_setattr_text(self, old, new):
"""Updates the editor when the object trait changes externally to the
editor.
"""
if self.proxy_is_active:
widget = self.get_widget()
text = new.replace('\n', '<br>')
widget.setHtml(text)
widget.moveCursor(QtGui.QTextCursor.End)
```
#### File: utils/widgets/qt_completers.py
```python
from atom.api import List, Tuple, Unicode, Bool, Callable, Value
from enaml.core.declarative import d_
from enaml.qt import QtCore, QtWidgets
from enaml.widgets.api import RawWidget, Feature
class QDelimitedCompleter(QtWidgets.QCompleter):
"""A custom completer to use with QtLineCompleter, QtTextEdit.
    This completer only proposes completions between the specified characters.
Parameters
----------
parent : QLineEdit or QTextEdit
Widget for which to provide a completion.
delimiters : tuple
        Tuple of length 2 specifying the characters marking the beginning and
        end of the completion.
entries : iterable
Iterable of values used to propose completion.
    entries_updater : callable
        Callable used to refresh the list of entries; called once for the
        first completion after the widget gains focus.
"""
    # Signal emitted to notify the completer it should propose a completion.
completionNeeded = QtCore.Signal()
def __init__(self, parent, delimiters, entries, entries_updater):
super(QDelimitedCompleter, self).__init__(parent)
self.delimiters = delimiters
if isinstance(parent, QtWidgets.QLineEdit):
self.text_getter = parent.text
self.cursor_pos = parent.cursorPosition
self.insert_text = parent.insert
parent.textChanged[str].connect(self.text_changed)
self.completionNeeded.connect(self.complete)
elif isinstance(parent, QtWidgets.QTextEdit):
parent.textChanged.connect(self.text_changed)
self.cursor_pos = lambda: parent.textCursor().position()
self.insert_text =\
lambda text: parent.textCursor().insertText(text)
self.text_getter = parent.toPlainText
self.completionNeeded.connect(self._text_edit_complete)
else:
            msg = 'Parent of QtCompleter must be a QLineEdit or QTextEdit, not {}'
raise ValueError(msg.format(parent))
self.setCaseSensitivity(QtCore.Qt.CaseSensitive)
self.setModel(QtCore.QStringListModel(entries, self))
self.activated[str].connect(self.complete_text)
self.setWidget(parent)
self._upddate_entries = True
self._popup_active = False
self.entries_updater = entries_updater
def text_changed(self, text=None):
"""Callback handling the text being edited on the parent.
"""
if not text:
text = self.text_getter()
if self._upddate_entries and self.entries_updater:
entries = self.entries_updater()
self.setModel(QtCore.QStringListModel(entries, self))
self._upddate_entries = False
all_text = str(text)
text = all_text[:self.cursor_pos()]
split = text.split(self.delimiters[0])
prefix = split[-1].strip()
if len(split) > 1:
self.setCompletionPrefix(prefix)
self.completionNeeded.emit()
elif self.popup().isVisible():
self.popup().hide()
def complete_text(self, completion):
"""When the user validate a completion add it to the text.
"""
cursor_pos = self.cursor_pos()
text = str(self.text_getter())
before_text = text[:cursor_pos]
after_text = text[cursor_pos:]
prefix_len = len(before_text.split(self.delimiters[0])[-1].strip())
completion = completion[prefix_len:]
if not after_text.startswith(self.delimiters[1]):
completion += self.delimiters[1]
self.insert_text(completion)
def on_focus_gained(self):
"""Mark the entries for refreshing when the widget loses focus.
"""
self._upddate_entries = True
def _update_entries(self, entries):
"""Update the completer completion model.
"""
self.setModel(QtCore.QStringListModel(entries))
def _text_edit_complete(self):
"""Propose completion for QTextEdit.
"""
cr = self.widget().cursorRect()
popup = self.popup()
cr.setWidth(popup.sizeHintForColumn(0) +
popup.verticalScrollBar().sizeHint().width())
self.complete(cr)
class QtLineCompleter(RawWidget):
"""Simple line editor supporting completion.
"""
#: Text being edited by this widget.
text = d_(Unicode())
#: Static list of entries used to propose completion. This member value is
#: not updated by the entries_updater.
entries = d_(List())
#: Callable to use to refresh the completions.
entries_updater = d_(Callable())
    #: Delimiters marking the beginning and end of the completed section.
delimiters = d_(Tuple(Unicode(), ('{', '}')))
hug_width = 'ignore'
features = Feature.FocusEvents
#: Flag avoiding circular updates.
_no_update = Bool(False)
#: Reference to the QCompleter used by the widget.
_completer = Value()
# PySide requires weakrefs for using bound methods as slots.
# PyQt doesn't, but executes unsafe code if not using weakrefs.
__slots__ = '__weakref__'
def create_widget(self, parent):
"""Finishes initializing by creating the underlying toolkit widget.
"""
widget = QtWidgets.QLineEdit(parent)
self._completer = QDelimitedCompleter(widget, self.delimiters,
self.entries,
self.entries_updater)
widget.setText(self.text)
self.proxy.widget = widget # Anticipated so that selection works
widget.textEdited.connect(self.update_object)
return widget
def update_object(self):
""" Handles the user entering input data in the edit control.
"""
if (not self._no_update) and self.activated:
value = self.get_widget().text()
self._no_update = True
self.text = value
self._no_update = False
def _post_setattr_text(self, old, new):
"""Updates the editor when the object changes externally to the editor.
"""
if (not self._no_update) and self.get_widget():
self._no_update = True
self.get_widget().setText(new)
self._no_update = False
def _post_setattr_entries(self, old, new):
"""Updates the completer entries.
"""
if self._completer:
self._completer._update_entries(new)
def focus_gained(self):
"""Notify the completer the focus was lost.
"""
self._completer.on_focus_gained()
class QCompletableTexEdit(QtWidgets.QTextEdit):
"""A QTextEdit letting the completer handles key presses when visible.
"""
__slots__ = ('completer', )
def keyPressEvent(self, event):
"""Overriden to let the completer handle some events when visible.
"""
if self.completer.popup().isVisible():
key = event.key()
if key in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return,
QtCore.Qt.Key_Escape, QtCore.Qt.Key_Tab,
QtCore.Qt.Key_Backtab):
event.ignore()
return
super(QCompletableTexEdit, self).keyPressEvent(event)
class QtTextCompleter(RawWidget):
"""Simple text editor supporting completion.
"""
#: Text being edited by this widget.
text = d_(Unicode())
#: Static list of entries used to propose completion. This member value is
#: not updated by the entries_updater.
entries = d_(List())
#: Callable to use to refresh the completions.
entries_updater = d_(Callable())
    #: Delimiters marking the beginning and end of the completed section.
delimiters = d_(Tuple(Unicode(), ('{', '}')))
hug_width = 'ignore'
features = Feature.FocusEvents
#: Flag avoiding circular updates.
_no_update = Bool(False)
#: Reference to the QCompleter used by the widget.
_completer = Value()
# PySide requires weakrefs for using bound methods as slots.
# PyQt doesn't, but executes unsafe code if not using weakrefs.
__slots__ = '__weakref__'
def create_widget(self, parent):
"""Finishes initializing by creating the underlying toolkit widget.
"""
widget = QCompletableTexEdit(parent)
self._completer = QDelimitedCompleter(widget, self.delimiters,
self.entries,
self.entries_updater)
widget.completer = self._completer
widget.setText(self.text)
self.proxy.widget = widget # Anticipated so that selection works
widget.textChanged.connect(self.update_object)
return widget
def update_object(self):
""" Handles the user entering input data in the edit control.
"""
if (not self._no_update) and self.activated:
value = self.get_widget().toPlainText()
self._no_update = True
self.text = value
self._no_update = False
def focus_gained(self):
"""Notify the completer the focus was lost.
"""
self._completer.on_focus_gained()
def _post_setattr_text(self, old, new):
"""Updates the editor when the object changes externally to the editor.
"""
if (not self._no_update) and self.get_widget():
self._no_update = True
self.get_widget().setText(new)
self._no_update = False
def _post_setattr_entries(self, old, new):
"""Updates the completer entries.
"""
if self.proxy_is_active and self._completer:
self._completer._update_entries(new)
```
|
{
"source": "jerjohste/exopy_hqc_legacy",
"score": 2
}
|
#### File: drivers/visa/anritsu_signal_source.py
```python
import re
from textwrap import fill
from inspect import cleandoc
from visa import VisaTypeError
from ..driver_tools import (InstrIOError, secure_communication,
instrument_property)
from ..visa_tools import VisaInstrument
class AnritsuMG3694(VisaInstrument):
"""Driver for the Anritsu MG 3694 microwave source.
"""
def __init__(self, connection_info, caching_allowed=True,
caching_permissions={}, auto_open=True):
super(AnritsuMG3694, self).__init__(connection_info,
caching_allowed,
caching_permissions,
auto_open)
self.frequency_unit = 'GHz'
def open_connection(self, **para):
"""Open the connection to the instr using the `connection_str`.
"""
super(AnritsuMG3694, self).open_connection(**para)
self.write_termination = '\n'
self.read_termination = '\n'
self.write("DSPL 4")
self.write("EBW3") # if the external reference is very stable in phase
# the largest EBW must be chosen
self.write("LO0") # no offset on the power
self.write("LOG") # Selects logarithmic power level operation in dBm
self.write("TR1") # Sets 40 dB of attenuation when RF is switched off
self.write("PS1") # Turns on the Phase Offset
self.write("DS1") # Turns off the secure mode
self.write("AT1") # Selects ALC step attenuator decoupling
self.write("IL1") # Selects internal leveling of output power
@instrument_property
@secure_communication()
def frequency(self):
"""Frequency getter method
"""
freq = self.ask_for_values('FREQ?')[0]
if freq:
if self.frequency_unit == 'GHz':
return freq/1e9
elif self.frequency_unit == 'MHz':
return freq/1e6
elif self.frequency_unit == 'kHz':
return freq/1e3
else:
return freq
else:
            raise InstrIOError('Anritsu signal source did not return the frequency')
@frequency.setter
@secure_communication()
def frequency(self, value):
"""Frequency setter method
"""
unit = self.frequency_unit
self.write('FREQ {}{}'.format(value, unit))
result = self.ask_for_values('FREQ?')
if result:
if unit == 'GHz':
result[0] /= 1e9
elif unit == 'MHz':
result[0] /= 1e6
elif unit == 'kHz':
result[0] /= 1e3
if abs(result[0] - value) > 10**-12:
                mes = 'Instrument did not set the frequency correctly'
raise InstrIOError(mes)
@instrument_property
@secure_communication()
def power(self):
"""Power getter method
"""
power = self.ask_for_values(':POW?')[0]
if power is not None:
return power
else:
raise InstrIOError
@power.setter
@secure_communication()
def power(self, value):
"""Power setter method
"""
self.write('POW {}'.format(value))
result = self.ask_for_values('POW?')[0]
if abs(result - value) > 10**-12:
            raise InstrIOError('Instrument did not set the power correctly')
@instrument_property
@secure_communication()
def output(self):
"""Output getter method
"""
output = self.ask_for_values('OUTP?')[0]
if output == 1:
return 'ON'
elif output == 0:
return 'OFF'
else:
mes = 'Anritsu signal source did not return its output'
raise InstrIOError(mes)
@output.setter
@secure_communication()
def output(self, value):
"""Output setter method. 'ON', 'OFF'
"""
on = re.compile('on', re.IGNORECASE)
off = re.compile('off', re.IGNORECASE)
if on.match(value) or value == 1:
self.write('OUTP 1')
if self.ask_for_values('OUTP?')[0] != 1:
raise InstrIOError(cleandoc('''Instrument did not set correctly
the output'''))
elif off.match(value) or value == 0:
self.write('OUTP 0')
if self.ask_for_values('OUTP?')[0] != 0:
raise InstrIOError(cleandoc('''Instrument did not set correctly
the output'''))
else:
mess = fill(cleandoc('''The invalid value {} was sent to
switch_on_off method''').format(value), 80)
raise VisaTypeError(mess)
```
#### File: tests/conversion/test_conversion.py
```python
import os
from traceback import print_exc
import pytest
import enaml
from configobj import ConfigObj
from exopy.measurement.measurement import Measurement
from exopy_hqc_legacy.conversion.convert import (update_task,
update_task_interface,
update_monitor,
iterate_on_sections,
convert_measure)
with enaml.imports():
from exopy.tasks.manifest import TasksManagerManifest
from exopy_hqc_legacy.manifest import HqcLegacyManifest
def test_update_task():
"""Test updating the informations about a task.
"""
config = ConfigObj({'task_name': 'decoy',
'task_class': 'SetDCVoltageTask',
'selected_driver': None,
'selected_profile': None,
'voltage': '1.0'})
update_task(config)
assert 'task_class' not in config
assert config['task_id'] == 'exopy_hqc_legacy.SetDCVoltageTask'
assert 'dep_type' in config
assert 'selected_driver' not in config
assert 'selected_profile' not in config
assert 'voltage' in config
with pytest.raises(ValueError):
config = {'task_class': '__dummy__'}
update_task(config)
def test_update_task_interface():
"""Test updating the informations about a task interface.
"""
config = {'interface_class': 'IterableLoopInterface',
'iterable': '[]'}
update_task_interface(config)
assert 'interface_class' not in config
assert (config['interface_id'] ==
'exopy.LoopTask:exopy.IterableLoopInterface')
assert 'dep_type' in config
assert 'iterable' in config
with pytest.raises(ValueError):
config = {'interface_class': '__dummy__'}
update_task_interface(config)
# XXX add tests for the new update functions
def test_update_monitor():
"""Test updating the informations related to a monitor.
"""
config = {'id': None, 'undisplayed': '[]', 'rule_0': {},
'custom_0': {}}
update_monitor(config)
assert 'id' not in config
assert config['undisplayed'] == repr(['meas_name', 'meas_id', 'meas_date'])
assert config['rule_0'] == 'Loop progress'
assert 'custom_0' not in config
def test_iterate_on_sections():
"""Test iterating on section sections
"""
section = ConfigObj()
section['a'] = {'val1': None}
section['b'] = {'val2': None}
section['b']['c'] = {'val3': None}
class Checker(object):
        __slots__ = ('called',)
def __call__(self, section):
self.called = True
check1, check2 = Checker(), Checker()
actions = {lambda x: 'val1' in x: check1, lambda x: 'val2' in x: check2}
with pytest.raises(ValueError):
iterate_on_sections(section, actions)
assert check1.called
assert check2.called
MEASURE_DIRECTORY = os.path.join(os.path.dirname(__file__), 'test_measures')
MEASURES_FILES = [
'Avg_FreqCav.ini',
'Bfield_cav.ini',
'Bfield-Gate_IPhA_hysteres.ini',
'Bfield_IPhA_hysteres.ini',
'Eps-Bfield_PhA_oneway.ini',
pytest.param('FastGateCal_PhA.ini', marks=pytest.mark.xfail),
'Find-Set_fc_avg.ini',
'Find-Set_fc_Hetero-LockIn.ini',
'Find-Set_fc_Hetero-LockIn_TestEXG.ini',
'Find-Set_fc.ini',
'Find-Set_fc_SPCard.ini',
'Gate-Bfield_IPhA_oneway.ini',
'Gate-Bfield_PhA_oneway.ini',
'Gate-Power-Frequence_PhA.ini',
'Gate-Power_PhA.ini',
'Gate-Spectro_ON-OFF_IPhA.ini',
'Gate-Spectro_ON-OFF_PhA.ini',
'GotoField.ini',
'GrayscaleAlphaEpsilon.ini',
'GrayscaleAlphaEpsilon-Skewed.ini',
'GrayScale_Current.ini',
'GrayScale_IPhA_Hetero-LockIn.ini',
'GrayScale_IPhA.ini',
'GrayScale_IPhA_SPCard.ini',
'GrayScale_multi_PhA.ini',
'GrayScale_PhA_Hetero-LockIn_cav_pulsed.ini',
'GrayScale_PhA_Hetero-LockIn.ini',
'GrayScale_PhA_Hetero-LockIn_TestEXG.ini',
'GrayScale_PhA_Hetero-LockInTK.ini',
'GrayScale_PhA.ini',
'GrayScale_PhA_SPCard_good.ini',
'GrayScale_PhA_SP.ini',
'GrayScale_PhA_SP_Vgt.ini',
'GrayScale_PhA_Vsd.ini',
'GrayScale_Vgt_PhA_Hetero-LockIn.ini',
'Power_scancav.ini',
'RFspectroCAL_Freq-Avg_IPhA.ini',
'RFspectroCAL_Freq-Gate_IPhA.ini',
'RFspectroCAL_Freq-Gate_PhA.ini',
'RFspectroCAL_Gate-Freq_PhA.ini',
'RFspectro_Eps-Freq_PhA.ini',
'RFspectro_Freq-Avg_I_Harmonics.ini',
'RFspectro_Freq-Avg_IPhA.ini',
'RFspectro_Freq-B_I.ini',
'RFspectro_Freq-CavPower_PhA.ini',
'RFspectro_Freq-Gate_IPhA.ini',
'RFspectro_Freq-Gate_PhA_aroundcav.ini',
'RFspectro_Freq-Gate_PhA_belowcav.ini',
'RFspectro_Freq-Gate_PhA.ini',
'RFspectro_Freq-Gate_PhA_SP_cont.ini',
'RFspectro_Freq-Gate_PhA_SP_pulsed.ini',
'RFspectro_Freq-Power_I-PhA.ini',
'RFspectro_Freq-Power_PhA.ini',
'RFspectro_Gate-Freq_PhA.ini',
'Scan-Cav_Hetero-LockIn.ini',
'Scan-Cav_Hetero-LockIn_pulsed.ini',
'Scan_cav_SPCard.ini',
'Spectro-Bfield_IPhA_oneway.ini',
'Spectro-Bfield_PhA_aroundfc.ini',
'Spectro-Bfield_PhA_oneway.ini',
'Spectro_gate-Freq_ON-OFF_PhA.ini',
'Spectro_gate-Freq_ON-OFF_PhA_PNA.ini',
'Spectro_gate-Freq_PhA_PNA.ini',
'Spectro_gate-Power_Freq_PhA_PNA.ini',
'Spectro_ON-OFF_PhA.ini',
'Spectro_Power-Freq_ON-OFF_PhA.ini',
'Spectro_Power-Freq_ON-OFF_PhA_PNA.ini',
'Spectro_Power-Freq_PhA_PNA.ini',
'Spectro_Power-Freq_PhA_SP.ini',
'Sweep_2_sources.ini',
'SweepBfield_FreqCav-Gate.ini',
'SweepBfield_FreqCav.ini',
'SweepBfield_Gate.ini',
'SweepBfield.ini',
'SweepBfield_multipleGates_IPhA.ini',
'SweepEps_FreqCav.ini',
'SweepEps_PhA.ini',
'SweepFreq-Gate-Bfield_PhA.ini',
'SweepFreq-Gate_PhA.ini',
pytest.param('SweepGate_FastGateTest_IphA.ini', marks=pytest.mark.xfail),
'SweepGate_FastGateTest_PSG_IphA.ini',
pytest.param('SweepGate_FastGate_Vg2Vg1_IphA.ini', marks=pytest.mark.xfail),
'SweepGate_FreqCav.ini',
'SweepGate_FreqCav+spectro.ini',
'SweepGate_IPhA_Hetero-LockIn.ini',
'SweepGate_IphA.ini',
'SweepGate_Keithley.ini',
'SweepGate_ParaAmpTest_PSG_IphA.ini',
'SweepGate_ParaAmpTest_Vg1Vsd_PSG_IphA.ini',
'SweepGate_PhA_CavPulsed.ini',
'SweepGate_PhA_Hetero-LockIn.ini',
'SweepGate_PhA.ini',
'SweepGate_PhA_SP.ini',
'Sweep-PowerEps_PhA.ini',
'SweepPower_FreqCav.ini',
'SweepPower-Freq_I.ini',
'SweepPowerGate_PhA_SP.ini',
'SweepPower_GrayScale_PhA_SP.ini',
'SweepPower_I.ini',
'Sweep-Skewed-PowerEps_PhA.ini',
'Time_SPCard.ini',
'transfer_sequence.ini',
'Vsd_GrayScale_PhA.ini',
]
@pytest.mark.parametrize('meas_file', MEASURES_FILES)
def test_converting_a_measurement(measurement_workbench, meas_file, tmpdir,
monkeypatch):
"""Test converting a measurement created using HQCMeas to make it run on
Exopy.
"""
import enaml
from exopy.measurement.monitors.text_monitor import monitor
monkeypatch.setattr(monitor, 'information', lambda *args, **kwargs: 1)
measurement_workbench.register(TasksManagerManifest())
measurement_workbench.register(HqcLegacyManifest())
try:
with enaml.imports():
from exopy_pulses.pulses.manifest import PulsesManagerManifest
measurement_workbench.register(PulsesManagerManifest())
from exopy_pulses.tasks.manifest import PulsesTasksManifest
measurement_workbench.register(PulsesTasksManifest())
from exopy_hqc_legacy.pulses.manifest\
import HqcLegacyPulsesManifest
measurement_workbench.register(HqcLegacyPulsesManifest())
except ImportError:
print('Exopy pulses is not installed')
print_exc()
plugin = measurement_workbench.get_plugin('exopy.measurement')
path = convert_measure(os.path.join(MEASURE_DIRECTORY, meas_file),
dest_folder=str(tmpdir))
res, errors = Measurement.load(plugin, path)
with open(path) as f:
print(errors.get('main task'), f.read())
assert res
```
#### File: tasks/instr/test_set_dc_voltage.py
```python
from multiprocessing import Event
import pytest
import enaml
from exopy.tasks.api import RootTask
from exopy.tasks.tasks.logic.loop_task import LoopTask
from exopy.testing.util import show_and_close_widget
from exopy_hqc_legacy.tasks.tasks.instr.dc_tasks\
import (SetDCVoltageTask, MultiChannelVoltageSourceInterface)
with enaml.imports():
from exopy.tasks.tasks.logic.views.loop_view import LoopView
from exopy_hqc_legacy.tasks.tasks.instr.views.dc_views\
import SetDcVoltageView
from .instr_helper import InstrHelper, InstrHelperStarter, PROFILES, DRIVERS
class TestSetDCVoltageTask(object):
def setup(self):
self.root = RootTask(should_stop=Event(), should_pause=Event())
self.task = SetDCVoltageTask(name='Test')
self.task.back_step = 0.1
self.task.delay = 0.1
self.root.add_child_task(0, self.task)
self.root.run_time[DRIVERS] = {'Test': (InstrHelper,
InstrHelperStarter())}
self.root.run_time[PROFILES] =\
{'Test1': {'connections': {'C': {'owner': []}},
'settings': {'S': {'check_connection': [True]}}
}
}
# This is set simply to make sure the test of InstrTask pass.
self.task.selected_instrument = ('Test1', 'Test', 'C', 'S')
def test_check_base_interface1(self):
"""Simply test that everything is ok if voltage can be evaluated.
"""
self.task.target_value = '1.0'
test, traceback = self.task.check(test_instr=True)
assert test
assert not traceback
def test_check_base_interface2(self):
"""Check handling a wrong voltage.
"""
self.task.target_value = '*1.0*'
test, traceback = self.task.check(test_instr=True)
assert not test
assert len(traceback) == 1
def test_check_multichannel_interface1(self):
"""Check the multichannel specific tests, passing.
"""
interface = MultiChannelVoltageSourceInterface(task=self.task)
interface.channel = (1, 1)
self.task.interface = interface
self.task.target_value = '1.0'
c = self.root.run_time[PROFILES]['Test1']['connections']
c['C'] = {'defined_channels': [[(1, 1)]]}
test, traceback = self.task.check(test_instr=True)
assert test
assert not traceback
def test_check_multichannel_interface2(self):
"""Check the multichannel specific tests, failing = driver.
"""
interface = MultiChannelVoltageSourceInterface(task=self.task)
interface.channel = (1, 1)
self.task.interface = interface
self.task.target_value = '1.0'
self.root.run_time[DRIVERS] = {}
c = self.root.run_time[PROFILES]['Test1']['connections']
c['C'] = {'defined_channels': [[(1, 1)]]}
test, traceback = self.task.check(test_instr=True)
assert not test
assert len(traceback) == 1
def test_check_multichannel_interface3(self):
"""Check the multichannel specific tests, failing = profile.
"""
interface = MultiChannelVoltageSourceInterface(task=self.task)
interface.channel = (1, 1)
self.task.interface = interface
self.task.target_value = '1.0'
self.task.selected_instrument = ()
test, traceback = self.task.check()
assert not test
assert len(traceback) == 1
def test_check_multichannel_interface4(self):
"""Check the multichannel specific tests, failing = channel.
"""
interface = MultiChannelVoltageSourceInterface(task=self.task)
interface.channel = (2, 1)
self.task.interface = interface
self.task.target_value = '1.0'
c = self.root.run_time[PROFILES]['Test1']['connections']
c['C'] = {'defined_channels': [[(1, 1)]]}
test, traceback = self.task.check(test_instr=True)
assert not test
assert len(traceback) == 1
def test_smooth_set_stopping(self):
"""Test stopping in the middle of a smooth stepping.
"""
c = self.root.run_time[PROFILES]['Test1']['connections']
c['C'] = {'voltage': [0.0], 'funtion': ['VOLT'], 'owner': [None]}
self.root.prepare()
self.root.should_stop.set()
setter = lambda value: setattr(self.driver, 'voltage', value)
self.task.smooth_set(1.0, setter, 0.0)
assert self.root.get_from_database('Test_voltage') == 0.0
def test_perform_base_interface(self):
"""Test also that a target which is not a multiple of the back step
is correctly handled.
"""
self.task.target_value = '0.05'
c = self.root.run_time[PROFILES]['Test1']['connections']
c['C'] = {'voltage': [0.0], 'funtion': ['VOLT'], 'owner': [None]}
self.root.prepare()
self.task.perform()
assert self.root.get_from_database('Test_voltage') == 0.05
self.task.target_value = '1.06'
self.task.perform()
assert self.root.get_from_database('Test_voltage') == 1.06
def test_perform_multichannel_interface(self):
"""Test using the interface for the setting.
"""
interface = MultiChannelVoltageSourceInterface(task=self.task)
interface.channel = (1, 1)
self.task.interface = interface
self.task.target_value = '1.0'
c = self.root.run_time[PROFILES]['Test1']['connections']
c['C'] = {'voltage': [0.0], 'funtion': ['VOLT'], 'owner': [None]}
s = self.root.run_time[PROFILES]['Test1']['settings']
s['S'] = {'get_channel': lambda x, i: x}
self.root.prepare()
self.task.perform()
assert self.root.get_from_database('Test_voltage') == 1.0
@pytest.mark.ui
def test_set_dc_voltage_view(exopy_qtbot, root_view, task_workbench):
"""Test SetDCVoltageView widget outisde of a LoopTask.
"""
task = SetDCVoltageTask(name='Test')
root_view.task.add_child_task(0, task)
show_and_close_widget(exopy_qtbot, SetDcVoltageView(task=task, root=root_view))
@pytest.mark.ui
def test_set_dc_voltage_view2(exopy_qtbot, root_view, task_workbench):
"""Test SetDCVoltageView widget inside of a LoopTask.
"""
task = SetDCVoltageTask(name='Test')
interface = MultiChannelVoltageSourceInterface(task=task)
task.interface = interface
loop = LoopTask(name='r', task=task)
root_view.task.add_child_task(0, loop)
# XXX check for absence of target field
show_and_close_widget(exopy_qtbot, LoopView(task=loop, root=root_view))
```
|
{
"source": "jerjohste/exopy_pulses",
"score": 2
}
|
#### File: tests/pulses/test_manifest.py
```python
import os
import pytest
import enaml
from configobj import ConfigObj
from exopy.testing.util import handle_dialog
with enaml.imports():
from exopy.app.errors.widgets import ErrorsDialog
from exopy_pulses.pulses.sequences.base_sequences import RootSequence
from exopy_pulses.testing.context import DummyContext
with enaml.imports():
from exopy_pulses.pulses.utils.widgets.building import BuilderView
@pytest.fixture
def root():
root = RootSequence(context=DummyContext())
return root
def test_create_sequence(root, workbench, exopy_qtbot, monkeypatch):
"""Test creating a sequence.
"""
core = workbench.get_plugin('enaml.workbench.core')
def select_sequence(exopy_qtbot, dial):
"""Select the sequence to build.
"""
dial.selector.selected_sequence = 'exopy_pulses.BaseSequence'
with handle_dialog(exopy_qtbot, 'accept', select_sequence):
cmd = 'exopy.pulses.create_sequence'
seq = core.invoke_command(cmd, dict(root=root))
assert seq is not None
with handle_dialog(exopy_qtbot, 'reject'):
cmd = 'exopy.pulses.create_sequence'
seq = core.invoke_command(cmd, dict(root=root))
assert seq is None
def raise_on_build(*args, **kwargs):
raise Exception()
from exopy_pulses.pulses.configs.base_config import SequenceConfig
monkeypatch.setattr(SequenceConfig, 'build_sequence', raise_on_build)
with handle_dialog(exopy_qtbot, 'accept', cls=ErrorsDialog, time=500):
with handle_dialog(exopy_qtbot, 'accept', cls=BuilderView):
cmd = 'exopy.pulses.create_sequence'
seq = core.invoke_command(cmd, dict(root=root))
def test_build_sequence_from_path(workbench, root, tmpdir):
"""Test building a sequence stored in a file.
"""
path = os.path.join(str(tmpdir), 'test.pulse.ini')
pref = root.preferences_from_members()
conf = ConfigObj(pref)
with open(path, 'wb') as f:
conf.write(f)
core = workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.pulses.build_sequence'
assert isinstance(core.invoke_command(cmd, dict(path=path)), type(root))
with pytest.raises(ValueError):
core.invoke_command(cmd, dict())
def test_build_sequence_from_config(workbench, root):
"""Test building a sequence stored in a file.
"""
prefs = ConfigObj(root.preferences_from_members())
core = workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.pulses.build_sequence'
assert isinstance(core.invoke_command(cmd, dict(prefs=prefs)), type(root))
def test_build_sequence_handle_dependencies_issues(workbench, root):
"""Test handling issue in collecting dependencies.
"""
prefs = ConfigObj(root.preferences_from_members())
prefs['dep_type'] = '__dumb__'
core = workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.pulses.build_sequence'
with pytest.raises(RuntimeError):
core.invoke_command(cmd, dict(prefs=prefs))
prefs = ConfigObj(root.preferences_from_members())
prefs['item_id'] = '__dumb__'
core = workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.pulses.build_sequence'
with pytest.raises(RuntimeError):
core.invoke_command(cmd, dict(prefs=prefs))
def test_create_context(workbench, root, exopy_qtbot):
"""Test creating a context for a sequence.
"""
core = workbench.get_plugin('enaml.workbench.core')
def select_context(exopy_qtbot, dial):
"""Select the sequence to build.
"""
obj_combo = dial.central_widget().widgets()[0]
obj_combo.selected_item = 'Dummy'
with handle_dialog(exopy_qtbot, 'accept', select_context):
cmd = 'exopy.pulses.create_context'
core.invoke_command(cmd, dict(root=root))
assert root.context is not None
del root.context
with handle_dialog(exopy_qtbot, 'reject'):
cmd = 'exopy.pulses.create_context'
core.invoke_command(cmd, dict(root=root))
assert root.context is None
```
|
{
"source": "jerjohste/exopy",
"score": 2
}
|
#### File: app/icons/test_api.py
```python
from exopy.app.icons.api import get_icon
def test_get_icon(app, icon_workbench):
"""Test getting an icon using the helper function.
"""
assert get_icon(icon_workbench, 'folder-open')
```
#### File: app/log/test_plugin.py
```python
import os
import sys
import logging
from enaml.workbench.api import Workbench
from exopy.testing.util import handle_dialog
import enaml
with enaml.imports():
from enaml.workbench.core.core_manifest import CoreManifest
from exopy.app.app_manifest import AppManifest
from exopy.app.states.manifest import StateManifest
from exopy.app.preferences.manifest import PreferencesManifest
from exopy.app.log.manifest import LogManifest
from exopy.app.log.tools import (LogModel, GuiHandler, StreamToLogRedirector)
PLUGIN_ID = 'exopy.app.logging'
class CMDArgs(object):
pass
class TestLogPlugin(object):
"""Test all the commands deined by the LogPLugin.
"""
def setup(self):
self.workbench = Workbench()
self.workbench.register(CoreManifest())
self.workbench.register(AppManifest())
self.workbench.register(PreferencesManifest())
self.workbench.register(StateManifest())
self.workbench.register(LogManifest())
def teardown(self):
self.workbench.unregister(PLUGIN_ID)
def test_handler1(self, logger):
"""Test adding removing handler.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
handler = GuiHandler(model=LogModel())
core.invoke_command('exopy.app.logging.add_handler',
{'id': 'ui', 'handler': handler, 'logger': 'test'},
self)
log_plugin = self.workbench.get_plugin(PLUGIN_ID)
assert log_plugin.handler_ids == ['ui']
assert handler in logger.handlers
assert log_plugin._handlers == {'ui': (handler, 'test')}
core.invoke_command('exopy.app.logging.remove_handler',
{'id': 'ui'}, self)
assert log_plugin.handler_ids == []
assert handler not in logger.handlers
assert log_plugin._handlers == {}
def test_handler2(self, logger):
"""Test adding a GUI handler using the mode keyword.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
core.invoke_command('exopy.app.logging.add_handler',
{'id': 'ui', 'mode': 'ui', 'logger': 'test'},
self)
log_plugin = self.workbench.get_plugin(PLUGIN_ID)
assert log_plugin.handler_ids == [u'ui']
assert logger.handlers
core.invoke_command('exopy.app.logging.remove_handler',
{'id': 'ui'}, self)
assert log_plugin.handler_ids == []
assert not logger.handlers
def test_handler3(self, logger):
"""Test adding an handler using a non recognised mode.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
core.invoke_command('exopy.app.logging.add_handler',
{'id': 'ui', 'logger': 'test'},
self)
log_plugin = self.workbench.get_plugin(PLUGIN_ID)
assert log_plugin.handler_ids == []
assert not logger.handlers
def test_filter1(self, logger):
"""Test adding removing filter.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
handler = GuiHandler(model=LogModel())
core.invoke_command('exopy.app.logging.add_handler',
{'id': 'ui', 'handler': handler, 'logger': 'test'},
self)
class Filter(object):
def filter(self, record):
return True
test_filter = Filter()
core.invoke_command('exopy.app.logging.add_filter',
{'id': 'filter', 'filter': test_filter,
'handler_id': 'ui'},
self)
log_plugin = self.workbench.get_plugin(PLUGIN_ID)
assert log_plugin.filter_ids == [u'filter']
assert log_plugin._filters == {u'filter': (test_filter, u'ui')}
core.invoke_command('exopy.app.logging.remove_filter',
{'id': 'filter'}, self)
assert log_plugin.filter_ids == []
assert log_plugin._filters == {}
def test_filter2(self):
"""Test adding a filter and removing the handler.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
handler = GuiHandler(model=LogModel())
core.invoke_command('exopy.app.logging.add_handler',
{'id': 'ui', 'handler': handler, 'logger': 'test'},
self)
class Filter(object):
def filter(self, record):
return True
test_filter = Filter()
core.invoke_command('exopy.app.logging.add_filter',
{'id': 'filter', 'filter': test_filter,
'handler_id': 'ui'},
self)
log_plugin = self.workbench.get_plugin(PLUGIN_ID)
assert log_plugin.filter_ids == [u'filter']
assert log_plugin._filters == {u'filter': (test_filter, u'ui')}
core.invoke_command('exopy.app.logging.remove_handler',
{'id': 'ui'}, self)
assert log_plugin.filter_ids == []
assert log_plugin._filters == {}
def test_filter3(self, logger):
"""Test adding an improper filter.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
core.invoke_command('exopy.app.logging.add_filter',
{'id': 'filter', 'filter': object(),
'handler_id': 'ui'},
self)
def test_filter4(self, logger):
"""Test adding a filter to a non-existing handler.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
class Filter(object):
def filter(self, record):
return True
core.invoke_command('exopy.app.logging.add_filter',
{'id': 'filter', 'filter': Filter(),
'handler_id': 'ui'},
self)
def test_formatter(self, logger, exopy_qtbot):
"""Test setting the formatter of a handler.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
model = LogModel()
handler = GuiHandler(model=model)
core.invoke_command('exopy.app.logging.add_handler',
{'id': 'ui', 'handler': handler, 'logger': 'test'},
self)
formatter = logging.Formatter('test : %(message)s')
core.invoke_command('exopy.app.logging.set_formatter',
{'formatter': formatter, 'handler_id': 'ui'},
self)
logger.info('test')
def assert_text():
assert model.text == 'test : test\n'
exopy_qtbot.wait_until(assert_text)
def test_formatter2(self, logger, exopy_qtbot):
"""Test setting the formatter of a non existing handler.
"""
core = self.workbench.get_plugin(u'enaml.workbench.core')
formatter = logging.Formatter('test : %(message)s')
core.invoke_command('exopy.app.logging.set_formatter',
{'formatter': formatter,
'handler_id': 'non-existing'},
self)
exopy_qtbot.wait(10)
def test_start_logging1(self, app_dir):
"""Test startup function when redirection of sys.stdout is required
"""
cmd_args = CMDArgs()
cmd_args.nocapture = False
old = sys.stdout
app = self.workbench.get_plugin('exopy.app')
app.run_app_startup(cmd_args)
plugin = self.workbench.get_plugin(PLUGIN_ID)
try:
assert os.path.isdir(os.path.join(app_dir, 'logs'))
assert 'exopy.file_log' in plugin.handler_ids
assert 'exopy.gui_log' in plugin.handler_ids
assert plugin.gui_model
assert isinstance(sys.stdout, StreamToLogRedirector)
assert isinstance(sys.stderr, StreamToLogRedirector)
finally:
sys.stdout = old
def test_start_logging2(self, app_dir):
"""Test startup function when redirection of sys.stdout is not required
"""
cmd_args = CMDArgs()
cmd_args.nocapture = True
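# nocapture=True means sys.stdout/stderr should be left untouched by startup.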
old = sys.stdout
app = self.workbench.get_plugin('exopy.app')
app.run_app_startup(cmd_args)
plugin = self.workbench.get_plugin(PLUGIN_ID)
try:
assert os.path.isdir(os.path.join(app_dir, 'logs'))
assert 'exopy.file_log' in plugin.handler_ids
assert 'exopy.gui_log' in plugin.handler_ids
assert plugin.gui_model
# Fail in no capture mode (unknown reason).
assert not isinstance(sys.stdout, StreamToLogRedirector)
assert not isinstance(sys.stderr, StreamToLogRedirector)
finally:
sys.stdout = old
def test_display_current_log(self, app_dir, exopy_qtbot):
"""Test the log display window
"""
cmd_args = CMDArgs()
cmd_args.nocapture = True
app = self.workbench.get_plugin('exopy.app')
app.run_app_startup(cmd_args)
core = self.workbench.get_plugin(u'enaml.workbench.core')
with handle_dialog(exopy_qtbot):
core.invoke_command('exopy.app.logging.display_current_log', {}, self)
```
#### File: tests/app/test_app_plugin.py
```python
import enaml
from enaml.workbench.api import Workbench
from enaml.widgets.window import CloseEvent
with enaml.imports():
from enaml.workbench.core.core_manifest import CoreManifest
from exopy.app.errors.manifest import ErrorsManifest
from exopy.app.app_manifest import AppManifest
from .app_helpers import (StartupContributor, ClosingContributor1,
ClosingContributor2, ClosedContributor)
class FalseWindow(object):
"""False WorkbenchWindow used for testing closing as need an object with
a reference to the workbench.
"""
def __init__(self, workbench):
self.workbench = workbench
class TestAppPlugin(object):
"""Test the AppPlugin capabilities.
"""
def setup(self):
self.workbench = Workbench()
self.workbench.register(AppManifest())
self.workbench.register(CoreManifest())
self.workbench.register(ErrorsManifest())
def test_app_start_up(self):
"""Test running startups leading to new startup registrations.
"""
manifest = StartupContributor()
self.workbench.register(manifest)
plugin = self.workbench.get_plugin('exopy.app')
plugin.run_app_startup(object())
assert manifest.called == ['test_nested.startup1', 'test.startup2',
'test_nested.startup2']
self.workbench.unregister('exopy.app')
def test_closing(self):
"""Test that validation stops as soon as the event is rejected.
"""
manifest1 = ClosingContributor1()
manifest2 = ClosingContributor2()
self.workbench.register(manifest1)
self.workbench.register(manifest2)
window = FalseWindow(self.workbench)
plugin = self.workbench.get_plugin('exopy.app')
ev = CloseEvent()
plugin.validate_closing(window, ev)
assert not ev.is_accepted()
assert not manifest2.called or not manifest1.called
manifest1.accept = True
manifest2.accept = True
plugin.validate_closing(window, ev)
assert ev.is_accepted()
assert manifest2.called
def test_app_cleanup(self):
"""Test running the app cleanup.
"""
manifest = ClosedContributor()
self.workbench.register(manifest)
plugin = self.workbench.get_plugin('exopy.app')
plugin.run_app_cleanup()
assert manifest.called == ['test_nested.closed1', 'test.closed2',
'test_nested.closed2']
def test_app_startup_registation(self):
"""Test the AppStartup discovery.
"""
manifest = StartupContributor()
self.workbench.register(manifest)
plugin = self.workbench.get_plugin('exopy.app')
assert len(plugin.startup.contributions) == 2
assert len(plugin._start_heap) == 2
self.workbench.unregister(manifest.id)
assert not plugin.startup.contributions
assert len(plugin._start_heap) == 0
def test_app_closing_registation(self):
"""Test the AppClosing discovery.
"""
manifest = ClosingContributor1()
self.workbench.register(manifest)
plugin = self.workbench.get_plugin('exopy.app')
assert len(plugin.closing.contributions) == 1
self.workbench.unregister(manifest.id)
assert not plugin.closing.contributions
def test_app_closed_registation(self):
"""Test the AppClosed discovery.
"""
manifest = ClosedContributor()
self.workbench.register(manifest)
plugin = self.workbench.get_plugin('exopy.app')
assert len(plugin.closed.contributions) == 2
assert len(plugin._clean_heap) == 2
self.workbench.unregister(manifest.id)
assert not plugin.closed.contributions
assert len(plugin._clean_heap) == 0
```
#### File: instruments/connections/test_visa_connections.py
```python
import logging
import enaml
import pytest
from exopy.testing.util import show_widget, wait_for_destruction
with enaml.imports():
from exopy.instruments.connections.visa_connections\
import (VisaRaw, VisaRS232, VisaGPIB, VisaUSB, VisaTCPIP,
VisaConnection)
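# Fall back to a no-op when pyvisa is not installed so the widget tests can still run.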
try:
from pyvisa.rname import assemble_canonical_name
except ImportError:
assemble_canonical_name = lambda **x: True
def test_visa_raw(exopy_qtbot):
"""Test the raw visa connection used for aliases or unsupported resources.
"""
c = VisaRaw()
show_widget(exopy_qtbot, c)
c.widgets()[1].text = 'dummy'
assert c.gather_infos() == {'resource_name': 'dummy'}
def test_visa_rs232(exopy_qtbot):
"""Test the rs232 visa connection.
"""
c = VisaRS232()
show_widget(exopy_qtbot, c)
c.widgets()[-1].text = '1'
def assert_infos():
assert c.gather_infos() == {'interface_type': 'ASRL',
'resource_class': 'INSTR',
'board': '1'}
exopy_qtbot.wait_until(assert_infos)
assemble_canonical_name(**c.gather_infos())
def test_visa_GPIB(exopy_qtbot):
"""Test the GPIB visa connection.
"""
c = VisaGPIB()
show_widget(exopy_qtbot, c)
c.widgets()[-2].text = '1'
exopy_qtbot.wait(10)
c.widgets()[-1].checked = True
def assert_infos():
assert c.gather_infos() == {'interface_type': 'GPIB',
'resource_class': 'INSTR',
'board': '0',
'primary_address': '1',
'secondary_address': '0'}
exopy_qtbot.wait_until(assert_infos)
assemble_canonical_name(**c.gather_infos())
def test_visa_usb(exopy_qtbot):
""" Test the visa usb connection.
"""
c = VisaUSB()
show_widget(exopy_qtbot, c)
c.widgets()[-6].text = '0x00'
c.widgets()[-4].text = '0x01'
c.widgets()[-2].text = '0x02'
exopy_qtbot.wait(10)
c.widgets()[-1].checked = True
def assert_infos():
assert c.gather_infos() == {'interface_type': 'USB',
'resource_class': 'INSTR',
'manufacturer_id': '0x00',
'model_code': '0x01',
'serial_number': '0x02',
'usb_interface_number': '0',
'board': '0'}
exopy_qtbot.wait_until(assert_infos)
assemble_canonical_name(**c.gather_infos())
def test_visa_tcpip_instr(exopy_qtbot):
"""Test the visa tcpip connection.
"""
c = VisaTCPIP()
show_widget(exopy_qtbot, c)
c.widgets()[-4].text = '192.168.0.10'
exopy_qtbot.wait(10)
c.widgets()[-1].checked = True
def assert_infos():
assert c.gather_infos() == {'interface_type': 'TCPIP',
'resource_class': 'INSTR',
'host_address': '192.168.0.10',
'lan_device_name': 'inst0',
'board': '0'}
exopy_qtbot.wait_until(assert_infos)
assemble_canonical_name(**c.gather_infos())
def test_visa_tcpip_socket(exopy_qtbot, dialog_sleep):
"""Test the visa tcpip connection.
"""
c = VisaTCPIP()
show_widget(exopy_qtbot, c)
c.resource_class = 'SOCKET'
exopy_qtbot.wait(10 + dialog_sleep)
c.widgets()[-4].text = '192.168.0.10'
c.widgets()[-2].text = '10000'
exopy_qtbot.wait(10)
c.widgets()[-1].checked = True
def assert_infos():
assert c.gather_infos() == {'interface_type': 'TCPIP',
'resource_class': 'SOCKET',
'host_address': '192.168.0.10',
'port': '10000',
'board': '0'}
exopy_qtbot.wait_until(assert_infos)
assemble_canonical_name(**c.gather_infos())
def test_creating_a_visa_connection(prof_plugin, exopy_qtbot, caplog):
"""Test creating a Visa connection through VisaConnection.new
"""
caplog.set_level(logging.INFO)
c = prof_plugin.create_connection('VisaTCPIP', {'__junk': ''}, True)
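# The unknown '__junk' key should produce a log record; the widget is created read-only.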
w = show_widget(exopy_qtbot, c)
assert caplog.records
assert c.read_only
w.close()
wait_for_destruction(exopy_qtbot, w)
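# Each case below pairs connection defaults with whether an unexpected key (e.g. 'bad')
# should be reported in the logs.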
@pytest.mark.parametrize('id, defaults, should_log',
[('VisaRS232',
{'interface_type': 'ASRL',
'resource_class': 'INSTR',
'board': 1},
False),
('VisaRS232',
{'interface_type': 'ASRL',
'resource_class': 'INSTR',
'board': 1, 'bad': 1},
True),
('VisaGPIB',
{'interface_type': 'GPIB',
'resource_class': 'INSTR',
'board': 0,
'primary_address': 1,
'secondary_address': 0},
False),
('VisaGPIB',
{'interface_type': 'GPIB',
'resource_class': 'INSTR',
'board': 0,
'primary_address': 1,
'secondary_address': 0,
'bad': 1},
True),
('VisaUSB',
{'interface_type': 'USB',
'resource_class': 'INSTR',
'manufacturer_id': '0x00',
'model_code': '0x01',
'serial_number': '0x02',
'usb_interface_number': 0,
'board': 0},
False),
('VisaUSB',
{'interface_type': 'USB',
'resource_class': 'INSTR',
'manufacturer_id': '0x00',
'model_code': '0x01',
'serial_number': '0x02',
'usb_interface_number': 0,
'board': 0,
'bad': 1},
True),
('VisaTCPIP',
{'interface_type': 'TCPIP',
'resource_class': 'INSTR',
'host_address': '192.168.0.10',
'lan_device_name': 'inst0',
'port': 8000,
'board': 0},
False),
('VisaTCPIP',
{'interface_type': 'TCPIP',
'resource_class': 'INSTR',
'host_address': '192.168.0.10',
'lan_device_name': 'inst0',
'board': 0,
'port': 8000,
'bad': 1},
True),
('VisaTCPIP',
{'interface_type': 'TCPIP',
'resource_class': 'SOCKET',
'host_address': '192.168.0.10',
'port': 8000,
'lan_device_name': 'inst0',
'board': 0},
False),
('VisaTCPIP',
{'interface_type': 'TCPIP',
'resource_class': 'SOCKET',
'host_address': '192.168.0.10',
'port': 8000,
'lan_device_name': 'inst0',
'bad': 0},
True) ])
def test_validating_connection_default(id, defaults, should_log,
exopy_qtbot, caplog, prof_plugin):
"""Test that keyword filtering works as expected.
"""
caplog.set_level(logging.INFO)
c = prof_plugin.create_connection(id, defaults, False)
w = show_widget(exopy_qtbot, c)
if should_log:
assert caplog.records
else:
assert not caplog.records
assert not c.read_only
w.close()
wait_for_destruction(exopy_qtbot, w)
```
#### File: tests/instruments/test_infos.py
```python
import os
import pytest
from configobj import ConfigObj
from exopy.instruments.infos import (DriverInfos, InstrumentModelInfos,
SeriesInfos, ManufacturerInfos,
ManufacturersHolder, ProfileInfos,
validate_profile_infos)
from exopy.instruments.manufacturer_aliases import ManufacturerAlias
def test_driver():
"""Test that the driver validation does work.
"""
class FalsePlugin(object):
"""Lighter than using the real plugin.
"""
pass
p = FalsePlugin()
p.starters = {'starter': None}
p.connections = {'c1': {}, 'c2': {}, 'c3': {}}
p.settings = {'s1': {}, 's2': {}, 's3': {}}
# Test that when we know everything validation does work
d = DriverInfos(starter='starter', connections={'c1': {}, 'c2': {}},
settings={'s2': {}, 's3': {}})
assert d.validate(p)[0] and d.valid
# Test validation failing because of starter
p.starters.clear()
assert not d.validate(p)[0] and not d.valid
# Test validation failing because of one missing connection
p.starters = {'starter': None}
d.connections['c4'] = {}
assert not d.validate(p)[0] and not d.valid
# Test validation failing because of one missing settings
del d.connections['c4']
d.settings['s4'] = {}
assert not d.validate(p)[0] and not d.valid
def create_driver_infos(id, manufacturer='M', serie='S', model='m',
kind='Other', architecture='a',
connections={'c1': {'d': 1}},
settings={'s1': {'s': 1}}):
return DriverInfos(id=id, connections=connections, settings=settings,
infos=dict(manufacturer=manufacturer, serie=serie,
model=model, kind=kind,
architecture=architecture)
)
def test_model_update():
"""Test updating an instrument model infos using a list of drivers infos.
"""
d = [create_driver_infos('1'),
create_driver_infos('2', connections={'c1': {'d2': 2}, 'c2': {}}),
create_driver_infos('3', settings={'s1': {'w': 2}, 's2': {}})
]
i = InstrumentModelInfos()
i.update(d)
assert i.connections == {'c1': {'d': 1, 'd2': 2}, 'c2': {}}
assert i.settings == {'s1': {'s': 1, 'w': 2}, 's2': {}}
i.update([d[2]], removed=True)
assert i.settings == {'s1': {'s': 1}}
i.update([d[1]], removed=True)
assert i.connections == {'c1': {'d': 1}}
def test_model_find_matching_drivers():
"""Test filtering the drivers based on connections and settings infos.
"""
d = [create_driver_infos('1'),
create_driver_infos('2', connections={'c1': {'d2': 2}, 'c2': {}}),
create_driver_infos('3', settings={'s1': {'w': 2}, 's2': {}})
]
i = InstrumentModelInfos()
i.update(d)
c_filtered = i.find_matching_drivers('c2')
assert len(c_filtered) == 1 and c_filtered[0].id == '2'
s_filtered = i.find_matching_drivers('c1', 's2')
assert len(s_filtered) == 1 and s_filtered[0].id == '3'
assert i.find_matching_drivers('c1') == d
def test_series():
"""Test updating the information of a serie from a list of drivers.
"""
d = [create_driver_infos(str(i), model=m)
for i, m in enumerate(('m1', 'm2', 'm2'))]
s = SeriesInfos()
s.update_models(d)
assert len(s.instruments) == 2
i1 = s.instruments[0]
assert (i1.manufacturer == 'M' and i1.serie == 'S' and i1.model == 'm1' and
i1.kind == 'Other' and
i1.connections and i1.settings and len(i1.drivers) == 1)
i2 = s.instruments[1]
assert (i2.manufacturer == 'M' and i2.serie == 'S' and i2.model == 'm2' and
i2.kind == 'Other' and
i2.connections and i2.settings and len(i2.drivers) == 2)
# Test filtering
i1.kind = 'Lock-in'
s.kind = 'Lock-in'
assert len(s.instruments) == 1 and s.instruments[0] is i1
# Test adding more connections/settings
d2 = [create_driver_infos('a', model='m1', connections={'c2': {}}),
create_driver_infos('b', model='m2', settings={'s2': {}})]
s.update_models(d2)
s.kind = 'All'
assert 'c2' in s.instruments[0].connections
assert 's2' in s.instruments[1].settings
# Test removing drivers
s.update_models(d2, removed=True)
assert 'c2' not in s.instruments[0].connections
assert 's2' not in s.instruments[1].settings
s.update_models([d[0]], removed=True)
assert len(s.instruments) == 1 and s.instruments[0] is i2
def test_manufaturer_using_series():
"""Test the capabilities of the manufacturer infos.
"""
d = [create_driver_infos(m+s, model=m, serie=s,
kind='Lock-in' if m == 'm1' else 'Other')
for s in ('s', 's2', '')
for m in ('m1', 'm2', 'm2')
]
m = ManufacturerInfos()
m.update_series_and_models(d)
assert len(m.instruments) == 4 # Two series and two models with no series
s_names = ['s', 's2']
for s_or_m in m.instruments:
if isinstance(s_or_m, SeriesInfos):
assert s_or_m.name in s_names
s_names.remove(s_or_m.name)
# Filtering by kind
m.kind = 'Lock-in'
assert len(m.instruments) == 3
for s_or_m in m.instruments:
if isinstance(s_or_m, SeriesInfos):
assert len(s_or_m.instruments) == 1
m.kind = 'All'
# Remove some drivers and hence update the series
m.update_series_and_models(d[:2], removed=True)
assert len(m.instruments) == 4
for s_or_m in m.instruments:
if isinstance(s_or_m, SeriesInfos) and s_or_m.name == 's':
assert len(s_or_m.instruments) == 1
# Remove a full series
m.update_series_and_models(d[:3], removed=True)
assert len(m.instruments) == 3
def test_manufaturer_not_using_series():
"""Test the capabilities of the manufacturer infos.
"""
d = [create_driver_infos(m+s, model=m, serie=s,
kind='Lock-in' if m == 'm1' else 'Other')
for s in ('s', 's2', '')
for m in ('m1', 'm2', 'm2')
]
m = ManufacturerInfos()
m.update_series_and_models(d)
m.use_series = False
assert len(m.instruments) == 6
# Filtering by kind
m.kind = 'Lock-in'
assert len(m.instruments) == 3
m.kind = 'All'
# Remove some drivers and hence update the series
m.update_series_and_models(d[:2], removed=True)
assert len(m.instruments) == 5
# Remove a full series
m.update_series_and_models(d[:3], removed=True)
assert len(m.instruments) == 4
def test_manufacturer_switching_series_use():
"""Test switching between series and non series display.
"""
d = [create_driver_infos(m+s, model=m, serie=s,
kind='Lock-in' if m == 'm1' else 'Other')
for s in ('s', 's2', '')
for m in ('m1', 'm2', 'm2')
]
m = ManufacturerInfos()
m.update_series_and_models(d)
assert len(m.instruments) == 4
m.use_series = False
assert len(m.instruments) == 6
m.use_series = True
print(m.instruments)
assert len(m.instruments) == 4
@pytest.fixture
def false_plugin():
"""False instrument plugin to test the profile infos.
"""
class FalsePlugin(object):
"""Lighter than using the real plugin.
"""
pass
p = FalsePlugin()
p.starters = {'starter': None}
p.connections = {'c1': {}, 'c2': {}, 'c3': {}}
p.settings = {'s1': {}, 's2': {}, 's3': {}}
p._aliases = FalsePlugin()
p._aliases.contributions = {}
return p
def test_holder1(false_plugin):
"""Test the capabilities of the ManufacturersHolder.
"""
d = [create_driver_infos(m+s, model=m, serie=s, manufacturer=man,
kind='Lock-in' if m == 'm1' else 'Other')
for man in ('man1', 'man2')
for s in ('s', 's2', '')
for m in ('m1', 'm2', 'm2')
]
h = ManufacturersHolder(plugin=false_plugin)
h.update_manufacturers(d)
assert len(h.manufacturers) == 2
h.kind = 'Lock-in'
for m in h.manufacturers:
assert m.kind == h.kind
h.use_series = not h.use_series
for m in h.manufacturers:
assert m.use_series == h.use_series
h.kind = 'All'
# Remove some drivers
h.update_manufacturers(d[:6]+d[9:], removed=True)
assert len(h.manufacturers) == 1
assert not h.manufacturers[0]._series
# Remove all drivers
h.update_manufacturers(d, removed=True)
assert not h.manufacturers
def test_holder2(false_plugin):
"""Test the automatic handling of aliases by ManufacturersHolder.
"""
d = [create_driver_infos(m+s, model=m, serie=s, manufacturer=man,
kind='Lock-in' if m == 'm1' else 'Other')
for man in ('man1', 'man2')
for s in ('s', 's2', '')
for m in ('m1', 'm2', 'm2')
]
false_plugin._aliases.contributions = {'man1':
ManufacturerAlias(aliases=['man2'])}
h = ManufacturersHolder(plugin=false_plugin)
h.update_manufacturers(d)
assert len(h.manufacturers) == 1
PROFILE_PATH = os.path.join(os.path.dirname(__file__),
'false_profile.instr.ini')
@pytest.fixture
def false_plugin_with_holder(false_plugin):
"""False instrument plugin to test the profile infos.
"""
p = false_plugin
h = ManufacturersHolder(plugin=false_plugin)
d = [create_driver_infos(m, model=m, serie='' if m != 'm2' else 'S',
manufacturer=man,
kind='Lock-in' if m == 'm2' else 'Other')
for man in ('manufacturer', 'man2')
for m in ('model', 'm2', 'm2')
]
h.update_manufacturers(d)
p._manufacturers = h
return p
def test_profile_members(false_plugin_with_holder):
"""Test accessing the values stored in the profile.
"""
p = ProfileInfos(path=PROFILE_PATH, plugin=false_plugin_with_holder)
assert p.id == 'profile'
assert p.model.model == 'model'
assert sorted(p.connections) == ['visa_tcpip', 'visa_usb']
assert sorted(p.settings) == ['lantz', 'lantz-sim']
p._config = {'id': 'new'}
assert p.id == 'new'
def test_profile_write(tmpdir, false_plugin_with_holder):
"""Test writing a modified profile.
"""
p = ProfileInfos(path=PROFILE_PATH, plugin=false_plugin_with_holder)
p.id = 'new'
p.model.serie = 'S'
p.model.model = 'm2'
p.connections['new'] = {'inf': 1}
p.settings['new'] = {'inf': 2}
path = str(tmpdir.join('new.ini'))
p.path = path
p.write_to_file()
p = ProfileInfos(path=path, plugin=false_plugin_with_holder)
assert p.id == 'new'
assert p.model.model == 'm2'
assert 'new' in p.connections and 'visa_tcpip' in p.connections
assert 'new' in p.settings and 'lantz' in p.settings
def test_profile_clone(false_plugin_with_holder):
"""Test cloning a profile.
"""
p = ProfileInfos(path=PROFILE_PATH, plugin=false_plugin_with_holder)
p.id = 'new'
p2 = p.clone()
assert p2.id == 'new'
assert p2.model.model == 'model'
def test_profile_blank(false_plugin_with_holder):
"""Test creating a blank profile.
"""
p = ProfileInfos.create_blank(false_plugin_with_holder)
assert not p.id
assert not p.connections
assert not p.settings
assert p._config
def test_validate_profile_infos(tmpdir, false_plugin_with_holder):
"""Test validating a profile.
"""
i = ProfileInfos(path=PROFILE_PATH, plugin=false_plugin_with_holder)
r, msg = validate_profile_infos(i)
assert r
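# Removing any of the mandatory keys should make validation fail and mention the missing key.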
for p, m in [(os.path.join(str(tmpdir), 'd_%s.instr.ini' % m), m)
for m in ('id', 'model_id', 'connections', 'settings')]:
c = ConfigObj(PROFILE_PATH)
del c[m]
with open(p, 'wb') as f:
c.write(f)
i = ProfileInfos(path=p, plugin=false_plugin_with_holder)
r, msg = validate_profile_infos(i)
assert not r
assert m in msg
```
#### File: instruments/widgets/test_profile_selection.py
```python
import enaml
from exopy.testing.util import wait_for_window_displayed
with enaml.imports():
from exopy.instruments.widgets.profile_selection\
import (ProfileSelectionDialog)
def test_selecting_profile_from_scratch(prof_plugin, exopy_qtbot,
dialog_sleep):
"""Test selecting a profile.
"""
d = ProfileSelectionDialog(plugin=prof_plugin)
d.show()
wait_for_window_displayed(exopy_qtbot, d)
exopy_qtbot.wait(dialog_sleep)
d.profile = 'fp2'
assert not d.connection
assert not d.settings
exopy_qtbot.wait(10 + dialog_sleep)
d.connection = 'false_connection1'
d.settings = 'false_settings1'
d.driver = 'instruments.test.FalseDriver%s' % ('' if d.driver.endswith('2')
else 2)
assert not d.connection
assert not d.settings
exopy_qtbot.wait(10 + dialog_sleep)
d.connection = 'false_connection'
d.settings = 'false_settings'
exopy_qtbot.wait(10 + dialog_sleep)
d.central_widget().widgets()[-1].clicked = True
def assert_result():
assert d.result
exopy_qtbot.wait_until(assert_result)
def test_editing_a_previous_selection(prof_plugin, exopy_qtbot, dialog_sleep):
"""Test editing a profile selection.
"""
d = ProfileSelectionDialog(plugin=prof_plugin,
profile='fp2',
driver='instruments.test.FalseDriver2',
connection='false_connection',
settings='false_settings')
d.show()
wait_for_window_displayed(exopy_qtbot, d)
exopy_qtbot.wait(dialog_sleep)
assert d.profile == 'fp2'
assert d.driver == 'instruments.test.FalseDriver2'
assert d.connection == 'false_connection'
assert d.settings == 'false_settings'
d.central_widget().widgets()[-2].clicked = True
def assert_result():
assert not d.result
exopy_qtbot.wait_until(assert_result)
def test_using_custom_filtering(prof_plugin, exopy_qtbot, dialog_sleep):
"""Test using a custom filtering function to reduce the available profiles
and drivers.
"""
d = ProfileSelectionDialog(plugin=prof_plugin, profile='fp1',
filter_profiles=lambda p: ['fp1'],
filter_drivers=lambda d: [d[0]])
d.show()
wait_for_window_displayed(exopy_qtbot, d)
exopy_qtbot.wait(dialog_sleep)
w = d.central_widget().widgets()[0]
assert len(w._drivers) == 1
```
#### File: measurement/editors/test_database_editor.py
```python
import pytest
import enaml
from enaml.widgets.api import FlowArea, Menu
from exopy.utils.container_change import ContainerChange
from exopy.tasks.api import RootTask, ComplexTask, SimpleTask
from exopy.measurement.editors.api import Editor
from exopy.measurement.editors.database_access_editor.editor_model import\
EditorModel
from exopy.testing.util import wait_for_window_displayed
with enaml.imports():
from exopy.measurement.editors.database_access_editor import\
DatabaseAccessEditor
from exopy.testing.windows import PageTestingWindow
@pytest.fixture
def task():
"""Task used to test the editor.
Root:
SimpleTask: simp1, entries: t
ComplexTask: comp1, entries: t1, t2
SimpleTask: simp2, entries: t
ComplexTask: comp2, entries: t1, t2
SimpleTask: simp3, entries: t
"""
r = RootTask()
r.add_child_task(0, SimpleTask(name='simp1', database_entries={'t': 1}))
c = ComplexTask(name='comp1', database_entries={'t1': 2, 't2': 'r'})
c.add_child_task(0,
SimpleTask(name='simp2', database_entries={'t': 1}))
c2 = ComplexTask(name='comp2', database_entries={'t1': 2, 't2': 'r'})
c2.add_child_task(0,
SimpleTask(name='simp3', database_entries={'t': 1}))
c.add_child_task(1, c2)
r.add_child_task(1, c)
return r
def test_node_creation(task):
"""Test creating the editor when exceptions already exists.
"""
# Add an access exception to the deepest level
simp3 = task.children[1].children[1].children[0]
simp3.add_access_exception('t', 1)
ed = EditorModel(root=task)
assert ed.nodes[simp3.path].has_exceptions
def test_node_sorting(task):
"""Test that a node model correctly order its children and react to
task re-ordering.
"""
ed = EditorModel(root=task)
nmodel = ed.nodes['root']
task.add_child_task(0, ComplexTask(name='cc'))
nmodel.sort_nodes()
assert [c.task.name for c in nmodel.children] == ['cc', 'comp1']
assert sorted(nmodel.entries) == sorted(['default_path', 'simp1_t',
'comp1_t1', 'comp1_t2'])
task.move_child_task(0, 2)
assert [c.task.name for c in nmodel.children] == ['comp1', 'cc']
assert (sorted(nmodel.children[0].entries) ==
sorted(['simp2_t', 'comp2_t1', 'comp2_t2']))
change = ContainerChange(collapsed=[ContainerChange()])
nmodel._react_to_task_children_event(change) # For coverage
def test_editor_modifying_exception_level(task):
"""Test modifying the level of an access exception.
"""
ed = EditorModel(root=task)
rnode = ed.nodes['root']
parent_node = rnode.children[0]
node = parent_node.children[0]
# Check that we can disambiguate between tasks with the same prefix
node.task.add_child_task(0, SimpleTask(name='simp3_t',
database_entries={'t': 1}))
node.add_exception('simp3_t')
assert 'simp3_t' in parent_node.exceptions
assert 't' in node.task.children[1].access_exs
ed.increase_exc_level('root/comp1', 'simp3_t')
assert 'simp3_t' not in parent_node.exceptions
assert 'simp3_t' in rnode.exceptions
ed.decrease_exc_level('root', 'simp3_t')
assert 'simp3_t' in parent_node.exceptions
assert 'simp3_t' not in rnode.exceptions
ed.decrease_exc_level('root/comp1', 'simp3_t')
assert 'simp3_t' not in parent_node.exceptions
assert 't' not in node.task.children[1].access_exs
parent_node.add_exception('simp2_t')
assert 'simp2_t' in rnode.exceptions
def test_editor_changing_root(task):
"""Setting a new root.
"""
ed = EditorModel(root=RootTask())
assert len(ed.nodes) == 1
ed.root = task
assert len(ed.nodes) == 3
assert ('root' in ed.nodes and 'root/comp1' in ed.nodes and
'root/comp1/comp2' in ed.nodes)
assert ed.nodes['root/comp1'] in ed.nodes['root'].children
assert ed.nodes['root/comp1/comp2'] in ed.nodes['root/comp1'].children
def test_handling_entry_modification(task):
"""Test handling the possible modifications at the entry level.
"""
ed = EditorModel(root=task)
child = task.children[1].children[0]
entries = child.database_entries.copy()
entries['t2'] = 1
child.database_entries = entries
assert 'simp2_t2' in ed.nodes['root/comp1'].entries
child = task.children[1].children[1]
child.name = 'cc'
assert 'cc_t1' in ed.nodes['root/comp1'].entries
assert 'cc_t2' in ed.nodes['root/comp1'].entries
assert 'comp2_t1' not in ed.nodes['root/comp1'].entries
assert 'comp2_t2' not in ed.nodes['root/comp1'].entries
child = task.children[1].children[1].children[0]
child.add_access_exception('t', 2)
assert 'simp3_t' in ed.nodes['root'].exceptions
child.database_entries = {}
assert not ed.nodes['root/comp1/cc'].entries
assert 'simp2_t' not in ed.nodes['root'].exceptions
def test_handling_exceptions_modifications(task):
"""Test handling the possible modifictaion at the level of an exception.
"""
ed = EditorModel(root=task)
child = task.children[1].children[1].children[0]
child.add_access_exception('t', 1)
assert 'simp3_t' in ed.nodes['root/comp1'].exceptions
assert 'simp3_t' in ed.nodes['root/comp1/comp2'].has_exceptions
child.name = 'ss'
assert 'ss_t' in ed.nodes['root/comp1'].exceptions
assert 'ss_t' in ed.nodes['root/comp1/comp2'].has_exceptions
parent = task.children[1]
parent.name = 'cc'
assert 'ss_t' in ed.nodes['root/cc'].exceptions
assert 'ss_t' in ed.nodes['root/cc/comp2'].has_exceptions
child.remove_access_exception('t')
assert 'ss_t' not in ed.nodes['root/cc'].exceptions
assert 'ss_t' not in ed.nodes['root/cc/comp2'].has_exceptions
# For coverage try removing all exceptions.
task.database.remove_access_exception('root/cc')
def test_handling_node_manipulation(task):
"""Test handling manipulation occuring on a node.
"""
ed = EditorModel(root=task)
cc = ComplexTask(name='cc')
task.add_child_task(0, cc)
assert 'root/cc' in ed.nodes
assert cc is ed.nodes['root'].children[0].task
task.remove_child_task(0)
assert 'root/cc' not in ed.nodes
# For coverage check that we could handle a list of changes
ed._react_to_nodes([('', '', '')])
# Test failing to find a task by path
with pytest.raises(ValueError):
ed._get_task('root/unknown')
def test_editor_widget(exopy_qtbot, task, dialog_sleep):
"""That the interaction with the editor widget makes sense.
"""
dialog_sleep = dialog_sleep or 1
def get_task_widget(editor):
return editor.page_widget().widgets()[0].scroll_widget()
def get_flow_area(widget):
return [w for w in widget.children if isinstance(w, FlowArea)][0]
def get_menu(task_widget, widget_index):
flow_area = task_widget.widgets()[0]
flow_item = flow_area.flow_items()[widget_index]
menu = flow_item.flow_widget().widgets()[0].children[0]
return menu
task_with_exs = task.children[1].children[1].children[0]
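# simp3, the deepest simple task in the fixture, will receive the access exceptions.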
editor = DatabaseAccessEditor(declaration=Editor(id='exopy.database'),
selected_task=task)
window = PageTestingWindow(widget=editor)
window.show()
wait_for_window_displayed(exopy_qtbot, window)
exopy_qtbot.wait(dialog_sleep)
r_widget = get_task_widget(editor)
flow_area = get_flow_area(r_widget)
# Check that there is no contextual menu attached.
assert not [w for w in flow_area.flow_items()[0].flow_widget().children
if isinstance(w, Menu)]
# Ask the editor to hide its children by clicking the button (this does
# not check that the layout actually changed, simply that it is correct)
r_widget.widgets()[-2].clicked = True
assert r_widget.widgets()[-1].visible is False
assert not r_widget.widgets()[-1].layout_constraints()
# Undo
r_widget.widgets()[-2].clicked = True
assert r_widget.widgets()[-1].visible is True
assert r_widget.widgets()[-1].layout_constraints()
# Add an access exception to the lowest level.
editor.selected_task = task.children[1].children[1]
exopy_qtbot.wait(10 + dialog_sleep)
widget = get_task_widget(editor)
add_ex_action = get_menu(widget, 0).items()[0]
add_ex_action.triggered = True
def assert_access_exs():
assert task_with_exs.access_exs['t'] == 1
exopy_qtbot.wait_until(assert_access_exs)
exopy_qtbot.wait(dialog_sleep)
# Move the exception up
editor.selected_task = task.children[1]
def assert_flows():
assert len(flow_area.flow_items()) == 4
exopy_qtbot.wait_until(assert_flows)
exopy_qtbot.wait(dialog_sleep)
widget = get_task_widget(editor)
flow_area = get_flow_area(widget)
menu = get_menu(widget, -1)
assert len(menu.items()) == 2 # Check that both actions are there.
move_up_action = menu.items()[0]
move_up_action.triggered = True
def assert_access_exs():
assert task_with_exs.access_exs['t'] == 2
exopy_qtbot.wait_until(assert_access_exs)
exopy_qtbot.wait(dialog_sleep)
# Move the exception down
editor.selected_task = task
def assert_flows():
assert len(flow_area.flow_items()) == 3
exopy_qtbot.wait_until(assert_flows)
exopy_qtbot.wait(dialog_sleep)
widget = get_task_widget(editor)
flow_area = get_flow_area(widget)
menu = get_menu(widget, -1)
assert len(menu.items()) == 1 # Check that only one action is there.
move_down_action = menu.items()[0]
move_down_action.triggered = True
def assert_access_exs():
assert task_with_exs.access_exs['t'] == 1
exopy_qtbot.wait_until(assert_access_exs)
exopy_qtbot.wait(dialog_sleep)
# Move the exception down (it disappears)
editor.selected_task = task.children[1]
exopy_qtbot.wait(10 + dialog_sleep)
widget = get_task_widget(editor)
flow_area = get_flow_area(widget)
assert len(flow_area.flow_items()) == 4
menu = get_menu(widget, -1)
move_down_action = menu.items()[1]
move_down_action.triggered = True
def assert_access_exs():
assert not task_with_exs.access_exs
exopy_qtbot.wait_until(assert_access_exs)
exopy_qtbot.wait(dialog_sleep)
# Destroy a task such that it leads to the destruction of a node
editor.selected_task = task
old_cache = editor._cache.copy()
task.remove_child_task(1)
def assert_cache():
assert len(editor._cache) == 1
exopy_qtbot.wait_until(assert_cache)
for node in old_cache:
editor.discard_view(node)
exopy_qtbot.wait(dialog_sleep)
```
#### File: monitors/text_monitor/test_monitor.py
```python
from operator import attrgetter
import pytest
import enaml
from exopy.tasks.tasks.database import TaskDatabase
from exopy.measurement.monitors.text_monitor.entry import MonitoredEntry
from exopy.measurement.monitors.text_monitor.rules.std_rules\
import FormatRule, RejectRule
from exopy.testing.util import handle_dialog, wait_for_window_displayed
with enaml.imports():
from exopy.testing.windows import (ContainerTestingWindow,
DockItemTestingWindow)
from exopy.measurement.monitors.text_monitor.monitor_views\
import (TextMonitorItem, TextMonitorEdit)
@pytest.fixture
def monitor(text_monitor_workbench):
"""Bare text monitor as created by the plugin.
"""
p = text_monitor_workbench.get_plugin('exopy.measurement')
return p.create('monitor', 'exopy.text_monitor', False)
@pytest.fixture
def database():
"""Database used to generate events.
"""
return TaskDatabase()
def test_create_default_entry(monitor):
""" Test creating an entryt from a path.
"""
entry = monitor._create_default_entry('test/entry_test', 1)
assert entry.path == 'test/entry_test'
assert entry.name == 'entry_test'
assert entry.formatting == '{test/entry_test}'
assert entry.depend_on == ['test/entry_test']
assert entry.value == '1'
def test_adding_removing_moving_entries(monitor):
"""Test adding an entry to the displayed ones.
"""
entry = monitor._create_default_entry('test/entry_test', 1)
monitor.add_entries('displayed', [entry])
assert entry in monitor.displayed_entries
assert monitor.updaters == {'test/entry_test': [entry.update]}
assert monitor.monitored_entries == ['test/entry_test']
entry2 = monitor._create_default_entry('test/entry_test2', 1)
monitor.add_entries('undisplayed', [entry])
assert monitor.updaters == {'test/entry_test': [entry.update]}
assert monitor.monitored_entries == ['test/entry_test']
monitor.move_entries('undisplayed', 'displayed', [entry2])
assert 'test/entry_test2' in monitor.updaters
assert 'test/entry_test2' in monitor.monitored_entries
monitor.remove_entries('displayed', [entry2])
assert 'test/entry_test2' not in monitor.updaters
assert 'test/entry_test2' not in monitor.monitored_entries
with pytest.raises(ValueError):
monitor.add_entries('', ())
with pytest.raises(ValueError):
monitor.remove_entries('', ())
with pytest.raises(ValueError):
monitor.move_entries('', 'displayed', ())
with pytest.raises(ValueError):
monitor.move_entries('displayed', '', ())
def test_linking_to_measurement(monitor, measurement):
"""Test that linking the monitor to a measurement does start the database
observation.
"""
monitor.link_to_measurement(measurement)
measurement.root_task.database_entries = {'default_path': '', 'dummy': ''}
assert 'root/dummy' in monitor.monitored_entries
def test_handle_database_change1(monitor, database):
""" Test handling the adding of an entry to the database.
"""
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'entry_test', 1)
assert monitor.monitored_entries == ['root/entry_test']
assert len(monitor.displayed_entries) == 1
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
entry = monitor.displayed_entries[0]
assert entry.path == 'root/entry_test'
assert entry.name == 'entry_test'
assert entry.formatting == '{root/entry_test}'
assert entry.depend_on == ['root/entry_test']
assert monitor._database_values == {'root/entry_test': 1}
assert 'root/entry_test' in monitor.updaters
def test_handle_database_change2(monitor, database):
""" Test handling the adding of an entry subject to a reject rule.
"""
monitor.rules.append(RejectRule(id='Test', suffixes=['test']))
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'make_test', 1)
assert monitor.monitored_entries == []
assert not monitor.displayed_entries
assert len(monitor.undisplayed_entries) == 1
assert not monitor.hidden_entries
assert monitor.undisplayed_entries[0].depend_on == ['root/make_test']
assert monitor._database_values == {'root/make_test': 1}
assert not monitor.updaters
def test_handle_database_change3(exopy_qtbot, monitor, database):
""" Test handling the adding of entries subject to a format rule.
"""
rule = FormatRule(id='Test', suffixes=['loop', 'index'],
new_entry_suffix='progress',
new_entry_formatting='{index}/{loop}')
monitor.rules.append(rule)
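# The format rule should merge matching *_loop/*_index entries into a single
# *_progress entry and hide the originals.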
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'test_loop', 10)
assert monitor.monitored_entries == ['root/test_loop']
assert len(monitor.displayed_entries) == 1
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
assert monitor.displayed_entries[0].depend_on == ['root/test_loop']
assert monitor._database_values == {'root/test_loop': 10}
assert 'root/test_loop' in monitor.updaters
database.set_value('root', 'test2_index', 1)
assert (monitor.monitored_entries == ['root/test_loop',
'root/test2_index'])
assert len(monitor.displayed_entries) == 2
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
assert (monitor._database_values == {'root/test_loop': 10,
'root/test2_index': 1})
database.set_value('root', 'test_index', 1)
assert (monitor.monitored_entries == ['root/test_loop',
'root/test2_index',
'root/test_index'])
assert len(monitor.displayed_entries) == 2
assert not monitor.undisplayed_entries
assert len(monitor.hidden_entries) == 2
assert (monitor._database_values == {'root/test_loop': 10,
'root/test2_index': 1,
'root/test_index': 1})
assert len(monitor.updaters['root/test_loop']) == 1
assert len(monitor.updaters['root/test_index']) == 1
entry = monitor.displayed_entries[0]
if entry.name != 'test_progress':
entry = monitor.displayed_entries[1]
assert entry.name == 'test_progress'
assert entry.path == 'root/test_progress'
assert entry.depend_on == ['root/test_loop', 'root/test_index']
assert entry.formatting == '{root/test_index}/{root/test_loop}'
entry.update(monitor._database_values)
def assert_entry_value():
assert entry.value == '1/10'
exopy_qtbot.wait_until(assert_entry_value)
rule.hide_entries = False
database.set_value('root', 'test2_loop', 10)
assert (monitor.monitored_entries == ['root/test_loop',
'root/test2_index',
'root/test_index',
'root/test2_loop'])
assert len(monitor.displayed_entries) == 4
assert not monitor.undisplayed_entries
assert len(monitor.hidden_entries) == 2
assert (monitor._database_values == {'root/test_loop': 10,
'root/test2_index': 1,
'root/test_index': 1,
'root/test2_loop': 10})
assert len(monitor.updaters['root/test2_loop']) == 2
assert len(monitor.updaters['root/test2_index']) == 2
def test_handle_database_change4(monitor, database):
""" Test handling the adding/removing an entry linked to a custom one.
"""
entry = monitor._create_default_entry('dummy/test', 1)
entry.name = 'Custom'
entry.path = 'custom'
entry.formatting = 'This test n {root/test}'
entry.depend_on = ['root/test']
monitor.custom_entries.append(entry)
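# The custom entry depends on 'root/test', so it should only be displayed once
# that entry exists in the database.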
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'aux', 1)
assert monitor.monitored_entries == ['root/aux']
assert len(monitor.displayed_entries) == 1
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
assert monitor._database_values == {'root/aux': 1}
database.set_value('root', 'test', 2)
assert monitor.monitored_entries == ['root/aux', 'root/test']
assert len(monitor.displayed_entries) == 3
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
assert monitor._database_values == {'root/aux': 1, 'root/test': 2}
assert len(monitor.updaters['root/test']) == 2
database.set_value('root', 'new', 2)
assert len(monitor.displayed_entries) == 4
database.delete_value('root', 'test')
assert monitor.monitored_entries == ['root/aux', 'root/new']
assert len(monitor.displayed_entries) == 2
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
assert monitor._database_values == {'root/aux': 1, 'root/new': 2}
assert monitor.custom_entries
assert 'root/test' not in monitor.updaters
def test_handle_database_change5(monitor, database):
"""Test handling entries being renamed.
"""
rule = FormatRule(id='Test', suffixes=['loop', 'index'],
new_entry_suffix='progress',
new_entry_formatting='{index}/{loop}')
monitor.rules.append(rule)
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'test_loop', 10)
database.set_value('root', 'test_index', 10)
old_updaters = monitor.updaters.copy()
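# Keep the old updaters to check that renaming entries preserves them.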
database.rename_values('root', ['test_loop', 'test_index'],
['new_loop', 'new_index'])
progress_entry = monitor.displayed_entries[0]
assert progress_entry.name == 'new_progress'
assert (sorted(progress_entry.depend_on) ==
sorted(['root/new_index', 'root/new_loop']))
assert len(monitor.hidden_entries) == 2
for e in monitor.hidden_entries:
assert e.name in ('new_index', 'new_loop')
assert e.path in ('root/new_index', 'root/new_loop')
assert e.depend_on[0] in ('root/new_index', 'root/new_loop')
assert len(monitor.updaters) == 2
assert (monitor.updaters['root/new_index'] is
old_updaters['root/test_index'])
assert (monitor.updaters['root/new_loop'] is
old_updaters['root/test_loop'])
assert len(monitor._database_values) == 2
assert 'root/new_index' in monitor._database_values
assert 'root/new_loop' in monitor._database_values
def test_handle_database_change6(monitor, database):
"""Test handling node being renamed.
"""
database.create_node('root', 'test')
database.observe('notifier', monitor.handle_database_entries_change)
database.observe('nodes_notifier', monitor.handle_database_nodes_change)
database.set_value('root', 'value', 1)
database.set_value('root/test', 'test_loop', 10)
old_updater = monitor.updaters['root/test/test_loop']
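# Keep the old updater to check that renaming the node preserves it.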
database.rename_node('root', 'test', 'new')
assert 'root/new/test_loop' in [e.path for e in monitor.displayed_entries]
assert (sorted(monitor.monitored_entries) ==
sorted(['root/new/test_loop', 'root/value']))
assert monitor.updaters['root/new/test_loop'] is old_updater
assert 'root/new/test_loop' in monitor._database_values
monitor.handle_database_nodes_change([('renamed', 'root', 'new', 'old')])
assert (sorted(monitor.monitored_entries) ==
sorted(['root/old/test_loop', 'root/value']))
def test_refresh_monitored_entries(monitor, database):
""" Test refreshing entries (with a custom entry).
"""
entry = monitor._create_default_entry('dummy/test', 1)
entry.name = 'Custom'
entry.path = 'custom'
entry.formatting = 'This test n {test}'
entry.depend_on = ['root/test']
monitor.custom_entries.append(entry)
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'test', 1)
monitor.refresh_monitored_entries({'root/test': 2})
assert monitor.monitored_entries == ['root/test']
assert len(monitor.displayed_entries) == 2
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
assert monitor._database_values == {'root/test': 2}
monitor._database_values = {'root/aux': 2}
monitor.refresh_monitored_entries()
assert monitor.monitored_entries == ['root/aux']
assert len(monitor.displayed_entries) == 1
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
def test_process_news(exopy_qtbot, monitor, database):
""" Test processing news coming from a database.
"""
rule = FormatRule(id='Test', suffixes=['loop', 'index'],
new_entry_suffix='progress',
new_entry_formatting='{index}/{loop}',
hide_entries=False)
monitor.rules.append(rule)
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'test_loop', 10)
database.set_value('root', 'test_index', 1)
monitor.process_news(('root/test_index', 2))
def assert_displayed_entries():
assert monitor.displayed_entries[0].value == '10'
assert monitor.displayed_entries[1].value == '2'
assert monitor.displayed_entries[2].value == '2/10'
exopy_qtbot.wait_until(assert_displayed_entries)
monitor.updaters = {}
monitor.process_news(('root/test_index', 2))
exopy_qtbot.wait(10)
# Should simply pass silently
def test_clear_state(monitor, database):
"""Test clearing the monitor state.
"""
rule = FormatRule(id='Test', suffixes=['loop', 'index'],
new_entry_suffix='progress',
new_entry_formatting='{index}/{loop}')
monitor.rules.append(rule)
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'test_loop', 10)
database.set_value('root', 'test_index', 1)
database.set_value('root', 'test2_index', 1)
monitor._clear_state()
assert not monitor.displayed_entries
assert not monitor.undisplayed_entries
assert not monitor.hidden_entries
assert not monitor.updaters
assert not monitor.custom_entries
assert not monitor.monitored_entries
def test_get_set_state(monitor, monkeypatch, measurement, database):
""" Test get_state.
"""
entry = monitor._create_default_entry('root/test', 1)
entry.name = 'Custom'
entry.path = 'custom'
entry.formatting = 'This test n {root/test_loop}*{root/test2_loop}'
entry.depend_on = ['root/test_loop', 'root/test2_loop']
monitor.custom_entries.append(entry)
rule = FormatRule(id='Test', suffixes=['loop', 'index'],
new_entry_suffix='progress',
new_entry_formatting='{index}/{loop}')
monitor.rules.append(rule)
monitor.rules.append(monitor._plugin.build_rule('Measurement entries'))
database.observe('notifier', monitor.handle_database_entries_change)
database.set_value('root', 'test_loop', 10)
database.set_value('root', 'test_index', 1)
database.set_value('root', 'test2_index', 1)
database.set_value('root', 'test2_loop', 10)
state = monitor.get_state()
assert 'rule_0' in state
rule = state['rule_0']
assert (rule == {'class_id': 'exopy.FormatRule', 'id': 'Test',
'description': '',
'hide_entries': 'True',
'suffixes': repr([u'loop', u'index']),
'new_entry_suffix': 'progress',
'new_entry_formatting': '{index}/{loop}'})
assert 'custom_0' in state
custom = state['custom_0']
aux = {'name': 'Custom', 'path': 'custom',
'formatting': 'This test n {root/test_loop}*{root/test2_loop}',
'depend_on': repr([u'root/test_loop', u'root/test2_loop'])}
assert custom == aux
assert (state['displayed'] ==
repr([e.path for e in monitor.displayed_entries]))
assert (state['undisplayed'] ==
repr([e.path for e in monitor.undisplayed_entries]))
assert (state['hidden'] ==
repr([e.path for e in monitor.hidden_entries]))
monitor._clear_state()
import exopy.measurement.monitors.text_monitor.monitor as mod
monkeypatch.setattr(mod, 'information',
lambda *args, **kwargs: print(args, kwargs))
monitor.set_state(state)
assert monitor.rules
assert monitor.rules[0].id == 'Test'
assert monitor._state
state2 = monitor.get_state()
assert state == state2
from exopy.tasks.tasks.database import TaskDatabase
monkeypatch.setattr(TaskDatabase, 'list_all_entries',
lambda *args, **kwargs: {'root/test_loop': 10,
'root/test2_index': 1,
'root/test_index': 1,
'root/test2_loop': 10,
'root/r': 1})
monitor.link_to_measurement(measurement)
assert not monitor._state
print(sorted([e.path for e in monitor.displayed_entries]))
assert (sorted([e.path for e in monitor.displayed_entries]) ==
sorted(['custom', 'root/test_progress', 'root/test2_progress',
'root/r']))
def test_known_monitored_entries(monitor):
""" Test all_database_entries property.
"""
test = {'test': 1, '2': 'aux'}
monitor._database_values = test
assert sorted(monitor.known_monitored_entries) == sorted(test)
def test_edition_window(exopy_qtbot, text_monitor_workbench, dialog_sleep,
monkeypatch):
"""Test the capabalities of the widget used to edit a text monitor.
"""
p = text_monitor_workbench.get_plugin(
'exopy.measurement.monitors.text_monitor')
m = p.create_monitor(False)
m.rules.append(p.build_rule(dict(id='test', class_id='exopy.FormatRule',
new_entry_formatting='{index}/{number}',
suffixes=['index', 'number'],
new_entry_suffix='progress')))
assert m.rules[0]
m.custom_entries.append(MonitoredEntry(name='dummy', path='dummy',
formatting='2*{root/test}',
depend_on=['root/test']))
m.handle_database_entries_change(('added', 'root/test', 0))
m.handle_database_entries_change(('added', 'root/simp/t_test2', 0))
m.handle_database_entries_change(('added', 'root/comp/t_index', 0))
m.handle_database_entries_change(('added', 'root/comp/t_number', 0))
assert len(m.displayed_entries) == 4
assert len(m.hidden_entries) == 2
w = ContainerTestingWindow(widget=TextMonitorEdit(monitor=m))
w.show()
wait_for_window_displayed(exopy_qtbot, w)
exopy_qtbot.wait(dialog_sleep)
editor = w.widget
# Test hide all
editor.widgets()[6].clicked = True
def assert_displayed_entries():
assert not m.displayed_entries
exopy_qtbot.wait_until(assert_displayed_entries)
exopy_qtbot.wait(dialog_sleep)
# Test show one
editor.widgets()[1].selected_item = m.undisplayed_entries[0]
editor.widgets()[5].clicked = True
def assert_displayed_entries():
assert m.displayed_entries
exopy_qtbot.wait_until(assert_displayed_entries)
exopy_qtbot.wait(dialog_sleep)
# Test hide one
editor.widgets()[3].selected_item = m.displayed_entries[0]
editor.widgets()[7].clicked = True
def assert_displayed_entries():
assert not m.displayed_entries
exopy_qtbot.wait_until(assert_displayed_entries)
exopy_qtbot.wait(dialog_sleep)
# Test show all
editor.widgets()[4].clicked = True
def assert_displayed_entries():
assert not m.undisplayed_entries
exopy_qtbot.wait_until(assert_displayed_entries)
exopy_qtbot.wait(dialog_sleep)
# Test show hidden
editor.widgets()[8].checked = True
def assert_hidden():
assert m.hidden_entries
exopy_qtbot.wait_until(assert_hidden)
for e in m.hidden_entries:
assert e in m.undisplayed_entries
# Test edit rules
def handle_rule_edition(bot, dialog):
dialog.monitor.rules.append(RejectRule(id='__dummy',
suffixes=['test2']))
dialog.monitor.refresh_monitored_entries()
with handle_dialog(exopy_qtbot, handler=handle_rule_edition):
editor.widgets()[9].clicked = True
assert 't_test2' not in [e.name for e in m.displayed_entries]
# Test add entry
def handle_entry_creation(bot, dialog):
dialog.entry = MonitoredEntry(name='new_entry')
with handle_dialog(exopy_qtbot, handler=handle_entry_creation):
editor.widgets()[10].clicked = True
assert 'new_entry' in [e.name for e in m.displayed_entries]
# Test edit entry
e = [e for e in m.displayed_entries if e.name == 'new_entry'][0]
editor.selected = e
with handle_dialog(exopy_qtbot, 'reject'):
editor.widgets()[11].clicked = True
# Test delete entry
with enaml.imports():
from exopy.measurement.monitors.text_monitor import monitor_views
def false_question(*args, **kwargs):
class Obj(object):
action = 'reject'
return Obj
monkeypatch.setattr(monitor_views, 'question', false_question)
editor.widgets()[12].clicked = True
def assert_entry():
assert e in m.displayed_entries
exopy_qtbot.wait_until(assert_entry)
exopy_qtbot.wait(dialog_sleep)
m.add_entries('undisplayed', [e])
with enaml.imports():
from exopy.measurement.monitors.text_monitor import monitor_views
def false_question(*args, **kwargs):
class Obj(object):
action = 'accept'
return Obj
monkeypatch.setattr(monitor_views, 'question', false_question)
editor.widgets()[12].clicked = True
assert e not in m.displayed_entries
exopy_qtbot.wait(dialog_sleep)
def test_text_monitor_item(exopy_qtbot, text_monitor_workbench, monitor,
dialog_sleep):
"""Test that the dock item of the text monitor does display the right
entries.
"""
# Check only displayed entries are indeed shown.
monitor.handle_database_entries_change(('added', 'root/test', 0))
monitor.handle_database_entries_change(('added', 'root/simp/test', 0))
monitor.handle_database_entries_change(('added', 'root/comp/index', 0))
monitor.move_entries('displayed', 'undisplayed',
[monitor.displayed_entries[0]])
w = DockItemTestingWindow(widget=TextMonitorItem(monitor=monitor,
name='test'))
w.show()
wait_for_window_displayed(exopy_qtbot, w)
f = w.widget.dock_widget().widgets()[0].scroll_widget()
assert (sorted([l.text for l in f.widgets()[::2]]) ==
sorted([e.name for e in monitor.displayed_entries]))
exopy_qtbot.wait(dialog_sleep)
e = sorted(monitor.displayed_entries, key=attrgetter('path'))[0]
e.name = 'new'
e.value = '1'
def assert_text():
assert f.widgets()[0].text == 'new'
assert f.widgets()[1].text == '1'
exopy_qtbot.wait_until(assert_text)
```
#### File: measurement/workspace/test_checks_display.py
```python
import enaml
from exopy.testing.util import handle_question, wait_for_window_displayed
with enaml.imports():
from exopy.measurement.workspace.checks_display import ChecksDisplay
def test_checks_display_not_warning(exopy_qtbot, dialog_sleep):
"""Test displaying checks for a situation that do not allow enqueuing.
"""
dial = ChecksDisplay(errors={'test': 'dummy', 'complex': {'rr': 'tt'}})
dial.show()
wait_for_window_displayed(exopy_qtbot, dial)
exopy_qtbot.wait(dialog_sleep)
assert dial.central_widget().widgets()[-1].text == 'Force enqueue'
with handle_question(exopy_qtbot, 'no'):
dial.central_widget().widgets()[-1].clicked = True
def assert_result():
assert not dial.result
exopy_qtbot.wait_until(assert_result)
def test_checks_display_not_warning_force_enqueue(exopy_qtbot, dialog_sleep):
"""Test displaying checks for a situation that do not allow enqueuing.
"""
dial = ChecksDisplay(errors={'test': 'dummy', 'complex': {'rr': 'tt'}})
dial.show()
wait_for_window_displayed(exopy_qtbot, dial)
exopy_qtbot.wait(dialog_sleep)
assert dial.central_widget().widgets()[-1].text == 'Force enqueue'
with handle_question(exopy_qtbot, 'yes'):
dial.central_widget().widgets()[-1].clicked = True
def assert_result():
assert dial.result
exopy_qtbot.wait_until(assert_result)
def test_checks_display_warning(exopy_qtbot, dialog_sleep):
"""Test displaying checks that allow enqueuing.
"""
dial = ChecksDisplay(errors={'test': 'dummy', 'internal': {'rr': 'tt'}},
is_warning=True)
dial.show()
wait_for_window_displayed(exopy_qtbot, dial)
exopy_qtbot.wait(dialog_sleep)
assert dial.central_widget().widgets()[-1].text == 'Enqueue'
dial.central_widget().widgets()[-1].clicked = True
def assert_result():
assert dial.result
exopy_qtbot.wait_until(assert_result)
```
#### File: measurement/workspace/test_measurement_execution.py
```python
import pytest
import enaml
from exopy.measurement.measurement import Measurement
from exopy.tasks.api import RootTask
from exopy.testing.util import (handle_dialog, wait_for_window_displayed,
CallSpy)
with enaml.imports():
from exopy.testing.windows import (ContainerTestingWindow,
DockItemTestingWindow)
from exopy.measurement.workspace.measurement_execution\
import MeasView, ExecutionDockItem
@pytest.fixture
def execution_view(measurement_workbench, workspace, exopy_qtbot):
"""Start plugins and add measurements before creating the execution view.
"""
pl = measurement_workbench.get_plugin('exopy.measurement')
pl.enqueued_measurements.add(Measurement(plugin=pl, root_task=RootTask(),
name='Dummy', id='001'))
pl.enqueued_measurements.add(Measurement(plugin=pl, root_task=RootTask(),
name='Dummy', id='002'))
pl.enqueued_measurements.measurements[1].name = 'dummy_test'
pl.selected_engine = 'dummy'
engine = pl.create('engine', pl.selected_engine)
pl.processor.engine = engine
item = ExecutionDockItem(workspace=workspace)
return DockItemTestingWindow(widget=item)
def test_measurement_view(measurement, exopy_qtbot, dialog_sleep, monkeypatch,
workspace):
"""Test that the displayed buttons do reflect the state of the measurement.
"""
measurement.status = 'READY'
view = MeasView(model=measurement)
w = ContainerTestingWindow(widget=view)
w.show()
wait_for_window_displayed(exopy_qtbot, w)
exopy_qtbot.wait(dialog_sleep)
assert view.widgets()[2].enabled # cd1 inserted its children before itself
def test_state(bot, dial):
assert dial.measurement.status == 'EDITING'
with handle_dialog(exopy_qtbot, 'reject', handler=test_state):
view.widgets()[2].clicked = True
assert view.widgets()[-1].enabled
measurement.plugin.processor.active = True
def assert_enabled():
assert not view.widgets()[-1].enabled
exopy_qtbot.wait_until(assert_enabled)
measurement.plugin.processor.active = False
from exopy.measurement.workspace.workspace import MeasurementSpace
spy = CallSpy()
monkeypatch.setattr(MeasurementSpace, 'process_single_measurement', spy)
view.widgets()[-1].clicked = True
assert spy.called
measurement.status = 'RUNNING'
def assert_widgets():
assert len(view.widgets()) == 2
exopy_qtbot.wait_until(assert_widgets)
measurement.status = 'COMPLETED'
def assert_widgets():
assert len(view.widgets()) == 3
exopy_qtbot.wait_until(assert_widgets)
spy = CallSpy()
monkeypatch.setattr(MeasurementSpace, 'reenqueue_measurement', spy)
view.widgets()[-1].clicked = True
assert view.widgets()[1].text == 'COMPLETED'
def test_measurement_manipulations(exopy_qtbot, execution_view, dialog_sleep):
"""Test moving/removing measurement using editor
"""
execution_view.show()
wait_for_window_displayed(exopy_qtbot, execution_view)
exopy_qtbot.wait(dialog_sleep)
item = execution_view.widget
ed = item.dock_widget().widgets()[0]
meas = item.workspace.plugin.enqueued_measurements.measurements
ed.operations['move'](0, 1)
def assert_meas_name():
assert meas[0].name == 'dummy_test'
exopy_qtbot.wait_until(assert_meas_name)
ed.operations['move'](0, 1)
def assert_meas_name():
assert meas[1].name == 'dummy_test'
exopy_qtbot.wait_until(assert_meas_name)
ed.operations['remove'](0)
def assert_meas_name():
assert meas[0].name == 'dummy_test'
exopy_qtbot.wait_until(assert_meas_name)
assert len(meas) == 1
def test_start_button(exopy_qtbot, execution_view, monkeypatch, dialog_sleep):
"""Test that the start button displays the right text and called the
appropriate method.
"""
execution_view.show()
wait_for_window_displayed(exopy_qtbot, execution_view)
exopy_qtbot.wait(dialog_sleep)
item = execution_view.widget
from exopy.measurement.workspace.workspace import MeasurementSpace
spies = {}
for n in ('start_processing_measurements', 'resume_current_measurement',
'pause_current_measurement'):
spy = CallSpy()
monkeypatch.setattr(MeasurementSpace, n, spy)
spies[n] = spy
st_btn = item.dock_widget().widgets()[1]
assert st_btn.enabled
assert st_btn.text == 'Start'
st_btn.clicked = True
def assert_called():
assert spies['start_processing_measurements'].called
exopy_qtbot.wait_until(assert_called)
meas = item.workspace.plugin.enqueued_measurements.measurements[0]
item.workspace.plugin.processor.running_measurement = meas
item.workspace.plugin.processor.active = True
def assert_enabled():
assert st_btn.enabled
assert st_btn.text == 'Pause'
exopy_qtbot.wait_until(assert_enabled)
st_btn.clicked = True
def assert_called():
assert spies['pause_current_measurement'].called
exopy_qtbot.wait_until(assert_called)
meas.status = 'PAUSING'
def assert_enabled():
assert not st_btn.enabled
exopy_qtbot.wait_until(assert_enabled)
meas.status = 'PAUSED'
def assert_enabled():
assert st_btn.enabled
assert st_btn.text == 'Resume'
exopy_qtbot.wait_until(assert_enabled)
st_btn.clicked = True
def assert_called():
assert spies['resume_current_measurement'].called
exopy_qtbot.wait_until(assert_called)
def test_stop_button(exopy_qtbot, execution_view, monkeypatch, dialog_sleep):
"""Test the behavoir of the stop button.
"""
execution_view.show()
wait_for_window_displayed(exopy_qtbot, execution_view)
exopy_qtbot.wait(dialog_sleep)
item = execution_view.widget
from exopy.measurement.workspace.workspace import MeasurementSpace
spy = CallSpy()
monkeypatch.setattr(MeasurementSpace, 'stop_current_measurement', spy)
with enaml.imports():
from exopy.measurement.workspace import measurement_execution
qspy = CallSpy()
monkeypatch.setattr(measurement_execution, 'question', qspy)
st_btn = item.dock_widget().widgets()[2]
# Check idle state.
assert not st_btn.enabled
assert st_btn.text == 'Stop'
assert not st_btn.stopping
# Check enabled when running.
meas = item.workspace.plugin.enqueued_measurements.measurements[0]
item.workspace.plugin.processor.running_measurement = meas
item.workspace.plugin.processor.active = True
assert st_btn.enabled
# Stop and skip
item.workspace.plugin.processor.continuous_processing = False
skip = st_btn.children[0].children[0]
skip.triggered = True
def assert_called():
assert spy.called
assert spy.kwargs['no_post_exec']
assert not spy.kwargs['force']
assert not qspy.called
exopy_qtbot.wait_until(assert_called)
# Stop and don't skip
item.workspace.plugin.processor.continuous_processing = True
no_skip = st_btn.children[0].children[1]
no_skip.triggered = True
def assert_called():
assert spy.called == 2
assert not spy.kwargs.get('no_post_exec')
assert not spy.kwargs['force']
assert qspy.called
assert not item.workspace.plugin.processor.continuous_processing
exopy_qtbot.wait_until(assert_called)
# Check stopping behavior
meas.status = 'STOPPING'
assert st_btn.stopping
assert st_btn.text == 'Force stop'
# Check force stopping and no question when no measurement remains in queue
item.workspace.plugin.enqueued_measurements.remove(meas)
qspy.called = 0
no_skip.triggered = True
def assert_called():
assert spy.kwargs['force']
assert not qspy.called
exopy_qtbot.wait_until(assert_called)
spy.kwargs = {}
skip.triggered = True
def assert_called():
assert spy.kwargs['force']
assert not qspy.called
exopy_qtbot.wait_until(assert_called)
def test_continuous_processing(exopy_qtbot, execution_view, dialog_sleep):
"""Test that the checkbox does reflect the underlying setting.
"""
execution_view.show()
wait_for_window_displayed(exopy_qtbot, execution_view)
exopy_qtbot.wait(dialog_sleep)
item = execution_view.widget
ch_box = item.dock_widget().widgets()[3]
proc = item.workspace.plugin.processor
def assert_checked():
assert ch_box.checked == proc.continuous_processing
assert_checked()
ch_box.checked = not proc.continuous_processing
exopy_qtbot.wait_until(assert_checked)
proc.continuous_processing = not ch_box.checked
exopy_qtbot.wait_until(assert_checked)
def test_clear(exopy_qtbot, execution_view, dialog_sleep):
"""Test clearing the enqueued measurements.
"""
execution_view.show()
wait_for_window_displayed(exopy_qtbot, execution_view)
exopy_qtbot.wait(dialog_sleep)
item = execution_view.widget
cl_btn = item.dock_widget().widgets()[4]
assert cl_btn.enabled
# Check disabled when running
meas = item.workspace.plugin.enqueued_measurements.measurements[0]
item.workspace.plugin.processor.running_measurement = meas
item.workspace.plugin.processor.active = True
assert not cl_btn.enabled
item.workspace.plugin.processor.active = False
assert cl_btn.enabled
measurements = item.workspace.plugin.enqueued_measurements.measurements
measurements[0].status = 'COMPLETED'
measurements[1].status = 'FAILED'
cl_btn.clicked = True
def assert_enabled():
assert not item.workspace.plugin.enqueued_measurements.measurements
assert not cl_btn.enabled
exopy_qtbot.wait_until(assert_enabled)
def test_show_monitors(exopy_qtbot, execution_view, dialog_sleep):
"""Test restoring the monitor window.
"""
execution_view.show()
wait_for_window_displayed(exopy_qtbot, execution_view)
exopy_qtbot.wait(dialog_sleep)
item = execution_view.widget
mon_btn = item.dock_widget().widgets()[5]
assert not mon_btn.enabled
with enaml.imports():
from exopy.measurement.workspace.monitors_window import MonitorsWindow
meas = item.workspace.plugin.enqueued_measurements.measurements[0]
mon_win = MonitorsWindow(item, measurement=meas)
item.workspace.plugin.processor.monitors_window = mon_win
assert not mon_win.visible
mon_btn.clicked = True
def assert_visible():
assert mon_win.visible
exopy_qtbot.wait_until(assert_visible)
def test_engine_status(exopy_qtbot, execution_view, dialog_sleep):
"""Test the display of the engine status.
"""
execution_view.show()
wait_for_window_displayed(exopy_qtbot, execution_view)
exopy_qtbot.wait(dialog_sleep)
item = execution_view.widget
del item.workspace.plugin.processor.engine
en_stat = item.dock_widget().widgets()[-1]
assert not en_stat.visible
pl = item.workspace.plugin
pl.processor.engine = pl.create('engine', 'dummy')
pl.processor.engine.status = 'Stopped'
def assert_visible():
assert en_stat.visible
exopy_qtbot.wait_until(assert_visible)
def assert_status():
assert en_stat.widgets()[1].text == 'Stopped'
exopy_qtbot.wait_until(assert_status)
meas = item.workspace.plugin.enqueued_measurements.measurements[0]
item.workspace.plugin.processor.running_measurement = meas
item.workspace.plugin.processor.active = True
assert not en_stat.widgets()[2].enabled
assert en_stat.widgets()[2].text == 'Shut down'
```
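The tests above verify the UI wiring by monkeypatching workspace methods with exopy's `CallSpy` helper and asserting on its `called` and `kwargs` attributes. The sketch below is a minimal stand-in showing how such a spy works; it is illustrative only and the real `exopy.testing.util.CallSpy` may differ in detail.
```python
# Minimal call-spy sketch (illustrative; not exopy's actual CallSpy).
class MinimalCallSpy:
    """Record how many times a patched callable was invoked and with what."""

    def __init__(self):
        self.called = 0
        self.args = ()
        self.kwargs = {}

    def __call__(self, *args, **kwargs):
        self.called += 1
        self.args = args
        self.kwargs = kwargs


class FakeWorkspace:
    def start_processing_measurements(self):
        pass


# Equivalent of monkeypatch.setattr(FakeWorkspace, 'start_processing_measurements', spy)
spy = MinimalCallSpy()
FakeWorkspace.start_processing_measurements = spy
FakeWorkspace().start_processing_measurements(force=True)
assert spy.called == 1 and spy.kwargs['force']
```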
#### File: tasks/configs/test_loop_config.py
```python
import enaml
from exopy.testing.util import show_and_close_widget, show_widget
from exopy.tasks.tasks.base_tasks import RootTask
from exopy.tasks.configs.loop_config import (LoopTaskConfig)
with enaml.imports():
from exopy.tasks.configs.loop_config_view import LoopConfigView
def test_loop_config(exopy_qtbot, task_workbench):
"""Test the loop config.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
root = RootTask()
config = LoopTaskConfig(manager=plugin,
task_class=plugin.get_task('exopy.LoopTask'),
future_parent=root)
assert config.task_name
assert config.ready
assert config.task_doc
config.task_name = ''
assert not config.ready
config.task_name = 'Test'
assert config.ready
task = config.build_task()
assert task.name == 'Test'
root.add_child_task(0, task)
config2 = LoopTaskConfig(manager=plugin,
task_class=plugin.get_task('exopy.LoopTask'),
future_parent=root)
config2.task_name = 'Test'
assert not config2.ready
config2.task_name = 'ADifferentName'
assert config2.ready
plugin.auto_task_names = []
config = LoopTaskConfig(manager=plugin,
task_class=plugin.get_task('exopy.LoopTask'))
assert not config.task_name
assert not config.ready
show_and_close_widget(exopy_qtbot, LoopConfigView(config=config))
def test_loop_config_with_subtask(task_workbench, exopy_qtbot, dialog_sleep,
monkeypatch):
"""Test the loop config.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
config = LoopTaskConfig(manager=plugin,
task_class=plugin.get_task('exopy.LoopTask'),
future_parent=RootTask(),
task_name='Test')
show_widget(exopy_qtbot, LoopConfigView(config=config))
assert config.ready
exopy_qtbot.wait(dialog_sleep)
config.use_subtask = True
assert not config.ready
exopy_qtbot.wait(dialog_sleep + 100)
config.subtask = 'exopy.BreakTask'
assert config.ready
exopy_qtbot.wait(dialog_sleep + 100)
def dummy(self):
self.ready = False
monkeypatch.setattr(type(config.subconfig), 'check_parameters',
dummy)
config.task_name = 'Bis'
assert config.subconfig.task_name == 'Bis' # Check sync
assert not config.ready # Result from the monkeypatch
exopy_qtbot.wait(dialog_sleep + 100)
config.use_subtask = False
assert config.ready
exopy_qtbot.wait(dialog_sleep + 100)
config.use_subtask = True
config.subtask = 'exopy.ContinueTask'
task = config.build_task()
assert task.name == 'Bis'
assert type(task.task).__name__ == 'ContinueTask'
```
#### File: tasks/tasks/false_driver.py
```python
from exopy.instruments.api import BaseStarter
class FalseDriver(object):
"""False driver to test the declarator.
"""
pass
class FalseDriver2(object):
"""False driver to test the declarator.
"""
pass
class FalseDriver3(object):
"""False driver to test the declarator.
"""
pass
class FalseDriver4(object):
"""False driver to test the declarator.
"""
pass
class FalseDriver5(object):
"""False driver to test the declarator.
"""
pass
class FalseDriver6(object):
"""False driver to test the declarator.
"""
pass
class DummyStarter(BaseStarter):
"""Dummy starter for testing purposes.
"""
def start(self, driver, connection, settings):
pass
def check_infos(self, driver, connection, settings):
return True, ''
def reset(self, driver):
pass
def stop(self, driver):
pass
```
#### File: tasks/logic/test_conditional_task.py
```python
import gc
import pytest
import enaml
from multiprocessing import Event
from exopy.testing.tasks.util import CheckTask
from exopy.testing.util import show_and_close_widget
from exopy.tasks.api import RootTask
from exopy.tasks.tasks.logic.conditional_task import ConditionalTask
with enaml.imports():
from exopy.tasks.tasks.logic.views.conditional_view import ConditionalView
class TestConditionTask(object):
"""Test ConditionalTask.
"""
def setup(self):
self.root = RootTask(should_stop=Event(), should_pause=Event())
self.task = ConditionalTask(name='Test')
self.root.add_child_task(0, self.task)
self.check = CheckTask(name='check')
self.task.add_child_task(0, self.check)
def teardown(self):
del self.root.should_pause
del self.root.should_stop
# Ensure we collect the file descriptor of the events. Otherwise we can
# get funny errors on MacOS.
gc.collect()
def test_check1(self):
"""Test that everything is ok if condition is evaluable.
"""
self.task.condition = 'True'
test, traceback = self.task.check()
assert test
assert not traceback
assert self.check.check_called
def test_check2(self):
"""Test handling a wrong condition.
"""
self.task.condition = '*True'
test, traceback = self.task.check(test_instr=True)
assert not test
assert len(traceback) == 1
assert 'root/Test-condition' in traceback
def test_perform1(self):
"""Test performing when condition is True.
"""
self.task.condition = 'True'
self.root.prepare()
self.task.perform()
assert self.check.perform_called
def test_perform2(self):
"""Test performing when condition is False.
"""
self.task.condition = '1 < 0'
self.root.prepare()
self.task.perform()
assert not self.check.perform_called
def test_perform3(self):
"""Test performing when condition is False.
"""
self.task.condition = 'False'
self.root.prepare()
self.task.perform()
assert not self.check.perform_called
@pytest.mark.ui
def test_view(exopy_qtbot):
"""Test the ConditionalTask view.
"""
show_and_close_widget(exopy_qtbot,
ConditionalView(task=ConditionalTask(name='Test')))
```
#### File: tasks/tasks/test_instr_view.py
```python
import os
import pytest
import enaml
from configobj import ConfigObj
from exopy.testing.util import (handle_dialog,
show_and_close_widget)
from exopy.testing.instruments.util import add_profile
from exopy.tasks.api import (RootTask, InstrTaskView, TaskInterface,
InstrumentTask, InterfaceableTaskMixin)
from exopy.tasks.infos import TaskInfos, InterfaceInfos
with enaml.imports():
from exopy.tasks.manifest import TasksManagerManifest
from exopy.tasks.tasks.base_views import RootTaskView
from .instrument_contributor import InstrContributor
PROFILE_PATH = os.path.join(os.path.dirname(__file__), 'fp.instr.ini')
class InterInstrTask(InterfaceableTaskMixin, InstrumentTask):
task_id = 'exopy.InstrumentTask'
@pytest.fixture
def instr_task_workbench(instr_workbench):
"""Workbench with instrument and task support and patched task.
"""
w = instr_workbench
w.register(InstrContributor())
c = ConfigObj(PROFILE_PATH, encoding='utf-8')
add_profile(instr_workbench, c, ['fp1', 'fp2', 'fp3', 'fp4'])
w.register(TasksManagerManifest())
p = w.get_plugin('exopy.tasks')
infos = TaskInfos(cls=InterInstrTask,
instruments=['tasks.test.FalseDriver'])
infos.interfaces = \
{'test.I': InterfaceInfos(cls=TaskInterface, parent=infos,
instruments=['tasks.test.FalseDriver4',
'tasks.test.FalseDriver2']
)
}
p._tasks.contributions['exopy.InstrumentTask'] = infos
return w
@pytest.fixture
def instr_view(instr_task_workbench):
"""Initialize a instr view.
"""
c = instr_task_workbench.get_plugin('enaml.workbench.core')
task = RootTask()
view = RootTaskView(task=task, core=c)
i_view = InstrTaskView(task=InterInstrTask(root=task), root=view)
i_view.set_parent(view)
return i_view
def test_instr_view_display(instr_view, exopy_qtbot):
"""Test displaying the instyr_view.
"""
show_and_close_widget(exopy_qtbot, instr_view)
def test_profile_filtering(instr_task_workbench, instr_view):
"""Test filtering the profiles allowed for a task.
"""
c = ConfigObj(PROFILE_PATH, encoding='utf-8')
c['model_id'] = 'Dummy.dumb.002'
add_profile(instr_task_workbench, c, ['fp5'])
p = instr_task_workbench.get_plugin('exopy.instruments')
filtered = instr_view.filter_profiles(p._profiles)
assert 'fp5' not in filtered
c['model_id'] = 'Dummy.dumb.003'
add_profile(instr_task_workbench, c, ['fp6'])
p = instr_task_workbench.get_plugin('exopy.instruments')
filtered = instr_view.filter_profiles(p._profiles)
assert 'fp6' in filtered
def test_driver_filtering(instr_task_workbench, instr_view):
"""Test filtering the drivers allowed for a task.
"""
p = instr_task_workbench.get_plugin('exopy.instruments')
filtered = instr_view.filter_drivers(p._profiles['fp1'].model.drivers)
assert len(filtered) == 2
pt = instr_task_workbench.get_plugin('exopy.tasks')
del pt._tasks.contributions['exopy.InstrumentTask'].interfaces
filtered = instr_view.filter_drivers(p._profiles['fp1'].model.drivers)
assert len(filtered) == 1
def test_make_tooltip(instr_view):
"""Test building the tool tip based on the selected instrument.
"""
selected = ('p', 'd', 'c', None)
t = instr_view.make_selected_instrument_tooltip(selected)
assert 'settings' not in t
selected = ('p', 'd', 'c', 's')
t = instr_view.make_selected_instrument_tooltip(selected)
assert 'settings' in t
def test_select_instrument(instr_task_workbench, instr_view, exopy_qtbot):
"""Test selecting an instrument from the view.
"""
tool_btn = instr_view.widgets()[-1].widgets()[-1]
selec = ('fp1', 'tasks.test.FalseDriver',
'false_connection', 'false_settings')
instr_view.task.selected_instrument = selec
with handle_dialog(exopy_qtbot, 'reject'):
tool_btn.clicked = True
assert instr_view.task.selected_instrument == selec
with handle_dialog(exopy_qtbot, 'accept'):
tool_btn.clicked = True
assert instr_view.task.selected_instrument == selec
def test_select_interface_based_on_instrument(instr_task_workbench,
instr_view):
"""Test finding the interface matching the selected instrument.
"""
instr_view.select_interface()
assert not instr_view.task.interface
selec = ('fp1', 'tasks.test.FalseDriver2',
'false_connection', 'false_settings')
instr_view.task.selected_instrument = selec
instr_view.select_interface()
assert instr_view.task.interface
# Check that moving back to an instrument with no associated interface
# does discard the interface.
selec = ('fp1', 'tasks.test.FalseDriver',
'false_connection', 'false_settings')
instr_view.task.selected_instrument = selec
instr_view.select_interface()
assert not instr_view.task.interface
selec = ('fp1', 'tasks.test.FalseDriver2',
'false_connection', 'false_settings')
instr_view.task.selected_instrument = selec
instr_view.select_interface()
instr_view.task.selected_instrument = ()
instr_view.select_interface()
assert not instr_view.task.interface
```
#### File: tasks/tasks/test_task_interfaces.py
```python
import pytest
from atom.api import Bool, Unicode, set_default
from exopy.tasks.tasks.base_tasks import ComplexTask, RootTask
from exopy.tasks.tasks.validators import Feval
from exopy.tasks.tasks.task_interface import (InterfaceableTaskMixin,
TaskInterface,
InterfaceableInterfaceMixin,
IInterface)
class InterfaceTest(TaskInterface):
"""Base task interface for testing purposes.
"""
#: Control flag for the check method.
answer = Bool()
#: Flag indicating whether or not the check method was called.
called = Bool()
database_entries = set_default({'itest': 1.0})
def check(self, *args, **kwargs):
self.called = True
if self.answer:
return True, {}
else:
return False, {'i': 0}
def prepare(self):
self.called = True
def perform(self):
self.task.write_in_database('itest', 2.0)
class InterfaceTest2(TaskInterface):
"""Subclass with a different default value for the database entry.
"""
#: Member to test auto formatting of tagged members.
fmt = Unicode().tag(fmt=True)
#: Member to test auto evaluation of tagged members.
feval = Unicode().tag(feval=Feval())
database_entries = set_default({'fmt': '', 'feval': 0, 'itest': 2.0})
class InterfaceTest2bis(TaskInterface):
"""Subclass with a different default value for the database entry.
"""
#: Member to test auto formatting of tagged members.
fmt = Unicode().tag(fmt=True)
#: Member to test auto evaluation of tagged members.
feval = Unicode().tag(feval=object())
database_entries = set_default({'fmt': '', 'feval': 0, 'itest': 2.0})
class InterfaceTest3(InterfaceableInterfaceMixin, TaskInterface):
"""Interfaceable interface
"""
database_entries = set_default({'test': 2.0})
class InterfaceTest4(InterfaceableInterfaceMixin, TaskInterface):
"""Interfaceable interface with default interface.
"""
database_entries = set_default({'test': 2.0})
def i_perform(self):
self.task.write_in_database('test', 3.0)
class IIinterfaceTest1(IInterface):
"""Base IInterface for testing.
"""
#: Control flag for the check method.
answer = Bool()
#: Flag indicating whether or not the check method was called.
called = Bool()
database_entries = set_default({'itest': 1.0})
def check(self, *args, **kwargs):
self.called = True
if self.answer:
return True, {}
else:
return False, {'i': 0}
def perform(self):
self.task.write_in_database('itest', 2.0)
class IIinterfaceTest2(IInterface):
"""Base IInterface for testing.
"""
#: Member to test auto formatting of tagged members.
fmt = Unicode().tag(fmt=True)
#: Member to test auto evaluation of tagged members.
feval = Unicode().tag(feval=Feval())
database_entries = set_default({'fmt': '', 'feval': 0, 'itest': 2.0})
class Mixin(InterfaceableTaskMixin, ComplexTask):
"""Complex task with interfaces.
"""
database_entries = set_default({'test': 2.0})
class IMixin(InterfaceableTaskMixin, ComplexTask):
"""Complex task with support for interfaces but with a default behavior.
"""
database_entries = set_default({'test': 2.0})
def i_perform(self):
self.write_in_database('test', 3.0)
class TestInterfaceableTaskMixin(object):
"""Test the capabilities of task interfaces.
"""
def setup(self):
self.root = RootTask()
self.mixin = Mixin(name='Simple')
self.root.add_child_task(0, self.mixin)
def test_interface_observer(self):
"""Test changing the interface.
"""
i1 = InterfaceTest()
i2 = InterfaceTest2()
self.mixin.interface = i1
assert i1.task is self.mixin
assert self.mixin.database_entries == {'test': 2.0, 'itest': 1.0}
assert i1.interface_id == (self.mixin.task_id +
':tasks.' + i1.__class__.__name__)
self.mixin.interface = i2
assert i2.task is self.mixin
assert i1.task is None
assert self.mixin.database_entries == {'test': 2.0, 'itest': 2.0,
'fmt': '', 'feval': 0}
def test_check1(self):
"""Test running checks when the interface is present.
"""
self.mixin.interface = InterfaceTest(answer=True)
res, traceback = self.mixin.check()
assert res
assert not traceback
assert self.mixin.interface.called
def test_check2(self):
"""Test running checks when no interface exist but i_perform is
implemented.
"""
res, traceback = IMixin().check()
assert res
assert not traceback
def test_check3(self):
"""Test handling missing interface.
"""
res, traceback = self.mixin.check()
assert not res
assert traceback
assert len(traceback) == 1
assert 'root/Simple-interface' in traceback
def test_check4(self):
"""Test handling a non-passing test from the interface.
"""
self.mixin.interface = InterfaceTest()
res, traceback = self.mixin.check()
assert not res
assert len(traceback) == 1
assert self.mixin.interface.called
def test_check5(self):
"""Check that auto-check of fmt and feval tagged members works.
"""
self.mixin.interface = InterfaceTest2(fmt='{Simple_test}',
feval='2*{Simple_test}')
res, traceback = self.mixin.check()
assert res
assert not traceback
assert self.root.get_from_database('Simple_fmt') == '2.0'
assert self.root.get_from_database('Simple_feval') == 4.0
self.mixin.interface = InterfaceTest2bis(fmt='{Simple_test}',
feval='2*{Simple_test}')
res, traceback = self.mixin.check()
assert not res
assert 'root/Simple-feval' in traceback
def test_check6(self):
"""Check that auto-check of fmt and feavl handle errors.
"""
self.mixin.interface = InterfaceTest2(fmt='{Simple_test*}',
feval='2*{Simple_test}*')
res, traceback = self.mixin.check()
assert not res
assert self.root.get_from_database('Simple_fmt') == ''
assert self.root.get_from_database('Simple_feval') == 0
assert len(traceback) == 2
assert 'root/Simple-fmt' in traceback
assert 'root/Simple-feval' in traceback
def test_prepare(self):
"""Test that the prepare method does prepare the interface.
"""
self.mixin.interface = InterfaceTest()
self.mixin.prepare()
assert self.mixin.interface.called
def test_perform1(self):
"""Test perform does call interface if present.
"""
self.mixin.interface = InterfaceTest()
self.root.database.prepare_to_run()
self.mixin.perform()
assert self.mixin.get_from_database('Simple_itest') == 2.0
def test_perform2(self):
"""Test perform use i_perform when no interface exists.
"""
self.root.remove_child_task(0)
self.mixin = IMixin(name='Simple')
self.root.add_child_task(0, self.mixin)
self.root.database.prepare_to_run()
self.mixin.perform()
assert self.root.get_from_database('Simple_test') == 3.0
def test_build_from_config1(self):
"""Test building a interfaceable task with no interface from a config.
"""
aux = RootTask()
aux.add_child_task(0, IMixin())
bis = RootTask.build_from_config(aux.preferences,
{'exopy.task':
{'tasks.IMixin': IMixin,
'exopy.RootTask': RootTask}})
assert type(bis.children[0]).__name__ == 'IMixin'
def test_build_from_config2(self):
"""Test building a interfaceable task with an interface from a config.
"""
self.mixin.interface = InterfaceTest(answer=True)
self.root.update_preferences_from_members()
deps = {'exopy.task': {'tasks.Mixin': Mixin,
'exopy.RootTask': RootTask},
'exopy.tasks.interface':
{'tasks.Mixin:tasks.InterfaceTest': InterfaceTest}}
bis = RootTask.build_from_config(self.root.preferences, deps)
assert type(bis.children[0].interface).__name__ == 'InterfaceTest'
def test_traverse(self):
"""Test traversing a task with interface.
"""
self.mixin.interface = InterfaceTest2()
w = list(self.mixin.traverse())
assert w == [self.mixin, self.mixin.interface]
class TestInterfaceableInterfaceMixin(object):
"""Test the capabilities of task interfaces.
"""
def setup(self):
self.root = RootTask()
self.mixin = InterfaceTest3()
self.root.add_child_task(0, Mixin(name='Simple', interface=self.mixin))
def test_interface_observer(self):
"""Test changing the interface.
"""
i1 = IIinterfaceTest1()
i2 = IIinterfaceTest2()
self.mixin.interface = i1
assert i1.parent is self.mixin
assert i1.task is self.mixin.task
assert i1.interface_id == (self.mixin.interface_id +
':tasks.' + i1.__class__.__name__)
assert self.mixin.task.database_entries == {'test': 2.0, 'itest': 1.0}
self.mixin.interface = i2
assert i2.task is self.mixin.task
assert i1.parent is None
with pytest.raises(AttributeError):
i1.task
assert self.mixin.task.database_entries == {'test': 2.0, 'itest': 2.0,
'fmt': '', 'feval': 0}
def test_check1(self):
"""Test running checks when the interface is present.
"""
self.mixin.interface = IIinterfaceTest1(answer=True)
res, traceback = self.mixin.check()
assert res
assert not traceback
assert self.mixin.interface.called
def test_check2(self):
"""Test running checks when no interface exist but i_perform is
implemented.
"""
interface = InterfaceTest4()
self.root.children[0].interface = interface
res, traceback = interface.check()
assert res
assert not traceback
def test_check3(self):
"""Test handling missing interface.
"""
res, traceback = self.mixin.check()
assert not res
assert traceback
assert len(traceback) == 1
assert 'root/Simple/InterfaceTest3-interface' in traceback
def test_check4(self):
"""Test handling a non-passing test from the interface.
"""
self.mixin.interface = IIinterfaceTest1()
res, traceback = self.mixin.check()
assert not res
assert len(traceback) == 1
assert self.mixin.interface.called
def test_check5(self):
"""Check that auto-check of fmt and feavl tagged members works.
"""
self.mixin.interface = IIinterfaceTest2(fmt='{Simple_test}',
feval='2*{Simple_test}')
res, traceback = self.mixin.check()
assert res
assert not traceback
assert self.root.get_from_database('Simple_fmt') == '2.0'
assert self.root.get_from_database('Simple_feval') == 4.0
def test_check6(self):
"""Check that auto-check of fmt and feavl handle errors.
"""
self.mixin.interface = IIinterfaceTest2(fmt='{Simple_test*}',
feval='2*{Simple_test}*')
res, traceback = self.mixin.check()
assert not res
assert self.root.get_from_database('Simple_fmt') == ''
assert self.root.get_from_database('Simple_feval') == 0
assert len(traceback) == 2
assert 'root/Simple-fmt' in traceback
assert 'root/Simple-feval' in traceback
def test_perform1(self):
"""Test perform does call interface if present.
"""
self.mixin.interface = IIinterfaceTest1()
self.root.database.prepare_to_run()
self.mixin.perform()
assert self.root.get_from_database('Simple_itest') == 2.0
def test_perform2(self):
"""Test perform use i_perform when no interface exists.
"""
self.mixin = InterfaceTest4()
self.root.children[0].interface = self.mixin
self.root.database.prepare_to_run()
self.mixin.perform()
assert self.root.get_from_database('Simple_test') == 3.0
def test_build_from_config1(self):
"""Test building a interfaceable interface with no interface from a
config.
"""
aux = RootTask()
mixin = Mixin()
mixin.interface = InterfaceTest3()
aux.add_child_task(0, mixin)
deps = {'exopy.task': {'tasks.Mixin': Mixin,
'exopy.RootTask': RootTask},
'exopy.tasks.interface':
{'tasks.Mixin:tasks.InterfaceTest3': InterfaceTest3}}
bis = RootTask.build_from_config(aux.preferences, deps)
assert type(bis.children[0].interface).__name__ == 'InterfaceTest3'
def test_build_from_config2(self):
"""Test building a interfaceable interface with an interface from a
config.
"""
self.mixin.interface = IIinterfaceTest1(answer=True)
self.root.update_preferences_from_members()
deps = {'exopy.task': {'tasks.Mixin': Mixin,
'exopy.RootTask': RootTask},
'exopy.tasks.interface':
{'tasks.Mixin:tasks.InterfaceTest3': InterfaceTest3,
'tasks.Mixin:tasks.InterfaceTest3:tasks.IIinterfaceTest1':
IIinterfaceTest1
}
}
bis = RootTask.build_from_config(self.root.preferences, deps)
interface = bis.children[0].interface.interface
assert type(interface).__name__ == 'IIinterfaceTest1'
assert self.root.children[0].database_entries ==\
{'test': 2.0, 'itest': 1.0}
def test_traverse(self):
"""Test traversing a task with an interfaceable interface.
"""
class Test(InterfaceableInterfaceMixin, IIinterfaceTest2):
pass
iaux = IIinterfaceTest1()
self.mixin.interface = Test()
self.mixin.interface.interface = iaux
task = self.root.children[0]
w = list(task.traverse())
assert w == [task, self.mixin, self.mixin.interface, iaux]
w = list(task.traverse(1))
assert w == [task, self.mixin, self.mixin.interface]
```
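The dependency keys used in `test_build_from_config2` above follow directly from the id-composition rule checked in the `test_interface_observer` methods: an interface id is its parent's id plus `':tasks.'` plus the interface class name, and nesting simply extends the chain. A tiny illustration using the class names from these tests:
```python
# Illustration of the id composition asserted in the tests above.
task_id = 'tasks.Mixin'
interface_id = task_id + ':tasks.' + 'InterfaceTest3'
nested_interface_id = interface_id + ':tasks.' + 'IIinterfaceTest1'

assert interface_id == 'tasks.Mixin:tasks.InterfaceTest3'
assert nested_interface_id == 'tasks.Mixin:tasks.InterfaceTest3:tasks.IIinterfaceTest1'
```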
#### File: tasks/tasks/test_validators.py
```python
import gc
import numbers
from multiprocessing import Event
import pytest
from atom.api import Unicode
from exopy.tasks.api import RootTask, validators
from exopy.tasks.tasks.logic.loop_task import LoopTask
from exopy.testing.tasks.util import CheckTask
@pytest.yield_fixture
def task():
"""Create a task to test the validators.
"""
class Tester(CheckTask):
"""Class for testing feval validators.
"""
feval = Unicode()
root = RootTask(should_stop=Event(), should_pause=Event())
task = Tester(name='test', database_entries={'val': 1})
loop = LoopTask(name='Loop', task=task)
root.add_child_task(0, loop)
yield task
del root.should_pause
del root.should_stop
gc.collect()
def test_base_validation(task):
"""Test simply validating the evaluation.
"""
task.feval = '2*{Loop_val}'
val, res, msg = validators.Feval().check(task, 'feval')
assert val == 2
assert res
assert not msg
task.feval = '2-*{Loop_val}'
val, res, msg = validators.Feval().check(task, 'feval')
assert val is None
assert not res
assert msg
def test_type_validation(task):
"""Test type validating an entry.
"""
validator = validators.Feval(types=numbers.Real)
task.feval = '2*{Loop_val}'
val, res, msg = validator.check(task, 'feval')
assert val == 2
assert res
assert not msg
task.feval = '2j*{Loop_val}'
val, res, msg = validator.check(task, 'feval')
assert val is None
assert not res
assert msg
def test_warn_on_error(task):
"""Test simply warning on error.
"""
task.feval = '2-*{test_val}'
val, res, msg = validators.Feval(warn=True).check(task, 'feval')
assert val is None
assert res
assert msg
def test_skip_empty(task):
"""Test skipping an empty value.
"""
task.feval = '2*{Loop_val}'
val, res, msg = validators.SkipEmpty().check(task, 'feval')
assert val == 2
assert res
assert not msg
task.feval = ''
val, res, msg = validators.SkipEmpty().check(task, 'feval')
assert val is None
assert res
assert not msg
def test_skip_in_loop(task):
"""Skip testing the field if the task is embedded in a LoopTask.
"""
task.feval = ''
val, res, msg = validators.SkipLoop().check(task, 'feval')
assert val is None
assert res
assert not msg
task.feval = '2*{Loop_val}'
root = task.root
task.parent.task = None
root.add_child_task(0, task)
val, res, msg = validators.SkipLoop().check(task, 'feval')
assert val == 2
assert res
assert not msg
```
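The `Feval`-style validators above evaluate formula strings in which brace-wrapped names such as `{Loop_val}` are resolved against the task database before evaluation. The standalone sketch below illustrates that substitute-then-evaluate idea; it is only an illustration of the pattern, not exopy's implementation, which additionally handles error reporting, type checks and the skip variants.
```python
# Illustrative substitute-then-evaluate sketch (not exopy's Feval implementation).
import re


def evaluate_formula(formula, database):
    """Replace {entry} placeholders with database values, then evaluate."""
    expression = re.sub(r'\{(\w+)\}',
                        lambda m: repr(database[m.group(1)]),
                        formula)
    return eval(expression)


database = {'Loop_val': 1}
assert evaluate_formula('2*{Loop_val}', database) == 2
```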
#### File: tasks/util/test_formula_task.py
```python
import gc
import pytest
import enaml
from multiprocessing import Event
from collections import OrderedDict
from exopy.testing.util import show_and_close_widget
from exopy.tasks.tasks.base_tasks import RootTask
from exopy.tasks.tasks.util.formula_task import FormulaTask
from exopy.utils.atom_util import (ordered_dict_from_pref)
with enaml.imports():
from exopy.tasks.tasks.util.views.formula_view import FormulaView
class TestFormulaTask(object):
"""Test FormulaTask.
"""
def setup(self):
self.root = RootTask(should_stop=Event(), should_pause=Event())
self.task = FormulaTask(name='Test')
self.root.add_child_task(0, self.task)
def teardown(self):
del self.root.should_pause
del self.root.should_stop
# Ensure we collect the file descriptor of the events. Otherwise we can
# get funny errors on MacOS.
gc.collect()
def test_perform1(self):
"""Test checking that the evaluated formula gets written to the
database
"""
self.task.formulas = OrderedDict([('key1', "1.0+3.0"),
('key2', '3.0+4.0')])
self.root.prepare()
self.task.perform()
assert (self.task.get_from_database('Test_key1') == 4.0 and
self.task.get_from_database('Test_key2') == 7.0)
def test_perform_from_load(self):
"""Test checking for correct loading from pref and that we can still
recall values from the database
"""
self.task.write_in_database('pi', 3.1)
self.task.formulas = \
ordered_dict_from_pref(self, self.task.formulas,
("[(u'key1', '1.0+3.0'), "
"(u'key2', '3.0 + {Test_pi}')]"))
self.root.prepare()
self.task.perform()
assert (self.task.get_from_database('Test_key1') == 4.0 and
self.task.get_from_database('Test_key2') == 6.1)
def test_check(self):
"""Test checking that an unformattable formula gives an error
"""
self.task.formulas = OrderedDict([('key1', "1.0+3.0"),
('key2', '3.0+4.0 + {Test_pi}')])
test, traceback = self.task.check()
assert not test
assert len(traceback) == 1
assert 'root/Test-key2' in traceback
@pytest.mark.ui
def test_view(exopy_qtbot):
"""Test the FormulaTask view.
"""
show_and_close_widget(exopy_qtbot,
FormulaView(task=FormulaTask(name='Test')))
```
#### File: tests/tasks/test_dependencies.py
```python
from operator import getitem
import pytest
from exopy.app.dependencies.api import (BuildDependency,
RuntimeDependencyAnalyser)
from exopy.tasks.api import ComplexTask, InstrumentTask, TaskInterface
from exopy.tasks.infos import (TaskInfos, InterfaceInfos,
INSTR_RUNTIME_TASK_DRIVERS_ID,
INSTR_RUNTIME_TASK_PROFILES_ID,
INSTR_RUNTIME_INTERFACE_DRIVERS_ID,
INSTR_RUNTIME_INTERFACE_PROFILES_ID)
@pytest.fixture
def task_dep_collector(task_workbench):
"""Collector for task dependencies.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'build_deps'][0]
return [b for b in dep_ext.get_children(BuildDependency)
if b.id == 'exopy.task'][0]
@pytest.fixture
def interface_dep_collector(task_workbench):
"""Collector for interface dependencies.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'build_deps'][0]
return [b for b in dep_ext.get_children(BuildDependency)
if b.id == 'exopy.tasks.interface'][0]
@pytest.fixture
def driver_dep_collector(task_workbench):
"""Collector for driver dependencies for task supporting instrument and
having the proper selected_intrument member.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_TASK_DRIVERS_ID][0]
@pytest.fixture
def profile_dep_collector(task_workbench):
"""Collector for profile dependencies for task supporting instrument and
having the proper selected_intrument member.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_TASK_PROFILES_ID][0]
@pytest.fixture
def i_driver_dep_collector(task_workbench):
"""Collector for driver dependencies for interface supporting instrument
and having the proper selected_intrument member or being attached to a task
that does.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_INTERFACE_DRIVERS_ID][0]
@pytest.fixture
def i_profile_dep_collector(task_workbench):
"""Collector for profile dependencies for interface supporting instrument
and having the proper selected_intrument member or being attached to a task
that does.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
dep_ext = [e for e in plugin.manifest.extensions
if e.id == 'runtime_deps'][0]
return [b for b in dep_ext.get_children(RuntimeDependencyAnalyser)
if b.id == INSTR_RUNTIME_INTERFACE_PROFILES_ID][0]
def test_analysing_task_dependencies(monkeypatch, task_workbench,
task_dep_collector):
"""Test analysing the dependencies of a task.
"""
runtime = {'test'}
plugin = task_workbench.get_plugin('exopy.tasks')
monkeypatch.setattr(plugin.get_task_infos('exopy.ComplexTask'),
'dependencies', runtime)
dep = set()
errors = dict()
run = task_dep_collector.analyse(task_workbench, ComplexTask(), getattr,
dep, errors)
assert run == runtime
assert 'exopy.ComplexTask' in dep
assert not errors
dep = set()
run = task_dep_collector.analyse(task_workbench, {'task_id': '__dummy__'},
getitem, dep, errors)
assert not run
assert not dep
assert '__dummy__' in errors
def test_validating_task_dependencies(task_workbench, task_dep_collector):
"""Test validating task dependencies.
"""
errors = {}
task_dep_collector.validate(task_workbench,
{'exopy.ComplexTask', '__dummy__'}, errors)
assert 'exopy.ComplexTask' not in errors
assert '__dummy__' in errors
def test_collecting_task_dependencies(task_workbench, task_dep_collector):
"""Test collecting the dependencies found in a task.
"""
dependencies = dict.fromkeys(['exopy.ComplexTask', '__dummy__'])
errors = {}
task_dep_collector.collect(task_workbench, dependencies, errors)
assert 'exopy.ComplexTask' in dependencies
assert '__dummy__' in errors
def test_analysing_interface_dependencies(monkeypatch, task_workbench,
interface_dep_collector):
"""Test analysing the dependencies in an interface.
"""
runtime = {'test'}
interface = 'exopy.LoopTask:exopy.LinspaceLoopInterface'
plugin = task_workbench.get_plugin('exopy.tasks')
monkeypatch.setattr(plugin.get_interface_infos(interface), 'dependencies',
runtime)
dep = set()
errors = dict()
run = interface_dep_collector.analyse(task_workbench,
{'interface_id': str(interface)},
getitem, dep, errors)
assert run == runtime
assert interface in dep
assert not errors
dep.clear()
run = interface_dep_collector.analyse(task_workbench,
{'interface_id':
'LoopTask:__dummy__'},
getitem, dep, errors)
assert not run
assert not dep
assert 'LoopTask:__dummy__' in errors
def test_validating_interface_dependencies(task_workbench,
interface_dep_collector):
"""Test validating interface dependencies.
"""
errors = {}
interface_dep_collector.validate(
task_workbench,
{'exopy.LoopTask:exopy.LinspaceLoopInterface',
'LoopTask:__dummy__'}, errors)
assert 'exopy.LoopTask:exopy.LinspaceLoopInterface' not in errors
assert 'LoopTask:__dummy__' in errors
def test_collecting_interface_dependencies(task_workbench,
interface_dep_collector):
"""Test collecting the dependencies found in an interface.
"""
dependencies = dict.fromkeys(['exopy.LoopTask:exopy.LinspaceLoopInterface',
'LoopTask:__dummy__'])
errors = {}
interface_dep_collector.collect(task_workbench, dependencies, errors)
assert 'exopy.LoopTask:exopy.LinspaceLoopInterface' in dependencies
assert 'LoopTask:__dummy__' in errors
def test_analysing_instr_task_dependencies(monkeypatch, task_workbench,
task_dep_collector,
profile_dep_collector,
driver_dep_collector):
"""Test analysing the dependencies of a task.
"""
plugin = task_workbench.get_plugin('exopy.tasks')
plugin._tasks.contributions['exopy.InstrumentTask'] =\
TaskInfos(cls=InstrumentTask, instruments=['test'])
dep = set()
errors = dict()
t = InstrumentTask(selected_instrument=('test', 'dummy', 'c', None))
run = task_dep_collector.analyse(task_workbench, t, getattr,
dep, errors)
assert run == {'exopy.tasks.instruments.drivers',
'exopy.tasks.instruments.profiles'}
assert 'exopy.InstrumentTask' in dep
assert not errors
dep.clear()
profile_dep_collector.analyse(task_workbench, t, dep, errors)
assert 'test' in dep
assert not errors
dep.clear()
driver_dep_collector.analyse(task_workbench, t, dep, errors)
assert 'dummy' in dep
assert not errors
def test_analysing_instr_interface_dependencies(monkeypatch, task_workbench,
interface_dep_collector,
i_profile_dep_collector,
i_driver_dep_collector):
"""Test analysing the dependencies of an interface.
"""
class FalseI(TaskInterface):
__slots__ = ('__dict__')
plugin = task_workbench.get_plugin('exopy.tasks')
p_infos = TaskInfos(cls=InstrumentTask, instruments=['test'])
plugin._tasks.contributions['exopy.InstrumentTask'] = p_infos
p_infos.interfaces['tasks.FalseI'] =\
InterfaceInfos(cls=FalseI, instruments=['test'], parent=p_infos)
dep = set()
errors = dict()
i = FalseI()
t = InstrumentTask(selected_instrument=('test', 'dummy', 'c', None))
i.task = t
run = interface_dep_collector.analyse(task_workbench, i, getattr,
dep, errors)
assert run == {INSTR_RUNTIME_INTERFACE_DRIVERS_ID,
INSTR_RUNTIME_INTERFACE_PROFILES_ID}
assert 'exopy.InstrumentTask:tasks.FalseI' in dep
assert not errors
dep.clear()
i_profile_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'test' in dep
assert not errors
dep.clear()
i_driver_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'dummy' in dep
assert not errors
i.selected_instrument = ('test2', 'dummy2', 'c', None)
dep.clear()
i_profile_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'test2' in dep
assert not errors
dep.clear()
i_driver_dep_collector.analyse(task_workbench, i, dep, errors)
assert 'dummy2' in dep
assert not errors
```
#### File: tests/utils/test_container_change.py
```python
from pytest import raises
from exopy.utils.container_change import ContainerChange
class TestContainerChange(object):
"""Test the ContainerChange capabilities.
"""
def setup(self):
self.obj = object()
self.name = 'name'
self.container = ContainerChange(obj=self.obj, name=self.name)
def test_adding_moved(self):
self.container.add_operation('moved', (1, 2, 'test'))
assert (1, 2, 'test') in self.container.moved
assert not self.container.added
assert not self.container.removed
assert not self.container.collapsed
def test_adding_added(self):
self.container.add_operation('added', (1, 'test'))
assert (1, 'test') in self.container.added
assert not self.container.moved
assert not self.container.removed
assert not self.container.collapsed
def test_adding_removed(self):
self.container.add_operation('removed', (1, 'test'))
assert (1, 'test') in self.container.removed
assert not self.container.added
assert not self.container.moved
assert not self.container.collapsed
def test_adding_wrong_typ(self):
with raises(ValueError):
self.container.add_operation('test', (1, 'test'))
def test_adding_wrong_desc(self):
with raises(ValueError):
self.container.add_operation('added', ('test'))
with raises(ValueError):
self.container.add_operation('moved', ('test'))
with raises(ValueError):
self.container.add_operation('removed', ('test'))
def test_collapsing(self):
self.container.add_operation('moved', (1, 2, 'test'))
self.container.add_operation('added', (1, 'test'))
self.container.add_operation('added', (2, 'aux'))
assert self.container.collapsed
assert not self.container.added
assert not self.container.moved
assert not self.container.removed
assert len(self.container.collapsed) == 2
assert len(self.container.collapsed[0].moved) == 1
assert len(self.container.collapsed[1].added) == 2
for c in self.container.collapsed:
assert c.obj == self.obj
assert c.name == self.name
```
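Based on `test_collapsing` above, mixing operation types makes `ContainerChange` fold the individual notifications into per-type children exposed through `collapsed`. A short usage sketch, assuming exopy is importable:
```python
# Usage sketch based on the tests above.
from exopy.utils.container_change import ContainerChange

change = ContainerChange(obj=object(), name='children')
change.add_operation('moved', (1, 2, 'task_a'))  # three-element 'moved' description, as in the test
change.add_operation('added', (0, 'task_b'))     # two-element 'added' description

# Mixing operation types collapses them into separate child changes.
assert change.collapsed
assert not (change.moved or change.added or change.removed)
assert len(change.collapsed) == 2
```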
#### File: tests/utils/test_watchdog.py
```python
import pytest
from watchdog.events import FileCreatedEvent, FileDeletedEvent, FileMovedEvent
from exopy.utils.watchdog import SystematicFileUpdater
@pytest.fixture
def updater():
class Tester(SystematicFileUpdater):
def __init__(self):
self.counter = 0
super(Tester, self).__init__(lambda: setattr(self, 'counter',
self.counter + 1))
return Tester()
def test_file_creation(updater):
updater.on_created(FileCreatedEvent(''))
assert updater.counter == 1
def test_file_deletion(updater):
updater.on_deleted(FileDeletedEvent(''))
assert updater.counter == 1
def test_file_moved(updater):
updater.on_moved(FileMovedEvent('', ''))
assert updater.counter == 1
```
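`SystematicFileUpdater` invokes its callback for every file-system event, as the creation, deletion and move tests confirm. In practice the handler is scheduled on a watchdog `Observer`; a minimal sketch follows, assuming the constructor takes the callback exactly as in the fixture above.
```python
# Minimal usage sketch; the constructor signature is inferred from the fixture above.
import time

from watchdog.observers import Observer

from exopy.utils.watchdog import SystematicFileUpdater

handler = SystematicFileUpdater(lambda: print('directory content changed'))
observer = Observer()
observer.schedule(handler, path='.', recursive=False)
observer.start()
try:
    time.sleep(5)  # watch the current directory for a few seconds
finally:
    observer.stop()
    observer.join()
```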
|
{
"source": "jerjou/recipe-scrapers",
"score": 2
}
|
#### File: recipe-scrapers/recipe_scrapers/mybakingaddiction.py
```python
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string, get_yields
class MyBakingAddiction(AbstractScraper):
@classmethod
def host(cls):
return 'mybakingaddiction.com'
def title(self):
return self.soup.find('h1').get_text()
def total_time(self):
return get_minutes(self.soup.find(
'div',
{'class': 'mv-create-time-total'}
).get_text())
def yields(self):
return get_yields(self.soup.find(
'div',
{'class': 'mv-create-time-yield'}
))
def ingredients(self):
ingredients = self.soup.find(
'div',
{'class': "mv-create-ingredients"}
).findAll('li')
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients
]
def instructions(self):
instructions = self.soup.find(
'div',
{'class': 'mv-create-instructions'}
).findAll('li')
return '\n'.join([
normalize_string(instruction.get_text())
for instruction in instructions
])
def ratings(self):
rating = self.soup.find(
"div",
{"class": "mv-create-reviews"}
).attrs.get('data-mv-create-rating', None)
return round(float(rating), 2)
```
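Scrapers like `MyBakingAddiction` reduce to a few BeautifulSoup lookups keyed on the recipe-card CSS classes. The standalone sketch below reproduces the `total_time` and `yields` lookups on a tiny invented HTML snippet; only the class names are taken from the scraper above.
```python
# Standalone illustration of the BeautifulSoup lookups used above (invented markup).
from bs4 import BeautifulSoup

html = """
<div class="mv-create-time-total">Total Time: 45 minutes</div>
<div class="mv-create-time-yield">Yield: 12 cookies</div>
"""
soup = BeautifulSoup(html, 'html.parser')

total_time = soup.find('div', {'class': 'mv-create-time-total'}).get_text()
yields = soup.find('div', {'class': 'mv-create-time-yield'}).get_text()
print(total_time, '|', yields)  # Total Time: 45 minutes | Yield: 12 cookies
```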
#### File: recipe-scrapers/recipe_scrapers/tineno.py
```python
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string, get_yields
class TineNo(AbstractScraper):
@classmethod
def host(cls):
return 'tine.no'
def title(self):
return self.soup.find('h1').get_text()
def total_time(self):
total_time = 0
tt1 = self.soup.find(
'li',
{'class': "m-recipe-overview__total-time"}
)
if tt1:
tt = tt1.find(
'span',
{'class': "t-info"}
)
tt1 = normalize_string(tt.get_text())
tt2 = get_minutes(tt1)
if tt1 and (tt2 == 0):
total_time = tt1
else:
total_time = tt2
return total_time
def yields(self):
recipe_yield = self.soup.find(
'input', {'id': "portions"}
)
if recipe_yield:
return recipe_yield['value']
else:
return get_yields(self.soup.find(
'div',
{'class': 'recipe-adjust-servings__original-serving'}
).get_text())
def image(self):
image = self.soup.find(
'img',
{'id': "HeaderMediaContent"}
)
if image:
# tag = image.find('src')
src = image.get("src", None)
return src if image else None
def ingredients(self):
# site uses <section><section>...</section></section>
ingredientsOuter = self.soup.findAll(
'div',
{'id': 'ingredient-groups-container'}
)
ingGroup = []
ingredients1 = ingredientsOuter[0].findAll('section')
for ings in ingredients1:
ingredients = ings.findAll('section')
for ingredient in ingredients:
if len(ingredient) > 1:
try:
header = ingredient.find(
'h3', {'class': 't-tertiary-heading o-recipe-ingredients__sub-title'}).get_text()
except Exception:
header = ''
tablelines = ingredient.findAll('td')
lst = []
cntr = 0
tmplst = []
for item in tablelines:
tmplst.append(normalize_string(
item.get_text(strip=True)))
cntr += 1
if cntr % 2 == 0:
lst.append(" ".join(tmplst))
tmplst = []
if header != '':
ingGroup.append(header)
for l in lst:
ingGroup.append(l)
return ingGroup
def instructions(self):
instructions = self.soup.find(
'ol', {'class': 'o-recipe-steps__step-groups'})
ins = instructions.findAll(
'li', {'class': 'o-recipe-steps__step-group'})
return '\n'.join([normalize_string(inst.text)
for inst in ins
])
def description(self):
d = normalize_string(self.soup.find(
'div',
{'class': 't-ingress m-page-ingress m-page-ingress--recipe l-recipe__ingress'}).text)
return d if d else None
```
#### File: recipe-scrapers/tests/test_lovingitvegan.py
```python
from tests import ScraperTest
from recipe_scrapers.lovingitvegan import Lovingitvegan
class TestLovingitveganScraper(ScraperTest):
scraper_class = Lovingitvegan
def test_host(self):
self.assertEqual(
'lovingitvegan.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'Kale Smoothie'
)
def test_yields(self):
self.assertEqual("2 serving(s)", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
'https://lovingitvegan.com/wp-content/uploads/2018/08/Kale-Smoothie-8-225x225.jpg',
self.harvester_class.image()
)
def test_ingredients(self):
self.assertCountEqual(
[
"1 and 1/4 cups (300ml) Soy Milk (or sub almond milk)",
"2 Frozen Bananas (~200g, previously peeled, broken into quarters and frozen for at least 12 hours)",
"1/2 cup (75g) Raw Cashews",
"2 cups (56g) Kale (torn up, packed)",
"4 Medjool Dates (pitted)*",
"1 tsp Minced Ginger",
"1/8 tsp Cinnamon",
"1 Tbsp Lime Juice (freshly squeezed)",
"1 cup Ice Cubes"
],
self.harvester_class.ingredients()
)
def test_instructions(self):
return self.assertEqual(
'Add the soy milk to your blender jug and then the frozen bananas, cashews, kale, dates, minced ginger, cinnamon and fresh lime juice. Top with ice cubes.\nBlend until very smooth.\nPour out into glasses and serve.',
self.harvester_class.instructions()
)
```
#### File: recipe-scrapers/tests/test_thespruceeats.py
```python
from tests import ScraperTest
from recipe_scrapers.thespruceeats import TheSpruceEats
class TestTheSpruceEatsScraper(ScraperTest):
scraper_class = TheSpruceEats
def test_host(self):
self.assertEqual(
'thespruceeats.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'Homemade Doner Kebab: A Turkish Classic'
)
def test_total_time(self):
self.assertEqual(
50,
self.harvester_class.total_time()
)
def test_yields(self):
self.assertEqual("4 Servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
'https://www.thespruceeats.com/thmb/DKGKiOeI7L3iiDtLzOllsoaGksA=/2105x1184/smart/filters:no_upscale()/aqIMG_1498fsq-5b343910c9e77c001a218bd0.jpg',
self.harvester_class.image()
)
def test_ingredients(self):
self.assertCountEqual(
[
'1 lb. ground lamb (or 1/2 lb. each of ground lamb and ground beef)',
'1 egg',
'4 cloves garlic (peeled and finely minced)',
'1 teaspoon ground cumin',
'1 teaspoon ground coriander',
'1 teaspoon smoked paprika',
'1 teaspoon dried oregano',
'1/2 teaspoon salt',
'1/4 teaspoon ground black pepper',
'4 rounds of pita (or naan or flatbread)',
'1 cup assorted lettuce',
'1 large tomato (sliced)',
'1/2 seedless English cucumber (sliced)',
'1/4 large red onion (peeled and sliced)',
'Tzatziki sauce or tahini sauce'
],
self.harvester_class.ingredients()
)
def test_instructions(self):
return self.assertEqual(
'Gather the ingredients.\nPre-heat the oven to 350 F.\nIn a large bowl, combine the ground lamb, egg, garlic, ground cumin, ground coriander, smoked paprika, dried oregano, salt, and black pepper.\nPlace the mixture into a loaf pan and cook in the oven for approximately 30 minutes or until the top is a light golden brown.\nYou can slice the loaf immediately if you like but, for best results, cool completely, wrap in aluminum foil, and refrigerate until firm.\nTo reheat, add a little olive oil to a large skillet, slice the loaf very thinly and crisp up the slices in the hot pan for a few minutes.\nAssemble the sandwiches with the vegetables and sauce.\nLamb\nkebab\ndinner\nmiddle eastern',
self.harvester_class.instructions()
)
def test_ratings(self):
self.assertEqual(
4.0,
self.harvester_class.ratings()
)
```
|
{
"source": "jerke123/mridc",
"score": 3
}
|
#### File: common/data/dataset.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/data/dataset.py
from abc import ABC
from typing import Any, List
import numpy as np
import torch.utils.data as pt_data
__all__ = ["ConcatDataset"]
class ConcatDataset(pt_data.IterableDataset, ABC):
"""
A dataset that accepts as argument multiple datasets and then samples from them based on the specified
sampling technique.
Args:
datasets (list): A list of datasets to sample from.
shuffle (bool): Whether to shuffle individual datasets. Only works with non-iterable datasets.
Defaults to True.
sampling_technique (str): Sampling technique to choose which dataset to draw a sample from.
Defaults to 'random'. Currently supports 'random' and 'round-robin'.
sampling_probabilities (list): Probability values for sampling. Only used when sampling_technique = 'random'.
global_rank (int): Worker rank, used for partitioning map style datasets. Defaults to 0.
world_size (int): Total number of processes, used for partitioning map style datasets. Defaults to 1.
"""
def __init__(
self,
datasets: List[Any],
shuffle: bool = True,
sampling_technique: str = "random",
sampling_probabilities: List[float] = None,
global_rank: int = 0,
world_size: int = 1,
):
super().__init__()
self.datasets = datasets
self.iterables = [None] * len(datasets)
self.shuffle = shuffle
self.global_rank = global_rank
self.world_size = world_size
self.sampling_kwargs = {}
if sampling_technique == "random":
self.index_generator = ConcatDataset.random_generator
self.sampling_kwargs["p"] = sampling_probabilities # type: ignore
elif sampling_technique == "round-robin":
self.index_generator = ConcatDataset.round_robin_generator
else:
supported_sampling_techniques = ["random", "round-robin"]
raise ValueError(f"Currently we only support sampling techniques in {supported_sampling_techniques}.")
self.length = 0
if isinstance(datasets[0], pt_data.IterableDataset):
self.kind = "iterable"
else:
self.kind = "map"
for dataset in datasets:
isiterable = isinstance(dataset, pt_data.IterableDataset)
if isiterable and self.kind != "iterable" or (not isiterable and self.kind == "iterable"):
raise ValueError("All datasets in ConcatDataset must be of the same kind (Iterable or Map).")
if self.kind == "map":
self.length += np.floor_divide(len(dataset), world_size)
else:
self.length += len(dataset)
def get_iterable(self, dataset):
"""Returns an iterable dataset."""
if isinstance(dataset, pt_data.IterableDataset):
return dataset.__iter__()
indices = np.arange(len(dataset))
if self.shuffle:
np.random.shuffle(indices)
return iter(indices)
def __iter__(self):
"""Returns an iterator over the dataset."""
worker_info = pt_data.get_worker_info()
if worker_info is None:
max_elements = self.length
wid = 0
wnum = 1
else:
wid = worker_info.id
wnum = worker_info.num_workers
max_elements = len(range(wid, self.length, wnum))
if self.kind == "map":
for idx in range(len(self.datasets)):
start_idx = np.floor_divide(len(self.datasets[idx]), self.world_size) * self.global_rank
end_idx = start_idx + np.floor_divide(len(self.datasets[idx]), self.world_size)
if self.global_rank == self.world_size - 1:
end_idx = len(self.datasets[idx])
indices = range(start_idx + wid, end_idx, wnum)
self.datasets[idx] = pt_data.Subset(self.datasets[idx], indices)
for idx, dataset in enumerate(self.datasets):
iterable = self.get_iterable(dataset)
self.iterables[idx] = iterable # type: ignore
n = 0
ind_gen = self.index_generator(self.datasets, **self.sampling_kwargs)
while n < max_elements:
n += 1
try:
ind = next(ind_gen)
except StopIteration:
return
try:
val = next(self.iterables[ind]) # type: ignore
if self.kind == "map":
val = self.datasets[ind][val]
yield val
except StopIteration:
self.iterables[ind] = self.get_iterable(self.datasets[ind]) # type: ignore
n -= 1
def __len__(self):
"""Returns the number of elements in the dataset."""
return self.length
@staticmethod
def round_robin_generator(datasets, **kwargs):
"""Generates indices in a round-robin fashion."""
num = len(datasets)
while True:
yield from range(num)
@staticmethod
def random_generator(datasets, **kwargs):
"""Generates random indices."""
p = kwargs.get("p")
if not p:
raise ValueError("Random generator expects a 'p' keyowrd argument for sampling probabilities.")
num = len(datasets)
if len(p) != num:
raise ValueError("Length of probabilities list must be equal to the number of datasets.")
while True:
yield np.random.choice(np.arange(num), p=p)
```
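A quick way to see the sampling behaviour described in the `ConcatDataset` docstring is to wrap two small map-style datasets and draw a few samples. The sketch below assumes the import path matches the file shown above; with `sampling_technique='random'` the probabilities control which source each sample is drawn from.
```python
# Usage sketch; the import path is assumed to match the file above.
import torch
from torch.utils.data import TensorDataset

from mridc.collections.common.data.dataset import ConcatDataset

ds_a = TensorDataset(torch.zeros(8, 2))  # 8 samples from source A
ds_b = TensorDataset(torch.ones(4, 2))   # 4 samples from source B

concat = ConcatDataset(
    [ds_a, ds_b],
    sampling_technique="random",
    sampling_probabilities=[0.75, 0.25],
)

for i, (sample,) in enumerate(concat):
    print(i, sample)  # drawn from A or B according to the probabilities
    if i == 5:
        break
```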
#### File: models/cascadenet/ccnn_block.py
```python
__author__ = "<NAME>"
import torch
from mridc.collections.common.parts.fft import fft2c, ifft2c
from mridc.collections.common.parts.utils import complex_conj, complex_mul
class CascadeNetBlock(torch.nn.Module):
"""
Model block for CascadeNet & Convolution Recurrent Neural Network.
This model applies a combination of soft data consistency with the input model as a regularizer.
A series of these blocks can be stacked to form the full variational network.
"""
def __init__(self, model: torch.nn.Module, fft_type: str = "orthogonal", no_dc: bool = False):
"""
Initialize the model block.
Args:
model: Model to apply soft data consistency.
fft_type: Type of FFT to use.
no_dc: Whether to remove the DC component.
"""
super().__init__()
self.model = model
self.fft_type = fft_type
self.no_dc = no_dc
self.dc_weight = torch.nn.Parameter(torch.ones(1))
def sens_expand(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
"""
Expand the coil-combined image to multicoil k-space using the sensitivity maps.
Args:
x: Input data.
sens_maps: Sensitivity maps.
Returns:
SENSE reconstruction expanded to the same size as the input.
"""
return fft2c(complex_mul(x, sens_maps), fft_type=self.fft_type)
def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
"""
Combine multicoil k-space into a coil-combined image using the conjugate sensitivity maps (SENSE reduction).
Args:
x: Input data.
sens_maps: Sensitivity maps.
Returns:
SENSE reconstruction reduced to the same size as the input.
"""
x = ifft2c(x, fft_type=self.fft_type)
return complex_mul(x, complex_conj(sens_maps)).sum(dim=1, keepdim=True)
def forward(
self,
pred: torch.Tensor,
ref_kspace: torch.Tensor,
sens_maps: torch.Tensor,
mask: torch.Tensor,
) -> torch.Tensor:
"""
Forward pass of the model.
Args:
pred: Predicted k-space data.
ref_kspace: Reference k-space data.
sens_maps: Sensitivity maps.
mask: Mask to apply to the data.
Returns:
Reconstructed image.
"""
zero = torch.zeros(1, 1, 1, 1, 1).to(pred)
soft_dc = torch.where(mask.bool(), pred - ref_kspace, zero) * self.dc_weight
eta = self.sens_reduce(pred, sens_maps)
eta = self.model(eta.squeeze(1).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
eta = self.sens_expand(eta, sens_maps)
if not self.no_dc:
eta = pred - soft_dc - eta
return eta
```
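The soft data-consistency term used in the forward pass above is just a masked k-space residual scaled by a learnable weight. A minimal sketch with toy tensors; the shapes are illustrative only.
```python
import torch
batch, coils, h, w = 1, 2, 4, 4
pred = torch.randn(batch, coils, h, w, 2)        # predicted k-space (real/imag stacked in the last dim)
ref_kspace = torch.randn(batch, coils, h, w, 2)  # measured (masked) k-space
mask = torch.rand(1, 1, h, w, 1) > 0.5           # sampling mask
dc_weight = torch.nn.Parameter(torch.ones(1))
zero = torch.zeros(1, 1, 1, 1, 1).to(pred)
# The residual is only penalized where k-space was actually measured.
soft_dc = torch.where(mask, pred - ref_kspace, zero) * dc_weight
print(soft_dc.shape)  # torch.Size([1, 2, 4, 4, 2])
```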
#### File: models/convrecnet/crnn_block.py
```python
__author__ = "<NAME>"
from typing import Any, List, Union
import torch
from mridc.collections.common.parts.fft import fft2c, ifft2c
from mridc.collections.common.parts.utils import complex_conj, complex_mul
class DataConsistencyLayer(torch.nn.Module):
"""Data consistency layer for the CRNN.
This layer enforces consistency between the predicted k-space and the measured (reference) k-space at the sampled locations.
"""
def __init__(self):
"""Initializes the data consistency layer."""
super().__init__()
self.dc_weight = torch.nn.Parameter(torch.ones(1))
def forward(self, pred_kspace, ref_kspace, mask):
"""Forward pass of the data consistency layer."""
zero = torch.zeros(1, 1, 1, 1, 1).to(pred_kspace)
return torch.where(mask.bool(), pred_kspace - ref_kspace, zero) * self.dc_weight
class RecurrentConvolutionalNetBlock(torch.nn.Module):
"""
Model block for the Recurrent Convolutional Neural Network, inspired by [1]_.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>,
"Convolutional Recurrent Neural Networks for Dynamic MR Image Reconstruction," in IEEE Transactions on Medical
Imaging, vol. 38, no. 1, pp. 280-290, Jan. 2019, doi: 10.1109/TMI.2018.2863670.
"""
def __init__(
self, model: torch.nn.Module, num_iterations: int = 10, fft_type: str = "orthogonal", no_dc: bool = False
):
"""
Initialize the model block.
Args:
model: Model to apply soft data consistency.
num_iterations: Number of iterations.
fft_type: Type of FFT to use.
no_dc: Whether to remove the DC component.
"""
super().__init__()
self.model = model
self.num_iterations = num_iterations
self.fft_type = fft_type
self.no_dc = no_dc
self.dc_weight = torch.nn.Parameter(torch.ones(1))
def sens_expand(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
"""
Apply the coil sensitivity maps to the image and transform to multicoil k-space (SENSE expansion).
Args:
x: Input data.
sens_maps: Sensitivity maps.
Returns:
SENSE reconstruction expanded to the same size as the input.
"""
return fft2c(complex_mul(x, sens_maps), fft_type=self.fft_type)
def sens_reduce(self, x: torch.Tensor, sens_maps: torch.Tensor) -> torch.Tensor:
"""
Combine multicoil k-space into a coil-combined image using the conjugate sensitivity maps (SENSE reduction).
Args:
x: Input data.
sens_maps: Sensitivity maps.
Returns:
SENSE reconstruction reduced to the same size as the input.
"""
x = ifft2c(x, fft_type=self.fft_type)
return complex_mul(x, complex_conj(sens_maps)).sum(1)
def forward(
self,
ref_kspace: torch.Tensor,
sens_maps: torch.Tensor,
mask: torch.Tensor,
) -> List[Union[torch.Tensor, Any]]:
"""
Forward pass of the model.
Args:
ref_kspace: Reference k-space data.
sens_maps: Sensitivity maps.
mask: Mask to apply to the data.
Returns:
Reconstructed image.
"""
zero = torch.zeros(1, 1, 1, 1, 1).to(ref_kspace)
pred = ref_kspace.clone()
preds = []
for _ in range(self.num_iterations):
soft_dc = torch.where(mask.bool(), pred - ref_kspace, zero) * self.dc_weight
eta = self.sens_reduce(pred, sens_maps)
eta = self.model(eta.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) + eta
eta = self.sens_expand(eta.unsqueeze(1), sens_maps)
if not self.no_dc:
# TODO: Check if this is correct
eta = pred - soft_dc - eta
pred = eta
preds.append(eta)
return preds
```
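`DataConsistencyLayer` depends only on torch, so it can be exercised in isolation. A usage sketch with toy shapes, assuming the class defined above is in scope; the shapes are illustrative.
```python
import torch
dc = DataConsistencyLayer()                       # class defined above
pred_kspace = torch.randn(1, 2, 4, 4, 2)          # toy predicted k-space
ref_kspace = torch.randn(1, 2, 4, 4, 2)           # toy measured k-space
mask = (torch.rand(1, 1, 4, 4, 1) > 0.5).float()  # toy sampling mask
out = dc(pred_kspace, ref_kspace, mask)
print(out.shape)  # torch.Size([1, 2, 4, 4, 2])
```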
#### File: models/multidomain/multidomain.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NKI-AI/direct/blob/main/direct/nn/multidomainnet/multidomain.py
# Copyright (c) DIRECT Contributors
import torch
import torch.nn as nn
import torch.nn.functional as F
from mridc.collections.common.parts.fft import fft2c, ifft2c
from mridc.collections.common.parts.utils import complex_conj, complex_mul
class MultiDomainConv2d(nn.Module):
"""Multi-domain convolution layer."""
def __init__(
self,
fft_type,
in_channels,
out_channels,
**kwargs,
):
super().__init__()
self.image_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels // 2, **kwargs)
self.kspace_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels // 2, **kwargs)
self.fft_type = fft_type
self._channels_dim = 1
self._spatial_dims = [1, 2]
def forward(self, image):
"""Forward method for the MultiDomainConv2d class."""
kspace = [
fft2c(im, fft_type=self.fft_type, fft_dim=self._spatial_dims)
for im in torch.split(image.permute(0, 2, 3, 1).contiguous(), 2, -1)
]
kspace = torch.cat(kspace, -1).permute(0, 3, 1, 2)
kspace = self.kspace_conv(kspace)
backward = [
ifft2c(ks.float(), fft_type=self.fft_type, fft_dim=self._spatial_dims).type(image.type())
for ks in torch.split(kspace.permute(0, 2, 3, 1).contiguous(), 2, -1)
]
backward = torch.cat(backward, -1).permute(0, 3, 1, 2)
image = self.image_conv(image)
image = torch.cat([image, backward], dim=self._channels_dim)
return image
class MultiDomainConvTranspose2d(nn.Module):
"""Multi-Domain convolutional transpose layer."""
def __init__(
self,
fft_type,
in_channels,
out_channels,
**kwargs,
):
super().__init__()
self.image_conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels // 2, **kwargs)
self.kspace_conv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels // 2, **kwargs)
self.fft_type = fft_type
self._channels_dim = 1
self._spatial_dims = [1, 2]
def forward(self, image):
"""Forward method for the MultiDomainConvTranspose2d class."""
kspace = [
fft2c(im, fft_type=self.fft_type, fft_dim=self._spatial_dims)
for im in torch.split(image.permute(0, 2, 3, 1).contiguous(), 2, -1)
]
kspace = torch.cat(kspace, -1).permute(0, 3, 1, 2)
kspace = self.kspace_conv(kspace)
backward = [
ifft2c(ks.float(), fft_type=self.fft_type, fft_dim=self._spatial_dims).type(image.type())
for ks in torch.split(kspace.permute(0, 2, 3, 1).contiguous(), 2, -1)
]
backward = torch.cat(backward, -1).permute(0, 3, 1, 2)
image = self.image_conv(image)
return torch.cat([image, backward], dim=self._channels_dim)
class MultiDomainConvBlock(nn.Module):
"""
A multi-domain convolutional block that consists of two multi-domain convolution layers each followed by instance
normalization, LeakyReLU activation and dropout.
"""
def __init__(self, fft_type, in_channels: int, out_channels: int, dropout_probability: float):
"""
Parameters
----------
in_channels: int
Number of input channels.
out_channels: int
Number of output channels.
dropout_probability: float
Dropout probability.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.dropout_probability = dropout_probability
self.layers = nn.Sequential(
MultiDomainConv2d(fft_type, in_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(dropout_probability),
MultiDomainConv2d(fft_type, out_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(dropout_probability),
)
def forward(self, _input: torch.Tensor):
"""
Parameters
----------
_input: torch.Tensor
Returns
-------
torch.Tensor
"""
return self.layers(_input)
def __repr__(self):
return (
f"MultiDomainConvBlock(in_channels={self.in_channels}, out_channels={self.out_channels}, "
f"dropout_probability={self.dropout_probability})"
)
class TransposeMultiDomainConvBlock(nn.Module):
"""
A Transpose Convolutional Block that consists of one convolution transpose layer followed by instance
normalization and LeakyReLU activation.
"""
def __init__(self, fft_type, in_channels: int, out_channels: int):
"""
Parameters
----------
in_channels: int
Number of input channels.
out_channels: int
Number of output channels.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.layers = nn.Sequential(
MultiDomainConvTranspose2d(fft_type, in_channels, out_channels, kernel_size=2, stride=2, bias=False),
nn.InstanceNorm2d(out_channels),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def forward(self, input_data: torch.Tensor):
"""
Parameters
----------
input_data: torch.Tensor
Returns
-------
torch.Tensor
"""
return self.layers(input_data)
def __repr__(self):
return f"MultiDomainConvBlock(in_channels={self.in_channels}, out_channels={self.out_channels})"
class StandardizationLayer(nn.Module):
r"""
Multi-channel data standardization method. Inspired by AIRS model submission to the Fast MRI 2020 challenge.
Given individual coil images :math:`\{x_i\}_{i=1}^{N_c}` and sensitivity coil maps :math:`\{S_i\}_{i=1}^{N_c}`
it returns
.. math::
[(x_{\text{sense}}, {x_{\text{res}}}_1), ..., (x_{\text{sense}}, {x_{\text{res}}}_{N_c})]
where :math:`{x_{\text{res}}}_i = x_i - S_i \times x_{\text{sense}}` and
:math:`x_{\text{sense}} = \sum_{i=1}^{N_c} {S_i}^{*} \times x_i`.
"""
def __init__(self, coil_dim=1, channel_dim=-1):
super().__init__()
self.coil_dim = coil_dim
self.channel_dim = channel_dim
def forward(self, coil_images: torch.Tensor, sensitivity_map: torch.Tensor) -> torch.Tensor:
"""Forward pass."""
combined_image = complex_mul(coil_images, complex_conj(sensitivity_map)).sum(self.coil_dim)
residual_image = combined_image.unsqueeze(self.coil_dim) - complex_mul(
combined_image.unsqueeze(self.coil_dim), sensitivity_map
)
return torch.cat(
[
torch.cat(
[combined_image, residual_image.select(self.coil_dim, idx)],
self.channel_dim,
).unsqueeze(self.coil_dim)
for idx in range(coil_images.size(self.coil_dim))
],
self.coil_dim,
)
class MultiDomainUnet2d(nn.Module):
"""
Unet modification to be used with Multi-domain network as in AIRS Medical submission to the Fast MRI 2020
challenge.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
num_filters: int,
num_pool_layers: int,
dropout_probability: float,
fft_type: str = "orthogonal",
):
"""
Parameters
----------
in_channels: int
Number of input channels to the u-net.
out_channels: int
Number of output channels to the u-net.
num_filters: int
Number of output channels of the first convolutional layer.
num_pool_layers: int
Number of down-sampling and up-sampling layers (depth).
dropout_probability: float
Dropout probability.
fft_type: str
FFT type.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.num_filters = num_filters
self.num_pool_layers = num_pool_layers
self.dropout_probability = dropout_probability
self.fft_type = fft_type
self.down_sample_layers = nn.ModuleList(
[MultiDomainConvBlock(fft_type, in_channels, num_filters, dropout_probability)]
)
ch = num_filters
for _ in range(num_pool_layers - 1):
self.down_sample_layers += [MultiDomainConvBlock(fft_type, ch, ch * 2, dropout_probability)]
ch *= 2
self.conv = MultiDomainConvBlock(fft_type, ch, ch * 2, dropout_probability)
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for _ in range(num_pool_layers - 1):
self.up_transpose_conv += [TransposeMultiDomainConvBlock(fft_type, ch * 2, ch)]
self.up_conv += [MultiDomainConvBlock(fft_type, ch * 2, ch, dropout_probability)]
ch //= 2
self.up_transpose_conv += [TransposeMultiDomainConvBlock(fft_type, ch * 2, ch)]
self.up_conv += [
nn.Sequential(
MultiDomainConvBlock(fft_type, ch * 2, ch, dropout_probability),
nn.Conv2d(ch, self.out_channels, kernel_size=1, stride=1),
)
]
def forward(self, input_data: torch.Tensor):
"""
Parameters
----------
input_data: torch.Tensor
Returns
-------
torch.Tensor
"""
stack = []
output = input_data
# Apply down-sampling layers
for layer in self.down_sample_layers:
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# Apply up-sampling layers
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# Reflect pad on the right/bottom if needed to handle odd input dimensions.
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # Padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # Padding bottom
if sum(padding) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output
```
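The up-sampling path above reflect-pads the decoder output by one pixel on the right/bottom when odd input sizes leave it one pixel smaller than the skip connection. A standalone sketch of that alignment step with toy tensors.
```python
import torch
import torch.nn.functional as F
skip = torch.randn(1, 8, 7, 9)  # encoder feature map with odd spatial size
up = torch.randn(1, 8, 6, 8)    # decoder output after the transpose convolution, one pixel short
padding = [0, 0, 0, 0]
if up.shape[-1] != skip.shape[-1]:
    padding[1] = 1  # pad right
if up.shape[-2] != skip.shape[-2]:
    padding[3] = 1  # pad bottom
if sum(padding) != 0:
    up = F.pad(up, padding, "reflect")
merged = torch.cat([up, skip], dim=1)
print(merged.shape)  # torch.Size([1, 16, 7, 9])
```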
#### File: models/recurrentvarnet/conv2gru.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NKI-AI/direct/blob/main/direct/nn/recurrent/recurrent.py
# Copyright (c) DIRECT Contributors
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
class Conv2dGRU(nn.Module):
"""2D Convolutional GRU Network."""
def __init__(
self,
in_channels: int,
hidden_channels: int,
out_channels: Optional[int] = None,
num_layers: int = 2,
gru_kernel_size=1,
orthogonal_initialization: bool = True,
instance_norm: bool = False,
dense_connect: int = 0,
replication_padding: bool = True,
):
"""Inits Conv2dGRU.
Parameters
----------
in_channels: int
Number of input channels.
hidden_channels: int
Number of hidden channels.
out_channels: Optional[int]
Number of output channels. If None, same as in_channels. Default: None.
num_layers: int
Number of layers. Default: 2.
gru_kernel_size: int
Size of the GRU kernel. Default: 1.
orthogonal_initialization: bool
Orthogonal initialization is used if set to True. Default: True.
instance_norm: bool
Instance norm is used if set to True. Default: False.
dense_connect: int
Number of dense connections.
replication_padding: bool
If set to true replication padding is applied.
"""
super().__init__()
if out_channels is None:
out_channels = in_channels
self.num_layers = num_layers
self.hidden_channels = hidden_channels
self.dense_connect = dense_connect
self.reset_gates = nn.ModuleList([])
self.update_gates = nn.ModuleList([])
self.out_gates = nn.ModuleList([])
self.conv_blocks = nn.ModuleList([])
# Create convolutional blocks
for idx in range(num_layers + 1):
in_ch = in_channels if idx == 0 else (1 + min(idx, dense_connect)) * hidden_channels
out_ch = hidden_channels if idx < num_layers else out_channels
padding = 0 if replication_padding else (2 if idx == 0 else 1)
block = []
if replication_padding:
if idx == 1:
block.append(nn.ReplicationPad2d(2))
else:
block.append(nn.ReplicationPad2d(2 if idx == 0 else 1))
block.append(
nn.Conv2d(
in_channels=in_ch,
out_channels=out_ch,
kernel_size=5 if idx == 0 else 3,
dilation=(2 if idx == 1 else 1),
padding=padding,
)
)
self.conv_blocks.append(nn.Sequential(*block))
# Create GRU blocks
for _ in range(num_layers):
for gru_part in [self.reset_gates, self.update_gates, self.out_gates]:
block = []
if instance_norm:
block.append(nn.InstanceNorm2d(2 * hidden_channels))
block.append(
nn.Conv2d(
in_channels=2 * hidden_channels,
out_channels=hidden_channels,
kernel_size=gru_kernel_size,
padding=gru_kernel_size // 2,
)
)
gru_part.append(nn.Sequential(*block))
if orthogonal_initialization:
for reset_gate, update_gate, out_gate in zip(self.reset_gates, self.update_gates, self.out_gates):
nn.init.orthogonal_(reset_gate[-1].weight)
nn.init.orthogonal_(update_gate[-1].weight)
nn.init.orthogonal_(out_gate[-1].weight)
nn.init.constant_(reset_gate[-1].bias, -1.0)
nn.init.constant_(update_gate[-1].bias, 0.0)
nn.init.constant_(out_gate[-1].bias, 0.0)
def forward(
self,
cell_input: torch.Tensor,
previous_state: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Computes Conv2dGRU forward pass given tensors `cell_input` and `previous_state`.
Parameters
----------
cell_input: torch.Tensor
Reconstruction input
previous_state: torch.Tensor
Tensor of previous states.
Returns
-------
out, new_states: (torch.Tensor, torch.Tensor)
Output and new states.
"""
new_states: List[torch.Tensor] = []
conv_skip: List[torch.Tensor] = []
if previous_state is None:
batch_size, spatial_size = cell_input.size(0), (cell_input.size(2), cell_input.size(3))
state_size = [batch_size, self.hidden_channels] + list(spatial_size) + [self.num_layers]
previous_state = torch.zeros(*state_size, dtype=cell_input.dtype).to(cell_input.device)
for idx in range(self.num_layers):
if len(conv_skip) > 0:
cell_input = F.relu(
self.conv_blocks[idx](torch.cat([*conv_skip[-self.dense_connect :], cell_input], dim=1)),
inplace=True,
)
else:
cell_input = F.relu(self.conv_blocks[idx](cell_input), inplace=True)
if self.dense_connect > 0:
conv_skip.append(cell_input)
stacked_inputs = torch.cat([cell_input, previous_state[:, :, :, :, idx]], dim=1)
update = torch.sigmoid(self.update_gates[idx](stacked_inputs))
reset = torch.sigmoid(self.reset_gates[idx](stacked_inputs))
delta = torch.tanh(
self.out_gates[idx](torch.cat([cell_input, previous_state[:, :, :, :, idx] * reset], dim=1))
)
cell_input = previous_state[:, :, :, :, idx] * (1 - update) + delta * update
new_states.append(cell_input)
cell_input = F.relu(cell_input, inplace=False)
if len(conv_skip) > 0:
out = self.conv_blocks[self.num_layers](torch.cat([*conv_skip[-self.dense_connect :], cell_input], dim=1))
else:
out = self.conv_blocks[self.num_layers](cell_input)
return out, torch.stack(new_states, dim=-1)
```
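`Conv2dGRU` depends only on torch, so a quick shape check is possible. A hedged usage sketch, assuming the class above is in scope; the channel counts and spatial size are illustrative.
```python
import torch
gru = Conv2dGRU(in_channels=2, hidden_channels=8, num_layers=2)  # class defined above
x = torch.randn(1, 2, 16, 16)             # (batch, channels, height, width)
out, state = gru(x, previous_state=None)  # the hidden state is created lazily when None
print(out.shape)    # torch.Size([1, 2, 16, 16]) -> out_channels defaults to in_channels
print(state.shape)  # torch.Size([1, 8, 16, 16, 2]) -> hidden states stacked over the two layers
```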
#### File: models/rim/conv_layers.py
```python
__author__ = "<NAME>"
import torch
import torch.nn as nn
class ConvRNNStack(nn.Module):
"""A stack of convolutional RNNs."""
def __init__(self, convs, rnn):
"""
Args:
convs: list of convolutional layers
rnn: list of RNN layers
"""
super(ConvRNNStack, self).__init__()
self.convs = convs
self.rnn = rnn
def forward(self, x, hidden):
"""
Args:
x: (batch_size, seq_len, input_size)
hidden: (num_layers * num_directions, batch_size, hidden_size)
Returns:
output: (batch_size, seq_len, hidden_size)
"""
return self.rnn(self.convs(x), hidden)
class ConvNonlinear(nn.Module):
"""A convolutional layer with nonlinearity."""
def __init__(self, input_size, features, conv_dim, kernel_size, dilation, bias, nonlinear="relu"):
"""
Initializes the convolutional layer.
Args:
input_size: number of input channels.
features: number of output channels.
conv_dim: number of dimensions of the convolutional layer.
kernel_size: size of the convolutional kernel.
dilation: dilation of the convolutional kernel.
bias: whether to use bias.
nonlinear: nonlinearity of the convolutional layer.
"""
super(ConvNonlinear, self).__init__()
self.input_size = input_size
self.features = features
self.kernel_size = kernel_size
self.dilation = dilation
self.bias = bias
self.conv_dim = conv_dim
self.conv_class = self.determine_conv_class(conv_dim)
if nonlinear is not None and nonlinear.upper() == "RELU":
self.nonlinear = torch.nn.ReLU()
elif nonlinear is None:
self.nonlinear = lambda x: x
else:
raise ValueError("Please specify a proper nonlinearity")
self.padding = [
torch.nn.ReplicationPad1d(torch.div(dilation * (kernel_size - 1), 2, rounding_mode="trunc").item()),
torch.nn.ReplicationPad2d(torch.div(dilation * (kernel_size - 1), 2, rounding_mode="trunc").item()),
torch.nn.ReplicationPad3d(torch.div(dilation * (kernel_size - 1), 2, rounding_mode="trunc").item()),
][conv_dim - 1]
self.conv_layer = self.conv_class(
in_channels=input_size,
out_channels=features,
kernel_size=kernel_size,
padding=0,
dilation=dilation,
bias=bias,
)
self.reset_parameters()
def reset_parameters(self):
"""
Resets the parameters of the convolutional layer.
Returns:
None.
"""
torch.nn.init.kaiming_normal_(self.conv_layer.weight, nonlinearity="relu")
if self.conv_layer.bias is not None:
nn.init.zeros_(self.conv_layer.bias)
@staticmethod
def determine_conv_class(n_dim):
"""
Determines the convolutional layer class.
Args:
n_dim: number of dimensions.
Returns:
conv_class: convolutional layer class.
"""
if n_dim == 1:
return nn.Conv1d
if n_dim == 2:
return nn.Conv2d
if n_dim == 3:
return nn.Conv3d
raise ValueError(f"Convolution of: {n_dim} dims is not implemented")
def extra_repr(self):
"""
Extra information about the layer.
Returns:
str: extra information about the layer.
"""
s = "{input_size}, {features}"
if "bias" in self.__dict__ and self.bias is not True:
s += ", bias={bias}"
if "nonlinear" in self.__dict__ and self.nonlinear != "tanh":
s += ", nonlinearity={nonlinear}"
return s.format(**self.__dict__)
def check_forward_input(self, _input):
"""
Checks input for correct size and shape.
Args:
_input: input to the convolutional layer.
Raises:
RuntimeError: if the channel dimension of `_input` does not match `input_size`.
"""
if _input.size(1) != self.input_size:
raise RuntimeError(f"input has inconsistent input_size: got {_input.size(1)}, expected {self.input_size}")
def forward(self, _input):
"""
Forward pass of the convolutional layer.
Args:
_input: input to the convolutional layer.
Returns:
_output: output of the convolutional layer.
"""
return self.nonlinear(self.conv_layer(self.padding(_input)))
```
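`ConvNonlinear` is a thin wrapper around a replication-padded convolution plus an optional ReLU. A small usage sketch, assuming the class above is in scope; the shapes are illustrative.
```python
import torch
conv = ConvNonlinear(input_size=2, features=8, conv_dim=2, kernel_size=3, dilation=1, bias=True, nonlinear="relu")
x = torch.randn(1, 2, 16, 16)
y = conv(x)
print(y.shape)  # torch.Size([1, 8, 16, 16]) -> replication padding preserves the spatial size
```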
#### File: models/rim/rim_block.py
```python
__author__ = "<NAME>"
from typing import Any, Tuple, Union
import torch
from mridc.collections.common.parts.fft import fft2c, ifft2c
from mridc.collections.common.parts.utils import complex_conj, complex_mul
from mridc.collections.reconstruction.models.rim.conv_layers import ConvNonlinear, ConvRNNStack
from mridc.collections.reconstruction.models.rim.rnn_cells import ConvGRUCell, ConvMGUCell, IndRNNCell
from mridc.collections.reconstruction.models.rim.utils import log_likelihood_gradient
class RIMBlock(torch.nn.Module):
"""RIMBlock is a block of Recurrent Inference Machines (RIMs)."""
def __init__(
self,
recurrent_layer=None,
conv_filters=None,
conv_kernels=None,
conv_dilations=None,
conv_bias=None,
recurrent_filters=None,
recurrent_kernels=None,
recurrent_dilations=None,
recurrent_bias=None,
depth: int = 2,
time_steps: int = 8,
conv_dim: int = 2,
no_dc: bool = False,
fft_type: str = "orthogonal",
dimensionality: int = 2,
):
"""
Args:
recurrent_layer: Type of recurrent layer.
conv_filters: Number of filters in the convolutional layers.
conv_kernels: Kernel size of the convolutional layers.
conv_dilations: Dilation of the convolutional layers.
conv_bias: Bias of the convolutional layers.
recurrent_filters: Number of filters in the recurrent layers.
recurrent_kernels: Kernel size of the recurrent layers.
recurrent_dilations: Dilation of the recurrent layers.
recurrent_bias: Bias of the recurrent layers.
depth: Number of layers in the block.
time_steps: Number of time steps in the block.
conv_dim: Dimension of the convolutional layers.
no_dc: If True, the DC component is removed from the input.
fft_type: Type of FFT.
dimensionality: Dimensionality of the input.
"""
super(RIMBlock, self).__init__()
self.input_size = depth * 2
self.time_steps = time_steps
self.layers = torch.nn.ModuleList()
for (
(conv_features, conv_k_size, conv_dilation, l_conv_bias, nonlinear),
(rnn_features, rnn_k_size, rnn_dilation, rnn_bias, rnn_type),
) in zip(
zip(conv_filters, conv_kernels, conv_dilations, conv_bias, ["relu", "relu", None]),
zip(
recurrent_filters,
recurrent_kernels,
recurrent_dilations,
recurrent_bias,
[recurrent_layer, recurrent_layer, None],
),
):
conv_layer = None
if conv_features != 0:
conv_layer = ConvNonlinear(
self.input_size,
conv_features,
conv_dim=conv_dim,
kernel_size=conv_k_size,
dilation=conv_dilation,
bias=l_conv_bias,
nonlinear=nonlinear,
)
self.input_size = conv_features
if rnn_features != 0 and rnn_type is not None:
if rnn_type.upper() == "GRU":
rnn_type = ConvGRUCell
elif rnn_type.upper() == "MGU":
rnn_type = ConvMGUCell
elif rnn_type.upper() == "INDRNN":
rnn_type = IndRNNCell
else:
raise ValueError("Please specify a proper recurrent layer type.")
rnn_layer = rnn_type(
self.input_size,
rnn_features,
conv_dim=conv_dim,
kernel_size=rnn_k_size,
dilation=rnn_dilation,
bias=rnn_bias,
)
self.input_size = rnn_features
self.layers.append(ConvRNNStack(conv_layer, rnn_layer))
self.final_layer = torch.nn.Sequential(conv_layer)
self.recurrent_filters = recurrent_filters
self.fft_type = fft_type
self.dimensionality = dimensionality
self.spatial_dims = [2, 3] # if self.dimensionality == 2 else [3, 4]
self.coil_dim = 1 # if self.dimensionality == 2 else 2
self.no_dc = no_dc
if not self.no_dc:
self.dc_weight = torch.nn.Parameter(torch.ones(1))
self.zero = torch.zeros(1, 1, 1, 1, 1)
def forward(
self,
pred: torch.Tensor,
masked_kspace: torch.Tensor,
sense: torch.Tensor,
mask: torch.Tensor,
eta: torch.Tensor = None,
hx: torch.Tensor = None,
sigma: float = 1.0,
keep_eta: bool = False,
) -> Tuple[Any, Union[list, torch.Tensor, None]]:
"""
Forward pass of the RIMBlock.
Args:
pred: torch.Tensor
masked_kspace: torch.Tensor
sense: torch.Tensor
mask: torch.Tensor
eta: torch.Tensor
hx: torch.Tensor
sigma: float
keep_eta: bool
Returns:
Reconstructed image and hidden states.
"""
if self.dimensionality == 3:
batch, slices = masked_kspace.shape[0], masked_kspace.shape[1]
# 2D pred.shape = [batch, coils, height, width, 2]
# 3D pred.shape = [batch, slices, coils, height, width, 2] -> [batch * slices, coils, height, width, 2]
pred = pred.reshape(
[pred.shape[0]*pred.shape[1], pred.shape[2], pred.shape[3], pred.shape[4], pred.shape[5]]
)
masked_kspace = masked_kspace.reshape(
[masked_kspace.shape[0]*masked_kspace.shape[1], masked_kspace.shape[2], masked_kspace.shape[3],
masked_kspace.shape[4], masked_kspace.shape[5]]
)
mask = mask.reshape(
[mask.shape[0]*mask.shape[1], mask.shape[2], mask.shape[3], mask.shape[4], mask.shape[5]]
)
sense = sense.reshape(
[sense.shape[0]*sense.shape[1], sense.shape[2], sense.shape[3], sense.shape[4], sense.shape[5]]
)
else:
batch = masked_kspace.shape[0]
slices = 1
if hx is None:
hx = [
masked_kspace.new_zeros((masked_kspace.size(0), f, *masked_kspace.size()[2:-1]))
for f in self.recurrent_filters
if f != 0
]
if isinstance(pred, list):
pred = pred[-1].detach()
if eta is None or eta.ndim < 3:
eta = (
pred
if keep_eta
else torch.sum(
complex_mul(ifft2c(pred, fft_type=self.fft_type, fft_dim=self.spatial_dims), complex_conj(sense)),
self.coil_dim,
)
)
etas = []
for _ in range(self.time_steps):
grad_eta = log_likelihood_gradient(
eta, masked_kspace, sense, mask, sigma=sigma, fft_type=self.fft_type
).contiguous()
if self.dimensionality == 3:
grad_eta = grad_eta.view([slices * batch, 4, grad_eta.shape[2], grad_eta.shape[3]]).permute(1, 0, 2, 3)
for h, convrnn in enumerate(self.layers):
hx[h] = convrnn(grad_eta, hx[h])
if self.dimensionality == 3:
hx[h] = hx[h].squeeze(0)
grad_eta = hx[h]
grad_eta = self.final_layer(grad_eta)
if self.dimensionality == 2:
grad_eta = grad_eta.permute(0, 2, 3, 1)
elif self.dimensionality == 3:
grad_eta = grad_eta.permute(1, 2, 3, 0)
for h in range(len(hx)):
hx[h] = hx[h].permute(1, 0, 2, 3)
eta = eta + grad_eta
etas.append(eta)
eta = etas
if self.no_dc:
return eta, None
soft_dc = torch.where(mask, pred - masked_kspace, self.zero.to(masked_kspace)) * self.dc_weight
current_kspace = [
masked_kspace - soft_dc - fft2c(complex_mul(e.unsqueeze(self.coil_dim + 1), sense), fft_type=self.fft_type,
fft_dim=self.spatial_dims) for e in eta
]
return current_kspace, None
```
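When `dimensionality == 3`, the block above folds the slice dimension into the batch dimension so the same 2D operators can be reused. A toy sketch of that reshape; the shapes are illustrative.
```python
import torch
batch, slices, coils, h, w = 2, 3, 4, 8, 8
pred = torch.randn(batch, slices, coils, h, w, 2)
# [batch, slices, coils, h, w, 2] -> [batch * slices, coils, h, w, 2]
pred_2d = pred.reshape(batch * slices, coils, h, w, 2)
print(pred_2d.shape)  # torch.Size([6, 4, 8, 8, 2])
```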
#### File: models/sigmanet/sensitivity_net.py
```python
__author__ = "<NAME>"
# Taken and adapted from:
# https://github.com/khammernik/sigmanet/blob/master/reconstruction/common/mytorch/models/sn.py
import numpy as np
import torch
def matrix_invert(xx, xy, yx, yy):
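"""Invert a 2x2 matrix given entrywise as [[xx, xy], [yx, yy]]; used to invert the complex (pseudo-)covariance."""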
det = xx * yy - xy * yx
return yy.div(det), -xy.div(det), -yx.div(det), xx.div(det)
class ComplexInstanceNorm(torch.nn.Module):
"""Motivated by 'Deep Complex Networks' (https://arxiv.org/pdf/1705.09792.pdf)"""
def __init__(self):
super(ComplexInstanceNorm, self).__init__()
self.mean = 0
self.cov_xx_half = 1 / np.sqrt(2)
self.cov_xy_half = 0
self.cov_yx_half = 0
self.cov_yy_half = 1 / np.sqrt(2)
def complex_instance_norm(self, x, eps=1e-5):
"""Operates on images x of size [nBatch, nSmaps, nFE, nPE, 2]"""
x_combined = torch.sum(x, dim=1, keepdim=True)
mean = x_combined.mean(dim=(1, 2, 3), keepdim=True)
x_m = x - mean
self.mean = mean
self.complex_pseudocovariance(x_m)
def complex_pseudocovariance(self, data):
"""Data variable hast to be already mean-free! Operates on images x of size [nBatch, nSmaps, nFE, nPE, 2]"""
if data.size(-1) != 2:
raise AssertionError
shape = data.shape
# compute number of elements
N = shape[2] * shape[3]
# separate real/imaginary channel
re, im = torch.unbind(data, dim=-1)
# dimensions are now length of original shape - 1 (because the real/imaginary channels are separated)
dim = list(range(1, len(shape) - 1))
# compute covariance entries. cxy = cyx
cxx = (re * re).sum(dim=dim, keepdim=True) / (N - 1)
cyy = (im * im).sum(dim=dim, keepdim=True) / (N - 1)
cxy = (re * im).sum(dim=dim, keepdim=True) / (N - 1)
# Eigenvalue decomposition C = V*S*inv(V)
# compute eigenvalues
s1 = (cxx + cyy) / 2 - torch.sqrt((cxx + cyy) ** 2 / 4 - cxx * cyy + cxy**2)
s2 = (cxx + cyy) / 2 + torch.sqrt((cxx + cyy) ** 2 / 4 - cxx * cyy + cxy**2)
# compute eigenvectors
v1x = s1 - cyy
v1y = cxy
v2x = s2 - cyy
v2y = cxy
# normalize eigenvectors
norm1 = torch.sqrt(torch.sum(v1x * v1x + v1y * v1y, dim=dim, keepdim=True))
norm2 = torch.sqrt(torch.sum(v2x * v2x + v2y * v2y, dim=dim, keepdim=True))
v1x = v1x.div(norm1)
v1y = v1y.div(norm1)
v2x = v2x.div(norm2)
v2y = v2y.div(norm2)
# now we need the sqrt of the covariance matrix.
# C^{-0.5} = V * sqrt(S) * inv(V)
det = v1x * v2y - v2x * v1y
s1 = torch.sqrt(s1).div(det)
s2 = torch.sqrt(s2).div(det)
self.cov_xx_half = v1x * v2y * s1 - v1y * v2x * s2
self.cov_yy_half = v1x * v2y * s2 - v1y * v2x * s1
self.cov_xy_half = v1x * v2x * (s2 - s1)
self.cov_yx_half = v1y * v2y * (s1 - s2)
def forward(self, input):
return self.normalize(input)
def set_normalization(self, input):
mean = torch.tensor([torch.mean(input).item()]).to(input)
self.complex_pseudocovariance(input - mean)
self.mean = mean.unsqueeze(1).unsqueeze(1).unsqueeze(1)
self.cov_xx_half = self.cov_xx_half.view(-1, 1, 1, 1)
self.cov_xy_half = self.cov_xy_half.view(-1, 1, 1, 1)
self.cov_yx_half = self.cov_yx_half.view(-1, 1, 1, 1)
self.cov_yy_half = self.cov_yy_half.view(-1, 1, 1, 1)
def normalize(self, x):
x_m = x - self.mean
re, im = torch.unbind(x_m, dim=-1)
cov_xx_half_inv, cov_xy_half_inv, cov_yx_half_inv, cov_yy_half_inv = matrix_invert(
self.cov_xx_half, self.cov_xy_half, self.cov_yx_half, self.cov_yy_half
)
x_norm_re = cov_xx_half_inv * re + cov_xy_half_inv * im
x_norm_im = cov_yx_half_inv * re + cov_yy_half_inv * im
img = torch.stack([x_norm_re, x_norm_im], dim=-1)
img = img.clamp(-6, 6)
return img
def unnormalize(self, x):
re, im = torch.unbind(x, dim=-1)
x_unnorm_re = self.cov_xx_half * re + self.cov_xy_half * im
x_unnorm_im = self.cov_yx_half * re + self.cov_yy_half * im
return torch.stack([x_unnorm_re, x_unnorm_im], dim=-1) + self.mean
class ComplexNormWrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
self.complex_instance_norm = ComplexInstanceNorm()
def forward(self, input):
# compute complex instance norm on sample of size [nBatch, nSmaps, nFE, nPE, 2]
self.complex_instance_norm.set_normalization(input)
output = self.complex_instance_norm.normalize(input)
# re-shape data from [nBatch, nSmaps, nFE, nPE, 2] to [nBatch*nSmaps, 2, nFE, nPE]
shp = output.shape
output = output.view(shp[0] * shp[1], *shp[2:]).permute(0, 3, 1, 2)
# apply denoising
output = self.model(output)
# re-shape data from [nBatch*nSmaps, 2, nFE, nPE]
# to [nBatch, nSmaps, nFE, nPE, 2]
output = output.permute(0, 2, 3, 1).view(*shp)
# unnormalize
output = self.complex_instance_norm.unnormalize(output)
return output
class SensitivityNetwork(torch.nn.Module):
"""Sensitivity network with data term based on forward and adjoint containing the sensitivity maps"""
def __init__(
self,
num_iter,
model,
datalayer,
shared_params=True,
save_space=False,
reset_cache=False,
):
super().__init__()
self.shared_params = shared_params
self.num_iter = 1 if self.shared_params else num_iter
self.num_iter_total = num_iter
self.is_trainable = [True] * num_iter
# setup the modules
self.gradR = torch.nn.ModuleList([ComplexNormWrapper(model) for _ in range(self.num_iter)])
self.gradD = torch.nn.ModuleList([datalayer for _ in range(self.num_iter)])
self.save_space = save_space
if self.save_space:
self.forward = self.forward_save_space
self.reset_cache = reset_cache
def forward(self, x, y, smaps, mask):
x_all = [x]
x_half_all = []
if self.shared_params:
num_iter = self.num_iter_total
else:
num_iter = min(np.where(self.is_trainable)[0][-1] + 1, self.num_iter)
for i in range(num_iter):
x_thalf = x - self.gradR[i % self.num_iter](x)
x = self.gradD[i % self.num_iter](x_thalf, y, smaps, mask)
x_all.append(x)
x_half_all.append(x_thalf)
return x_all[-1]
def forward_save_space(self, x, y, smaps, mask):
if self.shared_params:
num_iter = self.num_iter_total
else:
num_iter = min(np.where(self.is_trainable)[0][-1] + 1, self.num_iter)
for i in range(num_iter):
x_thalf = x - self.gradR[i % self.num_iter](x)
x = self.gradD[i % self.num_iter](x_thalf, y, smaps, mask)
# would run out of memory at test time
# if this is False for some cases
if self.reset_cache:
torch.cuda.empty_cache()
torch.backends.cuda.cufft_plan_cache.clear()
return x
def freeze(self, i):
"""freeze parameter of cascade i"""
for param in self.gradR[i].parameters():
param.requires_grad = False
self.is_trainable[i] = False
def unfreeze(self, i):
"""freeze parameter of cascade i"""
for param in self.gradR[i].parameters():
param.requires_grad = True
self.is_trainable[i] = True
def freeze_all(self):
"""freeze parameter of cascade i"""
for i in range(self.num_iter):
self.freeze(i)
def unfreeze_all(self):
"""freeze parameter of cascade i"""
for i in range(self.num_iter):
self.unfreeze(i)
def copy_params(self, src_i, trg_j):
"""copy i-th cascade net parameters to j-th cascade net parameters"""
src_params = self.gradR[src_i].parameters()
trg_params = self.gradR[trg_j].parameters()
for trg_param, src_param in zip(trg_params, src_params):
trg_param.data.copy_(src_param.data)
def stage_training_init(self):
self.freeze_all()
self.unfreeze(0)
print(self.is_trainable)
def stage_training_transition_i(self, copy=False):
if self.shared_params:
return
# if all unlocked, don't do anything
if not np.all(self.is_trainable):
for i in range(self.num_iter):
# if last cascade is reached, unlock all
if i == self.num_iter - 1:
self.unfreeze_all()
break
# freeze current i, unlock next. copy parameter if specified
if self.is_trainable[i]:
self.freeze(i)
self.unfreeze(i + 1)
if copy:
self.copy_params(i, i + 1)
break
```
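`ComplexInstanceNorm` uses only torch and numpy, so the normalize/unnormalize round trip can be checked directly. A hedged sketch, assuming the class above is in scope; the shapes are illustrative, and the round trip is exact only where the normalized values are not clamped to [-6, 6].
```python
import torch
norm = ComplexInstanceNorm()      # class defined above
x = torch.randn(2, 4, 16, 16, 2)  # [nBatch, nSmaps, nFE, nPE, 2]
norm.set_normalization(x)
x_norm = norm.normalize(x)
x_back = norm.unnormalize(x_norm)
# Typically True for well-scaled random data (no values hit the clamp).
print(torch.allclose(x, x_back, atol=1e-4))
```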
#### File: models/unet_base/unet_block.py
```python
__author__ = "<NAME>"
# Parts of the code have been taken from https://github.com/facebookresearch/fastMRI
import math
from typing import List, Tuple
import torch
class NormUnet(torch.nn.Module):
"""
Normalized U-Net model.
This is the same as a regular U-Net, but with normalization applied to the input before the U-Net.
This keeps the values more numerically stable during training.
"""
def __init__(
self,
chans: int,
num_pools: int,
in_chans: int = 2,
out_chans: int = 2,
drop_prob: float = 0.0,
padding_size: int = 15,
normalize: bool = True,
norm_groups: int = 2,
):
"""
Initialize the model.
Args:
chans : Number of output channels of the first convolution layer.
num_pools : Number of down-sampling and up-sampling layers.
in_chans : Number of channels in the input to the U-Net model.
out_chans : Number of channels in the output to the U-Net model.
drop_prob : Dropout probability.
padding_size: Size of the padding.
normalize: Whether to normalize the input.
norm_groups: Number of groups to use for group normalization.
"""
super().__init__()
self.unet = Unet(
in_chans=in_chans, out_chans=out_chans, chans=chans, num_pool_layers=num_pools, drop_prob=drop_prob
)
self.padding_size = padding_size
self.normalize = normalize
self.norm_groups = norm_groups
@staticmethod
def complex_to_chan_dim(x: torch.Tensor) -> torch.Tensor:
"""
Move the complex (real/imaginary) last dimension of the input into the channel dimension.
Args:
x: Input tensor.
Returns:
Tensor with real and imaginary parts stacked along the channel dimension.
"""
b, c, h, w, two = x.shape
if two != 2:
raise AssertionError
return x.permute(0, 4, 1, 2, 3).reshape(b, 2 * c, h, w)
@staticmethod
def chan_complex_to_last_dim(x: torch.Tensor) -> torch.Tensor:
"""
Move the stacked real/imaginary channels of the input back to a trailing complex dimension.
Args:
x: Input tensor.
Returns:
Tensor with the complex parts restored as the last dimension.
"""
b, c2, h, w = x.shape
if c2 % 2 != 0:
raise AssertionError
c = torch.div(c2, 2, rounding_mode="trunc")
return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1).contiguous()
def norm(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Normalize the input.
Args:
x: Input tensor.
Returns:
Tuple of mean, standard deviation, and normalized input.
"""
# group norm
b, c, h, w = x.shape
x = x.reshape(b, self.norm_groups, -1)
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
x = (x - mean) / std
x = x.reshape(b, c, h, w)
return x, mean, std
def unnorm(self, x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor) -> torch.Tensor:
"""
Unnormalize the input.
Args:
x: Input tensor.
mean: Mean of the input.
std: Standard deviation of the input.
Returns:
Unnormalized input.
"""
b, c, h, w = x.shape
input_data = x.reshape(b, self.norm_groups, -1)
return (input_data * std + mean).reshape(b, c, h, w)
def pad(self, x: torch.Tensor) -> Tuple[torch.Tensor, Tuple[List[int], List[int], int, int]]:
"""
Pad the input with zeros so its spatial dimensions are compatible with the U-Net down-sampling.
Args:
x: Input tensor.
Returns:
Padded input tensor and the padding.
"""
_, _, h, w = x.shape
w_mult = ((w - 1) | self.padding_size) + 1
h_mult = ((h - 1) | self.padding_size) + 1
w_pad = [math.floor((w_mult - w) / 2), math.ceil((w_mult - w) / 2)]
h_pad = [math.floor((h_mult - h) / 2), math.ceil((h_mult - h) / 2)]
# TODO: fix this type when PyTorch fixes theirs
# the documentation lies - this actually takes a list
# https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py#L3457
# https://github.com/pytorch/pytorch/pull/16949
x = torch.nn.functional.pad(x, w_pad + h_pad)
return x, (h_pad, w_pad, h_mult, w_mult)
@staticmethod
def unpad(x: torch.Tensor, h_pad: List[int], w_pad: List[int], h_mult: int, w_mult: int) -> torch.Tensor:
"""
Unpad the input.
Args:
x: Input tensor.
h_pad: Height padding.
w_pad: Width padding.
h_mult: Padded height.
w_mult: Padded width.
Returns:
Unpadded input tensor.
"""
return x[..., h_pad[0] : h_mult - h_pad[1], w_pad[0] : w_mult - w_pad[1]]
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the network.
Args:
x: Input tensor.
Returns:
Normalized UNet output tensor.
"""
iscomplex = False
if x.shape[-1] == 2:
x = self.complex_to_chan_dim(x)
iscomplex = True
mean = 1.0
std = 1.0
if self.normalize:
x, mean, std = self.norm(x)
x, pad_sizes = self.pad(x)
x = self.unet(x)
x = self.unpad(x, *pad_sizes)
if self.normalize:
x = self.unnorm(x, mean, std)
if iscomplex:
x = self.chan_complex_to_last_dim(x)
return x
class Unet(torch.nn.Module):
"""
PyTorch implementation of a U-Net model.
<NAME>, <NAME>, and <NAME>. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical
image computing and computer-assisted intervention, pages 234–241.
Springer, 2015.
"""
def __init__(
self, in_chans: int, out_chans: int, chans: int = 32, num_pool_layers: int = 4, drop_prob: float = 0.0
):
"""
Args:
in_chans: Number of channels in the input to the U-Net model.
out_chans: Number of channels in the output to the U-Net model.
chans: Number of output channels of the first convolution layer.
num_pool_layers: Number of down-sampling and up-sampling layers.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
self.drop_prob = drop_prob
self.down_sample_layers = torch.nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
ch = chans
for _ in range(num_pool_layers - 1):
self.down_sample_layers.append(ConvBlock(ch, ch * 2, drop_prob))
ch *= 2
self.conv = ConvBlock(ch, ch * 2, drop_prob)
self.up_conv = torch.nn.ModuleList()
self.up_transpose_conv = torch.nn.ModuleList()
for _ in range(num_pool_layers - 1):
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(ConvBlock(ch * 2, ch, drop_prob))
ch //= 2
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(
torch.nn.Sequential(
ConvBlock(ch * 2, ch, drop_prob), torch.nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1)
)
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
stack = []
output = image
# apply down-sampling layers
for layer in self.down_sample_layers:
output = layer(output)
stack.append(output)
output = torch.nn.functional.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# apply up-sampling layers
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# reflect pad on the right/bottom if needed to handle odd input dimensions
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # padding bottom
if torch.sum(torch.tensor(padding)) != 0:
output = torch.nn.functional.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output
class ConvBlock(torch.nn.Module):
"""
A Convolutional Block that consists of two convolution layers each followed by
instance normalization, LeakyReLU activation and dropout.
"""
def __init__(self, in_chans: int, out_chans: int, drop_prob: float):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
drop_prob: Dropout probability.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.drop_prob = drop_prob
self.layers = torch.nn.Sequential(
torch.nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
torch.nn.InstanceNorm2d(out_chans),
torch.nn.LeakyReLU(negative_slope=0.2, inplace=True),
torch.nn.Dropout2d(drop_prob),
torch.nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
torch.nn.InstanceNorm2d(out_chans),
torch.nn.LeakyReLU(negative_slope=0.2, inplace=True),
torch.nn.Dropout2d(drop_prob),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H, W)`.
"""
return self.layers(image)
class TransposeConvBlock(torch.nn.Module):
"""
A Transpose Convolutional Block that consists of one convolution transpose
layer followed by instance normalization and LeakyReLU activation.
"""
def __init__(self, in_chans: int, out_chans: int):
"""
Args:
in_chans: Number of channels in the input.
out_chans: Number of channels in the output.
"""
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.layers = torch.nn.Sequential(
torch.nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
torch.nn.InstanceNorm2d(out_chans),
torch.nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Args:
image: Input 4D tensor of shape `(N, in_chans, H, W)`.
Returns:
Output tensor of shape `(N, out_chans, H*2, W*2)`.
"""
return self.layers(image)
```
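`NormUnet` and its building blocks above are plain torch modules, so a shape round trip can be checked directly. A hedged usage sketch, assuming the classes above are in scope; the channel counts and spatial size are illustrative.
```python
import torch
model = NormUnet(chans=8, num_pools=2)  # classes defined above
x = torch.randn(1, 1, 32, 32, 2)        # single coil, complex stored as a trailing dimension of size 2
y = model(x)
print(y.shape)  # torch.Size([1, 1, 32, 32, 2]) -> normalization, padding and unpadding are undone internally
```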
#### File: reconstruction/models/vn.py
```python
__author__ = "<NAME>"
from abc import ABC
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.nn import L1Loss
from mridc.collections.common.losses.ssim import SSIMLoss
from mridc.collections.common.parts.fft import ifft2c
from mridc.collections.common.parts.utils import coil_combination
from mridc.collections.reconstruction.models.base import BaseMRIReconstructionModel, BaseSensitivityModel
from mridc.collections.reconstruction.models.unet_base.unet_block import NormUnet
from mridc.collections.reconstruction.models.varnet.vn_block import VarNetBlock
from mridc.collections.reconstruction.parts.utils import center_crop_to_smallest
from mridc.core.classes.common import typecheck
__all__ = ["VarNet"]
class VarNet(BaseMRIReconstructionModel, ABC):
"""
End-to-end Variational Network (VN) model implementation as presented in [1]_.
References
----------
.. [1] <NAME>. et al. (2020) ‘End-to-End Variational Networks for Accelerated MRI Reconstruction’.
Available at: https://github.com/facebookresearch/fastMRI.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
cfg_dict = OmegaConf.to_container(cfg, resolve=True)
self.no_dc = cfg_dict.get("no_dc")
self.fft_type = cfg_dict.get("fft_type")
self.num_cascades = cfg_dict.get("num_cascades")
# Cascades of VN blocks
self.cascades = torch.nn.ModuleList(
[
VarNetBlock(
NormUnet(
chans=cfg_dict.get("channels"),
num_pools=cfg_dict.get("pooling_layers"),
padding_size=cfg_dict.get("padding_size"),
normalize=cfg_dict.get("normalize"),
),
fft_type=self.fft_type,
no_dc=self.no_dc,
)
for _ in range(self.num_cascades)
]
)
self.output_type = cfg_dict.get("output_type")
# Initialize the sensitivity network if use_sens_net is True
self.use_sens_net = cfg_dict.get("use_sens_net")
if self.use_sens_net:
self.sens_net = BaseSensitivityModel(
cfg_dict.get("sens_chans"),
cfg_dict.get("sens_pools"),
fft_type=self.fft_type,
mask_type=cfg_dict.get("sens_mask_type"),
normalize=cfg_dict.get("sens_normalize"),
)
# initialize weights if not using pretrained vn
# TODO if not cfg_dict.get("pretrained", False)
self.train_loss_fn = SSIMLoss() if cfg_dict.get("train_loss_fn") == "ssim" else L1Loss()
self.eval_loss_fn = SSIMLoss() if cfg_dict.get("eval_loss_fn") == "ssim" else L1Loss()
self.dc_weight = torch.nn.Parameter(torch.ones(1))
self.accumulate_estimates = False
@typecheck()
def forward(
self,
y: torch.Tensor,
sensitivity_maps: torch.Tensor,
mask: torch.Tensor,
init_pred: torch.Tensor,
target: torch.Tensor,
) -> torch.Tensor:
"""
Forward pass of the network.
Args:
y: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], masked kspace data
sensitivity_maps: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], coil sensitivity maps
mask: torch.Tensor, shape [1, 1, n_x, n_y, 1], sampling mask
init_pred: torch.Tensor, shape [batch_size, n_x, n_y, 2], initial guess for pred
target: torch.Tensor, shape [batch_size, n_x, n_y, 2], target data
Returns:
Final prediction of the network.
"""
sensitivity_maps = self.sens_net(y, mask) if self.use_sens_net else sensitivity_maps
estimation = y.clone()
for cascade in self.cascades:
# Forward pass through the cascades
estimation = cascade(estimation, y, sensitivity_maps, mask)
estimation = ifft2c(estimation, fft_type=self.fft_type)
estimation = coil_combination(estimation, sensitivity_maps, method=self.output_type, dim=1)
estimation = torch.view_as_complex(estimation)
_, estimation = center_crop_to_smallest(target, estimation)
return estimation
```
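The constructor above only reads a handful of keys from the OmegaConf config. A hedged sketch of a minimal config containing exactly the keys accessed by `__init__`; the values are illustrative placeholders, not the project's defaults, and instantiating the model additionally requires the mridc base classes and a Trainer.
```python
from omegaconf import OmegaConf
# Keys mirror the cfg_dict.get(...) calls in VarNet.__init__; the values are only placeholders.
cfg = OmegaConf.create(
    {
        "num_cascades": 8,
        "channels": 18,
        "pooling_layers": 4,
        "padding_size": 11,
        "normalize": True,
        "no_dc": False,
        "fft_type": "orthogonal",
        "output_type": "SENSE",
        "use_sens_net": False,
        "train_loss_fn": "ssim",
        "eval_loss_fn": "ssim",
    }
)
# model = VarNet(cfg)  # would also need a Trainer and a working mridc installation
```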
#### File: reconstruction/models/zf.py
```python
__author__ = "<NAME>"
from abc import ABC
from typing import Any, Dict, Tuple, Union
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from mridc.collections.common.parts.fft import ifft2c
from mridc.collections.common.parts.utils import check_stacked_complex, coil_combination
from mridc.collections.reconstruction.models.base import BaseMRIReconstructionModel, BaseSensitivityModel
from mridc.collections.reconstruction.parts.utils import center_crop_to_smallest
from mridc.core.classes.common import typecheck
__all__ = ["ZF"]
class ZF(BaseMRIReconstructionModel, ABC):
"""
Zero-Filled reconstruction using either root-sum-of-squares (RSS) or SENSE (SENSitivity Encoding) [1].
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>. SENSE: Sensitivity encoding for fast MRI.
Magn Reson Med 1999; 42:952-962.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
# init superclass
super().__init__(cfg=cfg, trainer=trainer)
zf_cfg_dict = OmegaConf.to_container(cfg, resolve=True)
self.zf_method = zf_cfg_dict.get("zf_method")
self.fft_type = zf_cfg_dict.get("fft_type")
# Initialize the sensitivity network if use_sens_net is True
self.use_sens_net = zf_cfg_dict.get("use_sens_net")
if self.use_sens_net:
self.sens_net = BaseSensitivityModel(
zf_cfg_dict.get("sens_chans"),
zf_cfg_dict.get("sens_pools"),
fft_type=self.fft_type,
mask_type=zf_cfg_dict.get("sens_mask_type"),
normalize=zf_cfg_dict.get("sens_normalize"),
)
@staticmethod
def process_inputs(y, mask):
"""Process the inputs to the network."""
if isinstance(y, list):
r = np.random.randint(len(y))
y = y[r]
mask = mask[r]
else:
r = 0
return y, mask, r
@typecheck()
def forward(
self,
y: torch.Tensor,
sensitivity_maps: torch.Tensor,
mask: torch.Tensor,
target: torch.Tensor = None,
) -> Union[list, Any]:
"""
Forward pass of ZF.
Args:
y: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], masked kspace data
sensitivity_maps: torch.Tensor, shape [batch_size, n_coils, n_x, n_y, 2], coil sensitivity maps
mask: torch.Tensor, shape [1, 1, n_x, n_y, 1], sampling mask
target: torch.Tensor, shape [batch_size, n_x, n_y, 2], target data
Returns:
Final estimation of the network.
"""
sensitivity_maps = self.sens_net(y, mask) if self.use_sens_net else sensitivity_maps
pred = coil_combination(
ifft2c(y, fft_type=self.fft_type), sensitivity_maps, method=self.zf_method.upper(), dim=1
)
pred = check_stacked_complex(pred)
_, pred = center_crop_to_smallest(target, pred)
return pred
def test_step(self, batch: Dict[float, torch.Tensor], batch_idx: int) -> Tuple[str, int, torch.Tensor]:
"""Test step for ZF."""
y, sensitivity_maps, mask, init_pred, target, fname, slice_num, _ = batch
y, mask, _ = self.process_inputs(y, mask)
prediction = self.forward(y, sensitivity_maps, mask, target)
slice_num = int(slice_num)
name = str(fname[0]) # type: ignore
key = f"{name}_images_idx_{slice_num}" # type: ignore
output = torch.abs(prediction).detach().cpu()
target = torch.abs(target).detach().cpu()
output = output / output.max() # type: ignore
target = target / target.max() # type: ignore
error = torch.abs(target - output)
self.log_image(f"{key}/target", target)
self.log_image(f"{key}/reconstruction", output)
self.log_image(f"{key}/error", error)
return name, slice_num, prediction.detach().cpu().numpy()
```
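`process_inputs` simply picks one entry at random when the dataloader yields a list of differently undersampled k-spaces. A standalone sketch of that behaviour; the function body is re-stated so the snippet runs without the surrounding class, and the shapes are illustrative.
```python
import numpy as np
import torch
def process_inputs(y, mask):
    # Re-stated from ZF.process_inputs above.
    if isinstance(y, list):
        r = np.random.randint(len(y))
        y = y[r]
        mask = mask[r]
    else:
        r = 0
    return y, mask, r
ys = [torch.randn(1, 4, 8, 8, 2), torch.randn(1, 4, 8, 8, 2)]  # e.g. two acceleration factors
masks = [torch.rand(1, 1, 8, 8, 1), torch.rand(1, 1, 8, 8, 1)]
y, mask, r = process_inputs(ys, masks)
print(r, y.shape, mask.shape)
```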
#### File: core/classes/module.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NVIDIA/NeMo/blob/main/nemo/core/classes/module.py
from abc import ABC
from contextlib import contextmanager
from torch.nn import Module
__all__ = ["NeuralModule"]
from mridc.core.classes.common import FileIO, Serialization, Typing
class NeuralModule(Module, Typing, Serialization, FileIO, ABC):
"""Abstract class offering interface shared between all PyTorch Neural Modules."""
@property
def num_weights(self):
"""Utility property that returns the total number of parameters of NeuralModule."""
return sum(p.numel() for p in self.parameters() if p.requires_grad)
@staticmethod
def input_example(max_batch=None, max_dim=None):
"""
Override this method if random inputs won't work
Args:
max_batch: Maximum batch size to generate
max_dim: Maximum dimension to generate
Returns:
A tuple sample of valid input data.
"""
return None
def freeze(self) -> None:
r"""Freeze all params for inference."""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""Unfreeze all parameters for training."""
for param in self.parameters():
param.requires_grad = True
self.train()
@contextmanager
def as_frozen(self):
"""Context manager which temporarily freezes a module, yields control and finally unfreezes the module."""
self.freeze()
try:
yield
finally:
self.unfreeze()
```
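The freeze/unfreeze/as_frozen pattern above does not depend on mridc and can be reproduced on any torch module. A standalone sketch of the same context-manager idea on a hypothetical tiny module.
```python
from contextlib import contextmanager
import torch
class TinyModule(torch.nn.Module):
    """Stand-in module used only to demonstrate the freezing pattern."""
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)
    def freeze(self):
        for p in self.parameters():
            p.requires_grad = False
        self.eval()
    def unfreeze(self):
        for p in self.parameters():
            p.requires_grad = True
        self.train()
    @contextmanager
    def as_frozen(self):
        self.freeze()
        try:
            yield
        finally:
            self.unfreeze()
m = TinyModule()
with m.as_frozen():
    print(all(not p.requires_grad for p in m.parameters()))  # True inside the context
print(all(p.requires_grad for p in m.parameters()))          # True again afterwards
```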
#### File: core/neural_types/neural_type.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NVIDIA/NeMo/blob/main/nemo/core/neural_types/neural_type.py
from typing import Optional, Tuple
__all__ = ["NeuralType", "NeuralTypeError", "NeuralPortNameMismatchError", "NeuralPortNmTensorMismatchError"]
from mridc.core.neural_types.axes import AxisKind, AxisType
from mridc.core.neural_types.comparison import NeuralTypeComparisonResult
from mridc.core.neural_types.elements import ElementType, VoidType
class NeuralType:
"""This is the main class which would represent neural type concept.
It is used to represent *the types* of inputs and outputs.
Args:
axes (Optional[Tuple]): a tuple of AxisTypes objects representing the semantics of what varying each axis means
You can use a short, string-based form here. For example: ('B', 'C', 'H', 'W') would correspond to an NCHW
format frequently used in computer vision. ('B', 'T', 'D') is frequently used for signal processing and
means [batch, time, dimension/channel].
elements_type (ElementType): an instance of ElementType class representing the semantics of what is stored
inside the tensor. For example: logits (LogitsType), log probabilities (LogprobType), etc.
optional (bool): By default, this is false. If set to True, it means that the input to the port of this
type can be optional.
"""
def __str__(self):
if self.axes is not None:
return f"axes: {self.axes}; elements_type: {self.elements_type.__class__.__name__}"
return f"axes: None; elements_type: {self.elements_type.__class__.__name__}"
def __init__(self, axes: Optional[Tuple] = None, elements_type: ElementType = VoidType(), optional=False):
if not isinstance(elements_type, ElementType):
raise ValueError(
"elements_type of NeuralType must be an instance of a class derived from ElementType. "
"Did you pass a class instead?"
)
self.elements_type = elements_type
if axes is not None:
NeuralType.__check_sanity(axes)
axes_list = []
for axis in axes:
if isinstance(axis, str):
axes_list.append(AxisType(AxisKind.from_str(axis), None))
elif isinstance(axis, AxisType):
axes_list.append(axis)
else:
raise ValueError("axis type must be either str or AxisType instance")
self.axes = tuple(axes_list) # type: ignore
else:
self.axes = None # type: ignore
self.optional = optional
def compare(self, second) -> NeuralTypeComparisonResult:
"""
Performs neural type comparison of self with second. When you chain two modules' inputs/outputs via __call__
method, this comparison will be called to ensure neural type compatibility.
"""
# First, handle dimensionality
axes_a = self.axes
axes_b = second.axes
# "Big void" type
if isinstance(self.elements_type, VoidType) and self.axes is None:
return NeuralTypeComparisonResult.SAME
if self.axes is None:
if second.axes is None:
return self.elements_type.compare(second.elements_type)
return NeuralTypeComparisonResult.INCOMPATIBLE
dimensions_pass = NeuralType.__compare_axes(axes_a, axes_b) # type: ignore
element_comparison_result = self.elements_type.compare(second.elements_type)
# SAME DIMS
if dimensions_pass == 0:
return element_comparison_result
# TRANSPOSE_SAME DIMS
if dimensions_pass == 1 and element_comparison_result == NeuralTypeComparisonResult.SAME:
return NeuralTypeComparisonResult.TRANSPOSE_SAME
if (
dimensions_pass == 1
or dimensions_pass == 2
and element_comparison_result != NeuralTypeComparisonResult.SAME
):
return NeuralTypeComparisonResult.INCOMPATIBLE
if dimensions_pass == 2:
return NeuralTypeComparisonResult.DIM_INCOMPATIBLE
return NeuralTypeComparisonResult.INCOMPATIBLE
def compare_and_raise_error(self, parent_type_name, port_name, second_object):
"""Method compares definition of one type with another and raises an error if not compatible."""
type_compatibility = self.compare(second_object)
if type_compatibility not in (NeuralTypeComparisonResult.SAME, NeuralTypeComparisonResult.GREATER):
raise NeuralPortNmTensorMismatchError(
parent_type_name, port_name, str(self), str(second_object.ntype), type_compatibility
)
def __eq__(self, other):
"""Checks if two NeuralTypes are equal."""
return self.compare(other) if isinstance(other, NeuralType) else False
@staticmethod
def __check_sanity(axes):
"""Check that list come before any tensor dimension"""
are_strings = True
for axis in axes:
if not isinstance(axis, str):
are_strings = False
if isinstance(axis, str) and not are_strings:
raise ValueError("Either use full class names or all strings")
if are_strings:
return
checks_passed = True
saw_tensor_dim = False
for axis in axes:
if not axis.is_list:
saw_tensor_dim = True
            elif saw_tensor_dim:  # a list dim appears after a tensor dim
checks_passed = False
if not checks_passed:
raise ValueError(
"You have list dimension after Tensor dimension. All list dimensions must preceded Tensor dimensions"
)
@staticmethod
def __compare_axes(axes_a, axes_b) -> int:
"""
Compares axes_a and axes_b
Args:
axes_a: first axes tuple
axes_b: second axes tuple
Returns:
0 - if they are exactly the same
1 - if they are "TRANSPOSE_SAME"
            2 - if they are "DIM_INCOMPATIBLE"
3 - if they are different
"""
if axes_a is None and axes_b is None:
return 0
if axes_a is None:
return 3
if axes_b is None:
return 3
if len(axes_a) != len(axes_b):
return 3
# After these ifs we know that len(axes_a) == len(axes_b)
same = True
kinds_a = {}
kinds_b = {}
for axis_a, axis_b in zip(axes_a, axes_b):
kinds_a[axis_a.kind] = axis_a.size
kinds_b[axis_b.kind] = axis_b.size
if axis_a.kind == AxisKind.Any:
same = True
elif (
axis_a.kind != axis_b.kind
or axis_a.is_list != axis_b.is_list
or (axis_a.size != axis_b.size and axis_a.size is not None)
):
same = False
if same:
return 0
# can be TRANSPOSE_SAME, DIM_INCOMPATIBLE
if kinds_a.keys() == kinds_b.keys():
return next((2 for key, value in kinds_a.items() if kinds_b[key] != value), 1)
return 3
def __repr__(self):
"""Returns string representation of NeuralType."""
axes = str(self.axes) if self.axes is not None else "None"
if self.elements_type is not None:
element_type = repr(self.elements_type)
else:
element_type = "None"
data = f"axis={axes}, element_type={element_type}"
if self.optional:
data = f"{data}, optional={self.optional}"
return f"{self.__class__.__name__}({data})"
class NeuralTypeError(Exception):
"""Base class for neural type related exceptions."""
class NeuralPortNameMismatchError(NeuralTypeError):
"""Exception raised when neural module is called with incorrect port names."""
def __init__(self, input_port_name):
super().__init__()
self.message = "Wrong input port name: {0}".format(input_port_name)
class NeuralPortNmTensorMismatchError(NeuralTypeError):
"""Exception raised when a port is fed with a NmTensor of incompatible type."""
def __init__(self, class_name, port_name, first_type, second_type, type_compatibility):
super().__init__()
self.message = "\nIn {}. \nPort: {} and a NmTensor it was fed are \n".format(
class_name, port_name
) + "of incompatible neural types:\n\n{} \n\n and \n\n{}".format(first_type, second_type)
self.message += "\n\nType comparison result: {}".format(type_compatibility)
```
#### File: core/optim/adafactor.py
```python
__author__ = "<NAME>"
# Taken and adapted from:
# https://github.com/wdika/NeMo/blob/9d095ff261319301e4711edf7530a6bb7cf6c8b6/nemo/core/optim/adafactor.py
import math
import torch
from torch.optim.optimizer import Optimizer
__all__ = ["Adafactor"]
class Adafactor(Optimizer):
"""Implements Adafactor algorithm.
This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
(see https://arxiv.org/abs/1804.04235)
Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step*
and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False`
and `relative_step=False`.
Args:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): external learning rate (default: None)
eps (tuple[float, float]): regularization constants for square gradient
and parameter scale respectively (default: (1e-30, 1e-3))
clip_threshold (float): threshold of root mean square of
final gradient update (default: 1.0)
decay_rate (float): coefficient used to compute running averages of square
gradient (default: -0.8)
beta1 (float): coefficient used for computing running averages of gradient
(default: None)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_parameter (bool): if True, learning rate is scaled by root mean square of
parameter (default: True)
relative_step (bool): if True, time-dependent learning rate is computed
instead of external learning rate (default: True)
warmup_init (bool): time-dependent learning rate computation depends on
whether warm-up initialization is being used (default: False)
"""
def __init__(
self,
params,
lr=None,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
scale_parameter=True,
relative_step=True,
warmup_init=False,
min_step=1e-2,
):
if lr is not None and relative_step:
raise ValueError("Cannot combine manual lr and relative_step options")
if warmup_init and not relative_step:
raise ValueError("warmup_init requires relative_step=True")
self.min_step = min_step
defaults = dict(
lr=lr,
eps=eps,
clip_threshold=clip_threshold,
decay_rate=decay_rate,
beta1=beta1,
weight_decay=weight_decay,
scale_parameter=scale_parameter,
relative_step=relative_step,
warmup_init=warmup_init,
min_step=min_step,
)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
"""Whether optimizer supports memory efficient fp16"""
return True
@property
def supports_flat_params(self):
"""Whether the optimizer supports flat parameters."""
return False
def _get_lr(self, param_group, param_state):
"""Returns the learning rate for the current layer."""
rel_step_sz = param_group["lr"]
if param_group["relative_step"]:
min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else self.min_step
rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
param_scale = 1.0
if param_group["scale_parameter"]:
param_scale = max(param_group["eps"][1], param_state["RMS"])
return param_scale * rel_step_sz
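    # A sketch of the formula implemented by _get_lr above (t = param_state["step"]):
    #   rel_step = min(min_step, 1 / sqrt(t)),  min_step = 1e-6 * t if warmup_init else self.min_step
    #   lr       = max(eps[1], RMS(p)) * rel_step   if scale_parameter else rel_step
    # With relative_step=False the externally supplied lr is used in place of rel_step.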
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model and returns the loss.
"""
loss = closure() if closure is not None else None
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError("Adafactor does not support sparse gradients.")
state = self.state[p]
grad_shape = grad.shape
                factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state["step"] = 0
if use_first_moment:
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(grad)
if factored:
state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
else:
state["exp_avg_sq"] = torch.zeros_like(grad)
state["RMS"] = 0
else:
if use_first_moment:
state["exp_avg"] = state["exp_avg"].to(grad)
if factored:
state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
else:
state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state["step"] += 1
state["RMS"] = _rms(p_data_fp32)
group["lr"] = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
update = (grad**2) + group["eps"][0]
if factored:
exp_avg_sq_row = state["exp_avg_sq_row"]
exp_avg_sq_col = state["exp_avg_sq_col"]
exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)
exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)
# Approximation of exponential moving average of square of gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
update.mul_(grad)
else:
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
update = exp_avg_sq.rsqrt().mul_(grad)
                update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
update.mul_(group["lr"])
if use_first_moment:
exp_avg = state["exp_avg"]
exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"])
update = exp_avg
if group["weight_decay"] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group["weight_decay"] * group["lr"])
p_data_fp32.add_(-update)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
@staticmethod
def _get_options(param_group, param_shape):
"""Returns the options for the current layer."""
factored = len(param_shape) >= 2
use_first_moment = param_group["beta1"] is not None
return factored, use_first_moment
@staticmethod
def _rms(tensor):
"""Compute the root mean square of a tensor."""
return tensor.norm(2) / (tensor.numel() ** 0.5)
@staticmethod
def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
"""
Compute the square of the gradient, but approximate the sqrt using the exponential moving average of the
squared gradient.
"""
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
return torch.mul(r_factor, c_factor)
```
#### File: mridc/utils/get_rank.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NVIDIA/NeMo/blob/main/nemo/utils/get_rank.py
from mridc.utils.env_var_parsing import get_envint
def is_global_rank_zero():
"""Helper function to determine if the current process is global_rank 0 (the main process)"""
    # Try to get the PyTorch RANK env var. RANK is set by torch.distributed.launch.
rank = get_envint("RANK", None)
if rank is not None:
return rank == 0
    # Try to get the SLURM global rank env var. SLURM_PROCID is set by SLURM.
slurm_rank = get_envint("SLURM_PROCID", None)
if slurm_rank is not None:
return slurm_rank == 0
    # If neither the PyTorch nor the SLURM env vars are set, check the NODE_RANK/GROUP_RANK and
    # LOCAL_RANK env vars, and assume the global rank is zero if they are undefined.
node_rank = get_envint("NODE_RANK", get_envint("GROUP_RANK", 0))
local_rank = get_envint("LOCAL_RANK", 0)
return node_rank == 0 and local_rank == 0
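# Illustrative usage (a sketch; the environment variables shown are the ones queried above):
#
#   RANK=0 python train.py         -> is_global_rank_zero() == True
#   SLURM_PROCID=3 python train.py -> is_global_rank_zero() == False
#
#   if is_global_rank_zero():
#       print("only the main process executes this")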
```
#### File: mridc/utils/metaclasses.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NVIDIA/NeMo/blob/main/nemo/utils/metaclasses.py
import threading
from typing import Any, Dict
class Singleton(type):
"""Implementation of a generic, tread-safe singleton meta-class.
Can be used as meta-class, i.e. will create
"""
    # Dictionary of instances - one per class.
__instances: Dict[Any, Any] = {}
# Lock used for accessing the instance.
__lock = threading.Lock()
def __call__(cls, *args, **kwargs):
"""Returns singleton instance. A thread safe implementation."""
if cls not in cls.__instances:
# Enter critical section.
with cls.__lock:
# Check once again.
if cls not in cls.__instances:
# Create a new object instance - one per class.
cls.__instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
# Return the instance.
return cls.__instances[cls]
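# Illustrative usage (a sketch; ``GlobalConfig`` is a hypothetical class name):
#
#   class GlobalConfig(metaclass=Singleton):
#       def __init__(self):
#           self.value = 42
#
#   assert GlobalConfig() is GlobalConfig()  # both calls return the same instance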
```
#### File: reconstruction/models/test_didn.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NKI-AI/direct/blob/main/tests/tests_nn/test_didn.py
# Copyright (c) DIRECT Contributors
import pytest
import torch
from mridc.collections.reconstruction.models.didn.didn import DIDN
def create_input(shape):
"""Create a random input tensor."""
return torch.rand(shape).float()
@pytest.mark.parametrize(
"shape",
[
[3, 2, 32, 32],
[3, 2, 16, 16],
],
)
@pytest.mark.parametrize(
"out_channels",
[3, 5],
)
@pytest.mark.parametrize(
"hidden_channels",
[16, 8],
)
@pytest.mark.parametrize(
"n_dubs",
[3, 4],
)
@pytest.mark.parametrize(
"num_convs_recon",
[3, 4],
)
@pytest.mark.parametrize(
"skip",
[True, False],
)
def test_didn(shape, out_channels, hidden_channels, n_dubs, num_convs_recon, skip):
"""
Test the DIDN
Args:
shape (): shape of the input
out_channels (): number of output channels
hidden_channels (): number of hidden channels
n_dubs (): number of dubs
num_convs_recon (): number of convolutions in the reconstruction network
skip (): whether to use skip connections or not
Returns:
None
"""
model = DIDN(shape[1], out_channels, hidden_channels, n_dubs, num_convs_recon, skip)
data = create_input(shape).cpu()
out = model(data)
if list(out.shape) != [shape[0]] + [out_channels] + shape[2:]:
raise AssertionError
```
#### File: reconstruction/models/test_jointicnet.py
```python
__author__ = "<NAME>"
import pytest
import torch
from omegaconf import OmegaConf
from mridc.collections.reconstruction.data.subsample import RandomMaskFunc
from mridc.collections.reconstruction.models.jointicnet import JointICNet
from mridc.collections.reconstruction.parts import transforms
def create_input(shape):
"""Create a random input tensor."""
return torch.rand(shape).float()
@pytest.mark.parametrize(
"shape, cfg, center_fractions, accelerations",
[
(
[1, 3, 32, 16, 2],
{
"num_iter": 2,
"kspace_unet_num_filters": 4,
"kspace_unet_num_pool_layers": 2,
"kspace_unet_dropout_probability": 0.0,
"kspace_unet_padding_size": 11,
"kspace_unet_normalize": True,
"imspace_unet_num_filters": 4,
"imspace_unet_num_pool_layers": 2,
"imspace_unet_dropout_probability": 0.0,
"imspace_unet_padding_size": 11,
"imspace_unet_normalize": True,
"sens_unet_num_filters": 4,
"sens_unet_num_pool_layers": 2,
"sens_unet_dropout_probability": 0.0,
"sens_unet_padding_size": 11,
"sens_unet_normalize": True,
},
[0.08],
[4],
),
(
[1, 3, 32, 16, 2],
{
"num_iter": 4,
"kspace_unet_num_filters": 16,
"kspace_unet_num_pool_layers": 4,
"kspace_unet_dropout_probability": 0.05,
"kspace_unet_padding_size": 15,
"kspace_unet_normalize": False,
"imspace_unet_num_filters": 16,
"imspace_unet_num_pool_layers": 4,
"imspace_unet_dropout_probability": 0.05,
"imspace_unet_padding_size": 11,
"imspace_unet_normalize": False,
"sens_unet_num_filters": 16,
"sens_unet_num_pool_layers": 4,
"sens_unet_dropout_probability": 0.05,
"sens_unet_padding_size": 15,
"sens_unet_normalize": False,
},
[0.08],
[4],
),
],
)
def test_jointicnet(shape, cfg, center_fractions, accelerations):
"""
Test JointICNet
Args:
shape (): shape of the input
cfg (): configuration
center_fractions (): center fractions
accelerations (): accelerations
Returns:
None
"""
mask_func = RandomMaskFunc(center_fractions, accelerations)
x = create_input(shape)
outputs, masks = [], []
for i in range(x.shape[0]):
output, mask, _ = transforms.apply_mask(x[i : i + 1], mask_func, seed=123)
outputs.append(output)
masks.append(mask)
output = torch.cat(outputs)
mask = torch.cat(masks)
cfg = OmegaConf.create(cfg)
cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True))
jointicnet = JointICNet(cfg)
with torch.no_grad():
y = jointicnet.forward(output, output, mask, output, target=torch.abs(torch.view_as_complex(output)))
if y.shape[1:] != x.shape[2:4]:
raise AssertionError
```
#### File: tests/core/test_typecheck.py
```python
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/wdika/NeMo/blob/main/tests/core/test_typecheck.py
from mridc.core.neural_types.comparison import NeuralTypeComparisonResult
def recursive_assert_shape(x, shape):
"""Perform recursive shape assert"""
if isinstance(x, (list, tuple)):
for xi in x:
recursive_assert_shape(xi, shape)
return
if x.shape != shape:
raise AssertionError
def recursive_assert_homogeneous_type(x, type_val):
"""Perform recursive type homogeneous assert"""
if isinstance(x, (list, tuple)):
for xi in x:
recursive_assert_homogeneous_type(xi, type_val)
return
if x.neural_type.compare(type_val) != NeuralTypeComparisonResult.SAME:
raise AssertionError
```
|
{
"source": "jerkeeler/teeny-weeny-analytics",
"score": 2
}
|
#### File: management/commands/backfill_visitor.py
```python
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from analytics_minimal.models import PageView, Visitor, Session
class Command(BaseCommand):
    help = 'Backfill page views that do not yet have a Session, creating the missing Visitors and Sessions.'
def handle(self, *args, **options):
starts = PageView.objects.filter(is_new_user=True, session=None)
self.stdout.write(self.style.SUCCESS(f'Creating {starts.count()} new Visitors!'))
res = input('Are you sure you wish to backfill Visitors (y/N) ')
if res.lower() != 'y':
raise CommandError('Command aborted!')
def get_next_page_view(previous_event_id):
page_views = PageView.objects.filter(previous_event_id=previous_event_id)
if page_views.count() == 0:
return None
if page_views.count() == 1:
return page_views.first()
            # find the "most correct" page view out of the possible candidates
event_ids = page_views.values('event_id').all()
nexts = PageView.objects.filter(previous_event_id__in=event_ids)
if nexts.count() == 0:
return page_views.order_by('-created_at').first()
if nexts.count() == 1:
return page_views.filter(event_id=nexts.first().previous_event_id).first()
return page_views.filter(event_id=nexts.first().previous_event_id).order_by('-created_at').first()
with transaction.atomic():
for start in starts:
visitor = Visitor.objects.create(site=start.site)
session = Session.objects.create(first_session=True, visitor=visitor, site=start.site,
first_page_view=start, last_page_view=start, active=True)
start.session = session
start.save()
previous_page_view = start
previous_session = session
next_page_view = get_next_page_view(start.event_id)
while next_page_view is not None:
# Traverse the start chain
# Get next, if more than one just get a random first
previous_page_view.next_event_id = next_page_view.event_id
previous_page_view.save()
if next_page_view.is_new_session:
session = Session.objects.create(visitor=visitor, site=start.site,
first_page_view=next_page_view,
last_page_view=next_page_view, active=True)
previous_session.active = False
previous_session.duration = (previous_session.last_page_view.created_at -
previous_session.first_page_view.created_at)
previous_session.save()
else:
session.last_page_view = next_page_view
session.save()
next_page_view.session = session
next_page_view.save()
previous_page_view = next_page_view
next_page_view = get_next_page_view(previous_page_view.event_id)
self.stdout.write(self.style.SUCCESS('Successfully backfilled Visitors'))
```
#### File: backend/analytics_minimal/views.py
```python
from datetime import timedelta
from typing import Any
from django.db.models import Avg, Count, Q, FloatField
from django.db.models.functions import Cast
from django.http import HttpResponse, HttpRequest, JsonResponse, Http404
from django.utils import timezone
from django.views import View
from django.views.generic.base import TemplateView
from analytics_minimal.models import APIKey, PageView, Session, Site, Visitor
from analytics_minimal.utils import anonymize_ip, get_gif_response, get_hostname, get_browser
BAD_RESPONSE = JsonResponse({'error': 'Bad request'}, status=400)
BOT_NAMES = ['googlebot', 'slurp', 'twiceler', 'msnbot', 'kaloogabot', 'yodaobot', 'baiduspider', 'speedy spider',
'dotbot', 'duckduckbot', 'baidu', 'bingbot']
class V1Collect(View):
def _convert_to_page_view(self, request: HttpRequest, api_key: APIKey) -> PageView:
query_params = request.GET
field_map = PageView.fields_to_query()
page_view = PageView()
page_view.event_id = query_params.get(field_map['event_id'])
page_view.event_name = query_params.get(field_map['event_name'])
page_view.previous_event_id = query_params.get(field_map['previous_event_id'])
page_view.hostname = query_params.get(field_map['hostname'])
page_view.path = query_params.get(field_map['path'])
page_view.browser = query_params.get(field_map['browser'])
page_view.page_load_time = query_params.get(field_map['page_load_time'])
page_view.is_new_session = query_params.get(field_map['is_new_session'], False)
page_view.is_new_user = query_params.get(field_map['is_new_user'], False)
page_view.referrer = query_params.get(field_map['referrer'])
page_view.screen_height = query_params.get(field_map['screen_height'])
page_view.screen_width = query_params.get(field_map['screen_width'])
page_view.api_key = api_key
page_view.site = api_key.site
ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR'))
if ip:
page_view.ip = anonymize_ip(ip)
referrer = query_params.get(field_map['referrer'])
if referrer:
page_view.referrer = get_hostname(referrer)
browser = query_params.get(field_map['browser'])
if browser:
page_view.browser = get_browser(browser)
page_view.raw_request = request.build_absolute_uri()
page_view.save()
return page_view
def get(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
if request.META.get('DNT', '0') == '1':
            # Should never get here because the JS shouldn't send a GET request in the first place if DNT is set
return get_gif_response()
# Don't want to track any bots!
ua = request.META.get('HTTP_USER_AGENT', '').lower()
if any(filter(lambda bot_name: bot_name in ua, BOT_NAMES)):
return get_gif_response()
query_params = request.GET
field_map = PageView.fields_to_query()
api_key = query_params.get(field_map['api_key'])
hostname = query_params.get(field_map['hostname'])
try:
norm_hostname = hostname.replace('http://', '').replace('https://', '')
actual_key = APIKey.objects.get(site__hostname=norm_hostname, active=True)
except APIKey.DoesNotExist:
return BAD_RESPONSE
except AttributeError:
# hostname is not provided
return BAD_RESPONSE
if api_key != actual_key.token:
return BAD_RESPONSE
event_id = query_params.get(field_map['event_id'])
event_name = query_params.get(field_map['event_name'])
path = query_params.get(field_map['path'])
if any([x is None for x in [event_id, event_name, hostname, path]]):
return BAD_RESPONSE
page_view = self._convert_to_page_view(request, actual_key)
# Get the new page view from DB so input objects are correctly coerced to their respective formats
page_view = PageView.objects.get(token=page_view.token)
visitor, session, previous_session, previous_view = None, None, None, None
if page_view.previous_event_id:
try:
previous_view = PageView.objects.get(api_key=actual_key, event_id=page_view.previous_event_id)
previous_view.next_event_id = page_view.event_id
except PageView.DoesNotExist:
print('Previous page view does not exist!')
except PageView.MultipleObjectsReturned:
print('Multiple page views with the previous id!')
previous_view = (PageView.objects.filter(api_key=actual_key, event_id=page_view.previous_event_id)
.order_by('-created_at').first())
previous_view.next_event_id = page_view.event_id
if previous_view and not page_view.is_new_session:
previous_view.is_bounce = False
previous_view.duration = page_view.created_at - previous_view.created_at
if previous_view:
previous_view.save()
if previous_view and page_view.is_new_session:
try:
previous_session = Session.objects.get(site=page_view.site, last_page_view=previous_view)
previous_session.active = False
previous_session.duration = (previous_session.last_page_view.created_at -
previous_session.first_page_view.created_at)
previous_session.save()
except Session.DoesNotExist:
print('Previous session does not exist!')
except Session.MultipleObjectsReturned:
print('Multiple sessions with the same last page view!')
previous_session = (Session.objects.filter(site=page_view.site, last_page_view=previous_view)
.order_by('-created_at').first())
previous_session.active = False
previous_session.duration = (previous_session.last_page_view.created_at -
previous_session.first_page_view.created_at)
previous_session.save()
if page_view.is_new_session:
session = Session.objects.create(active=True, site=page_view.site, first_page_view=page_view,
last_page_view=page_view)
elif previous_view:
try:
session = Session.objects.get(site=page_view.site, last_page_view=previous_view)
session.last_page_view = page_view
except Session.DoesNotExist:
print('Previous session does not exist!')
except Session.MultipleObjectsReturned:
print('Multiple sessions with the same last page view!')
if page_view.is_new_user:
visitor = Visitor.objects.create(site=page_view.site)
elif previous_session or session:
visitor = (previous_session and previous_session.visitor) or (session and session.visitor)
if page_view.is_new_user and session:
session.first_session = True
if visitor and session and session.visitor is None:
session.visitor = visitor
if session:
session.save()
if not visitor:
visitor = Visitor.objects.create(site=page_view.site)
if not session:
session = Session.objects.create(active=True, site=page_view.site, first_page_view=page_view,
last_page_view=page_view, visitor=visitor)
page_view.session = session
page_view.save()
return get_gif_response()
class Sandbox(View):
def get(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
return get_gif_response()
class SiteView(TemplateView):
template_name = 'site.html'
def get(self, request, *args, **kwargs):
if not request.user.is_authenticated:
raise Http404
return super().get(self, request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
site = Site.objects.get(hostname=kwargs.get('hostname'))
context['site'] = site
        context['site_stats'] = {
'bounce_rate': round((PageView.objects.filter(site=site, is_bounce=True).count() /
PageView.objects.filter(site=site).count()) * 100, 2),
'uniques': Visitor.objects.filter(site=site).count(),
'sessions': Session.objects.filter(site=site).count(),
'total': PageView.objects.filter(site=site).count(),
'duration': PageView.objects.filter(site=site).all().aggregate(Avg('duration'))['duration__avg'],
'session_duration': Session.objects.filter(site=site).all().aggregate(Avg('duration'))['duration__avg'],
}
paths = PageView.objects.values('path').filter(site=site).annotate(pcount=Count('path'))
path_data = {}
for path in paths:
path_stats = PageView.objects.filter(site=site, path=path['path']).aggregate(
views=Count('token'),
avg_duration=Avg('duration'),
bounce_rate=(Cast(Count('token', filter=Q(is_bounce=True)), FloatField()) /
Cast(Count('token'), FloatField())) * 100,
)
unique_views = (Visitor.objects.values('token', 'session__pageview__path')
.filter(site=site, session__pageview__path=path['path']))
uniques = set()
for view in unique_views:
uniques.add(view['token'])
path_stats['unique_page_views'] = len(uniques)
path_stats['path'] = path['path']
path_data[path['path']] = path_stats
data = [value for key, value in path_data.items()]
data = sorted(data, key=lambda x: x['unique_page_views'], reverse=True)
context['page_stats'] = data
screens = (PageView.objects.values('screen_height', 'screen_width').filter(site=site)
.annotate(tcount=Count('token')).order_by('-tcount'))
screen_data = {}
for screen in screens:
screen_tuple = (screen['screen_width'], screen['screen_height'],)
screen_data[screen_tuple] = screen
unique_screens = (Visitor.objects.values('token')
.filter(site=site, session__pageview__screen_width=screen_tuple[0],
session__pageview__screen_height=screen_tuple[1]))
uniques = set()
for un in unique_screens:
uniques.add(un['token'])
screen['unique_occurrences'] = len(uniques)
screen_data[screen_tuple] = screen
screen_data = [value for key, value in screen_data.items()]
screens = sorted(screen_data, key=lambda x: x['unique_occurrences'], reverse=True)
context['screen_stats'] = screens
browsers = (PageView.objects.values('browser').filter(site=site)
.annotate(tcount=Count('token')).order_by('-tcount'))
browser_data = {}
for browser in browsers:
browser_data[browser['browser']] = browser
unique_browsers = (Visitor.objects.values('token')
.filter(site=site, session__pageview__browser=browser['browser']))
uniques = set()
for un in unique_browsers:
uniques.add(un['token'])
browser['unique_occurrences'] = len(uniques)
browser_data[browser['browser']] = browser
browser_data = [value for key, value in browser_data.items()]
browsers = sorted(browser_data, key=lambda x: x['unique_occurrences'], reverse=True)
context['browser_stats'] = browsers
referrers = (PageView.objects.values('referrer').filter(site=site)
.exclude(referrer='')
.exclude(referrer=None)
.annotate(tcount=Count('token')).order_by('-tcount'))
context['referrer_stats'] = referrers
mins_ago = timezone.now() - timedelta(minutes=15)
context['current_visitors'] = Visitor.objects.filter(session__active=True, site=site,
session__last_page_view__created_at__gte=mins_ago).count()
return context
```
#### File: backend/twauth/models.py
```python
from django.db import models
from core.models import CreatedByMixin, TimeStampModel, TokenMixin
from core import consts
class UserProfile(TimeStampModel, CreatedByMixin, TokenMixin):
display_name = models.CharField(
max_length=consts.VARCHAR_LENGTH,
)
user = models.OneToOneField(
'auth.User',
on_delete=models.CASCADE,
)
disabled = models.BooleanField(default=False)
class Meta:
ordering = ['display_name']
def __str__(self):
return self.display_name
```
#### File: backend/twauth/pipeline.py
```python
from twauth.models import UserProfile
def create_profile(backend, user, response, *args, **kwargs):
try:
user.userprofile
except UserProfile.DoesNotExist:
username = user.username
if backend.name == 'github':
username = response.get('name', user.username)
elif backend.name == 'google-oauth2':
username = response.get('displayName', user.username)
profile = UserProfile(
user=user,
display_name=username,
created_by=user,
updated_by=user,
)
profile.save()
```
|
{
"source": "jerkos/aflow",
"score": 3
}
|
#### File: src/aflowey/runner.py
```python
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from enum import Enum, auto
from typing import Union, Any, Mapping
from aflowey.context import Context, ctx_var, executor_var
from aflowey.types import Opt, Executor
class ExecutorType(Enum):
THREAD_POOL = auto()
PROCESS_POOL = auto()
def init_executor_if_needed(
executor: Opt[Union[Executor, ExecutorType]], **kwargs: Any
) -> Opt[Executor]:
if executor is None: # pragma: no cover
return None
if isinstance(executor, ExecutorType):
if executor is ExecutorType.THREAD_POOL:
return ThreadPoolExecutor(**kwargs)
elif executor is ExecutorType.PROCESS_POOL:
return ProcessPoolExecutor(**kwargs)
raise ValueError("Wrong provided executor type") # pragma: no cover
return executor
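# Illustrative calls (a sketch; extra kwargs are forwarded to the concurrent.futures constructors):
#
#   init_executor_if_needed(ExecutorType.THREAD_POOL, max_workers=4)   # -> ThreadPoolExecutor
#   init_executor_if_needed(ExecutorType.PROCESS_POOL, max_workers=2)  # -> ProcessPoolExecutor
#   init_executor_if_needed(None)                                      # -> None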
class AsyncRunner:
"""
Runner which holds execution options
>>> async_exec = aexec() | flow1 | flow2 | [flow3, flow4]
>>> with async_exec.thread_runner(max_workers=4) as runner:
>>> a, b, (c, d) = await runner.run(async_exec)
"""
def __init__(
self, func, executor: Union[Executor, Opt[ExecutorType]] = None, **kwargs
):
self.func = func
self._executor = init_executor_if_needed(executor, **kwargs)
self._ctx = None
self._token = None
def __enter__(self) -> "AsyncRunner":
"""if no executor provided, raise an error as the use of the with
keyword is useless. Creates the context of the current executor"""
# do not override it
if self._executor is None:
raise ValueError(
"Trying to use with context with not executor provided"
) # pragma: no cover
self._executor.__enter__()
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
"""closing the current executor"""
if self._executor is None:
raise ValueError(
"Trying to use with context with not executor provided"
) # pragma: no cover
self._executor.__exit__(exc_type, exc_val, exc_tb)
async def run(self, context: Opt[Union[Context, Mapping]] = None, **kwargs):
"""all kwargs stuff will be passed to the executor
Args:
context: context var variable
kwargs: passed to the gather function which run coroutine simultaneously
"""
_ctx = context or {}
_ctx = _ctx if isinstance(_ctx, Context) else Context(**_ctx)
self._token = ctx_var.set(_ctx)
executor_var.set(self._executor)
# run the function
result = await self.func(**kwargs)
# reset to avoid memory leak
ctx_var.reset(self._token)
return result
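# Illustrative usage with a context (a sketch; ``my_flow`` is a hypothetical coroutine function
# taking no arguments, and the snippet is assumed to run inside an async function):
#
#   runner = AsyncRunner(my_flow, executor=ExecutorType.THREAD_POOL, max_workers=2)
#   with runner:
#       result = await runner.run(context={"user_id": 42})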
```
#### File: src/aflowey/single_executor.py
```python
import asyncio
import functools
from concurrent.futures import ProcessPoolExecutor
from contextvars import copy_context
from inspect import isawaitable
from typing import Any
from typing import Awaitable
from typing import Union
from loguru import logger
from aflowey.async_flow import AsyncFlow
from aflowey.context import executor_var
from aflowey.f import F
from aflowey.functions import get_name
from aflowey.functions import is_f0
from aflowey.functions import is_side_effect
from aflowey.types import AnyCoroutineFunction
from aflowey.types import Function
async def _exec(function: Union[F, Function], *a: Any, **kw: Any) -> Any:
current_result = function(*a, **kw)
while asyncio.iscoroutine(current_result) or isawaitable(current_result):
current_result = await current_result
if isinstance(current_result, F):
return await _exec(current_result)
return current_result
def _exec_synchronously(fn: Function, *args: Any) -> Any:
def _run():
result = fn(*args)
while isinstance(result, F): # pragma: no cover
result = result()
return result
return _run
def async_wrap(func: F) -> AnyCoroutineFunction:
"""wrap the given into a coroutine function and try
calling it
"""
@functools.wraps(func)
async def wrapped(*args: Any, **kwargs: Any) -> Any:
return await _exec(func, *args, **kwargs)
return wrapped
def check_and_run_step(fn: F, *args: Any, **kwargs: Any) -> Awaitable[Any]:
loop = asyncio.get_event_loop()
if fn.is_coroutine_function:
new_fn = async_wrap(fn)
return loop.create_task(new_fn(*args, **kwargs)) # type: ignore[call-arg]
executor = executor_var.get()
if executor is None:
new_fn = async_wrap(fn)
return new_fn(*args, **kwargs) # type: ignore[call-arg]
new_fn = fn.func
if kwargs: # pragma: no cover
new_fn = functools.partial(new_fn, **kwargs)
# process executor does not have access to the context data
if isinstance(executor, ProcessPoolExecutor):
return loop.run_in_executor(executor, new_fn, *args)
context = copy_context()
logger.debug(f'running "{new_fn}" in a thread pool executor')
return loop.run_in_executor(
executor, context.run, _exec_synchronously(new_fn, *args)
)
class SingleFlowExecutor:
"""Single flow executor"""
CANCEL_FLOW = object()
def __init__(self, flow: AsyncFlow) -> None:
self.flow = flow
@staticmethod
async def check_and_execute_flow_if_needed(
maybe_flow: Union[Any, AsyncFlow]
) -> Any:
"""check if we have an async flow and execute it"""
if isinstance(maybe_flow, AsyncFlow):
return await SingleFlowExecutor(maybe_flow).execute_flow(is_root=False)
return maybe_flow
@staticmethod
def need_to_cancel_flow(result: Any) -> bool:
"""check if we need to cancel flow checking sentinel"""
if result is SingleFlowExecutor.CANCEL_FLOW:
logger.debug("Received sentinel object, canceling flow...")
return True
return False
@staticmethod
def get_step_name(func: F, index: int) -> str:
"""get the step name"""
return get_name(func) or str(index)
def save_step(self, task: F, index: int, current_args: Any) -> None:
"""save step state in flow attribute"""
step_name = self.get_step_name(task, index)
self.flow.steps[step_name] = current_args
def _check_current_args_if_side_effect(self, first_aws: F, res: Any) -> Any:
if is_side_effect(first_aws):
return self._get_result_from_early_abort()
# self.flow.kwargs if self.flow.kwargs else self.flow.args
return res
async def _execute_first_step(self, first_aws: F) -> Any:
"""executing the first step"""
if not self.flow.args and not self.flow.kwargs and is_f0(first_aws):
self.flow.args = (None,)
res = await check_and_run_step(first_aws, *self.flow.args, **self.flow.kwargs)
current_args = self._check_current_args_if_side_effect(first_aws, res)
# if flow run it
current_args = await self.check_and_execute_flow_if_needed(current_args)
# memorize name
self.save_step(first_aws, 0, current_args)
return current_args
def _get_result_from_early_abort(self):
if self.flow.kwargs:
return self.flow.kwargs
if not self.flow.args:
return None
if self.flow.args and len(self.flow.args) == 1:
return self.flow.args[0]
return self.flow.args
async def execute_flow(self, is_root: bool) -> Any:
"""Main function to execute a flow"""
if not self.flow.aws:
return None
# get first step
first_aws = self.flow.aws[0]
current_args = await self._execute_first_step(first_aws)
if self.need_to_cancel_flow(current_args):
# returning canceling flow
if is_root:
return self._get_result_from_early_abort()
return current_args
for index, task in enumerate(self.flow.aws[1:]):
result = await check_and_run_step(task, current_args)
if is_side_effect(task):
# side effect task, does not return a value
self.save_step(task, index + 1, current_args)
if self.need_to_cancel_flow(result):
break # pragma: no cover
continue # pragma: no cover
result = await self.check_and_execute_flow_if_needed(result)
if self.need_to_cancel_flow(result): # check if we need to cancel the flow
break
current_args = result
self.save_step(task, index + 1, current_args)
# return current args that are the actual results
return current_args
CANCEL_FLOW = SingleFlowExecutor.CANCEL_FLOW
```
|
{
"source": "jerkos/datamodel-code-generator",
"score": 2
}
|
#### File: datamodel_code_generator/parser/base.py
```python
import itertools
import re
from abc import ABC, abstractmethod
from collections import OrderedDict, defaultdict
from itertools import groupby
from pathlib import Path
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
Union,
)
from pydantic import BaseModel
from datamodel_code_generator.format import format_code
from ..format import PythonVersion
from ..imports import IMPORT_ANNOTATIONS, Import, Imports
from ..model import pydantic as pydantic_model
from ..model.base import ALL_MODEL, DataModel, DataModelFieldBase
from ..reference import ModelResolver
from ..types import DataType, DataTypeManager
_UNDER_SCORE_1 = re.compile(r'(.)([A-Z][a-z]+)')
_UNDER_SCORE_2 = re.compile('([a-z0-9])([A-Z])')
def camel_to_snake(string: str) -> str:
subbed = _UNDER_SCORE_1.sub(r'\1_\2', string)
return _UNDER_SCORE_2.sub(r'\1_\2', subbed).lower()
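# Illustrative conversions (a sketch of the two-pass regex substitution above):
#
#   camel_to_snake('CamelCase')         # -> 'camel_case'
#   camel_to_snake('HTTPResponseCode')  # -> 'http_response_code'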
def snakify_field(field: DataModelFieldBase) -> None:
if not field.name:
return
original_name = field.name
field.name = camel_to_snake(original_name)
if field.name != original_name:
field.alias = original_name
def set_strip_default_none(field: DataModelFieldBase) -> None:
field.strip_default_none = True
def dump_templates(templates: List[DataModel]) -> str:
return '\n\n\n'.join(str(m) for m in templates)
ReferenceMapSet = Dict[str, Set[str]]
SortedDataModels = Dict[str, DataModel]
def sort_data_models(
unsorted_data_models: List[DataModel],
sorted_data_models: Optional[SortedDataModels] = None,
require_update_action_models: Optional[List[str]] = None,
) -> Tuple[List[DataModel], SortedDataModels, List[str]]:
if sorted_data_models is None:
sorted_data_models = OrderedDict()
if require_update_action_models is None:
require_update_action_models = []
unresolved_references: List[DataModel] = []
for model in unsorted_data_models:
if not model.reference_classes:
sorted_data_models[model.name] = model
elif (
model.name in model.reference_classes and len(model.reference_classes) == 1
): # only self-referencing
sorted_data_models[model.name] = model
require_update_action_models.append(model.name)
elif (
not model.reference_classes - {model.name} - set(sorted_data_models)
): # reference classes have been resolved
sorted_data_models[model.name] = model
if model.name in model.reference_classes:
require_update_action_models.append(model.name)
else:
unresolved_references.append(model)
if unresolved_references:
try:
return sort_data_models(
unresolved_references, sorted_data_models, require_update_action_models
)
except RecursionError:
unresolved_classes = ', '.join(
f"[class: {item.name} references: {item.reference_classes}]"
for item in unresolved_references
)
            raise Exception(f'A Parser cannot resolve classes: {unresolved_classes}.')
return unresolved_references, sorted_data_models, require_update_action_models
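# Illustrative behaviour (a sketch; A, B and C stand for DataModel instances):
#   with A having no references, B referencing A and C referencing B, the input [C, B, A] is
#   sorted into the order A, B, C; a model whose only unresolved reference is itself is kept
#   and its name is appended to require_update_action_models for the caller to handle.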
def relative(current_module: str, reference: str) -> Tuple[str, str]:
"""Find relative module path."""
current_module_path = current_module.split('.') if current_module else []
*reference_path, name = reference.split('.')
if current_module_path == reference_path:
return '', ''
i = 0
for x, y in zip(current_module_path, reference_path):
if x != y:
break
i += 1
left = '.' * (len(current_module_path) - i)
right = '.'.join(reference_path[i:])
if not left:
left = '.'
if not right:
right = name
elif '.' in right:
extra, right = right.rsplit('.', 1)
left += extra
return left, right
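# Illustrative results (a sketch, traced from the logic above):
#
#   relative('models.user', 'models.user.User')    # -> ('', '')        same module, no import needed
#   relative('models.user', 'models.order.Order')  # -> ('.', 'order')  i.e. `from .order import Order`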
class Result(BaseModel):
body: str
source: Optional[Path]
class Source(BaseModel):
path: Path
text: str
@classmethod
def from_path(cls, path: Path, base_path: Path) -> 'Source':
return cls(
path=path.relative_to(base_path),
text=path.read_text(),
)
class Parser(ABC):
def __init__(
self,
source: Union[str, Path, List[Path]],
*,
data_model_type: Type[DataModel] = pydantic_model.BaseModel,
data_model_root_type: Type[DataModel] = pydantic_model.CustomRootType,
data_type_manager_type: Type[DataTypeManager] = pydantic_model.DataTypeManager,
data_model_field_type: Type[DataModelFieldBase] = pydantic_model.DataModelField,
base_class: Optional[str] = None,
custom_template_dir: Optional[Path] = None,
extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,
target_python_version: PythonVersion = PythonVersion.PY_37,
dump_resolve_reference_action: Optional[Callable[[List[str]], str]] = None,
validation: bool = False,
field_constraints: bool = False,
snake_case_field: bool = False,
strip_default_none: bool = False,
aliases: Optional[Mapping[str, str]] = None,
allow_population_by_field_name: bool = False,
use_default_on_required_field: bool = False,
):
self.data_type_manager: DataTypeManager = data_type_manager_type(
target_python_version
)
self.data_model_type: Type[DataModel] = data_model_type
self.data_model_root_type: Type[DataModel] = data_model_root_type
self.data_model_field_type: Type[DataModelFieldBase] = data_model_field_type
self.imports: Imports = Imports()
self.base_class: Optional[str] = base_class
self.target_python_version: PythonVersion = target_python_version
self.results: List[DataModel] = []
self.dump_resolve_reference_action: Optional[
Callable[[List[str]], str]
] = dump_resolve_reference_action
self.validation: bool = validation
self.field_constraints: bool = field_constraints
self.snake_case_field: bool = snake_case_field
self.strip_default_none: bool = strip_default_none
self.use_default_on_required_field: bool = use_default_on_required_field
self.current_source_path: Optional[Path] = None
if isinstance(source, Path):
self.base_path = (
source.absolute() if source.is_dir() else source.absolute().parent
)
else:
self.base_path = Path.cwd()
self.source: Union[str, Path, List[Path]] = source
self.custom_template_dir = custom_template_dir
self.extra_template_data: DefaultDict[
str, Any
] = extra_template_data or defaultdict(dict)
if allow_population_by_field_name:
self.extra_template_data[ALL_MODEL]['allow_population_by_field_name'] = True
self.model_resolver = ModelResolver(aliases=aliases)
self.field_preprocessors: List[Callable[[DataModelFieldBase], None]] = []
if self.snake_case_field:
self.field_preprocessors.append(snakify_field)
if self.strip_default_none:
self.field_preprocessors.append(set_strip_default_none)
@property
def iter_source(self) -> Iterator[Source]:
if isinstance(self.source, str):
yield Source(path=Path(), text=self.source)
elif isinstance(self.source, Path): # pragma: no cover
if self.source.is_dir():
for path in self.source.rglob('*'):
if path.is_file():
yield Source.from_path(path, self.base_path)
else:
yield Source.from_path(self.source, self.base_path)
elif isinstance(self.source, list): # pragma: no cover
for path in self.source:
yield Source.from_path(path, self.base_path)
def append_result(self, data_model: DataModel) -> None:
for field_preprocessor in self.field_preprocessors:
for field in data_model.fields:
field_preprocessor(field)
self.results.append(data_model)
@property
def data_type(self) -> Type[DataType]:
return self.data_type_manager.data_type
@abstractmethod
def parse_raw(self) -> None:
raise NotImplementedError
def parse(
self, with_import: Optional[bool] = True, format_: Optional[bool] = True
) -> Union[str, Dict[Tuple[str, ...], Result]]:
self.parse_raw()
if with_import:
if self.target_python_version == PythonVersion.PY_37:
self.imports.append(IMPORT_ANNOTATIONS)
_, sorted_data_models, require_update_action_models = sort_data_models(
self.results
)
grouped_results = defaultdict(int)
for m in self.results:
target_models = set(
itertools.chain.from_iterable(
[m1.module_path for m1 in self.results if m1.name == m.name]
)
)
if m.module_path and len(target_models) > 1:
grouped_results[m.name] += 1
for model_name, model in sorted_data_models.items():
if grouped_results[model_name] > 1:
model.path = Path(model_name[0].lower() + model_name[1:])
results: Dict[Tuple[str, ...], Result] = {}
module_key = lambda x: x.module_path
# process in reverse order to correctly establish module levels
grouped_models = groupby(
sorted(sorted_data_models.values(), key=module_key, reverse=True),
key=module_key,
)
more_init_imports = []
all_export = []
for module, models in (
(k, [*v]) for k, v in grouped_models
): # type: Tuple[str, ...], List[DataModel]
module_path = '.'.join(module)
models_name_in_current_module = [m.name for m in models]
all_export += models_name_in_current_module
if module_path:
more_init_imports.append(
f'''from .{camel_to_snake(module_path)} import {', '.join(models_name_in_current_module)}'''
)
init = False
if module:
parent = (*module[:-1], '__init__.py')
if parent not in results:
results[parent] = Result(body='')
if (*module, '__init__.py') in results:
module = (*module, '__init__.py')
init = True
else:
module = (*module[:-1], f'{module[-1]}.py')
else:
module = ('__init__.py',)
result: List[str] = []
imports = Imports()
models_to_update: List[str] = []
scoped_model_resolver = ModelResolver()
for model in models:
alias_map: Dict[str, Optional[str]] = {}
if model.name in require_update_action_models:
models_to_update += [model.name]
imports.append(model.imports)
for field in model.fields:
for data_type in field.data_type.all_data_types: # type: DataType
if not data_type.type or (
'.' not in data_type.type and data_type.module_name is None
):
continue
type_ = (
f"{data_type.module_name}.{data_type.type}"
if data_type.module_name
else data_type.type
)
from_, import_ = relative(module_path, type_)
full_path = f'{from_}/{import_}'
name = type_.rsplit('.', 1)[-1]
if data_type.reference:
reference = self.model_resolver.get(
data_type.reference.path
)
if (
reference
and reference.actual_module_name == module_path
):
try:
model.reference_classes.remove(name)
except Exception:
pass
continue
if full_path in alias_map:
alias = alias_map[full_path] or import_
else:
alias = scoped_model_resolver.add(
full_path.split('/'), import_, unique=True
).name
alias_map[full_path] = None if alias == import_ else alias
new_name = (
f'{camel_to_snake(alias)}.{name}'
if from_ and import_ and module_path != alias
else name
)
if name in model.reference_classes:
model.reference_classes.remove(name)
if module_path != alias:
model.reference_classes.add(new_name)
data_type.type = new_name
for ref_name in model.reference_classes:
from_, import_ = relative(module_path, ref_name)
if init:
from_ += "."
target_model = sorted_data_models.get(ref_name)
import_ = (
ref_name.split('.')[0]
if '.' in ref_name
else ref_name[0].lower() + ref_name[1:]
)
# this model is in init so import it directly
if target_model and not target_model.module_path:
import_ = ref_name
if (
target_model
and len(target_model.module_path) >= 1
and target_model.module_path[0] == module_path
):
import_ = ''
if from_ and import_:
import_ = Import(
from_=from_,
import_=import_,
alias=alias_map.get(f'{from_}/{import_}'),
)
imports.append(import_)
if with_import:
result += [str(imports), str(self.imports), '\n']
code = dump_templates(models)
result += [code]
if self.dump_resolve_reference_action is not None:
result += ['\n', self.dump_resolve_reference_action(models_to_update)]
body = '\n'.join(result)
if format_:
body = format_code(body, self.target_python_version)
results[module] = Result(body=body, source=models[0].path)
# retain existing behaviour
if [*results] == [('__init__.py',)]:
return results[('__init__.py',)].body
init_content = results[('__init__.py',)]
if init_content:
all_str = f'''__all__ = [{', '.join([f'"{name}"' for name in all_export if name])}]\n\n'''
init_content.body = format_code(
format_code(init_content.body, self.target_python_version)
+ '\n'.join(more_init_imports)
+ '\n\n'
+ all_str,
self.target_python_version,
isort=False,
)
return results
```
|
{
"source": "jerkos/hozons",
"score": 2
}
|
#### File: hozons/bemychange/utils.py
```python
from flask import flash
from bemychange.extensions import db
def flash_errors(form, category='warning'):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash('{0} - {1}'.format(getattr(form, field).label.text, error), category)
def with_transaction(func):
def wrapper(*args, **kwargs):
with db.transaction():
return func(*args, **kwargs)
return wrapper
```
|
{
"source": "jerkos/kepavi",
"score": 2
}
|
#### File: kepavi/kepavi/app.py
```python
from kepavi.auth.forms import LoginForm
from kepavi.helpers import format_date, time_since, older_than_one_month, time_left_to, is_online, crop_title, quote
from kepavi.user.models import User
from kepavi.user.views import user
try:
from metabomatch.private_keys import GITHUB_CLIENT_SECRET
except ImportError:
GITHUB_CLIENT_SECRET = ''
import os
import logging
import datetime
import sys
from flask import Flask, request, render_template
from flask_login import current_user
# Import the auth blueprint
from kepavi.auth.views import auth
from kepavi.home.views import home
# extensions
from kepavi.extensions import db, login_manager, cache, migrate, github, csrf, gravatar, babel, oauth, mongo
def create_app(config=None):
"""Creates the app."""
# Initialize the app
app = Flask("Kepavi")
# Use the default config and override it afterwards
app.config.from_object('kepavi.configs.default.DefaultConfig')
# Update the config
app.config.from_object(config)
# try to update the config via the environment variable
app.config.from_envvar("FLASKBB_SETTINGS", silent=True)
    # GitHub extension settings; TODO: move these into the configs directory
app.config['GITHUB_CLIENT_ID'] = 'ed057c9e07f531f0fdb6'
app.config['GITHUB_CLIENT_SECRET'] = os.environ.get('GITHUB_CLIENT_SECRET') or GITHUB_CLIENT_SECRET
app.config['MONGODB_SETTINGS'] = {'db': 'biomodels', 'host': 'mongodb://localhost:27017/biomodels'}
configure_blueprints(app)
configure_extensions(app)
configure_template_filters(app)
# configure_context_processors(app)
#configure_before_handlers(app)
configure_errorhandlers(app)
#configure_logging(app)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.INFO)
return app
def configure_blueprints(app):
app.register_blueprint(home, url_prefix='/')
app.register_blueprint(user, url_prefix='/user')
app.register_blueprint(auth, url_prefix=app.config["AUTH_URL_PREFIX"])
def configure_extensions(app):
"""
Configures the extensions
"""
# Flask-SQLAlchemy
db.init_app(app)
# Flask-Migrate
migrate.init_app(app, db)
# Flask-Cache
cache.init_app(app)
# Flask-Login
login_manager.login_view = app.config["LOGIN_VIEW"]
login_manager.refresh_view = app.config["REAUTH_VIEW"]
@login_manager.user_loader
def load_user(user_id):
"""
Loads the user. Required by the `login` extension
"""
u = User.query.filter(User.id == user_id).first()
return u
login_manager.init_app(app)
# github extension
github.init_app(app)
# csrf
csrf.init_app(app)
# gravatar init
gravatar.init_app(app)
babel.init_app(app)
oauth.init_app(app)
mongo.init_app(app)
def configure_template_filters(app):
"""
Configures the template filters
"""
app.jinja_env.filters['format_date'] = format_date
app.jinja_env.filters['time_since'] = time_since
app.jinja_env.filters['older_than_one_month'] = older_than_one_month
app.jinja_env.filters['time_left_to'] = time_left_to
app.jinja_env.filters['is_online'] = is_online
app.jinja_env.filters['crop_title'] = crop_title
app.jinja_env.filters['quote'] = quote
def configure_before_handlers(app):
"""
Configures the before request handlers
"""
@app.before_request
def update_lastseen():
"""
        Updates `lastseen` before every request if the user is authenticated
"""
if current_user.is_authenticated():
current_user.lastseen = datetime.datetime.utcnow()
db.session.add(current_user)
db.session.commit()
def configure_errorhandlers(app):
"""
Configures the error handlers
"""
@app.errorhandler(403)
def forbidden_page(error):
return render_template("errors/forbidden_page.html", form=LoginForm()), 403
@app.errorhandler(404)
def page_not_found(error):
return render_template("errors/page_not_found.html", form=LoginForm()), 404
@app.errorhandler(500)
def server_error_page(error):
return render_template("errors/server_error.html", form=LoginForm()), 500
def configure_logging(app):
"""
Configures logging.
"""
logs_folder = os.path.join(app.root_path, os.pardir, "logs")
from logging.handlers import SMTPHandler
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
info_log = os.path.join(logs_folder, app.config['INFO_LOG'])
info_file_handler = logging.handlers.RotatingFileHandler(
info_log,
maxBytes=100000,
backupCount=10
)
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(formatter)
app.logger.addHandler(info_file_handler)
error_log = os.path.join(logs_folder, app.config['ERROR_LOG'])
error_file_handler = logging.handlers.RotatingFileHandler(
error_log,
maxBytes=100000,
backupCount=10
)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
app.logger.addHandler(error_file_handler)
if app.config["SEND_LOGS"]:
mail_handler = \
SMTPHandler(app.config['MAIL_SERVER'],
app.config['MAIL_DEFAULT_SENDER'],
app.config['ADMINS'],
'application error, no admins specified',
(
app.config['MAIL_USERNAME'],
app.config['MAIL_PASSWORD'],
))
mail_handler.setLevel(logging.ERROR)
mail_handler.setFormatter(formatter)
app.logger.addHandler(mail_handler)
if __name__ == '__main__':
create_app().run(debug=True)
```
#### File: migrations/versions/fda7d7cca9e_.py
```python
revision = '<PASSWORD>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('kegg_reactions',
sa.Column('id', sa.String(length=50), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('organism', sa.String(length=200), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('kegg_reactions')
### end Alembic commands ###
```
|
{
"source": "jerky676/GoodWillShoppingSearch",
"score": 3
}
|
#### File: GoodWillShoppingSearch/models/goodwillsearch.py
```python
from datetime import datetime
from urllib import parse
from queryitem import QueryItem
from goodwillsearchgallery import GoodWillSearchGallery
from goodwillcategories import GoodWillCategories
from goodwilllocations import GoodWillLocations
from goodwillproduct import GoodWillProduct
from urllib.parse import quote
import pytz
from bs4.element import PageElement
from bs4 import BeautifulSoup
import requests
import json
class GoodWillSearch:
def __init__(self, time_zone: pytz.timezone, jsonSearchParams: str = None):
self.time_zone = time_zone
self.set_default_search()
if jsonSearchParams is not None:
self.load_json_search_file(jsonSearchParams)
def set_default_search(self):
self.url = "https://www.shopgoodwill.com/Listings"
self.search_gallery = QueryItem("sg",GoodWillSearchGallery.Empty)
self.keyword_search = QueryItem("st","")
self.categories = QueryItem("c",GoodWillCategories.Empty)
self.good_will_location = QueryItem("s",GoodWillLocations.Empty)
self.low_price = QueryItem("lp",0)
self.high_price = QueryItem("hp",999999)
self.show_buy_now_only = QueryItem("sbn",False)
self.show_pick_up_only = QueryItem("spo",False)
self.hide_pick_up_only = QueryItem("snpo",False)
self.show_one_cent_ship_only = QueryItem("socs",False)
self.search_description = QueryItem("sd",True)
self.show_closed_auctions = QueryItem("sca",False)
self.closed_auction_end_date = QueryItem("caed",'11/14/2018')
self.day_back = QueryItem("cadb",9)
self.search_canada = QueryItem("scs",False)
self.search_international = QueryItem("sis",False)
self.field_order = QueryItem("col",0)
self.page_number = QueryItem("p",0)
self.page_size = QueryItem("ps",40)
self.short_description = QueryItem("desc",True)
self.saved_search_id = QueryItem("ss",0)
self.use_buyer_prefrences = QueryItem("UseBuyerPrefs",True)
def load_json_search_file(self, filename):
with open(filename) as json_file:
json_data = json.load(json_file)
self.search_params_by_json(json_data)
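    # Illustrative search-params file (a sketch; the keys mirror those handled by
    # search_params_by_json below, the values are hypothetical):
    #
    #   {
    #       "low_price": 5,
    #       "high_price": 100,
    #       "show_buy_now_only": true,
    #       "page_size": 40
    #   }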
def search_params_by_json(self, json_data):
if 'search_gallery' in json_data:
self.search_gallery.value_set(GoodWillSearchGallery(json_data['search_gallery']))
if 'categories' in json_data:
self.categories.value_set(GoodWillCategories(json_data['categories']))
if 'good_will_location' in json_data:
self.good_will_location.value_set(GoodWillLocations(json_data['good_will_location']))
if 'low_price' in json_data:
self.low_price.value_set(json_data['low_price'])
if 'high_price' in json_data:
self.high_price.value_set(json_data['high_price'])
if 'show_buy_now_only' in json_data:
self.show_buy_now_only.value_set(json_data['show_buy_now_only'])
if 'show_pick_up_only' in json_data:
self.show_pick_up_only.value_set(json_data['show_pick_up_only'])
if 'hide_pick_up_only' in json_data:
self.hide_pick_up_only.value_set(json_data['hide_pick_up_only'])
if 'show_one_cent_ship_only' in json_data:
self.show_one_cent_ship_only.value_set(json_data['show_one_cent_ship_only'])
if 'search_description' in json_data:
self.search_description.value_set(json_data['search_description'])
if 'show_closed_auctions' in json_data:
self.show_closed_auctions.value_set(json_data['show_closed_auctions'])
if 'closed_auction_end_date' in json_data:
self.closed_auction_end_date.value_set(json_data['closed_auction_end_date'])
if 'day_back' in json_data:
self.day_back.value_set(json_data['day_back'])
if 'search_canada' in json_data:
self.search_canada.value_set(json_data['search_canada'])
if 'search_international' in json_data:
self.search_international.value_set(json_data['search_international'])
if 'field_order' in json_data:
self.field_order.value_set(json_data['field_order'])
if 'page_number' in json_data:
self.page_number.value_set(json_data['page_number'])
if 'page_size' in json_data:
self.page_size.value_set(json_data['page_size'])
if 'short_description' in json_data:
self.short_description.value_set(json_data['short_description'])
if 'saved_search_id' in json_data:
self.saved_search_id.value_set(json_data['saved_search_id'])
def print_search_params(self):
print(f'url: {self.url}')
print(f'search_gallery: {self.search_gallery.get_value()}')
print(f'keyword_search: {self.keyword_search.get_value()}')
print(f'categories: {self.categories.get_value()}')
print(f'good_will_location: {self.good_will_location.get_value()}')
print(f'low_price: {self.low_price.get_value()}')
print(f'high_price: {self.high_price.get_value()}')
print(f'show_buy_now_only: {self.show_buy_now_only.get_value()}')
print(f'show_pick_up_only: {self.show_pick_up_only.get_value()}')
print(f'hide_pick_up_only: {self.hide_pick_up_only.get_value()}')
print(f'show_one_cent_ship_only: {self.show_one_cent_ship_only.get_value()}')
print(f'search_description: {self.search_description.get_value()}')
print(f'show_closed_auctions: {self.show_closed_auctions.get_value()}')
print(f'closed_auction_end_date: {self.closed_auction_end_date.get_value()}')
print(f'day_back: {self.day_back.get_value()}')
print(f'search_canada: {self.search_canada.get_value()}')
print(f'search_international: {self.search_international.get_value()}')
print(f'field_order: {self.field_order.get_value()}')
print(f'page_number: {self.page_number.get_value()}')
print(f'page_size: {self.page_size.get_value()}')
print(f'short_description: {self.short_description.get_value()}')
print(f'saved_search_id: {self.saved_search_id.get_value()}')
print(f'use_buyer_prefrences: {self.use_buyer_prefrences.get_value()}')
def search(self,keyword_search:str):
self.keyword_search_set(quote(keyword_search))
return self.parse_results(requests.get(self.search_url()).text)
def search_multiple(self,keyword_search:set[str]):
goodWillProducts = []
for keyword in keyword_search:
goodWillProducts.extend(self.search(keyword))
return goodWillProducts
def parse_results(self, response):
goodWillProducts = []
soup = BeautifulSoup(response, 'html.parser')
products = soup.find_all('span', {'class' : 'data-container'})
for product in products:
goodWillProduct = GoodWillProduct(product,self.time_zone)
goodWillProduct.print_product()
goodWillProducts.append(goodWillProduct)
return goodWillProducts
def query_string(self):
query_string = "?"
for attrib, value in self.__dict__.items():
if type(value) is QueryItem:
query_string += f'{value.query_string_value()}&'
return query_string.rstrip('&')
def search_url(self):
return self.url + self.query_string()
def search_gallery(self):
return self.search_gallery.value()
def search_gallery(self, value: GoodWillSearchGallery):
self.search_gallery.value_set(value)
def keyword_search(self):
return self.keyword_search.value()
def keyword_search_set(self, value: str):
self.keyword_search.value_set(value)
def categories(self):
return self.categories.value()
def categories_set(self, value: GoodWillCategories):
self.categories.value_set(value)
def good_will_location(self):
return self.good_will_location.value()
def good_will_location(self, value:GoodWillLocations):
self.good_will_location.value_set(value)
def closed_auction_date(self):
return self.closed_auction_end_date.value()
def closed_auction_date(self, closed_auction_date: datetime):
self.closed_auction_end_date.value_set(closed_auction_date)
def low_price(self):
return self.low_price.value()
def low_price(self, value:int):
self.low_price.value_set(value)
def high_price(self):
return self.high_price.value()
def high_price(self, value:int):
self.high_price.value_set(value)
def show_buy_now_only(self):
return self.show_buy_now_only.value()
def show_buy_now_only(self, value:bool):
self.show_buy_now_only.value_set(value)
def show_pick_up_only(self):
return self.show_pick_up_only.value()
def show_pick_up_only(self, value:bool):
self.show_pick_up_only.value_set(value)
def hide_pick_up_only(self):
return self.hide_pick_up_only.value()
def hide_pick_up_only(self, value:bool):
self.hide_pick_up_only.value_set(value)
def show_one_cent_ship_only(self):
return self.show_one_cent_ship_only.value()
def show_one_cent_ship_only(self, value:bool):
self.show_one_cent_ship_only.value_set(value)
def search_description(self):
return self.search_description.value()
def search_description(self, value:bool):
self.search_description.value_set(value)
def show_closed_auctions(self):
return self.show_closed_auctions.value()
def show_closed_auctions(self, value:bool):
self.show_closed_auctions.value_set(value)
def closed_auction_end_date(self):
return self.closed_auction_end_date.value()
def closed_auction_end_date(self, value:datetime):
self.closed_auction_end_date.value_set(value)
def day_back(self):
return self.day_back.value()
def day_back(self, value:int):
self.day_back.value_set(value)
def search_canada(self):
return self.search_canada.value()
def search_canada(self, value:bool):
self.search_canada.value_set(value)
def search_international(self):
return self.search_international.value()
def search_international(self, value:bool):
self.search_international.value_set(value)
def page_number(self):
return self.page_number.value()
def page_number(self, value:int):
self.page_number.value_set(value)
def page_size(self):
return self.page_size.value()
def page_size(self, value:int):
self.page_size.value_set(value)
def short_description(self):
return self.short_description.value()
def short_description(self, value:bool):
self.short_description.value_set(value)
def saved_search_id(self):
return self.saved_search_id.value()
def saved_search_id(self, value:int):
self.saved_search_id.value_set(value)
def use_buyer_prefrences(self):
return self.use_buyer_prefrences.value()
def use_buyer_prefrences(self, value:bool):
self.use_buyer_prefrences.value_set(value)
```
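The class above assembles a shopgoodwill.com listings URL from its `QueryItem` attributes and scrapes the result page with BeautifulSoup. A minimal usage sketch, assuming the sibling modules shown in the imports are on the path and the site markup still matches `parse_results` (import path and keyword are illustrative only):
```python
import pytz
from goodwillsearch import GoodWillSearch  # import path assumed for the module above

# Build a search bound to a time zone; a JSON file with saved parameters is optional.
search = GoodWillSearch(pytz.timezone("US/Eastern"))
search.low_price.value_set(5)
search.high_price.value_set(50)
search.print_search_params()

# Run a keyword search; each listing found is parsed into a GoodWillProduct.
products = search.search("vintage camera")
for product in products:
    product.print_product()
```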
|
{
"source": "jerleo/banshee-musiccube",
"score": 3
}
|
#### File: banshee-musiccube/musiccube/analyzer.py
```python
import numpy as np
import os
import struct
import wave
from shlex import split
from subprocess import call
from uuid import uuid4
class Analyzer:
FEATURES_LENGTH = 42
SECONDS_PER_SONG = 90
SAMPLING_RATE = 10000
def valid_features(self, data):
return len(data) == self.FEATURES_LENGTH
def moments(self, x):
mean = x.mean()
std = x.var() ** 0.5
skewness = ((x - mean) ** 3).mean() / std ** 3
kurtosis = ((x - mean) ** 4).mean() / std ** 4
return [mean, std, skewness, kurtosis]
def fftfeatures(self, wavdata):
f = np.fft.fft(wavdata)
f = f[2:(f.size // 2 + 1)]
f = abs(f)
total_power = f.sum()
f = np.array_split(f, 10)
return [e.sum() / total_power for e in f]
def features(self, data):
# convert to array
x = np.array(data)
# initialize result vector
feature_vec = np.zeros(self.FEATURES_LENGTH)
# smoothing window: 1 sample
x1 = x
d1 = x1[1:] - x1[:-1]
feature_vec[0:4] = self.moments(x1)
feature_vec[4:8] = self.moments(d1)
# smoothing window: 10 samples
x10 = x.reshape(-1, 10).mean(1)
d10 = x10[1:] - x10[:-1]
feature_vec[8:12] = self.moments(x10)
feature_vec[12:16] = self.moments(d10)
# smoothing window: 100 samples
x100 = x.reshape(-1, 100).mean(1)
d100 = x100[1:] - x100[:-1]
feature_vec[16:20] = self.moments(x100)
feature_vec[20:24] = self.moments(d100)
# smoothing window: 1000 samples
x1000 = x.reshape(-1, 1000).mean(1)
d1000 = x1000[1:] - x1000[:-1]
feature_vec[24:28] = self.moments(x1000)
feature_vec[28:32] = self.moments(d1000)
feature_vec[32:] = self.fftfeatures(data)
return feature_vec
def read_wav(self, wav_file):
song_data = wave.open(wav_file)
n = song_data.getnframes()
n = n - n % 1000
frames = song_data.readframes(n)
wav_data = struct.unpack('%dh' % n, frames)
return wav_data
def compute_features(self, mp3_file):
out_path = '/tmp/%s.wav' % uuid4()
cmd_args = 'avconv -v quiet -i "%s" -ac 1 -ar %s -t %s "%s"'
cmd_args = cmd_args % (mp3_file, self.SAMPLING_RATE,
self.SECONDS_PER_SONG, out_path)
ret_code = call(split(cmd_args))
assert(ret_code == 0)
sample_data = self.read_wav(out_path)
assert(len(sample_data) > 0)
os.remove(out_path)
return self.features(sample_data)
```
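A short usage sketch for the feature extractor above. It assumes `avconv` is installed, `/tmp` is writable and an MP3 path is supplied (the path and import path are placeholders):
```python
from analyzer import Analyzer  # import path assumed for the module above

analyzer = Analyzer()
# Decode ~90 s of mono audio at 10 kHz via avconv, then compute the 42-dimensional feature vector.
features = analyzer.compute_features("/path/to/song.mp3")  # placeholder path
assert analyzer.valid_features(features)
print(features[:4])  # mean, std, skewness, kurtosis of the raw waveform
```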
|
{
"source": "j-erler/sz_tools",
"score": 2
}
|
#### File: sz_tools/sz_tools/ilc.py
```python
import numpy as np
import healpy as hp
import datetime
from astropy.io import fits
from astropy.io import ascii
from scipy import ndimage
import sz_tools as sz
import os.path
datapath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
fwhm2sigma = 1/(2*np.sqrt(2*np.log(2)))
#paths to Planck maps
full_mission_path = "/vol/arc3/data1/sz/Planck_legacy_data/light_maps/"
ring1_path = "/vol/arc3/data1/sz/Planck_legacy_data/ring1/"
ring2_path = "/vol/arc3/data1/sz/Planck_legacy_data/ring2/"
hm1_path = "/vol/arc3/data1/sz/Planck_legacy_data/half_mission_1/"
hm2_path = "/vol/arc3/data1/sz/Planck_legacy_data/half_mission_2/"
full_mission_maps = {30: 'LFI_SkyMap_030-BPassCorrected-field-IQU_1024_R3.00_full.fits',
44: 'LFI_SkyMap_044-BPassCorrected-field-IQU_1024_R3.00_full.fits',
70: 'LFI_SkyMap_070-BPassCorrected-field-IQU_1024_R3.00_full.fits',
100: 'HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits',
143: 'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits',
217: 'HFI_SkyMap_217-field-IQU_2048_R3.00_full.fits',
353: 'HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits',
545: 'HFI_SkyMap_545-field-Int_2048_R3.00_full.fits',
857: 'HFI_SkyMap_857-field-Int_2048_R3.00_full.fits'}
ring1_maps = {30: ring1_path+'LFI_SkyMap_030-BPassCorrected_1024_R3.00_full-ringhalf-1.fits',
44: ring1_path+'LFI_SkyMap_044-BPassCorrected_1024_R3.00_full-ringhalf-1.fits',
70: ring1_path+'LFI_SkyMap_070-BPassCorrected_1024_R3.00_full-ringhalf-1.fits',
100: hm1_path+'HFI_SkyMap_100_2048_R3.01_halfmission-1.fits',
143: hm1_path+'HFI_SkyMap_143_2048_R3.01_halfmission-1.fits',
217: hm1_path+'HFI_SkyMap_217_2048_R3.01_halfmission-1.fits',
353: hm1_path+'HFI_SkyMap_353-psb_2048_R3.01_halfmission-1.fits',
545: hm1_path+'HFI_SkyMap_545_2048_R3.01_halfmission-1.fits',
857: hm1_path+'HFI_SkyMap_857_2048_R3.01_halfmission-1.fits'}
ring2_maps = {30: ring2_path+'LFI_SkyMap_030-BPassCorrected_1024_R3.00_full-ringhalf-2.fits',
44: ring2_path+'LFI_SkyMap_044-BPassCorrected_1024_R3.00_full-ringhalf-2.fits',
70: ring2_path+'LFI_SkyMap_070-BPassCorrected_1024_R3.00_full-ringhalf-2.fits',
100: hm2_path+'HFI_SkyMap_100_2048_R3.01_halfmission-2.fits',
143: hm2_path+'HFI_SkyMap_143_2048_R3.01_halfmission-2.fits',
217: hm2_path+'HFI_SkyMap_217_2048_R3.01_halfmission-2.fits',
353: hm2_path+'HFI_SkyMap_353-psb_2048_R3.01_halfmission-2.fits',
545: hm2_path+'HFI_SkyMap_545_2048_R3.01_halfmission-2.fits',
857: hm2_path+'HFI_SkyMap_857_2048_R3.01_halfmission-2.fits'}
milca_ymap = "/vol/arc3/data1/sz/Planck_legacy_data/COM_CompMAP_YSZ_R2.02/milca_ymaps.fits"
nilc_ymap = "/vol/arc3/data1/sz/Planck_legacy_data/COM_CompMAP_YSZ_R2.02/nilc_ymaps.fits"
#from os.path import expanduser
#home = expanduser("~")
#planck_path = home + "/SSD/Planck_maps/full_mission/"
#ring1_path = home + "/SSD/Planck_maps/ringhalf_1/"
#ring2_path = home + "/SSD/Planck_maps/ringhalf_2/"
#planck_maps = {30: 'LFI_SkyMap_030_1024_R2.01_full.fits',
# 44: 'LFI_SkyMap_033_1024_R2.01_full.fits',
# 70: 'LFI_SkyMap_070_2048_R2.01_full.fits',
# 100: 'HFI_SkyMap_100_2048_R2.02_full.fits',
# 143: 'HFI_SkyMap_143_2048_R2.02_full.fits',
# 217: 'HFI_SkyMap_217_2048_R2.02_full.fits',
# 353: 'HFI_SkyMap_353_2048_R2.02_full.fits',
# 545: 'HFI_SkyMap_545_2048_R2.02_full.fits',
# 857: 'HFI_SkyMap_857_2048_R2.02_full.fits'}
#ring1_maps = {30: 'LFI_SkyMap_030_1024_R2.00_full-ringhalf-1.fits',
# 44: 'LFI_SkyMap_044_1024_R2.00_full-ringhalf-1.fits',
# 70: 'LFI_SkyMap_070_2048_R2.00_full-ringhalf-1.fits',
# 100: 'HFI_SkyMap_100_2048_R2.00_full-ringhalf-1.fits',
# 143: 'HFI_SkyMap_143_2048_R2.00_full-ringhalf-1.fits',
# 217: 'HFI_SkyMap_217_2048_R2.00_full-ringhalf-1.fits',
# 353: 'HFI_SkyMap_353_2048_R2.00_full-ringhalf-1.fits',
# 545: 'HFI_SkyMap_545_2048_R2.00_full-ringhalf-1.fits',
# 857: 'HFI_SkyMap_857_2048_R2.00_full-ringhalf-1.fits'}
#ring2_maps = {30: 'LFI_SkyMap_030_1024_R2.00_full-ringhalf-2.fits',
# 44: 'LFI_SkyMap_044_1024_R2.00_full-ringhalf-2.fits',
# 70: 'LFI_SkyMap_070_2048_R2.00_full-ringhalf-2.fits',
# 100: 'HFI_SkyMap_100_2048_R2.00_full-ringhalf-2.fits',
# 143: 'HFI_SkyMap_143_2048_R2.00_full-ringhalf-2.fits',
# 217: 'HFI_SkyMap_217_2048_R2.00_full-ringhalf-2.fits',
# 353: 'HFI_SkyMap_353_2048_R2.00_full-ringhalf-2.fits',
# 545: 'HFI_SkyMap_545_2048_R2.00_full-ringhalf-2.fits',
# 857: 'HFI_SkyMap_857_2048_R2.00_full-ringhalf-2.fits'}
fname = os.path.join(datapath, "NILC_bands.txt")
data = ascii.read(fname)
NILC_bands = np.array([data[:]['col1'],
data[:]['col2'],
data[:]['col3'],
data[:]['col4'],
data[:]['col5'],
data[:]['col6'],
data[:]['col7'],
data[:]['col8'],
data[:]['col9'],
data[:]['col10'],])
def create_header(name, RA, DEC, npix, pixel_size):
'''Creates a fits-compatible header.
Parameters
----------
name: string
name of the object
RA: float
Right ascension of the object, fk5 coordinates are required
DEC: float
Declination of the object, fk5 coordinates are required
npix: int
number of pixels along each axis of the square map
pixel_size: float
pixel size in arcmin
Returns
-------
header: fits header
'''
today = str(datetime.date.today())
c0 = fits.Card('SIMPLE', True, ' conforms to FITS standard')
c1 = fits.Card('BITPIX', -32, ' array data type')
c2 = fits.Card('NAXIS', 2, ' ')
c3 = fits.Card('NAXIS1', npix, ' ')
c4 = fits.Card('NAXIS2', npix, ' ')
c5 = fits.Card('DATE', today, ' Creation date (CCYY-MM-DD) of FITS header')
c6 = fits.Card('BUNIT', 'Compton-y', ' X-axis ')
c7 = fits.Card('BAD_DATA', -1.6375E30, ' value for missing data')
#
c8 = fits.Card('RADECSYS', 'FK5', ' Celestial coordinate system')
c9 = fits.Card('EQUINOX', 2000, ' Equinox of Ref. Coord.')
c10 = fits.Card('PC1_1', 1.0, ' Degrees / Pixel')
c11 = fits.Card('PC2_1', 0.0, ' Degrees / Pixel')
c12 = fits.Card('PC1_2', 0.0, ' Degrees / Pixel')
c13 = fits.Card('PC2_2', 1.0, ' Degrees / Pixel')
#
c14 = fits.Card('CTYPE1', 'RA---TAN', ' X-axis ')
c15 = fits.Card('CRVAL1', RA, ' Origin coordinate')
c16 = fits.Card('CRPIX1', (npix+1)/2., ' Origin pixel index (1 based)')
c17 = fits.Card('CDELT1', -pixel_size/60.0, ' Degrees/pixel')
#
c18 = fits.Card('CTYPE2', 'DEC--TAN', ' Y-axis ')
c19 = fits.Card('CRVAL2', DEC, ' Origin coordinate')
c20 = fits.Card('CRPIX2', (npix+1)/2., ' Origin pixel index (1 based)')
c21 = fits.Card('CDELT2', pixel_size/60.0, ' Degrees/pixel')
#
c22 = fits.Card('LONPOLE', 180.0 , ' Native longitude of Celestial pole')
c23 = fits.Card('LATPOLE', 0.0, ' Celestial latitude of native pole')
c24 = fits.Card('EXTEND', True, ' ')
#
header = fits.Header([c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24])
return(header)
def project_maps(name = None, RA = None, DEC = None, allsky_map = None, in_file = None, map_size = 10,
pixel_size = 1.5, smooth = None, planck=None, MILCA = False, NILC = False, out_path = None,
same_units = False, same_res = False):
'''Creates gnomonic projections around given sky coordinates from a healpix-compatible all-sky map.
Parameters
----------
name: string or string array, optional
name of objects, will be used as file name if files are written. Default:None
RA: float or float array, optional
Right ascension of objects, fk5 coordinates are required. Default:None
DEC: float or float array, optional
Declination of objects, fk5 coordinates are required. Default:None
allsky_map: float_array, optional
all-sky map in healpix ring-ordered format. To be used as source map. Default:None
in_file: string, optional
file for batch-processing. Has to contain three columns: name, RA and DEC
replaces name, RA and DEC if set. Default:None
map_size: float, optional
size of the desired projected map in degree, map will be square. Default: 10
pixel_size: float, optional
pixel size of the desired projected map in arcmin. Default: 1.5
smooth: float, optional
fwhm of gaussian kernel for smoothing of output maps. Default: None
planck: int array, optional
list of Planck bands in GHz to be used as source files. Default:None
MILCA: Bool, optional
if set the Planck MILCA y-map will be used as input. Default: False
NILC: Bool, optional
if set the Planck NILC y-map will be used as input. Default: False
out_path: string, optional
name of the output directory. Default: None
same_units: bool, optional
if changed to True all Planck maps will be provided in units of K_CMB.
Default: False
same_res: bool, optional
if changed to True all Planck maps will be provided with the resolution
of the lowest-frequency channel. Default: False
Returns
-------
output: array
single image or data cube containing the projected maps.
If out_path is set, one or several files will be written
'''
name = np.asarray(name)
RA = np.asarray(RA)
DEC = np.asarray(DEC)
if in_file is not None:
data = ascii.read(in_file)
name = np.array(data[:]['col1'])
RA = np.array(data[:]['col2'])
DEC = np.array(data[:]['col3'])
npix = int(round(map_size*60 / pixel_size))
nclusters = len(name)
if planck is None:
output = np.zeros((nclusters, npix, npix), dtype = np.float32)
if MILCA is True:
allsky_map = hp.fitsfunc.read_map(milca_ymap)
if NILC is True:
allsky_map = hp.fitsfunc.read_map(nilc_ymap)
for i in np.arange(nclusters):
projected_map = hp.gnomview(allsky_map, coord=('G','C'), rot=(RA[i],DEC[i]), return_projected_map=True, xsize=npix, reso=pixel_size, no_plot=True)
if smooth is not None:
projected_map = ndimage.gaussian_filter(projected_map, sigma=smooth*fwhm2sigma/pixel_size, order=0, mode = "reflect", truncate = 10)
output[i,:,:] = projected_map
else:
nf = len(planck)
output = np.zeros((nclusters, nf, npix, npix), dtype = np.float32)
for f in np.arange(nf):
file_name = full_mission_path + full_mission_maps[planck[f]]
allsky_map = hp.fitsfunc.read_map(file_name)
for i in np.arange(nclusters):
projected_map = hp.gnomview(allsky_map, coord=('G','C'), rot=(RA[i],DEC[i]), return_projected_map=True, xsize=npix, reso=pixel_size, no_plot=True)
if smooth is not None:
projected_map = ndimage.gaussian_filter(projected_map, sigma=smooth*fwhm2sigma/pixel_size, order=0, mode = "reflect", truncate = 10)
if same_units is True:
if planck[f] == 545:
projected_map /= sz.planck_uc(545)
if planck[f] == 857:
projected_map /= sz.planck_uc(857)
if same_res is True and f != 0:
kernel = np.sqrt(sz.planck_beams(planck[0])**2 - sz.planck_beams(planck[f])**2)
#print(sz.planck_beams(planck[0]), sz.planck_beams(planck[f]), kernel*fwhm2sigma/pixel_size)
projected_map = ndimage.gaussian_filter(projected_map, sigma=kernel*fwhm2sigma/pixel_size, order=0, mode = "reflect", truncate = 10)
output[i,f,:,:] = projected_map
if out_path is not None:
for i in np.arange(nclusters):
header = create_header(name[i], RA[i], DEC[i], npix, pixel_size)
hdu = fits.PrimaryHDU()
if planck is None:
hdu.data = output[i,:,:]
else:
hdu.data = output[i,:,:,:]
hdu.header = header
hdu.writeto(out_path + name[i] + ".fits", overwrite=True)
return(output)
def ilc_windows(scales, nside, lmax = None, silent = True):
'''Computes allsky-ILC spatial window functions from the difference of gaussians.
All scales are conserved.
Parameters
----------
scales: float array
FWHM of gaussians that define the scales for the decomposition.
Have to be provided in descending order.
nside: array
Healpy nside parameter of the allsky maps.
lmax: int, optional
Defines the maximum ell. The maximum allowed value is 3*nside-1.
Default: 2*nside-1
silent: bool
If set to False, prints the minimum, maximum and mean of the summed windows as a
diagnostic. All scales are conserved if these values are 1. Default: True
Returns
-------
bands: 2D array
Spherical-Harmonic window functions to be used for spatial decomposition.
'''
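# The bands form a telescoping difference-of-Gaussians set in harmonic space:
# band_0 = B(scales[0]), band_i = B(scales[i]) - B(scales[i-1]), band_last = 1 - B(scales[-1]),
# with B(s) the Gaussian beam window of FWHM s (scales given in descending order).
# Their sum is therefore 1 at every ell, which is why the decomposition conserves all scales.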
if lmax is None:
lmax = 2*nside-1
n_scales = len(scales)+1
windows = np.zeros((n_scales+1, lmax+1))
windows[n_scales,:] = np.ones((lmax+1))
bands = np.zeros((n_scales, lmax+1))
for i in np.arange(1, n_scales):
windows[i,:] = hp.sphtfunc.gauss_beam(scales[i-1]/60*np.pi/180, pol=False, lmax = lmax)
bands[i-1,:] = windows[i,:]-windows[i-1,:]
#print([i-1, int(scales[i-1]), int(scales[i-2])])
bands[n_scales-1,:] = windows[n_scales,:]-windows[n_scales-1,:]
if silent is not True:
control = np.sum(bands, 0)
print("minimum: ", np.min(control), "maximum: ", np.max(control), "mean: ", np.mean(control))
return(bands)
def remove_offset(data, median = True, mean = False, hist = False):
'''Removes offset from ILC maps.
Parameters
----------
data: float array
ILC map
median: bool, optional
Subtracts the median of the data. Default: True
mean: bool, optional
Subtracts the mean of the data. Generally not recommended.
Default: False
hist: bool, optional
Fits a gaussian to the histogram of the
data and subtracts the best-fit center.
Default: False
Returns
-------
data: array
Offset-corrected ILC map.
'''
if median is True:
data = data - np.median(data)
elif mean is True:
data = data - np.mean(data)
elif hist is True:
fit_results = sz.create_histogram(data, np.sqrt(np.size(data)), fit=True, plot=False);
data = data - fit_results[2]
return(data)
def run_ilc(data, F, e = None, mask = None):
'''Runs the internal linear combination (ILC) algorithm on a multi-frequency
dataset using given spectral constraints to obtain an estimate of the
amplitude of the desired signal.
Parameters
----------
data: 2d array
Multi-frequency data set. 2d images have to be flattened.
The dimensions have to be n_freq x n_pix
F: array
Spectral constraints for the ilc algorithm. If contaminants
are constrained as well, the dimensions have to be
n_components x n_freq
e: array, optional
If multiple spectral components are constrained, e gives the
response of the ILC weights to the individual spectra
mask: array, optional
Flattened data mask. The mask will be used during the computation
of the data covariance matrix and later applied to the output
Returns
-------
ilc_result: array
Optimal estimate of the signal amplitude for the given spectrum
'''
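# Single-component ILC weights:  w = F C^-1 / (F C^-1 F)
# Constrained ILC (mixing matrix F, response vector e):  w = e (F C^-1 F^T)^-1 F C^-1
# C is the empirical frequency-frequency covariance of the data, computed below
# (restricted to unmasked pixels when a mask is given).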
if mask is not None:
not_masked = np.where(mask != 0)[0]
cov_matrix = np.cov(data[:,not_masked])
else:
cov_matrix = np.cov(data)
cov_inverted = np.linalg.inv(cov_matrix)
if e is None:
w = F @ cov_inverted/(F @ cov_inverted @ F)
print('ilc response: ', w @ F)
else:
w = e @ np.linalg.inv(F @ cov_inverted @ np.transpose(F)) @ F @ cov_inverted
for i in np.arange(len(e)):
print('ilc response ' + str(i) + ': ', w @ F[i,:])
ilc_result = w @ data
if mask is not None:
ilc_result *= mask
return(ilc_result)
def ilc_scales(data, F, scales, pixel_size, responce = None, mask = None):
'''Performs a spatial decomposition of the input maps and runs an internal linear
combination algorithm on each spatial slice. Returns the sum of all output slices.
Parameters
----------
data: 2d array
Multi-frequency data set. image cube of dimensions n_freq x n_pix x n_pix
F: array
Spectral constraints for the ilc algorithm. If contaminants
are constrained as well, the dimensions have to be
n_components x n_freq
scales: array
Array defining the spatial scales for the decomposition. The spatial
decomposition is achieved by computing the differences of smoothed images.
Each scale corresponds to a Gaussian Kernel.
responce: array, optional
If multiple spectral components are constrained, gives the
response of the ILC weights to the individual spectra
mask: array, optional
Flattened data mask. The mask will be used during the computation
of the data covariance matrix and later applied to the output
Returns
-------
ilc_result: array
Optimal estimate of the signal amplitude for the given spectrum
'''
nscales = len(scales)
nf = data.shape[0]
npix = data.shape[1]
output_slices = np.zeros((nscales+1,npix**2))
for i in np.arange(nscales+1):
print([i, "/", nscales])
data_slice = np.zeros((nf, npix, npix))
for f in np.arange(nf):
if i < nscales:
if i == 0:
scale1 = data[f,:,:]
else:
scale1 = ndimage.gaussian_filter(data[f,:,:], sigma=scales[i-1]*fwhm2sigma/pixel_size, order=0, mode = "constant", truncate = 10)
scale2 = ndimage.gaussian_filter(data[f,:,:], sigma=scales[i]*fwhm2sigma/pixel_size, order=0, mode = "constant", truncate = 10)
data_slice[f,:,:] = (scale1 - scale2)
else:
data_slice[f,:,:] = ndimage.gaussian_filter(data[f,:,:], sigma=scales[i-1]*fwhm2sigma/pixel_size, order=0, mode = "constant", truncate = 10)
output_slices[i,:] = run_ilc(data_slice.reshape(nf, npix**2), F, e = responce, mask = mask)
output = np.sum(output_slices, 0).reshape(npix, npix)
return(output)
def ilc(name = None, RA = None, DEC = None, in_file = None, map_size = 10, pixel_size = 1.5, maps = None,
freq = None, planck = None, scales = None, tsz = True, T_e = 0, cmb = False,
constrained = None, mask = None, smooth = None, out_path = None):
'''Computes an ILC map. The function was written with Planck data in mind, but can also handle synthetic
data and data from future surveys. The ILC algorithm is written in pixel space and thus all maps have to
be smoothed to the same spatial resolution. The result can be improved by spatially decomposing the input
maps and running the ILC algorithm on each spatial scale separately. For this, several modes are available.
Parameters
----------
name: string or string array, optional
name of objects, will be used as file name if files are written. Default:None
RA: float or float array, optional
Right ascension of objects, fk5 coordinates are required. Default:None
DEC: float or float array, optional
Declination of objects, fk5 coordinates are required. Default:None
in_file: string, optional
File for batch-processing. Has to contain three columns: name, RA and DEC
replaces name, RA and DEC if set. Default:None
map_size: float, optional
Size of the desired projected map in degree, map will be square. Default: 10
pixel_size: float, optional
Pixel size of the desired projected map in arcmin. Default: 1.5
maps: float array, optional
Cube containing multifrequency maps as input for the ILC algorithm.
The dimensions have to be nf x npix_x x npix_y. Default: None
freq: float array, optional
An array specifying the frequency bands of the input maps. Default: None
planck: int array, optional
List of Planck bands in GHz to be used as source files. Default:None
scales: float array, optional
Defines the gaussian windows to be used to spatially decompose the the maps.
The windows are computed from the difference of pairs of gaussians, the FWHMs in arcmin
of which are specified here. Default: None
tsz: bool, optional
If set to True, the function will use the tSZ spectrum to return an ILC y-map. Default: True
T_e: float, optional
Electron temperature to be used for the computation of the tSZ spectrum. The temperature will
be assigned to the full map, so use with caution. Default: 0
cmb: bool, optional
If set to True, the function will use the cmb spectrum to return a CMB map. Default: False
constrained: string or float array, optional
Additional spectral constraints for the ILC algorithm. If set to 'cmb', the cmb spectrum will
be used to minimize cmb residuals. Choosing 'tsz' will remove tSZ residuals. Alternatively,
constrained can be a float array containing an arbitrary SED.
mask: array, optional
Flattened data mask. The mask will be used during the computation
of the data covariance matrix and later applied to the output
smooth: float, optional
FWHM of gaussian kernel for smoothing of output maps. Default: None
out_path: string, optional
Path of the output directory. If set, one FITS file per object will be written.
Default: None
Returns
-------
output: float array
Returns an ILC map.
'''
name = np.asarray(name)
RA = np.asarray(RA)
DEC = np.asarray(DEC)
if scales is not None:
if scales == 'default':
scales = np.array([15,25,40,65,105,170,275])
#scales = np.array([15,25,40,55,70,90,110,130,150,200]) * pixel_size
scales = np.sqrt(scales**2 - 9.66**2)
if planck is not None:
maps = project_maps(name = name, RA = RA, DEC = DEC, in_file = in_file,
map_size = map_size, pixel_size = pixel_size,
smooth = smooth, planck = planck, out_path = out_path,
same_units = True, same_res = True)
else:
maps = maps.reshape(1, maps.shape[0], maps.shape[1], maps.shape[2])
nc = maps.shape[0]
nf = maps.shape[1]
npix = maps.shape[2]
if mask is not None:
mask = mask.reshape(npix**2)
output = np.zeros((nc, npix, npix))
if in_file is not None:
data = ascii.read(in_file)
name = np.array(data[:]['col1'])
RA = np.array(data[:]['col2'])
DEC = np.array(data[:]['col3'])
if tsz is True:
if planck is not None:
spectrum = sz.tsz_spec_planck(planck, 1, T_e = T_e)
else:
spectrum = sz.tsz_spec(freq, 1, T_e = T_e)
if cmb is True:
spectrum = np.ones(nf)
if constrained is not None:
if constrained == 'cmb' or constrained == 'CMB':
F = np.array([spectrum, np.ones(nf)])
elif constrained == 'tsz' or constrained == 'tSZ':
if planck is not None:
F = np.array([spectrum, sz.tsz_spec_planck(planck, 1, T_e = T_e)])
else:
F = np.array([spectrum, sz.tsz_spec(freq, 1, T_e = T_e)])
else:
F = np.concatenate([spectrum.reshape(1,nf), constrained])
responce = np.concatenate([np.ones((1)), np.zeros((F.shape[0]-1))])
else:
F = np.array(spectrum)
responce = None
for i in np.arange(nc):
data = maps[i,:,:,:]
if scales is None:
result = run_ilc(data.reshape(nf, npix**2), F, e = responce, mask = mask).reshape(npix, npix)
else:
result = ilc_scales(data, F, scales, pixel_size, responce = responce, mask = mask)
result = remove_offset(result, median = True)
output[i,:,:] = result
if out_path is not None:
hdu = fits.PrimaryHDU()
hdu.data = np.float32(result)
if RA is not None and DEC is not None:
header = create_header(name[i], RA[i], DEC[i], npix, pixel_size)
hdu.header = header
hdu.writeto(out_path + name[i] + "_y" + ".fits", overwrite=True)
return(output)
def ilc_allsky(allsky_maps = None, freq = None, nside = 2048, planck = None, decompose = None,
field_nside = 2, T_e = 0, lmax = None, spec = "tSZ", constrained = None,
mask = None, iter = 0, ring1 = False, ring2 = False, outfile = None):
'''Computes an allsky-ILC map. The function was written with Planck data in mind,
but can also handle synthetic data and data from future surveys. The ILC algorithm is
written in pixel space and thus all maps have to be smoothed to the same spatial resolution.
The result can be improved by spatially decomposing the input maps and running the ILC
algorithm on each spatial scale separately. For this, several modes are available, some of
which use spatial bands of the MILCA and NILC algorithms of the Planck collaboration.
Parameters
----------
allsky_maps: float array, optional
A n_freq x n_pix array containing all-sky maps in different frequency bands.
All maps have to be given at the same units and spatial resolution. Default: None
freq: float array, optional
An array specifying the frequency bands of the input maps. Default: None
nside: int, optional
Healpy nside parameter of the allsky maps. Default: 2048
planck: int array, optional
List of Planck bands in GHz to be used as source files. Default:None
decompose: float array or string, optional
Defines the gaussian windows to be used to spatially decompose the all-sky maps.
The windows are computed from the difference of pairs of gaussians, the FWHMs in arcmin
of which are specified here. Besides giving an array of values for the FWHMs, setting
decompose to 'default', 'nilc' or 'milca' uses pre-defined windows. Default: None
field_nside: int array, optional
Defines the number of fields the sky will be tessellated into for the computation of the
covariance matrix. This is done using the healpy nested pixel-indexing scheme.
The values have to be valid healpy nside parameters. In case spatial decomposition is used,
the number of field_nsides has to be n_scales+1. If one of the pre-defined modes for the
decomposition is used field_nside will be assigned automatically. Default: 2
T_e: float, optional
Electron temperature to be used for the computation of the tSZ spectrum. The temperature will
be assigned to the full sky, so use with caution. Default: 0
lmax: int, optional
Defines the maximum ell. The maximum allowed value is 3*nside-1.
Default: 2*nside-1
spec: numpy array or string
Mixing vector of the desired component or mixing matrix of all constrained components. In
the latter case, the numpy array must be of shape (nc, nfreq), where nc is the number of
components while nfreq is the number of observed frequencies. If spec is set to "tSZ" or "CMB"
the spectrum of the tSZ effect or the CMB will be used as the mixing vector. Default: "tSZ"
constrained: string or float array, optional
Desired resonce of the ILC weights to the components of the mixing matrix. The input has to
be a numpy array of length nc, where nc is the number of constrained components. Desired
components have a resonse of 1, while 0 is assigned to unwanted components. If set to 'cmb',
the cmb spectrum will be used to minimize cmb residuals. Choosing 'tsz' will remove tSZ residuals.
Alternatively, constrained can be a float array containing an arbitrary SED.
mask: array, optional
Flattened data mask. The mask will be used during the computation
of the data covariance matrix and later applied to the output
iter: int, optional
Number of iterations to be used while processing the all-sky maps.
Higher values will reduce numerical errors. Healpy default is 3.
Default: 0
ring1: bool, optional
If set to True, the Planck Ringhalf1 maps are used as input. Default: False
ring2: bool, optional
If set to True, the Planck Ringhalf2 maps are used as input. Default: False
outfile: string, optional
Path and file name for data output. The output will be stored as a healpy .fits file.
Default: None
Returns
-------
output: float array
Returns an ILC all-sky map in the healpy format.
'''
npix = hp.pixelfunc.nside2npix(nside)
if lmax is None:
lmax = 2*nside-1
if planck is not None:
nf = len(planck)
allsky_maps = np.zeros((nf,npix))
for f in np.arange(nf):
if ring1 is True:
file_name = ring1_maps[planck[f]]
elif ring2 is True:
file_name = ring2_maps[planck[f]]
else:
file_name = full_mission_path + full_mission_maps[planck[f]]
allsky_map = hp.fitsfunc.read_map(file_name)
if planck[f] == 30 or planck[f] == 44:
allsky_map = hp.pixelfunc.ud_grade(allsky_map, nside, order_in = 'RING')
if planck[f] == 545:
allsky_map /= sz.planck_uc(545)
if planck[f] == 857:
allsky_map /= sz.planck_uc(857)
if f != 0:
print("Smoothing map:", planck[f])
kernel = np.sqrt(sz.planck_beams(planck[0])**2 - sz.planck_beams(planck[f])**2) / 60 * np.pi/180
allsky_map = hp.sphtfunc.smoothing(allsky_map, fwhm = kernel, iter = iter, lmax = lmax)
if decompose is None:
allsky_maps[f,:] = hp.pixelfunc.reorder(allsky_map, r2n = True)
else:
allsky_maps[f,:] = allsky_map
del allsky_map
else:
nf = allsky_maps.shape[0]
if spec == "tSZ" or spec == "tsz":
if planck is not None:
spectrum = sz.tsz_spec_planck(planck, 1, T_e = T_e)
else:
spectrum = sz.tsz_spec(freq, 1, T_e = T_e)
elif spec == "CMB" or spec == "cmb":
spectrum = np.ones(nf)
else:
spectrum = np.array(spec)
if constrained is not None:
if constrained == 'cmb' or constrained == 'CMB':
response = np.array([1,0])
F = np.array([spectrum, np.ones(nf)])
elif constrained == 'tsz' or constrained == 'tSZ':
response = np.array([1,0])
if planck is not None:
F = np.array([spectrum, sz.tsz_spec_planck(planck, 1, T_e = T_e)])
else:
F = np.array([spectrum, sz.tsz_spec(freq, 1, T_e = T_e)])
else:
F = spectrum
response = np.array(constrained)
else:
F = np.array(spectrum)
response = None
output = np.zeros(npix)
if decompose is not None:
if decompose == 'milca':
windows = ilc_windows(np.flip(np.array([5.0,7.50,10.0000,13.4132,18.7716,25.2406,33.2659,43.5919,57.5805,78.0786,112.465,190.082,600.0,1500.0,3600.0])), nside, silent = False, lmax = 3*nside-1)
windows = windows[2:-3,:]
field_nside = np.array([1,2,2,2,2,4,4,4,8,8,16])
elif decompose == 'nilc':
windows = NILC_bands
field_nside = np.array([1,2,2,2,2,4,4,4,8,16])
elif decompose == 'default':
scales = np.array([1280,640,320,160,80,40,20,10,5])
windows = ilc_windows(scales, nside, silent = True)
field_nside = np.array([2,2,2,2,2,2,2,2,2,2])
else:
windows = ilc_windows(decompose, nside, silent = True)
n_scales = windows.shape[0]
filtered_maps = np.zeros((nf, npix))
for i in np.arange(n_scales):
for j in np.arange(nf):
filtered_maps[j,:] = hp.pixelfunc.reorder(hp.sphtfunc.smoothing(allsky_maps[j,:], beam_window = windows[i,:], iter = iter, lmax = lmax), r2n = True)
nfields = hp.pixelfunc.nside2npix(field_nside[i])
pix_per_field = int(npix/nfields)
fields = np.arange(0, nfields+1) * pix_per_field
for k in np.arange(nfields):
ilc_result = run_ilc(filtered_maps[:,fields[k]:fields[k+1]], F, e = response, mask = mask)
ilc_result = remove_offset(ilc_result, median = True)
output[fields[k]:fields[k+1]] += ilc_result
else:
nfields = hp.pixelfunc.nside2npix(field_nside)
pix_per_field = int(npix/nfields)
fields = np.arange(0, nfields+1) * pix_per_field
for k in np.arange(nfields):
ilc_result = run_ilc(allsky_maps[:, fields[k]:fields[k+1]], F, e = response, mask = mask)
ilc_result = remove_offset(ilc_result, median = True)
output[fields[k]:fields[k+1]] += ilc_result
output = np.float32(hp.pixelfunc.reorder(output, n2r = True))
if outfile is not None:
hp.fitsfunc.write_map(outfile, output, overwrite = True)
return(output)
```
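A self-contained sketch of the core `run_ilc` step on synthetic data, assuming the `sz_tools` package and its bundled data files import cleanly; the mixing vector, map size and noise level below are arbitrary illustration values, not Planck numbers:
```python
import numpy as np
from sz_tools.ilc import run_ilc  # import path assumed for the module above

rng = np.random.default_rng(0)
nf, npix = 6, 64 * 64

# Synthetic multi-frequency data: one signal template scaled by a mixing vector plus white noise,
# following run_ilc's (n_freq, n_pix) convention.
F = np.array([1.0, 0.8, 0.3, -0.2, -0.9, -1.2])   # arbitrary mixing vector
signal = rng.normal(size=npix)
data = F[:, None] * signal[None, :] + 0.1 * rng.normal(size=(nf, npix))

estimate = run_ilc(data, F)  # prints the ILC response, which should be ~1
print(np.corrcoef(estimate, signal)[0, 1])  # correlation with the injected signal
```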
|
{
"source": "jerlfan/pymodaq_plugins",
"score": 2
}
|
#### File: daq_viewer_plugins/plugins_0D/daq_0Dviewer_Mock.py
```python
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSignal, QThread
from pymodaq.daq_utils.daq_utils import ThreadCommand, getLineInfo, DataFromPlugins
import numpy as np
from pymodaq.daq_viewer.utility_classes import DAQ_Viewer_base
from easydict import EasyDict as edict
from collections import OrderedDict
from pymodaq.daq_utils.daq_utils import gauss1D
from pymodaq.daq_viewer.utility_classes import comon_parameters
class DAQ_0DViewer_Mock(DAQ_Viewer_base):
"""
=============== =================
**Attributes** **Type**
*params* dictionary list
*x_axis* 1D numpy array
*ind_data* int
=============== =================
"""
params = comon_parameters + [
{'title': 'Wait time (ms)', 'name': 'wait_time', 'type': 'int', 'value': 100, 'default': 100, 'min': 0},
{'title': 'Separated viewers', 'name': 'sep_viewers', 'type': 'bool', 'value': False},
{'title': 'Show in LCD', 'name': 'lcd', 'type': 'bool', 'value': False},
{'title': 'Mock1', 'name': 'Mock1', 'type': 'group', 'children': [
{'title': 'Npts', 'name': 'Npts', 'type': 'int', 'value': 200, 'default': 200, 'min': 10},
{'title': 'Amp', 'name': 'Amp', 'type': 'int', 'value': 20, 'default': 20, 'min': 1},
{'title': 'x0', 'name': 'x0', 'type': 'float', 'value': 50, 'default': 50, 'min': 0},
{'title': 'dx', 'name': 'dx', 'type': 'float', 'value': 20, 'default': 20, 'min': 1},
{'title': 'n', 'name': 'n', 'type': 'int', 'value': 1, 'default': 1, 'min': 1},
{'title': 'amp_noise', 'name': 'amp_noise', 'type': 'float', 'value': 0.1, 'default': 0.1, 'min': 0}
]},
{'title': 'Mock2', 'name': 'Mock2', 'type': 'group', 'children': [
{'title': 'Npts', 'name': 'Npts', 'type': 'int', 'value': 200, 'default': 200, 'min': 10},
{'title': 'Amp', 'name': 'Amp', 'type': 'int', 'value': 10, 'default': 10, 'min': 1},
{'title': 'x0', 'name': 'x0', 'type': 'float', 'value': 100, 'default': 100, 'min': 0},
{'title': 'dx', 'name': 'dx', 'type': 'float', 'value': 30, 'default': 30, 'min': 1},
{'title': 'n', 'name': 'n', 'type': 'int', 'value': 2, 'default': 2, 'min': 1},
{'title': 'amp_noise', 'name': 'amp_noise', 'type': 'float', 'value': 0.1, 'default': 0.1, 'min': 0}
]}]
def __init__(self, parent=None,
params_state=None): # init_params is a list of tuple where each tuple contains info on a 1D channel (Ntps,amplitude, width, position and noise)
super(DAQ_0DViewer_Mock, self).__init__(parent, params_state)
self.x_axis = None
self.ind_data = 0
self.lcd_init = False
def commit_settings(self, param):
"""
Setting the mock data.
============== ========= =================
**Parameters** **Type** **Description**
*param* Parameter the modified settings parameter
============== ========= =================
See Also
--------
set_Mock_data
"""
self.set_Mock_data()
if param.name() == 'wait_time':
self.emit_status(ThreadCommand('update_main_settings', [['wait_time'], param.value(), 'value']))
def set_Mock_data(self):
"""
For each Mock group of the settings tree, compute a noisy gaussian over a linspace grid using the local parameter values
and append the computed result to the data_mock list.
"""
self.data_mock = []
for param in self.settings.children():
if 'Mock' in param.name():
x = np.linspace(0, param.children()[0].value() - 1, param.children()[0].value())
self.data_mock.append(
param.children()[1].value() * gauss1D(
x, param.children()[2].value(), param.children()[3].value(),
param.children()[4].value()) + param.children()[5].value() * np.random.rand(
(param.children()[0].value())))
def ini_detector(self, controller=None):
"""
Initialisation procedure of the detector.
Returns
-------
status: edict
the initialized status.
See Also
--------
set_Mock_data
"""
self.status.update(edict(initialized=False, info="", x_axis=None, y_axis=None, controller=None))
if self.settings.child(('controller_status')).value() == "Slave":
if controller is None:
raise Exception('no controller has been defined externally while this detector is a slave one')
else:
self.controller = controller
else:
self.controller = "Mock controller"
self.set_Mock_data()
self.emit_status(ThreadCommand('update_main_settings', [['wait_time'],
self.settings.child(('wait_time')).value(), 'value']))
# initialize viewers with the future type of data
self.data_grabed_signal.emit(
[DataFromPlugins(name='Mock1', data=[np.array(0)], dim='Data0D', labels=['Mock1', 'label2'])])
self.status.initialized = True
self.status.controller = self.controller
return self.status
def close(self):
"""
not implemented.
"""
pass
def grab_data(self, Naverage=1, **kwargs):
"""
| Start new acquisition.
For each data on data_mock :
* shift right data of ind_data positions
* if naverage parameter is defined append the mean of the current data to the data to be grabbed.
| Send the data_grabed_signal once done.
=============== ======== ===============================================
**Parameters** **Type** **Description**
*Naverage* int number of data points to average
=============== ======== ===============================================
"""
data_tot = []
for ind, data in enumerate(self.data_mock):
data = np.roll(data, self.ind_data)
if Naverage > 1:
data_tot.append(np.array([np.mean(data[0:Naverage - 1])]))
else:
data_tot.append(np.array([data[0]]))
if self.settings.child(('sep_viewers')).value():
dat = [DataFromPlugins(name=f'Mock_{ind:03}', data=[data], dim='Data0D',
labels=[f'mock data {ind:03}']) for ind, data in enumerate(data_tot)]
self.data_grabed_signal.emit(dat)
else:
self.data_grabed_signal.emit([DataFromPlugins(name='Mock1', data=data_tot,
dim='Data0D', labels=['dat0', 'data1'])])
self.ind_data += 1
if self.settings.child('lcd').value():
if not self.lcd_init:
self.emit_status(ThreadCommand('init_lcd', [dict(labels=['dat0', 'data1'], Nvals=2, digits=6)]))
QtWidgets.QApplication.processEvents()
self.lcd_init = True
self.emit_status(ThreadCommand('lcd', [data_tot]))
def stop(self):
"""
not implemented.
"""
return ""
```
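Outside the pymodaq framework, the 0D mock signal boils down to a rolled, noisy gaussian sampled one point per grab. A plain-numpy sketch of that idea (not the plugin API; the gaussian below only approximates pymodaq's `gauss1D`):
```python
import numpy as np

def gauss1d(x, x0, dx, n=1):
    """Rough stand-in for pymodaq's gauss1D (assumed functional form)."""
    return np.exp(-2 * np.log(2) * ((x - x0) / dx) ** (2 * n))

npts, amp, x0, dx, noise = 200, 20, 50, 20, 0.1   # Mock1 defaults from the params list above
x = np.linspace(0, npts - 1, npts)
trace = amp * gauss1d(x, x0, dx) + noise * np.random.rand(npts)

# Each grab_data call rolls the trace and reports its first sample as the 0D value.
for ind_data in range(5):
    value = np.roll(trace, ind_data)[0]
    print(f"grab {ind_data}: {value:.3f}")
```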
#### File: daq_viewer_plugins/plugins_2D/daq_2Dviewer_Mock.py
```python
from PyQt5.QtCore import QThread
from PyQt5 import QtWidgets
import numpy as np
import pymodaq.daq_utils.daq_utils as mylib
from pymodaq.daq_viewer.utility_classes import DAQ_Viewer_base
from easydict import EasyDict as edict
from collections import OrderedDict
from pymodaq.daq_utils.daq_utils import ThreadCommand, getLineInfo, DataFromPlugins, Axis
from pymodaq.daq_viewer.utility_classes import comon_parameters
class DAQ_2DViewer_Mock(DAQ_Viewer_base):
"""
=============== ==================
**Attributes** **Type**
*params* dictionnary list
*x_axis* 1D numpy array
*y_axis* 1D numpy array
=============== ==================
See Also
--------
utility_classes.DAQ_Viewer_base
"""
params = comon_parameters + [
{'title': 'Nimages colors:', 'name': 'Nimagescolor', 'type': 'int', 'value': 1, 'default': 1, 'min': 0,
'max': 3},
{'title': 'Nimages pannels:', 'name': 'Nimagespannel', 'type': 'int', 'value': 1, 'default': 0, 'min': 0},
{'title': 'Threshold', 'name': 'threshold', 'type': 'int', 'value': 1, 'min': 0},
{'title': 'rolling', 'name': 'rolling', 'type': 'int', 'value': 1, 'min': 0},
{'title': 'Nx', 'name': 'Nx', 'type': 'int', 'value': 100, 'default': 100, 'min': 1},
{'title': 'Ny', 'name': 'Ny', 'type': 'int', 'value': 200, 'default': 200, 'min': 1},
{'title': 'Amp', 'name': 'Amp', 'type': 'int', 'value': 20, 'default': 20, 'min': 1},
{'title': 'x0', 'name': 'x0', 'type': 'slide', 'value': 50, 'default': 50, 'min': 0},
{'title': 'y0', 'name': 'y0', 'type': 'float', 'value': 100, 'default': 100, 'min': 0},
{'title': 'dx', 'name': 'dx', 'type': 'float', 'value': 20, 'default': 20, 'min': 1},
{'title': 'dy', 'name': 'dy', 'type': 'float', 'value': 40, 'default': 40, 'min': 1},
{'title': 'n', 'name': 'n', 'type': 'int', 'value': 1, 'default': 1, 'min': 1},
{'title': 'amp_noise', 'name': 'amp_noise', 'type': 'float', 'value': 4, 'default': 0.1, 'min': 0},
{'title': 'Cam. Prop.:', 'name': 'cam_settings', 'type': 'group', 'children': []},
]
def __init__(self, parent=None,
params_state=None): # init_params is a list of tuple where each tuple contains info on a 1D channel (Ntps,amplitude, width, position and noise)
super(DAQ_2DViewer_Mock, self).__init__(parent, params_state)
self.x_axis = None
self.y_axis = None
self.live = False
self.ind_commit = 0
self.ind_data = 0
def commit_settings(self, param):
"""
Activate parameters changes on the hardware.
=============== ================================ ===========================
**Parameters** **Type** **Description**
*param* instance of pyqtgraph Parameter the parameter to activate
=============== ================================ ===========================
See Also
--------
set_Mock_data
"""
self.set_Mock_data()
def set_Mock_data(self):
"""
| Set the x_axis and y_axis with a linspace distribution from settings parameters.
|
Once done, set the data mock with parameters :
* **Amp** : The amplitude
* **x0** : the origin of x
* **dx** : the gaussian width along x
* **y0** : the origin of y
* **dy** : the gaussian width along y
* **n** : ???
* **amp_noise** : the noise amplitude
Returns
-------
The computed data mock.
"""
if self.settings.child('ROIselect', 'use_ROI').value():
x_axis = np.linspace(self.settings.child('ROIselect', 'x0').value(),
self.settings.child('ROIselect', 'x0').value() + self.settings.child('ROIselect',
'width').value(),
self.settings.child('ROIselect', 'width').value(), endpoint=False)
y_axis = np.linspace(self.settings.child('ROIselect', 'y0').value(),
self.settings.child('ROIselect', 'y0').value() + self.settings.child('ROIselect',
'height').value(),
self.settings.child('ROIselect', 'height').value(), endpoint=False)
data_mock = self.settings.child(('Amp')).value() * (
mylib.gauss2D(x_axis, self.settings.child(('x0')).value(), self.settings.child(('dx')).value(),
y_axis, self.settings.child(('y0')).value(), self.settings.child(('dy')).value(),
self.settings.child(('n')).value())) + self.settings.child(
('amp_noise')).value() * np.random.rand(len(y_axis), len(x_axis))
for indy in range(data_mock.shape[0]):
data_mock[indy, :] = data_mock[indy, :] * np.sin(x_axis / 8) ** 2
data_mock = np.roll(data_mock, self.ind_data * self.settings.child(('rolling')).value(), axis=1)
try:
self.image[self.settings.child('ROIselect', 'y0').value():
self.settings.child('ROIselect', 'y0').value() + self.settings.child(
'ROIselect', 'height').value(),
self.settings.child('ROIselect', 'x0').value():
self.settings.child('ROIselect', 'x0').value() + self.settings.child('ROIselect', 'width').value()
] = data_mock
except Exception as e:
self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))
else:
x_axis = np.linspace(0, self.settings.child(('Nx')).value(), self.settings.child(('Nx')).value(),
endpoint=False)
y_axis = np.linspace(0, self.settings.child(('Ny')).value(), self.settings.child(('Ny')).value(),
endpoint=False)
data_mock = self.settings.child(('Amp')).value() * (
mylib.gauss2D(x_axis, self.settings.child(('x0')).value(), self.settings.child(('dx')).value(),
y_axis, self.settings.child(('y0')).value(), self.settings.child(('dy')).value(),
self.settings.child(('n')).value())) + self.settings.child(
('amp_noise')).value() * np.random.rand(len(y_axis), len(x_axis))
for indy in range(data_mock.shape[0]):
data_mock[indy, :] = data_mock[indy, :] * np.sin(x_axis / 4) ** 2
data_mock = np.roll(data_mock, self.ind_data * self.settings.child(('rolling')).value(), axis=1)
self.image = data_mock
self.ind_data += 1
QThread.msleep(100)
return self.image
def ini_detector(self, controller=None):
"""
Initialisation procedure of the detector initializing the status dictionary.
See Also
--------
daq_utils.ThreadCommand, get_xaxis, get_yaxis
"""
self.status.update(edict(initialized=False, info="", x_axis=None, y_axis=None, controller=None))
try:
if self.settings.child(('controller_status')).value() == "Slave":
if controller is None:
raise Exception('no controller has been defined externally while this detector is a slave one')
else:
self.controller = controller
else:
self.controller = "Mock controller"
self.x_axis = self.get_xaxis()
self.y_axis = self.get_yaxis()
# initialize viewers with the future type of data but with 0value data
self.data_grabed_signal_temp.emit(self.average_data(1, True))
# OrderedDict(name='Mock3', data=[np.zeros((128,))], type='Data1D')])
self.status.x_axis = self.x_axis
self.status.y_axis = self.y_axis
self.status.initialized = True
self.status.controller = self.controller
return self.status
except Exception as e:
self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))
self.status.info = getLineInfo() + str(e)
self.status.initialized = False
return self.status
def close(self):
"""
not implemented.
"""
pass
def get_xaxis(self):
"""
Get the current x_axis from the Mock data setting.
Returns
-------
1D numpy array
the current x_axis.
See Also
--------
set_Mock_data
"""
self.set_Mock_data()
return self.x_axis
def get_yaxis(self):
"""
Get the current y_axis from the Mock data setting.
Returns
-------
1D numpy array
the current y_axis.
See Also
--------
set_Mock_data
"""
self.set_Mock_data()
return self.y_axis
def grab_data(self, Naverage=1, **kwargs):
"""
| For each integer step of naverage range set mock data.
| Construct the data matrix and send the data_grabed_signal once done.
=============== ======== ===============================================
**Parameters** **Type** **Description**
*Naverage* int The number of images to average.
=============== ======== ===============================================
See Also
--------
set_Mock_data
"""
"live is an attempt to export data as fast as possible"
if 'live' in kwargs:
if kwargs['live']:
self.live = True
self.live = False # don't want to use that for the moment
if self.live:
while self.live:
data = self.average_data(Naverage)
QThread.msleep(100)
self.data_grabed_signal.emit(data)
QtWidgets.QApplication.processEvents()
else:
data = self.average_data(Naverage)
QThread.msleep(000)
self.data_grabed_signal.emit(data)
def average_data(self, Naverage, init=False):
data = [] # list of image (at most 3 for red, green and blue channels)
data_tmp = np.zeros_like(self.image)
for ind in range(Naverage):
data_tmp += self.set_Mock_data()
data_tmp = data_tmp / Naverage
data_tmp = data_tmp * (data_tmp >= self.settings.child('threshold').value()) * (init is False)
for ind in range(self.settings.child(('Nimagespannel')).value()):
datatmptmp = []
for indbis in range(self.settings.child(('Nimagescolor')).value()):
datatmptmp.append(data_tmp)
data.append(DataFromPlugins(name='Mock2D_{:d}'.format(ind), data=datatmptmp, dim='Data2D'))
# data.append(OrderedDict(name='Mock2D_1D',data=[np.mean(data_tmp,axis=0)], type='Data1D'))
return data
def stop(self):
"""
not implemented.
"""
self.live = False
return ""
```
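The 2D mock frame is the same idea in two dimensions: a gaussian spot modulated by a sin² fringe pattern and rolled along x on every grab. A plain-numpy sketch (not the plugin API; the gaussian below only approximates pymodaq's `gauss2D`):
```python
import numpy as np

nx, ny, amp, noise = 100, 200, 20, 4              # defaults from the params list above
xx, yy = np.meshgrid(np.arange(nx), np.arange(ny))

frame = amp * np.exp(-((xx - 50) / 20) ** 2 - ((yy - 100) / 40) ** 2)  # approximate gauss2D spot
frame += noise * np.random.rand(ny, nx)
frame *= np.sin(xx / 4) ** 2                      # fringe modulation applied along x
frame = np.roll(frame, 1, axis=1)                 # the 'rolling' setting shifts the pattern each grab
```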
#### File: daq_viewer_plugins/plugins_ND/daq_NDviewer_Mock.py
```python
from PyQt5.QtCore import QThread
from PyQt5 import QtWidgets
import numpy as np
import pymodaq.daq_utils.daq_utils as utils
from pymodaq.daq_viewer.utility_classes import DAQ_Viewer_base
from easydict import EasyDict as edict
from collections import OrderedDict
from pymodaq.daq_utils.daq_utils import ThreadCommand, getLineInfo, Axis, DataFromPlugins, NavAxis
from pymodaq.daq_viewer.utility_classes import comon_parameters
class DAQ_NDViewer_Mock(DAQ_Viewer_base):
"""
=============== ==================
**Attributes** **Type**
*params* dictionary list
*x_axis* 1D numpy array
*y_axis* 1D numpy array
=============== ==================
See Also
--------
utility_classes.DAQ_Viewer_base
"""
params = comon_parameters + [
{'name': 'rolling', 'type': 'int', 'value': 1, 'min': 0},
{'name': 'amp_noise', 'type': 'float', 'value': 4, 'default': 0.1, 'min': 0},
{'title': 'Spatial properties:', 'name': 'spatial_settings', 'type': 'group', 'children': [
{'title': 'Nx', 'name': 'Nx', 'type': 'int', 'value': 100, 'default': 100, 'min': 1},
{'title': 'Ny', 'name': 'Ny', 'type': 'int', 'value': 200, 'default': 200, 'min': 1},
{'title': 'amp', 'name': 'amp', 'type': 'int', 'value': 20, 'default': 20, 'min': 1},
{'title': 'x0', 'name': 'x0', 'type': 'slide', 'value': 50, 'default': 50, 'min': 0},
{'title': 'y0', 'name': 'y0', 'type': 'float', 'value': 100, 'default': 100, 'min': 0},
{'title': 'dx', 'name': 'dx', 'type': 'float', 'value': 20, 'default': 20, 'min': 1},
{'title': 'dy', 'name': 'dy', 'type': 'float', 'value': 40, 'default': 40, 'min': 1},
{'title': 'lambda', 'name': 'lambda', 'type': 'float', 'value': 8, 'default': 1, 'min': 0.1},
{'title': 'n', 'name': 'n', 'type': 'float', 'value': 1, 'default': 1, 'min': 1},
]},
{'title': 'Temporal properties:', 'name': 'temp_settings', 'type': 'group', 'children': [
{'title': 'Nt', 'name': 'Nt', 'type': 'int', 'value': 150, 'default': 100, 'min': 1},
{'title': 'amp', 'name': 'amp', 'type': 'int', 'value': 20, 'default': 20, 'min': 1},
{'title': 't0', 'name': 't0', 'type': 'slide', 'value': 50, 'default': 50, 'min': 0},
{'title': 'dt', 'name': 'dt', 'type': 'float', 'value': 20, 'default': 20, 'min': 1},
{'title': 'n', 'name': 'n', 'type': 'float', 'value': 1, 'default': 1, 'min': 1},
]},
{'title': 'Cam. Prop.:', 'name': 'cam_settings', 'type': 'group', 'children': []},
]
def __init__(self, parent=None,
params_state=None): # init_params is a list of tuple where each tuple contains info on a 1D channel (Ntps,amplitude, width, position and noise)
super().__init__(parent, params_state)
self.x_axis = None
self.y_axis = None
self.live = False
self.ind_commit = 0
self.ind_data = 0
def commit_settings(self, param):
"""
Activate parameters changes on the hardware.
=============== ================================ ===========================
**Parameters** **Type** **Description**
*param* instance of pyqtgraph Parameter the parameter to activate
=============== ================================ ===========================
See Also
--------
set_Mock_data
"""
self.set_Mock_data()
def set_Mock_data(self):
"""
| Set the x_axis and y_axis with a linspace distribution from settings parameters.
|
        Once done, set the mock data using the following parameters:
            * **amp** : the amplitude of the 2D gaussian
            * **x0** : the gaussian centre along x
            * **dx** : the gaussian width along x
            * **y0** : the gaussian centre along y
            * **dy** : the gaussian width along y
            * **n** : the order of the (super-)gaussian
            * **amp_noise** : the noise amplitude
Returns
-------
The computed data mock.
"""
image = np.zeros((self.settings.child('spatial_settings', 'Ny').value(),
self.settings.child('spatial_settings', 'Nx').value(),
self.settings.child('temp_settings', 'Nt').value()))
self.time_axis = np.linspace(0, self.settings.child('temp_settings', 'Nt').value(),
self.settings.child('temp_settings', 'Nt').value(),
endpoint=False)
if self.settings.child('ROIselect', 'use_ROI').value():
self.x_axis = np.linspace(self.settings.child('ROIselect', 'x0').value(),
self.settings.child('ROIselect', 'x0').value() + self.settings.child('ROIselect',
'width').value(),
self.settings.child('ROIselect', 'width').value(), endpoint=False)
self.y_axis = np.linspace(self.settings.child('ROIselect', 'y0').value(),
self.settings.child('ROIselect', 'y0').value() + self.settings.child('ROIselect',
'height').value(),
self.settings.child('ROIselect', 'height').value(), endpoint=False)
data_mock = self.settings.child('spatial_settings', 'amp').value() * (
utils.gauss2D(self.x_axis, self.settings.child('spatial_settings', 'x0').value(),
self.settings.child('spatial_settings', 'dx').value(),
self.y_axis, self.settings.child('spatial_settings', 'y0').value(),
self.settings.child('spatial_settings', 'dy').value(),
self.settings.child('spatial_settings', 'n').value())) + self.settings.child(
('amp_noise')).value() * np.random.rand(len(self.y_axis), len(self.x_axis))
for indy in range(data_mock.shape[0]):
data_mock[indy, :] = data_mock[indy, :] * np.sin(
self.x_axis / self.settings.child('spatial_settings', 'lambda').value()) ** 2
data_mock = np.roll(data_mock, self.ind_data * self.settings.child('rolling').value(), axis=1)
try:
self.image[
self.settings.child('ROIselect', 'y0').value():
self.settings.child('ROIselect', 'y0').value() + self.settings.child('ROIselect', 'height').value(),
self.settings.child('ROIselect', 'x0').value():
self.settings.child('ROIselect', 'x0').value() + self.settings.child('ROIselect', 'width').value()] \
= data_mock
except Exception as e:
self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))
else:
self.x_axis = np.linspace(0, self.settings.child('spatial_settings', 'Nx').value(),
self.settings.child('spatial_settings', 'Nx').value(),
endpoint=False)
self.y_axis = np.linspace(0, self.settings.child('spatial_settings', 'Ny').value(),
self.settings.child('spatial_settings', 'Ny').value(),
endpoint=False)
data_mock = self.settings.child('spatial_settings', 'amp').value() * (
utils.gauss2D(self.x_axis, self.settings.child('spatial_settings', 'x0').value(),
self.settings.child('spatial_settings', 'dx').value(),
self.y_axis, self.settings.child('spatial_settings', 'y0').value(),
self.settings.child('spatial_settings', 'dy').value(),
self.settings.child('spatial_settings', 'n').value())) + \
self.settings.child(('amp_noise')).value() * \
np.random.rand(len(self.y_axis), len(self.x_axis))
for indy in range(data_mock.shape[0]):
data_mock[indy, :] = data_mock[indy, :] * np.sin(
self.x_axis / self.settings.child('spatial_settings', 'lambda').value()) ** 2
ind = 0
for indy in range(data_mock.shape[0]):
for indx in range(data_mock.shape[1]):
image[indy, indx, :] = data_mock[indy, indx] * \
utils.gauss1D(self.time_axis, self.settings.child('temp_settings', 't0').value(),
self.settings.child('temp_settings', 'dt').value(),
self.settings.child('temp_settings', 'n').value()) * \
np.sin(np.roll(self.time_axis, ind) / 4) ** 2
ind += 1
image = np.roll(image, self.ind_data * self.settings.child(('rolling')).value(), axis=1)
self.image = image
self.ind_data += 1
QThread.msleep(100)
return self.image
def ini_detector(self, controller=None):
"""
        Initialisation procedure of the detector, initializing the status dictionary.
See Also
--------
daq_utils.ThreadCommand, get_xaxis, get_yaxis
"""
self.status.update(edict(initialized=False, info="", x_axis=None, y_axis=None, controller=None))
try:
if self.settings.child(('controller_status')).value() == "Slave":
if controller is None:
raise Exception('no controller has been defined externally while this detector is a slave one')
else:
self.controller = controller
else:
self.controller = "Mock controller"
self.set_Mock_data()
# initialize viewers with the future type of data
self.data_grabed_signal_temp.emit(
[DataFromPlugins(name='MockND', data=[np.zeros((128, 30, 10))], dim='DataND',
nav_axes=(0, 1)), ])
self.status.x_axis = self.x_axis
self.status.y_axis = self.y_axis
self.status.initialized = True
self.status.controller = self.controller
return self.status
except Exception as e:
self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))
self.status.info = getLineInfo() + str(e)
self.status.initialized = False
return self.status
def close(self):
"""
not implemented.
"""
pass
def grab_data(self, Naverage=1, **kwargs):
"""
        | For each step in the Naverage range, generate new mock data.
        | Construct the data matrix and send the data_grabed_signal once done.
        =============== ======== ===============================================
        **Parameters**  **Type** **Description**
        *Naverage*      int      The number of frames to generate and average.
        =============== ======== ===============================================
See Also
--------
set_Mock_data
"""
"live is an attempt to export data as fast as possible"
if 'live' in kwargs:
if kwargs['live']:
self.live = True
self.live = False # don't want to use that for the moment
if self.live:
while self.live:
data = self.average_data(Naverage)
QThread.msleep(100)
self.data_grabed_signal.emit(data)
QtWidgets.QApplication.processEvents()
else:
data = self.average_data(Naverage)
            QThread.msleep(0)
self.data_grabed_signal.emit(data)
def average_data(self, Naverage):
data_tmp = np.zeros_like(self.image)
for ind in range(Naverage):
data_tmp += self.set_Mock_data()
data_tmp = data_tmp / Naverage
data = [DataFromPlugins(name='MockND_{:d}'.format(ind), data=[data_tmp], dim='DataND', nav_axes=(1, 0),
nav_x_axis=NavAxis(data=self.x_axis, label='X space', nav_index=1),
nav_y_axis=NavAxis(data=self.y_axis, label='Y space', nav_index=0),
x_axis=Axis(data=self.time_axis, label='time label'))]
return data
def stop(self):
"""
            Stop the live grab by resetting the live flag.
"""
self.live = False
return ""
```
|
{
"source": "jerlfan/PyMoDAQ",
"score": 2
}
|
#### File: db/db_logger/db_logger.py
```python
import logging
import datetime
from PyQt5 import QtCore
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import database_exists, create_database
from .db_logger_models import Base, Data0D, Data1D, Data2D, LogInfo, Detector, Configuration
from pymodaq.daq_utils import daq_utils as utils
from pymodaq.daq_utils.gui_utils import dashboard_submodules_params
from pyqtgraph.parametertree import Parameter, ParameterTree
logger = utils.set_logger(utils.get_module_name(__file__))
config = utils.load_config()
class DBLogHandler(logging.StreamHandler):
def __init__(self, dblogger):
super().__init__()
self.dblogger = dblogger
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.setFormatter(formatter)
def emit(self, record):
msg = self.format(record)
self.dblogger.add_log(msg)
class DbLogger:
user = config['network']['logging']['user']['username']
user_pwd = config['network']['logging']['user']['pwd']
def __init__(self, database_name, ip_address=config['network']['logging']['sql']['ip'],
port=config['network']['logging']['sql']['port'], save2D=False):
"""
Parameters
----------
models_base
ip_address
port
database_name
"""
self.ip_address = ip_address
self.port = port
self.database_name = database_name
self.engine = None
self.Session = None
self._save2D = save2D
@property
def save2D(self):
return self._save2D
@save2D.setter
def save2D(self, value):
self._save2D = value
@contextmanager
def session_scope(self):
"""Provide a transactional scope around a series of operations."""
session = self.Session()
try:
yield session
session.commit()
except Exception as e:
logger.error(str(e))
session.rollback()
finally:
session.close()
def connect_db(self):
self.engine = create_engine(f"postgresql://{self.user}:{self.user_pwd}@{self.ip_address}:"
f"{self.port}/{self.database_name}")
try:
if not database_exists(self.engine.url):
create_database(self.engine.url)
assert database_exists(self.engine.url)
except Exception as e:
logger.critical(f'Impossible to connect to the DB: {str(e)}')
return False
self.create_table()
self.Session = sessionmaker(bind=self.engine)
return True
def close(self):
if self.engine is not None:
self.engine.dispose()
def create_table(self):
# create tables if not existing
if self.engine is not None:
Base.metadata.create_all(self.engine)
def get_detectors(self, session):
"""Returns the list of detectors name
Parameters
----------
session: (Session) SQLAlchemy session instance for db transactions
Returns
-------
list of str
"""
return [res[0] for res in session.query(Detector.name)]
    def add_detectors(self, detectors):
        """
        Add detectors to the detectors table.
        Parameters
        ----------
        detectors: (list) list of dict with keys: name and xml_settings
        """
if not isinstance(detectors, list):
detectors = [detectors]
with self.session_scope() as session:
existing_detectors = [d.name for d in session.query(Detector)]
for det in detectors:
if det['name'] not in existing_detectors:
session.add(Detector(name=det['name'], settings_xml=det['xml_settings']))
def add_config(self, config_settings):
with self.session_scope() as session:
session.add(Configuration(timestamp=datetime.datetime.now().timestamp(), settings_xml=config_settings))
def add_log(self, log):
with self.session_scope() as session:
session.add(LogInfo(log))
def add_datas(self, datas):
with self.session_scope() as session:
time_stamp = datas['acq_time_s']
detector_name = datas['name']
if session.query(Detector).filter_by(name=detector_name).count() == 0:
# security detector adding in case it hasn't been done previously (and properly)
                self.add_detectors(dict(name=detector_name))
det_id = session.query(Detector).filter_by(name=detector_name).one().id # detector names should/are unique
if 'data0D' in datas:
for channel in datas['data0D']:
session.add(Data0D(timestamp=time_stamp, detector_id=det_id,
channel=f"{datas['data0D'][channel]['name']}:{channel}",
value=datas['data0D'][channel]['data']))
if 'data1D' in datas:
for channel in datas['data1D']:
session.add(Data1D(timestamp=time_stamp, detector_id=det_id,
channel=f"{datas['data1D'][channel]['name']}:{channel}",
value=datas['data1D'][channel]['data'].tolist()))
if 'data2D' in datas and self.save2D:
for channel in datas['data2D']:
session.add(Data2D(timestamp=time_stamp, detector_id=det_id,
channel=f"{datas['data2D'][channel]['name']}:{channel}",
value=datas['data2D'][channel]['data'].tolist()))
# not yet dataND as db should not be where to save these datas
class DbLoggerGUI(DbLogger, QtCore.QObject):
params = [
{'title': 'Database:', 'name': 'database_type', 'type': 'list', 'value': 'PostgreSQL',
'values': ['PostgreSQL', ]},
{'title': 'Server IP:', 'name': 'server_ip', 'type': 'str',
'value': config['network']['logging']['sql']['ip']},
{'title': 'Server port:', 'name': 'server_port', 'type': 'int',
'value': config['network']['logging']['sql']['port']},
{'title': 'Connect:', 'name': 'connect_db', 'type': 'bool_push', 'value': False},
{'title': 'Connected:', 'name': 'connected_db', 'type': 'led', 'value': False, 'readonly': True},
] + dashboard_submodules_params
def __init__(self, database_name):
DbLogger.__init__(self, database_name, ip_address=config['network']['logging']['sql']['ip'],
port=config['network']['logging']['sql']['port'], save2D=False)
QtCore.QObject.__init__(self)
self.settings = Parameter.create(title='DB settings', name='db_settings', type='group',
children=self.params)
self.settings.child(('do_save')).hide()
self.settings_tree = ParameterTree()
self.settings_tree.setMinimumHeight(310)
self.settings_tree.setParameters(self.settings, showTop=False)
self.settings.sigTreeStateChanged.connect(self.parameter_tree_changed)
def parameter_tree_changed(self, param, changes):
"""
Check for changes in the given (parameter,change,information) tuple list.
In case of value changed, update the DAQscan_settings tree consequently.
=============== ============================================ ==============================
**Parameters** **Type** **Description**
*param* instance of pyqtgraph parameter the parameter to be checked
*changes* (parameter,change,information) tuple list the current changes state
=============== ============================================ ==============================
"""
for param, change, data in changes:
path = self.settings.childPath(param)
if path is not None:
childName = '.'.join(path)
else:
childName = param.name()
if change == 'childAdded':
pass
elif change == 'value':
if param.name() == 'server_ip':
self.ip_address = param.value()
elif param.name() == 'server_port':
self.port = param.value()
elif param.name() == 'connect_db':
status = self.connect_db()
self.settings.child(('connected_db')).setValue(status)
elif param.name() == 'save_2D':
self.save2D = param.value()
elif change == 'parent':
pass
```
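A minimal usage sketch for the DbLogger class above — not part of the original file. It assumes a reachable PostgreSQL server with the credentials taken from the pymodaq config; the database name, detector name and payload below are made up for illustration, and the import should be adjusted to wherever db_logger.py lives in the installation.
```python
# Hypothetical usage of DbLogger; 'mock_db' and the payload are illustrative only.
from db_logger import DbLogger  # adjust to the actual package path of the file above

logger_db = DbLogger('mock_db', save2D=False)
if logger_db.connect_db():
    # register the detector before pushing data for it (keys as used in add_detectors)
    logger_db.add_detectors([dict(name='Mock_detector', xml_settings='<settings/>')])
    # push one acquisition; the dict layout mirrors what add_datas expects
    logger_db.add_datas({
        'name': 'Mock_detector',
        'acq_time_s': 0.0,
        'data0D': {'CH000': {'name': 'Mock_detector', 'data': 1.23}},
    })
    logger_db.close()
```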
#### File: pymodaq/examples/custom_viewer.py
```python
import pymodaq.daq_utils.parameter.utils
from pymodaq.daq_utils.plotting.viewer2D.viewer2D_main import Viewer2D
from pymodaq.daq_utils import gui_utils as gutils
from pymodaq.daq_utils import daq_utils as utils
from pyqtgraph.dockarea import Dock
from pyqtgraph.parametertree import ParameterTree, Parameter
from pymodaq.daq_utils.parameter import pymodaq_ptypes as custom_tree
from pymodaq.daq_utils.scanner import TableModelTabular
from PyQt5.QtCore import QObject, Qt, pyqtSlot
from PyQt5 import QtWidgets
class ViewerPointList(QObject):
def __init__(self, area):
super().__init__()
self.area = area
self.viewer = None
self.set_viewer()
self.set_point_list()
self.viewer.sig_double_clicked.connect(self.double_click_action)
@pyqtSlot(float, float)
def double_click_action(self, posx, posy):
xs, ys = self.viewer.scale_axis(posx, posy)
indx, indy = self.viewer.mapfromview('red', posx, posy)
z = self.viewer.transform_image(self.viewer.raw_data["red"])[utils.rint(indy), utils.rint(indx)]
self.table_model.add_data(self.table_view.currentIndex().row() + 1, [xs, ys, z])
def setData(self, data):
self.viewer.setImage(data_red=data)
def setXaxis(self, xaxis):
self.viewer.x_axis = xaxis
def setYaxis(self, yaxis):
self.viewer.y_axis = yaxis
def set_viewer(self):
dock_viewer = Dock('Viewer2D')
self.area.addDock(dock_viewer, 'right')
widget = QtWidgets.QWidget()
self.viewer = Viewer2D(widget)
dock_viewer.addWidget(widget)
def set_point_list(self):
dock_list = Dock('List of points')
self.area.addDock(dock_list, 'right')
params = [{'title': 'Positions', 'name': 'tabular_table', 'type': 'table_view',
'delegate': gutils.SpinBoxDelegate, 'menu': True}, ]
self.settings_tree = ParameterTree()
self.settings = Parameter.create(name='settings', title='Settings', type='group', children=params)
self.settings_tree.setParameters(self.settings, showTop=False)
dock_list.addWidget(self.settings_tree)
init_data = [[0., 0., 0.]]
self.table_model = TableModelTabular(init_data, ['x', 'y', 'data'])
self.table_view = pymodaq.daq_utils.parameter.utils.get_widget_from_tree(self.settings_tree, custom_tree.TableViewCustom)[0]
self.settings.child(('tabular_table')).setValue(self.table_model)
self.table_view.horizontalHeader().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.table_view.horizontalHeader().setStretchLastSection(True)
self.table_view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self.table_view.setSelectionMode(QtWidgets.QTableView.SingleSelection)
styledItemDelegate = QtWidgets.QStyledItemDelegate()
styledItemDelegate.setItemEditorFactory(gutils.SpinBoxDelegate())
self.table_view.setItemDelegate(styledItemDelegate)
self.table_view.setDragEnabled(True)
self.table_view.setDropIndicatorShown(True)
self.table_view.setAcceptDrops(True)
self.table_view.viewport().setAcceptDrops(True)
self.table_view.setDefaultDropAction(Qt.MoveAction)
self.table_view.setDragDropMode(QtWidgets.QTableView.InternalMove)
self.table_view.setDragDropOverwriteMode(False)
self.table_view.add_data_signal[int].connect(self.table_model.add_data)
self.table_view.remove_row_signal[int].connect(self.table_model.remove_data)
self.table_view.load_data_signal.connect(self.table_model.load_txt)
self.table_view.save_data_signal.connect(self.table_model.save_txt)
if __name__ == '__main__':
from pymodaq.daq_utils.gui_utils import DockArea
from pymodaq.daq_utils.daq_utils import Axis
import sys
import numpy as np
app = QtWidgets.QApplication(sys.argv)
area = DockArea()
win = QtWidgets.QMainWindow()
win.setCentralWidget(area)
viewer = ViewerPointList(area)
Nx = 100
Ny = 200
x = (np.linspace(0, Nx - 1, Nx) + 100) / 2
y = (np.linspace(0, Ny - 1, Ny) - 10) * 2
from pymodaq.daq_utils.daq_utils import gauss2D
data_red = 3 * gauss2D(x, 0.2 * Nx, Nx / 5, y, 0.3 * Ny, Ny / 5, 1, 90)
viewer.setData(data_red)
viewer.setXaxis(Axis(data=x, label='This is x axis', units='au'))
viewer.setYaxis(Axis(data=y, label='This is y axis', units='au'))
win.show()
app.processEvents()
sys.exit(app.exec_())
```
|
{
"source": "Jerllina/DeepLearningOnTensorflow_Programs",
"score": 3
}
|
#### File: DeepLearningOnTensorflow_Programs/MNIST_series/mnist_headforTensorflow.py
```python
import sys
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
#The dataset is split into 3 parts:
#55000 training images (mnist.train), 5000 validation images (mnist.validation) and 10000 test images (mnist.test)
#one-hot vectors:
#all components are 0 except for a single 1 at the index corresponding to the label
# Define loss and optimizer
import numpy as np
#def train_model():
'''first convolutional layer'''
#input size: 28*28
x = tf.placeholder (tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None,10])  # one-hot labels; must be float for the softmax cross entropy below
# tf.reshape adjusts the tensor shape; -1 means that dimension is inferred
x_image = tf.reshape (x, [-1, 28, 28, 1])
#break the weight symmetry & avoid zero gradients
W_conv1 = tf.Variable (tf.truncated_normal ([5, 5, 1, 32], stddev = 0.1))#fill with random values
b_conv1 = tf.Variable (tf.constant (0.1, shape = [32]))
#ReLU non-linearity; the convolution filter moves one pixel at a time
h_conv1 = tf.nn.relu (tf.nn.conv2d(x_image, W_conv1, strides = [1, 1, 1, 1], padding = 'SAME') + b_conv1)
'''first pooling layer'''
#14*14
h_pool1 = tf.nn.max_pool (h_conv1, ksize = [1, 2, 2, 1],strides = [1, 2, 2, 1], padding = 'SAME')#SAME: zero padding
'''second convolutional layer'''
W_conv2 = tf.Variable (tf.truncated_normal ([5, 5, 32, 64], stddev = 0.1))
b_conv2 = tf.Variable (tf.constant(0.1, shape = [64]))
#ReLU non-linearity
h_conv2 = tf.nn.relu (tf.nn.conv2d(h_pool1, W_conv2, strides = [1, 1, 1, 1], padding = 'SAME') + b_conv2)
'''second pooling layer'''
#7*7
h_pool2 = tf.nn.max_pool(h_conv2, ksize = [1, 2, 2, 1],strides = [1, 2, 2, 1], padding = 'SAME')
'''fully connected layer'''
W_fc1 = tf.Variable (tf.truncated_normal ([7 * 7 * 64, 1024], stddev = 0.1))
b_fc1 = tf.Variable (tf.constant (0.1, shape = [1024]))
#flatten the 4-D tensor to 2-D
h_pool2_flat = tf.reshape (h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu (tf.matmul (h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder (tf.float32)
h_fc1_drop = tf.nn.dropout (h_fc1, keep_prob)
'''output layer'''
#fully connected layer + softmax: 1024 -> 10 digit classes
W_fc2 = tf.Variable (tf.truncated_normal ([1024, 10], stddev = 0.1))
b_fc2 = tf.Variable (tf.constant(0.1, shape = [10]))
y_conv = tf.matmul (h_fc1_drop, W_fc2) + b_fc2  # logits; the softmax is applied inside the cross entropy below
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits (labels = y_, logits = y_conv))
'''model evaluation'''
train_step = tf.train.AdamOptimizer(1e-04).minimize (cross_entropy)
correct_prediction = tf.equal(tf.argmax (y_conv, 1),tf.argmax(y_, 1))
accuracy = tf.reduce_mean (tf.cast(correct_prediction, tf.float32))
sess = tf.InteractiveSession()
sess.run (tf.global_variables_initializer())
for i in range(20000):
batch_xs, batch_ys = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval (feed_dict = {x: batch_xs, y_: batch_ys, keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run (feed_dict = {x: batch_xs, y_: batch_ys, keep_prob: 0.5})
print ('test accuracy %g' % accuracy.eval (feed_dict = {x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
```
|
{
"source": "Jerllina/GrokkingAlgorithms_LearningRecords",
"score": 4
}
|
#### File: Jerllina/GrokkingAlgorithms_LearningRecords/GreedyAlgorithm.py
```python
def choose_best_station():
states_needed=statesneeded
while states_needed:
best_station=None
states_covered=set()
#traverse all stations
for station,states_for_station in stations.items():
covered=states_needed & states_for_station
if len(covered)>len(states_covered):
best_station=station
states_covered=covered
final_stations.add(best_station)
states_needed-=states_covered
print(final_stations)
if __name__=='__main__' :
'''input the list & translate into a set'''
statesneeded=set(['mt','wa','or','id','nv','ut','ca','az'])
final_stations=set()
'''store the information into a hash table'''
stations={}
stations['kone']=set(['id','nv','ut'])
stations['ktwo']=set(['wa','id','mt'])
stations['kthree']=set(['or','nv','ca'])
stations['kfour']=set(['nv','ut'])
stations['kfive']=set(['ca','az'])
choose_best_station()
```
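The script above drives the greedy set-cover logic through module-level globals; the following sketch (my own refactoring, not part of the repository) expresses the same algorithm as a reusable function:
```python
def greedy_set_cover(universe, subsets):
    """Greedy set cover: repeatedly pick the subset covering the most uncovered elements.

    universe: set of elements to cover
    subsets: dict mapping a subset name to the set of elements it covers
    """
    uncovered = set(universe)
    chosen = set()
    while uncovered:
        best = max(subsets, key=lambda name: len(uncovered & subsets[name]))
        if not uncovered & subsets[best]:
            break  # remaining elements cannot be covered
        chosen.add(best)
        uncovered -= subsets[best]
    return chosen

# same example data as in the __main__ block above
stations = {'kone': {'id', 'nv', 'ut'}, 'ktwo': {'wa', 'id', 'mt'},
            'kthree': {'or', 'nv', 'ca'}, 'kfour': {'nv', 'ut'},
            'kfive': {'ca', 'az'}}
print(greedy_set_cover({'mt', 'wa', 'or', 'id', 'nv', 'ut', 'ca', 'az'}, stations))
```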
#### File: Jerllina/GrokkingAlgorithms_LearningRecords/SelectionSort.py
```python
def findSmallest(arr):
smallest=arr[0]
smallest_index=0
for i in range(1,len(arr)):
if arr[i]<smallest:
smallest=arr[i]
smallest_index=i
return smallest_index
#the selection sort algorithm
def selectionSort(arr):
newArr=[]
for i in range(len(arr)):
smallest=findSmallest(arr)
#pop() used to remove an element from the list
newArr.append(arr.pop(smallest))
return newArr
#test
print(selectionSort([5,3,1,6,2,10]))
```
|
{
"source": "Jerllina/MLiA_LearningRecording",
"score": 2
}
|
#### File: MLiA_SourceCode/Ch15/mrMeanMapper.py
```python
import sys
from numpy import mat, mean, power
def read_input(file):
for line in file:
yield line.rstrip()
input = read_input(sys.stdin)#creates a list of input lines
input = [float(line) for line in input] #overwrite with floats
numInputs = len(input)
input = mat(input)
sqInput = power(input,2)
#output size, mean, mean(square values)
print "%d\t%f\t%f" % (numInputs, mean(input), mean(sqInput)) #calc mean of columns
print >> sys.stderr, "report: still alive"
```
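The mapper above is written for Hadoop streaming and reads one number per line from stdin. A small local smoke test, reproducing the same size/mean/mean-of-squares output line on synthetic input (values chosen arbitrarily):
```python
# Local test of the mapper logic without Hadoop: feed a fake stdin through read_input.
import io
from numpy import mat, mean, power

def read_input(file):
    for line in file:
        yield line.rstrip()

fake_stdin = io.StringIO("1.0\n2.0\n3.0\n4.0\n")
values = mat([float(line) for line in read_input(fake_stdin)])
print("%d\t%f\t%f" % (values.size, mean(values), mean(power(values, 2))))
# expected output: 4    2.500000    7.500000
```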
#### File: MLiA_SourceCode/Ch15/py27dbg.py
```python
from mrjob.job import MRJob
import pickle
from numpy import *
class MRsvm(MRJob):
def map(self, mapperId, inVals): #needs exactly 2 arguments
if False: yield
yield (1, 22)
def reduce(self, _, packedVals):
yield "fuck ass"
def steps(self):
return ([self.mr(mapper=self.map, reducer=self.reduce)])
if __name__ == '__main__':
MRsvm.run()
```
#### File: MLiA_LearningRecording/SVM/svm_SMO.py
```python
from numpy import *
'''load data'''
def loadDataSet(fileName):
dataMat = []
labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = line.strip().split('\t')
dataMat.append([float(lineArr[0]), float(lineArr[1])])#alpha1,alpha2
labelMat.append(float(lineArr[2]))#label
return dataMat,labelMat
def selectJrand(i,m):#m:the number of alpha
j=i #select any J not equal to i
while (j==i):
j = int(random.uniform(0,m))
return j
#modify aj
def clipAlpha(aj,H,L):
if aj > H:
aj = H
if L > aj:
aj = L
return aj
'''simplified SMO'''
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
#input:dataset,labels,C,tolerated error rate,max iter times
dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
b = 0; m,n = shape(dataMatrix)
alphas = mat(zeros((m,1)))#initialize to 0
iter = 0
while (iter < maxIter):
alphaPairsChanged = 0#record the alpha changed times
for i in range(m):
fXi = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[i,:].T)) + b
Ei = fXi - float(labelMat[i])#prediction error
if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or ((labelMat[i]*Ei > toler) and (alphas[i] > 0)):
j = selectJrand(i,m)
fXj = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[j,:].T)) + b
Ej = fXj - float(labelMat[j])
alphaIold = alphas[i].copy(); alphaJold = alphas[j].copy();
if (labelMat[i] != labelMat[j]):
L = max(0, alphas[j] - alphas[i])
H = min(C, C + alphas[j] - alphas[i])
else:
L = max(0, alphas[j] + alphas[i] - C)
H = min(C, alphas[j] + alphas[i])
if L==H: print ("L==H"); continue
                #eta: the optimal amount by which to change alphas[j]
                #if eta >= 0, skip this pair (no useful update possible)
eta = 2.0 * dataMatrix[i,:]*dataMatrix[j,:].T - dataMatrix[i,:]*dataMatrix[i,:].T - dataMatrix[j,:]*dataMatrix[j,:].T
if eta >= 0: print ("eta>=0"); continue
alphas[j] -= labelMat[j]*(Ei - Ej)/eta
alphas[j] = clipAlpha(alphas[j],H,L)
if (abs(alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); continue
alphas[i] += labelMat[j]*labelMat[i]*(alphaJold - alphas[j])#update i by the same amount as j
                #the update is in the opposite direction
b1 = b - Ei- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[i,:].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[i,:]*dataMatrix[j,:].T
b2 = b - Ej- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[j,:].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[j,:]*dataMatrix[j,:].T
if (0 < alphas[i]) and (C > alphas[i]): b = b1
elif (0 < alphas[j]) and (C > alphas[j]): b = b2
else: b = (b1 + b2)/2.0
alphaPairsChanged += 1
print("iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
if (alphaPairsChanged == 0): iter += 1
else: iter = 0
print ("iteration number: %d" % iter)
return b,alphas
if __name__=='__main__' :
#test
dataArr, labelArr=loadDataSet('testSet.txt')
b,alphas=smoSimple(dataArr, labelArr, 0.6, 0.001, 40)
print('b:',b)
print('alphas:',alphas[alphas>0])
```
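Once smoSimple has returned, the weight vector of the linear SVM follows from w = sum_i alpha_i y_i x_i. The helper below is a sketch of my own (not part of the file) showing how the multipliers can be turned into predictions:
```python
# Sketch: recover the hyperplane from the multipliers returned by smoSimple
# and classify a training point; assumes testSet.txt is available as above.
from numpy import mat, multiply, sign
from svm_SMO import loadDataSet, smoSimple  # the module defined above

def calc_w(alphas, data, labels):
    X = mat(data)
    y = mat(labels).transpose()
    # w = sum_i alpha_i * y_i * x_i; only support vectors (alpha_i > 0) contribute
    return X.T * multiply(alphas, y)

if __name__ == '__main__':
    dataArr, labelArr = loadDataSet('testSet.txt')
    b, alphas = smoSimple(dataArr, labelArr, 0.6, 0.001, 40)
    w = calc_w(alphas, dataArr, labelArr)
    prediction = sign(mat(dataArr[0]) * w + b)  # +1 / -1 decision
    print('prediction:', prediction, 'true label:', labelArr[0])
```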
#### File: MLiA_LearningRecording/TreeRegression/Tkinter_GUI_Treetest.py
```python
from tkinter import *
'''
mylabel=Label(root,text='hello world~')
mylabel.grid()
'''
from TreesReg import *
from treePruning import *
from Treesmodel import *
from TreesRegPrediction import *
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
def reDraw(tolS,tolN):
reDraw.f.clf() # clear the figure
reDraw.a = reDraw.f.add_subplot(111)
if chkBtnVar.get():
if tolN < 2: tolN = 2
myTree=createTree(reDraw.rawDat, modelLeaf,modelErr, (tolS,tolN))
yHat = createForeCast(myTree, reDraw.testDat,modelTreeEval)
else:
myTree=createTree(reDraw.rawDat, ops=(tolS,tolN))
yHat = createForeCast(myTree, reDraw.testDat)
reDraw.a.scatter(array(reDraw.rawDat[:,0]), array(reDraw.rawDat[:,1]), s=5) #use scatter for data set
reDraw.a.plot(reDraw.testDat, yHat, linewidth=2.0) #use plot for yHat
reDraw.canvas.show()
def getInputs():
try: tolN = int(tolNentry.get())
except:
tolN = 10
print("enter Integer for tolN")
tolNentry.delete(0, END)
tolNentry.insert(0,'10')
try: tolS = float(tolSentry.get())
except:
tolS = 1.0
print ("enter Float for tolS")
tolSentry.delete(0, END)
tolSentry.insert(0,'1.0')
return tolN,tolS
def drawNewTree():
tolN,tolS = getInputs()#get values from Entry boxes
reDraw(tolS,tolN)
root=Tk()
reDraw.f = Figure(figsize=(5,4), dpi=100) #create canvas
reDraw.canvas = FigureCanvasTkAgg(reDraw.f, master=root)
reDraw.canvas.show()
reDraw.canvas.get_tk_widget().grid(row=0, columnspan=3)
Label(root, text="tolN").grid(row=1, column=0)
tolNentry = Entry(root)
tolNentry.grid(row=1, column=1)
tolNentry.insert(0,'10')
Label(root, text="tolS").grid(row=2, column=0)
tolSentry = Entry(root)
tolSentry.grid(row=2, column=1)
tolSentry.insert(0,'1.0')
Button(root, text="ReDraw", command=drawNewTree).grid(row=1, column=2, rowspan=3)
chkBtnVar = IntVar()
chkBtn = Checkbutton(root, text="Model Tree", variable = chkBtnVar)
chkBtn.grid(row=3, column=0, columnspan=2)
reDraw.rawDat = mat(loadDataSet('sine.txt'))
reDraw.testDat = arange(min(reDraw.rawDat[:,0]),max(reDraw.rawDat[:,0]),0.01)
reDraw(1.0, 10)
root.mainloop() #start the loop
```
#### File: MLiA_LearningRecording/TreeRegression/TreesReg.py
```python
from numpy import *
# create tree node
class treeNode():
    def __init__(self, feat, val, right, left):
        self.featureToSplitOn = feat
        self.valueOfSplit = val
        self.rightBranch = right
        self.leftBranch = left
'''load data'''
def loadDataSet(fileName):
dataMat = []
fr = open(fileName)
for line in fr.readlines():
curLine = line.strip().split('\t')
fltLine = list(map(float,curLine)) #read in lines, map all elements to float()
dataMat.append(fltLine)
return dataMat
'''binary split for 1 feature'''
def binSplitDataSet(dataSet, feature, value):
mat0 = dataSet[nonzero(dataSet[:,feature] > value)[0],:]
mat1 = dataSet[nonzero(dataSet[:,feature] <= value)[0],:]
return mat0,mat1
#create leaves
def regLeaf(dataSet): #return the value used for each leaf
return mean(dataSet[:,-1]) #mean value
#error calculation
def regErr(dataSet):
return var(dataSet[:,-1]) * shape(dataSet)[0] #Mean variance function & sum
'''tree construction'''
def createTree(dataSet, leafType=regLeaf, errType=regErr, ops=(1,4)):# dataSet → NumPy Mat ,array filtering
dataSet=mat(dataSet) #leafType:function to create leaves
feat, val = chooseBestSplit(dataSet, leafType, errType, ops)#errType: error calculating function chosen
if feat == None:
return val
retTree = {}
retTree['spInd'] = feat
retTree['spVal'] = val
lSet, rSet = binSplitDataSet(dataSet, feat, val)
retTree['left'] = createTree(lSet, leafType, errType, ops)
    retTree['right'] = createTree(rSet, leafType, errType, ops)  # recursion: the function calls itself
return retTree
def chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1,4)):
tolS = ops[0] #Allowed error reduction
tolN = ops[1] #least data number to be splited
#if only 1 node , end
if len(set(dataSet[:,-1].T.tolist()[0])) == 1: #exit cond 1
return None, leafType(dataSet)
m,n = shape(dataSet)
#the choice of the best feature is driven by Reduction in RSS error from mean
S = errType(dataSet)
bestS = inf; bestIndex = 0; bestValue = 0
for featIndex in range(n-1):
for splitVal in set(dataSet[:,featIndex].T.tolist()[0]):
mat0, mat1 = binSplitDataSet(dataSet, featIndex, splitVal)
if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN): continue
newS = errType(mat0) + errType(mat1)
if newS < bestS:
bestIndex = featIndex
bestValue = splitVal
bestS = newS
#if the error is less than a threshold , end
if (S - bestS) < tolS:
return None, leafType(dataSet) #exit cond 2
mat0, mat1 = binSplitDataSet(dataSet, bestIndex, bestValue)
if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN): #exit cond 3
return None, leafType(dataSet)
return bestIndex,bestValue
if __name__=='__main__' :
#test
myMat=mat(loadDataSet('ex0.txt'))
mytree=createTree(myMat)
print(mytree)
```
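createTree above returns a nested dict with keys 'spInd', 'spVal', 'left' and 'right'. The traversal sketch below (my own helper, analogous to what the TreesRegPrediction module is used for elsewhere) evaluates such a regression tree at a single point:
```python
# Sketch: evaluate a regression tree built by createTree at one data point.
from numpy import mat
from TreesReg import loadDataSet, createTree  # the module defined above

def is_tree(obj):
    return isinstance(obj, dict)

def tree_forecast(tree, point):
    """Walk the nested-dict tree and return the mean value stored at the matching leaf."""
    if not is_tree(tree):
        return float(tree)
    if point[tree['spInd']] > tree['spVal']:
        return tree_forecast(tree['left'], point)   # the left branch holds values > spVal
    return tree_forecast(tree['right'], point)

if __name__ == '__main__':
    my_mat = mat(loadDataSet('ex0.txt'))
    tree = createTree(my_mat)
    print(tree_forecast(tree, my_mat[0].tolist()[0]))
```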
|
{
"source": "jerloo/pyidenticon",
"score": 3
}
|
#### File: pyidenticon/tests/test_main.py
```python
import unittest
import os
from pyidenticon import make
class MyTestCase(unittest.TestCase):
def setUp(self):
if not os.path.exists('data'):
os.mkdir('data')
def test_basic_make(self):
img = make('basic')
img.save('data/basic.png')
img.close()
def test_named_color_make(self):
img = make('named_fore_color.blue', fore_color='blue')
img.save('data/named_for_color.blue.png')
img.close()
def test_arbg_color_make(self):
img = make('arbb_fore_color.125.200.136', fore_color=(150, 125, 200, 136))
img.save('data/arbb_color.125.200.136.png')
img.close()
def test_many_make(self):
for index in range(100):
item = 'many_index_{}.png'.format(index)
img = make(item, fore_color=(3, 101, 100), bg_color='grey')
img.save('data/{}'.format(item))
img.close()
# def tearDown(self):
# os.rmdir('data')
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jerluc/3R4",
"score": 3
}
|
#### File: 3R4/era/env.py
```python
class Env(object):
def __init__(self, **kwargs):
self.env = kwargs.copy()
def get(self, key, default=''):
return self.env.get(key, default)
def set(self, key, value):
self.env[key] = value
@property
def shell(self):
return self.env['SHELL']
@property
def display_pwd(self):
return '~' if self.pwd == self.home else self.pwd
@property
def pwd(self):
return self.env['PWD']
@property
def user(self):
return self.env['USER']
@property
def home(self):
return self.env['HOME']
@property
def path(self):
return self.get('PATH', '')
def items(self):
return self.env.items()
def as_dict(self):
return self.env.copy()
```
#### File: 3R4/era/fs.py
```python
from __future__ import unicode_literals
import os
import os.path
import shutil
class FSError(Exception):
def __init__(self, msg, path=None):
if path:
msg = '%s: %s' % (path, msg)
super(FSError, self).__init__(msg)
class FS(object):
'''
This class helps us to wrap normal filesystem calls to allow for
potential partial "virtualization" of calls
'''
def __init__(self, root):
self.root = root
def error(self, msg, path):
path = self.virt_path(path)
raise FSError(msg, path)
def virt_path(self, path):
'''
Converts a real filesystem path into a virtual path
'''
common_prefix = os.path.commonprefix([self.root, path])
if len(common_prefix) > 1:
# Cut off the common parts
path = path[len(common_prefix):]
else:
# TODO: Can we now assume it already was a virtual path?
pass
return path
def real_path(self, path):
'''
Converts a virtual path into the real filesystem path
'''
if len(os.path.commonprefix([self.root, path])) > 1:
# TODO: Test me!
return path
if os.path.isabs(path):
# TODO: How kosher is this?
path = path[1:]
return os.path.join(self.root, path)
def must_exist(self, path):
path = self.real_path(path)
if not self.exists(path):
self.error('No such file or directory', path)
return path
def exists(self, path):
path = self.real_path(path)
return os.path.exists(path)
def isdir(self, path):
path = self.real_path(path)
return os.path.isdir(path)
def isfile(self, path):
return not self.isdir(path)
def listdir(self, path):
path = self.must_exist(path)
return os.listdir(path)
def read(self, path):
path = self.must_exist(path)
if self.isdir(path):
self.error('Is a directory', path)
return open(path, mode='r')
def write(self, path, append=False):
path = self.real_path(path)
if self.isdir(path):
self.error('Is a directory', path)
mode = 'a' if append else 'w'
return open(path, mode)
def touch(self, path):
path = self.real_path(path)
if not self.exists(path):
open(path, 'a').close()
else:
os.utime(path, None)
def mkdir(self, path):
path = self.real_path(path)
if self.exists(path):
self.error('File exists', path)
os.mkdir(path)
def rm(self, path, recursive=False, ignore_errors=False):
path = self.real_path(path)
if not ignore_errors:
self.must_exist(path)
if self.isdir(path) and not recursive:
self.error('Is a directory', path)
elif self.isdir(path):
shutil.rmtree(path, ignore_errors)
else:
os.remove(path)
```
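A short usage sketch for the FS wrapper above, confining all file operations to a sandbox root; the /tmp path is only an example, and the sandbox directory is created up front since the wrapper assumes its root exists:
```python
# Sketch: sandboxed file operations through the FS wrapper.
import os
from era.fs import FS, FSError  # import path inferred from the file header above

root = '/tmp/era-sandbox'          # example location
os.makedirs(root, exist_ok=True)

fs = FS(root)
try:
    if not fs.exists('/notes'):
        fs.mkdir('/notes')                     # really creates /tmp/era-sandbox/notes
    with fs.write('/notes/todo.txt') as f:
        f.write('hello\n')
    print(fs.listdir('/notes'))                # -> ['todo.txt']
    print(fs.virt_path(fs.real_path('/notes/todo.txt')))  # -> '/notes/todo.txt'
except FSError as e:
    print('filesystem error:', e)
```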
|
{
"source": "jerluc/envy",
"score": 2
}
|
#### File: envy/envy/__main__.py
```python
import click
import collections
import blessed
import envy.commands as commands
import os.path
import sys
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
CWD = os.path.abspath(os.getcwd())
def _maybe_exit(exit_code):
if exit_code > 0:
sys.exit(exit_code)
@click.group(chain=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(version='0.1.0-alpha', prog_name='envy')
def cli():
pass
@cli.command('c')
@click.option('-r', '--recreate', is_flag=True,
help='Re-create the workspace')
def create(recreate):
"""Creates a new workspace"""
_maybe_exit(commands.create(CWD, recreate))
@cli.command('e')
def enter():
"""Enters the current workspace"""
_maybe_exit(commands.enter(CWD))
@cli.command('d')
@click.option('-f', '--force', is_flag=True,
help='Force destroy the workspace')
def destroy(force):
"""Destroys the current workspace"""
_maybe_exit(commands.destroy(CWD, force))
def main():
cli()
```
|
{
"source": "jerluc/lover",
"score": 2
}
|
#### File: lover/lover/commands.py
```python
import functools
import os
import os.path
import multiprocessing
import subprocess
import sys
import lover.love as love
def new(env, recreate):
if recreate:
# TODO: Force-recreate project configs; not entirely sure if
# this would even be useful...
pass
love.get(env)
love.setup_project_dir(env)
return 0
def run(env):
love.get(env)
love_proc = subprocess.Popen([env.love_binary(), env.project_dir])
return love_proc.wait()
def dist(env, targets):
targets = set(targets) | set(env.conf.targets)
procs = multiprocessing.Pool(4)
procs.map(functools.partial(love.get, env), targets)
love.archive(env)
for platform in targets:
love.package(env, platform)
return 0
```
|
{
"source": "jerluc/qdfs",
"score": 2
}
|
#### File: qdfs/qdfs/__main__.py
```python
from qdfs.dn.blockstore import LocalFileStore
from qdfs.dn.server import DataNodeServer
from qdfs.peer.discovery import Peer
import logging
from tornado.log import enable_pretty_logging
def main():
enable_pretty_logging()
# TODO: Do some configuration stuff?
def logger(event_type, **kwargs):
logging.info('%s' % kwargs)
peer = Peer(groups=['qdfs'], event_handler=logger)
fs = LocalFileStore(root_dir='/tmp/qdfs')
dn = DataNodeServer(fs, peer)
logging.info('Starting QDFS')
dn.start()
if __name__ == '__main__':
main()
```
#### File: qdfs/qdfs/netutils.py
```python
import socket
def getport(sockets, for_ip='0.0.0.0'):
    # filter() returns an iterator in Python 3, so take the first match explicitly
    s = next(s for s in sockets if s.getsockname()[0] == for_ip)
    return s.getsockname()[1]
def gethostname():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 53)) # connecting to a UDP address doesn't send packets
return s.getsockname()[0]
def create_multicast_socket(addr):
host, port = addr
# Create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setblocking(False)
# Set some options to make it multicast-friendly
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 20)
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
s.bind(('', port))
# Set some more multicast options
intf = socket.gethostbyname(socket.gethostname())
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(intf))
s.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(host) + socket.inet_aton(intf))
return s
```
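A minimal send/receive sketch for create_multicast_socket above; the group address, port and message are arbitrary, and reading back our own datagram relies on the IP_MULTICAST_LOOP option the helper enables:
```python
# Sketch: join a multicast group, send one datagram and read it back locally.
from qdfs.netutils import create_multicast_socket  # import path inferred from the header above

GROUP = ('224.1.1.1', 4242)   # illustrative multicast group and port

sock = create_multicast_socket(GROUP)
sock.sendto(b'hello qdfs', GROUP)

sock.setblocking(True)        # the helper leaves the socket non-blocking
data, addr = sock.recvfrom(1024)
print('received %r from %s' % (data, addr))
sock.close()
```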
|
{
"source": "jerluebke/bsc-thesis-scripts",
"score": 2
}
|
#### File: jerluebke/bsc-thesis-scripts/cascade_analysis.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from pathos.multiprocessing import ProcessPool as Pool
import dill; dill.settings['recurse'] = True
TO_INCH = 1./2.54
def tau_SL(p):
return 2-2*(2/3)**p-2/3*p
def log_poisson(b, *args):
return b**(2/3-np.random.poisson(2*np.log(b))*np.log(1.5)/np.log(b))
def cascade_all_steps(b, d, n, rand_func, *args):
N = b**(n-1)
M = np.empty(tuple(d*[N]+[n]))
for i in range(n):
step = b**i
size = b**(n-i-1)
coords = np.dstack(np.mgrid[tuple(d*[slice(0, step)])]).reshape(-1, d)
for coord in coords:
loc = tuple([slice(c*size,(c+1)*size) for c in coord]+[i])
M[loc] = rand_func(b, *args)
return M.cumprod(axis=-1, out=M)
def record_stats(samples, p_arr, fname):
N = 10
base = 2
dim = 2
data_fname = '_cascade_data.npy'
# p_arr = np.expand_dims(p_arr, tuple([d for d in range(dim+1)]))
p_arr = p_arr[None,None,None,:]
data = np.lib.format.open_memmap(
data_fname, mode='w+', dtype=np.float64,
shape=(samples, N, p_arr.size)
)
def worker(*unused):
M = cascade_all_steps(base, dim, N, log_poisson)[...,None]
# M = np.expand_dims(M, -1)
return np.mean(M**p_arr, axis=(0, 1))
with Pool() as p:
res = p.uimap(worker, range(samples))
for i, elem in enumerate(res):
data[i] = elem
S_arr = np.mean(data, axis=0)
sd_S = np.std(data, axis=0)
np.savez(fname, S=S_arr, sd_S=sd_S)
del data
os.remove(data_fname)
def fit(fname, p_arr=None, save=False):
from scipy import odr
def red_chi_sq(f, B, x, y, sy):
return np.sum((y - f(B, x))**2 / sy**2) / (len(y) - len(B))
def r_sq(f, B, x, y):
return 1. - np.sum((y - f(B, x))**2) / np.sum((y - y.mean())**2)
def linear_model_func(B, x):
return B[0]*x+B[1]
linear_model = odr.Model(linear_model_func)
S_file = np.load(fname)
S_arr, sd_S = S_file['S'], S_file['sd_S']
n_arr = -np.arange(S_arr.shape[0])
tau_arr = np.empty((S_arr.shape[1], 2))
C_arr = np.empty_like(tau_arr)
d_tau_rel = np.empty(S_arr.shape[1])
d_C_rel = np.empty_like(d_tau_rel)
r_sq_arr = np.empty_like(d_tau_rel)
# samples = 1000
# N = 10
# base = 2
# dim = 2
# delta_S = np.sqrt(samples*base**((N-1)*dim)) / S_arr
delta_S = sd_S / S_arr # / np.sqrt(1000)
print('[p]\ttau\t\t\t\t\tC\t\t\t\t\tred chi sq\tr^2')
print('===\t===\t\t\t\t\t===\t\t\t\t\t===\t\t===')
for i in range(S_arr.shape[1]):
data = odr.RealData(n_arr, np.log2(S_arr[:,i]), sy=delta_S[:,i])
out = odr.ODR(data, linear_model, beta0=[1., 0.]).run()
B, sd_B = out.beta, out.sd_beta
red_chi_sq_test = red_chi_sq(
linear_model_func, B, n_arr, np.log2(S_arr[:,i]), delta_S[:,i])
r_sq_test = r_sq(linear_model_func, B, n_arr, np.log2(S_arr[:,i]))
tau_arr[i,0], tau_arr[i,1] = B[0], sd_B[0]
C_arr[i,0], C_arr[i,1] = B[1], sd_B[1]
d_tau_rel[i] = abs(sd_B[0]/B[0])*100
d_C_rel[i] = abs(sd_B[1]/B[1])*100
r_sq_arr[i] = r_sq_test
print('[%2d]\t%f +- %f (%.2f%%)\t\t%f +- %f (%.2f%%)\t\t%f\t%f' \
% (i, B[0], sd_B[0], d_tau_rel[i],
B[1], sd_B[1], d_C_rel[i],
red_chi_sq_test, r_sq_test))
if save:
np.savetxt('cascade_structure.csv',
np.vstack((p_arr, tau_arr[:,0], tau_arr[:,1], d_tau_rel,
C_arr[:,0], C_arr[:,1], d_C_rel, r_sq_arr)).T,
fmt=['%d']+7*['%.3f'],
header='p,t,dt,dtr,c,dc,dcr,rsq',
comments='', delimiter=',')
return (tau_arr, C_arr), (n_arr, np.log2(S_arr), delta_S)
def plot_stats(fname, p_arr):
# color_list = plt.get_cmap('tab10').colors
color_list = plt.rcParams['axes.prop_cycle'].by_key()['color']
p_linspace = np.linspace(0, 50, 100)
(tau_arr, C_arr), (n_arr, S_arr, d_S) = fit(fname)
_, axes = plt.subplots(2, 1, sharex=True,
figsize=(11*TO_INCH, 13*TO_INCH), dpi=300)
for i, c in zip(range(9), color_list):
axes[0].plot(n_arr, tau_arr[i,0]*n_arr+C_arr[i,0], color=c, ls='--')
axes[0].errorbar(n_arr, S_arr[:,i], yerr=d_S[:,i], fmt='x', color=c,
# mfc='k', mec='k',
ms=8,
label='$p=%d$' % p_arr[i])
axes[0].set_ylabel(r'$\log_2\langle\varepsilon_l^p\rangle$')
axes[0].legend(ncol=2, loc='upper right')
for i, c in zip(range(9, 14), color_list):
axes[1].plot(n_arr, tau_arr[i,0]*n_arr+C_arr[i,0], color=c, ls='--')
axes[1].errorbar(n_arr, S_arr[:,i], yerr=d_S[:,i], fmt='x', color=c,
# mfc='k', mec='k',
ms=8,
label='$p=%d$' % p_arr[i])
axes[1].set_ylabel(r'$\log_2\langle\varepsilon_l^p\rangle$')
axes[1].set_xlabel(r'$-n=\log_2l$')
axes[1].legend(loc='upper right')
plt.tight_layout(pad=.27)
plt.savefig('cascade-structure-functions.pdf', dpi='figure')
plt.figure(figsize=(11*TO_INCH, 6*TO_INCH), dpi=300)
plt.subplot(121)
plt.errorbar(p_arr[:10], tau_arr[:10,0], yerr=tau_arr[:10,1], fmt='3',
ms=8, mew=1.6)
plt.plot(p_linspace[p_linspace<=10], tau_SL(p_linspace[p_linspace<=10]), 'k--')
plt.ylabel(r'$\tau_p$')
plt.xlabel('$p$')
plt.subplot(122)
plt.errorbar(p_arr, tau_arr[:,0], yerr=tau_arr[:,1], fmt='3', ms=8, mew=1.6,
label='recorded')
plt.plot(p_linspace, tau_SL(p_linspace), 'k--', label='theory')
plt.xlabel('$p$')
plt.legend(handlelength=1.2)
plt.tight_layout(pad=.27)
plt.savefig('cascade-stat.pdf', dpi='figure')
if __name__ == '__main__':
fname = 'cascade_structure.npz'
p_arr = np.array(list(range(1, 11)) + [20, 30, 40, 50])
# record_stats(1000, p_arr, fname)
# fit(fname, p_arr, save=True)
plot_stats(fname, p_arr)
# vim: set ff=unix tw=79 sw=4 ts=8 et ic ai :
```
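The log-Poisson multipliers produced by log_poisson are mean-one by construction, so the spatial average of the cascade should fluctuate around 1 at every level. A quick sanity-check sketch with small parameters, assuming the same environment (pathos/dill installed) as the script itself:
```python
# Sanity check: with mean-one multipliers, every cascade level conserves the mean measure on average.
import numpy as np
from cascade_analysis import cascade_all_steps, log_poisson

np.random.seed(42)
M = cascade_all_steps(2, 2, 10, log_poisson)   # base 2, 2-D, 10 levels -> 512x512 grid
for n in range(M.shape[-1]):
    print('level %2d: spatial mean = %.3f' % (n, M[..., n].mean()))
```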
#### File: jerluebke/bsc-thesis-scripts/field_1d_implementation.py
```python
import numpy as np
from alpert_rokhlin_wavelets import ARWavelet, ARWaveletNumerical
def log_poisson(rs, shape):
return 2.**(2./3.-rs.poisson(2.*np.log(2.), shape)*np.log2(1.5))
def generate_fBm(q, N, incr_seed=None, abort_at=np.inf):
min_N = min(N, abort_at)
num = 2**N
A = np.random.RandomState(incr_seed).randn(q, 2**min_N-1)
B = np.zeros(3*num)
t = np.linspace(-1, 2, 3*num)
for p in range(1, q+1):
ar = ARWavelet(q, p)
conv = ar.conv(t)
m = 0
for n in range(min_N):
size = 2**(N-n)
step = 2**n
for k in range(step, 2*step):
B[(k-1)*size:(k+2)*size] += \
2.**(-n/3.) * conv[::step] * A[p-1,m]
m += 1
return B[num:2*num]
def generate_fBm_intermittent(q, N, incr_seed=None, cascade_seed=None,
abort_at=np.inf, return_extra=False):
min_N = min(N, abort_at)
num = 2**N
A = np.random.RandomState(incr_seed).randn(q, 2**min_N-1)
W = log_poisson(np.random.RandomState(cascade_seed), 2**min_N)
for n in range(1, min_N):
for k in range(2**n):
W[2**n+k] *= W[2**(n-1)+k//2]
B = np.zeros(3*num)
t = np.linspace(-1, 2, 3*num)
for p in range(1, q+1):
ar = ARWavelet(q, p)
conv = ar.conv(t)
m = 0
for n in range(min_N):
size = 2**(N-n)
step = 2**n
for k in range(step, 2*step):
B[(k-1)*size:(k+2)*size] += \
2.**(-n/3.) * conv[::step] * A[p-1,m] * W[m]**(1/3)
m += 1
return (B[num:2*num], W[-2**min_N//2:]) if return_extra else B[num:2*num]
def generate_strain(q, N, incr_seed=None, cascade_seed=None,
abort_at=np.inf, return_extra=False):
min_N = min(N, abort_at)
num = 2**N
A = np.random.RandomState(incr_seed).randn(q, 2**min_N-1)
W = log_poisson(np.random.RandomState(cascade_seed), 2**min_N)
for n in range(1, min_N):
for k in range(2**n):
W[2**n+k] *= W[2**(n-1)+k//2]
S = np.zeros(3*num)
for p in range(1, q+1):
ar = ARWaveletNumerical(q, p, num)
conv = ar.conv_num_strain()
m = 0
for n in range(min_N):
size = 2**(N-n)
step = 2**n
for k in range(step, 2*step):
S[(k-1)*size:(k+2)*size] += \
2.**(-n/3.) * conv[::step] * A[p-1,m] * W[m]**(1/3)
m += 1
return (S[num:2*num+1], A, W) if return_extra else S[num:2*num]
def generate_strained_field(q, N, tau, incr_seed=None, cascade_seed=None,
abort_at=np.inf, return_extra=False):
min_N = min(N, abort_at)
num = 2**N
S, A, W = generate_strain(q, N, incr_seed, cascade_seed, abort_at, True)
expS = np.exp(tau*(.5-1./3.)*S)
B = np.zeros(3*num)
t = np.linspace(0, 1, num+1)
for p in range(1, q+1):
ar = ARWaveletNumerical(q, p, num)
m = 0
for n in range(min_N):
size = 2**(N-n)
step = 2**n
for j, i in enumerate(range(step, 2*step)):
conv = ar.conv_num(t, weight=expS[j*size:(j+1)*size+1])
B[(i-1)*size:(i+2)*size] += \
2.**(-n/3.) * conv[::step] * A[p-1,m] * W[m]**(1/3)
m += 1
return (B[num:2*num], S, W) if return_extra else B[num:2*num]
# vim: set ff=unix tw=79 sw=4 ts=8 et ic ai :
```
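A short driving sketch for the 1-D synthesis routines above, assuming the alpert_rokhlin_wavelets module from the same repository is importable; seeds and sizes are arbitrary:
```python
# Sketch: generate and plot one regular and one intermittent 1-D field.
import numpy as np
import matplotlib.pyplot as plt
from field_1d_implementation import generate_fBm, generate_fBm_intermittent

q, N = 4, 12                                   # wavelet order, dyadic levels -> 2**N samples
t = np.linspace(0., 1., 2**N, endpoint=False)
B_regular = generate_fBm(q, N, incr_seed=0)
B_cascade = generate_fBm_intermittent(q, N, incr_seed=0, cascade_seed=1)

plt.plot(t, B_regular, label='regular (H = 1/3)')
plt.plot(t, B_cascade, label='log-Poisson cascade')
plt.xlabel('t')
plt.legend()
plt.show()
```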
#### File: jerluebke/bsc-thesis-scripts/field_3d_implementation_precomp.py
```python
import math
import numpy as np
import numpy.ma as ma
from scipy.ndimage import zoom as _ni_zoom
from scipy.spatial.transform import Rotation
from field_util_precomp import read_wavelet_integrals, Field, Point
H = 1/3
FOUR_PI = 4.*math.pi
class SphericalField:
# divide by sqrt(2) to normalize edge length to one
# keep center (0,0,0)
CUBOCTAHEDRON_VERTICES = np.array([
( 1, 1, 0),
(-1, 1, 0),
( 1, -1, 0),
(-1, -1, 0),
( 1, 0, 1),
(-1, 0, 1),
( 1, 0, -1),
(-1, 0, -1),
( 0, 1, 1),
( 0, -1, 1),
( 0, 1, -1),
( 0, -1, -1),
( 0, 0, 0)
]) / math.sqrt(2)
# divide by sqrt(3) to normalize diagonal length to one
CUBE_VERTICES = np.array([
( 1, 1, 1),
( 1, 1, -1),
( 1, -1, 1),
( 1, -1, -1),
(-1, 1, 1),
(-1, 1, -1),
(-1, -1, 1),
(-1, -1, -1)
]) / math.sqrt(3)
def __init__(self, Nc, Np, base, q=4, random_rotation=False,
noise_seed=None, rotation_seed=None):
# b**Nc: resolution of single field component
# b**Np: resolution of result
# q: wavelet order
# basic building block of the field
self.field = read_wavelet_integrals(base, Nc, q)
# base, initial radius, component radius, initial zoom factor, number
# of grid points
self.b = base
self.r = base**Np
self.rc = base**Nc
self.z = base**(Np-Nc)
self.num = 2*self.r
self.random_rotation = random_rotation
self.vertices = {
2 : self.CUBE_VERTICES,
3 : self.CUBOCTAHEDRON_VERTICES
}.get(base)
# save wavelet order for noise generation
self.q = q
# RandomState instances
self.noise_rs = np.random.RandomState(noise_seed)
self.rotation_rs = np.random.RandomState(rotation_seed)
def compute(self, levels):
radius = self.r
z = self.z
# result
v = np.zeros((self.num, self.num, self.num, 3))
# center of initial sphere
points = [Point(radius, radius, radius)]
# start one level higher to fill the whole domain
radius *= self.b
z *= self.b
for n in range(levels):
fs, vs = self._field_and_domain_bounds(points, min(self.r, radius))
# noises.shape == (len(points), 3, q, 1, 1, 1)
noises = self._make_noise(len(points))[...,None,None,None]
# interp_field.shape == (3, q, 2*r, 2*r, 2*r)
interp_field = self._interpolate(z)
for i in range(len(points)):
# noise_field.shape == (3, 2*r, 2*r, 2*r)
noise_field = ma.sum([
noises[i,2]*interp_field.y - noises[i,1]*interp_field.z,
noises[i,0]*interp_field.z - noises[i,2]*interp_field.x,
noises[i,1]*interp_field.x - noises[i,0]*interp_field.y,
], axis=1)[(...,*fs[i])]
noise_field = np.moveaxis(noise_field, 0, -1)
v[(*vs[i],...)][~noise_field.mask] += \
self.b**(-n*H) * noise_field[~noise_field.mask]
z /= self.b
radius //= self.b
points = self._subdivide_sphere(points, radius//2)
# Biot-Savart: -1/(4 pi)
return -v / FOUR_PI
def _field_and_domain_bounds(self, points, radius):
# field component bound functions (whole sphere)
lower = lambda p: 0 if p-radius > 0 else radius-p
upper = lambda p: 2*radius if p+radius < self.num else radius+self.num-p
fs = []
vs = []
for point in points:
fs.append(tuple((
slice(lower(point.x), upper(point.x)),
slice(lower(point.y), upper(point.y)),
slice(lower(point.z), upper(point.z)),
)))
vs.append(tuple((
slice(max(point.x-radius, 0), min(point.x+radius, self.num)),
slice(max(point.y-radius, 0), min(point.y+radius, self.num)),
slice(max(point.z-radius, 0), min(point.z+radius, self.num)),
)))
return fs, vs
def _make_noise(self, num):
return ma.asarray(self.noise_rs.randn(num, 3, self.q))
def _interpolate(self, z):
if z > 1:
bound = slice(None, None) if z*self.rc < self.r \
else slice(int(self.rc-self.r//z), int(self.rc+self.r//z))
return Field(self._zoom(self.field.x[...,bound,bound,bound],
(1, z, z, z)))
elif z < 1:
step = int(1./z)
return Field(self.field.x[...,::step,::step,::step])
else:
return self.field
def _subdivide_sphere(self, points, radius):
new_points = []
vertices = radius * self.vertices
for point in points:
if self.random_rotation:
vertices = Rotation.random(random_state=self.rotation_rs).apply(vertices)
for vertex in vertices:
new_points.append(point + vertex)
return new_points
@staticmethod
def _zoom(a, z):
out = ma.zeros(tuple([int(round(ii*zz)) for ii, zz in zip(a.shape, z)]))
out = _ni_zoom(a.data, z, order=1, mode='nearest', output=out)
mask = _ni_zoom(a.mask, z, order=0, mode='constant', cval=True)
out.mask = mask
return out
# vim: set ff=unix tw=79 sw=4 ts=8 et ic ai :
```
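A usage sketch for the SphericalField class above, mirroring the base-2 configuration used in field_3d_statistics.py; it requires the precomputed wavelet-integral files on disk, and the exact import path depends on the repository layout (the statistics script imports the class under a different module name):
```python
# Sketch: one realisation of the 3-D synthetic velocity field (base-2 setup).
from field_3d_implementation_precomp import SphericalField  # module name as in the header above

sf = SphericalField(Nc=3, Np=7, base=2, q=4, random_rotation=False, noise_seed=0)
v = sf.compute(levels=5)       # velocity field of shape (256, 256, 256, 3)
print(v.shape, v.mean(), v.std())
```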
#### File: jerluebke/bsc-thesis-scripts/field_3d_statistics.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from physt import h1
# import dill; dill.settings['recurse'] = True
from pathos.multiprocessing import ProcessPool as Pool
from field_precomp import SphericalField as SphericalFieldRegular
from field_precomp_cascade import SphericalField as SphericalFieldCascade
TO_INCH = 1./2.54
NNODES = 6
# DATA_PATH = '/home/jel/Documents'
DATA_PATH = '.'
FNAME = os.path.join(DATA_PATH, 'data/stat_3d_b%d_%s_%%s.np%%s')
def increments(fname, samples, base, Field, **kwds):
assert base in (2, 3)
if base == 2:
sf = Field(3, 7, 2, **kwds)
levels = 5
elif base == 3:
sf = Field(2, 4, 3, **kwds)
levels = 3
s = slice(sf.r-sf.r//(2*base), sf.r+sf.r//(2*base))
num = 2*(sf.r//(2*base))
def worker(*unused):
return sf.compute(levels)[s,s,s,:]
data = np.lib.format.open_memmap(fname % ('data', 'y'), mode='w+', dtype=np.float64,
shape=(samples, num, num, num, 3))
with Pool(NNODES) as p:
# shape = (samples, num, num, num, 3)
res = p.uimap(worker, range(samples))
for i, field in enumerate(res):
data[i] = field
def compute_stats(fname):
data = np.load(fname % ('data', 'y'), mmap_mode='r')
num = data.shape[1]
d_arr = np.arange(1, num-1, 1)
# p_arr = np.arange(1, 13)
p_arr = np.array([1, 2])
# energy spectrum
E = np.zeros((num//2-1, 3))
f = np.fft.fftfreq(num, 2./num)[1:num//2]
for i in range(3):
reshaped_data = np.reshape(np.moveaxis(data, 1+i, 0), (num, -1))
cov = np.mean(np.cov(reshaped_data, bias=True), axis=-1)
E[:,i] = np.abs(np.fft.fft(cov)[1:num//2])
np.savez(fname % ('energy', 'z'), f=f, E=E)
del f; del E
# increment moments and pdf
moments = np.zeros((d_arr.size, 3, p_arr.size))
pdfs = np.zeros((d_arr.size, 100, 2, 3))
for i in range(d_arr.size):
d = d_arr[i]
ix = data[:,d:,...,0] - data[:,:-d,...,0]
iy = data[:,:,d:,:,1] - data[:,:,:-d,:,1]
iz = data[:,...,d:,2] - data[:,...,:-d,2]
for j, i_arr in enumerate([ix, iy, iz]):
h = h1(i_arr/np.expand_dims(i_arr.std(axis=1+j), 1+j), bins=100)
h.normalize(True)
pdfs[i,:,0,j] = h.bin_centers
pdfs[i,:,1,j] = h.frequencies
for k in range(p_arr.size):
moments[i,j,k] = np.mean(np.mean(np.abs(i_arr)**p_arr[k], axis=1+j))
np.savez(fname % ('moments', 'z'), d=d_arr, m=moments, p=pdfs)
del d_arr; del moments; del pdfs
def compute_pdfs(fname, d_arr):
data = np.load(fname % ('data', 'y'), mmap_mode='r')
pdfs = np.zeros((d_arr.size, 3, 2, 100))
for i in range(d_arr.size):
d = d_arr[i]
ix = data[:,d:,...,0] - data[:,:-d,...,0]
iy = data[:,:,d:,:,1] - data[:,:,:-d,:,1]
iz = data[:,...,d:,2] - data[:,...,:-d,2]
for j, i_arr in enumerate([ix, iy, iz]):
tmp = i_arr / i_arr.std()
h = h1(tmp[np.abs(tmp)<20], bins=100)
h.normalize(True)
pdfs[i,j,0] = h.bin_centers
pdfs[i,j,1] = h.densities
np.save(fname % ('pdfs', 'y'), pdfs)
def plot_spectra():
# colors = ['tab:blue', 'tab:orange', 'tab:green']
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
ls = ['-', '--']
base = [2, 3]
what = ['regular', 'cascade']
offset = [0.01, 0.001]
fig, axes = plt.subplots(2, 1, figsize=(11*TO_INCH, 13*TO_INCH), dpi=300)
for k in range(2):
for j in range(2):
data = np.load((FNAME % (base[k], what[j])) % ('energy', 'z'))
f, E = data['f'], data['E']
for i in range(3):
axes[k].plot(f, E[:,i], c=colors[i], ls=ls[j])
axes[k].plot(f, offset[k]*f**(-5/3), 'k-.')
axes[k].set_xscale('log')
axes[k].set_yscale('log')
axes[k].set_ylabel('$E(k)$')
axes[k].legend([
plt.Line2D([], [], ls='-', c=colors[0]),
plt.Line2D([], [], ls='-', c=colors[1]),
plt.Line2D([], [], ls='-', c=colors[2]),
plt.Line2D([], [], ls=ls[0], c='k'),
plt.Line2D([], [], ls=ls[1], c='k'),
], [
'$x$', '$y$', '$z$', 'regular', 'cascade',
],
# loc='upper right',
loc='lower left',
ncol=5,
handlelength=1,
columnspacing=1,
)
axes[-1].set_xlabel('$k$')
plt.tight_layout(pad=.27)
plt.savefig('stats/energy-3d.pdf', dpi='figure')
def plot_covariance():
# colors = ['tab:blue', 'tab:orange', 'tab:green']
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
ls = ['-', '--']
base = [2, 3]
size = [64, 40]
what = ['regular', 'cascade']
fig, axes = plt.subplots(2, 1, figsize=(11*TO_INCH, 13*TO_INCH), dpi=300)
for k in range(2):
for j in range(2):
data = np.load((FNAME % (base[k], what[j])) % ('moments', 'z'))
d, m = data['d'] / size[k], data['m']
for i in range(3):
axes[k].plot(d, m[:,i,1], c=colors[i], ls=ls[j])
axes[k].plot(d[d>7e-2], 4e-3*d[d>7e-2]**(2/3), 'k-.')
axes[k].plot(d[d<1e-1], 1e-1*d[d<1e-1]**2, 'k:')
axes[k].set_xscale('log')
axes[k].set_yscale('log')
axes[k].set_ylabel('$S_2(l)$')
axes[k].legend([
plt.Line2D([], [], ls='-', c=colors[0]),
plt.Line2D([], [], ls='-', c=colors[1]),
plt.Line2D([], [], ls='-', c=colors[2]),
plt.Line2D([], [], ls=ls[0], c='k'),
plt.Line2D([], [], ls=ls[1], c='k'),
], [
'$x$', '$y$', '$z$', 'regular', 'cascade',
],
loc='lower right',
ncol=5,
handlelength=1,
columnspacing=1,
)
axes[-1].set_xlabel('$l$')
plt.tight_layout(pad=.27)
plt.savefig('stats/cov-3d.pdf', dpi='figure')
def plot_pdfs():
# colors = plt.get_cmap('tab10').colors
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
base = [2, 3]
kind = ['regular', 'cascade']
d_list = [[16, 8, 4, 2], [12, 9, 6, 3]]
x_arr = np.linspace(-3, 3, 100)
which = 'abcd'
j = 0
fig, axes = plt.subplots(2, 2, figsize=(11*TO_INCH, 11*TO_INCH), dpi=300,
sharex=True, sharey=True)
for b in range(2):
for k in range(2):
# p = np.load((FNAME % (base[b], kind[k])) % ('moments', 'z'))['p']
p = np.load((FNAME % (base[b], kind[k])) % ('pdfs', 'y'))
for i, d in enumerate(d_list[b]):
axes[b,k].plot(p[i,0,0], i+np.log10(p[i,0,1]), c=colors[i], ls='-')
axes[b,k].plot(p[i,1,0], i+np.log10(p[i,1,1]), c=colors[i], ls='--')
axes[b,k].plot(p[i,2,0], i+np.log10(p[i,2,1]), c=colors[i], ls=':')
axes[b,k].plot(x_arr, np.log(np.exp(-x_arr**2/4.5)/np.sqrt(2.*np.pi)), 'k-.')
axes[b,k].text(.05, .95, r'$(%s)$' % which[j], transform=axes[b,k].transAxes,
va='top', ha='left')
axes[b,k].set_xlim(-6, 6)
j += 1
for ax in axes[-1,:]:
ax.set_xlabel(r'$\delta{u}_l$')
for ax in axes[:,0]:
ax.set_ylabel('pdf (a.u.)')
axes.flat[-1].legend(
[plt.Line2D([], [], c='k', ls='-'),
plt.Line2D([], [], c='k', ls='--'),
plt.Line2D([], [], c='k', ls=':')],
['$x$', '$y$', '$z$'],
loc='lower right',
ncol=3,
handlelength=1,
columnspacing=1,
)
plt.tight_layout(pad=.27)
plt.savefig('stats/pdf-3d.pdf', dpi='figure')
if __name__ == '__main__':
fname_list = [
FNAME % (2, 'regular'),
FNAME % (2, 'cascade'),
FNAME % (3, 'regular'),
FNAME % (3, 'cascade'),
]
# print('computing samples...')
# increments(fname_list[0], 200, 2, SphericalFieldRegular, random_rotation=False)
# increments(fname_list[1], 200, 2, SphericalFieldCascade, random_rotation=False)
# increments(fname_list[2], 5000, 3, SphericalFieldRegular, random_rotation=True)
# increments(fname_list[3], 5000, 3, SphericalFieldCascade, random_rotation=True)
#
# print('computing stats...')
# with Pool(NNODES) as p:
# p.map(compute_stats, fname_list)
# for fname in fname_list[:2]:
# compute_pdfs(fname, np.array([16, 8, 4, 2]))
# for fname in fname_list[2:]:
# compute_pdfs(fname, np.array([12, 9, 6, 3]))
#
# print('done.')
plot_spectra()
plot_covariance()
plot_pdfs()
# plt.close()
# vim: set ff=unix tw=79 sw=4 ts=8 et ic ai :
```
#### File: jerluebke/bsc-thesis-scripts/field_3d_util_precomp.py
```python
import math
import numpy as np
import numpy.ma as ma
from collections import namedtuple
from mra_sympy import MRA
TWO_PI = 2.*math.pi
INTEGRAL_FILE = "wavelet_data/wavelet_base%d_power%d_i%d_%s.dat"
class Point(namedtuple('Point', ['x', 'y', 'z'])):
__slots__ = ()
def __new__(cls, x, y, z):
x, y, z = [int(round(i)) for i in (x, y, z)]
return super().__new__(cls, x, y, z)
def __add__(self, other):
if not isinstance(other, Point):
other = Point._make(other)
return Point(self.x+other.x, self.y+other.y, self.z+other.z)
def __mul__(self, scalar):
return Point(scalar*self.x, scalar*self.y, scalar*self.z)
def __rmul__(self, scalar):
return self.__mul__(scalar)
def __truediv__(self, scalar):
return Point(self.x/scalar, self.y/scalar, self.z/scalar)
def __rtruediv__(self, scalar):
return Point(scalar/self.x, scalar/self.y, scalar/self.z)
class Field(namedtuple('Field', ['x'])):
__slots__ = ()
@property
def y(self):
return np.moveaxis(self.x, [1, 2, 3], [2, 3, 1])
@property
def z(self):
return np.moveaxis(self.x, [1, 2, 3], [3, 1, 2])
def read_wavelet_integrals(base, exp, q):
assert base in (2, 3)
assert exp in (2, 3, 4)
assert q == 4
a, b = MRA._compute_or_read_cached_results(q)
n = 2*base**exp
s = slice(-2, 2, 1j*n)
x, y, z = np.ogrid[s,s,s]
mask = (x**2 + y**2 + z**2 <= 4)
I = np.zeros((2, q, n, n, n))
for p in range(q):
I[0,p][mask] = np.genfromtxt(INTEGRAL_FILE % (base, exp, p, "lower"))
I[1,p][mask] = np.genfromtxt(INTEGRAL_FILE % (base, exp, p, "upper"))
# force symmetry
# XXX: with base==2 and exp==4 there appear to be numerical errors near the
# edge of the spherical domain
I[...,n//2:,:,:] = -I[...,:n//2,:,:][...,::-1,:,:]
# I[...,:,n//2:,:] = I[...,:,:n//2,:][...,:,::-1,:]
# I[...,:,:,n//2:] = I[...,:,:,:n//2][...,:,:,::-1]
# symmetrize last axes
I = (I + I.swapaxes(-1, -2)) / 2.
# I_{lower/upper}.shape == (q, n, n, n)
I_lower = np.sum(a[:,:,None,None,None] * I[0][None,...], axis=1)
I_upper = np.sum(b[:,:,None,None,None] * I[1][None,...], axis=1)
return Field(ma.array(
I_lower + I_upper,
mask=np.broadcast_to(~mask, (q, n, n, n)),
hard_mask=True
))
def _vdiff(v, a):
s1 = [slice(None, -1)]*3
s2 = s1.copy()
s1[a] = slice(1, None)
return v[tuple(s1)] - v[tuple(s2)]
def vorticity(v, dx):
o = np.zeros([s-1 for s in v.shape[:3]]+[3])
o[...,0] = _vdiff(v[...,2], 1) - _vdiff(v[...,1], 2)
o[...,1] = _vdiff(v[...,0], 2) - _vdiff(v[...,2], 0)
o[...,2] = _vdiff(v[...,1], 0) - _vdiff(v[...,0], 1)
return o / dx
def div(v, dx):
return sum([_vdiff(v[...,i], i) for i in range(3)]) / dx
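# --- illustrative check (added sketch, not part of the original module) ---
# The forward-difference operators above commute, so the discrete divergence of
# the discrete curl cancels exactly; this helper verifies that
# div(vorticity(v)) stays at floating-point rounding level for a random field.
# Grid size, spacing and seed are arbitrary choices made for this sketch.
def _check_div_of_curl(n=16, dx=0.1, seed=0):
    rng = np.random.default_rng(seed)
    v = rng.standard_normal((n, n, n, 3))
    residual = div(vorticity(v, dx), dx)
    return np.abs(residual).max()    # expected to be close to machine precision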
# vim: set ff=unix tw=79 sw=4 ts=8 et ic ai :
```
|
{
"source": "jerluebke/comp_phys",
"score": 3
}
|
#### File: comp_phys/2nd_exercise/task1.py
```python
import numpy as np
import matplotlib.pyplot as plt
def rectangle(f, a, b, N):
h = (b-a) / N
d = np.arange(a, b, h) + h/2
return h * np.sum(f(d))
def trapez(f, a, b, N):
h = (b-a) / N
y = f(np.arange(a, b+h, h))
y[0] /= 2
y[-1] /= 2
return h * np.sum(y)
def simpson(f, a, b, N):
h = (b-a) / N
d = np.arange(a, b+h, h)
ar = f(d[0]) + f(d[-1])
au = np.sum(f(d[1:-1:2]))
ag = np.sum(f(d[2:-2:2]))
return h * (ar + 4*au + 2*ag) / 3
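# --- illustrative convergence check (added sketch; integrand and N are arbitrary) ---
# Halving the step size should shrink the error by roughly 4x for the midpoint
# (rectangle) and trapezoidal rules and by roughly 16x for Simpson's rule.
# Calling _convergence_demo() prints those ratios for f(x) = e^x on [0, 1].
def _convergence_demo():
    f = lambda x: np.exp(x)
    exact = np.exp(1.0) - 1.0
    for rule in (rectangle, trapez, simpson):
        e1 = abs(rule(f, 0.0, 1.0, 64) - exact)
        e2 = abs(rule(f, 0.0, 1.0, 128) - exact)
        print(rule.__name__, 'error ratio:', e1 / e2)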
test_functions = {
'x^3 - x^2' : (lambda x: x**3-x**2,
lambda x: x**4/4-x**3/3),
'sin^2 x' : (lambda x: np.sin(x)**2,
lambda x: x/2-np.sin(x)*np.cos(x)/2),
# divide by zero in [0, 1] (log(0), duh...)
# 'x * log(x)': (lambda x: x*np.log(x),
# lambda x: x**2*np.log(x)/2-x**2/4),
'x * e^{-x}': (lambda x: x*np.exp(-x),
lambda x: -(x+1)*np.exp(-x)),
'fn' : (lambda x: np.exp(-x)*np.sin(x),
lambda x: -np.exp(-x)*np.sin(x)/2-np.exp(-x)*np.cos(x)/2)
}
A, B = 0., 1.
# M = 25
# Ns = np.arange(2, 2*M+2, 2)
Ns = 2**np.arange(0, 13)
M = len(Ns)
L = len(test_functions)
res = {
'rectangle' : (rectangle, np.zeros((M, L)), np.zeros(M)),
'trapez' : (trapez, np.zeros((M, L)), np.zeros(M)),
'simpson' : (simpson, np.zeros((M, L)), np.zeros(M))
}
fig, ax = plt.subplots()
ax.set_xscale('log')
ax.set_yscale('log')
for ni, (integ, resarr, errarr) in res.items():
for i, (nf, (f, F)) in enumerate(test_functions.items()):
resarr[:,i] = np.array([integ(f, A, B, n) for n in Ns])
print(ni, nf, np.min(resarr[:,i]))
resarr[:,i] = np.abs(resarr[:,i] - (F(B)-F(A)))
errarr = np.mean(resarr, axis=1)
ax.plot(Ns, errarr, label=ni)
plt.legend()
```
#### File: comp_phys/3rd_exercise/multigrid.py
```python
import math
import numpy as np
from scipy import sparse
class Grid:
def __init__(self, n, xmin, xmax):
self.x = np.linspace(xmin, xmax, n+1)
self.dx = self.x[1] - self.x[0]
self.f = np.zeros(n+1)
self.rho= np.zeros(n+1)
self.d = np.zeros(n+1)
self.L = sparse.diags([1, -2, 1], [-1, 0, 1], (n-1, n-1),
format='csc') / self.dx**2
self.n = n+1
@property
def defect(self):
n = self.n
self.d[1:n-1] = self.rho[1:n-1] - self.L @ self.f[1:n-1]
return self.d
def init(rho_func, N, xmin, xmax, levels):
grids = [Grid(N // 2**i, xmin, xmax) for i in range(levels)]
grids[0].rho[1:N-2] = rho_func(grids[0].x[1:N-2])
return grids
def smooth(g, solver, **kwds):
def jacobi(g, n, **kwds):
g.f[1:n-1] = .5 * (g.f[0:n-2] + g.f[2:n] - g.dx**2 * g.rho[1:n-1])
def omega_jacobi(g, n, **kwds):
omega = kwds.get('omega', .5)
g.f[1:n-1] = .5 * omega * (g.f[0:n-2] + g.f[2:n] - g.dx**2 * g.rho[1:n-1])\
+ (1. - omega) * g.f[1:n-1]
def gauss_seidel(g, n, **kwds):
for i in range(1, n-1):
g.f[i] = .5 * (g.f[i+1] + g.f[i-1] - g.dx**2 * g.rho[i])
def red_black(g, n, **kwds):
g.f[1:n-1:2] = .5 * (g.f[0:n-2:2] + g.f[2:n:2] - g.dx**2 * g.rho[1:n-1:2])
g.f[2:n-2:2] = .5 * (g.f[1:n-3:2] + g.f[3:n-1:2] - g.dx**2 * g.rho[2:n-2:2])
solver_dict = {
'jacobi' : jacobi,
'omega_jacobi' : omega_jacobi,
'gauss_seidel' : gauss_seidel,
'red_black' : red_black
}
solver_dict[solver](g, g.n, **kwds)
    # homogeneous Dirichlet boundary condition (f vanishes at both ends)
g.f[0] = g.f[-1] = 0
def restrict(arr):
# injection
# return arr[2:arr.size-2:2]
# full-weighting
nf = arr.size
nc = nf // 2
res = np.zeros(nc)
res[1:nc] = .5 * arr[2:nf-2:2] + .25 * (arr[3:nf-1:2] + arr[1:nf-3:2])
return res
def prolong(arr):
nc = arr.size
nf = 2 * nc - 1
res = np.zeros(nf)
res[2:nf-2:2] = arr[1:nc-1]
res[1:nf-1:2] = (arr[0:nc-1] + arr[1:nc]) * .5
return res
def solve_one_v(grids, solver, level=0):
fine = grids[level]
smooth(fine, solver)
if level < len(grids)-1:
coarse = grids[level+1]
coarse.rho = restrict(fine.defect)
coarse.f[:] = 0
solve_one_v(grids, solver, level+1)
fine.f += prolong(coarse.f)
smooth(fine, solver)
def err(solver, imax, **gridkwds):
g = init(**gridkwds)
err = [np.max(np.abs(g[0].defect))]
i = 1
while i < imax:
solve_one_v(g, solver)
err.append(np.max(np.abs(g[0].defect)))
i += 1
return err
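# --- illustrative usage (added sketch; the parameter values are arbitrary) ---
# One V-cycle per iteration: the maximum defect is expected to drop by a
# roughly constant factor each cycle.  N must be divisible by 2**(levels-1)
# so that every coarser grid still has an integer number of cells.
if __name__ == '__main__':
    defects = err('red_black', imax=10,
                  rho_func=lambda x: np.sin(x) * np.exp(-x**2),
                  N=256, xmin=-5.0, xmax=5.0, levels=7)
    for i, d in enumerate(defects):
        print('cycle %2d: max defect = %.3e' % (i, d))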
```
#### File: comp_phys/3rd_exercise/multigrid_solution.py
```python
import numpy as np
import math
import matplotlib
# matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from matplotlib import animation
# runtime parameters
n = 256 # finest resolution
xmin = -5.0 # left border of domain
xmax = 5.0 # right border of domain
iterations = 30       # number of repetitions of multigrid algorithm
# initialization for rho
init_rho = lambda x: np.sin(x) * np.exp(-x**2)
class Grid:
def __init__(self, n, xmin, xmax):
'''grid-data for one level; n + 1 gridpoints, range from xmin to xmax'''
self.n = n + 1
self.f = np.zeros(n + 1)
self.rho = np.zeros(n + 1)
self.defect = np.zeros(n + 1)
self.dx = (xmax - xmin) / n
self.x = np.linspace(xmin, xmax, n + 1) # g.x = xmin + (0:n)' * g.dx;
    def __repr__(self):
        # self.n already holds n + 1 gridpoints, so subtract 1 to echo the constructor argument
        return "Grid(" + str(self.n - 1) + ", " \
                + str(self.x[0]) + ", " + str(self.x[-1]) + ")"
def defect(grid):
'''update defect array of grid'''
n = grid.n
grid.defect[1:n-1] = grid.rho[1:n-1] - \
(grid.f[2:n] - 2.*grid.f[1:n-1] + grid.f[0:n-2]) / grid.dx**2
def prolong(coarse, fine):
'''coarse to fine, fine is modified'''
nc = coarse.n
nf = fine.n
fine.f[2:nf-2:2] += coarse.f[1:nc-1]
fine.f[1:nf-1:2] += 0.5 * (coarse.f[0:nc-1] + coarse.f[1:nc])
def restrict(fine, coarse):
'''fine to coarse, coarse is modified'''
nc = coarse.n
nf = fine.n
coarse.rho[0:nc] = 0.
    # injection
#coarse.rho[1:nc-1] = fine.defect[2:nf-2:2]
# full weighting
coarse.rho[1:nc-1] = 0.5*fine.defect[2:nf-2:2]\
+ 0.25*(fine.defect[3:nf-1:2] + fine.defect[1:nf-3:2])
coarse.f[0:nc] = 0.
def smooth(grid):
n = grid.n
# Jacobi:
# grid.f[1:n-1] = 0.5 * (grid.f[0:n-2] + grid.f[2:n] - grid.dx**2 * grid.rho[1:n-1])
# omega-Jacobi:
# omega = 0.5
# grid.f[1:n-1] = 0.5 * omega * ( grid.f[0:n-2] + grid.f[2:n] - grid.dx**2 * grid.rho[1:n-1] ) \
# + (1.-omega) * grid.f[1:n-1];
# Gauss-Seidel:
# for i in range(1, n-1):
# grid.f[i] = 0.5 * (grid.f[i-1] + grid.f[i+1] - grid.dx**2 * grid.rho[i])
# Red-Black Gauss-Seidel:
grid.f[1:n-1:2] = 0.5 * (grid.f[0:n-2:2] + grid.f[2:n:2] - grid.dx**2 * grid.rho[1:n-1:2])
grid.f[2:n-2:2] = 0.5 * (grid.f[1:n-3:2] + grid.f[3:n-1:2] - grid.dx**2 * grid.rho[2:n-2:2])
def solve_one(grids, level):
'''one iteration on the specified level, calls itself recursively'''
smooth(grids[level])
if level < len(grids) - 1:
defect(grids[level])
restrict(grids[level], grids[level+1])
solve_one(grids, level+1)
prolong(grids[level+1], grids[level])
smooth(grids[level])
# initialize grids
eps = 1.e-2 # for circumventing discrete floating point effects
grids = [Grid(n//2**i, xmin, xmax) for i in range(int(math.log(n, 2) + eps) - 1)]
grids[0].rho[1:grids[0].n-1] = init_rho(grids[0].x[1:grids[0].n-1])
# solve without plotting
#for i in range(iterations):
# solve_one(grids, 0)
# set up plot
est = 1.5 * np.amax(np.abs(grids[0].rho))   # estimate appropriate y-range
fig = plt.figure()
line_rho = plt.Line2D([], [], lw=2, marker='.', markerfacecolor='red', markersize=12)
ax_rho = fig.add_subplot(2, 2, 1)
ax_rho.set_xlabel("$x$")
ax_rho.set_ylabel("rho")
ax_rho.set_xlim(xmin, xmax)
ax_rho.set_ylim(-est, est)
ax_rho.add_line(line_rho)
time_text = ax_rho.text(0.02, 0.75, '', transform=ax_rho.transAxes)
line_f = plt.Line2D([], [], lw=2, marker='.', markerfacecolor='red', markersize=12)
ax_f = fig.add_subplot(2, 2, 2)
ax_f.set_xlabel("$x$")
ax_f.set_ylabel("$f$")
ax_f.set_xlim(xmin, xmax)
ax_f.set_ylim(-est, est)
ax_f.add_line(line_f)
line_err = plt.Line2D([], [], lw=2, marker='.', markerfacecolor='red', markersize=12)
ax_err = fig.add_subplot(2, 2, 3)
ax_err.set_xlabel("$x$")
ax_err.set_ylabel("error")
ax_err.set_xlim(xmin, xmax)
ax_err.set_ylim(-.1*est, .1*est)
ax_err.add_line(line_err)
line_conv = plt.Line2D([], [], lw=2, marker='.', markerfacecolor='red', markersize=12)
ax_conv = fig.add_subplot(2, 2, 4)
ax_conv.set_xlabel("iteration")
ax_conv.set_ylabel("log(max defect)")
ax_conv.set_xlim(0, iterations)
ax_conv.set_ylim(-15,np.log10(2*est))
ax_conv.add_line(line_conv)
defects=[]
def init_plot():
'''initialize plot'''
line_rho.set_data(grids[0].x, init_rho(grids[0].x))
line_f.set_data([], [])
line_err.set_data([], [])
line_conv.set_data([], [])
time_text.set_text('')
return (line_f, line_err, time_text)
def step(i):
'''single step of multigrid method, for use with animation'''
global defects
solve_one(grids, 0)
defects.append(np.max(grids[0].defect))
# print(defects)
    est_f = 1.2 * np.amax(np.abs(grids[0].f))          # estimate appropriate y-range
    est_err = 1.2 * np.amax(np.abs(grids[0].defect))   # estimate appropriate y-range
ax_f.set_ylim(-est_f, est_f)
ax_err.set_ylim(-est_err, est_err)
line_f.set_data(grids[0].x, grids[0].f)
line_err.set_data(grids[0].x, grids[0].defect)
line_conv.set_data(np.arange(i+1), np.log10(defects))
time_text.set_text("iteration = {}".format(i))
return (line_f, line_err, line_conv, time_text)
anim = animation.FuncAnimation(fig, step, init_func=init_plot, frames=iterations,
interval=20, blit=False, repeat=False)
plt.show()
```
#### File: comp_phys/4th_exercise/cg.py
```python
import numpy as np
from matplotlib import pyplot as plt
from scipy import sparse
def cg(A, b, x=None, imax=1000, tol=1e-7):
i = 0
err_arr = np.zeros(imax)
if x is None:
x = np.zeros_like(b)
r = b - A @ x
p = r
dn = np.dot(r, r)
d0 = dn
while i < imax:
# if dn < tol**2*d0:
# print('convergence after %d iterations' % i)
# break
q = A @ p
alpha = dn / np.dot(p, q)
x = x + alpha*p
r = r - alpha*q
d_tmp = dn
dn = np.dot(r, r)
beta = dn / d_tmp
p = r + beta*p
err_arr[i] = dn / d0
i += 1
return x, i, err_arr
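# --- illustrative usage (added sketch; system size and iteration count are arbitrary) ---
# Solve a small symmetric positive definite system (1-D Poisson matrix) with
# the routine above.  Note that the convergence test inside cg() is commented
# out, so exactly imax iterations are performed.
if __name__ == '__main__':
    n = 100
    A = sparse.diags([-1, 2, -1], [-1, 0, 1], (n, n), format='csr')
    b = np.ones(n)
    x, niter, err_arr = cg(A, b, imax=50)
    rel_res = np.linalg.norm(b - A @ x) / np.linalg.norm(b)
    print('relative residual after %d iterations: %.3e' % (niter, rel_res))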
```
#### File: comp_phys/5th_exercise/bitreversal.py
```python
def reverse_stdlib(x):
return int(bin(x)[:1:-1], 2)
def reverse_bit(x, s):
r = 0
for i in range(s):
r = (r << 1) | (x & 1)
x >>= 1
return r
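# --- illustrative check (added sketch, not in the original exercise) ---
# Both helpers reverse the bit pattern; reverse_bit takes the word length s
# explicitly, while reverse_stdlib infers it from bin(x) and so drops leading
# zeros.
if __name__ == '__main__':
    assert reverse_stdlib(6) == 3      # 0b110 -> 0b011
    assert reverse_bit(6, 3) == 3      # same, with an explicit 3-bit word
    assert reverse_bit(6, 4) == 6      # 0b0110 -> 0b0110 with a 4-bit word
    print('bit-reversal checks passed')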
```
#### File: comp_phys/5th_exercise/fft.py
```python
import math
import numpy as np
def direct_ft(x):
"""
direct discrete FT
"""
N = x.size
n = np.arange(N)
k = n.reshape((N, 1))
M = np.exp(-2j * np.pi * k * n / N)
return np.dot(M, x)
def recursive_ft(x):
"""
recursive discrete FT
calls dft on lowest level
x.size needs to be power of 2
"""
N = x.size
if N <= 2:
return direct_ft(x)
else:
X_even = recursive_ft(x[::2])
X_odd = recursive_ft(x[1::2])
W = np.exp(-2j * np.pi * np.arange(N//2) / N)
# X_i = E_i + W_i * O_i
# X_i+N/2 = E_i - W_i * O_i
return np.concatenate([X_even + W * X_odd,
X_even - W * X_odd])
def bitrev_ft(x):
return _bitrev_ft(x.astype(complex))
def _bitrev_ft(x):
"""
    discrete fast Fourier transform
x needs to be complex (e.g.: x=x.astype(complex))
and x.size a power of 2
iterative and in-place implementation of the recursive approach:
the elements are reordered and grouped into "recursion layers",
    multiplied with the corresponding factors and summed over
"""
def reverse(x, s):
"""
bit reversal of input x with wordlength s
"""
y = 0
for i in range(s):
y = (y << 1) | (x & 1)
x >>= 1
return y
n = x.size
layers = int(math.log(n, 2))
x = x[[reverse(i, layers) for i in range(n)]] # bit reversed
exptable = np.exp([-2j * np.pi * i / n for i in range(n // 2)])
# outer loop
# goes through log(n, 2) "recursion" layers
# on the l-th layer (l=log(s,2)) the fft is divided into (n-1) / s
# segments, where s is the size of each segment on the given layer
s = 2
while s <= n:
half_s = s // 2
tablestep = n // s
# middle loop
# goes through the fft's segments at layer log(s, 2)
for i in range(0, n, s):
# inner loop
# goes through the segment's s elements and performs the
# "butterfly" operation
k = 0
for j in range(i, i+half_s):
tmp = x[j + half_s] * exptable[k]
# x_right = x_left - x_right * exp(-i*pi*element / (2**layer))
x[j + half_s] = x[j] - tmp
# x_left = x_left + x_right * exp(-i*pi*element / (2**layer))
x[j] += tmp
k += tablestep
s *= 2
return x
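# --- illustrative check (added sketch, not part of the original file) ---
# All three implementations should agree with numpy's reference FFT for
# power-of-two input lengths, up to floating-point rounding.
if __name__ == '__main__':
    rng = np.random.default_rng(42)
    x = rng.standard_normal(64)
    reference = np.fft.fft(x)
    for ft in (direct_ft, recursive_ft, bitrev_ft):
        assert np.allclose(ft(x), reference), ft.__name__
    print('all FFT variants match numpy.fft.fft')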
```
#### File: comp_phys/6th_exercise/burgers.py
```python
import numpy as np
from numpy.fft import rfft, irfft
import matplotlib.pyplot as plt
from matplotlib import animation
class PDE:
def __init__(self, params, iv, dt, t0=0.):
self.__dict__.update(params)
# k-space
self.k = np.linspace(0, self.N//2, self.N//2+1) \
* 2*np.pi / (self.xe - self.xb)
self.k_sq = np.square(self.k)
# x-space, initial value, fourier transform
self.dx = (self.xe - self.xb) / self.N
self.x = np.arange(self.xb, self.xe, self.dx)
self.u = iv(self.x)
self.û = rfft(self.u)
# self.t = t0
self.t = [t0]
self.dt = dt
# self.cfl = 1
self.cfl = []
self.scheme = self.shu_osher
def time_step(self, steps=1):
# calculate timesteps
# solution is computed in fourier space, inverse transformation is
# done for plotting, `steps` gives plotting frequency
for _ in range(steps):
self.scheme()
self.u = irfft(self.û)
# check cfl condition, should be < 1
# self.cfl = np.max(self.u) * self.dt / self.dx
self.cfl.append(np.max(self.u) * self.dt / self.dx)
self.t.append(self.t[-1] + self.dt * steps)
return self.x, self.u
def prop(self, delta=1.):
# propagator
return np.exp(-self.kappa * self.k_sq * self.dt * delta)
def rhs(self, û):
# low-pass with estimate for highest frequency
û[2*self.k.size//3:] = 0
return û - .5j * self.dt * self.k * rfft(irfft(û)**2)
def euler(self):
# general method
self.û = self.rhs(self.û) * self.prop()
def heun(self):
# rk2
û1 = self.rhs(self.û) * self.prop()
self.û = .5 * (self.rhs(û1) + self.û * self.prop())
def shu_osher(self):
# rk3
û1 = self.rhs(self.û) * self.prop()
û2 = .25 * (self.rhs(û1) * self.prop(-.5) + 3. * self.û *
self.prop(.5))
self.û = 1./3. * (2. * self.rhs(û2) * self.prop(.5) + self.û *
self.prop())
# PDE
# f0 = lambda x: np.sin(x)**2
f0 = np.sin
params = dict(xb=0, xe=2*np.pi, N=256, kappa=.02)
params['kappa'] = 0
p = PDE(params, f0, dt=.01)
# params for plotting
steps = 10
tmax = 1000
frames = int(tmax // (steps * p.dt))
# compute complete solution for smoother plotting
res = np.array([p.time_step(steps) for _ in range(frames)])
# plt.imshow(res[:500,1,:].T) # x: time, y: x, color: u
###########################################################
# SET UP PLOTTING
###########################################################
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(p.xb, p.xe),
# ylim=(res[:,1].min()-.1, res[:,1].max()+.1),
ylim=(-1.1, 1.1),
xlabel='$x$', ylabel='$u$')
line, = ax.plot([], [], lw=2, marker='.', mfc='r', ms=10)
# ttext = ax.text(.02, .95, '', transform=ax.transAxes) # time
# ctext = ax.text(.02, .90, '', transform=ax.transAxes) # cfl
###########################################################
# ANIMATION
###########################################################
def init():
line.set_data([], [])
return line,
def step(i):
line.set_data(res[i,0], res[i,1])
# ttext.set_text('time = %.2f' % time)
# ctext.set_text('cfl = %.2f' % p.cfl[i])
print('time = %.2f, cfl = %.2f\r' % (p.t[i], p.cfl[i]), end='')
return line, # ttext, ctext,
def start_anim():
return animation.FuncAnimation(fig, step, frames=frames, interval=10,
blit=True, repeat=True, init_func=init)
# anim = start_anim()
```
#### File: comp_phys/6th_exercise/burgers_solution.py
```python
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
class Parameter:
def __init__(self, *args, **kwargs):
if args and isinstance(args[0],Parameter):
xb = args[0].xb
xe = args[0].xe
N = args[0].N
dt = args[0].dt
kappa = args[0].kappa
elif kwargs :
xb = kwargs['xb']
xe = kwargs['xe']
N = kwargs['N']
dt = kwargs['dt']
kappa = kwargs['kappa']
self.xb = xb
self.xe = xe
self.N = N
self.dx = (self.xe-self.xb)/float(self.N)
self.dt = dt
self.kappa = kappa
self.cfl=1
class PDE(Parameter):
"""
solves: u_t = \kappa \Delta u
"""
def __init__(self, p ,t0 = 0.0):
Parameter.__init__(self,p)
        # integer division keeps the linspace sample count an int (Python 3)
        self.kx = np.linspace(0, self.N//2, self.N//2+1)*2*np.pi/(self.xe-self.xb)
self.kx2 = np.square(self.kx)
self.x = self.xb + self.dx * np.arange(self.N)
self.u = np.sin(self.x)
self.uhat = np.fft.rfft(self.u)
print(len(self.uhat))
#self.u = np.piecewise(self.x, [np.abs(self.x-np.pi) < 0.01], [1])
#self.u = np.sign(np.cos(self.x))
self.t = t0
self.scheme = self.heun
# attributes used for dynamic plotting
# self.x_line = None
# self.u_line = None
def time_step(self, Nsteps = 1):
for i in range(Nsteps):
self.scheme()
self.u = np.fft.irfft(self.uhat)
#print self.t, np.max(-np.fft.irfft(self.kx*1j*self.uhat))
self.cfl = np.max(self.u)*self.dt/self.dx
#print "cfl =",self.cfl
self.t += self.dt * Nsteps
def rhs(self, uhat):
# aliasing
uhat[int(2.*len(self.kx)/3.):len(self.kx)] = 0
return uhat - 0.5 * self.kx * 1j * np.fft.rfft(np.fft.irfft(uhat)**2)*self.dt
def prop(self, delta):
return np.exp(-self.kappa*self.dt*self.kx2*delta)
def euler(self):
self.uhat = self.rhs(self.uhat)*self.prop(1.)
def heun(self):
uone = self.rhs(self.uhat)*self.prop(1.)
self.uhat = 0.5*(self.uhat*self.prop(1.) + self.rhs(uone))
def shuOsher(self):
uone = self.rhs(self.uhat)*self.prop(1.)
utwo = 3./4.*self.uhat*self.prop(0.5) + 1./4.*self.rhs(uone)*self.prop(-0.5)
self.uhat = 1./3.*self.uhat*self.prop(1.) + 2./3.*self.rhs(utwo)*self.prop(0.5)
p = Parameter(xb=0,xe=2*np.pi,N=256,dt=0.01,kappa=0.02)
N_steps = 10
t_max = 100
frames = int(t_max / float(N_steps * p.dt))
frames = 500
pde = PDE(p)
def step():
pde.time_step(N_steps)
return np.array([pde.x, pde.u])
res = np.array([step() for _ in range(frames)])
######################################################################
# Set up plot
fig = plt.figure()
ax = plt.axes(xlim=(p.xb,p.xe), ylim=(-1.2, 1.2))
u_line, = ax.plot([], [],lw=2,marker='.',markerfacecolor='red', markersize=12)
time_text = ax.text(0.02, 0.95, '', transform=ax.transAxes)
cfl_text = ax.text(0.02, 0.90, '', transform=ax.transAxes)
#ax.legend(prop=dict(size=14))
ax.set_xlabel('$x$')
ax.set_ylabel('$u$')
######################################################################
# Animate plot
def init():
u_line.set_data([], [])
time_text.set_text('')
cfl_text.set_text('')
return (u_line, time_text,cfl_text)
def integrate(i):
# pde.time_step(N_steps)
u_line.set_data(res[i,0], res[i,1])
# time_text.set_text('time = %.2f' % pde.t)
# cfl_text.set_text('cfl = %.2f' % pde.cfl)
# return (u_line, time_text,cfl_text)
return u_line,
anim = animation.FuncAnimation(fig, integrate, init_func=init, frames=frames,
interval=100, blit=True,repeat=False)
plt.show()
```
#### File: comp_phys/stencil_diag/stdiag.py
```python
import stencil_diag_lib
_stdiag = stencil_diag_lib.diag_from_stencil_module.stdiag
def stdiag(n, s):
"""
diagonal matrix from stencil
Wrapper for Fortran function ``diag_from_stencil``
Parameters
----------
n : input int, size of output matrix
s : input rank-1 array, stencil
Returns
-------
sqdiag : rank-2 array with bounds (n, n)
"""
res, err = _stdiag(n, s)
if err == -1:
raise ValueError("size(s) = %d needs to be odd" % s.shape)
elif err == -2:
raise ValueError("n <= size(s)/2")
else:
return res
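# --- illustrative usage (added sketch; requires the compiled stencil_diag_lib
# f2py extension to be importable, so it is left commented out) ---
# A 1-D Laplacian stencil [1, -2, 1] spread over the diagonals of a 5x5 matrix:
#
#     import numpy as np
#     L = stdiag(5, np.array([1.0, -2.0, 1.0]))
#     print(L)    # expected: tridiagonal matrix with -2 on the main diagonal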
```
|
{
"source": "jerluebke/mathematical_stuff",
"score": 3
}
|
#### File: jerluebke/mathematical_stuff/symtensor.py
```python
from sympy import *
init_printing(use_unicode=True)
#############
# CONSTANTS #
#############
# Symbols
x, y, z = symbols('x, y, z')
r, rho, phi, theta = symbols('r, rho, phi, theta')
# Mapping for coordinate substitution
ZYL_MAP = (rho, phi, z)
SPH_MAP = (r, theta, phi)
# Transformations
CARTESIAN = ((x,
y,
z),)
ZYLINDRICAL = ((rho*cos(phi),
rho*sin(phi),
z),
ZYL_MAP)
SPHERICAL = ((r*sin(theta)*cos(phi),
r*sin(theta)*sin(phi),
r*cos(theta)),
SPH_MAP)
TRANSFORM = {'CAR': CARTESIAN,
'ZYL': ZYLINDRICAL,
'SPH': SPHERICAL}
# Jacobian Determinants for common coordinate systems
JD = {'CAR': 1,
'SPH': r**2*sin(theta),
'ZYL': rho}
###########
# Classes #
###########
class SymTensor2D(ImmutableMatrix):
pass
class TensorElement:
pass
class Integral3D(Integral):
"""Represents an unevaluated 3d integral
Properties:
function - inherited from integral
antiderivative
solution - call doit
"""
def __new__(cls, func, sym_x, sym_y, sym_z, *args, **kwargs):
"""Tidy this doc_string up!!!
if you use one of the predefined coord systems, make sure to follow
this convention:
CART = {x, y, z}
ZYL = {rho, phi, z}
SPH = {r, theta, phi}
        where rho is the distance from the z axis, theta is the polar angle and
        phi is the azimuthal angle.
otherwise specify your coordinates in a tuple like this:
            coords = ((x(q_1, q_2, q_3),
                       y(q_1, q_2, q_3),
                       z(q_1, q_2, q_3)),
                      (q_1, q_2, q_3))    # <- defines order
Don't forget to set
transform = True
when your input is not already
expressed in the desired coordinate system!
In case of transform = True func.free_symbols will be converted to list
and sorted alphabetically. Then these symbols are mapped one by one to
coords tuple
        With transform = True, adjusting the integration limits accordingly is
        NOT YET implemented, so do it yourself!
"""
coords = kwargs.get('coords', 'CART') # set default to cartesian, its JD is 1
# it doesn't change the input function
try:
del kwargs['coords'] # needs to be removed, because super().__new__
except KeyError: # doesn't understand this keyword
pass
if coords in ('SPHERICAL', 'SPH', 'ZYLINDIRCAL', 'ZYL', 'CARTESIAN', 'CART'):
jacobian_determinant = JD[coords[:3]]
coords = TRANSFORM[coords[:3]]
else: # custom coordinates
            jacobian_determinant = Jacobian(coords[0]).det()    # probably some
                                                                # safety against
                                                                # bad input would
                                                                # be nice ...
sym_list = sympify((sym_x, sym_y, sym_z))
if 'transform' in kwargs:
if kwargs['transform'] is True and coords != CARTESIAN:
func_sym = list(func.free_symbols) # sort alphabetically
func_sym.sort(key=lambda s: str(s)[0]) # for 1-1 substitution
func = coord_transform(func, func_sym, coords[0]) # <- in there
                # substitute integration variables
def sub_var(var_iter, new_var):
try:
var_iter = list(var_iter)
var_iter[0] = new_var
except TypeError:
var_iter = new_var
return var_iter
sym_list = [sub_var(o, n) for o, n in zip(sym_list, coords[1])]
del kwargs['transform'] # needs to be removed because kwargs is
# later passed to super().__new__,
# which doesn't understand this keyword
func = func * jacobian_determinant
return super().__new__(cls, func, *sym_list, **kwargs)
@property
def antiderivative(self):
return self.func(self.function, *self.function.free_symbols).doit()
class Jacobian(ImmutableMatrix):
"""Consider some function f:R^n -> R^m
Then the Jacobian is
J = (df_i/dx_j)_ij in M(m x n, R)
That is the Matrix of all partial derivations of f
For instanciation call Jacobian(f), where f should a tuple (f_1, ..., f_m)
If the input is no sympy expression it is converted to such
"""
def __new__(cls, f, *args, **kwargs):
"""returns instance of sympy.ImmutableMatrix"""
if isinstance(f, str):
f = sympify(f) # input of type str may need to be sympified two times
# type(sympify('x', 'y')) == tuple
# type(sympify(sympify('x', 'y'))) == sympy...Tuple
f = sympify(f)
J = [[diff(f_i, x_j) for x_j in f.free_symbols] for f_i in f]
return super().__new__(cls, J, *args, **kwargs)
def det(self, **kwargs):
"""returns Determinant of Matrix (simplified) (sympy expression)"""
return super().det().simplify()
class H(Heaviside):
"""Modification of Heaviside function to adjust limits of integrals by
othertiplying"""
def __new__(cls, arg, **options):
return super().__new__(Heaviside, arg, H0=1, **options)
def __mul__(self, other):
"""self * other"""
variable = self.free_symbols.intersection(other.variables)
if not (isinstance(self, Integral) and variable):
            return super().__mul__(other)
# TODO
# (i) Only one limit is given
# (ii) Some limits are given as not-numbers (e.g. symbols)
idx = other.variables.index(*variable)
interval = solveset(self.args, domain=S.Reals)
if other.limits[idx][2:]:
interval = interval.intersection(Interval(other.limits[idx][1:]))
new_args = list(other.args[1:])
del new_args[idx]
new_args.insert(idx, (variable, interval.start, interval.end))
return other.func(other.args[0], *new_args)
def __rmul__(self, other):
"""other * self"""
return self.__mul__(other)
def __imul__(self, other):
"""self *= other"""
return self.__mul__(other)
####################
# Module Functions #
####################
def kronecker_delta(i, j):
"""pretty self explaining
this is a simplified solution for this module
for a more advanced implementation see sympy.KroneckerDelta
returns Integer
"""
return 1 if i == j else 0
kd = kronecker_delta
def coord_transform(func, symbols, transform):
"""Apply coordinate transformation on given function by iterativly
substituting symbols[i] with transform[i]
Note: symbols and transform need to be in fitting order
returns sympified function
"""
# TODO
# transform integral limits
try:
if not func.free_symbols:
raise ValueError('no free symbols')
except AttributeError:
func = sympify(func)
for i, s in enumerate(symbols):
if not s in func.free_symbols:
            raise ValueError('symbols don\'t match func.free_symbols')
func = func.subs(s, transform[i])
return func
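# --- illustrative example (added sketch, not part of the original module) ---
# Transforming the cartesian expression x**2 + y**2 + z**2 with the SPHERICAL
# substitution rules should collapse to r**2 after simplification.
if __name__ == '__main__':
    expr = coord_transform(x**2 + y**2 + z**2, (x, y, z), SPHERICAL[0])
    print(simplify(expr))    # expected output: r**2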
```
|
{
"source": "jerlyn06/demo_gui",
"score": 3
}
|
#### File: jerlyn06/demo_gui/pycon_talk.py
```python
from PyQt4 import QtGui
import sys
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow,self).__init__()
self.setWindowTitle("CGPA Calculator")
wid = QtGui.QWidget()
self.setCentralWidget(wid)
layout = QtGui.QFormLayout()
self.label = QtGui.QLabel()
self.label.setText("Enter Subject")
self.sub_box = QtGui.QLineEdit()
self.sub_box.setPlaceholderText("Enter your subject")
self.label1 = QtGui.QLabel()
self.label1.setText("Enter marks")
self.mbox = QtGui.QLineEdit()
self.mbox.setPlaceholderText("Enter your marks")
self.btn = QtGui.QPushButton()
self.btn.setText("Get Results")
self.btn.clicked.connect(lambda:self.get_results(str(self.sub_box.text()),int(self.mbox.text())))
self.res = QtGui.QTextEdit()
self.res.setReadOnly(True)
layout.addWidget(self.label)
layout.addWidget(self.sub_box)
layout.addWidget(self.label1)
layout.addWidget(self.mbox)
layout.addWidget(self.btn)
layout.addWidget(self.res)
wid.setLayout(layout)
self.setStyleSheet('''QMainWindow{background-color:black;color:white}QLabel{color:white}''')
def get_results(self,subject,mark):
print(subject,mark)
if __name__=='__main__':
app = QtGui.QApplication(sys.argv)
win = MainWindow()
win.show()
try:
sys.exit(app.exec_())
except:
pass
```
|
{
"source": "jerm1geo/qtsp",
"score": 4
}
|
#### File: qtsp/src/node.py
```python
import math
class Node:
def __init__(self, id, person, location, lat, lon):
self.id = id
self.person = person
self.location = location
self.lat = lat
self.lon = lon
def __str__(self):
return "%d %s %s %f %f" % \
(self.id, self.person, self.location, self.lat, self.lon)
def compute_distance(self, to_node):
lat1 = math.radians(self.lat)
lon1 = math.radians(self.lon)
lat2 = math.radians(to_node.lat)
lon2 = math.radians(to_node.lon)
# Haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
ans = math.pow(math.sin(dlat / 2), 2) + \
math.cos(lat1) * math.cos(lat2) * math.pow(math.sin(dlon / 2), 2)
ans = 2 * math.asin(math.sqrt(ans))
# Radius of Earth: 6371km or 3956mi
radius = 3956
return ans * radius
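# --- illustrative usage (added sketch; the coordinates are just example values) ---
# Great-circle distance between two points, in miles (radius = 3956 above).
if __name__ == '__main__':
    denver = Node(1, "example", "Denver", 39.7392, -104.9903)
    boulder = Node(2, "example", "Boulder", 40.0150, -105.2705)
    print("%.1f miles" % denver.compute_distance(boulder))    # roughly 24 miles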
```
|
{
"source": "jermainedavies/JermainesFirstPrograms",
"score": 3
}
|
#### File: jermainedavies/JermainesFirstPrograms/fibonacci_function.py
```python
def fib(max_num):
full_seq = []
a=0
b=1
c=0
while c < max_num:
full_seq.append(c)
c = a + b
a = b
b = c
return full_seq
print(fib(3000082929393)) #returns [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393, 196418, 317811, 514229, 832040, 1346269, 2178309, 3524578, 5702887, 9227465, 14930352, 24157817, 39088169, 63245986, 102334155, 165580141, 267914296, 433494437, 701408733, 1134903170, 1836311903, 2971215073, 4807526976, 7778742049, 12586269025, 20365011074, 32951280099, 53316291173, 86267571272, 139583862445, 225851433717, 365435296162, 591286729879, 956722026041, 1548008755920, 2504730781961]
```
#### File: jermainedavies/JermainesFirstPrograms/is my number divisible by 3 and 5.py
```python
user_num = int(input("which number would you like to check?"))
def divisible_by_both():
    if user_num % 3 == 0 and user_num % 5 == 0:
        print("your number is divisible by both")
    else:
        print("your number is not divisible by both")

divisible_by_both()
|
{
"source": "JermaineDavy/pyconfigurathon",
"score": 3
}
|
#### File: relative/configuration/config.py
```python
import os
from pyconfigurathon.configurator import configurator
# In this example, a path relative to this script(config.py) is provided. If you decide to follow the relative route,
# when providing file names, you would have to use the following examples as a reference:
# - example.json => for files in the same directory
# - dir_name/settings.json => for files in sub directories
# - ../config.json => for files in parent directories
#
# @param config_name - The name/key of the configuration which should be retrieved from the file.
# @param file - The relative path to the file containing the configuration.
#
def get_config(config_name, file="settings.json"):
conf = configurator(os.path.join(os.path.dirname(__file__), file))
return conf.get(config_key=config_name)
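# --- illustrative usage (added sketch; the key name and file contents below are
# hypothetical and depend on what your settings.json actually holds) ---
#
#     # settings.json: {"database": {"host": "localhost", "port": 5432}}
#     db_settings = get_config("database")
#     print(db_settings)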
```
|
{
"source": "jerm/coinwatch",
"score": 2
}
|
#### File: jerm/coinwatch/coinwatch.py
```python
import configparser
import json
import logging
import pprint
import time
from datadog import initialize as doginitialize
from datadog import ThreadStats as dogThreadStats
import requests
from requests.exceptions import ConnectionError
log = logging.getLogger(__file__)
log.setLevel(logging.INFO)
DEBUG = False
formatter = logging.Formatter(
'{"timestamp": "%(asctime)s", "progname":' +
' "%(name)s", "loglevel": "%(levelname)s", "message":, "%(message)s"}')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
ch.setLevel(logging.INFO)
log.addHandler(ch)
Config = configparser.ConfigParser()
Config.read("coinwatch.ini")
COINS_CONFIG = Config.items("coins")
try:
HEALTHCHECK_URL = Config.get("healthcheck", "url")
except ConfigParser.NoOptionError:
HEALTHCHECK_URL = None
try:
DATADOG = Config.get("general", "use_datadog").lower()
except ConfigParser.NoOptionError:
DATADOG = None
if DATADOG in ['true', 'yes', 'y', 'ja', 'si', 'word']:
DATADOG = True
try:
datadog_stat_prefix = Config.get("datadog", "stat_prefix")
dd_options = {
'api_key': Config.get("datadog", "dd_api_key"),
'app_key': Config.get("datadog", "dd_app_key"),
}
log.debug("using DataDog")
    except configparser.NoOptionError:
        log.warning("use_datadog set to true, but problem loading datadog configs")
DATADOG = None
doginitialize(**dd_options)
stats = dogThreadStats()
stats.start()
else:
DATADOG = None
def coindata_to_datadog(coindata, coinname):
""" Send latest reading to datadog. Maybe create events on some critera
"""
for statname in coindata.keys():
dd_stat_name = "{}.{}.{}".format(datadog_stat_prefix, coinname, statname)
stats.gauge(dd_stat_name, coindata[statname])
log.debug("Sent {}: {} to Datadog".format(dd_stat_name, coindata[statname]))
def collect():
""" Main action area for our collection program """
coins_name_list = []
coinsdict = {}
for coin in COINS_CONFIG:
coinsdict[coin[0].upper()] = {}
coinsdict[coin[0].upper()]['volume'] = float(coin[1])
coins_name_list.append(coin[0].upper())
try:
coin_exchange_url = "https://min-api.cryptocompare.com/data/pricemulti"
request = requests.get(
"{}?fsyms={}&tsyms=USD".format(coin_exchange_url, ','.join(coins_name_list)))
except ConnectionError:
log.error("Error talking to coin API")
exit(2)
if request.ok:
coins_requestdata = request.json()
else:
log.error(request)
exit(1)
if DEBUG:
pprint.pprint(coins_requestdata)
coinsdict['totalvalue'] = 0
for coinname in coins_requestdata.keys():
coinsdict[coinname]['price'] = coins_requestdata[coinname]['USD']
coinsdict[coinname]['value'] = (float(coins_requestdata[coinname]['USD']) *
float(coinsdict[coinname]['volume']))
coinsdict['totalvalue'] += coinsdict[coinname]['value']
if DATADOG:
coindata_to_datadog(coinsdict[coinname], coinname)
if DATADOG:
dd_stat_name = "{}.all.value".format(datadog_stat_prefix)
stats.gauge(dd_stat_name, coinsdict['totalvalue'])
stats.flush(time.time() + 10)
if HEALTHCHECK_URL:
requests.get(HEALTHCHECK_URL)
return coinsdict
if __name__ == '__main__':
""" create logger """
log = logging.getLogger(__file__)
log.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(Config.get("general", "logfile"))
fh.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter(
'{"timestamp": "%(asctime)s", "progname":' +
'"%(name)s", "loglevel": "%(levelname)s", "message":, "%(message)s"}')
fh.setFormatter(formatter)
log.addHandler(fh)
print(json.dumps(collect(), sort_keys=True, indent=4))
```
|
{
"source": "JermineHu/pytorch-classify",
"score": 3
}
|
#### File: pytorch-classify/prediction/my_dataset.py
```python
import h5py
import torch
import torch.utils.data as Data
class MyDataSet(Data.Dataset):
def __init__(self, h5py_path):
data_file = h5py.File(h5py_path, 'r')
self.data = torch.from_numpy(data_file['data'].value)
self.nSamples = self.data.size(0)
self.label = torch.ones((self.nSamples,1))
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index < len(self), 'index range error'
data = self.data[index]
label = self.label[index]
return (data, label)
```
|
{
"source": "jermmycr/galaxy",
"score": 3
}
|
#### File: galaxy/tools/chk_dup.py
```python
import json
import os
import collections
def loadjsons(path):
"""
Find all Jsons and load them in a dict
"""
files = []
data = []
for name in os.listdir(path):
if os.path.isfile(os.path.join(path, name)) and name.endswith('.json'):
files.append(name)
for jfile in files:
data.append(json.load(open("%s/%s" % (path, jfile))))
return data
if __name__ == '__main__':
"""
Iterate all name + synonyms
tell what is duplicated.
"""
jsons = loadjsons("../clusters")
counter = collections.Counter()
namespace = []
for djson in jsons:
items = djson.get('values')
for entry in items:
name = entry.get('value').strip().lower()
counter[name]+=1
namespace.append([name, djson.get('name')])
try:
for synonym in entry.get('meta').get('synonyms'):
name = synonym.strip().lower()
counter[name]+=1
namespace.append([name, djson.get('name')])
except (AttributeError, TypeError):
pass
counter = dict(counter)
for key, val in counter.items():
if val>1:
print ("Warning duplicate %s" % key)
for item in namespace:
if item[0]==key:
print (item)
```
|
{
"source": "Jermmy/sketch-gan",
"score": 2
}
|
#### File: sketch-gan/src/util.py
```python
from os.path import isdir, exists
import cairosvg
import cv2
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from config import *
def render_svg_file():
svg_dirs = [join(svg_data_dir, d) for d in os.listdir(svg_data_dir)
if isdir(join(svg_data_dir, d))]
svg_dirs = svg_dirs[0:30]
for d in svg_dirs:
dest_dir = join(sketch_data_dir, d.split('/')[-1])
if exists(dest_dir):
continue
os.mkdir(dest_dir)
svg_files = [join(d, f) for f in os.listdir(d) if f.endswith(".svg")]
for f in svg_files:
new_file = join(dest_dir, f.split('/')[-1].split('.')[0] + ".png")
cairosvg.svg2png(url=f, write_to=new_file)
print("render to %s" % (new_file))
image = cv2.imread(new_file, cv2.IMREAD_UNCHANGED)
b, g, r, a = cv2.split(image)
image = 255 - a
image = cv2.resize(image, (image_size, image_size), interpolation=cv2.INTER_AREA)
cv2.imwrite(new_file, image)
class DataGenerator():
def __init__(self, noise_dim=1000, batch_size=10, epoch=5):
train_files = self._get_all_train_files()
self.noise_dim = noise_dim
self.batch_size = batch_size
self.train_dataset = tf.data.Dataset.from_tensor_slices((train_files))
self.train_dataset = self.train_dataset.map(self._parse_data)
# self.train_dataset = self.train_dataset.map(
# lambda file: tuple(tf.py_func(
# self._parse_data, [file], [tf.float32, tf.float32]
# ))
# )
self.train_dataset = self.train_dataset.batch(batch_size).repeat(epoch)
def _get_all_train_files(self):
train_files = []
dirs = [join(sketch_data_dir, d) for d in os.listdir(sketch_data_dir)
if isdir(join(sketch_data_dir, d))]
for d in dirs:
files = [join(d, f) for f in os.listdir(d) if f.endswith(".png")]
train_files.extend(files)
train_files = shuffle(train_files)
return train_files
def _get_one_train_file(self):
train_files = []
dirs = [join(sketch_data_dir, d) for d in os.listdir(sketch_data_dir)
if isdir(join(sketch_data_dir, d))]
# for d in dirs:
d = dirs[0]
files = [join(d, f) for f in os.listdir(d) if f.endswith(".png")]
train_files.extend(files)
train_files = shuffle(train_files)
return train_files
def _parse_data(self, filename):
image_string = tf.read_file(filename=filename)
image_decode = tf.image.decode_image(image_string)
image_decode = tf.cast(image_decode, tf.float32)
image_decode = tf.subtract(image_decode, 255.0 / 2)
image_decode = image_decode / 255.0
noise_input = np.random.uniform(-1., 1., size=[self.noise_dim]).astype(np.float32)
return image_decode, noise_input
# def _parse_data(self, filename):
# image = cv2.imread(filename.decode(), cv2.IMREAD_UNCHANGED)
# image = image.reshape(image_size, image_size, 1).astype(np.float32)
# image = (image - 255.0 / 2) / 255.0
#
# # noise_input = tf.random_uniform(shape=[self.noise_dim,],
# # minval=-1.0, maxval=1.0, dtype=tf.float32)
#
# noise_input = np.random.uniform(-1., 1., size=[self.noise_dim]).astype(np.float32)
#
# return image, noise_input
if __name__ == '__main__':
render_svg_file()
# generator = DataGenerator()
# iterator = generator.train_dataset.make_one_shot_iterator()
# image, noise_input = iterator.get_next()
# print(image)
# print(noise_input)
#
# with tf.Session() as sess:
# try:
# while True:
# i, n = sess.run([image, noise_input])
# # print(e[0])
# # print(e[1])
# print(i.shape)
# print(n.shape)
# except tf.errors.OutOfRangeError:
# print("end")
```
|
{
"source": "jermnelson/aristotle-library-apps",
"score": 2
}
|
#### File: fcrepo/http/RequestFactory.py
```python
import base64
from types import StringTypes
from httplib2 import Http
from fcrepo.http.base import B_FCRepoRequestFactory
from fcrepo.http.base import B_FCRepoResponse
from fcrepo.http.base import B_FCRepoResponseBody
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
class ResponseBody(B_FCRepoResponseBody):
def getContent(self):
if 'text' in self._mime_type:
return unicode(base64.b64decode(self._raw_data), 'utf-8')
else:
return self._raw_data
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
class FCRepoResponse(B_FCRepoResponse):
def __init__(self, repository, http_method, request_uri, response, body):
self._repository = repository
self._http_method = http_method
self._request_uri = request_uri
self._status = response['status']
#
self._headers = {}
for name, value in response.items():
self._headers[name] = value
self._footers = {}
#
mime_type = self._headers.get('Content-Type','unknown')
self._body = ResponseBody(body, mime_type)
def getBody(self):
return self._body
def getFooter(self, name, default=None):
return self._footers.get(name, default)
def getFooters(self):
return self._footers
def getHeader(self, name, default=None):
return self._headers.get(name, default)
def getHeaders(self):
return self._headers
def getStatus(self):
return self._status
def getRequestMethod(self):
return self._http_method
def getRequestURI(self):
return self._request_uri
def getRepository(self):
return self._repository
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
class FCRepoRequestFactory(B_FCRepoRequestFactory):
def DELETE(self, request_uri):
return self.submit('DELETE', request_uri)
def GET(self, request_uri):
return self.submit('GET', request_uri)
def POST(self, request_uri, content=None, content_type='unknown',
chunked=False):
return self.submit('POST', request_uri, content, content_type, chunked)
def PUT(self, request_uri, content=None, content_type='unknown',
chunked=False):
return self.submit('PUT', request_uri, content, content_type, chunked)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def submit(self, method, request_uri, content=None, content_type=None,
chunked=False):
headers = { 'Connection' : 'Keep-Alive'
, 'Keep-Alive' : '300'
}
repository = self.getRepositoryURL()
url = self.getRequestURL(request_uri)
#
http = Http()
#http.add_credentials(self.auth_user, self.auth_pwd, self.domain)
auth = base64.encodestring("%s:%s" % (self.auth_user, self.auth_pwd))
headers['Authorization'] = 'Basic ' + auth
if content is None:
self._last_request = '%s ' % method + url
response, body = http.request(url, method, headers=headers)
else:
self._last_request = '%s (%s) ' % (method, content_type) + url
headers['Content-Type'] = content_type
headers['Content-Length'] = str(len(content))
response, body = http.request(url, method, body=content,
headers=headers)
response = FCRepoResponse(repository, method, request_uri,
response, body)
return response
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getAuthScope(self):
return (self.domain, self.port, self.auth_realm)
def getCredentials(self):
return (self.auth_user, self.auth_pwd)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def setAuthRealm(self, realm):
self.auth_realm = realm
```
#### File: fcrepo/http/restapi.py
```python
from exceptions import NotImplementedError
from types import StringTypes
from fcrepo.http.RequestFactory import FCRepoRequestFactory
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
class FCRepoRestAPI:
METHOD_PARAMS = { }
RETURN_STATUS = { }
FORMAT_AS_XML = [ ]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def __init__(self, repository_url='http://localhost:8080/fedora',
username='fedoraAdmin', password='<PASSWORD>',
realm='any', namespace='fedora'):
self.repository_url= repository_url
self.username = username
self.password = password
self.auth_realm = realm
self.namespace = namespace
def guessMimeType(self, content):
# make a very simplistic guess at strings
if type(content) in StringTypes:
if content.rfind('</html>') > 0:
return 'text/html'
elif content.rfind('</xhtml>') > 0:
return 'text/xhtml'
else:
less = content.count('<')
more = content.count('>')
if less == more:
return 'text/xml'
else:
return 'text/plain'
# don't even attempt to figure out all possible Mime Types
return 'unknown'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getRequestFactory(self):
return FCRepoRequestFactory(self.repository_url, self.username,
self.password, self.auth_realm)
def paramsAsURI(self, method_name, params, ignore=()):
valid_params = self.METHOD_PARAMS[method_name]
uri = ''
for name in params:
if name in valid_params and name not in ignore:
if uri: uri += '&'
uri += name + '=' + self.urlSafeString(params[name])
if method_name in self.FORMAT_AS_XML and 'format' not in params:
if uri: uri += '&'
uri += 'format=xml'
return uri
def urlSafeString(self, text):
return text.replace(' ','%20')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def describeRepository(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def addDatastream(self, pid, dsID, **kwargs):
uri = '/objects/' + pid + '/datastreams/' + dsID
param_uri = self.paramsAsURI('addDatastream', kwargs,
ignore=('content',))
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
content = kwargs.get('content', None)
mime_type = kwargs.get('mimeType',None)
if mime_type is None:
mime_type = self.guessMimeType(content)
return repo.POST(uri, content, mime_type)
METHOD_PARAMS['addDatastream'] = ( 'controlGroup', 'dsLocation', 'altIDs'
, 'dsLabel', 'versionable', 'dsState'
, 'formatURI', 'checksumType', 'checksum'
, 'mimeType', 'logMessage'
)
RETURN_STATUS['addDatastream'] = '201'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def addRelationship(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def compareDatastreamChecksum(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def export(self, pid, **kwargs):
uri = '/objects/' + pid + '/export'
param_uri = self.paramsAsURI('export', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
METHOD_PARAMS['export'] = ('format', 'context', 'encoding')
RETURN_STATUS['export'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def findObjects(self, **kwargs):
uri = '/objects?'
if 'query' in kwargs:
uri += 'query=' + kwargs['query']
elif 'terms' in kwargs:
uri += 'terms=' + kwargs['terms']
else:
uri += 'terms=*'
if 'resultFormat' in kwargs:
uri += '&resultFormat=' + kwargs['resultFormat']
else:
uri += '&resultFormat=xml'
param_uri = self.paramsAsURI('findObjects', kwargs,
ignore=('terms','query','resultFormat'))
if len(param_uri) < 2:
param_uri = 'pid=true&label=true'
uri += '&' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
METHOD_PARAMS['findObjects'] = ( 'terms', 'query', 'maxResults'
, 'resultFormat', 'pid', 'label', 'state'
                                     , 'ownerid', 'cDate', 'mDate', 'dcnDate'
, 'title', 'creator', 'subject'
, 'description', 'publisher', 'contributor'
, 'date', 'type', 'format', 'identifier'
, 'source', 'language', 'relation'
, 'coverage', 'rights'
)
RETURN_STATUS['findObjects'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getDatastream(self, pid, dsID, **kwargs):
uri = '/objects/' + pid + '/datastreams/' + dsID
param_uri = self.paramsAsURI('getDatastream', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
FORMAT_AS_XML.append('getDatastream')
METHOD_PARAMS['getDatastream'] = ('format', 'asOfDateTime')
RETURN_STATUS['getDatastream'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getDatastreamHistory(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getDatastreamDissemination(self, pid, dsID, **kwargs):
uri = '/objects/' + pid + '/datastreams/' + dsID + '/content'
param_uri = self.paramsAsURI('getDatastreamDissemination', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
METHOD_PARAMS['getDatastreamDissemination'] = ('asOfDateTime',)
RETURN_STATUS['getDatastreamDissemination'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getDatastreams(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getDissemination(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getNextPID(self, **kwargs):
uri = '/objects/nextPID'
param_uri = self.paramsAsURI('getNextPID', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.POST(uri)
FORMAT_AS_XML.append('getNextPID')
METHOD_PARAMS['getNextPID'] = ('numPIDs', 'namespace', 'format')
RETURN_STATUS['getNextPID'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getObjectHistory(self, pid, **kwargs):
uri = '/objects/' + pid + '/versions'
param_uri = self.paramsAsURI('getObjectHistory', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
FORMAT_AS_XML.append('getObjectHistory')
METHOD_PARAMS['getObjectHistory'] = ('format',)
RETURN_STATUS['getObjectHistory'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getObjectProfile(self, **kwargs):
if 'pid' not in kwargs:
return None
        uri = '/objects/' + kwargs['pid']
param_uri = self.paramsAsURI('getObjectProfile', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
FORMAT_AS_XML.append('getObjectProfile')
METHOD_PARAMS['getObjectProfile'] = ('asOfDateTime','format',)
RETURN_STATUS['getObjectProfile'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getObjectXML(self, pid, **kwargs):
uri = '/objects/' + pid + '/objectXML'
repo = self.getRequestFactory()
return repo.GET(uri)
METHOD_PARAMS['getObjectXML'] = ()
RETURN_STATUS['getObjectXML'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getRelationships(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ingest(self, pid='new', **kwargs):
uri = '/objects/' + pid
param_uri = self.paramsAsURI('ingest', kwargs,
ignore=('pid', 'content'))
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
content = kwargs.get('content', None)
return repo.POST(uri, content, 'text/xml')
METHOD_PARAMS['ingest'] = ( 'label', 'format', 'encoding', 'namespace'
, 'ownerId', 'logMessage', 'ignoreMime'
, 'content'
)
RETURN_STATUS['ingest'] = '201'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def listDatastreams(self, pid, **kwargs):
uri = '/objects/' + pid + '/datastreams'
param_uri = self.paramsAsURI('listDatastreams', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
FORMAT_AS_XML.append('listDatastreams')
METHOD_PARAMS['listDatastreams'] = ('format', 'asOfDateTime')
RETURN_STATUS['listDatastreams'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def listMethods(self, pid, **kwargs):
uri = '/objects/' + pid + '/methods'
param_uri = self.paramsAsURI('listMethods', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
FORMAT_AS_XML.append('listMethods')
METHOD_PARAMS['listMethods'] = ('format', 'asOfDateTime')
RETURN_STATUS['listMethods'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def modifyDatastream(self, pid, dsID, **kwargs):
uri = '/objects/' + pid+ '/datastreams/' + dsID
param_uri = self.paramsAsURI('modifyDatastream', kwargs,
ignore=('content',))
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
content = kwargs.get('content', None)
mime_type = kwargs.get('mimeType',None)
if mime_type is None:
mime_type = self.guessMimeType(content)
        return repo.POST(uri, content, mime_type)
METHOD_PARAMS['modifyDatastream'] = ( 'dsLocation', 'altIDs', 'dsLabel'
, 'versionable', 'dsState', 'formatURI'
, 'checksumType', 'checksum'
, 'mimeType', 'logMessage', 'force'
, 'ignoreContent', 'content'
)
RETURN_STATUS['modifyDatastream'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def modifyObject(self, pid, **kwargs):
uri = '/objects/' + pid
param_uri = self.paramsAsURI('modifyObject', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.PUT(uri)
METHOD_PARAMS['modifyObject'] = ('label', 'ownerId', 'state', 'logMessage')
RETURN_STATUS['modifyObject'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def purgeDatastream(self, pid, dsID, **kwargs):
uri = '/objects/' + pid + '/datastreams/' + dsID
param_uri = self.paramsAsURI('purgeDatastream', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.DELETE(uri)
METHOD_PARAMS['purgeDatastream'] = ( 'startDT', 'endDT', 'logMessage'
, 'force'
)
RETURN_STATUS['purgeDatastream'] = '204'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def purgeObject(self, pid, **kwargs):
uri = '/objects/' + pid
param_uri = self.paramsAsURI('purgeObject', kwargs)
if param_uri:
uri += '?' + param_uri
#
repo = self.getRequestFactory()
return repo.DELETE(uri)
METHOD_PARAMS['purgeObject'] = ( 'logMessage', 'force')
RETURN_STATUS['purgeObject'] = '204'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def purgeRelationship(self, **kwargs):
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def resumeFindObjects(self, **kwargs):
uri = '/objects?'
if 'query' in kwargs:
uri += 'query=' + kwargs['query']
elif 'terms' in kwargs:
uri += 'terms=' + kwargs['terms']
else:
uri += 'terms=*'
if 'resultFormat' in kwargs:
uri += '&resultFormat=' + kwargs['resultFormat']
else:
uri += '&resultFormat=xml'
param_uri = self.paramsAsURI('findObjects', kwargs,
ignore=('terms','query','resultFormat'))
if len(param_uri) < 2:
param_uri = 'pid=true&label=true'
uri += '&' + param_uri
#
repo = self.getRequestFactory()
return repo.GET(uri)
METHOD_PARAMS['resumeFindObjects'] = ( 'sessionToken', 'terms', 'query'
, 'maxResults', 'resultFormat', 'pid'
, 'label', 'state', 'ownerid', 'cDate'
, 'mDate', 'dcnDate', 'title'
, 'creator', 'subject', 'description'
, 'publisher', 'contributor', 'date'
, 'type', 'format', 'identifier'
, 'source', 'language', 'relation'
, 'coverage', 'rights'
)
RETURN_STATUS['resumeFindObjects'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def setDatastreamState(self, pid, dsID, **kwargs):
uri = '/objects/' + pid + '/datastreams/' + dsID
state = kwargs.get('dsState', 'A')
        if len(state) > 1:
            state = state[0]
uri += '?dsState=' + state
#
repo = self.getRequestFactory()
return repo.PUT(uri)
METHOD_PARAMS['setDatastreamState'] = ('dsState',)
RETURN_STATUS['setDatastreamState'] = '200'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def setDatastreamVersionable(self, pid, dsID, **kwargs):
uri = '/objects/' + pid + '/datastreams/' + dsID
versionable = kwargs.get('versionable', 'true')
if isinstance(versionable, bool):
if versionable:
versionable = 'true'
else:
versionable = 'false'
uri += '?versionable=' + versionable
#
repo = self.getRequestFactory()
return repo.PUT(uri)
METHOD_PARAMS['setDatastreamVersionable'] = ('versionable',)
RETURN_STATUS['setDatastreamVersionable'] = '200'
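# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Illustrative usage sketch (added for clarity, not part of the original
# module). It assumes `client` is an instance of the REST client class
# defined above, and that 'demo:1' and 'DC' are hypothetical pid and
# datastream identifiers.
def _example_rest_calls(client):
    "Sketch of a few REST calls using the methods defined above"
    # Update an object's label and state (PUT /objects/demo:1?...)
    client.modifyObject('demo:1',
                        label='Example object',
                        state='A',
                        logMessage='metadata update')
    # Mark a datastream inactive (PUT /objects/demo:1/datastreams/DC?dsState=I)
    client.setDatastreamState('demo:1', 'DC', dsState='I')
    # Remove the object entirely (DELETE /objects/demo:1)
    return client.purgeObject('demo:1', logMessage='cleanup')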
```
#### File: bibframe/ingesters/bibframeMARCParser.py
```python
__author__ = "<NAME>"
import json
import os
import re
from aristotle.settings import PROJECT_HOME, REDIS_DATASTORE
##from pyparsing import alphas, nums, dblQuotedString, Combine, Word, Group, delimitedList, Suppress, removeQuotes
##
##
##bibframeMARCMap = None
##def getbibframeMARCMap():
## global bibframeMARCMap
##
## if bibframeMARCMap is None:
## tag = Word( nums, min=3, max=3 )
## subfields = delimitedList(Word("$", alphas), "+")
## subfield_conditional = Word( "/", alphas ) | Word(",", alphas)
## field_range = nums + "XX"
##
##
##test_maps = {'manufacture': '260 $e+$f+$g',
## 'subject': '6XX, 043',
## 'title': '245 $a',
## 'upi': '0247-+2"uri"/a,z'}
MARC_FIXED_CODES = {}
for key in ['marc-illustration-codes',
'marc-language-code-list']:
MARC_FIXED_CODES[key] = json.load(
open(os.path.join(
PROJECT_HOME,
'marc_batch',
'fixures',
'{0}.json'.format(key))))
MARC_FIXED_CODES['007'] = json.load(
open(os.path.join(
PROJECT_HOME,
'marc_batch',
'fixures',
'marc-007-codes.json')))
BASIC_CONDITIONAL_RE = re.compile(r"""
if\s(?P<marc>marc:\w+)
\s(?P<operator>\D+)\s
'(?P<string>\w+)'
""",
re.VERBOSE)
METHOD_CONDITIONAL_RE = re.compile(r"""
if\s(?P<marc>marc:\w+)
[.]+(?P<method>\w+)\('(?P<param>\w+)'\)
\s(?P<operator>[>|<|=]+)\s
(?P<string>\w+)
""",
re.VERBOSE)
MARC_FLD_RE = re.compile(r"""
[marc:] # Matches a character of the 'marc:' prefix
(?P<tag>\d{1,3} # Matches specific MARC tags
| X{2,2})
(?P<ind1>\w{1,1}) # Matches indicator 1
(?P<ind2>\w{1,1}) # Matches indicator 2
(?P<subfield>\w{1,1}) # Matches subfield
""",
re.VERBOSE)
MARC_FX_FLD_RE = re.compile(r"""
[marc:] # Matches a character of the 'marc:' prefix
(?P<tag>\d{3,3}|ldr) # Matches specific MARC tags or ldr
(?P<code>\w{1,1}) # Code value in fixed position
(?P<position>\d{2,2}) # Position in fixed field
""",
re.VERBOSE)
MARC_FX_FLD_RANGE_RE = re.compile(r"""
[marc:] # Matches a character of the 'marc:' prefix
(?P<tag>\d{3,3}) # Matches specific MARC tags
(?P<start>\d{2,2}) # start fixed position
-
(?P<end>\d{2,2}) # End fixed position
""",
re.VERBOSE)
class MARCParser(object):
def __init__(self, **kwargs):
self.entity_info = {}
self.record = kwargs.get('record')
self.redis_datastore = kwargs.get('redis_datastore',
REDIS_DATASTORE)
self.rules = json.load(
open(os.path.join(PROJECT_HOME,
'bibframe',
'ingesters',
kwargs.get('rules_filename')),
'rb'))
def add_invalid_value(self,
property_name,
value):
"""Method adds a value to the redis_datastore invalid
identifiers set
Parameters:
property_name -- property name
value -- Value to add to set
"""
pass
def parse(self):
"""Method parses through rules and applies conditional, retriving,
and post-processing for each in rules.
"""
for property_name, rule in self.rules.iteritems():
self.entity_info[property_name] = []
marc_rules = rule.get('marc')
if not marc_rules:
continue
try:
for marc_rule in rule.get('marc'):
marc_value = []
if marc_rule.get('conditional', None):
result = conditional_MARC21(
self.record,
marc_rule)
if len(result) > 0:
marc_value = result
else:
mapping = marc_rule.get('map')
if mapping is not None:
for marc_pattern in mapping:
result = parse_MARC21(
self.record,
marc_pattern)
if len(result) > 0:
marc_value.extend(result)
self.test_validity(
marc_pattern,
marc_rule,
property_name,
result)
if len(marc_value) > 0:
if 'lookup' in marc_rule:
temp_values = []
lookup_keys = marc_rule.get('lookup')
for lookup_key in lookup_keys:
for value in marc_value:
if MARC_FIXED_CODES[lookup_key].has_key(value):
temp_values.append(
MARC_FIXED_CODES[lookup_key][value])
marc_value = temp_values
self.entity_info[property_name].extend(marc_value)
if rule.get('post-processing', None):
self.entity_info[property_name] = [post_processing(
self.entity_info[property_name],
rule.get('post-processing'))]
except Exception as error:
print("Problem ingesting {0}: {1}".format(rule, error))
def test_validity(self,
current_pattern,
marc_rule,
property_name,
result,
key_base='identifiers'):
"""Method takes a marc mapping, tests to see if it invalid, and then
applies the result to a Redis set for invalid values of property, usually
an identifier, and saves to a sorted set in the Redis datastore.
Parameters:
current_pattern -- Current pattern being evaluated
property_name -- BIBFRAME entity property
result -- Result from applying rule to MARC record
key_base -- Redis key base, defaults to identifiers
"""
if not 'invalid' in marc_rule:
return
invalid_pattern = marc_rule['invalid']
redis_key = "{0}:{1}:invalid".format(key_base,
property_name)
for row in result:
self.redis_datastore.sadd(redis_key, row)
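# Illustrative sketch (not part of the original module): a minimal,
# hypothetical example of the JSON rules structure that MARCParser.parse
# expects. The property names, MARC patterns, and lookup key below are
# invented examples shaped to match the regexes defined above, not an
# actual bibframe rules file.
def _example_rules():
    "Returns a hypothetical rules dict in the shape consumed by parse()"
    return {
        "title": {
            # variable field: tag 245, blank indicators ("_"), subfield a
            "marc": [{"map": ["marc:245__a"]}],
            "post-processing": "concat"},
        "language": {
            # fixed-field range: tag 008, positions 35-37, then a code lookup
            "marc": [{"map": ["marc:00835-37"],
                      "lookup": ["marc-language-code-list"]}]}}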
def conditional_MARC21(record, rule):
"""Function takes a conditional and a mapping dict (called a rule)
and returns the result if the test condition matches the antecedent
Parameters:
record -- MARC21 record
rule -- Rule to match MARC field on
"""
output = []
if rule.has_key('conditional'):
conditional = BASIC_CONDITIONAL_RE.search(
rule.get('conditional'))
if conditional is None:
conditional = METHOD_CONDITIONAL_RE.search(
rule.get('conditional'))
condition_result = conditional.groupdict()
operator = condition_result.get('operator')
condition_marc_search = MARC_FLD_RE.search(
condition_result.get('marc'))
condition_marc_result = condition_marc_search.groupdict()
for mapping in rule.get('map'):
search = MARC_FLD_RE.search(mapping)
if not search:
continue
result = search.groupdict()
fields = record.get_fields(result.get('tag'))
if len(fields) < 1:
return output
for field in fields:
test_result = parse_conditional_field(
field,
condition_marc_result)
if condition_result.has_key('method'):
for row in test_result:
if not hasattr(row,
condition_result.get('method')):
return output
test_value = getattr(
row,
condition_result.get('method'))(
condition_result.get('param'))
if eval("{0} {1} {2}".format(
test_value,
operator,
condition_result.get('string'))):
output.extend(
parse_variable_field(
field,
result))
elif ['is', '='].count(operator) > 0:
test_result = parse_variable_field(
field,
condition_marc_result)
test_condition = [condition_result.get('string'),]
if test_result == test_condition:
output.extend(parse_variable_field(field,
result))
return output
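# Illustrative note (added for clarity): the conditional strings matched by
# BASIC_CONDITIONAL_RE and METHOD_CONDITIONAL_RE above look roughly like
# the hypothetical examples below; the tags and values are invented.
#
# "if marc:0247_2 is 'uri'" -- BASIC form: test subfield $2 of an 024
# field with first indicator 7 against the literal 'uri'
# "if marc:020__a.count('X') > 0" -- METHOD form: call a string method on
# the matched value and compare the result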
def parse_conditional_field(field,
condition_marc_result):
"""Function takes a field and if the condition_marc_result mapping
includes X in either indicator, iterates through the indicators and
returns a list of matches.
Parameter:
Field -- MARC field
condition_marc_result -- Regex results from testing condition
"""
output = []
test_indicator1 = condition_marc_result.get('ind1')
test_indicator2 = condition_marc_result.get('ind2')
if test_indicator1 != 'X' and test_indicator2 != 'X':
if field.indicators == [test_indicator1, test_indicator2]:
output = field.get_subfields(
condition_marc_result.get('subfield'))
elif test_indicator1 == 'X' and test_indicator2 != 'X':
if field.indicators[1] == test_indicator2:
output = field.get_subfields(
condition_marc_result.get('subfield'))
elif test_indicator1 != 'X' and test_indicator2 == 'X':
if field.indicators[0] == test_indicator1:
output = field.get_subfields(
condition_marc_result.get('subfield'))
return output
def parse_fixed_field(field, re_dict):
"""Function takes a MARC21 field and the Regex dictgroup
for the fixed field and returns a list of values after
doing a look-up on supporting codes
Parameters:
field -- MARC21 field
re_dict -- Regular Expression dictgroup
"""
output = []
if re_dict.has_key('start'):
# Range operation on fixed field
tag = re_dict.get('tag')
if tag != field.tag:
return output
start = re_dict.get('start')
end = re_dict.get('end')
range_value = field.data[int(start):int(end)+1]
if range_value is not None:
output.append(range_value)
if field.data[0] == re_dict.get('code'):
tag = re_dict.get('tag')
code = re_dict.get('code')
position = re_dict.get('position')
position_code = field.data[int(re_dict.get('position'))]
if not MARC_FIXED_CODES.has_key(tag):
return output
if not MARC_FIXED_CODES[tag].has_key(code):
return output
if MARC_FIXED_CODES[tag][code].has_key(position):
output.append(
MARC_FIXED_CODES[tag][code][position].get(position_code))
return output
def parse_variable_field(field, re_dict):
"""Function takes a MARC21 field and the Regex dictgroup and
returns a list of the subfields that match the Regex patterns.
Parameters:
field -- MARC21 field
re_dict -- Regular Expression dictgroup
"""
output = []
if field is None or re_dict is None:
return output
test_ind1 = re_dict.get('ind1').replace("_", " ")
test_ind2 = re_dict.get('ind2').replace("_", " ")
if field.indicators == [test_ind1, test_ind2]:
output = field.get_subfields(re_dict.get('subfield'))
return output
def parse_MARC21(record, mapping):
"""Function returns a list of values from a MARC record that match
a MARC 21 mapping in the format marc:XXXiiY where XXX is the tag, ii is
indicator1 and indicator2, and Y is the subfield.
Parameters:
record -- MARC21 record
mapping -- Rule to match MARC field on
"""
output = []
var_field_search = MARC_FLD_RE.search(mapping)
fixed_field_search = MARC_FX_FLD_RE.search(mapping)
if fixed_field_search is None:
fixed_field_search = MARC_FX_FLD_RANGE_RE.search(mapping)
if var_field_search is None and fixed_field_search is None:
return output
if fixed_field_search:
regex_result = fixed_field_search.groupdict()
elif var_field_search:
regex_result = var_field_search.groupdict()
else: # Probably the leader; returns empty output
return output
fields = record.get_fields(regex_result.get('tag'))
for field in fields:
if hasattr(field, 'indicators'):
fld_result = parse_variable_field(field,
regex_result)
else:
fld_result = parse_fixed_field(field,
regex_result)
if len(fld_result) > 0:
for row in fld_result:
output.append(row)
return output
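# Illustrative note (added for clarity): hypothetical mapping strings in
# the three shapes parse_MARC21 understands (the tags and subfields are
# examples only, not actual rules):
#
# "marc:245__a" -- variable field: tag 245, blank indicators written as
# underscores, subfield a (handled by parse_variable_field)
# "marc:007v01" -- fixed field: tag 007, category code 'v', position 01
# (handled by parse_fixed_field with a code lookup)
# "marc:00835-37" -- fixed-field range: tag 008, positions 35 through 37
# (handled by parse_fixed_field's range branch)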
def post_processing(result, directive):
"""Performs one or more opeations on the result of MARC21-to-BIBFRAME
mapping.
Parameters:
result -- result of parsing the MARC with BIBFRAME rule
directive -- Instructions for manipulating the result
"""
# Combines all of results into a single string
if directive == 'concat':
return ' '.join(result)
elif type(directive) == dict:
type_of = directive.get('type')
value = directive.get('value')
if type_of == 'delimiter':
return '{0}'.format(value).join(result)
elif type_of == 'lang-lookup':
# Assumes the language code table loaded at module level above
return [MARC_FIXED_CODES['marc-language-code-list'][code] for code in result]
elif type_of == 'prepend':
output = '{0} {1}'.format(value,
', '.join(result))
return output
elif type_of == 'second2last':
# Used for organizational system
return "{0}{1}{2}".format(" ".join(result[:-1]),
value,
result[-1])
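# Illustrative sketch (not part of the original module) showing what a few
# of the post-processing directives above produce; the input lists are
# invented examples.
def _example_post_processing():
    "Shows the expected output of a few post_processing directives"
    assert post_processing(['New York', 'Basic Books'], 'concat') == \
        'New York Basic Books'
    assert post_processing(['eng', 'fre'],
                           {'type': 'delimiter', 'value': '; '}) == 'eng; fre'
    assert post_processing(['1998'],
                           {'type': 'prepend', 'value': 'c'}) == 'c 1998'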
```
#### File: bibframe/ingesters/__init__.py
```python
__author__ = "<NAME>"
import datetime, re, pymarc, os, sys,logging, redis, time
from bibframe.models import Annotation, Organization, Work, Instance, Person
from call_number.redis_helpers import generate_call_number_app
from person_authority.redis_helpers import get_or_generate_person
from aristotle.settings import PROJECT_HOME
from title_search.redis_helpers import generate_title_app,search_title
import marc21_facets
from MARC21 import MARC21toBIBFRAME
from lxml import etree
from rdflib import RDF,RDFS,Namespace
import json
STD_SOURCE_CODES = json.load(open(os.path.join(PROJECT_HOME,
'bibframe',
'fixures',
'standard-id-src-codes.json'),
'rb'))
BF = Namespace('http://bibframe.org/model-abstract/')
def info():
print("Current working directory {0}".format(os.getcwd()))
```
#### File: bibframe/ingesters/MARC21.py
```python
__author__ = "<NAME>"
import datetime
import logging
import json
import marc21_facets
import os
import pymarc
import re
import redis
import sys
import time
from bibframe.models import Annotation, Organization, Work, Holding, Instance
# Globe is referenced in __classify_work_class__ below; assumed to be defined in bibframe.models
from bibframe.models import Person, Book, Cartography, Globe, Manuscript, Map, MixedMaterial
from bibframe.models import MovingImage, MusicalAudio, NonmusicalAudio
from bibframe.models import NotatedMusic, SoftwareOrMultimedia, StillImage
from bibframe.models import RemoteSensingImage, TitleEntity, ThreeDimensionalObject
from bibframe.ingesters.Ingester import Ingester
from bibframe.ingesters import tutt_maps, marc21_maps, web_services
from bibframe.ingesters.bibframeMARCParser import MARCParser
from discovery.redis_helpers import slug_to_title
from django.template.defaultfilters import slugify
from call_number.redis_helpers import generate_call_number_app
from person_authority.redis_helpers import get_or_generate_person
from aristotle.settings import IS_CONSORTIUM, PROJECT_HOME
from organization_authority.redis_helpers import get_or_add_organization
from title_search.redis_helpers import generate_title_app, process_title
from title_search.redis_helpers import index_title, search_title
from keyword_search.whoosh_helpers import index_marc
from lxml import etree
from rdflib import RDF, RDFS, Namespace
from bibframe.classifiers import simple_fuzzy
import aristotle.settings as settings
from aristotle.settings import REDIS_DATASTORE
field007_lkup = json.load(open(os.path.join(PROJECT_HOME,
"bibframe",
"fixures",
"marc21-007.json"),
"rb"))
ADDITIVE_SUBFLD_RE = re.compile("[+$](?P<subfld>\w)")
CONDITIONAL_SUBFLD_ID_RE = re.compile(r'[+](?P<indicator2>\w)"(?P<fld_value>\w+)"')
IND_CONDITIONAL_RE = re.compile(r'if i(?P<indicator>\w)=(?P<test>\w)')
PRECEDE_RE = re.compile(r'precede \w+ with "(?P<prepend>\w+:*)')
COMBINED_SUBFLD_RE = re.compile(r'[$](?P<subfld>\w)[+]*')
SUBFLD_RE = re.compile(r"[$|/|,](?P<subfld>\w)")
SINGLE_TAG_IND_RE = re.compile(r'(\d{3})(\d|[-])(\d|[-])')
RULE_ONE_RE = re.compile(r"\d{3},*-*\s*[$|/](?P<subfld>\w)$")
RULE_TWO_RE = re.compile(r"\d{3},*-*\s*[$|/](?P<subfld>\w)[+]*")
TAGS_RE = re.compile(r"(?P<tag>\d{3}),*-*")
MARC_FLD_RE = re.compile(r"(\d+)([-|w+])([-|w+])/(\w+)")
class MARC21Helpers(object):
"""
MARC21 Helpers for MARC21 Ingester classes
"""
marc_fld_re = re.compile(r"(\d+)(--)*([//].+)*[,]*")
def __init__(self,marc_record):
self.record = marc_record
def getSubfields(self,tag,*subfields):
"""
Extracts values from a MARC Variable Field
:param tag: MARC21 tag
:param subfields: one or more subfields
"""
if self.record[tag] is not None:
field = self.record[tag]
return ' '.join(field.get_subfields(*subfields))
class MARC21IngesterException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "MARC21IngesterException Error={0}".format(self.value)
class MARC21Ingester(Ingester):
"Base RLSP BIBFRAME Ingester class for a MARC21 Record"
def __init__(self, **kwargs):
"""Creates an Ingester with basic parameters
Keywords:
record -- MARC21 Record
redis_datastore -- Single Redis instance or Redis Cluster
"""
self.record = kwargs.get('record', None)
self.redis_datastore = kwargs.get('redis_datastore', None)
self.entity_info = {}
def __extract__(self,**kwargs):
"""
Helper function takes a tag and list of rules and returns
either a set or a string of values
:kwarg tags: A list of MARC21 tags
:kwarg indict1: A rule for indicator 1
:kwarg indict2: A rule for indicator 2
:kwarg subfields: A list of subfields
"""
output, fields = [], []
tags = kwargs.get('tags', None)
if tags is None:
raise MARC21IngesterException("__extract__ helper function requires at least one MARC21 field tag")
indicator1_rule = kwargs.get('indict1', None)
indicator2_rule = kwargs.get('indict2', None)
subfields = kwargs.get('subfields', None)
for tag in tags:
fields.extend(self.record.get_fields(tag))
for field in fields:
if indicator1_rule is not None:
if not eval(indicator1_rule,field.indicator1):
continue
if indicator2_rule is not None:
if not eval(indicator2_rule,field.indicator2):
continue
for subfield in subfields:
output.extend(field.get_subfields(subfield))
if len(output) == 1:
return output[0]
elif len(output) > 1:
return set(output)
def __rule_one__(self, rule, in_order=True):
"""Helper method for MARC rule matching
For MARC21 Rule patterns like:
'130,730,830,245,246,247,242 $n'
'222,210 $b'
Parameters:
rule -- Text string of MARC21 to BIBFRAME rule
in_order -- Returns first
"""
values = []
rule_result = RULE_ONE_RE.search(rule)
if rule_result is not None:
if ADDITIVE_SUBFLD_RE.search(rule) is not None:
subfields = ADDITIVE_SUBFLD_RE.findall(rule)
else:
subfields = [rule_result.group('subfld'),]
tags = TAGS_RE.findall(rule)
if in_order is True:
while 1:
if len(tags) < 1:
break
tag = tags.pop(0)
marc_fields = self.record.get_fields(tag)
if len(marc_fields) > 0:
break
else:
marc_fields = []
for tag in tags:
marc_fields.extend(self.record.get_fields(tag))
if len(marc_fields) > 0:
for marc_field in marc_fields:
if not marc_field.is_control_field():
for subfield in subfields:
tag_value = marc_field.get_subfields(subfield)
tag_value = set(tag_value)
if tag_value is not None:
values.append(' '.join(tag_value))
values = list(set(values))
return values
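# Illustrative note (added for clarity): given a hypothetical rule string
# such as '130,730,830,245 $n', __rule_one__ extracts the tags
# ['130', '730', '830', '245'] via TAGS_RE and the subfield 'n' via
# ADDITIVE_SUBFLD_RE / RULE_ONE_RE, then gathers the matching subfield
# values from the record (stopping at the first tag with data when
# in_order is True).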
class MARC21toFacets(MARC21Ingester):
"""
MARC21toFacets creates a BIBFRAME annotations to be associated with
either a Work or Instance.
"""
def __init__(self, **kwargs):
self.facets = None
self.creative_work = kwargs.get('creative_work')
self.instance = kwargs.get('instance')
super(MARC21toFacets, self).__init__(**kwargs)
def add_access_facet(self, **kwargs):
"""
Creates a bibframe:Annotation:Facet:Access based on
extracted info from the MARC21 Record
:keyword instance: BIBFRAME Instance, defaults to self.instance
:keyword record: MARC21 record, defaults to self.marc_record
"""
instance = kwargs.get("instance", self.instance)
record = kwargs.get("record", self.record)
access = marc21_facets.get_access(record)
facet_key = "bf:Facet:access:{0}".format(slugify(access))
self.__add_label__(facet_key)
self.redis_datastore.sadd(facet_key, instance.redis_key)
self.redis_datastore.sadd("{0}:hasAnnotation".format(instance.redis_key),
facet_key)
def add_format_facet(self, **kwargs):
"""
Creates a bibframe:Annotation:Facet:Format based on the
rda:carrierTypeManifestation property of the marcr:Instance
:keyword instance: BIBFRAME Instance, defaults to self.instance
"""
# Extract's the Format facet value from the Instance and
# creates an Annotation key that the instance's redis key
instance = kwargs.get("instance", self.instance)
facet_key = "bf:Facet:format:{0}".format(
slugify(
getattr(instance,'rda:carrierTypeManifestation')))
self.redis_datastore.sadd(facet_key, instance.redis_key)
self.__add_label__(facet_key)
self.redis_datastore.zadd('bf:Facet:format:sort',
float(self.redis_datastore.scard(facet_key)),
facet_key)
instance_annotation_key = "{0}:hasAnnotation".format(instance.redis_key)
self.redis_datastore.sadd("{0}:hasAnnotation".format(
instance.redis_key),
facet_key)
def add_lc_facet(self, **kwargs):
"""
Adds bibframe:CreativeWork to the bibframe:Annotation:Facet:LOCLetter
facet based on extracted info from the MARC21 Record
:keyword creative_work: BIBFRAME CreativeWork, defaults to
self.creative_work
:keyword record: MARC21 record, defaults to self.marc_record
"""
creative_work = kwargs.get('creative_work', self.creative_work)
record = kwargs.get('record', self.record)
lc_facet, lc_facet_desc = marc21_facets.get_lcletter(record)
for row in lc_facet_desc:
facet_key = "bf:Facet:loc-first-letter:{0}".format(
slugify(lc_facet))
self.redis_datastore.sadd(facet_key, creative_work.redis_key)
self.redis_datastore.sadd("{0}:hasAnnotation".format(
creative_work.redis_key),
facet_key)
self.redis_datastore.hset(
"bf:Facet:labels",
facet_key,
row)
self.redis_datastore.zadd(
"bf:Facet:loc-first-letter:sort",
float(self.redis_datastore.scard(facet_key)),
facet_key)
self.redis_datastore.sadd("{0}:hasAnnotation".format(
creative_work.redis_key),
facet_key)
def add_language_facet(self, **kwargs):
"""
Method takes an instance and adds to
bf:Facet:language facet
"""
instance = kwargs.get('instance', self.instance)
language = self.redis_datastore.hget(instance.redis_key,
'language')
if language is not None:
facet_key = 'bf:Facet:language:{0}'.format(
slugify(language))
self.redis_datastore.sadd(facet_key, instance.redis_key)
self.redis_datastore.hset('bf:Facet:labels',
facet_key,
language)
instance_annotation_key = "{0}:hasAnnotation".format(
instance.redis_key)
self.redis_datastore.zadd(
'bf:Facet:language:sort',
float(self.redis_datastore.scard(facet_key)),
facet_key)
self.redis_datastore.sadd(instance_annotation_key,
facet_key)
def add_locations_facet(self, **kwargs):
"""
Method takes an instance and a MARC21 record, extracts all CC's
location (holdings) codes from the MARC21 record and adds the instance
key to all of the holdings facets.
:param instance: BIBFRAME Instance, defaults to self.instance
:param record: MARC21 record, defaults to self.marc_record
"""
instance = kwargs.get("instance", self.instance)
record = kwargs.get("record", self.record)
if hasattr(settings, "IS_CONSORTIUM"):
consortium = settings.IS_CONSORTIUM
else:
consortium = False
if consortium is True:
output = marc21_facets.get_carl_location(record)
if len(output) > 0:
redis_key = "bf:Facet:location:{0}".format(
output.get("site-code"))
self.redis_datastore.sadd(redis_key, instance.redis_key)
self.redis_datastore.hset(instance.redis_key,
"ils-bib-number",
output.get('ils-bib-number'))
self.redis_datastore.hset(instance.redis_key,
"ils-item-number",
output.get('ils-item-number'))
self.redis_datastore.zadd("bf:Facet:locations:sort",
float(self.redis_datastore.scard(redis_key)),
redis_key)
else:
locations = marc21_facets.get_cc_location(record)
if len(locations) > 0:
for location in locations:
redis_key = "bf:Facet:location:{0}".format(
slugify(location[1]))
self.redis_datastore.sadd(redis_key, instance.redis_key)
if not self.redis_datastore.hexists(
"bf:Facet:labels",
slugify(location[1])):
self.redis_datastore.hset(
"bf:Facet:labels",
redis_key,
location[1])
self.redis_datastore.zadd(
"bf:Facet:locations:sort",
float(self.redis_datastore.scard(redis_key)),
redis_key)
self.redis_datastore.sadd("{0}:hasAnnotation".format(instance.redis_key),
redis_key)
def add_publish_date_facet(self, **kwargs):
"""
Method adds the publication date of the instance to the
bf:Facet:pub-year:{year}
"""
instance = kwargs.get('instance', self.instance)
publish_year = self.redis_datastore.hget(
instance.redis_key,
'rda:dateOfPublicationManifestation')
if publish_year is not None:
facet_key = 'bf:Facet:pub-year:{0}'.format(publish_year)
self.redis_datastore.sadd(facet_key, instance.redis_key)
self.redis_datastore.hset('bf:Facet:labels',
facet_key,
publish_year)
instance_annotation_key = "{0}:hasAnnotation".format(
instance.redis_key)
self.redis_datastore.zadd(
'bf:Facet:pub-year:sort',
float(self.redis_datastore.scard(facet_key)),
facet_key)
self.redis_datastore.sadd(instance_annotation_key,
facet_key)
def ingest(self,**kwargs):
"""
Method runs all of the Facet generation methods
:param creative_work: BIBFRAME CreativeWork, defaults to self.creative_work
:param instance: BIBFRAME Instance, defaults to self.instance
:param record: MARC21 record, defaults to self.marc_record
"""
creative_work = kwargs.get('creative_work', self.creative_work)
instance = kwargs.get("instance", self.instance)
record = kwargs.get('record', self.record)
self.add_access_facet(instance=instance,record=record)
self.add_format_facet(instance=instance)
self.add_lc_facet(creative_work=creative_work,
record=record)
self.add_locations_facet(instance=instance,
record=record)
self.add_publish_date_facet(instance=instance)
self.add_language_facet(instance=instance)
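# Illustrative note (added for clarity): the methods above build a Redis
# layout along these lines (members are instance or creative work keys):
# bf:Facet:access:<slug> set of instance keys
# bf:Facet:format:<slug> set of instance keys, ranked in bf:Facet:format:sort
# bf:Facet:loc-first-letter:<slug> set of creative work keys
# bf:Facet:language:<slug>, bf:Facet:location:<slug>, bf:Facet:pub-year:<year>
# <entity key>:hasAnnotation set linking the entity back to its facets
# bf:Facet:labels hash mapping facet keys to display labels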
class MARC21toInstance(MARCParser):
def __init__(self, **kwargs):
kwargs['rules_filename'] = 'bibframe-instance-map.json'
super(MARC21toInstance, self).__init__(**kwargs)
if kwargs.has_key('instanceOf'):
self.entity_info['instanceOf'] = kwargs.get('instanceOf')
def add_instance(self):
self.instance = Instance(redis_datastore=self.redis_datastore)
for key, value in self.entity_info.iteritems():
if key is not None and value is not None:
setattr(self.instance,
key,
value)
self.instance.save()
def ingest(self):
self.parse()
#! Should do duplication check before calling add_instance
self.add_instance()
class MARC21toBIBFRAME(MARC21Ingester):
"""
MARC21toBIBFRAME takes a MARC21 record and ingests into BIBFRAME Redis
datastore
"""
def __init__(self, **kwargs):
super(MARC21toBIBFRAME, self).__init__(**kwargs)
def __duplicate_check__(self, redis_key):
"""Based upon type of redis key, checks if MARC Record is a
duplicate
Parameters:
redis_key -- Redis Key
"""
field907 = self.record['907']
if field907 is not None:
if self.redis_datastore.hexists(redis_key,
field907['a'][1:-1]) is True:
return True
else:
return False
all945s = self.record.get_fields('945')
for field in all945s:
a_subfields = field.get_subfields('a')
for subfield in a_subfields:
data = subfield.split(" ")
# Bibnumber already exists return True
if self.redis_datastore.hexists(redis_key,
data[1]) is True:
return True
return False
def ingest(self):
"Method runs a complete ingestion of a MARC21 record into RLSP"
# Start with either a new or existing Creative Work or subclass
# like Book, Article, MusicalAudio, or MovingImage
if self.__duplicate_check__('ils-bib-numbers') is True:
return
self.marc2creative_work = MARC21toCreativeWork(
redis_datastore=self.redis_datastore,
record=self.record)
self.marc2creative_work.ingest()
# Exit ingest if a creative work is missing
if self.marc2creative_work.creative_work is None:
return
work_key = self.marc2creative_work.creative_work.redis_key
# Add work_key to the relatedRole:aut set, should support other
# roles based on MARC mapping
if self.marc2creative_work.entity_info.has_key('rda:isCreatedBy'):
for creator_key in self.marc2creative_work.entity_info.get('rda:isCreatedBy'):
self.redis_datastore.sadd('{0}:resourceRole:aut'.format(creator_key),
work_key)
# Extract Instance
self.marc2instance = MARC21toInstance(
instanceOf=work_key,
record=self.record,
redis_datastore=self.redis_datastore,)
self.marc2instance.ingest()
self.marc2instance.instance.save()
finish_instance = datetime.datetime.utcnow()
instance_key = self.marc2instance.instance.redis_key
work_instances_key = "{0}:hasInstance".format(work_key)
if self.redis_datastore.exists(work_instances_key):
self.redis_datastore.sadd(work_instances_key,
self.marc2instance.instance.redis_key)
else:
existing_instance_key = self.redis_datastore.hget(
work_key,
'hasInstance')
# Convert hash value to a set if instance_keys are
# different
if existing_instance_key is not None:
if instance_key != existing_instance_key:
# Remove existing instance key from work_key
self.redis_datastore.hdel(work_key, 'instanceOf')
# Add both instance keys to new work set key
self.redis_datastore.sadd(work_instances_key,
instance_key,
existing_instance_key)
# Set hash value for hasInstance singleton
else:
self.redis_datastore.hset(work_key,
'hasInstance',
instance_key)
index_marc(instance_keys=[instance_key,],
record=self.record,
redis_datastore=self.redis_datastore,
work_key=work_key)
self.marc2library_holdings = MARC21toLibraryHolding(
redis_datastore=self.redis_datastore,
record=self.record,
instance=self.marc2instance.instance)
self.marc2library_holdings.ingest()
instance_annotation_key = "{0}:hasAnnotation".format(
self.marc2instance.instance.redis_key)
if self.redis_datastore.hexists(self.marc2instance.instance.redis_key,
'hasAnnotation'):
annotation = self.marc2instance.instance.hasAnnotation
self.redis_datastore.hdel(self.marc2instance.instance.redis_key,
'hasAnnotation')
self.redis_datastore.sadd(instance_annotation_key,
annotation)
for holding in self.marc2library_holdings.holdings:
self.redis_datastore.sadd(instance_annotation_key,
holding.redis_key)
generate_call_number_app(self.marc2instance.instance,
self.redis_datastore)
self.marc2facets = MARC21toFacets(redis_datastore=self.redis_datastore,
record=self.record,
creative_work=self.marc2creative_work.creative_work,
instance=self.marc2instance.instance)
self.marc2facets.ingest()
class MARC21toLibraryHolding(MARC21Ingester):
"Ingests a MARC record into the Redis Library Services Platform"
def __init__(self,**kwargs):
super(MARC21toLibraryHolding,self).__init__(**kwargs)
self.holdings = []
self.is_local = kwargs.get('local', True)
self.instance = kwargs.get('instance', None)
def __add_cc_holdings__(self, cc_key='bf:Organization:1'):
"Helper function for Colorado College MARC Records"
# Assumes hybrid environment
if self.redis_datastore.hget('prospector-institution-codes',
'9cocp') is not None:
cc_key = self.redis_datastore.hget('prospector-institution-codes',
'9cocp')
holding = Holding(redis_datastore=self.redis_datastore)
self.entity_info['ils-bib-number'] = self.record['907']['a'][1:-1]
cc_tutt_code = self.record['994']['a']
if tutt_maps.LOCATION_CODE_MAP.has_key(cc_tutt_code):
location_key = '{0}:codes:{1}'.format(cc_key,
cc_tutt_code)
else:
location_key = cc_key
for key, value in self.entity_info.iteritems():
setattr(holding, key, value)
setattr(holding, 'schema:contentLocation', location_key)
if self.instance is not None:
holding.annotates = self.instance.redis_key
self.redis_datastore.sadd("{0}:resourceRole:own".format(cc_key),
self.instance.redis_key)
holding.save()
if location_key != cc_key:
# Assumes local location
self.redis_datastore.sadd(location_key, holding.redis_key)
if hasattr(holding, 'ils-bib-number'):
self.redis_datastore.hset('ils-bib-numbers',
getattr(holding, 'ils-bib-number'),
holding.redis_key)
self.holdings.append(holding)
def __add_consortium_holdings__(self):
"Helper function for CARL Alliance MARC records"
# quick check if local cc record using 994 field
if self.record['994'] is not None:
self.__add_cc_holdings__()
return
all945s = self.record.get_fields('945')
for field in all945s:
a_subfields = field.get_subfields('a')
for subfield in a_subfields:
holding = Holding(redis_datastore=self.redis_datastore)
data = subfield.split(" ")
institution_code = data[0]
org_key = self.redis_datastore.hget(
'prospector-institution-codes',
institution_code)
setattr(holding, 'schema:contentLocation', org_key)
setattr(holding, 'ils-bib-number', data[1])
setattr(holding, 'ils-item-number', data[2])
for key,value in self.entity_info.iteritems():
setattr(holding, key, value)
if self.instance is not None:
holding.annotates = self.instance.redis_key
# Use MARC Relator Code for set key
self.redis_datastore.sadd(
"{0}:resourceRole:own".format(org_key),
self.instance.redis_key)
holding.save()
self.redis_datastore.hset(
'ils-bib-numbers',
getattr(holding, 'ils-bib-number'),
holding.redis_key)
if self.instance is not None:
instance_annotation_key = "{0}:hasAnnotation".format(
self.instance.redis_key)
self.redis_datastore.sadd(instance_annotation_key,
holding.redis_key)
self.holdings.append(holding)
def add_holdings(self):
"""
Creates one or more Library Holdings based on values in the entity
"""
if self.is_local is True:
# CC specific III MARC record format, should be modified to be more
# generic
self.__add_cc_holdings__()
else:
self.__add_consortium_holdings__()
def ingest(self):
"""
Ingests a MARC21 record and creates a Library Holding resource that
annotates a Creative Work or Instance.
"""
self.extract_ddc()
self.extract_govdoc()
self.extract_lcc()
self.extract_medical()
self.extract_cc_local()
self.extract_udc()
self.add_holdings()
def __extract_callnumber__(self, tags):
"""
Helper function extracts a call number from a resource
:param tags: One or more MARC21 field tags
"""
output = []
fields = self.record.get_fields(*tags)
for field in fields:
subfield_b = field['b']
for subfield in field.get_subfields('a'):
if subfield_b is not None:
output.append("{0} {1}".format(subfield,subfield_b).strip())
else:
output.append(subfield)
if len(output) == 1:
return output[0]
elif len(output) > 1:
return set(output)
else:
return output
def extract_ddc(self):
"""
Extracts the Dewey Decimal Classification call number from a resource
"""
ddc_values = self.__extract_callnumber__(['082',])
if len(ddc_values) > 0:
self.entity_info['callno-ddc'] = ddc_values
def extract_govdoc(self):
"""
Extracts Govdoc call number from a resource
"""
govdocs_values = self.__extract_callnumber__(['086',])
if len(govdocs_values):
self.entity_info['callno-govdoc'] = govdocs_values
def extract_lcc(self):
"""
Extracts LCC call number from a MARC21 record
"""
lcc_values = self.__extract_callnumber__(['050',
'051',
'055',
'061',
'070',
'071'])
if len(lcc_values) > 0:
self.entity_info['callno-lcc'] = lcc_values
def extract_medical(self):
med_callnumbers = self.__extract_callnumber__(['060',])
if len(med_callnumbers) > 0:
self.entity_info['callno-nlm'] = med_callnumbers
def extract_cc_local(self):
"""
Extracts local call number from MARC21 record following Colorado College
practice
"""
local_099 = self.record['099']
if local_099 is not None:
self.entity_info['callno-local'] = local_099.value()
else:
local_090 = self.record['090']
if local_090 is not None and not self.entity_info.has_key('callno-lcc'):
self.entity_info['callno-lcc'] = local_090.value()
def extract_udc(self):
"""
Extracts Universal Decimal Classification Number
"""
udc_values = self.__extract_callnumber__(['080',])
if len(udc_values) > 0:
self.entity_info['callno-udc'] = udc_values
class MARC21toPerson(MARC21Ingester):
"""
MARC21toPerson ingests a MARC record into the BIBFRAME Redis datastore
"""
def __init__(self, **kwargs):
super(MARC21toPerson, self).__init__(**kwargs)
self.person = None
self.people = []
self.field = kwargs.get("field", None)
def __extract_identifier__(self,source_code,feature):
"""
Helper function extracts all identifiers from 024 MARC21 fields,
tests if source_code equals the $2 value and assigns to feature
:param source_code: Source code to be tested
:param feature: Name of the feature
"""
output = []
if self.record is None:
return
fields = self.record.get_fields('024')
for field in fields:
if field.indicator1 == '7':
if field['2'] == source_code:
if field['a'] is not None:
output.append(field['a'])
for subfield in field.get_subfields('z'):
output.append(subfield)
self.redis_datastore.sadd("identifiers:{0}:invalid".format(feature))
if len(output) > 0:
if len(output) == 1:
self.entity_info[feature] = output[0]
else:
self.entity_info[feature] = set(output)
def extractDates(self):
"""
Extracts rda:dateOfBirth and rda:dateOfDeath from MARC21 field
"""
date_range = re.compile(r"(\d+)-*(\d*)")
if self.field is not None and ['100','700','800'].count(self.field.tag) > 0:
if ['0','1'].count(self.field.indicators[0]) > 0:
raw_dates = ''.join(self.field.get_subfields('d'))
if len(raw_dates) > 0:
date_result = date_range.search(raw_dates)
if date_result is not None:
groups = date_result.groups()
if len(groups[0]) > 0:
self.entity_info['rda:dateOfBirth'] = groups[0]
if len(groups[1]) > 0:
self.entity_info['rda:dateOfDeath'] = groups[1]
if self.field.tag == '542':
field542b = self.field.get_subfields('b')
if len(field542b) > 0:
self.entity_info['rda:dateOfDeath'] = ''.join(field542b)
def extract_features(self):
"""
Extracts features of the Person based on MARC21 fields
"""
if self.field is not None and ['100','400','600','700','800'].count(self.field.tag) > 0:
for name in self.field.get_subfields('a'):
raw_names = [r.strip() for r in name.split(',')]
if self.field.indicator1 == '0':
self.entity_info['schema:givenName'] = raw_names[0]
elif self.field.indicator1 == '1':
self.entity_info['schema:familyName'] = raw_names.pop(0)
# Assigns the next raw_name to givenName
for raw_name in raw_names:
tokens = raw_name.split(' ')
if len(tokens[0]) > 0:
if [".",",","/"].count(tokens[0][-1]) > 0:
tokens[0] = tokens[0][:-1]
self.entity_info['schema:givenName'] = tokens[0]
for title in self.field.get_subfields('b'):
if self.entity_info.has_key('schema:honorificPrefix'):
if type(self.entity_info['schema:honorificPrefix']) == list:
self.entity_info['schema:honorificPrefix'].append(title)
else:
self.entity_info['schema:honorificPrefix'] = [self.entity_info['schema:honorificPrefix'], title]
else:
self.entity_info['schema:honorificPrefix'] = title
def extract_isni(self):
"""
Extracts the ISNI (International Standard Name Identifier)
"""
self.__extract_identifier__("isni","isni")
def extract_orcid(self):
"""
Extracts the Open Researcher and Contributor Identifier
"""
self.__extract_identifier__("orcid","orcid")
def extract_preferredNameForThePerson(self):
"""
Extracts RDA's preferredNameForThePerson from MARC21 record
"""
preferred_name = []
if self.field is not None and ['100','700','800'].count(self.field.tag) > 0:
if ['0','1'].count(self.field.indicators[0]) > 0:
preferred_name.extend(self.field.get_subfields('a','b'))
if len(preferred_name) > 0:
raw_name = ' '.join(preferred_name)
if raw_name[-1] == ',':
raw_name = raw_name[:-1]
self.entity_info['rda:preferredNameForThePerson'] = raw_name
def extract_viaf(self):
"""
Extracts the Virtual International Authority File number
"""
self.__extract_identifier__("via,zf","viaf")
def ingest(self):
self.extract_features()
self.extract_preferredNameForThePerson()
self.extract_isni()
self.extract_orcid()
self.extract_viaf()
self.extractDates()
result = get_or_generate_person(self.entity_info,
self.redis_datastore)
if type(result) == list:
self.people = result
else:
self.person = result
self.people.append(self.person)
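# Illustrative sketch (not part of the original module): builds a
# hypothetical 100 field with pymarc and exercises the extraction methods
# that do not require a Redis connection. The name and dates are invented.
def _example_person_extraction():
    "Returns entity_info extracted from a hypothetical 100 field"
    field = pymarc.Field(tag='100',
                         indicators=['1', ' '],
                         subfields=['a', 'Doe, Jane,', 'd', '1901-1985'])
    ingester = MARC21toPerson(field=field)
    ingester.extract_features()
    ingester.extract_preferredNameForThePerson()
    ingester.extractDates()
    # Expected keys include schema:familyName, schema:givenName,
    # rda:preferredNameForThePerson, rda:dateOfBirth and rda:dateOfDeath
    return ingester.entity_info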
class MARC21toSubjects(MARC21Ingester):
"""
MARC21toSubjects ingests a MARC21 record into the BIBFRAME Redis datastore
"""
def __init__(self,**kwargs):
"""
Creates a MARC21toSubject Ingester
"""
super(MARC21toSubjects, self).__init__(**kwargs)
self.creative_work = kwargs.get("work", None)
self.field = kwargs.get("field", None)
self.subjects = []
def add_subdivision(self,subject_key):
"""
Helper function iterates through the common 65x subdivision
fields to create Authority Redis keys in the Redis datastore
:param subject_key: Base subject key used to create subdivision
set keys for each subdivision
"""
redis_pipeline = self.redis_datastore.pipeline()
def add_subdivision(subfield, type_of):
subdivision_key = "{0}:{1}".format(subfield[0],subfield[1])
redis_pipeline.sadd("{0}:{1}".format(subject_key, type_of),
subdivision_key)
self.subjects.append(subdivision_key)
for subfield in self.field.get_subfields('v'):
add_subdivision(subfield, "form")
for subfield in self.field.get_subfields('x'):
add_subdivision(subfield, "general")
for subfield in self.field.get_subfields('y'):
add_subdivision(subfield, 'chronological')
for subfield in self.field.get_subfields('z'):
add_subdivision(subfield, 'geographic')
redis_pipeline.execute()
def extract_genre(self):
"""
Extracts Genre from the MARC21 655 field
"""
if self.field.tag == '655':
subject_key = 'bf:Authority:Subject:Genre:{0}'.format(
''.join(self.field.get_subfields('a')))
self.redis_datastore.sadd(subject_key,
self.creative_work.redis_key)
self.subjects.append(subject_key)
def extract_geographic(self):
"""
Extracts Geographic Subject from MARC21 651 field
"""
if self.field.tag == '651':
subject_key = 'bf:Authority:Subject:Geographic:{0}'.format(
''.join(self.field.get_subfields('a')))
self.subjects.append(subject_key)
self.add_subdivision(subject_key)
def extract_topical(self):
"""
Extracts Topical Subject from MARC21 650 field
"""
if ['650'].count(self.field.tag) > 0:
subject_key = 'bf:Authority:Subject:{0}'.format(
''.join(self.field.get_subfields('a')))
self.subjects.append(subject_key)
self.add_subdivision(subject_key)
def ingest(self):
self.extract_geographic()
self.extract_genre()
self.extract_topical()
class MARC21toCreativeWork(MARC21Ingester):
"RLSP ingester takes a MARC21 record, creates/gets CreativeWork + children"
def __init__(self, **kwargs):
"""Creates a MARC21toCreativeWork Ingester instance.
Keywords:
record -- MARC21 record
"""
super(MARC21toCreativeWork, self).__init__(**kwargs)
self.creative_work, self.work_class = None, None
def __classify_work_class__(self):
"Classifies the work as specific Work class based on BIBFRAME website"
leader = self.record.leader
field007 = self.record['007']
field336 = self.record['336']
if leader[6] == 'a':
if field007 is not None:
test_value = field007.data[0]
if test_value == 'a':
self.work_class = Map
elif test_value == 'd':
self.work_class = Globe
elif test_value == 'h': # Microfilm
self.work_class = StillImage
elif test_value == 'q': # Notated music
self.work_class = NotatedMusic
elif test_value == 'r':
self.work_class = RemoteSensingImage
elif test_value == 's':
self.work_class = NonmusicalAudio
elif ['m', 'v'].count(test_value) > 0:
self.work_class = MovingImage
if self.work_class == None:
# Book is the default for Language Material
self.work_class = Book
elif leader[6] == 'c':
self.work_class = NotatedMusic
elif leader[6] == 'd':
self.work_class = Manuscript
elif leader[6] == 'e' or leader[6] == 'f':
# Cartography is the default
self.work_class = Cartography
if leader[6] == 'f':
self.work_class = Manuscript
if field007 is not None:
if field007.data[0] == 'a':
self.work_class = Map
elif field007.data[0] == 'd':
self.work_class = Globe
elif field007.data[0] == 'r':
self.work_class = RemoteSensingImage
elif leader[6] == 'g':
self.work_class = MovingImage
elif leader[6] == 'i':
self.work_class = NonmusicalAudio
elif leader[6] == 'j':
self.work_class = MusicalAudio
elif leader[6] == 'k':
self.work_class = StillImage
elif leader[6] == 'm':
self.work_class = SoftwareOrMultimedia
elif leader[6] == 'p':
self.work_class = MixedMaterial
elif leader[6] == 'r':
self.work_class = ThreeDimensionalObject
elif leader[6] == 't':
self.work_class = Manuscript
if self.work_class is None:
self.work_class = Work
def extract_creators(self):
"""
Extracts and associates bf:Authority:Person entities as creators
of the work.
"""
people_keys = []
for field in self.record.get_fields('100','700','800'):
if field is not None:
people_ingester = MARC21toPerson(redis_datastore=self.redis_datastore,
field=field)
people_ingester.ingest()
for person in people_ingester.people:
people_keys.append(person.redis_key)
for person_key in people_keys:
if not self.entity_info.has_key('associatedAgent'):
self.entity_info['associatedAgent'] = set()
self.entity_info['associatedAgent'].add(person_key)
if not self.entity_info.has_key('rda:isCreatedBy'):
self.entity_info['rda:isCreatedBy'] = set()
self.entity_info['rda:isCreatedBy'].add(person_key)
def extract_note(self):
"""
Extracts the note for the work
"""
notes = []
fields = self.record.get_fields('500')
for field in fields:
subfield3 = field['3']
subfield_a = " ".join(field.get_subfields('a'))
if subfield3 is not None:
notes.append("{0} {1}".format(subfield3,
subfield_a))
else:
notes.append(subfield_a)
if len(notes) > 0:
self.entity_info["note"] = set(notes)
def extract_performerNote(self):
"Extracts performerNote"
notes = []
fields = self.record.get_fields('511')
for field in fields:
notes.append("Cast: {0}".format(''.join(field.get_subfields('a'))))
if len(notes) > 0:
self.entity_info["performerNote"] = set(notes)
def ingest(self):
"Method ingests MARC Record into RLSP"
self.__classify_work_class__()
self.creative_work = self.work_class(
redis_datastore=self.redis_datastore)
work_titles = []
for attribute, rules in self.creative_work.marc_map.iteritems():
values = []
#! NEED TitleEntity to check for duplicates
if attribute == 'uniformTitle':
pass
if attribute == 'title':
rule = rules[0]
titleValue = ' '.join(self.__rule_one__(rule))
title_entity = TitleEntity(redis_datastore=self.redis_datastore,
titleValue=titleValue,
label=self.record.title())
title_entity.save()
index_title(title_entity, self.redis_datastore)
self.entity_info[attribute] = title_entity.redis_key
work_titles.append(title_entity.redis_key)
continue
for rule in rules:
result = list(set(self.__rule_one__(rule)))
values.extend(result)
if len(values) > 0:
self.entity_info[attribute] = values
# List of specific methods that haven't had Rule regex developed
self.extract_creators()
self.extract_note()
self.extract_performerNote()
self.get_or_add_work()
if self.creative_work is not None:
for title_key in work_titles:
self.redis_datastore.sadd(
"{0}:relatedResources".format(title_key),
self.creative_work.redis_key)
def get_or_add_work(self,
classifier=simple_fuzzy.WorkClassifier):
"""Method returns a new Work or an existing work
Default classifier does a similarity metric; basic similarity is 100% match
(i.e. all fields must match or a new work is created)
This method could use other Machine Learning techniques to improve
the existing match with multiple and complex rule sets.
Keywords
classifier -- Classifier, default is the Simple Fuzzy Work Classifier
"""
# Normalize instanceOf to a set if it is present
if self.entity_info.has_key('instanceOf'):
self.entity_info['instanceOf'] = set(self.entity_info['instanceOf'])
# If the title matches an existing Work's title and the creative work's creators,
# assumes that the Creative Work is the same.
work_classifier = classifier(entity_info=self.entity_info,
redis_datastore=self.redis_datastore,
work_class=self.work_class)
work_classifier.classify()
self.creative_work = work_classifier.creative_work
if self.creative_work is not None:
self.creative_work.save()
class MARC21toTitleEntity(MARCParser):
"Extracts BIBFRAME TitleEntity info from MARC21 record"
def __init__(self, **kwargs):
"""Initializes MARC21toTitleEntity object
Parameters:
"""
kwargs['rules_filename'] = 'bibframe-title-entity-map.json'
super(MARC21toTitleEntity, self).__init__(**kwargs)
self.title_entity = None
def __add_title_entity__(self):
"Helper method adds a new TitleEntity"
self.title_entity = TitleEntity(redis_datastore=self.redis_datastore)
for key, value in self.entity_info.iteritems():
if key is not None and value is not None:
setattr(self.title_entity,
key,
value)
self.title_entity.save()
def __get_or_add_title_entity__(self):
"Helper method returns new or existing TitleEntity"
existing_titles = []
if self.entity_info.get('titleValue') is not None:
title_string = self.entity_info.get('titleValue')
if self.entity_info.get('subtitle') is not None:
title_string += " {0}".format(
self.entity_info.get('subtitle'))
self.entity_info['label'] = title_string
def ingest(self):
"Method finds or creates a TitleEntity in RLSP"
self.parse()
self.__add_title_entity__()
def check_marc_exists(instance_ds, record, marc_tag='907'):
"""
Helper function checks to see the bib number is already associated with
a bibframe:Instance, returns True if that bibnumber already exists,
False otherwise.
:param instance_ds: BIBFRAME Instance
:param record: MARC21 record
:param marc_tag: MARC tag of bib number, default to CC's III 907
field
"""
field = record[marc_tag]
if field is not None:
raw_bib_id = ''.join(field.get_subfields('a'))
# Extract III specific bib number
bib_number = raw_bib_id[1:-1]
if instance_ds.hexists('ils-bib-numbers', bib_number):
return True
return False
def ingest_marcfile(**kwargs):
marc_filename = kwargs.get("marc_filename", None)
redis_datastore = kwargs.get("redis_datastore",
REDIS_DATASTORE)
if IS_CONSORTIUM is not None and IS_CONSORTIUM is True:
# Loads Prospector Consortium Libraries
from themes.prospector.redis_helpers import load_prospector_orgs
if not redis_datastore.exists('prospector-institution-codes'):
load_prospector_orgs(redis_datastore)
if marc_filename is not None:
marc_file = open(marc_filename,'rb')
count = 0
marc_reader = pymarc.MARCReader(marc_file,
## to_unicode=True,
utf8_handling='ignore')
start_time = datetime.datetime.now()
sys.stderr.write("Starting at {0}\n".format(start_time.isoformat()))
for record in marc_reader:
# Need to check if MARC21 record has already been ingested into the
# datastore
if not check_marc_exists(redis_datastore, record):
try:
ingester = MARC21toBIBFRAME(record=record,
redis_datastore=redis_datastore)
ingester.ingest()
except Exception as e:
print("Failed to ingest {0}={1}".format(
count,
e))
if count%1000:
if not count % 100:
sys.stderr.write(".")
else:
sys.stderr.write(str(count))
count += 1
end_time = datetime.datetime.now()
sys.stderr.write("\nFinished at {0} count={1}\n".format(end_time.isoformat(),
count))
sys.stderr.write("Total time elapsed is {0} seconds\n".format((end_time-start_time).seconds))
return count
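# Illustrative usage sketch (not part of the original module); the file
# name below is hypothetical:
#
# total = ingest_marcfile(marc_filename='sample-records.mrc',
#                         redis_datastore=REDIS_DATASTORE)
# print("Ingested {0} records".format(total))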
def info():
print("Current working directory {0}".format(os.getcwd()))
```
#### File: bibframe/ingesters/MODS.py
```python
__author__ = "<NAME>"
import lxml.etree as etree
import urllib2
from aristotle.settings import REDIS_DATASTORE
from bibframe.classifiers import simple_fuzzy
from bibframe.ingesters.Ingester import personal_name_parser, Ingester
from bibframe.ingesters.Ingester import HONORIFIC_PREFIXES, HONORIFIC_SUFFIXES
from bibframe.models import Annotation, Organization, Work, Holding, Instance
from bibframe.models import Person, Audio, Book, Cartography, LanguageMaterial
from bibframe.models import MixedMaterial, MovingImage, MusicalAudio
from bibframe.models import NonmusicalAudio, NotatedMusic
from bibframe.models import SoftwareOrMultimedia, StillImage, TitleEntity
from bibframe.models import ThreeDimensionalObject
from discovery.redis_helpers import slug_to_title
from django.template.defaultfilters import slugify
from person_authority.redis_helpers import get_or_generate_person
from title_search.redis_helpers import index_title
from rdflib import Namespace
from bibframe.ingesters import tutt_maps, marc21_maps
MODS_NS = Namespace('http://www.loc.gov/mods/v3')
class MODSIngesterError(Exception):
"Exception for any errors ingesting MODS into RLSP"
def __init__(self, value):
"Initializes Class"
super(MODSIngesterError, self).__init__()
self.value = value
def __str__(self):
"Returns string representation of Exception"
return repr(self.value)
class MODSIngester(Ingester):
"Class ingests MODS XML files into RLSP"
def __init__(self, **kwargs):
"Class takes standard RLSP BIBFRAME ingester"
super(MODSIngester, self).__init__(**kwargs)
self.work_class = kwargs.get('work_class',
None)
self.classifier = kwargs.get('classifier',
simple_fuzzy.WorkClassifier)
self.contributors = []
self.creators = []
self.instances = []
self.mods_xml = None
def __classify_work_class__(self):
"Helper function classifies a work based on typeOfResource"
if self.work_class is not None:
return
type_of_resource = self.mods_xml.find(
"{{{0}}}typeOfResource".format(MODS_NS))
type_of = type_of_resource.text
# List of enumerated typeOfResource comes from the following website:
# http://www.loc.gov/standards/mods/mods-outline.html#typeOfResource
if type_of == "text":
self.work_class = LanguageMaterial
elif type_of == "cartographic":
self.work_class = Cartography
elif type_of == "notated music":
self.work_class = NotatedMusic
elif type_of == "sound recording-musical":
self.work_class = MusicalAudio
elif type_of == "sound recording-nonmusical":
self.work_class = NonmusicalAudio
elif type_of == "sound recording":
self.work_class = Audio
elif type_of == "still image":
self.work_class = StillImage
elif type_of == "moving image":
self.work_class = MovingImage
elif type_of== "three dimensional object":
self.work_class = ThreeDimensionalObject
elif type_of == "software, multimedia":
self.work_class = SoftwareOrMultimedia
elif type_of == "mixed material":
self.work_class = MixedMaterial
else:
self.work_class = Work
def __create_facets__(self, instance, location_name):
"Helper function creates facets from instance"
instance_annotation_key = "{0}:hasAnnotation".format(instance.redis_key)
# Access Facet
if getattr(instance,
'rda:carrierTypeManifestation') == 'online resource':
access_facet_key = "bf:Facet:access:online"
if not self.redis_datastore.hexists('bf:Facet:labels',
access_facet_key):
self.redis_datastore.hset('bf:Facet:labels',
access_facet_key,
'Online')
else:
access_facet_key = 'bf:Facet:access:in-the-library'
self.redis_datastore.sadd(access_facet_key, instance.redis_key)
self.redis_datastore.sadd(instance_annotation_key, access_facet_key)
# Format Facet
format_value = getattr(instance, 'rda:carrierTypeManifestation')
if format_value == 'online resource':
format_value = 'Electronic'
format_facet_key = 'bf:Facet:format:{0}'.format(slugify(format_value))
self.redis_datastore.sadd(format_facet_key, instance.redis_key)
self.redis_datastore.zadd(
'bf:Facet:format:sort',
float(self.redis_datastore.scard(format_facet_key)),
format_facet_key)
self.redis_datastore.sadd(instance_annotation_key, format_facet_key)
# Language Facet
languages = getattr(instance,
'language')
lang_keys = []
if type(languages) != str:
for language in languages:
lang_keys.append('bf:Facet:language:{0}'.format(
slugify(language)))
else:
lang_keys.append(languages)
for lang_key in lang_keys:
self.redis_datastore.sadd(lang_key, instance.redis_key)
self.redis_datastore.zadd(
'bf:Facet:language:sort',
float(self.redis_datastore.scard(lang_key)),
lang_key)
self.redis_datastore.sadd(instance_annotation_key, format_facet_key)
# Location Facet
location_facet_key = 'bf:Facet:location:{0}'.format(
slugify(location_name))
self.redis_datastore.sadd(location_facet_key, instance.redis_key)
self.redis_datastore.zadd(
"bf:Facet:locations:sort",
float(self.redis_datastore.scard(location_facet_key)),
location_facet_key)
# Publisher Date
def __create_holding__(self, instance, location_code):
"""Helper function creates a library holding entity for instance
Parameters:
instance -- BIBFRAME instance
"""
new_holding = Holding(redis_datastore=self.redis_datastore,
annotates=instance.redis_key)
setattr(new_holding, 'schema:contentLocation', location_code)
new_holding.save()
self.redis_datastore.sadd(
'{0}:hasAnnotation'.format(instance.redis_key),
new_holding.redis_key)
def __create_instances__(self, work_key, rdacarrier):
"""Helper function creates specific instance(s) from MODS
Parameter:
work_key -- Work Key to be associated with BIBFRAME Instance(s)
"""
# Create an instance for each originInfo element in MODS
origin_infos = self.mods_xml.findall('{{{0}}}originInfo'.format(
MODS_NS))
form = self.mods_xml.find(
'{{{0}}}physicalDescription/{{{0}}}form'.format(MODS_NS))
for element in origin_infos:
instance_of = {'instanceOf': work_key}
if form is not None:
if form.attrib.get('type', None) == 'carrier':
if form.attrib.get('authority', None) == 'rdacarrier':
instance_of['rda:carrierTypeManifestation'] = form.text
# Assumes a default of an online resource for
# rda:carrierTypeManifestation
if not instance_of.has_key('rda:carrierTypeManifestation'):
instance_of['rda:carrierTypeManifestation'] = rdacarrier
extent = element.find('{{{0}}}extent'.format(MODS_NS))
if extent is not None:
if extent.text is not None:
instance_of['extent'] = extent.text
hdl = self.__extract_hdl__()
if hdl is not None:
instance_of['hdl'] = hdl
language = self.__extract_languages__()
if language is not None:
instance_of['language'] = language
new_instance = Instance(redis_datastore=self.redis_datastore,
**instance_of)
new_instance.save()
self.__create_holding__(new_instance,
'bf:Organization:1:codes:dacc')
self.__create_facets__(new_instance,
'Digital Archives of Colorado College')
self.instances.append(new_instance.redis_key)
def __extract_description__(self):
"Helper method extracts the description from mods:abstract"
abstract = self.mods_xml.find('{{{0}}}abstract'.format(MODS_NS))
if abstract is not None:
description = abstract.text
if description is not None:
return description.encode('utf8', errors='ignore')
def __extract_languages__(self):
output = []
languages = self.mods_xml.findall(
"{{{0}}}language/{{{0}}}languageTerm".format(MODS_NS))
for language in languages:
if marc21_maps.LANGUAGE_CODING_MAP.has_key(language.text):
output.append(marc21_maps.LANGUAGE_CODING_MAP.get(language.text))
else:
output.append(language.text)
return output
def __extract_person__(self, name_parts):
"""Helper method takes a list of nameParts and extracts info
Parameter:
name_parts -- List of namePart elements
"""
person = dict()
# Assumes multiple nameParts use type attribute
# to parse out specifics
for row in name_parts:
name_type = row.attrib.get('type', None)
if row.text is None:
continue
name_value = row.text.strip()
if name_type == 'given':
person['schema:givenName'] = name_value
elif name_type == 'family':
person['schema:familyName'] = name_value
elif name_type == 'date':
person['rdf:dateOfBirth'] = name_value
elif name_type == 'termsOfAddress':
name_value = row.text
if HONORIFIC_PREFIXES.count(name_value) > 0:
person['schema:honorificPrefix'] = \
name_value
elif HONORIFIC_SUFFIXES.count(name_value) > 0:
person['honorificSuffix'] = name_value
# No type given tries parsing name and update
# person dict if key doesn't exist
else:
result = personal_name_parser(row.text)
for key, value in result.iteritems():
if not person.has_key(key):
# Filter for embedded editor values
if value.count('editor') > 0:
person['resourceRole:edt'] = value
else:
person[key] = value
# Create an rda:rda:preferredNameForThePerson if it doesn't
# exist
if person.has_key("rda:preferredNameForThePerson") is False:
person["rda:preferredNameForThePerson"] = "{1}, {0}".format(
person.get('schema:givenName', ''),
person.get('schema:familyName', ''))
if person.has_key('schema:honorificPrefix'):
person["rda:preferredNameForThePerson"] = "{0}. {1}".format(
person['schema:honorificPrefix'],
person["rda:preferredNameForThePerson"])
if person.has_key('honorificSuffix'):
person["rda:preferredNameForThePerson"] = "{0} {1}".format(
person["rda:preferredNameForThePerson"],
person['honorificSuffix'])
if person.has_key('rdf:dateOfBirth'):
person["rda:preferredNameForThePerson"] = "{0}, {1}-".format(
person["rda:preferredNameForThePerson"],
person['rdf:dateOfBirth'])
if person.has_key('rdf:dateOfDeath'):
person["rda:preferredNameForThePerson"] = "{0}{1}".format(
person["rda:preferredNameForThePerson"],
person['rdf:dateOfDeath'])
return person
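    # Illustrative result (a hypothetical set of MODS nameParts whose type
    # attributes 'given', 'family', and 'date' hold "Jane", "Austen", and
    # "1775"):
    #
    #   {'schema:givenName': 'Jane',
    #    'schema:familyName': 'Austen',
    #    'rdf:dateOfBirth': '1775',
    #    'rda:preferredNameForThePerson': 'Austen, Jane, 1775-'}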
def __extract_persons__(self):
"Helper function extracts all creators from MODS xml"
names = self.mods_xml.findall('{{{0}}}name'.format(MODS_NS))
for name in names:
person = None
name_parts = name.findall('{{{0}}}namePart'.format(MODS_NS))
if len(name_parts) < 1:
continue
# Checks role/roleTerm to see if role is a creator
role = name.find('{{{0}}}role/{{{0}}}roleTerm'.format(MODS_NS))
if role is None:
continue
person = self.__extract_person__(name_parts)
if len(person) < 1:
continue
result = get_or_generate_person(person,
self.redis_datastore)
if role.text == 'creator':
person_group = self.creators
elif role.text == 'contributor':
person_group = self.contributors
else:
# Add more roles as they are needed
continue
if type(result) == list:
for person in result:
person_group.append(person)
elif type(result) == Person:
person_group.append(result)
def __extract_hdl__(self):
location_urls = self.mods_xml.findall('{{{0}}}location/{{{0}}}url'.format(
MODS_NS))
for url in location_urls:
if url.text.startswith('http://hdl'):
return url.text
def __extract_title__(self):
"Helper function extracts title information from MODS"
title_entities = []
titleInfos = self.mods_xml.findall('{{{0}}}titleInfo'.format(MODS_NS))
for titleInfo in titleInfos:
output = {}
            if titleInfo.attrib.get('type') is None:
                # equivalent to MARC 245 $a
titleValue = titleInfo.find('{{{0}}}title'.format(MODS_NS))
if titleValue is not None and len(titleValue.text) > 0:
output['titleValue'] = titleValue.text
output['label'] = output['titleValue']
                # equivalent to MARC 245 $b
subtitle = titleInfo.find('{{{0}}}subTitle'.format(MODS_NS))
if subtitle is not None and len(subtitle.text) > 0:
output['subtitle'] = subtitle.text
output['label'] = '{0}: {1}'.format(output.get('label'),
output['subtitle'])
                # equivalent to MARC 245 $p
partTitle = titleInfo.find('{{{0}}}partName'.format(MODS_NS))
if partTitle is not None and len(partTitle.text) > 0:
output['partTitle'] = partTitle.text
if len(output) > 0:
title_entity = TitleEntity(redis_datastore=self.redis_datastore,
**output)
title_entity.save()
index_title(title_entity, self.redis_datastore)
title_entities.append(title_entity.redis_key)
return title_entities
def __ingest__(self):
"Helper function extracts info from MODS and ingests into RLSP"
if self.mods_xml is None:
raise MODSIngesterError("Ingest requires valid MODS XML")
self.contributors = []
self.creators = []
self.instances = []
work = dict()
self.__extract_persons__()
if self.creators is not None:
work['rda:isCreatedBy'] = set([creator.redis_key
for creator in self.creators])
work['associatedAgents'] = work['rda:isCreatedBy']
if self.contributors is not None:
work['rda:contributor'] = [contributor.redis_key
for contributor in self.contributors]
if work.has_key('associatedAgents'):
for redis_key in work['rda:contributor']:
work['associatedAgents'].add(redis_key)
else:
work['associatedAgents'] = set(work['rda:contributor'])
try:
title_entities = self.__extract_title__()
if len(title_entities) == 1:
work['title'] = title_entities[0]
else:
work['title'] = ' '.join(title_entities)
except ValueError:
return
description = self.__extract_description__()
if description is not None:
work['description'] = description
self.__classify_work_class__()
classifier = self.classifier(entity_info=work,
redis_datastore=self.redis_datastore,
work_class=self.work_class)
classifier.classify()
if classifier.creative_work is not None:
classifier.creative_work.save()
work_key = classifier.creative_work.redis_key
# Adds work_key to title entity relatedResources set
self.redis_datastore.sadd(
"{0}:relatedResources".format(
classifier.creative_work.title),
work_key)
for creator_key in work['rda:isCreatedBy']:
self.redis_datastore.sadd(
'{0}:resourceRole:aut'.format(creator_key),
work_key)
self.__create_instances__(work_key, 'online resource')
if len(self.instances) == 1:
self.redis_datastore.hset(work_key,
'hasInstance',
self.instances[0])
elif len(self.instances) > 1:
for instance_key in self.instances:
self.redis_datastore.sadd(
"{0}:hasInstance".format(work_key),
instance_key)
def ingest_file(self, mods_filepath):
"""Method ingests a MODS XML file into RLSP
Parameters:
mods_filepath -- File and path to MODS XML
"""
mods_file = open(mods_filepath, 'rb')
self.mods_xml = etree.XML(mods_file.read())
mods_file.close()
self.__ingest__()
def ingest_url(self, url):
"""Method ingests a MODS XML URL into RLSP
Parameters:
url -- URL to MODS XML
"""
self.mods_xml = etree.XML(urllib2.urlopen(url).read())
self.__ingest__()
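    # Illustrative usage (file path and URL are hypothetical; assumes an
    # instance of this ingester class constructed with a redis_datastore,
    # as elsewhere in the application):
    #
    #   ingester.ingest_file('/path/to/record.mods.xml')
    #   ingester.ingest_url('http://example.org/record.mods.xml')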
```
#### File: aristotle-library-apps/call_number/redis_helpers.py
```python
__author__ = "<NAME>"
import pymarc,redis,re
import logging,sys
from app_settings import APP,SEED_RECORD_ID
import aristotle.settings as settings
from aristotle.settings import REDIS_DATASTORE
# Module-level alias used by the call number helper functions below
redis_datastore = REDIS_DATASTORE
english_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
'Y', 'Z']
lccn_first_cutter_re = re.compile(r"^(\D+)(\d+)")
#lc_regex = re.compile(r"^(?P<leading>[A-Z]{1,3})(?P<number>\d{1,4}.?\w{0,1}\d*)\s*(?P<decimal>[.|\w]*\d*)\s*(?P<cutter1alpha>\w*)\s*(?P<last>\d*)")
lc_regex = re.compile(r"^(?P<leading>[A-Z]{1,3})(?P<number>\d{1,4}.?\d{0,1}\d*)\s*(?P<cutter1>[.|\w]*\d*)\s*(?P<cutter2>\w*)\s*(?P<last>\d*)")
def generate_call_number_app(instance,
redis_datastore):
"""
    Helper function takes a BIBFRAME Instance, extracts any call numbers from
    any associated Library Holdings, and creates the supporting
    Redis datastructures for the call number app
    Parameters:
    instance -- BIBFRAME Instance
    redis_datastore -- Redis Instance or Redis Cluster
"""
has_annotations_key = "{0}:hasAnnotation".format(instance.redis_key)
annotations = redis_datastore.smembers(has_annotations_key)
for annotation_key in annotations:
if annotation_key.startswith('bf:Holding'):
if redis_datastore.hexists(annotation_key,'callno-lcc'):
callno_lcc = redis_datastore.hget(annotation_key,
'callno-lcc')
redis_datastore.hset('lcc-hash',
callno_lcc,
annotation_key)
normalized_call_number = lcc_normalize(callno_lcc)
redis_datastore.hset('lcc-normalized-hash',
normalized_call_number,
annotation_key)
redis_datastore.zadd('lcc-sort-set',
0,
normalized_call_number)
if redis_datastore.hexists(annotation_key,'callno-govdoc'):
callno_govdoc = redis_datastore.hget(annotation_key,
'callno-govdoc')
redis_datastore.hset('govdoc-hash',
callno_govdoc,
annotation_key)
redis_datastore.zadd('govdoc-sort-set',
0,
callno_govdoc)
if redis_datastore.hexists(annotation_key,'callno-local'):
callno_local = redis_datastore.hget(annotation_key,
'callno-local')
redis_datastore.hset('local-hash',
callno_local,
annotation_key)
redis_datastore.zadd('local-sort-set',
0,
callno_local)
for name in ['isbn','issn','lccn']:
if hasattr(instance,name) and getattr(instance,name) is not None:
id_value = getattr(instance, name)
if type(id_value) == set:
id_value = list(id_value)
elif type(id_value) == str:
id_value = [id_value, ]
for value in id_value:
redis_datastore.hset('{0}-hash'.format(name),
value,
instance.redis_key)
redis_datastore.zadd('{0}-sort-set'.format(name),
0,
value)
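# Illustrative summary of the Redis structures maintained by
# generate_call_number_app (key names come from the code above):
#
#   lcc-hash             raw LCC call number      -> bf:Holding key
#   lcc-normalized-hash  normalized call number   -> bf:Holding key
#   lcc-sort-set         sorted set of normalized LCC call numbers
#   govdoc-hash / govdoc-sort-set and local-hash / local-sort-set
#   isbn-, issn-, lccn- hashes and sort sets for Instance identifiers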
def get_all(call_number,slice_size=10):
"""
    Function returns a list of call numbers centered on the param call_number,
    extending slice_size entries in either direction
:param call_number: Call Number as stored in call-number-sort-set
:param slice_size: Slice size, default is 10
:rtype list: List of call numbers
"""
lccn_rank = redis_datastore.zrank('lccn-sort-set',call_number)
if lccn_rank is not None:
return redis_datastore.zrange('lccn-sort-set',
lccn_rank-slice_size,
lccn_rank+slice_size)
sudoc_rank = redis_datastore.zrank('sudoc-sort-set',call_number)
if sudoc_rank is not None:
return redis_datastore.zrange('sudoc-sort-set',
sudoc_rank-slice_size,
                                      sudoc_rank+slice_size)
local_rank = redis_datastore.zrank('local-sort-set',call_number)
if local_rank is not None:
return redis_datastore.zrange('local-sort-set',
local_rank-slice_size,
local_rank+slice_size)
def get_previous(call_number,
call_number_type='lccn'):
"""
    Function returns a list of two records that precede the current
param call_number using the get_slice method.
:param call_number: Call Number String
:param call_number_type: Type of call number (lccn, sudoc, or local)
:rtype list: List of two records
"""
current_rank = get_rank(call_number,
call_number_type=call_number_type)
if current_rank is None:
return None
return get_slice(current_rank-2,
current_rank-1,
call_number_type)
def get_next(call_number,
call_number_type='lcc'):
"""
Function returns a list of two records that follow the current
param call_number using the get_slice method.
:param call_number: Call Number String
:param call_number_type: Type of call number (lccn, sudoc, or local)
:rtype list: List of two records
"""
current_rank = get_rank(call_number,
call_number_type=call_number_type)
if current_rank is None:
return None
return get_slice(current_rank+1,
current_rank+2,
call_number_type)
def get_rank(call_number,
call_number_type='lcc'):
"""
Function takes a call_number, iterates through Redis datastore hash values
for lccn, sudoc, and local, and if call_number is present returns the
rank from the sorted set.
:param call_number: Call Number String
    :param call_number_type: Type of call number (lcc, sudoc, or local)
:rtype integer or None:
"""
current_rank = -1
hash_key = "{0}-hash".format(call_number_type)
sort_set_key = '{0}-sort-set'.format(call_number_type)
if redis_datastore.exists(hash_key):
# Currently we are only creating normalized values for LCC call number
if call_number_type == 'lcc':
normalized_call_number = lcc_normalize(call_number)
current_rank = redis_datastore.zrank(sort_set_key,
normalized_call_number)
else:
current_rank = redis_datastore.zrank(sort_set_key,
call_number)
elif redis_datastore.exists(hash_key):
current_rank = redis_datastore.zrank(sort_set_key,
call_number)
return current_rank
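# Illustrative usage of the ranking helpers above (the call number value is
# hypothetical):
#
#   rank = get_rank('QA76.73 .P98', call_number_type='lcc')
#   next_two = get_next('QA76.73 .P98', call_number_type='lcc')
#   prev_two = get_previous('QA76.73 .P98', call_number_type='lcc')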
def get_slice(start,stop,
call_number_type='lcc'):
"""
Function gets a list of entities saved as Redis records
:param start: Beginning of slice of sorted call number
:param stop: End of slice of sorted call numbers
:param call_number_type: Type of call number (lccn, sudoc, or local), defaults
to lcc.
:rtype: List of entities saved as Redis records
"""
entities = []
hash_key = '{0}-hash'.format(call_number_type)
sort_set_key = '{0}-sort-set'.format(call_number_type)
if redis_datastore.exists(sort_set_key):
record_slice = redis_datastore.zrange(sort_set_key,
start,
stop)
elif redis_datastore.exists(sort_set_key):
record_slice = redis_datastore.zrange(sort_set_key,
start,
stop)
else:
raise ValueError("get_slice error, {0} not in Annotation or Instance Redis instances".format(sort_set_key))
for number in record_slice:
if call_number_type == 'lcc':
annotation_key = redis_datastore.hget('lcc-normalized-hash',
number)
entity_key = redis_datastore.hget(annotation_key,'annotates')
call_number = redis_datastore.hget(annotation_key,'callno-lcc')
elif redis_datastore.exists(hash_key):
annotation_key = redis_datastore.hget(hash_key,number)
entity_key = redis_datastore.hget(annotation_key,'annotates')
call_number = redis_datastore.hget(annotation_key,"callno-{0}".format(call_number_type))
elif redis_datastore.exists(hash_key):
entity_key = redis_datastore.hget(hash_key, number)
call_number = redis_datastore.hget(entity_key,call_number_type)
record = get_record(call_number=call_number,
instance_key=entity_key)
entities.append(record)
return entities
def get_record(**kwargs):
record_info = {'call_number':kwargs.get('call_number')}
if kwargs.has_key('work_key'):
record_info['work_key'] = kwargs.get('work_key')
elif kwargs.has_key('instance_key'):
instance_key = kwargs.get('instance_key')
record_info['work_key'] = redis_datastore.hget(instance_key, 'instanceOf')
else:
# Try searching for call_number in Instance datastores
for name in ['isbn','issn']:
if redis_datastore.hexists("{0}-hash".format(name),
record_info['call_number']):
instance_key = redis_datastore.hget("{0}-hash".format(name),
record_info['call_number'])
record_info['work_key'] = redis_datastore.hget(instance_key, 'instanceOf')
record_info['type_of'] = name
break
        # Tries searching for call_number in Annotation datastore
if 'work_key' not in record_info:
for name in ['lcc','govdoc','local']:
hash_key = "{0}-hash".format(name)
if redis_datastore.hexists(hash_key,
record_info['call_number']):
record_info['type_of'] = name
holding_key = redis_datastore.hget(hash_key,
record_info['call_number'])
instance_key = redis_datastore.hget(holding_key,
"annotates")
record_info['work_key'] = redis_datastore.hget(instance_key,
'instanceOf')
break
record_info['title'] = redis_datastore.hget("{0}:title".format(record_info['work_key']),
'rda:preferredTitleForTheWork')
record_info['title'] = unicode(record_info['title'], encoding="utf-8", errors="ignore")
if redis_datastore.exists('{0}:rda:isCreatedBy'.format(record_info['work_key'])):
creator_keys = list(redis_datastore.smembers('{0}:rda:isCreatedBy'.format(record_info['work_key'])))
elif redis_datastore.hexists(record_info['work_key'],'rda:isCreatedBy'):
creator_keys = [redis_datastore.hget(record_info['work_key'],
'rda:isCreatedBy'),]
else:
creator_keys = []
if len(creator_keys) > 0:
creator = redis_datastore.hget(creator_keys[0],"rda:preferredNameForThePerson")
if len(creator_keys) > 1:
            creator += ' et al.'
record_info['authors'] = unicode(creator,encoding="utf-8",errors='ignore')
return record_info
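# Illustrative shape of the dict returned by get_record (all values are
# hypothetical):
#
#   {'call_number': 'QA76.73 .P98',
#    'type_of': 'lcc',
#    'work_key': 'bf:Work:1',
#    'title': u'...',
#    'authors': u'...'}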
def lcc_normalize(raw_callnumber):
"""
    Function based on Bill Dueber's algorithm at
<http://code.google.com/p/library-callnumber-lc/wiki/Home>
"""
callnumber_regex = lc_regex.search(raw_callnumber)
output = None
if callnumber_regex is not None:
callnumber_result = callnumber_regex.groupdict()
output = '%s ' % callnumber_result.get('leading')
number = callnumber_result.get('number')
number_lst = number.split(".")
output += '{:>04}'.format(number_lst[0])
if len(number_lst) == 2:
output += '{:<02}'.format(number_lst[1])
cutter1 = callnumber_result.get('cutter1')
if len(cutter1) > 0:
cutter1 = cutter1.replace('.','')
output += '{:<04}'.format(cutter1)
cutter2 = callnumber_result.get('cutter2')
if len(cutter2) > 0:
cutter2 = cutter2.replace('.','')
output += '{:<04}'.format(cutter2)
return output
def lccn_set(identifiers_key,
call_number,
redis_datastore,
redis_key):
"""
Sets hash and sorted set for normalized and raw call numbers for
LCCN call numbers
:param identifiers_key: Key to the RDA Records rdaIdentifiersForTheExpression
:param call_number: LCCN Call number
:param redis_datastore: Redis Server
:param redis_key: Redis key
"""
redis_datastore.hset(identifiers_key,
'lccn',
call_number)
normalized_call_number = lcc_normalize(call_number)
redis_datastore.hset(identifiers_key,
'lccn-normalized',
normalized_call_number)
redis_datastore.hset('lccn-hash',
call_number,
redis_key)
redis_datastore.hset('lccn-normalized-hash',
normalized_call_number,
redis_key)
redis_datastore.zadd('lccn-sort-set',
0,
normalized_call_number)
```
#### File: aristotle-library-apps/call_number/tests.py
```python
__author__ = "<NAME>"
from django.test import TestCase
from django.test.client import Client
from redis_helpers import lcc_normalize
web_client = Client()
class WidgetTest(TestCase):
def test_view(self):
widget_response = web_client.get('/apps/call_number/widget')
self.assertEquals(widget_response.status_code,
200)
class LCCNNormalizeTest(TestCase):
def test_normalization(self):
        self.assertEquals('A 0001',
                          lcc_normalize('A1'))
        self.assertEquals('B 002230',
                          lcc_normalize('B22.3'))
        self.assertEquals('C 000100D110',
                          lcc_normalize('C1.D11'))
        self.assertEquals('D 001540D220 000 000 1990',
                          lcc_normalize('D15.4 .D22 1990'))
```
#### File: jermnelson/aristotle-library-apps/fabfile.py
```python
__author__ = "<NAME>"
import os, sys
from fabric.api import local
def ingest_shards_bibframe(location="."):
"""
    Function takes a location containing shards of MARC21 files and ingests
    them into the BIBFRAME Redis datastores.
:param location: Location, defaults to current location
"""
shard_walker = next(os.walk(location))[2]
for filename in shard_walker:
file_ext = os.path.splitext(filename)[1]
if file_ext == '.marc' or file_ext == '.mrc':
local('python manage.py ingest_marc {0}'.format(
os.path.join(location,filename)))
def test_all():
local("./manage.py test")
def prepare_deploy():
test_all()
```
#### File: aristotle-library-apps/fedora_utilities/forms.py
```python
__author__ = "<NAME>"
import datetime
from django import forms
from fedora_utilities.models import *
from eulfedora.server import Repository
from eulfedora.util import RequestFailed
repository = Repository()
DIGITAL_ORIGIN = [(1, 'born digital'),
(2, 'reformatted digital'),
(3, 'digitized microfilm'),
(4, 'digitized other analog')]
GENRE = [('choose', 'Choose...')]
INSTITUTION_NAME = 'Colorado College'
MARC_FREQUENCY = [('choose', 'Choose...'),
('Semiweekly', 'Semiweekly - 2 times a week'),
('Three times a week', 'Three times a week'),
('Weekly', 'Weekly'),
('Biweekly', 'Biweekly - every 2 weeks'),
('Three times a month', 'Three times a month'),
('Semimonthly', 'Semimonthly - 2 times a month'),
('Monthly', 'Monthly'),
('Bimonthly', 'Bimonthly - every 2 months'),
('Quarterly', 'Quarterly'),
('Three times a year', 'Three times a year'),
('Semiannual', 'Semiannual - 2 times a year'),
('Annual', 'Annual'),
('Biennial', 'Biennial - every 2 years'),
('Triennial', 'Triennial - every 3 years'),
('Completely irregular', 'Completely irregular')]
OBJECT_TEMPLATES = [(0, 'Choose model'),
(1, 'Meeting Minutes'),
(2, 'Newsletter'),
(3, 'Podcast'),
(4, 'Video'),
(5, 'Master (All fields)')]
RIGHTS_STATEMENT = "Copyright restrictions apply. Contact Colorado College for permission to publish."
PLACE = 'Colorado Springs (Colo.)'
PUBLISHER = "Colorado College"
PUBLICATION_PLACE = 'Colorado Springs, Colorado'
class AddFedoraObjectFromTemplate(forms.Form):
admin_note = forms.CharField(label='Administrative Notes',
max_length=1500,
required=False,
widget=forms.Textarea(
attrs={'rows':5,
'class': 'form-control'}))
alt_title = forms.CharField(label='Alternative Title',
required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
collection_pid = forms.CharField(max_length=20,
label="PID of Parent Collection",
widget=forms.TextInput(
attrs={'class': 'form-control'}))
contributors = forms.CharField(required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
corporate_contributors = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
corporate_creators = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
creators = forms.CharField(required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
date_created = forms.CharField(label='Date Created',
required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
digital_origin = forms.ChoiceField(choices=DIGITAL_ORIGIN,
label='Digital Origin',
initial=1,
widget=forms.Select(
attrs={
'class': 'form-control'}))
description = forms.CharField(label='Description',
max_length=1500,
widget=forms.Textarea(
attrs={'class': 'form-control',
'rows':5}),
required=False)
extent = forms.CharField(label='Extent',
max_length=1500,
widget=forms.Textarea(
attrs={'rows':5,
'class': 'form-control',
'data-bind': 'value: extentValue'}),
required=False)
form = forms.CharField(label='Form',
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'data-bind': 'value: formValue'}))
frequency_free_form = forms.CharField(label='Other',
required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
frequency = forms.ChoiceField(choices=MARC_FREQUENCY,
label='Frequency',
required=False,
widget=forms.Select(
attrs={'class': 'form-control'}))
genre = forms.ChoiceField(
label='Genre',
required=False,
widget=forms.Select(
attrs={'data-bind': "options: genreOptions, optionsText: 'name', optionsValue: 'value'",
'class': 'form-control'}))
genre_free_form = forms.CharField(label='Other',
required=False,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
number_objects = forms.CharField(initial=1,
label='Number of stub records',
max_length=5,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
object_template = forms.ChoiceField(label='Content Model Template',
choices=OBJECT_TEMPLATES,
widget=forms.Select(
attrs={
'class': 'form-control',
'data-bind':'value: chosenContentModel, click: displayContentModel'}))
organizations = forms.CharField(max_length=255,
required=False,
initial=INSTITUTION_NAME,
widget=forms.TextInput(
attrs={'class': 'form-control'}))
rights_holder = forms.CharField(max_length=255,
label='Rights Statement',
initial=RIGHTS_STATEMENT,
widget=forms.Textarea(
attrs={'rows': 3,
'class': 'form-control'}))
subject_dates = forms.CharField(label='Subject -- Dates',
required=False,
widget=forms.TextInput(
{'class': 'form-control'}))
subject_people = forms.CharField(label='Subject -- People',
required=False,
widget=forms.TextInput(
{'class': 'form-control'}))
subject_places = forms.CharField(label='Subject -- Places',
required=False,
initial=PLACE,
widget=forms.TextInput(
{'class': 'form-control'}))
subject_topics = forms.CharField(
label='Subject -- Topic',
required=False,
widget=forms.TextInput(
attrs={'data-bind': 'value: topicOne',
'class': 'form-control'}))
title = forms.CharField(max_length=120,
label='Title',
widget=forms.TextInput(
attrs={'class': 'form-control'}))
type_of_resource = forms.CharField(
label='Type of Resource',
required=False,
widget=forms.TextInput(
attrs={'data-bind': 'value: typeOfResource',
'class': 'form-control'}))
def clean(self):
if self._errors.has_key('genre'):
del self._errors['genre']
return self.cleaned_data
class BatchIngestForm(forms.Form):
collection_pid = forms.CharField(max_length=20)
compressed_file = forms.FileField(label="A .tar or .zip file",
required=False)
## target_directory = forms.FileField(label="Select Directory to upload",
## required=False,
## widget=forms.ClearableFileInput(attrs={"webkitdirectory":"",
## "directory":"",
## "mozdirectory":""}))
class BatchModifyMetadataForm(forms.ModelForm):
class Meta:
model = BatchModifyMetadataLog
        exclude = ('created_on',)
class ObjectMovementForm(forms.Form):
"""
`MoverForm` allows a user to input a Fedora Commons Repository PID and
a new parent collection PID for moving the object.
"""
collection_pid = forms.CharField(max_length=20,
label="PID of target collection",
help_text='PID of target collection')
source_pid = forms.CharField(max_length=20,
label="PID of source PID",
help_text='PID of source Fedora Object')
def clean_collection_pid(self):
"""
        Custom clean method for :class:`MoverForm.collection_pid` checks to see
        if the PID exists in the Repository and raises :class:`forms.ValidationError`
        if the PID is not present.
"""
data = self.cleaned_data['collection_pid']
if data is not None:
try:
collection_object = repository.api.getObjectHistory(pid=data)
except RequestFailed:
raise forms.ValidationError("Collection PID %s not found in repository" % data)
return data
```
#### File: aristotle-library-apps/fedora_utilities/solr_helpers.py
```python
__author__= "<NAME>, <NAME>, <NAME>"
import xml.etree.ElementTree as etree
import aristotle.settings as settings
import sunburnt
from app_helpers import repository
from multiprocessing import Process, Queue
print("AFTER IMPORT")
SOLR_QUEUE = Queue(maxsize=5)
MODS_NS = 'http://www.loc.gov/mods/v3'
solr_server = sunburnt.SolrInterface(settings.SOLR_URL)
FIELDNAMES = [
'access', # Should have a constant value of "Online"
'author', #namePart
'bib_num', # Pid
'contents', # Should be all of the text of a transcription (if present)
'format', #! Incorrect this should not be a subject
'full_title', #title
'id', #system generated, should be the PID
'location', #! Incorrect, this should be a constant of dacc
'notes', #! Incorrect, only include public access notes (not record notes), abstract
'personal_name', #namePart
'summary', # abstract
'title', # title
'topic', #subject
'url', # Should be the URL in the location
]
def get_title(mods):
"""
Function takes the objects MODS and extracts and returns the text of the title.
"""
title = mods.find("{{{0}}}titleInfo/{{{0}}}title".format(MODS_NS))
if title is not None:
return title.text
def get_topics(mods):
"""
    Function takes the object's MODS and returns the text of the topics.
"""
output = []
topics = mods.findall("{{{0}}}subject/{{{0}}}topic".format(MODS_NS))
for topic in topics:
output.append(topic.text)
return output
def get_creators(mods):
"""
Function takes the object's MODS and extracts and returns the text of the
author or creator.
:param mods: Etree XML of MODS datastream
:rtype: List of creator names
"""
output = []
all_names = mods.findall("{{{0}}}name".format(MODS_NS))
for name in all_names:
roleTerm = name.find("{{{0}}}role/{{{0}}}roleTerm".format(MODS_NS))
if roleTerm.text == 'creator':
namePart = name.find("{{{0}}}namePart".format(MODS_NS))
output.append(namePart.text)
return output
def get_description(mods):
"""
Extracts a description from various MODS elements
:param mods: Etree XML of MODS datastream
:rtype: A list of description strings
"""
output = []
physical_desc = mods.find("{{{0}}}physicalDescription".format(MODS_NS))
if physical_desc is not None:
extent = physical_desc.find("{{{0}}}extent".format(MODS_NS))
if extent is not None:
output.append(extent.text)
origin = physical_desc.find("{{{0}}}digitalOrigin".format(MODS_NS))
if origin is not None:
output.append(origin.text)
return output
def get_format(mods):
"""
Extracts format from the genre field
:param mods: Etree XML of MODS datastream
"""
genre = mods.find("{{{0}}}genre".format(MODS_NS))
if genre is not None:
return genre.text
def get_mods(pid):
"""
Function attempts to extract the MODS datastream from the digital
repository
:param pid: PID of the object
:rtype: Etree of the MODS datastream
"""
    # Save results of attempting to retrieve the MODS datastream from the
# repository
mods_result = repository.api.getDatastreamDissemination(pid=pid,
dsID="MODS")
# Gets the raw XML from the result
mods_xml = mods_result[0]
# Returns the etree MODS xml object from the raw XML
return etree.XML(mods_xml)
def get_notes(mods):
"""
Function extracts all notes fields from MODS
:param mods: Etree of the MODS datastream
"""
notes = []
    all_notes = mods.findall("{{{0}}}note".format(MODS_NS))
if all_notes is None:
return notes
for note in all_notes:
        displayLabel = note.attrib.get('displayLabel')
if displayLabel is not None:
text = "{0} {1}".format(displayLabel, note.text)
else:
text = note.text
notes.append(text)
return notes
def get_publisher(mods):
"""
Function extracts publisher from MODS
:param mods: Etree of the MODS datastream
"""
    publisher = mods.find("{{{0}}}originInfo/{{{0}}}publisher".format(MODS_NS))
if publisher is not None:
return publisher.text
def get_published_year(mods):
"""
    Function extracts the publication year from MODS
:param mods: Etree of the MODS datastream
"""
    dateCreated = mods.find("{{{0}}}originInfo/{{{0}}}dateCreated".format(MODS_NS))
if dateCreated is not None:
return dateCreated.text
def get_summary(mods):
"""
Function extracts abstract from MODS and returns text.
"""
summary = mods.find("{{{0}}}abstract".format(MODS_NS))
if summary is not None:
return summary.text
def get_text(solr_doc,mods):
"""
Function adds most of MODS record into general text field for
searching
:param solr_doc: Solr document dictionary
:param mods: Etree of the MODS datastream
"""
output = []
for key,value in solr_doc.iteritems():
if ['access','bib_num','id'].count(key) < 1:
output.append(value)
return output
def get_url(mods):
"""
Function extracts URL location from MODS and returns text.
"""
url = mods.find("{{{0}}}location/{{{0}}}url".format(MODS_NS))
if url is not None:
return url.text
def index_collection(collection_pid='coccc:top',recursive=True):
"""
Method indexes all child elements in a Fedora Collection, if
recursive is True, any collections in the children will call
    the index_collection function for that child pid.
:param collection_pid: Collection of PID, default is top-level collection
object for the repository
:param recursive: Boolean, if True will call the index_collection on any
subcollections in the collection
"""
get_collection_sparql = '''PREFIX fedora: <info:fedora/fedora-system:def/relations-external#>
SELECT ?a
FROM <#ri>
WHERE
{
?a fedora:isMemberOfCollection <info:fedora/%s>
}
''' % collection_pid
csv_reader = repository.risearch.sparql_query(get_collection_sparql)
for row in csv_reader:
result = row.get('a')
pid = result.split("/")[1]
relationship = etree.XML(repository.api.getRelationship(pid)[0])
index_digital_object(pid=pid)
def index_digital_object(**kwargs):
pid = kwargs.get('pid')
mods = get_mods(pid)
if kwargs.has_key('format'):
formatOf = kwargs.get('format')
else:
formatOf = get_format(mods)
    if formatOf is None:
        formatOf = 'Unknown'
solr_doc = {'access':'Online',
'bib_num':pid,
'format':formatOf.title(),
'location':'Digital Archives of Colorado College (DACC)',
'id':pid}
solr_doc['author'] = get_creators(mods)
solr_doc['description'] = get_description(mods)
solr_doc['title'] = get_title(mods)
solr_doc['full_title'] = solr_doc['title']
solr_doc['topic'] = get_topics(mods)
solr_doc['summary'] = get_summary(mods)
solr_doc['notes'] = get_notes(mods)
solr_doc['personal_name'] = solr_doc['author']
solr_doc['publisher'] = get_publisher(mods)
solr_doc['pubyear'] = get_published_year(mods)
solr_doc['text'] = get_text(solr_doc,mods)
solr_doc['url'] = get_url(mods)
print("Adding {0} with format {1} to Solr index".format(solr_doc['id'],
solr_doc['format']))
solr_server.add(solr_doc)
solr_server.commit()
def index_manuscript(pid):
"""
Function takes PID, extracts MODS, creates Solr document and attempts to ingest into Solr.
"""
index_digital_object(pid=pid,format='Manuscript')
def index_process(dig_obj,queue):
"""
Function adds result of indexing fedora digital object into
Solr index.
:param dig_obj: Digital Object
"""
print("In index_process")
index_digital_object(pid=dig_obj.pid)
queue.put("Indexed {0} with PID={1} into Solr Index".format(dig_obj.label,dig_obj.pid))
def start_indexing(pid_prefix='coccc'):
"""
Function starts Solr indexing queue for all objects in
the repository.
:param pid_prefix: PID prefix to search, defaults to CC
"""
query = "{0}*".format(pid_prefix)
print("Before get pid generator {0}".format(query))
all_pids_generator = repository.find_objects(query = "{0}*".format(pid_prefix))
print("after get pid generator {0}".format(all_pids_generator))
while 1:
try:
print("Before extracting next digital object")
digital_object = next(all_pids_generator)
print("Digital object PID={0}".format(digital_object.pid))
process = Process(target=index_process, args=(digital_object,SOLR_QUEUE))
process.start()
#process.join()
except:
break
```
#### File: aristotle-library-apps/hours/redis_helpers.py
```python
__author__ = '<NAME>, <NAME>'
import datetime,redis,copy
import aristotle.settings
redis_ds = aristotle.settings.REDIS_DATASTORE
library_key_format = 'library-hours:%Y-%m-%d'
time_format = '%H:%M'
def calculate_offset(at_time):
"""
    Helper function takes a datetime and calculates the offset into
    a 96-bit string that holds one bit for every quarter hour of a 24-hour day.
:param at_time: Datetime for calculating the offset
:rtype: int
"""
offset = at_time.hour * 4
#if offset is 0:
# offset = 1
minute = at_time.minute
if minute < 15:
offset += 1
if minute > 14 and minute < 30:
offset += 2
elif minute > 29 and minute < 45:
offset += 3
elif minute > 44:
offset += 4
return offset
def calculate_time(offset,start=None,end=None):
"""
Helper function takes an offset between 1-96 and returns the
time
:param offset: Int between 1-96
"""
minute_lookup = {-1:45,0:0,1:15,2:30}
hour = offset/4
return datetime.time(hour,
minute_lookup[offset%4 -1])
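# Illustrative round trip between the two helpers above (the date itself is
# arbitrary):
#
#   calculate_offset(datetime.datetime(2013, 1, 2, 9, 30))  # -> 39
#   calculate_time(39)                                       # -> datetime.time(9, 30)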
def old_calculate_time(offset,
start=True,
end=False):
"""
Helper function takes an offset between 1-96 and returns the
time based on start and end parameters.
:param offset: Int between 1-96
:param start: Boolean, default to True
:param end: Boolean, default to False
:rtype: Time
"""
start_time = {0:0,1:15,2:30,3:45}
end_time = {0:0,1:30,2:45,3:0}
hour = offset/4
remainder = offset%4
#print("{0} {1} {2}".format(offset,hour,remainder))
if offset < 4:
hour = offset
if start is True and end is True:
return (datetime.time(hour,
start_time.get(remainder)),
datetime.time(hour,
end_time.get(remainder)))
elif start is True and end is False:
## if remainder > 2:
## return datetime.time(hour+1,
## start)
## else:
return datetime.time(hour,
start_time.get(remainder))
elif start is False and end is True:
return datetime.time(hour,
end_time.get(remainder))
    return datetime.time(hour, start_time.get(remainder))
def add_library_hours(open_on,
close_on,
redis_ds=redis_ds):
"""
    Function takes open and close datetimes, iterates through each quarter
    hour of the day, setting a 1 bit for quarter hours inside the open
    interval and a 0 bit for those outside it.
:param open_on: Datetime of opening time
:param close_on: Datetime of closing time
:param redis_ds: Redis datastore, defaults to module redis_ds
"""
library_key = open_on.strftime(library_key_format)
if open_on.day != close_on.day:
        raise ValueError("Add library hours requires open_on and close_on to be on the same day")
start_offset = calculate_offset(open_on)
end_offset = calculate_offset(close_on)
# Each 24 hours has 96 bits that can be set for each quarter hour
# in that time-span.
for counter in range(1,97):
bit_value = 0
if counter >= start_offset and counter < end_offset:
bit_value = 1
if end_offset < start_offset:
if counter >= end_offset and counter < start_offset:
bit_value = 0
else:
bit_value = 1
redis_ds.setbit(library_key,counter,bit_value)
#if counter > 94:
#print("{0}: {1}".format(counter,redis_ds.getbit(library_key,counter)))
def get_closing_time(question_date, redis_ds=redis_ds):
date_key = question_date.strftime(library_key_format)
offset = calculate_offset(question_date)
print("{0} {1}".format(date_key,redis_ds.getbit(date_key, 96)))
if bool(redis_ds.getbit(date_key, 96)):
        next_day = datetime.datetime(question_date.year,
                                     question_date.month,
                                     question_date.day) + datetime.timedelta(days=1)
next_day_key = next_day.strftime(library_key_format)
for counter in range(1,97):
if not bool(redis_ds.getbit(next_day_key,counter)):
return calculate_time(counter,False,True)
else:
for counter in range(offset,96):
if not bool(redis_ds.getbit(date_key,counter)):
return calculate_time(counter,False,True)
def is_library_open(question_date=datetime.datetime.today(),
redis_ds=redis_ds):
"""
Function checks datastore for open and closing times, returns
True if library is open.
:param question_date: Datetime object to check datastore, default
is the current datetime stamp.
:param redis_ds: Redis datastore, defaults to module redis_ds
:rtype: Boolean True or False
"""
offset = calculate_offset(question_date)
status_key = question_date.strftime(library_key_format)
return bool(int(redis_ds.getbit(status_key,offset)))
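# Illustrative usage of the helpers above (the dates and times are
# arbitrary): record an 8:00-22:00 day and then query it.
#
#   add_library_hours(datetime.datetime(2013, 1, 2, 8, 0),
#                     datetime.datetime(2013, 1, 2, 22, 0))
#   is_library_open(datetime.datetime(2013, 1, 2, 12, 0))   # -> True
#   is_library_open(datetime.datetime(2013, 1, 2, 23, 0))   # -> False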
```
#### File: aristotle-library-apps/keyword_search/views.py
```python
__author__ = "<NAME>"
from aristotle.views import json_view
from keyword_search import whoosh_helpers
@json_view
def search(request):
"Searches Whoosh index with query terms"
return {}
```
#### File: marc_batch/jobs/asp_wsm.py
```python
__author__ = "<NAME>"
from asp_base import AlexanderStreetPressBase
class AlexanderStreetPressWomenSocialMovements(AlexanderStreetPressBase):
"""
The `AlexanderStreetPressWomenSocialMovements` reads MARC records from
Alexander Street Press Women and Social Movements database.
"""
    def __init__(self, marc_file, **kwargs):
"""
Creates instance of `WomenSocialMovementsBot`
Parameters:
- `marc_file`: MARC file
"""
kwargs['asp_code'] = 'aspw'
AlexanderStreetPressBase.__init__(self, marc_file, **kwargs)
def processRecord(self,
marc_record):
"""
Method processes a single marc_record for Women and Social Movements database.
Parameters:
- `marc_record`: MARC record
"""
if not self.resolved_baseurl:
self.getResolvedURL(marc_record)
marc_record = self.validate007(marc_record)
marc_record = self.validate245(marc_record)
marc_record = self.remove440(marc_record)
marc_record = self.remove490(marc_record)
marc_record = self.validate506(marc_record)
marc_record = self.validate533(marc_record)
marc_record = self.validateURLs(marc_record,
"0-asp6new.alexanderstreet.com.tiger.coloradocollege.edu")
marc_record = self.validate710(marc_record)
marc_record = self.validate730(marc_record,
"Women and social movements in the United States 1600-2000: Scholar's edition.")
marc_record = self.remove830(marc_record)
return marc_record
def validate001(self,marc_record):
"""
Method follows Prospector best practices for 001 MARC
field.
Parameters:
- `marc_record`: MARC record, required
"""
field001 = marc_record.get_fields('001')[0]
marc_record.remove_field(field001)
return marc_record
```
#### File: marc_batch/jobs/eai_evans.py
```python
__author__ = "<NAME>"
from eai import EarlyAmericanImprintsJob
class EarlyAmericanImprintsEvansJob(EarlyAmericanImprintsJob):
def __init__(self,marc_file,**kwargs):
"""
Creates instance of `EarlyAmericanImprintsEvansJob`
"""
kwargs['field500_stmt'] = 'Evans digital edition'
kwargs['field730_series'] = 'First series'
super(EarlyAmericanImprintsEvansJob,self).__init__(marc_file,**kwargs)
```
#### File: marc_batch/jobs/oho_p.py
```python
__author__ = "<NAME>"
from op_base import OxfordHandbooksJob
class OxfordHandbooksOnlinePhilosophy(OxfordHandbooksJob):
"""
Class reads Oxford Handbooks Online Philosophy MARC21 file and
modifies per CC's requirements
"""
    def __init__(self,
                 marc_file,
                 **kwargs):
"""
Initializes an `OxfordHandbooksOnlinePhilosophy` class
"""
OxfordHandbooksJob.__init__(self,
marc_file,
**kwargs)
```
#### File: marc_batch/jobs/ybp_ebrary.py
```python
from marc_batch.marc_helpers import MARCModifier
from pymarc import Field
import re,logging,copy,codecs,datetime
VOL_RE = re.compile(r"(.*)(vol)[.|\s]*(.*)")
NO_RE = re.compile(r"(.*)([n|N]o)[.|\s]*(.*)")
BD_RE = re.compile(r"(.*)(Ba*n*d)[.|\s]*(.*)")
class ybp_ebrary(MARCModifier):
"""
:class:`ybp_ebrary` class takes a YBP EBL DDA MARC record
file and modifies for import into an ILS
"""
def __init__(self,marc_file):
"""
Initializes `ybp_ebl`
:param marc_file: File location of MARC records
"""
MARCModifier.__init__(self,marc_file)
def processRecord(self,
marc_record):
"""
Processes a single MARC record
:param marc_record: Single MARC record
"""
marc_record = self.validateLeader(marc_record)
marc_record = self.validate001(marc_record)
marc_record = self.validate006(marc_record)
marc_record = self.validate007s(marc_record)
marc_record = self.validate008(marc_record)
marc_record = self.validate040(marc_record)
marc_record = self.validate050s(marc_record)
marc_record = self.validate082(marc_record)
marc_record = self.validate100(marc_record)
marc_record = self.validate246(marc_record)
marc_record = self.validate300s(marc_record)
marc_record = self.validateRDA(marc_record)
marc_record = self.validateSeries(marc_record)
marc_record = self.validate506(marc_record)
marc_record = self.validate538s540(marc_record)
marc_record = self.validate710(marc_record)
marc_record = self.validate776(marc_record)
marc_record = self.validate856(marc_record)
return marc_record
def validateLeader(self,
marc_record):
"""
        Changes encoding level to 3 in leader position 17 and sets position 18 to 'a'
:param marc_record: MARC record
"""
leader_list = list(marc_record.leader)
leader_list[17] = '3'
leader_list[18] = 'a'
marc_record.leader = "".join(leader_list)
return marc_record
def validate001(self,
marc_record):
"""
        Removes the three-character prefix from the 001 and lowercases the rest
:param marc_record: MARC record
"""
field001 = marc_record['001'].value()
        marc_record['001'].data = field001[3:].lower()
return marc_record
def validate007s(self,
marc_record):
"""
Validates all 007s in EBL record load
:param marc_record: Single MARC record
"""
return self.replace007(marc_record,
data=r'cr n a')
def validate008(self,
marc_record):
"""
Validates all 008 in EBL record load
:param marc_record: Single MARC record
"""
field008_data = marc_record['008'].value()
        marc_record['008'].data = field008_data.replace("o","|")
return marc_record
def validate040(self,
marc_record,
marc_code='CoCCC'):
"""
Validates all 040s in EBL record load by adding $d with
institution MARC code
:param marc_record: Single MARC record
:param marc_code: MARC institution code, defaults to CC
"""
all040s = marc_record.get_fields('040')
for field040 in all040s:
field040.add_subfield('d',marc_code)
return marc_record
def validate050s(self,
marc_record):
"""
Validates all 050s in EBL record load
:param marc_record: Single MARC record
"""
all050s = marc_record.get_fields('050')
for field050 in all050s:
first_a = field050.delete_subfield('a')
other_a = field050.get_subfields('a')
for counter in range(0,len(other_a)):
field050.delete_subfield('a')
field050.add_subfield('a',first_a)
first_b = field050.delete_subfield('b')
first_b = subfld_b_process(VOL_RE,first_b,"v.")
first_b = subfld_b_process(NO_RE,first_b,"no.")
first_b = subfld_b_process(BD_RE,first_b,"Bd.")
first_b += 'eb'
field050.add_subfield('b',first_b)
return marc_record
def validate082(self,
marc_record):
"""
Validates the 082 by making sure the indicators are set to 04
:param marc_record:
"""
field082s = marc_record.get_fields('082')
for field082 in field082s:
field082.indicators = ['0','4']
return marc_record
def validate100(self,
marc_record):
"""
Validates 100 field in EBL record load
:param marc_record: Single MARC record
"""
field100 = marc_record['100']
if field100 is not None:
marc_record['100'].indicators = ['1',' ']
return marc_record
def validate246(self,
marc_record):
"""
Validates 246 field in EBL record load
:param marc_record: Single MARC record
"""
field246 = marc_record['246']
if field246 is None:
return marc_record
return marc_record
def validate300s(self,
marc_record):
"""
Validates 300 fields in EBL record load
:param marc_record: Single MARC record
"""
all300s = marc_record.get_fields('300')
for field300 in all300s:
field300.delete_subfield('a')
field300.add_subfield('a','1 online resource (1 v.)')
return marc_record
def validateRDA(self,
marc_record):
"""
Validates RDA elements in 336, 337, and 338 fields
:param marc_record: Single MARC record
"""
# Creates RDA 336
self.__remove_field__(marc_record=marc_record,
tag='336')
field336 = Field('336',
indicators=[' ',' '],
subfields=['a','text',
'2','rdacontent'])
marc_record.add_field(field336)
self.__remove_field__(marc_record=marc_record,
tag='337')
field337 = Field('337',
indicators=[' ',' '],
subfields=['a','computer',
'2','rdamedia'])
marc_record.add_field(field337)
self.__remove_field__(marc_record=marc_record,
tag='338')
field338 = Field('338',
indicators=[' ',' '],
subfields=['a','online resource',
                                    '2','rdacarrier'])
marc_record.add_field(field338)
return marc_record
def validateSeries(self,
marc_record):
"""
Validates Series n 440, 490/830 fields
:param marc_record: Single MARC record
"""
if marc_record['490'] is None and marc_record['830'] is None:
return marc_record
return marc_record
def validate506(self,
marc_record):
"""
Validates 506 field
:param marc_record: Single MARC record
"""
new506 = Field('506',
indicators=[' ',' '],
subfields=['a','Access restricted to subscribing institutions. Individual titles purchased upon selection by the 7th affiliated user.'])
marc_record.add_field(new506)
return marc_record
def validate538s540(self,
marc_record):
"""
Adds 538 fields and 540 field
:param marc_record: Single MARC record
"""
all_538s_msgs = ["System requirements (computers): Browser software; optional ebrary proprietary readers require a Java plug-in (both available on the ebrary site for download at no charge)",
"System requirements (mobile devices): May download to <Kindle, Kobo, Nook and Sony Reader>; free app available on the App Store in <English and Spanish> for <iPad, iPod and iPhone>.",
"Text may be read online, with selection copying and a limited quantity of page prints allowed.",
"Users at some libraries must establish an individual no-charge ebrary account, and log in to download the full text or use extended online features. For security, do not use a confidential or important ID and password to log in; create a different username and password.",
"Optional login available using a Facebook username and password."]
for subfield_a in all_538s_msgs:
field538 = Field('538',
indicators=[' ',' '],
subfields=["a",subfield_a])
marc_record.add_field(field538)
field540 = Field('540',
indicators=[' ',' '],
subfields=["a","Books may be viewed online or downloaded for noncommercial personal or classroom use only. No derivative use, redistribution or public performance is permitted. Maximum usage allowances -- printing: Fair Use, system-controlled (up to about 20% of the total pages); copy/paste: Fair Use only."])
marc_record.add_field(field540)
return marc_record
def validate541(self,
marc_record):
"""
Adds a 541 field
:param marc_record: Single MARC record
"""
field541 = Field('541',
indicators=[' ',' '],
subfields=["a","Permanent access license for participating libraries purchased from YBP"])
marc_record.add_field(field541)
return marc_record
def validate710(self,marc_record):
"""Adds a 710 field if missing from marc_record.
:param marc_record: Single MARC record
"""
if not marc_record['710']:
field710 = Field('710',
indicators=['2',' '],
subfields=["a","Ebooks Corporation"])
marc_record.add_field(field710)
return marc_record
def validate776(self,
marc_record):
"""
        Validates 776 by ensuring indicators are '0' and '8'
:param marc_record: Single MARC record
"""
all776s = marc_record.get_fields('776')
for field776 in all776s:
new776 = Field(tag='776',
indicators = ['0','8'],
subfields=field776.subfields)
marc_record.remove_field(field776)
marc_record.add_field(new776)
return marc_record
def validate856(self,
marc_record):
"""
Validates 856 by changing $z to 'View electronic book'
:param marc_record: MARC Record
"""
all856s = marc_record.get_fields('856')
for field856 in all856s:
field856.delete_subfield('z')
field856.add_subfield('z','View electronic book')
return marc_record
def subfld_b_process(regex,value,repl):
if value is None:
return ''
regex_result = regex.search(value)
if regex_result is None:
return value
b_tuples = list(regex_result.groups())
b_tuples[1] = regex.sub(repl,b_tuples[1])
return "".join(b_tuples)
```
#### File: orders/templatetags/order_extras.py
```python
from datetime import datetime,timedelta
from calendar import HTMLCalendar
from django.template import Context,Library,loader
from django.utils import simplejson as json
from django.utils.safestring import mark_safe
from bs4 import BeautifulSoup
register = Library()
def create_nav_btn(soup,date,text):
"""
Helper functions for month_calendar, generates a navigation button
for calendar
:param soup: BeautifulSoup parser of document
:param date: Date to create nav button
:param text: Text for button
"""
nav_th = soup.new_tag('th',attrs=[('colspan','2')])
nav_th['class'] = 'month'
nav_a = soup.new_tag('a',href='/apps/orders/%s/%s' % (date.year,
date.month))
nav_a.string = text
if date > datetime.today():
nav_a['class'] = "btn btn-mini btn-info disabled"
nav_a['href'] = '#'
else:
nav_a['class'] = "btn btn-mini btn-info"
nav_th.insert(0,nav_a)
return nav_th
def month_calendar(date=datetime.today()):
"""
Filter displays a HTML calendar for inclusion in templates with
links to existing transactions in the datastore
:param date: Date to display Monthly calendar, default is the
current month
"""
raw_month_html = HTMLCalendar().formatmonth(date.year,date.month)
month_soup = BeautifulSoup(raw_month_html)
time_delta = timedelta(days=31)
# Creates Previous and Next month (if date isn't current)
first_row = month_soup.find('tr')
exist_th = first_row.find('th')
exist_th['colspan'] = 3
previous_month = date - time_delta
next_month = date + time_delta
create_nav_btn(month_soup,previous_month,"«")
create_nav_btn(month_soup,next_month,"»")
pretty_html = month_soup.prettify()
print(type(pretty_html))
return mark_safe(pretty_html)
register.filter('month_calendar',month_calendar)
```
#### File: aristotle-library-apps/organization_authority/redis_helpers.py
```python
__author__ = "<NAME>"
import re
from bibframe.models import Organization
from person_authority.redis_helpers import process_name
from aristotle.settings import REDIS_DATASTORE
PUNCTUATION_RE = re.compile(r"[^!-~]|[.,;:]")
def add_organization(name_metaphone_keys,
org_attributes,
redis_datastore=REDIS_DATASTORE):
"""Function adds a BIBFRAME Organization to RLSP
Function takes a Redis authority instance, the organization's name metaphone
keys and the organization's attributes to create a BIBFRAME organization
entity in the RLSP.
    Parameters:
    name_metaphone_keys -- List of name metaphone keys for the organization
    redis_datastore -- Redis Instance or Redis Cluster
    org_attributes -- Dict of organization's properties
"""
new_organization = Organization(redis_datastore=redis_datastore)
for key, value in org_attributes.iteritems():
setattr(new_organization, key, value)
new_organization.save()
for metaphone in name_metaphone_keys:
redis_datastore.sadd(metaphone, new_organization.redis_key)
return new_organization
def get_or_add_organization(org_attributes,
redis_datastore=REDIS_DATASTORE):
"""
Function takes a dict of an organization's attributes and either returns an existing
Organization or creates a new organization based on similarity metric.
:param org_attributes:
:param redis_datastore: Redis BIBFRAME Authority instance
"""
name_metaphones, name_metaphone_keys, org_keys = [], [], []
normed_location_key, place_keys = None, []
if 'label' in org_attributes:
raw_name = org_attributes.get('label')
name_metaphones = process_name(raw_name)
name_metaphone_keys = ["organization-metaphone:{0}".format(x) for x in name_metaphones]
existing_org_keys = redis_datastore.sinter(name_metaphone_keys)
if len(existing_org_keys) == 0:
return add_organization(name_metaphone_keys,
org_attributes,
redis_datastore=redis_datastore)
else:
return Organization(redis_key=list(existing_org_keys)[0],
redis_datastore=redis_datastore)
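# Illustrative usage (the organization label is hypothetical):
#
#   org = get_or_add_organization({'label': 'Example Public Library'})
#   org.redis_key   # existing key if a metaphone match was found, otherwise new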
```
#### File: aristotle-library-apps/person_authority/tests.py
```python
import os
from django.test import TestCase
from redis_helpers import *
from aristotle.settings import PROJECT_HOME,TEST_REDIS
authority_redis = TEST_REDIS
class AddPersonTest(TestCase):
def setUp(self):
pass
def tearDown(self):
authority_redis.flushdb()
class GetOrGeneratePersonTest(TestCase):
def setUp(self):
attributes = {"rda:preferredNameForThePerson":"<NAME>",
"schema:dateOfBirth":1960,
"schema:dateOfDeath":2008}
self.person = get_or_generate_person(attributes,
authority_redis)
self.austen_attrs = {"rda:preferredNameForThePerson":"<NAME>",
"schema:dateOfBirth":1775,
"schema:dateOfDeath":1817}
self.jane_austen = get_or_generate_person(self.austen_attrs,authority_redis)
def test_get_or_generate_person(self):
"""
Tests redis_helpers.get_or_generate_person
"""
self.assert_(self.person.redis_key)
def test_duplicate1(self):
"""
Tests duplicates based on Pride and Prejudices multiple
MARC21 record examples
"""
test_person = get_or_generate_person(self.austen_attrs, authority_redis)
self.assertEquals(self.jane_austen.redis_key,
test_person.redis_key)
def test_not_duplicate(self):
test_person = get_or_generate_person({"rda:preferredNameForThePerson":"<NAME>",
"schema:dateOfBirth":1990},
authority_redis)
self.assertNotEquals(self.jane_austen.redis_key,
test_person.redis_key)
def test_duplicate2(self):
test_person = get_or_generate_person({"rda:preferredNameForThePerson":"<NAME>",
"schema:dateOfBirth":1775},
authority_redis)
self.assertEquals(test_person.redis_key,
self.jane_austen.redis_key)
def tearDown(self):
authority_redis.flushdb()
```
#### File: aristotle-library-apps/person_authority/views.py
```python
__author__ = "<NAME>"
from app_settings import APP
from django.views.generic.simple import direct_to_template
from aristotle.views import json_view
from aristotle.settings import AUTHORITY_REDIS, INSTANCE_REDIS
from redis_helpers import person_search, process_name
from bibframe.redis_helpers import get_brief
authority_redis = AUTHORITY_REDIS
def app(request):
"""
Default view for Person Authority App
"""
return direct_to_template(request,
'person_authority/app.html',
{'app':APP})
@json_view
def search(request):
"""
JSON search view returns the results of searching the Authority Redis
datastore.
:param request: HTTP Request
"""
raw_name = request.REQUEST.get("q")
output = person_search(raw_name)
return {'results':output}
```
#### File: aristotle-library-apps/portfolio/views.py
```python
__author__ = '<NAME>'
import imp
import os
from django.shortcuts import render as direct_to_template # quick hack to get running under django 1.5
from django.shortcuts import render
import django.utils.simplejson as json
from aristotle.settings import PROJECT_HOME,PROJECT_ROOT,INSTITUTION,INSTALLED_APPS
from aristotle.forms import FeedbackForm
from app_settings import APP
def get_apps(is_authenticated):
"""
Helper function returns a list of app information, extracted
from all apps in installed apps.
:param is_authenticated: Boolean for user access to productivity apps
"""
output = []
for row in INSTALLED_APPS:
if not row.startswith('django') and ['aristotle',
'portfolio'].count(row) < 1:
settings_file = os.path.join(PROJECT_HOME,
row,
"app_settings.py")
if os.path.exists(settings_file):
app_settings = imp.load_source(row,settings_file)
app_info = app_settings.APP
if app_info.get('is_productivity'):
if is_authenticated is True:
output.append(app_info)
else:
output.append(app_info)
return output
def default(request):
"""
Default view for the portfolio app displays both Access and Productivity
Apps depending on authentication and access rights
:param request: Web request from client
:rtype: Generated HTML template
"""
app_listing = get_apps(request.user.is_authenticated())
return direct_to_template(request,
'portfolio/app.html',
{'app':APP,
'feedback_form':FeedbackForm({'subject':'Library App Portfolio'}),
'feedback_context':request.get_full_path(),
'institution':INSTITUTION,
'portfolio':app_listing,
'user':None})
```
#### File: aristotle-library-apps/reserve_search/views.py
```python
__author__ = "<NAME> & <NAME>"
from django.views.generic.simple import direct_to_template
from app_settings import APP
from aristotle.settings import INSTITUTION
def default(request):
"""
default is the standard view for the reserve search app
:param request: web request
"""
return direct_to_template(request,
'reserve_search/app.html',
{'app':APP,
'institution':INSTITUTION})
def widget(request):
"""
Returns rendered html snippet of reserve_search widget
"""
return direct_to_template(request,
'reserve_search/snippets/widget.html',
{'app':APP,
'standalone':True,
'showappicon':True})
def dotCMS(request):
"""
Returns rendered html snippet of reserve_search widget for dotCMS display
"""
return direct_to_template(request,
'reserve_search/snippets/dotCMS.html',
{'app':APP,
'standalone':True,
'showappicon':False})
```
#### File: aristotle-library-apps/schema_org/models.py
```python
__author__ = "<NAME>"
import json, os
from stdnet import odm
from aristotle.settings import PROJECT_HOME
from aristotle.settings import ANNOTATION_REDIS, AUTHORITY_REDIS, CREATIVE_WORK_REDIS, INSTANCE_REDIS
SCHEMA_RDFS = json.load(open(os.path.join(PROJECT_HOME,
'schema_org',
'fixures',
'all.json'),
'rb'))
class Thing(odm.StdModel):
"""
Schema.org Thing Base Model available at http://schema.org/Thing
"""
additionalType = odm.ForeignField()
description = odm.CharField()
image = odm.CharField()
name = odm.SymbolField()
url = odm.SymbolField()
def __unicode__(self):
return self.name
class Meta:
abstract = True
class CreativeWork(Thing):
"""
Schema.org Creative Work Model available at http://schema.org/CreativeWork
"""
about = odm.ForeignField()
accountablePerson = odm.ForeignField('Person')
aggregateRating = odm.SymbolField()
alternativeHeadline = odm.SymbolField()
associatedMedia = odm.SymbolField()
audience = odm.SymbolField()
audio = odm.CharField()
author = odm.ManyToManyField()
award = odm.ListField()
comment = odm.ManyToManyField('UserComments')
contentLocation = odm.ForeignField('Place')
contentRating = odm.SymbolField()
contributor = odm.ManyToManyField()
copyrightHolder = odm.ForeignField()
copyrightYear = odm.DateField()
creator = odm.ManyToManyField()
dateCreated = odm.SymbolField()
dateModified = odm.SymbolField()
datePublished = odm.SymbolField()
discussionUrl = odm.SymbolField()
editor = odm.ForeignField('Person')
encoding = odm.ForeignField('MediaObject')
genre = odm.SymbolField()
headline = odm.CharField()
inLanguage = odm.SymbolField()
interactionCount = odm.SymbolField()
isFamilyFriendly = odm.BooleanField()
keywords = odm.SetField()
mentions = odm.ManyToManyField()
offers = odm.ManyToManyField('Offer')
provider = odm.ManyToManyField()
publisher = odm.ManyToManyField()
publishingPrinciples = odm.CharField()
review = odm.SymbolField('Review')
sourceOrganization = odm.ForeignField('Organization')
text = odm.CharField()
thumbnailUrl = odm.CharField()
version = odm.FloatField()
video = odm.ForeignField('VideoObject')
class Person(Thing):
additionalType = odm.SymbolField()
description = odm.SymbolField()
image = odm.SymbolField()
name = odm.SymbolField()
url = odm.SymbolField()
additionalName = odm.SymbolField()
address = odm.SymbolField()
affiliation = odm.SymbolField()
alumniOf = odm.SymbolField()
award = odm.SymbolField()
awards = odm.SymbolField()
birthDate = odm.SymbolField()
brand = odm.SymbolField()
children = odm.SymbolField()
colleague = odm.SymbolField()
colleagues = odm.SymbolField()
contactPoint = odm.SymbolField()
contactPoints = odm.SymbolField()
deathDate = odm.SymbolField()
duns = odm.SymbolField()
email = odm.SymbolField()
familyName = odm.SymbolField()
faxNumber = odm.SymbolField()
follows = odm.SymbolField()
gender = odm.SymbolField()
givenName = odm.SymbolField()
globalLocationNumber = odm.SymbolField()
hasPOS = odm.SymbolField()
homeLocation = odm.SymbolField()
honorificPrefix = odm.SymbolField()
honorificSuffix = odm.SymbolField()
interactionCount = odm.SymbolField()
isicV4 = odm.SymbolField()
jobTitle = odm.SymbolField()
knows = odm.SymbolField()
makesOffer = odm.SymbolField()
memberOf = odm.SymbolField()
naics = odm.SymbolField()
nationality = odm.SymbolField()
owns = odm.SymbolField()
parent = odm.SymbolField()
parents = odm.SymbolField()
performerIn = odm.SymbolField()
relatedTo = odm.SymbolField()
seeks = odm.SymbolField()
sibling = odm.SymbolField()
siblings = odm.SymbolField()
spouse = odm.SymbolField()
taxID = odm.SymbolField()
telephone = odm.SymbolField()
vatID = odm.SymbolField()
workLocation = odm.SymbolField()
worksFor = odm.SymbolField()
```
#### File: colorado_college/templatetags/colorado_college_extras.py
```python
__author__ = "<NAME>"
from django import template
from django.utils.safestring import mark_safe
from bibframe.models import Instance
from aristotle.settings import REDIS_DATASTORE, STATIC_URL
from discovery.forms import AnnotationForm
register = template.Library()
@register.filter(is_safe=True)
def author_of(person):
"Returns div with a list of Works that the person is an author of"
author_role_key = "{0}:resourceRole:aut".format(person.redis_key)
if not REDIS_DATASTORE.exists(author_role_key):
return mark_safe('')
author_html = """<div class="alert alert-info alert-block">
<h3>Author's Creative Works</h3><ul>"""
for work_key in REDIS_DATASTORE.smembers(author_role_key):
work_info = work_key.split(":")
title_key = REDIS_DATASTORE.hget(work_key, 'title')
title = REDIS_DATASTORE.hget(title_key, 'label')
author_html += """<li>{0} <em><a href="/apps/discovery/{0}/{1}">{2}</a></li></em>
""".format(work_info[1],
work_info[-1],
title)
author_html += "</ul></div>"
return mark_safe(author_html)
@register.filter(is_safe=True)
def display_facet(facet):
"Returns accordion group based on template and facet"
expand = False
if ["access", "format"].count(facet.redis_id) > 0:
expand = True
facet_grp_template = template.loader.get_template('cc-facet-group.html')
return mark_safe(facet_grp_template.render(
template.Context({'expand': expand,
'facet':facet})))
@register.filter(is_safe=True)
def display_network_toolbar(redis_entity):
"""Returns a semantic and social network toolbar
Parameter:
redis_entity -- BIBFRAME Entity
"""
network_toolbar_template = template.loader.get_template(
'cc-network-toolbar.html')
return mark_safe(network_toolbar_template.render(
template.Context({'entity': redis_entity,
'STATIC_URL': STATIC_URL})))
@register.filter(is_safe=True)
def display_pagination(current_shard):
"Filter generates pagination view based on the current shard"
pagination_template = template.loader.get_template(
'cc-pagination.html')
current_int = int(current_shard.split(":")[-1])
shard_pattern = current_shard[:-2]
total_int = int(REDIS_DATASTORE.get('global {0}'.format(shard_pattern)))
previous_int = current_int -1
next_int = current_int +1
if previous_int < 1:
previous_int = 1
shards = []
for i in range(1, total_int):
shards.append('{0}:{1}'.format(shard_pattern,
i))
previous_shard = '{0}:{1}'.format(shard_pattern,
previous_int)
next_shard = '{0}:{1}'.format(shard_pattern,
next_int)
return mark_safe(pagination_template.render(
template.Context({'previous': previous_shard,
'next': next_shard,
'shard_num': current_int})))
@register.filter(is_safe=True)
def display_brief(work):
"Returns CC version of a Brief summary based on White While DL design"
work_template = template.loader.get_template(
'cc-brief.html')
title_entity = REDIS_DATASTORE.hget(work.title, 'label')
if REDIS_DATASTORE.hexists(work.redis_key, "rda:isCreatedBy"):
creators_keys = [REDIS_DATASTORE.hget(work.redis_key,
"rda:isCreatedBy"),]
else:
creators_keys = list(REDIS_DATASTORE.smembers(
"{0}:rda:isCreatedBy".format(work.redis_key)))
creators = []
for redis_key in creators_keys[:4]:
creator = {'id': redis_key.split(":")[-1]}
given_name = REDIS_DATASTORE.hget(redis_key, 'schema:givenName')
if given_name is not None:
creator['givenName'] = unicode(given_name, errors='ignore')
family_name = REDIS_DATASTORE.hget(redis_key, 'schema:familyName')
if family_name is not None:
creator['familyName'] = unicode(family_name, errors='ignore')
creator['name'] = unicode(REDIS_DATASTORE.hget(redis_key,
'rda:preferredNameForThePerson'),
errors='ignore')
creators.append(creator)
context = template.Context({'creators': creators,
'title': title_entity,
'work': work}
)
return mark_safe(work_template.render(context))
@register.filter(is_safe=True)
def display_person_dates(person):
"Displays a person date of birth and death if present"
date_html = """<dl class="dl-horizontal">"""
if hasattr(person, 'rda:dateOfBirth'):
date_html += "<dt>Date of Birth:</dt>"
date_html += "<dd>{0}</dd>".format(getattr(person, 'rda:dateOfBirth'))
if hasattr(person, 'rda:dateOfDeath'):
date_html += "<dt>Date of Death:</dt>"
date_html += "<dd>{0}</dd>".format(getattr(person, 'rda:dateOfDeath'))
date_html += "</dl>"
return mark_safe(date_html)
@register.filter(is_safe=True)
def display_instances(work):
"Generates a display of all of the instances for a work"
work_instances_template = template.loader.get_template(
'cc-work-instances.html')
instances = []
instance_keys = list(REDIS_DATASTORE.smembers(
"{0}:hasInstance".format(work.redis_key)))
if len(instance_keys) < 1:
instance_key = REDIS_DATASTORE.hget(work.redis_key,
'hasInstance')
if instance_key is not None:
instance_keys.append(instance_key)
for instance_key in instance_keys:
instances.append(
Instance(redis_key=instance_key,
redis_datastore=REDIS_DATASTORE))
context = template.Context({'instances': instances})
return mark_safe(work_instances_template.render(context))
@register.filter(is_safe=True)
def display_instance_summary(instance):
"Generates a summary of an Instance"
output = "This is {0}".format(getattr(instance,
'rda:carrierTypeManifestation'))
if hasattr(instance, 'isbn'):
output += " with an ISBN of {0}.".format(instance.isbn)
if instance.language is not None:
output += " Language published in {0}".format(instance.language)
if hasattr(instance, 'rda:dateOfPublicationManifestation'):
output += " Published date of {0}".format(
getattr(instance,
'rda:dateOfPublicationManifestation'))
return mark_safe(output)
@register.filter(is_safe=True)
def display_user_annotation_dialog(entity):
"Displays a User Annotation dialog"
user_dialog_template = template.loader.get_template(
'cc-user-annotation.html')
context = template.Context({'form': AnnotationForm(),
'entity': entity})
return mark_safe(user_dialog_template.render(context))
@register.filter(is_safe=True)
def get_creators(bibframe_entity):
output = '<ul class="icons-ul">'
if type(bibframe_entity) == Instance:
redis_key = bibframe_entity.attributes.get('instanceOf')
else:
redis_key = bibframe_entity.redis_key
if REDIS_DATASTORE.hexists(redis_key,"rda:isCreatedBy"):
creators = [REDIS_DATASTORE.hget(redis_key,"rda:isCreatedBy"),]
else:
creators = list(REDIS_DATASTORE.smembers("{0}:rda:isCreatedBy".format(redis_key)))
for i, key in enumerate(creators):
creator_id = key.split(":")[-1]
output += """<li><a href="/apps/discovery/Person/{0}">
<i class="icon-li icon-user"></i> {1}</a></li>""".format(
key.split(":")[-1],
REDIS_DATASTORE.hget(key,
'rda:preferredNameForThePerson'))
output += "</ul>"
return mark_safe(output)
@register.filter(is_safe=True)
def get_facet(facet_key):
    "Returns accordion group based on template and redis key"
    # Assume facet_key points to a sorted set
    facet_grp_template = template.loader.get_template('cc-facet-group.html')
    facet = {'label': facet_key.split(":")[-1],
             'items': []}
    facet['name'] = facet.get('label').lower().replace(" ", "-")
for item in REDIS_DATASTORE.zrevrange(facet_key,
0,
-1,
withscores=True):
item = {'label': item[0].split(":")[-1],
'count': item[1]}
facet['items'].append(item)
return mark_safe(facet_grp_template.render(
template.Context(facet)))
@register.filter(is_safe=True)
def get_work_total(work_name):
work_key = "global bf:{0}".format(work_name)
if REDIS_DATASTORE.exists(work_key):
work_total = '{0:,}'.format(int(REDIS_DATASTORE.get(work_key)))
else:
work_total = 0
return mark_safe(work_total)
```
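A short usage sketch, assuming this templatetag library is discoverable via `INSTALLED_APPS`; it applies one of the registered filters from an inline template rather than a template file.
```python
# Minimal sketch: render the get_work_total filter from an inline template.
# Assumes the colorado_college app is installed so {% load %} can find it.
from django import template

inline = template.Template(
    "{% load colorado_college_extras %}"
    "Total works: {{ 'Work'|get_work_total }}"
)
print(inline.render(template.Context({})))
```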
|
{
"source": "jermnelson/Discover-Aristotle",
"score": 2
}
|
#### File: apps/datasets/forms.py
```python
__author__ = '<NAME>'
import logging
from django import forms
from eulxml.xmlmap import mods
from eulxml.forms import XmlObjectForm,SubformField
class ThesisDatasetForm(forms.Form):
"""DatasetForm associates a form with multiple MODS elements to support a
thesis dataset in the Fedora object
"""
abstract = forms.CharField(required=False,
label='Abstract of dataset',
widget=forms.Textarea(attrs={'class':'span5',
'cols':60,
'rows':5}))
is_publically_available = forms.BooleanField(required=False,
label='I agree')
not_publically_available = forms.BooleanField(required=False,
label='I agree')
info_note = forms.CharField(required=False,
label='Software/version',
widget=forms.Textarea(attrs={'class':'span5',
'cols':60,
'rows':5}))
dataset_file = forms.FileField(required=False,
label='Dataset')
def is_empty(self):
for k,v in self.cleaned_data.iteritems():
if v != None:
return False
return True
def mods(self,
mods_xml=None):
"""
Method supports adding a dataset file stream and associated MODS elements,
creates a new MODS XML datastream if not present.
"""
if not mods_xml:
mods_xml = mods.MODS()
if self.cleaned_data.has_key('abstract'):
abstract = mods.Note(text=self.cleaned_data['abstract'],
# type='source type',
label='Dataset Abstract')
mods_xml.notes.append(abstract)
if self.cleaned_data.has_key('info_note'):
info = mods.Note(text=self.cleaned_data['info_note'],
#type='source note',
label='Dataset Information')
mods_xml.notes.append(info)
return mods_xml
```
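A brief usage sketch with made-up field values: bind the form, check it, and serialize the MODS datastream it produces.
```python
# Minimal sketch (placeholder data): build the MODS XML that would be attached
# to the Fedora object for an uploaded thesis dataset.
form = ThesisDatasetForm({'abstract': 'Raw observation data for a senior thesis',
                          'info_note': 'R 2.13, ggplot2'})
if form.is_valid() and not form.is_empty():
    mods_xml = form.mods()
    print(mods_xml.serialize(pretty=True))  # eulxml XmlObject serialization
```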
#### File: apps/discovery/feeds.py
```python
import sunburnt
from django.conf import settings
from django.http import HttpRequest
from django.contrib.syndication.views import Feed
class SavedRecordsFeed(Feed):
"""
SavedRecordsFeed creates an RSS feed based on the current
values of the user's item carts stored in the session variables.
"""
title = 'Aristotle Saved Record Feed'
link = '/catalog/cart/feed'
description = 'Saved Records from Aristotle'
    def items(self):
        all_items = []
        solr_server = sunburnt.SolrInterface(settings.SOLR_URL)
        # NOTE: accessing request.session here does not work in this Django version
        if self.request.session.has_key('item_cart'):
            item_cart = self.request.session['item_cart']
            for doc_id in item_cart:
                solr_response = solr_server.search(q='id:%s' % doc_id)
                if solr_response.result.numFound > 0:
                    all_items.append(solr_response.result.doc[0])
        return all_items
def item_description(self,item):
desc = ''
if item.has_key('author'):
desc += 'Author: %s' % item['author']
if item.has_key('callnum'):
desc += 'Call number: %s' % item['callnum']
if item.has_key('format'):
desc += 'Format: %s' % item['format']
if item.has_key('url'):
for url in item['url']:
desc += 'URL: %s' % url
return desc
def item_title(self,item):
if item.has_key('full_title'):
return item['full_title']
else:
return None
```
#### File: discovery/parsers/erm_update.py
```python
__author__ = "<NAME>"
import csv,urllib,httplib2
import xml.etree.ElementTree as ElementTree
from settings import CSV_FILE
def load_csv(csv_file=CSV_FILE):
"""
Method parses through CSV file and updates electronic journals dict with
the bib number as the key and the urls, holdings, and issn (if present) for
look-up by the MARC parser.
:param csv_file: Common separated file, defaults to settings values
"""
electronic_bibs = {}
csv_reader = csv.reader(open(csv_file,'rb'))
for row in csv_reader:
row_dict = {}
urls,paired_holdings,counter = [],[],0
bib_id = row[0][0:-1] # Removes last digit as per ILS convention
if len(row[2]) > 1:
row_dict['issn'] = row[2]
reversed_fields = row[3:]
reversed_fields.reverse()
for value in reversed_fields:
holdings = []
if value.lower().startswith('http'):
raw_url = value.split(' ') # Attempts to split out text from url
urls.append(raw_url[0])
paired_holdings.append("{0} ".format(' '.join(raw_url[1:])))
else:
try:
int(value[0]) # Assumes holdings starts with an int
paired_holdings[counter] = '''<a href="{0}">{1}</a> {2}'''.format(urls[counter],
paired_holdings[counter],
value)
counter += 1
except:
pass
row_dict['url'] = urls
row_dict['holdings'] = paired_holdings
electronic_bibs[bib_id] = row_dict
return electronic_bibs
```
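For illustration, the mapping returned by `load_csv` looks roughly like the sketch below; the bib number, ISSN, URL, and holdings are placeholders, not real data.
```python
# Illustrative shape of the dict returned by load_csv(), keyed by the
# truncated bib number; every value here is a made-up placeholder.
electronic_bibs = {
    'b1234567': {
        'issn': '0028-0836',
        'url': ['http://example.com/journal'],
        'holdings': ['<a href="http://example.com/journal">Full text</a> v.1 (1990)-'],
    },
}
```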
#### File: grx/parsers/marc2grx.py
```python
import csv
import pymarc
import re
import sys
import time,datetime
import unicodedata
import urllib
import logging
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils import simplejson
logging.basicConfig(filename='%slog/%s-grx-solr-indexer.log' % (settings.BASE_DIR,
datetime.datetime.today().strftime('%Y%m%d-%H')),
level=logging.INFO)
#logger = logging.getLogger('marc_solr_import')
#logger.setLevel(logging.INFO)
#logger.addHandler(logging.FileHandler('%slog/%s-marc-solr-indexer.out' %\
# (settings.BASE_DIR,
# datetime.datetime.today().strftime('%Y%m%d-%H'))))
#log_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
try:
set
except NameError:
from sets import Set as set
from apps.discovery.parsers.marc import RowDict,normalize,subfield_list,multi_field_list,id_match,get_languages,get_row,write_csv
NONINT_RE = re.compile(r'\D')
FIELDNAMES = [
'id',
'grx_title',
'language',
'subtitles',
'summary',
'title',
'url',
]
def generate_records(data_handle):
reader = pymarc.MARCReader(data_handle)
for marc_record in reader:
record = get_record(marc_record)
if record: # skip when get_record returns None
yield record
def get_record(marc_record, ils=None):
"""
Pulls the fields from a MARCReader record into a dictionary.
>>> marc_file_handle = open('test/marc.dat')
>>> reader = pymarc.MARCReader(marc_file_handle)
>>> for marc_record in reader:
... record = get_record(marc_record)
... print record['author']
... break
...
<NAME>, 1839-1897.
"""
record = {}
try:
if ils == 'Horizon':
record['id'] = marc_record['999']['a']
elif ils == 'III':
# [1:-1] because that's how it's referred to in the opac
record['id'] = marc_record['907']['a'][1:-1]
elif ils == 'Unicorn':
record['id'] = marc_record['35']['a']
else:
record['id'] = marc_record['001'].value()
except AttributeError:
return
    # First checks for a varying form of the title to set grx_title,
# otherwise set to the same value as general title
if marc_record['246'] is not None:
grx_titles = marc_record.get_fields('246')
if len(grx_titles) > 0:
subfields = grx_titles[0].get_subfields('a','i')
grx_normalized_title = ''
for field in subfields:
grx_normalized_title += normalize(field)
if grx_normalized_title.startswith('Colorado College:'):
grx_normalized_title = grx_normalized_title.replace('Colorado College:ia','')
record['grx_title'] = grx_normalized_title
if marc_record['245']:
full_title = marc_record['245'].format_field()
try:
nonfiling = int(marc_record['245'].indicator2)
except ValueError:
nonfiling = 0
record['title'] = full_title
if not record.has_key('grx_title'):
record['grx_title'] = full_title
if marc_record['590']:
record['summary'] = normalize(marc_record['590']['a'])
url_fields = marc_record.get_fields('856')
record['url'] = []
for field in url_fields:
url_subfield = field.get_subfields('u')
if url_subfield:
record['url'].append(url_subfield[0])
return record
def get_row(record):
"""Converts record dict to row for CSV input."""
row = RowDict(record)
return row
def write_csv(marc_file_handle, csv_file_handle, collections=None,
ils=settings.ILS):
"""
Convert a MARC dump file to a CSV file.
"""
# This doctest commented out until field names are stable.
#>>> write_csv('test/marc.dat', 'test/records.csv')
#>>> csv_records = open('test/records.csv').read()
#>>> csv_measure = open('test/measure.csv').read()
#>>> csv_records == csv_measure
#True
#>>> os.remove('test/records.csv')
# TODO: move xml parsing to marcxml parser
#if in_xml:
# reader = pymarc.marcxml.parse_xml_to_array(marc_file_handle)
#else:
reader = pymarc.MARCReader(marc_file_handle)
fieldname_dict = {}
for fieldname in FIELDNAMES:
fieldname_dict[fieldname] = fieldname
#for record in reader
count = 0
logging.info("Started MARC record import into GRX core")
try:
writer = csv.DictWriter(csv_file_handle, FIELDNAMES)
writer.writerow(fieldname_dict)
for marc_record in reader:
count += 1
try:
record = get_record(marc_record, ils=ils)
if record: # skip when get_record returns None
if collections:
new_collections = []
old_record = get_old_record(record['id'])
if old_record:
old_collections = old_record.get('collection')
if old_collections:
new_collections.extend(old_collections)
new_collections.extend(collections)
try:
record['collection'].extend(new_collections)
except (AttributeError, KeyError):
record['collection'] = new_collections
row = get_row(record)
writer.writerow(row)
except:
if marc_record.title() is not None:
title = marc_record.title()
else:
title = marc_record['245'].format_field()
logging.info(u"%s error at count=%s, titles is '%s'" %\
(sys.exc_info()[0],
count,
title))
sys.stderr.write("\nError in MARC record #%s (%s):\n" %
(count, title.encode('ascii', 'ignore')))
raise
else:
if count % 1000:
sys.stderr.write(".")
else:
logging.info("\t%s records processed" % count)
sys.stderr.write(str(count))
finally:
marc_file_handle.close()
csv_file_handle.close()
logging.info("Processed %s records.\n" % count)
sys.stderr.write("\nProcessed %s records.\n" % count)
return count
```
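A minimal sketch of driving `write_csv` from a script, assuming a local MARC dump; the file names are placeholders.
```python
# Hypothetical conversion run; write_csv() closes both handles in its finally block.
marc_handle = open('grx-dump.mrc', 'rb')        # placeholder input path
csv_handle = open('grx-records.csv', 'wb')      # placeholder output path
total = write_csv(marc_handle, csv_handle)
print('Converted %s MARC records to CSV' % total)
```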
#### File: marc/bots/aspbots.py
```python
import urlparse,urllib2,re
import datetime,logging
from marcbots import MARCImportBot
from pymarc import Field
class AlexanderStreetPressBaseBot(MARCImportBot):
"""
`AlexanderStreetPressBaseBot` encapsulates the basic
MARC record changes used by child classes.
"""
def __init__(self,marc_file,**kwargs):
"""
Creates instance of `AlexanderStreetPressBaseBot`
Parameters:
- `marc_file`: Alexander Street Press MARC records
- `asp_code`: Alexander Street Press Code, default is asp
"""
MARCImportBot.__init__(self,marc_file)
if not kwargs.has_key('asp_code'):
self.asp_code = 'asp'
else:
self.asp_code = kwargs.get('asp_code')
self.resolved_baseurl = None
def getResolvedURL(self,
marc_record):
"""
        Method extracts the base resolved url from marc_record, sets
class variable for further processing.
Parameters:
- `marc_record`: MARC record, required
"""
field856 = marc_record.get_fields('856')[0]
raw_url = field856.get_subfields('u')[0]
redirect = urllib2.urlopen(raw_url)
redirect_url = urlparse.urlparse(redirect.geturl())
query_prefix = redirect_url.query.split("=")[0]
self.resolved_baseurl = "%s://%s%s?%s" % (redirect_url.scheme,
redirect_url.netloc,
redirect_url.path,
query_prefix)
def remove440(self,marc_record):
"""
Method removes 440 Series Statement field.
Parameters:
- `marc_record`: MARC record, required
"""
return self.__remove_field__(marc_record=marc_record,
tag='440')
def remove490(self,marc_record):
"""
Method removes 490 Series Statement field.
Parameters:
- `marc_record`: MARC record, required
"""
return self.__remove_field__(marc_record=marc_record,
tag='490')
def remove830(self,marc_record):
"""
Method removes MARC 830 field
Parameters:
- `marc_record`: MARC record, required
"""
return self.__remove_field__(marc_record=marc_record,
tag='830')
def validate506(self,marc_record):
"""
Method adds 506 field
Parameters:
- `marc_record`: MARC record, required
"""
marc_record = self.__remove_field__(marc_record=marc_record,
tag='506')
new506 = Field(tag='506',
indicators=[' ',' '],
subfields=['a','Access limited to subscribers.'])
marc_record.add_field(new506)
return marc_record
def validate533(self,marc_record):
"""
Method removes subfield n if exists in field 533
Parameters:
- `marc_record`: MARC record, required
"""
all533fields = marc_record.get_fields('533')
for field in all533fields:
marc_record.remove_field(field)
field.delete_subfield('n')
marc_record.add_field(field)
return marc_record
def validate710(self,marc_record):
"""
Method adds MARC 710 field, Corporate Heading
Parameters:
- `marc_record`: MARC Record, required
"""
new710field = Field(tag='710',
indicators=['2',' '],
subfields=['a','Alexander Street Press.'])
marc_record.add_field(new710field)
return marc_record
def validate730(self,
marc_record,
uniform_title):
"""
        Method adds MARC 730 field, Added entry: uniform title
Parameters:
- `marc_record`: MARC record, required
- `uniform_title`: Uniform title, required
"""
new730field = Field(tag='730',
indicators=['0',' '],
subfields=['a',uniform_title])
marc_record.add_field(new730field)
return marc_record
def validateURLs(self,
marc_record,
proxy_location,
public_note=None):
"""
Method retrieves URL from 856 field, retrieves redirected
url and sets new value to existing 856, calls processURLs
method and returns result
Parameters:
- `marc_record`: MARC record, required
- `proxy_location`: Proxy location, required
"""
all856s = marc_record.get_fields('856')
for field856 in all856s:
raw_url = urlparse.urlparse(field856.get_subfields('u')[0])
record_id = raw_url.query.split(";")[1]
new_url = "%s=%s" % (self.resolved_baseurl,record_id)
field856.delete_subfield('u')
field856.add_subfield('u',new_url)
if public_note:
return self.processURLs(marc_record=marc_record,
proxy_location=proxy_location,
public_note=public_note)
return self.processURLs(marc_record=marc_record,
proxy_location=proxy_location)
class AlexanderStreetPressMusicBot(AlexanderStreetPressBaseBot):
"""
The `AlexanderStreetPressMusicBot` reads MARC records for
Alexander Street Press music databases including American
Song, Jazz Music Library, among others and modifies to
CC standards.
"""
DATABASES = {'American song':{'code':'amso',
'proxy':'0-amso.alexanderstreet.com.tiger.coloradocollege.edu'},
'Classical music library':{'code':'clmu',
'proxy':'0-clmu.alexanderstreet.com.tiger.coloradocollege.edu'},
'Contemporary world music':{'code':'womu',
'proxy':'0-womu.alexanderstreet.com.tiger.coloradocollege.edu'},
'Jazz music library':{'code':'jazz',
'proxy':'0-jazz.alexanderstreet.com.tiger.coloradocollege.edu'},
'Smithsonian global sounds for libraries':{'code':'glmu',
'proxy':'0-glmu.alexanderstreet.com.tiger.coloradocollege.edu'}}
def __init__(self,**kwargs):
"""
Creates instance of `AlexanderStreetPressMusicBot`
Parameters:
- `marc_file`: MARC file, required
- `type_of`: ASP music database, required
"""
#if not kwargs.has_key('type_of'):
# raise ValueError("AlexanderStreetPressMusicBot requires type_of")
self.type_of = kwargs.get('type_of')
self.code_dict = {}
for k,v in self.DATABASES.iteritems():
self.code_dict[v['code']] = k
if self.type_of is not None:
if not self.code_dict.has_key(self.type_of):
raise ValueError('Unknown database: %s' % self.type_of)
AlexanderStreetPressBaseBot.__init__(self,
marc_file=kwargs.get('marc_file'),
asp_code=self.type_of)
def getResolvedURL(self,
marc_record):
"""
Overrides parent method, ASP music databases resolves to a different URL
pattern than other ASP databases.
Parameters:
- `marc_record`: MARC record, required
"""
field856 = marc_record.get_fields('856')[0]
raw_url = field856.get_subfields('u')[0]
redirect = urllib2.urlopen(raw_url)
redirect_url = urlparse.urlparse(redirect.geturl())
self.resolved_baseurl = 'http://%s/View/' % redirect_url.netloc.lower()
def processRecord(self,
marc_record):
"""
        Method processes a single MARC record for Alexander Street Press Music
databases.
Parameters:
- `marc_record`: MARC record, required
"""
if not self.resolved_baseurl:
self.getResolvedURL(marc_record)
marc_record = self.validate006(marc_record)
marc_record = self.validate007(marc_record)
marc_record = self.remove020(marc_record)
marc_record = self.validate245(marc_record)
marc_record = self.validate300(marc_record)
marc_record = self.remove440(marc_record)
marc_record = self.remove490(marc_record)
marc_record = self.validate506(marc_record)
marc_record = self.validate533(marc_record)
marc_record = self.validate710(marc_record)
marc_record = self.validate730(marc_record,
'%s.' % self.type_of)
marc_record = self.remove830(marc_record)
marc_record = self.validateURLs(marc_record)
return marc_record
def remove020(self,marc_record):
"""
Removes MARC 020 ISBN field
        Parameters:
- `marc_record`: MARC record, required
"""
return self.__remove_field__(marc_record=marc_record,
tag='020')
def validate006(self,marc_record):
"""
Validated 006 with CC standard for sound format
'm||||||||h||||||||'
        Parameters:
- `marc_record`: MARC record, required
"""
all006fields = marc_record.get_fields('006')
for field in all006fields:
marc_record.remove_field(field)
new006 = Field(tag='006',
indicators=[' ',' '],
data=r'm h ')
marc_record.add_field(new006)
return marc_record
def validate007(self,marc_record):
"""
Validates 007 fields, if data is sound resource keep, otherwise
change value to CC standard.
:param marc_record: MARC record, required
:rtype marc_record:
"""
        all007s = marc_record.get_fields('007')
for field007 in all007s:
if field007.data.startswith('cr'):
field007.data = r'cr u'
return marc_record
def validate300(self,marc_record):
"""
Validates MARC 300 field set subfield a to 'Streaming audio'
Parameters:
- `marc_record`: MARC Record, required
"""
marc_record = self.__remove_field__(marc_record=marc_record,
tag='300')
new300 = Field(tag='300',
indicators=[' ',' '],
subfields=['a','Streaming audio'])
marc_record.add_field(new300)
return marc_record
def validateURLs(self,marc_record):
"""
Validates 856 fields specifically for various types of Alexander
Street Press music databases.
Parameters:
- `marc_record`: MARC record, required
"""
proxy_location = self.DATABASES[self.code_dict[self.type_of]]['proxy']
all856s = marc_record.get_fields('856')
for field856 in all856s:
raw_url = urlparse.urlparse(field856.get_subfields('u')[0])
record_id = raw_url.query.split(";")[1]
new_url = "%s%s" % (self.resolved_baseurl,record_id)
field856.delete_subfield('u')
field856.add_subfield('u',new_url)
return self.processURLs(marc_record=marc_record,
proxy_location=proxy_location,
public_note='Listen online')
class BlackDramaBot(AlexanderStreetPressBaseBot):
"""
The `BlackDramaBot` reads MARC records from the 2nd Edition
of the Alexander Street Press Black Drama and supplements
database.
"""
def __init__(self,**kwargs):
"""
Creates instance of `BlackDramaBot`
Parameters:
- `marc_file`: MARC file, required
"""
#if not kwargs.has_key('marc_file'):
# raise ValueError("BlackDramaBot requires a MARC file")
AlexanderStreetPressBaseBot.__init__(self,
marc_file=kwargs.get('marc_file'),
asp_code='aspbd2')
def processRecord(self,
marc_record):
"""
        Method processes a single marc_record for the Black Drama 2nd Edition database
Parameters:
- `marc_record`: MARC record, required
"""
if not self.resolved_baseurl:
self.getResolvedURL(marc_record)
marc_record = self.validate006(marc_record)
marc_record = self.validate007(marc_record)
marc_record = self.validate245(marc_record)
marc_record = self.validate250(marc_record)
marc_record = self.remove440(marc_record)
marc_record = self.remove490(marc_record)
marc_record = self.validate506(marc_record)
marc_record = self.validate533(marc_record)
marc_record = self.validate710(marc_record)
marc_record = self.validate730(marc_record,
'Black drama.')
marc_record = self.remove830(marc_record)
marc_record = self.validateURLs(marc_record,
'0-solomon.bld2.alexanderstreet.com.tiger.coloradocollege.edu')
return marc_record
def validate250(self,marc_record):
"""
Method adds edition statement to a new 250 field.
Parameters:
- `marc_record`: MARC record, required
"""
new250 = Field(tag='250',
indicators=[' ',' '],
subfields=['a','2nd ed.'])
marc_record.add_field(new250)
return marc_record
class WomenSocialMovementsBot(AlexanderStreetPressBaseBot):
"""
    The `WomenSocialMovementsBot` reads MARC records from
Alexander Street Press Women and Social Movements database.
"""
def __init__(self,**kwargs):
"""
Creates instance of `WomenSocialMovementsBot`
Parameters:
- `marc_file`: MARC file
"""
AlexanderStreetPressBaseBot.__init__(self,
marc_file=kwargs.get('marc_file'),
asp_code='aspw')
def processRecord(self,
marc_record):
"""
Method processes a single marc_record for Women and Social Movements database.
Parameters:
- `marc_record`: MARC record
"""
if not self.resolved_baseurl:
self.getResolvedURL(marc_record)
marc_record = self.validate007(marc_record)
marc_record = self.validate245(marc_record)
marc_record = self.remove440(marc_record)
marc_record = self.remove490(marc_record)
marc_record = self.validate506(marc_record)
marc_record = self.validate533(marc_record)
marc_record = self.validateURLs(marc_record,
"0-asp6new.alexanderstreet.com.tiger.coloradocollege.edu")
marc_record = self.validate710(marc_record)
marc_record = self.validate730(marc_record,
"Women and social movements in the United States 1600-2000: Scholar's edition.")
marc_record = self.remove830(marc_record)
return marc_record
def validate001(self,marc_record):
"""
Method follows Prospector best practices for 001 MARC
field.
Parameters:
- `marc_record`: MARC record, required
"""
field001 = marc_record.get_fields('001')[0]
marc_record.remove_field(field001)
return marc_record
class GarlandEWMOBot(AlexanderStreetPressBaseBot):
"""
The `GarlandEWMOBot` process the MARC record
for the Alexander Street Press Garland Encyclopedia of Music World
Online electronic resource.
"""
def __init__(self,**kwargs):
"""
Creates instance of `GarlandEWMOBot`
Parameters:
- `marc_file`: MARC file
"""
AlexanderStreetPressBaseBot.__init__(self,
marc_file=kwargs.get('marc_file'),
asp_code='aspglnd')
def getResolvedURL(self,
marc_record):
"""
Overrides parent method, ASP music databases resolves to a different URL
pattern than other ASP databases.
Parameters:
- `marc_record`: MARC record, required
"""
field856 = marc_record.get_fields('856')[0]
raw_url = field856.get_subfields('u')[0]
redirect = urllib2.urlopen(raw_url)
redirect_url = urlparse.urlparse(redirect.geturl())
self.resolved_baseurl = 'http://%s/View/' % redirect_url.netloc.lower()
def processRecord(self,
marc_record):
"""
Method processes a single marc_record for Garland Encyclopedia of
Music World Online electronic resource.
Parameters:
- `marc_record`: MARC record
"""
if not self.resolved_baseurl:
self.getResolvedURL(marc_record)
marc_record = self.validate007(marc_record)
marc_record = self.validate245(marc_record)
marc_record = self.remove440(marc_record)
marc_record = self.remove490(marc_record)
marc_record = self.validate506(marc_record)
marc_record = self.validate533(marc_record)
marc_record = self.validateURLs(marc_record)
marc_record = self.validate710(marc_record)
marc_record = self.validate730(marc_record,
"The Garland Encyclopedia of World Music Online")
marc_record = self.remove830(marc_record)
return marc_record
def validateURLs(self,marc_record):
"""
Validates 856 fields specifically for various types of Alexander
Street Press music databases.
Parameters:
- `marc_record`: MARC record, required
"""
all856s = marc_record.get_fields('856')
proxy_location = "0-glnd.alexanderstreet.com.tiger.coloradocollege.edu"
for field856 in all856s:
raw_url = urlparse.urlparse(field856.get_subfields('u')[0])
record_id = raw_url.query.split(";")[1]
new_url = "%s%s" % (self.resolved_baseurl,record_id)
field856.delete_subfield('u')
field856.add_subfield('u',new_url)
return self.processURLs(marc_record=marc_record,
proxy_location=proxy_location)
```
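A short usage sketch mirroring how `marc/views.py` drives these bots: instantiate with a MARC file, call `load()`, then write out `to_text()`. The paths are placeholders, and the views pass an uploaded file object rather than a path.
```python
# Hypothetical run of the Black Drama bot; load() pushes each record through
# processRecord() and to_text() returns the modified MARC records as text.
bot = BlackDramaBot(marc_file='asp-black-drama.mrc')   # placeholder input
bot.load()
with open('asp-black-drama-modified.mrc', 'wb') as output:
    output.write(bot.to_text())
```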
#### File: marc/bots/awbots.py
```python
__author__ = '<NAME>'
from marcbots import MARCImportBot
from pymarc import Field
PROXY_LOCATION='0-www.americanwest.amdigital.co.uk.tiger.coloradocollege.edu'
class AmericanWestBot(MARCImportBot):
"""
The `AmericanWestBot` reads MARC records from
American West Database, validates adds/modify fields
for a new import MARC file for loading into TIGER
"""
__name__ = 'American West Bot'
def __init__(self,
marc_file):
"""
Initializes `AmericanWestBot` for conversion
process.
:param marc_file: MARC file
"""
MARCImportBot.__init__(self,marc_file)
def processRecord(self,
marc_record):
"""
Method processes a single marc_record for American West
MARC.
        :param marc_record: MARC record
"""
marc_record = self.validate001(marc_record)
marc_record = self.validate003(marc_record)
marc_record = self.validate006(marc_record)
marc_record = self.replace007(marc_record)
marc_record = self.validate490(marc_record)
marc_record = self.processURLs(marc_record,
proxy_location=PROXY_LOCATION)
marc_record = self.validate710(marc_record)
marc_record = self.validate730(marc_record)
marc_record = self.validate830(marc_record)
return marc_record
def validate001(self,marc_record):
"""
Method replaces AC prefix with AMP prefix for Prospector compatibility.
        :param marc_record: MARC record
"""
field001 = marc_record.get_fields('001')[0]
marc_record.remove_field(field001)
raw_data = field001.data
field001.data = raw_data.replace('AC','AMP')
marc_record.add_field(field001)
return marc_record
def validate003(self,marc_record):
"""
Validates 003 field, adds control code.
        :param marc_record: MARC record
"""
marc_record = self.__remove_field__(marc_record=marc_record,
tag='003')
new003 = Field(tag='003',
data='COC')
marc_record.add_field(new003)
return marc_record
def validate490(self,marc_record):
"""
Method removes all existing 490 fields.
        :param marc_record: MARC record
"""
all490s = marc_record.get_fields('490')
for field in all490s:
marc_record.remove_field(field)
return marc_record
def validate710(self,
marc_record):
"""
Method validates/adds 710 fields
        :param marc_record: MARC record
"""
all710s = marc_record.get_fields('710')
for field in all710s:
marc_record.remove_field(field)
first710 = Field(tag='710',
indicators=['2',' '],
subfields=['a','Newberry Library.'])
marc_record.add_field(first710)
new710 = Field(tag='710',
indicators=['2',' '],
subfields=['a','<NAME> Digital (Firm)'])
marc_record.add_field(new710)
return marc_record
def validate730(self,marc_record):
"""
Method validates 730 with American West desired text.
        :param marc_record: MARC record
"""
self.__remove_field__(marc_record=marc_record,
tag='730')
field730 = Field(tag='730',
indicators=['0',' '],
subfields=['a','American West (Online Publications)'])
marc_record.add_field(field730)
return marc_record
def validate830(self,marc_record):
"""
Method removes all existing 830 fields.
        :param marc_record: MARC record
"""
all830s = marc_record.get_fields('830')
for field in all830s:
marc_record.remove_field(field)
return marc_record
```
#### File: marc/bots/tests.py
```python
import re
digital_re = re.compile(r'digital\s?(file)?[.]?')
# Test 300b values
test_300b = 'sd., col., digital file +'
test2_300b = 'sd., b&w., digital file.'
test3_300b = 'b&w., digital'
test4_300b = 'sd., col., digital.'
def validate300b(raw_string):
good_300b = '%s, %s' % ('digital',digital_re.sub('',raw_string))
last_char = good_300b[-1]
if last_char == '+':
if good_300b[-3] == ',':
good_300b = good_300b[:-4] + ' + '
else:
good_300b = good_300b + ' '
elif last_char == ',':
good_300b = good_300b[:-1]
if good_300b[-1] != '.':
good_300b += '.'
return good_300b
```
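The sample strings above can be pushed through `validate300b` directly; this loop just prints the normalized value for each case.
```python
# Run the four sample 300 $b values through validate300b and show the results.
for sample in (test_300b, test2_300b, test3_300b, test4_300b):
    print('%r -> %r' % (sample, validate300b(sample)))
```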
#### File: apps/marc/views.py
```python
__author__ = '<NAME>, <NAME>'
import logging,zlib,datetime,os
from django import forms
from django.views.generic.simple import direct_to_template
from django.shortcuts import render_to_response
from django.http import Http404,HttpResponseRedirect,HttpResponse
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.servers.basehttp import FileWrapper
from django.template import RequestContext
from forms import MARCRecordUploadForm,RecordLoadLogForm,NotesForm,UpdateRecordLoadLogForm
from models import RecordLoadLog
# Imports Bots
from bots.aspbots import AlexanderStreetPressMusicBot,BlackDramaBot
from bots.aspbots import WomenSocialMovementsBot,GarlandEWMOBot
from bots.awbots import AmericanWestBot
from bots.eccobots import ECCOBot
from bots.galebots import GVRLBot
from bots.gutenbergbots import ProjectGutenbergBot
from bots.opbots import OxfordHandbooksOnlineBot,OxfordReferenceOnlineBot
from bots.springerbots import SpringerEBookBot
from bots.video_bots import FilmsOnDemand
active_bots = [AlexanderStreetPressMusicBot,
AmericanWestBot,
BlackDramaBot,
ECCOBot,
GarlandEWMOBot,
GVRLBot,
FilmsOnDemand,
OxfordHandbooksOnlineBot,
OxfordReferenceOnlineBot,
ProjectGutenbergBot,
SpringerEBookBot,
WomenSocialMovementsBot]
def default(request):
"""Default view for MARC utilities Django application
"""
history = RecordLoadLog.objects.all()
return direct_to_template(request,
'marc/index.html',
{'active_bots':[bot.__name__ for bot in active_bots],
'history':history})
def download(request):
"""Download modified MARC21 file"""
log_pk = request.session['log_pk']
record_log = RecordLoadLog.objects.get(pk=log_pk)
    file_wrapper = FileWrapper(open(record_log.modified_file.path, 'rb'))
response = HttpResponse(file_wrapper,content_type='text/plain')
filename = os.path.split(record_log.modified_file.path)[1]
response['Content-Disposition'] = 'attachment; filename=%s' % filename
response['Content-Length'] = os.path.getsize(record_log.modified_file.path)
return response
def process(request):
"""Takes form submission and runs bots on uploaded
form."""
if request.method != 'POST':
raise Http404
else:
record_log_form = RecordLoadLogForm(request.POST,request.FILES)
bot_name,bot = request.POST['bot'],None
for active_bot in active_bots:
if active_bot.__name__ == bot_name:
if request.POST.has_key('databases'):
bot = active_bot(marc_file=request.FILES['original_file'],
type_of=request.POST['databases'])
else:
bot = active_bot(marc_file=request.FILES['original_file'])
if not bot:
raise Http404
bot.load()
if not record_log_form.is_valid():
logging.error("Errors = %s" % record_log_form.errors)
record_log = record_log_form.save()
record_log.process_id = bot_name
record_log.save()
mod_filename = '%s-%s.mrc' % (datetime.datetime.today().strftime("%Y-%m-%d"),
record_log.process_id.replace('Bot',''))
#record_log.modified_file_content = File(mod_filename)
#record_log.modified_file_content.write(bot.to_text())
record_log.modified_file.save(mod_filename,ContentFile(bot.to_text()))
request.session['log_pk'] = record_log.pk
note_form = NotesForm(request.POST)
note_form.record_load_log_id = record_log.pk
if note_form.is_valid():
note_form.save()
else:
logging.error("Note form is not valid %s" % note_form.errors)
return HttpResponseRedirect("/marc/update")
#return HttpResponse('IN MARC BOT process %s' % record_log.pk)
def record_load(request,bot_name):
"""Record load view displays the `MARCRecordUploadForm` for a
particular MARC record load.
:param bot_name: Name of bot, required"""
bot_names = [bot.__name__ for bot in active_bots]
is_active = bot_names.count(bot_name)
if is_active < 1:
raise Http404
marc_form = RecordLoadLogForm()
note_form = NotesForm()
for bot in active_bots:
if bot.__name__ == bot_name and hasattr(bot,'DATABASES'):
bot_choices = []
for k,v in getattr(bot,'DATABASES').iteritems():
bot_choices.append((v['code'],k))
marc_form.fields['databases']=forms.ChoiceField(label='Select database',
required=False,
choices=bot_choices)
return direct_to_template(request,
'marc/index.html',
{'active_bots':[bot.__name__ for bot in active_bots],
'live_bot':bot_name,
'download':None,
'history':RecordLoadLog.objects.all(),
'marc_form':marc_form,
'note_form':note_form})
def search(request):
"""Search takes a query string and searches the
RecordLoadLog and docstrings of Bot classes.
"""
return HttpResponse("IN MARC search")
def update_log(request):
"""Displays download link and update log form after
successful bot processing."""
log_pk = request.session['log_pk']
marc_form = UpdateRecordLoadLogForm()
note_form = NotesForm()
download = True
return direct_to_template(request,
'marc/index.html',
{'active_bots':[bot.__name__ for bot in active_bots],
'live_bot':None,
'download':download,
'history':RecordLoadLog.objects.all(),
'marc_form':marc_form,
'note_form':note_form})
```
#### File: vendors/iii/views.py
```python
__author__ = '<NAME>'
import csv,datetime,logging
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth import authenticate,login
from django.views.generic.simple import direct_to_template
from vendors.iii.models import Fund,FundProcessLog
from vendors.iii.forms import CSVUploadForm,PatronLoginForm
from vendors.iii.bots.iiibots import FundBot
from vendors.iii.backends import IIIUserBackend
def csv(request):
"""
Displays a simple form with a file-upload field. If POST,
    takes a CSV of order records and replaces each occurrence
    of a Fund code with its full fund value before returning the
    modified CSV file for download.
"""
if request.method == 'POST':
form = CSVUploadForm(request.POST, request.FILES)
if form.is_valid():
fund_bot = FundBot(csv_file=request.FILES['csv_file'])
filename = '%s-banner-iii.csv' % datetime.datetime.today().strftime('%Y-%m-%d')
response = HttpResponse(mimetype="text/csv")
response['Content-Disposition'] = 'attachment; filename=%s' % filename
csv_response = fund_bot.process(response)
new_log = FundProcessLog(substitutions=int(fund_bot.substitutions))
new_log.save()
return csv_response
else:
return direct_to_template(request,
'vendors/iii/csv.html',
{'form':CSVUploadForm()})
def patron_login(request):
"""
Processes login request posted from form, if redirect url exists,
redirect's user.
"""
if request.REQUEST.has_key('next'):
redirect_url = request.REQUEST['next']
else:
redirect_url = None
if request.method == 'POST':
last_name = request.POST['last_name']
iii_patron_id = request.POST['iii_patron_id']
try:
user = authenticate(last_name=last_name,iii_id=iii_patron_id)
except KeyError:
user = None
if user is None:
logging.error("User is none, should return login page")
return direct_to_template(request,
'vendors/iii/login.html',
{'form':PatronLoginForm(),
'next':redirect_url,
'msg':'Invalid login, please try again'})
else:
login(request,user)
logging.error("%s logging in user:%s is_authenticated=%s" % (user.backend,user,request.user.is_authenticated()))
logging.error("Request Session key is %s id is %s" % (request.session['_auth_user_id'],user.id))
setattr(request,'user',user)
if redirect_url is not None:
return HttpResponseRedirect(redirect_url)
else:
return HttpResponseRedirect("/")
else:
if request.GET.has_key('next'):
redirect = request.GET['next']
else:
redirect = None
return direct_to_template(request,
'vendors/iii/login.html',
{'form':PatronLoginForm(),
'next':redirect,
'msg':'Please login with TIGER number'})
def index(request):
"""
Displays list of utilities for III
"""
fund_logs = FundProcessLog.objects.all()
activity_logs = []
for log in fund_logs:
activity_logs.append({'activity_date':log.created_on,
                              'description': '%s fund codes substituted' % log.substitutions})
logging.error("Number of activity logs:%s" % len(activity_logs))
utilities = [{'name':'csv',
'label':'Expand Fund codes to values',
'description':'''Takes order records in CSV format, replaces Fund codes with
expanded Fund numeric values'''}]
return direct_to_template(request,
'vendors/iii/index.html',
{'activity_log':activity_logs,
'utilities':utilities})
```
|
{
"source": "jermnelson/FRBR-Redis-Datastore",
"score": 2
}
|
#### File: call_number/templatetags/call_number_extras.py
```python
__author__ = '<NAME>'
import settings,redis,urllib2
from django.template import Context,Library,loader
from django.utils import simplejson as json
from django.utils.safestring import mark_safe
register = Library()
def google_book_display(isbn):
"""
Calls and generates HTML for Call Number Widget
browser.
:param isbn: Numeric ISBN of Book
:rtype: Generated HTML or None
"""
try:
book_json = json.load(urllib2.urlopen(settings.GBS_BASE_URL % isbn))
gbs_template = loader.get_template('google-book.html')
for item in book_json["items"]:
if item['volumeInfo'].has_key('imageLinks'):
params = {'item':item,
'gbs_preview_url':settings.GBS_PREVIEW_URL}
return mark_safe(gbs_template.render(Context(params)))
except:
return ''
register.filter('google_book_display',google_book_display)
```
#### File: FRBR-Redis-Datastore/lib/dc.py
```python
__author__ = '<NAME>'
import datetime,os,logging
import redis,urllib2,common
from lxml import etree
import namespaces as ns
DC_RDF_URL = 'http://metadataregistry.org/schema/show/id/2.rdf'
def load_rdf(rdf_url=DC_RDF_URL):
raw_dc_rdf = urllib2.urlopen(rdf_url).read()
dc_rdf = etree.XML(raw_dc_rdf)
all_description_elements = dc_rdf.findall('{%s}Description' % ns.RDF)
for element in all_description_elements:
if element.attrib.has_key('{%s}about' % ns.RDF):
about_url = element.attrib['{%s}about' % ns.RDF]
label = element.find('{%s}label' % ns.RDFS)
definition = element.find('{%s}definition' % ns.SKOS)
redis_key = common.create_key_from_url(about_url)
if label is not None:
# Create an empty list for each redis_key and
# sets a definition from skos definition element
common.redis_server.set("%s:label" % redis_key,
label.text)
common.redis_server.set("%s:definition" % redis_key,
definition.text)
print("Finished adding %s with key %s" % (label.text,redis_key))
```
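A minimal round-trip sketch: load the vocabulary, then read one term's label and definition back from Redis. The term URI is illustrative and may not match the registry's identifiers.
```python
# Hypothetical round trip against the label/definition keys set above.
load_rdf()
redis_key = common.create_key_from_url('http://purl.org/dc/terms/title')  # example URI
print(common.redis_server.get('%s:label' % redis_key))
print(common.redis_server.get('%s:definition' % redis_key))
```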
#### File: FRBR-Redis-Datastore/lib/marc21.py
```python
__author__ = '<NAME>'
import common,pymarc,sys,time
from redisco import models,connection_setup
connection_setup(host=common.REDIS_HOST,
port=common.REDIS_PORT)
MARC21_006_RDF_URL = 'http://metadataregistry.org/vocabulary/show/id/211.rdf'
def load_form_of_material(rdf_url=MARC21_006_RDF_URL):
common.load_rdf_skos('info.marc21rdf/terms/formofmaterial',rdf_url)
class MARC21Subfield(models.Model):
"""
MARC Subfield in the Redis datastore
"""
code = models.Attribute()
value = models.Attribute()
class MARC21Field(models.Model):
"""
Basic MARC Field in the Redis datastore
"""
tag = models.Attribute()
data = models.Attribute()
indicators = models.ListField(str)
subfields = models.ListField(MARC21Subfield)
class MARC21Record(models.Model):
"""
Basic MARC Record in the Redis datastore
"""
marc_fields = models.ListField(MARC21Field)
leader = models.Attribute()
def load_marc21(marc_record):
"""
Loads a MARC21 record into Redis datastore
"""
redis_marc21 = MARC21Record(leader=marc_record.leader)
for field in marc_record.fields:
new_field = MARC21Field(tag=field.tag)
# Tests to see if field data (assumes is a control field)
if hasattr(field,'data'):
new_field.data = field.data
# Tests to see if field has subfields and indicators
if hasattr(field,'subfields'):
for i,v in enumerate(field.subfields):
if not i%2:
code = v
else:
try:
new_subfield = MARC21Subfield(code=code,
value=v)
new_subfield.save()
new_field.subfields.append(new_subfield)
except UnicodeDecodeError:
print("UnicodeDecodeError unable to save subfield %s for tag %s" %\
(code,field.tag))
if hasattr(field,'indicators'):
new_field.indicators = field.indicators
new_field.save()
redis_marc21.marc_fields.append(new_field)
redis_marc21.save()
return redis_marc21
def benchmark(reader,num_rec):
"""
Function benchmarks the loading of MARC21 records using these classes into
the FRBR-Redis datastore, returns a dict of results
:param reader: pymarc reader of MARC21 records
    :param num_rec: Number of MARC21 records to load
"""
time1 = time.time()
for i,record in enumerate(reader):
if i >= num_rec:
break
else:
load_marc21(record)
if i % 1000:
sys.stderr.write(".")
else:
sys.stderr.write(str(i))
time2 = time.time()
return {'MARC21 records':num_rec,
'Started':time1,
'Ended':time2,
'Duration':time2-time1}
```
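A brief sketch of running the benchmark over a local MARC dump; the path and record count are placeholders.
```python
# Hypothetical benchmark of loading the first 10,000 records into Redis.
reader = pymarc.MARCReader(open('catalog-dump.mrc', 'rb'))  # placeholder path
results = benchmark(reader, 10000)
print('Loaded %(MARC21 records)s records in %(Duration).2f seconds' % results)
```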
#### File: FRBR-Redis-Datastore/lib/mods.py
```python
__author__ = '<NAME>'
import datetime,os,logging
from redisco import models,connection_setup
import redis,urllib2,common
from lxml import etree
import namespaces as ns
def set_attributes(xml_element,
redis_mods):
"""
Helper function take XML element and Redis MODS object
and iterites through all of the XML's attributes and
checks and sets any existing attributes in the Redis
MODS object
:param xml_element: XML element
:param redis_mods: Redis MODS object
"""
for attribute,value in xml_element.attrib.iteritems():
if hasattr(redis_mods,attribute):
setattr(redis_mods,attribute,value)
if xml_element.attrib.has_key('type') and hasattr(redis_mods,'mods_type'):
setattr(redis_mods,'mods_type',xml_element.attrib['type'])
redis_mods.save()
class baseMODS(models.Model):
"""
base MODS class contains common attributes used by most elements in
MODS schema
"""
lang = models.Attribute()
script = models.Attribute()
transliteration = models.Attribute()
xml_lang = models.Attribute()
class baseMODSDate(models.Model):
"""
base MODS date class contains common attributes used by date elements
in MODS schema
"""
encoding = models.Attribute()
point = models.Attribute()
keyDate = models.Attribute()
qualifier = models.Attribute()
class abstract(baseMODS):
"""
abstract MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
displayLabel = models.Attribute()
mods_type = models.Attribute()
shareable = models.Attribute(default="no")
value_of = models.Attribute()
xlink = models.Attribute()
def load_xml(self,
abstract_element):
"""
Method takes a MODS element and sets Redis attributes
:param abstract_element: abstract XML element
"""
set_attributes(abstract_element,self)
self.value_of = abstract_element.text
self.save()
class affiliation(baseMODS):
"""
affiliation MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
affiliation_element):
"""
Method takes a MODS element and sets Redis datastore
:param affiliation_element: affiliation XML element
"""
set_attributes(affiliation_element,self)
self.value_of = affiliation_element.text
self.save()
class classification(baseMODS):
"""
class classification MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
authority = models.Attribute()
authorityURI = models.Attribute()
displayLabel = models.Attribute()
edition = models.Attribute()
usage = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
classification_element):
"""
Method takes MODS element and sets Redis datastore values
:param date_captured_element: dateCaptured XML element
"""
set_attributes(classification_element,self)
self.value_of = classification_element.text
self.save()
class date(baseMODSDate):
"""
date MDOS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
date_element):
"""
Method takes MODS element and sets Redis datastore values
:param date_captured_element: dateCaptured XML element
"""
set_attributes(date_element,self)
self.value_of = date_element.text
self.save()
class dateCaptured(baseMODSDate):
"""
dateCaptured MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
date_captured_element):
"""
Method takes MODS element and sets Redis datastore values
:param date_captured_element: dateCaptured XML element
"""
set_attributes(date_captured_element,self)
self.value_of = date_captured_element.text
self.save()
class dateCreated(baseMODSDate):
"""
dateCreated MODS element in Redis datastore
"""
keyDate = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
date_created_element):
"""
Method takes MODS element and sets Redis datastore values
:param date_created_element: dateCreated XML element
"""
set_attributes(date_created_element,self)
self.value_of = date_created_element.text
self.save()
class dateIssued(baseMODSDate):
"""
dateIssued MODS element in Redis datastore
"""
keyDate = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
date_issued_element):
"""
Method takes MODS element and sets Redis datastore values
:param date_issued_element: dateIssued XML element
"""
set_attributes(date_issued_element,self)
self.value_of = date_issued_element.text
self.save()
class description(baseMODS):
"""
description MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
description_element):
"""
Method takes a MODS element and sets Redis datastore
:param description_element: description XML element
"""
set_attributes(description_element,self)
self.value_of = description_element.text
self.save()
class digitalOrigin(baseMODS):
"""
digitalOrigin MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
digital_origin_element):
"""
Method takes a MODS element and sets Redis datastore
values
:param digital_origin_element: digitalOrigin XML element
"""
self.value_of = digital_origin_element.text
self.save()
class displayForm(baseMODS):
"""
displayForm MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
display_form_element):
"""
Method takes MODS element and sets Redis datastore
:param display_form_element: display_form_element
"""
set_attributes(display_form_element,self)
self.value_of = display_form_element.text
self.save()
class extent(baseMODS):
"""
extent MODS element in Redis datastore
"""
end = models.Attribute()
start = models.Attribute()
supplied = models.Attribute()
unit = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
extent_element):
"""
Method takes a MODS element and sets Redis datastore
values
:param extent_element: extent XML element
"""
set_attributes(extent_element,self)
if hasattr(extent_element,'text'):
self.value_of = extent_element.text
end_element = extent_element.find('{%s}end' % ns.MODS)
if end_element is not None:
self.end = end_element.text
start_element = extent_element.find('{%s}start' % ns.MODS)
if start_element is not None:
self.start = start_element.text
self.save()
class form(baseMODS):
"""
form MODS element in Redis datastore
"""
authority = models.Attribute()
authorityURI = models.Attribute()
mods_type = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
form_element):
"""
Method takes MODS xml element and updates values in Redis
datastore
:param form_element: form MODS element
"""
set_attributes(form_element,self)
self.value_of = form_element.text
self.save()
class genre(baseMODS):
"""
genre MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
authority = models.Attribute()
authorityURI = models.Attribute()
displayLabel = models.Attribute()
mods_type = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
genre_element):
"""
Method takes a MODS element and sets Redis attributes
:param genre_element: genre XML element
"""
set_attributes(genre_element,self)
self.value_of = genre_element.text
self.save()
class identifier(baseMODS):
"""
identifier MODS element in Redis datastore
"""
invalid = models.Attribute()
mods_type = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
identifier_element):
"""
Method takes a MODS element and sets Redis attributes
:param identifier_element: identifier XML element
"""
set_attributes(identifier_element,self)
self.value_of = identifier_element.text
self.save()
class url(models.Model):
"""
url MODS element in Redis datastore
"""
access = models.Attribute()
dateLastAccessed = models.Attribute()
displayLabel = models.Attribute()
note = models.Attribute()
usage = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
url_element):
"""
Method takes a MODS element and sets Redis attributes
:param url_element: url XML element
"""
set_attributes(url_element,self)
self.value_of = url_element.text
self.save()
class detail(models.Model):
"""
detail MODS element in Redis datastore
"""
caption = models.Attribute()
mods_type = models.Attribute()
number = models.Attribute()
def load_xml(self,
detail_element):
"""
Method takes a MODS element and sets Redis values
:param detail_element: detail XML element
"""
set_attributes(detail_element,self)
caption_element = detail_element.find('{%s}caption' % ns.MODS)
if caption_element is not None:
self.caption = caption_element.text
number_element = detail_element.find('{%s}number' % ns.MODS)
if number_element is not None:
self.number = number_element.text
self.save()
class languageTerm(models.Model):
"""
languageTerm MODS element in Redis datastore
"""
authority = models.Attribute()
authorityURI = models.Attribute()
mods_type = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
language_term_element):
"""
Method takes a MODS element and sets Redis attributes
:param language_term_element: languageTerm XML element
"""
set_attributes(language_term_element,self)
self.value_of = language_term_element.text
self.save()
class language(models.Model):
"""
language MODS element in Redis datastore
"""
languageTerms = models.ListField(languageTerm)
objectPart = models.Attribute()
def load_xml(self,
language_element):
"""
Method takes a MODS element and sets Redis attributes
:param language_element: language XML element
"""
set_attributes(language_element,self)
language_terms_elements = language_element.findall("{%s}languageTerm" % ns.MODS)
for element in language_terms_elements:
new_lang_term = languageTerm()
new_lang_term.load_xml(element)
self.languageTerms.append(new_lang_term)
self.save()
class location(baseMODS):
"""
location MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
displayLabel = models.Attribute()
urls = models.ListField(url)
def load_xml(self,
location_element):
"""
Method takes a MODS element and sets Redis attributes
:param location_element: location XML element
"""
set_attributes(location_element,self)
url_elements = location_element.findall('{%s}url' % ns.MODS)
for element in url_elements:
new_url = url()
new_url.load_xml(element)
self.urls.append(new_url)
self.save()
class namePart(baseMODS):
"""
    namePart MODS element in Redis datastore
"""
mods_type = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
namepart_element):
"""
Method takes a MODS element and sets Redis values
:param namepart_element: namePart XML element
"""
set_attributes(namepart_element,self)
self.value_of = namepart_element.text
self.save()
class roleTerm(baseMODS):
"""
roleTerm MODS element in Redis datastore
"""
authority = models.Attribute()
authorityURI = models.Attribute()
mods_type = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
role_term_element):
"""
Method takes roleTerm MODS element and sets Redis value
:param role_term_element: roleTerm XML element
"""
set_attributes(role_term_element,self)
self.value_of = role_term_element.text
self.save()
class role(baseMODS):
"""
role MODS element in Redis datastore
"""
mods_type = models.Attribute()
roleTerms = models.ListField(roleTerm)
def load_xml(self,
role_element):
"""
Method takes MODS element and sets Redis values
:param role_element: role XML element
"""
set_attributes(role_element,self)
role_terms = role_element.findall('{%s}roleTerm' % ns.MODS)
for element in role_terms:
new_role_term = roleTerm()
new_role_term.load_xml(element)
self.roleTerms.append(new_role_term)
self.save()
class name(baseMODS):
"""
name MODS element in Redis datastore
"""
affiliations = models.ListField(affiliation)
altRepGroup = models.Attribute()
authority = models.Attribute()
authorityURI = models.Attribute()
descriptions = models.ListField(description)
displayForms = models.ListField(displayForm)
displayLabel = models.Attribute()
mods_ID = models.Attribute()
mods_type = models.Attribute()
nameParts = models.ListField(namePart)
nameTitleGroup = models.Attribute()
roles = models.ListField(role)
usage = models.Attribute()
valueURI = models.Attribute()
xlink = models.Attribute()
def load_xml(self,
name_element):
"""
Method takes a MODS element and sets Redis values in datastore
:param name_element: name XML element
"""
set_attributes(name_element,self)
affiliation_elements = name_element.findall('{%s}affiliation' % ns.MODS)
for element in affiliation_elements:
new_affiliation = affiliation()
            new_affiliation.load_xml(element)
            self.affiliations.append(new_affiliation)
description_elements = name_element.findall('{%s}description' % ns.MODS)
for element in description_elements:
new_description = description()
new_description.load_xml(element)
self.descriptions.append(new_description)
display_form_elements = name_element.findall('{%s}displayForm' % ns.MODS)
for element in display_form_elements:
new_display_form = displayForm()
new_display_form.load_xml(element)
self.displayForms.append(new_display_form)
name_part_elements = name_element.findall('{%s}namePart' % ns.MODS)
for element in name_part_elements:
new_name_part = namePart()
new_name_part.load_xml(element)
self.nameParts.append(new_name_part)
role_elements = name_element.findall('{%s}role' % ns.MODS)
for element in role_elements:
new_role = role()
new_role.load_xml(element)
self.roles.append(new_role)
self.save()
class note(baseMODS):
"""
note MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
displayLabel = models.Attribute()
mods_ID = models.Attribute()
mods_type = models.Attribute()
value_of = models.Attribute()
xlink = models.Attribute()
def load_xml(self,
note_element):
"""
Method takes MODS xml and updates values in Redis datastores
based on XML values
:param note_element: note MODS XML element
"""
set_attributes(note_element,self)
self.value_of = note_element.text
self.save()
class placeTerm(baseMODS):
"""
placeTerm MODS element in Redis datastore
"""
authority = models.Attribute()
authorityURI = models.Attribute()
mods_type = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
place_term_element):
"""
Method takes MODS xml element and updates values in Redis
datastore
        :param place_term_element: placeTerm MODS element
"""
set_attributes(place_term_element,self)
self.value_of = place_term_element.text
self.save()
class place(baseMODS):
"""
place MODS element in Redis datastore
"""
placeTerms = models.ListField(placeTerm)
def load_xml(self,
place_element):
"""
Method takes MODS xml element and updates values in Redis
datastore
:param place_element: place MODS element
"""
set_attributes(place_element,self)
placeterm_elements = place_element.findall('{%s}placeTerm' % ns.MODS)
for element in placeterm_elements:
new_place_term = placeTerm()
new_place_term.load_xml(element)
self.placeTerms.append(new_place_term)
self.save()
class publisher(baseMODS):
"""
    publisher MODS element in Redis datastore
"""
supplied = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
publisher_element):
"""
Method takes MODS xml element and updates values in Redis
datastore
:param publisher_element: publisher MODS element
"""
set_attributes(publisher_element,self)
self.value_of = publisher_element.text
self.save()
class reformattingQuality(models.Model):
"""
reformattingQuality MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
reformatting_quality_element):
"""
Method takes MODS xml element and updates values in Redis datastore
:param reformatting_quality_element: reformattingQuality MODS element
"""
self.value_of = reformatting_quality_element.text
self.save()
class targetAudience(baseMODS):
"""
targetAudience MODS element in Redis datastore
"""
authority = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
target_audience_element):
"""
Method takes MODS xml element and updates values in Redis datastore
:param target_audience_element: targetAudience MODS element
"""
set_attributes(target_audience_element,self)
self.value_of = target_audience_element.text
self.save()
class originInfo(baseMODS):
"""
originInfo MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
dateCaptured = models.ReferenceField(dateCaptured)
dateCreated = models.ReferenceField(dateCreated)
dateIssueds = models.ListField(dateIssued)
displayLabel = models.Attribute()
issuance = models.Attribute()
places = models.ListField(place)
publishers = models.ListField(publisher)
def load_xml(self,
origin_info_element):
"""
Method takes MODS xml element and updates values in Redis
datastore
:param origin_info_element: originInfo MODS element
"""
set_attributes(origin_info_element,self)
dateCaptured_element = origin_info_element.find('{%s}dateCaptured' % ns.MODS)
if dateCaptured_element is not None:
new_date_captured = dateCaptured()
new_date_captured.load_xml(dateCaptured_element)
self.dateCaptured = new_date_captured
dateCreated_element = origin_info_element.find('{%s}dateCreated' % ns.MODS)
if dateCreated_element is not None:
new_date_created = dateCreated()
new_date_created.load_xml(dateCreated_element)
self.dateCreated = new_date_created
dateIssued_elements = origin_info_element.findall('{%s}dateIssued' % ns.MODS)
for element in dateIssued_elements:
new_date_issued = dateIssued()
new_date_issued.load_xml(element)
self.dateIssueds.append(new_date_issued)
issuance_element = origin_info_element.find('{%s}issuance' % ns.MODS)
if issuance_element is not None:
self.issuance = issuance_element.text
place_elements = origin_info_element.findall('{%s}place' % ns.MODS)
for element in place_elements:
new_place = place()
new_place.load_xml(element)
self.places.append(new_place)
publishers = origin_info_element.findall('{%s}publisher' % ns.MODS)
for element in publishers:
new_publisher = publisher()
new_publisher.load_xml(element)
self.publishers.append(new_publisher)
self.save()
class part(models.Model):
"""
part MODS element in Redis datastore
"""
dates = models.ListField(date)
details = models.ListField(detail)
extents = models.ListField(extent)
mods_ID = models.Attribute()
mods_type = models.Attribute()
def load_xml(self,
part_element):
"""
Method takes MODS xml element and updates values in Redis
datastore
:param part_element: part MODS element
"""
set_attributes(part_element,self)
date_elements = part_element.findall('{%s}date' % ns.MODS)
for element in date_elements:
new_date = date()
new_date.load_xml(element)
self.dates.append(new_date)
detail_elements = part_element.findall('{%s}detail' % ns.MODS)
for element in detail_elements:
new_detail = detail()
new_detail.load_xml(element)
self.details.append(new_detail)
extent_elements = part_element.findall('{%s}extent' % ns.MODS)
for element in extent_elements:
new_extent = extent()
new_extent.load_xml(element)
self.extents.append(new_extent)
self.save()
class physicalDescription(baseMODS):
"""
physicalDescription MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
digitalOrigin = models.ReferenceField(digitalOrigin)
displayLabel = models.Attribute()
extents = models.ListField(extent)
forms = models.ListField(form)
reformattingQualities = models.ListField(reformattingQuality)
def load_xml(self,
physical_description_element):
"""
Method takes MODS xml element and updates values in Redis
datastore
:param physical_description_element: physicalDescription MODS element
"""
set_attributes(physical_description_element,self)
digital_orig_element = physical_description_element.find('{%s}digitalOrigin' % ns.MODS)
if digital_orig_element is not None:
new_dig_org = digitalOrigin()
new_dig_org.load_xml(digital_orig_element)
self.digitalOrigin = new_dig_org
extent_elements = physical_description_element.findall('{%s}extent' % ns.MODS)
for element in extent_elements:
new_extent = extent()
new_extent.load_xml(element)
self.extents.append(new_extent)
form_elements = physical_description_element.findall('{%s}form' % ns.MODS)
for element in form_elements:
new_form = form()
new_form.load_xml(element)
self.forms.append(new_form)
reformatting_elements = physical_description_element.findall('{%s}reformattingQuality' % ns.MODS)
for element in reformatting_elements:
new_reformat_quality = reformattingQuality()
new_reformat_quality.load_xml(element)
self.reformattingQualities.append(new_reformat_quality)
self.save()
class geographic(baseMODS):
"""
    geographic MODS element in Redis datastore
"""
authority = models.Attribute()
authorityURI = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
geographic_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param geographic_element: geographic XML element
"""
set_attributes(geographic_element,self)
self.value_of = geographic_element.text
self.save()
class temporal(baseMODS):
"""
    temporal MODS element in Redis datastore
"""
authority = models.Attribute()
authorityURI = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
temporal_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param temporal_element: temporal XML element
"""
set_attributes(temporal_element,self)
self.value_of = temporal_element.text
self.save()
class topic(baseMODS):
"""
    topic MODS element in Redis datastore
"""
authority = models.Attribute()
authorityURI = models.Attribute()
value_of = models.Attribute()
valueURI = models.Attribute()
def load_xml(self,
topic_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param topic_element: topic XML element
"""
set_attributes(topic_element,self)
self.value_of = topic_element.text
self.save()
class subject(baseMODS):
"""
subject MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
authority = models.Attribute()
authorityURI = models.Attribute()
displayLabel = models.Attribute()
genres = models.ListField(genre)
geographics = models.ListField(geographic)
mods_ID = models.Attribute()
names = models.ListField(name)
usage = models.Attribute()
temporals = models.ListField(temporal)
topics = models.ListField(topic)
valueURI = models.Attribute()
def load_xml(self,
subject_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param subject_element: subject XML element
"""
set_attributes(subject_element,self)
genre_elements = subject_element.findall('{%s}genre' % ns.MODS)
for element in genre_elements:
new_genre = genre()
new_genre.load_xml(element)
self.genres.append(new_genre)
geographic_elements = subject_element.findall('{%s}geographic' % ns.MODS)
for element in geographic_elements:
new_geographic = geographic()
new_geographic.load_xml(element)
self.geographics.append(new_geographic)
name_elements = subject_element.findall('{%s}name' % ns.MODS)
for element in name_elements:
new_name = name()
new_name.load_xml(element)
self.names.append(new_name)
temporal_elements = subject_element.findall('{%s}temporal' % ns.MODS)
for element in temporal_elements:
new_temporal = temporal()
new_temporal.load_xml(element)
self.temporals.append(new_temporal)
topic_elements = subject_element.findall('{%s}topic' % ns.MODS)
for element in topic_elements:
new_topic = topic()
new_topic.load_xml(element)
self.topics.append(new_topic)
self.save()
class subTitle(baseMODS):
"""
    subTitle MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
subtitle_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param subtitle_element: subTitle XML element
"""
set_attributes(subtitle_element,self)
self.value_of = subtitle_element.text
self.save()
class tableOfContents(baseMODS):
"""
tableOfContents MODS element in Redis datastore
"""
displayLabel = models.Attribute()
    mods_type = models.Attribute()
    value_of = models.Attribute()
xlink = models.Attribute()
xml_lang = models.Attribute()
def load_xml(self,
toc_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
        :param toc_element: tableOfContents XML element
"""
set_attributes(toc_element,self)
self.value_of = toc_element.text
self.save()
class title(baseMODS):
"""
title MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
title_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param title_element: title XML element
"""
set_attributes(title_element,self)
self.value_of = title_element.text
self.save()
class titleInfo(baseMODS):
"""
titleInfo MODS element in Redis datastore
"""
altRepGroup = models.Attribute()
authority = models.Attribute()
authorityURI = models.Attribute()
displayLabel = models.Attribute()
mods_ID = models.Attribute()
mods_type = models.Attribute()
nonSort = models.Attribute()
partName = models.Attribute()
subTitles = models.ListField(subTitle)
supplied = models.Attribute()
title = models.ReferenceField(title)
valueURI = models.Attribute()
value_of = models.Attribute()
usage = models.Attribute()
def load_xml(self,
title_info_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param title_info_element: titleInfo XML element
"""
set_attributes(title_info_element,self)
title_element = title_info_element.find('{%s}title' % ns.MODS)
if title_element is not None:
new_title = title()
new_title.load_xml(title_element)
self.title = new_title
nonsort_element = title_info_element.find('{%s}nonSort' % ns.MODS)
if nonsort_element is not None:
self.nonSort = nonsort_element.text
partname_element = title_info_element.find('{%s}partName' % ns.MODS)
if partname_element is not None:
self.partName = partname_element.text
subTitles = title_info_element.findall('{%s}subTitle' % ns.MODS)
for element in subTitles:
new_subTitle = subTitle()
new_subTitle.load_xml(element)
self.subTitles.append(new_subTitle)
self.save()
class typeOfResource(baseMODS):
"""
typeOfResource MODS element in Redis datastore
"""
collection = models.Attribute()
manuscript = models.Attribute()
displayLabel = models.Attribute()
usage = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
type_of_resource_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param type_of_resource_element: typeOfResource XML element
"""
set_attributes(type_of_resource_element,self)
self.value_of = type_of_resource_element.text
self.save()
class recordChangeDate(baseMODSDate):
"""
recordChangeDate MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
record_change_date_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param record_change_date_element: recordChangeDate XML element
"""
set_attributes(record_change_date_element,self)
self.value_of = record_change_date_element.text
self.save()
class recordContentSource(models.Model):
"""
recordContentSource MODS element in Redis datastore
"""
authority = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
record_content_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
        :param record_content_element: recordContentSource XML element
"""
set_attributes(record_content_element,self)
self.value_of = record_content_element.text
self.save()
class recordCreationDate(baseMODSDate):
"""
recordCreationDate MODS element in Redis datastore
"""
value_of = models.Attribute()
def load_xml(self,
record_creation_date_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param record_creation_date_element: recordCreationDate XML element
"""
set_attributes(record_creation_date_element,self)
self.value_of = record_creation_date_element.text
self.save()
class recordIdentifier(models.Model):
"""
recordIdentifier MODS element in Redis datastore
"""
identifier = models.Attribute()
value_of = models.Attribute()
def load_xml(self,
record_identifier_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
        :param record_identifier_element: recordIdentifier XML element
"""
set_attributes(record_identifier_element,self)
self.value_of = record_identifier_element.text
self.save()
class recordInfo(baseMODS):
"""
recordInfo MODS element in Redis datastore
"""
recordContentSources = models.ListField(recordContentSource)
recordCreationDate = models.ReferenceField(recordCreationDate)
recordChangeDates = models.ListField(recordChangeDate)
recordIdentifiers = models.ListField(recordIdentifier)
def load_xml(self,
record_info_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param record_info_element: recordInfo XML element
"""
set_attributes(record_info_element,self)
rec_content_source_elements = record_info_element.findall('{%s}recordContentSource' % ns.MODS)
for element in rec_content_source_elements:
new_rec_content = recordContentSource()
            new_rec_content.load_xml(element)
self.recordContentSources.append(new_rec_content)
rec_creation_element = record_info_element.find('{%s}recordCreationDate' % ns.MODS)
if rec_creation_element is not None:
new_rec_creation = recordCreationDate()
new_rec_creation.load_xml(rec_creation_element)
self.recordCreationDate = new_rec_creation
rec_change_dates = record_info_element.findall('{%s}recordChangeDate' % ns.MODS)
for element in rec_change_dates:
new_change_date = recordChangeDate()
new_change_date.load_xml(element)
self.recordChangeDates.append(new_change_date)
rec_identifiers = record_info_element.findall('{%s}recordIdentifier' % ns.MODS)
for element in rec_identifiers:
new_identifier = recordIdentifier()
new_identifier.load_xml(element)
            self.recordIdentifiers.append(new_identifier)
self.save()
class relatedItem(baseMODS):
"""
relatedItem MODS element in Redis datastore
"""
mods_type = models.Attribute()
names = models.ListField(name)
originInfos= models.ListField(originInfo)
parts = models.ListField(part)
titleInfos = models.ListField(titleInfo)
def load_xml(self,
related_item_element):
"""
Method takes MODS xml and updates values in Redis datastore
based on XML values
:param related_item_element: relatedItem XML element
"""
set_attributes(related_item_element,self)
name_elements = related_item_element.findall('{%s}name' % ns.MODS)
for element in name_elements:
new_name = name()
new_name.load_xml(element)
self.names.append(new_name)
origin_info_elements = related_item_element.findall('{%s}originInfo' % ns.MODS)
for element in origin_info_elements:
new_origin_info = originInfo()
new_origin_info.load_xml(element)
self.originInfos.append(new_origin_info)
part_elements = related_item_element.findall('{%s}part' % ns.MODS)
for element in part_elements:
new_part = part()
new_part.load_xml(element)
self.parts.append(new_part)
titleInfos = related_item_element.findall('{%s}titleInfo' % ns.MODS)
for element in titleInfos:
new_titleInfo = titleInfo()
new_titleInfo.load_xml(element)
self.titleInfos.append(new_titleInfo)
self.save()
class mods(models.Model):
"""
Root MODS element in Redis datastore
"""
abstracts = models.ListField(abstract)
## accessCondition
classifications = models.ListField(classification)
## extension
genres = models.ListField(genre)
identifiers = models.ListField(identifier)
languages = models.ListField(language)
locations = models.ListField(location)
names = models.ListField(name)
notes = models.ListField(note)
originInfos= models.ListField(originInfo)
## part
physicalDescriptions = models.ListField(physicalDescription)
recordInfo = models.ReferenceField(recordInfo)
relatedItems = models.ListField(relatedItem)
subjects = models.ListField(subject)
tableOfContents = models.ListField(tableOfContents)
targetAudiences = models.ListField(targetAudience)
titleInfos = models.ListField(titleInfo)
typeOfResources = models.ListField(typeOfResource)
mods_ID = models.Attribute()
version = models.Attribute(default="3.4")
def load_xml(self,
mods_xml):
"""
Method takes MODS xml and updates values in Redis datastores
based on XML values
"""
abstract_elements = mods_xml.findall('{%s}abstract' % ns.MODS)
for element in abstract_elements:
new_abstract = abstract()
new_abstract.load_xml(element)
self.abstracts.append(new_abstract)
classification_elements = mods_xml.findall('{%s}classification' % ns.MODS)
for element in classification_elements:
new_classification = classification()
new_classification.load_xml(element)
self.classifications.append(new_classification)
genre_elements = mods_xml.findall('{%s}genre' % ns.MODS)
for element in genre_elements:
new_genre = genre()
new_genre.load_xml(element)
self.genres.append(new_genre)
identifier_elements = mods_xml.findall('{%s}identifier' % ns.MODS)
language_elements = mods_xml.findall('{%s}language' % ns.MODS)
for element in language_elements:
new_language = language()
new_language.load_xml(element)
self.languages.append(new_language)
for element in identifier_elements:
new_identifier = identifier()
new_identifier.load_xml(element)
self.identifiers.append(new_identifier)
location_elements = mods_xml.findall('{%s}location' % ns.MODS)
for element in location_elements:
new_location = location()
new_location.load_xml(element)
self.locations.append(new_location)
name_elements = mods_xml.findall('{%s}name' % ns.MODS)
for element in name_elements:
new_name = name()
new_name.load_xml(element)
self.names.append(new_name)
note_elements = mods_xml.findall('{%s}note' % ns.MODS)
for element in note_elements:
new_note = note()
new_note.load_xml(element)
self.notes.append(new_note)
origin_info_elements = mods_xml.findall('{%s}originInfo' % ns.MODS)
for element in origin_info_elements:
new_origin_info = originInfo()
new_origin_info.load_xml(element)
self.originInfos.append(new_origin_info)
physical_descriptions = mods_xml.findall('{%s}physicalDescription' % ns.MODS)
for element in physical_descriptions:
new_physical_desc = physicalDescription()
new_physical_desc.load_xml(element)
self.physicalDescriptions.append(new_physical_desc)
subjects = mods_xml.findall('{%s}subject' % ns.MODS)
for element in subjects:
new_subject = subject()
new_subject.load_xml(element)
self.subjects.append(new_subject)
relatedItems = mods_xml.findall('{%s}relatedItem' % ns.MODS)
for element in relatedItems:
new_relatedItem = relatedItem()
new_relatedItem.load_xml(element)
self.relatedItems.append(new_relatedItem)
targetAudiences = mods_xml.findall('{%s}targetAudience' % ns.MODS)
for element in targetAudiences:
new_target_audience = targetAudience()
new_target_audience.load_xml(element)
self.targetAudiences.append(new_target_audience)
titleInfos = mods_xml.findall('{%s}titleInfo' % ns.MODS)
toc_elements = mods_xml.findall('{%s}tableOfContents' % ns.MODS)
for element in toc_elements:
new_toc = tableOfContents()
new_toc.load_xml(element)
self.tableOfContents.append(new_toc)
for element in titleInfos:
new_titleInfo = titleInfo()
new_titleInfo.load_xml(element)
self.titleInfos.append(new_titleInfo)
type_of_resources = mods_xml.findall('{%s}typeOfResource' % ns.MODS)
for element in type_of_resources:
new_type_of_resource = typeOfResource()
new_type_of_resource.load_xml(element)
self.typeOfResources.append(new_type_of_resource)
self.save()
# EXAMPLE USAGE
##redis_key = "mods:%s" % redis_server.incr("global:mods")
##redis_server.hset(redis_key,"abstract","Atomic text")
##accessCondition_key = "%s:accessCondition:%s" % (redis_key,
## redis_server.incr("global:%s:accessCondition"))
##redis_server.hset(accessCondition_key,'creation',1969)
##redis_server.hset(redis_key,'accessCondition',accessCondition_key)
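# USAGE SKETCH (hypothetical): load a full MODS record with the mods class above.
# Assumes the redisco connection is configured via connection_setup and that
# sample_mods.xml is an illustrative file name, not part of this repository.
##from lxml import etree
##mods_xml = etree.parse('sample_mods.xml').getroot()
##record = mods()
##record.load_xml(mods_xml)
##for title_info in record.titleInfos:
##    print(title_info.title.value_of)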
```
#### File: tests/features/loc_3_usecases.py
```python
__author__ = "<NAME>"
from lettuce import *
import sys,os
import redis
from redisco import connection_setup
import lib.mods as mods
from lxml import etree
import __init__
import config
redis_server = redis.StrictRedis(host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_TEST_DB)
connection_setup(host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_TEST_DB)
mods_topic = '''<topic valueURI="http://id.loc.gov/authorities/subjects/sh85133490">Television and politics</topic>'''
@step("(\w+) from a bibliographic record")
def setup_data_or_uri(step,test_fixure):
"""
Function sets up test with either data or uri or both
:param test_fixure: data, URI, or both data and URI
"""
world.topic_xml = etree.XML(mods_topic)
@step("stores the (\w+)")
def store_data_or_uri(step,fixure_type):
"""
Function stores the data or uri in the Redis datastore
:param fixure_type: data, URI, or both
"""
world.topic = mods.topic()
world.topic.load_xml(world.topic_xml)
@step("(\w+) can be retrieved from the datastore")
def retrieve_data_or_uri_from_datastore(step,fixure_type):
"""
Function retrieves data or uri from Redis datastore
:param fixure_type: data, URI, or both
"""
if fixure_type == 'data':
assert world.topic.value_of == 'Television and politics'
elif fixure_type == 'URI':
assert world.topic.valueURI == 'http://id.loc.gov/authorities/subjects/sh85133490'
else:
assert False
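# For reference, a hypothetical lettuce scenario that the step definitions above
# would match (the actual .feature file is not included in this excerpt):
##Scenario: Store and retrieve a MODS topic
##  Given data from a bibliographic record
##  When the cataloger stores the data
##  Then data can be retrieved from the datastore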
```
#### File: FRBR-Redis-Datastore/tests/test_cidoc_crm.py
```python
__author__ = "<NAME>"
import unittest,redis,config
import lib.common as common
import lib.cidoc_crm as cidoc_crm
import lib.namespaces as ns
redis_server = redis.StrictRedis(host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_TEST_DB)
class TestConditionState(unittest.TestCase):
def setUp(self):
params = {}
self.condition_state = cidoc_crm.ConditionState(redis_server=redis_server)
def test_init(self):
self.assert_(self.condition_state.redis_ID)
def tearDown(self):
redis_server.flushdb()
class TestEvent(unittest.TestCase):
def setUp(self):
self.purpose_key = "cidoc crm:had specific purpose:%s" % redis_server.incr("global:cidoc crm:had specific purpose")
redis_server.set(self.purpose_key,"Holiday")
params = {'had specific purpose': self.purpose_key}
self.event = cidoc_crm.Event(redis_server=redis_server,**params)
def test_init(self):
self.assert_(self.event.redis_ID)
def test_had_specific_purpose(self):
purpose_key = getattr(self.event,'had specific purpose')
self.assertEquals(purpose_key,
self.purpose_key)
self.assertEquals(redis_server.get(purpose_key),
"Holiday")
def tearDown(self):
redis_server.flushdb()
class TestPeriod(unittest.TestCase):
def setUp(self):
params = {}
self.period = cidoc_crm.Period(redis_server=redis_server)
def test_init(self):
self.assert_(self.period.redis_ID)
def tearDown(self):
redis_server.flushdb()
class TestTemporalEntity(unittest.TestCase):
def setUp(self):
self.event_key = "cidoc crm:is time-span of:%s" % redis_server.incr("global:cidoc crm:is time-span of")
redis_server.set(self.event_key,"Yalta Conference")
params = {'is time-span of':self.event_key}
self.temporal_entity = cidoc_crm.TemporalEntity(redis_server=redis_server,
**params)
def test_init(self):
self.assert_(self.temporal_entity.redis_ID)
def test_is_time_span_of(self):
event_key = getattr(self.temporal_entity,
'is time-span of')
self.assertEquals(event_key,
self.event_key)
self.assertEquals(redis_server.get(event_key),
"Yalta Conference")
def tearDown(self):
redis_server.flushdb()
class TestCRMEntity(unittest.TestCase):
def setUp(self):
self.appellation_key = "cidoc crm:Appellation:%s" % redis_server.incr("global:cidoc crm:Appellation")
redis_server.set(self.appellation_key,"the Forth Bridge")
self.material_key = "cidoc crm:Material:%s" % redis_server.incr("global:cidoc crm:Material")
redis_server.set(self.material_key,"paper")
params = {'identifies':self.appellation_key,
'is type of':self.material_key}
self.crm_entity = cidoc_crm.CRMEntity(redis_server=redis_server,**params)
def test_init(self):
self.assert_(self.crm_entity.redis_ID)
def test_identifies(self):
self.assertEquals(self.appellation_key,
self.crm_entity.identifies)
def test_is_type_of(self):
material_key = getattr(self.crm_entity,"is type of")
self.assertEquals(self.material_key,
material_key)
self.assertEquals(redis_server.get(material_key),
"paper")
def tearDown(self):
redis_server.flushdb()
```
#### File: FRBR-Redis-Datastore/tests/test_frbr_rda_expression.py
```python
__author__ = '<NAME>'
import logging
import unittest,redis,config
import lib.common as common
import lib.frbr_rda as frbr_rda
import lib.namespaces as ns
redis_server = redis.StrictRedis(host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_TEST_DB)
class TestExpressionRDAGroup1Elements(unittest.TestCase):
def setUp(self):
self.accessibility_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.accessibility_content_key,"Test Expression Accessibility")
self.additional_scale_information_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.hset(self.additional_scale_information_key,
"type",
"source dimensions")
self.artistic_and_or_technical_credit_key = "frad:person:%s" % redis_server.incr("global:frad:person")
redis_server.hset(self.artistic_and_or_technical_credit_key,
"frad:family",
"Wallace")
self.aspect_ratio_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.aspect_ratio_key,"1:5")
self.award_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.award_key,"Awarded first place")
self.cataloguers_note_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.hset(self.cataloguers_note_key,"type","bibliographic history")
redis_server.hset(self.cataloguers_note_key,"value","Test Cataloguer's Note")
self.colour_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.colour_content_key,"256 Colors")
self.content_type_key = "mime:type:HTTP"
redis_server.set(self.content_type_key,"hypertext transfer protocol")
self.date_of_capture_key = "mods:dateCaptured:%s" % redis_server.incr("global:mods:dateCaptured")
redis_server.hset(self.date_of_capture_key,"year","1945")
self.date_of_expression_key = self.date_of_capture_key
self.duration_key = "mods:keyDate:%s" % redis_server.incr("global:mods:keyDate")
redis_server.hset(self.duration_key,
"start",
"1950")
redis_server.hset(self.duration_key,
"end",
"2010")
self.form_of_musical_notation_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.form_of_musical_notation_key,"modern staff notation")
self.form_of_notated_movement_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.form_of_notated_movement_key,"Eshkol-Wachman Movement Notation")
self.form_of_notation_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.form_of_notation_key,"Test Expression Form of Notation")
self.form_of_tactile_notation_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.format_of_notated_music_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.horizontal_scale_of_cartographic_content_key = "kvm:scale:%s" % redis_server.incr("global:kvm:scale")
redis_server.set(self.horizontal_scale_of_cartographic_content_key,"kilometers")
self.identifier_for_the_expression_key = "mods:identifier:%s" % redis_server.incr("global:mods:identifier")
self.illustrative_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.language_of_expression_key = "xml:lang:en"
self.language_of_the_content_key = self.language_of_expression_key
self.medium_of_performance_of_musical_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.medium_of_performance_of_musical_content_key,"Baritone (Musical instrument)")
self.other_details_of_cartographic_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.other_distinguishing_characteristic_of_the_expression_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.hset(self.other_distinguishing_characteristic_of_the_expression_key,
"type",
"source characteristics")
redis_server.hset(self.other_distinguishing_characteristic_of_the_expression_key,
"value",
"Test Source Characteristic")
self.performer_key = "frad:person:%s" % redis_server.incr("global:frad:person")
redis_server.set(self.performer_key,"Test Expression Perfomer")
self.scale_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.scale_of_still_image_or_three_dimensional_form_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.place_and_date_of_capture_key = "mods:originInfo:%s" % redis_server.incr("global:mods:originInfo")
self.place_of_capture_key = "mods:city:Colorado Springs"
redis_server.hset(self.place_and_date_of_capture_key,"place",self.place_of_capture_key)
redis_server.hset(self.place_and_date_of_capture_key,"mods:dateCaptured","2001")
self.projection_of_cartographic_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.script_key = "mods:note:%s" % redis_server.incr("global:mods:note")
self.scale_of_still_image_or_three_dimensional_form_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.scale_of_still_image_or_three_dimensional_form_key,"1:100,000")
self.sound_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.sound_content_key,"Test Sound Content for Expression")
self.source_consulted_key = "frbr:Work:%s" % redis_server.incr("global:frbr:Work")
redis_server.set(self.source_consulted_key,"Test Source Consulted for Expression")
self.summarization_of_the_content_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.summarization_of_the_content_key,"Test Expression Summary")
self.supplementary_content_key = "frbr:Expression:%s" % redis_server.incr("global:frbr:Expression")
self.vertical_scale_of_cartographic_content_key = "kvm:scale:%s" % redis_server.incr("global:kvm:scale")
redis_server.set(self.vertical_scale_of_cartographic_content_key,"meter")
params = {'Accessibility content (Expression)':self.accessibility_content_key,
'Additional scale information (Expression)':self.additional_scale_information_key,
'Artistic and/or technical credit (Expression)':self.artistic_and_or_technical_credit_key,
'Aspect ratio (Expression)':self.aspect_ratio_key,
'Award (Expression)':self.award_key,
"Cataloguer's note (Expression)":self.cataloguers_note_key,
'Colour content (Expression)':self.colour_content_key,
'Colour content of resource designed for persons with visual impairments (Expression)':"No",
'Colour of moving images (Expression)':"Multiple",
'Colour of still image (Expression)':["green","blue"],
'Colour of three-dimensional form (Expression)':"black",
'Content type (Expression)':self.content_type_key,
'Date of capture (Expression)':self.date_of_capture_key,
'Date of expression':self.date_of_expression_key,
'Duration (Expression)':self.duration_key,
'Form of musical notation (Expression)':self.form_of_musical_notation_key,
'Form of notated movement (Expression)':self.form_of_notated_movement_key,
'Form of notation (Expression)':self.form_of_notation_key,
'Form of tactile notation (Expression)':self.form_of_tactile_notation_key,
'Format of notated music (Expression)':self.format_of_notated_music_key,
'Horizontal scale of cartographic content (Expression)':self.horizontal_scale_of_cartographic_content_key,
'Identifier for the expression':self.identifier_for_the_expression_key,
'Illustrative content (Expression)':self.illustrative_content_key,
'Language of expression':self.language_of_expression_key,
'Language of the content (Expression)':self.language_of_the_content_key,
'Medium of performance of musical content (Expression)':self.medium_of_performance_of_musical_content_key,
'Other details of cartographic content (Expression)':self.other_details_of_cartographic_content_key,
'Other distinguishing characteristic of the expression':self.other_distinguishing_characteristic_of_the_expression_key,
'Performer, narrator, and/or presenter (Expression)':self.performer_key,
'Place and date of capture (Expression)':self.place_and_date_of_capture_key,
'Place of capture (Expression)':self.place_of_capture_key,
'Projection of cartographic content (Expression)':self.projection_of_cartographic_content_key,
'Scale (Expression)':self.scale_key,
'Scale of still image or three-dimensional form (Expression)':self.scale_of_still_image_or_three_dimensional_form_key,
'Script (Expression)':self.script_key,
'Sound content (Expression)':self.sound_content_key,
'Source consulted (Expression)':self.source_consulted_key,
'Status of identification (Expression)':"established",
'Summarization of the content (Expression)':self.summarization_of_the_content_key,
'Supplementary content (Expression)':self.supplementary_content_key,
'Vertical scale of cartographic content (Expression)':self.vertical_scale_of_cartographic_content_key}
self.expression = frbr_rda.Expression(redis_server=redis_server,
**params)
def test_init(self):
self.assert_(self.expression.redis_ID)
def test_accessibility_content(self):
accessibility_content_key = getattr(self.expression,
'Accessibility content (Expression)')
self.assertEquals(self.accessibility_content_key,
accessibility_content_key)
        self.assertEquals(redis_server.get(accessibility_content_key),
                          "Test Expression Accessibility")
def test_additional_scale_information(self):
additional_scale_information_key = getattr(self.expression,
'Additional scale information (Expression)')
self.assertEquals(additional_scale_information_key,
self.additional_scale_information_key)
self.assertEquals(redis_server.hget(additional_scale_information_key,
"type"),
"source dimensions")
def test_artistic_and_or_technical_credit(self):
artistic_and_or_technical_credit_key = getattr(self.expression,
'Artistic and/or technical credit (Expression)')
self.assertEquals(self.artistic_and_or_technical_credit_key,
artistic_and_or_technical_credit_key)
self.assertEquals(redis_server.hget(artistic_and_or_technical_credit_key,
"frad:family"),
"Wallace")
def test_aspect_ratio(self):
aspect_ratio_key = getattr(self.expression,
'Aspect ratio (Expression)')
self.assertEquals(aspect_ratio_key,
self.aspect_ratio_key)
self.assertEquals(redis_server.get(aspect_ratio_key),
"1:5")
def test_award(self):
award_key = getattr(self.expression,
'Award (Expression)')
self.assertEquals(self.award_key,award_key)
self.assertEquals(redis_server.get(self.award_key),
"Awarded first place")
def test_cataloguers_note(self):
cataloguers_note_key = getattr(self.expression,
"Cataloguer's note (Expression)")
self.assertEquals(self.cataloguers_note_key,
cataloguers_note_key)
self.assertEquals(redis_server.hget(cataloguers_note_key,
"type"),
"bibliographic history")
self.assertEquals(redis_server.hget(cataloguers_note_key,
"value"),
"Test Cataloguer's Note")
def test_colour_content(self):
colour_content_key = getattr(self.expression,
'Colour content (Expression)')
self.assertEquals(self.colour_content_key,
colour_content_key)
self.assertEquals(redis_server.get(colour_content_key),
"256 Colors")
def test_colour_content_resource(self):
self.assertEquals(getattr(self.expression,
'Colour content of resource designed for persons with visual impairments (Expression)'),
"No")
def test_colour_moving_images(self):
self.assertEquals(getattr(self.expression,
'Colour of moving images (Expression)'),
"Multiple")
def test_colour_still_image(self):
self.assertEquals(getattr(self.expression,
'Colour of still image (Expression)'),
["green","blue"])
def test_colour_three_dimensional_form(self):
self.assertEquals(getattr(self.expression,
'Colour of three-dimensional form (Expression)'),
"black")
def test_content_type(self):
content_type_key = getattr(self.expression,
'Content type (Expression)')
self.assertEquals(self.content_type_key,
content_type_key)
self.assertEquals(redis_server.get(self.content_type_key),
"hypertext transfer protocol")
def test_date_of_capture(self):
date_of_capture_key = getattr(self.expression,
'Date of capture (Expression)')
self.assertEquals(self.date_of_capture_key,
date_of_capture_key)
self.assertEquals(redis_server.hget(date_of_capture_key,
"year"),
"1945")
def test_date_of_expression(self):
date_of_expression_key = getattr(self.expression,
'Date of expression')
self.assertEquals(self.date_of_expression_key,
date_of_expression_key)
self.assertEquals(redis_server.hget(date_of_expression_key,
"year"),
"1945")
def test_duration_key(self):
duration_key = getattr(self.expression,
'Duration (Expression)')
self.assertEquals(duration_key,self.duration_key)
self.assertEquals(redis_server.hget(duration_key,
"start"),
"1950")
self.assertEquals(redis_server.hget(duration_key,
"end"),
"2010")
def test_form_of_musical_notation(self):
form_of_musical_notation_key = getattr(self.expression,
'Form of musical notation (Expression)')
self.assertEquals(self.form_of_musical_notation_key,
form_of_musical_notation_key)
self.assertEquals(redis_server.get(form_of_musical_notation_key),
"modern staff notation")
def test_form_of_notated_movement(self):
form_of_notated_movement_key = getattr(self.expression,
'Form of notated movement (Expression)')
self.assertEquals(self.form_of_notated_movement_key,
form_of_notated_movement_key)
self.assertEquals(redis_server.get(form_of_notated_movement_key),
"Eshkol-Wachman Movement Notation")
def test_form_of_notation(self):
form_of_notation_key = getattr(self.expression,
'Form of notation (Expression)')
self.assertEquals(self.form_of_notation_key,
form_of_notation_key)
self.assertEquals(redis_server.get(self.form_of_notation_key),
"Test Expression Form of Notation")
def test_form_of_tactile_notation(self):
form_of_tactile_notation_key = getattr(self.expression,
'Form of tactile notation (Expression)')
self.assertEquals(self.form_of_tactile_notation_key,
form_of_tactile_notation_key)
def test_format_of_notated_music(self):
format_of_notated_music_key = getattr(self.expression,
'Format of notated music (Expression)')
self.assertEquals(self.format_of_notated_music_key,
format_of_notated_music_key)
def test_horizontal_scale_of_cartographic_content(self):
horizontal_scale_of_cartographic_content_key = getattr(self.expression,
'Horizontal scale of cartographic content (Expression)')
self.assertEquals(self.horizontal_scale_of_cartographic_content_key,
horizontal_scale_of_cartographic_content_key)
def test_identifier_for_the_expression(self):
identifier_for_the_expression_key = getattr(self.expression,
'Identifier for the expression')
self.assertEquals(self.identifier_for_the_expression_key,
identifier_for_the_expression_key)
def test_illustrative_content(self):
illustrative_content_key = getattr(self.expression,
'Illustrative content (Expression)')
self.assertEquals(self.illustrative_content_key,
illustrative_content_key)
def test_language_of_expression(self):
language_of_expression_key = getattr(self.expression,
'Language of expression')
self.assertEquals(self.language_of_expression_key,
language_of_expression_key)
def test_language_of_the_content(self):
language_of_the_content_key = getattr(self.expression,
'Language of the content (Expression)')
self.assertEquals(self.language_of_the_content_key,
language_of_the_content_key)
def test_medium_of_performance_of_musical_content(self):
medium_of_performance_of_musical_content_key = getattr(self.expression,
'Medium of performance of musical content (Expression)')
self.assertEquals(self.medium_of_performance_of_musical_content_key,
medium_of_performance_of_musical_content_key)
def test_other_details_of_cartographic_content(self):
other_details_of_cartographic_content_key = getattr(self.expression,
'Other details of cartographic content (Expression)')
self.assertEquals(self.other_details_of_cartographic_content_key,
other_details_of_cartographic_content_key)
def test_other_distinguishing_characteristic_of_the_expression(self):
other_distinguishing_characteristic_of_the_expression_key = getattr(self.expression,
'Other distinguishing characteristic of the expression')
self.assertEquals(self.other_distinguishing_characteristic_of_the_expression_key,
other_distinguishing_characteristic_of_the_expression_key)
def test_performer_narrator(self):
performer_key = getattr(self.expression,
'Performer, narrator, and/or presenter (Expression)')
self.assertEquals(self.performer_key,
performer_key)
def test_place_and_date_of_capture(self):
place_and_date_of_capture_key = getattr(self.expression,
'Place and date of capture (Expression)')
self.assertEquals(self.place_and_date_of_capture_key,
place_and_date_of_capture_key)
def test_place_of_capture(self):
place_of_capture_key = getattr(self.expression,
'Place of capture (Expression)')
self.assertEquals(self.place_of_capture_key,
place_of_capture_key)
def test_projection_of_cartographic_content(self):
projection_of_cartographic_content_key = getattr(self.expression,
'Projection of cartographic content (Expression)')
self.assertEquals(self.projection_of_cartographic_content_key,
projection_of_cartographic_content_key)
def test_scale(self):
scale_key = getattr(self.expression,
'Scale (Expression)')
self.assertEquals(self.scale_key,
scale_key)
def test_scale_of_still_image_or_three_dimensional_form(self):
scale_of_still_image_or_three_dimensional_form_key = getattr(self.expression,
'Scale of still image or three-dimensional form (Expression)')
self.assertEquals(self.scale_of_still_image_or_three_dimensional_form_key,
scale_of_still_image_or_three_dimensional_form_key)
def test_script(self):
script_key = getattr(self.expression,
'Script (Expression)')
self.assertEquals(self.script_key,
script_key)
def test_sound_content_key(self):
sound_content_key = getattr(self.expression,
'Sound content (Expression)')
self.assertEquals(self.sound_content_key,
sound_content_key)
self.assertEquals(redis_server.get(self.sound_content_key),
"Test Sound Content for Expression")
def test_source_consulted(self):
source_consulted_key = getattr(self.expression,
'Source consulted (Expression)')
self.assertEquals(self.source_consulted_key,
source_consulted_key)
self.assertEquals(redis_server.get(source_consulted_key),
"Test Source Consulted for Expression")
def test_status_of_identification(self):
self.assertEquals(getattr(self.expression,
'Status of identification (Expression)'),
"established")
def test_summarization_of_the_content(self):
summarization_of_the_content_key = getattr(self.expression,
'Summarization of the content (Expression)')
self.assertEquals(self.summarization_of_the_content_key,
summarization_of_the_content_key)
self.assertEquals(redis_server.get(self.summarization_of_the_content_key),
"Test Expression Summary")
def test_supplementary_content(self):
supplementary_content_key = getattr(self.expression,
'Supplementary content (Expression)')
self.assertEquals(self.supplementary_content_key,
supplementary_content_key)
def test_vertical_scale_of_cartographic_content(self):
vertical_scale_of_cartographic_content_key = getattr(self.expression,
'Vertical scale of cartographic content (Expression)')
self.assertEquals(self.vertical_scale_of_cartographic_content_key,
vertical_scale_of_cartographic_content_key)
self.assertEquals(redis_server.get(self.vertical_scale_of_cartographic_content_key),
"meter")
def tearDown(self):
redis_server.flushdb()
class TestExpressionWEMIRelationships(unittest.TestCase):
def setUp(self):
self.abridged_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.abridgement_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_in_part_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.absorbed_in_part_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.abstract_key = "mods:abstract:%s" % redis_server.incr("global:mods:abstract")
redis_server.set(self.abstract_key,"Test Abstract of Expression")
self.abstracted_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.abstracted_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.accompanying_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adaptation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_motion_pic_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_motion_pic_scrn_play_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_radio_programme_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_radio_script_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_screenplay_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_tv_programme_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_tv_scrn_play_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_video_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.adapted_as_a_video_scrn_play_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.addenda_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.addenda_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.analysed_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.analysis_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.appendix_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.appendix_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.augmentation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
        self.augmented_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.basis_for_libretto_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.cadenza_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.cadenza_composed_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.catalogue_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.catalogue_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.choreography_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.choreography_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.commentary_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.commentary_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.complemented_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.concordance_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.concordance_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.contains_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.contained_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.continued_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.continued_in_part_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.continues_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.continues_in_part_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.critique_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.critiqued_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.derivative_relationship_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.described_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.description_of_key = "mods:note:%s" % redis_server.incr("global:mods:note")
redis_server.set(self.description_of_key,
'Test Description of Expression')
self.descriptive_relationships_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.digest_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.digest_of_key= "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.dramatization_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.dramatized_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.errata_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.errata_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.evaluated_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.evaluation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.expanded_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.expanded_version_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.finding_aid_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.finding_aid_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.free_translation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.freely_translated_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.guide_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.guide_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.illustrations_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.illustrations_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.imitated_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.imitation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.index_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.index_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.indexed_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.indexing_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.libretto_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.libretto_based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.libretto_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.merged_with_to_form_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.merger_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.motion_picture_adaptation_of_key =\
"frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.motion_picture_screenplay_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.motion_picture_screenplay_based_on_key =\
"frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.musical_arrangement_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.musical_arrangement_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.musical_setting_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.musical_setting_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.musical_variations_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.musical_variations_based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.novelization_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.novelization_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.paraphrase_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.paraphrased_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.parodied_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.parody_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.preceded_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.radio_adaptation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.radio_script_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.radio_script_based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.remade_as_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.remake_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.review_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.reviewed_in_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.screenplay_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.screenplay_based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.screenplay_for_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.screenplay_for_the_motion_picture_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.screenplay_for_the_television_programme_key = \
"frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.screenplay_for_the_video_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.script_for_the_radio_programme_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.separated_from_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.sequential_relationship_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.split_into_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.succeeded_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.summary_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.summary_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.superseded_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.superseded_in_part_by_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.supersedes_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.supersedes_in_part_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.supplement_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.supplement_to_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.television_adaptation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.television_screenplay_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.television_screenplay_based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.translation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.verse_adaptation_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.verse_adaptation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.video_adaptation_of_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.video_screenplay_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.video_screenplay_based_on_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
self.whole_part_relationship_key = "frbr_rda:Expression:%s" % redis_server.incr("global:frbr_rda:Expression")
params = {'Abridged as (Expression)':self.abridged_as_key,
'Abridgement of (Expression)':self.abridgement_of_key,
'Absorbed (Expression)':self.absorbed_key,
'Absorbed by (Expression)':self.absorbed_by_key,
'Absorbed in part (Expression)':self.absorbed_in_part_key,
'Absorbed in part by (Expression)':self.absorbed_in_part_by_key,
'Abstract (Expression)':self.abstract_key,
'Abstract of (Expression)':self.abstract_key,
'Abstracted in (Expression)':self.abstracted_in_key,
'Abstracts for (Expression)':self.abstracted_for_key,
'Accompanying expression':self.accompanying_key,
'Adaptation of (Expression)':self.adaptation_of_key,
'Adapted as (Expression)':self.adapted_as_key,
'Adapted as a motion picture (Expression)':self.adapted_as_a_motion_pic_key,
'Adapted as a motion picture screenplay (Expression)':self.adapted_as_a_motion_pic_scrn_play_key,
'Adapted as a radio programme (Expression)':self.adapted_as_radio_programme_key,
'Adapted as a radio script (Expression)':self.adapted_as_radio_script_key,
'Adapted as a screenplay (Expression)':self.adapted_as_a_screenplay_key,
'Adapted as a television programme (Expression)':self.adapted_as_tv_programme_key,
'Adapted as a television screenplay (Expression)':self.adapted_as_tv_scrn_play_key,
'Adapted as a video (Expression)':self.adapted_as_key,
'Adapted as a video screenplay (Expression)':self.adapted_as_a_video_scrn_play_key,
'Addenda (Expression)':self.addenda_key,
'Addenda to (Expression)':self.addenda_to_key,
'Analysed in (Expression)':self.analysed_in_key,
'Analysis of (Expression)':self.analysis_of_key,
'Appendix (Expression)':self.appendix_key,
'Appendix to (Expression)':self.appendix_to_key,
'Augmentation of (Expression)':self.augmentation_of_key,
'Augmented by (Expression)':self.augmented_by_key,
'Based on (Expression)':self.based_on_key,
'Basis for libretto (Expression)':self.basis_for_libretto_key,
'Cadenza (Expression)':self.cadenza_key,
'Cadenza composed for (Expression)':self.cadenza_composed_for_key,
'Catalogue (Expression)':self.catalogue_key,
'Catalogue of (Expression)':self.catalogue_of_key,
'Choreography (Expression)':self.choreography_key,
'Choreography for (Expression)':self.choreography_for_key,
'Commentary in (Expression)':self.commentary_in_key,
'Commentary on (Expression)':self.commentary_on_key,
'Complemented by (Expression)':self.complemented_by_key,
'Concordance (Expression)':self.concordance_key,
'Concordance to (Expression)':self.concordance_to_key,
'Contained in (Expression)':self.contained_in_key,
'Contains (Expression)':self.contains_key,
'Continued by (Expression)':self.continued_by_key,
'Continued in part by (Expression)':self.continued_in_part_by_key,
'Continues (Expression)':self.continues_key,
'Continues in part (Expression)':self.continues_in_part_key,
'Critique of (Expression)':self.critique_of_key,
'Critiqued in (Expression)':self.critiqued_in_key,
'Derivative relationship (Expression)':self.derivative_relationship_key,
'Described in (Expression)':self.described_in_key,
'Description of (Expression)':self.description_of_key,
'Descriptive relationships (Expression)':self.descriptive_relationships_key,
'Digest (Expression)':self.digest_key,
'Digest of (Expression)':self.digest_of_key,
'Dramatization of (Expression)':self.dramatization_of_key,
'Dramatized as (Expression)':self.dramatized_as_key,
'Errata (Expression)':self.errata_key,
'Errata to (Expression)':self.errata_to_key,
'Evaluated in (Expression)':self.evaluated_in_key,
'Evaluation of (Expression)':self.evaluation_of_key,
'Expanded as (Expression)':self.expanded_as_key,
'Expanded version of (Expression)':self.expanded_version_of_key,
'Finding aid (Expression)':self.finding_aid_key,
'Finding aid for (Expression)':self.finding_aid_for_key,
'Free translation of (Expression)':self.free_translation_of_key,
'Freely translated as (Expression)':self.freely_translated_as_key,
'Guide (Expression)':self.guide_key,
'Guide to (Expression)':self.guide_to_key,
'Illustrations (Expression)':self.illustrations_key,
'Illustrations for (Expression)':self.illustrations_for_key,
'Imitated as (Expression)':self.imitated_as_key,
'Imitation of (Expression)':self.imitation_of_key,
'Index (Expression)':self.index_key,
'Index to (Expression)':self.index_to_key,
'Indexed in (Expression)':self.indexed_in_key,
'Indexing for (Expression)':self.indexing_for_key,
'Libretto (Expression)':self.libretto_key,
'Libretto based on (Expression)':self.libretto_based_on_key,
'Libretto for (Expression)':self.libretto_for_key,
'Merged with to form (Expression)':self.merged_with_to_form_key,
'Merger of (Expression)':self.merger_of_key,
'Motion picture adaptation of (Expression)':self.motion_picture_adaptation_of_key,
'Motion picture screenplay (Expression)':self.motion_picture_screenplay_key,
'Motion picture screenplay based on (Expression)':self.motion_picture_screenplay_based_on_key,
'Musical arrangement (Expression)':self.musical_arrangement_key,
'Musical arrangement of (Expression)':self.musical_arrangement_of_key,
'Musical setting (Expression)':self.musical_setting_key,
'Musical setting of (Expression)':self.musical_setting_of_key,
'Musical variations (Expression)':self.musical_variations_key,
'Musical variations based on (Expression)':self.musical_variations_based_on_key,
'Novelization (Expression)':self.novelization_key,
'Novelization of (Expression)':self.novelization_of_key,
'Paraphrase of (Expression)':self.paraphrase_of_key,
'Paraphrased as (Expression)':self.paraphrased_as_key,
'Parodied as (Expression)':self.parodied_as_key,
'Parody of (Expression)':self.parody_of_key,
'Preceded by (Expression)':self.preceded_by_key,
'Radio adaptation of (Expression)':self.radio_adaptation_of_key,
'Radio script (Expression)':self.radio_script_key,
'Radio script based on (Expression)':self.radio_script_based_on_key,
'Remade as (Expression)':self.remade_as_key,
'Remake of (Expression)':self.remake_of_key,
'Review of (Expression)':self.review_of_key,
'Reviewed in (Expression)':self.reviewed_in_key,
'Screenplay (Expression)':self.screenplay_key,
'Screenplay based on (Expression)':self.screenplay_based_on_key,
'Screenplay for (Expression)':self.screenplay_for_key,
'Screenplay for the motion picture (Expression)':self.screenplay_for_the_motion_picture_key,
'Screenplay for the television programme (Expression)':self.screenplay_for_the_television_programme_key,
'Screenplay for the video (Expression)':self.screenplay_for_the_video_key,
'Script for the radio programme (Expression)':self.script_for_the_radio_programme_key,
'Separated from (Expression)':self.separated_from_key,
'Sequential relationship (Expression)':self.sequential_relationship_key,
'Split into (Expression)':self.split_into_key,
'Succeeded by (Expression)':self.succeeded_by_key,
'Summary (Expression)':self.summary_key,
'Summary of (Expression)':self.summary_of_key,
'Superseded by (Expression)':self.superseded_by_key,
'Superseded in part by (Expression)':self.superseded_in_part_by_key,
'Supersedes (Expression)':self.supersedes_key,
'Supersedes in part (Expression)':self.supersedes_in_part_key,
'Supplement (Expression)':self.supplement_key,
'Supplement to (Expression)':self.supplement_to_key,
'Television adaptation of (Expression)':self.television_adaptation_of_key,
'Television screenplay (Expression)':self.television_screenplay_key,
'Television screenplay based on (Expression)':self.television_screenplay_based_on_key,
'Translation of (Expression)':self.translation_of_key,
'Verse adaptation (Expression)':self.verse_adaptation_key,
'Verse adaptation of (Expression)':self.verse_adaptation_of_key,
'Video adaptation of (Expression)':self.video_adaptation_of_key,
'Video screenplay (Expression)':self.video_screenplay_key,
'Video screenplay based on (Expression)':self.video_screenplay_based_on_key,
'Whole-part relationship (Expression)':self.whole_part_relationship_key}
self.expression = frbr_rda.Expression(redis_server=redis_server,
**params)
def test_init(self):
self.assert_(self.expression.redis_ID)
def test_abridged_as(self):
abridged_as_key = getattr(self.expression,
"Abridged as (Expression)")
self.assertEquals(self.abridged_as_key,
abridged_as_key)
def test_abridgement_of(self):
self.assertEquals(getattr(self.expression,'Abridgement of (Expression)'),
self.abridgement_of_key)
def test_absorbed(self):
self.assertEquals(getattr(self.expression,'Absorbed (Expression)'),
self.absorbed_key)
def test_absorbed_by(self):
self.assertEquals(getattr(self.expression,'Absorbed by (Expression)'),
self.absorbed_by_key)
def test_absorbed_in_part(self):
self.assertEquals(getattr(self.expression,'Absorbed in part (Expression)'),
self.absorbed_in_part_key)
def test_absorbed_in_part_by(self):
self.assertEquals(getattr(self.expression,'Absorbed in part by (Expression)'),
self.absorbed_in_part_by_key)
def test_abstract(self):
abstract_key = getattr(self.expression,'Abstract (Expression)')
self.assertEquals(abstract_key,self.abstract_key)
self.assertEquals(redis_server.get(abstract_key),
"Test Abstract of Expression")
def test_abstract_of(self):
abstract_key = getattr(self.expression,'Abstract of (Expression)')
self.assertEquals(abstract_key,self.abstract_key)
self.assertEquals(redis_server.get(abstract_key),
"Test Abstract of Expression")
def test_abstracted_in(self):
self.assertEquals(getattr(self.expression,'Abstracted in (Expression)'),
self.abstracted_in_key)
def test_abstracted_for(self):
self.assertEquals(getattr(self.expression,'Abstracts for (Expression)'),
self.abstracted_for_key)
def test_accompanying(self):
self.assertEquals(getattr(self.expression,'Accompanying expression'),
self.accompanying_key)
def test_adaptation_of(self):
self.assertEquals(getattr(self.expression,'Adaptation of (Expression)'),
self.adaptation_of_key)
def test_adapted_as(self):
self.assertEquals(getattr(self.expression,'Adapted as (Expression)'),
self.adapted_as_key)
    def test_adapted_as_a_motion_picture(self):
self.assertEquals(getattr(self.expression,'Adapted as a motion picture (Expression)'),
self.adapted_as_a_motion_pic_key)
def test_adapted_as_a_motion_picture_screenplay(self):
self.assertEquals(getattr(self.expression,
'Adapted as a motion picture screenplay (Expression)'),
self.adapted_as_a_motion_pic_scrn_play_key)
def test_adapted_as_radio_programme(self):
self.assertEquals(getattr(self.expression,
'Adapted as a radio programme (Expression)'),
self.adapted_as_radio_programme_key)
def test_adapted_as_radio_script(self):
self.assertEquals(getattr(self.expression,
'Adapted as a radio script (Expression)'),
self.adapted_as_radio_script_key)
def test_adapted_as_a_screenplay(self):
self.assertEquals(getattr(self.expression,
'Adapted as a screenplay (Expression)'),
self.adapted_as_a_screenplay_key)
    def test_adapted_as_television_programme(self):
self.assertEquals(getattr(self.expression,
'Adapted as a television programme (Expression)'),
self.adapted_as_tv_programme_key)
def test_adapted_as_television_screenplay(self):
self.assertEquals(getattr(self.expression,
'Adapted as a television screenplay (Expression)'),
self.adapted_as_tv_scrn_play_key)
    def test_adapted_as_a_video(self):
self.assertEquals(getattr(self.expression,'Adapted as a video (Expression)'),
self.adapted_as_key)
def test_adapted_as_a_video_screenplay(self):
self.assertEquals(getattr(self.expression,
'Adapted as a video screenplay (Expression)'),
self.adapted_as_a_video_scrn_play_key)
def test_addenda(self):
self.assertEquals(getattr(self.expression,
'Addenda (Expression)'),
self.addenda_key)
def test_addenda_to(self):
self.assertEquals(getattr(self.expression,
'Addenda to (Expression)'),
self.addenda_to_key)
def test_analysed_in(self):
self.assertEquals(getattr(self.expression,
'Analysed in (Expression)'),
self.analysed_in_key)
def test_analysis_of(self):
self.assertEquals(getattr(self.expression,
'Analysis of (Expression)'),
self.analysis_of_key)
def test_appendix(self):
self.assertEquals(getattr(self.expression,
'Appendix (Expression)'),
self.appendix_key)
def test_appendix_to(self):
self.assertEquals(getattr(self.expression,
'Appendix to (Expression)'),
self.appendix_to_key)
def test_augmentation_of(self):
self.assertEquals(getattr(self.expression,
'Augmentation of (Expression)'),
self.augmentation_of_key)
def test_augmented_by(self):
self.assertEquals(getattr(self.expression,
'Augmented by (Expression)'),
self.augmented_by_key)
def test_based_on(self):
self.assertEquals(getattr(self.expression,
'Based on (Expression)'),
self.based_on_key)
def test_basis_for_libretto(self):
self.assertEquals(getattr(self.expression,
'Basis for libretto (Expression)'),
self.basis_for_libretto_key)
def test_cadenza(self):
self.assertEquals(getattr(self.expression,'Cadenza (Expression)'),
self.cadenza_key)
def test_cadenza_composed_for(self):
self.assertEquals(getattr(self.expression,
'Cadenza composed for (Expression)'),
self.cadenza_composed_for_key)
def test_catalogue(self):
self.assertEquals(getattr(self.expression,
'Catalogue (Expression)'),
self.catalogue_key)
def test_catalogue_of(self):
self.assertEquals(getattr(self.expression,
'Catalogue of (Expression)'),
self.catalogue_of_key)
def test_choreography(self):
self.assertEquals(getattr(self.expression,
'Choreography (Expression)'),
self.choreography_key)
def test_choreography_for(self):
self.assertEquals(getattr(self.expression,
'Choreography for (Expression)'),
self.choreography_for_key)
def test_commentary_in(self):
self.assertEquals(getattr(self.expression,'Commentary in (Expression)'),
self.commentary_in_key)
def test_commentary_on(self):
self.assertEquals(getattr(self.expression,
'Commentary on (Expression)'),
self.commentary_on_key)
def test_complemented_by(self):
self.assertEquals(getattr(self.expression,
'Complemented by (Expression)'),
self.complemented_by_key)
def test_concordance(self):
self.assertEquals(getattr(self.expression,
'Concordance (Expression)'),
self.concordance_key)
def test_concordance_to(self):
self.assertEquals(getattr(self.expression,
'Concordance to (Expression)'),
self.concordance_to_key)
def test_contained_in(self):
self.assertEquals(getattr(self.expression,
'Contained in (Expression)'),
self.contained_in_key)
def test_contains(self):
self.assertEquals(getattr(self.expression,
'Contains (Expression)'),
self.contains_key)
def test_continued_by(self):
self.assertEquals(getattr(self.expression,
'Continued by (Expression)'),
self.continued_by_key)
def test_continued_in_part_by(self):
self.assertEquals(getattr(self.expression,
'Continued in part by (Expression)'),
self.continued_in_part_by_key)
def test_continues(self):
self.assertEquals(getattr(self.expression,
'Continues (Expression)'),
self.continues_key)
def test_continues_in_part(self):
self.assertEquals(getattr(self.expression,
'Continues in part (Expression)'),
self.continues_in_part_key)
def test_critique_of(self):
self.assertEquals(getattr(self.expression,
'Critique of (Expression)'),
self.critique_of_key)
def test_critiqued_in(self):
self.assertEquals(getattr(self.expression,
'Critiqued in (Expression)'),
self.critiqued_in_key)
def test_derivative_relationship(self):
self.assertEquals(getattr(self.expression,
'Derivative relationship (Expression)'),
self.derivative_relationship_key)
def test_described_in(self):
self.assertEquals(getattr(self.expression,
'Described in (Expression)'),
self.described_in_key)
def test_description_of(self):
description_of_key = getattr(self.expression,
'Description of (Expression)')
self.assertEquals(description_of_key,
self.description_of_key)
self.assertEquals(redis_server.get(description_of_key),
'Test Description of Expression')
def test_descriptive_relationships(self):
self.assertEquals(getattr(self.expression,
'Descriptive relationships (Expression)'),
self.descriptive_relationships_key)
def test_digest(self):
self.assertEquals(getattr(self.expression,
'Digest (Expression)'),
self.digest_key)
def test_digest_of(self):
self.assertEquals(getattr(self.expression,
'Digest of (Expression)'),
self.digest_of_key)
def test_dramatization_of(self):
self.assertEquals(getattr(self.expression,
'Dramatization of (Expression)'),
self.dramatization_of_key)
def test_dramatized_as(self):
self.assertEquals(getattr(self.expression,
'Dramatized as (Expression)'),
self.dramatized_as_key)
def test_errata(self):
self.assertEquals(getattr(self.expression,
'Errata (Expression)'),
self.errata_key)
def test_errata_to(self):
self.assertEquals(getattr(self.expression,
'Errata to (Expression)'),
self.errata_to_key)
def test_evaluated_in(self):
self.assertEquals(getattr(self.expression,
'Evaluated in (Expression)'),
self.evaluated_in_key)
def test_evaluation_of(self):
self.assertEquals(getattr(self.expression,
'Evaluation of (Expression)'),
self.evaluation_of_key)
def test_expanded_as(self):
self.assertEquals(getattr(self.expression,
'Expanded as (Expression)'),
self.expanded_as_key)
def test_expanded_version_of(self):
self.assertEquals(getattr(self.expression,
'Expanded version of (Expression)'),
self.expanded_version_of_key)
def test_finding_aid(self):
self.assertEquals(getattr(self.expression,
'Finding aid (Expression)'),
self.finding_aid_key)
def test_finding_aid_for(self):
self.assertEquals(getattr(self.expression,
'Finding aid for (Expression)'),
self.finding_aid_for_key)
def test_free_translation_of(self):
self.assertEquals(getattr(self.expression,
'Free translation of (Expression)'),
self.free_translation_of_key)
def test_freely_translated_as(self):
self.assertEquals(getattr(self.expression,
'Freely translated as (Expression)'),
self.freely_translated_as_key)
def test_libretto(self):
self.assertEquals(getattr(self.expression,
'Libretto (Expression)'),
self.libretto_key)
def test_libretto_based_on(self):
self.assertEquals(getattr(self.expression,
'Libretto based on (Expression)'),
self.libretto_based_on_key)
def test_libretto_for(self):
self.assertEquals(getattr(self.expression,
'Libretto for (Expression)'),
self.libretto_for_key)
def test_merged_with_to_form(self):
self.assertEquals(getattr(self.expression,
'Merged with to form (Expression)'),
self.merged_with_to_form_key)
def test_merger_of(self):
self.assertEquals(getattr(self.expression,
'Merger of (Expression)'),
self.merger_of_key)
def test_motion_picture_adaptation_of(self):
self.assertEquals(getattr(self.expression,
'Motion picture adaptation of (Expression)'),
self.motion_picture_adaptation_of_key)
def test_motion_picture_screenplay(self):
self.assertEquals(getattr(self.expression,
'Motion picture screenplay (Expression)'),
self.motion_picture_screenplay_key)
def test_motion_picture_screenplay_based_on(self):
self.assertEquals(getattr(self.expression,
'Motion picture screenplay based on (Expression)'),
self.motion_picture_screenplay_based_on_key)
def test_musical_arrangement(self):
self.assertEquals(getattr(self.expression,
'Musical arrangement (Expression)'),
self.musical_arrangement_key)
def test_musical_arrangement_of(self):
self.assertEquals(getattr(self.expression,
'Musical arrangement of (Expression)'),
self.musical_arrangement_of_key)
def test_musical_setting(self):
self.assertEquals(getattr(self.expression,
'Musical setting (Expression)'),
self.musical_setting_key)
def test_musical_setting_of(self):
self.assertEquals(getattr(self.expression,
'Musical setting of (Expression)'),
self.musical_setting_of_key)
def test_musical_variations(self):
self.assertEquals(getattr(self.expression,
'Musical variations (Expression)'),
self.musical_variations_key)
def test_musical_variations_based_on(self):
self.assertEquals(getattr(self.expression,
'Musical variations based on (Expression)'),
self.musical_variations_based_on_key)
def test_novelization_of(self):
self.assertEquals(getattr(self.expression,
'Novelization of (Expression)'),
self.novelization_of_key)
def test_paraphrase_of(self):
self.assertEquals(getattr(self.expression,
'Paraphrase of (Expression)'),
self.paraphrase_of_key)
def test_paraphrased_as(self):
self.assertEquals(getattr(self.expression,
'Paraphrased as (Expression)'),
self.paraphrased_as_key)
def test_parodied_as(self):
self.assertEquals(getattr(self.expression,
'Parodied as (Expression)'),
self.parodied_as_key)
def test_parody_of(self):
self.assertEquals(getattr(self.expression,
'Parody of (Expression)'),
self.parody_of_key)
def test_preceded_by(self):
self.assertEquals(getattr(self.expression,
'Preceded by (Expression)'),
self.preceded_by_key)
def test_radio_adaptation_of(self):
self.assertEquals(getattr(self.expression,
'Radio adaptation of (Expression)'),
self.radio_adaptation_of_key)
def test_radio_script(self):
self.assertEquals(getattr(self.expression,
'Radio script (Expression)'),
self.radio_script_key)
def test_radio_script_based_on(self):
self.assertEquals(getattr(self.expression,
'Radio script based on (Expression)'),
self.radio_script_based_on_key)
def test_remade_as(self):
self.assertEquals(getattr(self.expression,
'Remade as (Expression)'),
self.remade_as_key)
def test_remake_of(self):
self.assertEquals(getattr(self.expression,
'Remake of (Expression)'),
self.remake_of_key)
def test_review_of(self):
self.assertEquals(getattr(self.expression,
'Review of (Expression)'),
self.review_of_key)
def test_reviewed_in(self):
self.assertEquals(getattr(self.expression,
'Reviewed in (Expression)'),
self.reviewed_in_key)
def test_screenplay(self):
self.assertEquals(getattr(self.expression,
'Screenplay (Expression)'),
self.screenplay_key)
def test_screenplay_based_on(self):
self.assertEquals(getattr(self.expression,
'Screenplay based on (Expression)'),
self.screenplay_based_on_key)
def test_screenplay_for(self):
self.assertEquals(getattr(self.expression,
'Screenplay for (Expression)'),
self.screenplay_for_key)
def test_screenplay_for_the_motion_picture(self):
self.assertEquals(getattr(self.expression,
'Screenplay for the motion picture (Expression)'),
self.screenplay_for_the_motion_picture_key)
def test_screenplay_for_the_television_programme(self):
self.assertEquals(getattr(self.expression,
'Screenplay for the television programme (Expression)'),
self.screenplay_for_the_television_programme_key)
def test_screenplay_for_the_video(self):
self.assertEquals(getattr(self.expression,
'Screenplay for the video (Expression)'),
self.screenplay_for_the_video_key)
def test_script_for_the_radio_programme(self):
self.assertEquals(getattr(self.expression,
'Script for the radio programme (Expression)'),
self.script_for_the_radio_programme_key)
def test_separated_from(self):
self.assertEquals(getattr(self.expression,
'Separated from (Expression)'),
self.separated_from_key)
def test_sequential_relationship(self):
self.assertEquals(getattr(self.expression,
'Sequential relationship (Expression)'),
self.sequential_relationship_key)
def test_split_into(self):
self.assertEquals(getattr(self.expression,
'Split into (Expression)'),
self.split_into_key)
def test_succeeded_by(self):
self.assertEquals(getattr(self.expression,
'Succeeded by (Expression)'),
self.succeeded_by_key)
def test_summary(self):
self.assertEquals(getattr(self.expression,
'Summary (Expression)'),
self.summary_key)
def test_summary_of(self):
self.assertEquals(getattr(self.expression,
'Summary of (Expression)'),
self.summary_of_key)
def test_superseded_by(self):
self.assertEquals(getattr(self.expression,
'Superseded by (Expression)'),
self.superseded_by_key)
def test_superseded_in_part_by(self):
self.assertEquals(getattr(self.expression,
'Superseded in part by (Expression)'),
self.superseded_in_part_by_key)
def test_supersedes(self):
self.assertEquals(getattr(self.expression,
'Supersedes (Expression)'),
self.supersedes_key)
def test_supersedes_in_part(self):
self.assertEquals(getattr(self.expression,
'Supersedes in part (Expression)'),
self.supersedes_in_part_key)
def test_supplement(self):
self.assertEquals(getattr(self.expression,
'Supplement (Expression)'),
self.supplement_key)
def test_supplement_to(self):
self.assertEquals(getattr(self.expression,
'Supplement to (Expression)'),
self.supplement_to_key)
def test_television_adaptation_of(self):
self.assertEquals(getattr(self.expression,
'Television adaptation of (Expression)'),
self.television_adaptation_of_key)
def test_television_screenplay(self):
self.assertEquals(getattr(self.expression,
'Television screenplay (Expression)'),
self.television_screenplay_key)
def test_television_screenplay_based_on(self):
self.assertEquals(getattr(self.expression,
'Television screenplay based on (Expression)'),
self.television_screenplay_based_on_key)
def test_translation_of(self):
self.assertEquals(getattr(self.expression,
'Translation of (Expression)'),
self.translation_of_key)
def test_verse_adaptation(self):
self.assertEquals(getattr(self.expression,
'Verse adaptation (Expression)'),
self.verse_adaptation_key)
def test_verse_adaptation_of(self):
self.assertEquals(getattr(self.expression,
'Verse adaptation of (Expression)'),
self.verse_adaptation_of_key)
def test_video_adaptation_of(self):
self.assertEquals(getattr(self.expression,
'Video adaptation of (Expression)'),
self.video_adaptation_of_key)
def test_video_screenplay(self):
self.assertEquals(getattr(self.expression,
'Video screenplay (Expression)'),
self.video_screenplay_key)
def test_video_screenplay_based_on(self):
self.assertEquals(getattr(self.expression,
'Video screenplay based on (Expression)'),
self.video_screenplay_based_on_key)
def test_whole_part_relationship(self):
self.assertEquals(getattr(self.expression,
'Whole-part relationship (Expression)'),
self.whole_part_relationship_key)
def tearDown(self):
redis_server.flushdb()
```
|
{
"source": "jerm/tapiriik",
"score": 3
}
|
#### File: tapiriik/tapiriik/requests_lib.py
```python
def patch_requests_with_default_timeout(timeout):
import requests
old_request = requests.Session.request
def new_request(*args, **kwargs):
if "timeout" not in kwargs:
kwargs["timeout"] = timeout
return old_request(*args, **kwargs)
requests.Session.request = new_request
def patch_requests_no_verify_ssl():
import requests
old_request = requests.Session.request
def new_request(*args, **kwargs):
kwargs.update({"verify": False})
return old_request(*args, **kwargs)
requests.Session.request = new_request
# Not really patching requests here, but...
def patch_requests_source_address(new_source_address):
import socket
old_create_connection = socket.create_connection
def new_create_connection(address, timeout=None, source_address=None):
if address[1] in [80, 443]:
return old_create_connection(address, timeout, new_source_address)
else:
return old_create_connection(address, timeout, source_address)
socket.create_connection = new_create_connection
def patch_requests_user_agent(user_agent):
import requests
old_request = requests.Session.request
def new_request(self, *args, **kwargs):
headers = kwargs.get("headers", getattr(self, "headers", {}))
if "User-Agent" not in headers:
headers["User-Agent"] = user_agent
kwargs["headers"] = headers
return old_request(self, *args, **kwargs)
requests.Session.request = new_request
```
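
These helpers monkey-patch `requests.Session.request` (or `socket.create_connection`) in place, so each one only needs to be called once at start-up, before any requests are made. A minimal usage sketch, assuming the module is importable as `tapiriik.requests_lib`; the timeout value and URL below are illustrative only:

```python
from tapiriik.requests_lib import patch_requests_with_default_timeout

# Apply once, early in start-up; every Session.request call made afterwards
# gets a timeout unless the caller passes one explicitly.
patch_requests_with_default_timeout(timeout=60)  # assumed 60-second default

import requests

resp = requests.get("https://example.com/api/status")  # placeholder URL
print(resp.status_code)
```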
|
{
"source": "jermwatt/blog",
"score": 3
}
|
#### File: demo_libraries/zca_sphering_library/visualizers.py
```python
import numpy as np
# import standard plotting and animation
from matplotlib import gridspec
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import clear_output
from mpl_toolkits.mplot3d import Axes3D
########## plotting functionality ############
def show_images(X,**kwargs):
'''
Function for plotting input images, stacked in columns of input X.
'''
cmap = 'gray'
if 'cmap' in kwargs:
cmap = kwargs['cmap']
# plotting mechanism taken from excellent answer from stack overflow: https://stackoverflow.com/questions/20057260/how-to-remove-gaps-between-subplots-in-matplotlib
plt.figure(figsize = (9,3))
gs1 = gridspec.GridSpec(5, 14)
gs1.update(wspace=0, hspace=0.05) # set the spacing between axes.
# shape of square version of image
square_shape = int((X.shape[0])**(0.5))
for i in range(min(70,X.shape[1])):
# plot image in panel
ax = plt.subplot(gs1[i])
im = ax.imshow(255 - np.reshape(X[:,i],(square_shape,square_shape)),cmap = cmap)
# clean up panel
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.show()
```
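
Since `show_images` treats each column of `X` as one flattened square image, a quick sanity check is to feed it random data of the right shape. The snippet below is purely illustrative (it assumes `show_images` is already imported or defined in the session) and is not part of the demo library:

```python
import numpy as np

# 70 fake 28x28 grayscale "images", one flattened image per column (784 x 70).
X_demo = np.random.randint(0, 256, size=(28 * 28, 70))
show_images(X_demo, cmap='gray')
```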
#### File: dynamic_systems_unlimited_memory/library/plot_input.py
```python
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
class Plotter:
def __init__(self):
# setup figure
self.fig = plt.figure(figsize = (9.5,3.5))
gs = gridspec.GridSpec(1, 1)
ax = plt.subplot(gs[0]);
self.axs = [ax]
def process(self,x,h,**kwargs):
counter = len(x)
if 'counter' in kwargs:
counter = kwargs['counter']
counter = min(counter,len(x))
# setup figure
ax = self.axs[0]
ax.scatter(np.arange(1,counter+1),x[:counter],c = 'mediumblue',edgecolor = 'w',s = 80,linewidth = 1,zorder = 2);
ax.plot(np.arange(1,counter+1),x[:counter],alpha = 0.5,c = 'mediumblue',zorder = 2);
# label axes
ax.axhline(c = 'k',zorder = 0)
ax.set_xlabel(r'$t$',fontsize = 13)
ax.set_ylabel(r'$x_t$',fontsize=13,rotation=0)
# fix viewing window
xmin = -1
xmax = len(x) + 1
ymin = min(x) - 2
ymax = max(x) + 2
ax.set_xlim([xmin,xmax])
ax.set_ylim([ymin,ymax])
```
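
A short driver for `Plotter.process` might look like the following; the random sequence and `counter` value are stand-ins, and note that `h` is accepted but unused by the method as written:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.random.randn(25)             # illustrative input signal
plotter = Plotter()
plotter.process(x, h=None, counter=len(x))
plt.show()
```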
#### File: markov_chains/library/markov_chars_demo.py
```python
from . import char_level_markov_model as util
def show_order(csvname,order,num_chars):
# get instance of markov model, load in text
# load in and preprocess text
model = util.Markov(csvname)
# produce probabilities for order O model
model.make_transition_probabilities(order = order)
model.generate_text(num_chars = num_chars)
```
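
Invocation is a single call; the file name below is an assumption, not a dataset shipped with the blog:

```python
# Train a character-level model of order 5 on an assumed text file and
# generate a 300-character sample.
show_order('sample_text.csv', order=5, num_chars=300)
```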
|
{
"source": "jernaumorat/IntelligentPantry",
"score": 3
}
|
#### File: IntelligentPantry/robot_stub/Bot.py
```python
from abc import ABC, abstractmethod
class Bot(ABC):
@abstractmethod
def moveTo(self , x , y):
pass
# get robot camera image of pantry
@abstractmethod
def getImage(self):
pass
# scan pantry
@abstractmethod
def scan(self):
pass
# system state scan /idle date and time
@abstractmethod
def updateStatus(self):
pass
```
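
Because `Bot` is an abstract base class, the robot stub (or any simulator) must subclass it and implement all four abstract methods before it can be instantiated. A toy subclass, shown only to illustrate the required interface:

```python
class FakeBot(Bot):
    """Minimal stand-in implementation; prints instead of driving hardware."""

    def moveTo(self, x, y):
        print(f"moving to ({x}, {y})")

    def getImage(self):
        # A real robot would return camera image bytes here.
        return b""

    def scan(self):
        print("scanning pantry")

    def updateStatus(self):
        return {"state": "idle"}


bot = FakeBot()
bot.moveTo(3, 5)
print(bot.updateStatus())
```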
#### File: server/pantryflask/__init__.py
```python
import socket, os, atexit
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask.helpers import send_from_directory, url_for
from zeroconf import ServiceInfo, Zeroconf
from pantryflask.config import FlaskConfig
from pantryflask.auth import token_auth, generate_pairing_code, generate_user_token
from pantryflask.models import AuthToken
from pantryflask.db import db
from pantryflask.pantry_api import bp as pantry_bp
from pantryflask.robot_api import bp as robot_bp
from pantryflask.util import bp as util_bp
ip = os.environ.get('LISTEN_IP')
httpZconf = ServiceInfo(
"_http._tcp.local.",
"intpantry._http._tcp.local.",
addresses=[socket.inet_aton(ip)],
port=5000)
httpsZconf = ServiceInfo(
"_https._tcp.local.",
"intpantry._https._tcp.local.",
addresses=[socket.inet_aton(ip)],
port=5443)
zc = Zeroconf()
zc.register_service(httpZconf)
print('Service Registered:', httpZconf)
def app_factory(config={}):
app = Flask(__name__)
app.config.from_object(FlaskConfig) if config == {} else app.config.from_object(config)
db.init_app(app)
migrate = Migrate(app, db)
@app.route('/')
def get_root():
links = []
for rule in app.url_map.iter_rules():
methods = ','.join(rule.methods)
links.append((f'{rule}', methods, rule.endpoint))
return jsonify(links)
@app.route('/cert', methods=['GET'])
def get_cert():
response = send_from_directory(os.path.join('.', 'static'), 'ssr.crt')
return response
@app.route('/pair', methods=['GET'])
def pair_device():
code = request.args.get('code')
if len(AuthToken.query.filter_by(token_class='user').all()) == 0 and not code:
return jsonify(generate_pairing_code())
token = generate_user_token(code)
if token == None:
return jsonify(None), 401
return jsonify(token), 201
@app.route('/pair', methods=['POST'])
@token_auth.login_required(role=['user'])
def get_pairing_code():
return jsonify(generate_pairing_code())
@app.route('/pair', methods=['DELETE'])
@token_auth.login_required(role=['user'])
def delete_token():
token = request.headers.get('Authorization')
print(token)
token = token.split(' ')[1]
db.session.delete(AuthToken.query.get(token))
db.session.commit()
return jsonify('OK')
app.register_blueprint(pantry_bp)
app.register_blueprint(robot_bp)
app.register_blueprint(util_bp)
return app, db, migrate
@atexit.register
def shutdown():
zc.unregister_all_services()
app, db, migrate = app_factory()
```
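
The `/pair` endpoint doubles as bootstrap and pairing: when no user tokens exist yet, an unauthenticated GET returns a pairing code, and a second GET with `?code=` exchanges it for a user token (201 on success, 401 otherwise). A rough client-side sketch; the server address is an assumption, since on a real network it would be discovered via the advertised `intpantry` zeroconf service:

```python
import requests

BASE = "http://192.168.1.50:5000"   # assumed address of the IntelligentPantry server

# First run: no user tokens exist, so this returns a pairing code.
code = requests.get(f"{BASE}/pair").json()

# Exchange the pairing code for a user token.
resp = requests.get(f"{BASE}/pair", params={"code": code})
print(resp.status_code, resp.json())
```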
#### File: server/pantryflask/pantry_api.py
```python
from flask import Blueprint, jsonify, request, make_response
from flask_sqlalchemy import SQLAlchemy
from json import loads
from pantryflask.valid_json import pantry_post_required_params
from pantryflask.db import db
from pantryflask.models import PantryItem
from pantryflask.auth import token_auth
bp = Blueprint('pantry', __name__, url_prefix='/pantry')
@bp.route('/', methods=['GET'])
@token_auth.login_required(role=['user'])
def get_allitems():
data = []
for item in PantryItem.query.all():
data.append(item.to_dict(summary=True))
resp = jsonify(data)
if data == []:
return resp, 204
return resp
# POST a list of items with images attached.
# The endpoint expects a form dict with 'payload' as the key and a list of dicts as its value:
# {'payload': [{label: string, quantity: int, item_x: int, item_y: int, image_key: string}, ...]}
# The request should also attach a JPEG file for each item, using that item's image_key as the file's key.
@bp.route('/', methods=['POST'])
@token_auth.login_required(role=['system'])
@pantry_post_required_params({"label": str,"quantity":int,"item_x":int,"item_y":int,"image_key":str})
def add_items():
payload = loads(request.form.get('payload'))
for data in payload:
img = request.files[data['image_key']] or None
new_item = PantryItem(item_label=data['label'], item_quantity=data['quantity'], item_x=data['item_x'], item_y=data['item_y'], item_image=img.read())
db.session.add(new_item)
db.session.commit()
resp = payload
return "resp", 201
@bp.route('/<int:itemID>', methods=['GET'])
@token_auth.login_required(role=['user'])
def get_item(itemID):
item = PantryItem.query.get_or_404(itemID)
resp = jsonify(item.to_dict(summary=False))
return resp
@bp.route('/<int:itemID>/img', methods=['GET'])
@token_auth.login_required(role=['user'])
def get_item_image(itemID):
item = PantryItem.query.get_or_404(itemID)
image = item.item_image
resp = make_response(image)
resp.headers.set('Content-Type', 'image/jpeg')
resp.headers.set('Cache-Control', 'max-age=86400')
return resp
@bp.route('/', methods=['DELETE'])
@token_auth.login_required(role=['system'])
def delete_allitems():
for item in PantryItem.query.all():
db.session.delete(item)
db.session.commit()
resp = jsonify("OK")
return resp
```
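
Following the payload format documented above `add_items`, a system client (for example the robot) could post two detected items as below; the server address and token scheme are assumptions for illustration:

```python
import json
import requests

# One dict per detected item; each image_key matches the key of an attached JPEG.
payload = [
    {"label": "beans", "quantity": 2, "item_x": 10, "item_y": 40, "image_key": "img0"},
    {"label": "rice", "quantity": 1, "item_x": 55, "item_y": 12, "image_key": "img1"},
]
files = {
    "img0": ("beans.jpg", open("beans.jpg", "rb"), "image/jpeg"),
    "img1": ("rice.jpg", open("rice.jpg", "rb"), "image/jpeg"),
}

resp = requests.post(
    "http://192.168.1.50:5000/pantry/",                   # assumed server address
    data={"payload": json.dumps(payload)},
    files=files,
    headers={"Authorization": "Bearer SYSTEM_TOKEN"},     # assumed scheme for the 'system' role
)
print(resp.status_code)
```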
#### File: server/tests/test_pantryflask.py
```python
import os
import tempfile
import json
import io
import pytest
from pantryflask import app_factory
class FlaskTestConfig(object):
    SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SECRETKEY = 'TESTING'
    TESTING = True
@pytest.fixture
def client():
db_fd, db_path = tempfile.mkstemp()
testconfig = FlaskTestConfig
testconfig.SQLALCHEMY_DATABASE_URI = f'sqlite:///{db_path}'
app, db, migrate = app_factory(testconfig)
db.init_app(app)
with app.test_client() as client:
with app.app_context():
db.create_all()
yield client
print(db_path)
os.close(db_fd)
os.unlink(db_path)
def test_empty_db(client):
r = client.get('/pantry/')
    assert r.data == b''
    assert r.status_code == 204
def test_add_item(client):
payload = {'label': 'testitem', 'quantity': 1}
jpayload = json.dumps(payload)
image = (io.BytesIO(b'image'), 'image.jpg')
r = client.post('/pantry/', content_type='multipart/form-data', data={'image': image, 'payload': jpayload})
    assert r.status_code == 201
r = client.get(f'/pantry/{json.loads(r.data)["id"]}')
    assert r.status_code == 200
assert b'testitem' in r.data
```
|
{
"source": "jernej-kogovsek/Vislice",
"score": 2
}
|
#### File: jernej-kogovsek/Vislice/tekstovni_vmesnik.py
```python
import model
def izpis_igre(igra):
tekst = (
'========================================'
'Število preostalih poskusov: {stevilo_preostalih_poskusov} \n\n'
' {pravilni_del_gesla}\n\n'
'Neuspeli poskusi: {neuspeli_poskusi}\n\n'
'========================================'
).format(
stevilo_preostalih_poskusov=model.STEVILO_DOVOLJENIH_NAPAK - igra.stevilo_napak() + 1,
pravilni_del_gesla=igra.pravilni_del_gesla(),
neuspeli_poskusi=igra.nepravilni_ugibi()
)
return tekst
def izpis_zmage(igra):
tekst = (
'Wipiiii, zmaga! Geslo je bilo: {geslo} \n\n'
).format(
geslo=igra.pravilni_del_gesla()
)
return tekst
def izpis_poraza(igra):
tekst = (
'Booooo, poraz! Geslo je bilo: {geslo} \n\n'
).format(
geslo=igra.geslo()
)
return tekst
def zahtevaj_vnos():
return input('Črka:')
def izpis_napake():
return '\n###### Ugiba se ena črka naenkrat\n\n'
def izpis_napake_znak():
return '\n###### Ugiba naj ne vsebuje posebnih znakov\n\n'
def pozeni_vmesnik():
igra = model.novaigra()
while True:
        # first print the game state so we can see how many letters have been revealed etc.
print(izpis_igre(igra))
        # wait for a letter from the user
poskus = zahtevaj_vnos()
rezultat_ugiba = igra.ugibaj(poskus)
if rezultat_ugiba == model.VEC_KOT_CRKA:
print(izpis_napake())
elif rezultat_ugiba == model.POSEBEN_ZNAK:
print(izpis_napake_znak())
elif rezultat_ugiba == model.ZMAGA:
print(izpis_zmage(igra))
            ponovni_zagon = input("za ponovni zagon vpišite 1.\n").strip()
            if ponovni_zagon == "1":
                igra = model.novaigra()
else:
break
elif rezultat_ugiba == model.PORAZ:
print(izpis_poraza(igra))
            ponovni_zagon = input("za ponovni zagon vpišite 1.\n").strip()
            if ponovni_zagon == "1":
                igra = model.novaigra()
else:
break
# start the game
pozeni_vmesnik()
```
|
{
"source": "jernejml/MakerMonitorTools",
"score": 2
}
|
#### File: MakerMonitorTools/DaiBalanceExporter/exporter.py
```python
import os
import json
import urllib
import locale
import time
import random
import yaml
import logging
from logging.handlers import RotatingFileHandler
from prometheus_client import start_http_server, Gauge
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler = RotatingFileHandler('exporter.log', maxBytes = 100 * 1024 * 1024, backupCount = 5)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# get the keys
with open('keys.yml', 'r') as stream:
try:
keys = yaml.safe_load(stream)
except yaml.YAMLError as exc:
logging.exception("Failed to read keys ...")
quit()
os.environ['WEB3_INFURA_API_KEY'] = keys["infura-api"]
from web3.auto.infura import w3
if w3.isConnected() == True:
logger.info("Connected to the ethereum network")
else:
logger.error("Can't connect to the eth network")
quit()
locale.setlocale(locale.LC_ALL, '')
# get top contracts
with open('topContracts.yml', 'r') as stream:
try:
topDaiHolderContracts = yaml.safe_load(stream)
except yaml.YAMLError as exc:
logger.exception('Failed to read topContracts')
quit()
# https://changelog.makerdao.com/releases/mainnet/1.1.2/contracts.json
daiAddress = w3.toChecksumAddress('0x6b175474e89094c44da98b954eedeac495271d0f')
potAddress = w3.toChecksumAddress('0x197E90f9FAD81970bA7976f33CbD77088E5D7cf7')
def loadContract(address):
abisite = f'http://api.etherscan.io/api?module=contract&action=getabi&address={address}&apikey={keys["etherscan-api"]}&format=raw'
with urllib.request.urlopen(abisite) as url:
abi = json.loads(url.read())
return w3.eth.contract(address = w3.toChecksumAddress(address), abi = abi)
def contractDaiBalance(address):
DAIBalance = daiContract.functions.balanceOf(w3.toChecksumAddress(address)).call()
balanceEth = w3.fromWei(DAIBalance, 'ether')
logger.debug(f'Address {address} holds {balanceEth:n} DAI')
return balanceEth
def contractDaiTotalSupply():
erc20_dai_supply = daiContract.functions.totalSupply().call()
balanceEth = w3.fromWei(erc20_dai_supply, 'ether')
logger.debug(f'Total DAI supply is {balanceEth:n}')
return balanceEth
def dsrBalance():
dsr_balance = potContract.functions.Pie().call()
chi = potContract.functions.chi().call()
multiply = dsr_balance*chi/10**27
balance_eth = w3.fromWei(multiply, 'ether')
logger.debug(f'DSR locked value is {balance_eth:n} DAI')
return balance_eth
balanceGauge = Gauge(
'top_contracts_dai_balance',
'Top contracts DAI balance',
labelnames = ['contract'])
erc20_dsr_Gauge = Gauge(
'dai_ERC20_dsr',
'ERC20 (floating supply) vs dai in DSR',
labelnames = ['supply'])
start_http_server(8000)
# load the contracts
daiContract = loadContract(daiAddress)
potContract = loadContract(potAddress)
while True:
for contractAddress in topDaiHolderContracts:
balanceGauge.labels(contract = topDaiHolderContracts[contractAddress]['desc']).set(contractDaiBalance(contractAddress))
erc20_dsr_Gauge.labels(supply='ERC20').set(contractDaiTotalSupply())
erc20_dsr_Gauge.labels(supply='DSR').set(dsrBalance())
time.sleep(10)
```
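
The exporter expects `keys.yml` to provide `infura-api` and `etherscan-api` entries and `topContracts.yml` to map contract addresses to a `desc` label. Once running, it serves Prometheus metrics on port 8000; a quick local check of the gauges defined above (shown purely for illustration):

```python
import requests

metrics = requests.get("http://localhost:8000/metrics").text
for line in metrics.splitlines():
    if line.startswith(("top_contracts_dai_balance", "dai_ERC20_dsr")):
        print(line)
```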
|
{
"source": "jernejml/mcd_changelog",
"score": 3
}
|
#### File: mcd_changelog/mcd_changelog/mcd_changelog.py
```python
import requests
import json
from bs4 import BeautifulSoup
chains = ['mainnet', 'kovan', 'rinkeby', 'ropsten', 'goerli']
URL_CHANGELOG = "https://changelog.makerdao.com/"
class Release:
"""release class: holds chain, like kovan, and versioning"""
def __init__(self, chain, version, contracts=None):
self.chain = chain
self.version = version
self.contracts = contracts
def readable(self):
return self.chain + "/" + self.version
def get_chain(self):
return self.chain
def get_version(self):
return self.version
def get_contracts(self):
return self.contracts
class Releases:
"""List of Release classes"""
def __init__(self):
self.releases = []
self.count = 0
def __iter__(self):
return iter(self.releases)
def add_release(self, rel):
self.releases.append(rel)
self.count = self.count + 1
def get_releases(self):
return self.releases
def get_count(self):
return self.count
def get_chain_releases(self, chain='mainnet'):
if chain not in chains:
raise Exception("Unknown chain. Try 'print(list_branches())'")
return [r for r in self.releases if r.chain == chain]
def get_chain_latest(self, chain='mainnet'):
if chain not in chains:
raise Exception("Unknown chain. Try 'print(list_branches())'")
candidates = [r for r in self.releases if r.chain == chain]
return [r for r in candidates if r.version == max([r.version for r in candidates])][0]
all_releases = Releases()
def list_branches():
return chains
def fetch():
try:
result = requests.get(URL_CHANGELOG)
return result.content
except requests.exceptions.RequestException as e:
raise e
def fetch_contracts(url):
try:
response = requests.get(url)
return json.loads(response.content)
except requests.exceptions.RequestException as e:
raise e
def parse_release_string(rels):
for r in rels:
result = r.split("/")
try:
verify_string_format(result)
except Exception as e:
raise e
try:
contracts = fetch_contracts(URL_CHANGELOG + "releases/" + result[2] + "/" + result[3] + "/contracts.json")
except Exception as e:
raise e
rel = Release(result[2], result[3], contracts)
all_releases.add_release(rel)
return
def get_releases(c):
soup = BeautifulSoup(c, 'html.parser')
    prelim = [x.get('href') for x in soup.find_all('a') if x.get('href')]
releases = [x for x in prelim if x.find("releases") > 0]
for c in chains:
rels = [x for x in releases if x.find(c) > 0]
try:
parse_release_string(rels)
except Exception as e:
raise e
return all_releases
def verify_string_format(strings):
    if len(strings) != 5:
        raise Exception("Parsed string length is not 5")
    if strings[0] != '':
        raise Exception("First cut expected to be empty string")
    if strings[1] != 'releases':
        raise Exception("Second cut is not 'releases'")
    if strings[2] not in chains:
        raise Exception("Third cut should be known chain")
    # version must be in the form x.y.z where x, y and z are digits
    version_parts = strings[3].split(".")
    if len(version_parts) != 3 or not all(part.isdigit() for part in version_parts):
        raise Exception("Version string not in format x.y.z, where x y z are digits")
def main():
c = fetch()
releases = get_releases(c)
r = releases.get_chain_latest("kovan")
contracts = r.get_contracts()
print(contracts["MCD_FLIP_ETH_A"])
if __name__ == "__main__":
main()
```
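
Used as a library rather than via `main()`, the flow is the same: fetch the changelog page, parse it into `Releases`, then pick a chain. A small sketch reusing the contract key already referenced in `main()`:

```python
content = fetch()
releases = get_releases(content)

print(list_branches())                        # known chains
latest = releases.get_chain_latest("mainnet")
print(latest.readable())                      # e.g. "mainnet/1.1.2"
print(latest.get_contracts()["MCD_FLIP_ETH_A"])
```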
|
{
"source": "jernejovc/pystreamable",
"score": 3
}
|
#### File: pystreamable/pystreamable/StreamableApi.py
```python
from __future__ import print_function
from __future__ import absolute_import
import requests
from .exceptions import (StreamableApiClientException,
StreamableApiServerException)
from .utils import Authentication
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '1.0.1'
__license__ = 'MIT'
STREAMABLE_URL = 'https://streamable.com'
API_ROOT = 'https://api.streamable.com'
UPLOAD_URL = API_ROOT + '/upload'
RETRIEVE_URL = API_ROOT + '/videos/%s'
IMPORT_URL = API_ROOT + '/import'
RETRIEVE_USER_URL = API_ROOT + '/users/%s'
AUTH_USER_URL = API_ROOT + '/me'
USER_AGENT = {'user-agent': 'pystreamable/%s (api.streamable.com python wrapper)' % __version__ }
class StreamableApi:
"""
streamable.com API wrapper.
"""
authentication = Authentication()
    def __init__(self, username=None, password=None):
"""
Create a new instance of the API.
If you provide username and password the uploads will be stored in your
account.
:param username: streamable.com username
        :param password: streamable.com password
"""
self.authentication = Authentication(username, password)
def get_info(self, video_id):
"""
Get video info for video with given video ID.
Args:
video_id: Streamable short video id
Returns:
JSON with video data.
"""
url = RETRIEVE_URL % video_id
resp = self._api_request(url, requests.get)
return resp.json()
def upload_video(self, filename, title=None):
"""
Upload a video to streamable.com. Works with absolute and relative
paths.
:param filename: Path to video to be uploaded
:param title: Optional title for the video, if not present streamable
uses filename as title
:return: JSON with uploaded video data.
"""
data = None
with open(filename, 'rb') as infile:
files = {'file': infile}
if title:
data = {'title': title}
resp = self._api_request(UPLOAD_URL,
requests.post,
data=data,
files=files)
return resp.json()
def retrieve_user(self, username):
"""
Retrieves user info.
:param username: username for which to get user info
:return: JSON with user info
"""
url = RETRIEVE_USER_URL % username
resp = self._api_request(url, requests.get)
return resp.json()
def auth_user_info(self):
"""
Get authenticated user info.
:return: JSON with info of authenticated user.
"""
resp = self._api_request(AUTH_USER_URL, requests.get)
return resp.json()
def import_video(self, url, title=None):
"""
Imports a video from video hosted on external site.
:param url: URL to video file or webpage containing a video
:param title: Optional title of uploaded video
:return: JSON with uploaded video data
"""
payload = {'url': url}
if title:
payload['title'] = title
resp = self._api_request(IMPORT_URL, requests.get, payload=payload)
return resp.json()
def _api_request(self, url, method, payload=None, data=None, files=None):
auth = self.authentication.get_auth() \
if self.authentication.has_auth() else None
resp = method(url=url,
params=payload,
data=data if data else None,
files=files if files else None,
auth=auth,
headers=USER_AGENT)
        if 200 <= resp.status_code < 300:
            return resp
        if 400 <= resp.status_code < 500:
            raise StreamableApiClientException(resp.text)
        if 500 <= resp.status_code < 600:
            raise StreamableApiServerException(resp.text)
        raise RuntimeError(resp.text)
```
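
Typical use is either anonymous (uploads are not tied to an account) or authenticated. A brief sketch; the video id, filename and credentials below are placeholders:

```python
api = StreamableApi()                 # anonymous client
info = api.get_info("abc123")         # placeholder short video id
print(info.get("title"), info.get("status"))

# Authenticated client: uploads show up under the given account.
# api = StreamableApi("myuser", "mypassword")
# result = api.upload_video("clip.mp4", title="My clip")
# print(result.get("shortcode"))
```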
|
{
"source": "jernejule/iMaps",
"score": 3
}
|
#### File: imaps/base/operation.py
```python
import argparse
import inspect
class BaseOperation:
"""Base operation."""
def validate_inputs(self):
"""Validate inputs."""
raise NotImplementedError("Overwrite this in subclasses.")
def main(self):
"""Run."""
raise NotImplementedError("Overwrite this in subclasses.")
def run(self):
"""Run."""
self.validate_inputs()
self.main()
@classmethod
def cli(cls):
"""Add command line interface."""
# Create parser
parser = argparse.ArgumentParser(cls.__doc__)
for name, parameter in inspect.signature(cls.__init__).parameters.items():
if name == "self":
continue
if parameter.default != inspect._empty: # pylint: disable=protected-access
parser.add_argument(f"--{name}", help=name, default=parameter.default)
else:
parser.add_argument(name, help=name)
# Parse arguments
args = parser.parse_args()
# Prepare inputs
inputs = {}
for name, _ in inspect.signature(cls.__init__).parameters.items():
if name == "self":
continue
inputs[name] = getattr(args, name)
# Create an instance and run it
instance = cls(**inputs)
instance.run()
```
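
To use it, a concrete operation subclasses `BaseOperation`, declares its inputs in `__init__` (parameters with defaults become optional `--flags`), and implements `validate_inputs` and `main`; `cls.cli()` then builds the argparse interface automatically. An illustrative subclass, not part of iMaps itself:

```python
class CountLines(BaseOperation):
    """Count lines in a text file."""

    def __init__(self, path, encoding="utf-8"):
        self.path = path
        self.encoding = encoding

    def validate_inputs(self):
        if not self.path:
            raise ValueError("path is required")

    def main(self):
        with open(self.path, encoding=self.encoding) as handle:
            print(sum(1 for _ in handle))


if __name__ == "__main__":
    CountLines.cli()  # e.g. `python count_lines.py data.txt --encoding latin-1`
```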
#### File: imaps/base/sheet.py
```python
import collections
import csv
import gzip
import re
import resdk
import xlrd
from imaps.base.constants.assets import SPECIES
from imaps.base.constants.sheet import COLUMNS, INTEGER_COLUMNS, METHOD, PROTEIN, REQUIRED_COLUMNS, TISSUE
from imaps.base.exceptions import ValidationError
from imaps.base.validation import validate_date, validate_string
class DescriptorSheet:
"""iClip annotation template operations."""
max_sample_name_size = 99
def __init__(self, filename):
"""Define all instance variables."""
self.fname = filename
self.errors = []
self.extension = None
self.get_extension()
self.annotation_tab_name = self.fname.replace(self.extension, "") + "tab.gz"
self.content = []
self.column_names = []
self.parse()
self.sample_names = self.get_column("Sample name")
def parse(self):
"""Parse annotation content."""
if self.extension not in ("xls", "xlsx", "tab.gz"):
self.error("File extension not recognized.")
self.fail()
if self.extension in ("xls", "xlsx"):
workbook = xlrd.open_workbook(self.fname)
worksheet = workbook.sheets()[0]
column_names = worksheet.row_values(0)
content = []
for rownum in range(1, worksheet.nrows):
ascii_row_content = []
for cell_content, column_name in zip(worksheet.row_values(rownum), column_names):
                    # Handle non-ASCII characters:
try:
ascii_value = bytes(str(cell_content).encode("utf-8")).decode("ascii", "strict")
except (UnicodeEncodeError, UnicodeDecodeError):
for position, char in enumerate(cell_content):
if ord(char) > 127:
break
self.warning(
'Problem decoding row {}, column "{}" at position {}.'.format(
rownum + 1, column_name, position
)
)
ascii_value = bytes(str(cell_content).encode("utf-8")).decode("ascii", "ignore")
                    finally:
                        # Strip the cell value, since stray whitespace can cause problems downstream.
                        ascii_value = ascii_value.strip()
ascii_row_content.append(ascii_value)
content.append(ascii_row_content)
if self.extension == "tab.gz":
with gzip.open(self.fname, "rt") as unzipped_annotation:
annotation_content = list(csv.reader(unzipped_annotation, delimiter="\t"))
column_names = annotation_content[0]
content = annotation_content[1:]
self.content = content
self.column_names = column_names
def write_tab_file(self, path=None):
"""Write a compressed tab delimited file."""
if path is None:
path = self.annotation_tab_name
with gzip.open(path, "wt") as tab_file:
tab_writer = csv.writer(tab_file, delimiter="\t")
tab_writer.writerow(self.column_names)
tab_writer.writerows(self.content)
def get_extension(self):
"""Obtain the full extension."""
extension = self.fname.split(".")[-1]
if extension == "gz":
extension = ".".join(self.fname.split(".")[-2:])
self.extension = extension
def get_column(self, column_name):
"""Obtain named column content."""
column_index = next(i for i, name in enumerate(self.column_names) if name == column_name)
return [row[column_index] for row in self.content]
def get_element(self, column_name, sample_name):
"""Obtain content of specific sample in the named column."""
if column_name not in self.column_names:
raise ValueError(f'There is no column with column name: "{column_name}"')
if sample_name not in self.sample_names:
raise ValueError(f'There is no sample with sample name: "{sample_name}"')
for row, sample in zip(self.content, self.sample_names):
for element, column in zip(row, self.column_names):
if sample == sample_name and column == column_name:
return element
def get_barcodes3(self):
"""Get 3' barcodes and number of tolerated mismatches."""
barcodes3 = []
mismatches = []
for barcode in self.get_column("Linker"):
barcodes3.append(re.sub(r"[^ACGTN]+", "", barcode))
mismatches.append(int(re.sub(r"[^\d]+", "", barcode)))
assert len(set(mismatches)) == 1
return barcodes3, mismatches[0]
def get_barcodes5(self):
"""Get 5' barcodes and number of tolerated mismatches."""
barcodes5 = []
mismatches = []
for barcode in self.get_column("5' barcode"):
barcodes5.append(re.sub(r"[^ACGTN]+", "", barcode))
mismatches.append(int(re.sub(r"[^\d]+", "", barcode)))
assert len(set(mismatches)) == 1
return barcodes5, mismatches[0]
def get_adapters(self):
"""Get adapter sequences."""
adapters = []
for adapter in self.get_column("3' adapter"):
adapters.append(re.sub(r"[^ACGTN]+", "", adapter))
return adapters
def validate(self):
"""Run all validation functions."""
self.validate_column_names()
self.validate_required_columns()
self.validate_integer_columns()
self.validate_sample_names()
self.validate_column_entry_viability("Method", METHOD)
self.validate_protein()
self.validate_tissue()
self.validate_column_entry_viability("Species", SPECIES)
self.validate_barcode_uniqness()
self.validate_single_adapter()
self.validate_yes_no("Consensus mapping (optional)")
self.validate_date("Date of gel images in lab notebook (optional)")
if self.errors:
self.fail()
def validate_barcode_uniqness(self):
"""Validate uniqness of barcodes."""
barcodes3, _ = self.get_barcodes3()
barcodes5, _ = self.get_barcodes5()
if all([brc == "" for brc in barcodes3]):
            # No 3' barcodes are given, check only for uniqueness of the 5' ones.
if len(barcodes5) != len(set(barcodes5)):
self.error("Barcodes on 5' end are not unique.")
else:
combined = list(zip(barcodes5, barcodes3))
if len(combined) != len(set(combined)):
self.error("Combination of barcodes on 3' and 5' end is not unique.")
def validate_single_adapter(self):
"""Validate that all samples have same adapter."""
adapters = self.get_adapters()
if len(set(adapters)) > 1:
self.error("All samples should have the same adapter sequence.")
def validate_sample_names(self):
"""Validate that all samples have names and are unique."""
if len(self.sample_names) > len(set(self.sample_names)):
repeated = [name for name, count in collections.Counter(self.sample_names).items() if count >= 2]
repeated = ", ".join(repeated)
self.error("Sample names should be unique, but these names are used multiple times: {}.".format(repeated))
for name in self.sample_names:
if not name:
self.error("One or more values for annotation field <Sample name> is missing.")
if len(name) >= self.max_sample_name_size:
self.error(
'Sample name "{}" should be shorter than {} characters.'.format(name, self.max_sample_name_size)
)
def validate_column_names(self):
"""Validate if all columns are present."""
missing_columns = COLUMNS - set(self.column_names)
if missing_columns:
self.error(
"Annotation file does not contain all the required columns. Missing columns: {}".format(
missing_columns
)
)
def validate_date(self, column_name):
"""Validate date format."""
for sample_name, date in zip(self.sample_names, self.get_column(column_name)):
try:
validate_date(date, allow_empty=True)
except ValueError:
self.error("SAMPLE: {} - Incorrect date format ({}), should be YYYY-MM-DD.".format(sample_name, date))
def validate_yes_no(self, column_name):
"""Validate that ``value`` is "yes" or "no"."""
for sample_name, element in zip(self.sample_names, self.get_column(column_name)):
try:
validate_string(element, choices=["yes", "no"], allow_empty=True)
except ValueError:
self.error(
'SAMPLE: {} - Value {} in column {} should be "yes", "no" or empty.'.format(
sample_name, element, column_name
)
)
def validate_required_columns(self):
"""Check if essential input is given."""
for column_name in REQUIRED_COLUMNS:
for sample_name in self.sample_names:
element = self.get_element(column_name=column_name, sample_name=sample_name)
if not element:
self.error("SAMPLE: {} - Value for column {} is missing.".format(sample_name, column_name))
def validate_integer_columns(self):
"""Validate that ``value`` is integer or empty."""
for column_name in INTEGER_COLUMNS:
for sample_name in self.sample_names:
element = self.get_element(column_name=column_name, sample_name=sample_name)
if element:
try:
int(float(element))
except ValueError:
self.error(
"SAMPLE: {} - Value {} in column {} should be integer.".format(
sample_name, element, column_name
)
)
def validate_tissue(self):
"""Validate that ``cells/tissue`` is existent."""
for sample_name in self.sample_names:
tissue = self.get_element(column_name="Cells/tissue", sample_name=sample_name)
if self.get_part_before_colon(tissue) not in TISSUE:
self.error(
"SAMPLE: {} - {} is not a valid entry for the cells/tissue annotation field.".format(
sample_name, tissue
)
)
def validate_column_entry_viability(self, column, possible):
"""Validate that the entry is recognized."""
for sample_name in self.sample_names:
element = self.get_element(column_name=column, sample_name=sample_name)
if element not in possible:
self.error("SAMPLE: {} - {} is not a valid entry for column {}.".format(sample_name, element, column))
def validate_protein(self):
"""Only validate protein names if species is human or mouse."""
res = resdk.Resolwe(url="https://app.genialis.com")
for sample_name in self.sample_names:
species = self.get_element(column_name="Species", sample_name=sample_name)
protein = self.get_element(column_name="Protein", sample_name=sample_name)
gene_symbol = self.get_part_before_colon_hypen(protein)
if gene_symbol and gene_symbol not in PROTEIN:
if species in ["Homo sapiens", "Mus musculus"]:
kb_gene = res.feature.filter(source="UCSC", feature_id=[gene_symbol])
if not kb_gene:
self.error(
"SAMPLE: {} - Gene symbol {} is either invalid or "
"Knowledge Base cannot be reached.".format(sample_name, protein)
)
def error(self, string):
"""Save error messages."""
self.errors.append(string)
def fail(self):
"""Report collected error messages."""
raise ValidationError(self.errors)
@staticmethod
def get_part_before_colon(string):
"""Return part of string before first first colon."""
try:
return re.match(r".*?([^:]+)[:]?", string).group(1)
except AttributeError:
return string
@staticmethod
def get_part_before_colon_hypen(string):
"""Return part of string before first first colon / hypen."""
try:
return re.match(r".*?([^:\-]+)[:\-]?", string).group(1)
except AttributeError:
return string
@staticmethod
def warning(string):
"""Return an warning message."""
print(string)
```
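For context, the typical flow with the class above is `DescriptorSheet(path).validate()` followed by `write_tab_file()`. The snippet below is a standalone sketch of just the barcode/adapter parsing convention used by `get_barcodes5`, `get_barcodes3` and `get_adapters`: everything that is not an ACGTN base is stripped to recover the sequence, and the remaining digits give the tolerated mismatch count. The cell values are taken from the example annotation used in the tests further down.

```python
import re

# Standalone sketch of the parsing convention used by get_barcodes5,
# get_barcodes3 and get_adapters above.
cells_5prime = ["NNNN,GTAAC_0,NNNNN", "NNNN,CCGGA_0,NNN"]
barcodes5 = [re.sub(r"[^ACGTN]+", "", cell) for cell in cells_5prime]
mismatches5 = [int(re.sub(r"[^\d]+", "", cell)) for cell in cells_5prime]
print(barcodes5)    # ['NNNNGTAACNNNNN', 'NNNNCCGGANNN']
print(mismatches5)  # [0, 0]

linker = "L3-GTC"
print(re.sub(r"[^ACGTN]+", "", linker))    # 'GTC'  -> 3' barcode sequence
print(int(re.sub(r"[^\d]+", "", linker)))  # 3      -> tolerated mismatches
```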
#### File: imaps/scripts/batch_download.py
```python
import argparse
import os
import pathlib
import resdk
SERVER_URL = "https://imaps.genialis.com"
SUPPORTED_TYPES = [
"all",
"fastq",
"bam",
"bed",
"bedgraph",
"bed-annotate",
"bedgraph-annotate",
"bed-clusters",
"bedgraph-clusters",
"bed-group",
"bedgraph-group",
"bed-xlsites",
"bedgraph-xlsites",
"bed_multi",
"peaks",
"bedgraph-peaks",
"type_summary",
"subtype_summary",
"gene_summary",
"bed-paraclu",
"results-rnamaps",
"results-kmers", # New kmers version
"figures-kmers", # Old kmers version
"pos_distribution-kmers", # Old kmers version
"kmer_counts-kmers", # Old kmers version
]
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-c", "--collection", required=True, help="Collection name.")
parser.add_argument(
"-t",
"--types",
required=True,
help="Types of files to download. If multiple types are given, separate them by comma.",
)
parser.add_argument(
"-d",
"--directory",
default=None,
help="Directory into which to download files. If not given, download to current working " "directory.",
)
return parser.parse_args()
def parse_types(input_types):
"""Parse types argument."""
if input_types == "all":
return SUPPORTED_TYPES[:]
types = []
for type_ in input_types.split(","):
type_ = type_.strip()
if type_ not in SUPPORTED_TYPES:
raise ValueError('Type "{}" is not supported.'.format(type_))
types.append(type_.split("-"))
return types
def get_unexisting_name(name, directory):
"""Get unexisting name it the one given already exists."""
extension = "".join(pathlib.Path(name).suffixes)
basename = os.path.basename(name)[: -len(extension)]
i = 1
while name in os.listdir(directory):
name = "{} ({}){}".format(basename, i, extension)
i += 1
return name
def rename_if_clashing(name, directory):
"""Rename file if it alrady exists."""
if name in os.listdir(directory):
os.rename(
os.path.join(directory, name), os.path.join(directory, get_unexisting_name(name, directory)),
)
def main():
"""Invoke when run directly as a program."""
args = parse_arguments()
res = resdk.Resolwe(url=SERVER_URL)
res.login()
collection = res.collection.get(name=args.collection)
types = parse_types(args.types)
for data in collection.data:
if data.status != "OK":
continue
for type_ in types:
            # type_ is a list of length 1 or 2: [field_name] or [field_name, process_type].
if len(type_) == 2:
if not data.process.type.strip(":").endswith(type_[1]):
continue
field_name = type_[0]
if field_name not in data.output:
continue
if isinstance(data.output[field_name], list):
for item in data.output[field_name]:
                    # Check whether the file name of the file to be downloaded
                    # clashes with an existing filename in the download directory.
                    # If so, rename the existing file to a non-clashing name.
original_name = os.path.basename(item["file"])
rename_if_clashing(original_name, args.directory)
else:
original_name = os.path.basename(data.output[field_name]["file"])
rename_if_clashing(original_name, args.directory)
print("Downloading {} output of data {} ...".format(field_name, data.name))
data.download(field_name=field_name, download_dir=args.directory)
if __name__ == "__main__":
main()
```
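A short sketch of how the type filter and the clash renaming above behave; the file names are invented for illustration, and it assumes the module is importable under the path shown in the heading above. Each entry in `SUPPORTED_TYPES` is either an output field name or `field_name-process_type`, and `main()` only matches the optional second part against the end of the data object's process type.

```python
from imaps.scripts.batch_download import get_unexisting_name, parse_types

print(parse_types("fastq, bedgraph-xlsites"))
# [['fastq'], ['bedgraph', 'xlsites']]
# -> download the 'fastq' output of any data object, and the 'bedgraph'
#    output only of data whose process type ends with 'xlsites'.

# If 'reads.fastq.gz' already exists in the current directory, the existing
# file would be renamed to a non-clashing name such as 'reads (1).fastq.gz'
# before the new file is downloaded; otherwise the name comes back unchanged.
print(get_unexisting_name("reads.fastq.gz", "."))
```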
#### File: iMaps/tests/test_sheet.py
```python
import os
import unittest
import pandas as pd
from imaps.base.constants.sheet import METHOD
from imaps.base.sheet import DescriptorSheet
class TestDescriptorSheet(unittest.TestCase):
def setUp(self):
tests_dir = os.path.dirname(os.path.realpath(__file__))
self.example_annotation_path = os.path.join(tests_dir, "data/sample_annotation.xlsx")
self.example_annotation_tab_path = os.path.join(tests_dir, "data/sample_annotation.tab.gz")
self.example_annotation = DescriptorSheet(self.example_annotation_path)
self.correct_content = [
[
"Sample_1",
"Test collection Joe",
"Blah blah",
"Super Smarty",
"The Boss",
"iCLIP",
"It was magic",
"TARDBP",
"HEK293",
"",
"Homo sapiens",
"NNNN,GTAAC_0,NNNNN",
"AGATCGGAAG_1,AGCGGTTCAG_2",
"HiSeq",
"mouse anti-Pseudouridine",
"irCLIP_ddRT_42",
"L3-GTC",
"",
"",
"",
"1995.0",
"",
"1.0",
"",
"122.0",
"10.0",
"2020-04-10",
"",
],
[
"Sample_2",
"Test collection Joe",
"Blah blah",
"Super Smarty",
"The Boss",
"iCLIP",
"It was magic",
"TARDBP-GFP",
"CEM_SS",
"",
"Homo sapiens",
"NNNN,GTAAC_0,NNNNN",
"AGATCGGAAG_1,AGCGGTTCAG_2",
"HiSeq",
"mouse anti-Pseudouridine",
"irCLIP_ddRT_72",
"L3-GGA",
"",
"",
"",
"2000.0",
"",
"2.0",
"",
"123.0",
"15.0",
"2020-04-10",
"",
],
[
"Sample_3",
"Test collection Joe",
"Blah blah",
"Super Smarty",
"The Boss",
"iCLIP",
"It was magic",
"ctrl-pseudouridine",
"Cal51",
"Serious",
"Homo sapiens",
"NNNN,CCGGA_0,NNN",
"AGATCGGAAG_1,AGCGGTTCAG_2",
"HiSeq",
"mouse anti-Pseudouridine",
"irCLIP_ddRT_36",
"L3",
"yes",
"",
"",
"2010.0",
"",
"3.0",
"",
"124.0",
"20.0",
"2020-04-10",
"",
],
]
self.df_correct_content = pd.DataFrame(self.correct_content, columns=self.example_annotation.column_names)
def change_column_content(self, column_name, replacement):
"""Replace the content in a column."""
if column_name not in self.example_annotation.column_names:
print("{} is not present in column names.".format(column_name))
df = pd.DataFrame(self.example_annotation.content, columns=self.example_annotation.column_names)
df[column_name] = replacement
self.example_annotation.content = df.values.tolist()
def test_fname(self):
"""Test if the saved filename is correct."""
self.assertEqual(self.example_annotation.fname, self.example_annotation_path)
def test_extension(self):
"""Test if the saved format is correct."""
self.assertEqual(self.example_annotation.extension, "xlsx")
# Test if .gz gets recognized.
self.example_annotation.fname = self.example_annotation_tab_path
self.example_annotation.get_extension()
self.assertEqual(self.example_annotation.extension, "tab.gz")
def test_content(self):
"""Test if the content is correctly read."""
self.assertEqual(self.example_annotation.content, self.correct_content)
# Test if the content is correctly read from tab.gz.
example_annotation_tab = DescriptorSheet(self.example_annotation_tab_path)
self.assertEqual(example_annotation_tab.content, self.correct_content)
self.assertEqual(example_annotation_tab.column_names, self.example_annotation.column_names)
def test_get_column(self):
"""Test if the right column is returned."""
col = self.example_annotation.get_column(column_name="Protein")
self.assertEqual(col, self.df_correct_content["Protein"].tolist())
def test_get_element(self):
"""Test if the right element is returned."""
element = self.example_annotation.get_element(sample_name="Sample_1", column_name="Protein")
self.assertEqual(element, "TARDBP")
def test_get_barcodes3(self):
"""Test if the 3' barcodes and number of tolerated mismatches are returned."""
self.assertEqual(self.example_annotation.get_barcodes3(), (["GTC", "GGA", ""], 3))
def test_get_barcodes5(self):
"""Test if the 5' barcodes and number of tolerated mismatches are returned."""
self.assertEqual(
self.example_annotation.get_barcodes5(), (["NNNNGTAACNNNNN", "NNNNGTAACNNNNN", "NNNNCCGGANNN"], 0)
)
def test_get_adapters(self):
"""Test if the adapter sequences are returned."""
self.assertEqual(
self.example_annotation.get_adapters(),
["AGATCGGAAGAGCGGTTCAG", "AGATCGGAAGAGCGGTTCAG", "AGATCGGAAGAGCGGTTCAG"],
)
def test_validate_barcode_uniqness(self):
"""Test the validation of barcode uniqueness."""
linker_column_name = "Linker"
barcode5_column_name = "5' barcode"
# Test if correct content passes validation.
self.example_annotation.validate_barcode_uniqness()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation passes when no linkers are given but the 5' barcodes are unique.
self.change_column_content(
column_name=barcode5_column_name,
replacement=["NNNN,GTAAC_0,NNNNN", "NNNN,GTCAC_0,NNNNN", "NNNN,CCGGA_0,NNN"],
)
self.change_column_content(column_name=linker_column_name, replacement=["L3", "L3", "L3"])
self.example_annotation.validate_barcode_uniqness()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when no linkers are given but the 5' barcodes are not unique.
self.change_column_content(
column_name=barcode5_column_name,
replacement=["NNNN,GTAAC_0,NNNNN", "NNNN,GTAAC_0,NNNNN", "NNNN,CCGGA_0,NNN"],
)
self.change_column_content(column_name=linker_column_name, replacement=["L3", "L3", "L3"])
self.example_annotation.validate_barcode_uniqness()
message = "Barcodes on 5' end are not unique."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
# Test if the validation fails when linkers are given but the combinations are not unique.
self.change_column_content(column_name=linker_column_name, replacement=["L3-GTC", "L3-GTC", "L3"])
self.example_annotation.validate_barcode_uniqness()
message = "Combination of barcodes on 3' and 5' end is not unique."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_single_adapter(self):
"""Test the validation of same adapter over all samples."""
# Test if the validation passes when all adapters are the same.
self.example_annotation.validate_single_adapter()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when all adapters are not same.
self.change_column_content(
column_name="3' adapter",
replacement=["CAGATCGGAAG_1,AGCGGTTCAG_2", "AGATCGGAAG_1,AGCGGTTCAG_2", "AGATCGGAAG_1,AGCGGTTCAG_2"],
)
self.example_annotation.validate_single_adapter()
message = "All samples should have the same adapter sequence."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_sample_names(self):
"""Test the validation of unique sample names."""
# Test if the validation passes when all sample names are unique and present.
self.example_annotation.validate_sample_names()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when all sample names are not unique.
self.example_annotation.sample_names = ["Sample_1", "Sample_1", "Sample_3"]
self.example_annotation.validate_sample_names()
message = "Sample names should be unique, but these names are used multiple times: Sample_1."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
# Test if the validation fails when not all rows have sample names.
self.example_annotation.sample_names = ["Sample_1", "", "Sample_3"]
self.example_annotation.validate_sample_names()
message = "One or more values for annotation field <Sample name> is missing."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
        # Test if the validation fails when the sample name is too long.
to_long_name = "a" * (DescriptorSheet.max_sample_name_size + 1)
self.example_annotation.sample_names = ["Sample_1", to_long_name, "Sample_3"]
self.example_annotation.validate_sample_names()
message = 'Sample name "{}" should be shorter than 99 characters.'.format(to_long_name)
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_columns_names(self):
"""Test the validation of present columns."""
# Test if the validation passes when all columns are present.
self.example_annotation.validate_column_names()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when all columns are not present.
self.example_annotation.column_names[1] = ""
self.example_annotation.validate_column_names()
message = "Annotation file does not contain all the required columns. Missing columns: {'Collection name'}"
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_date(self):
"""Test the validation of date format."""
date_column_name = "Date of gel images in lab notebook (optional)"
# Test if the validation passes when the date format is correct.
self.example_annotation.validate_date(date_column_name)
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when the date format is incorrect.
self.change_column_content(
column_name=date_column_name, replacement=["2020/04/10", "2020-04-10", "2020-04-10"]
)
self.example_annotation.validate_date(date_column_name)
message = "SAMPLE: Sample_1 - Incorrect date format (2020/04/10), should be YYYY-MM-DD."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_yes_no(self):
"""Test the validation that ``value`` is "yes" or "no"."""
yes_no_column_name = "Consensus mapping (optional)"
# Test if the validation passes when value is correct.
self.example_annotation.validate_yes_no(yes_no_column_name)
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation passes when there are no values.
self.change_column_content(column_name=yes_no_column_name, replacement=["", "", ""])
self.example_annotation.validate_yes_no(yes_no_column_name)
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when value is incorrect.
self.change_column_content(column_name=yes_no_column_name, replacement=["yey", "no", ""])
self.example_annotation.validate_yes_no(yes_no_column_name)
message = 'SAMPLE: Sample_1 - Value yey in column {} should be "yes", "no" or empty.'.format(
yes_no_column_name
)
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_required_columns(self):
"""Test the validation of essential input presence."""
# Test if the validation passes when all values are given.
self.example_annotation.validate_required_columns()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when not all values are given.
self.change_column_content(column_name="Scientist", replacement=["Super Smarty", "", "Super Smarty"])
self.example_annotation.validate_required_columns()
message = "SAMPLE: Sample_2 - Value for column Scientist is missing."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_column_entry_viability(self):
"""Test that the validation recognises the entry."""
tested_entries = "Method"
# Test if the validation passes when the entry exists.
self.example_annotation.validate_column_entry_viability(tested_entries, METHOD)
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when the entry does not exist.
self.change_column_content(column_name=tested_entries, replacement=["iCLIP", "", "Magick"])
self.example_annotation.validate_column_entry_viability(tested_entries, METHOD)
message_one = "SAMPLE: Sample_2 - is not a valid entry for column {}.".format(tested_entries)
message_two = "SAMPLE: Sample_3 - Magick is not a valid entry for column {}.".format(tested_entries)
self.assertEqual(self.example_annotation.errors, [message_one, message_two])
self.example_annotation.errors.clear()
def test_validate_integer_columns(self):
"""Test that validation correctly checks if ``value`` is integer or empty."""
integer_column = "Replicate (optional)"
        # Test if the validation passes when the entries exist and are correct.
self.example_annotation.validate_integer_columns()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
        # Test if the validation passes when the entries do not exist.
self.change_column_content(column_name=integer_column, replacement=["", "", ""])
self.example_annotation.validate_integer_columns()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when the entries are not correct.
self.change_column_content(column_name=integer_column, replacement=["one", "2", "3"])
self.example_annotation.validate_integer_columns()
message = "SAMPLE: Sample_1 - Value one in column {} should be integer.".format(integer_column)
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_protein(self):
"""Test validation protein."""
protein_column = "Protein"
# Test if the validation passes when the entries are from human or mouse and are correct.
self.example_annotation.validate_protein()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation passes when there are no entries.
self.change_column_content(column_name=protein_column, replacement=["", "", ""])
self.example_annotation.validate_protein()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when the entries are from human or mouse and are incorrect.
self.change_column_content(column_name=protein_column, replacement=["no_protein", "", ""])
self.example_annotation.validate_protein()
message = "SAMPLE: Sample_1 - Gene symbol no_protein is either invalid or Knowledge Base cannot be reached."
self.assertEqual(self.example_annotation.errors, [message])
self.example_annotation.errors.clear()
def test_validate_tissue(self):
"""Test validation of ``cells/tissue`` entries."""
# Test if the validation passes when the entries are correct.
self.example_annotation.validate_tissue()
self.assertFalse(self.example_annotation.errors)
self.example_annotation.errors.clear()
# Test if the validation fails when the entries are not correct.
self.change_column_content(column_name="Cells/tissue", replacement=["HEK293", "", "bone"])
self.example_annotation.validate_tissue()
message_one = "SAMPLE: Sample_2 - is not a valid entry for the cells/tissue annotation field."
message_two = "SAMPLE: Sample_3 - bone is not a valid entry for the cells/tissue annotation field."
self.assertEqual(self.example_annotation.errors, [message_one, message_two])
self.example_annotation.errors.clear()
if __name__ == "__main__":
unittest.main()
```
#### File: iMaps/tests/test_validation.py
```python
from imaps.base.validation import (
validate_bam_file,
validate_bed_file,
validate_date,
validate_integer,
validate_string,
)
from ngs_test_utils.testcase import NgsTestCase
class TestValidation(NgsTestCase):
def test_validate_bed_file(self):
message = "Bed file file.txt should have a valid bed extension."
with self.assertRaisesRegex(ValueError, message):
validate_bed_file("file.txt", check_exist=False)
message = "Bed file file.bed does not exist."
with self.assertRaisesRegex(ValueError, message):
validate_bed_file("file.bed", check_exist=True)
bed = self.make_bed(intervals=[["chr1", 10, 20, ".", 12, "+"]])
validate_bed_file(bed, check_exist=True)
def test_validate_bam_file(self):
message = "Bam file file.txt should have a valid bam extension."
with self.assertRaisesRegex(ValueError, message):
validate_bam_file("file.txt", check_exist=False)
message = "Bam file file.bam does not exist."
with self.assertRaisesRegex(ValueError, message):
validate_bam_file("file.bam", check_exist=True)
bam = self.make_bam(chroms=[("chr1", 100)], segments=[dict(cigar=[(0, 75)])])
validate_bam_file(bam, check_exist=True)
def test_validate_string(self):
message = "Value 123 should be a string."
with self.assertRaisesRegex(ValueError, message):
validate_string(123)
message = "Value C should be one of A, B."
with self.assertRaisesRegex(ValueError, message):
validate_string("C", choices=["A", "B"])
validate_string("A")
validate_string("B", choices=["A", "B"])
validate_string("", allow_empty=True)
def test_validate_integer(self):
message = "Value AAA should be an integer."
with self.assertRaisesRegex(ValueError, message):
validate_integer("AAA")
validate_integer(123)
def test_validate_date(self):
message = "Incorrect date format \\(1.2.1990\\), should be YYYY-MM-DD."
with self.assertRaisesRegex(ValueError, message):
validate_date("1.2.1990")
validate_date("1900-2-1")
validate_date("", allow_empty=True)
```
|