ClassVar[str] = "case to entity association mixin"
class_model_uri: ClassVar[URIRef] = BIOLINK.CaseToEntityAssociationMixin
subject: Union[str, CaseId] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.subject):
self.MissingRequiredField("subject")
if not isinstance(self.subject, CaseId):
self.subject = CaseId(self.subject)
super().__post_init__(**kwargs)
@dataclass
class ChemicalToChemicalAssociation(Association):
"""
A relationship between two chemical entities. This can encompass actual interactions as well as temporal causal
edges, e.g. one chemical converted to another.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.ChemicalToChemicalAssociation
class_class_curie: ClassVar[str] = "biolink:ChemicalToChemicalAssociation"
class_name: ClassVar[str] = "chemical to chemical association"
class_model_uri: ClassVar[URIRef] = BIOLINK.ChemicalToChemicalAssociation
id: Union[str, ChemicalToChemicalAssociationId] = None
subject: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
object: Union[str, ChemicalEntityId] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, ChemicalToChemicalAssociationId):
self.id = ChemicalToChemicalAssociationId(self.id)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, ChemicalEntityId):
self.object = ChemicalEntityId(self.object)
super().__post_init__(**kwargs)
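# Illustrative sketch (not part of the generated model; the CURIE values below
# are hypothetical): __post_init__ coerces plain strings into the declared
# identifier types, so callers may pass CURIEs directly.
#
#   assoc = ChemicalToChemicalAssociation(
#       id="example:c2c-1",
#       subject="CHEBI:0000001",
#       predicate="biolink:related_to",
#       object="CHEBI:0000002",
#   )
#   assert isinstance(assoc.object, ChemicalEntityId)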
@dataclass
class ReactionToParticipantAssociation(ChemicalToChemicalAssociation):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.ReactionToParticipantAssociation
class_class_curie: ClassVar[str] = "biolink:ReactionToParticipantAssociation"
class_name: ClassVar[str] = "reaction to participant association"
class_model_uri: ClassVar[URIRef] = BIOLINK.ReactionToParticipantAssociation
id: Union[str, ReactionToParticipantAssociationId] = None
predicate: Union[str, PredicateType] = None
object: Union[str, ChemicalEntityId] = None
subject: Union[str, MolecularEntityId] = None
stoichiometry: Optional[int] = None
reaction_direction: Optional[Union[str, "ReactionDirectionEnum"]] = None
reaction_side: Optional[Union[str, "ReactionSideEnum"]] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, ReactionToParticipantAssociationId):
self.id = ReactionToParticipantAssociationId(self.id)
if self._is_empty(self.subject):
self.MissingRequiredField("subject")
if not isinstance(self.subject, MolecularEntityId):
self.subject = MolecularEntityId(self.subject)
if self.stoichiometry is not None and not isinstance(self.stoichiometry, int):
self.stoichiometry = int(self.stoichiometry)
if self.reaction_direction is not None and not isinstance(self.reaction_direction, ReactionDirectionEnum):
self.reaction_direction = ReactionDirectionEnum(self.reaction_direction)
if self.reaction_side is not None and not isinstance(self.reaction_side, ReactionSideEnum):
self.reaction_side = ReactionSideEnum(self.reaction_side)
super().__post_init__(**kwargs)
@dataclass
class ReactionToCatalystAssociation(ReactionToParticipantAssociation):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.ReactionToCatalystAssociation
class_class_curie: ClassVar[str] = "biolink:ReactionToCatalystAssociation"
class_name: ClassVar[str] = "reaction to catalyst association"
class_model_uri: ClassVar[URIRef] = BIOLINK.ReactionToCatalystAssociation
id: Union[str, ReactionToCatalystAssociationId] = None
predicate: Union[str, PredicateType] = None
subject: Union[str, MolecularEntityId] = None
object: Union[dict, GeneOrGeneProduct] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, ReactionToCatalystAssociationId):
self.id = ReactionToCatalystAssociationId(self.id)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, GeneOrGeneProduct):
self.object = GeneOrGeneProduct(**self.object)
super().__post_init__(**kwargs)
@dataclass
class ChemicalToChemicalDerivationAssociation(ChemicalToChemicalAssociation):
"""
    A causal relationship between two chemical entities, where the subject represents the upstream entity and the
    object represents the downstream entity. For any such association there is an implicit reaction:
IF
R has-input C1 AND
R has-output C2 AND
R enabled-by P AND
R type Reaction
THEN
C1 derives-into C2 <<catalyst qualifier P>>
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.ChemicalToChemicalDerivationAssociation
class_class_curie: ClassVar[str] = "biolink:ChemicalToChemicalDerivationAssociation"
class_name: ClassVar[str] = "chemical to chemical derivation association"
class_model_uri: ClassVar[URIRef] = BIOLINK.ChemicalToChemicalDerivationAssociation
id: Union[str, ChemicalToChemicalDerivationAssociationId] = None
subject: Union[str, ChemicalEntityId] = None
object: Union[str, ChemicalEntityId] = None
predicate: Union[str, PredicateType] = None
catalyst_qualifier: Optional[Union[Union[dict, MacromolecularMachineMixin], List[Union[dict, MacromolecularMachineMixin]]]] = empty_list()
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, ChemicalToChemicalDerivationAssociationId):
self.id = ChemicalToChemicalDerivationAssociationId(self.id)
if self._is_empty(self.subject):
self.MissingRequiredField("subject")
if not isinstance(self.subject, ChemicalEntityId):
self.subject = ChemicalEntityId(self.subject)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, ChemicalEntityId):
self.object = ChemicalEntityId(self.object)
if self._is_empty(self.predicate):
self.MissingRequiredField("predicate")
if not isinstance(self.predicate, PredicateType):
self.predicate = PredicateType(self.predicate)
if not isinstance(self.catalyst_qualifier, list):
self.catalyst_qualifier = [self.catalyst_qualifier] if self.catalyst_qualifier is not None else []
self.catalyst_qualifier = [v if isinstance(v, MacromolecularMachineMixin) else MacromolecularMachineMixin(**v) for v in self.catalyst_qualifier]
super().__post_init__(**kwargs)
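# Illustrative sketch (placeholder values, not generated code): a single
# catalyst_qualifier is wrapped into a one-element list in __post_init__, and
# plain dicts are promoted to MacromolecularMachineMixin instances.
#
#   deriv = ChemicalToChemicalDerivationAssociation(
#       id="example:deriv-1",
#       subject="CHEBI:0000001",
#       object="CHEBI:0000002",
#       predicate="biolink:derives_into",
#       catalyst_qualifier=some_macromolecular_machine,  # hypothetical instance
#   )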
@dataclass
class ChemicalToDiseaseOrPhenotypicFeatureAssociation(Association):
"""
An interaction between a chemical entity and a phenotype or disease, where the presence of the chemical gives rise
to or exacerbates the phenotype.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.ChemicalToDiseaseOrPhenotypicFeatureAssociation
class_class_curie: ClassVar[str] = "biolink:ChemicalToDiseaseOrPhenotypicFeatureAssociation"
class_name: ClassVar[str] = "chemical to disease or phenotypic feature association"
class_model_uri: ClassVar[URIRef] = BIOLINK.ChemicalToDiseaseOrPhenotypicFeatureAssociation
id: Union[str, ChemicalToDiseaseOrPhenotypicFeatureAssociationId] = None
subject: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
object: Union[str, DiseaseOrPhenotypicFeatureId] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, ChemicalToDiseaseOrPhenotypicFeatureAssociationId):
self.id = ChemicalToDiseaseOrPhenotypicFeatureAssociationId(self.id)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, DiseaseOrPhenotypicFeatureId):
self.object = DiseaseOrPhenotypicFeatureId(self.object)
super().__post_init__(**kwargs)
@dataclass
class ChemicalToPathwayAssociation(Association):
"""
An interaction between a chemical entity and a biological process or pathway.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.ChemicalToPathwayAssociation
class_class_curie: ClassVar[str] = "biolink:ChemicalToPathwayAssociation"
class_name: ClassVar[str] = "chemical to pathway association"
class_model_uri: ClassVar[URIRef] = BIOLINK.ChemicalToPathwayAssociation
id: Union[str, ChemicalToPathwayAssociationId] = None
subject: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
object: Union[str, PathwayId] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, ChemicalToPathwayAssociationId):
self.id = ChemicalToPathwayAssociationId(self.id)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, PathwayId):
self.object = PathwayId(self.object)
super().__post_init__(**kwargs)
@dataclass
class ChemicalToGeneAssociation(Association):
"""
An interaction between a chemical entity and a gene or gene product.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.ChemicalToGeneAssociation
class_class_curie: ClassVar[str] = "biolink:ChemicalToGeneAssociation"
class_name: ClassVar[str] = "chemical to gene association"
class_model_uri: ClassVar[URIRef] = BIOLINK.ChemicalToGeneAssociation
id: Union[str, ChemicalToGeneAssociationId] = None
subject: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
object: Union[dict, GeneOrGeneProduct] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, ChemicalToGeneAssociationId):
self.id = ChemicalToGeneAssociationId(self.id)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, GeneOrGeneProduct):
self.object = GeneOrGeneProduct(**self.object)
super().__post_init__(**kwargs)
@dataclass
class DrugToGeneAssociation(Association):
"""
An interaction between a drug and a gene or gene product.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.DrugToGeneAssociation
class_class_curie: ClassVar[str] = "biolink:DrugToGeneAssociation"
class_name: ClassVar[str] = "drug to gene association"
class_model_uri: ClassVar[URIRef] = BIOLINK.DrugToGeneAssociation
id: Union[str, DrugToGeneAssociationId] = None
subject: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
object: Union[dict, GeneOrGeneProduct] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, DrugToGeneAssociationId):
self.id = DrugToGeneAssociationId(self.id)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, GeneOrGeneProduct):
self.object = GeneOrGeneProduct(**self.object)
super().__post_init__(**kwargs)
@dataclass
class MaterialSampleToEntityAssociationMixin(YAMLRoot):
"""
An association between a material sample and something.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.MaterialSampleToEntityAssociationMixin
class_class_curie: ClassVar[str] = "biolink:MaterialSampleToEntityAssociationMixin"
class_name: ClassVar[str] = "material sample to entity association mixin"
class_model_uri: ClassVar[URIRef] = BIOLINK.MaterialSampleToEntityAssociationMixin
subject: Union[str, MaterialSampleId] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.subject):
self.MissingRequiredField("subject")
if not isinstance(self.subject, MaterialSampleId):
self.subject = MaterialSampleId(self.subject)
super().__post_init__(**kwargs)
@dataclass
class MaterialSampleDerivationAssociation(Association):
"""
An association between a material sample and the material entity from which it is derived.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.MaterialSampleDerivationAssociation
class_class_curie: ClassVar[str] = "biolink:MaterialSampleDerivationAssociation"
class_name: ClassVar[str] = "material sample derivation association"
class_model_uri: ClassVar[URIRef] = BIOLINK.MaterialSampleDerivationAssociation
id: Union[str, MaterialSampleDerivationAssociationId] = None
subject: Union[str, MaterialSampleId] = None
object: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, MaterialSampleDerivationAssociationId):
self.id = MaterialSampleDerivationAssociationId(self.id)
if self._is_empty(self.subject):
self.MissingRequiredField("subject")
if not isinstance(self.subject, MaterialSampleId):
self.subject = MaterialSampleId(self.subject)
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, NamedThingId):
self.object = NamedThingId(self.object)
if self._is_empty(self.predicate):
self.MissingRequiredField("predicate")
if not isinstance(self.predicate, PredicateType):
self.predicate = PredicateType(self.predicate)
super().__post_init__(**kwargs)
@dataclass
class MaterialSampleToDiseaseOrPhenotypicFeatureAssociation(Association):
"""
An association between a material sample and a disease or phenotype.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.MaterialSampleToDiseaseOrPhenotypicFeatureAssociation
class_class_curie: ClassVar[str] = "biolink:MaterialSampleToDiseaseOrPhenotypicFeatureAssociation"
class_name: ClassVar[str] = "material sample to disease or phenotypic feature association"
class_model_uri: ClassVar[URIRef] = BIOLINK.MaterialSampleToDiseaseOrPhenotypicFeatureAssociation
id: Union[str, MaterialSampleToDiseaseOrPhenotypicFeatureAssociationId] = None
subject: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
object: Union[str, NamedThingId] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, MaterialSampleToDiseaseOrPhenotypicFeatureAssociationId):
self.id = MaterialSampleToDiseaseOrPhenotypicFeatureAssociationId(self.id)
super().__post_init__(**kwargs)
@dataclass
class DiseaseToEntityAssociationMixin(YAMLRoot):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.DiseaseToEntityAssociationMixin
class_class_curie: ClassVar[str] = "biolink:DiseaseToEntityAssociationMixin"
class_name: ClassVar[str] = "disease to entity association mixin"
class_model_uri: ClassVar[URIRef] = BIOLINK.DiseaseToEntityAssociationMixin
subject: Union[str, DiseaseId] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.subject):
self.MissingRequiredField("subject")
if not isinstance(self.subject, DiseaseId):
self.subject = DiseaseId(self.subject)
super().__post_init__(**kwargs)
@dataclass
class EntityToExposureEventAssociationMixin(YAMLRoot):
"""
An association between some entity and an exposure event.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.EntityToExposureEventAssociationMixin
class_class_curie: ClassVar[str] = "biolink:EntityToExposureEventAssociationMixin"
class_name: ClassVar[str] = "entity to exposure event association mixin"
class_model_uri: ClassVar[URIRef] = BIOLINK.EntityToExposureEventAssociationMixin
object: Union[dict, ExposureEvent] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.object):
self.MissingRequiredField("object")
if not isinstance(self.object, ExposureEvent):
self.object = ExposureEvent(**self.object)
super().__post_init__(**kwargs)
@dataclass
class DiseaseToExposureEventAssociation(Association):
"""
An association between an exposure event and a disease.
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.DiseaseToExposureEventAssociation
class_class_curie: ClassVar[str] = "biolink:DiseaseToExposureEventAssociation"
class_name: ClassVar[str] = "disease to exposure event association"
class_model_uri: ClassVar[URIRef] = BIOLINK.DiseaseToExposureEventAssociation
id: Union[str, DiseaseToExposureEventAssociationId] = None
subject: Union[str, NamedThingId] = None
predicate: Union[str, PredicateType] = None
object: Union[str, NamedThingId] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, DiseaseToExposureEventAssociationId):
            self.id = DiseaseToExposureEventAssociationId(self.id)
        super().__post_init__(**kwargs)
# Source: emilydolson/avida-spatial-tools
from .utils import *
from scipy.spatial.distance import pdist
import scipy.cluster.hierarchy as hierarchicalcluster
def rank_environment_and_phenotypes(environment, phenotypes, k=15):
"""
Clusters sets of resources/tasks using a weighted hamming distance such
that you can have few enough values to give each group of similar things a
different color. This function is designed for cases when you want to
color both an environment and a set of phenotypes such that the colors
    correspond to each other.
Takes an EnvironmentFile object, a 2d array of phenotypes, and, optionally,
a number indicating the maximum number of clusters (default 15).
Returns:
- An EnvironmentFile in which the grid has been replaced with integers
indicating which cluster a cell is a member of. Integers are assigned
    such that cells containing more resources, or more complex resources, have higher
numbers.
- A 2D grid of numbers representing the clusters each phenotype was
assigned to.
- An integer representing the total number of clusters.
"""
environment = convert_world_to_phenotype(environment)
ranks = get_ranks_for_environment_and_phenotypes(environment, phenotypes)
environment, n = assign_ranks_by_cluster(environment, k, ranks)
phenotypes, n = assign_ranks_by_cluster(phenotypes, k, ranks)
return environment, phenotypes, n
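# Hedged usage sketch (the variables below are hypothetical placeholders):
# given an EnvironmentFile `env` and a 2d grid of phenotype strings
# `phenotype_grid`, rank both against a shared set of clusters so they can
# share one color scale.
#
#   ranked_env, ranked_phenotypes, n = rank_environment_and_phenotypes(
#       env, phenotype_grid, k=10)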
def do_clustering(types, max_clust):
"""
Helper method for clustering that takes a list of all of the things being
clustered (which are assumed to be binary numbers represented as strings),
and an int representing the maximum number of clusters that are allowed.
Returns: A dictionary mapping cluster ids to lists of numbers that are part
of that cluster.
"""
    # Strip the "0b" prefix from each number and split it into digit characters,
    # then pad with leading zeros so all numbers are the same length.
ls = [list(t[t.find("b")+1:]) for t in types]
prepend_zeros_to_lists(ls)
dist_matrix = pdist(ls, weighted_hamming)
clusters = hierarchicalcluster.complete(dist_matrix)
clusters = hierarchicalcluster.fcluster(clusters, max_clust,
criterion="maxclust")
# Group members of each cluster together
cluster_dict = dict((c, []) for c in set(clusters))
for i in range(len(types)):
cluster_dict[clusters[i]].append(types[i])
return cluster_dict
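# For example (hedged illustration): with types = ["0b11", "0b10", "0b1101"]
# and max_clust = 2, the returned dict maps each cluster id produced by
# fcluster to the strings grouped in that cluster, e.g.
# {1: ["0b11", "0b10"], 2: ["0b1101"]} (the exact grouping depends on the
# weighted hamming distances).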
def rank_clusters(cluster_dict):
"""
Helper function for clustering that takes a dictionary mapping cluster
ids to lists of the binary strings that are part of that cluster and
returns a dictionary mapping cluster ids to integers representing their
"rank". Ranks provide an ordering for the clusters such that each
cluster has its own rank, and clusters are ordered from simplest to
most complex.
"""
# Figure out the relative rank of each cluster
cluster_ranks = dict.fromkeys(cluster_dict.keys())
for key in cluster_dict:
cluster_ranks[key] = eval(string_avg(cluster_dict[key], binary=True))
i = len(cluster_ranks)
for key in sorted(cluster_ranks, key=cluster_ranks.get):
cluster_ranks[key] = i
i -= 1
return cluster_ranks
def get_ranks_for_environment_and_phenotypes(environment, phenotypes, k=15):
"""
    Takes an EnvironmentFile and a 2d array representing phenotypes at each
location. Optionally also takes an integer indicating the maximum number
of clusters allowed to be created (default 15).
Environment is expected to already have been converted to binary numbers
(generally because this is being called by rank_environment_and_phenotypes)
Return a dictionary mapping binary strings representing groups of
resources/tasks that are present/performed in a given cell to integers
indicating the ranked order of the cluster they're part of.
"""
# Create list of all niches and all phenotypes, in phenotype format
niches = flatten_array(environment)
phenotypes = flatten_array(phenotypes)
types = set(phenotypes+niches)
types.discard("-0b1") # We'll handle this specially
types.discard("0b0") # We'll handle this specially
# Do all clustering ahead of time so colors remain consistent.
ranks = generate_ranks(list(types), k)
ranks["-0b1"] = -1 # The empty phenotype/niche should always be rank -1
ranks["0b0"] = 0 # The empty phenotype/niche should always be rank 0
return ranks
def assign_ranks_by_cluster(grid, n, ranks=None):
"""
Takes a 2D array representing phenotypes or resource sets across the world,
    an integer representing the maximum number of clusters allowed, and
optionally a dictionary indicating the rank of the cluster of each
phenotype/resource set. If this dictionary is not provided, one will be
generated.
Returns: - A 2d array of numbers indicating the ranks of the clusters
of the resource set/phenotype in each cell
- An integer representing the number of clusters created.
"""
if ranks is None:
ranks = generate_ranks(grid, n)
return assign_ranks_to_grid(grid, ranks), len(ranks)
def generate_ranks(grid, n):
"""
    Takes a grid of phenotypes or resource sets represented as strings of
    binary numbers, and an integer indicating the maximum number of clusters
    to generate.
Clusters the data in grid into a maximum of n groups, ranks each group by
the complexity and length of its "average" member, and returns a dictionary
mapping binary numbers to integers representing the rank of the cluster
they're part of.
"""
phenotypes = deepcopy(grid)
if type(phenotypes) is list and type(phenotypes[0]) is list:
phenotypes = flatten_array(phenotypes)
# Remove duplicates from types
types = list(frozenset(phenotypes))
if len(types) < n:
ranks = rank_types(types)
else:
ranks = cluster_types(types, n)
return ranks
def assign_ranks_to_grid(grid, ranks):
"""
Takes a 2D array of binary numbers represented as strings and a dictionary
mapping binary strings to integers representing the rank of the cluster
they belong to, and returns a grid in which each binary number has been
replaced with the rank of its cluster.
"""
assignments = deepcopy(grid)
ranks["0b0"] = 0
ranks["-0b1"] = -1
for i in range(len(grid)):
for j in range(len(grid[i])):
if type(grid[i][j]) is list:
for k in range(len(grid[i][j])):
assignments[i][j][k] = ranks[grid[i][j][k]]
else:
assignments[i][j] = ranks[grid[i][j]]
return assignments
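# Worked example: with ranks = {"0b1": 1, "0b11": 2}, the grid
# [["0b1", "0b11"], ["0b0", "-0b1"]] becomes [[1, 2], [0, -1]], because "0b0"
# and "-0b1" are always pinned to ranks 0 and -1 above.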
def cluster_types(types, max_clust=12):
"""
Generates a dictionary mapping each binary number in types to an integer
    from 0 to max_clust. Hierarchical clustering is used to determine which
    binary numbers should map to the same integer.
"""
if len(types) < max_clust:
max_clust = len(types)
# Do actual clustering
cluster_dict = do_clustering(types, max_clust)
cluster_ranks = rank_clusters(cluster_dict)
# Create a dictionary mapping binary numbers to indices
ranks = {}
for key in cluster_dict:
for typ in cluster_dict[key]:
ranks[typ] = cluster_ranks[key]
return ranks
def rank_types(types):
"""
Takes a list of binary numbers and returns a dictionary mapping each
    binary number to an integer indicating its rank within the list.
    This is the preferred alternative to cluster_types for the case where
    there are few enough types to represent each as its own color.
"""
include_null = '0b0' in types
sorted_types = deepcopy(types)
for i in range(len(sorted_types)):
sorted_types[i] = int(sorted_types[i], 2)
sorted_types.sort()
ranks = {}
for t in types:
ranks[t] = sorted_types.index(eval(t)) + int(not include_null)
return ranks
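# Worked example: rank_types(["0b1", "0b101", "0b11"]) sorts by integer value
# (1, 3, 5) and, because "0b0" is absent, shifts every rank up by one, giving
# {"0b1": 1, "0b11": 2, "0b101": 3}. If "0b0" were in the list it would take
# rank 0 and the remaining values would keep the same ranks.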
def make_count_grid(data):
"""
Takes a 2 or 3d grid of strings representing binary numbers.
Returns a grid of the same dimensions in which each binary number has been
replaced by an integer indicating the number of ones that were in that
number.
"""
data = deepcopy(data)
for i in range(len(data)):
for j in range(len(data[i])):
for k in range(len(data[i][j])):
if type(data[i][j][k]) is list:
for l in range(len(data[i][j][k])):
try:
data[i][j][k] = data[i][j][k][l].count("1")
except:
data[i][j][k] = len(data[i][j][k][l])
else:
try:
data[i][j][k] = data[i][j][k].count("1")
except:
data[i][j][k] = len(data[i][j][k])
return data
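# Worked example: make_count_grid([[["0b110", "0b1"]]]) returns [[[2, 1]]];
# each binary string is replaced by the number of "1" characters it contains.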
def make_optimal_phenotype_grid(environment, phenotypes):
"""
Takes an EnvironmentFile object and a 2d array of phenotypes and returns
a 2d array in which each location contains an index representing the
distance between the phenotype in that location and the optimal phenotype
for that location.
    This is achieved by using the task list in the EnvironmentFile to convert
the phenotypes to sets of tasks, and comparing them to the sets of
resources in the environment. So if the environment file that you created
the EnvironmentFile object from for some reason doesn't contain all of the
    tasks, or doesn't contain them in the right order, this won't work. If this
is the environment file that you used for the run of Avida that generated
this data, you should be fine.
"""
world_size = environment.size
phenotypes = deepcopy(phenotypes)
for i in range(world_size[1]):
for j in range(world_size[0]):
for k in range(len(phenotypes[i][j])):
phenotype = phenotype_to_res_set(phenotypes[i][j][k],
environment.tasks)
diff = len(environment[i][j].symmetric_difference(phenotype))
phenotypes[i][j][k] = diff
return phenotypes
def task_percentages(data, n_tasks=9):
"""
Takes a 3D array of strings representing binary numbers and calculates
the percentage of organisms in each cell (across multiple files)
that were doing a given task.
Returns an m x n x n_tasks array indicating the percentages of organisms
at each location (across the 3rd dimension) that were doing each task.
"""
pdata = deepcopy(data)
duplicate
if parametercollision: # parameters disagree
if name != "":
raise ooferror.ErrSetupError("Named property in datafile conflicts with existing property '%s'" % name)
# reparametrization of unnamed property
if reg.materials:
raise ooferror.ErrSetupError("Unnamed property in datafile conflicts with existing property '%s'" % name)
# Unnamed property is being reparametrized, but it's
# not used in any materials, so it's ok.
reg.new_params(**kwargs)
switchboard.notify("redraw")
# A name collision with no parameter collision doesn't
# require any action. The data file contained a property
# identical to an existing property.
else:
# No collision, we must create a new NamedRegistration.
# We know it's a NamedRegistration because unnamed
# property registrations *always* produce a name conflict.
fullname = string.join( treepath + [name], ":")
newreg = reg.named_copy(fullname, menuitem.params[1:])
switchboard.notify("redraw")
#=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=#
# _PropertyStructureInfo and its helper classes, _PropertyFieldInfo
# and _PSInfo, keep track of how a Property is used, ie, how it
# interacts with Fields, Fluxes, and Equations.
class _PropertyFieldInfo:
def __init__(self, time_derivs, nonlinear, time_dependent):
self.time_derivs = time_derivs
self.nonlinear = nonlinear
self.time_dependent = time_dependent
class _PSInfo:
# _PSInfo is used within _PropertyStructureInfo to store the
# fields used by a single Flux or Equation.
def __init__(self):
self._fields = {}
def add(self, field, time_derivs, nonlinear, time_dependent):
# field might be None if the property doesn't have any field dependence
self._fields[field] = _PropertyFieldInfo(time_derivs, nonlinear,
time_dependent)
def fields(self):
return self._fields.keys()
def fields_of_order(self, order):
return [f for f in self._fields
if (f is not None and
order in self._fields[f].time_derivs)]
def nonlinear(self, fields):
# Is the property nonlinear in any of the given fields?
for field in fields+[None]:
try:
if self._fields[field].nonlinear:
return True
except KeyError:
pass
return False
def timeDependent(self, fields):
for field in fields+[None]:
try:
if self._fields[field].time_dependent:
return True
except KeyError:
pass
return False
class _PropertyStructureInfo:
def __init__(self):
self._dict = {} # key = Flux or Equation obj, value = _PSInfo
def add(self, obj, field, time_derivs=[], nonlinear=False,
time_dependent=False):
# obj is either a Flux or an Equation
try:
info = self._dict[obj]
except KeyError:
info = self._dict[obj] = _PSInfo()
info.add(field, time_derivs, nonlinear, time_dependent)
def fields(self):
flds = set()
for pinfo in self._dict.values():
flds.update(pinfo.fields())
return [f for f in flds if f is not None]
# These functions *could* use the "fields_of_order(x)" scheme, but
# in the calling context (subproblem and materials), they're
# really clearer this way.
def first_order_fields(self, objs):
flds = set()
for obj in objs:
try:
flds.update(self._dict[obj].fields_of_order(1))
except KeyError:
pass
return list(flds)
def second_order_fields(self, objs):
flds = set()
for obj in objs:
try:
flds.update(self._dict[obj].fields_of_order(2))
except KeyError:
pass
return list(flds)
def time_deriv_fields(self, objs):
flds = set()
for obj in objs:
try:
flds.update(self._dict[obj].fields_of_order(1))
flds.update(self._dict[obj].fields_of_order(2))
except KeyError:
pass
return list(flds)
def all(self):
return self._dict.keys()
def nonlinear(self, fields):
for key,pinfo in self._dict.items():
if pinfo.nonlinear(fields):
return True
return False
def timeDependent(self, fields):
for pinfo in self._dict.values():
if pinfo.timeDependent(fields):
return True
return False
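# Hedged sketch (some_flux and some_field are illustrative placeholders, not
# actual OOF2 objects): a registration records, per Flux or Equation, which
# Fields a Property couples to and how.
#
#   info = _PropertyStructureInfo()
#   info.add(some_flux, some_field, time_derivs=[1], nonlinear=True)
#   info.fields()                         # -> [some_field]
#   info.nonlinear([some_field])          # -> True
#   info.first_order_fields([some_flux])  # -> [some_field]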
#=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=##=-=#
# Property registrations live in the tree data structure of the
# PropertyManager, and consequently are managed separately from the
# usual OOF Registration mechanism. They do their own tree-based
# collision detection, and are not known to the global OOF namespace.
# Properties are instanced via the "OOF.Property.Create..." menu in
# scripts, and parametrized via the "OOF.Property.Parametrize..."
# menu. PropertyRegistration and NamedPropertyRegistration objects
# differ in that the former construct a default name for themselves,
# whereas the latter accept one as an argument. Both prepend the
# resulting name as a parameter to their parameter list.
class PropertyRegistrationParent:
def __init__(self, subclass, modulename, ordering, secret):
self.subclass = subclass # for PythonExportability of Property
self.modulename = modulename # ditto
self.ordering = ordering
properties.Property.registry.append(self)
self._secret = secret
def unregister(self):
del properties.Property.registry[self.getIndex()]
def getIndex(self):
for i in range(len(properties.Property.registry)):
if properties.Property.registry[i] is self:
return i
def secret(self):
return self._secret
# PropertyRegistration knows special things about itself, like the
# fact that all properties have names as their first arguments.
# Property nonlinearity is stored here at registration-time, by
# passing "nonlinear=1" in to here. Nonlinear properties force the
# recomputation of the stiffness matrix when mesh.make_stiffness is
# called, even if the mesh itself hasn't changed.
class PropertyRegistration(PropertyRegistrationParent):
def __init__(self, name, subclass, modulename, ordering, params=[],
propertyType=None,
outputs=[],
secret=0,
interfaceCompatibility=interfaceparameters.COMPATIBILITY_BULK_ONLY,
interfaceDiscontinuousFields=[],
tip=None,
discussion=None):
PropertyRegistrationParent.__init__(self, subclass,
modulename, ordering, secret)
# Save the fully-qualified name for error reporting. This
# datum should not be confused with the parameter "name",
# which contains only the leaf part of the FQN.
self._name = name
# Keep a copy of the local parameters.
self.params = params
# Equations to which this property makes *direct*
        # contributions (ie, not via a flux). This is basically a
# dictionary of _PSInfo objects, keyed by Equation.
self._equations = _PropertyStructureInfo()
# Ditto, for Fluxes instead of Equations
self._fluxes = _PropertyStructureInfo()
self._constraints = _PropertyStructureInfo()
self._outputs = outputs # names of PropOutputs it contributes to
self.tip = tip
self.discussion = discussion # discussion string or loadFile
if propertyType is None:
raise ooferror.ErrPyProgrammingError(
"Missing propertyType in PropertyRegistration %s" % name)
self._propertyType=propertyType
#Interface branch
self._interfaceCompatibility=interfaceCompatibility
self._interfaceDiscontinuities=interfaceDiscontinuousFields
# name-indexed dictionary of materials in which this property
# occurs -- it is managed by the MaterialProps objects, except
# in the new_params call, where it is local.
self.materials = {}
# At creation time, all parameters are new.
self.new_params()
# Add yourself to the AllProperties data object. "name"
# must be the fully qualified name of the property.
AllProperties[name]=self
# This returns the fully-qualified name in the labeltree.
def name(self):
return self._name
def baseName(self): # different in NamedPropertyRegistration
return self._name
# After creating a Registration, eqnInfo and fluxInfo must be
# called to indicate which Fields the Property uses, and how they
# appear in the Equations and Fluxes.
#
# The nonlinear arg can be either a list or tuple of Fields in
# which the Property is nonlinear, or a bool. If it's a bool, it
# applies to all Fields in the fields arg.
def fluxInfo(self, fluxes, fields=[None], time_derivs=[], nonlinear=False,
time_dependent=False):
for flux in fluxes:
for field in fields:
nl = ((isinstance(nonlinear, (types.ListType, types.TupleType))
and field in nonlinear)
or nonlinear)
self._fluxes.add(flux, field, time_derivs, nl, time_dependent)
def eqnInfo(self, equations, fields=[None], time_derivs=[],
nonlinear=False, time_dependent=False):
# fields == [None] means that the property makes a
# contribution to the equation when no fields are defined.
# fields==[] is different! It means that the property makes
# no contributions.
for eqn in equations:
for field in fields:
nl = ((isinstance(nonlinear, (types.ListType, types.TupleType))
and field in nonlinear)
or nonlinear)
self._equations.add(eqn, field, time_derivs, nl, time_dependent)
def constraintInfo(self,equations,fields=[None]):
# Constraint equations the property contributes to, if any.
# The fields mentioned must be defined but need not be active.
for eqn in equations:
for field in fields:
self._constraints.add(eqn, field)
def discontinuousFields(self):
return self._interfaceDiscontinuities
# These functions are different in the NamedPropertyRegistration
def nonlinear(self, fields):
return (self._fluxes.nonlinear(fields) or
self._equations.nonlinear(fields))
def timeDependent(self, fields):
return (self._fluxes.timeDependent(fields) or
self._equations.timeDependent(fields))
def fields(self):
return set(self._equations.fields() + self._fluxes.fields())
def second_order_fields(self, *equations):
return self._equations.second_order_fields(equations)
def first_order_fields(self, *equations):
return self._equations.first_order_fields(equations)
def time_deriv_fields(self, *equations):
return self._equations.time_deriv_fields(equations)
def fluxes(self):
return self._fluxes.all()
def equations(self):
return self._equations.all()
def propertyType(self):
return self._propertyType
def outputs(self):
return self._outputs
def is_property_type(self, other_name):
# Don't use self._propertyType, you might be the derived class.
return self.propertyType()==other_name
#Interface branch
def interfaceCompatibility(self):
return self._interfaceCompatibility
# "Call" method creates a property instance from a registration.
# This is the only way to create a property instance, and this
# routine does not do any book-keeping with the AllProperties
# object. We do not use the Registration class's __call__ method,
# because we need to pass the registration itself as an argument to
# the property constructor.
def __call__(self):
return self.subclass(self, self._name, *[p.value for p in self.params])
# Collision looks for the name under this tree, and if it finds
# it, checks if the parameter values are all equal. Called by
# PropertyManager.creatorcallback, which is the callback for
# OOF.LoadData.Property.
# Return value is (namecollision, parametercollision)
def collision(self, name, params):
if name == "":
for ppair in zip(params, self.params):
                if ppair[0].value != ppair[1].value:
# -*- coding: utf-8 -*-
#
# ***********************************************************************************
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ***********************************************************************************
from ..utils import *
IID_IDXGIObject = MIDL_INTERFACE(
"{AEC22FB8-76F3-4639-9BE0-28EB43A67A2E}"
)
class IDXGIObject(comtypes.IUnknown):
_case_insensitive_ = True
_idlflags_ = []
_iid_ = IID_IDXGIObject
class DXGI_FRAME_STATISTICS(ctypes.Structure):
pass
class DXGI_MAPPED_RECT(ctypes.Structure):
pass
class _LUID(ctypes.Structure):
pass
LUID = _LUID
class DXGI_ADAPTER_DESC(ctypes.Structure):
pass
class DXGI_OUTPUT_DESC(ctypes.Structure):
pass
class DXGI_SHARED_RESOURCE(ctypes.Structure):
pass
class DXGI_SURFACE_DESC(ctypes.Structure):
pass
class DXGI_SWAP_CHAIN_DESC(ctypes.Structure):
pass
class DXGI_ADAPTER_DESC1(ctypes.Structure):
pass
class DXGI_DISPLAY_COLOR_SPACE(ctypes.Structure):
pass
class IDXGIDeviceSubObject(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIResource(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIKeyedMutex(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGISurface(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGISurface1(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIAdapter(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIOutput(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGISwapChain(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIFactory(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIDevice(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIFactory1(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIAdapter1(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
class IDXGIDevice1(comtypes.IUnknown):
_case_insensitive_ = True
_iid_ = None
_idlflags_ = []
from .dxgicommon_h import * # NOQA
from .dxgitype_h import * # NOQA
DXGI_USAGE = UINT
DXGI_FRAME_STATISTICS._fields_ = [
('PresentCount', UINT),
('PresentRefreshCount', UINT),
('SyncRefreshCount', UINT),
('SyncQPCTime', LARGE_INTEGER),
('SyncGPUTime', LARGE_INTEGER),
]
DXGI_MAPPED_RECT._fields_ = [
('Pitch', INT),
('pBits', POINTER(BYTE)),
]
_LUID._fields_ = [
('LowPart', DWORD),
('HighPart', LONG),
]
PLUID = POINTER(_LUID)
DXGI_ADAPTER_DESC._fields_ = [
('Description', WCHAR * 128),
('VendorId', UINT),
('DeviceId', UINT),
('SubSysId', UINT),
('Revision', UINT),
('DedicatedVideoMemory', SIZE_T),
('DedicatedSystemMemory', SIZE_T),
('SharedSystemMemory', SIZE_T),
('AdapterLuid', LUID),
]
HMONITOR = HANDLE
DXGI_OUTPUT_DESC._fields_ = [
('DeviceName', WCHAR * 32),
('DesktopCoordinates', RECT),
('AttachedToDesktop', BOOL),
('Rotation', DXGI_MODE_ROTATION),
('Monitor', HMONITOR),
]
DXGI_SHARED_RESOURCE._fields_ = [
('Handle', HANDLE),
]
class DXGI_RESIDENCY(ENUM):
DXGI_RESIDENCY_FULLY_RESIDENT = 1
DXGI_RESIDENCY_RESIDENT_IN_SHARED_MEMORY = 2
DXGI_RESIDENCY_EVICTED_TO_DISK = 3
DXGI_SURFACE_DESC._fields_ = [
('Width', UINT),
('Height', UINT),
('Format', DXGI_FORMAT),
('SampleDesc', DXGI_SAMPLE_DESC),
]
class DXGI_SWAP_EFFECT(ENUM):
DXGI_SWAP_EFFECT_DISCARD = 0
DXGI_SWAP_EFFECT_SEQUENTIAL = 1
DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL = 3
DXGI_SWAP_EFFECT_FLIP_DISCARD = 4
class DXGI_SWAP_CHAIN_FLAG(ENUM):
DXGI_SWAP_CHAIN_FLAG_NONPREROTATED = 1
DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH = 2
DXGI_SWAP_CHAIN_FLAG_GDI_COMPATIBLE = 4
DXGI_SWAP_CHAIN_FLAG_RESTRICTED_CONTENT = 8
DXGI_SWAP_CHAIN_FLAG_RESTRICT_SHARED_RESOURCE_DRIVER = 16
DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY = 32
DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT = 64
DXGI_SWAP_CHAIN_FLAG_FOREGROUND_LAYER = 128
DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO = 256
DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO = 512
DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED = 1024
DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING = 2048
DXGI_SWAP_CHAIN_FLAG_RESTRICTED_TO_ALL_HOLOGRAPHIC_DISPLAYS = 4096
DXGI_SWAP_CHAIN_DESC._fields_ = [
('BufferDesc', DXGI_MODE_DESC),
('SampleDesc', DXGI_SAMPLE_DESC),
('BufferUsage', DXGI_USAGE),
('BufferCount', UINT),
('OutputWindow', HWND),
('Windowed', BOOL),
('SwapEffect', DXGI_SWAP_EFFECT),
('Flags', UINT),
]
IID_IDXGIObject = MIDL_INTERFACE(
"{AEC22FB8-76F3-4639-9BE0-28EB43A67A2E}"
)
IDXGIObject._iid_ = IID_IDXGIObject
IDXGIObject._methods_ = [
COMMETHOD(
[helpstring('Method SetPrivateData')],
HRESULT,
'SetPrivateData',
(['in'], REFGUID, 'Name'),
(['in'], UINT, 'DataSize'),
(['in'], POINTER(VOID), 'pData'),
),
COMMETHOD(
[helpstring('Method SetPrivateDataInterface')],
HRESULT,
'SetPrivateDataInterface',
(['in'], REFGUID, 'Name'),
(['in'], POINTER(comtypes.IUnknown), 'pUnknown'),
),
COMMETHOD(
[helpstring('Method GetPrivateData')],
HRESULT,
'GetPrivateData',
(['in'], REFGUID, 'Name'),
(['out', 'in'], POINTER(UINT), 'pDataSize'),
(['out'], POINTER(VOID), 'pData'),
),
COMMETHOD(
[helpstring('Method GetParent')],
HRESULT,
'GetParent',
(['in'], REFIID, 'riid'),
(['out'], POINTER(POINTER(VOID)), 'ppParent'),
),
]
IID_IDXGIDeviceSubObject = MIDL_INTERFACE(
"{3D3E0379-F9DE-4D58-BB6C-18D62992F1A6}"
)
IDXGIDeviceSubObject._iid_ = IID_IDXGIDeviceSubObject
IDXGIDeviceSubObject._methods_ = [
COMMETHOD(
[helpstring('Method GetDevice')],
HRESULT,
'GetDevice',
(['in'], REFIID, 'riid'),
(['out'], POINTER(POINTER(VOID)), 'ppDevice'),
),
]
IID_IDXGIResource = MIDL_INTERFACE(
"{035F3AB4-482E-4E50-B41F-8A7F8BD8960B}"
)
IDXGIResource._iid_ = IID_IDXGIResource
IDXGIResource._methods_ = [
COMMETHOD(
[helpstring('Method GetSharedHandle')],
HRESULT,
'GetSharedHandle',
(['out'], POINTER(HANDLE), 'pSharedHandle'),
),
COMMETHOD(
[helpstring('Method GetUsage')],
HRESULT,
'GetUsage',
(['out'], POINTER(DXGI_USAGE), 'pUsage'),
),
COMMETHOD(
[helpstring('Method SetEvictionPriority')],
HRESULT,
'SetEvictionPriority',
(['in'], UINT, 'EvictionPriority'),
),
COMMETHOD(
[helpstring('Method GetEvictionPriority')],
HRESULT,
'GetEvictionPriority',
(['out'], POINTER(UINT), 'pEvictionPriority'),
),
]
IID_IDXGIKeyedMutex = MIDL_INTERFACE(
"{9D8E1289-D7B3-465F-8126-250E349AF85D}"
)
IDXGIKeyedMutex._iid_ = IID_IDXGIKeyedMutex
IDXGIKeyedMutex._methods_ = [
COMMETHOD(
[helpstring('Method AcquireSync')],
HRESULT,
'AcquireSync',
(['in'], UINT64, 'Key'),
(['in'], DWORD, 'dwMilliseconds'),
),
COMMETHOD(
[helpstring('Method ReleaseSync')],
HRESULT,
'ReleaseSync',
(['in'], UINT64, 'Key'),
),
]
IID_IDXGISurface = MIDL_INTERFACE(
"{CAFCB56C-6AC3-4889-BF47-9E23BBD260EC}"
)
IDXGISurface._iid_ = IID_IDXGISurface
IDXGISurface._methods_ = [
COMMETHOD(
[helpstring('Method GetDesc')],
HRESULT,
'GetDesc',
(['out'], POINTER(DXGI_SURFACE_DESC), 'pDesc'),
),
COMMETHOD(
[helpstring('Method Map')],
HRESULT,
'Map',
(['out'], POINTER(DXGI_MAPPED_RECT), 'pLockedRect'),
(['in'], UINT, 'MapFlags'),
),
COMMETHOD(
[helpstring('Method Unmap')],
HRESULT,
'Unmap',
),
]
IID_IDXGISurface1 = MIDL_INTERFACE(
"{4AE63092-6327-4C1B-80AE-BFE12EA32B86}"
)
IDXGISurface1._iid_ = IID_IDXGISurface1
IDXGISurface1._methods_ = [
COMMETHOD(
[helpstring('Method GetDC')],
HRESULT,
'GetDC',
(['in'], BOOL, 'Discard'),
(['out'], POINTER(HDC), 'phdc'),
),
COMMETHOD(
[helpstring('Method ReleaseDC')],
HRESULT,
'ReleaseDC',
(['in'], POINTER(RECT), 'pDirtyRect'),
),
]
IID_IDXGIAdapter = MIDL_INTERFACE(
"{2411E7E1-12AC-4CCF-BD14-9798E8534DC0}"
)
IDXGIAdapter._iid_ = IID_IDXGIAdapter
IDXGIAdapter._methods_ = [
COMMETHOD(
[helpstring('Method EnumOutputs')],
HRESULT,
'EnumOutputs',
(['in'], UINT, 'Output'),
(['out'], POINTER(POINTER(IDXGIOutput)), 'ppOutput'),
),
COMMETHOD(
[helpstring('Method GetDesc')],
HRESULT,
'GetDesc',
(['out'], POINTER(DXGI_ADAPTER_DESC), 'pDesc'),
),
COMMETHOD(
[helpstring('Method CheckInterfaceSupport')],
HRESULT,
'CheckInterfaceSupport',
(['in'], REFGUID, 'InterfaceName'),
(['out'], POINTER(LARGE_INTEGER), 'pUMDVersion'),
),
]
IID_IDXGIOutput = MIDL_INTERFACE(
"{AE02EEDB-C735-4690-8D52-5A8DC20213AA}"
)
IDXGIOutput._iid_ = IID_IDXGIOutput
IDXGIOutput._methods_ = [
COMMETHOD(
[helpstring('Method GetDesc')],
HRESULT,
'GetDesc',
(['out'], POINTER(DXGI_OUTPUT_DESC), 'pDesc'),
),
COMMETHOD(
[helpstring('Method GetDisplayModeList')],
HRESULT,
'GetDisplayModeList',
(['in'], DXGI_FORMAT, 'EnumFormat'),
(['in'], UINT, 'Flags'),
(['out', 'in'], POINTER(UINT), 'pNumModes'),
(['out'], POINTER(DXGI_MODE_DESC), 'pDesc'),
),
COMMETHOD(
[helpstring('Method FindClosestMatchingMode')],
HRESULT,
'FindClosestMatchingMode',
(['in'], POINTER(DXGI_MODE_DESC), 'pModeToMatch'),
(['out'], POINTER(DXGI_MODE_DESC), 'pClosestMatch'),
(['in'], POINTER(comtypes.IUnknown), 'pConcernedDevice'),
),
COMMETHOD(
[helpstring('Method WaitForVBlank')],
HRESULT,
'WaitForVBlank',
),
COMMETHOD(
[helpstring('Method TakeOwnership')],
HRESULT,
'TakeOwnership',
(['in'], POINTER(comtypes.IUnknown), 'pDevice'),
([], BOOL, 'Exclusive'),
),
COMMETHOD(
[helpstring('Method ReleaseOwnership')],
VOID,
'ReleaseOwnership',
),
COMMETHOD(
[helpstring('Method GetGammaControlCapabilities')],
HRESULT,
'GetGammaControlCapabilities',
(
['out'],
POINTER(DXGI_GAMMA_CONTROL_CAPABILITIES),
'pGammaCaps'
),
),
COMMETHOD(
[helpstring('Method SetGammaControl')],
HRESULT,
'SetGammaControl',
(['in'], POINTER(DXGI_GAMMA_CONTROL), 'pArray'),
),
COMMETHOD(
[helpstring('Method GetGammaControl')],
HRESULT,
'GetGammaControl',
(['out'], POINTER(DXGI_GAMMA_CONTROL), 'pArray'),
),
COMMETHOD(
[helpstring('Method SetDisplaySurface')],
HRESULT,
'SetDisplaySurface',
(['in'], POINTER(IDXGISurface), 'pScanoutSurface'),
),
COMMETHOD(
[helpstring('Method GetDisplaySurfaceData')],
HRESULT,
'GetDisplaySurfaceData',
(['in'], POINTER(IDXGISurface), 'pDestination'),
),
COMMETHOD(
[helpstring('Method GetFrameStatistics')],
HRESULT,
'GetFrameStatistics',
(['out'], POINTER(DXGI_FRAME_STATISTICS), 'pStats'),
),
]
IID_IDXGISwapChain = MIDL_INTERFACE(
"{310D36A0-D2E7-4C0A-AA04-6A9D23B8886A}"
)
IDXGISwapChain._iid_ = IID_IDXGISwapChain
IDXGISwapChain._methods_ = [
COMMETHOD(
[helpstring('Method Present')],
HRESULT,
'Present',
(['in'], UINT, 'SyncInterval'),
(['in'], UINT, 'Flags'),
),
COMMETHOD(
[helpstring('Method GetBuffer')],
HRESULT,
'GetBuffer',
(['in'], UINT, 'Buffer'),
(['in'], REFIID, 'riid'),
(['out'], POINTER(POINTER(VOID)), 'ppSurface'),
),
COMMETHOD(
[helpstring('Method SetFullscreenState')],
HRESULT,
'SetFullscreenState',
(['in'], BOOL, 'Fullscreen'),
(['in'], POINTER(IDXGIOutput), 'pTarget'),
),
COMMETHOD(
[helpstring('Method GetFullscreenState')],
HRESULT,
'GetFullscreenState',
(['out'], POINTER(BOOL), 'pFullscreen'),
(['out'], POINTER(POINTER(IDXGIOutput)), 'ppTarget'),
),
COMMETHOD(
[helpstring('Method GetDesc')],
HRESULT,
'GetDesc',
(['out'], POINTER(DXGI_SWAP_CHAIN_DESC), 'pDesc'),
),
COMMETHOD(
[helpstring('Method ResizeBuffers')],
HRESULT,
'ResizeBuffers',
(['in'], UINT, 'BufferCount'),
(['in'], UINT, 'Width'),
(['in'], UINT, 'Height'),
(['in'], DXGI_FORMAT, 'NewFormat'),
(['in'], UINT, 'SwapChainFlags'),
),
COMMETHOD(
[helpstring('Method ResizeTarget')],
HRESULT,
'ResizeTarget',
(['in'], POINTER(DXGI_MODE_DESC), 'pNewTargetParameters'),
),
COMMETHOD(
[helpstring('Method GetContainingOutput')],
HRESULT,
'GetContainingOutput',
(['out'], POINTER(POINTER(IDXGIOutput)), 'ppOutput'),
),
COMMETHOD(
[helpstring('Method GetFrameStatistics')],
HRESULT,
'GetFrameStatistics',
(['out'], POINTER(DXGI_FRAME_STATISTICS), 'pStats'),
),
COMMETHOD(
[helpstring('Method GetLastPresentCount')],
HRESULT,
'GetLastPresentCount',
(['out'], POINTER(UINT), 'pLastPresentCount'),
),
]
IID_IDXGIFactory = MIDL_INTERFACE(
"{7B7166EC-21C7-44AE-B21A-C9AE321AE369}"
)
IDXGIFactory._iid_ = IID_IDXGIFactory
IDXGIFactory._methods_ = [
COMMETHOD(
[helpstring('Method EnumAdapters')],
HRESULT,
'EnumAdapters',
(['in'], UINT, 'Adapter'),
(['out'], POINTER(POINTER(IDXGIAdapter)), 'ppAdapter'),
),
COMMETHOD(
[helpstring('Method MakeWindowAssociation')],
HRESULT,
'MakeWindowAssociation',
(['in'], HWND, 'WindowHandle'),
(['in'], UINT, 'Flags'),
),
COMMETHOD(
[helpstring('Method GetWindowAssociation')],
HRESULT,
'GetWindowAssociation',
(['out'], POINTER(HWND), 'pWindowHandle'),
),
COMMETHOD(
[helpstring('Method CreateSwapChain')],
HRESULT,
'CreateSwapChain',
(['in'], POINTER(comtypes.IUnknown), 'pDevice'),
(['in'], POINTER(DXGI_SWAP_CHAIN_DESC), 'pDesc'),
(
['out'],
POINTER(POINTER(IDXGISwapChain)),
'ppSwapChain'
),
),
COMMETHOD(
[helpstring('Method CreateSoftwareAdapter')],
HRESULT,
'CreateSoftwareAdapter',
(['in'], HMODULE, 'Module'),
(['out'], POINTER(POINTER(IDXGIAdapter)), 'ppAdapter'),
),
]
dxgi = ctypes.windll.DXGI
# HRESULT WINAPI CreateDXGIFactory(REFIID riid, _COM_Outptr_ VOID **ppFactory);
CreateDXGIFactory = dxgi.CreateDXGIFactory
CreateDXGIFactory.restype = HRESULT
# HRESULT WINAPI CreateDXGIFactory1(REFIID riid, _COM_Outptr_ VOID **ppFactory);
CreateDXGIFactory1 = dxgi.CreateDXGIFactory1
CreateDXGIFactory1.restype = HRESULT
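# Hedged usage sketch (comment only; assumes comtypes' usual handling of
# ['out'] parameters, where out values are returned from the call):
#
#   factory = ctypes.POINTER(IDXGIFactory)()
#   CreateDXGIFactory(ctypes.byref(IID_IDXGIFactory), ctypes.byref(factory))
#   adapter = factory.EnumAdapters(0)   # 'out' IDXGIAdapter pointer
#   desc = adapter.GetDesc()            # 'out' DXGI_ADAPTER_DESC
#   print(desc.Description)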
IID_IDXGIDevice = MIDL_INTERFACE(
"{54EC77FA-1377-44E6-8C32-88FD5F44C84C}"
)
IDXGIDevice._iid_ = IID_IDXGIDevice
IDXGIDevice._methods_ = [
COMMETHOD(
[helpstring('Method GetAdapter')],
HRESULT,
'GetAdapter',
(['out'], POINTER(POINTER(IDXGIAdapter)), 'pAdapter'),
),
COMMETHOD(
[helpstring('Method CreateSurface')],
HRESULT,
'CreateSurface',
(['in'], POINTER(DXGI_SURFACE_DESC), 'pDesc'),
(['in'], UINT, 'NumSurfaces'),
(['in'], DXGI_USAGE, 'Usage'),
(
['in'],
POINTER(DXGI_SHARED_RESOURCE),
'pSharedResource'
),
(['out'], POINTER(POINTER(IDXGISurface)), 'ppSurface'),
),
COMMETHOD(
[helpstring('Method QueryResourceResidency')],
HRESULT,
'QueryResourceResidency',
(['in'], POINTER(comtypes.IUnknown), 'ppResources'),
(['out'], POINTER(DXGI_RESIDENCY), 'pResidencyStatus'),
(['in'], UINT, 'NumResources'),
),
COMMETHOD(
[helpstring('Method SetGPUThreadPriority')],
HRESULT,
'SetGPUThreadPriority',
(['in'], INT, 'Priority'),
),
COMMETHOD(
[helpstring('Method GetGPUThreadPriority')],
HRESULT,
'GetGPUThreadPriority',
(['out'], POINTER(INT), 'pPriority'),
),
]
class DXGI_ADAPTER_FLAG(ENUM):
DXGI_ADAPTER_FLAG_NONE = 0
DXGI_ADAPTER_FLAG_REMOTE = 1
DXGI_ADAPTER_FLAG_SOFTWARE = 2
DXGI_ADAPTER_FLAG_FORCE_DWORD = 0xFFFFFFFF
DXGI_ADAPTER_DESC1._fields_ = [
('Description', WCHAR * 128),
('VendorId', UINT),
('DeviceId', UINT),
('SubSysId', UINT),
('Revision', UINT),
('DedicatedVideoMemory', SIZE_T),
('DedicatedSystemMemory', SIZE_T),
('SharedSystemMemory', SIZE_T),
('AdapterLuid', LUID),
('Flags', UINT),
]
DXGI_DISPLAY_COLOR_SPACE._fields_ = [
('PrimaryCoordinates', (FLOAT * 8) * 2),
('WhitePoints', (FLOAT * 16) * 2),
]
IID_IDXGIFactory1 = MIDL_INTERFACE(
"{770AAE78-F26F-4DBA-A829-253C83D1B387}"
)
IDXGIFactory1._iid_ = IID_IDXGIFactory1
IDXGIFactory1._methods_ = [
COMMETHOD(
[helpstring('Method EnumAdapters1')],
HRESULT,
'EnumAdapters1',
(['in'], UINT, 'Adapter'),
(['out'], POINTER(POINTER(IDXGIAdapter1)), 'ppAdapter'),
),
COMMETHOD(
[helpstring('Method IsCurrent')],
BOOL,
'IsCurrent',
),
]
IID_IDXGIAdapter1 = MIDL_INTERFACE(
"{29038F61-3839-4626-91FD-086879011A05}"
)
IDXGIAdapter1._iid_ = IID_IDXGIAdapter1
IDXGIAdapter1._methods_ = [
COMMETHOD(
[helpstring('Method GetDesc1')],
HRESULT,
'GetDesc1',
(['out'], POINTER(DXGI_ADAPTER_DESC1), 'pDesc'),
    ),
]
to libcgroup path.")
bz2FileName = g_OSlib.getBz2FilePath()
curPath = os.path.dirname(os.path.realpath(__file__))
libCgroupPath = os.path.realpath("%s/../../libcgroup" % curPath)
if not os.path.exists(libCgroupPath):
os.makedirs(libCgroupPath)
cmd = "tar -xf %s -C %s" % (bz2FileName, libCgroupPath)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50217"] % bz2FileName +
" Error: \n%s" % output)
# load library path env
ld_path = os.path.join(libCgroupPath, "lib")
if 'LD_LIBRARY_PATH' not in os.environ:
os.environ['LD_LIBRARY_PATH'] = ld_path
os.execve(os.path.realpath(__file__), sys.argv, os.environ)
if not os.environ.get('LD_LIBRARY_PATH').startswith(ld_path):
os.environ['LD_LIBRARY_PATH'] = ld_path + ":" + os.environ['LD_LIBRARY_PATH']
os.execve(os.path.realpath(__file__), sys.argv, os.environ)
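        # Hedged note: the os.execve() re-exec above is required because the dynamic
        # loader only reads LD_LIBRARY_PATH at process start; mutating os.environ
        # alone would not affect libraries loaded by the current interpreter.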
def setCgroup(self):
"""
function: Setting Cgroup
input : NA
output: NA
"""
if self.needSetCgroup():
return
self.logger.debug("Setting Cgroup.")
        # decompress server package
self.decompressPkg2Cgroup()
#create temp directory for libcgroup etc
cgroup_etc_dir = "%s/%s/etc" % (self.clusterToolPath, self.user)
dirName = os.path.dirname(os.path.realpath(__file__))
libcgroup_dir = os.path.realpath("%s/../../libcgroup/lib/libcgroup.so" % dirName)
cgroup_exe_dir = os.path.realpath("%s/../../libcgroup/bin/gs_cgroup" % dirName)
cmd = "rm -rf '%s/%s'" % (self.clusterToolPath, self.user)
(status, output) = DefaultValue.retryGetstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50207"] %
                                'crash Cgroup configuration file' + " Error: \n%s" % output)
cmd = "if [ ! -d '%s' ];then mkdir -p '%s' && " % (cgroup_etc_dir, cgroup_etc_dir)
cmd += "chmod %s '%s'/../ -R && chown %s:%s '%s'/../ -R -h;fi" %\
(DefaultValue.KEY_DIRECTORY_MODE, cgroup_etc_dir,
self.user, self.group, cgroup_etc_dir)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50208"] %
cgroup_etc_dir + " Error: \n%s" % output)
# check or prepare libcgroup lib
libcgroup_target = "/usr/local/lib/libcgroup.so.1"
cmd = "ldd %s | grep 'libcgroup.so.1'" % cgroup_exe_dir
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd + " Error: \n%s" % output)
elif str(output).find("not found") != -1:
cmd = "cp '%s' '%s' && ldconfig" % (libcgroup_dir, libcgroup_target)
self.logger.debug("Need copy libcgroup.so.1 from %s to %s." %
(libcgroup_dir, libcgroup_target))
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50214"] % libcgroup_target +
" Error: \n%s" % output)
GPHOME_cgroupCfgFile = "%s/%s/etc/gscgroup_%s.cfg" % (self.clusterToolPath, self.user,
self.user)
GAUSSHOME_cgroupCfgFile = "%s/etc/gscgroup_%s.cfg" % (self.clusterInfo.appPath, self.user)
cmd = "(if [ -f '%s' ]; then cp '%s' '%s';fi)" % (GAUSSHOME_cgroupCfgFile,
GAUSSHOME_cgroupCfgFile,
GPHOME_cgroupCfgFile)
self.logger.debug("Command for copying GAUSSHOME gscgroup's config file to GPHOME: %s\n"
% cmd)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50214"] % GAUSSHOME_cgroupCfgFile +
" Error:\n%s" % output)
#create cgroup log file.
binPath = self.clusterInfo.logPath + "/%s/bin/gs_cgroup/" % self.user
if not os.path.exists(binPath):
g_file.createDirectory(binPath)
cgroupLog = binPath + "gs_cgroup.log"
c_logger = GaussLog(cgroupLog, "gs_cgroup")
#Get OS startup file
initFile = DefaultValue.getOSInitFile()
if initFile == "":
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % "startup file of current OS" +
"The startup file for euleros OS is /etc/rc.d/rc.local.")
# call cgroup
# generate cgroup config file under cluster tool path.
# and then copy it to GAUSSHOME path in gs_install step.
execute_cmd = "%s -U %s --upgrade -c -H %s/%s" % (cgroup_exe_dir, self.user,
self.clusterToolPath, self.user)
c_logger.debug("Command for executing gs_cgroup: %s\n" % execute_cmd)
(status, output) = subprocess.getstatusoutput(execute_cmd)
c_logger.debug("The result of execute gs_cgroup is:\n%s." % output)
# set cgroup cmd to OS initFile. it will be executed at restart os system.
gauss_home = self.clusterInfo.appPath
init_cmd = "export LD_LIBRARY_PATH=%s/lib/ && %s/bin/gs_cgroup -U %s --upgrade -c -H %s" % \
(gauss_home, gauss_home, self.user, gauss_home)
set_init_cmd = "sed -i \"/.* -U %s .* -c -H .*/d\" %s && " % (self.user, initFile)
set_init_cmd += "echo \"%s\" >> %s" % (init_cmd, initFile)
c_logger.debug("Command for call cgroup cmd to init file: %s\n" % set_init_cmd)
(status, output) = subprocess.getstatusoutput(set_init_cmd)
c_logger.debug("The result of init gs_cgroup is:\n%s." % output)
c_logger.debug(str(output))
#Change the owner and permission.
g_OSlib.checkLink(binPath)
g_file.changeOwner(self.user, binPath, True, retryFlag = True)
g_file.changeMode(DefaultValue.KEY_DIRECTORY_MODE, binPath, retryFlag = True)
g_file.changeMode(DefaultValue.KEY_FILE_MODE, (binPath + "*"), retryFlag = True)
if self.clusterInfo.logPath.strip() != "":
pre_bin_path = self.clusterInfo.logPath + "/%s/bin" % self.user
g_file.changeOwner(self.user, pre_bin_path, True, retryFlag = True)
c_logger.closeLog()
if status != 0:
self.logger.logExit(str(output))
self.logger.debug("Successfully set Cgroup.")
def checkaio(self):
# check libaio.so file exist
cmd = "ls /usr/local/lib | grep '^libaio.so' | wc -l"
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd + " Error: \n%s" % output)
elif int(output) == 0:
cmd = "ls /usr/lib64 | grep '^libaio.so' | wc -l"
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
" Error: \n%s" % output)
elif int(output) == 0:
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % "libaio.so or libaio.so.*")
def checkPlatformArm(self):
"""
        function: Check whether the platform is ARM
input : NA
output: NA
"""
self.logger.debug("Check if platform is ARM.")
try:
global ARM_PLATE
cmd = "python3 -c 'import platform;print(platform.machine())'"
self.logger.debug("Command for getting querying platform: %s"
% cmd)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
+ " Error: \n%s" % output)
if str(output) == "aarch64":
ARM_PLATE = True
except Exception as e:
self.logger.logExit(str(e))
self.logger.debug("Successfully check platform ARM.")
def setArmOptimization(self):
"""
function: Setting ARM Optimization
input : NA
output: NA
"""
self.logger.debug("Set ARM Optimization.")
try:
initFile = DefaultValue.getOSInitFile()
clusterToolPath = self.clusterToolPath
# set_arm_optimization
init_cmd = "sed -i \"/(if test -f \'.*setArmOptimization.sh\';" \
" then export LC_ALL=C;" \
" sh .*setArmOptimization.sh;fi)/d\" %s && " \
% initFile
init_cmd += "echo " \
"\"(if test -f \'%s/sudo/setArmOptimization.sh\';" \
" then export LC_ALL=C;" \
"sh %s/sudo/setArmOptimization.sh;fi)\" >> %s" \
% (clusterToolPath, clusterToolPath, initFile)
(status, output) = subprocess.getstatusoutput(init_cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_514["GAUSS_51400"]
% init_cmd + " Error: \n%s" % output)
cmd = "if test -f \'%s/sudo/setArmOptimization.sh\'; then export" \
" LC_ALL=C;sh %s/sudo/setArmOptimization.sh;fi" \
% (clusterToolPath, clusterToolPath)
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
self.logger.logExit(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
+ " Error: \n%s" % output)
except Exception as e:
self.logger.logExit(str(e))
self.logger.debug("Successfully set ARM Optimization.")
def checkVirtualIp(self):
"""
function: Checking virtual IP
input : NA
output: NA
"""
self.logger.debug("Checking virtual IP...")
try:
global configuredIps
configuredIps = DefaultValue.checkIsPing(g_nodeInfo.virtualIp)
            # check whether the configured virtual IPs are local IPs
            # obtain all local IPs
localAddrs = DefaultValue.getIpAddressList()
for ip in g_nodeInfo.virtualIp:
if (ip not in configuredIps) and (ip not in localAddrs):
self.logger.logExit(ErrorCode.GAUSS_512["GAUSS_51224"]
% ip)
except Exception as e:
self.logger.logExit(str(e))
self.logger.debug("Successfully check virtual IP.")
# IP do operation with netmask
def netNum(self, ip, mask):
"""
function: net number
input : ip,mask
output: netAddress
"""
ipArr = ip.split(".")
maskArr = mask.split(".")
binaryIpArr = []
binaryMaskArr = []
for element in ipArr:
biElement = bin(int(element)).split("b")[1]
binaryIpArr.append("0" * (8 - len(biElement)) + biElement)
for element in maskArr:
biElement = bin(int(element)).split("b")[1]
binaryMaskArr.append("0" * (8 - len(biElement)) + biElement)
binaryIp = ".".join(binaryIpArr)
binaryMask = ".".join(binaryMaskArr)
netAddress = ""
for i in range(len(binaryMask)):
if binaryMask[i] == ".":
netAddress += "."
elif binaryIp[i] == "0" or binaryMask[i] == "0":
netAddress += "0"
else:
netAddress += "1"
return netAddress
def setVirtualIp(self):
"""
function: creating Virtual Ip
input : NA
output: NA
"""
# The node instance initialization information
self.initNodeInfo()
        # Create a temporary file to record the virtual IPs that are actually
        # configured, so that they can be rolled back on failure
if os.path.exists(self.tmpFile):
g_file.removeFile(self.tmpFile)
tmpFileFp = None
        # If this node has no virtual IP configured, exit
        if g_nodeInfo.virtualIp == []:
            return
        # Check which virtual IPs still need to be configured
        self.checkVirtualIp()
        # If all virtual IPs of the current node are already configured, exit
        if configuredIps == []:
            self.logger.debug("All virtual IPs are already configured.")
            return
self.logger.debug("Start setting virtual IP...")
try:
# check if the file is a link
g_OSlib.checkLink(self.tmpFile)
tmpFileFp = open(self.tmpFile, "w+")
            # Obtain the network interface card of the backIp;
            # the virtual IPs will be bound to aliases of this NIC.
backIpNIC = DefaultValue.getNICNum(g_nodeInfo.backIps[0])
            # List the aliases that already exist on this NIC
cmd = "/sbin/ifconfig -a | grep '%s' | awk '{print $1}'" \
% backIpNIC
(status, output) = subprocess.getstatusoutput(cmd)
if status != 0:
raise Exception(ErrorCode.GAUSS_506["GAUSS_50604"]
% g_nodeInfo.backIps[0]
+ " Error: \n%s" % output)
# Gets the currently available virtual NIC
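            # Virtual IPs are configured as NIC aliases named "<nic>:<index>"
            # (e.g. eth0:1); the numeric suffixes are collected so that the next
            # free index can be used for the new alias.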
nicList = output.split('\n')
flagValues = []
for nic in nicList:
if nic.find(':') >= 0:
flagList = nic.split(':')
flag = flagList[1].strip()
if flag.isdigit():
flagValues.append(int(flag))
vipNo = 0
if flagValues != []:
flagValues.sort()
vipNo = flagValues[-1] + 1
            # Determine whether the back IPs are in the same network segment.
subnetMasks = []
for backIp in g_nodeInfo.backIps:
# Get backIP subnet mask
subnetMask = ""
allNetworkInfo = g_network.getAllNetworkInfo()
for network in allNetworkInfo:
if backIp == network.ipAddress:
subnetMask = network.networkMask
                # Check whether all backIPs share the same subnet mask
if not len(subnetMasks):
subnetMasks.append(subnetMask)
else:
                if subnetMask
return self._files
def run(self):
gem.copyNumberDistribution(
self._input(True),
output = self.files(),
sampleName = self.pipeline.name
)
class DuplicationStep(PipelineStep):
""" Duplication call step """
def files(self):
"""Return the output files generated by this step. """
if self._files is None:
self._files = []
duplication_file = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M1",
file_suffix="final.calls.dups.bed")
duplication_without_gaps_file = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M1" ,
file_suffix="final.calls.dups.woGaps.bed")
filteredSwCall = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M2", file_suffix="calls.sw_norm_wochrXMY.bed")
filteredLwCall = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M2", file_suffix="calls.lw_norm_wochrXMY.bed")
filteredCwCall = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M2", file_suffix="calls.cw_norm_wochrXMY.bed")
wssdPicked = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M2/WSSD", file_suffix="sd_woChrXMY.tab")
wssdMerged = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M2/WSSD", file_suffix="sd_woChrXMY.merged")
wssdMerged10K = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M2/WSSD", file_suffix="sd_woChrXMY_10K.merged")
wssdMerged10KNoGaps = self.pipeline.create_file_name(self.name, sub_directory=self.name + "/M2/WSSD", file_suffix="sd_woChrXMY_10K.woGaps.merged")
self._files.append(duplication_file)
self._files.append(duplication_without_gaps_file)
self._files.append(filteredSwCall)
self._files.append(filteredLwCall)
self._files.append(filteredCwCall)
self._files.append(wssdPicked)
self._files.append(wssdMerged)
self._files.append(wssdMerged10K)
self._files.append(wssdMerged10KNoGaps)
return self._files
def run(self):
cfg = self.configuration
gem.callDuplications(
self._input(),
output = self.files(),
sampleName = self.pipeline.name,
bed_repeat_regions = cfg["bed_repeat_regions"],
bed_gaps_coordinates = cfg["bed_gaps_coordinates"]
)
def _input(self):
"""Return the output of all
dependencies"""
if not self.dependencies:
raise PipelineError("This step depends on mrCaNaVar and copy number analysis!")
return [self.pipeline.steps[i].files() for i in self.dependencies if i >= 0]
class DocumentationBamStep(PipelineStep):
"""Documentation Bam Step"""
def files(self):
"""Return the output files generated by this step. """
if self._files is None:
self._files = []
stats_file = self.pipeline.create_file_name(self.name, sub_directory=self.name,file_suffix="txt")
html_file = self.pipeline.create_file_name(self.name, sub_directory=self.name,file_suffix="html")
json_file = self.pipeline.create_file_name(self.name, sub_directory=self.name,file_suffix="json")
self._files.append(stats_file)
self._files.append(html_file)
self._files.append(json_file)
return self._files
def run(self):
cfg = self.configuration
bam_file = self._input()[0][0]
gem.runBamSummaryMetrics(input=bam_file,output=self.files()[0],picard_tools_path=cfg["picard_path"],java_heap=cfg["java_heap"],tmp_folder=cfg["tmp_folder"],reference=cfg["bwa_reference"])
gem.cnvReport.create_bam_report(metrics=self.files()[0],html_file=self.files()[1],json_file=self.files()[2],sample_description=cfg["sample_description"])
def _input(self):
"""Return the output of all
dependencies"""
if not self.dependencies:
raise PipelineError("This step depends on mapping BAM mapping generation step!")
return [self.pipeline.steps[i].files() for i in self.dependencies if i >= 0]
class DocumentationRmDupStep(PipelineStep):
"""Build HTML and json documentation for the remove duplicates process"""
def files(self):
"""Return the output files generated by this step. """
if self._files is None:
self._files = []
html_file = self.pipeline.create_file_name(self.name, sub_directory=self.name,file_suffix="html")
json_file = self.pipeline.create_file_name(self.name, sub_directory=self.name,file_suffix="json")
self._files.append(html_file)
self._files.append(json_file)
return self._files
def run(self):
cfg = self.configuration
metrics = self._input()[0][0]
gem.cnvReport.create_duplicates_report(metrics=metrics,html_file=self.files()[0],json_file=self.files()[1],sample_description=cfg["sample_description"])
def _input(self):
"""Return the output of all
dependencies"""
if not self.dependencies:
raise PipelineError("This step depends on mapping BAM mapping generation step!")
return [self.pipeline.steps[i].files() for i in self.dependencies if i >= 0]
class DocumentationStep(PipelineStep):
"""Documentation Step"""
def files(self):
"""Return the output files generated by this step. """
if self._files is None:
self._files = []
html_file = self.pipeline.create_file_name(self.name, sub_directory=self.name,file_suffix="html")
json_file = self.pipeline.create_file_name(self.name, sub_directory=self.name,file_suffix="json")
self._files.append(html_file)
self._files.append(json_file)
return self._files
def run(self):
cfg = self.configuration
filesOut = self.files()
html_doc = filesOut[0]
json_doc = filesOut[1]
inputsList = self._input()
map_json_files = cfg["map_json_list"]
calls_log_file = gem._prepare_calls_log(inputsList[0][0])
dup_m1 = None
dup_m2 = None
if self.pipeline.duplications_create == True:
dup_m1 = inputsList[2][1]
dup_m2 = inputsList[2][8]
gem.cnvReport.create_report(html_file=html_doc,json_file=json_doc,mapping_stats_files=map_json_files,
mrcanavar_log_file=calls_log_file,control_regions_distribution=inputsList[1][3],
control_regions_plot=inputsList[1][2],cutoffs_file=inputsList[1][1], duplications_M1=dup_m1,
duplications_M2=dup_m2,sample_description=cfg["sample_description"])
def _input(self):
"""Return the output of all
dependencies"""
if not self.dependencies:
raise PipelineError("This step depends on mapping stats,mrCaNaVar,copy number analysis and duplications!")
return [self.pipeline.steps[i].files() for i in self.dependencies if i >= 0]
class BasePipeline:
"""Basic Base Pipeline """
def __init__(self, args=None):
self.steps = [] # pipeline steps
self.run_steps = [] # steps to run
# general parameter
self.input = None # input files
self.name = None # target name
        self.names = []  # target names in case more than one set of fastq files is to be processed
        self.multiFastq = False  # True if more than one fastq file or paired-end combination is going to be processed
        self.sample_name_multi_fastq = None
        self.remove_temp = True  # remove temporary files
self.output_dir = None # Output directory
self.threads = 1 # number of threads
self.write_config = None # write configuration
self.dry = False # only dry run
self.sort_memory = "768M" # samtools sort memory
self.direct_input = False # if true, skip the preparation step
self.force = False # force computation of all steps
self.compress_all = False
self.compress = False
#PicardTools
self.picard_path = None
self.java_heap = "25g"
self.tmp_folder = "/tmp/"
#Documentation files
self.sample_description = None
self.membersInitiation()
if args is not None:
# initialize from arguments
# load configuration
try:
if args.load_configuration is not None:
self.load(args.load_configuration)
except AttributeError:
pass
## update parameter
self.update(vars(args))
## initialize pipeline and check values
self.initialize()
def membersInitiation(self):
#To fullfill in the child class
pass
def update(self, configuration):
"""Update configuration from given map
configuration -- the input configuration
"""
for k, v in configuration.items():
try:
if v is not None:
setattr(self, k, v)
except AttributeError:
pass
def __update_dict(self, target, source):
if source is None:
return
for k, v in source.items():
#if v is not None:
target[k] = v
    def open_input(self, pair_end_files=None):
        """Open the original input files"""
        if pair_end_files is not None:
            return gem.filter.interleave([gem.files.open(f) for f in pair_end_files], threads=max(1, self.threads / 2))
if len(self.input) == 1:
return gem.files.open(self.input[0])
else:
return gem.filter.interleave([gem.files.open(f) for f in self.input], threads=max(1, self.threads / 2))
def open_step(self, id, raw=False):
"""Open the original input files"""
return self.steps[id].open(raw=raw)
def initialize(self, silent=False):
# check general parameter
errors = []
if self.input is None:
errors.append("No input file specified")
else:
if len(self.input) == 1 and not self.single_end:
# search for second file
(n, p) = gem.utils.find_pair(self.input[0])
if p is None:
#errors.append("Unable to deduce second pair input file from %s " % self.input[0])
logging.gemtools.warning("No second input file specified, assuming interleaved paird end reads!")
else:
logging.gemtools.warning("Second pair input file found: %s " % p)
if self.name is None:
self.name = n
self.input.append(p)
# check file counts
if self.single_end and len(self.input) != 1 and self.sample_name_multi_fastq is None:
errors.append("Specify exactly one input file in single end mode")
elif not self.single_end and self.sample_name_multi_fastq is not None:
#PE and multiple fastq files for a given sample
# check input files
input_abs = []
name_abs = []
for f in self.input:
                # if f is not a file name, it is a list coming from a configuration file
if type(f) is ListType:
f = f[0]
#search for second pair
(n, p) = gem.utils.find_pair(f)
if p is None:
errors.append("No second pair file found!!")
else:
logging.gemtools.warning("Second pair input file found: %s " % p)
input_abs.append([f,p])
name_abs.append(n)
self.input = input_abs
self.names = name_abs
self.multiFastq = True
if self.sample_name_multi_fastq is None:
                errors.append('''No sample name was defined. You must specify a sample name
                                 when working with more than one combination of paired-end files,
                                 using the -sample-name-multi-fastq parameter.''')
else:
self.name = self.sample_name_multi_fastq
else:
# check input files
input_abs = []
for f in self.input:
if f is None or not os.path.exists(f):
errors.append("Input file not found: %s" % (f))
else:
                    # make absolute path
input_abs.append(os.path.abspath(f))
self.input = input_abs
if self.name is None and self.input is not None and len(self.input) > 0:
# get name from input files
name = os.path.basename(self.input[0])
if name.endswith(".gz"):
name = name[:-3]
idx = name.rfind(".")
if idx > 0:
self.name = name[:idx]
if self.name is None or len(self.name) == 0:
errors.append("No name specified and unable to guess one. Please use --name to set a name explicitly.")
self.referenceCheck(errors)
if self.output_dir is None:
self.output_dir = os.getcwd()
self.output_dir = os.path.abspath(self.output_dir)
if self.threads <= 0:
self.threads = 1
self.noStandardParameterChecking(errors)
if not silent and len(errors) > 0 and self.write_config is None:
raise PipelineError("Failed to initialize neccessary parameters:\n\n%s" % ("\n".join(errors)))
if self.write_config is not None:
# log configuration errors
logging.gemtools.warning("---------------------------------------------")
logging.gemtools.warning("Writing configuration")
logging.gemtools.warning("")
logging.gemtools.warning("Note that some of the parameters are missing:\n")
for e in errors:
logging.gemtools.warning("\t" + str(e))
logging.gemtools.warning("---------------------------------------------")
def pairingInfoControl(self,fileDescriptor):
        '''Returns True if an error about pairing information must be reported'''
# check pairing information
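        # Inspect at most the first two templates: a template with two alignments
        # means the input is already interleaved paired-end data; otherwise the
        # pair flags of the first two single records are compared.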
p1 = None
p2 = None
c = 0
inp = fileDescriptor
for template in inp:
if template.num_alignments == 2:
## paired alignment
p1 = 1
p2 = 2
inp.close()
break
else:
if c == 0:
p1 = template.get_pair()
elif c == 1:
p2 = template.get_pair()
c += 1
if c >= 2:
inp.close()
break
inp.close()
        if p1 == 0 or p2 == 0 or (p1 == 1 and
# Regional volume and density management
# Author: <NAME>, 2018, 2019
"""Measure volumes and densities by regions.
Intended to be higher-level, relatively atlas-agnostic measurements.
"""
from enum import Enum
from time import time
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
from skimage import measure
from magmap.cv import chunking
from magmap.stats import atlas_stats, clustering
from magmap.settings import config
from magmap.io import libmag
from magmap.atlas import ontology
from magmap.cv import cv_nd
from magmap.io import df_io
# metric keys and column names
LabelMetrics = Enum(
"LabelMetrics", [
"Region",
"Volume", # volume, converted to physical units
"VolAlt", # alternate volume, eg smoothed volume
"VolPx", # volume in pixels
"VolAltPx", # alternate volume in pixels
"Intensity",
"Nuclei",
# densities; "Density" = nuclei density
# TODO: change density to nuclei density
# TODO: consider changing enum for KEY: name format
"Density", "DensityIntens",
"RegVolMean", "RegNucMean", "RegDensityMean", # per region
"VarNuclei", "VarNucIn", "VarNucOut",
"VarIntensity", "VarIntensIn", "VarIntensOut",
"MeanIntensity",
"MedIntensity",
"LowIntensity",
"HighIntensity",
"EntropyIntensity",
"VarIntensMatch",
"VarIntensDiff",
"MeanNuclei",
"VarNucMatch",
# Distances
"EdgeSize", # edge pixels
"EdgeDistSum", # sum of distances between edges in two images
"EdgeDistMean", # mean of these distances
"Dist", # generic distance
# Variation
"CoefVarIntens", "CoefVarNuc",
# Shape measurements
"SurfaceArea", "Compactness",
# Overlap metrics
"VolDSC", "NucDSC", # volume/nuclei Dice Similarity Coefficient
"VolOut", "NucOut", # volume/nuclei shifted out of orig position
# Point cloud measurements
"NucCluster", # number of nuclei clusters
"NucClusNoise", # number of nuclei that do not fit into a cluster
"NucClusLarg", # number of nuclei in the largest cluster
]
)
# variation metrics
VAR_METRICS = (
LabelMetrics.RegVolMean, LabelMetrics.RegNucMean,
LabelMetrics.VarNuclei, LabelMetrics.VarNucIn, LabelMetrics.VarNucOut,
LabelMetrics.VarIntensity, LabelMetrics.VarIntensIn,
LabelMetrics.VarIntensOut,
LabelMetrics.MeanIntensity,
LabelMetrics.MedIntensity,
LabelMetrics.LowIntensity,
LabelMetrics.HighIntensity,
LabelMetrics.EntropyIntensity,
LabelMetrics.VarIntensMatch,
LabelMetrics.VarIntensDiff,
LabelMetrics.MeanNuclei,
LabelMetrics.VarNucMatch,
LabelMetrics.CoefVarIntens, LabelMetrics.CoefVarNuc,
)
# nuclei metrics
NUC_METRICS = (
LabelMetrics.Nuclei,
LabelMetrics.RegNucMean,
LabelMetrics.MeanNuclei,
LabelMetrics.VarNuclei,
LabelMetrics.VarNucIn,
LabelMetrics.VarNucOut,
LabelMetrics.VarNucMatch,
LabelMetrics.CoefVarNuc,
)
# metrics computed from weighted averages
WT_METRICS = (
*VAR_METRICS,
LabelMetrics.EdgeDistMean,
)
def _coef_var(df):
# calculate coefficient of variation from data frame columns,
# where first column is std and second is mean
return np.divide(df.iloc[:, 0], df.iloc[:, 1])
class MetricCombos(Enum):
"""Combinations of metrics.
Each combination should be a tuple of combination name, a
tuple of metric Enums, and a function to use for aggregation applied
    across columns to give a new metric value for each row.
"""
# sum of columns measuring regional homogeneity; missing columns
# will be ignored
HOMOGENEITY = (
"Homogeneity",
(LabelMetrics.VarIntensity, #LabelMetrics.VarIntensDiff,
LabelMetrics.EdgeDistSum, LabelMetrics.VarNuclei),
lambda x: np.nanmean(x, axis=1))
# coefficient of variation of intensity values
COEFVAR_INTENS = (
"CoefVarIntensity",
(LabelMetrics.VarIntensity, LabelMetrics.MeanIntensity),
_coef_var)
# coefficient of variation of intensity values
COEFVAR_NUC = (
"CoefVarNuclei",
(LabelMetrics.VarNuclei, LabelMetrics.MeanNuclei),
_coef_var)
class LabelToEdge(object):
"""Convert a label to an edge with class methods as an encapsulated
way to use in multiprocessing without requirement for global variables.
Attributes:
labels_img_np: Integer labels images as a Numpy array.
"""
labels_img_np = None
@classmethod
def set_labels_img_np(cls, val):
"""Set the labels image.
Args:
val: Labels image to set as class attribute.
"""
cls.labels_img_np = val
@classmethod
def find_label_edge(cls, label_id):
"""Convert a label into just its border.
Args:
label_id: Integer of the label to extract from
:attr:``labels_img_np``.
Returns:
Tuple of the given label ID; list of slices defining the
location of the ROI where the edges can be found; and the
ROI as a volume mask defining where the edges exist.
"""
print("getting edge for {}".format(label_id))
slices = None
borders = None
# get mask of label to get bounding box
label_mask = cls.labels_img_np == label_id
props = measure.regionprops(label_mask.astype(np.int))
if len(props) > 0 and props[0].bbox is not None:
_, slices = cv_nd.get_bbox_region(props[0].bbox)
# work on a view of the region for efficiency, obtaining borders
# as eroded region and writing into new array
region = cls.labels_img_np[tuple(slices)]
label_mask_region = region == label_id
borders = cv_nd.perimeter_nd(label_mask_region)
return label_id, slices, borders
def make_labels_edge(labels_img_np):
"""Convert labels image into label borders image.
    Each label in the annotated image is reduced to its border, producing
    an edge image of the annotations.
Args:
labels_img_np: Image as a Numpy array, assumed to be an
annotated image whose edges will be found by obtaining
the borders of all annotations.
Returns:
Binary image array the same shape as ``labels_img_np`` with labels
reduced to their corresponding borders.
"""
start_time = time()
labels_edge = np.zeros_like(labels_img_np)
label_ids = np.unique(labels_img_np)
# use a class to set and process the label without having to
# reference the labels image as a global variable
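    # With a fork-based multiprocessing start method (an assumption here), the
    # workers inherit the class attribute, so only the integer label ID needs to
    # be sent to each task instead of the full labels image.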
LabelToEdge.set_labels_img_np(labels_img_np)
pool = chunking.get_mp_pool()
pool_results = []
for label_id in label_ids:
pool_results.append(
pool.apply_async(
LabelToEdge.find_label_edge, args=(label_id, )))
for result in pool_results:
label_id, slices, borders = result.get()
if slices is not None:
borders_region = labels_edge[tuple(slices)]
borders_region[borders] = label_id
pool.close()
pool.join()
print("time elapsed to make labels edge:", time() - start_time)
return labels_edge
class MeasureLabel(object):
"""Measure metrics within image labels in a way that allows
multiprocessing without global variables.
All images should be of the same shape. If :attr:``df`` is available,
it will be used in place of underlying images. Typically this
data frame contains metrics for labels only at the lowest level,
such as drawn or non-overlapping labels. These labels can then be
used to aggregate values through summation or weighted means to
generate metrics for superseding labels that contains these
individual labels.
Attributes:
atlas_img_np: Sample image as a Numpy array.
labels_img_np: Integer labels image as a Numpy array.
labels_edge: Numpy array of labels reduced to their edges.
dist_to_orig: Distance map of labels to edges, with intensity values
in the same placement as in ``labels_edge``.
heat_map: Numpy array as a density map.
blobs (:obj:`np.ndarray`): 2D array of blobs such as nuclei in the
format, ``[[z, y, x, label_id, ...], ...]``. Defaults to None.
subseg: Integer sub-segmentations labels image as Numpy array.
df: Pandas data frame with a row for each sub-region.
"""
# metric keys
_COUNT_METRICS = (
LabelMetrics.Volume, LabelMetrics.Intensity, LabelMetrics.Nuclei)
_EDGE_METRICS = (
LabelMetrics.EdgeSize, LabelMetrics.EdgeDistSum,
LabelMetrics.EdgeDistMean)
_SHAPE_METRICS = (
LabelMetrics.SurfaceArea, LabelMetrics.Compactness)
_PCL_METRICS = (
LabelMetrics.NucCluster, LabelMetrics.NucClusNoise,
LabelMetrics.NucClusLarg,
)
# images and data frame
atlas_img_np = None
labels_img_np = None
labels_edge = None
dist_to_orig = None
labels_interior = None
heat_map = None
blobs = None
subseg = None
df = None
spacing = None
@classmethod
def set_data(cls, atlas_img_np, labels_img_np, labels_edge=None,
dist_to_orig=None, labels_interior=None, heat_map=None,
blobs=None, subseg=None, df=None, spacing=None):
"""Set the images and data frame."""
cls.atlas_img_np = atlas_img_np
cls.labels_img_np = labels_img_np
cls.labels_edge = labels_edge
cls.dist_to_orig = dist_to_orig
cls.labels_interior = labels_interior
cls.heat_map = heat_map
cls.blobs = blobs
cls.subseg = subseg
cls.df = df
cls.spacing = spacing
@classmethod
def label_metrics(cls, label_id, extra_metrics=None):
"""Calculate metrics for a given label or set of labels.
Wrapper to call :func:``measure_variation``,
:func:``measure_variation``, and :func:``measure_edge_dist``.
Args:
label_id: Integer of the label or sequence of multiple labels
in :attr:``labels_img_np`` for which to measure variation.
extra_metrics (List[:obj:`config.MetricGroups`]): Sequence of
additional metric groups to measure; defaults to None.
Returns:
Tuple of the given label ID, intensity variation, number of
pixels in the label, density variation, number of blobs,
sum edge distances, mean of edge distances, and number of
pixels in the label edge.
"""
# process basic metrics
#print("getting label metrics for {}".format(label_id))
_, count_metrics = cls.measure_counts(label_id)
_, var_metrics = cls.measure_variation(label_id)
_, edge_metrics = cls.measure_edge_dist(label_id)
metrics = {**count_metrics, **var_metrics, **edge_metrics}
if extra_metrics:
for extra_metric in extra_metrics:
# process additional metrics by applying corresponding function
fn = None
if extra_metric is config.MetricGroups.SHAPES:
fn = cls.measure_shapes
elif extra_metric is config.MetricGroups.POINT_CLOUD:
fn = cls.measure_point_cloud
if fn:
_, extra_metrics = fn(label_id)
metrics.update(extra_metrics)
return label_id, metrics
@classmethod
def measure_counts(cls, label_ids):
"""Measure the distance between edge images.
file with no "src" or "dest".')
if not src:
raise BuildConfigError('"bundle" has a file with no "src".')
if not dest:
raise BuildConfigError('"bundle" has a file with no "dest".')
src = StringTemplate(src).substitute(src_vars)
dest = do_parse_level_substitutions(
dest, src, allow_lang=False, extra_vars=extra_vars
)
file_list.append(BundleFile(src=src, dest=dest))
return Bundle(zipfile=zipfile, files=file_list)
def parse_misc_file(
obj: Dict[str, Any], extra_vars: Dict[str, str]
) -> Optional[MiscFileData]:
src = required(obj, "src", "misc_files")
dest = required(obj, "dest", "misc_files")
if bool_field(obj, "skip"):
verbose(f"Skipping file {src}")
return None
else:
dest = do_parse_level_substitutions(
dest, src, allow_lang=False, extra_vars=extra_vars
)
mf = MiscFileData(
src=src,
dest=dest,
dest_is_dir=obj.get("dest_is_dir", None),
is_template=obj.get("template", False),
only_in_profile=obj.get("only_in_profile", None),
)
# Sanity checks: A Markdown file can be translated to Markdown,
# PDF or HTML. An HTML file can be translated to HTML or PDF.
# is_template is disallowed for non-text files.
if mf.is_template and (not is_text_file(src)):
raise BuildConfigError(
f'Section misc_files: "{src}" is marked as a template'
+ "but it is not a text file."
)
# We can't check to see whether the target is a directory, since
# nothing exists yet. But if it has an extension, we can assume it
# is not a directory.
if has_extension(dest):
# It's a file, not a directory.
if mf.dest_is_dir:
raise BuildConfigError(
f'Section misc_files: "{src}" uses a "dest" of '
+ f'"{dest}", which has an extension, so it is assumed '
+ 'to be a file. But, "dest_is_dir" is set to true.'
)
if is_markdown(src):
if not (is_pdf(dest) or is_html(dest) or is_markdown(dest)):
raise BuildConfigError(
f'Section misc_files: "{src}" is Markdown, the '
+ f'target ("{dest}") is not a directory and is '
+ "not PDF, HTML or Markdown."
)
if is_html(src):
if not (is_pdf(dest) or is_html(dest)):
raise BuildConfigError(
f'Section misc_files: "{src}" is HTML, the '
+ f'target ("{dest}") is not a directory and is '
+ "not PDF or HTML."
)
else:
# No extension. Assume dest_is_dir is True, if not set.
if mf.dest_is_dir is None:
mf = dataclasses.replace(mf, dest_is_dir=True)
# Some simple sanity checks.
if (not mf.dest_is_dir) and (dest in (".", "..")):
raise BuildConfigError(
f'Section misc_files: "{src}" has a "dest" of '
+ f'"{dest}", but "dest_is_dir" is set to false. '
+ "That's just silly."
)
return mf
def parse_dataset(
obj: Dict[str, Any], extra_vars: Dict[str, Any], build_yaml_dir: str
) -> Optional[DatasetData]:
src = required(obj, "src", "notebooks")
dest = required(obj, "dest", "notebooks")
if bool_field(obj, "skip"):
verbose(f"Skipping data set {src}")
return None
else:
src_dir = path.dirname(src)
license = joinpath(src_dir, "LICENSE.md")
readme = joinpath(src_dir, "README.md")
p = joinpath(build_yaml_dir, src)
if not path.exists(p):
raise BuildConfigError(f'Dataset file "{p}" does not exist')
for i in (license, readme):
p = joinpath(build_yaml_dir, i)
if not path.exists(p):
raise BuildConfigError(
f'Dataset "{src}": Required "{p}" does not exist.'
)
if os.stat(p).st_size == 0:
raise BuildConfigError(f'Dataset "{src}": "{p}" is empty.')
adj_dest = do_parse_level_substitutions(
dest, src, allow_lang=False, extra_vars=extra_vars
)
return DatasetData(src=src, dest=adj_dest, license=license, readme=readme)
def parse_file_section(
        section: Sequence[Dict[str, Any]], parse: Callable[..., Any], *args: Any
) -> Tuple:
# Use the supplied parse function to parse each element in the
# supplied section, filtering out None results from the function.
# Convert the entire result to a tuple.
        return tuple([o for o in [parse(i, *args) for i in section] if o is not None])
def parse_markdown(obj: Dict[str, Any]) -> MarkdownInfo:
if obj:
stylesheet = obj.get("html_stylesheet")
else:
stylesheet = None
return MarkdownInfo(html_stylesheet=stylesheet)
def parse_notebook_types(contents: Dict[str, Any]) -> Dict[NotebookType, Any]:
res = NotebookType.default_mappings()
names_to_keys = dict([(t.value, t) for t in NotebookType])
invalid_keys = set()
for k, v in list(contents.get("notebook_type_name", {}).items()):
t = names_to_keys.get(k)
if not t:
invalid_keys.add(k)
else:
res[t] = v
if invalid_keys:
key_str = ", ".join(invalid_keys)
raise BuildConfigError(
f'Unknown key(s) in "notebook_type_name" section: {key_str}'
)
return res
def parse_min_version(key: str, value: str) -> Optional[Tuple[int, int]]:
res = contents.get(key)
if res is not None:
if isinstance(res, float):
raise BuildConfigError(
f'"{key}" of the form <major>.<minor> must be quoted.'
)
try:
# Ignore the match version.
res = parse_version_string(res)[0:2]
except ValueError as e:
raise BuildConfigError(f'Bad value of "{res}" for "{key}": {e}')
return res
def parse_course_type(
data: Dict[str, Any], section: str
) -> master_parse.CourseType:
course_type = data.get("type")
if not course_type:
raise BuildConfigError(
f'Missing required "{section}.type" setting in "{yaml_file}"'
)
if course_type.lower() == "self-paced":
return master_parse.CourseType.SELF_PACED
if course_type.lower() == "ilt":
return master_parse.CourseType.ILT
raise BuildConfigError(
            f'Unknown value of "{course_type}" for "{section}.type". '
+ 'Legal values are "ilt" and "self-paced".'
)
def parse_course_info(
course_info_cfg: Dict[str, Any], section_name: str
) -> CourseInfo:
ilt_only = {"class_setup": None, "schedule": None, "instructor_prep": None}
name = required(course_info_cfg, "name", section_name)
version = required(course_info_cfg, "version", section_name)
ilt_only["class_setup"] = course_info_cfg.get("class_setup")
ilt_only["schedule"] = course_info_cfg.get("schedule")
ilt_only["instructor_prep"] = course_info_cfg.get("prep")
course_type = parse_course_type(course_info_cfg, section_name)
deprecated = course_info_cfg.get("deprecated", False)
copyright_year = course_info_cfg.get("copyright_year", str(datetime.now().year))
        if course_type == master_parse.CourseType.SELF_PACED:
for k, v in list(ilt_only.items()):
if v:
warn(f"course_info.{k} is ignored for self-paced courses")
ilt_only[k] = None
return CourseInfo(
name=name,
title=course_info_cfg.get("title", name),
version=version,
class_setup=ilt_only["class_setup"],
schedule=ilt_only["schedule"],
instructor_prep=ilt_only["instructor_prep"],
course_type=course_type,
deprecated=deprecated,
copyright_year=copyright_year,
)
def parse_output_info(
contents: Dict[str, Any], course_info: CourseInfo
) -> OutputInfo:
student_dir = contents.get("student_dir", DEFAULT_STUDENT_FILES_SUBDIR)
instructor_dir = contents.get("instructor_dir", DEFAULT_INSTRUCTOR_FILES_SUBDIR)
student_dbc = contents.get("student_dbc", DEFAULT_STUDENT_LABS_DBC)
instructor_dbc = contents.get("instructor_dbc", DEFAULT_INSTRUCTOR_LABS_DBC)
for (k, v) in (
("student_dbc", student_dbc),
("instructor_dbc", instructor_dbc),
):
if path.dirname(v) != "":
raise BuildConfigError(f'"{k}" value "{v}" is not a simple file name.')
if student_dir == instructor_dir:
raise BuildConfigError(
'"student_dir" and "instructor_dir" cannot be the same. '
+ f'"student_dir" is "{student_dir}". '
+ f'"instructor_dir" is "{instructor_dir}".'
)
# Allow substitution of ${course_name}, {course_version} and/or
# ${course_id} in the file names.
fields = {
"course_name": course_info.name,
"course_version": course_info.version,
"course_id": course_info.course_id,
}
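        # Illustrative example (hypothetical values): with course_name "Spark-ILT"
        # and course_version "1.0", a setting such as
        # "${course_name}-${course_version}-student.dbc" expands to
        # "Spark-ILT-1.0-student.dbc".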
def sub(filename: str) -> str:
return VariableSubstituter(filename).substitute(fields)
return OutputInfo(
student_dir=student_dir,
instructor_dir=instructor_dir,
student_dbc=sub(student_dbc),
instructor_dbc=sub(instructor_dbc),
)
def parse_profiles(contents: Dict[str, Any]) -> Set[master_parse.Profile]:
profiles = contents.get("profiles")
use_profiles = bool_field(contents, "use_profiles", False)
if profiles and use_profiles:
raise BuildConfigError(
'You cannot specify both "use_profiles" and "profiles".'
)
if profiles:
res = set()
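            # Each entry may be either a plain string ("amazon") or a single-key
            # mapping ("amazon: Amazon"); both forms are normalized to
            # master_parse.Profile(name, value) below.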
for thing in profiles:
if isinstance(thing, dict):
if len(list(thing.keys())) != 1:
raise BuildConfigError(f"Malformed profile: {thing}")
n = list(thing.keys())[0]
v = thing[n]
if not isinstance(v, str):
raise BuildConfigError(
f'The value of profile "{n}" ("{v}") is not ' + "a string."
)
res.add(master_parse.Profile(name=n, value=v))
continue
if isinstance(thing, str):
res.add(master_parse.Profile(name=thing, value=thing))
continue
raise BuildConfigError(
f'Profile "{thing}" is neither a simple string nor a '
+ '"name: value"'
)
elif use_profiles:
warn('"use_profiles" is deprecated. Use explicit profiles.')
res = {
master_parse.Profile(name="amazon", value="Amazon"),
master_parse.Profile(name="azure", value="azure"),
}
else:
res = set()
return res
# Main function logic
verbose(f"Loading {yaml_file}...")
contents = read_yaml_file(yaml_file)
bdc_min_version = parse_min_version(
"bdc_min_version", required(contents, "bdc_min_version", "build")
)
cur_major_minor = parse_version_string(VERSION)[0:2]
if bdc_min_version > cur_major_minor:
version_str = ".".join(map(str, bdc_min_version))
raise BuildConfigError(
f"This build requires bdc version {version_str}.x or greater, "
+ f"but you're using bdc version {VERSION}."
)
profiles = parse_profiles(contents)
variables = contents.get("variables", {})
notebooks_cfg = required(contents, "notebooks", "build")
slides_cfg = contents.get("slides", [])
misc_files_cfg = contents.get("misc_files", [])
datasets_cfg = contents.get("datasets", [])
course_info_cfg = required(contents, "course_info", "build")
course_info = parse_course_info(course_info_cfg, "course_info")
src_base = required(contents, "src_base", "build")
build_yaml_full = path.abspath(yaml_file)
build_yaml_dir = path.dirname(build_yaml_full)
src_base = path.abspath(joinpath(build_yaml_dir, src_base))
notebook_defaults = parse_notebook_defaults(
contents, "notebook_defaults", build_yaml_dir
)
if slides_cfg:
slides = parse_file_section(slides_cfg, parse_slide, variables)
else:
slides = None
if datasets_cfg:
datasets = parse_file_section(
datasets_cfg, parse_dataset, variables, build_yaml_dir
)
else:
datasets = None
if misc_files_cfg:
misc_files = parse_file_section(misc_files_cfg, parse_misc_file, variables)
else:
misc_files = None
if notebooks_cfg:
notebooks = parse_file_section(
notebooks_cfg,
parse_notebook,
notebook_defaults,
variables,
profiles,
build_yaml_dir,
)
else:
notebooks = None
need_master = any([n.master.enabled for n in notebooks])
if need_master:
required_master_min_version = parse_min_version(
"master_parse_min_version",
required(
contents,
"master_parse_min_version",
"build",
error='"master_parse_min_version" is required if any '
+ "notebooks use the master parser.",
),
)
master_version = parse_version_string(master_parse.VERSION)[0:2]
if required_master_min_version > master_version:
version_str = ".".join(map(str, required_master_min_version))
raise BuildConfigError(
f"This build requires master_parse version {version_str}.x "
+ "or greater, but you're using master_parse version "
+ f"{master_parse.VERSION}."
)
output_info = parse_output_info(contents, course_info)
bundle_info = parse_bundle(
contents.get("bundle"), output_info, course_info, variables
)
data = BuildData(
build_file_path=build_yaml_full,
top_dbc_folder_name=contents.get("top_dbc_folder_name"),
course_info=course_info,
output_info=output_info,
notebooks=notebooks,
slides=slides,
datasets=datasets,
source_base=src_base,
misc_files=misc_files,
keep_lab_dirs=bool_field(contents, "keep_lab_dirs"),
markdown_cfg=parse_markdown(contents.get("markdown")),
notebook_type_map=parse_notebook_types(contents),
variables=variables,
profiles=profiles,
bundle_info=bundle_info,
)
return data
def parse_args() -> Dict[str, Any]:
"""
Parse the command line parameters.
"""
from docopt import docopt
return docopt(USAGE, version=VERSION)
def expand_template(
src_template_file: str,
build: BuildData,
tempdir: | |
ParallelLoggingManager):
self.logging_module: ParallelLoggingManager = logging_module
self.api_key = kwargs['api_key']
self.server = kwargs['server']
self.conf, self.secret_conf = self._load_conf_files(kwargs['conf'], kwargs['secret'])
self.env_json = self._load_env_results_json()
self.is_nightly = kwargs['nightly']
self.slack_client = SlackClient(kwargs['slack'])
self.circleci_token = kwargs['circleci']
self.build_number = kwargs['build_number']
self.build_name = kwargs['branch_name']
self.isAMI = kwargs['is_ami']
self.memCheck = kwargs['mem_check']
self.server_version = kwargs['server_version']
self.is_local_run = (self.server is not None)
self.server_numeric_version = self._get_server_numeric_version()
self.instances_ips = self._get_instances_ips()
self.filtered_tests = self._extract_filtered_tests()
self.tests_data_keeper = TestResults(self.conf.unmockable_integrations)
self.conf_unmockable_tests = self._get_unmockable_tests_from_conf()
self.unmockable_test_ids: Set[str] = set()
self.mockable_tests_to_run, self.unmockable_tests_to_run = self._get_tests_to_run()
self.slack_user_id = self._retrieve_slack_user_id()
self.all_integrations_configurations = self._get_all_integration_config(self.instances_ips)
def _get_all_integration_config(self, instances_ips: dict) -> Optional[list]:
"""
Gets all integration configuration as it exists on the demisto server
        Since all packs are installed, the data returned from this request is very heavy, and we want to avoid
        running it in multiple threads.
Args:
instances_ips: The mapping of the urls to the ports used to tunnel it's traffic
Returns:
A dict containing the configuration for the integration if found, else empty list
"""
if not self.is_nightly:
return []
url, port = list(instances_ips.items())[0]
server_url = f'https://localhost:{port}' if port else f'https://{url}'
return self.get_all_installed_integrations_configurations(server_url)
def get_all_installed_integrations_configurations(self, server_url: str) -> list:
"""
Gets all integration configuration as it exists on the demisto server
Args:
server_url: The url of the server to create integration in
Returns:
A dict containing the configuration for the integration if found else empty list
"""
tmp_client = demisto_client.configure(base_url=server_url,
api_key=self.api_key,
verify_ssl=False)
self.logging_module.debug('Getting all integrations instances')
try:
res_raw = demisto_client.generic_request_func(self=tmp_client, path='/settings/integration/search',
method='POST', body={})
except ApiException:
self.logging_module.exception('failed to get all integrations configuration')
return []
res = ast.literal_eval(res_raw[0])
TIMEOUT = 180
SLEEP_INTERVAL = 5
total_sleep = 0
        while 'configurations' not in res:
            if total_sleep >= TIMEOUT:
                self.logging_module.error(
                    f"Timeout - failed to get all integration configuration. Error: {res}")
                return []
            time.sleep(SLEEP_INTERVAL)
            total_sleep += SLEEP_INTERVAL
            # Re-issue the request; otherwise the loop would keep inspecting the same stale response.
            try:
                res_raw = demisto_client.generic_request_func(self=tmp_client, path='/settings/integration/search', method='POST', body={})
                res = ast.literal_eval(res_raw[0])
            except ApiException:
                self.logging_module.exception('failed to get all integrations configuration')
                return []
all_configurations = res['configurations']
return all_configurations
def _generate_tests_queue(self, tests_to_run: List[TestConfiguration]) -> Queue:
"""
Generates a queue containing test playbooks to run
Args:
tests_to_run: A list containing playbook names
"""
queue: Queue = Queue()
for test in tests_to_run:
playbook = TestPlaybook(self, test)
if playbook.should_test_run():
queue.put(playbook)
return queue
def _get_tests_to_run(self) -> Tuple[Queue, Queue]:
"""
Gets tests to run in the current build and updates the unmockable tests ids set
Returns:
- A queue with mockable TestPlaybook instances to run in the current build
- A queue with unmockable TestPlaybook instances to run in the current build
"""
unmockable_tests = []
all_tests = self._get_all_tests()
if self.server or not self.isAMI:
unmockable_tests = all_tests
self.unmockable_test_ids = {test.playbook_id for test in all_tests}
elif self.isAMI:
unmockable_tests = self.conf_unmockable_tests
self.unmockable_test_ids = {test.playbook_id for test in self.conf_unmockable_tests}
self.logging_module.debug(f'Unmockable tests selected: {pformat(self.unmockable_test_ids)}')
mockable_tests = [test for test in all_tests if test.playbook_id not in self.unmockable_test_ids]
self.logging_module.debug(f'Mockable tests selected: {pformat([test.playbook_id for test in mockable_tests])}')
mockable_tests_queue = self._generate_tests_queue(mockable_tests)
unmockable_tests_queue = self._generate_tests_queue(unmockable_tests)
return mockable_tests_queue, unmockable_tests_queue
@staticmethod
def _extract_filtered_tests() -> list:
"""
Reads the content from ./Tests/filter_file.txt and parses it into a list of test playbook IDs that should be run
in the current build
Returns:
A list of playbook IDs that should be run in the current build
"""
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = [line.strip('\n') for line in filter_file.readlines()]
return filtered_tests
def _get_unmockable_tests_from_conf(self) -> list:
"""
Extracts the unmockable test playbook by looking at all playbooks that has unmockable integrations
Returns:
            A list of unmockable test records
"""
unmockable_integrations = self.conf.unmockable_integrations
tests = self.conf.tests
unmockable_tests = []
for test_record in tests:
test_name = test_record.playbook_id
unmockable_integrations_used = [integration_name for integration_name in test_record.test_integrations if
integration_name in unmockable_integrations]
if test_name and (not test_record.test_integrations or unmockable_integrations_used) or not self.isAMI:
unmockable_tests.append(test_record)
# In case a test has both - an unmockable integration and is configured with is_mockable=False -
# it will be added twice if we don't continue.
continue
if test_record.is_mockable is False:
unmockable_tests.append(test_record)
return unmockable_tests
@staticmethod
def _get_used_integrations(test_configuration: dict) -> list:
"""
Gets the integration used in a test playbook configurations
Args:
test_configuration: A test configuration from content conf.json file.
Returns:
A list with integration names
"""
tested_integrations = test_configuration.get("integrations", [])
if isinstance(tested_integrations, list):
return tested_integrations
else:
return [tested_integrations]
def _get_all_tests(self) -> List[TestConfiguration]:
"""
Gets a list of all playbooks configured in content conf.json
Returns:
            A list of test configurations that have a playbook ID
"""
tests_records = self.conf.tests
return [test for test in tests_records if test.playbook_id]
def _get_instances_ips(self) -> Dict[str, Any]:
"""
Parses the env_results.json and extracts the instance ip from each server configured in it.
Returns:
A dict contains a mapping from server internal ip to the port used to tunnel it.
"""
if self.server:
return {self.server: None}
instances_ips = {env.get('InstanceDNS'): env.get('TunnelPort') for env in self.env_json if env.get('Role') == self.server_version}
return instances_ips
def get_public_ip_from_server_url(self, server_url: str) -> str:
"""
        Converts a tunnel server URL of the form https://localhost:<port> into the server's private URL:
        the port is looked up in self.instances_ips and the matching private IP URL is returned if found.
Args:
server_url: The server url to parse the port from.
Returns:
A URL with the private IP of the server.
"""
port_pattern = re.compile(r'https://localhost:([0-9]+)')
port_match = port_pattern.findall(server_url)
if port_match:
port = int(port_match[0])
else:
# If the server URL has no port in the end - it means it's a local build and we can return the
# server URL as is.
return server_url
for server_private_ip, tunnel_port in self.instances_ips.items():
if tunnel_port == port:
return f'https://{server_private_ip}'
raise Exception(f'Could not find private ip for the server mapped to port {port}')
@staticmethod
def _parse_tests_list_arg(tests_list: str):
"""
Parses the test list arguments if present.
:param tests_list: CSV string of tests to run.
:return: List of tests if there are any, otherwise empty list.
"""
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
@staticmethod
def _load_env_results_json():
if not os.path.isfile(ENV_RESULTS_PATH):
return {}
with open(ENV_RESULTS_PATH, 'r') as json_file:
return json.load(json_file)
def _get_server_numeric_version(self) -> str:
"""
Gets the current server version
Returns:
Server numeric version
"""
default_version = '99.99.98'
if self.is_local_run:
self.logging_module.info(f'Local run, assuming server version is {default_version}', real_time=True)
return default_version
if not self.env_json:
self.logging_module.warning(
f'Did not find {ENV_RESULTS_PATH} file, assuming server version is {default_version}.',
real_time=True)
return default_version
server_version_mapping = {
'Server 5.0': '5.0.0',
'Server 5.5': '5.5.0',
'Server 6.0': '6.0.0',
'Server Master': default_version
}
server_numeric_version = server_version_mapping.get(self.server_version, default_version)
self.logging_module.info(f'Server version: {server_numeric_version}', real_time=True)
return server_numeric_version
@staticmethod
def _load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = Conf(json.load(data_file))
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = SecretConf(json.load(data_file))
return conf, secret_conf
def _get_user_name_from_circle(self):
url = f"https://circleci.com/api/v1.1/project/github/demisto/content/{self.build_number}?" \
f"circle-token={self.circleci_token}"
res = self._http_request(url)
user_details = res.get('user', {})
return user_details.get('name', '')
@staticmethod
def _http_request(url, params_dict=None):
try:
res = requests.request("GET",
url,
verify=True,
params=params_dict,
)
res.raise_for_status()
return res.json()
except Exception as e:
raise e
def _retrieve_slack_user_id(self):
"""
Gets the user id of the circle user who triggered the current build
"""
circle_user_name = self._get_user_name_from_circle()
user_id = ''
res = self.slack_client.api_call('users.list')
user_list = res.get('members', [])
for user in user_list:
profile = user.get('profile', {})
name = profile.get('real_name_normalized', '')
if name == circle_user_name:
user_id = user.get('id', '')
return user_id
class TestResults:
def __init__(self, unmockable_integrations):
self.succeeded_playbooks = []
self.failed_playbooks = set()
self.skipped_tests = dict()
self.skipped_integrations = dict()
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = unmockable_integrations
self.playbook_skipped_integration = set()
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def create_result_files(self):
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(self.failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(self.skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(self.skipped_integrations))
def print_test_summary(self,
is_ami: bool = True,
logging_module: Union[Any, ParallelLoggingManager] = logging) -> None:
"""
Takes the information stored in the tests_data_keeper and prints it in a human readable way.
Args:
is_ami: indicating if the server running the tests is an AMI or not.
logging_module: Logging module to use for test_summary
"""
succeed_playbooks = self.succeeded_playbooks
failed_playbooks = self.failed_playbooks
skipped_tests = self.skipped_tests
unmocklable_integrations = self.unmockable_integrations
skipped_integration = self.skipped_integrations
rerecorded_tests = self.rerecorded_tests
empty_files = self.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
logging_module.real_time_logs_only = True
logging_module.info('TEST RESULTS:')
        logging_module.info(f'Number of playbooks tested - {succeed_count +
map.get('items'):
temp_model = BaseFileResponse()
self.items.append(temp_model.from_map(k))
if map.get('next_marker') is not None:
self.next_marker = map.get('next_marker')
return self
class SharePermissionPolicy(TeaModel):
"""
*
"""
def __init__(self, file_path=None, permission_inheritable=None, permission_list=None, permission_type=None):
self.file_path = file_path # type: str
self.permission_inheritable = permission_inheritable # type: bool
self.permission_list = permission_list # type: List[str]
self.permission_type = permission_type # type: str
def validate(self):
pass
def to_map(self):
result = {}
if self.file_path is not None:
result['file_path'] = self.file_path
if self.permission_inheritable is not None:
result['permission_inheritable'] = self.permission_inheritable
if self.permission_list is not None:
result['permission_list'] = self.permission_list
if self.permission_type is not None:
result['permission_type'] = self.permission_type
return result
def from_map(self, map={}):
if map.get('file_path') is not None:
self.file_path = map.get('file_path')
if map.get('permission_inheritable') is not None:
self.permission_inheritable = map.get('permission_inheritable')
if map.get('permission_list') is not None:
self.permission_list = map.get('permission_list')
if map.get('permission_type') is not None:
self.permission_type = map.get('permission_type')
return self
class StoreFile(TeaModel):
"""
*
"""
def __init__(self, domain_id=None, name=None, parent_file_path=None, store_id=None, type=None):
self.domain_id = domain_id # type: str
self.name = name # type: str
self.parent_file_path = parent_file_path # type: str
self.store_id = store_id # type: str
self.type = type # type: str
def validate(self):
pass
def to_map(self):
result = {}
if self.domain_id is not None:
result['domain_id'] = self.domain_id
if self.name is not None:
result['name'] = self.name
if self.parent_file_path is not None:
result['parent_file_path'] = self.parent_file_path
if self.store_id is not None:
result['store_id'] = self.store_id
if self.type is not None:
result['type'] = self.type
return result
def from_map(self, map={}):
if map.get('domain_id') is not None:
self.domain_id = map.get('domain_id')
if map.get('name') is not None:
self.name = map.get('name')
if map.get('parent_file_path') is not None:
self.parent_file_path = map.get('parent_file_path')
if map.get('store_id') is not None:
self.store_id = map.get('store_id')
if map.get('type') is not None:
self.type = map.get('type')
return self
class StoreItemResponse(TeaModel):
"""
*
"""
def __init__(self, accelerate_endpoint=None, base_path=None, bucket=None, cdn_endpoint=None,
customized_accelerate_endpoint=None, customized_cdn_endpoint=None, customized_endpoint=None, customized_internal_endpoint=None,
domain_id=None, endpoint=None, internal_endpoint=None, location=None, ownership=None, policy=None,
role_arn=None, store_id=None, type=None):
        # Global acceleration endpoint
self.accelerate_endpoint = accelerate_endpoint # type: str
        # Common storage path prefix
self.base_path = base_path # type: str
        # Bucket name
self.bucket = bucket # type: str
        # CDN (content delivery) endpoint
self.cdn_endpoint = cdn_endpoint # type: str
        # Custom global acceleration endpoint
self.customized_accelerate_endpoint = customized_accelerate_endpoint # type: str
        # Custom CDN endpoint
self.customized_cdn_endpoint = customized_cdn_endpoint # type: str
        # Custom public access endpoint
self.customized_endpoint = customized_endpoint # type: str
        # Custom VPC access endpoint
self.customized_internal_endpoint = customized_internal_endpoint # type: str
self.domain_id = domain_id # type: str
        # Public access endpoint
self.endpoint = endpoint # type: str
        # VPC access endpoint
self.internal_endpoint = internal_endpoint # type: str
        # Location
self.location = location # type: str
        # Store ownership: "system" means provided by the system, "custom" means a user-supplied store
self.ownership = ownership # type: str
        # Policy grant: for "system" stores, bucket permissions are granted to the current cloud account
self.policy = policy # type: str
        # Role ARN used to access the bucket
self.role_arn = role_arn # type: str
# store ID
self.store_id = store_id # type: str
        # Storage type; currently only "oss" is supported
self.type = type # type: str
def validate(self):
self.validate_required(self.bucket, 'bucket')
self.validate_required(self.endpoint, 'endpoint')
self.validate_required(self.ownership, 'ownership')
self.validate_required(self.policy, 'policy')
self.validate_required(self.store_id, 'store_id')
self.validate_required(self.type, 'type')
def to_map(self):
result = {}
if self.accelerate_endpoint is not None:
result['accelerate_endpoint'] = self.accelerate_endpoint
if self.base_path is not None:
result['base_path'] = self.base_path
if self.bucket is not None:
result['bucket'] = self.bucket
if self.cdn_endpoint is not None:
result['cdn_endpoint'] = self.cdn_endpoint
if self.customized_accelerate_endpoint is not None:
result['customized_accelerate_endpoint'] = self.customized_accelerate_endpoint
if self.customized_cdn_endpoint is not None:
result['customized_cdn_endpoint'] = self.customized_cdn_endpoint
if self.customized_endpoint is not None:
result['customized_endpoint'] = self.customized_endpoint
if self.customized_internal_endpoint is not None:
result['customized_internal_endpoint'] = self.customized_internal_endpoint
if self.domain_id is not None:
result['domain_id'] = self.domain_id
if self.endpoint is not None:
result['endpoint'] = self.endpoint
if self.internal_endpoint is not None:
result['internal_endpoint'] = self.internal_endpoint
if self.location is not None:
result['location'] = self.location
if self.ownership is not None:
result['ownership'] = self.ownership
if self.policy is not None:
result['policy'] = self.policy
if self.role_arn is not None:
result['role_arn'] = self.role_arn
if self.store_id is not None:
result['store_id'] = self.store_id
if self.type is not None:
result['type'] = self.type
return result
def from_map(self, map={}):
if map.get('accelerate_endpoint') is not None:
self.accelerate_endpoint = map.get('accelerate_endpoint')
if map.get('base_path') is not None:
self.base_path = map.get('base_path')
if map.get('bucket') is not None:
self.bucket = map.get('bucket')
if map.get('cdn_endpoint') is not None:
self.cdn_endpoint = map.get('cdn_endpoint')
if map.get('customized_accelerate_endpoint') is not None:
self.customized_accelerate_endpoint = map.get('customized_accelerate_endpoint')
if map.get('customized_cdn_endpoint') is not None:
self.customized_cdn_endpoint = map.get('customized_cdn_endpoint')
if map.get('customized_endpoint') is not None:
self.customized_endpoint = map.get('customized_endpoint')
if map.get('customized_internal_endpoint') is not None:
self.customized_internal_endpoint = map.get('customized_internal_endpoint')
if map.get('domain_id') is not None:
self.domain_id = map.get('domain_id')
if map.get('endpoint') is not None:
self.endpoint = map.get('endpoint')
if map.get('internal_endpoint') is not None:
self.internal_endpoint = map.get('internal_endpoint')
if map.get('location') is not None:
self.location = map.get('location')
if map.get('ownership') is not None:
self.ownership = map.get('ownership')
if map.get('policy') is not None:
self.policy = map.get('policy')
if map.get('role_arn') is not None:
self.role_arn = map.get('role_arn')
if map.get('store_id') is not None:
self.store_id = map.get('store_id')
if map.get('type') is not None:
self.type = map.get('type')
return self
class StreamInfo(TeaModel):
"""
*
"""
def __init__(self, crc_64hash=None, download_url=None, thumbnail=None, url=None):
# crc64_hash
self.crc_64hash = crc_64hash # type: str
# download_url
self.download_url = download_url # type: str
# thumbnail
self.thumbnail = thumbnail # type: str
# url
self.url = url # type: str
def validate(self):
pass
def to_map(self):
result = {}
if self.crc_64hash is not None:
result['crc64_hash'] = self.crc_64hash
if self.download_url is not None:
result['download_url'] = self.download_url
if self.thumbnail is not None:
result['thumbnail'] = self.thumbnail
if self.url is not None:
result['url'] = self.url
return result
def from_map(self, map={}):
if map.get('crc64_hash') is not None:
self.crc_64hash = map.get('crc64_hash')
if map.get('download_url') is not None:
self.download_url = map.get('download_url')
if map.get('thumbnail') is not None:
self.thumbnail = map.get('thumbnail')
if map.get('url') is not None:
self.url = map.get('url')
return self
class StreamUploadInfo(TeaModel):
"""
*
"""
def __init__(self, part_info_list=None, pre_rapid_upload=None, rapid_upload=None, upload_id=None):
# part_info_list
self.part_info_list = part_info_list # type: List[UploadPartInfo]
# pre_rapid_upload
# type: boolean
self.pre_rapid_upload = pre_rapid_upload # type: bool
# rapid_upload
# type: boolean
self.rapid_upload = rapid_upload # type: bool
# upload_id
self.upload_id = upload_id # type: str
def validate(self):
if self.part_info_list:
for k in self.part_info_list:
if k:
k.validate()
def to_map(self):
result = {}
result['part_info_list'] = []
if self.part_info_list is not None:
for k in self.part_info_list:
result['part_info_list'].append(k.to_map() if k else None)
if self.pre_rapid_upload is not None:
result['pre_rapid_upload'] = self.pre_rapid_upload
if self.rapid_upload is not None:
result['rapid_upload'] = self.rapid_upload
if self.upload_id is not None:
result['upload_id'] = self.upload_id
return result
def from_map(self, map={}):
self.part_info_list = []
if map.get('part_info_list') is not None:
for k in map.get('part_info_list'):
temp_model = UploadPartInfo()
self.part_info_list.append(temp_model.from_map(k))
if map.get('pre_rapid_upload') is not None:
self.pre_rapid_upload = map.get('pre_rapid_upload')
if map.get('rapid_upload') is not None:
self.rapid_upload = map.get('rapid_upload')
if map.get('upload_id') is not None:
self.upload_id = map.get('upload_id')
return self
class SystemTag(TeaModel):
"""
*
"""
def __init__(self, confidence=None, en_name=None, name=None, parent_en_name=None, parent_name=None,
selected=None, tag_level=None):
self.confidence = confidence # type: float
self.en_name = en_name # type: str
self.name = name # type: str
self.parent_en_name = parent_en_name # type: str
self.parent_name = parent_name # type: str
self.selected = selected # type: bool
self.tag_level = tag_level # type: int
def validate(self):
pass
def to_map(self):
result = {}
if self.confidence is not None:
result['confidence'] = self.confidence
if self.en_name is not None:
result['en_name'] = self.en_name
if self.name is not None:
result['name'] = self.name
if self.parent_en_name is not None:
result['parent_en_name'] = self.parent_en_name
if self.parent_name is not None:
result['parent_name'] = self.parent_name
if self.selected is not None:
result['selected'] = self.selected
if self.tag_level is not None:
result['tag_level'] = self.tag_level
return result
def from_map(self, map={}):
if map.get('confidence') is not None:
self.confidence = map.get('confidence')
if map.get('en_name') is not None:
self.en_name = map.get('en_name')
if map.get('name') is not None:
self.name = map.get('name')
if map.get('parent_en_name') is not None:
self.parent_en_name = map.get('parent_en_name')
if map.get('parent_name') is not None:
self.parent_name = map.get('parent_name')
if map.get('selected') is not None:
self.selected = map.get('selected')
if map.get('tag_level') is not None:
self.tag_level = map.get('tag_level')
return self
class TokenRequest(TeaModel):
"""
*
"""
def __init__(self, headers=None, addition_data=None, app_id=None, grant_type=None, refresh_token=None):
self.headers = headers # type: Dict[str, str]
# addition_data
self.addition_data = addition_data # type: dict
        # App ID of the app currently being accessed
self.app_id = app_id # type: str
        # Must be set to "refresh_token"
self.grant_type = grant_type # type: str
        # Refresh token returned at login
self.refresh_token = refresh_token # type: str
def validate(self):
self.validate_required(self.app_id, 'app_id')
self.validate_required(self.grant_type, 'grant_type')
self.validate_required(self.refresh_token, 'refresh_token')
def to_map(self):
result = {}
if self.headers is not None:
result['headers'] = self.headers
if self.addition_data is not None:
result['addition_data'] = self.addition_data
if self.app_id is not None:
result['app_id'] = self.app_id
if self.grant_type is not None:
result['grant_type'] = self.grant_type
if self.refresh_token is not None:
            result['refresh_token'] = self.refresh_token
        return result
        if not AWS_ACCOUNT_NUMBER or not AWS_REGION:
print("Please set and export environment variable AWS_ACCOUNT_NUMBER and AWS_REGION!")
exit(1)
if not buckets:
print("WARNING: Without setting buckets (using --buckets)," +
"Tibanna would have access to only public buckets." +
"To give permission to Tibanna for private buckets," +
"use --buckets=<bucket1>,<bucket2>,...")
time.sleep(2)
if buckets:
bucket_names = buckets.split(',')
else:
bucket_names = None
if bucket_names and not do_not_delete_public_access_block:
client = boto3.client('s3')
for b in bucket_names:
printlog("Deleting public access block for bucket %s" % b)
response = client.delete_public_access_block(Bucket=b)
tibanna_iam = self.IAM(usergroup_tag, bucket_names, no_randomize=no_randomize)
tibanna_iam.create_tibanna_iam(verbose=verbose)
print("Tibanna usergroup %s has been created on AWS." % tibanna_iam.user_group_name)
return tibanna_iam.user_group_name
def deploy_tibanna(self, suffix=None, usergroup='', setup=False,
buckets='', setenv=False, do_not_delete_public_access_block=False):
"""deploy tibanna unicorn or pony to AWS cloud (pony is for 4DN-DCIC only)"""
if setup:
if usergroup:
usergroup = self.setup_tibanna_env(buckets, usergroup, True,
do_not_delete_public_access_block=do_not_delete_public_access_block)
else: # override usergroup
usergroup = self.setup_tibanna_env(buckets,
do_not_delete_public_access_block=do_not_delete_public_access_block)
        # this function updates an existing step function if one with the same name already exists
step_function_name = self.create_stepfunction(suffix, usergroup=usergroup)
print("creating a new step function... %s" % step_function_name)
if setenv:
os.environ['TIBANNA_DEFAULT_STEP_FUNCTION_NAME'] = step_function_name
with open(os.getenv('HOME') + "/.bashrc", "a") as outfile: # 'a' stands for "append"
outfile.write("\nexport TIBANNA_DEFAULT_STEP_FUNCTION_NAME=%s\n" % step_function_name)
print("deploying lambdas...")
self.deploy_core('all', suffix=suffix, usergroup=usergroup)
dd_utils.create_dynamo_table(DYNAMODB_TABLE, DYNAMODB_KEYNAME)
return step_function_name
def deploy_unicorn(self, suffix=None, no_setup=False, buckets='',
no_setenv=False, usergroup='', do_not_delete_public_access_block=False):
"""deploy tibanna unicorn to AWS cloud"""
self.deploy_tibanna(suffix=suffix, usergroup=usergroup, setup=not no_setup,
buckets=buckets, setenv=not no_setenv,
do_not_delete_public_access_block=do_not_delete_public_access_block)
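    # A hedged usage sketch of the two deploy entry points above (bucket and
    # group names are placeholders): calling
    #   self.deploy_unicorn(suffix='dev', buckets='my-lab-bucket', usergroup='lab')
    # sets up the IAM user group, creates or updates the step function and
    # lambdas for that group/suffix, and appends TIBANNA_DEFAULT_STEP_FUNCTION_NAME
    # to ~/.bashrc unless no_setenv=True is passed.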
def add_user(self, user, usergroup):
"""add a user to a tibanna group"""
groupname_prefix = 'tibanna_'
if self.lambda_type:
groupname_prefix += self.lambda_type + '_'
boto3.client('iam').add_user_to_group(
GroupName=groupname_prefix + usergroup,
UserName=user
)
def users(self):
"""list all users along with their associated tibanna user groups"""
client = boto3.client('iam')
marker = None
while True:
if marker:
res = client.list_users(Marker=marker)
else:
res = client.list_users()
print("user\ttibanna_usergroup")
for r in res['Users']:
res_groups = client.list_groups_for_user(
UserName=r['UserName']
)
groups = [rg['GroupName'] for rg in res_groups['Groups']]
groups = filter(lambda x: 'tibanna_' in x, groups)
groups = [x.replace('tibanna_', '') for x in groups]
print("%s\t%s" % (r['UserName'], ','.join(groups)))
marker = res.get('Marker', '')
if not marker:
break
def create_stepfunction(self, dev_suffix=None,
region_name=AWS_REGION,
aws_acc=AWS_ACCOUNT_NUMBER,
usergroup=None):
if not aws_acc or not region_name:
print("Please set and export environment variable AWS_ACCOUNT_NUMBER and AWS_REGION!")
exit(1)
# create a step function definition object
sfndef = self.StepFunction(dev_suffix, region_name, aws_acc, usergroup)
        # if this encounters an existing step function with the same name, the definition is updated in place
sfn = boto3.client('stepfunctions', region_name=region_name)
retries = 12 # wait 10 seconds between retries for total of 120s
for i in range(retries):
try:
sfn.create_state_machine(
name=sfndef.sfn_name,
definition=json.dumps(sfndef.definition, indent=4, sort_keys=True),
roleArn=sfndef.sfn_role_arn
)
except sfn.exceptions.StateMachineAlreadyExists as e:
# get ARN from the error and format as necessary
exc_str = str(e)
if 'State Machine Already Exists:' not in exc_str:
                    print('Cannot delete state machine. Exiting... %s' % exc_str)
raise(e)
sfn_arn = exc_str.split('State Machine Already Exists:')[-1].strip().strip("''")
print('Step function with name %s already exists!' % sfndef.sfn_name)
print('Updating the state machine...')
try:
sfn.update_state_machine(
stateMachineArn=sfn_arn,
definition=json.dumps(sfndef.definition, indent=4, sort_keys=True),
roleArn=sfndef.sfn_role_arn
)
except Exception as e:
print('Error updating state machine %s' % str(e))
raise(e)
except Exception as e:
raise(e)
break
return sfndef.sfn_name
def check_metrics_plot(self, job_id, log_bucket):
return True if does_key_exist(log_bucket, job_id + '.metrics/metrics.html', quiet=True) else False
def check_metrics_lock(self, job_id, log_bucket):
return True if does_key_exist(log_bucket, job_id + '.metrics/lock', quiet=True) else False
def plot_metrics(self, job_id, sfn=None, directory='.', open_browser=True, force_upload=False,
update_html_only=False, endtime='', filesystem='/dev/nvme1n1'):
''' retrieve instance_id and plots metrics '''
if not sfn:
sfn = self.default_stepfunction_name
postrunjsonstr = self.log(job_id=job_id, sfn=sfn, postrunjson=True, quiet=True)
if postrunjsonstr:
postrunjson = AwsemPostRunJson(**json.loads(postrunjsonstr))
job_complete = True
job = postrunjson.Job
log_bucket = postrunjson.config.log_bucket
instance_type = postrunjson.config.instance_type or 'unknown'
else:
runjsonstr = self.log(job_id=job_id, sfn=sfn, runjson=True, quiet=True)
job_complete = False
if runjsonstr:
runjson = AwsemRunJson(**json.loads(runjsonstr))
job = runjson.Job
log_bucket = runjson.config.log_bucket
instance_type = runjson.config.instance_type or 'unknown'
else:
raise Exception("Neither postrun json nor run json can be retrieved." +
"Check job_id or step function?")
# report already on s3 with a lock
if self.check_metrics_plot(job_id, log_bucket) and \
self.check_metrics_lock(job_id, log_bucket) and \
not force_upload:
printlog("Metrics plot is already on S3 bucket.")
printlog('metrics url= ' + METRICS_URL(log_bucket, job_id))
# open metrics html in browser
if open_browser:
webbrowser.open(METRICS_URL(log_bucket, job_id))
return None
# report not already on s3 with a lock
starttime = job.start_time_as_str
if not endtime:
if hasattr(job, 'end_time_as_str') and job.end_time_as_str:
endtime = job.end_time_as_str
else:
endtime = datetime.utcnow()
        if hasattr(job, 'filesystem') and job.filesystem:
            filesystem = job.filesystem
instance_id = ''
if hasattr(job, 'instance_id') and job.instance_id:
instance_id = job.instance_id
else:
ddres = dict()
try:
dd = boto3.client('dynamodb')
ddres = dd.query(TableName=DYNAMODB_TABLE,
KeyConditions={'Job Id': {'AttributeValueList': [{'S': job_id}],
'ComparisonOperator': 'EQ'}})
except Exception as e:
pass
if 'Items' in ddres:
instance_id = ddres['Items'][0].get('instance_id', {}).get('S', '')
if not instance_id:
ec2 = boto3.client('ec2')
res = ec2.describe_instances(Filters=[{'Name': 'tag:Name', 'Values': ['awsem-' + job_id]}])
if res['Reservations']:
instance_id = res['Reservations'][0]['Instances'][0]['InstanceId']
instance_status = res['Reservations'][0]['Instances'][0]['State']['Name']
if instance_status in ['terminated', 'shutting-down']:
job_complete = True # job failed
else:
job_complete = False # still running
else:
                    # wait up to 5 minutes to be sure the instance is starting
                    if (datetime.utcnow() - starttime) / timedelta(minutes=1) < 5:
raise Exception("the instance is still setting up. " +
"Wait a few seconds/minutes and try again.")
else:
raise Exception("instance id not available for this run.")
# plotting
if update_html_only:
self.TibannaResource.update_html(log_bucket, job_id + '.metrics/')
else:
try:
M = self.TibannaResource(instance_id, filesystem, starttime, endtime)
M.plot_metrics(instance_type, directory)
except Exception as e:
raise MetricRetrievalException(e)
# upload files
M.upload(bucket=log_bucket, prefix=job_id + '.metrics/', lock=job_complete)
# clean up uploaded files
for f in M.list_files:
os.remove(f)
printlog('metrics url= ' + METRICS_URL(log_bucket, job_id))
# open metrics html in browser
if open_browser:
webbrowser.open(METRICS_URL(log_bucket, job_id))
def cost(self, job_id, sfn=None, update_tsv=False):
if not sfn:
sfn = self.default_stepfunction_name
postrunjsonstr = self.log(job_id=job_id, sfn=sfn, postrunjson=True)
if not postrunjsonstr:
return None
postrunjson = AwsemPostRunJson(**json.loads(postrunjsonstr))
job = postrunjson.Job
def reformat_time(t, delta):
d = datetime.strptime(t, '%Y%m%d-%H:%M:%S-UTC') + timedelta(days=delta)
return d.strftime("%Y-%m-%d")
start_time = reformat_time(job.start_time, -1) # give more room
end_time = reformat_time(job.end_time, 1) # give more room
billing_args = {'Filter': {'Tags': {'Key': 'Name', 'Values': ['awsem-' + job_id]}},
'Granularity': 'DAILY',
'TimePeriod': {'Start': start_time,
'End': end_time},
'Metrics': ['BlendedCost']}
billingres = boto3.client('ce').get_cost_and_usage(**billing_args)
cost = sum([float(_['Total']['BlendedCost']['Amount']) for _ in billingres['ResultsByTime']])
if update_tsv:
log_bucket = postrunjson.config.log_bucket
# reading from metrics_report.tsv
does_key_exist(log_bucket, job_id + '.metrics/metrics_report.tsv')
read_file = read_s3(log_bucket, os.path.join(job_id + '.metrics/', 'metrics_report.tsv'))
if 'Cost' not in read_file:
write_file = read_file + 'Cost\t' + str(cost) + '\n'
# writing
with open('metrics_report.tsv', 'w') as fo:
fo.write(write_file)
# upload new metrics_report.tsv
upload('metrics_report.tsv', log_bucket, job_id + '.metrics/')
os.remove('metrics_report.tsv')
else:
printlog("cost already in the tsv file. not updating")
return cost
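    # Note on cost(): the Cost Explorer query above filters on the resource tag
    # Name=awsem-<job_id>, the same tag plot_metrics() uses to locate the EC2
    # instance, so per-job cost attribution relies on that tag having been
    # applied at launch. A hypothetical call:
    #   self.cost('a1b2c3d4e5f6', update_tsv=True)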
def does_dynamo_table_exist(self, tablename):
try:
res = boto3.client('dynamodb').describe_table(
TableName=tablename
)
if res:
return True
else:
raise Exception("error describing table %s" % tablename)
except Exception as e:
if 'Requested resource not found' in str(e):
return False
else:
raise Exception("error describing table %s" % tablename)
def create_dynamo_table(self, tablename, keyname):
if self.does_dynamo_table_exist(tablename):
print("dynamodb table %s already exists. skip creating db" % tablename)
else:
response = boto3.client('dynamodb').create_table(
TableName=tablename,
AttributeDefinitions=[
{
'AttributeName': keyname,
'AttributeType': 'S'
}
],
KeySchema=[
{
'AttributeName': keyname,
'KeyType': 'HASH'
}
],
BillingMode='PAY_PER_REQUEST'
)
def is_idle(self, instance_id, max_cpu_percent_threshold=1.0):
"""returns True if the instance is idle i.e. not doing anything for
the past 1 hour and is safe to kill"""
end = datetime.now(tzutc())
start = end - timedelta(hours=1)
filesystem = '/dev/nvme1n1' # doesn't matter for cpu utilization
try:
cw_res = self.TibannaResource(instance_id, filesystem, start, end).as_dict()
except Exception as e:
raise MetricRetrievalException(e)
if not cw_res['max_cpu_utilization_percent']:
return True
if cw_res['max_cpu_utilization_percent'] < max_cpu_percent_threshold:
return True
return False
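    # Minimal usage sketch for is_idle() (the instance id is a placeholder):
    #   if self.is_idle('i-0123456789abcdef0'):
    #       printlog('instance has been idle for the past hour; safe to terminate')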
def cleanup(self, user_group_name, suffix='', ignore_errors=True, do_not_remove_iam_group=False,
purge_history=False, verbose=False):
def handle_error(errmsg):
if ignore_errors:
if verbose:
printlog(errmsg)
printlog("continue to remove the other components")
else:
raise Exception(errmsg)
        if user_group_name.startswith('tibanna'):
            raise Exception("user_group_name must not start with tibanna or tibanna_unicorn.")
if suffix:
lambda_suffix = '_' + user_group_name + '_' + suffix
else:
lambda_suffix = '_' + user_group_name
# delete step function
sfn = 'tibanna_' + self.sfn_type + lambda_suffix
if verbose:
printlog("deleting step function %s" % sfn)
try:
boto3.client('stepfunctions').delete_state_machine(stateMachineArn=STEP_FUNCTION_ARN(sfn))
except Exception as e:
handle_error("Failed to cleanup step function: %s" % str(e))
# delete lambdas
lambda_client = boto3.client('lambda')
for lmb in self.lambda_names:
if verbose:
printlog("deleting lambda functions %s" % lmb + lambda_suffix)
try:
lambda_client.delete_function(FunctionName=lmb + lambda_suffix)
except Exception as e:
handle_error("Failed to cleanup lambda: %s" % str(e))
# delete IAM policies, roles and groups
if not do_not_remove_iam_group:
            if verbose:
                printlog("deleting IAM policies, roles and groups...")
returnValue(response)
@twisted_async
@inlineCallbacks
def GetAllChannelpartitionConfig(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'GetAllChannelpartitionConfig',
Empty(),
context,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpartition error')
context.set_code(response.error_code)
returnValue(Empty())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def CreateChannelpartition(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.ChannelpartitionConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.ChannelpartitionConfig())
response = yield self.dispatcher.dispatch(
'CreateChannelpartition',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpartition\'{}\' error'.format(
request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelpartitionConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def UpdateChannelpartition(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.ChannelpartitionConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.ChannelpartitionConfig())
response = yield self.dispatcher.dispatch(
'UpdateChannelpartition',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpartition\'{}\' error'.format(
request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelpartitionConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def DeleteChannelpartition(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.ChannelpartitionConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.ChannelpartitionConfig())
response = yield self.dispatcher.dispatch(
'DeleteChannelpartition',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpartition\'{}\' error'.format(
request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelpartitionConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def GetAllChannelpairConfig(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'GetAllChannelpairConfig',
Empty(),
context,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpair error')
context.set_code(response.error_code)
returnValue(Empty())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def CreateChannelpair(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.ChannelpairConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.ChannelpairConfig())
response = yield self.dispatcher.dispatch(
'CreateChannelpair',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpair\'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelpairConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def UpdateChannelpair(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.ChannelpairConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.ChannelpairConfig())
response = yield self.dispatcher.dispatch(
'UpdateChannelpair',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpair\'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelpairConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def DeleteChannelpair(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.ChannelpairConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.ChannelpairConfig())
response = yield self.dispatcher.dispatch(
'DeleteChannelpair',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channelpair\'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelpairConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def GetAllChannelterminationConfig(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'GetAllChannelterminationConfig',
request,
context,
id=request.id)
log.info('grpc-response', response=response)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channeltermination \'{}\' error'.format(
request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelterminationConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def CreateChanneltermination(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'CreateChanneltermination',
request,
context,
id=request.id)
log.info('grpc-response', response=response)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channeltermination \'{}\' error'.format(
request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelterminationConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def UpdateChanneltermination(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'UpdateChanneltermination',
request,
context,
id=request.id)
log.info('grpc-response', response=response)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channeltermination \'{}\' error'.format(
request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelterminationConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def DeleteChanneltermination(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'DeleteChanneltermination',
request,
context,
id=request.id)
log.info('grpc-response', response=response)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Channeltermination \'{}\' error'.format(
request.id))
context.set_code(response.error_code)
returnValue(fb.ChannelterminationConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def GetAllOntaniConfig(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'GetAllOntaniConfig',
Empty(),
context,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Ontani error')
context.set_code(response.error_code)
returnValue(Empty())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def CreateOntani(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.OntaniConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.OntaniConfig())
response = yield self.dispatcher.dispatch(
'CreateOntani',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Ontani \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.OntaniConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def UpdateOntani(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.OntaniConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.OntaniConfig())
response = yield self.dispatcher.dispatch(
'UpdateOntani',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Ontani \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.OntaniConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def DeleteOntani(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.OntaniConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.OntaniConfig())
response = yield self.dispatcher.dispatch(
'DeleteOntani',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('Ontani \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.OntaniConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def GetAllVOntaniConfig(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'GetAllVOntaniConfig',
Empty(),
context,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VOntani error')
context.set_code(response.error_code)
returnValue(Empty())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def CreateVOntani(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.VOntaniConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.VOntaniConfig())
response = yield self.dispatcher.dispatch(
'CreateVOntani',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VOntani \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.VOntaniConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def UpdateVOntani(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.VOntaniConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.VOntaniConfig())
response = yield self.dispatcher.dispatch(
'UpdateVOntani',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VOntani \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.VOntaniConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def DeleteVOntani(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.VOntaniConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.VOntaniConfig())
response = yield self.dispatcher.dispatch(
'DeleteVOntani',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VOntani \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.VOntaniConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def GetAllVEnetConfig(self, request, context):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
'GetAllVEnetConfig',
request,
context,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VEnet error')
context.set_code(response.error_code)
returnValue(Empty())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def CreateVEnet(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.VEnetConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.VEnetConfig())
response = yield self.dispatcher.dispatch(
'CreateVEnet',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VEnet \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.VEnetConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def UpdateVEnet(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.VEnetConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.VEnetConfig())
response = yield self.dispatcher.dispatch(
'UpdateVEnet',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VEnet \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.VEnetConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def DeleteVEnet(self, request, context):
log.info('grpc-request', request=request)
try:
assert isinstance(request, fb.VEnetConfig)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(fb.VEnetConfig())
response = yield self.dispatcher.dispatch(
'DeleteVEnet',
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('VEnet \'{}\' error'.format(request.id))
context.set_code(response.error_code)
returnValue(fb.VEnetConfig())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
@twisted_async
@inlineCallbacks
def GetAllTrafficDescriptorProfileData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.get_all_global_xpon_object_data (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def CreateTrafficDescriptorProfileData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def UpdateTrafficDescriptorProfileData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def DeleteTrafficDescriptorProfileData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def GetAllTcontsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.get_all_global_xpon_object_data (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def CreateTcontsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def UpdateTcontsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def DeleteTcontsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def GetAllGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.get_all_global_xpon_object_data (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def CreateGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def UpdateGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def DeleteGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def GetAllMulticastGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.get_all_global_xpon_object_data (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def CreateMulticastGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def UpdateMulticastGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def DeleteMulticastGemportsConfigData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def GetAllMulticastDistributionSetData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.get_all_global_xpon_object_data (request, context,
_method_name)
@twisted_async
@inlineCallbacks
def CreateMulticastDistributionSetData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def UpdateMulticastDistributionSetData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
@twisted_async
@inlineCallbacks
def DeleteMulticastDistributionSetData(self, request, context):
_method_name = sys._getframe().f_code.co_name
return self.manage_global_xpon_object (request, context, _method_name)
def get_all_global_xpon_object_data(self, request, context, method_name):
log.info('grpc-request', request=request)
response = yield self.dispatcher.dispatch(
method_name,
Empty(),
context,
broadcast=True)
if isinstance(response, DispatchError):
log.warn('grpc-error-response', error=response.error_code)
context.set_details('{}\' error' .format(type(request).__name__))
context.set_code(response.error_code)
returnValue(Empty())
else:
log.debug('grpc-success-response', response=response)
returnValue(response)
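    # manage_global_xpon_object below dispatches on the calling RPC's name
    # (captured via sys._getframe in the wrappers above) and looks up the
    # expected protobuf message class in self.xpon_object_type, a mapping
    # assumed to be defined elsewhere in this class (one entry per
    # Create/Update/Delete RPC name).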
def manage_global_xpon_object(self, request, context, method_name):
log.info('grpc-request', request=request)
_xpon_object_type = self.xpon_object_type[method_name]
try:
assert isinstance(request, _xpon_object_type)
request.id = create_empty_broadcast_id()
except AssertionError, e:
context.set_details(e.message)
context.set_code(StatusCode.INVALID_ARGUMENT)
returnValue(_xpon_object_type())
response = yield self.dispatcher.dispatch(
method_name,
request,
context,
id=request.id,
broadcast=True)
if isinstance(response, DispatchError):
            log.warn('grpc-error-response', error=response.error_code)
            context.set_details('{}\' error'.format(type(request).__name__))
            context.set_code(response.error_code)
            returnValue(_xpon_object_type())
        else:
            log.debug('grpc-success-response', response=response)
            returnValue(response)
# Repository: t-young31/thesis
#!/usr/bin/env python3
import argparse
import numpy as np
from scipy.spatial import distance_matrix
from scipy import integrate
class Constants:
hbar_au = 1.0
h_au = hbar_au * 2.0 * np.pi
amu_to_au = 1822.888486 # m_e amu-1
kj_mol_to_au = 0.00038087980 # Ha (kJ mol-1)-1
kcal_mol_to_au = 0.001593601 # Ha (kcal mol-1)-1
inverse_ang_inverse_au = 1.0 / 1.88973 # au-1 Å
kb_au = 3.1668114E-6
k_b = 1.38064852E-23 # J K-1
h = 6.62607004E-34 # J s
n_a = 6.022140857E23 # molecules mol-1
ha_to_j = 4.359744650E-18 # J Ha-1
ha_to_j_mol = ha_to_j * n_a # J mol-1 Ha-1
j_to_kcal = 0.239006 * 1E-3 # kcal J-1
atm_to_pa = 101325 # Pa
dm_to_m = 0.1 # m
amu_to_kg = 1.660539040E-27 # Kg
r = k_b * n_a # J K-1 mol-1
c = 299792458 # m s-1
c_in_cm = c * 100 # cm s-1
ang_to_m = 1E-10 # m
# Atomic weights in amu from:
# IUPAC-CIAWW's Atomic weights of the elements: Review 2000
atomic_masses = {"H": 1.00794, "He": 4.002602, "Li": 6.941, "Be": 9.012182,
"B": 10.811, "C": 12.0107, "N": 14.0067, "O": 15.9994,
"F": 18.9984032, "Ne": 2.01797, "Na": 22.989770,
"Mg": 24.3050, "Al": 26.981538, "Si": 28.0855,
"P": 30.973761, "S": 32.065, "Cl": 35.453, "Ar": 39.948,
"K": 39.0983, "Ca": 40.078, "Sc": 44.955910, "Ti": 47.867,
"V": 50.9415, "Cr": 51.9961, "Mn": 54.938049,
"Fe": 55.845, "Co": 58.933200, "Ni": 58.6934,
"Cu": 63.546, "Zn": 65.409, "Ga": 69.723, "Ge": 72.64,
"As": 74.92160, "Se": 78.96, "Br": 79.904, "Kr": 83.798,
"Rb": 85.4678, "Sr": 87.62, "Y": 88.90585, "Zr": 91.224,
"Nb": 92.90638, "Mo": 95.94, "Ru": 101.07,
"Rh": 102.90550, "Pd": 106.42, "Ag": 107.8682,
"Cd": 112.411, "In": 114.818, "Sn": 118.710,
"Sb": 121.760, "Te": 127.60, "I": 126.90447,
"Xe": 131.293, "Cs": 132.90545, "Ba": 137.327,
"La": 138.9055, "Ce": 140.116, "Pr": 140.90765,
"Nd": 144.24, "Sm": 150.36, "Eu": 151.964, "Gd": 157.25,
"Tb": 158.92534, "Dy": 162.500, "Ho": 164.93032,
"Er": 167.259, "Tm": 168.93421, "Yb": 173.04,
"Lu": 174.967, "Hf": 178.49, "Ta": 180.9479,
"W": 183.84, "Re": 186.207, "Os": 190.23, "Ir": 192.217,
"Pt": 195.078, "Au": 196.96655, "Hg": 200.59,
"Tl": 204.3833, "Pb": 207.2, "Bi": 208.98038,
"Th": 232.0381, "Pa": 231.03588, "U": 238.02891}
def get_args():
"""Get command line arguments with argparse"""
parser = argparse.ArgumentParser()
parser.add_argument("filename", action='store',
help='.out file with freq calculation performed')
parser.add_argument('-t', '--temp', type=float, default=298,
help="Temperature (K) at which to calculate G, H and "
"S. Default: %(default)s")
parser.add_argument('-ss', '--standard_state', type=str, default='1M',
help="Standard state. 1atm for gas phase and 1M for "
"solution phase. Default: %(default)s")
parser.add_argument('-m', '--method', default='grimme', nargs='?',
choices=['igm', 'truhlar', 'grimme'],
help='Method by which to calculate G, H and S. '
'Default: %(default)s')
parser.add_argument('-s', '--shift', type=float, default=100,
help="Cut-off (in cm-1) to use in Truhlar's method. "
"Frequencies below this will be shifted to this "
"value. Default: %(default)s")
parser.add_argument('-w', '--w0', type=float, default=100,
help="Value of w0 to use in Grimme's interpolation "
"method Chem. Eur. J. 2012, 18, 9955 eqn. 8. "
"Default: %(default)s")
parser.add_argument('-a', '--alpha', type=float, default=4,
help="Value of alpha to use in Grimme's interpolation "
"method Chem. Eur. J. 2012, 18, 9955 eqn. 8. "
"Default: %(default)s")
parser.add_argument('-cs', '--calc_sym', action='store_true', default=False,
help="Force calculation of symmetry number "
"(N^3 algorithm) used for n_atoms < 50."
" Default: %(default)s")
parser.add_argument('-sn', '--symn', type=int, default=None,
help="Override the symmetry number calculation. "
"Default: %(default)s")
parser.add_argument('-r', '--real_freqs', action='store_true', default=False,
help='Convert any imaginary frequencies to their real '
'counterparts')
parser.add_argument('-ts', '--transition_state', action='store_true',
default=False,
help='This species is a transition state so the lowest'
'imaginary should be ignored in calculating the'
'thermochemical contributions')
return parser.parse_args()
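# Example invocation (script and output file names are placeholders):
#   python otherm.py calc_freq.out -t 298 -ss 1M -m grimme --w0 100 --alpha 4
# computes S, H and G at 298 K in a 1 M standard state, treating low
# frequencies with Grimme's quasi-RRHO interpolation as selected above.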
def print_output(molecule):
print("----------------------------------------------------------------------------------\n"
"| |\n"
"| /$$$$$$ /$$$$$$$$ /$$ /$$ /$$$$$$$$ /$$$$$$$ /$$ /$$ |\n"
"| /$$__ $$|__ $$__/| $$ | $$| $$_____/| $$__ $$| $$$ /$$$ |\n"
"| | $$ \ $$ | $$ | $$ | $$| $$ | $$ \ $$| $$$$ /$$$$ |\n"
"| | $$ | $$ | $$ | $$$$$$$$| $$$$$ | $$$$$$$/| $$ $$/$$ $$ |\n"
"| | $$ | $$ | $$ | $$__ $$| $$__/ | $$__ $$| $$ $$$| $$ |\n"
"| | $$ | $$ | $$ | $$ | $$| $$ | $$ \ $$| $$\ $ | $$ |\n"
"| | $$$$$$/ | $$ | $$ | $$| $$$$$$$$| $$ | $$| $$ \/ | $$ |\n"
"| \______/ |__/ |__/ |__/|________/|__/ |__/|__/ |__/ |\n"
"| |\n"
"-----------------------------------------------------------------------------------\n\n")
print("{:<50s}{:>33s}".format('Filename', args.filename))
print("{:<50s}{:>33.1f}".format('Temperature (K)', args.temp))
print("{:<50s}{:>33s}".format('Standard state is', args.standard_state))
if args.real_freqs:
print("{:<50s}{:>33s}".format('Treat imaginary (negative) frequencies '
'as real', 'True'))
print("{:<50s}{:>33s}".format('Calculating using the method of',
args.method))
if args.method.lower() == 'grimme':
print("{:<50s}{:>33s}".format('', '<NAME>. 2012, 18, 9955'))
if args.method.lower() == 'truhlar':
print("{:<50s}{:>33s}".format('', '<NAME>, 2011, 115, 14556'))
print()
print("{:<50s}{:>33s}".format('Symmetry number (σ)', str(molecule.sigma_r)))
print("{:<50s}{:>33.2f}".format('Molecular weight (amu)', molecule.mass / Constants.amu_to_kg))
print()
if any(freq < 0 for freq in molecule.freqs):
print('---------------------------------------WARNING--------------------------------------')
print(' Found imaginary frequencies', )
print('------------------------------------------------------------------------------------\n')
print("{:<50s}{:>33.2f}".format('Total entropy (J K-1 mol-1)', molecule.s))
print("{:<50s}{:>33.2f}".format('Total enthalpy (J mol-1)', molecule.h))
print("{:<50s}{:>33.2f}".format('Total free energy (J mol-1)', molecule.g))
print()
e_elec_ha = molecule.e / (Constants.ha_to_j * Constants.n_a)
h_ha = molecule.h / (Constants.ha_to_j * Constants.n_a)
g_ha = molecule.g / (Constants.ha_to_j * Constants.n_a)
print("{:<50s}{:>33}".format('For convenience E, H, <NAME> Hartrees', ''))
print(e_elec_ha, h_ha, g_ha, sep=',')
print("----------------------------------------------------------------------------------")
return None
def extract_frequencies(filename):
"""
Extract the frequencies from an ORCA output file. Iterate through the
reversed file to find the frequencies (in cm-1) so if multiple Hessians
have been calculated, the final one is found.
:param filename: Name of the ORCA output file
:return: (list(float)) List of frequencies (high to low, in cm-1)
"""
orca_out_file_lines = open(filename, 'r').readlines()
freq_block = False
freq_list = []
for line in reversed(orca_out_file_lines):
if 'NORMAL MODES' in line:
freq_block = True
if 'VIBRATIONAL FREQUENCIES' in line:
break
if 'cm**-1' in line and freq_block:
try:
# Line is in the form " 0: 0.00 cm**-1"
freq_list.append(float(line.split()[1]))
            except (TypeError, ValueError):
raise Exception("Couldn't extract frequencies")
if len(freq_list) == 0:
raise Exception('Frequencies not found')
return freq_list
def extract_final_electronic_energy(filename):
"""
Get the final electronic energy from an ORCA output file
:param filename: (str)
:return: (float)
"""
for line in reversed(open(filename, 'r').readlines()):
if 'FINAL SINGLE POINT ENERGY' in line:
return Constants.ha_to_j * Constants.n_a * float(line.split()[4])
raise Exception('Final electronic energy not found')
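# The matched ORCA line typically looks like
#   FINAL SINGLE POINT ENERGY      -154.123456789012
# so line.split()[4] is the electronic energy in Hartrees, converted here
# to J mol-1 via ha_to_j * n_a.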
def extract_xyzs(filename):
"""
Extract the xyzs from a ORCA output file in the format [[C, 0.0000, 0.0000, 0.0000], ....]
:param filename: Name of the ORCA output file
:return: List of xyzs
"""
xyzs = []
orca_out_file_lines = open(filename, 'r').readlines()
cart_coords_block = False
for line in reversed(orca_out_file_lines):
xyz_block = True if len(line.split()) == 4 else False
if cart_coords_block and xyz_block:
atom, x, y, z = line.split()[-4:]
xyzs.append([atom, float(x), float(y), float(z)])
if 'CARTESIAN COORDINATES (A.U.)' in line:
cart_coords_block = True
if 'CARTESIAN COORDINATES (ANGSTROEM)' in line:
break
return xyzs
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis/np.linalg.norm(axis)
a = np.cos(theta/2.0)
b, c, d = -axis*np.sin(theta/2.0)
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
[2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
[2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
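# Sanity check of the Euler-Rodrigues form above: a 90 degree counterclockwise
# rotation about z should map the x unit vector onto y, i.e.
#   np.dot(rotation_matrix([0, 0, 1], np.pi / 2), np.array([1.0, 0.0, 0.0]))
# returns approximately array([0., 1., 0.]).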
def normalised_vector(coord1, coord2):
vec = coord2 - coord1
return vec / np.linalg.norm(vec)
def strip_identical_and_inv_axes(axes, sim_axis_tol):
"""
For a list of axes remove those which are similar to within some distance
tolerance, or are inverses to within that tolerance
:param axes: list of axes
:param sim_axis_tol: distance tolerance in Å
:return:
"""
unique_possible_axes = []
for i in range(len(axes)):
unique = True
for unique_axis in unique_possible_axes:
if np.linalg.norm(axes[i] - unique_axis) < sim_axis_tol:
unique = False
if np.linalg.norm(-axes[i] - unique_axis) < sim_axis_tol:
unique = False
if unique:
unique_possible_axes.append(axes[i])
return unique_possible_axes
def get_possible_axes(coords, max_triple_dist=2.0, sim_axis_tol=0.05):
"""
Possible rotation axes in a molecule. Currently limited to average vectors
and cross products i.e.
Y Y --->
/ \ / \
X Y X Z
|
|
,
:param coords:
:param max_triple_dist:
:param sim_axis_tol:
:return:
"""
possible_axes = []
n_atoms = len(coords)
for i in range(n_atoms):
for j in range(n_atoms):
if i > j: # For the unique pairs add the i–j vector
possible_axes.append(normalised_vector(coords[i], coords[j]))
for k in range(n_atoms):
# Triple must not have any of the same atoms
if any((i == j, i == k, j == k)):
continue
# Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from enum import Enum
import os
import requests
from sushy_tools.emulator import constants
from sushy_tools.emulator.resources.systems.base import AbstractSystemsDriver
from sushy_tools import error
vmware_loaded = True
try:
from pyVim.connect import Disconnect
from pyVim.connect import SmartConnectNoSSL
from pyVmomi import vim
except ImportError:
vmware_loaded = False
is_loaded = bool(vmware_loaded)
class ErrMsg(object):
IDENTITY_NOT_FOUND = \
"VMWAREDRV_ERR_000 - Identity {0} Not Found."
POWER_STATE_CANNOT_SET = \
"VMWAREDRV_ERR_010 - Power state {0} for Identity {1} cannot set."
INVALID_BOOT_SOURCE = \
"VMWAREDRV_ERR_020 - Boot Source {0} is not valid."
NO_VIRT_DEV_TO_SUPPORT_BOOT_SRC = \
("VMWAREDRV_ERR_021 - No VirtualDevice exists in {0} "
"to support boot source: {1}.")
NO_BOOTABLE_DEVICE = \
("VMWAREDRV_ERR_022 - No Bootable Device Found. "
"Cannot get boot device.")
INVALID_DEVICE_TYPE = \
("VMWAREDRV_ERR_023 - Invalid Device Type {0}. "
"Valid values are: Pxe, Hdd, Cd, Floppy")
INVALID_BOOT_MODE = \
("VMWAREDRV_ERR_030 - Invalid boot mode. "
"Valid values are UEFI or Legacy.")
BOOT_IMAGE_CANNOT_BE_SET = \
("VMWAREDRV_ERR_031 - Boot image {0} "
"cannot be set for device {1}")
ERR_VMWARE_OPEN = \
"VMWAREDRV_ERR_040 - Error connecting to vmware host. {0}"
DRV_OP_NOT_SUPPORTED = \
("VMWAREDRV_ERR_050 - Operation not supported by "
"the virtualization driver. VMware API does not support {0}")
class PowerStates(Enum):
ON = 'On'
FORCE_ON = 'ForceOn'
FORCE_OFF = 'ForceOff'
GRACEFUL_SHUTDOWN = 'GracefulShutdown'
GRACEFUL_RESTART = 'GracefulRestart'
FORCE_RESTART = 'ForceRestart'
NMI = 'Nmi'
class VmwareOpen(object):
def __init__(self, host, port, username, password):
self._host = host
self._port = port
self._username = username
self._password = password
def __enter__(self):
try:
self._service_instance = SmartConnectNoSSL(
host=self._host, user=self._username,
pwd=self._password, port=self._port)
return self._service_instance
except IOError as e:
error_msg = ErrMsg.ERR_VMWARE_OPEN.format(e)
raise error.FishyError(error_msg)
def __exit__(self, type, value, traceback):
Disconnect(self._service_instance)
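# Minimal usage sketch for VmwareOpen (connection details are placeholders):
#   with VmwareOpen('vcenter.example.com', 443, 'user', 'secret') as si:
#       content = si.RetrieveContent()
# Disconnect() is called automatically on exit, even if the body raises.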
class VmwareDriver(AbstractSystemsDriver):
"""Vmware driver"""
def _get_vms(self, service_instance):
content = service_instance.RetrieveContent()
# Starting point to look into
container = content.rootFolder
# object types to look for
view_type = [vim.VirtualMachine]
# whether we should look into it recursively
recursive = True
container_view = content.viewManager.CreateContainerView(
container, view_type, recursive)
vms = container_view.view
return vms
def _get_vm(self, identity, service_instance):
vmlist = self._get_vms(service_instance)
for vm in vmlist:
            # NOTE: vCenter supports Virtual Machines with the same name
            # provided they are in separate Virtual Machine Folders
            # in a Datacenter.
            # This complicates the search by name as we have no other input
            # to further filter the results.
# At this point the first VM with the matching name will be
# returned and we assume two vms will not be named the same
# within a vSphere host due to naming conventions.
vm_name = vm.summary.config.name
vm_uuid = vm.summary.config.uuid
if (vm_name == identity or vm_uuid == identity):
return vm
raise error.FishyError(
ErrMsg.IDENTITY_NOT_FOUND.format(
identity))
# Helper method to upload an image to the hypervisor
# PLEASE NOTE! This method is not covered by the Unit Tests at this point,
# due to complexity.
# It should NOT require any updates unless the pyvimomi API changes,
# which is unlikely for the managed objects it is using.
# PLEASE TEST EXTENSIVELY IF EVER MODIFIED.
def _upload_image(self, service_instance, host, port,
boot_image, datastore_name):
content = service_instance.RetrieveContent()
boot_image_folder = 'vmedia'
# Get the list of all datacenters we have available to us
datacenters_object_view = content.viewManager.CreateContainerView(
content.rootFolder,
[vim.Datacenter],
True)
# Find the datastore and datacenter we are using
datacenter = None
datastore = None
for dc_obj in datacenters_object_view.view:
datastores_object_view = content.viewManager.CreateContainerView(
dc_obj,
[vim.Datastore],
True)
for ds_obj in datastores_object_view.view:
if ds_obj.info.name == datastore_name:
datacenter = dc_obj
datastore = ds_obj
if not datacenter or not datastore:
raise Exception("Could not find the datastore specified")
# Clean up the views now that we have what we need
datastores_object_view.Destroy()
datacenters_object_view.Destroy()
# Create the Virtual Media Directory
try:
vmedia_directory = "[{0}] {1}".format(
datastore.info.name, boot_image_folder)
file_manager = content.fileManager
file_manager.MakeDirectory(vmedia_directory, datacenter, True)
except vim.fault.FileAlreadyExists:
# Directory already exists so do nothing.
pass
# Prepare http PUT call
isoname = os.path.basename(boot_image)
http_url = "https://{0}:{1}/folder/{2}/{3}".format(
self._host, self._port, boot_image_folder, isoname)
params = {"dsName": datastore.info.name, "dcPath": datacenter.name}
# Get the cookie built from the current session
client_cookie = service_instance._stub.cookie
# Break apart the cookie into its component parts - This is more than
# is needed, but a good example of how to break apart the cookie
# anyways. The verbosity makes it clear what is happening.
cookie_name = client_cookie.split("=", 1)[0]
cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[
1].split(";", 1)[0].lstrip()
cookie_text = " " + cookie_value + "; $" + cookie_path
# Make a cookie
cookie = dict()
cookie[cookie_name] = cookie_text
# Get the request headers set up
headers = {'Content-Type': 'application/octet-stream'}
# Get the file to upload ready, extra protection by using with against
# leaving open threads
with open(boot_image, "rb") as f:
# Connect and upload the file
requests.put(http_url, params=params,
data=f, headers=headers,
cookies=cookie, verify=False)
hypervisor_boot_image = "[{0}] {1}/{2}".format(
datastore.info.name, boot_image_folder, isoname)
return hypervisor_boot_image
def vmware_boot_dev_to_sushydev(self, bootable_device):
if isinstance(bootable_device,
vim.VirtualMachineBootOptionsBootableEthernetDevice):
return 'Pxe'
elif isinstance(bootable_device,
vim.VirtualMachineBootOptionsBootableDiskDevice):
return 'Hdd'
elif isinstance(bootable_device,
vim.VirtualMachineBootOptionsBootableCdromDevice):
return 'Cd'
else:
return 'None'
def is_bootable_ethernet_dev(self, dev):
res = isinstance(dev,
vim.VirtualMachineBootOptionsBootableEthernetDevice)
return res
def is_bootable_disk_dev(self, dev):
res = isinstance(dev,
vim.VirtualMachineBootOptionsBootableDiskDevice)
return res
def is_bootable_cd_dev(self, dev):
res = isinstance(dev,
vim.VirtualMachineBootOptionsBootableCdromDevice)
return res
def is_bootable_floppy_dev(self, dev):
res = isinstance(dev,
vim.VirtualMachineBootOptionsBootableFloppyDevice)
return res
def is_dev_vmxnet3(self, dev):
res = isinstance(dev, vim.vm.device.VirtualVmxnet3)
return res
def is_dev_vdisk(self, dev):
res = isinstance(dev, vim.vm.device.VirtualDisk)
return res
def is_dev_vcd(self, dev):
res = isinstance(dev, vim.vm.device.VirtualCdrom)
return res
def is_dev_flp(self, dev):
res = isinstance(dev, vim.vm.device.VirtualFloppy)
return res
def is_dev_scsi_cntl(self, dev):
res = isinstance(dev, vim.vm.device.VirtualSCSIController)
return res
def is_dev_sata_cntl(self, dev):
res = isinstance(dev, vim.vm.device.VirtualSATAController)
return res
def is_dev_nvme_cntl(self, dev):
res = isinstance(dev, vim.vm.device.VirtualNVMEController)
return res
def is_iso_backing(self, backing):
res = isinstance(backing, vim.vm.device.VirtualCdrom.IsoBackingInfo)
return res
def reorder_boot_devs(self, boot_source, vm):
new_boot_order = []
# Bootable devices exist.
# Check if the boot_source exists in the list and make it first
# in the sequence
if (boot_source == 'Pxe'):
bootable_eth_dev_found = False
virtual_eth_dev_found = False
# Find the device and put it first in the list
for bootable_dev in vm.config.bootOptions.bootOrder:
if self.is_bootable_ethernet_dev(bootable_dev):
# Found it. Move it to the front of the boot list
new_boot_order.append(bootable_dev)
bootable_eth_dev_found = True
if not bootable_eth_dev_found:
# boot_source device was not found in the bootOrder so
# we need to find the device in the virtual device list
# and create a bootable device linking to it
for device in vm.config.hardware.device:
if self.is_dev_vmxnet3(device):
net_device = \
vim.\
VirtualMachineBootOptionsBootableEthernetDevice()
net_device.deviceKey = device.key
# Add to the VM Boot Order
new_boot_order.append(net_device)
virtual_eth_dev_found = True
# boot_source does not exist in the virtual device list
# so raise an exception
if not virtual_eth_dev_found:
vm_name = vm.summary.config.name
error_msg = ErrMsg.NO_VIRT_DEV_TO_SUPPORT_BOOT_SRC.format(
vm_name, boot_source)
raise error.FishyError(error_msg)
# Add the remaining boot devices from the boot order
# omitting the boot_source device
for bootable_dev in vm.config.bootOptions.bootOrder:
if not self.is_bootable_ethernet_dev(bootable_dev):
new_boot_order.append(bootable_dev)
elif (boot_source == 'Hdd'):
bootable_hdd_device_found = False
virtual_hdd_device_found = False
# Find the device and put it first in the list
for bootable_dev in vm.config.bootOptions.bootOrder:
if self.is_bootable_disk_dev(bootable_dev):
# Found it. Move it to the front of the boot list
new_boot_order.append(bootable_dev)
bootable_hdd_device_found = True
if not bootable_hdd_device_found:
# boot_source device was not found in the bootOrder so
# we need to find the device in the virtual device list
# and create a bootable device linking to it
for device in vm.config.hardware.device:
if self.is_dev_vdisk(device):
hdd_device = \
vim.VirtualMachineBootOptionsBootableDiskDevice()
hdd_device.deviceKey = device.key
# Add to the VM Boot Order
new_boot_order.append(hdd_device)
virtual_hdd_device_found = True
# boot_source does not exist in the virtual device list
# so raise an exception
if not virtual_hdd_device_found:
vm_name = vm.summary.config.name
error_msg = ErrMsg.NO_VIRT_DEV_TO_SUPPORT_BOOT_SRC.format(
vm_name, boot_source)
raise error.FishyError(error_msg)
# Add the remaining boot devices from | |
'''Represents the content of a INDEX_ROOT attribute.
The structure of an index is a B+ tree, as such an root is always present.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`AttrTypes`): Attribute type
content[1] (:obj:`CollationRule`): Collation rule
content[2] (int): Index record size in bytes
content[3] (int): Index record size in clusters
node_header (IndexNodeHeader): Node header related to this index root
idx_entry_list (list(IndexEntry)): List of index entries that belong to
this index root
Attributes:
attr_type (:obj:`AttrTypes`): Attribute type
collation_rule (:obj:`CollationRule`): Collation rule
index_len_in_bytes (int): Index record size in bytes
index_len_in_cluster (int): Index record size in clusters
node_header (IndexNodeHeader): Node header related to this index root
index_entry_list (list(IndexEntry)): List of index entries that belong to
this index root
'''
_idx_root_namespace = {"__len__" : _len_idx_root,
"create_from_binary" : classmethod(_from_binary_idx_root)
}
IndexRoot = _create_attrcontent_class("IndexRoot",
("attr_type", "collation_rule", "index_len_in_bytes", "index_len_in_cluster",
"node_header", "index_entry_list"),
inheritance=(AttributeContentRepr,), data_structure="<3IB3x",
extra_functions=_idx_root_namespace, docstring=_docstring_idx_root)
#******************************************************************************
# BITMAP ATTRIBUTE
#******************************************************************************
def _allocated_entries_bitmap(self):
'''Creates a generator that returns all allocated entries in the
bitmap.
Yields:
int: The bit index of the allocated entries.
'''
for entry_number in range(len(self._bitmap) * 8):
if self.entry_allocated(entry_number):
yield entry_number
def _entry_allocated_bitmap(self, entry_number):
"""Checks if a particular index is allocated.
Args:
entry_number (int): Index to verify
Returns:
bool: True if it is allocated, False otherwise.
"""
index, offset = divmod(entry_number, 8)
return bool(self._bitmap[index] & (1 << offset))
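# Worked example (comments only): for entry_number 13, divmod(13, 8) gives
# index 1 and offset 5, so the check above tests bit 5 of byte 1 of the
# bitmap, i.e. self._bitmap[1] & 0b00100000.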
def _get_next_empty_bitmap(self):
"""Returns the next empty entry.
Returns:
int: The value of the empty entry
"""
#TODO probably not the best way, redo
for i, byte in enumerate(self._bitmap):
if byte != 255:
for offset in range(8):
if not byte & (1 << offset):
return (i * 8) + offset
def _from_binary_bitmap(cls, binary_stream):
"""See base class."""
return cls(binary_stream.tobytes())
def _len_bitmap(self):
'''Returns the size of the bitmap in bytes'''
return len(self._bitmap)
_docstring_bitmap = """Represents the content of a BITMAP attribute.
Correctly represents a bitmap as seen by the MFT. That basically means that
the underlying data structure is interpreted bit by bit, where if the bit
is 1, the entry is "occupied"/allocated.
Args:
binary_data (:obj:`bytes`): The bytes where the data is maintained
"""
_bitmap_namespace = {"__len__" : _len_bitmap,
"get_next_empty" : _get_next_empty_bitmap,
"entry_allocated" : _entry_allocated_bitmap,
"allocated_entries" : _allocated_entries_bitmap,
"create_from_binary" : classmethod(_from_binary_bitmap)
}
Bitmap = _create_attrcontent_class("Bitmap",
("_bitmap", ),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_bitmap_namespace, docstring=_docstring_bitmap)
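# Illustrative sketch (not part of the original module): exercising the Bitmap
# content class built above on a two-byte bitmap where only bits 0 and 1 are
# set. It assumes _create_attrcontent_class exposes the functions under the
# names given in _bitmap_namespace and that the constructor accepts the raw
# bytes, as create_from_binary implies.
def _example_bitmap_usage():
    bmp = Bitmap.create_from_binary(memoryview(b"\x03\x00"))
    assert bmp.entry_allocated(0) and bmp.entry_allocated(1)
    assert not bmp.entry_allocated(2)
    assert bmp.get_next_empty() == 2
    assert list(bmp.allocated_entries()) == [0, 1]
    assert len(bmp) == 2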
#******************************************************************************
# REPARSE_POINT ATTRIBUTE
#******************************************************************************
def _from_binary_junc_mnt(cls, binary_stream):
"""See base class."""
''' Offset to target name - 2 (relative to 16th byte)
Length of target name - 2
Offset to print name - 2 (relative to 16th byte)
Length of print name - 2
'''
offset_target_name, len_target_name, offset_print_name, len_print_name = \
cls._REPR.unpack(binary_stream[:cls._REPR.size])
offset = cls._REPR.size + offset_target_name
target_name = binary_stream[offset:offset+len_target_name].tobytes().decode("utf_16_le")
offset = cls._REPR.size + offset_print_name
print_name = binary_stream[offset:offset+len_print_name].tobytes().decode("utf_16_le")
nw_obj = cls((target_name, print_name))
_MOD_LOGGER.debug("Attempted to unpack Junction or MNT point from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_junc_mnt(self):
'''Returns the size of the junction/mount point content in bytes'''
return len(self.target_name.encode("utf_16_le")) + len(self.print_name.encode("utf_16_le")) + 4 #size of offsets
_docstring_junc_mnt = """Represents the content of a REPARSE_POINT attribute when it is a junction
or mount point.
Args:
target_name (str): Target name
print_name (str): Print name
Attributes:
target_name (str): Target name
print_name (str): Print name
"""
_junc_mnt_namespace = {"__len__" : _len_junc_mnt,
"create_from_binary" : classmethod(_from_binary_junc_mnt)
}
JunctionOrMount = _create_attrcontent_class("JunctionOrMount",
("target_name", "print_name"),
inheritance=(AttributeContentRepr,), data_structure="<4H",
extra_functions=_junc_mnt_namespace, docstring=_docstring_junc_mnt)
#------------------------------------------------------------------------------
def _from_binary_syn_link(cls, binary_stream):
"""See base class."""
''' Offset to target name - 2 (relative to 16th byte)
Length of target name - 2
Offset to print name - 2 (relative to 16th byte)
Length of print name - 2
Symbolic link flags - 4
'''
offset_target_name, len_target_name, offset_print_name, \
len_print_name, syn_flags = \
cls._REPR.unpack(binary_stream[:cls._REPR.size])
offset = cls._REPR.size + offset_target_name
target_name = binary_stream[offset:offset+len_target_name].tobytes().decode("utf_16_le")
offset = cls._REPR.size + offset_print_name
print_name = binary_stream[offset:offset+len_print_name].tobytes().decode("utf_16_le")
nw_obj = cls((target_name, print_name, SymbolicLinkFlags(syn_flags)))
_MOD_LOGGER.debug("Attempted to unpack Symbolic Link from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_syn_link(self):
'''Returns the size of the symbolic link content in bytes'''
return len(self.target_name.encode("utf_16_le")) + len(self.print_name.encode("utf_16_le")) + 8 #size of offsets + flags
_docstring_syn_link = """Represents the content of a REPARSE_POINT attribute when it is a
symbolic link.
Args:
target_name (str): Target name
print_name (str): Print name
sym_flags (:obj:`SymbolicLinkFlags`): Symbolic link flags
Attributes:
target_name (str): Target name
print_name (str): Print name
sym_flags (:obj:`SymbolicLinkFlags`): Symbolic link flags
"""
_syn_link_namespace = {"__len__" : _len_syn_link,
"create_from_binary" : classmethod(_from_binary_syn_link)
}
SymbolicLink = _create_attrcontent_class("SymbolicLink",
("target_name", "print_name", "symbolic_flags"),
inheritance=(AttributeContentRepr,), data_structure="<4HI",
extra_functions=_syn_link_namespace, docstring=_docstring_syn_link)
#------------------------------------------------------------------------------
def _from_binary_reparse(cls, binary_stream):
"""See base class."""
''' Reparse type flags - 4
Reparse tag - 4 bits
Reserved - 12 bits
Reparse type - 2 bits
Reparse data length - 2
Padding - 2
'''
#content = cls._REPR.unpack(binary_view[:cls._REPR.size])
reparse_tag, data_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
#reparse_tag (type, flags) data_len, guid, data
reparse_type = ReparseType(reparse_tag & 0x0000FFFF)
reparse_flags = ReparseFlags((reparse_tag & 0xF0000000) >> 28)
guid = None #guid exists only in third party reparse points
if reparse_flags & ReparseFlags.IS_MICROSOFT:#a microsoft tag
if reparse_type is ReparseType.SYMLINK:
data = SymbolicLink.create_from_binary(binary_stream[cls._REPR.size:])
elif reparse_type is ReparseType.MOUNT_POINT:
data = JunctionOrMount.create_from_binary(binary_stream[cls._REPR.size:])
else:
data = binary_stream[cls._REPR.size:].tobytes()
else:
guid = UUID(bytes_le=binary_stream[cls._REPR.size:cls._REPR.size+16].tobytes())
data = binary_stream[cls._REPR.size+16:].tobytes()
nw_obj = cls((reparse_type, reparse_flags, data_len, guid, data))
_MOD_LOGGER.debug("Attempted to unpack REPARSE_POINT from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
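# Worked example (comments only): for the well-known Windows symbolic-link tag
# 0xA000000C, the low word gives ReparseType(0x000C) and the flag nibble is
# (0xA000000C & 0xF0000000) >> 28 == 0xA. Assuming IS_MICROSOFT maps to the
# top bit of that nibble (as the check above implies) and ReparseType.SYMLINK
# carries the value 0x000C, the Microsoft branch is taken and the payload is
# parsed as a SymbolicLink.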
def _len_reparse(self):
'''Returns the size of the reparse point content in bytes'''
return ReparsePoint._REPR.size + self.data_len
_docstring_reparse = '''Represents the content of a REPARSE_POINT attribute.
The REPARSE_POINT attribute is a little more complicated. We can have
Microsoft predefined content and third-party content. As expected,
this completely changes how the data is interpreted.
All Microsoft types of REPARSE_POINT can be gathered from the winnt.h file.
However, as of now, only two have been implemented:
* Symbolic Links - SYMLINK
* Mount or junction point - MOUNT_POINT
As for third-party data, this is always saved in raw (bytes).
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`ReparseType`): Reparse point type
content[1] (:obj:`ReparseFlags`): Reparse point flags
content[2] (int): Reparse data length
content[3] (:obj:`UUID`): GUID
content[4] (*variable*): Content of the reparse type
Attributes:
reparse_type (:obj:`ReparseType`): Reparse point type
reparse_flags (:obj:`ReparseFlags`): Reparse point flags
data_len (int): Reparse data length
guid (:obj:`UUID`): GUID. This exists only in the third-party
reparse points. If it is a Microsoft one, it defaults to ``None``
data (*variable*): Content of the reparse type
'''
_reparse_namespace = {"__len__" : _len_reparse,
"create_from_binary" : classmethod(_from_binary_reparse)
}
ReparsePoint = _create_attrcontent_class("ReparsePoint",
("reparse_type", "reparse_flags", "data_len", "guid", "data"),
inheritance=(AttributeContentRepr,), data_structure="<IH2x",
extra_functions=_reparse_namespace, docstring=_docstring_reparse)
#******************************************************************************
# EA_INFORMATION ATTRIBUTE
#******************************************************************************
def _from_binary_ea_info(cls, binary_stream):
"""See base class."""
''' Size of Extended Attribute entry - 2
Number of Extended Attributes which have NEED_EA set - 2
Size of extended attribute data - 4
'''
return cls(cls._REPR.unpack(binary_stream[:cls._REPR.size]))
def _len_ea_info(self):
return EaInformation._REPR.size
_docstring_ea_info = '''Represents the content of a EA_INFORMATION attribute.
The (HPFS) extended attribute information ($EA_INFORMATION) contains
information about the extended attribute ($EA).
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Size of the EA attribute entry
content[1] (int): Number of EA attributes with NEED_EA set
content[2] (int): Size of the EA data
Attributes:
entry_len (int): Size of the EA attribute entry
ea_set_number (int): Number of EA attributes with NEED_EA set
ea_size (int): Size of the EA data
'''
_ea_info_namespace = {"__len__" : _len_ea_info,
"create_from_binary" : classmethod(_from_binary_ea_info)
}
EaInformation = _create_attrcontent_class("EaInformation",
("entry_len", "ea_set_number", "ea_size"),
inheritance=(AttributeContentRepr,), data_structure="<2HI",
extra_functions=_ea_info_namespace, docstring=_docstring_ea_info)
#******************************************************************************
# EA ATTRIBUTE
#******************************************************************************
def _from_binary_ea_entry(cls, binary_stream):
"""See base class."""
''' Offset to the next EA - 4
Flags - 1
Name length - 1
Value length - 2
'''
offset_next_ea, flags, name_len, value_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
name = binary_stream[cls._REPR.size:cls._REPR.size + name_len].tobytes().decode("ascii")
#it looks like the value is 8 byte aligned, do some math to compensate
#TODO confirm if this is true
value_alignment = (_ceil((cls._REPR.size + name_len) / 8) * 8)
value = binary_stream[value_alignment:value_alignment + value_len].tobytes()
nw_obj = cls((offset_next_ea, EAFlags(flags), name, value))
_MOD_LOGGER.debug("Attempted to unpack EA entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
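# Worked example (comments only): the fixed header described in the comment
# above is 4 + 1 + 1 + 2 = 8 bytes, so for a 5-character name the value offset
# is _ceil((8 + 5) / 8) * 8 == 16, i.e. bytes 13..15 are padding before the
# value starts.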
def _len_ea_entry(self):
'''Returns the size of the entry'''
return EaEntry._REPR.size + len(self.name.encode("ascii")) + self.value_len
_docstring_ea_entry = '''Represents an entry for EA.
The EA attribute is composed of multiple EaEntries. Some details of this
structure are not completely understood. One of them is whether some kind of
alignment is required between the name and the value. The code considers
"""
This module has functions that compares the contrast calculation from different methods:
1. E2E coronagraph
2. (Image-based PASTIS)
3. Matrix-based PASTIS
All three methods are currently only supported for JWST, and you can pick between the analytical or numerical matrix.
HiCAT and LUVOIR only have an E2E vs numerical PASTIS comparison (1 + 3).
"""
import os
import time
import numpy as np
from astropy.io import fits
import astropy.units as u
import logging
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import hcipy as hc
from config import CONFIG_INI
import util_pastis as util
import image_pastis as impastis
from e2e_simulators.luvoir_imaging import LuvoirAPLC
log = logging.getLogger()
@u.quantity_input(rms=u.nm)
def contrast_jwst_ana_num(matdir, matrix_mode="analytical", rms=1. * u.nm, im_pastis=False, plotting=False):
"""
Calculate the contrast for a given RMS WFE with the E2E simulator, image PASTIS and matrix PASTIS
:param matdir: data directory to read the matrix and calibration coefficients from
:param matrix_mode: use 'analytical' or 'numerical' matrix
:param rms: RMS wavefront error in pupil to calculate contrast for; in NANOMETERS
:param im_pastis: default False, whether to also calculate contrast from image PASTIS
:param plotting: default False, whether to save E2E and PASTIS DH PSFs; works only if im_pastis=True
:return: tuple of E2E, image-PASTIS and matrix-PASTIS mean dark-hole contrasts
"""
from e2e_simulators import webbpsf_imaging as webbim
log.warning("THIS ONLY WORKS FOR PISTON FOR NOW")
# Keep track of time
start_time = time.time() # runtime currently is around 12 min
# Parameters
dataDir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), matdir)
which_tel = CONFIG_INI.get('telescope', 'name')
nb_seg = CONFIG_INI.getint(which_tel, 'nb_subapertures')
filter = CONFIG_INI.get(which_tel, 'filter_name')
fpm = CONFIG_INI.get(which_tel, 'focal_plane_mask') # focal plane mask
lyot_stop = CONFIG_INI.get(which_tel, 'pupil_plane_stop') # Lyot stop
inner_wa = CONFIG_INI.getint(which_tel, 'IWA')
outer_wa = CONFIG_INI.getint(which_tel, 'OWA')
tel_size_px = CONFIG_INI.getint('numerical', 'tel_size_px')
sampling = CONFIG_INI.getfloat('numerical', 'sampling')
#real_samp = sampling * tel_size_px / im_size
zern_number = CONFIG_INI.getint('calibration', 'local_zernike')
zern_mode = util.ZernikeMode(zern_number)
zern_max = CONFIG_INI.getint('zernikes', 'max_zern')
# Import PASTIS matrix
matrix_pastis = None
if matrix_mode == 'numerical':
filename = 'PASTISmatrix_num_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
matrix_pastis = fits.getdata(os.path.join(dataDir, 'matrix_numerical', filename + '.fits'))
elif matrix_mode == 'analytical':
filename = 'PASTISmatrix_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
matrix_pastis = fits.getdata(os.path.join(dataDir, 'matrix_analytical', filename + '.fits'))
# Create random aberration coefficients
aber = np.random.random([nb_seg]) # piston values in input units
#log.info(f'PISTON ABERRATIONS: {aber}')
# Normalize to the RMS value I want
rms_init = util.rms(aber)
aber *= rms.value / rms_init
calc_rms = util.rms(aber) * u.nm
aber *= u.nm # making sure the aberration has the correct units
log.info(f"Calculated RMS: {calc_rms}")
# Remove global piston
aber -= np.mean(aber)
# Make equivalent aberration array that goes into the WebbPSF function
Aber_WSS = np.zeros([nb_seg, zern_max])
Aber_WSS[:,0] = aber.to(u.m).value # index "0" works because we're using piston currently; convert to meters
### BASELINE PSF - NO ABERRATIONS, NO CORONAGRAPH
log.info('Generating baseline PSF from E2E - no coronagraph, no aberrations')
psf_perfect = webbim.nircam_nocoro(filter, np.zeros_like(Aber_WSS))
normp = np.max(psf_perfect)
psf_perfect = psf_perfect / normp
### WEBBPSF
log.info('Generating E2E coro contrast')
start_webb = time.time()
# Set up NIRCam and coronagraph, get PSF
psf_webbpsf = webbim.nircam_coro(filter, fpm, lyot_stop, Aber_WSS)
psf_webbpsf = psf_webbpsf / normp
# Create dark hole
dh_area = util.create_dark_hole(psf_webbpsf, inner_wa, outer_wa, sampling)
# Get the mean contrast from the WebbPSF coronagraph
webb_dh_psf = psf_webbpsf * dh_area
contrast_webbpsf = np.mean(webb_dh_psf[np.where(webb_dh_psf != 0)])
end_webb = time.time()
#TODO: save plots of phase on segmented pupil
# Load in baseline contrast
contrastname = 'base-contrast_' + zern_mode.name + '_' + zern_mode.convention + str(zern_mode.index)
coro_floor = float(np.loadtxt(os.path.join(dataDir, 'calibration', contrastname+'.txt')))
### IMAGE PASTIS
contrast_am = np.nan
if im_pastis:
log.info('Generating contrast from image-PASTIS')
start_impastis = time.time()
# Create calibrated image from analytical model
psf_am, full_psf = impastis.analytical_model(zern_number, aber, cali=True)
# Get the mean contrast from image PASTIS
contrast_am = np.mean(psf_am[np.where(psf_am != 0)]) + coro_floor
end_impastis = time.time()
### MATRIX PASTIS
log.info('Generating contrast from matrix-PASTIS')
start_matrixpastis = time.time()
# Get mean contrast from matrix PASTIS
contrast_matrix = util.pastis_contrast(aber, matrix_pastis) + coro_floor # calculating contrast with PASTIS matrix model
end_matrixpastis = time.time()
ratio = None
if im_pastis:
ratio = contrast_am / contrast_matrix
# Outputs
log.info('\n--- CONTRASTS: ---')
log.info(f'Mean contrast from E2E: {contrast_webbpsf}')
log.info(f'Mean contrast with image PASTIS: {contrast_am}')
log.info(f'Contrast from matrix PASTIS: {contrast_matrix}')
log.info(f'Ratio image PASTIS / matrix PASTIS: {ratio}')
log.info('\n--- RUNTIMES: ---')
log.info(f'E2E: {end_webb-start_webb}sec = {(end_webb-start_webb)/60}min')
if im_pastis:
log.info(f'Image PASTIS: {end_impastis-start_impastis}sec = {(end_impastis-start_impastis)/60}min')
log.info(f'Matrix PASTIS: {end_matrixpastis-start_matrixpastis}sec = {(end_matrixpastis-start_matrixpastis)/60}min')
end_time = time.time()
runtime = end_time - start_time
log.info(f'Runtime for contrast_calculation_simple.py: {runtime} sec = {runtime/60} min')
# Save the PSFs
if im_pastis:
if plotting:
# As fits files
util.write_fits(util.zoom_cen(webb_dh_psf, psf_am.shape[0]/2), os.path.join(dataDir, 'results',
'dh_images_'+matrix_mode, '{:.2e}'.format(rms.value)+str(rms.unit)+'RMS_e2e.fits'))
util.write_fits(psf_am, os.path.join(dataDir, 'results', 'dh_images_'+matrix_mode,
'{:.2e}'.format(rms.value)+str(rms.unit)+'RMS_am.fits'))
# As PDF plot
plt.clf()
plt.figure()
plt.suptitle('{:.2e}'.format(rms.value) + str(rms.unit) + " RMS")
plt.subplot(1, 2, 1)
plt.title("E2E")
plt.imshow(util.zoom_cen(webb_dh_psf, psf_am.shape[0]/2), norm=LogNorm())
plt.colorbar()
plt.subplot(1, 2, 2)
plt.title("PASTIS image")
plt.imshow(psf_am, norm=LogNorm())
plt.colorbar()
plt.savefig(os.path.join(dataDir, 'results', 'dh_images_'+matrix_mode,
'{:.2e}'.format(rms.value)+'DH_PSFs.pdf'))
#TODO: check image rotation, I think there is a 90 degree difference in them for the JWST simulations
return contrast_webbpsf, contrast_am, contrast_matrix
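# Illustrative sketch (not part of the original module): how the comparison
# above might be invoked for a 10 nm RMS piston error with the numerical
# matrix. The data directory name is hypothetical.
def _example_jwst_comparison():
    return contrast_jwst_ana_num('2020-01-01T00-00-00_example',
                                 matrix_mode='numerical', rms=10 * u.nm,
                                 im_pastis=True, plotting=True)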
def contrast_hicat_num(matrix_dir, matrix_mode='hicat', rms=1*u.nm):
"""
Compute the contrast for a random IrisAO misalignment on the HiCAT simulator.
:param matrix_dir: str, directory of saved matrix
:param matrix_mode: str, analytical or numerical; currently only numerical supported
:param rms: astropy quantity, rms wfe to be put randomly on the SM
:return: 2x float, E2E and matrix contrast
"""
import hicat.simulators
# Keep track of time
start_time = time.time() # runtime currently is around 12 min
# Parameters
nb_seg = CONFIG_INI.getint('HiCAT', 'nb_subapertures')
iwa = CONFIG_INI.getfloat('HiCAT', 'IWA')
owa = CONFIG_INI.getfloat('HiCAT', 'OWA')
# Import numerical PASTIS matrix for HiCAT sim
filename = 'PASTISmatrix_num_HiCAT_piston_Noll1'
matrix_pastis = fits.getdata(os.path.join(matrix_dir, filename + '.fits'))
# Create random aberration coefficients
aber = np.random.random([nb_seg]) # piston values in input units
log.info(f'PISTON ABERRATIONS: {aber}')
# Normalize to the RMS value I want
rms_init = util.rms(aber)
aber *= rms.value / rms_init
calc_rms = util.rms(aber) * u.nm
aber *= u.nm # making sure the aberration has the correct units
log.info(f"Calculated RMS: {calc_rms}")
# Remove global piston
aber -= np.mean(aber)
### BASELINE PSF - NO ABERRATIONS, NO CORONAGRAPH
log.info('Generating baseline PSF from E2E - no coronagraph, no aberrations')
hc = hicat.simulators.hicat_sim.HICAT_Sim()
hc.iris_ao = 'iris_ao'
hc.apodizer = 'cnt1_apodizer'
hc.lyot_stop = 'cnt1_apodizer_lyot_stop'
hc.include_fpm = False
psf_perfect = hc.calc_psf(display=False, return_intermediates=False)
normp = np.max(psf_perfect[0].data)
#psf_perfect = psf_perfect[0].data / normp don't actually need the perfect PSF
### HiCAT sim
start_e2e = time.time()
# Set up the HiCAT simulator, get PSF
hc.apodizer = 'cnt1_apodizer'
hc.lyot_stop = 'cnt1_apodizer_lyot_stop'
hc.include_fpm = True
# Calculate coro PSF without aberrations
psf_coro = hc.calc_psf(display=False, return_intermediates=False)
psf_coro = psf_coro[0].data / normp
log.info('Calculating E2E contrast...')
# Put aberration on Iris AO
for nseg in range(nb_seg):
hc.iris_dm.set_actuator(nseg+1, aber[nseg], 0, 0)
psf_hicat = hc.calc_psf(display=False, return_intermediates=False)
psf_hicat = psf_hicat[0].data / normp
# Create DH
dh_mask = util.create_dark_hole(psf_hicat, iwa=iwa, owa=owa, samp=13 / 4)
# Get the mean contrast
hicat_dh_psf = psf_hicat * dh_mask
contrast_hicat = np.mean(hicat_dh_psf[np.where(hicat_dh_psf != 0)])
end_e2e = time.time()
###
# Calculate coronagraph contrast floor
baseline_dh = psf_coro * dh_mask
coro_floor = np.mean(baseline_dh[np.where(baseline_dh != 0)])
## MATRIX PASTIS
log.info('Generating contrast from matrix-PASTIS')
start_matrixpastis = time.time()
# Get mean contrast from matrix PASTIS
contrast_matrix = util.pastis_contrast(aber, matrix_pastis) + coro_floor # calculating contrast with PASTIS matrix model
end_matrixpastis = time.time()
## Outputs
log.info('\n--- CONTRASTS: ---')
log.info(f'Mean contrast from E2E: {contrast_hicat}')
log.info(f'Contrast from matrix PASTIS: {contrast_matrix}')
log.info('\n--- RUNTIMES: ---')
log.info(f'E2E: {end_e2e-start_e2e}sec = {(end_e2e-start_e2e)/60}min')
log.info(f'Matrix PASTIS: {end_matrixpastis-start_matrixpastis}sec = {(end_matrixpastis-start_matrixpastis)/60}min')
end_time = time.time()
runtime = end_time - start_time
log.info(f'Runtime for contrast_calculation_simple.py: {runtime} sec = {runtime/60} min')
return contrast_hicat, contrast_matrix
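# Illustrative sketch (not part of the original module): the matrix propagation
# used above is, in essence, a quadratic form in the segment aberration vector.
# Assuming util.pastis_contrast evaluates c = a^T M a (with the coronagraph
# floor added separately, as done above), a minimal stand-in would be:
def _example_quadratic_contrast(aber_nm, matrix_pastis, coro_floor):
    a = np.asarray(aber_nm, dtype=float)
    return float(a @ matrix_pastis @ a) + coro_floor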
def contrast_luvoir_num(apodizer_choice, matrix_dir, rms=1*u.nm):
"""
Compute the contrast for a random segmented mirror misalignment on the LUVOIR simulator.
:param apodizer_choice: str, which LUVOIR APLC apodizer design to use
:param matrix_dir: str, directory of saved matrix
:param rms: astropy quantity (e.g. m or nm), WFE rms (OPD) to be put randomly over the entire segmented mirror
:return: 2x float, E2E and matrix contrast
"""
# Keep track of time
start_time = time.time()
# Parameters
nb_seg = CONFIG_INI.getint('LUVOIR', 'nb_subapertures')
sampling = 4
# Import numerical PASTIS matrix for LUVOIR sim
filename = 'PASTISmatrix_num_piston_Noll1'
matrix_pastis = fits.getdata(os.path.join(matrix_dir, filename + '.fits'))
# Create random aberration coefficients
aber = np.random.random([nb_seg]) # piston values in input units
log.info(f'PISTON ABERRATIONS: {aber}')
# Normalize to the WFE RMS value I want
rms_init = util.rms(aber)
aber *= rms.value / rms_init
calc_rms = util.rms(aber) * u.nm
aber *= u.nm | |
pass
return sections
@staticmethod
def write_prox_lua(lua_config):
"""
Write a Lua config file for PROX (parameters.lua)
PROX does not allow a space before/after the =, so we need
a custom method
"""
out = []
for key in lua_config:
value = '"' + lua_config[key] + '"'
if key == "__name__":
continue
if value is not None and value != '@':
key = "=".join((key, str(value).replace('\n', '\n\t')))
out.append(key)
else:
key = str(key).replace('\n', '\n\t')
out.append(key)
return os.linesep.join(out)
@staticmethod
def write_prox_config(prox_config):
"""
Write an .ini-format config file for PROX
PROX does not allow a space before/after the =, so we need
a custom method
"""
out = []
for i, (section_name, section) in enumerate(prox_config):
out.append("[{}]".format(section_name))
for index, item in enumerate(section):
key, value = item
if key == "__name__":
continue
if value is not None and value != '@':
key = "=".join((key, str(value).replace('\n', '\n\t')))
out.append(key)
else:
key = str(key).replace('\n', '\n\t')
out.append(key)
return os.linesep.join(out)
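# Illustrative sketch (comments only, hypothetical data): for
#   prox_config = [("global", [("name", "vPE gen")]),
#                  ("core 1", [("mode", "gen")])]
# write_prox_config(prox_config) yields lines with no spaces around '=':
#   [global]
#   name=vPE gen
#   [core 1]
#   mode=gen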
def put_string_to_file(self, s, remote_path):
file_obj = cStringIO(s)
self.ssh_helper.put_file_obj(file_obj, remote_path)
return remote_path
def generate_prox_lua_file(self):
p = OrderedDict()
all_ports = self.vnfd_helper.port_pairs.all_ports
for port_name in all_ports:
port_num = self.vnfd_helper.port_num(port_name)
intf = self.vnfd_helper.find_interface(name=port_name)
vintf = intf['virtual-interface']
p["tester_mac{0}".format(port_num)] = vintf["dst_mac"]
p["src_mac{0}".format(port_num)] = vintf["local_mac"]
return p
def upload_prox_lua(self, config_file, lua_data):
# prox can't handle spaces around ' = ' so use custom method
out = StringIO(self.write_prox_lua(lua_data))
out.seek(0)
remote_path = os.path.join("/tmp", config_file)
self.ssh_helper.put_file_obj(out, remote_path)
return remote_path
def upload_prox_config(self, config_file, prox_config_data):
# prox can't handle spaces around ' = ' so use custom method
out = StringIO(self.write_prox_config(prox_config_data))
out.seek(0)
remote_path = os.path.join("/tmp", config_file)
self.ssh_helper.put_file_obj(out, remote_path)
return remote_path
def build_config_file(self):
task_path = self.scenario_helper.task_path
options = self.scenario_helper.options
config_path = options['prox_config']
config_file = os.path.basename(config_path)
config_path = find_relative_file(config_path, task_path)
self.additional_files = {}
try:
if options['prox_generate_parameter']:
self.lua = []
self.lua = self.generate_prox_lua_file()
if len(self.lua) > 0:
self.upload_prox_lua("parameters.lua", self.lua)
except:
pass
prox_files = options.get('prox_files', [])
if isinstance(prox_files, six.string_types):
prox_files = [prox_files]
for key_prox_file in prox_files:
base_prox_file = os.path.basename(key_prox_file)
key_prox_path = find_relative_file(key_prox_file, task_path)
remote_prox_file = self.copy_to_target(key_prox_path, base_prox_file)
self.additional_files[base_prox_file] = remote_prox_file
self._prox_config_data = self.generate_prox_config_file(config_path)
# copy config to queue so we can read it from traffic_runner process
self.config_queue.put(self._prox_config_data)
self.remote_path = self.upload_prox_config(config_file, self._prox_config_data)
def build_config(self):
self.build_config_file()
options = self.scenario_helper.options
prox_args = options['prox_args']
LOG.info("Provision and start the %s", self.APP_NAME)
self._build_pipeline_kwargs()
self.pipeline_kwargs["args"] = " ".join(
" ".join([k, v if v else ""]) for k, v in prox_args.items())
self.pipeline_kwargs["cfg_file"] = self.remote_path
cmd_template = "sudo bash -c 'cd {tool_dir}; {tool_path} -o cli {args} -f {cfg_file} '"
prox_cmd = cmd_template.format(**self.pipeline_kwargs)
return prox_cmd
# this might be bad, sometimes we want regular ResourceHelper methods, like collect_kpi
class ProxResourceHelper(ClientResourceHelper):
RESOURCE_WORD = 'prox'
PROX_MODE = ""
WAIT_TIME = 3
@staticmethod
def find_pci(pci, bound_pci):
# we have to substring match PCI bus address from the end
return any(b.endswith(pci) for b in bound_pci)
def __init__(self, setup_helper):
super(ProxResourceHelper, self).__init__(setup_helper)
self.mgmt_interface = self.vnfd_helper.mgmt_interface
self._user = self.mgmt_interface["user"]
self._ip = self.mgmt_interface["ip"]
self.done = False
self._vpci_to_if_name_map = None
self.additional_file = {}
self.remote_prox_file_name = None
self.lower = None
self.upper = None
self.step_delta = 1
self.step_time = 0.5
self._test_type = None
@property
def sut(self):
if not self.client:
self.client = self._connect()
return self.client
@property
def test_type(self):
if self._test_type is None:
self._test_type = self.setup_helper.find_in_section('global', 'name', None)
return self._test_type
def run_traffic(self, traffic_profile):
self._queue.cancel_join_thread()
self.lower = 0.0
self.upper = 100.0
traffic_profile.init(self._queue)
# this frees up the run_traffic loop
self.client_started.value = 1
while not self._terminated.value:
# move it all to traffic_profile
self._run_traffic_once(traffic_profile)
def _run_traffic_once(self, traffic_profile):
traffic_profile.execute_traffic(self)
if traffic_profile.done:
self._queue.put({'done': True})
LOG.debug("tg_prox done")
self._terminated.value = 1
# For VNF use ResourceHelper method to collect KPIs directly.
# for TG leave the superclass ClientResourceHelper collect_kpi_method intact
def collect_collectd_kpi(self):
return self._collect_resource_kpi()
def collect_kpi(self):
result = super(ProxResourceHelper, self).collect_kpi()
# add in collectd kpis manually
if result:
result['collect_stats'] = self._collect_resource_kpi()
return result
def terminate(self):
# should not be called, use VNF terminate
raise NotImplementedError()
def up_post(self):
return self.sut # force connection
def execute(self, cmd, *args, **kwargs):
func = getattr(self.sut, cmd, None)
if func:
return func(*args, **kwargs)
def _connect(self, client=None):
"""Run and connect to prox on the remote system """
# De-allocating a large amount of hugepages takes some time. If a new
# PROX instance is started immediately after killing the previous one,
# it might not be able to allocate hugepages, because they are still
# being freed. Hence the -w switch.
# self.connection.execute("sudo killall -w Prox 2>/dev/null")
# prox_cmd = "export TERM=xterm; cd "+ self.bin_path +"; ./Prox -t
# -f ./handle_none-4.cfg"
# prox_cmd = "export TERM=xterm; export RTE_SDK=" + self._dpdk_dir +
# "; " \
# + "export RTE_TARGET=" + self._dpdk_target + ";" \
# + " cd " + self._prox_dir + "; make HW_DIRECT_STATS=y -j50;
# sudo " \
# + "./build/Prox " + prox_args
# log.debug("Starting PROX with command [%s]", prox_cmd)
# thread.start_new_thread(self.ssh_check_quit, (self, self._user,
# self._ip, prox_cmd))
if client is None:
client = ProxSocketHelper()
# try connecting to Prox for 60s
for _ in range(RETRY_SECONDS):
time.sleep(RETRY_INTERVAL)
try:
client.connect(self._ip, PROX_PORT)
except (socket.gaierror, socket.error):
continue
else:
return client
msg = "Failed to connect to prox, please check if system {} accepts connections on port {}"
raise Exception(msg.format(self._ip, PROX_PORT))
class ProxDataHelper(object):
def __init__(self, vnfd_helper, sut, pkt_size, value, tolerated_loss):
super(ProxDataHelper, self).__init__()
self.vnfd_helper = vnfd_helper
self.sut = sut
self.pkt_size = pkt_size
self.value = value
self.tolerated_loss = tolerated_loss
self.port_count = len(self.vnfd_helper.port_pairs.all_ports)
self.tsc_hz = None
self.measured_stats = None
self.latency = None
self._totals_and_pps = None
self.result_tuple = None
@property
def totals_and_pps(self):
if self._totals_and_pps is None:
rx_total, tx_total = self.sut.port_stats(range(self.port_count))[6:8]
pps = self.value / 100.0 * self.line_rate_to_pps()
self._totals_and_pps = rx_total, tx_total, pps
return self._totals_and_pps
@property
def rx_total(self):
return self.totals_and_pps[0]
@property
def tx_total(self):
return self.totals_and_pps[1]
@property
def pps(self):
return self.totals_and_pps[2]
@property
def samples(self):
samples = {}
for port_name, port_num in self.vnfd_helper.ports_iter():
port_rx_total, port_tx_total = self.sut.port_stats([port_num])[6:8]
samples[port_name] = {
"in_packets": port_rx_total,
"out_packets": port_tx_total,
}
return samples
def __enter__(self):
self.check_interface_count()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.make_tuple()
def make_tuple(self):
if self.result_tuple:
return
self.result_tuple = ProxTestDataTuple(
self.tolerated_loss,
self.tsc_hz,
self.measured_stats['delta'].rx,
self.measured_stats['delta'].tx,
self.measured_stats['delta'].tsc,
self.latency,
self.rx_total,
self.tx_total,
self.pps,
)
self.result_tuple.log_data()
@contextmanager
def measure_tot_stats(self):
with self.sut.measure_tot_stats() as self.measured_stats:
yield
def check_interface_count(self):
# do this assert in init? unless we expect interface count to
# change from one run to another run...
assert self.port_count in {1, 2, 4}, \
"Invalid number of ports: 1, 2 or 4 ports only supported at this time"
def capture_tsc_hz(self):
self.tsc_hz = float(self.sut.hz())
def line_rate_to_pps(self):
# FIXME Don't hardcode 10Gb/s
return self.port_count * TEN_GIGABIT / BITS_PER_BYTE / (self.pkt_size + 20)
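# Worked example (comments only): for one 10GbE port and 64-byte packets the
# formula above gives 10e9 / 8 / (64 + 20) ~= 14.88 Mpps, the familiar 10G
# line rate; the +20 accounts for preamble, SFD and inter-frame gap.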
class ProxProfileHelper(object):
__prox_profile_type__ = "Generic"
PROX_CORE_GEN_MODE = "gen"
PROX_CORE_LAT_MODE = "lat"
@classmethod
def get_cls(cls, helper_type):
"""Return class of specified type."""
if not helper_type:
return ProxProfileHelper
for profile_helper_class in utils.itersubclasses(cls):
if helper_type == profile_helper_class.__prox_profile_type__:
return profile_helper_class
return ProxProfileHelper
@classmethod
def make_profile_helper(cls, resource_helper):
return cls.get_cls(resource_helper.test_type)(resource_helper)
def __init__(self, resource_helper):
super(ProxProfileHelper, self).__init__()
self.resource_helper = resource_helper
self._cpu_topology = None
self._test_cores = None
self._latency_cores = None
@property
def cpu_topology(self):
if not self._cpu_topology:
stdout = io.BytesIO()
self.ssh_helper.get_file_obj("/proc/cpuinfo", stdout)
self._cpu_topology = SocketTopology.parse_cpuinfo(stdout.getvalue().decode('utf-8'))
return self._cpu_topology
@property
def test_cores(self):
if not self._test_cores:
self._test_cores = self.get_cores(self.PROX_CORE_GEN_MODE)
return self._test_cores
@property
def latency_cores(self):
if not self._latency_cores:
self._latency_cores = self.get_cores(self.PROX_CORE_LAT_MODE)
return self._latency_cores
@contextmanager
def traffic_context(self, pkt_size, value):
self.sut.stop_all()
self.sut.reset_stats()
try:
self.sut.set_pkt_size(self.test_cores, pkt_size)
self.sut.set_speed(self.test_cores, value)
self.sut.start_all()
yield
finally:
self.sut.stop_all()
def get_cores(self, mode):
cores = []
for section_name, section in self.setup_helper.prox_config_data:
if not section_name.startswith("core"):
continue
for key, value in section:
if key == "mode" and value == mode:
core_tuple = CoreSocketTuple(section_name)
core = core_tuple.find_in_topology(self.cpu_topology)
cores.append(core)
return cores
def run_test(self, pkt_size, duration, value, tolerated_loss=0.0):
data_helper = ProxDataHelper(self.vnfd_helper, self.sut, pkt_size, value, tolerated_loss)
with data_helper, self.traffic_context(pkt_size, value):
with data_helper.measure_tot_stats():
time.sleep(duration)
# Getting statistics to calculate PPS at right speed....
data_helper.capture_tsc_hz()
data_helper.latency = self.get_latency()
return data_helper.result_tuple, data_helper.samples
def get_latency(self):
"""
:return: return lat_min, lat_max, lat_avg
:rtype: list
"""
if self._latency_cores:
return self.sut.lat_stats(self._latency_cores)
return []
def terminate(self):
pass
def __getattr__(self, item):
return getattr(self.resource_helper, item)
class ProxMplsProfileHelper(ProxProfileHelper):
__prox_profile_type__ = "MPLS tag/untag"
def __init__(self, resource_helper):
super(ProxMplsProfileHelper, self).__init__(resource_helper)
self._cores_tuple = None
@property
def mpls_cores(self):
if not self._cores_tuple:
self._cores_tuple = self.get_cores_mpls()
return self._cores_tuple
@property
def tagged_cores(self):
return self.mpls_cores[0]
@property
def plain_cores(self):
return self.mpls_cores[1]
def get_cores_mpls(self):
cores_tagged = []
cores_plain = []
for section_name, section in self.resource_helper.setup_helper.prox_config_data:
if not section_name.startswith("core"):
continue
if all(key != "mode" or | |
is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
row : 'all', int or None (default)
Subplot row index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`. If 'all', addresses all
rows in the specified column(s).
col : 'all', int or None (default)
Subplot col index (starting from 1) for the trace to be
added. Only valid if figure was created using
`plotly.tools.make_subplots`. If 'all', addresses all
columns in the specified row(s).
Returns
-------
Figure
"""
from plotly.graph_objs import Table
new_trace = Table(
cells=cells,
columnorder=columnorder,
columnordersrc=columnordersrc,
columnwidth=columnwidth,
columnwidthsrc=columnwidthsrc,
customdata=customdata,
customdatasrc=customdatasrc,
domain=domain,
header=header,
hoverinfo=hoverinfo,
hoverinfosrc=hoverinfosrc,
hoverlabel=hoverlabel,
ids=ids,
idssrc=idssrc,
meta=meta,
metasrc=metasrc,
name=name,
stream=stream,
uid=uid,
uirevision=uirevision,
visible=visible,
**kwargs
)
return self.add_trace(new_trace, row=row, col=col)
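# Illustrative sketch (comments only): a minimal call of the method above on a
# plotly Figure, with hypothetical data (assumes `import plotly.graph_objects
# as go`).
#
#   fig = go.Figure()
#   fig.add_table(header=dict(values=["name", "score"]),
#                 cells=dict(values=[["a", "b"], [1, 2]]))
#   fig.show()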
def add_treemap(
self,
branchvalues=None,
count=None,
customdata=None,
customdatasrc=None,
domain=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
labels=None,
labelssrc=None,
level=None,
marker=None,
maxdepth=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
outsidetextfont=None,
parents=None,
parentssrc=None,
pathbar=None,
sort=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
tiling=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
row=None,
col=None,
**kwargs
):
"""
Add a new Treemap trace
Visualize hierarchical data from leaves (and/or outer branches)
towards root with rectangles. The treemap sectors are
determined by the entries in "labels" or "ids" and in
"parents".
Parameters
----------
branchvalues
Determines how the items in `values` are summed. When
set to "total", items in `values` are taken to be value
of all its descendants. When set to "remainder", items
in `values` corresponding to the root and the branches
sectors are taken to be the extra part not part of the
sum of the values at their leaves.
count
Determines default for `values` when it is not
provided, by inferring a 1 for each of the "leaves"
and/or "branches", otherwise 0.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
domain
:class:`plotly.graph_objects.treemap.Domain` instance
or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.treemap.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Variables `currentPath`, `root`,
`entry`, `percentRoot`, `percentEntry` and
`percentParent`. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids are used for
object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
labels
Sets the labels of each of the sectors.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
level
Sets the level from which this trace hierarchy is
rendered. Set `level` to `''` to start from the root
node in the hierarchy. Must be an "id" if `ids` is
filled in, otherwise plotly attempts to find a matching
item in `labels`.
marker
:class:`plotly.graph_objects.treemap.Marker` instance
or dict with compatible properties
maxdepth
Sets the number of rendered sectors from any given
`level`. Set `maxdepth` to "-1" to render all the
levels in the hierarchy.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
outsidetextfont
Sets the font used for `textinfo` lying outside the
sector. This option refers to the root of the hierarchy
presented on top left corner of a treemap graph. Please
note that if a hierarchy has multiple root nodes, this
option won't have any effect and `insidetextfont` would
be used.
parents
Sets the parent sectors for each of the sectors. Empty
string items '' are understood to reference the root
node in the hierarchy. If `ids` is filled, `parents`
items are understood to be "ids" themselves. When `ids`
is not set, plotly attempts to find matching items in
`labels`, but beware they must be unique.
parentssrc
Sets the source reference on Chart Studio Cloud for
parents .
pathbar
:class:`plotly.graph_objects.treemap.Pathbar` instance
or dict with compatible properties
sort
Determines whether or not the sectors are reordered
from largest to smallest.
stream
:class:`plotly.graph_objects.treemap.Stream` instance
or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Sets the positions of the `text` elements.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. All attributes
that can be specified per-point (the ones that are
`arrayOk: true`) are available. Variables
`currentPath`, `root`, `entry`, `percentRoot`,
`percentEntry`, `percentParent`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
tiling
:class:`plotly.graph_objects.treemap.Tiling` instance
or dict with compatible properties
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
| |
"""Module to help with executing commands over SSH."""
##
# Copyright 2016 Canonical Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
# from charmhelpers.core import unitdata
# from charmhelpers.core.hookenv import log
import io
import ipaddress
import os
import socket
import shlex
import traceback
import sys
from subprocess import (
check_call,
Popen,
CalledProcessError,
PIPE,
)
def install_dependencies():
# Make sure Python3 + PIP are available
if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"):
# This is needed when running as a k8s charm, as the ubuntu:latest
# image doesn't include either package.
# Update the apt cache
check_call(["apt-get", "update"])
# Install the Python3 package
check_call(["apt-get", "install", "-y", "python3", "python3-pip"],)
# Install the build dependencies for our requirements (paramiko)
check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],)
check_call(
[sys.executable, "-m", "pip", "install", "paramiko"],
)
try:
import paramiko
except Exception as ex:
install_dependencies()
import paramiko
class SSHProxy:
private_key_path = "/root/.ssh/id_sshproxy"
public_key_path = "/root/.ssh/id_sshproxy.pub"
key_type = "rsa"
key_bits = 4096
def __init__(self, hostname: str, username: str, password: str = ""):
self.hostname = hostname
self.username = username
self.password = password
@staticmethod
def generate_ssh_key():
"""Generate a 4096-bit rsa keypair."""
if not os.path.exists(SSHProxy.private_key_path):
cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path,
)
try:
check_call(cmd, shell=True)
except CalledProcessError:
return False
return True
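# Illustrative note (comments only): with the class defaults above the
# generated command is
#   ssh-keygen -t rsa -b 4096 -N '' -f /root/.ssh/id_sshproxy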
@staticmethod
def write_ssh_keys(public, private):
"""Write a 4096-bit rsa keypair."""
with open(SSHProxy.public_key_path, "w") as f:
f.write(public)
f.close()
with open(SSHProxy.private_key_path, "w") as f:
f.write(private)
f.close()
@staticmethod
def get_ssh_public_key():
publickey = ""
if os.path.exists(SSHProxy.private_key_path):
with open(SSHProxy.public_key_path, "r") as f:
publickey = f.read()
return publickey
@staticmethod
def get_ssh_private_key():
privatekey = ""
if os.path.exists(SSHProxy.private_key_path):
with open(SSHProxy.private_key_path, "r") as f:
privatekey = f.read()
return privatekey
@staticmethod
def has_ssh_key():
return True if os.path.exists(SSHProxy.private_key_path) else False
def run(self, cmd: str) -> (str, str):
"""Run a command remotely via SSH.
Note: The previous behavior was to run the command locally if SSH wasn't
configured, but that can lead to cases where execution succeeds when you'd
expect it not to.
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
host = self._get_hostname()
user = self.username
passwd = self.password
key = self.private_key_path
# Make sure we have everything we need to connect
if host and user:
return self._ssh(cmd)
raise Exception("Invalid SSH credentials.")
def sftp(self, local, remote):
client = self._get_ssh_client()
# Create an sftp connection from the underlying transport
sftp = paramiko.SFTPClient.from_transport(client.get_transport())
sftp.put(local, remote)
client.close()
pass
def verify_credentials(self):
"""Verify the SSH credentials."""
try:
(stdout, stderr) = self.run("hostname")
except CalledProcessError as e:
stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output))
except paramiko.ssh_exception.AuthenticationException as e:
stderr = "{}.".format(e)
except paramiko.ssh_exception.BadAuthenticationType as e:
stderr = "{}".format(e.explanation)
except paramiko.ssh_exception.BadHostKeyException as e:
stderr = "Host key mismatch: expected {} but got {}.".format(
e.expected_key, e.got_key,
)
except (TimeoutError, socket.timeout):
stderr = "Timeout attempting to reach {}".format(self._get_hostname())
except Exception as error:
tb = traceback.format_exc()
stderr = "Unhandled exception: {}".format(tb)
if len(stderr) == 0:
return True
return False
###################
# Private methods #
###################
def _get_hostname(self):
"""Get the hostname for the ssh target.
HACK: This function was added to work around an issue where the
ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first
is the floating ip, and the second the non-floating ip, for an Openstack
instance.
"""
return self.hostname.split(";")[0]
def _get_ssh_client(self):
"""Return a connected Paramiko ssh object."""
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pkey = None
# Otherwise, check for the auto-generated private key
if os.path.exists(self.private_key_path):
with open(self.private_key_path) as f:
pkey = paramiko.RSAKey.from_private_key(f)
###########################################################################
# There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL 5) where #
# the server may not send the SSH_MSG_USERAUTH_BANNER message except when #
# responding to an auth_none request. For example, paramiko will attempt #
# to use password authentication when a password is set, but the server #
# could deny that, instead requesting keyboard-interactive. The hack to #
# workaround this is to attempt a reconnect, which will receive the right #
# banner, and authentication can proceed. See the following for more info #
# https://github.com/paramiko/paramiko/issues/432 #
# https://github.com/paramiko/paramiko/pull/438 #
###########################################################################
try:
client.connect(
self.hostname,
port=22,
username=self.username,
                password=self.password,
pkey=pkey,
)
except paramiko.ssh_exception.SSHException as e:
if "Error reading SSH protocol banner" == str(e):
# Once more, with feeling
                client.connect(
                    self.hostname, port=22, username=self.username,
                    password=self.password, pkey=pkey
                )
else:
# Reraise the original exception
raise e
return client
def _ssh(self, cmd):
"""Run an arbitrary command over SSH.
Returns a tuple of (stdout, stderr)
"""
client = self._get_ssh_client()
cmds = " ".join(cmd)
stdin, stdout, stderr = client.exec_command(cmds, get_pty=True)
retcode = stdout.channel.recv_exit_status()
client.close() # @TODO re-use connections
if retcode > 0:
output = stderr.read().strip()
raise CalledProcessError(returncode=retcode, cmd=cmd, output=output)
return (
stdout.read().decode("utf-8").strip(),
stderr.read().decode("utf-8").strip(),
)
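# --- Usage sketch (added for illustration; not part of the original charm code) ---
# A minimal, hedged example of how SSHProxy might be driven: generate a keypair
# if none exists, verify credentials, then run a command. The hostname, username,
# and password below are placeholders, not real endpoints.
if __name__ == "__main__":
    proxy = SSHProxy(hostname="192.0.2.10", username="ubuntu", password="example")
    if not SSHProxy.has_ssh_key():
        SSHProxy.generate_ssh_key()
    if proxy.verify_credentials():
        out, err = proxy.run("hostname")
        print("Remote hostname:", out)
    else:
        print("Could not verify SSH credentials.")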
## OLD ##
# def get_config():
# """Get the current charm configuration.
# Get the "live" kv store every time we need to access the charm config, in
# case it has recently been changed by a config-changed event.
# """
# db = unitdata.kv()
# return db.get('config')
# def get_host_ip():
# """Get the IP address for the ssh host.
# HACK: This function was added to work around an issue where the
# ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first
# is the floating ip, and the second the non-floating ip, for an Openstack
# instance.
# """
# cfg = get_config()
# return cfg['ssh-hostname'].split(';')[0]
# def is_valid_hostname(hostname):
# """Validate the ssh-hostname."""
# print("Hostname: {}".format(hostname))
# if hostname == "0.0.0.0":
# return False
# try:
# ipaddress.ip_address(hostname)
# except ValueError:
# return False
# return True
# def verify_ssh_credentials():
# """Verify the ssh credentials have been installed to the VNF.
# Attempts to run a stock command - `hostname` on the remote host.
# """
# verified = False
# status = ''
# cfg = get_config()
# try:
# host = get_host_ip()
# if is_valid_hostname(host):
# if len(cfg['ssh-hostname']) and len(cfg['ssh-username']):
# cmd = 'hostname'
# status, err = _run(cmd)
# if len(err) == 0:
# verified = True
# else:
# status = "Invalid IP address."
# except CalledProcessError as e:
# status = 'Command failed: {} ({})'.format(
# ' '.join(e.cmd),
# str(e.output)
# )
# except paramiko.ssh_exception.AuthenticationException as e:
# status = '{}.'.format(e)
# except paramiko.ssh_exception.BadAuthenticationType as e:
# status = '{}'.format(e.explanation)
# except paramiko.ssh_exception.BadHostKeyException as e:
# status = 'Host key mismatch: expected {} but got {}.'.format(
# e.expected_key,
# e.got_key,
# )
# except (TimeoutError, socket.timeout):
# status = "Timeout attempting to reach {}".format(cfg['ssh-hostname'])
# except Exception as error:
# tb = traceback.format_exc()
# status = 'Unhandled exception: {}'.format(tb)
# return (verified, status)
# def charm_dir():
# """Return the root directory of the current charm."""
# d = os.environ.get('JUJU_CHARM_DIR')
# if d is not None:
# return d
# return os.environ.get('CHARM_DIR')
# def run_local(cmd, env=None):
# """Run a command locally."""
# if isinstance(cmd, str):
# cmd = shlex.split(cmd) if ' ' in cmd else [cmd]
# if type(cmd) is not list:
# cmd = [cmd]
# p = Popen(cmd,
# env=env,
# shell=True,
# stdout=PIPE,
# stderr=PIPE)
# stdout, stderr = p.communicate()
# retcode = p.poll()
# if retcode > 0:
# raise CalledProcessError(returncode=retcode,
# cmd=cmd,
# output=stderr.decode("utf-8").strip())
# return (stdout.decode('utf-8').strip(), stderr.decode('utf-8').strip())
# def _run(cmd, env=None):
# """Run a command remotely via SSH.
# Note: The previous behavior was to run the command locally if SSH wasn't
# configured, but that can lead to cases where execution succeeds when you'd
# expect it not to.
# """
# if isinstance(cmd, str):
# cmd = shlex.split(cmd)
# if type(cmd) is not list:
# cmd = [cmd]
# cfg = get_config()
# if cfg:
# if all(k in cfg for k in ['ssh-hostname', 'ssh-username',
# 'ssh-password', 'ssh-private-key']):
# host = get_host_ip()
# user = cfg['ssh-username']
# passwd = cfg['ssh-password']
# key = cfg['ssh-private-key'] # DEPRECATED
# if host and user:
# return ssh(cmd, host, user, passwd, key)
# raise Exception("Invalid SSH credentials.")
# def get_ssh_client(host, user, password=None, key=None):
# """Return a connected Paramiko ssh object."""
# client = paramiko.SSHClient()
# client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# pkey = None
# # Check for the DEPRECATED private-key
# if key:
# f = io.StringIO(key)
# pkey = paramiko.RSAKey.from_private_key(f)
# else:
# # Otherwise, check for the auto-generated private key
# if os.path.exists('/root/.ssh/id_juju_sshproxy'):
# with open('/root/.ssh/id_juju_sshproxy', 'r') as f:
# pkey = paramiko.RSAKey.from_private_key(f)
# ###########################################################################
# # There is a bug in some versions of
# Repository: amirdel/dispersion-continua
# Copyright 2017 <NAME>, <EMAIL>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee
# is hereby granted, provided that the above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE
# FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import random as random
import bisect as bs
import numpy as np
from py_dp.dispersion.binning import get_cdf
from py_dp.dispersion.second_order_markov import find_1d_bins
from py_dp.dispersion.dispersion_models import dispersionModelGeneral
# These classes are not used now. Just kept for reference.
class dispModelUncorrStencil(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dt, dx_array, x_max,
inj_location = "start", verbose = True):
super(dispModelUncorrStencil,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.dx_array = dx_array
self.dt = dt
self.table_size = len(dx_array) - 1
self.x_max = x_max
def advance_one_step(self, particle_number, current_index):
x_max = self.x_max
end_of_domain_reached = False
dx_array = self.dx_array
dt = self.dt
table_size = self.table_size
rand_ind = random.randint(0,table_size)
dx = dx_array[rand_ind]
current_t = self.time_array[particle_number, current_index]
current_x = self.x_array[particle_number, current_index]
next_x = current_x + dx
if next_x > x_max:
velocity = dx/dt
distance_to_end = x_max - current_x
dt = distance_to_end/velocity
next_x = x_max
end_of_domain_reached = True
next_index = current_index + 1
self.time_array[particle_number,next_index] = current_t + dt
self.x_array[particle_number,next_index] = next_x
return end_of_domain_reached
def follow_specific_paticle(self,particle_number):
n_steps = self.n_steps
for step in range(n_steps):
#x_array, pore_nr_array, time_array entries are changed inside
#advance_to_next_pore
current_index = step
end_flag = self.advance_one_step(particle_number, current_index)
if end_flag:
freeze_idx = current_index + 1
self.freeze_particle(particle_number, freeze_idx)
break
def follow_all_particles(self):
n_particles = self.n_particles
for particle in range(n_particles):
self.follow_specific_paticle(particle)
def freeze_particle(self,particle_number,current_index):
"""
after a particle gets to the end of the domain it would stay there.
this function would copy the value at current_idx to all following values for x and time
"""
self.x_array[particle_number,current_index:] = self.x_array[particle_number,current_index]
self.time_array[particle_number,current_index:] = self.time_array[particle_number,current_index]
#self.freeze_time[particle_number] = self.time_array[particle_number,current_index]
self.last_index_array[particle_number] = current_index
class dispModelCorrelatedStencil(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dt, x_max, trans_matrix,
class_velocity, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedStencil,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_velocity = class_velocity
self.dt = dt
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def follow_one_particle(self, particle_number):
dt = self.dt
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
class_idx = self.draw_from_init_calss_idx()
next_idx = 0
v = class_velocity[class_idx]
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += dt*v
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v = class_velocity[next_idx]
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
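# --- Illustrative sketch (added; not part of the original module) ---
# The correlated models above choose the next velocity class by bisecting the
# column-wise cumulative sum of the transition matrix, as in choose_next_class().
# The helper below isolates that sampling step; the 3x3 transition matrix is a
# made-up example, not data from this repository.
def _example_markov_class_sampling():
    import bisect
    import random
    import numpy as np
    # Columns are the current class, rows the next class; each column sums to 1.
    trans_matrix = np.array([[0.7, 0.2, 0.1],
                             [0.2, 0.6, 0.3],
                             [0.1, 0.2, 0.6]])
    cdf_matrix = np.cumsum(trans_matrix, axis=0)
    current_class = 0
    # Invert the per-column CDF with a uniform random draw.
    return bisect.bisect(cdf_matrix[:, current_class], random.random())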
class dispModelCorrelatedStencilFix(dispModelCorrelatedStencil):
def __init__(self, n_particles, n_steps, dt, x_max, trans_matrix,
class_velocity, init_class_count, length, inj_location = "start", verbose = True):
super(dispModelCorrelatedStencilFix,self).__init__(n_particles, n_steps, dt, x_max, trans_matrix,
class_velocity, init_class_count, inj_location, verbose)
self.length = length
def follow_one_particle(self, particle_number):
l = self.length
dt = self.dt
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
# initialize the particle velocity
class_idx = self.draw_from_init_calss_idx()
next_idx = 0
v = class_velocity[class_idx]
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
dx = v*dt
abs_dx = abs(dx)
if abs_dx < l:
length_traveled = 0.0
while abs(length_traveled) <= l - abs_dx and out_put_idx < idx_max:
length_traveled += dx
x += dx
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
else:
x += dt * v
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v = class_velocity[next_idx]
class_idx = next_idx
class dispModelCorrelatedSpace(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dx, x_max, trans_matrix,
class_velocity, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedSpace,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_velocity = class_velocity
self.dx = dx
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def follow_one_particle(self, particle_number):
dx = self.dx
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
class_idx = self.draw_from_init_calss_idx()
v = class_velocity[class_idx]
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += np.sign(v)*dx
t += dx/abs(v)
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v = class_velocity[next_idx]
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
class dispModelCorrelatedSpaceKang(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dx, x_max, trans_matrix,
class_log_edges, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedSpaceKang,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_log_edges = class_log_edges
self.class_velocity = self.get_class_velocity(class_log_edges)
self.dx = dx
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def get_class_velocity(self, class_log_edges):
v_log_edges = self.class_log_edges
n_class = len(class_log_edges) - 1
class_velocity = np.zeros(n_class)
for i in range(n_class):
log_value = 0.5*(v_log_edges[i] + v_log_edges[i+1])
class_velocity[i] = np.exp(log_value)
return class_velocity
def draw_from_class_velocity(self, idx):
v_log_edges = self.class_log_edges
x = random.random()
log_v = v_log_edges[idx]*x + v_log_edges[idx+1]*(1-x)
return np.exp(log_v)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def follow_one_particle(self, particle_number):
dx = self.dx
class_velocity = self.class_velocity
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
v_class_idx = self.draw_from_init_calss_idx()
class_idx = 2*v_class_idx
v = self.draw_from_class_velocity(v_class_idx)
v_sign = 1.0
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += v_sign*dx
t += dx/v
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v_class_idx = np.floor(next_idx/2)
v_sign = -1.0 + 2.0*((next_idx - 2*v_class_idx) == 0)
v = self.draw_from_class_velocity(v_class_idx)
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
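# --- Illustrative sketch (added; not part of the original module) ---
# In the Kang-style classes, velocity magnitudes are binned by their logarithm;
# drawing from a class picks a point uniformly in log space between the two bin
# edges, as in draw_from_class_velocity(). The edges below are invented for
# illustration only.
def _example_log_bin_velocity_draw():
    import random
    import numpy as np
    v_log_edges = np.log(np.array([1e-3, 1e-2, 1e-1, 1.0]))  # hypothetical bin edges
    idx = 1  # draw from the second velocity class
    x = random.random()
    log_v = v_log_edges[idx] * x + v_log_edges[idx + 1] * (1 - x)
    return np.exp(log_v)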
class dispModelCorrelatedStencilKang(dispersionModelGeneral):
"""
Class to model plume spreading using a Markov model in time, The velocity is
binned using the binning strategy in Kang 2010
"""
def __init__(self, n_particles, n_steps, dt, x_max, trans_matrix,
class_log_edges, init_class_count, inj_location = "start",
verbose = True):
super(dispModelCorrelatedStencilKang,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_log_edges = class_log_edges
self.dt = dt
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.cdf_matrix = np.cumsum(trans_matrix, axis=0)
def draw_from_init_calss_idx(self):
return bs.bisect(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
cdf = self.cdf_matrix[:, current_class]
return bs.bisect(cdf, random.random())
def draw_from_class_velocity(self, idx):
v_log_edges = self.class_log_edges
x = random.random()
log_v = v_log_edges[idx]*x + v_log_edges[idx+1]*(1-x)
return np.exp(log_v)
def follow_one_particle(self, particle_number):
dt = self.dt
x_array = self.x_array
t_array = self.time_array
x = 0.0
t = 0.0
out_put_idx = 1
#initialize the particle velocity
v_class_idx = self.draw_from_init_calss_idx()
class_idx = 2*v_class_idx
#v is the abs value of velocity
v = self.draw_from_class_velocity(v_class_idx)
v_sign = 1.0
idx_max = self.n_steps + 1
while out_put_idx < idx_max:
x += dt*v*v_sign
t += dt
x_array[particle_number, out_put_idx] = x
t_array[particle_number, out_put_idx] = t
out_put_idx += 1
next_idx = self.choose_next_class(class_idx)
v_class_idx = np.floor(next_idx/2)
v_sign = -1.0 + 2.0*((next_idx - 2*v_class_idx) == 0)
v = self.draw_from_class_velocity(v_class_idx)
class_idx = next_idx
def follow_all_particles(self):
for i in range(self.n_particles):
self.follow_one_particle(i)
class dispModelOrderTwo(dispersionModelGeneral):
def __init__(self, n_particles, n_steps, dx, x_max, trans_matrix,
class_log_edges, init_class_count, inj_location = "start",
verbose = True):
super(dispModelOrderTwo,self).__init__(n_particles, n_steps,
inj_location, verbose)
self.trans_matrix = trans_matrix
self.init_class_count = init_class_count
self.class_log_edges = class_log_edges
self.class_velocity = self.get_class_velocity(class_log_edges)
self.dx = dx
self.x_max = x_max
self.init_class_cdf = get_cdf(init_class_count)
self.n_class = np.sqrt(trans_matrix.shape[0])
self.blocked_particles = []
def get_class_velocity(self, class_log_edges):
v_log_edges = self.class_log_edges
n_class = len(class_log_edges) - 1
class_velocity = np.zeros(n_class)
for i in range(n_class):
log_value = 0.5*(v_log_edges[i] + v_log_edges[i+1])
class_velocity[i] = np.exp(log_value)
return class_velocity
def draw_from_class_velocity(self, idx):
v_log_edges = self.class_log_edges
x = random.random()
log_v = v_log_edges[idx]*x + v_log_edges[idx+1]*(1-x)
return np.exp(log_v)
def draw_from_init_calss_idx(self):
return bs.bisect_right(self.init_class_cdf, random.random())
def choose_next_class(self, current_class):
indptr = self.trans_matrix.indptr
start = indptr[current_class]
end = indptr[current_class+1]
rows = self.trans_matrix.indices[start:end]
values = self.trans_matrix.data[start:end]
if len(values) == 0:
return -12
cdf = get_cdf(values)
return rows[bs.bisect(cdf, random.random())]
    def
# quality = self._select_valid_quality(track, quality)
quality = track_formats.TRACK_FORMAT_MAP[quality_key]
title = tags["title"]
ext = quality["ext"]
if not filename:
filename = title + ext
if not str(filename).endswith(ext):
filename += ext
filename = util.clean_filename(filename)
download_dir = path.normpath(download_dir)
download_path = path.join(download_dir, filename)
util.create_folders(download_dir)
print("Starting download of:", title)
res = self.session.get(url, cookies=self.get_cookies(), stream=True)
chunk_size = 2048
total_filesize = int(res.headers["Content-Length"])
current_filesize = 0
i = 0
pbar = tqdm(res.iter_content(chunk_size), total=total_filesize,
unit="B", unit_scale=True, unit_divisor=1024, leave=False, desc=title)
with open(download_path, "wb") as f:
f.seek(current_filesize)
for chunk in pbar:
chunk_len = len(chunk)
if i % 3 > 0:
f.write(chunk)
elif len(chunk) < chunk_size:
f.write(chunk)
pbar.update(chunk_len)
break
else:
cipher = Cipher(algorithms.Blowfish(blowfish_key),
modes.CBC(
bytes([i for i in range(8)])),
default_backend())
decryptor = cipher.decryptor()
dec_data = decryptor.update(
chunk) + decryptor.finalize()
f.write(dec_data)
chunk_len = len(dec_data)
i += 1
current_filesize += chunk_size
pbar.update(chunk_len)
pbar.close()
if with_metadata:
if ext.lower() == ".flac":
self._write_flac_tags(download_path, track, tags=tags)
else:
self._write_mp3_tags(download_path, track, tags=tags)
if with_lyrics:
lyrics_path = path.join(download_dir, title)
self.save_lyrics(lyric_data, lyrics_path)
print("Track downloaded to:", download_path)
def get_tracks(self, track_ids):
"""Gets the list of the tracks that corresponds with the given {track_ids}
Arguments:
track_ids {list} -- List of track id
Returns:
dict -- List of tracks
"""
data = self._api_call(api_methods.SONG_GET_LIST_DATA, params={
"SNG_IDS": track_ids
})
data = data["results"]
valid_ids = [str(song["SNG_ID"]) for song in data["data"]]
data["errors"] = []
for id in track_ids:
if not str(id) in valid_ids:
data["errors"].append(id)
return data
def get_track_lyrics(self, track_id):
"""Gets the lyrics data of the given {track_id}
Arguments:
track_id {str} -- Track Id
Returns:
            dict -- Dictionary that contains the {info}, and {save} partial function.
"""
data = self._api_call(api_methods.SONG_LYRICS, params={
"SNG_ID": track_id
})
data = data["results"]
return {
"info": data,
"save": partial(self.save_lyrics, data)
}
def save_lyrics(self, lyric_data, save_path):
"""Saves the {lyric_data} into a .lrc file.
Arguments:
lyric_data {dict} -- The 'info' value returned from {get_track_lyrics()}
save_path {str} -- Full path on where the file is to be saved
Returns:
bool -- Operation success
"""
filename = path.basename(save_path)
filename = util.clean_filename(filename)
save_path = path.join(path.dirname(save_path), filename)
if not str(save_path).endswith(".lrc"):
save_path += ".lrc"
util.create_folders(path.dirname(save_path))
with open(save_path, "w", encoding="utf-8") as f:
if not "LYRICS_SYNC_JSON" in lyric_data:
return False
sync_data = lyric_data["LYRICS_SYNC_JSON"]
for line in sync_data:
if str(line["line"]):
f.write("{0}{1}".format(
line["lrc_timestamp"], line["line"]))
f.write("\n")
return True
def get_album(self, album_id):
"""Gets the album data of the given {album_id}
Arguments:
album_id {str} -- Album Id
Returns:
dict -- Album data
"""
# data = self._api_call(api_methods.ALBUM_GET_DATA, params={
# "ALB_ID": album_id,
# "LANG": "en"
# })
# return data["results"]
data = self._legacy_api_call("/album/{0}".format(album_id))
# TODO: maybe better logic?
data["cover_id"] = str(data["cover_small"]).split(
"cover/")[1].split("/")[0]
return data
def get_album_poster(self, album, size=500, ext="jpg"):
"""Gets the album poster as a binary data
Arguments:
album {dict} -- Album data
Keyword Arguments:
size {int} -- Size of the image, {size}x{size} (default: {500})
ext {str} -- Extension of the image, can be ('.jpg' or '.png') (default: {"jpg"})
Returns:
bytes -- Binary data of the image
"""
# return self._get_poster(album["ALB_PICTURE"], size=size, ext=ext)
return self._get_poster(album["cover_id"], size=size, ext=ext)
def get_album_tracks(self, album_id):
"""Gets the tracks of the given {album_id}
Arguments:
album_id {str} -- Album Id
Returns:
list -- List of tracks
"""
data = self._api_call(api_methods.ALBUM_TRACKS, params={
"ALB_ID": album_id,
"NB": -1
})
for i, track in enumerate(data["results"]["data"]):
track["_POSITION"] = i + 1
return data["results"]["data"]
def get_artist(self, artist_id):
"""Gets the artist data from the given {artist_id}
Arguments:
artist_id {str} -- Artist Id
Returns:
dict -- Artist data
"""
data = self._api_call(api_methods.PAGE_ARTIST, params={
"ART_ID": artist_id,
"LANG": "en"
})
return data["results"]
def get_artist_poster(self, artist, size=500, ext="jpg"):
"""Gets the artist poster as a binary data
Arguments:
artist {dict} -- artist data
Keyword Arguments:
size {int} -- Size of the image, {size}x{size} (default: {500})
ext {str} -- Extension of the image, can be ('.jpg' or '.png') (default: {"jpg"})
Returns:
bytes -- Binary data of the image
"""
if not "ART_PICTURE" in artist and "DATA" in artist:
artist = artist["DATA"]
return self._get_poster(artist["ART_PICTURE"], size=size, ext=ext)
def get_artist_discography(self, artist_id):
"""Gets the artist's discography (tracks)
Arguments:
artist_id {str} -- Artist Id
Returns:
dict -- Artist discography data
"""
data = self._api_call(api_methods.ARTIST_DISCOGRAPHY, params={
"ART_ID": artist_id,
"NB": 500,
"NB_SONGS": -1,
"START": 0
})
return data["results"]["data"]
def get_artist_top_tracks(self, artist_id):
"""Gets the top tracks of the given artist
Arguments:
artist_id {str} -- Artist Id
Returns:
list -- List of track
"""
data = self._api_call(api_methods.ARTIST_TOP_TRACKS, params={
"ART_ID": artist_id,
"NB": 100
})
for i, track in enumerate(data["results"]["data"]):
track["_POSITION"] = i + 1
return data["results"]["data"]
def get_playlist(self, playlist_id):
"""Gets the playlist data from the given playlist_id
Arguments:
playlist_id {str} -- Playlist Id
Returns:
dict -- Playlist data
"""
data = self._api_call(api_methods.PAGE_PLAYLIST, params={
"playlist_id": playlist_id,
"LANG": "en"
})
return data["results"]
def get_playlist_tracks(self, playlist_id):
"""Gets the tracks inside the playlist
Arguments:
playlist_id {str} -- Playlist Id
Returns:
list -- List of tracks
"""
data = self._api_call(api_methods.PLAYLIST_TRACKS, params={
"PLAYLIST_ID": playlist_id,
"NB": -1
})
for i, track in enumerate(data["results"]["data"]):
track["_POSITION"] = i + 1
return data["results"]["data"]
def get_suggested_queries(self, query):
"""Gets suggestion based on the given {query}
Arguments:
query {str} -- Query keyword
Returns:
list -- List of suggestions
"""
data = self._api_call(api_methods.GET_SUGGESTED_QUERIES, params={
"QUERY": query
})
results = data["results"]["SUGGESTION"]
for result in results:
if "HIGHLIGHT" in result:
del result["HIGHLIGHT"]
return results
def search_tracks(self, query, limit=30, index=0):
"""Searches tracks on a given query
Arguments:
query {str} -- Query keyword
Keyword Arguments:
limit {int} -- Number of results (default: {30})
index {int} -- Offset (default: {0})
Returns:
list -- List of tracks
"""
return self._legacy_search(api_methods.SEARCH_TRACK, query, limit=limit, index=index)
def search_albums(self, query, limit=30, index=0):
"""Searches albums on a given query
Arguments:
query {str} -- Query keyword
Keyword Arguments:
limit {int} -- Number of results (default: {30})
index {int} -- Offset (default: {0})
Returns:
list -- List of albums
"""
return self._legacy_search(api_methods.SEARCH_ALBUM, query, limit=limit, index=index)
def search_artists(self, query, limit=30, index=0):
"""Searches artists on a given query
Arguments:
query {str} -- Query keyword
Keyword Arguments:
limit {int} -- Number of tracks (default: {30})
index {int} -- Offset (default: {0})
Returns:
list -- List of artists
"""
return self._legacy_search(api_methods.SEARCH_ARTIST, query, limit=limit, index=index)
def search_playlists(self, query, limit=30, index=0):
"""Searches playlists on a given query
Arguments:
query {str} -- Query keyword
Keyword Arguments:
limit {int} -- Number of tracks (default: {30})
index {int} -- Offset (default: {0})
Returns:
list -- List of playlists
"""
return self._legacy_search(api_methods.SEARCH_PLAYLIST, query, limit=limit, index=index)
def _legacy_search(self, method, query, limit=30, index=0):
query = util.clean_query(query)
data = self._legacy_api_call(method, {
"q": query,
"limit": limit,
"index": index
})
return data["data"]
def _get_poster(self, poster_id, size=500, ext="jpg"):
ext = ext.lower()
if ext != "jpg" and ext != "png":
raise ValueError("Image extension should only be jpg or png!")
url = f'https://e-cdns-images.dzcdn.net/images/cover/{poster_id}/{size}x{size}.{ext}'
return {
"image": self.session.get(url, params=networking_settings.HTTP_HEADERS, cookies=self.get_cookies()).content,
"size": (size, size),
"ext": ext,
"mime_type": "image/jpeg" if ext == "jpg" else "image/png"
}
def _write_mp3_tags(self, path, track, tags=None):
if "DATA" in track:
track = track["DATA"]
if not tags:
tags = self.get_track_tags(track)
audio = MP3(path, ID3=EasyID3)
audio.delete()
EasyID3.RegisterTextKey("label", "TPUB")
cover = tags["_albumart"]
del tags["_albumart"]
for key, val in tags.items():
if val:
audio[key] = str(val)
audio.save()
if cover:
cover_handle = ID3(path)
cover_handle["APIC"] = APIC(
type=3,
mime=cover["mime_type"],
data=cover["image"]
)
cover_handle.save(path)
return True
def _write_flac_tags(self, path, track, tags=None):
if "DATA" in track:
track = track["DATA"]
if not tags:
tags = self.get_track_tags(track)
audio = File(path)
audio.delete()
cover = tags["_albumart"]
del tags["_albumart"]
if cover:
pic = mutagen.flac.Picture()
pic.data = cover["image"]
audio.clear_pictures()
audio.add_picture(pic)
for key, val in tags.items():
if val:
audio[key] = str(val)
audio.save()
return True
def _select_valid_quality(self, track, quality):
# If the track does not support the desired quality or if the given quality is not in the TRACK_FORMAT_MAP,
# Use the default quality
valid_qualities = self.get_track_valid_quality(track)
if not quality or not quality in valid_qualities:
default_size = int(track["FILESIZE"])
for key in track_formats.TRACK_FORMAT_MAP.keys():
if f"FILESIZE_{key}" in track and int(track[f"FILESIZE_{key}"]) == default_size:
quality = track_formats.TRACK_FORMAT_MAP[key]
break
else:
quality = track_formats.TRACK_FORMAT_MAP[quality]
return quality
def _api_call(self, method, params={}):
token = "null"
if method != api_methods.GET_USER_DATA:
token = self.token
res = self.session.post(api_urls.API_URL, json=params, params={
"api_version": "1.0",
"api_token": token,
"input": "3",
"method": method
}, cookies=self.get_cookies())
data = res.json()
if "error" in data and data["error"]:
error_type = list(data["error"].keys())[0]
error_message = data["error"][error_type]
# Repository: Derekt2/PyProofpoint
import requests
class ProofpointAPIError(BaseException):
pass
class ProofPoint(object):
'''ProofPoint Threat Insights API Class.
'''
def __init__(self, servicePrincipal, APISecret):
self.auth = (servicePrincipal, APISecret)
self.base_url = "https://tap-api-v2.proofpoint.com"
def get_campaign_ids(self, interval, size=100, page=1):
'''Fetch a list of IDs of campaigns active in a time window sorted by the last updated timestamp.
:param interval:A string containing an ISO8601-formatted interval. The minimum interval is 30 seconds. The maximum interval is 1 day. E.G: 2020-05-01T12:00:00Z/2020-05-01T13:00:00Z, or PT30M/2020-05-01T12:30:00Z
:param size:The maximum number of campaign IDs to produce in the response. Defaults to 100 and the max supported value is 200.
:param page: The page of results to return, in multiples of the specified size (or 100, if no size is explicitly chosen). Defaults to 1.
'''
uri = "/v2/campaign/ids"
params = {"interval": interval,
'page': page,
'size': size}
return self.send_request(uri, params=params)
def get_vap(self, window, size=1000, page=1):
'''Fetch the identities and attack index breakdown of Very Attacked People within your organization for a given period.
:param window:An integer indicating how many days the data should be retrieved for. Accepted values are 14, 30 and 90.
:param size:The maximum number of VAPs to produce in the response. The attackIndex value determine the order of results. Defaults to 1000.
:param page: The page of results to return, in multiples of the specified size (or 1000, if no size is explicitly chosen). Defaults to 1.
'''
uri = "/v2/people/vap"
params = {"window": window,
'page': page,
'size': size}
return self.send_request(uri, params=params)
def get_top_clickers(self, window, size=100, page=1):
'''Fetch the identities and attack index of the top clickers within your organization for a given period. Top clickers are the users who have demonstrated a tendency to click on malicious URLs, regardless of whether the clicks were blocked or not.
:param window:An integer indicating how many days the data should be retrieved for. Accepted values are 14, 30 and 90.
:param size:The maximum number of top clickers to produce in the response. The attackIndex value determine the order of results. Defaults to 100 and the max supported value is 200.
:param page: The page of results to return, in multiples of the specified size (or 100, if no size is explicitly chosen). Defaults to 1.
'''
uri = "/v2/people/top-clickers"
params = {"window": window,
'page': page,
'size': size}
return self.send_request(uri, params=params)
def get_campaign(self, cid):
'''Fetch detailed information for a given campaign.
:param cid:A string representing a campaignID
'''
uri = f"/v2/campaign/{cid}"
return self.send_request(uri)
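    # --- Usage sketch (added; not part of the original library) ---
    # A hedged example of how this client might be called; the credentials and
    # the ISO8601 interval below are placeholders, not working values.
    @staticmethod
    def _usage_sketch():
        pp = ProofPoint("example-service-principal", "example-api-secret")
        campaigns = pp.get_campaign_ids("2020-05-01T12:00:00Z/2020-05-01T13:00:00Z", size=10)
        vaps = pp.get_vap(window=30)
        return campaigns, vaps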
def get_forensic(self, threatId=None, campaignId=None, includeCampaignForensics=False):
'''Fetch forensic information for a given threat or campaign.
:param threatId:A string containing a threat identifier.
:param campaignId:A string containing a campaignId.
:param includeCampaignForensics:A boolean value, defaulting to false. May optionally be used with the threatId parameter. It cannot be used with the campaignId parameter.
'''
uri = f"/v2/forensics"
if not (threatId or campaignId):
raise ValueError("Must provide threatID or CampaignID")
params = {
'threatId': threatId,
'campaignId': campaignId,
'includeCampaignForensics': includeCampaignForensics
}
return self.send_request(uri, params=params)
def get_clicks_blocked(self, interval=None, sinceSeconds=None, sinceTime=None, dataformat='syslog', threatType=None, threatStatus=None):
'''Fetch events for clicks to malicious URLs blocked in the specified time period
:param interval:A string containing an ISO8601-formatted interval. If this interval overlaps with previous requests for data, records from the previous request may be duplicated. The minimum interval is thirty seconds. The maximum interval is one hour.
:param sinceSeconds:An integer representing a time window in seconds from the current API server time. The start of the window is the current API server time, rounded to the nearest minute, less the number of seconds provided. The end of the window is the current API server time rounded to the nearest minute. If JSON output is selected, the end time is included in the returned result.
:param sinceTime:A string containing an ISO8601 date. It represents the start of the data retrieval period. The end of the period is determined by current API server time rounded to the nearest minute. If JSON output is selected, the end time is included in the returned result.
:param dataformat:A string specifying the format in which data is returned. If no format is specified, syslog will be used as the default. The following values are accepted: JSON, syslog
:param threatType:A string specifying which threat type will be returned in the data. If no value is specified, all threat types are returned. The following values are accepted: url, attachment, messageText
:param threatStatus:A string specifying which threat statuses will be returned in the data. If no value is specified, active and cleared threats are returned. The following values are accepted: active, cleared, falsePositive
'''
uri = f"/v2/siem/clicks/blocked"
if not (interval or sinceSeconds or sinceTime):
raise ValueError("Must provide sinceTime or sinceSeconds or interval")
params = {
'interval': interval,
'sinceSeconds': sinceSeconds,
'sinceTime': sinceTime,
'format': dataformat,
'threatType': threatType,
'threatStatus': threatStatus
}
return self.send_request(uri, params=params)
def get_clicks_permitted(self, interval=None, sinceSeconds=None, sinceTime=None, dataformat='syslog', threatType=None, threatStatus=None):
'''Fetch events for clicks to malicious URLs permitted in the specified time period
:param interval:A string containing an ISO8601-formatted interval. If this interval overlaps with previous requests for data, records from the previous request may be duplicated. The minimum interval is thirty seconds. The maximum interval is one hour.
:param sinceSeconds:An integer representing a time window in seconds from the current API server time. The start of the window is the current API server time, rounded to the nearest minute, less the number of seconds provided. The end of the window is the current API server time rounded to the nearest minute. If JSON output is selected, the end time is included in the returned result.
:param sinceTime:A string containing an ISO8601 date. It represents the start of the data retrieval period. The end of the period is determined by current API server time rounded to the nearest minute. If JSON output is selected, the end time is included in the returned result.
:param dataformat:A string specifying the format in which data is returned. If no format is specified, syslog will be used as the default. The following values are accepted: JSON, syslog
:param threatType:A string specifying which threat type will be returned in the data. If no value is specified, all threat types are returned. The following values are accepted: url, attachment, messageText
:param threatStatus:A string specifying which threat statuses will be returned in the data. If no value is specified, active and cleared threats are returned. The following values are accepted: active, cleared, falsePositive
'''
uri = f"/v2/siem/clicks/permitted"
if not (interval or sinceSeconds or sinceTime):
raise ValueError("Must provide sinceTime or sinceSeconds or interval")
params = {
'interval': interval,
'sinceSeconds': sinceSeconds,
'sinceTime': sinceTime,
'format': dataformat,
'threatType': threatType,
'threatStatus': threatStatus
}
return self.send_request(uri, params=params)
def get_messages_blocked(self, interval=None, sinceSeconds=None, sinceTime=None, dataformat='syslog', threatType=None, threatStatus=None):
'''Fetch events for messages blocked in the specified time period which contained a known threat
:param interval:A string containing an ISO8601-formatted interval. If this interval overlaps with previous requests for data, records from the previous request may be duplicated. The minimum interval is thirty seconds. The maximum interval is one hour.
:param sinceSeconds:An integer representing a time window in seconds from the current API server time. The start of the window is the current API server time, rounded to the nearest minute, less the number of seconds provided. The end of the window is the current API server time rounded to the nearest minute. If JSON output is selected, the end time is included in the returned result.
:param sinceTime:A string containing an ISO8601 date. It represents the start of the data retrieval period. The end of the period is determined by current API server time rounded to the nearest minute. If JSON output is selected, the end time is included in the returned result.
        :param
isinstance(ret, str):
if not self._error_reply(msg=retmsg, err_code=EINVALID_COMMAND, txt=ret):
user_message_routing.direct_send_to_user(socket, retmsg)
else:
retmsg.update(ret)
umr.send_message_to_user(retmsg)
elif cmd == MsgType.RESPONSE_SIGNATURE:
if not self._param_check([KeyType.domain_id, KeyType.destination_user_id, KeyType.source_user_id], dat):
self.logger.debug("RESPONSE_SIGNATURE: bad format")
return False, None
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GATHER_SIGNATURE,
dat[KeyType.destination_user_id], dat[KeyType.query_id])
if KeyType.signature in dat:
if KeyType.transaction_data_format in dat:
# -- for backward compatibility
retmsg[KeyType.transaction_data_format] = dat[KeyType.transaction_data_format]
retmsg[KeyType.signature] = dat[KeyType.signature]
retmsg[KeyType.ref_index] = dat[KeyType.ref_index]
elif KeyType.status not in dat:
retmsg[KeyType.status] = EOTHER
retmsg[KeyType.reason] = dat[KeyType.reason]
elif dat[KeyType.status] < ESUCCESS:
retmsg[KeyType.status] = dat[KeyType.status]
retmsg[KeyType.reason] = dat[KeyType.reason]
retmsg[KeyType.source_user_id] = dat[KeyType.source_user_id]
umr.send_message_to_user(retmsg)
elif cmd == MsgType.MESSAGE:
if not self._param_check([KeyType.domain_id, KeyType.source_user_id, KeyType.destination_user_id], dat):
self.logger.debug("MESSAGE: bad format")
return False, None
if KeyType.is_anycast in dat:
dat[KeyType.anycast_ttl] = DEFAULT_ANYCAST_TTL
umr.send_message_to_user(dat)
elif cmd == MsgType.REQUEST_CROSS_REF_VERIFY:
if not self._param_check([KeyType.domain_id, KeyType.source_user_id, KeyType.transaction_id], dat):
self.logger.debug("REQUEST_CROSS_REF_VERIFY: bad format")
return False, None
dat[KeyType.command] = domain0_manager.Domain0Manager.REQUEST_VERIFY
self.networking.send_message_to_a_domain0_manager(domain_id, dat)
elif cmd == MsgType.REQUEST_CROSS_REF_LIST:
if not self._param_check([KeyType.domain_id, KeyType.source_user_id], dat):
self.logger.debug("REQUEST_CROSS_REF_LIST: bad format")
return False, None
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_CROSS_REF_LIST,
dat[KeyType.source_user_id], dat[KeyType.query_id])
domain_list = self.networking.domains[domain_id]['data'].search_domain_having_cross_ref()
# domain_list = list of ["id", "transaction_id", "outer_domain_id", "txid_having_cross_ref"]
retmsg[KeyType.transaction_id_list] = [row[1] for row in domain_list]
umr.send_message_to_user(retmsg)
elif cmd == MsgType.REQUEST_REGISTER_HASH_IN_SUBSYS:
if not self._param_check([KeyType.transaction_id], dat):
self.logger.debug("REQUEST_REGISTER_HASH_IN_SUBSYS: bad format")
return False, None
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_REGISTER_HASH_IN_SUBSYS,
dat[KeyType.source_user_id], dat[KeyType.query_id])
if domain_id in self.ledger_subsystems:
transaction_id = dat[KeyType.transaction_id]
self.ledger_subsystems[domain_id].register_transaction(transaction_id=transaction_id)
umr.send_message_to_user(retmsg)
else:
self._error_reply(msg=retmsg, err_code=ENOSUBSYSTEM, txt="Ledger_subsystem is not activated")
elif cmd == MsgType.REQUEST_VERIFY_HASH_IN_SUBSYS:
if not self._param_check([KeyType.transaction_id], dat):
self.logger.debug("REQUEST_REGISTER_HASH_IN_SUBSYS: bad format")
return False, None
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_VERIFY_HASH_IN_SUBSYS,
dat[KeyType.source_user_id], dat[KeyType.query_id])
if domain_id in self.ledger_subsystems:
transaction_id = dat[KeyType.transaction_id]
result = self.ledger_subsystems[domain_id].verify_transaction(transaction_id=transaction_id)
retmsg[KeyType.merkle_tree] = result
umr.send_message_to_user(retmsg)
else:
self._error_reply(msg=retmsg, err_code=ENOSUBSYSTEM, txt="Ledger_subsystem is not activated")
elif cmd == MsgType.REGISTER:
if domain_id is None:
return False, None
if not self._param_check([KeyType.domain_id, KeyType.source_user_id], dat):
self.logger.debug("REGISTER: bad format")
return False, None
user_id = dat[KeyType.source_user_id]
self.logger.debug("[%s] register_user: %s" % (binascii.b2a_hex(domain_id[:2]),
binascii.b2a_hex(user_id[:4])))
umr.register_user(user_id, socket, on_multiple_nodes=dat.get(KeyType.on_multinodes, False))
return False, (domain_id, user_id)
elif cmd == MsgType.UNREGISTER:
if umr is not None:
umr.unregister_user(dat[KeyType.source_user_id], socket)
return True, None
elif cmd == MsgType.REQUEST_INSERT_NOTIFICATION:
self._register_to_notification_list(domain_id, dat[KeyType.asset_group_id], dat[KeyType.source_user_id])
elif cmd == MsgType.CANCEL_INSERT_NOTIFICATION:
self.remove_from_notification_list(domain_id, dat[KeyType.asset_group_id], dat[KeyType.source_user_id])
elif cmd == MsgType.REQUEST_GET_STATS:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_STATS,
dat[KeyType.source_user_id], dat[KeyType.query_id])
retmsg[KeyType.stats] = copy.deepcopy(self.stats.get_stats())
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.NOTIFY_DOMAIN_KEY_UPDATE:
if domain_id is not None:
self.networking.get_domain_keypair(domain_id)
elif cmd == MsgType.REQUEST_REPAIR:
if KeyType.transaction_id in dat:
dat[KeyType.command] = repair_manager.RepairManager.REQUEST_REPAIR_TRANSACTION
self.networking.domains[domain_id]['repair'].put_message(dat)
elif KeyType.asset_group_id in dat and KeyType.asset_id in dat:
dat[KeyType.command] = repair_manager.RepairManager.REQUEST_REPAIR_ASSET_FILE
self.networking.domains[domain_id]['repair'].put_message(dat)
else:
self.logger.debug("REQUEST_REPAIR: bad format")
return False, None
elif cmd == MsgType.REQUEST_GET_NEIGHBORLIST:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_NEIGHBORLIST,
dat[KeyType.source_user_id], dat[KeyType.query_id])
if domain_id in self.networking.domains:
retmsg[KeyType.domain_id] = domain_id
retmsg[KeyType.neighbor_list] = self.networking.domains[domain_id]['topology'].make_neighbor_list()
else:
retmsg[KeyType.status] = False
retmsg[KeyType.reason] = "No such domain"
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_GET_CONFIG:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_CONFIG,
dat[KeyType.source_user_id], dat[KeyType.query_id])
jsondat = self.config.get_json_config()
retmsg[KeyType.bbc_configuration] = jsondat
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_GET_DOMAINLIST:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_DOMAINLIST,
dat[KeyType.source_user_id], dat[KeyType.query_id])
data = bytearray()
data.extend(to_2byte(len(self.networking.domains)))
for domain_id in self.networking.domains:
data.extend(domain_id)
retmsg[KeyType.domain_list] = bytes(data)
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_GET_FORWARDING_LIST:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_FORWARDING_LIST,
dat[KeyType.source_user_id], dat[KeyType.query_id])
data = bytearray()
data.extend(to_2byte(len(umr.forwarding_entries)))
for user_id in umr.forwarding_entries:
data.extend(user_id)
data.extend(to_2byte(len(umr.forwarding_entries[user_id]['nodes'])))
for node_id in umr.forwarding_entries[user_id]['nodes']:
data.extend(node_id)
retmsg[KeyType.forwarding_list] = bytes(data)
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_GET_USERS:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_USERS,
dat[KeyType.source_user_id], dat[KeyType.query_id])
data = bytearray()
data.extend(to_2byte(len(umr.registered_users)))
for user_id in umr.registered_users.keys():
data.extend(user_id)
retmsg[KeyType.user_list] = bytes(data)
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_GET_NODEID:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_NODEID,
dat[KeyType.source_user_id], dat[KeyType.query_id])
data = bytearray()
data.extend(self.networking.domains[domain_id]['topology'].my_node_id)
retmsg[KeyType.node_id] = bytes(data)
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_GET_NOTIFICATION_LIST:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_GET_NOTIFICATION_LIST,
dat[KeyType.source_user_id], dat[KeyType.query_id])
data = bytearray()
data.extend(to_2byte(len(self.insert_notification_user_list[domain_id])))
for asset_group_id in self.insert_notification_user_list[domain_id].keys():
data.extend(asset_group_id)
data.extend(to_2byte(len(self.insert_notification_user_list[domain_id][asset_group_id])))
for user_id in self.insert_notification_user_list[domain_id][asset_group_id]:
data.extend(user_id)
retmsg[KeyType.notification_list] = bytes(data)
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_SETUP_DOMAIN:
if not self._param_check([KeyType.domain_id], dat):
self.logger.debug("REQUEST_SETUP_DOMAIN: bad format")
return False, None
conf = None
if KeyType.bbc_configuration in dat:
conf = json.loads(dat[KeyType.bbc_configuration])
retmsg = _make_message_structure(None, MsgType.RESPONSE_SETUP_DOMAIN,
dat[KeyType.source_user_id], dat[KeyType.query_id])
retmsg[KeyType.result] = self.networking.create_domain(domain_id=domain_id, config=conf)
if not retmsg[KeyType.result]:
retmsg[KeyType.reason] = "Already exists"
retmsg[KeyType.domain_id] = domain_id
self.config.update_config()
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_CLOSE_DOMAIN:
retmsg = _make_message_structure(None, MsgType.RESPONSE_CLOSE_DOMAIN,
dat[KeyType.source_user_id], dat[KeyType.query_id])
retmsg[KeyType.result] = self.networking.remove_domain(domain_id)
if not retmsg[KeyType.result]:
retmsg[KeyType.reason] = "No such domain"
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_ECDH_KEY_EXCHANGE:
retmsg = _make_message_structure(None, MsgType.RESPONSE_ECDH_KEY_EXCHANGE,
dat[KeyType.source_user_id], dat[KeyType.query_id])
privatekey_for_ecdh, peer_pub_key_to_send, my_keyname = message_key_types.get_ECDH_parameters()
if privatekey_for_ecdh is None:
return False, None
nonce = dat[KeyType.nonce]
rand = dat[KeyType.random]
shared_key = message_key_types.derive_shared_key(privatekey_for_ecdh, dat[KeyType.ecdh], rand)
retmsg[KeyType.ecdh] = peer_pub_key_to_send
retmsg[KeyType.nonce] = nonce
retmsg[KeyType.random] = rand
retmsg[KeyType.hint] = my_keyname
user_message_routing.direct_send_to_user(socket, retmsg)
message_key_types.set_cipher(shared_key, nonce, my_keyname, dat[KeyType.hint])
umr.set_aes_name(socket, my_keyname)
elif cmd == MsgType.DOMAIN_PING:
if not self._param_check([KeyType.domain_id, KeyType.source_user_id, KeyType.port_number], dat):
return False, None
ipv4 = dat.get(KeyType.ipv4_address, None)
ipv6 = dat.get(KeyType.ipv6_address, None)
if ipv4 is None and ipv6 is None:
return False, None
port = dat[KeyType.port_number]
self.networking.send_domain_ping(domain_id, ipv4, ipv6, port)
elif cmd == MsgType.REQUEST_SET_STATIC_NODE:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_SET_STATIC_NODE,
dat[KeyType.source_user_id], dat[KeyType.query_id])
retmsg[KeyType.domain_id] = domain_id
node_info = dat.get(KeyType.node_info, None)
if node_info is None:
retmsg[KeyType.result] = False
else:
self.networking.add_neighbor(domain_id, *node_info, is_static=True)
self.config.update_config()
retmsg[KeyType.result] = True
user_message_routing.direct_send_to_user(socket, retmsg)
elif cmd == MsgType.REQUEST_MANIP_LEDGER_SUBSYS:
retmsg = _make_message_structure(domain_id, MsgType.RESPONSE_MANIP_LEDGER_SUBSYS,
dat[KeyType.source_user_id], dat[KeyType.query_id])
if self.ledger_subsystems[domain_id] is not None:
if dat[KeyType.ledger_subsys_manip]:
self.ledger_subsystems[domain_id].enable()
else:
self.ledger_subsystems[domain_id].disable()
user_message_routing.direct_send_to_user(socket, retmsg)
else:
self._error_reply(msg=retmsg, err_code=ENOSUBSYSTEM, txt="Ledger_subsystem is not installed")
else:
self.logger.error("Bad command/response: %s" % cmd)
return False, None
def _register_to_notification_list(self, domain_id, asset_group_id, user_id):
"""Register user_id in insert completion notification list
Args:
domain_id (bytes): target domain_id
asset_group_id (bytes): target asset_group_id of which you want to get notification about the insertion
user_id (bytes): user_id that registers in the list
"""
self.insert_notification_user_list.setdefault(domain_id, dict())
self.insert_notification_user_list[domain_id].setdefault(asset_group_id, set())
self.insert_notification_user_list[domain_id][asset_group_id].add(user_id)
umr = self.networking.domains[domain_id]['user']
umr.send_multicast_join(asset_group_id, permanent=True)
def remove_from_notification_list(self, domain_id, asset_group_id, user_id):
"""Remove entry from insert completion notification list
This method checks validation only.
Args:
domain_id (bytes): target domain_id
asset_group_id (bytes): target asset_group_id of which you want to get notification about the insertion
user_id (bytes): user_id that registers in the list
"""
if domain_id not in self.insert_notification_user_list:
return
if asset_group_id is not None:
if asset_group_id in self.insert_notification_user_list[domain_id]:
self._remove_notification_entry(domain_id, asset_group_id, user_id)
else:
for asset_group_id in list(self.insert_notification_user_list[domain_id]):
self._remove_notification_entry(domain_id, asset_group_id, user_id)
def _remove_notification_entry(self, domain_id, asset_group_id, user_id):
"""Remove entry from insert completion notification list
Args:
domain_id (bytes): target domain_id
asset_group_id (bytes): target asset_group_id of which you want to get notification about the insertion
user_id (bytes): user_id that registers in the list
"""
self.insert_notification_user_list[domain_id][asset_group_id].remove(user_id)
if len(self.insert_notification_user_list[domain_id][asset_group_id]) == 0:
self.insert_notification_user_list[domain_id].pop(asset_group_id, None)
umr = self.networking.domains[domain_id]['user']
umr.send_multicast_leave(asset_group_id)
if len(self.insert_notification_user_list[domain_id]) == 0:
self.insert_notification_user_list.pop(domain_id, None)
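    # --- Illustrative sketch (added; not part of the original core) ---
    # The insert-notification registry above is a nested mapping
    # {domain_id: {asset_group_id: set(user_id)}} built with setdefault and pruned
    # when a set empties. The helper below restates that bookkeeping on plain
    # dicts, independent of the networking layer.
    @staticmethod
    def _notification_registry_sketch():
        registry = dict()
        domain_id, asset_group_id, user_id = b"d0", b"ag0", b"u0"
        # Register a user for insert notifications.
        registry.setdefault(domain_id, dict()).setdefault(asset_group_id, set()).add(user_id)
        # Remove the user again, pruning empty levels as the real methods do.
        registry[domain_id][asset_group_id].discard(user_id)
        if not registry[domain_id][asset_group_id]:
            registry[domain_id].pop(asset_group_id, None)
        if not registry[domain_id]:
            registry.pop(domain_id, None)
        return registry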
def validate_transaction(self, txdata, asset_files=None):
"""Validate transaction by verifying signature
Args:
txdata (bytes): serialized transaction data
asset_files (dict): dictionary of {asset_id: content} for the transaction
Returns:
BBcTransaction: if validation fails, None returns.
int (short): 2-byte value of BBcFormat type or None
"""
txobj, fmt_type = bbclib.deserialize(txdata)
if not txobj:
self.stats.update_stats_increment("transaction", "invalid", 1)
self.logger.error("Fail to deserialize transaction data")
return None, None
txobj.digest()
txobj_is_valid, valid_assets, invalid_assets = bbclib.validate_transaction_object(txobj, asset_files)
if not txobj_is_valid:
self.stats.update_stats_increment("transaction", "invalid", 1)
if len(invalid_assets) > 0:
self.stats.update_stats_increment("asset_file", "invalid", 1)
if txobj_is_valid and len(invalid_assets) == 0:
return txobj, fmt_type
else:
return None, None
def insert_transaction(self, domain_id, txdata, asset_files):
"""Insert transaction into ledger
Args:
domain_id (bytes): target domain_id
txdata (bytes): serialized transaction data
asset_files (dict): dictionary of {asset_id: content} for the transaction
Returns:
dict|str: inserted transaction_id or error message
"""
self.stats.update_stats_increment("transaction", "insert_count", 1)
if domain_id is None:
self.stats.update_stats_increment("transaction", "insert_fail_count", 1)
self.logger.error("No such domain")
return "Set up the domain, first!"
if domain_id == bbclib.domain_global_0:
self.stats.update_stats_increment("transaction", "insert_fail_count", 1)
self.logger.error("Insert is not allowed in domain_global_0")
return "Insert is not allowed in domain_global_0"
txobj, fmt_type = self.validate_transaction(txdata, asset_files)
if txobj is None:
self.stats.update_stats_increment("transaction", "insert_fail_count", 1)
self.logger.error("Bad transaction format")
return "Bad transaction format"
self.logger.debug("[node:%s] insert_transaction %s" %
(self.networking.domains[domain_id]['name'], binascii.b2a_hex(txobj.transaction_id[:4])))
asset_group_ids = self.networking.domains[domain_id]['data'].insert_transaction(txdata, txobj=txobj, fmt_type=fmt_type,
asset_files=asset_files)
if asset_group_ids is None:
self.stats.update_stats_increment("transaction", "insert_fail_count", 1)
self.logger.error("[%s] Fail to insert a transaction into the ledger" % self.networking.domains[domain_id]['name'])
return "Failed to insert a transaction into the ledger"
self.send_inserted_notification(domain_id, asset_group_ids, txobj.transaction_id)
return {KeyType.transaction_id: txobj.transaction_id}
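    # Illustrative usage sketch (hypothetical caller code, kept as comments): insert_transaction()
    # returns a dict on success and an error string on failure, so a caller can branch on the type.
    #
    #   result = core.insert_transaction(domain_id, txdata, asset_files)
    #   if isinstance(result, dict):
    #       inserted_txid = result[KeyType.transaction_id]
    #   else:
    #       logger.error("insert failed: %s" % result)
    #
    # Here `core`, `domain_id`, `txdata` and `asset_files` are assumed to already exist in the caller.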
def send_inserted_notification(self, domain_id, asset_group_ids, transaction_id, only_registered_user=False):
"""Broadcast NOTIFY_INSERTED
Args:
domain_id (bytes): target domain_id
asset_group_ids (list): list of asset_group_ids
            transaction_id (bytes): transaction_id that has just been inserted
only_registered_user (bool): If True, notification is not sent to other nodes
"""
umr = self.networking.domains[domain_id]['user']
destination_users = set()
destination_nodes = set()
for asset_group_id in asset_group_ids:
if domain_id in self.insert_notification_user_list:
if asset_group_id in self.insert_notification_user_list[domain_id]:
for user_id in self.insert_notification_user_list[domain_id][asset_group_id]:
destination_users.add(user_id)
if not only_registered_user:
if asset_group_id in umr.forwarding_entries:
for node_id in umr.forwarding_entries[asset_group_id]['nodes']:
destination_nodes.add(node_id)
if len(destination_users) == 0 and len(destination_nodes) == 0:
return
msg = {
KeyType.domain_id: domain_id,
KeyType.infra_command: data_handler.DataHandler.NOTIFY_INSERTED,
KeyType.command: MsgType.NOTIFY_INSERTED,
KeyType.transaction_id: | |
'americat',
'amethyst',
'amidships',
'amidst',
'amigas',
'amigo',
'amigos',
'amine',
'amir',
'amiss',
'amnesia',
'among',
'amongst',
'amore',
'amos',
'amount',
'amounted',
'amounter',
'amounters',
'amounting',
'amounts',
'amp',
'amphitrite',
'ample',
'amputated',
'amry',
'ams',
'amt',
'amts',
'amuck',
'amulets',
'amuse',
'amused',
'amusement',
"amusement's",
'amusements',
'amuses',
'amusing',
'amy',
'an',
"an'",
'anachronistic',
'analog',
'analytical',
'analyze',
'analyzing',
'anarchy',
'anatomy',
'ancestors',
'anchor',
'anchorage',
'anchored',
'anchoring',
'anchors',
'anchovies',
'ancient',
'anciently',
'ancients',
'and',
'andaba',
'andago',
'andaire',
'andama',
'anddd',
'andi',
'andila',
'andira',
'andoso',
'andrea',
"andrea's",
'andrew',
'andrina',
"andrina's",
'andro',
'andromeda',
'andros',
'andumal',
'andy',
'anegola',
'anegoso',
'anemic',
'anemone',
'anemones',
'anent',
'anew',
'angaba',
'angama',
'angassa',
'ange',
'angel',
"angel's",
'angelfish',
'angelfood',
'angelica',
'angels',
'anger',
'angered',
'angering',
'angers',
'angle',
'angled',
'angler',
"angler's",
'anglers',
'angles',
'angling',
'angrier',
'angries',
'angriest',
'angrily',
'angry',
'angst',
'angus',
'animal',
"animal's",
'animal-talent',
'animal-talents',
'animally',
'animals',
'animate',
'animated',
'animates',
'animatings',
'animation',
'animations',
'animator',
"animator's",
'animators',
'anime',
'anita',
'ankle',
'anklet',
'anklets',
'ankoku',
'ann',
"ann's",
'anna',
"anna's",
'anne',
"anne's",
'anneliese',
"anneliese's",
'annie',
"annie's",
'annihilate',
'annihilated',
'annihilation',
'anniversary',
'annos',
'annotate',
'announce',
'announced',
'announcement',
'announcements',
'announcer',
"announcer's",
'announcers',
'announces',
'announcing',
'annoy',
'annoyance',
"annoyance's",
'annoyances',
'annoyed',
'annoyer',
'annoyers',
'annoying',
'annoyingly',
'annoys',
'annual',
'annually',
'annuals',
'annul',
'anomaly',
'anon',
'anonymity',
'another',
"another's",
'anselmo',
'answer',
'answered',
'answerer',
'answerers',
'answering',
'answers',
'ant',
"ant's",
'antacid',
'antagonist',
'antagonize',
'antagonized',
'antagonizing',
'antama',
'antarctic',
'antassa',
'ante',
'antelope',
"antelope's",
'antelopes',
'antenna',
'antes',
'anthem',
'anther',
'anthers',
'anthill',
"anthill's",
'anthills',
'anthony',
'anthropology',
'anti',
'anti-cog',
'anti-gravity',
'antiano',
'antibacterial',
'antibiotic',
'antibiotics',
'antibodies',
'antic',
'anticipate',
'anticipated',
'anticipating',
'anticipation',
'anticipatively',
'anticlimactic',
'antics',
'antidisestablishmentarianism',
'antigue',
'antik',
'antima',
'antios',
'antique',
'antiques',
'antiros',
'antis',
'antisocial',
'antivirus',
'anton',
"anton's",
'ants',
'antsy',
'antumal',
'anuberos',
'anubi',
'anubos',
'anvil',
'anvils',
'anxieties',
'anxiety',
'anxious',
'anxiously',
'any',
'anybodies',
'anybody',
"anybody's",
'anyhow',
'anymore',
'anyone',
"anyone's",
'anyones',
'anyplace',
'anything',
"anything's",
'anythings',
'anytime',
'anywas',
'anyway',
'anyways',
'anywhere',
'anywheres',
'aoba',
'aobasar',
'aoboshi',
'aoi',
'aoogah',
'aoogahs',
'aoteoroa',
'apart',
'apartment',
'apartments',
'apathetic',
'apathy',
'ape',
"ape's",
'apes',
'apex',
'aphoristic',
'apiece',
'aplenty',
'apocalyptyca',
'apodous',
'apologies',
'apologize',
'apologized',
'apologizes',
'apologizing',
'apology',
"apology's",
'apostles',
'apostrophe',
"apostrophe's",
'apostrophes',
'app',
'appalled',
'apparel',
'apparent',
'apparently',
'appeal',
'appealed',
'appealer',
'appealers',
'appealing',
'appeals',
'appear',
'appearance',
'appearances',
'appeared',
'appearer',
'appearers',
'appearing',
'appears',
'appease',
'appeasing',
'append',
'appendices',
'appendix',
'appetite',
'appetites',
'appetizer',
'appetizers',
'appetizing',
'applaud',
'applauded',
'applauder',
'applauding',
'applauds',
'applause',
'apple',
'apples',
'applesauce',
'appliances',
'applicable',
'applicants',
'application',
"application's",
'applications',
'applied',
'applier',
'appliers',
'applies',
'apply',
'applying',
'appoint',
'appointed',
'appointer',
'appointers',
'appointing',
'appointive',
'appointment',
'appointments',
'appoints',
'appose',
'apposed',
'appreciate',
'appreciated',
'appreciates',
'appreciation',
'appreciative',
'apprehension',
'apprehensive',
'apprentice',
'approach',
'approached',
'approacher',
'approachers',
'approaches',
'approaching',
'appropriate',
'appropriated',
'appropriately',
'appropriates',
'appropriatest',
'appropriating',
'appropriation',
'appropriations',
'appropriative',
'approval',
'approve',
'approved',
'approver',
"approver's",
'approvers',
'approves',
'approving',
'approx',
'approximate',
'approximately',
'apps',
'appt',
'apr',
'apricot',
'april',
"april's",
'apron',
'apt',
'aptly',
'aqua',
'aquablue',
'aquarium',
'aquariums',
'aquarius',
'aquatic',
'aquatta',
'arabia',
'arabian',
'aradia',
'arbitrage',
'arbitrarily',
'arbitrary',
'arbok',
'arbor',
'arboreal',
'arc',
"arc's",
'arcade',
'arcades',
'arcadia',
'arcane',
'arcanine',
'arch',
'archaeology',
'archaic',
'archer',
"archer's",
'archers',
'arches',
'archibald',
'architect',
'architects',
'architecture',
'archway',
'archways',
'arctic',
"arctic's",
'arduous',
'are',
'area',
"area's",
'areas',
"aren't",
'arena',
'arenas',
'arenberg',
"arenberg's",
'arent',
'areserversup',
'areserverup',
'arf',
'arfur',
'arg',
'argentina',
'argg',
'arggest',
'arggg',
'argggg',
'arggggg',
'arggggge',
'argggggg',
'argggggggggggg',
'argggggggh',
'argggghhh',
'argggh',
'arggghhh',
'arggh',
'argghh',
'argghhh',
'argghhhh',
'argh',
'arghgh',
'arghghghggh',
'arghh',
'arghhh',
'arghhhh',
'arghhhhh',
'arghhhhhhhhhhhhhhhhhh',
'argon',
'argue',
'argued',
'arguer',
"arguer's",
'arguers',
'argues',
'arguing',
'argument',
"argument's",
'arguments',
'argust',
'aria',
'ariana',
'arianna',
'ariel',
"ariel's",
'aries',
'aright',
'aril',
'arising',
'arista',
'aristocat',
"aristocat's",
'aristocats',
'aristocratic',
'ark',
'arks',
'arm',
"arm's",
'armada',
'armadas',
'armadillo',
"armadillo's",
'armadillos',
'armchair',
"armchair's",
'armchairs',
'armed',
'armer',
'armers',
'armies',
'arming',
'armlets',
'armoire',
'armor',
'armory',
'armpit',
'arms',
'armstrong',
'army',
"army's",
'aroma',
'aromatic',
'around',
'arr',
'arrack',
'arraignment',
'arrange',
'arranged',
'arrangement',
"arrangement's",
'arrangements',
'arranger',
'arrangers',
'arranges',
'arranging',
'arrant',
'array',
'arrest',
'arrested',
'arresting',
'arrests',
'arrgg',
'arrggg',
'arrgggg',
'arrggghhh',
'arrgghh',
'arrgghhh',
'arrgh',
'arrghh',
'arrghhh',
'arrghhhh',
'arrghhhhhhh',
'arrgonauts',
'arrival',
'arrivals',
'arrive',
'arrived',
'arrivederci',
'arriver',
'arrives',
'arriving',
'arrogant',
'arrow',
'arrowed',
'arrowing',
'arrows',
'arrr',
'arrrr',
'arrrrgh',
'arsis',
'art',
"art's",
'art-talent',
'arte',
'artezza',
'arthritis',
'artichoke',
'article',
"article's",
'articled',
'articles',
'articling',
'articulate',
'articuno',
'artie',
'artifact',
'artifacts',
'artificial',
'artificially',
'artillery',
'artillerymen',
'artist',
"artist's",
'artiste',
'artistic',
'artists',
'arts',
'artwork',
'arty',
'aru',
'aruba',
'arwin',
"arwin's",
'as',
'asap',
'asarion',
'ascended',
'ascending',
'ascent',
'ashame',
'ashamed',
'ashes',
'ashley',
"ashley's",
'ashore',
'ashtray',
'ashy',
'asia',
'aside',
'asides',
'ask',
'askalice',
'asked',
'asker',
'askers',
'asking',
'asks',
'aslan',
"aslan's",
'aslans',
'asleep',
'asp',
'asparagus',
'aspect',
"aspect's",
'aspects',
'aspen',
'asphalt',
'aspiration',
'aspirations',
'aspire',
'aspirin',
'aspiring',
'asps',
'assemble',
'assembled',
'assembler',
'assemblers',
'assembles',
'assembling',
'assembly',
'assert',
'assertive',
'assessment',
'asset',
"asset's",
'assets',
'assign',
'assigned',
'assigning',
'assignment',
'assignments',
'assigns',
'assist',
'assistance',
'assistant',
'assistants',
'assisted',
'assisting',
'assistive',
'assn',
'assoc',
'associate',
'associated',
'associates',
'associating',
'association',
"association's",
'associations',
'associative',
'assorted',
'assortment',
'asst',
'assume',
'assumed',
'assumer',
'assumes',
'assuming',
'assumption',
"assumption's",
'assumptions',
'assurance',
'assure',
'assured',
'assuredly',
'assures',
'aster',
'asterisks',
'asterius',
'astern',
'asteroid',
"asteroid's",
'asteroids',
'asthma',
'astir',
'astonish',
'astonished',
'astonishes',
'astonishing',
'astounded',
'astounds',
'astray',
'astro',
"astro's",
'astro-barrier',
'astron',
'astronaut',
"astronaut's",
'astronauts',
'astrond',
'astronomy',
'astroturf',
'asuna',
'asylum',
'at',
'ate',
'atheist',
'athlete',
'athletes',
'athletic',
'athletics',
'atlantic',
'atlantis',
'atlantyans',
'atlas',
'atm',
"atm's",
'atmosphere',
"atmosphere's",
'atmosphered',
'atmospheres',
'atms',
'atom',
"atom's",
'atomettes',
'atomic',
'atoms',
'atone',
'atonement',
'atop',
'atrocious',
'atrocities',
'atrocity',
'atta',
'attach',
'attached',
'attacher',
'attachers',
'attaches',
'attaching',
'attachment',
'attachments',
'attack',
'attackable',
'attacked',
'attacker',
"attacker's",
'attackers',
'attacking',
'attacks',
'attainable',
'attained',
'attempt',
'attempted',
'attempter',
'attempters',
'attempting',
'attempts',
'attend',
'attendance',
'attendant',
'attended',
'attender',
'attenders',
'attending',
'attends',
'attention',
"attention's",
'attentions',
'attentive',
'attentively',
'attest',
'attic',
"attic's",
'attics',
'attina',
'attire',
'attitude',
"attitude's",
'attitudes',
'attn',
'attorney',
"attorney's",
'attorneys',
'attract',
'attractant',
'attracted',
'attracting',
'attraction',
'attractions',
'attractive',
'attractively',
'attracts',
'attribute',
'attributes',
'attribution',
'attrition',
'attune',
'attuned',
'attunement',
'attunements',
'attunes',
'attuning',
'atty',
'auburn',
'auction',
'audience',
"audience's",
'audiences',
'audio',
'audit',
'audition',
'auditioned',
'audits',
'auf',
'aug',
'aught',
'augmenter',
'august',
'auguste',
'aula',
'aunt',
'auntie',
"auntie's",
'aunties',
'aunts',
'aunty',
'aura',
'aurora',
"aurora's",
'aurorium',
'aurors',
'aurours',
'auspicious',
'auspiciously',
'australia',
"australia's",
'auth',
'authenticity',
'author',
"author's",
'authored',
'authoring',
'authoritative',
'authorities',
'authority',
"authority's",
'authorization',
'authorize',
'authorized',
'authors',
'auto',
"auto's",
'auto-reel',
'autocratic',
'autograph',
'autographed',
'autographs',
'automated',
'automatic',
'automatically',
'automatics',
'automobile',
"automobile's",
'automobiles',
'autopia',
"autopia's",
'autopilot',
'autos',
'autumn',
"autumn's",
'autumns',
'aux',
'av',
'avail',
'availability',
'available',
'avalanche',
'avarice',
'avaricia',
'avast',
'avatar',
"avatar's",
'avatars',
'avater',
'avec',
'avenge',
'avenged',
'avenger',
"avenger's",
'avengers',
'avenging',
'avenue',
'aver',
'average',
'averaged',
'averagely',
'averages',
'averaging',
'aversion',
'averted',
'aviation',
'aviator',
'aviators',
'avid',
'avis',
'avocados',
'avoid',
'avoidance',
'avoided',
'avoider',
'avoiders',
'avoiding',
'avoids',
'aw',
'await',
'awaiting',
'awaits',
'awake',
'awaked',
'awaken',
'awakening',
'awakes',
'awaking',
'award',
'award-winning',
'awarded',
'awarder',
'awarders',
'awarding',
'awards',
'aware',
'awareness',
'awash',
'away',
'awayme',
'awe',
'awed',
'aweigh',
'awesome',
'awesomely',
'awesomeness',
'awesomers',
'awesomus',
'awestruck',
'awful',
'awfully',
'awhile',
'awkward',
'awkwardly',
'awkwardness',
'awl',
'awn',
'awning',
'awnings',
'awoke',
'awry',
'aww',
'axed',
'axel',
'axis',
'axisd',
'axle',
'axles',
'ay',
'aye',
'ayes',
'ayy',
'ayyy',
'aza',
'azamaros',
'azamaru',
'azapi',
'azaria',
'azeko',
'azenor',
'azewana',
'aztec',
"aztec's",
'aztecs',
'b)',
'b-day',
'b-sharp',
'b4',
'babble',
'babbles',
'babbling',
'babied',
'babies',
'baboon',
"baby's",
'babyface',
'babyish',
'babysitter',
'babysitters',
'babysitting',
'baccaneer',
'bacchus',
"bacchus's",
'bachelor',
'bachelors',
'back',
'back-to-school',
'back-up',
'backbiters',
'backbone',
'backbones',
'backcrash',
'backdrop',
'backed',
'backer',
'backers',
'backfall',
'backfire',
'backfired',
'backfires',
'backflip',
'backflips',
'backginty',
'background',
"background's",
'backgrounds',
'backing',
'backpack',
"backpack's",
'backpacking',
'backpacks',
'backpedaling',
'backpedals',
'backs',
'backslash',
'backspace',
'backspaces',
'backspacing',
'backstabbed',
'backstabber',
'backstabbers',
'backstabbing',
'backstreet',
'backstroke',
'backtrack',
'backup',
'backups',
'backward',
'backwardly',
'backwardness',
'backwards',
'backwash',
'backwater',
'backwaters',
'backwoods',
'backyard',
"backyard's",
'backyards',
'bacon',
"bacon's",
'bacons',
'bacteria',
'bad',
'baddest',
'baddie',
'baddies',
'baddy',
'bade',
'badge',
'badger',
"badger's",
'badgered',
'badgering',
'badgers',
'badges',
'badlands',
'badly',
'badness',
'badr',
'baffle',
'baffled',
'bafflement',
'bag',
"bag's",
'bagel',
"bagel's",
'bagelbee',
'bagelberry',
'bagelblabber',
'bagelbocker',
'bagelboing',
'bagelboom',
'bagelbounce',
'bagelbouncer',
'bagelbrains',
'bagelbubble',
'bagelbumble',
'bagelbump',
'bagelbumper',
'bagelburger',
'bagelchomp',
'bagelcorn',
'bagelcrash',
'bagelcrumbs',
'bagelcrump',
'bagelcrunch',
'bageldoodle',
'bageldorf',
'bagelface',
'bagelfidget',
'bagelfink',
'bagelfish',
'bagelflap',
'bagelflapper',
'bagelflinger',
'bagelflip',
'bagelflipper',
'bagelfoot',
'bagelfuddy',
'bagelfussen',
'bagelgadget',
'bagelgargle',
'bagelgloop',
'bagelglop',
'bagelgoober',
'bagelgoose',
'bagelgrooven',
'bagelhoffer',
'bagelhopper',
'bageljinks',
'bagelklunk',
'bagelknees',
'bagelmarble',
'bagelmash',
'bagelmonkey',
'bagelmooch',
'bagelmouth',
'bagelmuddle',
'bagelmuffin',
'bagelmush',
'bagelnerd',
'bagelnoodle',
'bagelnose',
'bagelnugget',
'bagelphew',
'bagelphooey',
'bagelpocket',
'bagelpoof',
'bagelpop',
'bagelpounce',
'bagelpow',
'bagelpretzel',
'bagelquack',
'bagelroni',
'bagels',
'bagelscooter',
'bagelscreech',
'bagelsmirk',
'bagelsnooker',
'bagelsnoop',
'bagelsnout',
'bagelsocks',
'bagelspeed',
'bagelspinner',
'bagelsplat',
'bagelsprinkles',
'bagelsticks',
'bagelstink',
'bagelswirl',
'bagelteeth',
'bagelthud',
'bageltoes',
'bagelton',
'bageltoon',
'bageltooth',
'bageltwist',
'bagelwhatsit',
'bagelwhip',
'bagelwig',
'bagelwoof',
'bagelzaner',
'bagelzap',
'bagelzapper',
'bagelzilla',
'bagelzoom',
'bagg o. wattar',
'baggage',
'bagged',
'bagger',
'baggie',
'bagging',
'bagheera',
"bagheera's",
'bagpipe',
'bags',
"bags'",
'bah',
'baha',
'bahaha',
'bahama',
'bahamas',
'bahano',
'bahh',
'bahhh',
'bahia',
'bahira',
'bai',
'bail',
'bailed',
'bailey',
"bailey's",
'baileys',
'bailing',
'bails',
'bain',
'bait',
'baiter',
'baiters',
'baits',
'bajillion',
'bake',
'baked',
'baker',
"baker's",
'bakers',
'bakery',
'baking',
'bakuraiya',
'balance',
'balanced',
'balancer',
'balancers',
'balances',
'balancing',
'balas',
'balboa',
'balconies',
'balcony',
'bald',
'balding',
'baldness',
'baldy',
'bale',
'baled',
'bales',
'baling',
| |
-1.8,
"efficiencies": 1.6,
"prosecuted": -1.6,
"welling": 1.6,
"louseworts": -0.6,
"lowly": -1.0,
"graciles": 0.6,
"idealistic": 1.8,
"bl": 2.3,
"agreement": 2.2,
"looming": -0.5,
"vitals": 1.1,
"d-:": 1.6,
"bz": 0.4,
"discontented": -1.8,
"stinkpots": -0.7,
"exaggerated": -0.4,
"flirtations": -0.1,
"stressless": 1.6,
"brilliancies": 2.3,
"helpfulness": 1.9,
"forgave": 1.4,
"shylocked": -0.7,
"complainingly": -1.7,
"exaggerates": -0.6,
"gigglingly": 1.1,
"terrorists": -3.1,
"disturbingly": -2.3,
"doubters": -1.3,
"troubleshooting": 0.7,
"doomsayings": -1.5,
"enthusiasms": 2.0,
"criticisms": -0.9,
"sneaky": -0.9,
"safelights": 0.8,
"trusteeship": 0.5,
"dumbcanes": -0.6,
"insensitivity": -1.8,
"graciously": 2.3,
"relaxed": 2.2,
"undeserving": -1.9,
"safelight": 1.1,
"screwballs": -0.3,
"relaxer": 1.6,
"gravers": -1.2,
"inspirits": 0.8,
"charmed": 2.0,
"positivist": 2.0,
"pmfji": 0.3,
"pissant": -1.5,
"revengefulness": -2.2,
"mature": 1.8,
"charmer": 1.9,
"dumbheads": -1.9,
"positivism": 1.6,
"genial": 1.8,
"annoyer": -2.2,
"raging": -2.4,
"haunt": -1.7,
"hopefulness": 1.6,
"troublemaker": -2.0,
"paradox": -0.4,
"homesicknesses": -1.8,
"tard": -2.5,
"preventing": -0.1,
"scapegoat": -1.7,
"energetic": 1.9,
"prosecution": -2.2,
"nerdish": -0.1,
"doomsdays": -2.4,
"defenselessness": -1.3,
"fighter": 0.6,
"valuables": 2.1,
"rescued": 1.8,
"feud": -1.4,
"freer": 1.1,
"fresh": 1.3,
"menace": -2.2,
"stammers": -0.8,
"rescues": 1.3,
"unbelieving": -0.8,
"enjoyably": 1.8,
"enjoyable": 1.9,
"stops": -0.6,
"gladly": 1.4,
"gossip": -0.7,
"screwed up": -1.5,
"bitterroots": -0.2,
"dislike": -1.6,
"carelessly": -1.0,
"toughened": 0.1,
"crueler": -2.3,
"traumatises": -2.2,
"torture": -2.9,
"glamorising": 1.2,
"traumatised": -2.4,
"manipulated": -1.6,
"abduction": -2.8,
"welcomely": 1.9,
"keens": 0.1,
"funneling": -0.1,
"damning": -1.4,
"anxious": -1.0,
"trite": -0.8,
"partyers": 1.1,
"desperations": -2.2,
"honorees": 2.3,
"gravelling": -0.4,
"terrifies": -2.6,
"dynamics": 1.1,
"odd": -1.3,
"expressed": 3.1,
"toughens": -0.2,
"harassing": -2.5,
"advantageously": 1.9,
"scepticism": -0.8,
"respectively": 1.4,
"misgiving": -1.4,
"poverty": -2.3,
"impressionism": 0.8,
"great": 3.1,
"engage": 1.4,
"uneasy": -1.6,
"depressor": -1.8,
"defeat": -2.0,
"impressionist": 1.0,
"cheerfullest": 3.2,
"panicked": -2.0,
"savageries": -1.9,
"distrusted": -2.4,
"lowlifes": -2.2,
"dumbest": -2.3,
"confidence": 2.3,
"cheered": 2.3,
"illegal": -2.6,
"pressurising": -0.6,
"assuring": 1.6,
"doubt": -1.5,
"cheerer": 1.7,
"astound": 1.7,
"brilliantine": 0.8,
"humorless": -1.3,
"stinkiest": -2.1,
"flexibilities": 1.0,
"isolatable": 0.2,
"pettiest": -1.3,
"lowliest": -1.8,
"unequal": -1.4,
"charity": 1.8,
"challenge": 0.3,
"benefitting": 1.9,
"fulfill": 1.9,
"intimidations": -1.4,
"perfecting": 2.3,
"warmers": 1.0,
"disadvantageousness": -1.6,
"ruinousness": -1.0,
"tantrums": -1.5,
"bastardization": -2.4,
"engagingly": 1.5,
"destroying": -2.6,
"joyance": 2.3,
"comedo": 0.3,
"humerous": 1.4,
"delay": -1.3,
"comedy": 1.5,
"intelligent": 2.0,
"blocks": -0.9,
"scornful": -1.8,
"}:(": -2.0,
"await": 0.4,
"}:)": 0.4,
"graveyards": -1.2,
"traumatising": -1.9,
"victimless": 0.6,
"solidarity": 1.2,
"efficiently": 1.7,
"distressfully": -1.7,
"awkwardness": -0.7,
"acquitted": 1.0,
"festively": 2.2,
"allow": 0.9,
"classy": 1.9,
"intellectuals": 1.6,
"doubtful": -1.4,
"apathetically": -0.4,
"warred": -2.4,
"snobbishness": -1.1,
"paranoiacs": -0.7,
"perfect": 2.7,
"easements": 0.4,
"thieved": -1.4,
"challengingly": -0.6,
"decay": -1.7,
"remorseless": -2.3,
"idealities": 1.5,
"stalled": -0.8,
"perfectest": 3.1,
"popularized": 1.9,
"warmongers": -2.8,
"deviltry": -2.8,
"bothering": -1.6,
"kiss": 1.8,
"bashfulness": -0.8,
"popularizer": 1.8,
"popularizes": 1.4,
"ecstacy": 3.3,
"doomster": -2.2,
"meritoriously": 1.3,
"nicely": 1.9,
"delightful": 2.8,
"expels": -1.6,
"embarrassable": -1.6,
"truth": 1.3,
"shortage": -1.0,
"shittimwood": -0.3,
"foetuses": 0.2,
"intellectualness": 1.5,
"joyed": 2.9,
"decays": -1.7,
"irritation": -2.3,
"feudatories": -0.5,
"creditworthy": 2.4,
"perversions": -1.2,
"mediated": 3.1,
"favours": 1.8,
"hilarious": 1.7,
"greeds": -1.0,
"shit": -2.6,
"jackasses": -2.8,
"inferiority": -1.1,
"greedy": -1.3,
"loverly": 2.8,
"loomed": -1.1,
"surely": 1.9,
"dynamometry": 0.6,
"embarrass": -1.2,
"scary": -2.2,
"snobbishly": -1.2,
"sleeplessness": -1.6,
"cunts": -2.9,
"troublemakers": -2.2,
"scare": -2.2,
"humoral": 0.6,
"straining": -1.3,
"dynamists": 0.9,
"opportune": 1.7,
"intellectualizes": 1.8,
"festival": 2.2,
"fuckers": -2.9,
"intellectualized": 1.2,
"exhilarating": 1.7,
"painful": -1.9,
"adopts": 0.7,
"interests": 1.0,
"pressurizes": -0.2,
"pressurizer": 0.1,
"feudalities": -0.4,
"depressing": -1.6,
"pressurized": 0.1,
"applauds": 1.4,
"luckiness": 1.0,
"warfare": -1.2,
"harmonizing": 1.4,
"bother": -1.4,
"luckie": 1.6,
"aggressor": -0.8,
"tortured": -2.6,
"jollity": 1.8,
"tortures": -2.5,
"torturer": -2.3,
"adversative": -1.2,
"misunderstand": -1.5,
"accusing": -0.7,
"foolfish": -0.8,
"lamebrain": -1.6,
"tensioned": -0.4,
"trusting": 1.7,
"poisons": -2.7,
"gently": 2.0,
"reassuring": 1.7,
"gentle": 1.9,
"devotions": 1.8,
"blockbuster": 2.9,
"clearly": 1.7,
"tensioner": -1.6,
"fatalisms": -1.7,
"vested": 0.6,
"brilliances": 2.9,
"tranquility": 1.8,
"pricking": -0.9,
"disturbers": -2.1,
";-)": 1.0,
"toughest": -0.3,
"freestanding": 1.1,
"disgustedly": -3.0,
";-]": 0.7,
"forbiddances": -1.0,
"*)": 0.6,
"amoroso": 2.3,
"vultures": -1.3,
"medal": 2.1,
"fervent": 1.1,
"pettier": -0.3,
"amortize": -0.1,
"stronger": 1.6,
"fiasco": -2.3,
"optionless": -1.7,
"=p": 1.3,
"=|": -0.8,
"assholes": -2.8,
"=d": 2.3,
"=l": -1.2,
"trivial": -0.1,
"disillusion": -1.0,
"wiseass": -1.8,
"championed": 1.2,
"conciliate": 1.0,
"=D": 2.3,
"aversively": -0.8,
"hope": 1.9,
"romanticised": 1.7,
"meditative": 1.4,
"=3": 2.1,
"disappointingly": -1.9,
"intellectually": 1.4,
"killifish": -0.1,
"lucky": 1.8,
"romanticises": 1.3,
"=/": -1.4,
"harmonizers": 1.6,
"defenseless": -1.4,
"antagonist": -1.9,
"spammers": -1.6,
"dishearten": -2.0,
"contemptible": -1.6,
"champers": 0.5,
"contemptibly": -1.4,
"j4f": 1.4,
"favoring": 1.8,
"foolishest": -1.4,
"strengthened": 1.8,
"creationist": 0.8,
"catastrophic": -2.2,
"btd": -2.1,
"defenseman": 0.1,
"rotflol": 3.0,
"strengthener": 1.8,
"fearfullest": -2.5,
"dangered": -2.4,
"creationism": 0.7,
"insulting": -2.2,
"secureness": 1.4,
"distressingly": -2.2,
"injustice": -2.7,
"felony": -2.5,
"champaigns": 0.5,
"harsher": -2.2,
"trustbuster": -0.5,
"fed up": -1.8,
"restricting": -1.6,
"troubleshoots": 0.5,
"virtueless": -1.4,
"obsessiveness": -1.2,
"lawsuit": -0.9,
"absentee": -1.1,
"worriments": -1.9,
"tantrum": -1.8,
"challengers": 0.4,
"innocently": 1.4,
"numbat": 0.2,
"wealthiness": 2.4,
"screwbean": 0.3,
"wtg": 2.1,
"profitableness": 2.4,
"radiances": 1.1,
"suretyship": -0.1,
"thieves": -2.3,
"unlovelier": -1.9,
"waste": -1.8,
">-:": -2.0,
"chastised": -2.2,
"depressively": -2.1,
"neatened": 2.0,
"hateful": -2.2,
"swindling": -2.0,
"violators": -1.9,
"freewheeling": 0.5,
"freewill": 1.0,
"toothless": -1.4,
"x-d": 2.6,
"grinning": 1.5,
"remorselessly": -2.0,
"hahaha": 2.6,
"rigged": -1.5,
"toughnesses": 0.3,
"x-p": 1.7,
"loathes": -1.9,
"agonized": -2.2,
"uncertainty": -1.4,
"appalls": -1.9,
"surprisingly": 1.2,
"optimistic": 1.3,
"agonizes": -2.3,
"lunatic": -2.2,
"</3": -3.0,
"terrorist": -3.7,
"superiorities": 0.8,
"romance": 2.6,
"terrorise": -3.1,
"terrorism": -3.6,
"joyously": 2.9,
"cheeriness": 2.5,
"conflicting": -1.7,
"wows": 2.0,
"wisest": 2.1,
"thks": 1.4,
"disheartenments": -2.2,
"fantasticalness": 1.3,
"supremacists": -1.0,
"expand": 1.3,
"pleasantly": 2.1,
"wimpier": -1.0,
"discounted": 0.2,
"fascinate": 2.4,
"confronted": -0.8,
"enrage": -2.6,
"stenchy": -2.3,
"poisonously": -2.9,
"devilry": -2.8,
"^<_<": 1.4,
"surest": 1.3,
"prizewinner": 2.3,
"dissatisfying": -2.4,
"rewardable": 2.0,
"glum": -2.1,
"miserableness": -2.8,
"freedwomen": 1.3,
"gentlest": 1.8,
"afflicted": -1.5,
"generous": 2.3,
"gloomful": -2.1,
"sickened": -2.5,
"arrest": -1.4,
"optimising": 1.7,
"exempt": 0.4,
"contempts": -1.0,
"sickener": -2.2,
"(o:": 1.6,
"vain": -1.8,
"innocents": 1.1,
"increased": 1.1,
"btdt": -0.1,
"isolators": -0.4,
"humors": 1.6,
"fools": -2.2,
"inquisitive": 0.7,
">_>^": 2.1,
"nerves": -0.4,
"weepers": -1.1,
"lamest": -1.5,
"poor": -2.1,
"2qt": 2.1,
"cheerled": 1.5,
"despisers": -1.6,
"immortal": 1.0,
"championship": 1.9,
"wisecrack": -0.1,
"forgiver": 1.7,
"cruelest": -2.6,
"anguishes": -2.1,
"avoid": -1.2,
"anguished": -1.8,
"woohoo": 2.3,
"passion": 2.0,
"forgiven": 1.6,
"smarties": 1.7,
"blurry": -0.4,
"annoyers": -1.5,
"benefitted": 1.7,
"hooligan": -1.5,
"happing": 1.1,
"pressure": -1.2,
"destructivity": -2.2,
"adversary": -0.8,
"entertainingly": 1.9,
"hiding": -1.2,
"gained": 1.6,
"triviality": -0.5,
"merited": 1.4,
"dynamistic": 1.5,
"charitablenesses": 1.6,
"unattractive": -1.9,
"libertarians": 0.1,
"commitment": 1.6,
"screamed": -1.3,
"indoctrinates": -0.6,
"lowermost": -1.4,
"stamina": 1.2,
"restricts": -1.3,
"encouragements": 2.1,
"frustration": -2.1,
"troubling": -2.5,
"surprisal": 1.5,
"indoctrinated": -0.4,
"humorlessness": -1.4,
"interest": 2.0,
"gratz": 2.0,
"funned": 2.3,
"shyness": -1.3,
"lowered": -0.5,
"funner": 2.2,
"lovely": 2.8,
"triumphant": 2.4,
"excitement": 2.2,
"burdened": -1.7,
"prickliness": -0.6,
"problem": -1.7,
"whoreson": -2.2,
"dumpish": -1.8,
"unsavory": -1.9,
"determinacy": 1.0,
"excellencies": 2.4,
"gracilis": 0.4,
"uglily": -2.1,
"overwhelms": -0.8,
"gallantly": 1.9,
"=-3": 2.0,
"rebelled": -1.0,
"devilkin": -2.4,
"profiteer": 0.8,
"victimise": -1.1,
"freeboot": -0.7,
"=-D": 2.4,
"treason": -1.9,
"huggable": 1.6,
"chance": 1.0,
"badass": -0.6,
"inspiration": 2.4,
"ghost": -1.3,
"frightfully": -1.7,
"=-d": 2.4,
"bastardizes": -1.8,
"radiantly": 1.3,
"despairers": -1.3,
"villainous": -2.0,
"bastardized": -2.0,
"evildoings": -2.5,
"abhor": -2.0,
"moodier": -1.1,
"\\o:": -1.2,
"desirable": 1.3,
"encouragingly": 2.0,
"controversial": -0.8,
"trickier": -0.7,
"integrity": 1.6,
"\\o/": 2.2,
"adorning": 1.0,
"stinks": -1.0,
"stinky": -1.5,
"daze": -0.7,
"stinko": -1.5,
"worth": 0.9,
"profiteroles": 0.5,
"amorous": 1.8,
"misreporting": -1.5,
"optimists": 1.6,
"compassion": 2.0,
"distort": -1.3,
"splendorous": 2.2,
"distractive": -1.6,
"smog": -1.2,
"darlingly": 1.6,
"hesitaters": -1.4,
"exaggerate": -0.6,
"compassionately": 1.7,
"neglectfully": -2.1,
"quaking": -1.5,
"triumph": 2.1,
"adorners": 0.9,
"impressionistic": 1.5,
"violative": -2.4,
"nerdiest": 0.6,
"tensioning": -1.4,
"rigorous": -1.1,
"sweetie": 2.2,
"walkout": -1.3,
"wisewomen": 1.3,
"stinkhorn": -0.2,
"unmotivated": -1.4,
"uncertainness": -1.3,
"festive": 2.0,
"admirers": 1.7,
"richest": 2.4,
"stresses": -2.0,
"wimpiest": -0.9,
"kudos": 2.3,
"rigorously": -0.4,
"gla": 2.5,
"fearfuller": -2.2,
"postpones": -1.1,
"foolishness": -1.8,
"adopt": 0.7,
"miserere": -0.8,
"respectfulnesses": 1.3,
"stressed": -1.4,
"lonesome": -1.5,
"bastardizations": -2.1,
"postponed": -0.8,
"envious": -1.1,
"secure": 1.4,
"darlings": 2.2,
"angrily": -1.8,
"dominative": -0.7,
"cheats": -1.8,
"neuroticism": -0.9,
"sparkling": 1.2,
"achievable": 1.3,
"romantically": 1.8,
"wiseacre": -1.2,
"safeness": 1.5,
"devastation": -1.8,
"neatens": 1.1,
"uneasier": -1.4,
"motivate": 1.6,
"negative": -2.7,
"insult": -2.3,
"antagonism": -1.9,
"agonizingly": -2.3,
"inspiritingly": 2.1,
"abusing": -2.0,
"destructs": -2.4,
"haha": 2.0,
"award": 2.5,
"championships": 2.2,
"@>-->--": 2.1,
"blocking": -1.6,
"tnx": 1.1,
"worn": -1.2,
"resents": | |
# Repository: webertom/event-Python
# -*- coding: utf-8 -*-
"""
This module contains classes, functions and an example (main) for handling AER vision data.
"""
import glob
import cv2
import numpy as np
from win32api import GetSystemMetrics
import timer
class Events(object):
"""
Temporal Difference events.
data: a NumPy Record Array with the following named fields
x: pixel x coordinate, unsigned 16bit int
y: pixel y coordinate, unsigned 16bit int
p: polarity value, boolean. False=off, True=on
ts: timestamp in microseconds, unsigned 64bit int
width: The width of the frame. Default = 304.
height: The height of the frame. Default = 240.
"""
def __init__(self, num_events, width=304, height=240):
"""num_spikes: number of events this instance will initially contain"""
self.data = np.rec.array(None, dtype=[('x', np.uint16), ('y', np.uint16), ('p', np.bool_), ('ts', np.uint64)], shape=(num_events))
self.width = width
self.height = height
def show_em(self):
"""Displays the EM events (grayscale ATIS events)"""
frame_length = 24e3
t_max = self.data.ts[-1]
frame_start = self.data[0].ts
frame_end = self.data[0].ts + frame_length
max_val = 1.16e5
min_val = 1.74e3
val_range = max_val - min_val
thr = np.rec.array(None, dtype=[('valid', np.bool_), ('low', np.uint64), ('high', np.uint64)], shape=(self.height, self.width))
thr.valid.fill(False)
thr.low.fill(frame_start)
thr.high.fill(0)
def show_em_frame(frame_data):
"""Prepare and show a single frame of em data to be shown"""
for datum in np.nditer(frame_data):
ts_val = datum['ts'].item(0)
thr_data = thr[datum['y'].item(0), datum['x'].item(0)]
if datum['p'].item(0) == 0:
thr_data.valid = 1
thr_data.low = ts_val
elif thr_data.valid == 1:
thr_data.valid = 0
thr_data.high = ts_val - thr_data.low
img = 255 * (1 - (thr.high - min_val) / (val_range))
#thr_h = cv2.adaptiveThreshold(thr_h, 255,
#cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 0)
img = np.piecewise(img, [img <= 0, (img > 0) & (img < 255), img >= 255], [0, lambda x: x, 255])
img = img.astype('uint8')
cv2.imshow('img', img)
cv2.waitKey(1)
while frame_start < t_max:
#with timer.Timer() as em_playback_timer:
frame_data = self.data[(self.data.ts >= frame_start) & (self.data.ts < frame_end)]
show_em_frame(frame_data)
frame_start = frame_end + 1
frame_end += frame_length + 1
#print 'showing em frame took %s seconds' %em_playback_timer.secs
cv2.destroyAllWindows()
return
def show_td(self, wait_delay=1):
"""Displays the TD events (change detection ATIS or DVS events)
waitDelay: milliseconds
"""
frame_length = 24e3
t_max = self.data.ts[-1]
frame_start = self.data[0].ts
frame_end = self.data[0].ts + frame_length
td_img = np.ones((self.height, self.width), dtype=np.uint8)
while frame_start < t_max:
frame_data = self.data[(self.data.ts >= frame_start) & (self.data.ts < frame_end)]
if frame_data.size > 0:
td_img.fill(128)
#with timer.Timer() as em_playback_timer:
for datum in np.nditer(frame_data):
td_img[datum['y'].item(0), datum['x'].item(0)] = datum['p'].item(0)
#print 'prepare td frame by iterating events took %s seconds'
#%em_playback_timer.secs
td_img = np.piecewise(td_img, [td_img == 0, td_img == 1, td_img == 128], [0, 255, 128])
cv2.imshow('img', td_img)
cv2.waitKey(wait_delay)
frame_start = frame_end + 1
frame_end = frame_end + frame_length + 1
cv2.destroyAllWindows()
return
def filter_td(self, us_time):
"""Generate a filtered set of event data.
Does not modify instance data
Uses a background activity filter on the events, such that only events which are
correlated with a neighbouring event within 'us_time' microseconds will be allowed
through the filter.
us_time: microseconds
"""
max_x = self.width - 1
max_y = self.height - 1
t0 = np.ones((self.width, self.height)) - us_time - 1
x_prev = 0
y_prev = 0
p_prev = 0
valid_indices = np.ones(len(self.data), np.bool_)
i = 0
with timer.Timer() as ref_timer:
for datum in np.nditer(self.data):
datum_ts = datum['ts'].item(0)
datum_x = datum['x'].item(0)
datum_y = datum['y'].item(0)
datum_p = datum['p'].item(0)
                if x_prev != datum_x or y_prev != datum_y or p_prev != datum_p:
t0[datum_x, datum_y] = -us_time
min_x_sub = max(0, datum_x - 1)
max_x_sub = min(max_x, datum_x + 1)
min_y_sub = max(0, datum_y - 1)
max_y_sub = min(max_y, datum_y + 1)
t0_temp = t0[min_x_sub:(max_x_sub + 1), min_y_sub:(max_y_sub + 1)]
                    if (datum_ts - t0_temp).min() > us_time:
valid_indices[i] = 0
t0[datum_x, datum_y] = datum_ts
x_prev = datum_x
y_prev = datum_y
p_prev = datum_p
i = i + 1
        print('filtering took %s seconds' % ref_timer.secs)
return self.data[valid_indices.astype('bool')]
def sort_order(self):
"""Generate data sorted by ascending ts
Does not modify instance data
Will look through the struct events, and sort all events by the field 'ts'.
In other words, it will ensure events_out.ts is monotonically increasing,
which is useful when combining events from multiple recordings.
"""
#chose mergesort because it is a stable sort, at the expense of more
#memory usage
events_out = np.sort(self.data, order='ts', kind='mergesort')
return events_out
def extract_roi(self, top_left, size, is_normalize=False):
"""Extract Region of Interest
Does not modify instance data
Generates a set of td_events which fall into a rectangular region of interest with
top left corner at 'top_left' and size 'size'
top_left: [x: int, y: int]
size: [width, height]
is_normalize: bool. If True, x and y values will be normalized to the cropped region
"""
min_x = top_left[0]
min_y = top_left[1]
max_x = size[0] + min_x
max_y = size[1] + min_y
extracted_data = self.data[(self.data.x >= min_x) & (self.data.x < max_x) & (self.data.y >= min_y) & (self.data.y < max_y)]
if is_normalize:
self.width = size[0]
self.height = size[1]
extracted_data = np.copy(extracted_data)
extracted_data = extracted_data.view(np.recarray)
extracted_data.x -= min_x
extracted_data.y -= min_y
return extracted_data
def apply_refraction(self, us_time):
"""Implements a refractory period for each pixel.
Does not modify instance data
In other words, if an event occurs within 'us_time' microseconds of
a previous event at the same pixel, then the second event is removed
us_time: time in microseconds
"""
t0 = np.ones((self.width, self.height)) - us_time - 1
valid_indices = np.ones(len(self.data), np.bool_)
#with timer.Timer() as ref_timer:
i = 0
for datum in np.nditer(self.data):
datum_ts = datum['ts'].item(0)
datum_x = datum['x'].item(0)
datum_y = datum['y'].item(0)
if datum_ts - t0[datum_x, datum_y] < us_time:
valid_indices[i] = 0
else:
t0[datum_x, datum_y] = datum_ts
i += 1
#print 'Refraction took %s seconds' % ref_timer.secs
return self.data[valid_indices.astype('bool')]
def write_j_aer(self, filename):
"""
writes the td events in 'td_events' to a file specified by 'filename'
which is compatible with the jAER framework.
To view these events in jAER, make sure to select the DAVIS640 sensor.
"""
import time
y = 479 - self.data.y
#y = td_events.y
y_shift = 22 + 32
x = 639 - self.data.x
#x = td_events.x
x_shift = 12 + 32
p = self.data.p + 1
p_shift = 11 + 32
ts_shift = 0
y_final = y.astype(dtype=np.uint64) << y_shift
x_final = x.astype(dtype=np.uint64) << x_shift
p_final = p.astype(dtype=np.uint64) << p_shift
ts_final = self.data.ts.astype(dtype=np.uint64) << ts_shift
vector_all = np.array(y_final + x_final + p_final + ts_final, dtype=np.uint64)
aedat_file = open(filename, 'wb')
version = '2.0'
aedat_file.write('#!AER-DAT' + version + '\r\n')
aedat_file.write('# This is a raw AE data file - do not edit\r\n')
aedat_file.write \
('# Data format is int32 address, int32 timestamp (8 bytes total), repeated for each event\r\n')
aedat_file.write('# Timestamps tick is 1 us\r\n')
aedat_file.write('# created ' + time.strftime("%d/%m/%Y") \
+ ' ' + time.strftime("%H:%M:%S") \
+ ' by the Python function "write2jAER"\r\n')
aedat_file.write \
('# This function fakes the format of DAVIS640 to allow for the full ATIS address space to be used (304x240)\r\n')
##aedat_file.write(vector_all.astype(dtype='>u8').tostring())
to_write = bytearray(vector_all[::-1])
to_write.reverse()
aedat_file.write(to_write)
#aedat_file.write(vector_all)
#vector_all.tofile(aedat_file)
aedat_file.close()
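    # Illustrative usage sketch (hypothetical, kept as comments so the class body stays unchanged).
    # It assumes some loader that fills an Events instance from a recording, which is not part of
    # this excerpt:
    #
    #   td = read_dataset('recording.bin')           # -> Events instance (assumed helper)
    #   td.data = td.filter_td(us_time=5000)         # drop uncorrelated background activity
    #   td.data = td.apply_refraction(us_time=1000)  # per-pixel refractory period
    #   td.show_td(wait_delay=10)                    # visualize change-detection events
    #   td.write_j_aer('recording.aedat')            # export for the jAER framework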
def present_checkerboard(num_squares):
"""
Presents a checkerboard pattern of size num_squares*num_squares on the screen.
The function will automatically detect the screen size in pixels and assume a
resolution of 96 dpi to provide the square size in mm.
"""
screen_width_pixels = GetSystemMetrics(0)
screen_height_pixels = GetSystemMetrics(1)
#fixed parameters of the setup
figure_border_size = 30 #leave space of 100 pixels on each side of the axes for the figure
#controls etc
#image_border_size = 10 #within the image, create a border of size 10
#pixels to ensure contrast with the outside
#rectangles
#How big is each rectangle in units of pixels?
screen_size_pixels = np.array([screen_width_pixels, screen_height_pixels])
screen_size_mm = 0.00254 * screen_size_pixels / 96
square_size_pixels = int(min(screen_size_pixels - 2 * figure_border_size) / (num_squares + 2))
image_border_size = np.array([1, 2])
image_border_size[0] = (screen_size_pixels[0] - figure_border_size * 2 - square_size_pixels * (num_squares)) / 2
image_border_size[1] = (screen_size_pixels[1] - figure_border_size * 2 - square_size_pixels * (num_squares)) / 2
#How big is each rectangle in units of millimeters?
square_size_mm = screen_size_mm * square_size_pixels / screen_size_pixels
#How big is the checkered part of the image
image_inner_dim = num_squares * square_size_pixels # the dimenstion of the inside of the image (not including the border)
#Create a black image | |
'''Relative path to absolute'''
if (type(relpath) is object or hasattr(relpath, 'read')): # relpath is either an object or file-like, try to get its name
relpath = relpath.name
return os.path.abspath(os.path.expanduser(relpath))
def recwalk(inputpath, sorting=True, folders=False, topdown=True, filetype=None):
    '''Recursively walk through a folder. This provides a means to flatten out the file listing (which is necessary to show a progress bar). This is a generator.'''
noextflag = False
if filetype and isinstance(filetype, list):
filetype = list(filetype) # make a copy to avoid modifying the input variable (in case it gets reused externally)
if '' in filetype: # special case: we accept when there is no extension, then we don't supply to endswith() because it would accept any filetype then, we check this case separately
noextflag = True
filetype.remove('')
filetype = tuple(filetype) # str.endswith() only accepts a tuple, not a list
# If it's only a single file, return this single file
if os.path.isfile(inputpath):
abs_path = fullpath(inputpath)
yield os.path.dirname(abs_path), os.path.basename(abs_path)
# Else if it's a folder, walk recursively and return every files
else:
for dirpath, dirs, files in walk(inputpath, topdown=topdown):
if sorting:
files.sort()
dirs.sort() # sort directories in-place for ordered recursive walking
# return each file
for filename in files:
if not filetype or filename.endswith(filetype) or (noextflag and not '.' in filename):
yield (dirpath, filename) # return directory (full path) and filename
# return each directory
if folders:
for folder in dirs:
yield (dirpath, folder)
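# Illustrative usage sketch (hypothetical paths): recwalk() yields (directory, filename) pairs,
# so the caller joins them back together; extensions can be filtered up front via `filetype`.
#
#   for dirpath, filename in recwalk('/some/folder', filetype=['.csv', '.txt']):
#       print(os.path.join(dirpath, filename))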
def create_dir_if_not_exist(path):
"""Create a directory if it does not already exist, else nothing is done and no error is return"""
if not os.path.exists(path):
os.makedirs(path)
def real_copy(srcfile, dstfile):
"""Copy a file or a folder and keep stats"""
shutil.copyfile(srcfile, dstfile)
shutil.copystat(srcfile, dstfile)
def symbolic_copy(srcfile, dstfile):
"""Create a symlink (symbolic/soft link) instead of a real copy"""
os.symlink(srcfile, dstfile)
def sort_list_a_given_list_b(list_a, list_b):
return sorted(list_a, key=lambda x: list_b.index(x))
def replace_buggy_accents(s, encoding=None):
"""Fix weird encodings that even ftfy cannot fix"""
# todo enhance speed? or is it the new regex on name?
dic_replace = {
'\xc4\x82\xc2\xa8': 'e',
'ĂŠ': 'e',
'Ăť': 'u',
'â': 'a',
'Ă´': 'o',
'°': '°',
'â': "'",
'ĂŞ': 'e',
'ÂŤ': '«',
'Âť': '»',
'Ă': 'a',
'AŠ': 'e',
'AŞ': 'e',
'A¨': 'e',
'A¨': 'e',
'Ă': 'E',
'â˘': '*',
'č': 'e',
'’': '\'',
}
# Convert the patterns to unicode if the input is a unicode string
if isinstance(s, unicode):
dic_replace = {k.decode('utf-8'): v.decode('utf-8') for k,v in dic_replace.items()}
# Replace each pattern with its correct counterpart
for pat, rep in dic_replace.items():
if encoding:
pat = pat.decode(encoding)
rep = rep.decode(encoding)
s = s.replace(pat, rep)
return s
def cleanup_name(s, encoding=None, normalize=True, clean_nonletters=True):
"""Clean a name and remove accentuated characters"""
if not isinstance(s, unicode):
# Decode only if the input string is not already unicode (decoding is from str to unicode, encoding is from unicode to str)
if encoding is None:
encoding = chardet.detect(s)['encoding']
if encoding:
s = s.decode(encoding)
s = _unidecode(s.replace('^', ' '))
if normalize:
s = s.lower().strip()
if clean_nonletters:
s = re.sub('\-+', '-', re.sub('\s+', ' ', re.sub('[^a-zA-Z0-9\-]', ' ', s))).strip().replace('\r', '').replace('\n', '').replace('\t', '').replace(',', ' ').replace(' ', ' ').strip() # clean up spaces, punctuation and double dashes in name
return s
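# Illustrative examples (hypothetical inputs) of what cleanup_name() produces:
#
#   cleanup_name('Dupont^Jean-Pierre ')  # -> 'dupont jean-pierre' (caret replaced, lowercased, trimmed)
#   cleanup_name(u'Émile Noël')          # -> 'emile noel' (accents transliterated by unidecode)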
# Compute best diagnosis for each patient
def compute_best_diag(serie, diag_order=None, persubject=True):
"""Convert a serie to a categorical type and extract the best diagnosis for each subject (patient name must be set as index level 0)
Note: case insensitive and strip spaces automatically
Set persubject to None if you want to do the max or min yourself (this will return the Series configured with discrete datatype),
in this case, do NOT use `max(compute_best_diag(etc..))`, which will output a random value, but rather `compute_best_diag(etc..).max()` (pandas max instead of python native max) which will give you the correct maximum given your specified diag_order."""
if isinstance(serie, basestring):
return serie
if diag_order is None:
diag_order = ['coma', 'vs/uws', 'mcs', 'mcs-', 'mcs+', 'emcs', 'lis'] # from least to best
# Convert to lowercase
diag_order = [x.lower().strip() for x in diag_order]
# Convert to a serie if given a simple list (which will give us access to CategoricalDtype) or if None (so that the rest of the function can work and return an expected Series)
if isinstance(serie, list) or serie is None:
serie = pd.Series(serie, dtype='str')
# Check if our list of diagnosis covers all possible in the database, else raise an error
possible_diags = serie.str.lower().str.strip().dropna().unique()
# If unicode, we convert the diag_order to unicode
if len(serie) > 0 and isinstance(possible_diags[0].lower().strip(), unicode):
diag_order = list_to_unicode(diag_order)
try:
assert not set([x.lower().strip() for x in possible_diags]) - set([x.lower().strip() for x in diag_order])
except Exception as exc:
raise ValueError('The provided list of diagnosis does not cover all possible diagnosis in database. Please fix the list. Here are the possible diagnosis from database: %s' % str(possible_diags))
#for subjname, d in cf_crsr_all.groupby(level=0):
# print(d['CRSr::Computed Outcome'])
# Implement the CategoricalDtype and return the resulting Series
if persubject:
# Return one result per patient
return serie.str.lower().str.strip().astype(pd.api.types.CategoricalDtype(categories=diag_order, ordered=True)).max(level=0)
elif persubject is False:
# Respect the original keys and return one result for each key (can be multilevel, eg subject + date)
return serie.str.lower().str.strip().astype(pd.api.types.CategoricalDtype(categories=diag_order, ordered=True)).groupby(level=range(serie.index.nlevels)).max()
else:
# If None, just return the Serie as-is, and the user can do .max() or .min() or whatever
return serie.str.lower().str.strip().astype(pd.api.types.CategoricalDtype(categories=diag_order, ordered=True))
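# Illustrative usage sketch (hypothetical data): with the patient name as index level 0,
# compute_best_diag() returns the best diagnosis per subject according to diag_order.
#
#   s = pd.Series(['MCS', 'EMCS', 'VS/UWS'],
#                 index=pd.Index(['pat1', 'pat1', 'pat2'], name='name'))
#   compute_best_diag(s)                           # -> pat1: emcs, pat2: vs/uws
#   compute_best_diag(s, persubject=None).max()    # global best over all rows (here: emcs)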
def ordereddict_change_key(d, old, new):
"""Rename the key of an ordered dict without changing the order"""
# from https://stackoverflow.com/a/17747040
d2 = d.copy()
for _ in range(len(d2)):
k, v = d2.popitem(False)
d2[new if old == k else k] = v
return d2
def merge_two_df(df1, df2, col='Name', dist_threshold=0.2, dist_words_threshold=0.4, mode=0, skip_sanity=False, keep_nulls=True, returnmerged=False, keep_lastname_only=False, prependcols=None, fillna=False, fillna_exclude=None, join_on_shared_keys=True, squish=True, verbose=False, **kwargs):
"""Compute the remapping between two dataframes (or a single duplicated dataframe) based on one or multiple columns. Supports similarity matching (normalized character-wise AND words-wise levenshtein distance) for names, and date comparison using provided formatting.
mode=0 is or test, 1 is and test. In other words, this is a join with fuzzy matching on one column (based on name/id) but supporting multiple columns with exact matching for the others.
`keep_nulls=True` if you want to keep all records from both databases, False if you want only the ones that match both databases, 1 or 2 if you want specifically the ones that are in 1 or in 2
`col` can either be a string for a merge based on a single column (usually name), or an OrderedDict of multiple columns names and types, following this formatting: [OrderedDict([('column1_name', 'column1_type'), ('column2_name', 'column2_type')]), OrderedDict([...])] so that you have one ordered dict for each input dataframe, and with the same number of columns (even if the names are different) and in the same order (eg, name is first column, date of acquisition as second column, etc)
If `fillna=True`, subjects with multiple rows/sessions will be squashed and rows with missing infos will be completed from rows where the info is available (in case there are multiple information, they will all be present as a list). This is only useful when merging (and hence argument `col`) is multi-columns. The key columns are never filled, even if `fillna=True`.
If `fillna_exclude` is specified with a list of columns, this list of columns won't be filled (particularly useful for dates).
    If `join_on_shared_keys=True`, when merging on multiple columns and the two dataframes do not supply the same number of key columns, the merge will be done only on the keys shared by both dataframes: this is very convenient to group one dataframe by some keys but not the other one (eg, one is grouped by name and date so both are kept, while the other one is only grouped by name).
    If `squish=True`, each dataframe is squished on its key columns to make them unique, so that other non-key columns get concatenated values. True by default, but if you have two non-overlapping databases, you can set this to False to keep all rows.
"""
### Preparing the input dataframes
# If the key column is in fact a list of columns | |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import six
__license__ = 'GPL v3'
__copyright__ = '2020, <NAME>, 2011, <NAME> <<EMAIL>>'
__docformat__ = 'restructuredtext en'
import logging
logger = logging.getLogger(__name__)
from time import sleep
from datetime import datetime, time
from io import StringIO
from collections import defaultdict
from calibre.utils.ipc.server import Empty, Server
from calibre.utils.ipc.job import ParallelJob
from calibre.constants import numeric_version as calibre_version
from calibre.utils.date import local_tz
# pulls in translation files for _() strings
try:
load_translations()
except NameError:
pass # load_translations() added in calibre 1.9
# ------------------------------------------------------------------------------
#
# Functions to perform downloads using worker jobs
#
# ------------------------------------------------------------------------------
def do_download_worker(book_list,
options,
cpus,
merge=False,
notification=lambda x,y:x):
'''
Coordinator job, to launch child jobs to do downloads.
This is run as a worker job in the background to keep the UI more
responsive and get around any memory leak issues as it will launch
a child job for each book as a worker process
'''
## Now running one BG proc per site, which downloads for the same
## site in serial.
logger.info("CPUs:%s"%cpus)
server = Server(pool_size=cpus)
logger.info(options['version'])
sites_lists = defaultdict(list)
[ sites_lists[x['site']].append(x) for x in book_list if x['good'] ]
totals = {}
# can't do direct assignment in list comprehension? I'm sure it
# makes sense to some pythonista.
# [ totals[x['url']]=0.0 for x in book_list if x['good'] ]
[ totals.update({x['url']:0.0}) for x in book_list if x['good'] ]
# logger.debug(sites_lists.keys())
# Queue all the jobs
jobs_running = 0
for site in sites_lists.keys():
site_list = sites_lists[site]
logger.info(_("Launch background process for site %s:")%site + "\n" +
"\n".join([ x['url'] for x in site_list ]))
# logger.debug([ x['url'] for x in site_list])
args = ['calibre_plugins.fanficfare_plugin.jobs',
'do_download_site',
(site,site_list,options,merge)]
job = ParallelJob('arbitrary_n',
"site:(%s)"%site,
done=None,
args=args)
job._site_list = site_list
job._processed = False
server.add_job(job)
jobs_running += 1
# This server is an arbitrary_n job, so there is a notifier available.
# Set the % complete to a small number to avoid the 'unavailable' indicator
notification(0.01, _('Downloading FanFiction Stories'))
# dequeue the job results as they arrive, saving the results
count = 0
while True:
job = server.changed_jobs_queue.get()
# logger.debug("job get job._processed:%s"%job._processed)
# A job can 'change' when it is not finished, for example if it
# produces a notification.
msg = None
try:
## msg = book['url']
(percent,msg) = job.notifications.get_nowait()
# logger.debug("%s<-%s"%(percent,msg))
if percent == 10.0: # Only when signaling d/l done.
count += 1
totals[msg] = 1.0/len(totals)
# logger.info("Finished: %s"%msg)
else:
totals[msg] = percent/len(totals)
notification(max(0.01,sum(totals.values())), _('%(count)d of %(total)d stories finished downloading')%{'count':count,'total':len(totals)})
except Empty:
pass
# without update, is_finished will never be set. however, we
# do want to get all the notifications for status so we don't
# miss the 'done' ones.
job.update(consume_notifications=False)
# if not job._processed:
# sleep(0.5)
## Can have a race condition where job.is_finished before
## notifications for all downloads have been processed.
## Or even after the job has been finished.
# logger.debug("job.is_finished(%s) or job._processed(%s)"%(job.is_finished, job._processed))
if not job.is_finished:
continue
## only process each job once. We can get more than one loop
## after job.is_finished.
if not job._processed:
# sleep(1)
# A job really finished. Get the information.
## This is where bg proc details end up in GUI log.
## job.details is the whole debug log for each proc.
logger.info("\n\n" + ("="*80) + " " + job.details.replace('\r',''))
# logger.debug("Finished background process for site %s:\n%s"%(job._site_list[0]['site'],"\n".join([ x['url'] for x in job._site_list ])))
for b in job._site_list:
book_list.remove(b)
book_list.extend(job.result)
job._processed = True
jobs_running -= 1
## Can't use individual count--I've seen stories all reported
## finished before results of all jobs processed.
if jobs_running == 0:
book_list = sorted(book_list,key=lambda x : x['listorder'])
logger.info("\n"+_("Download Results:")+"\n%s\n"%("\n".join([ "%(status)s %(url)s %(comment)s" % book for book in book_list])))
good_lists = defaultdict(list)
bad_lists = defaultdict(list)
for book in book_list:
if book['good']:
good_lists[book['status']].append(book)
else:
bad_lists[book['status']].append(book)
order = [_('Add'),
_('Update'),
_('Meta'),
_('Different URL'),
_('Rejected'),
_('Skipped'),
_('Bad'),
_('Error'),
]
j = 0
for d in [ good_lists, bad_lists ]:
for status in order:
if d[status]:
l = d[status]
logger.info("\n"+status+"\n%s\n"%("\n".join([book['url'] for book in l])))
for book in l:
book['reportorder'] = j
j += 1
del d[status]
# just in case a status is added but doesn't appear in order.
for status in d.keys():
logger.info("\n"+status+"\n%s\n"%("\n".join([book['url'] for book in d[status]])))
break
server.close()
# return the book list as the job result
return book_list
def do_download_site(site,book_list,options,merge,notification=lambda x,y:x):
# logger.info(_("Started job for %s")%site)
retval = []
for book in book_list:
# logger.info("%s"%book['url'])
retval.append(do_download_for_worker(book,options,merge,notification))
notification(10.0,book['url'])
return retval
def do_download_for_worker(book,options,merge,notification=lambda x,y:x):
'''
Child job, to download story when run as a worker job
'''
from calibre_plugins.fanficfare_plugin import FanFicFareBase
fffbase = FanFicFareBase(options['plugin_path'])
with fffbase: # so the sys.path was modified while loading the
# plug impl.
from calibre_plugins.fanficfare_plugin.dialogs import NotGoingToDownload
from calibre_plugins.fanficfare_plugin.prefs import (
SAVE_YES, SAVE_YES_UNLESS_SITE, OVERWRITE, OVERWRITEALWAYS, UPDATE,
UPDATEALWAYS, ADDNEW, SKIP, CALIBREONLY, CALIBREONLYSAVECOL)
from calibre_plugins.fanficfare_plugin.wordcount import get_word_count
from fanficfare import adapters, writers
from fanficfare.epubutils import get_update_data
from fanficfare.six import text_type as unicode
from calibre_plugins.fanficfare_plugin.fff_util import get_fff_config
try:
logger.info("\n\n" + ("-"*80) + " " + book['url'])
## No need to download at all. Can happen now due to
## collision moving into book for CALIBREONLY changing to
## ADDNEW when story URL not in library.
if book['collision'] in (CALIBREONLY, CALIBREONLYSAVECOL):
logger.info("Skipping CALIBREONLY 'update' down inside worker")
return book
book['comment'] = _('Download started...')
configuration = get_fff_config(book['url'],
options['fileform'],
options['personal.ini'])
if not options['updateepubcover'] and 'epub_for_update' in book and book['collision'] in (UPDATE, UPDATEALWAYS):
configuration.set("overrides","never_make_cover","true")
# images only for epub, html, even if the user mistakenly
# turned it on else where.
if options['fileform'] not in ("epub","html"):
configuration.set("overrides","include_images","false")
adapter = adapters.getAdapter(configuration,book['url'])
adapter.is_adult = book['is_adult']
adapter.username = book['username']
adapter.password = book['password']
adapter.setChaptersRange(book['begin'],book['end'])
## each site download job starts with a new copy of the
## cookiejar and basic_cache from the FG process. They
## are not shared between different sites' BG downloads
if configuration.getConfig('use_browser_cache'):
if 'browser_cache' in options:
configuration.set_browser_cache(options['browser_cache'])
else:
options['browser_cache'] = configuration.get_browser_cache()
if 'browser_cachefile' in options:
options['browser_cache'].load_cache(options['browser_cachefile'])
if 'basic_cache' in options:
configuration.set_basic_cache(options['basic_cache'])
else:
options['basic_cache'] = configuration.get_basic_cache()
options['basic_cache'].load_cache(options['basic_cachefile'])
if 'cookiejar' in options:
configuration.set_cookiejar(options['cookiejar'])
else:
options['cookiejar'] = configuration.get_cookiejar()
options['cookiejar'].load_cookiejar(options['cookiejarfile'])
story = adapter.getStoryMetadataOnly()
if not story.getMetadata("series") and 'calibre_series' in book:
adapter.setSeries(book['calibre_series'][0],book['calibre_series'][1])
# set PI version instead of default.
if 'version' in options:
story.setMetadata('version',options['version'])
book['title'] = story.getMetadata("title", removeallentities=True)
book['author_sort'] = book['author'] = story.getList("author", removeallentities=True)
book['publisher'] = story.getMetadata("publisher")
book['url'] = story.getMetadata("storyUrl", removeallentities=True)
book['tags'] = story.getSubjectTags(removeallentities=True)
book['comments'] = story.get_sanitized_description()
book['series'] = story.getMetadata("series", removeallentities=True)
if story.getMetadataRaw('datePublished'):
book['pubdate'] = story.getMetadataRaw('datePublished').replace(tzinfo=local_tz)
if story.getMetadataRaw('dateUpdated'):
book['updatedate'] = story.getMetadataRaw('dateUpdated').replace(tzinfo=local_tz)
if story.getMetadataRaw('dateCreated'):
book['timestamp'] = story.getMetadataRaw('dateCreated').replace(tzinfo=local_tz)
else:
book['timestamp'] = datetime.now().replace(tzinfo=local_tz) # need *something* there for calibre.
writer = writers.getWriter(options['fileform'],configuration,adapter)
outfile = book['outfile']
## checks were done earlier, it's new or not dup or newer--just write it.
if book['collision'] in (ADDNEW, SKIP, OVERWRITE, OVERWRITEALWAYS) or \
('epub_for_update' not in book and book['collision'] in (UPDATE, UPDATEALWAYS)):
# preserve logfile even on overwrite.
if 'epub_for_update' in book:
adapter.logfile = get_update_data(book['epub_for_update'])[6]
# change the existing entries id to notid so
# write_epub writes a whole new set to indicate overwrite.
if adapter.logfile:
adapter.logfile = adapter.logfile.replace("span id","span notid")
if book['collision'] == OVERWRITE and 'fileupdated' in book:
lastupdated=story.getMetadataRaw('dateUpdated')
fileupdated=book['fileupdated']
# If the site's updated time has no time component (or is midnight), compare dates only;
# if it does have a time component, compare the full timestamps.
if (lastupdated.time() == time.min and fileupdated.date() > lastupdated.date()) or \
(lastupdated.time() != time.min and fileupdated > lastupdated):
raise NotGoingToDownload(_("Not Overwriting, web site is not newer."),'edit-undo.png',showerror=False)
logger.info("write to %s"%outfile)
inject_cal_cols(book,story,configuration)
writer.writeStory(outfilename=outfile,
forceOverwrite=True,
notification=notification)
if adapter.story.chapter_error_count > 0:
book['comment'] = _('Download %(fileform)s completed, %(failed)s failed chapters, %(total)s total chapters.')%\
{'fileform':options['fileform'],
'failed':adapter.story.chapter_error_count,
'total':story.getMetadata("numChapters")}
book['chapter_error_count'] = adapter.story.chapter_error_count
else:
book['comment'] = _('Download %(fileform)s completed, %(total)s chapters.')%\
{'fileform':options['fileform'],
'total':story.getMetadata("numChapters")}
book['all_metadata'] = story.getAllMetadata(removeallentities=True)
if options['savemetacol'] != '':
book['savemetacol'] = story.dump_html_metadata()
## checks were done earlier, just update it.
elif 'epub_for_update' in book and book['collision'] in (UPDATE, UPDATEALWAYS):
# update now handled by pre-populating the old images and
# chapters in the adapter rather than merging epubs.
#urlchaptercount = int(story.getMetadata('numChapters').replace(',',''))
# returns int adjusted for start-end range.
urlchaptercount = story.getChapterCount()
(url,
chaptercount,
adapter.oldchapters,
adapter.oldimgs,
adapter.oldcover,
adapter.calibrebookmark,
adapter.logfile,
adapter.oldchaptersmap,
adapter.oldchaptersdata) = get_update_data(book['epub_for_update'])[0:9]
# dup handling from fff_plugin needed for anthology updates.
if book['collision'] == UPDATE:
if chaptercount == urlchaptercount:
if merge:
import networkx as nx
import statistics
import matplotlib.pyplot as plt
from queue import PriorityQueue
import math
class Grafo(object):
def __init__(self, grafo_dict={}):
self.grafo_dict = grafo_dict
def vertices(self):
return list(self.grafo_dict.keys())
def arestas(self):
return self.gerar_arestas()
def adicionar_vertice(self, vertice):
if vertice not in self.grafo_dict:
self.grafo_dict[vertice] = []
def adicionar_aresta(self, bidirecional=False, *aresta):
(vertice1, vertice2, custo) = aresta
self.adicionar_vertice(vertice1)
self.adicionar_vertice(vertice2)
self.add_aresta_sem_repeticao(vertice1, vertice2, custo)
if bidirecional:
self.add_aresta_sem_repeticao(vertice2, vertice1, custo)
def add_aresta_sem_repeticao(self, vertice1, vertice2, custo):
lista_vertice1 = self.grafo_dict[vertice1]
for i, (vertice, _) in enumerate(lista_vertice1):
if vertice == vertice2:
lista_vertice1[i] = (vertice2, custo)
break
else:
lista_vertice1.append((vertice2, custo))
def custo_direto(self, vertice1, vertice2):
lista_vertice1 = self.grafo_dict[vertice1]
for (vertice, custo) in lista_vertice1:
if vertice == vertice2:
return custo
else:
return math.inf
def gerar_arestas(self):
aresta = []
for vertice in self.grafo_dict:
for (vizinho, custo) in self.grafo_dict[vertice]:
if (vizinho, vertice) not in aresta:
aresta.append(((vertice, vizinho, custo)))
return aresta
def __str__(self):
return 'Vertices: {0}\nArestas: {1}'.format(sorted(self.vertices()), sorted(self.arestas()))
def dijkstra(grafo, root):
queue = PriorityQueue() # priority queue
caminho = {} # dict mapping each vertex to [path, total cost]
for vertice in grafo.vertices():
if vertice == root:
caminho[vertice] = [[], 0] # cost 0 for the root
else:
caminho[vertice] = [[], math.inf] # infinite cost for the others
queue.put((caminho[vertice][1], vertice)) # add every vertex to the priority queue (highest priority = lowest cost)
vertices_remanescentes = list(grafo.vertices()) # list of unvisited vertices
for i in range(len(grafo.vertices())):
prioritario = queue.get()[1] # highest-priority vertex in the queue
vertices_remanescentes.remove(prioritario) # remove it from the unvisited list
for vertice in vertices_remanescentes: # for each unvisited vertex
menor_prioritario = caminho[prioritario][1] # lowest cost to reach the priority vertex
custo = grafo.custo_direto(prioritario, vertice) # cost from the priority vertex to this vertex
menor_custo_prioritario = caminho[vertice][1] # lowest cost found for this vertex so far
if menor_prioritario + custo < menor_custo_prioritario: # the path through the priority vertex is cheaper than the best so far
caminho[vertice][1] = menor_prioritario + custo # update the cost
caminho[vertice][0] = caminho[prioritario][0] + [prioritario] # update the path
queue.queue.remove((menor_custo_prioritario, vertice)) # update the vertex's priority in the priority queue
queue.put((caminho[vertice][1], vertice))
return caminho
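# A minimal usage sketch of Grafo + dijkstra (the vertices 'a'/'b'/'c' and the costs
# below are made up for illustration, not taken from the waterfall data further down):
#
#   g = Grafo({})
#   g.adicionar_aresta(True, 'a', 'b', 2)   # bidirecional=True, then (vertice1, vertice2, custo)
#   g.adicionar_aresta(True, 'b', 'c', 3)
#   g.adicionar_aresta(True, 'a', 'c', 10)
#   caminho = dijkstra(g, 'a')
#   # caminho['c'] -> [['a', 'b'], 5]  (going through 'b' beats the direct edge of cost 10)
#   print(converter_caminho_str(caminho))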
def converter_caminho_str(caminho):
caminho_str = []
vertices = sorted(caminho.keys())
for vertice in vertices:
custo = caminho[vertice][1]
if custo == 0:
continue
string = ' --- '.join(caminho[vertice][0]) + ' --- ' + vertice
caminho_str.append(string.upper() + ' = custo: ' + str(custo))
return '\n'.join(caminho_str)
def prim(grafo, root):
vertice = [root] # list of vertices from which we search for edges
vertice_selecionada = [] # list of selected edges
peso = 0 # weight of the minimum spanning tree
vertices_remanescentes = list(grafo.vertices()) # list of candidate destination vertices for the search
vertices_remanescentes.remove(root) # the root is the starting point, so it leaves the list
for i in range(len(vertices_remanescentes)):
custo_minimo = math.inf # initialize the minimum cost as infinity
vertice_a, vertice_b = None, None # candidate endpoints for the selected edge
for vertice_1 in vertice: # for each vertex in the source search list
for vertice_2 in vertices_remanescentes: # look at the vertices that are not yet in the final tree
custo = grafo.custo_direto(vertice_1, vertice_2) # cost of the edge
if custo < custo_minimo: # if it is lower than the minimum so far, update the data
vertice_a = vertice_1
vertice_b = vertice_2
custo_minimo = custo
if custo_minimo < math.inf: # after all the searches, if the cost is finite:
vertice_selecionada.append((vertice_a, vertice_b, custo_minimo)) # add the edge from vertice_a to vertice_b to the solution
vertice.append(vertice_b) # vertice_b now becomes a new search origin
vertices_remanescentes.remove(vertice_b) # vertice_b is no longer a search destination, since it is already in the solution
peso += custo_minimo # update the weight
return vertice_selecionada, peso # return the list of selected edges with the total weight
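# A minimal usage sketch of prim on the same kind of Grafo (hypothetical edges):
#
#   g = Grafo({})
#   for v1, v2, custo in [('a', 'b', 2), ('b', 'c', 3), ('a', 'c', 10)]:
#       g.adicionar_aresta(True, v1, v2, custo)
#   arestas, peso = prim(g, 'a')
#   # arestas -> [('a', 'b', 2), ('b', 'c', 3)], peso -> 5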
def imprimir_grafo(grafo, salvar_png=False):
nx_grafo = nx.Graph()
custos = []
for (vertice_a, vertice_b, custo) in grafo.arestas():
nx_grafo.add_edge(vertice_a, vertice_b, custo=custo)
custos.append(custo)
pos = nx.spring_layout(nx_grafo) # positions for all the nodes
custo_medio = statistics.mean(custos)
elarge = [(u, v) for (u, v, d) in nx_grafo.edges(data=True) if d['custo'] > custo_medio]
esmall = [(u, v) for (u, v, d) in nx_grafo.edges(data=True) if d['custo'] <= custo_medio]
# nodes
nx.draw_networkx_nodes(nx_grafo, pos, node_size=700)
# edges
nx.draw_networkx_edges(nx_grafo, pos, edgelist=elarge, width=4)
nx.draw_networkx_edges(nx_grafo, pos, edgelist=esmall, width=4, alpha=0.5, edge_color='b', style='dashed')
# labels
nx.draw_networkx_labels(nx_grafo, pos, font_size=20, font_family='sans-serif')
# nx.draw_networkx_edge_labels(nx_grafo, pos)
plt.axis('off')
if salvar_png:
plt.savefig("grafo.png") # save as png
plt.show() # plot the graph
def carregar_grafo_letra():
grafo = [
('a', 'b', 0.640826),('a', 'c', 0.949966),('a', 'd', 0.1369275),('a', 'e', 0.1863630),('a', 'f', 0.1646214),('a', 'g', 0.1837410),('a', 'h', 0.1584884),('a', 'i', 0.2359051),('a', 'j', 0.2382511),('a', 'k', 0.2055704),('a', 'l', 0.2939706),
('b', 'a', 0.640654),('b', 'c', 0.755502),('b', 'd', 0.816306),('b', 'e', 0.1310661),('b', 'f', 0.1093244),('b', 'g', 0.1284441),('b', 'h', 0.1308287),('b', 'i', 0.1806081),('b', 'j', 0.1829541),('b', 'k', 0.1502734),('b', 'l', 0.2386736),
('c', 'a', 0.944047),('c', 'b', 0.753159),('c', 'd', 0.1064706),('c', 'e', 0.1675028),('c', 'f', 0.1617897),('c', 'g', 0.1809094),('c', 'h', 0.858237),('c', 'i', 0.1861038),('c', 'j', 0.1884498),('c', 'k', 0.1557692),('c', 'l', 0.2441693),
('d', 'a', 0.1448976),('d', 'b', 0.830328),('d', 'c', 0.1067247),('d', 'e', 0.647491),('d', 'f', 0.696594),('d', 'g', 0.848246),('d', 'h', 0.1176037),('d', 'i', 0.1045810),('d', 'j', 0.1069270),('d', 'k', 0.742463),('d', 'l', 0.1626465),
('e', 'a', 0.1929128),('e', 'b', 0.1310480),('e', 'c', 0.1676631),('e', 'd', 0.647054),('e', 'f', 0.601648),('e', 'g', 0.526142),('e', 'h', 0.1758513),('e', 'i', 0.1051998),('e', 'j', 0.1075458),('e', 'k', 0.748652),('e', 'l', 0.1424885),
('f', 'a', 0.1711914),('f', 'b', 0.1093266),('f', 'c', 0.1619624),('f', 'd', 0.698887),('f', 'e', 0.609938),('f', 'g', 0.275292),('f', 'h', 0.1815937),('f', 'i', 0.1429575),('f', 'j', 0.1453035),('f', 'k', 0.1126228),('f', 'l', 0.1930347),
('g', 'a', 0.1906972),('g', 'b', 0.1288323),('g', 'c', 0.1814682),('g', 'd', 0.842088),('g', 'e', 0.518337),('g', 'f', 0.277465),('g', 'h', 0.2010994),('g', 'i', 0.1440625),('g', 'j', 0.1464085),('g', 'k', 0.1137278),('g', 'l', 0.1941397),
('h', 'a', 0.1581596),('h', 'b', 0.1309179),('h', 'c', 0.856920),('h', 'd', 0.1191969),('h', 'e', 0.1762419),('h', 'f', 0.1817398),('h', 'g', 0.2008595),('h', 'i', 0.1656214),('h', 'j', 0.1679674),('h', 'k', 0.1357317),('h', 'l', 0.2234308),
('i', 'a', 0.2426321),('i', 'b', 0.1807672),('i', 'c', 0.1863441),('i', 'd', 0.1047016),('i', 'e', 0.1053173),('i', 'f', 0.1422245),('i', 'g', 0.1441572),('i', 'h', 0.1655558),('i', 'j', 0.41700),('i', 'k', 0.338377),('i', 'l', 0.664622),
('j', 'a', 0.2449781),('j', 'b', 0.1831132),('j', 'c', 0.1886901),('j', 'd', 0.1070476),('j', 'e', 0.1076633),('j', 'f', 0.1445705),('j', 'g', 0.1465032),('j', 'h', 0.1679018),('j', 'i', 0.41700),('j', 'k', 0.361837),('j', 'l', 0.688082),
('k', 'a', 0.2122615),('k', 'b', 0.1503967),('k', 'c', 0.1559735),('k', 'd', 0.743311),('k', 'e', 0.749468),('k', 'f', 0.1118539),('k', 'g', 0.1137867),('k', 'h', 0.1362428),('k', 'i', 0.341998),('k', 'j', 0.365458),('k', 'l', 0.922653),
('l', 'a', 0.3007055),('l', 'b', 0.2388407),('l', 'c', 0.2444175),('l', 'd', 0.1627751),('l', 'e', 0.1426507),('l', 'f', 0.1922036),('l', 'g', 0.1941363),('l', 'h', 0.2233759),('l', 'i', 0.664609),('l', 'j', 0.688069),('l', 'k', 0.919112)
]
menu_escolha = "a – Cachoeira do Caracol\nb – Cachoeira do Salto Grande\nc – Cataratas do Iguacu\n" \
"d – Cachoeira do Itambe\ne – Cachoeira do Tabuleiro\nf – Cachoeira Conde Deu\n" \
"g – Cachoeira da Fumaca\nh – Cachoeira Boca da Onca\ni – Cachoeira Veu da Noiva\n" \
"j – Cachoeira Santa Barbara\nk – Cachoeira do Tororo\nl – Cachoeira do Formiga"
return grafo, menu_escolha
def carregar_grafo_nome():
grafo = [
('Cachoeira do Caracol', 'Cachoeira do Salto Grande', 0.640826),('Cachoeira do Caracol', 'Cataratas do Iguacu', 0.949966),('Cachoeira do Caracol', 'Cachoeira do Itambe', 0.1369275),('Cachoeira do Caracol', 'Cachoeira do Tabuleiro', 0.1863630),('Cachoeira do Caracol', 'Cachoeira Conde Deu', 0.1646214),('Cachoeira do Caracol', 'Cachoeira da Fumaca', 0.1837410),('Cachoeira do Caracol', 'Cachoeira Boca da Onca', 0.1584884),('Cachoeira do Caracol', 'Cachoeira Veu da Noiva', 0.2359051),('Cachoeira do Caracol', 'Cachoeira Santa Barbara', 0.2382511),('Cachoeira do Caracol', 'Cachoeira do Tororo', 0.2055704),('Cachoeira do Caracol', 'Cachoeira do Formiga', 0.2939706),
('Cachoeira do Salto Grande', 'Cachoeira do Caracol', 0.640654),('Cachoeira do Salto Grande', 'Cataratas do Iguacu', 0.755502),('Cachoeira do Salto Grande', 'Cachoeira do Itambe', 0.816306),('Cachoeira do Salto Grande', 'Cachoeira do Tabuleiro', 0.1310661),('Cachoeira do Salto Grande', 'Cachoeira Conde Deu', 0.1093244),('Cachoeira do Salto Grande', 'Cachoeira da Fumaca', 0.1284441),('Cachoeira do Salto Grande', 'Cachoeira Boca da Onca', 0.1308287),('Cachoeira do Salto Grande', 'Cachoeira Veu da Noiva', 0.1806081),('Cachoeira do Salto Grande', 'Cachoeira Santa Barbara', 0.1829541),('Cachoeira do Salto Grande', 'Cachoeira do Tororo', 0.1502734),('Cachoeira do Salto Grande', 'Cachoeira do Formiga', 0.2386736),
('Cataratas do Iguacu', 'Cachoeira do Caracol', 0.944047),('Cataratas do Iguacu', 'Cachoeira do Salto Grande', 0.753159),('Cataratas do Iguacu', 'Cachoeira do Itambe', 0.1064706),('Cataratas do Iguacu', 'Cachoeira do Tabuleiro', 0.1675028),('Cataratas do Iguacu', 'Cachoeira Conde Deu', 0.1617897),('Cataratas do Iguacu', 'Cachoeira da Fumaca', 0.1809094),('Cataratas do Iguacu', 'Cachoeira Boca da Onca', 0.858237),('Cataratas do Iguacu', 'Cachoeira Veu da Noiva', 0.1861038),('Cataratas do Iguacu', 'Cachoeira Santa Barbara', 0.1884498),('Cataratas do Iguacu', 'Cachoeira do Tororo', 0.1557692),('Cataratas do Iguacu', 'Cachoeira do Formiga', 0.2441693),
('Cachoeira do Itambe', 'Cachoeira do Caracol', 0.1448976),('Cachoeira do Itambe', 'Cachoeira do Salto Grande', 0.830328),('Cachoeira do Itambe', 'Cataratas do Iguacu', 0.1067247),('Cachoeira do Itambe', 'Cachoeira do Tabuleiro', 0.647491),('Cachoeira do Itambe', 'Cachoeira
= ( "ETR '{}' decapsulation <a href='/lisp/show/" + "database'>stats</a>" ) . format ( stats_name )
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
O0ooo0O0oo0 += "%" + stats_name
elif ( rtr_name == "stats" ) :
iIii11iI1II = "lisp-rtr"
I1II1I1I = ( "RTR '{}' decapsulation <a href='/lisp/show/" + "rtr/map-cache'>stats</a>" ) . format ( stats_name )
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
O0ooo0O0oo0 += "%" + stats_name
else :
iIiIIIi = lisp . lisp_print_sans ( "Invalid command" )
return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
O0ooo0O0oo0 = lisp . lisp_command_ipc ( O0ooo0O0oo0 , "lisp-core" )
lisp . lisp_ipc ( O0ooo0O0oo0 , Ooo , iIii11iI1II )
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
III1I1 = getoutput ( "egrep 'lisp map-cache' ./lisp.config" )
if ( III1I1 != "" ) :
os . system ( "touch ./lisp.config" )
if 12 - 12: iIii1I11I1II1 % ooOoO0o % ooOoO0o
if 78 - 78: IiII . OoOoOO00 . I11i
iIiIIIi = lisp . lisp_print_sans ( "{} cleared" . format ( I1II1I1I ) )
return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 97 - 97: oO0o
if 80 - 80: I1IiiI . Ii1I
if 47 - 47: I11i + ooOoO0o + II111iiii % i11iIiiIii
if 93 - 93: I1ii11iIi11i % OoOoOO00 . O0 / iII111i * oO0o
if 29 - 29: o0oOOo0O0Ooo
if 86 - 86: II111iiii . IiII
if 2 - 2: OoooooooOO
@ bottle . route ( '/lisp/show/map-server' )
def o0o0O00 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 35 - 35: iIii1I11I1II1
if 94 - 94: OoOoOO00
return ( lispconfig . lisp_process_show_command ( Ooo ,
"show map-server" ) )
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
@ bottle . route ( '/lisp/show/database' )
def OO0o0oO0O000o ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 47 - 47: I1Ii111 - OoO0O00 / Ii1I * OoooooooOO / Ii1I . Oo0Ooo
return ( lispconfig . lisp_process_show_command ( Ooo ,
"show database-mapping" ) )
if 34 - 34: ooOoO0o
if 27 - 27: I1Ii111 + OoooooooOO - OoOoOO00
if 15 - 15: oO0o / I11i * O0 . II111iiii - OoO0O00
if 90 - 90: oO0o
if 94 - 94: I11i / I1ii11iIi11i * I1Ii111 - OoOoOO00
if 44 - 44: Ii1I % i11iIiiIii - iII111i * I1ii11iIi11i + Oo0Ooo * OOooOOo
if 41 - 41: O0 * ooOoO0o - OoOoOO00 . Ii1I
@ bottle . route ( '/lisp/show/itr/map-cache' )
def oO ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
return ( lispconfig . lisp_process_show_command ( Ooo ,
"show itr-map-cache" ) )
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
if 13 - 13: OoO0O00
@ bottle . route ( '/lisp/show/itr/rloc-probing' )
def O0oo0O0 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 2 - 2: OoooooooOO . OOooOOo . IiII
return ( lispconfig . lisp_process_show_command ( Ooo ,
"show itr-rloc-probing" ) )
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
if 24 - 24: o0oOOo0O0Ooo
if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
if 28 - 28: OOooOOo % ooOoO0o
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
@ bottle . post ( '/lisp/show/itr/map-cache/lookup' )
def oOo00Ooo0o0 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 33 - 33: I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
oo0O0o = bottle . request . forms . get ( "eid" )
if ( lispconfig . lisp_validate_input_address_string ( oo0O0o ) == False ) :
iIiIIIi = "Address '{}' has invalid format" . format ( oo0O0o )
iIiIIIi = lisp . lisp_print_sans ( iIiIIIi )
return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 13 - 13: iIii1I11I1II1 . OoOoOO00 * I1IiiI / oO0o * Ii1I
if 64 - 64: ooOoO0o / O0 * OoOoOO00 * ooOoO0o
Oo0O0 = "show itr-map-cache" + "%" + oo0O0o
return ( lispconfig . lisp_process_show_command ( Ooo ,
Oo0O0 ) )
if 60 - 60: I11i / i1IIi % I1ii11iIi11i / I1ii11iIi11i * I1ii11iIi11i . i11iIiiIii
if 99 - 99: OoOoOO00
if 77 - 77: o0oOOo0O0Ooo
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
if 65 - 65: OoOoOO00
if 31 - 31: I11i * OoOoOO00 . IiII % Ii1I + Oo0Ooo
if 47 - 47: O0 * I1IiiI * OoO0O00 . II111iiii
@ bottle . route ( '/lisp/show/rtr/map-cache' )
@ bottle . route ( '/lisp/show/rtr/map-cache/<dns>' )
def O0o00o000oO ( dns = "" ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 62 - 62: I1ii11iIi11i / I11i . i1IIi
if 99 - 99: OoOoOO00 . I1Ii111
if ( dns == "dns" ) :
return ( lispconfig . lisp_process_show_command ( Ooo ,
"show rtr-map-cache-dns" ) )
else :
return ( lispconfig . lisp_process_show_command ( Ooo ,
"show rtr-map-cache" ) )
if 59 - 59: I11i / Oo0Ooo / OOooOOo / O0 / OoOoOO00 + o0oOOo0O0Ooo
if 13 - 13: o0oOOo0O0Ooo % oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
@ bottle . route ( '/lisp/show/rtr/rloc-probing' )
def i1i111Iiiiiii ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 19 - 19: I1IiiI . Oo0Ooo + OoooooooOO - I1IiiI
return ( lispconfig . lisp_process_show_command ( Ooo ,
"show rtr-rloc-probing" ) )
if 93 - 93: iIii1I11I1II1 + I1IiiI + i11iIiiIii
if 74 - 74: I11i / II111iiii + ooOoO0o * iIii1I11I1II1 - I1Ii111 - OoO0O00
if 69 - 69: iIii1I11I1II1 * I1IiiI - iII111i + O0 + O0
if 65 - 65: I1Ii111 / i11iIiiIii / OoO0O00 - OOooOOo
if 9 - 9: I1IiiI | |
sweep_iname, sweep_min_value, scan_min_value, stride,
tracking_iname):
domain = temp_kernel.get_inames_domain(frozenset((scan_iname, sweep_iname)))
inames_added_for_scan.add(tracking_iname)
new_domain = _create_domain_for_sweep_tracking(domain,
tracking_iname, sweep_iname, sweep_min_value, scan_min_value, stride)
_insert_subdomain_into_domain_tree(temp_kernel, domains, new_domain)
return tracking_iname
def replace_var_within_expr(expr, from_var, to_var):
from pymbolic.mapper.substitutor import make_subst_func
from loopy.symbolic import (
SubstitutionRuleMappingContext, RuleAwareSubstitutionMapper)
rule_mapping_context = SubstitutionRuleMappingContext(
temp_kernel.substitutions, var_name_gen)
from pymbolic import var
mapper = RuleAwareSubstitutionMapper(
rule_mapping_context,
make_subst_func({from_var: var(to_var)}),
within=lambda *args: True)
return mapper(expr, temp_kernel, None)
def make_temporaries(name_based_on, nvars, shape, dtypes, address_space):
var_names = [
var_name_gen(name_based_on.format(index=i))
for i in range(nvars)]
from loopy.kernel.data import TemporaryVariable
for name, dtype in zip(var_names, dtypes):
new_temporary_variables[name] = TemporaryVariable(
name=name,
shape=shape,
dtype=dtype,
address_space=address_space)
return var_names
# }}}
# {{{ sequential scan
def map_scan_seq(expr, rec, callables_table, nresults, arg_dtypes,
reduction_dtypes, sweep_iname, scan_iname, sweep_min_value,
scan_min_value, stride, guarding_predicates):
outer_insn_inames = insn.within_inames
inames_to_remove.add(scan_iname)
track_iname = var_name_gen(
"{sweep_iname}__seq_scan"
.format(sweep_iname=sweep_iname))
get_or_add_sweep_tracking_iname_and_domain(
scan_iname, sweep_iname, sweep_min_value, scan_min_value,
stride, track_iname)
from loopy.kernel.data import AddressSpace
acc_var_names = make_temporaries(
name_based_on="acc_" + scan_iname,
nvars=nresults,
shape=(),
dtypes=reduction_dtypes,
address_space=AddressSpace.PRIVATE)
from pymbolic import var
acc_vars = tuple(var(n) for n in acc_var_names)
init_id = insn_id_gen(
"{}_{}_init".format(insn.id, "_".join(expr.inames)))
init_insn_depends_on = frozenset()
# FIXME: Explain why we care about global barriers here
if kernel_has_global_barriers(kernel):
global_barrier = lp.find_most_recent_global_barrier(temp_kernel, insn.id)
if global_barrier is not None:
init_insn_depends_on |= frozenset([global_barrier])
expression, callables_table = expr.operation.neutral_element(
*arg_dtypes, callables_table=callables_table, target=kernel.target)
init_insn = make_assignment(
id=init_id,
assignees=acc_vars,
within_inames=outer_insn_inames - frozenset(
(sweep_iname,) + expr.inames),
within_inames_is_final=insn.within_inames_is_final,
depends_on=init_insn_depends_on,
expression=expression,
# Do not inherit predicates: Those might read variables
# that may not yet be set, and we don't have a great way
# of figuring out what the dependencies of the accumulator
# initializer should be.
# This way, we may initialize a few too many accumulators,
# but that's better than being incorrect.
# https://github.com/inducer/loopy/issues/231
)
generated_insns.append(init_insn)
update_insn_depends_on = {init_insn.id} | insn.depends_on
updated_inner_exprs = (
preprocess_scan_arguments(insn, expr.expr, nresults,
scan_iname, track_iname, update_insn_depends_on))
update_id = insn_id_gen(
based_on="{}_{}_update".format(insn.id, "_".join(expr.inames)))
update_insn_iname_deps = insn.within_inames | {track_iname}
if insn.within_inames_is_final:
update_insn_iname_deps = insn.within_inames | {track_iname}
expression, callables_table = expr.operation(
arg_dtypes,
_strip_if_scalar(acc_vars, acc_vars),
_strip_if_scalar(acc_vars, updated_inner_exprs),
callables_table,
kernel.target)
scan_insn = make_assignment(
id=update_id,
assignees=acc_vars,
expression=expression,
depends_on=frozenset(update_insn_depends_on),
within_inames=update_insn_iname_deps,
no_sync_with=insn.no_sync_with,
within_inames_is_final=insn.within_inames_is_final,
predicates=guarding_predicates,
)
generated_insns.append(scan_insn)
new_insn_add_depends_on.add(scan_insn.id)
if nresults == 1:
assert len(acc_vars) == 1
return acc_vars[0], callables_table
else:
return acc_vars, callables_table
# }}}
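# Rough illustration (comments only, not generated by this file) of what map_scan_seq
# above emits for a scan like "out[i] = sum(j, a[j]) with 0 <= j <= i", where i is the
# sweep iname and j the scan iname:
#
#   acc = 0                      # init insn: neutral element, outside the i loop
#   acc = acc + a[i__seq_scan]   # update insn: runs per i using the tracking iname
#   out[i] = acc                 # the original instruction, now reading the accumulator
#
# The tracking iname name ("i__seq_scan") and the exact dependency edges come from the
# helpers above; the three-line expansion is only a sketch of the resulting structure.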
# {{{ local-parallel scan
def map_scan_local(expr, rec, callables_table, nresults, arg_dtypes,
reduction_dtypes, sweep_iname, scan_iname, sweep_min_value,
scan_min_value, stride, guarding_predicates):
scan_size = _get_int_iname_size(sweep_iname)
assert scan_size > 0
if scan_size == 1:
return map_reduction_seq(expr, rec, callables_table,
nresults, arg_dtypes, reduction_dtypes,
guarding_predicates)
outer_insn_inames = insn.within_inames
from loopy.kernel.data import LocalInameTagBase
outer_local_inames = tuple(oiname for oiname in outer_insn_inames
if kernel.iname_tags_of_type(oiname, LocalInameTagBase)
and oiname != sweep_iname)
from pymbolic import var
outer_local_iname_vars = tuple(
var(oiname) for oiname in outer_local_inames)
outer_local_iname_sizes = tuple(
_get_int_iname_size(oiname)
for oiname in outer_local_inames)
track_iname = var_name_gen(
"{sweep_iname}__pre_scan"
.format(sweep_iname=sweep_iname))
get_or_add_sweep_tracking_iname_and_domain(
scan_iname, sweep_iname, sweep_min_value, scan_min_value, stride,
track_iname)
# {{{ add separate iname to carry out the scan
# Doing this sheds any odd conditionals that may be active
# on our scan_iname.
base_exec_iname = var_name_gen(sweep_iname + "__scan")
domains.append(_make_slab_set(base_exec_iname, scan_size))
new_iname_tags[base_exec_iname] = kernel.iname_tags(sweep_iname)
# }}}
from loopy.kernel.data import AddressSpace
read_var_names = make_temporaries(
name_based_on="read_"+scan_iname+"_arg_{index}",
nvars=nresults,
shape=(),
dtypes=reduction_dtypes,
address_space=AddressSpace.PRIVATE)
acc_var_names = make_temporaries(
name_based_on="acc_"+scan_iname,
nvars=nresults,
shape=outer_local_iname_sizes + (scan_size,),
dtypes=reduction_dtypes,
address_space=AddressSpace.LOCAL)
acc_vars = tuple(var(n) for n in acc_var_names)
read_vars = tuple(var(n) for n in read_var_names)
base_iname_deps = (outer_insn_inames
- frozenset(expr.inames) - frozenset([sweep_iname]))
neutral, callables_table = expr.operation.neutral_element(
*arg_dtypes, callables_table=callables_table, target=kernel.target)
init_insn_depends_on = insn.depends_on
# FIXME: Explain why we care about global barriers here
if kernel_has_global_barriers(kernel):
global_barrier = lp.find_most_recent_global_barrier(temp_kernel, insn.id)
if global_barrier is not None:
init_insn_depends_on |= frozenset([global_barrier])
init_id = insn_id_gen(f"{insn.id}_{scan_iname}_init")
init_insn = make_assignment(
id=init_id,
assignees=tuple(
acc_var[outer_local_iname_vars + (var(base_exec_iname),)]
for acc_var in acc_vars),
expression=neutral,
within_inames=base_iname_deps | frozenset([base_exec_iname]),
within_inames_is_final=insn.within_inames_is_final,
depends_on=init_insn_depends_on,
# Do not inherit predicates: Those might read variables
# that may not yet be set, and we don't have a great way
# of figuring out what the dependencies of the accumulator
# initializer should be.
# This way, we may initialize a few too many accumulators,
# but that's better than being incorrect.
# https://github.com/inducer/loopy/issues/231
)
generated_insns.append(init_insn)
transfer_insn_depends_on = {init_insn.id} | insn.depends_on
updated_inner_exprs = (
preprocess_scan_arguments(insn, expr.expr, nresults,
scan_iname, track_iname, transfer_insn_depends_on))
from loopy.symbolic import Reduction
from loopy.symbolic import pw_aff_to_expr
sweep_min_value_expr = pw_aff_to_expr(sweep_min_value)
transfer_id = insn_id_gen(f"{insn.id}_{scan_iname}_transfer")
transfer_insn = make_assignment(
id=transfer_id,
assignees=tuple(
acc_var[outer_local_iname_vars
+ (var(sweep_iname) - sweep_min_value_expr,)]
for acc_var in acc_vars),
expression=Reduction(
operation=expr.operation,
inames=(track_iname,),
expr=_strip_if_scalar(acc_vars, updated_inner_exprs),
allow_simultaneous=False,
),
within_inames=outer_insn_inames - frozenset(expr.inames),
within_inames_is_final=insn.within_inames_is_final,
depends_on=frozenset(transfer_insn_depends_on),
no_sync_with=frozenset([(init_id, "any")]) | insn.no_sync_with,
predicates=insn.predicates,
)
generated_insns.append(transfer_insn)
prev_id = transfer_id
istage = 0
cur_size = 1
while cur_size < scan_size:
stage_exec_iname = var_name_gen("%s__scan_s%d" % (sweep_iname, istage))
domains.append(
_make_slab_set_from_range(stage_exec_iname, cur_size, scan_size))
new_iname_tags[stage_exec_iname] = kernel.iname_tags(sweep_iname)
for read_var, acc_var in zip(read_vars, acc_vars):
read_stage_id = insn_id_gen(
"scan_%s_read_stage_%d" % (scan_iname, istage))
read_stage_insn = make_assignment(
id=read_stage_id,
assignees=(read_var,),
expression=(
acc_var[
outer_local_iname_vars
+ (var(stage_exec_iname) - cur_size,)]),
within_inames=(
base_iname_deps | frozenset([stage_exec_iname])),
within_inames_is_final=insn.within_inames_is_final,
depends_on=frozenset([prev_id]),
predicates=insn.predicates,
)
if cur_size == 1:
# Performance hack: don't add a barrier here with transfer_insn.
# NOTE: This won't work if the way that local inames
# are lowered changes.
read_stage_insn = read_stage_insn.copy(
no_sync_with=(
read_stage_insn.no_sync_with
| frozenset([(transfer_id, "any")])))
generated_insns.append(read_stage_insn)
prev_id = read_stage_id
write_stage_id = insn_id_gen(
"scan_%s_write_stage_%d" % (scan_iname, istage))
expression, callables_table = expr.operation(
arg_dtypes,
_strip_if_scalar(acc_vars, read_vars),
_strip_if_scalar(acc_vars, tuple(
acc_var[
outer_local_iname_vars + (var(stage_exec_iname),)]
for acc_var in acc_vars)),
callables_table,
kernel.target)
write_stage_insn = make_assignment(
id=write_stage_id,
assignees=tuple(
acc_var[outer_local_iname_vars + (var(stage_exec_iname),)]
for acc_var in acc_vars),
expression=expression,
within_inames=(
base_iname_deps | frozenset([stage_exec_iname])),
within_inames_is_final=insn.within_inames_is_final,
depends_on=frozenset([prev_id]),
predicates=insn.predicates,
)
generated_insns.append(write_stage_insn)
prev_id = write_stage_id
cur_size *= 2
istage += 1
new_insn_add_depends_on.add(prev_id)
new_insn_add_within_inames.add(sweep_iname)
output_idx = var(sweep_iname) - sweep_min_value_expr
if nresults == 1:
assert len(acc_vars) == 1
return (acc_vars[0][outer_local_iname_vars + (output_idx,)],
callables_table)
else:
return [acc_var[outer_local_iname_vars + (output_idx,)]
for acc_var in acc_vars], callables_table
# }}}
# {{{ seq/par dispatch
def map_reduction(expr, rec, callables_table,
guarding_predicates, nresults=1):
# Only expand one level of reduction at a time, going from outermost to
# innermost. Otherwise we get the (iname + insn) dependencies wrong.
from loopy.type_inference import (
infer_arg_and_reduction_dtypes_for_reduction_expression)
arg_dtypes, reduction_dtypes = (
infer_arg_and_reduction_dtypes_for_reduction_expression(
temp_kernel, expr, callables_table, unknown_types_ok))
outer_insn_inames = insn.within_inames
bad_inames = frozenset(expr.inames) & outer_insn_inames
if bad_inames:
raise LoopyError("reduction used within loop(s) that it was "
"supposed to reduce over: " + ", ".join(bad_inames))
iname_classes = _classify_reduction_inames(temp_kernel, expr.inames)
n_sequential = len(iname_classes.sequential)
n_local_par = len(iname_classes.local_parallel)
n_nonlocal_par = len(iname_classes.nonlocal_parallel)
really_force_scan = force_scan and (
len(expr.inames) != 1 or expr.inames[0] not in inames_added_for_scan)
def _error_if_force_scan_on(cls, msg):
if really_force_scan:
raise cls(msg)
may_be_implemented_as_scan = False
if force_scan or automagic_scans_ok:
from loopy.diagnostic import ReductionIsNotTriangularError
try:
# Try to determine scan candidate information (sweep iname, scan
# iname, etc).
scan_param = _try_infer_scan_candidate_from_expr(
temp_kernel, expr, outer_insn_inames,
sweep_iname=force_outer_iname_for_scan)
except ValueError as v:
error = str(v)
else:
# Ensures the reduction is triangular (somewhat expensive).
may_be_implemented_as_scan, error = (
_check_reduction_is_triangular(
temp_kernel, expr, scan_param))
if not may_be_implemented_as_scan:
_error_if_force_scan_on(ReductionIsNotTriangularError, error)
# {{{ sanity checks
if n_local_par and n_sequential:
raise LoopyError("Reduction over '%s' contains both parallel and "
"sequential inames. It must be split "
"(using split_reduction_{in,out}ward) "
"before code generation."
% ", ".join(expr.inames))
if n_local_par > 1:
raise LoopyError("Reduction over '%s' contains more than"
"one parallel iname. It must be split "
"(using split_reduction_{in,out}ward) "
"before code generation."
% ", ".join(expr.inames))
if n_nonlocal_par:
bad_inames = iname_classes.nonlocal_parallel
raise LoopyError("the only form of parallelism supported "
"by reductions is 'local'--found iname(s) '%s' "
"respectively tagged '%s'"
% (", ".join(bad_inames),
", ".join(str(kernel.iname_tags(iname))
for iname in bad_inames)))
if n_local_par == 0 and n_sequential == 0:
from loopy.diagnostic import warn_with_kernel
warn_with_kernel(kernel, "empty_reduction",
"Empty reduction found (no inames to reduce over). "
"Eliminating.")
# We're not supposed to reduce/sum at all. (Note how this is distinct
# from an empty reduction--there is an element here, just no inames
# to reduce over. It's rather similar to an array with () shape in
# numpy.)
return expr.expr, callables_table
# }}}
if may_be_implemented_as_scan:
assert force_scan or automagic_scans_ok
# We require the "scan" iname to be tagged sequential.
if n_sequential:
sweep_iname = scan_param.sweep_iname
sweep_class = _classify_reduction_inames(kernel, (sweep_iname,))
sequential = sweep_iname in sweep_class.sequential
parallel = sweep_iname in sweep_class.local_parallel
bad_parallel = sweep_iname in sweep_class.nonlocal_parallel
if sweep_iname not in outer_insn_inames:
_error_if_force_scan_on(LoopyError,
"Sweep iname '%s' was detected, but is not an iname "
"for the instruction." % sweep_iname)
elif bad_parallel:
_error_if_force_scan_on(LoopyError,
"Sweep iname '%s' has an unsupported parallel tag '%s' "
"- the | |
# Source repository: riclima/django-danceschool
from django.conf import settings
from django.db.models import Sum, Count, Q
from django.db.models.functions import TruncDate, TruncMonth
from django.http import HttpResponse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import unicodecsv as csv
from calendar import month_name
import pytz
from danceschool.core.constants import getConstant
from danceschool.core.models import (
Registration, Event, EventOccurrence, EventStaffMember, InvoiceItem, Room, StaffMember
)
from danceschool.core.utils.timezone import ensure_timezone
from .constants import EXPENSE_BASES
from .models import ExpenseItem, RevenueItem, RepeatedExpenseRule, RoomRentalInfo, TransactionParty
def getExpenseItemsCSV(queryset, scope='instructor'):
response = HttpResponse(content_type='text/csv')
if scope == 'instructor':
response['Content-Disposition'] = 'attachment; filename="paymentHistory.csv"'
else:
response['Content-Disposition'] = 'attachment; filename="expenseHistory.csv"'
writer = csv.writer(response, csv.excel)
# BOM (optional...Excel needs it to open UTF-8 file properly)
response.write(u'\ufeff'.encode('utf8'))
header_list = [
_('Description'),
_('Expense Category'),
_('Hours'),
_('Wage Rate'),
_('Total Payment'),
_('Is Reimbursement'),
_('Submission Date'),
_('Event'),
_('Approved'),
_('Paid'),
_('Payment Date'),
]
if scope != 'instructor':
header_list += [_('Pay To')]
writer.writerow(header_list)
for x in queryset:
this_row_data = [
x.description,
x.category.name,
x.hours,
x.wageRate,
x.total,
x.reimbursement,
x.submissionDate,
x.event,
x.approved,
x.paid,
x.paymentDate,
]
if scope != 'instructor':
this_row_data.append(x.payTo)
writer.writerow(this_row_data)
return response
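# A hedged usage sketch (the view name and queryset filter are hypothetical, not part
# of this module):
#
#   def expense_csv_view(request):
#       qs = ExpenseItem.objects.filter(paid=True)
#       return getExpenseItemsCSV(qs, scope='all')
#
# Any scope other than 'instructor' adds the "Pay To" column and uses the
# expenseHistory.csv filename, so the HttpResponse can be returned directly.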
def getRevenueItemsCSV(queryset):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="revenueHistory.csv"'
writer = csv.writer(response, csv.excel)
# BOM (optional...Excel needs it to open UTF-8 file properly)
response.write(u'\ufeff'.encode('utf8'))
header_list = [
_('Description'),
_('Revenue Category'),
_('Gross Total (Pre-Discounts & Vouchers)'),
_('Net Total'),
_('Received From'),
_('Registration'),
_('Event'),
_('Received'),
_('Received Date')
]
writer.writerow(header_list)
for x in queryset:
this_row_data = [
x.description,
x.category.name,
x.grossTotal,
x.total
]
if x.registration:
this_row_data.append(x.registration.fullName)
else:
this_row_data.append(x.receivedFrom.name)
this_row_data += [
x.registration,
x.event,
x.received,
x.receivedDate
]
writer.writerow(this_row_data)
return response
def createExpenseItemsForVenueRental(request=None, datetimeTuple=None, rule=None, event=None):
'''
For each Location or Room-related Repeated Expense Rule, look for Events
in the designated time window that do not already have expenses associated
with them. For hourly rental expenses, then generate new expenses that are
associated with this rule. For non-hourly expenses, generate new expenses
based on the non-overlapping intervals of days, weeks or months for which
there is not already an ExpenseItem associated with the rule in question.
'''
# These are used repeatedly, so they are put at the top
submissionUser = getattr(request, 'user', None)
rental_category = getConstant('financial__venueRentalExpenseCat')
# Return the number of new expense items created
generate_count = 0
# First, construct the set of rules that need to be checked for affiliated events
rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
(Q(locationrentalinfo__isnull=False) | Q(roomrentalinfo__isnull=False))
if rule:
rule_filters = rule_filters & Q(id=rule.id)
rulesToCheck = RepeatedExpenseRule.objects.filter(rule_filters).distinct()
# These are the filters place on Events that overlap the window in which
# expenses are being generated.
event_timefilters = Q()
if datetimeTuple and len(datetimeTuple) == 2:
timelist = list(datetimeTuple)
timelist.sort()
event_timefilters = event_timefilters & (
Q(startTime__gte=timelist[0]) & Q(startTime__lte=timelist[1])
)
if event:
event_timefilters = event_timefilters & Q(id=event.id)
# Now, we loop through the set of rules that need to be applied, then loop through the
# Events in the window in question that occurred at the location indicated by the rule.
for rule in rulesToCheck:
venue = (
getattr(rule, 'room', None) if
isinstance(rule, RoomRentalInfo) else
getattr(rule, 'location', None)
)
loc = getattr(venue, 'location') if isinstance(venue, Room) else venue
event_locfilter = Q(room=venue) if isinstance(venue, Room) else Q(location=venue)
# Find or create the TransactionParty associated with the location.
loc_party = TransactionParty.objects.get_or_create(
location=loc, defaults={'name': loc.name}
)[0]
if rule.advanceDays:
if rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
event_timefilters = event_timefilters & \
Q(endTime__lte=timezone.now() + timedelta(days=rule.advanceDays))
elif rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
event_timefilters = event_timefilters & \
Q(startTime__lte=timezone.now() + timedelta(days=rule.advanceDays))
if rule.priorDays:
if rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
event_timefilters = event_timefilters & \
Q(endTime__gte=timezone.now() - timedelta(days=rule.priorDays))
elif rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
event_timefilters = event_timefilters & \
Q(startTime__gte=timezone.now() - timedelta(days=rule.priorDays))
if rule.startDate:
event_timefilters = event_timefilters & Q(
event__startTime__gte=timezone.now().replace(
year=rule.startDate.year, month=rule.startDate.month, day=rule.startDate.day,
hour=0, minute=0, second=0, microsecond=0,
)
)
if rule.endDate:
event_timefilters = event_timefilters & Q(
event__startTime__lte=timezone.now().replace(
year=rule.endDate.year, month=rule.endDate.month, day=rule.endDate.day,
hour=0, minute=0, second=0, microsecond=0,
)
)
# For construction of expense descriptions
replacements = {
'type': _('Event/Series venue rental'),
'of': _('of'),
'location': venue.name,
'for': _('for'),
}
# Loop through Events for which there are not already directly allocated
# expenses under this rule, and create new ExpenseItems for them depending
# on whether the rule requires hourly expenses or non-hourly ones to
# be generated.
events = Event.objects.filter(event_locfilter & event_timefilters).exclude(
Q(expenseitem__expenseRule=rule)).distinct()
if rule.applyRateRule == rule.RateRuleChoices.hourly:
for this_event in events:
# Hourly expenses are always generated without checking for
# overlapping windows, because the periods over which hourly expenses
# are defined are disjoint. However, hourly expenses are allocated
# directly to events, so we just need to create expenses for any events
# that do not already have an Expense Item generate under this rule.
replacements['name'] = this_event.name
replacements['dates'] = this_event.localStartTime.strftime('%Y-%m-%d')
if (
this_event.localStartTime.strftime('%Y-%m-%d') != \
this_event.localEndTime.strftime('%Y-%m-%d')
):
replacements['dates'] += ' %s %s' % (
_('to'), this_event.localEndTime.strftime('%Y-%m-%d')
)
ExpenseItem.objects.create(
event=this_event,
category=rental_category,
payTo=loc_party,
expenseRule=rule,
description='%(type)s %(of)s %(location)s %(for)s: %(name)s, %(dates)s' % \
replacements,
submissionUser=submissionUser,
total=this_event.duration * rule.rentalRate,
accrualDate=this_event.startTime,
)
generate_count += 1
else:
# Non-hourly expenses are generated by constructing the time
# intervals in which the occurrence occurs, and removing from that
# interval any intervals in which an expense has already been
# generated under this rule (so, for example, monthly rentals will
# now show up multiple times). So, we just need to construct the set
# of intervals for which to construct expenses
intervals = [
(x.localStartTime, x.localEndTime) for x in \
EventOccurrence.objects.filter(event__in=events)
]
remaining_intervals = rule.getWindowsAndTotals(intervals)
for startTime, endTime, total, description in remaining_intervals:
replacements['when'] = description
ExpenseItem.objects.create(
category=rental_category,
payTo=loc_party,
expenseRule=rule,
periodStart=startTime,
periodEnd=endTime,
description='%(type)s %(of)s %(location)s %(for)s %(when)s' % replacements,
submissionUser=submissionUser,
total=total,
accrualDate=startTime,
)
generate_count += 1
rulesToCheck.update(lastRun=timezone.now())
return generate_count
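# A hedged usage sketch (the 30-day window is made up; request, rule and event are
# all optional arguments):
#
#   window = (timezone.now(), timezone.now() + timedelta(days=30))
#   n_created = createExpenseItemsForVenueRental(datetimeTuple=window)
#   # n_created is the number of new venue-rental ExpenseItems generated.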
def createExpenseItemsForEvents(request=None, datetimeTuple=None, rule=None, event=None):
'''
For each StaffMember-related Repeated Expense Rule, look for EventStaffMember
instances in the designated time window that do not already have expenses associated
with them. For hourly rental expenses, then generate new expenses that are
associated with this rule. For non-hourly expenses, generate new expenses
based on the non-overlapping intervals of days, weeks or months for which
there is not already an ExpenseItem associated with the rule in question.
'''
# This is used repeatedly, so it is put at the top
submissionUser = getattr(request, 'user', None)
# Return the number of new expense items created
generate_count = 0
# First, construct the set of rules that need to be checked for affiliated events
rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
Q(Q(staffmemberwageinfo__isnull=False) | Q(staffdefaultwage__isnull=False))
if rule:
rule_filters = rule_filters & Q(id=rule.id)
rulesToCheck = RepeatedExpenseRule.objects.filter(
rule_filters).distinct().order_by(
'-staffmemberwageinfo__category', '-staffdefaultwage__category'
)
# These are the filters placed on Events that overlap the window in which
# expenses are being generated.
event_timefilters = Q()
if datetimeTuple and len(datetimeTuple) == 2:
timelist = list(datetimeTuple)
timelist.sort()
event_timefilters = event_timefilters & (
Q(event__startTime__gte=timelist[0]) & Q(event__startTime__lte=timelist[1])
)
if event:
event_timefilters = event_timefilters & Q(event__id=event.id)
# Now, we loop through the set of rules that need to be applied, then loop
# through the Events in the window in question that involved the staff
# member indicated by the rule.
for rule in rulesToCheck:
staffMember = getattr(rule, 'staffMember', None)
staffCategory = getattr(rule, 'category', None)
# No need to continue if expenses are not to be generated
if (
(not staffMember and not staffCategory) or
(
not staffMember and not
getConstant('financial__autoGenerateFromStaffCategoryDefaults')
)
):
continue
# For construction of expense descriptions
replacements = {
'type': _('Staff'),
'to': _('payment to'),
'for': _('for'),
}
# This is the generic category for all Event staff, but it may be overridden below
expense_category = getConstant('financial__otherStaffExpenseCat')
if staffCategory:
if staffMember:
# This staff member in this category
eventstaff_filter = Q(staffMember=staffMember) & Q(category=staffCategory)
elif getConstant('financial__autoGenerateFromStaffCategoryDefaults'):
# Any staff member who does not already have a rule specified this category
eventstaff_filter = (
Q(category=staffCategory) &
~Q(staffMember__expenserules__category=staffCategory)
)
replacements['type'] = staffCategory.name
# For standard categories of staff, map the EventStaffCategory to
# an ExpenseCategory using the stored constants. Otherwise, the
# ExpenseCategory is a generic one.
if staffCategory == getConstant('general__eventStaffCategoryAssistant'):
expense_category = getConstant('financial__assistantClassInstructionExpenseCat')
elif staffCategory in [
getConstant('general__eventStaffCategoryInstructor'),
getConstant('general__eventStaffCategorySubstitute')
]:
expense_category = getConstant('financial__classInstructionExpenseCat')
else:
# We don't want to generate duplicate expenses when there
# Source repository: karlicoss/my, file: my/core/__main__.py
import functools
import importlib
import inspect
import os
import sys
import traceback
from typing import Optional, Sequence, Iterable, List, Type, Any, Callable
from pathlib import Path
from subprocess import check_call, run, PIPE, CompletedProcess
import click
@functools.lru_cache()
def mypy_cmd() -> Optional[Sequence[str]]:
try:
# preferably, use mypy from current python env
import mypy
return [sys.executable, '-m', 'mypy']
except ImportError:
pass
# ok, not ideal but try from PATH
import shutil
if shutil.which('mypy'):
return ['mypy']
warning("mypy not found, so can't check config with it. See https://github.com/python/mypy#readme if you want to install it and retry")
return None
from types import ModuleType
def run_mypy(pkg: ModuleType) -> Optional[CompletedProcess]:
from .preinit import get_mycfg_dir
mycfg_dir = get_mycfg_dir()
# todo ugh. not sure how to extract it from pkg?
# todo dunno maybe use the same mypy config in repository?
# I'd need to install mypy.ini then??
env = {**os.environ}
mpath = env.get('MYPYPATH')
mpath = str(mycfg_dir) + ('' if mpath is None else f':{mpath}')
env['MYPYPATH'] = mpath
cmd = mypy_cmd()
if cmd is None:
return None
mres = run([
*cmd,
'--namespace-packages',
'--color-output', # not sure if works??
'--pretty',
'--show-error-codes',
'--show-error-context',
'--check-untyped-defs',
'-p', pkg.__name__,
], stderr=PIPE, stdout=PIPE, env=env)
return mres
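# A hedged usage sketch, mirroring how config_ok() below calls run_mypy():
#
#   import my.config as cfg
#   mres = run_mypy(cfg)
#   if mres is not None and mres.returncode != 0:   # None means mypy is unavailable
#       sys.stderr.write(mres.stdout.decode('utf8'))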
# use click.echo over print since it handles possible Unicode errors,
# strips colors if the output is a file
# https://click.palletsprojects.com/en/7.x/quickstart/#echoing
def eprint(x: str) -> None:
# err=True prints to stderr
click.echo(x, err=True)
def indent(x: str) -> str:
return ''.join(' ' + l for l in x.splitlines(keepends=True))
OK = '✅'
OFF = '🔲'
def info(x: str) -> None:
eprint(OK + ' ' + x)
def error(x: str) -> None:
eprint('❌ ' + x)
def warning(x: str) -> None:
eprint('❗ ' + x) # todo yellow?
def tb(e: Exception) -> None:
tb = ''.join(traceback.format_exception(Exception, e, e.__traceback__))
sys.stderr.write(indent(tb))
def config_create() -> None:
from .preinit import get_mycfg_dir
mycfg_dir = get_mycfg_dir()
created = False
if not mycfg_dir.exists():
# todo not sure about the layout... should I use my/config.py instead?
my_config = mycfg_dir / 'my' / 'config' / '__init__.py'
my_config.parent.mkdir(parents=True)
my_config.write_text('''
### HPI personal config
## see
# https://github.com/karlicoss/HPI/blob/master/doc/SETUP.org#setting-up-modules
# https://github.com/karlicoss/HPI/blob/master/doc/MODULES.org
## for some help on writing your own config
# to quickly check your config, run:
# hpi config check
# to quickly check a specific module setup, run hpi doctor <module>, e.g.:
# hpi doctor my.reddit.rexport
### useful default imports
from my.core import Paths, PathIsh, get_files
###
# most of your configs will look like this:
class example:
export_path: Paths = '/home/user/data/example_data_dir/'
### you can insert your own configuration below
### but feel free to delete the stuff above if you don't need it
'''.lstrip())
info(f'created empty config: {my_config}')
created = True
else:
error(f"config directory '{mycfg_dir}' already exists, skipping creation")
check_passed = config_ok()
if not created or not check_passed:
sys.exit(1)
# TODO return the config as a result?
def config_ok() -> bool:
errors: List[Exception] = []
import my
try:
paths: List[str] = list(my.__path__) # type: ignore[attr-defined]
except Exception as e:
errors.append(e)
error('failed to determine module import path')
tb(e)
else:
info(f'import order: {paths}')
try:
import my.config as cfg
except Exception as e:
errors.append(e)
error("failed to import the config")
tb(e)
# todo yield exception here? so it doesn't fail immediately..
# I guess it's fairly critical and worth exiting immediately
sys.exit(1)
cfg_path = cfg.__file__  # todo might be better to use __path__?
info(f"config file : {cfg_path}")
import my.core as core
try:
core_pkg_path = str(Path(core.__path__[0]).parent) # type: ignore[attr-defined]
if cfg_path.startswith(core_pkg_path):
error(f'''
Seems that the stub config is used ({cfg_path}). This is likely not going to work.
See https://github.com/karlicoss/HPI/blob/master/doc/SETUP.org#setting-up-modules for more information
'''.strip())
errors.append(RuntimeError('bad config path'))
except Exception as e:
errors.append(e)
tb(e)
# todo for some reason compileall.compile_file always returns true??
try:
cmd = [sys.executable, '-m', 'compileall', str(cfg_path)]
check_call(cmd)
info('syntax check: ' + ' '.join(cmd))
except Exception as e:
errors.append(e)
mres = run_mypy(cfg)
if mres is not None: # has mypy
rc = mres.returncode
if rc == 0:
info('mypy check : success')
else:
error('mypy check: failed')
errors.append(RuntimeError('mypy failed'))
sys.stderr.write(indent(mres.stderr.decode('utf8')))
sys.stderr.write(indent(mres.stdout.decode('utf8')))
if len(errors) > 0:
error(f'config check: {len(errors)} errors')
return False
else:
# note: shouldn't exit here, might run something else
info('config check: success!')
return True
from .util import HPIModule, modules
def _modules(*, all: bool=False) -> Iterable[HPIModule]:
skipped = []
for m in modules():
if not all and m.skip_reason is not None:
skipped.append(m.name)
else:
yield m
if len(skipped) > 0:
warning(f'Skipped {len(skipped)} modules: {skipped}. Pass --all if you want to see them.')
def modules_check(*, verbose: bool, list_all: bool, quick: bool, for_modules: List[str]) -> None:
if len(for_modules) > 0:
# if you're checking specific modules, show errors
# hopefully makes sense?
verbose = True
vw = '' if verbose else '; pass --verbose to print more information'
tabulate_warnings()
import contextlib
from .common import quick_stats
from .util import get_stats, HPIModule
from .stats import guess_stats
from .error import warn_my_config_import_error
mods: Iterable[HPIModule]
if len(for_modules) == 0:
mods = _modules(all=list_all)
else:
mods = [HPIModule(name=m, skip_reason=None) for m in for_modules]
# todo add a --all argument to disregard is_active check?
for mr in mods:
skip = mr.skip_reason
m = mr.name
if skip is not None:
eprint(f'{OFF} {click.style("SKIP", fg="yellow")}: {m:<50} {skip}')
continue
try:
mod = importlib.import_module(m)
except Exception as e:
# todo more specific command?
error(f'{click.style("FAIL", fg="red")}: {m:<50} loading failed{vw}')
# check that this is an import error in particular, not because
# of a ModuleNotFoundError because some dependency wasn't installed
if isinstance(e, (ImportError, AttributeError)):
warn_my_config_import_error(e)
if verbose:
tb(e)
continue
info(f'{click.style("OK", fg="green")} : {m:<50}')
# first try explicitly defined stats function:
stats = get_stats(m)
if stats is None:
# then try guessing.. not sure if should log somehow?
stats = guess_stats(m, quick=quick)
if stats is None:
eprint(" - no 'stats' function, can't check the data")
# todo point to a readme on the module structure or something?
continue
quick_context = quick_stats() if quick else contextlib.nullcontext()
try:
kwargs = {}
if callable(stats) and 'quick' in inspect.signature(stats).parameters:
kwargs['quick'] = quick
with quick_context:
res = stats(**kwargs)
assert res is not None, 'stats() returned None'
except Exception as ee:
warning(f' - {click.style("stats:", fg="red")} computing failed{vw}')
if verbose:
tb(ee)
else:
info(f' - stats: {res}')
def list_modules(*, list_all: bool) -> None:
# todo add a --sort argument?
tabulate_warnings()
for mr in _modules(all=list_all):
m = mr.name
sr = mr.skip_reason
if sr is None:
pre = OK
suf = ''
else:
pre = OFF
suf = f' {click.style(f"[disabled: {sr}]", fg="yellow")}'
click.echo(f'{pre} {m:50}{suf}')
def tabulate_warnings() -> None:
'''
Helper to avoid visual noise in hpi modules/doctor
'''
import warnings
orig = warnings.formatwarning
def override(*args, **kwargs) -> str:
res = orig(*args, **kwargs)
return ''.join(' ' + x for x in res.splitlines(keepends=True))
warnings.formatwarning = override
# TODO loggers as well?
def _requires(module: str) -> Sequence[str]:
from .discovery_pure import module_by_name
mod = module_by_name(module)
# todo handle when module is missing
r = mod.requires
if r is None:
error(f"Module {module} has no REQUIRES specification")
sys.exit(1)
return r
def module_requires(*, module: str) -> None:
rs = [f"'{x}'" for x in _requires(module)]
eprint(f'dependencies of {module}')
for x in rs:
click.echo(x)
def module_install(*, user: bool, module: str) -> None:
# TODO hmm. not sure how it's gonna work -- presumably people use different means of installing...
# how do I install into the 'same' environment??
import shlex
cmd = [
sys.executable, '-m', 'pip', 'install',
*(['--user'] if user else []), # meh
*_requires(module),
]
eprint('Running: ' + ' '.join(map(shlex.quote, cmd)))
check_call(cmd)
def _ui_getchar_pick(choices: Sequence[str], prompt: str = 'Select from: ') -> int:
'''
Basic menu allowing the user to select one of the choices
returns the index the user chose
'''
assert len(choices) > 0, 'Didnt receive any choices to prompt!'
eprint(prompt + '\n')
# prompts like 1,2,3,4,5,6,7,8,9,a,b,c,d,e,f...
chr_offset = ord('a') - 10
# dict from key user can press -> resulting index
result_map = {}
for i, opt in enumerate(choices, 1):
char: str = str(i) if i < 10 else chr(i + chr_offset)
result_map[char] = i - 1
eprint(f'\t{char}. {opt}')
eprint('')
while True:
ch = click.getchar()
if ch not in result_map:
eprint(f'{ch} not in {list(result_map.keys())}')
continue
return result_map[ch]
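# A hedged usage sketch (the module names offered are made up):
#
#   choice = _ui_getchar_pick(
#       ['my.reddit.rexport', 'my.reddit.pushshift'],
#       prompt='Which module did you mean?')
#   # choice is the 0-based index of the option the user pressed.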
def _locate_functions_or_prompt(qualified_names: List[str], prompt: bool = True) -> Iterable[Callable[..., Any]]:
from .query import locate_qualified_function, QueryException
from .stats import is_data_provider
# if not connected to a terminal, cant prompt
if not sys.stdout.isatty():
prompt = False
for qualname in qualified_names:
try:
| |
"""
Polarized millimeter-wave atmospheric emission model
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import numba
import scipy.linalg
from constants import *
from wofz import wofz # Cython module needed to use SciPy function with Numba
@numba.njit
def delta_DeltaM(N, B, M, DeltaM):
"""
Calculates frequency shift of Zeeman components in GHz.
Based on Larsson et al. (2019).
Parameters
----------
N : int
Total rotational angular momentum quantum number, with sign matching change in rotational quantum number
B : float
Magnetic field strength (nT)
M : int
Magnetic quantum number
DeltaM : int
Change in magnetic quantum number
Returns
-------
float
Zeeman frequency shift (GHz)
"""
assert np.abs(N) % 2 == 1
assert np.abs(M) <= np.abs(N)
assert np.abs(DeltaM) <= 1
g_1 = ZEEMAN_COEFF[np.abs(N) - 1, 2]
g_2 = ZEEMAN_COEFF[np.abs(N) - 1, 2 + np.sign(N)]
return -MU_B / H * B * (g_1 * M + g_2 * (M + DeltaM)) * 1e-9 * 1e-9  # first 1e-9 converts B from nT to T, second converts Hz to GHz
@numba.njit("complex128[:,:](int64, float64)")
def rho_DeltaM(DeltaM, theta):
"""
Returns transition matrices for Zeeman transitions.
Parameters
----------
DeltaM : int
Change in magnetic quantum number
theta : float
Angle between line of sight and magnetic field direction (rad)
Returns
-------
np.array
Transition matrix
"""
if DeltaM == -1:
return np.array(
[[1, 1j * np.cos(theta)], [-1j * np.cos(theta), np.cos(theta) ** 2]],
dtype=np.complex128,
)
elif DeltaM == 1:
return np.array(
[[1, -1j * np.cos(theta)], [1j * np.cos(theta), np.cos(theta) ** 2]],
dtype=np.complex128,
)
assert DeltaM == 0
return np.array([[0j, 0j], [0j, np.sin(theta) ** 2]], dtype=np.complex128)
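# Illustrative sketch (not part of the original model): for a line of sight parallel to
# the magnetic field (theta = 0) the pi component (DeltaM = 0) vanishes and the sigma
# components are fully circular, e.g.
#
#   rho_DeltaM(0, 0.0)  -> [[0, 0], [0, 0]]
#   rho_DeltaM(-1, 0.0) -> [[1, 1j], [-1j, 1]]
#
# while for theta = pi/2 all three matrices become real (linear polarization).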
@numba.njit
def P_trans(N, M, DeltaJ, DeltaM):
"""
Calculates transition probability.
Parameters
----------
N : int
Total rotational angular momentum quantum number
M : int
Magnetic quantum number
DeltaJ : int
Change in rotational quantum number
DeltaM : int
Change in magnetic quantum number
Returns
-------
float
Transition probability
"""
# fmt: off
if DeltaJ == 1: # Liebe 1981
if DeltaM == 1:
return 3 * (N + M + 1) * (N + M + 2) / (4 * (N + 1) * (2*N + 1) * (2*N + 3))
elif DeltaM == 0:
return 3 * ((N + 1)**2 - M**2) / ((N + 1) * (2 * N + 1) * (2 * N + 3))
elif DeltaM == -1:
return 3 * (N - M + 1) * (N - M + 2) / (4 * (N + 1) * (2*N + 1) * (2*N + 3))
elif DeltaJ == -1:
if DeltaM == 1:
return 3 * (N + 1) * (N - M) * (N - M - 1) / (4 * N * (2 * N + 1) * (2 * N**2 + N - 1))
elif DeltaM == 0:
return 3 * (N + 1) * (N**2 - M**2) / (N * (2 * N + 1) * (2 * N**2 + N - 1))
elif DeltaM == -1:
return 3 * (N + 1) * (N + M) * (N + M - 1) / (4 * N * (2 * N + 1) * (2 * N**2 + N - 1))
# fmt: on
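# Minimal sanity-check sketch (not part of the original model): for a DeltaJ = +1 line
# the pi components (DeltaM = 0) sum to 1 over M and each sigma branch sums to 1/2,
# so the total line strength is preserved when the line is split into Zeeman components.
def _example_zeeman_normalisation(N=3):
    pi_total = sum(P_trans(N, M, 1, 0) for M in range(-N, N + 1))
    sigma_plus = sum(P_trans(N, M, 1, 1) for M in range(-N, N + 1))
    sigma_minus = sum(P_trans(N, M, 1, -1) for M in range(-N, N + 1))
    return pi_total, sigma_plus, sigma_minus  # approximately (1.0, 0.5, 0.5)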
@numba.njit
def F(T, nu, nu_k, P, i, p_frac_h2o):
"""
Calculates line profile intensity.
Parameters
----------
T : float
Temperature of given atmosphere layer (K)
nu : float
Frequency to evaluate propagation at (GHz)
nu_k : float
Frequency of emission line (GHz)
P : float
Pressure (mbar)
i : int
Line number index.
p_frac_h2o : float
Pressure fraction of water vapor [0, 1]
Returns
-------
float
Line profile intensity (GHz)
"""
p_frac_air = 1 - p_frac_h2o
# Makarov et al. (2011)
y_l = (O2_PARAMS[i, 1] + O2_PARAMS[i, 2] * (300 / T - 1)) * (
300 / T
) ** 0.8 # bar^-1
g_l = (O2_PARAMS[i, 3] + O2_PARAMS[i, 4] * (300 / T - 1)) * (
300 / T
) ** 1.6 # bar^-2
deltanu_l = (O2_PARAMS[i, 5] + O2_PARAMS[i, 6] * (300 / T - 1)) * (
300 / T
) ** 1.6 # GHz bar^-2
N = int(np.abs(O2_PARAMS[i, 0])) # Line number (unitless)
# M.A. Koshelev et al. (2016), Eq. (3) & Table 3
gamma_air = 1.132150 + 1.348131 / (
1 + 0.18844 * N - 0.007720 * N ** 2 + 1.660877e-5 * N ** 4
) # MHz / Torr
# M.A. Koshelev et al. (2015), Eq. (1) & Table 2
gamma_h2o = 1.140129 + 1.528118 / (
1 + 0.10118 * N - 4.78115e-3 * N ** 2 + 7.40233e-6 * N ** 4
) # MHz / Torr
# M.A. Koshelev et al. (2016), Eq. (1) & Table 1
dnc = gamma_air / 1000 * TORR2MBAR * (P * p_frac_air) * (296 / T) ** 0.75412 # GHz
# Reuse previous temperature dependence due to lack of better value
dnc += gamma_h2o / 1000 * TORR2MBAR * (P * p_frac_h2o) * (296 / T) ** 0.75412 # GHz
# Varghese & Hanson (1984), https://doi.org/10.1364/AO.23.002376
# Herbert (1974), https://doi.org/10.1016/0022-4073(74)90021-1
mol_mass = O2_MOL_MASS / MOL / 1e3 # kg
doppler_half_width = (
np.sqrt(2 * K_B * T / mol_mass) * nu_k / C
) # (1 / e) Doppler half width (GHz)
# Combine Larsson (2014) method that adds line mixing with Melsheimer (2005) method that approximates VVW
nu_prime = (
nu - nu_k - deltanu_l * (P / 1000) ** 2
) / doppler_half_width # Unitless
a = dnc / doppler_half_width # Unitless
z1 = nu_prime + 1j * a # Unitless
nu_prime = (
nu + nu_k + deltanu_l * (P / 1000) ** 2
) / doppler_half_width # Unitless
z2 = nu_prime + 1j * a # Unitless
return (
(nu / nu_k) ** 2
/ (doppler_half_width * np.sqrt(np.pi))
* (
(1 + g_l * (P / 1000) ** 2 - 1j * y_l * (P / 1000)) * wofz(z1)
+ (1 + g_l * (P / 1000) ** 2 + 1j * y_l * (P / 1000)) * wofz(z2)
)
) # GHz
@numba.jitclass(
[
("B", numba.int32),
("atm_profile", numba.float64[:, ::1]),
("min_altitude", numba.float64),
("max_altitude", numba.float64),
("relative_humidity", numba.float64),
("layer_z", numba.float64),
]
)
class AtmSim(object):
def __init__(
self,
B,
atm_profile,
min_altitude,
max_altitude=100000,
relative_humidity=0.1,
layer_z=0.2,
):
"""
Initializes atmospheric simulator.
Parameters
----------
B : int32
Magnetic field strength (nT)
atm_profile : float64[:, ::1]
Atmosphere profile [Altitude (m), Temperature (K), Pressure (mbar), Temperature stddev (K), Pressure stddev (mbar)]
min_altitude : float64
Minimum simulation altitude (m)
max_altitude : float64
Maximum simulation altitude (m)
relative_humidity : float64
Relative humidity fraction [0, 1]
layer_z : float64
Height of atmosphere layers to simulate (km)
"""
self.B = B
self.atm_profile = atm_profile
self.min_altitude = min_altitude
self.max_altitude = max_altitude
self.relative_humidity = relative_humidity
self.layer_z = layer_z
def calc_propagation_matrix(self, nu, P, T, theta):
"""
Calculates propagation matrix.
Parameters
----------
nu : float
Frequency to evaluate propagation at (GHz)
P : float
Pressure (mbar)
T : float
Temperature of given atmosphere layer (K)
theta : float
Angle between line of sight and magnetic field direction (radian)
Returns
-------
np.array
Propagation matrix (Np km^-1)
"""
# Pressure
import numpy as np
from ..internals import _Validator
__all__ = ['Transform', 'Custom', 'TrimConstants', 'MinMaxScale', 'Center', 'Standardize', 'Downsample', 'Filter']
class Transform:
"""Base class representing a single transformation."""
def __init__(self):
self._val = _Validator()
self._name = self.__class__.__name__
def __call__(self, X, validate=True):
"""Applies the transformation to the observation sequence(s).
Parameters
----------
X: numpy.ndarray (float) or list of numpy.ndarray (float)
An individual observation sequence or a list of multiple observation sequences.
validate: bool
Whether or not to validate the input sequences.
Returns
-------
transformed: :class:`numpy:numpy.ndarray` (float) or list of :class:`numpy:numpy.ndarray` (float)
The transformed input observation sequence(s).
"""
if self._val.is_boolean(validate, 'validate'):
X = self._val.is_observation_sequences(X, allow_single=True)
if self.is_fitted():
return self._apply(X)
try:
    self.fit(X, validate=validate)
    return self._apply(X)
finally:
    self.unfit()
def transform(self, x):
"""Applies the transformation to a single observation sequence.
Parameters
----------
x: numpy.ndarray (float)
An individual observation sequence.
Returns
-------
transformed: :class:`numpy:numpy.ndarray` (float)
The transformed input observation sequence.
"""
raise NotImplementedError
def fit(self, X, validate=True):
"""Fit the transformation on the provided observation sequence(s) (without transforming them).
Parameters
----------
X: numpy.ndarray (float) or list of numpy.ndarray (float)
An individual observation sequence or a list of multiple observation sequences.
validate: bool
Whether or not to validate the input sequences.
"""
self._val.is_boolean(validate, 'validate')
def fit_transform(self, X, validate=True):
"""Fit the transformation with the provided observation sequence(s) and transform them.
Parameters
----------
X: numpy.ndarray (float) or list of numpy.ndarray (float)
An individual observation sequence or a list of multiple observation sequences.
validate: bool
Whether or not to validate the input sequences.
Returns
-------
transformed: :class:`numpy:numpy.ndarray` (float) or list of :class:`numpy:numpy.ndarray` (float)
The transformed input observation sequence(s).
"""
self.fit(X, validate=validate)
return self.__call__(X, validate=validate)
def is_fitted(self):
"""Check whether or not the transformation is fitted on some observation sequence(s).
Returns
-------
fitted: bool
Whether or not the transformation is fitted.
"""
return False
def unfit(self):
"""Unfit the transformation by resetting the parameters to their default settings."""
pass
def _apply(self, X):
"""Applies the transformation to the observation sequence(s) (internal).
Parameters
----------
X: numpy.ndarray (float) or list of numpy.ndarray (float)
An individual observation sequence or a list of multiple observation sequences.
Returns
-------
transformed: :class:`numpy:numpy.ndarray` (float) or list of :class:`numpy:numpy.ndarray` (float)
The transformed input observation sequence(s).
"""
return self.transform(X) if isinstance(X, np.ndarray) else [self.transform(x) for x in X]
def __str__(self):
"""Description of the transformation.
Returns
-------
description: str
The description of the transformation.
"""
raise NotImplementedError
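# A minimal sketch of a concrete subclass (hypothetical, for illustration only): a
# stateless transformation only needs to override transform() and __str__(); fitting,
# validation and list handling are inherited from Transform.
class _Absolute(Transform):
    def transform(self, x):
        return np.abs(x)
    def __str__(self):
        return 'Element-wise absolute value'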
class Custom(Transform):
"""Apply a custom transformation to the input observation sequence(s).
Parameters
----------
func: callable
A lambda or function that specifies the transformation that should be applied to a **single** observation sequence.
name: str
Name of the transformation.
desc: str
Description of the transformation.
Examples
--------
>>> # Create some sample data
>>> X = [np.random.random((10 * i, 3)) for i in range(1, 4)]
>>> # Apply a custom transformation
>>> X = Custom(lambda x: x**2, name='Square', desc='Square observations element-wise')(X)
"""
def __init__(self, func, name=None, desc=None):
super().__init__()
self.transform = self._val.is_func(func, 'transformation')
self._name = 'Custom' + ('' if name is None else ' ({})'.format(self._val.is_string(name, 'name')))
self._desc = 'Apply a custom transformation' if desc is None else self._val.is_string(desc, 'description')
def __str__(self):
return self._desc
class TrimConstants(Transform):
"""Trim constant observations from the input observation sequence(s).
Parameters
----------
constant: float
The constant value.
Examples
--------
>>> # Create some sample data
>>> z = np.zeros((4, 3))
>>> x = lambda i: np.vstack((z, np.random.random((10 * i, 3)), z))
>>> X = [x(i) for i in range(1, 4)]
>>> # Trim the data
>>> X = TrimConstants()(X)
"""
def __init__(self, constant=0):
super().__init__()
try:
    self.constant = float(constant)
except (TypeError, ValueError):
    raise TypeError('Expected constant to be a float or convertible to a float')
def transform(self, x):
return x[~np.all(x == self.constant, axis=1)]
def __str__(self):
return 'Remove constant observations (={:.3})'.format(self.constant)
class MinMaxScale(Transform):
"""Scales the observation sequence features to each be within a provided range.
Parameters
----------
scale: tuple(int/float, int/float)
The range of the transformed observation sequence features.
independent: bool
Whether to independently compute the minimum and maximum to scale each observation sequence.
"""
def __init__(self, scale=(0., 1.), independent=True):
super().__init__()
if not isinstance(scale, tuple):
raise TypeError('Expected scaling range to be a tuple')
if not all(isinstance(val, (int, float)) for val in scale):
raise TypeError('Expected upper and lower bounds of scaling range to be floats')
if not scale[0] < scale[1]:
raise ValueError('Expected lower bound of scaling range to be less than the upper bound')
self.scale = scale
self.independent = self._val.is_boolean(independent, 'independent')
self._type = (_MinMaxScaleIndependent if independent else _MinMaxScaleAll)(scale)
def fit(self, X, validate=True):
super().fit(X, validate=validate)
self._type.fit(X, validate=validate)
def transform(self, x):
return self._type.transform(x)
def is_fitted(self):
return self._type.is_fitted()
def unfit(self):
self._type.unfit()
def __str__(self):
return str(self._type)
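# Minimal usage sketch, in the same doctest style as the other transforms:
#
#     >>> X = [np.random.random((10 * i, 3)) for i in range(1, 4)]
#     >>> X = MinMaxScale(scale=(-1., 1.), independent=False)(X)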
class _MinMaxScaleAll(Transform):
def __init__(self, scale):
super().__init__()
self.scale = scale
self.unfit()
def fit(self, X, validate):
if validate:
X = self._val.is_observation_sequences(X, allow_single=True)
if isinstance(X, list):
X = np.vstack(X)
self.min, self.max = X.min(axis=0), X.max(axis=0)
def transform(self, x):
min_, max_ = self.scale
scale = (max_ - min_) / (self.max - self.min)
return scale * x + min_ - self.min * scale
def is_fitted(self):
return (self.min is not None) and (self.max is not None)
def unfit(self):
self.min, self.max = None, None
def __str__(self):
return 'Min-max scaling into range {} (all)'.format(self.scale)
class _MinMaxScaleIndependent(Transform):
def __init__(self, scale):
super().__init__()
self.scale = scale
def transform(self, x):
min_, max_ = self.scale
scale = (max_ - min_) / (x.max(axis=0) - x.min(axis=0))
return scale * x + min_ - x.min(axis=0) * scale
def __str__(self):
return 'Min-max scaling into range {} (independent)'.format(self.scale)
class Center(Transform):
"""Centers the observation sequence features around their means. Results in zero-mean features.
Parameters
----------
independent: bool
Whether to independently compute the mean to scale each observation sequence.
Examples
--------
>>> # Create some sample data
>>> X = [np.random.random((10 * i, 3)) for i in range(1, 4)]
>>> # Center the data
>>> X = Center()(X)
"""
def __init__(self, independent=True):
super().__init__()
self.independent = self._val.is_boolean(independent, 'independent')
self._type = (_CenterIndependent if independent else _CenterAll)()
def fit(self, X, validate=True):
super().fit(X, validate=validate)
self._type.fit(X, validate=validate)
def transform(self, x):
return self._type.transform(x)
def is_fitted(self):
return self._type.is_fitted()
def unfit(self):
self._type.unfit()
def __str__(self):
return str(self._type)
class _CenterAll(Transform):
def __init__(self):
super().__init__()
self.unfit()
def fit(self, X, validate):
if validate:
X = self._val.is_observation_sequences(X, allow_single=True)
if isinstance(X, list):
X = np.vstack(X)
self.mean = X.mean(axis=0)
def transform(self, x):
return x - self.mean
def is_fitted(self):
return self.mean is not None
def unfit(self):
self.mean = None
def __str__(self):
return 'Centering around mean (zero mean) (all)'
class _CenterIndependent(Transform):
def transform(self, x):
return x - x.mean(axis=0)
def __str__(self):
return 'Centering around mean (zero mean) (independent)'
class Standardize(Transform):
"""Centers the observation sequence features around their means, then scales them by their deviations.
Results in zero-mean, unit-variance features.
Parameters
----------
independent: bool
Whether to independently compute the mean and standard deviation to scale each observation sequence.
Examples
--------
>>> # Create some sample data
>>> X = [np.random.random((10 * i, 3)) for i in range(1, 4)]
>>> # Standardize the data
>>> X = Standardize()(X)
"""
def __init__(self, independent=True):
super().__init__()
self.independent = self._val.is_boolean(independent, 'independent')
self._type = (_StandardizeIndependent if independent else _StandardizeAll)()
def fit(self, X, validate=True):
super().fit(X, validate=validate)
self._type.fit(X, validate=validate)
def transform(self, x):
return self._type.transform(x)
def is_fitted(self):
return self._type.is_fitted()
def unfit(self):
self._type.unfit()
def __str__(self):
return str(self._type)
class _StandardizeAll(Transform):
def __init__(self):
super().__init__()
self.unfit()
def fit(self, X, validate):
if validate:
X = self._val.is_observation_sequences(X, allow_single=True)
if isinstance(X, list):
X = np.vstack(X)
self.mean, self.std = X.mean(axis=0), X.std(axis=0)
def transform(self, x):
return (x - self.mean) / self.std
def is_fitted(self):
return (self.mean is not None) and (self.std is not None)
def unfit(self):
self.mean, self.std = None, None
def __str__(self):
return 'Standard scaling (zero mean, unit variance) (all)'
class _StandardizeIndependent(Transform):
def transform(self, x):
return (x - x.mean(axis=0)) / x.std(axis=0)
def __str__(self):
return 'Standard scaling (zero mean, unit variance) (independent)'
class Downsample(Transform):
"""Downsamples an observation sequence (or multiple sequences) by either:
- Decimating the next :math:`n-1` observations
- Averaging the current observation with the next :math:`n-1` observations
Parameters
----------
factor: int > 0
Downsample factor.
method: {'decimate', 'mean'}
The downsampling method.
Examples
--------
>>> # Create some sample data
>>> X = [np.random.random((10 * i, 3)) for i in range(1, 4)]
>>> # Downsample the data
# the piecewise cost linearization breakpoints.
# the time index is redundant, but required. in the Piecewise construct, the breakpoints must be indexed the
# same as the Piecewise construct itself.
model.PowerGenerationPiecewisePoints = {}
model.PowerGenerationPiecewiseValues = {}
def power_generation_piecewise_points_rule(m, g, t):
if len(m.CostPiecewisePoints[g]) > 0:
m.PowerGenerationPiecewisePoints[g,t] = list(m.CostPiecewisePoints[g])
temp = list(m.CostPiecewiseValues[g])
m.PowerGenerationPiecewiseValues[g,t] = {}
for i in xrange(len(m.CostPiecewisePoints[g])):
m.PowerGenerationPiecewiseValues[g,t][m.PowerGenerationPiecewisePoints[g,t][i]] = temp[i] - m.MinimumProductionCost[g]
# MinimumPowerOutput will be one of our piecewise points, so it is safe to add (0,0)
if m.PowerGenerationPiecewisePoints[g,t][0] != 0:
m.PowerGenerationPiecewisePoints[g,t].insert(0,0)
m.PowerGenerationPiecewiseValues[g,t][0] = 0
elif value(m.ProductionCostA2[g]) == 0:
# If cost is linear, we only need two points -- (0,CostA0-MinCost) and (MaxOutput, MaxCost)
m.PowerGenerationPiecewisePoints[g, t] = [0, value(m.MaximumPowerOutput[g])]
m.PowerGenerationPiecewiseValues[g,t] = {}
m.PowerGenerationPiecewiseValues[g,t][0] = value(m.ProductionCostA0[g] - m.MinimumProductionCost[g])
m.PowerGenerationPiecewiseValues[g,t][m.PowerGenerationPiecewisePoints[g,t][1]] = \
value(m.ProductionCostA0[g]) + \
value(m.ProductionCostA1[g]) * m.PowerGenerationPiecewisePoints[g, t][1] \
- value(m.MinimumProductionCost[g])
else:
min_power = value(m.MinimumPowerOutput[g])
max_power = value(m.MaximumPowerOutput[g])
n = value(m.NumGeneratorCostCurvePieces)
width = (max_power - min_power) / float(n)
if width == 0:
m.PowerGenerationPiecewisePoints[g, t] = [min_power]
else:
m.PowerGenerationPiecewisePoints[g, t] = [min_power + i*width for i in xrange(0,n+1)]
m.PowerGenerationPiecewiseValues[g,t] = {}
for i in xrange(n+1):
m.PowerGenerationPiecewiseValues[g,t][m.PowerGenerationPiecewisePoints[g,t][i]] = \
value(m.ProductionCostA0[g]) + \
value(m.ProductionCostA1[g]) * m.PowerGenerationPiecewisePoints[g, t][i] + \
value(m.ProductionCostA2[g]) * m.PowerGenerationPiecewisePoints[g, t][i]**2 \
- value(m.MinimumProductionCost[g])
if m.PowerGenerationPiecewisePoints[g, t][0] != 0:
m.PowerGenerationPiecewisePoints[g, t].insert(0,0)
m.PowerGenerationPiecewiseValues[g, t][0] = 0
model.CreatePowerGenerationPiecewisePoints = BuildAction(model.ThermalGenerators * model.TimePeriods, rule=power_generation_piecewise_points_rule)
# a function for use in piecewise linearization of the cost function.
def production_cost_function(m, g, t, x):
return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g]
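# Hypothetical sketch (not part of this fragment) of how the breakpoints and the
# function above are typically consumed, via Pyomo's Piecewise component; the component
# name and constraint type below are assumptions for illustration only:
#
#   model.ComputeProductionCosts = Piecewise(
#       model.ThermalGenerators * model.TimePeriods,
#       model.ProductionCost, model.PowerGenerated,
#       pw_pts=model.PowerGenerationPiecewisePoints,
#       f_rule=production_cost_function,
#       pw_constr_type='LB')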
###############################################
# startup cost parameters for each generator. #
###############################################
#CSH_j
model.ColdStartHours = Param(model.ThermalGenerators, within=NonNegativeIntegers, default=0) # units are hours.
#HSC_j
model.HotStartCost = Param(model.ThermalGenerators, within=NonNegativeReals, default=0.0) # units are $.
#CSC_j
model.ColdStartCost = Param(model.ThermalGenerators, within=NonNegativeReals, default=0.0) # units are $.
##################################################################################
# shutdown cost for each generator. in the literature, these are often set to 0. #
##################################################################################
model.ShutdownCostCoefficient = Param(model.ThermalGenerators, within=NonNegativeReals, default=0.0) # units are $.
#
# STORAGE parameters
#
# \cal{S}
model.Storage = Set()
# \cal{S}(b} \subseteq \cal{S}
model.StorageAtBus = Set(model.Buses, initialize=Set())
####################################################################################
# minimum and maximum power ratings, for each storage unit. units are MW. #
# could easily be specified on a per-time period basis, but are not currently. #
####################################################################################
# Storage power output >0 when discharging
#\underbar{POS}_s
model.MinimumPowerOutputStorage = Param(model.Storage, within=NonNegativeReals, default=0.0)
def maximum_power_output_validator_storage(m, v, s):
return v >= value(m.MinimumPowerOutputStorage[s])
#\overbar{POS}_s
model.MaximumPowerOutputStorage = Param(model.Storage, within=NonNegativeReals, validate=maximum_power_output_validator_storage, default=0.0)
#Storage power input >0 when charging
#\underbar{PIS}_s
model.MinimumPowerInputStorage = Param(model.Storage, within=NonNegativeReals, default=0.0)
def maximum_power_input_validator_storage(m, v, s):
return v >= value(m.MinimumPowerInputStorage[s])
#\overbar{PIS}_s
model.MaximumPowerInputStorage = Param(model.Storage, within=NonNegativeReals, validate=maximum_power_input_validator_storage, default=0.0)
###############################################
# storage ramp up/down rates. units are MW/h. #
###############################################
# ramp rate limits when discharging
#NRUOS_s
model.NominalRampUpLimitStorageOutput = Param(model.Storage, within=NonNegativeReals)
#NRDOS_s
model.NominalRampDownLimitStorageOutput = Param(model.Storage, within=NonNegativeReals)
# ramp rate limits when charging
#NRUIS_s
model.NominalRampUpLimitStorageInput = Param(model.Storage, within=NonNegativeReals)
#NRDIS_s
model.NominalRampDownLimitStorageInput = Param(model.Storage, within=NonNegativeReals)
def scale_storage_ramp_up_out(m, s):
return m.NominalRampUpLimitStorageOutput[s] * m.TimePeriodLength
model.ScaledNominalRampUpLimitStorageOutput = Param(model.Storage, within=NonNegativeReals, initialize=scale_storage_ramp_up_out)
def scale_storage_ramp_down_out(m, s):
return m.NominalRampDownLimitStorageOutput[s] * m.TimePeriodLength
model.ScaledNominalRampDownLimitStorageOutput = Param(model.Storage, within=NonNegativeReals, initialize=scale_storage_ramp_down_out)
def scale_storage_ramp_up_in(m, s):
return m.NominalRampUpLimitStorageInput[s] * m.TimePeriodLength
model.ScaledNominalRampUpLimitStorageInput = Param(model.Storage, within=NonNegativeReals, initialize=scale_storage_ramp_up_in)
def scale_storage_ramp_down_in(m, s):
return m.NominalRampDownLimitStorageInput[s] * m.TimePeriodLength
model.ScaledNominalRampDownLimitStorageInput = Param(model.Storage, within=NonNegativeReals, initialize=scale_storage_ramp_down_in)
####################################################################################
# minimum state of charge (SOC) and maximum energy ratings, for each storage unit. #
# units are MWh for energy rating and p.u. (i.e. [0,1]) for SOC #
####################################################################################
# you enter storage energy ratings once for each storage unit
#\overbar{ES}_s
model.MaximumEnergyStorage = Param(model.Storage, within=NonNegativeReals, default=0.0)
#\underbar{SOC}_s
model.MinimumSocStorage = Param(model.Storage, within=PercentFraction, default=0.0)
################################################################################
# round trip efficiency for each storage unit given as a fraction (i.e. [0,1]) #
################################################################################
#\eta_s
model.EfficiencyEnergyStorage = Param(model.Storage, within=PercentFraction, default=1.0)
########################################################################
# end-point SOC for each storage unit. units are in p.u. (i.e. [0,1]) #
########################################################################
# end-point values are the SOC targets at the final time period. With no end-point constraints
# storage units will always be empty at the final time period.
#EPSOC_s
model.EndPointSocStorage = Param(model.Storage, within=PercentFraction, default=0.5)
############################################################
# storage initial conditions: SOC, power output and input #
############################################################
def t0_storage_power_input_validator(m, v, s):
return (v >= value(m.MinimumPowerInputStorage[s])) and (v <= value(m.MaximumPowerInputStorage[s]))
def t0_storage_power_output_validator(m, v, s):
    return (v >= value(m.MinimumPowerOutputStorage[s])) and (v <= value(m.MaximumPowerOutputStorage[s]))
#\overbar{x}_s(0)
model.StoragePowerOutputOnT0 = Param(model.Storage, within=NonNegativeIntegers, validate=t0_storage_power_output_validator, default=0)
#\underbar{x}_s(0)
model.StoragePowerInputOnT0 = Param(model.Storage, within=NonNegativeIntegers, validate=t0_storage_power_input_validator, default=0)
#SOC_S(0)
model.StorageSocOnT0 = Param(model.Storage, within=PercentFraction, default=0.5)
#########################################
# penalty costs for constraint violation #
#########################################
BigPenalty = 1e6
#\Lambda
model.LoadMismatchPenalty = Param(within=NonNegativeReals, default=BigPenalty)
#
# Variables
#
# Total demand for reserve requirement
model.TotalDemand = Var(model.TimePeriods, within=NonNegativeReals)
def calculate_total_demand(m, t):
return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses)
model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand)
######################
# decision variables #
######################
# indicator variables for each generator, at each time period.
model.UnitOn = Var(model.ThermalGenerators, model.TimePeriods, within=Binary, initialize=1)
# amount of power produced by each generator, at each time period.
def power_bounds_rule(m, g, t):
return (0, m.MaximumPowerOutput[g])
model.PowerGenerated = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals, bounds=power_bounds_rule)
# amount of power flowing along each line, at each time period
def line_power_bounds_rule(m, l, t):
return (-m.ThermalLimit[l], m.ThermalLimit[l])
model.LinePower = Var(model.TransmissionLines, model.TimePeriods, bounds=line_power_bounds_rule)
# assume wind can be curtailed, then wind power is a decision variable
def nd_bounds_rule(m,n,t):
return (m.MinNondispatchablePower[n,t], m.MaxNondispatchablePower[n,t])
model.NondispatchablePowerUsed = Var(model.NondispatchableGenerators, model.TimePeriods, within=NonNegativeReals, bounds=nd_bounds_rule)
# maximum power output for each generator, at each time period.
model.MaximumPowerAvailable = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)
# voltage angles at the buses (S) (lock the first bus at 0) in radians
model.Angle = Var(model.Buses, model.TimePeriods, within=Reals, bounds=(-3.14159265,3.14159265))
def fix_first_angle_rule(m,t):
return m.Angle[m.Buses[1],t] == 0.0
model.FixFirstAngle = Constraint(model.TimePeriods, rule=fix_first_angle_rule)
##############################
# Storage decision variables #
##############################
# binary variables for storage (input/output are semicontinuous)
model.OutputStorage = Var(model.Storage, model.TimePeriods, within=Binary)
model.InputStorage = Var(model.Storage, model.TimePeriods, within=Binary)
# amount of output power of each storage unit, at each time period
def power_output_storage_bounds_rule(m, s, t):
return (0, m.MaximumPowerOutputStorage[s])
model.PowerOutputStorage = Var(model.Storage, model.TimePeriods, within=NonNegativeReals, bounds=power_output_storage_bounds_rule)
# amount of input power of each storage unit, at each time period
def power_input_storage_bounds_rule(m, s, t):
return (0, m.MaximumPowerInputStorage[s])
model.PowerInputStorage = Var(model.Storage, model.TimePeriods, within=NonNegativeReals, bounds=power_input_storage_bounds_rule)
# state of charge of each storage unit, at each time period
model.SocStorage = Var(model.Storage, model.TimePeriods, within=PercentFraction)
###################
# cost components #
###################
# production cost associated with each generator, for each time period.
model.ProductionCost = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)
# startup and shutdown costs for each generator, each time period.
model.StartupCost = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)
model.ShutdownCost = Var(model.ThermalGenerators, model.TimePeriods, within=NonNegativeReals)
# (implicit) binary denoting whether starting up a generator will cost HotStartCost or ColdStartCost
model.HotStart = Var(model.ThermalGenerators, model.TimePeriods, bounds=(0,1))
# cost over all generators, for all time periods.
"""model.TotalProductionCost = Var(within=NonNegativeReals)"""
# all other overhead / fixed costs, e.g., associated with startup and shutdown.
"""model.TotalFixedCost = Var(within=NonNegativeReals)"""
############################################### KICK THAT OUT ?
#####################################################
# load "shedding" can be both positive and negative #
#####################################################
model.LoadGenerateMismatch = Var(model.Buses, model.TimePeriods, within = Reals, initialize=0)
model.posLoadGenerateMismatch = Var(model.Buses, model.TimePeriods, within = NonNegativeReals, initialize=0)
model.negLoadGenerateMismatch = Var(model.Buses, model.TimePeriods, within = NonNegativeReals, initialize=0)
model.GlobalLoadGenerateMismatch = Var(model.TimePeriods, within = Reals, initialize=0)
model.posGlobalLoadGenerateMismatch = Var(model.TimePeriods, within = NonNegativeReals, initialize=0)
model.negGlobalLoadGenerateMismatch = Var(model.TimePeriods, within = NonNegativeReals, initialize=0)
# the following constraints are necessary, at least in the case of CPLEX 12.4, to prevent
# load generation mismatch component values in the range of *negative* e-5 from appearing.
# such small negative values cause the optimal objective to become a very large negative
# number, due to the obviously large penalty values for under- or over-generation. JPW would
# call this a heuristic at this point, but it does seem to work broadly. we tried a single
# global constraint, across all buses, but that failed to correct the problem, and caused
# the solve times to explode.
def pos_load_generate_mismatch_tolerance_rule(m, b):
return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0
model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule)
def neg_load_generate_mismatch_tolerance_rule(m, b):
return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0
model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule)
#################################################
# per-stage cost variables - necessary for PySP #
#################################################
"""model.FirstStageCost = Var(within=NonNegativeReals)
model.SecondStageCost = Var(within=NonNegativeReals)"""
######################################################### KICK THAT OUT ?
#
# Constraints
#
def line_power_rule(m, l, t):
return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t])
model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule)
# Power balance at each node (S)
def power_balance(m, b, t):
# bus b, time t (S)
return sum(m.PowerGenerated[g, t] for g in m.ThermalGeneratorsAtBus[b]) \
+ sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b])\
- sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b])\
+ sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b]) \
+ sum(m.LinePower[l,t] for l in m.LinesTo[b]) \
- sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \
+ m.LoadGenerateMismatch[b,t] \
== m.Demand[b, t]
model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=power_balance)
# give meaning to the positive and negative parts of the mismatch
def posneg_rule(m, b, t):
return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t]
model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule)
def global_posneg_rule(m, t):
return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t]
model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule)
# ensure there is sufficient maximal power output available to meet both the
# demand and the spinning reserve requirements in each time period.
# encodes Constraint 3 in Carrion and Arroyo.
def enforce_reserve_requirements_rule(m, t):
return sum(m.MaximumPowerAvailable[g, t] for g in m.ThermalGenerators) \
+ sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators) \
+ sum(m.PowerOutputStorage[s,t] for s in m.Storage) \
== \
m.TotalDemand[t] + m.ReserveRequirement[t] + m.GlobalLoadGenerateMismatch[t]
model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=enforce_reserve_requirements_rule)
############################################
# generation limit and
length 1."""
return 1
def __str__(self):
return "OP2 Dat: %s on (%s) with datatype %s" \
% (self._name, self._dataset, self.dtype.name)
def __repr__(self):
return "Dat(%r, None, %r, %r)" \
% (self._dataset, self.dtype, self._name)
def _check_shape(self, other):
if other.dataset.dim != self.dataset.dim:
raise ValueError('Mismatched shapes in operands %s and %s' %
                 (self.dataset.dim, other.dataset.dim))
def _op(self, other, op):
ops = {operator.add: ast.Sum,
operator.sub: ast.Sub,
operator.mul: ast.Prod,
operator.truediv: ast.Div}
ret = _make_object('Dat', self.dataset, None, self.dtype)
name = "binop_%s" % op.__name__
if np.isscalar(other):
other = _make_object('Global', 1, data=other)
k = ast.FunDecl("void", name,
[ast.Decl(self.ctype, ast.Symbol("self"),
qualifiers=["const"], pointers=[""]),
ast.Decl(other.ctype, ast.Symbol("other"),
qualifiers=["const"], pointers=[""]),
ast.Decl(self.ctype, ast.Symbol("ret"), pointers=[""])],
ast.c_for("n", self.cdim,
ast.Assign(ast.Symbol("ret", ("n", )),
ops[op](ast.Symbol("self", ("n", )),
ast.Symbol("other", ("0", )))),
pragma=None))
k = _make_object('Kernel', k, name)
else:
self._check_shape(other)
k = ast.FunDecl("void", name,
[ast.Decl(self.ctype, ast.Symbol("self"),
qualifiers=["const"], pointers=[""]),
ast.Decl(other.ctype, ast.Symbol("other"),
qualifiers=["const"], pointers=[""]),
ast.Decl(self.ctype, ast.Symbol("ret"), pointers=[""])],
ast.c_for("n", self.cdim,
ast.Assign(ast.Symbol("ret", ("n", )),
ops[op](ast.Symbol("self", ("n", )),
ast.Symbol("other", ("n", )))),
pragma=None))
k = _make_object('Kernel', k, name)
par_loop(k, self.dataset.set, self(READ), other(READ), ret(WRITE))
return ret
def _iop(self, other, op):
ops = {operator.iadd: ast.Incr,
operator.isub: ast.Decr,
operator.imul: ast.IMul,
operator.itruediv: ast.IDiv}
name = "iop_%s" % op.__name__
if np.isscalar(other):
other = _make_object('Global', 1, data=other)
k = ast.FunDecl("void", name,
[ast.Decl(self.ctype, ast.Symbol("self"), pointers=[""]),
ast.Decl(other.ctype, ast.Symbol("other"),
qualifiers=["const"], pointers=[""])],
ast.c_for("n", self.cdim,
ops[op](ast.Symbol("self", ("n", )),
ast.Symbol("other", ("0", ))),
pragma=None))
k = _make_object('Kernel', k, name)
else:
self._check_shape(other)
quals = ["const"] if self is not other else []
k = ast.FunDecl("void", name,
[ast.Decl(self.ctype, ast.Symbol("self"), pointers=[""]),
ast.Decl(other.ctype, ast.Symbol("other"),
qualifiers=quals, pointers=[""])],
ast.c_for("n", self.cdim,
ops[op](ast.Symbol("self", ("n", )),
ast.Symbol("other", ("n", ))),
pragma=None))
k = _make_object('Kernel', k, name)
par_loop(k, self.dataset.set, self(INC), other(READ))
return self
def _uop(self, op):
ops = {operator.sub: ast.Neg}
name = "uop_%s" % op.__name__
k = ast.FunDecl("void", name,
[ast.Decl(self.ctype, ast.Symbol("self"), pointers=[""])],
ast.c_for("n", self.cdim,
ast.Assign(ast.Symbol("self", ("n", )),
ops[op](ast.Symbol("self", ("n", )))),
pragma=None))
k = _make_object('Kernel', k, name)
par_loop(k, self.dataset.set, self(RW))
return self
def inner(self, other):
"""Compute the l2 inner product of the flattened :class:`Dat`
:arg other: the other :class:`Dat` to compute the inner
product against.
"""
self._check_shape(other)
ret = _make_object('Global', 1, data=0, dtype=self.dtype)
k = ast.FunDecl("void", "inner",
[ast.Decl(self.ctype, ast.Symbol("self"),
qualifiers=["const"], pointers=[""]),
ast.Decl(other.ctype, ast.Symbol("other"),
qualifiers=["const"], pointers=[""]),
ast.Decl(self.ctype, ast.Symbol("ret"), pointers=[""])],
ast.c_for("n", self.cdim,
ast.Incr(ast.Symbol("ret", (0, )),
ast.Prod(ast.Symbol("self", ("n", )),
ast.Symbol("other", ("n", )))),
pragma=None))
k = _make_object('Kernel', k, "inner")
par_loop(k, self.dataset.set, self(READ), other(READ), ret(INC))
return ret.data_ro[0]
@property
def norm(self):
"""Compute the l2 norm of this :class:`Dat`
.. note::
This acts on the flattened data (see also :meth:`inner`)."""
from math import sqrt
return sqrt(self.inner(self))
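# Minimal usage sketch (hypothetical sizes; `op2` denotes this module's public API):
#
#     s = op2.Set(10)
#     d = op2.DataSet(s, 1)
#     x = op2.Dat(d, [1.0] * 10)
#     x.inner(x)   # -> 10.0
#     x.norm       # -> sqrt(10)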
def __pos__(self):
pos = _make_object('Dat', self)
return pos
def __add__(self, other):
"""Pointwise addition of fields."""
return self._op(other, operator.add)
def __radd__(self, other):
"""Pointwise addition of fields.
self.__radd__(other) <==> other + self."""
return self + other
def __neg__(self):
neg = _make_object('Dat', self)
return neg._uop(operator.sub)
def __sub__(self, other):
"""Pointwise subtraction of fields."""
return self._op(other, operator.sub)
def __rsub__(self, other):
"""Pointwise subtraction of fields.
self.__rsub__(other) <==> other - self."""
ret = -self
ret += other
return ret
def __mul__(self, other):
"""Pointwise multiplication or scaling of fields."""
return self._op(other, operator.mul)
def __rmul__(self, other):
"""Pointwise multiplication or scaling of fields.
self.__rmul__(other) <==> other * self."""
return self.__mul__(other)
def __truediv__(self, other):
"""Pointwise division or scaling of fields."""
return self._op(other, operator.truediv)
__div__ = __truediv__ # Python 2 compatibility
def __iadd__(self, other):
"""Pointwise addition of fields."""
return self._iop(other, operator.iadd)
def __isub__(self, other):
"""Pointwise subtraction of fields."""
return self._iop(other, operator.isub)
def __imul__(self, other):
"""Pointwise multiplication or scaling of fields."""
return self._iop(other, operator.imul)
def __itruediv__(self, other):
"""Pointwise division or scaling of fields."""
return self._iop(other, operator.itruediv)
__idiv__ = __itruediv__ # Python 2 compatibility
@collective
def global_to_local_begin(self, access_mode):
"""Begin a halo exchange from global to ghosted representation.
:kwarg access_mode: Mode with which the data will subsequently
be accessed."""
halo = self.dataset.halo
if halo is None:
return
if access_mode in [READ, RW] and not self.halo_valid:
halo.global_to_local_begin(self, WRITE)
elif access_mode is INC:
self._data[self.dataset.size:] = 0
elif access_mode in [MIN, MAX]:
min_, max_ = dtype_limits(self.dtype)
self._data[self.dataset.size:] = {MAX: min_, MIN: max_}[access_mode]
@collective
def global_to_local_end(self, access_mode):
"""End a halo exchange from global to ghosted representation.
:kwarg access_mode: Mode with which the data will subsequently
be accessed."""
halo = self.dataset.halo
if halo is None:
return
if access_mode in [READ, RW] and not self.halo_valid:
halo.global_to_local_end(self, WRITE)
self.halo_valid = True
elif access_mode in [MIN, MAX, INC]:
self.halo_valid = False
@collective
def local_to_global_begin(self, insert_mode):
"""Begin a halo exchange from ghosted to global representation.
:kwarg insert_mode: insertion mode (an access descriptor)"""
halo = self.dataset.halo
if halo is None:
return
halo.local_to_global_begin(self, insert_mode)
@collective
def local_to_global_end(self, insert_mode):
"""End a halo exchange from ghosted to global representation.
:kwarg insert_mode: insertion mode (an access descriptor)"""
halo = self.dataset.halo
if halo is None:
return
halo.local_to_global_end(self, insert_mode)
self.halo_valid = False
@classmethod
def fromhdf5(cls, dataset, f, name):
"""Construct a :class:`Dat` from a Dat named ``name`` in HDF5 data ``f``"""
slot = f[name]
data = slot.value
soa = slot.attrs['type'].find(':soa') > 0
ret = cls(dataset, data, name=name, soa=soa)
return ret
class DatView(Dat):
"""An indexed view into a :class:`Dat`.
This object can be used like a :class:`Dat` but the kernel will
only see the requested index, rather than the full data.
:arg dat: The :class:`Dat` to create a view into.
:arg index: The component to select a view of.
"""
def __init__(self, dat, index):
cdim = dat.cdim
if not (0 <= index < cdim):
raise IndexTypeError("Can't create DatView with index %d for Dat with shape %s" % (index, dat.dim))
self.index = index
# Point at underlying data
super(DatView, self).__init__(dat.dataset,
dat._data,
dtype=dat.dtype,
name="view[%s](%s)" % (index, dat.name))
# Remember parent for lazy computation forcing
self._parent = dat
@cached_property
def cdim(self):
return 1
@cached_property
def dim(self):
return (1, )
@cached_property
def shape(self):
return (self.dataset.total_size, )
@property
def data(self):
cdim = self._parent.cdim
full = self._parent.data
sub = full.reshape(-1, cdim)[:, self.index]
return sub
@property
def data_ro(self):
cdim = self._parent.cdim
full = self._parent.data_ro
sub = full.reshape(-1, cdim)[:, self.index]
return sub
@property
def data_with_halos(self):
cdim = self._parent.cdim
full = self._parent.data_with_halos
sub = full.reshape(-1, cdim)[:, self.index]
return sub
@property
def data_ro_with_halos(self):
cdim = self._parent.cdim
full = self._parent.data_ro_with_halos
sub = full.reshape(-1, cdim)[:, self.index]
return sub
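# Minimal sketch (hypothetical names and shapes): a DatView exposes one component of a
# vector-valued Dat, so accessors only see that slice, e.g.
#
#     vel = op2.Dat(nodes ** 3)        # a Dat with cdim == 3, `nodes` an op2.Set
#     vy = DatView(vel, 1)             # view of the second component
#     vy.data                          # same values as vel.data.reshape(-1, 3)[:, 1]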
class MixedDat(Dat):
r"""A container for a bag of :class:`Dat`\s.
Initialized either from a :class:`MixedDataSet`, a :class:`MixedSet`, or
an iterable of :class:`DataSet`\s and/or :class:`Set`\s, where all the
:class:`Set`\s are implcitly upcast to :class:`DataSet`\s ::
mdat = op2.MixedDat(mdset)
mdat = op2.MixedDat([dset1, ..., dsetN])
or from an iterable of :class:`Dat`\s ::
mdat = op2.MixedDat([dat1, ..., datN])
"""
def __init__(self, mdset_or_dats):
def what(x):
if isinstance(x, (Global, GlobalDataSet, GlobalSet)):
return "Global"
elif isinstance(x, (Dat, DataSet, Set)):
return "Dat"
else:
raise DataSetTypeError("Huh?!")
if isinstance(mdset_or_dats, MixedDat):
self._dats = tuple(_make_object(what(d), d) for d in mdset_or_dats)
else:
self._dats = tuple(d if isinstance(d, (Dat, Global)) else _make_object(what(d), d) for d in mdset_or_dats)
if not all(d.dtype == self._dats[0].dtype for d in self._dats):
raise DataValueError('MixedDat with different dtypes is not supported')
# TODO: Think about different communicators on dats (c.f. MixedSet)
self.comm = self._dats[0].comm
def __getitem__(self, idx):
"""Return :class:`Dat` with index ``idx`` or a given slice of Dats."""
return self._dats[idx]
@cached_property
def dtype(self):
"""The NumPy dtype of the data."""
return self._dats[0].dtype
@cached_property
def split(self):
r"""The underlying tuple of :class:`Dat`\s."""
return self._dats
@cached_property
def dataset(self):
r""":class:`MixedDataSet`\s this :class:`MixedDat` is defined on."""
return _make_object('MixedDataSet', tuple(s.dataset for s in self._dats))
@cached_property
def soa(self):
"""Are the data in SoA format?"""
return tuple(s.soa for s in self._dats)
@cached_property
def _data(self):
"""Return the user-provided data buffer, or a zeroed buffer of
the correct size if none was provided."""
return tuple(d._data for d in self)
@property
@collective
def data(self):
"""Numpy arrays containing the data excluding halos."""
return tuple(s.data for s in self._dats)
@property
@collective
def data_with_halos(self):
"""Numpy arrays containing the data including halos."""
return tuple(s.data_with_halos for s in self._dats)
@property
@collective
def data_ro(self):
"""Numpy arrays with read-only data excluding halos."""
return tuple(s.data_ro for s in self._dats)
@property
@collective
def data_ro_with_halos(self):
"""Numpy arrays with read-only data including halos."""
return tuple(s.data_ro_with_halos for s in self._dats)
@property
def halo_valid(self):
"""Does this Dat have up to date halos?"""
return all(s.halo_valid for s in self)
@halo_valid.setter
def halo_valid(self, val):
"""Indictate whether this Dat requires a halo update"""
for d in | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""APIs for logging data in the event file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import json
import os
import logging
from .proto import event_pb2
from .proto import summary_pb2
from .event_file_writer import EventFileWriter
from .summary import scalar_summary, histogram_summary, image_summary, audio_summary
from .summary import text_summary, pr_curve_summary, _net2pb
from .utils import _save_embedding_tsv, _make_sprite_image, _make_metadata_tsv
from .utils import _add_embedding_config, _make_numpy_array, _get_embedding_dir
from .utils import _is_2D_matrix
class SummaryToEventTransformer(object):
"""This class is adapted with minor modifications from
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/summary/writer/writer.py#L125
Users should not use this class directly for logging MXNet data.
This class abstractly implements the SummaryWriter API: add_summary.
The endpoint generates an event protobuf from the Summary object, and passes
the event protobuf to _event_writer, which is of type EventFileWriter, for logging.
"""
# TODO(junwu): Need to check its compatibility with using ONNX for visualizing MXNet graphs.
def __init__(self, event_writer):
"""Initializes the _event_writer with the passed-in value.
Parameters
----------
event_writer: EventFileWriter
An event file writer writing events to the files in the path `logdir`.
"""
self._event_writer = event_writer
# This set contains tags of Summary Values that have been encountered
# already. The motivation here is that the SummaryWriter only keeps the
# metadata property (which is a SummaryMetadata proto) of the first Summary
# Value encountered for each tag. The SummaryWriter strips away the
# SummaryMetadata for all subsequent Summary Values with tags seen
# previously. This saves space.
self._seen_summary_tags = set()
def add_summary(self, summary, global_step=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer and adds it
to the event file.
Parameters
----------
summary : A `Summary` protocol buffer
Optionally serialized as a string.
global_step: Number
Optional global step value to record with the summary.
"""
if isinstance(summary, bytes):
summ = summary_pb2.Summary()
summ.ParseFromString(summary)
summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with a
# specific tag.
for value in summary.value:
if not value.metadata:
continue
if value.tag in self._seen_summary_tags:
# This tag has been encountered before. Strip the metadata.
value.ClearField("metadata")
continue
# We encounter a value with a tag we have not encountered previously. And
# it has metadata. Remember to strip metadata from future values with this
# tag string.
self._seen_summary_tags.add(value.tag)
event = event_pb2.Event(summary=summary)
self._add_event(event, global_step)
def add_graph(self, graph):
"""Adds a `Graph` protocol buffer to the event file."""
event = event_pb2.Event(graph_def=graph.SerializeToString())
self._add_event(event, None)
def _add_event(self, event, step):
event.wall_time = time.time()
if step is not None:
event.step = int(step)
self._event_writer.add_event(event)
class FileWriter(SummaryToEventTransformer):
"""This class is adapted from
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/summary/writer/writer.py.
Even though this class provides user-level APIs in TensorFlow, it is recommended to use the
interfaces defined in the class `SummaryWriter` (see below) for logging in MXNet as they are
directly compatible with the MXNet NDArray type.
This class writes `Summary` protocol buffers to event files. The `FileWriter` class provides
a mechanism to create an event file in a given directory and add summaries and events to it.
The class updates the file contents asynchronously.
"""
def __init__(self, logdir, max_queue=10, flush_secs=120, filename_suffix=None, verbose=True):
"""Creates a `FileWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, or `add_event()`.
Parameters
----------
logdir : str
Directory where event file will be written.
max_queue : int
Size of the queue for pending events and summaries.
flush_secs: Number
How often, in seconds, to flush the pending events and summaries to disk.
filename_suffix : str
Every event file's name is suffixed with `filename_suffix` if provided.
verbose : bool
Determines whether to print logging messages.
"""
event_writer = EventFileWriter(logdir, max_queue, flush_secs, filename_suffix, verbose)
super(FileWriter, self).__init__(event_writer)
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.close()
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self._event_writer.get_logdir()
def add_event(self, event):
"""Adds an event to the event file.
Parameters
----------
event : An `Event` protocol buffer.
"""
self._event_writer.add_event(event)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to disk.
"""
self._event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self._event_writer.close()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file. Does nothing if the EventFileWriter
was not closed.
"""
self._event_writer.reopen()
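# Minimal usage sketch of the lower-level FileWriter API (hypothetical logdir; the
# higher-level SummaryWriter defined below is the recommended entry point):
#
#     with FileWriter(logdir='./logs') as fw:
#         fw.add_summary(scalar_summary('loss', 0.42), global_step=1)
#         fw.flush()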
class SummaryWriter(object):
"""This class is adapted with modifications in support of the MXNet NDArray types from
https://github.com/lanpa/tensorboard-pytorch/blob/master/tensorboardX/writer.py.
The `SummaryWriter` class provides a high-level api to create an event file in a
given directory and add summaries and events to it. This class writes data to the
event file asynchronously.
This class is a wrapper of the FileWriter class. It's recommended that users use
the APIs of this class to log MXNet data for visualization as they are directly compatible with
the MXNet data types.
Examples
--------
>>> data = mx.nd.random.uniform(size=(10, 10))
>>> with SummaryWriter(logdir='logs') as sw:
>>> sw.add_histogram(tag='my_hist', values=data, global_step=0, bins=100)
"""
def __init__(self, logdir, max_queue=10, flush_secs=120, filename_suffix=None, verbose=True):
"""
Creates a `SummaryWriter` and an event file.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_audio()`, `add_embedding()`,
`add_histogram()`, `add_image()`, `add_pr_curve()`, `add_scalar()`, and `add_text()`.
Please make sure that the `logdir` used here for initializing `SummaryWriter`
matches the `--logdir` parameter you passed to the `tensorboard` binary in the command line
for launching TensorBoard.
Parameters
----------
logdir : str
Directory where event file will be written.
max_queue : int
Size of the queue for pending events and summaries.
flush_secs: Number
How often, in seconds, to flush the pending events and summaries to disk.
filename_suffix : str
Every event file's name is suffixed with `filename_suffix` if provided.
verbose : bool
Determines whether to print the logging messages.
"""
self._file_writer = FileWriter(logdir=logdir, max_queue=max_queue,
flush_secs=flush_secs, filename_suffix=filename_suffix,
verbose=verbose)
self._max_queue = max_queue
self._flush_secs = flush_secs
self._filename_suffix = filename_suffix
self._verbose = verbose
# for writing scalars of different tags in the same plot
self._all_writers = {self._file_writer.get_logdir(): self._file_writer}
self._logger = None
if verbose:
self._logger = logging.getLogger(__name__)
self._logger.setLevel(logging.INFO)
self._default_bins = None
self._text_tags = []
# scalar value dict.
# key: file_writer's logdir, value: list of [timestamp, global_step, value]
self._scalar_dict = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _get_default_bins(self):
"""Ported from the C++ function InitDefaultBucketsInner() in the following file.
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc
See the following tutorial for more details on how TensorFlow initialize bin distribution.
https://www.tensorflow.org/programmers_guide/tensorboard_histograms"""
if self._default_bins is None:
v = 1E-12
buckets = []
neg_buckets = []
while v < 1E20:
buckets.append(v)
neg_buckets.append(-v)
v *= 1.1
self._default_bins = neg_buckets[::-1] + [0] + buckets
return self._default_bins
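# Sketch of the resulting layout (for illustration): the default buckets are symmetric
# about zero and grow geometrically by a factor of 1.1, starting from +/-1e-12, e.g.
#     [..., -1.21e-12, -1.1e-12, -1e-12, 0, 1e-12, 1.1e-12, 1.21e-12, ...]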
def _append_to_scalar_dict(self, tag, scalar_value, global_step, timestamp):
"""Adds a list [timestamp, step, value] to the value of `self._scalar_dict[tag]`.
This allows users to store scalars in memory and dump them to a json file later."""
if tag not in self._scalar_dict.keys():
    self._scalar_dict[tag] = []
self._scalar_dict[tag].append([timestamp, global_step, scalar_value])
# there will be no conflict between
# letters)
# 2) If the lists mentioned above are both not empty, add any letters they have in common to fully_merged.
# If one has a letter that the other doesn't, discard it
for hashmap in hashmaps:
merge_step: dict = {k: [] for k in ALPHABET}
for l in ALPHABET:
# Step 1
if (hashmap[l] == [] or fully_merged[l] == []):
merge_step[l] = list(set(fully_merged[l] + hashmap[l]))
continue
# Step 2
for possible_letter in hashmap[l]:
if possible_letter in fully_merged[l]:
merge_step[l] = list(set(merge_step[l] + [possible_letter]))
fully_merged = merge_step # Continuously add back to fully_merged so everything merges to the same place
return fully_merged
# end: def merge_possible_hashmaps
# ====================================================================================================
# def get_possible_letters
#
# Get a mapping of the encrypted letters and their possible answers as english letters
#
# Arguments--
#
# encrypted_text: the encrypted message to get the possible keys/english letter mapping for
#
# Returns--
#
# A hashmap with keys representing the encrypted letters and values representing the possible letter(s)
# for the given encrypted letter
#
def get_possible_letters(encrypted_text: str) -> dict:
possible_letter_maps: list = []
# Get a single mapping
# ----------
# Loop through each of the encrypted words in encrypted_text and get all of the english words
# with matching word patterns. Create a mapping for all of the matching words for all of the encrypted
# words given the letters of the encrypted/matching word (ex: enc word "hkz" with match "cat" ->
# {h: [c], k: [a], z: [t]}). Once all of these mappings are made, merge them together using the
# merge_possible_hashmaps function
for encrypted_word in encrypted_text.split():
possible_letters: dict = {k: [] for k in ALPHABET}
possible_words: list = get_matching_words(encrypted_word)
for possible_word in possible_words:
# Add all the letters in possible_word to their respective matchings in possible_letters to create
# the mapping
for l in range(len(encrypted_word)):
encrypted_letter: str = encrypted_word[l]
possible_letter: str = possible_word[l]
possible_letters[encrypted_letter] = list(set(possible_letters[encrypted_letter] + [possible_letter]))
possible_letter_maps.append(possible_letters)
all_possible_letters: dict = merge_possible_hashmaps(possible_letter_maps)
# Loop through all of the possibilities in all_possible_letters and remove any duplicates for the known letters.
# Because this algorithm may remove such that only 1 letter remains in another position, it must be run multiple
# times (ex: {a: [z], b: [z, c], c: [c, d, f]}. The first pass would result in {a: [z], b: [c], c: [c, d, f]},
# but now b=c is also known so it must be run again to finally get {a: [z], b: [c], c: [d, f]})
known_letters: list = []
known_letters_left: bool = True
while (known_letters_left):
known_letters_left = False
# Find all of the known letters
for encrypted_letter in ALPHABET:
if (len(all_possible_letters[encrypted_letter]) == 1):
known_letters.extend(all_possible_letters[encrypted_letter])
# Remove known letters from all other places
for encrypted_letter in ALPHABET:
for known_letter in known_letters:
possible_letters: list = all_possible_letters[encrypted_letter]
if (len(possible_letters) != 1 and known_letter in possible_letters):
all_possible_letters[encrypted_letter].remove(known_letter) # Known letter cannot be in another spot
if (len(all_possible_letters[encrypted_letter]) == 1):
known_letters_left = True
return all_possible_letters
# end: def get_possible_letters
# ====================================================================================================
# def solve
#
# Decrypt a given string of encrypted text
#
# Arguments--
#
# encrypted_text: the text to decrypt
#
# Returns--
#
# The decrypted text and the final key it was decrypted with
#
def solve(encrypted_text: str):
possible_letters: dict = get_possible_letters(encrypted_text)
key = UNKNOWN_CHAR * len(ALPHABET)
# Given the list of possible mappings to a given cipher letter, create a final key. If there is
# only one possible mapping, use that in the final key. Otherwise, ignore that mapping
for encrypted_letter in ALPHABET:
if (len(possible_letters[encrypted_letter]) == 1):
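            # ord(encrypted_letter) - 97 == ord(encrypted_letter) - ord('a'), i.e. the 0-25
            # position of this cipher letter within the key string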
key_index: int = ord(encrypted_letter) - 97
key_letter: str = possible_letters[encrypted_letter][0]
key = key[:key_index] + key_letter + key[key_index + 1:]
return decrypt_with_key(key, encrypted_text), key
# end: def solve
# ====================================================================================================
# def main
#
# The main function to handle the solving routine
#
# Arguments--
#
# encrypted_text: the text to solve for
#
def main(encrypted_text: str):
print("INFO -- Will clean up encrypted_text...")
encrypted_text = clean_cipher_text(encrypted_text)
print("\tDone.")
print("INFO -- Will begin solving...")
ans, key = solve(encrypted_text)
print("\tDone.")
return ans, key
# end: def main
# ====================================================================================================
# def run_test
#
# Run the cipher solve program with an expected answer to check accuracy
#
# Arguments--
#
# test_name: the name of the test to print out for clarity
#
# encrypted_text: the encrypted version of expected; what the program should try to decrypt
#
# expected: the expected decrypted string
#
def run_test(test_name, encrypted_text, expected):
print("--------------------------------------------------")
print(f"INFO -- Test {test_name}")
ans, key = main(encrypted_text)
# Source: https://stackoverflow.com/questions/17388213/find-the-similarity-metric-between-two-strings
w1 = ans + ' ' * (len(expected) - len(ans))
w2 = expected + ' ' * (len(ans) - len(expected))
    accuracy: float = sum(1 if i == j else 0 for i, j in zip(w1, w2)) / float(len(w1)) * 100
print(f"ANS: {ans}\n\t" +
f"KEY: {key}\n\t" +
f"% CORRECT: {accuracy}\n")
# end: def run_test
# call to main
if __name__ == "__main__":
print("INFO -- Will init words_by_patterns...")
init_word_patterns()
print("\tDone.")
# If the user just ran the python file, then demonstrate the capabilities of the program by running tests
# on some common mono-alphabetic ciphers. Also inform the user that they can run their own text
if (len(sys.argv) <= 1):
print("INFO -- No text specified, will run with example text\n\t" +
f"Use python3 mono_substitution_solver.py <text...> to load custom encrypted text\n\t" +
f"Make sure to surround strings with quotes")
expected = "if he had anything confidential to say he wrote it in cipher that is by so changing the order of the letters of the alphabet that not a word could be made out because sometimes we all have information that we might want to hide in an encoded form especially if we were to be part of a war or conflict where access to information could change the course of the fight for us it is important to hide the information from people who might want to uncover it"
# Source for encoding: https://cryptii.com/pipes/caesar-cipher (shift = 7)
run_test("1: CAESAR", "pm ol ohk hufaopun jvumpkluaphs av zhf ol dyval pa pu jpwoly aoha pz if zv johunpun aol vykly vm aol slaalyz vm aol hswohila aoha uva h dvyk jvbsk il thkl vba iljhbzl zvtlaptlz dl hss ohcl pumvythapvu aoha dl tpnoa dhua av opkl pu hu lujvklk mvyt lzwljphssf pm dl dlyl av il whya vm h dhy vy jvumspja dolyl hjjlzz av pumvythapvu jvbsk johunl aol jvbyzl vm aol mpnoa mvy bz pa pz ptwvyahua av opkl aol pumvythapvu myvt wlvwsl dov tpnoa dhua av bujvcly pa", expected)
# Source for encoding: http://rumkin.com/tools/cipher/atbash.php
run_test("2: ATBASH", "ru sv szw zmbgsrmt xlmurwvmgrzo gl hzb sv dilgv rg rm xrksvi gszg rh yb hl xszmtrmt gsv liwvi lu gsv ovggvih lu gsv zokszyvg gszg mlg z dliw xlfow yv nzwv lfg yvxzfhv hlnvgrnvh dv zoo szev rmulinzgrlm gszg dv nrtsg dzmg gl srwv rm zm vmxlwvw ulin vhkvxrzoob ru dv dviv gl yv kzig lu z dzi li xlmuorxg dsviv zxxvhh gl rmulinzgrlm xlfow xszmtv gsv xlfihv lu gsv urtsg uli fh rg rh rnkligzmg gl srwv gsv rmulinzgrlm uiln kvlkov dsl nrtsg dzmg gl fmxlevi rg", expected)
# Source for encoding: https://cryptii.com/pipes/caesar-cipher
run_test("3: ROT 13", "vs ur unq nalguvat pbasvqragvny gb fnl ur jebgr vg va pvcure gung vf ol fb punatvat gur beqre bs gur yrggref bs gur nycunorg gung abg n jbeq pbhyq or znqr bhg orpnhfr fbzrgvzrf jr nyy unir vasbezngvba gung jr zvtug jnag gb uvqr va na rapbqrq sbez rfcrpvnyyl vs jr jrer gb or cneg bs n jne be pbasyvpg jurer npprff gb vasbezngvba pbhyq punatr gur pbhefr bs gur svtug sbe hf vg vf vzcbegnag gb uvqr gur vasbezngvba sebz crbcyr jub zvtug jnag gb hapbire vg", expected)
# Source for encoding: https://cryptii.com/pipes/caesar-cipher (slope = 5, int = 8)
run_test("4: AFFINE", "wh rc rix ivyzrwvm savhwxcvzwil za uiy rc opazc wz wv swfrcp zriz wu ny ua srivmwvm zrc apxcp ah zrc lczzcpu ah zrc ilfrincz zriz vaz i oapx saelx nc qixc aez ncsieuc uaqczwqcu oc ill rijc wvhapqizwav zriz oc qwmrz oivz za rwxc wv iv cvsaxcx hapq cufcswilly wh oc ocpc za nc fipz ah i oip ap savhlwsz orcpc | |
import bz2
from collections import OrderedDict
import gzip
from operator import getitem
import os
import struct
import sys
from types import MappingProxyType
from typing import Any, MutableMapping
from zipfile import ZipFile
from astropy.io import fits as pyfits
import pds4_tools
import numpy as np
import pandas as pd
import pvl
# TODO: this module doesn't exist in Ross's pvl 1.x
from pvl._collections import PVLObject
import rasterio
from rasterio.errors import RasterioIOError
# import matplotlib.pyplot as plt # just for QA
def get_from(collection: MutableMapping, keys, default=None) -> Any:
"""
toolz-style getter that will attempt both getattr and getitem (intended
for named tuples nested inside of dicts, etc)
(hierarchical list of keys, collection ->
item of collection, possibly from a nested collection)
"""
level = collection
for key in keys:
try:
level = getitem(level, key)
except (KeyError, IndexError, TypeError):
try:
level = getattr(level, key)
except AttributeError:
return default
return level
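# Illustrative uses of get_from (hypothetical data):
#   get_from({"a": {"b": [1, 2]}}, ["a", "b", 0])      -> 1
#   get_from({"a": {}}, ["a", "missing"], default=-1)  -> -1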
def pvl_to_dict(labeldata):
# Convert a PVL label object to a Python dict Deprecated because PVL
# allows keywords to be repeated at the same depth, which is _not_
# allowed in a dict(), so this function ends up clobbering information.
data = {}
if (
(type(labeldata) == pvl._collections.PVLModule)
or (type(labeldata) == pvl._collections.PVLGroup)
or (type(labeldata) == pvl._collections.PVLObject)
):
for k in labeldata.keys():
data[k] = pvl_to_dict(labeldata[k])
else:
return labeldata
return data
def filetype(filename):
"""Attempt to deduce the filetype based on the filename."""
if ".IMG" in filename.upper():
return "IMG"
elif ".FITS" in filename.upper():
return "FITS"
elif ".DAT" in filename.upper():
if os.path.exists(filename.replace(".DAT", ".LBL")):
# PDS3 .DAT with detached PVL label
return "PDS3DAT"
else:
# presumed PDS4 .DAT with detached xml label
return "PDS4DAT"
else:
print(
"*** Unsupported file type: [...]{end}".format(end=filename[-10:])
)
return "UNK"
def has_attached_label(filename):
"""Read the first line of a file to decide if it's a label."""
with open(filename, "rb") as f:
return "PDS_VERSION_ID" in str(f.readline())
def parse_attached_label(filename):
"""Parse an attached label of a IMG file."""
# First grab the entries from the label that define how to read the label
return pvl.load(filename, strict=False)
# with open(filename, "rb") as f:
# for line_ in f:
# line = line_.decode("utf-8").strip() # hacks through a rare error
# if "PDS_VERSION_ID" in line:
# PDS_VERSION_ID = line.strip().split("=")[1]
# if "RECORD_BYTES" in line:
# RECORD_BYTES = int(line.strip().split("=")[1])
# if "LABEL_RECORDS" in line:
# if "<BYTES>" in line:
# # Convert pointer value to bytes like everything else
# LABEL_RECORDS = line.strip().split("=")[1].split()[0] * 8
# else:
# LABEL_RECORDS = int(line.strip().split("=")[1])
# break
# # Read the label and then parse it with PVL
# try:
# with open(filename, "rb") as f:
# return pvl.load(f.read(RECORD_BYTES * (LABEL_RECORDS)))
# except UnboundLocalError:
# print("*** RECORD_BYTES not set??? ***")
# return None
# except:
# with open(filename, "rb") as f:
# return pvl.load(f.read(RECORD_BYTES * (LABEL_RECORDS)), strict=False)
def find_pointers(label, parent_level=None, path=""):
"""
function to look for file pointers in PDS3 labels.
drills down one level into 'file area' / sublabels, i.e.,
nested PVLObjects.
TODO:
some of this interface appears to have changed in Ross's pvl 1.x
and the class reference may need to be modified.
TODO:
these are never nested more than one level deep, right?
"""
pointers = []
for key, value in label.items():
if isinstance(value, PVLObject):
# go down a level to look for pointers in 'file areas' &c
pointers += find_pointers(value, key, path)
elif key.startswith("^"):
if isinstance(value, str):
# break up nested pointers; "if string" ignores None
# from default parent_level
pointer_target = os.path.join(path, value)
elif isinstance(value, pvl._collections.Units):
# attempt to handle byte offsets in attached labels
pointer_target = value.value
elif isinstance(value, int) and ("FILE_RECORDS" in label.keys()):
# attempt to handle file records offsets in attached labels
pointer_target = value * label["FILE_RECORDS"]
else:
print("Warning: " + str(
value) + "can't be interpreted as a valid target for a pointer.")
continue
pointers.append(
{
"object": [
string for string in [parent_level, key[1:]] if string
],
"target": pointer_target,
}
)
return pointers
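# Illustrative result (hypothetical label): a top-level entry "^IMAGE = 'FOO.IMG'" yields
# [{"object": ["IMAGE"], "target": "FOO.IMG"}]; a non-empty `path` argument is joined onto
# string targets.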
def find_pvl_objects(label):
"""
list of PVLObjects at top level of PDS3 label.
or anything, I guess.
TODO:
some of this interface appears to have changed in Ross's pvl 1.x
and the class reference may need to be modified.
"""
objects = []
for key, value in label.items():
if isinstance(value, PVLObject):
objects.append(key)
return objects
def get_file_area(label, pointer):
if isinstance(pointer, str):
        # if we were just passed a pointer name like "IMAGE" directly,
        # rather than having discovered terms in the label
file_area = label[pointer]
object_name = pointer
else:
# for nested keys: we have a list in a dict.
# reach down and grab the nested 'sublabel'
# or 'file area' in PDS4 parlance
file_area = get_from(label, pointer["object"])
# because first terms of pointer are at a higher level of nesting
object_name = pointer["object"][-1]
return file_area, object_name
def parse_label(filename, full=False):
"""Wraps forking paths for attached and detached PDS3 labels."""
if filename.endswith(".fmt"):
return pvl.load(filename, strict=False)
if not has_attached_label(filename):
if os.path.exists(filename[: filename.rfind(".")] + ".LBL"):
label = pvl.load(filename[: filename.rfind(".")] + ".LBL")
elif os.path.exists(filename[: filename.rfind(".")] + ".lbl"):
label = pvl.load(filename[: filename.rfind(".")] + ".lbl")
elif os.path.exists(filename[: filename.rfind(".")] + ".xml"):
# TODO: Make label data format consistent between PDS3 & 4
label = pds4_tools.read(
filename[: filename.rfind(".")] + ".xml", quiet=True
).label.to_dict()
else:
print("*** Unable to locate file label. ***")
return None
else:
label = parse_attached_label(filename)
# TODO: This ugly conditional exists entirely to deal with Cassini data
# which all seem to be returning zero-value images, so maybe it's wrong!
if (not full) and ("UNCOMPRESSED_FILE" in label.keys()):
if "COMPRESSED_FILE" in label.keys():
if "ENCODING_TYPE" in label["COMPRESSED_FILE"].keys():
if (
label["COMPRESSED_FILE"]["ENCODING_TYPE"]
== "MSLMMM-COMPRESSED"
):
return label
return label["UNCOMPRESSED_FILE"]
return label
def sample_types(SAMPLE_TYPE, SAMPLE_BYTES):
"""Defines a translation from PDS data types to Python data types.
TODO: The commented-out types below are technically valid PDS3
types, but I haven't yet worked out the translation to Python.
"""
# NOTE: The byte depth of various data types is non-unique in PDS3
return {
"MSB_INTEGER": ">h",
"INTEGER": ">h",
"MAC_INTEGER": ">h",
"SUN_INTEGER": ">h",
"MSB_UNSIGNED_INTEGER": ">h" if SAMPLE_BYTES == 2 else ">B",
"UNSIGNED_INTEGER": ">B",
"MAC_UNSIGNED_INTEGER": ">B",
"SUN_UNSIGNED_INTEGER": ">B",
"LSB_INTEGER": "<h" if SAMPLE_BYTES == 2 else "<B",
"PC_INTEGER": "<h",
"VAX_INTEGER": "<h",
"ASCII_INTEGER": "<h",
"LSB_UNSIGNED_INTEGER": "<h" if SAMPLE_BYTES == 2 else "<B",
"PC_UNSIGNED_INTEGER": "<B",
"VAX_UNSIGNED_INTEGER": "<B",
"IEEE_REAL": ">f",
"FLOAT": ">f",
"REAL": ">f",
"PC_REAL": "<d" if SAMPLE_BYTES == 8 else "<f",
"MAC_REAL": ">f",
"SUN_REAL": ">f",
"MSB_BIT_STRING": ">B",
}[SAMPLE_TYPE]
# Possibly unused in PDS3: just park them here unless needed
# 'IEEE_COMPLEX': '>c',
# 'COMPLEX': '>c',
# 'MAC_COMPLEX': '>c',
# 'SUN_COMPLEX': '>c',
# 'PC_COMPLEX': '<c',
# 'MSB_BIT_STRING': '>S',
# 'LSB_BIT_STRING': '<S',
# 'VAX_BIT_STRING': '<S',
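# Illustrative uses of sample_types, following the mapping above:
#   sample_types("MSB_INTEGER", 2)          -> ">h"  (big-endian int16)
#   sample_types("PC_REAL", 4)              -> "<f"  (little-endian float32)
#   sample_types("LSB_UNSIGNED_INTEGER", 1) -> "<B"  (unsigned byte)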
def get_data_types(filename):
"""Placeholder function for the fact that PDS3 can contain multiple
types of data (e.g. an image and a header) which are defined by
'pointers' in the label. This should be dealt with at some point.
"""
for k in parse_label(filename).keys():
if k.startswith("^"):
print(k)
def data_start_byte(label, pointer):
"""Determine the first byte of the data in an IMG file from its pointer."""
if type(pointer) is str:
name_or_offset = label["^" + pointer]
# for nested keys
else:
name_or_offset = get_from(
label, [*pointer["object"][0:-1], "^" + pointer["object"][-1]]
)
if type(name_or_offset) is int:
return label["RECORD_BYTES"] * (name_or_offset - 1)
elif type(name_or_offset) is pvl._collections.Units:
return name_or_offset.value - 1
elif type(name_or_offset) is list:
if type(name_or_offset[0]) is int:
return name_or_offset[0]
elif type(name_or_offset[-1]) is int:
return label["RECORD_BYTES"] * (name_or_offset[-1] - 1)
else:
return 0
elif type(name_or_offset) is str:
return 0
else:
print("WTF?", name_or_offset)
raise
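# Illustrative example (hypothetical label): with label = {"^IMAGE": 5, "RECORD_BYTES": 1024},
# data_start_byte(label, "IMAGE") returns 1024 * (5 - 1) = 4096.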
def read_document(filename, label, pointer):
"""
    Placeholder function. If open() can decode the file as Unicode, return the
    text; otherwise, return the raw bytes.
"""
try:
with open(filename) as file:
return file.read()
except UnicodeDecodeError:
with open(filename, "rb") as file:
return file.read()
except FileNotFoundError:
return "referenced document not found"
def image_props_list():
return [
"BYTES_PER_PIXEL",
"start_byte",
"DTYPE",
"nrows",
"ncols",
"prefix_bytes",
"prefix_cols",
"BANDS",
"pixels",
"band_storage_type",
]
def get_image_props(dictionary):
"""
convenience function
grabs image properties from a dict -- perhaps locals() --
to be passed into a different scope
"""
return {prop: dictionary[prop] for prop in image_props_list()}
def generic_image_props(label, pointer):
try:
sublabel = get_file_area(label, pointer)
file_area = sublabel[0]
except (KeyError, TypeError):
# print("*** IMG w/ old format attached label not currently supported.")
# print("\t{fn}".format(fn=filename))
print("No image data identified.")
return None
BYTES_PER_PIXEL = int(file_area["SAMPLE_BITS"] / 8)
DTYPE = sample_types(file_area["SAMPLE_TYPE"], BYTES_PER_PIXEL)
nrows = file_area["LINES"]
ncols = file_area["LINE_SAMPLES"]
if "LINE_PREFIX_BYTES" in file_area.keys():
# print("Accounting for a line | |
self.KS_CR, self.KS_CM0,
self.KS_CM1, self.KS_M0, self.KS_M1, self.KS_M2)
elif self.model == 'regression':
self._scattered_V = _scattered_V_regression(
self.airmass,
0.5 * (np.cos(np.pi * self.moon_phase) + 1.),
90 - self.moon_zenith.value,
self.separation_angle.value) * u.mag / u.arcsec**2
else:
raise NotImplementedError
# Calculate the wavelength-dependent extinction of moonlight
# scattered once into the observed field of view.
scattering_airmass = (
1 - 0.96 * np.sin(self.moon_zenith) ** 2) ** (-0.5)
extinction = (
10 ** (-self._extinction_coefficient * scattering_airmass / 2.5) *
(1 - 10 ** (-self._extinction_coefficient * self.airmass / 2.5)))
self._surface_brightness = self._moon_spectrum * extinction
# Renormalized the extincted spectrum to the correct V-band magnitude.
raw_V = self._vband.get_ab_magnitude(
self._surface_brightness, self._wavelength) * u.mag
area = 1 * u.arcsec ** 2
self._surface_brightness *= 10 ** (
-(self._scattered_V * area - raw_V) / (2.5 * u.mag)) / area
@property
def KS_CR(self):
return self._KS_CR
@KS_CR.setter
def KS_CR(self, ks_cr):
self._KS_CR = ks_cr
self._update_required = True
@property
def KS_CM0(self):
return self._KS_CM0
@KS_CM0.setter
def KS_CM0(self, ks_cm0):
self._KS_CM0 = ks_cm0
self._update_required = True
@property
def KS_CM1(self):
return self._KS_CM1
@KS_CM1.setter
def KS_CM1(self, ks_cm1):
self._KS_CM1 = ks_cm1
self._update_required = True
@property
def KS_M0(self):
return self._KS_M0
@KS_M0.setter
def KS_M0(self, ks_m0):
self._KS_M0 = ks_m0
self._update_required = True
@property
def KS_M1(self):
return self._KS_M1
@KS_M1.setter
def KS_M1(self, ks_m1):
self._KS_M1 = ks_m1
self._update_required = True
@property
def KS_M2(self):
return self._KS_M2
@KS_M2.setter
def KS_M2(self, ks_m2):
self._KS_M2 = ks_m2
self._update_required = True
reg_model_coeffs = np.array([
0.00000000e+00, -1.24246947e-01, -2.19592318e-01, -1.27371956e-02,
4.16108739e-02, -8.96992463e-02, -6.74266151e-01, 2.67170371e-02,
-1.54258481e-02, -3.52318515e-01, -4.12007754e-03, 6.44355466e-02,
2.70616098e-04, -2.52914043e-04, -6.59789181e-04, -1.00704130e-01,
-1.17732794e+00, 1.00074153e-02, 2.02381309e-02, -1.03468867e+00,
7.06332796e-02, 1.80523919e-02, -8.04924203e-04, -8.78033445e-04,
-1.93926394e-04, -6.88153692e-01, -1.34713209e-01, 1.85076523e-03,
5.65520710e-05, -1.30331216e-05, -4.89722809e-04, 2.99858228e-06,
8.39852557e-06, 8.86494950e-06, 4.35592782e-06])
reg_model_intercept = 20.507688847655775
def _scattered_V_regression(airmass, moon_frac, moon_alt, moon_sep):
''' 4th degree polynomial regression fit to the V-band scattered moonlight
from BOSS and DESI CMX data.
'''
theta = np.atleast_2d(np.array([airmass, moon_frac, moon_alt, moon_sep]).T)
combs = chain.from_iterable(combinations_with_replacement(range(4), i)
for i in range(0, 4))
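    # combs enumerates every monomial of total degree <= 3 in the four inputs:
    # 1 (constant) + 4 (linear) + 10 (quadratic) + 20 (cubic) = 35 terms,
    # matching len(reg_model_coeffs).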
theta_transform = np.empty((theta.shape[0], len(reg_model_coeffs)))
for i, comb in enumerate(combs):
theta_transform[:, i] = theta[:, comb].prod(1)
return np.dot(theta_transform, reg_model_coeffs.T) + reg_model_intercept
def krisciunas_schaefer_free(obs_zenith, moon_zenith, separation_angle, moon_phase,
vband_extinction, C_R, C_M0, C_M1, M0, M1, M2):
"""Calculate the scattered moonlight surface brightness in V band.
Based on Krisciunas and Schaefer, "A model of the brightness of moonlight",
PASP, vol. 103, Sept. 1991, p. 1033-1039 (http://dx.doi.org/10.1086/132921).
Equation numbers in the code comments refer to this paper.
The function :func:`plot_lunar_brightness` provides a convenient way to
plot this model's predictions as a function of observation pointing.
Units are required for the angular inputs and the result has units of
surface brightness, for example:
>>> sb = krisciunas_schaefer(20*u.deg, 70*u.deg, 50*u.deg, 0.25, 0.15)
>>> print(np.round(sb, 3))
19.855 mag / arcsec2
The output is automatically broadcast over input arrays following the usual
numpy rules.
This method has several caveats but the authors find agreement with data at
the 8% - 23% level. See the paper for details.
Parameters
----------
obs_zenith : astropy.units.Quantity
Zenith angle of the observation in angular units.
moon_zenith : astropy.units.Quantity
Zenith angle of the moon in angular units.
separation_angle : astropy.units.Quantity
Opening angle between the observation and moon in angular units.
moon_phase : float
Phase of the moon from 0.0 (full) to 1.0 (new), which can be calculated
as abs((d / D) - 1) where d is the time since the last new moon
and D = 29.5 days is the period between new moons. The corresponding
illumination fraction is ``0.5*(1 + cos(pi * moon_phase))``.
    vband_extinction : float
        V-band extinction coefficient to use.
    C_R, C_M0, C_M1 : float
        Coefficients of the scattering function (eqn. 21).
    M0, M1, M2 : float
        Coefficients of the lunar V-band magnitude polynomial (eqn. 9).
Returns
-------
astropy.units.Quantity
Observed V-band surface brightness of scattered moonlight.
"""
moon_phase = np.asarray(moon_phase)
if np.any((moon_phase < 0) | (moon_phase > 1)):
raise ValueError(
'Invalid moon phase {0}. Expected 0-1.'.format(moon_phase))
# Calculate the V-band magnitude of the moon (eqn. 9).
abs_alpha = 180. * moon_phase
#m = -12.73 + 0.026 * abs_alpha + 4e-9 * abs_alpha ** 4 (default value)
m = M0 + M1 * abs_alpha + M2 * 1e-9 * abs_alpha ** 4
# Calculate the illuminance of the moon outside the atmosphere in
# foot-candles (eqn. 8).
Istar = 10 ** (-0.4 * (m + 16.57))
# Calculate the scattering function (eqn.21).
rho = separation_angle.to(u.deg).value
f_scatter = (C_R * (1.06 + np.cos(separation_angle) ** 2) +
10 ** (C_M0 - rho / C_M1))
# Calculate the scattering airmass along the lines of sight to the
# observation and moon (eqn. 3).
X_obs = (1 - 0.96 * np.sin(obs_zenith) ** 2) ** (-0.5)
X_moon = (1 - 0.96 * np.sin(moon_zenith) ** 2) ** (-0.5)
# Calculate the V-band moon surface brightness in nanoLamberts.
B_moon = (f_scatter * Istar *
10 ** (-0.4 * vband_extinction * X_moon) *
(1 - 10 ** (-0.4 * (vband_extinction * X_obs))))
    # Convert from nanoLamberts to mag / arcsec**2 using eqn.19 of
# Garstang, "Model for Artificial Night-Sky Illumination",
# PASP, vol. 98, Mar. 1986, p. 364 (http://dx.doi.org/10.1086/131768)
return ((20.7233 - np.log(B_moon / 34.08)) / 0.92104 *
u.mag / (u.arcsec ** 2))
def _cI_twi(alpha, delta, airmass):
''' twilight contribution
:param alpha:
:param delta:
:param airmass:
    :return wave:
:return twi:
'''
ftwi = os.path.join(UT.dat_dir(), 'sky', 'twilight_coeffs.p')
twi_coeffs = pickle.load(open(ftwi, 'rb'))
twi = (
twi_coeffs['t0'] * np.abs(alpha) + # CT2
twi_coeffs['t1'] * np.abs(alpha)**2 + # CT1
twi_coeffs['t2'] * np.abs(delta)**2 + # CT3
twi_coeffs['t3'] * np.abs(delta) # CT4
) * np.exp(-twi_coeffs['t4'] * airmass) + twi_coeffs['c0']
return twi_coeffs['wave'], np.array(twi)
def _twilight_coeffs():
''' save twilight coefficients from Parker
'''
f = os.path.join(UT.code_dir(), 'dat', 'sky', 'MoonResults.csv')
coeffs = pd.DataFrame.from_csv(f)
coeffs.columns = [
'wl', 'model', 'data_var', 'unexplained_var',' X2', 'rX2',
'c0', 'c_am', 'tau', 'tau2', 'c_zodi', 'c_isl', 'sol', 'I',
't0', 't1', 't2', 't3', 't4', 'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6',
'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec',
'c2', 'c3', 'c4', 'c5', 'c6']
    # separate the twilight coefficients from the moon model coefficients
twi_coeffs = coeffs[coeffs['model'] == 'twilight']
coeffs = coeffs[coeffs['model'] == 'moon']
# order based on wavelengths for convenience
wave_sort = np.argsort(np.array(coeffs['wl']))
twi = {}
twi['wave'] = np.array(coeffs['wl'])[wave_sort]
for k in ['t0', 't1', 't2', 't3', 't4', 'c0']:
twi[k] = np.array(twi_coeffs[k])[wave_sort]
# save to file
ftwi = os.path.join(UT.dat_dir(), 'sky', 'twilight_coeffs.p')
pickle.dump(twi, open(ftwi, 'wb'))
return None
##########################################################################
# contributions to parker's sky surface brightness model
##########################################################################
def _read_parkerCoeffs():
''' read the coefficients of parker's model
'''
f = ''.join([UT.code_dir(), 'dat/sky/MoonResults.csv'])
_coeffs = pd.DataFrame.from_csv(f)
_coeffs.columns = [
'wl', 'model', 'data_var', 'unexplained_var',' X2', 'rX2',
'c0', 'c_am', 'tau', 'tau2', 'c_zodi', 'c_isl', 'sol', 'I',
't0', 't1', 't2', 't3', 't4', 'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6',
'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec',
'c2', 'c3', 'c4', 'c5', 'c6'
]
# keep moon models
    coeffs = _coeffs[_coeffs['model'] == 'moon']
# order based on wavelengths for convenience
wave_sort = np.argsort(np.array(coeffs['wl']))
for k in coeffs.keys():
coeffs[k] = np.array(coeffs[k])[wave_sort]
return coeffs
def _parker_Icontinuum(coeffs, X, beta, l, b, mjd, month_frac, hour_frac, alpha, delta, altm, illm, delm, g):
''' sky continuum (Fragelius thesis Eq. 4.23)
'''
# airmass contrib.
_Iairmass = coeffs['c_am'] * X
# zodiacal contrib. (func. of ecliptic latitude)
_Izodiacal = coeffs['c_zodi'] * _parker_Izodi(beta)
_Iisl = coeffs['c_isl'] * _parker_Iisl(l, b)
_Isolar_flux = coeffs['sol'] * _parker_Isf(mjd - coeffs['I'])
_Iseasonal = _parker_cI_seas(month_frac, coeffs)
_Ihourly = _parker_cI_hour(hour_frac, coeffs)
_dT = _parker_deltaT(X, coeffs)
# When the sun is above -20 altitude, some of its light will back-scatter
# off the atmosphere into the field of view. (Fragelius thesis Eq. 4.27)
_Itwilight = _parker_cI_twi_exp(alpha, delta, X, coeffs)
# light from the moon that is scattered into our field of view (Fragelius thesis Eq. 4.28, 4.29)
_Imoon = _parker_cI_moon_exp(altm, illm, delm, g, X, coeffs)
_Iadd_continuum = coeffs['c0']
# I_continuum(lambda)
Icont = (_Iairmass + _Izodiacal + _Iisl + _Isolar_flux + _Iseasonal + _Ihourly + _Iadd_continuum) * _dT + _Itwilight + _Imoon
return 10*coeffs['wl'], np.array(Icont)
def _parker_cI_moon_exp(altm, illm, deltam, g, airmass, coeffs):
    ''' light from the moon that is scattered into our field of view
    (Fragelius thesis Eq. 4.28, 4.29)
    '''
x + D_params[
5] * y * y
shifted_t0 = t0 + t0_params[0] + t0_params[1] * x + t0_params[2] * y + \
t0_params[3] * x * y + t0_params[4] * x * x + t0_params[5] * y * y
a = ep.minimum(a, 1)
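        # The response below is a mixture of two truncated exponentials with weights a and (1 - a)
        # and shape parameters b and c, both starting at the position-dependent time shifted_t0.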
return a * self.truncexpon(-t, -shifted_t0, b) + (1 - a) * self.truncexpon(-t, -shifted_t0, c)
def track_point(self, start, direction, z):
"""
This function returns the segment coordinates for a point along the `z` coordinate
Args:
start (tuple): start coordinates
direction (tuple): direction coordinates
z (float): `z` coordinate corresponding to the `x`, `y` coordinates
Returns:
tuple: the (x,y) pair of coordinates for the segment at `z`
"""
l = (z - start[:, 2][...,ep.newaxis]) / direction[:, 2][...,ep.newaxis]
xl = start[:, 0][...,ep.newaxis] + l * direction[:, 0][...,ep.newaxis]
yl = start[:, 1][...,ep.newaxis] + l * direction[:, 1][...,ep.newaxis]
return xl, yl
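    # Illustrative example (hypothetical scalar values): with start = (0, 0, 0) and a unit
    # direction (0, 0, 1), the track runs parallel to the z axis, so track_point returns
    # x = 0, y = 0 for every z.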
def get_pixel_coordinates(self, pixels):
"""
Returns the coordinates of the pixel center given the pixel IDs
"""
tpc_borders_ep = ep.from_numpy(pixels, self.tpc_borders).float32()
plane_id = pixels[..., 0] // self.n_pixels[0]
borders = ep.stack([tpc_borders_ep[x.astype(int)] for x in plane_id])
pix_x = (pixels[..., 0] - self.n_pixels[0] * plane_id) * self.pixel_pitch + borders[..., 0, 0]
pix_y = pixels[..., 1] * self.pixel_pitch + borders[..., 1, 0]
return pix_x[...,ep.newaxis], pix_y[...,ep.newaxis]
def tracks_current(self, pixels, tracks, time_max, fields):
"""
This function calculates the charge induced on the pixels by the input tracks.
Args:
pixels (:obj:`numpy.ndarray`, `pyTorch/Tensorflow/JAX Tensor`): 3D array with dimensions S x P x 2, where S is
the number of track segments, P is the number of pixels and the third dimension
contains the two pixel ID numbers.
tracks (:obj:`numpy.ndarray`, `pyTorch/Tensorflow/JAX Tensor`): 2D array containing the detector segments.
time_max (int) : total number of time ticks (see time_intervals)
fields (list): an ordered string list of field/column name of the tracks structured array
Returns:
signals (:obj:`numpy.ndarray`, `pyTorch/Tensorflow/JAX Tensor`): 3D array with dimensions S x P x T,
where S is the number of track segments, P is the number of pixels, and T is
the number of time ticks.
"""
pixels = ep.astensor(pixels)
tracks_ep = ep.astensor(tracks)
it = ep.arange(pixels, 0, time_max)
# Pixel coordinates
x_p, y_p = self.get_pixel_coordinates(pixels)
x_p += self.pixel_pitch / 2
y_p += self.pixel_pitch / 2
start_coords = ep.stack([tracks_ep[:, fields.index("x_start")],
tracks_ep[:, fields.index("y_start")],
tracks_ep[:, fields.index("z_start")]], axis=1)
end_coords = ep.stack([tracks_ep[:, fields.index("x_end")],
tracks_ep[:, fields.index("y_end")],
tracks_ep[:, fields.index("z_end")]], axis=1)
cond = tracks_ep[:, fields.index("z_start")] < tracks_ep[:, fields.index("z_end")]
start = ep.where(cond[...,ep.newaxis], start_coords, end_coords)
end = ep.where(cond[...,ep.newaxis], end_coords, start_coords)
segment = end - start
        length = ep.norms.l2(segment, axis=1, keepdims=True)  # segment length, so direction below is a unit vector
direction = segment / length
sigmas = ep.stack([tracks_ep[:, fields.index("tran_diff")],
tracks_ep[:, fields.index("tran_diff")],
tracks_ep[:, fields.index("long_diff")]], axis=1)
        # The impact factor is the size of the transverse diffusion or, if too small,
# half the diagonal of the pixel pad
impact_factor = ep.maximum(ep.sqrt((5 * sigmas[:, 0]) ** 2 + (5 * sigmas[:, 1]) ** 2),
ep.full_like(sigmas[:, 0], sqrt(self.pixel_pitch ** 2 + self.pixel_pitch ** 2) / 2)) * 2
z_poca, z_start, z_end = self.z_interval(start, end, x_p, y_p, impact_factor)
z_start_int = z_start - 4 * sigmas[:, 2][...,ep.newaxis]
z_end_int = z_end + 4 * sigmas[:, 2][...,ep.newaxis]
x_start, y_start = self.track_point(start, direction, z_start)
x_end, y_end = self.track_point(start, direction, z_end)
y_step = (ep.abs(y_end - y_start) + 8 * sigmas[:, 1][...,ep.newaxis]) / (self.sampled_points - 1)
x_step = (ep.abs(x_end - x_start) + 8 * sigmas[:, 0][...,ep.newaxis]) / (self.sampled_points - 1)
z_sampling = self.t_sampling / 2.
z_steps = ep.maximum(self.sampled_points, ((ep.abs(z_end_int - z_start_int) / z_sampling)+1).astype(int))
z_step = (z_end_int - z_start_int) / (z_steps - 1)
#This was a // divide, implement?
t_start = ep.maximum(self.time_interval[0],
(tracks_ep[:, fields.index("t_start")] - self.time_padding)
/ self.t_sampling * self.t_sampling)
total_current = 0
total_charge = 0
time_tick = t_start[:, ep.newaxis] + it * self.t_sampling
iz = ep.arange(z_steps, 0, z_steps.max().item())
z = z_start_int[...,ep.newaxis] + iz[ep.newaxis, ep.newaxis, :] * z_step[...,ep.newaxis]
tpc_borders_ep = ep.from_numpy(pixels, self.tpc_borders).float32()
borders = ep.stack([tpc_borders_ep[x.astype(int)] for x in tracks_ep[:, fields.index("pixel_plane")]])
t0 = (ep.abs(z - borders[..., 2, 0, ep.newaxis, ep.newaxis]) - 0.5) / self.vdrift
# FIXME: this sampling is far from ideal, we should sample around the track
# and not in a cube containing the track
ix = ep.arange(iz, 0, self.sampled_points)
x = x_start[...,ep.newaxis] + \
ep.sign(direction[..., 0, ep.newaxis, ep.newaxis]) *\
(ix[ep.newaxis, ep.newaxis, :] * x_step[...,ep.newaxis] - 4 * sigmas[..., 0, ep.newaxis, ep.newaxis])
x_dist = ep.abs(x_p - x)
iy = ep.arange(iz, 0, self.sampled_points)
y = y_start[...,ep.newaxis] + \
ep.sign(direction[..., 1, ep.newaxis, ep.newaxis]) *\
(iy[ep.newaxis, ep.newaxis, :] * y_step[...,ep.newaxis] - 4 * sigmas[..., 1, ep.newaxis, ep.newaxis])
y_dist = ep.abs(y_p - y)
charge = self.rho((x[:,:, :, ep.newaxis, ep.newaxis], y[:,:, ep.newaxis, :, ep.newaxis], z[:,:, ep.newaxis, ep.newaxis, :]), tracks_ep[:, fields.index("n_electrons")], start, sigmas, segment)\
* ep.abs(x_step[..., ep.newaxis, ep.newaxis, ep.newaxis]) * ep.abs(y_step[..., ep.newaxis, ep.newaxis, ep.newaxis]) * ep.abs(z_step[..., ep.newaxis, ep.newaxis, ep.newaxis])
# Setup mask of pixel pitch and z_poca conditions
cond_pix = ep.logical_and(x_dist[:, :, :, ep.newaxis] < self.pixel_pitch/2,
y_dist[:, :, ep.newaxis, :] < self.pixel_pitch/2)
cond_all = ep.logical_and(cond_pix, z_poca[:, :, ep.newaxis, ep.newaxis] != 0)
# Indices passing conditions (better way to do this than np and raw?)
trk_idx, pix_idx, xidx, yidx = np.where(cond_all.raw.cpu())
# Set up inputs (with multiplicities) in "passing condition" space
tt_sel = time_tick[trk_idx, :, ep.newaxis]
t0_sel = t0[trk_idx, pix_idx, ep.newaxis, :]
xd_sel = x_dist[trk_idx, pix_idx, xidx, ep.newaxis, ep.newaxis]
yd_sel = y_dist[trk_idx, pix_idx, yidx, ep.newaxis, ep.newaxis]
# Current model
current_out = self.current_model(tt_sel, t0_sel, xd_sel, yd_sel)
# Multiply in appropriate charge and const. Sum over z sampling right away
full_out = (charge[trk_idx, pix_idx, xidx, yidx][:, ep.newaxis, :]*current_out*self.e_charge).sum(axis=2)
# Map back to pixels/tracks/time steps with zero padding
reshaped = ep.zeros(full_out, shape=(x_dist.shape[0],x_dist.shape[1], time_max,
self.sampled_points, self.sampled_points))
reshaped = ep.index_update(reshaped, ep.index[trk_idx, pix_idx, :, xidx, yidx], full_out)
# Sum over x, y sampling cube
signals = reshaped.sum(axis=(3,4))
return signals.raw
def sum_pixel_signals(self, pixels_signals, signals, track_starts, index_map):
"""
This function sums the induced current signals on the same pixel.
Args:
pixels_signals (:obj:`numpy.ndarray`): 2D array that will contain the
summed signal for each pixel. First dimension is the pixel ID, second
dimension is the time tick
signals (:obj:`numpy.ndarray`): 3D array with dimensions S x P x T,
where S is the number of track segments, P is the number of pixels, and T is
the number of time ticks.
track_starts (:obj:`numpy.ndarray`): 1D array containing the starting time of
each track
index_map (:obj:`numpy.ndarray`): 2D array containing the correspondence between
the track index and the pixel ID index.
"""
signals = ep.astensor(signals)
track_starts = ep.astensor(track_starts)
index_map = ep.astensor(index_map)
# Set up index map to match with signal shape
index = index_map[..., ep.newaxis]
# Set up time map to match with signal shape. To implement: ep.round
itime = ((track_starts / self.t_sampling + 0.5).astype(int)[:, ep.newaxis, ep.newaxis] +
ep.arange(signals, 0, signals.shape[2])[ep.newaxis, ep.newaxis, :])
# Each signal index now has a corresponding pixel/time index
exp_index = ep.tile(index, (1,1,signals.shape[2]))
exp_itime = ep.tile(itime, (1, signals.shape[1], 1))
# Put pixel/time/signal together and flatten
idxs = ep.stack((exp_index, exp_itime, signals), axis=-1)
flat_idxs = idxs.reshape((-1, 3))
# Get unique indices (return_inverse doesn't exist for ep)
unique_idxs, idx_inv = flat_idxs[:, :2].raw.unique(dim=0, return_inverse=True)
unique_idxs = ep.astensor(unique_idxs)
idx_inv = ep.astensor(idx_inv)
# Sum over values for unique indices - scatter_add_ doesn't exist in ep. Can loop, but slow, e.g.
#out = ep.zeros(signals, shape=(len(unique_idxs)))
#for i in range(flat_idxs.shape[0]):
# out = out.index_update(idx_inv[i], out[idx_inv[i]]+flat_idxs[i, 2])
res = ep.astensor(ep.zeros(signals, shape=(len(unique_idxs))).raw.scatter_add_(0, idx_inv.raw, flat_idxs[:, 2].raw))
output = ep.index_update(ep.astensor(pixels_signals), (unique_idxs[:,0].astype(int),
unique_idxs[:,1].astype(int)), res)
return output.raw
# def backtrack_adcs(self, tracks, adc_list, adc_times_list, track_pixel_map, event_id_map, unique_evids, backtracked_id,
# shift):
# pedestal = floor((fee.V_PEDESTAL - fee.V_CM) * fee.ADC_COUNTS / (fee.V_REF - fee.V_CM))
#
# ip = cuda.grid(1)
#
# if ip < adc_list.shape[0]:
# for itrk in range(track_pixel_map.shape[1]):
# track_index = track_pixel_map[ip][itrk]
# if track_index >= 0:
# track_start_t = tracks["t_start"][track_index]
# track_end_t = tracks["t_end"][track_index]
# evid = unique_evids[event_id_map[track_index]]
# for iadc in range(adc_list[ip].shape[0]):
#
# if adc_list[ip][iadc] > pedestal:
# adc_time = adc_times_list[ip][iadc]
# evid_time = adc_time // (time_interval[1] * 3)
#
# if track_start_t - self.time_padding < adc_time - evid_time * time_interval[
# 1] * 3 < track_end_t + consts.time_padding + 0.5 / self.vdrift:
# counter = 0
#
    #                 while
"/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_1b >= 151.5: #charging
# self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_1b.color = 1, 1, 1, 1.0
#if float(c1b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_2a < 151.5: #discharging
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_2a.color = 1, 1, 1, 0.8
#elif avg_2a > 160.0: #charged
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_2a >= 151.5: #charging
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_2a.color = 1, 1, 1, 1.0
#if float(c2a) > 0.0: #power channel offline!
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_2b < 151.5: #discharging
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_2b.color = 1, 1, 1, 0.8
#elif avg_2b > 160.0: #charged
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_2b >= 151.5: #charging
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_2b.color = 1, 1, 1, 1.0
#if float(c2b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_3a < 151.5: #discharging
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3a.color = 1, 1, 1, 0.8
#elif avg_3a > 160.0: #charged
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_3a >= 151.5: #charging
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_3a.color = 1, 1, 1, 1.0
#if float(c3a) > 0.0: #power channel offline!
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_3b < 151.5: #discharging
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3b.color = 1, 1, 1, 0.8
#elif avg_3b > 160.0: #charged
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_3b >= 151.5: #charging
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_3b.color = 1, 1, 1, 1.0
#if float(c3b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_4a < 151.5: #discharging
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_4a.color = 1, 1, 1, 0.8
#elif avg_4a > 160.0: #charged
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_4a >= 151.5: #charging
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_4a.color = 1, 1, 1, 1.0
#if float(c4a) > 0.0: #power channel offline!
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_4b < 151.5: #discharging
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_4b.color = 1, 1, 1, 0.8
#elif avg_4b > 160.0: #charged
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_4b >= 151.5: #charging
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_4b.color = 1, 1, 1, 1.0
#if float(c4b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if avg_total_voltage > 151.5:
#else:
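        # Heuristic used below for each power channel: voltage < 151.5 V -> array discharging,
        # 151.5-160 V -> charging, > 160 V -> charged (channel 4B uses 141.5/150 V thresholds);
        # a positive current reading (c1a, c1b, ...) marks that power channel as offline.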
if float(v1a) >= 151.5 or float(v1b) >= 151.5 or float(v2a) >= 151.5 or float(v2b) >= 151.5 or float(v3a) >= 151.5 or float(v3b) >= 151.5 or float(v4a) >= 151.5 or float(v4b) >= 151.5:
self.eps_screen.ids.eps_sun.color = 1, 1, 1, 1
else:
self.eps_screen.ids.eps_sun.color = 1, 1, 1, 0.1
if float(v1a) < 151.5: #discharging
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_1a.color = 1, 1, 1, 0.8
elif float(v1a) > 160.0: #charged
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v1a) >= 151.5: #charging
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_1a.color = 1, 1, 1, 1.0
if float(c1a) > 0.0: #power channel offline!
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v1b) < 151.5: #discharging
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_1b.color = 1, 1, 1, 0.8
elif float(v1b) > 160.0: #charged
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v1b) >= 151.5: #charging
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_1b.color = 1, 1, 1, 1.0
if float(c1b) > 0.0: #power channel offline!
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v2a) < 151.5: #discharging
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_2a.color = 1, 1, 1, 0.8
elif float(v2a) > 160.0: #charged
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v2a) >= 151.5: #charging
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_2a.color = 1, 1, 1, 1.0
if float(c2a) > 0.0: #power channel offline!
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v2b) < 151.5: #discharging
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_2b.color = 1, 1, 1, 0.8
elif float(v2b) > 160.0: #charged
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v2b) >= 151.5: #charging
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_2b.color = 1, 1, 1, 1.0
if float(c2b) > 0.0: #power channel offline!
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v3a) < 151.5: #discharging
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3a.color = 1, 1, 1, 0.8
elif float(v3a) > 160.0: #charged
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v3a) >= 151.5: #charging
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_3a.color = 1, 1, 1, 1.0
if float(c3a) > 0.0: #power channel offline!
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v3b) < 151.5: #discharging
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3b.color = 1, 1, 1, 0.8
elif float(v3b) > 160.0: #charged
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v3b) >= 151.5: #charging
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_3b.color = 1, 1, 1, 1.0
if float(c3b) > 0.0: #power channel offline!
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v4a) < 151.5: #discharging
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_4a.color = 1, 1, 1, 0.8
elif float(v4a) > 160.0: #charged
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v4a) >= 151.5: #charging
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_4a.color = 1, 1, 1, 1.0
if float(c4a) > 0.0: #power channel offline!
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#4b has a lower setpoint voltage for now - reverted back as of US EVA 63
if float(v4b) < 141.5: #discharging
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_4b.color = 1, 1, 1, 0.8
elif float(v4b) > 150.0: #charged
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v4b) >= 141.5: #charging
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_4b.color = 1, 1, 1, 1.0
if float(c4b) > 0.0: #power channel offline!
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
##-------------------C&T Functionality-------------------##
self.ct_sgant_screen.ids.sgant_dish.angle = float(sgant_elevation)
self.ct_sgant_screen.ids.sgant_elevation.text = "{:.2f}".format(float(sgant_elevation))
#make sure radio animations turn off when no signal or no transmit
if float(sgant_transmit) == 1.0 and float(aos) == 1.0:
self.ct_sgant_screen.ids.radio_up.color = 1, 1, 1, 1
if "10" in tdrs:
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "11" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "12" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "6" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "7" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
elif float(aos) == 0.0 and (float(sgant_transmit) == 0.0 or float(sgant_transmit) == 1.0):
self.ct_sgant_screen.ids.radio_up.color = 0, 0, 0, 0
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
#now check main CT screen radio signal
if float(sgant_transmit) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.sgant1_radio.color = 1, 1, 1, 1
self.ct_screen.ids.sgant2_radio.color = 1, 1, 1, 1
elif float(sgant_transmit) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.sgant1_radio.color = 0, 0, 0, 0
self.ct_screen.ids.sgant2_radio.color = 0, 0, 0, 0
elif float(sgant_transmit) == 0.0:
self.ct_screen.ids.sgant1_radio.color = 0, 0, 0, 0
self.ct_screen.ids.sgant2_radio.color = 0, 0, 0, 0
elif float(aos) == 0.0:
self.ct_screen.ids.sgant1_radio.color = 0, 0, 0, 0
self.ct_screen.ids.sgant2_radio.color = 0, 0, 0, 0
if float(sasa1_active) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.sasa1_radio.color = 1, 1, 1, 1
elif float(sasa1_active) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.sasa1_radio.color = 0, 0, 0, 0
elif float(sasa1_active) == 0.0:
self.ct_screen.ids.sasa1_radio.color = 0, 0, 0, 0
elif float(aos) == 0.0:
self.ct_screen.ids.sasa1_radio.color = 0, 0, 0, 0
if float(sasa2_active) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.sasa2_radio.color = 1, 1, 1, 1
elif float(sasa2_active) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.sasa2_radio.color = 0, 0, 0, 0
elif float(sasa2_active) == 0.0:
self.ct_screen.ids.sasa2_radio.color = 0, 0, 0, 0
elif float(aos) == 0.0:
            self.ct_screen.ids.sasa2_radio.color = 0, 0, 0, 0
assert 'Removing image v2-full_web' in result.stderr
assert 'Removing image busybox' not in result.stderr
assert 'Removing network v2-full_default' in result.stderr
assert 'Removing network v2-full_front' in result.stderr
def test_down_timeout(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
assert len(service.containers()) == 1
assert service.containers()[0].is_running
""
self.dispatch(['down', '-t', '1'], None)
assert len(service.containers(stopped=True)) == 0
def test_down_signal(self):
self.base_dir = 'tests/fixtures/stop-signal-composefile'
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
assert len(service.containers()) == 1
assert service.containers()[0].is_running
self.dispatch(['down', '-t', '1'], None)
assert len(service.containers(stopped=True)) == 0
def test_up_detached(self):
self.dispatch(['up', '-d'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
assert len(service.containers()) == 1
assert len(another.containers()) == 1
# Ensure containers don't have stdin and stdout connected in -d mode
container, = service.containers()
assert not container.get('Config.AttachStderr')
assert not container.get('Config.AttachStdout')
assert not container.get('Config.AttachStdin')
def test_up_detached_long_form(self):
self.dispatch(['up', '--detach'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
assert len(service.containers()) == 1
assert len(another.containers()) == 1
# Ensure containers don't have stdin and stdout connected in -d mode
container, = service.containers()
assert not container.get('Config.AttachStderr')
assert not container.get('Config.AttachStdout')
assert not container.get('Config.AttachStdin')
def test_up_attached(self):
self.base_dir = 'tests/fixtures/echo-services'
result = self.dispatch(['up', '--no-color'])
simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project
another_name = self.project.get_service('another').containers(
stopped=True
)[0].name_without_project
assert '{} | simple'.format(simple_name) in result.stdout
assert '{} | another'.format(another_name) in result.stdout
assert '{} exited with code 0'.format(simple_name) in result.stdout
assert '{} exited with code 0'.format(another_name) in result.stdout
@v2_only()
def test_up(self):
self.base_dir = 'tests/fixtures/v2-simple'
self.dispatch(['up', '-d'], None)
services = self.project.get_services()
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
assert len(networks) == 1
        assert networks[0]['Driver'] == ('bridge' if not is_cluster(self.client) else 'overlay')
assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']
network = self.client.inspect_network(networks[0]['Id'])
for service in services:
containers = service.containers()
assert len(containers) == 1
container = containers[0]
assert container.id in network['Containers']
networks = container.get('NetworkSettings.Networks')
assert list(networks) == [network['Name']]
assert sorted(networks[network['Name']]['Aliases']) == sorted(
[service.name, container.short_id]
)
for service in services:
assert self.lookup(container, service.name)
@v2_only()
def test_up_no_start(self):
self.base_dir = 'tests/fixtures/v2-full'
self.dispatch(['up', '--no-start'], None)
services = self.project.get_services()
default_network = self.project.networks.networks['default'].full_name
front_network = self.project.networks.networks['front'].full_name
networks = self.client.networks(names=[default_network, front_network])
assert len(networks) == 2
for service in services:
containers = service.containers(stopped=True)
assert len(containers) == 1
container = containers[0]
assert not container.is_running
assert container.get('State.Status') == 'created'
volumes = self.project.volumes.volumes
assert 'data' in volumes
volume = volumes['data']
# The code below is a Swarm-compatible equivalent to volume.exists()
remote_volumes = [
v for v in self.client.volumes().get('Volumes', [])
if v['Name'].split('/')[-1] == volume.full_name
]
assert len(remote_volumes) > 0
@v2_only()
def test_up_no_ansi(self):
self.base_dir = 'tests/fixtures/v2-simple'
result = self.dispatch(['--no-ansi', 'up', '-d'], None)
assert "%c[2K\r" % 27 not in result.stderr
assert "%c[1A" % 27 not in result.stderr
assert "%c[1B" % 27 not in result.stderr
@v2_only()
def test_up_with_default_network_config(self):
filename = 'default-network-config.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], None)
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
assert networks[0]['Options']['com.docker.network.bridge.enable_icc'] == 'false'
@v2_only()
def test_up_with_network_aliases(self):
filename = 'network-aliases.yml'
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['-f', filename, 'up', '-d'], None)
back_name = '{}_back'.format(self.project.name)
front_name = '{}_front'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# Two networks were created: back and front
assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
web_container = self.project.get_service('web').containers()[0]
back_aliases = web_container.get(
'NetworkSettings.Networks.{}.Aliases'.format(back_name)
)
assert 'web' in back_aliases
front_aliases = web_container.get(
'NetworkSettings.Networks.{}.Aliases'.format(front_name)
)
assert 'web' in front_aliases
assert 'forward_facing' in front_aliases
assert 'ahead' in front_aliases
@v2_only()
def test_up_with_network_internal(self):
self.require_api_version('1.23')
filename = 'network-internal.yml'
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['-f', filename, 'up', '-d'], None)
internal_net = '{}_internal'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# One network was created: internal
assert sorted(n['Name'].split('/')[-1] for n in networks) == [internal_net]
assert networks[0]['Internal'] is True
@v2_only()
def test_up_with_network_static_addresses(self):
filename = 'network-static-addresses.yml'
ipv4_address = '172.16.100.100'
ipv6_address = 'fe80::1001:100'
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['-f', filename, 'up', '-d'], None)
static_net = '{}_static_test'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# One network was created: static_test
assert sorted(n['Name'].split('/')[-1] for n in networks) == [static_net]
web_container = self.project.get_service('web').containers()[0]
ipam_config = web_container.get(
'NetworkSettings.Networks.{}.IPAMConfig'.format(static_net)
)
assert ipv4_address in ipam_config.values()
assert ipv6_address in ipam_config.values()
@v2_only()
def test_up_with_networks(self):
self.base_dir = 'tests/fixtures/networks'
self.dispatch(['up', '-d'], None)
back_name = '{}_back'.format(self.project.name)
front_name = '{}_front'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# Two networks were created: back and front
assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
# lookup by ID instead of name in case of duplicates
back_network = self.client.inspect_network(
[n for n in networks if n['Name'] == back_name][0]['Id']
)
front_network = self.client.inspect_network(
[n for n in networks if n['Name'] == front_name][0]['Id']
)
web_container = self.project.get_service('web').containers()[0]
app_container = self.project.get_service('app').containers()[0]
db_container = self.project.get_service('db').containers()[0]
for net_name in [front_name, back_name]:
links = app_container.get('NetworkSettings.Networks.{}.Links'.format(net_name))
assert '{}:database'.format(db_container.name) in links
# db and app joined the back network
assert sorted(back_network['Containers']) == sorted([db_container.id, app_container.id])
# web and app joined the front network
assert sorted(front_network['Containers']) == sorted([web_container.id, app_container.id])
# web can see app but not db
assert self.lookup(web_container, "app")
assert not self.lookup(web_container, "db")
# app can see db
assert self.lookup(app_container, "db")
# app has aliased db to "database"
assert self.lookup(app_container, "database")
@v2_only()
def test_up_missing_network(self):
self.base_dir = 'tests/fixtures/networks'
result = self.dispatch(
['-f', 'missing-network.yml', 'up', '-d'],
returncode=1)
assert 'Service "web" uses an undefined network "foo"' in result.stderr
@v2_only()
@no_cluster('container networks not supported in Swarm')
def test_up_with_network_mode(self):
c = self.client.create_container(
'busybox', 'top', name='composetest_network_mode_container',
host_config={}
)
self.addCleanup(self.client.remove_container, c, force=True)
self.client.start(c)
container_mode_source = 'container:{}'.format(c['Id'])
filename = 'network-mode.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], None)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert not networks
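# Services named 'bridge', 'host' and 'none' are expected to use the matching built-in network mode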
for name in ['bridge', 'host', 'none']:
container = self.project.get_service(name).containers()[0]
assert list(container.get('NetworkSettings.Networks')) == [name]
assert container.get('HostConfig.NetworkMode') == name
service_mode_source = 'container:{}'.format(
self.project.get_service('bridge').containers()[0].id)
service_mode_container = self.project.get_service('service').containers()[0]
assert not service_mode_container.get('NetworkSettings.Networks')
assert service_mode_container.get('HostConfig.NetworkMode') == service_mode_source
container_mode_container = self.project.get_service('container').containers()[0]
assert not container_mode_container.get('NetworkSettings.Networks')
assert container_mode_container.get('HostConfig.NetworkMode') == container_mode_source
@v2_only()
def test_up_external_networks(self):
filename = 'external-networks.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
assert 'declared as external, but could not be found' in result.stderr
networks = [
n['Name'] for n in self.client.networks()
if n['Name'].startswith('{}_'.format(self.project.name))
]
assert not networks
network_names = ['{}_{}'.format(self.project.name, n) for n in ['foo', 'bar']]
for name in network_names:
self.client.create_network(name, attachable=True)
self.dispatch(['-f', filename, 'up', '-d'])
container = self.project.containers()[0]
assert sorted(list(container.get('NetworkSettings.Networks'))) == sorted(network_names)
@v2_only()
def test_up_with_external_default_network(self):
filename = 'external-default.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
assert 'declared as external, but could not be found' in result.stderr
networks = [
n['Name'] for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert not networks
network_name = 'composetest_external_network'
self.client.create_network(network_name, attachable=True)
self.dispatch(['-f', filename, 'up', '-d'])
container = self.project.containers()[0]
assert list(container.get('NetworkSettings.Networks')) == [network_name]
@v2_1_only()
def test_up_with_network_labels(self):
filename = 'network-label.yml'
self.base_dir = 'tests/fixtures/networks'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
network_with_label = '{}_network_with_label'.format(self.project.name)
networks = [
n for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert [n['Name'].split('/')[-1] for n in networks] == [network_with_label]
assert 'label_key' in networks[0]['Labels']
assert networks[0]['Labels']['label_key'] == 'label_val'
@v2_1_only()
def test_up_with_volume_labels(self):
filename = 'volume-label.yml'
self.base_dir = 'tests/fixtures/volumes'
self._project = get_project(self.base_dir, [filename])
self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
volume_with_label = '{}_volume_with_label'.format(self.project.name)
volumes = [
v for v in self.client.volumes().get('Volumes', [])
if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
assert 'label_key' in volumes[0]['Labels']
assert volumes[0]['Labels']['label_key'] == 'label_val'
@v2_only()
def test_up_no_services(self):
self.base_dir = 'tests/fixtures/no-services'
self.dispatch(['up', '-d'], None)
network_names = [
n['Name'] for n in self.client.networks()
if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert network_names == []
def test_up_with_links_v1(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'web'], None)
# No network was created
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
assert networks == []
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
# console was not started
assert len(web.containers()) == 1
assert len(db.containers()) == 1
assert len(console.containers()) == 0
# web has links
web_container = web.containers()[0]
assert web_container.get('HostConfig.Links')
def test_up_with_net_is_invalid(self):
self.base_dir = 'tests/fixtures/net-container'
result = self.dispatch(
['-f', 'v2-invalid.yml', 'up', '-d'],
returncode=1)
assert "Unsupported config option for services.bar: 'net'" in result.stderr
@no_cluster("Legacy networking not supported on Swarm")
def test_up_with_net_v1(self):
self.base_dir = 'tests/fixtures/net-container'
self.dispatch(['up', '-d'], None)
bar = self.project.get_service('bar')
bar_container = bar.containers()[0]
foo = self.project.get_service('foo')
foo_container = foo.containers()[0]
assert foo_container.get('HostConfig.NetworkMode') == 'container:{}'.format(
bar_container.id
)
@v3_only()
def test_up_with_healthcheck(self):
def wait_on_health_status(container, status):
def condition():
container.inspect()
return container.get('State.Health.Status') == status
return wait_on_condition(condition, delay=0.5)
self.base_dir = 'tests/fixtures/healthcheck'
self.dispatch(['up', '-d'], None)
self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'state_yellow': {
'default': self.__get_color(C_YELLOW),
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'state_green': {
'default': self.__get_color(C_GREEN),
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'state_red': {
'default': self.__get_color(C_RED),
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'query': {
'default': self.__get_color(0),
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'relation': {
'default': self.__get_color(C_CYAN),
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'type': {
'default': self.__get_color(0),
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'mode_yellow': {
'default': self.__get_color(C_YELLOW)|curses.A_BOLD,
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
},
'mode_red': {
'default': self.__get_color(C_RED)|curses.A_BOLD,
'cursor': self.__get_color(C_CYAN)|curses.A_REVERSE,
'yellow': self.__get_color(C_YELLOW)|curses.A_BOLD
}
}
def __init_curses(self,):
"""
Initialize curses environment.
"""
curses.setupterm()
self.win = curses.initscr()
self.win.keypad(1)
curses.noecho()
try:
# deactivate cursor
curses.curs_set(0)
# use colors
curses.start_color()
curses.use_default_colors()
except Exception:
# Terminal doesn't support curs_set() or colors
self.sys_color = False
curses.cbreak()
curses.endwin()
self.win.scrollok(0)
(self.maxy, self.maxx) = self.win.getmaxyx()
def get_flag_from_options(self, options):
"""
Returns the flag depending on the options.
"""
flag = PGTOP_FLAG_DATABASE | PGTOP_FLAG_USER | PGTOP_FLAG_CLIENT
flag = flag | PGTOP_FLAG_CPU | PGTOP_FLAG_MEM | PGTOP_FLAG_READ
flag = flag | PGTOP_FLAG_WRITE | PGTOP_FLAG_TIME | PGTOP_FLAG_WAIT
flag = flag | PGTOP_FLAG_RELATION | PGTOP_FLAG_TYPE | PGTOP_FLAG_MODE
flag = flag | PGTOP_FLAG_IOWAIT | PGTOP_FLAG_APPNAME
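# Each flag is set exactly once above, so plain subtraction below safely clears it when an option opts out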
if options.nodb is True:
flag -= PGTOP_FLAG_DATABASE
if options.nouser is True:
flag -= PGTOP_FLAG_USER
if options.nocpu is True:
flag -= PGTOP_FLAG_CPU
if options.noclient is True:
flag -= PGTOP_FLAG_CLIENT
if options.nomem is True:
flag -= PGTOP_FLAG_MEM
if options.noread is True:
flag -= PGTOP_FLAG_READ
if options.nowrite is True:
flag -= PGTOP_FLAG_WRITE
if options.notime is True:
flag -= PGTOP_FLAG_TIME
if options.nowait is True:
flag -= PGTOP_FLAG_WAIT
if options.noappname is True:
flag -= PGTOP_FLAG_APPNAME
# Remove some flags if not running against a local pg server.
if not self.get_is_local() and (flag & PGTOP_FLAG_CPU):
flag -= PGTOP_FLAG_CPU
if not self.get_is_local() and (flag & PGTOP_FLAG_MEM):
flag -= PGTOP_FLAG_MEM
if not self.get_is_local() and (flag & PGTOP_FLAG_READ):
flag -= PGTOP_FLAG_READ
if not self.get_is_local() and (flag & PGTOP_FLAG_WRITE):
flag -= PGTOP_FLAG_WRITE
if not self.get_is_local() and (flag & PGTOP_FLAG_IOWAIT):
flag -= PGTOP_FLAG_IOWAIT
return flag
def __get_color(self, color):
"""
Wrapper around curses.color_pair()
"""
if self.sys_color:
return curses.color_pair(color)
else:
return 0
def set_max_db_length(self, new_length):
"""
Set new DATABASE column length
"""
global PGTOP_COLS
if new_length > 16:
new_length = 16
if new_length < 8:
new_length = 8
self.max_db_length = new_length
str_nl = str(new_length)
PGTOP_COLS['activities']['database']['template_h'] = '%-'+str_nl+'s '
PGTOP_COLS['waiting']['database']['template_h'] = '%-'+str_nl+'s '
PGTOP_COLS['blocking']['database']['template_h'] = '%-'+str_nl+'s '
def at_exit_curses(self,):
"""
Called at exit time.
Roll back to default values.
"""
try:
self.win.keypad(0)
self.win.move(0, 0)
self.win.erase()
except KeyboardInterrupt:
pass
except AttributeError:
# Curses not initialized yet
return
curses.nocbreak()
curses.echo()
try:
curses.curs_set(1)
except Exception:
pass
curses.endwin()
def signal_handler(self, signal, frame):
"""
Function called on a process kill.
"""
self.at_exit_curses()
print("FATAL: Killed with signal %s ." % (str(signal),))
print("%s" % (str(frame),))
sys.exit(1)
def set_nocolor(self,):
"""
Replace colors by white.
"""
if not self.sys_color:
return
self.color = False
curses.init_pair(C_BLACK_GREEN, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(C_CYAN, curses.COLOR_WHITE, -1)
curses.init_pair(C_RED, curses.COLOR_WHITE, -1)
curses.init_pair(C_RED_BLACK, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(C_GREEN, curses.COLOR_WHITE, -1)
curses.init_pair(C_YELLOW, curses.COLOR_WHITE, -1)
curses.init_pair(C_MAGENTA, curses.COLOR_WHITE, -1)
curses.init_pair(C_WHITE, curses.COLOR_WHITE, -1)
curses.init_pair(C_BLACK_CYAN, curses.COLOR_WHITE, -1)
curses.init_pair(C_GRAY, curses.COLOR_WHITE, -1)
def set_color(self,):
"""
Set colors.
"""
if not self.sys_color:
return
self.color = True
curses.init_pair(C_BLACK_GREEN, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(C_CYAN, curses.COLOR_CYAN, -1)
curses.init_pair(C_RED, curses.COLOR_RED, -1)
curses.init_pair(C_RED_BLACK, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(C_GREEN, curses.COLOR_GREEN, -1)
curses.init_pair(C_YELLOW, curses.COLOR_YELLOW, -1)
curses.init_pair(C_MAGENTA, curses.COLOR_MAGENTA, -1)
curses.init_pair(C_WHITE, curses.COLOR_WHITE, -1)
curses.init_pair(C_BLACK_CYAN, curses.COLOR_BLACK, curses.COLOR_CYAN)
curses.init_pair(C_GRAY, 0, -1)
def set_output(self, output):
self.output = output
def set_options(self, options):
self.options = options
if self.data:
self.data.min_duration = options.minduration
def clean_str(self, string):
"""
Strip and replace some special characters.
"""
msg = str(string)
msg = msg.replace("\n", " ")
msg = re.sub(r"\s+", r" ", msg)
msg = msg.replace("FATAL:", "")
msg = re.sub(r"^\s", r"", msg)
msg = re.sub(r"\s$", r"", msg)
return msg
def ask_password(self, ):
"""
Ask for PostgreSQL user password
"""
password = getpass()
return password
def check_window_size(self,):
"""
Update window's size
"""
(self.maxy, self.maxx) = self.win.getmaxyx()
return
def __get_pause_msg(self,):
"""
Return the PAUSE message, padded to the current line width.
"""
msg = "PAUSE"
line = ""
line += " " * (int(self.maxx/2) - len(msg))
line += msg
line += " " * (self.maxx - len(line) - 0)
return line
def __pause(self,):
"""
PAUSE mode
"""
self.__print_string(
self.start_line,
0,
self.__get_pause_msg(),
self.__get_color(C_RED_BLACK)|curses.A_REVERSE|curses.A_BOLD)
while 1:
try:
k = self.win.getch()
except KeyboardInterrupt as err:
raise err
if k == ord('q'):
curses.endwin()
exit()
if k == ord(' '):
curses.flushinp()
return 0
if k == curses.KEY_RESIZE:
if self.uibuffer is not None and 'procs' in self.uibuffer:
self.check_window_size()
self.refresh_window(
self.uibuffer['procs'],
self.uibuffer['extras'],
self.uibuffer['flag'],
self.uibuffer['indent'],
self.uibuffer['io'],
self.uibuffer['tps'],
self.uibuffer['active_connections'],
self.uibuffer['size_ev'],
self.uibuffer['total_size'])
self.__print_string(
self.start_line,
0,
self.__get_pause_msg(),
self.__get_color(C_RED_BLACK)|\
curses.A_REVERSE|curses.A_BOLD)
curses.flushinp()
def __current_position(self,):
"""
Display current mode
"""
if self.mode == 'activities':
msg = "RUNNING QUERIES"
if self.mode == 'waiting':
msg = "WAITING QUERIES"
if self.mode == 'blocking':
msg = "BLOCKING QUERIES"
color = self.__get_color(C_GREEN)
line = ""
line += " " * (int(self.maxx/2) - len(msg))
line += msg
line += " " * (self.maxx - len(line) - 0)
self.__print_string(self.start_line, 0, line, color|curses.A_BOLD)
def __help_key_interactive(self,):
"""
Display interactive mode menu bar
"""
colno = self.__print_string(
(self.maxy - 1),
0,
"c",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Cancel current query ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"k",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Terminate the backend ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"Space",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Tag/untag the process ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"Other",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Back to activity ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"q",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Quit ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
self.__add_blank(" "),
self.__get_color(C_CYAN)|curses.A_REVERSE)
def __change_mode_interactive(self,):
"""
Display change mode menu bar
"""
colno = self.__print_string(
(self.maxy - 1),
0,
"F1/1",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Running queries ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"F2/2",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Waiting queries ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"F3/3",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Blocking queries ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"Space",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Pause ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"q",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Quit ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
"h",
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
"Help ",
self.__get_color(C_CYAN)|curses.A_REVERSE)
colno += self.__print_string(
(self.maxy - 1),
colno,
self.__add_blank(" ", colno + 1),
self.__get_color(C_CYAN)|curses.A_REVERSE)
def __ask_terminate_or_cancel_backends(self, action, pids,):
"""
Ask for cancelling or terminating some backends
"""
if len(pids) == 1:
colno = self.__print_string(
(self.maxy - 1),
0,
PGTOP_SIGNAL_MESSAGE[action]['s'] % (str(pids[0]),),
self.__get_color(0))
else:
pos = 0
disp = ""
for pid in pids:
if pos > 5:
disp += "..."
break
if pos > 0:
disp += ", "
disp += "%s" % (pid,)
pos += 1
colno = self.__print_string(
(self.maxy - 1),
0,
PGTOP_SIGNAL_MESSAGE[action]['p'] % (str(disp),),
self.__get_color(0))
colno += self.__print_string(
(self.maxy - 1),
colno,
self.__add_blank(" "),
self.__get_color(C_CYAN)|curses.A_REVERSE)
while 1:
try:
key = self.win.getch()
except KeyboardInterrupt as err:
raise err
# quit
if key == ord('q'):
curses.endwin()
exit()
# yes
if key == ord('y') or key == ord('Y'):
for pid in pids:
if action == PGTOP_SIGNAL_TERMINATE_BACKEND:
self.data.pg_terminate_backend(str(pid),)
else:
self.data.pg_cancel_backend(str(pid),)
self.__empty_pid_yank()
return 1
# no
if key == ord('n') or key == ord('N') or key == ord(' '):
return 0
# resize => exit
if key == curses.KEY_RESIZE:
return 0
def __empty_pid_yank(self,):
"""
Empty pid list to be yanked
"""
self.pid_yank = []
def __check_pid_yank(self,):
"""
Check if PIDs in PGTOP_PID_YANK list are still attached
to live processes
"""
if len(self.pid_yank) > 0:
for pid in self.pid_yank:
if self.pid.count(pid) == 0:
self.pid_yank.remove(pid)
def __interactive(self, process, flag, indent,):
"""
Interactive mode triggered on KEY_UP or KEY_DOWN key press.
If no key is hit for 3 seconds, exit this mode.
"""
# Force truncated display
old_verbose_mode = self.verbose_mode
self.verbose_mode = PGTOP_TRUNCATE
# Refresh lines with this verbose mode
self.__scroll_window(process, flag, indent, 0)
self.__help_key_interactive()
current_pos = 0
offset = 0
end_time: builtins.str, start_time: builtins.str) -> None:
"""
:param end_time: ``CfnAnomalyDetector.RangeProperty.EndTime``.
:param start_time: ``CfnAnomalyDetector.RangeProperty.StartTime``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-anomalydetector-range.html
"""
self._values: typing.Dict[str, typing.Any] = {
"end_time": end_time,
"start_time": start_time,
}
@builtins.property
def end_time(self) -> builtins.str:
"""``CfnAnomalyDetector.RangeProperty.EndTime``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-anomalydetector-range.html#cfn-cloudwatch-anomalydetector-range-endtime
"""
result = self._values.get("end_time")
assert result is not None, "Required property 'end_time' is missing"
return result
@builtins.property
def start_time(self) -> builtins.str:
"""``CfnAnomalyDetector.RangeProperty.StartTime``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-anomalydetector-range.html#cfn-cloudwatch-anomalydetector-range-starttime
"""
result = self._values.get("start_time")
assert result is not None, "Required property 'start_time' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RangeProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@aws-cdk/aws-cloudwatch.CfnAnomalyDetectorProps",
jsii_struct_bases=[],
name_mapping={
"metric_name": "metricName",
"namespace": "namespace",
"stat": "stat",
"configuration": "configuration",
"dimensions": "dimensions",
},
)
class CfnAnomalyDetectorProps:
def __init__(
self,
*,
metric_name: builtins.str,
namespace: builtins.str,
stat: builtins.str,
configuration: typing.Optional[typing.Union[aws_cdk.core.IResolvable, CfnAnomalyDetector.ConfigurationProperty]] = None,
dimensions: typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union[aws_cdk.core.IResolvable, CfnAnomalyDetector.DimensionProperty]]]] = None,
) -> None:
"""Properties for defining a ``AWS::CloudWatch::AnomalyDetector``.
:param metric_name: ``AWS::CloudWatch::AnomalyDetector.MetricName``.
:param namespace: ``AWS::CloudWatch::AnomalyDetector.Namespace``.
:param stat: ``AWS::CloudWatch::AnomalyDetector.Stat``.
:param configuration: ``AWS::CloudWatch::AnomalyDetector.Configuration``.
:param dimensions: ``AWS::CloudWatch::AnomalyDetector.Dimensions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-anomalydetector.html
"""
self._values: typing.Dict[str, typing.Any] = {
"metric_name": metric_name,
"namespace": namespace,
"stat": stat,
}
if configuration is not None:
self._values["configuration"] = configuration
if dimensions is not None:
self._values["dimensions"] = dimensions
@builtins.property
def metric_name(self) -> builtins.str:
"""``AWS::CloudWatch::AnomalyDetector.MetricName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-anomalydetector.html#cfn-cloudwatch-anomalydetector-metricname
"""
result = self._values.get("metric_name")
assert result is not None, "Required property 'metric_name' is missing"
return result
@builtins.property
def namespace(self) -> builtins.str:
"""``AWS::CloudWatch::AnomalyDetector.Namespace``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-anomalydetector.html#cfn-cloudwatch-anomalydetector-namespace
"""
result = self._values.get("namespace")
assert result is not None, "Required property 'namespace' is missing"
return result
@builtins.property
def stat(self) -> builtins.str:
"""``AWS::CloudWatch::AnomalyDetector.Stat``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-anomalydetector.html#cfn-cloudwatch-anomalydetector-stat
"""
result = self._values.get("stat")
assert result is not None, "Required property 'stat' is missing"
return result
@builtins.property
def configuration(
self,
) -> typing.Optional[typing.Union[aws_cdk.core.IResolvable, CfnAnomalyDetector.ConfigurationProperty]]:
"""``AWS::CloudWatch::AnomalyDetector.Configuration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-anomalydetector.html#cfn-cloudwatch-anomalydetector-configuration
"""
result = self._values.get("configuration")
return result
@builtins.property
def dimensions(
self,
) -> typing.Optional[typing.Union[aws_cdk.core.IResolvable, typing.List[typing.Union[aws_cdk.core.IResolvable, CfnAnomalyDetector.DimensionProperty]]]]:
"""``AWS::CloudWatch::AnomalyDetector.Dimensions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-anomalydetector.html#cfn-cloudwatch-anomalydetector-dimensions
"""
result = self._values.get("dimensions")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnAnomalyDetectorProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.implements(aws_cdk.core.IInspectable)
class CfnCompositeAlarm(
aws_cdk.core.CfnResource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-cloudwatch.CfnCompositeAlarm",
):
"""A CloudFormation ``AWS::CloudWatch::CompositeAlarm``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html
:cloudformationResource: AWS::CloudWatch::CompositeAlarm
"""
def __init__(
self,
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
alarm_name: builtins.str,
alarm_rule: builtins.str,
actions_enabled: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
alarm_actions: typing.Optional[typing.List[builtins.str]] = None,
alarm_description: typing.Optional[builtins.str] = None,
insufficient_data_actions: typing.Optional[typing.List[builtins.str]] = None,
ok_actions: typing.Optional[typing.List[builtins.str]] = None,
) -> None:
"""Create a new ``AWS::CloudWatch::CompositeAlarm``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param alarm_name: ``AWS::CloudWatch::CompositeAlarm.AlarmName``.
:param alarm_rule: ``AWS::CloudWatch::CompositeAlarm.AlarmRule``.
:param actions_enabled: ``AWS::CloudWatch::CompositeAlarm.ActionsEnabled``.
:param alarm_actions: ``AWS::CloudWatch::CompositeAlarm.AlarmActions``.
:param alarm_description: ``AWS::CloudWatch::CompositeAlarm.AlarmDescription``.
:param insufficient_data_actions: ``AWS::CloudWatch::CompositeAlarm.InsufficientDataActions``.
:param ok_actions: ``AWS::CloudWatch::CompositeAlarm.OKActions``.
"""
props = CfnCompositeAlarmProps(
alarm_name=alarm_name,
alarm_rule=alarm_rule,
actions_enabled=actions_enabled,
alarm_actions=alarm_actions,
alarm_description=alarm_description,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions,
)
jsii.create(CfnCompositeAlarm, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="attrArn")
def attr_arn(self) -> builtins.str:
"""
:cloudformationAttribute: Arn
"""
return jsii.get(self, "attrArn")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="alarmName")
def alarm_name(self) -> builtins.str:
"""``AWS::CloudWatch::CompositeAlarm.AlarmName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmname
"""
return jsii.get(self, "alarmName")
@alarm_name.setter # type: ignore
def alarm_name(self, value: builtins.str) -> None:
jsii.set(self, "alarmName", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="alarmRule")
def alarm_rule(self) -> builtins.str:
"""``AWS::CloudWatch::CompositeAlarm.AlarmRule``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmrule
"""
return jsii.get(self, "alarmRule")
@alarm_rule.setter # type: ignore
def alarm_rule(self, value: builtins.str) -> None:
jsii.set(self, "alarmRule", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="actionsEnabled")
def actions_enabled(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
"""``AWS::CloudWatch::CompositeAlarm.ActionsEnabled``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-actionsenabled
"""
return jsii.get(self, "actionsEnabled")
@actions_enabled.setter # type: ignore
def actions_enabled(
self,
value: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]],
) -> None:
jsii.set(self, "actionsEnabled", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="alarmActions")
def alarm_actions(self) -> typing.Optional[typing.List[builtins.str]]:
"""``AWS::CloudWatch::CompositeAlarm.AlarmActions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmactions
"""
return jsii.get(self, "alarmActions")
@alarm_actions.setter # type: ignore
def alarm_actions(self, value: typing.Optional[typing.List[builtins.str]]) -> None:
jsii.set(self, "alarmActions", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="alarmDescription")
def alarm_description(self) -> typing.Optional[builtins.str]:
"""``AWS::CloudWatch::CompositeAlarm.AlarmDescription``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmdescription
"""
return jsii.get(self, "alarmDescription")
@alarm_description.setter # type: ignore
def alarm_description(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "alarmDescription", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="insufficientDataActions")
def insufficient_data_actions(self) -> typing.Optional[typing.List[builtins.str]]:
"""``AWS::CloudWatch::CompositeAlarm.InsufficientDataActions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-insufficientdataactions
"""
return jsii.get(self, "insufficientDataActions")
@insufficient_data_actions.setter # type: ignore
def insufficient_data_actions(
self,
value: typing.Optional[typing.List[builtins.str]],
) -> None:
jsii.set(self, "insufficientDataActions", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="okActions")
def ok_actions(self) -> typing.Optional[typing.List[builtins.str]]:
"""``AWS::CloudWatch::CompositeAlarm.OKActions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-okactions
"""
return jsii.get(self, "okActions")
@ok_actions.setter # type: ignore
def ok_actions(self, value: typing.Optional[typing.List[builtins.str]]) -> None:
jsii.set(self, "okActions", value)
@jsii.data_type(
jsii_type="@aws-cdk/aws-cloudwatch.CfnCompositeAlarmProps",
jsii_struct_bases=[],
name_mapping={
"alarm_name": "alarmName",
"alarm_rule": "alarmRule",
"actions_enabled": "actionsEnabled",
"alarm_actions": "alarmActions",
"alarm_description": "alarmDescription",
"insufficient_data_actions": "insufficientDataActions",
"ok_actions": "okActions",
},
)
class CfnCompositeAlarmProps:
def __init__(
self,
*,
alarm_name: builtins.str,
alarm_rule: builtins.str,
actions_enabled: typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]] = None,
alarm_actions: typing.Optional[typing.List[builtins.str]] = None,
alarm_description: typing.Optional[builtins.str] = None,
insufficient_data_actions: typing.Optional[typing.List[builtins.str]] = None,
ok_actions: typing.Optional[typing.List[builtins.str]] = None,
) -> None:
"""Properties for defining a ``AWS::CloudWatch::CompositeAlarm``.
:param alarm_name: ``AWS::CloudWatch::CompositeAlarm.AlarmName``.
:param alarm_rule: ``AWS::CloudWatch::CompositeAlarm.AlarmRule``.
:param actions_enabled: ``AWS::CloudWatch::CompositeAlarm.ActionsEnabled``.
:param alarm_actions: ``AWS::CloudWatch::CompositeAlarm.AlarmActions``.
:param alarm_description: ``AWS::CloudWatch::CompositeAlarm.AlarmDescription``.
:param insufficient_data_actions: ``AWS::CloudWatch::CompositeAlarm.InsufficientDataActions``.
:param ok_actions: ``AWS::CloudWatch::CompositeAlarm.OKActions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html
"""
self._values: typing.Dict[str, typing.Any] = {
"alarm_name": alarm_name,
"alarm_rule": alarm_rule,
}
if actions_enabled is not None:
self._values["actions_enabled"] = actions_enabled
if alarm_actions is not None:
self._values["alarm_actions"] = alarm_actions
if alarm_description is not None:
self._values["alarm_description"] = alarm_description
if insufficient_data_actions is not None:
self._values["insufficient_data_actions"] = insufficient_data_actions
if ok_actions is not None:
self._values["ok_actions"] = ok_actions
@builtins.property
def alarm_name(self) -> builtins.str:
"""``AWS::CloudWatch::CompositeAlarm.AlarmName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmname
"""
result = self._values.get("alarm_name")
assert result is not None, "Required property 'alarm_name' is missing"
return result
@builtins.property
def alarm_rule(self) -> builtins.str:
"""``AWS::CloudWatch::CompositeAlarm.AlarmRule``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmrule
"""
result = self._values.get("alarm_rule")
assert result is not None, "Required property 'alarm_rule' is missing"
return result
@builtins.property
def actions_enabled(
self,
) -> typing.Optional[typing.Union[builtins.bool, aws_cdk.core.IResolvable]]:
"""``AWS::CloudWatch::CompositeAlarm.ActionsEnabled``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-actionsenabled
"""
result = self._values.get("actions_enabled")
return result
@builtins.property
def alarm_actions(self) -> typing.Optional[typing.List[builtins.str]]:
"""``AWS::CloudWatch::CompositeAlarm.AlarmActions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmactions
"""
result = self._values.get("alarm_actions")
return result
@builtins.property
def alarm_description(self) -> typing.Optional[builtins.str]:
"""``AWS::CloudWatch::CompositeAlarm.AlarmDescription``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-alarmdescription
"""
result = self._values.get("alarm_description")
return result
@builtins.property
def insufficient_data_actions(self) -> typing.Optional[typing.List[builtins.str]]:
"""``AWS::CloudWatch::CompositeAlarm.InsufficientDataActions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-insufficientdataactions
"""
result = self._values.get("insufficient_data_actions")
return result
@builtins.property
def ok_actions(self) -> typing.Optional[typing.List[builtins.str]]:
"""``AWS::CloudWatch::CompositeAlarm.OKActions``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-compositealarm.html#cfn-cloudwatch-compositealarm-okactions
"""
result = self._values.get("ok_actions")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnCompositeAlarmProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.implements(aws_cdk.core.IInspectable)
class CfnDashboard(
aws_cdk.core.CfnResource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-cloudwatch.CfnDashboard",
):
"""A CloudFormation ``AWS::CloudWatch::Dashboard``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-dashboard.html
:cloudformationResource: AWS::CloudWatch::Dashboard
"""
def __init__(
self,
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
dashboard_body: builtins.str,
dashboard_name: typing.Optional[builtins.str] = None,
) -> None:
"""Create a new ``AWS::CloudWatch::Dashboard``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param dashboard_body: ``AWS::CloudWatch::Dashboard.DashboardBody``.
:param dashboard_name: ``AWS::CloudWatch::Dashboard.DashboardName``.
"""
props = CfnDashboardProps(
dashboard_body=dashboard_body, dashboard_name=dashboard_name
)
jsii.create(CfnDashboard, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="dashboardBody")
def dashboard_body(self) -> builtins.str:
"""``AWS::CloudWatch::Dashboard.DashboardBody``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-dashboard.html#cfn-cloudwatch-dashboard-dashboardbody
"""
return jsii.get(self, "dashboardBody")
@dashboard_body.setter # type: ignore
def dashboard_body(self, value: builtins.str) -> None:
jsii.set(self, "dashboardBody", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="dashboardName")
def dashboard_name(self) -> typing.Optional[builtins.str]:
"""``AWS::CloudWatch::Dashboard.DashboardName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-dashboard.html#cfn-cloudwatch-dashboard-dashboardname
"""
return jsii.get(self, "dashboardName")
@dashboard_name.setter # type: ignore
def dashboard_name(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "dashboardName", value)
|-
az apim notification recipient-user delete --resource-group "rg1" --service-name \\
"apimService1" --notification-name "RequestPublisherNotificationMessage" --user-id \\
"576823d0a40f7e74ec07d642"
"""
helps['apim notification recipient-user list'] = """
type: command
short-summary: list notification recipient user.
"""
helps['apim notification recipient-email'] = """
type: group
short-summary: Commands to manage notification recipient email.
"""
helps['apim notification recipient-email create'] = """
type: command
short-summary: create notification recipient email.
examples:
- name: ApiManagementCreateNotificationRecipientEmail
text: |-
az apim notification recipient-email create --resource-group "rg1" --service-name \\
"apimService1" --notification-name "RequestPublisherNotificationMessage" --email \\
"<EMAIL>"
"""
helps['apim notification recipient-email update'] = """
type: command
short-summary: update notification recipient email.
"""
helps['apim notification recipient-email delete'] = """
type: command
short-summary: delete notification recipient email.
examples:
- name: ApiManagementDeleteNotificationRecipientEmail
text: |-
az apim notification recipient-email delete --resource-group "rg1" --service-name \\
"apimService1" --notification-name "RequestPublisherNotificationMessage" --email \\
"<EMAIL>"
"""
helps['apim notification recipient-email list'] = """
type: command
short-summary: list notification recipient email.
"""
helps['apim openid-connect-provider'] = """
type: group
short-summary: Commands to manage open id connect provider.
"""
helps['apim openid-connect-provider create'] = """
type: command
short-summary: create open id connect provider.
examples:
- name: ApiManagementCreateOpenIdConnectProvider
text: |-
az apim openid-connect-provider create --resource-group "rg1" --service-name \\
"apimService1" --opid "templateOpenIdConnect3" --display-name "templateoidprovider3" \\
--metadata-endpoint "https://oidprovider-template3.net" --client-id \\
"oidprovidertemplate3"
"""
helps['apim openid-connect-provider update'] = """
type: command
short-summary: update open id connect provider.
examples:
- name: ApiManagementUpdateOpenIdConnectProvider
text: |-
az apim openid-connect-provider update --resource-group "rg1" --service-name \\
"apimService1" --opid "templateOpenIdConnect2" --client-secret "updatedsecret"
"""
helps['apim openid-connect-provider delete'] = """
type: command
short-summary: delete open id connect provider.
examples:
- name: ApiManagementDeleteOpenIdConnectProvider
text: |-
az apim openid-connect-provider delete --resource-group "rg1" --service-name \\
"apimService1" --opid "templateOpenIdConnect3"
"""
helps['apim openid-connect-provider list'] = """
type: command
short-summary: list open id connect provider.
"""
helps['apim openid-connect-provider show'] = """
type: command
short-summary: show open id connect provider.
"""
helps['apim policy'] = """
type: group
short-summary: Commands to manage policy.
"""
helps['apim policy create'] = """
type: command
short-summary: create policy.
examples:
- name: ApiManagementCreatePolicy
text: |-
az apim policy create --resource-group "rg1" --service-name "apimService1" --policy-id \\
"policy" --value "<policies>\\r\\n <inbound />\\r\\n <backend>\\r\\n <forward-request />\\r\\
n </backend>\\r\\n <outbound />\\r\\n</policies>" --format "xml"
"""
helps['apim policy update'] = """
type: command
short-summary: update policy.
"""
helps['apim policy delete'] = """
type: command
short-summary: delete policy.
examples:
- name: ApiManagementDeletePolicy
text: |-
az apim policy delete --resource-group "rg1" --service-name "apimService1" --policy-id \\
"policy"
"""
helps['apim policy list'] = """
type: command
short-summary: list policy.
"""
helps['apim policy show'] = """
type: command
short-summary: show policy.
"""
helps['apim portalsetting signin'] = """
type: group
short-summary: Commands to manage sign in setting.
"""
helps['apim portalsetting signin create'] = """
type: command
short-summary: create sign in setting.
examples:
- name: ApiManagementPortalSettingsUpdateSignIn
text: |-
az apim portalsetting signin create --resource-group "rg1" --name "apimService1" \\
--enabled true
"""
helps['apim portalsetting signin update'] = """
type: command
short-summary: update sign in setting.
examples:
- name: ApiManagementPortalSettingsUpdateSignIn
text: |-
az apim portalsetting signin update --resource-group "rg1" --name "apimService1" \\
--enabled true
"""
helps['apim portalsetting signin show'] = """
type: command
short-summary: show sign in setting.
"""
helps['apim portalsetting signup'] = """
type: group
short-summary: Commands to manage sign up setting.
"""
helps['apim portalsetting signup create'] = """
type: command
short-summary: create sign up setting.
examples:
- name: ApiManagementPortalSettingsUpdateSignUp
text: |-
az apim portalsetting signup create --resource-group "rg1" --name "apimService1" \\
--enabled true
"""
helps['apim portalsetting signup update'] = """
type: command
short-summary: update sign up setting.
examples:
- name: ApiManagementPortalSettingsUpdateSignUp
text: |-
az apim portalsetting signup update --resource-group "rg1" --name "apimService1" \\
--enabled true
"""
helps['apim portalsetting signup show'] = """
type: command
short-summary: show sign up setting.
"""
helps['apim portalsetting delegation'] = """
type: group
short-summary: Commands to manage delegation setting.
"""
helps['apim portalsetting delegation create'] = """
type: command
short-summary: create delegation setting.
examples:
- name: ApiManagementPortalSettingsUpdateDelegation
text: |-
az apim portalsetting delegation create --resource-group "rg1" --name "apimService1" \\
--url "http://contoso.com/delegation" --validation-key "<KEY>
<KEY>aQdqPuzJH3ECG4TU2yZjQ7Q=="
"""
helps['apim portalsetting delegation update'] = """
type: command
short-summary: update delegation setting.
examples:
- name: ApiManagementPortalSettingsUpdateDelegation
text: |-
az apim portalsetting delegation update --resource-group "rg1" --name "apimService1" \\
--url "http://contoso.com/delegation" --validation-key "<KEY>
o66hvUmjCDkPKR3qxPu/otJcNciz2aQdqPuzJH3ECG4TU2yZjQ7Q=="
"""
helps['apim portalsetting delegation show'] = """
type: command
short-summary: show delegation setting.
"""
helps['apim product'] = """
type: group
short-summary: Commands to manage product.
"""
helps['apim product create'] = """
type: command
short-summary: create product.
examples:
- name: ApiManagementCreateProduct
text: |-
az apim product create --resource-group "rg1" --service-name "apimService1" --product-id \\
"testproduct" --display-name "Test Template ProductName 4"
"""
helps['apim product update'] = """
type: command
short-summary: update product.
examples:
- name: ApiManagementUpdateProduct
text: |-
az apim product update --resource-group "rg1" --service-name "apimService1" --product-id \\
"testproduct" --display-name "Test Template ProductName 4"
"""
helps['apim product delete'] = """
type: command
short-summary: delete product.
examples:
- name: ApiManagementDeleteProduct
text: |-
az apim product delete --resource-group "rg1" --service-name "apimService1" --product-id \\
"testproduct"
"""
helps['apim product list'] = """
type: command
short-summary: list product.
"""
helps['apim product show'] = """
type: command
short-summary: show product.
"""
helps['apim product api'] = """
type: group
short-summary: Commands to manage product api.
"""
helps['apim product api create'] = """
type: command
short-summary: create product api.
examples:
- name: ApiManagementCreateProductApi
text: |-
az apim product api create --resource-group "rg1" --service-name "apimService1" \\
--product-id "testproduct" --api-id "echo-api"
"""
helps['apim product api update'] = """
type: command
short-summary: update product api.
"""
helps['apim product api delete'] = """
type: command
short-summary: delete product api.
examples:
- name: ApiManagementDeleteProductApi
text: |-
az apim product api delete --resource-group "rg1" --service-name "apimService1" \\
--product-id "testproduct" --api-id "echo-api"
"""
helps['apim product api list'] = """
type: command
short-summary: list product api.
"""
helps['apim product group'] = """
type: group
short-summary: Commands to manage product group.
"""
helps['apim product group create'] = """
type: command
short-summary: create product group.
examples:
- name: ApiManagementCreateProductGroup
text: |-
az apim product group create --resource-group "rg1" --service-name "apimService1" \\
--product-id "testproduct" --group-id "templateGroup"
"""
helps['apim product group update'] = """
type: command
short-summary: update product group.
"""
helps['apim product group delete'] = """
type: command
short-summary: delete product group.
examples:
- name: ApiManagementDeleteProductGroup
text: |-
az apim product group delete --resource-group "rg1" --service-name "apimService1" \\
--product-id "testproduct" --group-id "templateGroup"
"""
helps['apim product group list'] = """
type: command
short-summary: list product group.
"""
helps['apim product policy'] = """
type: group
short-summary: Commands to manage product policy.
"""
helps['apim product policy create'] = """
type: command
short-summary: create product policy.
examples:
- name: ApiManagementCreateProductPolicy
text: |-
az apim product policy create --resource-group "rg1" --service-name "apimService1" \\
--product-id "5702e97e5157a50f48dce801" --policy-id "policy" --value "<policies>\\r\\n <inb
ound>\\r\\n <rate-limit calls=\\"{{call-count}}\\" renewal-period=\\"15\\"></rate-limit>\\r\\n
<log-to-eventhub logger-id=\\"16\\">\\r\\n @( string.Join(\\",\\", DateT
ime.UtcNow, context.Deployment.ServiceName, context.RequestId, context.Request.IpAddress,
context.Operation.Name) ) \\r\\n </log-to-eventhub>\\r\\n <quota-by-key ca
lls=\\"40\\" counter-key=\\"cc\\" renewal-period=\\"3600\\" increment-count=\\"@(context.Request.
Method == "POST" ? 1:2)\\" />\\r\\n <base />\\r\\n </inbound>\\r\\n <backend>\\r\\n
<base />\\r\\n </backend>\\r\\n <outbound>\\r\\n <base />\\r\\n </outbound>\\r\\n</policies
>" --format "xml"
"""
helps['apim product policy update'] = """
type: command
short-summary: update product policy.
"""
helps['apim product policy delete'] = """
type: command
short-summary: delete product policy.
examples:
- name: ApiManagementDeleteProductPolicy
text: |-
az apim product policy delete --resource-group "rg1" --service-name "apimService1" \\
--product-id "testproduct" --policy-id "policy"
"""
helps['apim product policy list'] = """
type: command
short-summary: list product policy.
"""
helps['apim product policy show'] = """
type: command
short-summary: show product policy.
"""
helps['apim property'] = """
type: group
short-summary: Commands to manage property.
"""
helps['apim property create'] = """
type: command
short-summary: create property.
examples:
- name: ApiManagementCreateProperty
text: |-
az apim property create --resource-group "rg1" --service-name "apimService1" --prop-id \\
"testprop2" --secret true --display-name "prop3name" --value "propValue"
"""
helps['apim property update'] = """
type: command
short-summary: update property.
examples:
- name: ApiManagementUpdateProperty
text: |-
az apim property update --resource-group "rg1" --service-name "apimService1" --prop-id \\
"testprop2" --secret true
"""
helps['apim property delete'] = """
type: command
short-summary: delete property.
examples:
- name: ApiManagementDeleteProperty
text: |-
az apim property delete --resource-group "rg1" --service-name "apimService1" --prop-id \\
"testprop2"
"""
helps['apim property list'] = """
type: command
short-summary: list property.
"""
helps['apim property show'] = """
type: command
short-summary: show property.
"""
helps['apim subscription'] = """
type: group
short-summary: Commands to manage subscription.
"""
helps['apim subscription create'] = """
type: command
short-summary: create subscription.
examples:
- name: ApiManagementCreateSubscription
text: |-
az apim subscription create --resource-group "rg1" --service-name "apimService1" --sid \\
"testsub" --owner-id "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_grou
p }}/providers/Microsoft.ApiManagement/service/{{ service_name }}/users/{{ user_name }}" \\
--scope "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/provider
s/Microsoft.ApiManagement/service/{{ service_name }}/products/{{ product_name }}" \\
--display-name "testsub"
"""
helps['apim subscription update'] = """
type: command
short-summary: update subscription.
examples:
- name: ApiManagementUpdateSubscription
text: |-
az apim subscription update --resource-group "rg1" --service-name "apimService1" --sid \\
"testsub" --display-name "testsub"
"""
"""
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
from datetime import datetime,timedelta
from typing import Dict, Generator
import xmltodict
import requests
from jsonschema import validate
from urllib.parse import urlencode
import traceback
import timeunit
from collections import OrderedDict
from airbyte_protocol import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
SyncMode
)
from base_python import AirbyteLogger, Source
# STREAM NAMES
StreamGetSiteMetaData = "GetSiteMetaData"
StreamGetSensorMetaData = "GetSensorMetaData"
StreamGetSensorData = "GetSensorData"
# PARAM_NAMES
ConfigPropDataApiUrl = "data_api_url"
ConfigPropSystemKey = "system_key"
# OTHER CONSTANTS
HttpResponseTimeout = 30 # TIME TO WAIT FOR A RESPONSE FROM ONERAIN SERVER (IN SECONDS)
OneRainDateTimeFormat = "%Y-%m-%d %H:%M:%S" # DATE FORMAT USED BY ONERAIN FOR DATETIMES
class SourceOnerainApi(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the integration
            e.g., whether a provided Stripe API token can be used to connect to the Stripe API.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
# VALIDATE CONFIG AGAINST JSON SCHEMA (spec.json)
validate( instance=config, schema=self.spec(logger).connectionSpecification)
# try to get time (ping) from configured URL
url = config[ConfigPropDataApiUrl]
def assertAliasedParamsNotBothPresent(config,stream,paramName1,paramName2):
if paramName1 in config[stream] and paramName2 in config[stream]:
raise AssertionError(f"{stream}: cannot specify both aliased parameters '{paramName1}' and '{paramName2}'. choose one.")
def assertOneRainDateFormat(config,stream,paramName):
try:
if paramName in config[stream]:
return datetime.strptime(config[stream][paramName],'%Y-%m-%d %H:%M:%S')
except ValueError as e:
raise ValueError(stream,paramName,str(e))
# ADDITIONAL GetSiteMetadata STREAM CONFIG CHECKS
assertAliasedParamsNotBothPresent(config,StreamGetSiteMetaData,"or_site_id","site_id")
# ADDITIONAL GetSensorMetaData STREAM CONFIG CHECKS
assertAliasedParamsNotBothPresent(config,StreamGetSensorMetaData,"or_sensor_id","sensor_id")
assertAliasedParamsNotBothPresent(config,StreamGetSensorMetaData,"or_site_id","site_id")
# ADDITIONAL GetSensorData STREAM CONFIG CHECKS
assertAliasedParamsNotBothPresent(config,StreamGetSensorData,"or_site_id","site_id")
assertAliasedParamsNotBothPresent(config,StreamGetSensorData,"or_sensor_id","sensor_id")
assertOneRainDateFormat(config,StreamGetSensorData,"data_start")
assertOneRainDateFormat(config,StreamGetSensorData,"data_end")
# PING CONFIGURED ONERAIN URL WITH GetTime REQUEST TO MAKE SURE IT'S A VALID ENDPOINT
get_time_url = f'{url}?method=GetTime'
# use GetTime method to validate well formed url and that it responds to this
# basic time get request
r = requests.get(get_time_url,timeout=HttpResponseTimeout)
assert r.status_code == 200
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration.
For example, given valid credentials to a Postgres database,
returns an Airbyte catalog where each postgres table is a stream, and each table column is a field.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteCatalog is an object describing a list of all available streams in this source.
        A stream is an AirbyteStream object that includes:
- its stream name (or table name in the case of Postgres)
- json_schema providing the specifications of expected schema for this stream (a list of columns described
by their names and types)
"""
streams = []
# GET SPEC TO GRAB DESCRIPTIONS OF FIELDS
spec = self.spec(logger).connectionSpecification
defs = spec['definitions']
def get_spec_def_obj(name):
return defs[name]
def get_spec_def_desc(name):
return defs[name]['description']
def get_spec_def_type(name):
return defs[name]['type']
def get_spec_def_prop(spec_def_name,def_prop_name):
return defs[spec_def_name][def_prop_name]
# ADD SCHEMA FOR StreamGetSiteMetaData
stream_name = StreamGetSiteMetaData
json_schema = { # Example
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"or_site_id": get_spec_def_obj('or_site_id'),
"site_id": get_spec_def_obj('site_id'),
"location":{"desription":"describes site location","type":"string"},
"owner":{"desription":"DEPRECATED","type":"string"},
"system_id":{"description":"identifies the input system for which the site belongs.", "type":"integer"},
"client_id":{"description":"identifies the client that owns the input system for which the site belongs.","type":"string"},
"latitude_dec":{"description":"latitude of site in decimal form","type":"number"},
"longitude_dec":{"description":"longitude of site in decimal form","type":"number"},
"elevation":{"description":"elevation of site","type":"number"},
},
}
streams.append(AirbyteStream(name=stream_name,
supported_sync_modes=["full_refresh"], # don't need incremental for site metadata. small dataset
source_defined_cursor=False, # small dataset don't need
json_schema=json_schema))
# ADD SCHEMA FOR StreamGetSensorMetaData
stream_name = StreamGetSensorMetaData
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"site_id": get_spec_def_obj('site_id'),
"sensor_id": get_spec_def_obj('sensor_id'),
"or_site_id": get_spec_def_obj('or_site_id'),
"or_sensor_id":get_spec_def_obj('or_sensor_id'),
"location":{"description":"site name","type":"string"},
"description":{"description":"sensor name", "type":"string"},
"sensor_class":get_spec_def_obj('class'),
"sensor_type":{"description":"source type of data","type":"string"},
"units":get_spec_def_obj('units'),
"translate":{"description":"text translation enabled", "type":"boolean"},
"precision":{"description":"number of decimals displayed for Reading/Finished value in user interface", "type":"integer"},
"last_time":{"description":"last data time; see GetSensorData A5","type":"string"},
"last_value":{"description":"last Reading/Finished; see GetSensorData A8", "type":"number"},
"last_time_received":{"description":"last data time; see GetSensorData A5", "type":"string"},
"last_value_received":{"description":"last Reading/Finished value; see GetSensorData A8", "type":"number"},
"last_raw_value":{"description":"last raw value; see GetSensorData A6", "type":"number"},
"last_raw_value_received":{"description":"last raw value received; see GetSensorData A6","type":"number"},
"change_time":{"description":"time of last change to sensor metadata","type":"string"},
"normal":{"description":"is sensor in normal mode (not timed out)?", "type":"integer"}, # boolean?
"active":{"description":"is sensor active (not in maintenance mode/out of service)?", "type":"integer"}, #boolean?
"valid":{"description":"*may* indicate if last value is valid. unknown", "type":"integer"}, #boolean?
"change_rate":{"description":"DEPRECATED/UNUSED", "type":"number"},
"time_min_consec_zeros":{"description":"DEPRECATED/UNUSED", "type":"integer"},
"validation":{"description":"validation protocol for finished value", "type":"string"},
"value_max":{"description":"validation parameter: maximum value", "type":"number"},
"value_min":{"description":"validation parameter: minimum value", "type":"number"},
"delta_pos":{"description":"validation parameter: positive delta", "type":"number"},
"delta_neg":{"description":"validation parameter: negative delta", "type":"number"},
"rate_pos":{"description":"DEPRECATED", "type":"integer"},
"rate_neg":{"description":"DEPRECATED", "type":"integer"},
"time_max":{"description":"validation parameter: maximum time", "type":"integer"},
"time_min":{"description":"validation parameter: minimum time", "type":"integer"},
"slope":{"description":"used in data conversion; multiplicative value", "type":"number"},
"offset":{"description":"used in data conversion; additive value", "type":"number"},
"reference":{"description":"used in data conversion; additive value", "type":"number"},
"utc_offset":{"description":"the numeric offset (in hours) from Universal Coordinated Time", "type":"integer"},
"using_dst":{"description":"DEPRECATED", "type":"boolean"},
"conversion":{"description":"conversion protocol for raw to finished value", "type":"string"},
"usage":{"description":"DEPRECATED/UNUSED", "type":"string"},
"protocol":{"description":"DEPRECATED/UNUSED", "type":"integer"}
}
}
streams.append(AirbyteStream(name=stream_name,
supported_sync_modes=["full_refresh"], # don't need incremental. small dataset
source_defined_cursor=False,
json_schema=json_schema))
# ADD STREAM FOR StreamGetSensorData
stream_name = StreamGetSensorData
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"site_id":get_spec_def_obj('site_id'),
"sensor_id":get_spec_def_obj('sensor_id'),
"or_site_id":get_spec_def_obj('or_site_id'),
"or_sensor_id":get_spec_def_obj('or_sensor_id'),
"sensor_class":get_spec_def_obj('class'),
"data_time": {
"type": get_spec_def_type('onerain_datetime'),
"description":"date/time data was captured",
"pattern":get_spec_def_prop('onerain_datetime','pattern')
},
"data_value": {
"type":"number",
"description":"finished data value with precision (conversion) applied",
},
"data_quality": get_spec_def_obj('data_quality'),
"raw_value": {
"type":"number",
"description":"this is the value supplied by the source system. It is the value before any conversion or validation is applied.",
},
"units": get_spec_def_obj('units')
}
}
streams.append(AirbyteStream(name=stream_name,
supported_sync_modes=["full_refresh","incremental"],
source_defined_cursor=True,
json_schema=json_schema))
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration,
catalog, and state.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
        :param catalog: The input catalog is a ConfiguredAirbyteCatalog, which is almost the same as the AirbyteCatalog
            returned by discover(), except that it has been configured in the UI. For each stream and field, extra
            modifications may have been applied, such as filtering streams and/or columns out, renaming entities, etc.
        :param state: When Airbyte reads data from a source, it may need to keep a checkpoint cursor so that it can
            resume replication from that saved checkpoint in the future. This object carries the state from previous
            runs and avoids re-replicating the entire data set every time.
:return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object.
"""
stream_name = StreamGetSiteMetaData # Example
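# ---------------------------------------------------------------------------
# The read() body above is incomplete here. The helper below is a minimal,
# hypothetical sketch (not part of the original source) of how the
# GetSiteMetaData stream could be fetched. It reuses the requests/xmltodict
# imports and the constants defined above, and it assumes the OneRain data API
# accepts "method" and "system_key" query parameters and returns an XML
# envelope shaped like <onerain><response><general>...</general></response>;
# that payload layout is an assumption, not something confirmed by this file.
# ---------------------------------------------------------------------------
def _fetch_site_metadata_sketch(data_api_url, system_key, timeout=HttpResponseTimeout):
    """Yield one dict per site row returned by the (assumed) GetSiteMetaData method."""
    params = {"method": StreamGetSiteMetaData, "system_key": system_key}
    response = requests.get(data_api_url, params=params, timeout=timeout)
    response.raise_for_status()
    document = xmltodict.parse(response.text)
    rows = document.get("onerain", {}).get("response", {}).get("general", [])
    if isinstance(rows, dict):  # a single row is parsed as a dict, not a list
        rows = [rows]
    for row in rows:
        yield dict(row)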
#<NAME> <EMAIL>
#See the github repository for more information: https://github.com/alexholcombe/twoWords
from __future__ import print_function, division
from psychopy import monitors, visual, event, data, logging, core, sound, gui, microphone
from matplotlib import pyplot
import psychopy.info
import scipy
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os , pylab
try:
from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
import stringResponse
except ImportError:
print('Could not import stringResponse.py (you need that file to be in the same directory)')
try:
import recordVocalResponseKRedit
except ImportError:
    print('Could not import recordVocalResponseKRedit.py (you need that file to be in the same directory)')
# test speeds: 17,35,50,66,84,99,115,130
wordDur1 = 1000 #Fast 35
wordDur2 = 1000 #Slow 50
#Setting up vocal response parameters
responseMode = 'voice'
buffer = 128 # smaller = short play latency, but higher chance of choppy sound playback
rate = 48000 # needs to be 40000 or higher. Changing this doesn't seem to work.
sound.init(buffer=buffer, rate=rate)
autoLogging=False
logging.console.setLevel(logging.EXP) #DEBUG would send nearly all messages to the console; the other levels, in increasing severity, are INFO, EXP, DATA, WARNING and ERROR
# initial set up:
dataPath='dataRaw'
if os.path.isdir('dataRaw'):
if os.path.isdir(dataPath+os.sep+'recordings'):
recordingPath = dataPath + os.sep + 'recordings'
else:
print ('No "recordings" dir exists, so saving data in dataRaw')
recordingPath = dataPath
else:
print (dataPath,' directory does not exist, so saving data and recordings in present working directory')
dataPath='.'
recordingPath='.'
#
#switchOn doesn't do much, but takes up to several seconds.
#Supposed to be able to set the sampling rate
# Set up microphone, must be 16000 or 8000 Hz for speech recognition
desiredSampleRate=16000
microphone.switchOn(sampleRate=desiredSampleRate)
sampleRate = sound.pyoSndServer.getSamplingRate()
if (round(sampleRate)==round(desiredSampleRate)):
print ('Successfully set sample rate to ',sampleRate)
else:
    print ('Attempted to set sampleRate to = ', desiredSampleRate, ' but is now reported as ', sampleRate, ". Alex hasn't found a way to fix this, but it just means you can't use speech recognition")
mic = microphone.AdvAudioCapture() #Initialise class to get ready for recording with automatic markerTone to assess timing
markerToneDur = 0.05
markerToneVol = .4 # 0.03 works fine and is inaudible
markerFreq = 19000 #19000
mic.setMarker(tone=markerFreq, secs=markerToneDur, volume= markerToneVol)
(hz,duration,volume) = mic.getMarkerInfo()
print ("According to mic, the marker hz=",hz," duration=",duration," volume=",volume)
units='deg'
wordEccentricity= 0.3 #4
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='PX' #user is prompted to enter true subject name
if autopilot: subject='auto'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
if demo:
refreshRate = 60.; #100
numWordsInStream = 1
totalTrials=0
staircaseTrials = 25
prefaceStaircaseTrialsN = 0 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
threshCriterion = 0.58
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [-.7,-.7,-.7] #originally [1.,1.,1.]
letterColor = [1,1,1] #[1.,1.,1.]
cueRadius = 6 #6 deg in Goodbourn & Holcombe
widthPix= 1600 #1920 #monitor width in pixels of Agosta [1280]
heightPix= 900 #1080 #800 #monitor height in pixels [800]
monitorwidth = 52.5 #38.7 #monitor width in cm [was 38.7]
scrn=1
#0 to use main screen, 1 to use external screen connected to computer #for some reason this works when it is the other way around.
fullscr=False #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
widthPix = 600; heightPix = 600
monitorwidth = 13.0
fullscr=False; scrn=1
framesSaved=0
if demo:
scrn=1; fullscr=False
widthPix = 800; heightPix = 600
monitorname='EIZO'
allowGUI = True
viewdist = 57 #50. #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
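# e.g. with the values above (widthPix=1600, monitorwidth=52.5 cm, viewdist=57 cm):
# atan(52.5/57) ~= 0.744 rad ~= 42.7 deg, so pixelperdegree ~= 1600/42.7 ~= 37.5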
ltrHeight = 0.7 #0.9 #0.4 #2.5 #Martini letters were 2.5deg high
totalTrials=0
# create a dialog from dictionary
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': fullscr,
'Screen refresh rate': 60 }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='AB experiment OR staircase to find thresh noise level for T1 performance criterion',
order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
monitorname = 'EIZO'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibration
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
#assumes widthPix, heightPix, allowGUI exists, bgColor, fullscr, scrn,waitBlank
print("Trying to open window of size=",widthPix," , ",heightPix)
myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
print("Opened window,",myWin," of size ",myWin.size)
return myWin
myWin = openMyStimWindow()
#identify the hardware microphone in use:
circle = visual.Circle(myWin, 5, fillColor='grey', lineColor='grey', lineColorSpace='rgb', fillColorSpace='rgb', edges=64, autoLog=autoLogging)
names, idx = sound.pyo.pa_get_input_devices()
inp = sound.pyo.pa_get_default_input()
msg = 'Ensure speaker vol > 0\n\nAny key to start...\n\nUsing input="%s"' % names[idx.index(inp)]
startMessage = visual.TextStim(myWin, msg, color=letterColor, colorSpace='rgb', height=ltrHeight, autoLog=autoLogging)
circle.draw()
startMessage.draw()
myWin.flip()
if 'escape' in event.waitKeys():
print("User quit during sound setup, via ESCAPE")
core.quit()
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
#check screen refresh is what assuming it is ##############################################
Hzs=list()
myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
print('About to measure frame flips')
for i in range(50):
myWin.flip()
Hzs.append( myWin.fps() ) #varies wildly on successive runs!
myWin.setRecordFrameIntervals(False)
# end testing of screen refresh########################################################
Hzs = np.array( Hzs ); Hz= np.median(Hzs)
msPerFrame= 1000./Hz
refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
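    # e.g. a measured median of 59.2 Hz against an assumed 60 Hz gives pctOff ~= 0.013,
    # which is within the 3% tolerance, so refreshRateWrong stays False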
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
        refreshMsg2 =  'which is off by more than ' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWin.allowGUI =True
###
myWin.close() #have to close window to show dialog box
group = '1' #to use if no staircase, can be set by user
trialsPerCondition = 1 #default value
dlgLabelsOrdered = list()
if doStaircase:
myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
myDlg.addField('Subject name (default="PX"):', 'PX', tip='or subject code')
dlgLabelsOrdered.append('subject')
if doStaircase:
#prefaceStaircaseTrialsN = 5
easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
dlgLabelsOrdered.append('easyTrials')
myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
dlgLabelsOrdered.append('staircaseTrials')
pctCompletedBreak = 101
else:
myDlg.addField('\tParticipant Group', group, tip=str(group))
dlgLabelsOrdered.append('group')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
pctCompletedBreak = 55
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWin.size != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
if not autopilot:
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
            subject = name #change subject default name to the name entered
# Repository: megatron0000/ces22-xadrez
from math import sqrt
from multiprocessing import Process, Pipe
from aiprocess import aiprocess, move2str
from chessengine import *
from guiengine import *
WIDTH = 800
HEIGHT = 600
SCALE = 1 / 15
GAME = None
CHOOSINGPROMOTION = False
PIPE = None
AI_PROC = None
WAITING = False
PVP = False
def set_globals():
global WIDTH, HEIGHT, SCALE, GAME, CHOOSINGPROMOTION, PIPE, AI_PROC, WAITING, PVP
WIDTH = 800
HEIGHT = 600
SCALE = 1 / 15
GAME = None
CHOOSINGPROMOTION = False
PIPE = None
AI_PROC = None
WAITING = False
PVP = False
initialize(WIDTH, HEIGHT)
SBD = Image('resources/Cburnett V2 improved/PNGs/square brown dark_png.png').scale(SCALE)
SBL = Image('resources/Cburnett V2 improved/PNGs/square brown light_png.png').scale(SCALE)
SGD = Image('resources/Cburnett V2 improved/PNGs/square gray dark _png.png').scale(SCALE)
SGL = Image('resources/Cburnett V2 improved/PNGs/square gray light _png.png').scale(SCALE)
INACTIVE_SQUARE = [SBD, SGD]
ACTIVE_SQUARE = [SBL, SGL]
PIECES = {
Bishop(Side.BLACK): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/b_bishop_png_withShadow.png').scale(SCALE),
King(Side.BLACK): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/b_king_png_withShadow.png').scale(SCALE),
Knight(Side.BLACK): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/b_knight_png_withShadow.png').scale(SCALE),
Pawn(Side.BLACK): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/b_pawn_png_withShadow.png').scale(SCALE),
Queen(Side.BLACK): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/b_queen_png_withShadow.png').scale(SCALE),
Rook(Side.BLACK): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/b_rook_png_withShadow.png').scale(SCALE),
Bishop(Side.WHITE): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/w_bishop_png_withShadow.png').scale(SCALE),
King(Side.WHITE): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/w_king_png_withShadow.png').scale(SCALE),
Knight(Side.WHITE): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/w_knight_png_withShadow.png').scale(SCALE),
Pawn(Side.WHITE): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/w_pawn_png_withShadow.png').scale(SCALE),
Queen(Side.WHITE): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/w_queen_png_withShadow.png').scale(SCALE),
Rook(Side.WHITE): Image(
'resources/Cburnett V2 improved/PNGs/With Shadow/w_rook_png_withShadow.png').scale(SCALE)
}
PIECES_INIT = {
"a1": Rook(Side.WHITE), "b1": Knight(Side.WHITE), "c1": Bishop(Side.WHITE),
"d1": Queen(Side.WHITE), "e1": King(Side.WHITE), "f1": Bishop(Side.WHITE),
"g1": Knight(Side.WHITE), "h1": Rook(Side.WHITE), "a2": Pawn(Side.WHITE),
"b2": Pawn(Side.WHITE), "c2": Pawn(Side.WHITE), "d2": Pawn(Side.WHITE),
"e2": Pawn(Side.WHITE), "f2": Pawn(Side.WHITE), "g2": Pawn(Side.WHITE),
"h2": Pawn(Side.WHITE), "a8": Rook(Side.BLACK), "b8": Knight(Side.BLACK),
"c8": Bishop(Side.BLACK), "d8": Queen(Side.BLACK), "e8": King(Side.BLACK),
"f8": Bishop(Side.BLACK), "g8": Knight(Side.BLACK), "h8": Rook(Side.BLACK),
"a7": Pawn(Side.BLACK), "b7": Pawn(Side.BLACK), "c7": Pawn(Side.BLACK),
"d7": Pawn(Side.BLACK), "e7": Pawn(Side.BLACK), "f7": Pawn(Side.BLACK),
"g7": Pawn(Side.BLACK), "h7": Pawn(Side.BLACK),
}
class BoardSquare(FigureNode):
def __init__(self, square, xy):
self.square = Square(square)
super().__init__(xy, INACTIVE_SQUARE[self.square.index % 2])
self.BoardMouse(self).watch(self._bus, lambda: self.bounds)
self._bus.on('moves markup', self.markup)
self._bus.on('request square change', self.respond_squarechange)
def markup(self, moves):
if self.square in [move.tosq for move in moves]:
self.set_image(ACTIVE_SQUARE[self.square.index % 2])
else:
self.set_image(INACTIVE_SQUARE[self.square.index % 2])
def respond_squarechange(self, pos_self_fromsq):
if not self.bounds.collidepoint(pos_self_fromsq[0]):
return
move = next((x for x in GAME.moves(pos_self_fromsq[2]) if
x.fromsq == pos_self_fromsq[2] and x.tosq == self.square), None)
if move is not None:
self._bus.emit(
'respond square change', (self.bounds.topleft, pos_self_fromsq[1], move))
class BoardMouse(MouseAware):
def __init__(self, outer):
"""
            :type outer: BoardSquare
"""
super().__init__()
self.outer = outer
def onclick(self):
self.outer._bus.emit('moves markup', [])
class ChessPiece(FigureNode):
def __init__(self, piece, xy, square):
super().__init__(xy, PIECES[piece])
self.image = PIECES[piece]
self.calcpos(xy)
self.piece = piece
self.square = square
self.PieceAware(self).watch(self._bus, lambda: self.bounds)
self.ismoving = False
self.movedirection = (0, 0)
self.movetarget = (0, 0)
self.timetaken = 0
def calcpos(self, xy, reset=True):
x = xy[0] + (SBD.width - self.image.width) / 2
y = xy[1] + (SBD.height - self.image.height) / 2
if reset:
self.xy((x, y))
return x, y
def update_logic(self, dt):
super().update_logic(dt)
if self.timetaken > 0:
self.xy(tuple(l1 + 800 * dt * l2 for l1, l2 in zip(self.xy(), self.movedirection)))
self.timetaken -= dt
self._bus.emit(Event.REQ_ANIM_FRAME)
if self.timetaken <= 0:
self.calcpos(self.movetarget)
self.ismoving = False
class PieceAware(MouseAware):
def __init__(self, outer):
"""
:type outer ChessPiece
"""
super().__init__()
self.outer = outer
self.outer._bus.on('respond square change', self.on_response_squarechange)
self.outer._bus.on('order piece move', self.on_order_piece_move)
self.draglastpos = None
self.originalpos = None
def on_response_squarechange(self, pos_self_move):
if self is not pos_self_move[1]:
return
self.outer.calcpos(pos_self_move[0])
self.outer.square = pos_self_move[2].tosq
if pos_self_move[2].promotion is not None:
self.outer._bus.emit('request promotion options', (self.outer, pos_self_move[2]))
captured = GAME.get(pos_self_move[2].tosq)
if captured.kind is not NoPiece:
self.outer._bus.emit('piece captured', (captured, pos_self_move[2].tosq))
global CHOOSINGPROMOTION
CHOOSINGPROMOTION = True
return
captured = GAME.make(pos_self_move[2])
if captured[0].kind is not NoPiece:
self.outer._bus.emit('piece captured', captured)
if pos_self_move[2].kind is MoveKind.CASTLE_QUEEN:
self.outer._bus.emit('request piece move', (self.outer.square - 2, self.outer.square + 1))
elif pos_self_move[2].kind is MoveKind.CASTLE_KING:
self.outer._bus.emit('request piece move', (self.outer.square + 1, self.outer.square - 1))
self.outer._bus.emit('move made', pos_self_move[2])
def on_order_piece_move(self, self_tosq_coord):
if self.outer is not self_tosq_coord[0]:
return
self.outer.square = self_tosq_coord[1]
self.outer.ismoving = True
self.outer.movedirection = self.outer.calcpos(tuple(
l2 - l1 for l1, l2 in zip(self.outer.xy(), self_tosq_coord[2])), reset=False)
length = sqrt(self.outer.movedirection[0] * self.outer.movedirection[0]
+ self.outer.movedirection[1] * self.outer.movedirection[1])
self.outer.movedirection = tuple(l / length for l in self.outer.movedirection)
self.outer.movetarget = self_tosq_coord[2]
self.outer.timetaken = length / 800
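            # e.g. a purely horizontal slide of 240 px gives movedirection=(1, 0) and
            # timetaken = 240/800 = 0.3 s, matching the 800 px/s speed used in update_logic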
def ondragstart(self, pos):
if self.outer.ismoving:
return
self.outer._bus.emit('moves markup', GAME.moves(self.outer.square))
self.draglastpos = pos
self.originalpos = self.outer.xy()
def ondrag(self, pos):
if self.outer.ismoving:
return
newxy = tuple(
orig + x1 - x0 for orig, x1, x0 in zip(self.outer.xy(), pos, self.draglastpos))
self.outer.xy(newxy)
self.draglastpos = pos
def ondragend(self, pos):
if self.outer.ismoving:
return
self.outer._bus.emit('moves markup', [])
self.outer.xy(self.originalpos)
if not CHOOSINGPROMOTION and not WAITING:
self.outer._bus.emit('request square change', (pos, self, self.outer.square))
class PromotionOption(FigureNode):
class PromotionOptionMouseAware(MouseAware):
def __init__(self, outer):
super().__init__()
self.outer = outer
def onmouseenter(self):
self.outer.set_image(
PIECES[self.outer.chesspiece].clone(apply_changes=False).scale(SCALE * 1.2))
def onmouseleave(self):
self.outer.set_image(PIECES[self.outer.chesspiece].clone())
def onclick(self):
self.outer._bus.emit('promotion choosen', self.outer.chesspiece)
def __init__(self, xy, chesspiece):
self.chesspiece = chesspiece
super().__init__(xy, PIECES[self.chesspiece].clone())
self.PromotionOptionMouseAware(self).watch(self._bus, lambda: self.bounds)
class MainMenuScreen(Scene):
"""
    Class that builds the main menu screen.
"""
def _parts(self):
play_text = Text("Versus CPU", 36, None, (255, 255, 255), (139, 69, 19))
play_button = ButtonNode(((WIDTH - play_text.width()) // 2, 200), play_text)
pvp_text = Text("PVP", 36, None, (255, 255, 255), (139, 69, 19))
pvp_button = ButtonNode(((WIDTH - pvp_text.width()) // 2, 400), pvp_text)
self._background((184, 134, 11))
self._add_child(play_button)
self._add_child(pvp_button)
play_button.onclick(self.clickplay)
pvp_button.onclick(self.clickpvp)
# self._bgm(Sound('Music/Music.ogg'))
def clickplay(self):
self._bus.emit(Event.SCENE_CHANGE, PlayScreen)
global PVP
PVP = False
def clickpvp(self):
self._bus.emit(Event.SCENE_CHANGE, PlayScreen)
global PVP
PVP = True
class PlayScreen(Scene):
"""
    Class that builds the game screen.
"""
children = []
promoting_uipiece = None
promoting_move = None
special_condition_message = None
special_condition_button = None
def on_piececaptured(self, piece_square):
child = next(chesspiece for chesspiece in self.children if
isinstance(chesspiece, ChessPiece)
and chesspiece.piece == piece_square[0] and chesspiece.square == piece_square[1])
self._remove_child(child)
self.children.remove(child)
def on_request_piece_move(self, fromsq_tosq):
child = next(piece for piece in self.children if piece.square == fromsq_tosq[0]
and isinstance(piece, ChessPiece))
square = next(sq for sq in self.children if sq.square == fromsq_tosq[1]
and isinstance(sq, BoardSquare))
self._bus.emit('order piece move', (child, fromsq_tosq[1], square.xy()))
def on_request_promotion_options(self, uipiece_move):
self.promoting_uipiece = uipiece_move[0]
self.promoting_move = uipiece_move[1]
side = uipiece_move[0].piece.side
choose_text = Text('Escolha uma promoção', 40, None, (255, 255, 255), (0, 0, 0))
marginx = (WIDTH - 8 * SBD.width) / 2
marginy = (HEIGHT - 8 * SBD.height) / 2
posx = (WIDTH - choose_text.width()) / 2
posy = ((HEIGHT - 8 * SBD.height) / 2 - choose_text.height()) / 2
choose_button = TextNode((posx, posy), choose_text)
self.children.append(choose_button)
self._add_child(choose_button)
deltay = 0
for piece in [Rook(side), Queen(side), Bishop(side), Knight(side)]:
image = PIECES[piece]
option = PromotionOption(
(marginx + 8 * SBD.width + (marginx - image.width) / 2,
deltay + marginy),
piece)
self._add_child(option)
self.children.append(option)
deltay += 2 * image.height
def on_promotion_choosen(self, chesspiece):
xy = next(square.xy() for square in self.children if isinstance(square, BoardSquare) and
square.square == self.promoting_uipiece.square)
square = self.promoting_uipiece.square
promotedpiece = ChessPiece(chesspiece, xy, square)
for element in self.children[-5:]:
self._remove_child(element)
self.children.remove(element)
self.children.remove(self.promoting_uipiece)
self._remove_child(self.promoting_uipiece)
self._add_child(promotedpiece)
self.children.append(promotedpiece)
global CHOOSINGPROMOTION
CHOOSINGPROMOTION = False
promotionmove = self.promoting_move._replace(promotion=chesspiece)
if promotionmove in GAME.moves():
GAME.make(promotionmove)
self._bus.emit('move made', promotionmove)
def on_move_made(self, move):
global WAITING
def change_message(text, color):
marginy = (HEIGHT - 8 * SBD.height) / 2
self.special_condition_message.content(text)
self.special_condition_message.color(color)
self.special_condition_button.xy((
(WIDTH - self.special_condition_message.width()) / 2,
marginy + 8 * SBD.height + (marginy - self.special_condition_message.height()) / 2
))
if GAME.stalemate():
change_message(
'EMPATE ! Não há movimentos possíveis. Retorne ao menu principal', (123, 70, 203))
elif GAME.checkmate():
change_message('XEQUE-MATE ! Retorne ao menu principal', (242, 68, 43))
elif GAME.check():
change_message('XEQUE !', (255, 165, 0))
else:
change_message('', (0, 0, 0))
if GAME.turn() == Side.BLACK and not GAME.checkmate() and not PVP:
# self.on_ai_request_move(AI.ai_move(1, True))
# self.on_ai_request_move(Pool(1).apply_async(AI.ai_move, (2, True)).get())
PIPE.send(move2str(move))
WAITING = True
def on_ai_request_move(self, move):
uifromsq = next(sq for sq in self.children if isinstance(sq, BoardSquare) and sq.square == move.fromsq)
uitosq = next(sq for sq in self.children if isinstance(sq, BoardSquare) and sq.square == move.tosq)
uipiece = next(pc for pc in self.children if isinstance(pc, ChessPiece) and pc.square == move.fromsq)
        # uipiece.square = move.tosq  -> no! The 'request piece move' event makes the piece handle this on its own
captured = GAME.make(move)
if captured[0].kind is not NoPiece:
self._bus.emit('piece captured', captured)
self._bus.emit('request piece move', (move.fromsq, move.tosq))
        if move.kind is MoveKind.CASTLE_QUEEN:
            # queen-side castle: the rook jumps from its corner (two squares past the king's
            # destination) to the square on the other side of the castled king
            self._bus.emit('request piece move', (move.tosq - 2, move.tosq + 1))
        elif move.kind is MoveKind.CASTLE_KING:
            # king-side castle: same idea, with the rook coming from the nearer corner
            self._bus.emit('request piece move', (move.tosq + 1, move.tosq - 1))
elif move.promotion is not None:
self._bus.emit('request promotion options', (uipiece, move))
self._bus.emit('promotion choosen', move.promotion)
self._bus.emit('move made', move)
def _parts(self):
self.children = []
self.promoting_uipiece = None
self.promoting_move = None
listagame = []
margemx = (WIDTH - 8 * SBD.width) / 2
margemy = (HEIGHT - 8 * SBD.height) / 2
self.special_condition_message = | |
from click.testing import CliRunner
from unittest.mock import MagicMock
import yaml
from mai.cli import cli, login_with_profile
import mai
import os
import aws_saml_login.saml
import time
import pytest
TEST_CONFIG = {'example-Administrator': {'saml_identity_provider_url': 'https://auth.example.com',
'saml_role': ['arn:aws:iam::911:saml-provider/Shibboleth',
'arn:aws:iam::911:role/Shibboleth-Administrator',
'example'],
'saml_user': '<EMAIL>'},
'example-User': {'saml_identity_provider_url': 'https://auth.example.com',
'saml_role': ['arn:aws:iam::911:saml-provider/Shibboleth',
'arn:aws:iam::911:role/Shibboleth-User',
'example'],
'saml_user': '<EMAIL>'}}
SAML_RESPONSE_0_ROLES = ('''<xml xmlns="urn:oasis:names:tc:SAML:2.0:assertion"><Assertion>
</Assertion></xml>''', [])
SAML_RESPONSE_1_ROLE = ('''<xml xmlns="urn:oasis:names:tc:SAML:2.0:assertion"><Assertion>
<Attribute FriendlyName="Role" Name="https://aws.amazon.com/SAML/Attributes/Role">
<AttributeValue>arn:aws:iam::911:saml-provider/Shibboleth,arn:aws:iam::911:role/Shibboleth-User</AttributeValue>
</Attribute>
</Assertion></xml>''',
                        [('arn:aws:iam::911:saml-provider/Shibboleth', 'arn:aws:iam::911:role/Shibboleth-User', None)])
SAML_RESPONSE_2_ROLES = ('''<xml xmlns="urn:oasis:names:tc:SAML:2.0:assertion"><Assertion>
<Attribute FriendlyName="Role" Name="https://aws.amazon.com/SAML/Attributes/Role">
<AttributeValue>arn:aws:iam::911:saml-provider/Shibboleth,arn:aws:iam::911:role/Shibboleth-User</AttributeValue>
</Attribute>
<Attribute FriendlyName="Role" Name="https://aws.amazon.com/SAML/Attributes/Role">
<AttributeValue>arn:aws:iam::911:saml-provider/Shibboleth,arn:aws:iam::911:role/Shibboleth-Administrator</AttributeValue>
</Attribute>
</Assertion></xml>''',
                         [('arn:aws:iam::911:saml-provider/Shibboleth',
                           'arn:aws:iam::911:role/Shibboleth-User',
                           'example'),
                          ('arn:aws:iam::911:saml-provider/Shibboleth',
                           'arn:aws:iam::911:role/Shibboleth-Administrator',
                           'example')])
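# Each SAML_RESPONSE_* fixture above is a (saml_xml, roles) pair; every role is a
# (provider_arn, role_arn, account_name) tuple, mirroring what mai.cli.authenticate
# is mocked to return in the tests below.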
def test_version():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--version'], catch_exceptions=False)
assert 'Mai {}'.format(mai.__version__) in result.output
def test_no_command():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
assert 'No profile configured' in result.output
def test_cli():
runner = CliRunner()
data = {'myprofile': {}}
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.safe_dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
assert 'Usage: cli' in result.output
assert 'Missing identity provider URL' in result.output
def test_cli_002():
runner = CliRunner()
data = {'myprofile': {'saml_identity_provider_url': 'https://auth.example.com'}}
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.safe_dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
assert 'Usage: cli' in result.output
assert 'Missing SAML username' in result.output
def test_cli_global():
runner = CliRunner()
data = {'global': {'default_profile': 'myprofile'}, 'myprofile': {}}
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.safe_dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml'], catch_exceptions=False)
assert 'Usage: cli' in result.output
assert 'Missing identity provider URL' in result.output
def test_cli_list():
runner = CliRunner()
data = {'myprofile': {}}
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.safe_dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'list'], catch_exceptions=False)
assert 'Name' in result.output
def test_create_001_missing_argument():
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create'], catch_exceptions=False)
assert 'Usage: cli' in result.output
assert 'Missing argument "profile-name"' in result.output
def test_create_002_one_role(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_1_ROLE))
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
catch_exceptions=False, input='auth.example.com\n<EMAIL>\n1234567\n')
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert generated_config['foobar']['saml_identity_provider_url'] == 'https://auth.example.com'
assert generated_config['foobar']['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
assert generated_config['foobar']['saml_user'] == '<EMAIL>'
assert 'Identity provider URL: auth.example.com' in result.output
assert 'SAML username: <EMAIL>' in result.output
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Storing new profile in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_create_003_no_roles(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_0_ROLES))
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
catch_exceptions=False, input='auth.example.com\n<EMAIL>\n1234567\n')
assert 'No roles found' in result.output
assert result.exit_code == 1
def test_create_004_two_roles(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
catch_exceptions=False, input='auth.example.com\n<EMAIL>@<EMAIL>.<EMAIL>\n1234567\n1\n')
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert generated_config['foobar']['saml_identity_provider_url'] == 'https://auth.example.com'
assert generated_config['foobar']['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-Administrator'
assert generated_config['foobar']['saml_user'] == '<EMAIL>'
assert 'Identity provider URL: auth.example.com' in result.output
assert 'SAML username: <EMAIL>' in result.output
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Storing new profile in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_create_005_two_roles_options(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml',
'create', 'foobar',
'--url', 'auth.example.com',
'--user', '<EMAIL>'],
catch_exceptions=False, input='1234567\n1\n')
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert generated_config['foobar']['saml_identity_provider_url'] == 'https://auth.example.com'
assert generated_config['foobar']['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-Administrator'
assert generated_config['foobar']['saml_user'] == '<EMAIL>'
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Storing new profile in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_create_006_authentication_failed(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
def my_authenticate_mock(url, user, saml_password):
        if saml_password == 'wrong':
raise aws_saml_login.saml.AuthenticationFailed()
else:
return SAML_RESPONSE_2_ROLES
monkeypatch.setattr('mai.cli.authenticate', my_authenticate_mock)
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create', 'foobar'],
catch_exceptions=False,
input='auth.example.com\[email protected]\nwrong\n1234567\n2\n')
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert generated_config['foobar']['saml_identity_provider_url'] == 'https://auth.example.com'
assert generated_config['foobar']['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
assert generated_config['foobar']['saml_user'] == '<EMAIL>'
assert 'Identity provider URL: auth.example.com' in result.output
assert 'SAML username: <EMAIL>' in result.output
assert 'Authenticating against https://auth.example.com..\n Authentication Failed' in result.output
assert 'Please check your username/password and try again.' in result.output
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Storing new profile in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_create_all_001(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create-all'],
catch_exceptions=False, input='auth.example.com\[email protected]\n123456\n')
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert generated_config['example-Administrator']['saml_identity_provider_url'] == 'https://auth.example.com'
assert generated_config['example-Administrator']['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-Administrator'
assert generated_config['example-Administrator']['saml_user'] == '<EMAIL>'
assert generated_config['example-User']['saml_identity_provider_url'] == 'https://auth.example.com'
assert generated_config['example-User']['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
assert generated_config['example-User']['saml_user'] == '<EMAIL>'
assert 'Identity provider URL: auth.example.com' in result.output
assert 'SAML username: <EMAIL>' in result.output
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Storing new profile in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_create_all_002_no_roles(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_0_ROLES))
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create-all'],
catch_exceptions=False, input='auth.example.com\n<EMAIL>\n1234567\n')
assert 'No roles found' in result.output
assert result.exit_code == 1
def test_create_all_003_one_role(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value=''))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_1_ROLE))
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'create-all'],
catch_exceptions=False, input='auth.example.com\[email protected]\n123456\n')
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert generated_config['default-User']['saml_identity_provider_url'] == 'https://auth.example.com'
assert generated_config['default-User']['saml_role'][1] == 'arn:aws:iam::911:role/Shibboleth-User'
assert generated_config['default-User']['saml_user'] == '<EMAIL>'
assert 'Identity provider URL: auth.example.com' in result.output
assert 'SAML username: <EMAIL>' in result.output
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Storing new profile in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_set_default_001(monkeypatch):
data = TEST_CONFIG
runner = CliRunner()
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'set-default', 'example-User'])
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert generated_config['global']['default_profile'] == 'example-User'
assert 'Storing configuration in {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_set_default_002_unknown_profile(monkeypatch):
data = TEST_CONFIG
runner = CliRunner()
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'set-default', 'foobar-User'])
assert 'Profile "foobar-User" does not exist' in result.output
assert result.exit_code == 2
def test_delete_profile_001(monkeypatch):
data = TEST_CONFIG
runner = CliRunner()
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'delete', 'example-User'])
workingdir = os.getcwd()
assert os.path.exists('mai.yaml')
with open('mai.yaml') as fd:
generated_config = yaml.safe_load(fd)
assert 'example-User' not in generated_config
assert 'Deleting profile from {}.. OK'.format(os.path.join(workingdir, 'mai.yaml')) in result.output
def test_delete_profile_002_unknown_profile(monkeypatch):
data = TEST_CONFIG
runner = CliRunner()
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'delete', 'foobar-User'])
assert 'Profile "foobar-User" does not exist' in result.output
assert result.exit_code == 2
def test_login_001(monkeypatch):
monkeypatch.setattr('keyring.get_password', MagicMock(return_value='<PASSWORD>'))
monkeypatch.setattr('keyring.set_password', MagicMock(return_value=''))
monkeypatch.setattr('mai.cli.authenticate', MagicMock(return_value=SAML_RESPONSE_2_ROLES))
monkeypatch.setattr('mai.cli.assume_role', MagicMock(return_value=('KEYID', 'SECRET', 'SESSION_TOKEN')))
monkeypatch.setattr('mai.cli.write_aws_credentials', MagicMock)
class sleep_counter:
count = 1
sleep_backup = time.sleep
def my_sleep(sec):
if sec == 120:
if sleep_counter.count > 3:
raise KeyboardInterrupt
sleep_counter.count += 1
sleep_backup(0.1)
else:
sleep_backup(sec)
monkeypatch.setattr('time.sleep', my_sleep)
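    # my_sleep fast-forwards the 120-second refresh waits triggered by `login --refresh`
    # and raises KeyboardInterrupt after a few iterations so the test run terminates.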
data = TEST_CONFIG
runner = CliRunner()
with runner.isolated_filesystem():
with open('mai.yaml', 'w') as fd:
yaml.dump(data, fd)
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', 'example-User'],
catch_exceptions=False)
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
assert 'Writing temporary AWS credentials.. OK' in result.output
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', 'example-User'],
catch_exceptions=False)
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
assert 'Writing temporary AWS credentials.. OK' in result.output
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login'],
catch_exceptions=False)
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
assert 'Writing temporary AWS credentials.. OK' in result.output
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', '--refresh'],
catch_exceptions=False)
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Assuming role AWS Account 911 (example): Shibboleth-User.. OK' in result.output
assert 'Writing temporary AWS credentials.. OK' in result.output
assert 'Waiting 54 minutes before refreshing credentials.. . . . OK' in result.output
sleep_counter.count = 1
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', 'example-Administrator'],
catch_exceptions=False)
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Assuming role AWS Account 911 (example): Shibboleth-Administrator.. OK' in result.output
assert 'Writing temporary AWS credentials.. OK' in result.output
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login'],
catch_exceptions=False)
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Assuming role AWS Account 911 (example): Shibboleth-Administrator.. OK' in result.output
assert 'Writing temporary AWS credentials.. OK' in result.output
result = runner.invoke(cli, ['--config-file', 'mai.yaml', 'login', '--refresh'],
catch_exceptions=False)
assert 'Authenticating against https://auth.example.com..\n OK' in result.output
assert 'Assuming role AWS Account 911 (example): Shibboleth-Administrator.. OK' in result.output
assert 'Writing temporary | |
= t_jit(x, y)
jit_o.backward(grad)
x.grad.zero_()
y.grad.zero_()
jit_o = t_jit(x, y)
jit_o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(ref_x.grad, x.grad)
self.assertEqual(ref_y.grad, y.grad)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GUARD).run(bwd_graph)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test__softmax_function(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=False)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test__softmax_function_half_to_float(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = torch._softmax(o, dim=-1, half_to_float=True)
return o
x = torch.randn([4, 4], dtype=torch.float16, device="cuda")
y = torch.randn_like(x)
o = t(x, y)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-3))
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
# gradient check
for reduction_dim in range(dims):
for is_log_softmax in [False, True]:
shape = [output_size for idx in range(dims)]
self._softmax_helper(shape, reduction_dim, is_log_softmax, torch.float64, "cuda", 1e-4)
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float32, "cuda", 1e-4)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax_half(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.float16, "cuda", 5e-3)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
@unittest.skipIf(not TEST_BF16, "device does not support BFloat16")
def test_softmax_bfloat(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
for is_log_softmax in [False, True]:
self._softmax_helper(x, reduction_dim, is_log_softmax, torch.bfloat16, "cuda", 1e-1)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_permutation(self):
x = [7, 8, 12]
        # note that num_reduce_dim stays below len(x), so we never reduce
        # down to a single element (a codegen limitation at this moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
self._reduction_helper(x, axes, torch.float32, "cuda", perm0, perm1)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_multiple_output(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_bailout_depth(20)
def t(x: torch.Tensor, y: torch.Tensor, scale: float, z: torch.Tensor):
o = torch.mul(x, y)
o = torch.mul(o, scale)
out1 = torch.mul(o, z)
out2 = torch.sum(out1, dim=[2])
return out1, out2
t_jit = torch.jit.script(t)
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
y = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
z = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
scale = 0.5
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
x = x.to(memory_format=torch.channels_last)
y = y.to(memory_format=torch.channels_last)
z = z.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_channels_last_with_broadcast(self):
        # setting this to true forces a new graph to be generated for a new
        # input with a different broadcast shape
torch._C._jit_set_nvfuser_guard_mode(True)
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = o + 2.0
return o
t_jit = torch.jit.script(t)
# Single Channel broadcasts
# Test 1
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
x = x.to(memory_format=torch.channels_last)
y = torch.randn(8, 4, 10, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 3
y = torch.randn(8, 1, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
        # Test 4
y = torch.randn(1, 4, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
Currently, the JIT doesn't have tensor merge logic to handle adding
a broadcast tensor with more than one broadcast into a non-broadcast
tensor. Therefore, either of these tests can fail depending on the
sort implementation. The second test is known to fail.
# Two Channel broadcasts
# Test 1
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last).transpose(2,3)
x = x.transpose(2,3)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_pw_single_reduction_partition(self):
sizes = [2, 2, 2]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=[0])
o = torch.add(o, z)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation(self):
sizes = [2, 3, 4, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
def t(x: torch.Tensor):
o = torch.relu(x)
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
# TODO: we could preserve permutation to inputs
self.assertEqual(o.stride(), jit_o.stride())
def t(x: torch.Tensor):
o = torch.relu(x)
o = torch.add(o, 1.0)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
self.assertTrue(jit_o.is_contiguous(memory_format=torch.channels_last))
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_normalization_partition(self):
sizes = [3, 8, 5]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
r_m = torch.randn(8, dtype=dtype, device=device)
r_v = torch.randn(8, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.add(x, y)
            o = torch.nn.functional.softmax(o,
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.optimize import brentq
from scipy.linalg import solve_banded, block_diag
from multiprocessing import Pool
from itertools import product
from functools import partial
class qclSolver:
fundamentals = {
"e-charge": -1.6021766208 * (10 ** (-19)),
"planck": 1.054571800 * (10 ** (-34)),
"planck_e": 6.582119569 * (10 ** (-16)),
"m0": 9.10938356 * (10 ** (-31)),
"k-bol": 1.38064852 * (10 ** (-23)),
"c": 3 * 10 ** 8,
"eps0": 8.85418781762 * (10 ** (-12)),
}
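    # Values are in SI units: electron charge [C], reduced Planck constant in
    # [J*s] and [eV*s], electron rest mass [kg], Boltzmann constant [J/K],
    # speed of light [m/s] (rounded), and vacuum permittivity [F/m].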
def __init__(self, struct, interval=2, step=0.05, istep=0.2, side=5., TE=400., TL=293.):
if not (isinstance(struct, list)):
struct = [struct]
for i in range(0, len(struct)):
struct[i].length = np.array([struct[i].layers[ind].width for ind in range(0, len(struct[i].layers))]).sum()
self.istep = istep
self.step = step
self.struct = struct
self.side = side
self.TL = TL
self.TE = TE
self.index = []
self.grid = []
self.meff = []
self.Valloy = []
self.Perm = []
self.lattconst = []
self.comp = []
self.Ep = []
self.Population = []
self.U = 0
self.potential = []
self.shifts = side * np.ones(len(struct))
self.ends = np.zeros_like(self.shifts)
self.N_carr = 0
self.struct_end = []
self.eigs = []
self.psi = []
self.dop = []
# chunk division
for i in range(0, len(struct)):
# index interpolation
self.shifts[i] = self.shifts[i] - struct[i].layers[0].width
if i == 0:
index, last, end = qclSolver.layerExtrap(struct[i], side)
self.grid.append(np.arange(0, last, step))
else:
index, last, end = qclSolver.layerExtrap(struct[i], side, z_start=end)
self.grid.append(np.arange(self.grid[i - 1][-1] - 2 * side // step * step, last, step))
self.struct_end.append(end)
self.index.append(index)
# parameter grid filling
self.meff.append(np.array([(struct[i].layers[int(self.index[i](z))].material.params['meff'])
for z in self.grid[i]]))
self.Valloy.append(np.array([(struct[i].layers[int(self.index[i](z))].material.params['meff'])
for z in self.grid[i]]))
self.Perm.append(np.array([(self.struct[i].layers[int(self.index[i](z))].material.params['eps0'])
for z in self.grid[i]]) * qclSolver.fundamentals["eps0"])
self.lattconst.append(np.array([(self.struct[i].layers[int(self.index[i](z))].material.params['lattconst'])
for z in self.grid[i]]))
self.comp.append(np.array([self.struct[i].layers[int(self.index[i](z))].material.x for z in self.grid[i]]))
self.comp[i][self.comp[i] == None] = 0
self.Ep.append(np.array([(self.struct[i].layers[int(self.index[i](z))].material.params['Ep'])
for z in self.grid[i]]))
# doping processing
self.N_carr += struct[i].getSheetDop() * (10 ** 4)
dop = 0.
if i == 0:
shift = self.shifts[0]
for j in range(0, len(struct[i].dopings)):
dop += np.piecewise(
self.grid[i], [self.grid[i]-shift < struct[i].dopings[j][0],
self.grid[i]-shift >= struct[i].dopings[j][0],
self.grid[i]-shift >= struct[i].dopings[j][1]],
[0, struct[i].dopings[j][2], 0]
)
else:
shift = self.struct_end[i-1]
for j in range(0, len(struct[i].dopings)):
dop += np.piecewise(
self.grid[i], [self.grid[i]-shift < struct[i].dopings[j][0],
self.grid[i]-shift >= struct[i].dopings[j][0],
self.grid[i]-shift >= struct[i].dopings[j][1]],
[0, struct[i].dopings[j][2], 0]
)
self.dop.append(dop)
# various parameters
self.tau_pure = 0.5 * 10 ** (-13) # pure dephasing time
self.periods = 30
self.dim_l = 0.3 # cm
self.dim_h = 4 / 10 ** 4 # cm
self.dim_w = 0.15 # cm
if interval == 2:
self.refr = 3.4
self.alpha_m = 7.5
self.alpha_w = 7.5
self.Gamma_overlap = 1
else:
self.refr = 3.4
self.alpha_m = 3.
self.alpha_w = 3.
self.Gamma_overlap = 1
self.evaluate_W = True
self.TPop = True
self.P = -1
# ============================================
# ================== EIGS ====================
# ============================================
def eigTM(self, resolution=10 ** (-3)):
step = self.step / 10 ** 9
for i in range(0, len(self.struct)):
m = self.meff[i] * qclSolver.fundamentals["m0"]
Ep = self.Ep[i]
Energy = np.arange(np.amin(self.potential[i]), np.amax(self.potential[i]), resolution)
boundary = lambda E: np.dot(qclSolver.buildTM(E, self.potential[i], Ep, m, step)[:, :, -1], [1, -1]).sum()
val = []
eig = []
psi = []
old_settings = np.seterr(all='ignore')
for E in Energy:
val.append(boundary(E).real)
for j in range(0, np.size(Energy) - 1):
if val[j] * val[j + 1] < 0:
eig.append(brentq(lambda E: boundary(E).real, Energy[j], Energy[j + 1], xtol=1e-20))
for E in eig:
matArray = qclSolver.buildTM(E, self.potential[i], Ep, m, step)
psi_tmp = np.sum(np.matmul(np.transpose(matArray, (2, 0, 1)), [1, -1]), axis=1).real
nrm = ((psi_tmp ** 2).sum() * step)
psi_tmp = psi_tmp / np.sqrt(nrm)
psi.append(np.append(0., psi_tmp))
np.seterr(**old_settings)
self.eigs.append(np.array(eig)[::-1])
self.psi.append((np.array(psi)[::-1][:]).transpose())
self.selectPsi()
    @staticmethod
    def buildTM(E, Ev, Ep, m, step):
dE = (E - Ev) * 1.60218e-19
planck = qclSolver.fundamentals["planck"]
m0 = qclSolver.fundamentals["m0"]
m_np = m * (1 + (E - Ev) / (m / m0 * Ep))
k = np.sqrt(2 * m_np * dE + 0j) / planck
kt = k / m_np
kp = (k[:-1] + k[1:]) / 2
km = (k[:-1] - k[1:]) / 2
a = (kt[1:] + kt[:-1]) / 2 / kt[1:] * np.exp(1j * kp * step)
b = (kt[1:] - kt[:-1]) / 2 / kt[1:] * np.exp(-1j * km * step)
c = (kt[1:] - kt[:-1]) / 2 / kt[1:] * np.exp(1j * km * step)
d = (kt[1:] + kt[:-1]) / 2 / kt[1:] * np.exp(-1j * kp * step)
matArray = np.array([[a, b], [c, d]])
for i in range(1, len(Ev) - 1):
matArray[:, :, i] = np.matmul(matArray[:, :, i], matArray[:, :, i - 1])
return matArray
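    # eigTM above follows the usual shooting/transfer-matrix recipe: sweep the
    # energy grid, evaluate the boundary mismatch returned by buildTM, and refine
    # every sign change with brentq. A minimal, self-contained sketch of that
    # bracket-and-refine pattern (toy boundary function, hypothetical names):
    #
    #   import numpy as np
    #   from scipy.optimize import brentq
    #
    #   def boundary(E):                      # stand-in for the TM mismatch
    #       return np.cos(3.0 * E) - 0.2 * E
    #
    #   grid = np.linspace(0.0, 5.0, 200)
    #   vals = [boundary(E) for E in grid]
    #   roots = [brentq(boundary, a, b)
    #            for a, b, fa, fb in zip(grid[:-1], grid[1:], vals[:-1], vals[1:])
    #            if fa * fb < 0]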
    def eigDiag(self):  # not implemented
self.eigs = 1
self.psi = 1
return 0
# ============================================
# =================== MISC ===================
# ============================================
    @staticmethod
    def layerExtrap(struct, side=5., z_start=0.):
shift = side - struct.layers[0].width
length = struct.length + shift
z = np.array([struct.layerPos(i) + shift for i in range(0, struct.Nl)] + [length] + [length + side])
n = np.arange(0, struct.Nl + 2, 1)
z[0] = -0.01-struct.layers[0].width
n[-2:] = 0
if z_start != 0.:
z = z + z_start - shift
return interp1d(z, n, kind='previous'), z[-1], z[-2]
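    # layerExtrap builds a step function over the growth axis: interp1d with
    # kind='previous' maps a coordinate z to the index of the layer starting at
    # or before it, and the two trailing entries are forced to index 0 so the
    # padding regions on either side of the structure reuse the first layer's
    # material.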
def setPotential(self, U, hart=0.):
self.U = U
self.potential = []
for i in range(0, len(self.struct)):
self.potential.append(np.array(
[(self.struct[i].layers[int(self.index[i](z))].material.params['Ec']) for z in self.grid[i]])
- U * self.grid[i] / 10 ** 7)
if not np.all(hart == 0.):
front = np.argwhere(self.unified_grid <= self.grid[i][0])[-1][-1]
back = np.argwhere(self.unified_grid <= self.grid[i][-1])[-1][-1]
self.potential[-1] += hart[front:back+1]
def setBGDoping(self, BGDoping):
for chunk in range(0, len(self.struct)):
            self.dop[chunk][self.dop[chunk] == 0] += BGDoping
self.alpha_w *= self.step / 10 ** 9 * self.dop[chunk].sum() * (10 ** 6) / self.N_carr
self.N_carr = self.step / 10 ** 9 * self.dop[chunk].sum() * (10 ** 6)
def selectPsi(self, deloc_ext=1):
self.mass_sub = []
step = self.step / 10 ** 9
U = self.U / 10 ** 7
eigs_list = self.eigs
psi_list = self.psi
psi_out = []
eigs_out = []
for j in range(0, len(self.struct)):
eigs = eigs_list[j]
psi = psi_list[j]
potential = self.potential[j]
ind = np.zeros(0, dtype=int)
Ep = self.Ep[j]
for i in range(0, len(eigs)):
if eigs[i] > potential[-1]:
left = (psi[np.nonzero(potential[0] - U * self.grid[j] - eigs[i] > 0), i] ** 2).sum()
right = (psi[np.nonzero(eigs[i] - potential[0] + U * self.grid[j] > 0), i] ** 2).sum()
if left < deloc_ext * right:
ind = np.append(ind, i)
eigs = np.delete(eigs, ind)
psi = np.delete(psi, ind, 1)
mass_sub = np.zeros_like(eigs)
for i in range(0, len(eigs)):
mass_sub[i] = step * (
self.meff[j] * (1 + (eigs[i] - potential) / self.meff[j] / Ep) * (psi[:, i] ** 2)).sum()
eigs_out.append(eigs)
psi_out.append(psi)
self.mass_sub.append(mass_sub * qclSolver.fundamentals["m0"])
self.eigs = eigs_out
self.psi = psi_out
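    # selectPsi drops delocalized solutions: states above the downstream band
    # edge whose squared amplitude sits mostly on the continuum side (weighted by
    # deloc_ext) are removed, and an energy-dependent, nonparabolic effective
    # mass is then averaged over |psi|^2 for each remaining subband.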
def unite_chunks(self):
if len(self.struct) == 1:
self.unified_grid = self.grid[0]
self.unified_potential = self.potential[0]
self.unified_dop = self.dop[0]
self.unified_perm = self.Perm[0]
self.unified_psi = self.psi[0]
self.unified_eigs = self.eigs[0]
self.unified_mass_sub = self.mass_sub[0]
return None
front, back = [], []
front.append(0)
back.append(np.argwhere(self.grid[0] <= self.struct_end[0])[-1][-1])
for i in range(1, len(self.struct)-1):
back.append(np.argwhere(self.grid[i] <= self.struct_end[i])[-1][-1])
front.append(np.argwhere(self.grid[i] <= self.struct_end[i - 1])[-1][-1])
back.append(len(self.grid[-1]))
front.append(np.argwhere(self.grid[-1] <= self.struct_end[-2])[-1][-1])
unified_potential = np.zeros(0)
unified_dop = np.zeros(0)
unified_grid = np.zeros(0)
unified_perm = np.zeros(0)
unified_eigs = np.zeros(0)
unified_mass_sub = np.zeros(0)
unified_psi = np.array(self.psi)
for i in range(0, len(self.struct)):
for k in range(0, len(self.struct)):
if k < i:
unified_psi[i] = np.append(
np.zeros([len(self.grid[k][front[k]:back[k]]), len(self.eigs[i])]), unified_psi[i], axis=0)
elif k > i:
unified_psi[i] = np.append(
unified_psi[i], np.zeros([len(self.grid[k][front[k]:back[k]]), len(self.eigs[i])]), axis=0)
for i in range(0, len(self.struct)):
unified_potential = np.append(unified_potential, self.potential[i][front[i]:back[i]])
unified_dop = np.append(unified_dop, self.dop[i][front[i]:back[i]])
unified_grid = np.append(unified_grid, self.grid[i][front[i]:back[i]])
unified_perm = np.append(unified_perm, self.Perm[i][front[i]:back[i]])
unified_eigs = np.append(unified_eigs, self.eigs[i])
unified_mass_sub = np.append(unified_mass_sub, self.mass_sub[i])
unified_psi[i] = unified_psi[i][front[i]:back[i]-len(self.grid[i])-1, :]
self.unified_grid = unified_grid[:-1]
self.unified_potential = unified_potential[:-1]
self.unified_dop = unified_dop[:-1]
self.unified_perm = unified_perm[:-1]
self.unified_psi = np.hstack(unified_psi)
        self.unified_eigs = unified_eigs
self.unified_mass_sub = unified_mass_sub
# ============================================
# ============ GENERAL SOLVERS ===============
# ============================================
def solvePoisson(self):
h = self.step / 10 ** 9
side = self.side / 10 ** 9
z = self.unified_grid / 10 ** 9
el = qclSolver.fundamentals["e-charge"]
N = len(z)
mass_sub = self.unified_mass_sub
Perm = self.unified_perm
eigs = self.unified_eigs
psi = self.unified_psi
N_car_tot = self.N_carr
front = np.argwhere(z <= side)[-1][-1] - 1
back = np.argwhere(z <= (self.struct_end[-1] + self.struct[0].layers[0].width) / 10 ** 9)[-1][-1]
if self.TPop:
mu = brentq(lambda mu_t: (N_car_tot - qclSolver.TDistr(mass_sub, eigs, mu_t, self.TE, self.TL).sum()),
-1,
1,
xtol=1e-30)
Population = qclSolver.TDistr(mass_sub, eigs, mu, self.TE, self.TL)
self.Population = []
end = 0
for i in range(0, len(self.struct)):
self.Population.append(Population[end:end+len(self.eigs[i])])
# tools/SDKTool/libs/AgentAPI/AgentMsgMgr.py
# -*- coding: utf-8 -*-
"""
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import os
import sys
import logging
import numpy as np
import traceback
if sys.version_info < (3, 0):
import ConfigParser as ConfigParser
else:
import configparser as ConfigParser
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__ + "/protocol")
import tbus
from .protocol import common_pb2
from .protocol import gameregProtoc_pb2
MSG_SEND_ID_START = 40000
MSG_SEND_GROUP_ID = MSG_SEND_ID_START + 1
MSG_SEND_TASK_FLAG = MSG_SEND_ID_START + 2
MSG_SEND_ADD_TASK = MSG_SEND_ID_START + 3
MSG_SEND_DEL_TASK = MSG_SEND_ID_START + 4
MSG_SEND_CHG_TASK = MSG_SEND_ID_START + 5
MSG_SEND_TASK_CONF = MSG_SEND_ID_START + 6
MSG_REGER_BUTTON_TYPE = 'button'
MSG_REGER_STUCK_TYPE = 'stuck'
MSG_REGER_FIXOBJ_TYPE = 'fix object'
MSG_REGER_PIX_TYPE = 'pixel'
MSG_REGER_DEFORM_TYPE = 'deform object'
MSG_REGER_NUMBER_TYPE = 'number'
MSG_REGER_FIXBLOOD_TYPE = 'fix blood'
MSG_REGER_KING_GLORY_BOOD_TYPE = 'king glory blood'
MSG_REGER_MAPREG_TYPE = 'map'
MSG_REGER_MAPDIRECTIONREG_TYPE = 'mapDirection'
MSG_REGER_MULTCOLORVAR_TYPE = 'multcolorvar'
MSG_REGER_SHOOTGAMEBLOOD_TYPE = 'shoot game blood'
MSG_REGER_SHOOTGAMEHURT_TYPE = 'shoot game hurt'
LOG = logging.getLogger('agent')
class MsgMgr(object):
"""
    message manager implementation
"""
def __init__(self, cfgPath='../cfg/bus.ini', initParamFile='../cfg/param.file', index=1):
self.__selfAddr = None
self.__gameRegAddr = None
self.__cfgPath = cfgPath
self.__initParamFile = initParamFile
self.__index = index
self.__serialMsgHandle = dict()
self.__serialRegerHandle = dict()
self.__unSeiralRegerHandle = dict()
def Initialize(self, selfAddr=None):
"""
        Initialize the message manager: parse the tbus configuration file and initialize tbus
        return: True or False
"""
if os.path.exists(self.__cfgPath):
self._Register()
config = ConfigParser.ConfigParser(strict=False)
config.read(self.__cfgPath)
gameRegAddr = "GameReg" + str(self.__index) + "Addr"
strgameRegAddr = config.get('BusConf', gameRegAddr)
UIRecognizeAddr = "UI" + str(self.__index) + "Addr"
strUIAddr = config.get('BusConf', UIRecognizeAddr)
if selfAddr is None:
AgentAddr = "Agent" + str(self.__index) + "Addr"
strselfAddr = config.get('BusConf', AgentAddr)
else:
strselfAddr = config.get('BusConf', selfAddr)
self.__gameRegAddr = tbus.GetAddress(strgameRegAddr)
self.__UIRecognizeAddr = tbus.GetAddress(strUIAddr)
self.__selfAddr = tbus.GetAddress(strselfAddr)
LOG.info("gamereg addr is {0}, self addr is {1}" \
.format(self.__gameRegAddr, self.__selfAddr))
ret = tbus.Init(self.__selfAddr, self.__cfgPath)
if ret != 0:
LOG.error('tbus init failed with return code[{0}]'.format(ret))
return False
return True
else:
LOG.error('tbus config file not exist in {0}'.format(self.__cfgPath))
return False
def ProcMsg(self, msgID, msgValue):
"""
Process message: create message and send to GameReg
:param msgID: message ID which should be in [MSG_SEND_GROUP_ID, MSG_SEND_TASK_FLAG,
MSG_SEND_ADD_TASK,MSG_SEND_DEL_TASK, MSG_SEND_CHG_TASK, MSG_SEND_TASK_CONF]
:param msgValue: value of message
:return: True or False
"""
outBuff = self._CreateMsg(msgID, msgValue)
if outBuff is None:
LOG.error('create msg failed')
return False
return self._Send(outBuff)
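    # Usage sketch for ProcMsg above (illustrative values): the task-flag payload
    # follows _SerialFlagTask below, which expects a {taskID: bool} mapping, e.g.
    #   mgr.ProcMsg(MSG_SEND_TASK_FLAG, {1: True, 2: False})
    # while MSG_SEND_DEL_TASK takes an iterable of task IDs (_SerialDelTask).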
def ProcSrcImgMsg(self, srcImgDict):
"""
ProcSrcImgMsg: create message and send to GameReg
        :param srcImgDict: image information stored as a dictionary with the
        keys 'frameSeq', 'width', 'height', 'image', 'deviceIndex'
:return: True or False
"""
msg = common_pb2.tagMessage()
msg.eMsgID = common_pb2.MSG_SRC_IMAGE_INFO
stSrcImageInfo = msg.stSrcImageInfo
stSrcImageInfo.uFrameSeq = srcImgDict['frameSeq']
stSrcImageInfo.nWidth = srcImgDict['width']
stSrcImageInfo.nHeight = srcImgDict['height']
stSrcImageInfo.byImageData = srcImgDict['image'].tobytes()
stSrcImageInfo.uDeviceIndex = srcImgDict['deviceIndex']
msgBuff = msg.SerializeToString()
if msgBuff is None:
LOG.error('create msg failed')
return False
return self._Send(msgBuff)
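    # A hedged usage sketch for ProcSrcImgMsg above ('frame' and the index values
    # are illustrative, not taken from this file):
    #   import numpy as np
    #   mgr = MsgMgr(cfgPath='../cfg/bus.ini', index=1)
    #   if mgr.Initialize():
    #       frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # placeholder image
    #       mgr.ProcSrcImgMsg({'frameSeq': 1, 'width': 1280, 'height': 720,
    #                          'image': frame, 'deviceIndex': 1})
    #       result = mgr.Recv()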
def ProcUISrcImgMsg(self, srcImgDict):
msgPB = common_pb2.tagMessage()
msgPB.eMsgID = common_pb2.MSG_UI_STATE_IMG
msgPB.stUIAPIState.eUIState = common_pb2.PB_UI_STATE_NORMAL
msgPB.stUIAPIState.stUIImage.uFrameSeq = srcImgDict['frameSeq']
msgPB.stUIAPIState.stUIImage.nWidth = srcImgDict['width']
msgPB.stUIAPIState.stUIImage.nHeight = srcImgDict['height']
msgPB.stUIAPIState.stUIImage.byImageData = srcImgDict['image'].tobytes()
msgPB.stUIAPIState.stUIImage.uDeviceIndex = srcImgDict['deviceIndex']
msgPB.stUIAPIState.eGameState = common_pb2.PB_STATE_NONE
msg = msgPB.SerializeToString()
if msg is None:
LOG.error('create msg failed')
return False
return self._SendUI(msg)
def Recv(self):
"""
        Recv: receive message from GameReg; the value is the result of imageReg and the processed image
:return: True or False
"""
msgBuffRet = None
msgBuff = tbus.RecvFrom(self.__gameRegAddr)
while msgBuff is not None:
msgBuffRet = msgBuff
msgBuff = tbus.RecvFrom(self.__gameRegAddr)
if msgBuffRet is not None:
# msg = msgpack.unpackb(msgBuffRet, object_hook=mn.decode, encoding='utf-8')
msg = self._UnSerialResultMsg(msgBuffRet)
if msg is not None:
frameSeq = msg['value'].get('frameSeq')
LOG.debug('recv frame data, frameIndex={0}'.format(frameSeq))
return msg
else:
LOG.error("unserial result message failed")
return None
else:
return None
def RecvUI(self):
msgBuffRet = None
msgBuff = tbus.RecvFrom(self.__UIRecognizeAddr)
while msgBuff is not None:
msgBuffRet = msgBuff
msgBuff = tbus.RecvFrom(self.__UIRecognizeAddr)
if msgBuffRet is not None:
# msg = msgpack.unpackb(msgBuffRet, object_hook=mn.decode, encoding='utf-8')
return self._UnSerialUIResultMsg(msgBuffRet)
else:
return None
def Release(self):
"""
tbus exit
:return: None
"""
LOG.info('tbus exit...')
tbus.Exit(self.__selfAddr)
def _UnSerialUIResultMsg(self, msg):
msgPB = common_pb2.tagMessage()
msgPB.ParseFromString(msg)
msgID = msgPB.eMsgID
if msgID != common_pb2.MSG_UI_ACTION:
LOG.error('wrong msg id: {}'.format(msgID))
return None
UIResultDict = dict()
UIResultDict['actions'] = []
for UIUnitAction in msgPB.stUIAction.stUIUnitAction:
eUIAction = UIUnitAction.eUIAction
action = dict()
action['type'] = eUIAction
action['points'] = []
if action['type'] == common_pb2.PB_UI_ACTION_CLICK:
point = {}
point["x"] = UIUnitAction.stClickPoint.nX
point["y"] = UIUnitAction.stClickPoint.nY
action['points'].append(point)
elif action['type'] == common_pb2.PB_UI_ACTION_DRAG:
for dragPoint in UIUnitAction.stDragPoints:
point = {}
point["x"] = dragPoint.nX
point["y"] = dragPoint.nY
action['points'].append(point)
UIResultDict['actions'].append(action)
data = np.fromstring(msgPB.stUIAction.stSrcImageInfo.byImageData, np.uint8)
UIResultDict['image'] = np.reshape(data, (
msgPB.stUIAction.stSrcImageInfo.nHeight, msgPB.stUIAction.stSrcImageInfo.nWidth, 3))
return UIResultDict
def _CreateMsg(self, msgID, msgValue):
msgDic = dict()
msgDic['msgID'] = msgID
msgDic['value'] = msgValue
# outBuff = msgpack.packb(msgDic, use_bin_type=True)
outBuff = self._SerialSendMsg(msgDic)
return outBuff
def _Send(self, outBuff):
ret = tbus.SendTo(self.__gameRegAddr, outBuff)
if ret != 0:
LOG.error('TBus Send To GameReg Addr return code[{0}]'.format(ret))
return False
return True
def _SendUI(self, outBuff):
ret = tbus.SendTo(self.__UIRecognizeAddr, outBuff)
if ret != 0:
LOG.error('TBus Send To UI Anuto Addr return code[{0}]'.format(ret))
return False
return True
def _Register(self):
"""
        register message handlers
serial message: MSG_SEND_GROUP_ID, MSG_SEND_ADD_TASK, MSG_SEND_CHG_TASK, MSG_SEND_TASK_FLAG,
MSG_SEND_DEL_TASK, MSG_SEND_TASK_CONF, MSG_REGER_STUCK_TYPE, MSG_REGER_FIXOBJ_TYPE,
MSG_REGER_PIX_TYPE, MSG_REGER_DEFORM_TYPE, MSG_REGER_NUMBER_TYPE, MSG_REGER_FIXBLOOD_TYPE,
MSG_REGER_KING_GLORY_BOOD_TYPE, MSG_REGER_MAPREG_TYPE, MSG_REGER_MAPDIRECTIONREG_TYPE,
MSG_REGER_MULTCOLORVAR_TYPE, MSG_REGER_SHOOTGAMEBLOOD_TYPE, MSG_REGER_SHOOTGAMEHURT_TYPE
unserial message:gameregProtoc_pb2.TYPE_STUCKREG, gameregProtoc_pb2.TYPE_FIXOBJREG,
gameregProtoc_pb2.TYPE_PIXREG, gameregProtoc_pb2.TYPE_DEFORMOBJ,
gameregProtoc_pb2.TYPE_NUMBER, gameregProtoc_pb2.TYPE_FIXBLOOD,
gameregProtoc_pb2.TYPE_KINGGLORYBLOOD, gameregProtoc_pb2.TYPE_MAPREG,
gameregProtoc_pb2.TYPE_MAPDIRECTIONREG,gameregProtoc_pb2.TYPE_MULTCOLORVAR,
gameregProtoc_pb2.TYPE_SHOOTBLOOD, gameregProtoc_pb2.TYPE_SHOOTHURT
:return: None
"""
self.__serialMsgHandle[MSG_SEND_GROUP_ID] = self._SerialTask
self.__serialMsgHandle[MSG_SEND_ADD_TASK] = self._SerialAddTask
self.__serialMsgHandle[MSG_SEND_CHG_TASK] = self._SerialChgTask
self.__serialMsgHandle[MSG_SEND_TASK_FLAG] = self._SerialFlagTask
self.__serialMsgHandle[MSG_SEND_DEL_TASK] = self._SerialDelTask
self.__serialMsgHandle[MSG_SEND_TASK_CONF] = self._SerialConfTask
self.__serialRegerHandle[MSG_REGER_STUCK_TYPE] = self._SerialStuckReg
self.__serialRegerHandle[MSG_REGER_FIXOBJ_TYPE] = self._SerialFixObjReg
self.__serialRegerHandle[MSG_REGER_PIX_TYPE] = self._SerialPixReg
self.__serialRegerHandle[MSG_REGER_DEFORM_TYPE] = self._SerialDeformReg
self.__serialRegerHandle[MSG_REGER_NUMBER_TYPE] = self._SerialNumberReg
self.__serialRegerHandle[MSG_REGER_FIXBLOOD_TYPE] = self._SerialFixBloodReg
self.__serialRegerHandle[MSG_REGER_KING_GLORY_BOOD_TYPE] = self._SerialKingGloryBlood
self.__serialRegerHandle[MSG_REGER_MAPREG_TYPE] = self._SerialMapReg
self.__serialRegerHandle[MSG_REGER_MAPDIRECTIONREG_TYPE] = self._SerialMapDirectionReg
self.__serialRegerHandle[MSG_REGER_MULTCOLORVAR_TYPE] = self._SerialMultColorVar
self.__serialRegerHandle[MSG_REGER_SHOOTGAMEBLOOD_TYPE] = self._SerialShootGameBlood
self.__serialRegerHandle[MSG_REGER_SHOOTGAMEHURT_TYPE] = self._SerialShootGameHurt
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_STUCKREG] = self._UnSerialStuckRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_FIXOBJREG] = self._UnSerialFixObjRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_PIXREG] = self._UnSerialPixRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_DEFORMOBJ] = self._UnSerialDeformRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_NUMBER] = self._UnSerialNumberRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_FIXBLOOD] = self._UnSerialFixBloodRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_KINGGLORYBLOOD] = self._UnSerialKingGloryBloodResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_MAPREG] = self._UnSerialMapRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_MAPDIRECTIONREG] = self._UnSerialMapDirectionRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_MULTCOLORVAR] = self._UnSerialMultColorVar
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_SHOOTBLOOD] = self._UnSerialShootGameBloodRegResult
self.__unSeiralRegerHandle[gameregProtoc_pb2.TYPE_SHOOTHURT] = self._UnSerialShootGameHurtRegResult
def _SerialSendMsg(self, msgDic):
msg = common_pb2.tagMessage()
msg.eMsgID = common_pb2.MSG_GAMEREG_INFO
msgAgent = msg.stPBAgentMsg
msgAgent.eAgentMsgID = self._GetValueFromDict(msgDic, 'msgID')
self.__serialMsgHandle[msgAgent.eAgentMsgID](
msgAgent, self._GetValueFromDict(msgDic, 'value')
)
msgBuff = msg.SerializeToString()
return msgBuff
def _SerialTask(self, msg, msgValue):
value = msg.stPBAgentTaskValue
value.uGroupID = self._GetValueFromDict(msgValue, 'groupID')
for taskVal in self._GetValueFromDict(msgValue, 'task'):
taskPB = value.stPBAgentTaskTsks.add()
self.__serialRegerHandle[self._GetValueFromDict(taskVal, 'type')](taskPB, taskVal)
def _SerialChgTask(self, msg, msgValue):
value = msg.stPBAgentTaskValue
value.uGroupID = 1
for taskVal in msgValue:
taskPB = value.stPBAgentTaskTsks.add()
self.__serialRegerHandle[self._GetValueFromDict(taskVal, 'type')](taskPB, taskVal)
def _SerialAddTask(self, msg, msgValue):
value = msg.stPBAgentTaskValue
value.uGroupID = 1
for taskVal in msgValue:
taskPB = value.stPBAgentTaskTsks.add()
self.__serialRegerHandle[self._GetValueFromDict(taskVal, 'type')](taskPB, taskVal)
def _SerialFlagTask(self, msg, msgValue):
for taskID in msgValue:
value = msg.stPBTaskFlagMaps.add()
value.nTaskID = int(taskID)
value.bFlag = msgValue[taskID]
def _SerialDelTask(self, msg, msgValue):
for key in msgValue:
msg.nDelTaskIDs.append(key)
def _SerialConfTask(self, msg, msgValue):
for filename in msgValue:
if filename is not None:
msg.strConfFileName.append(filename)
def _SerialMapReg(self, taskPB, taskVal):
taskPB.nTaskID = self._GetValueFromDict(taskVal, 'taskID')
taskPB.eType = gameregProtoc_pb2.TYPE_MAPREG
# taskPB.nSkipFrame = self._GetValueFromDict(taskVal, 'skipFrame')
for element in self._GetValueFromDict(taskVal, 'elements'):
elementPB = taskPB.stPBAgentTaskElements.add()
rect = elementPB.stPBRect
rect.nX = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'x')
rect.nY = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'y')
rect.nW = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'w')
rect.nH = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'h')
elementPB.strMyLocCondition = self._GetValueFromDict(element, 'myLocCondition')
elementPB.strFriendsCondition = self._GetValueFromDict(element, 'friendsLocCondition')
elementPB.strViewLocCondition = self._GetValueFromDict(element, 'viewLocCondition')
elementPB.strMapPath = self._GetValueFromDict(element, 'mapTempPath')
elementPB.strMaskPath = element.get('mapMaskPath') or str()
elementPB.nMaxPointNum = self._GetValueFromDict(element, 'maxPointNum')
elementPB.nFilterSize = self._GetValueFromDict(element, 'filterSize')
def _SerialMapDirectionReg(self, taskPB, taskVal):
taskPB.nTaskID = self._GetValueFromDict(taskVal, 'taskID')
taskPB.eType = gameregProtoc_pb2.TYPE_MAPDIRECTIONREG
# taskPB.nSkipFrame = self._GetValueFromDict(taskVal, 'skipFrame')
for element in self._GetValueFromDict(taskVal, 'elements'):
elementPB = taskPB.stPBAgentTaskElements.add()
rect = elementPB.stPBRect
rect.nX = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'x')
rect.nY = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'y')
rect.nW = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'w')
rect.nH = self._GetValueFromDict(self._GetValueFromDict(element, 'ROI'), 'h')
elementPB.strMyLocCondition = self._GetValueFromDict(element, 'myLocCondition')
elementPB.strViewLocCondition = self._GetValueFromDict(element, 'viewLocCondition')
elementPB.strMaskPath = element.get('mapMaskPath') or str()
elementPB.nMaxPointNum = self._GetValueFromDict(element, 'maxPointNum')
elementPB.nFilterSize = self._GetValueFromDict(element, 'filterSize')
elementPB.nDilateSize = self._GetValueFromDict(element, 'dilateSize')
elementPB.nErodeSize = self._GetValueFromDict(element, 'erodeSize')
elementPB.nRegionSize = self._GetValueFromDict(element, 'regionSize')
def _SerialMultColorVar(self, taskPB, taskVal):
taskPB.nTaskID = self._GetValueFromDict(taskVal, 'taskID')
taskPB.eType = gameregProtoc_pb2.TYPE_MULTCOLORVAR
# taskPB.nSkipFrame = self._GetValueFromDict(taskVal, 'skipFrame')
for element in self._GetValueFromDict(taskVal, 'elements'):
elementPB = taskPB.stPBAgentTaskElements.add()
elementPB.strImgFilePath = self._GetValueFromDict(element, 'imageFilePath')
def _SerialShootGameBlood(self, taskPB, taskVal):
"""
        ShootGameBlood: this recognizer method is only used by ShootGame
"""
taskPB.nTaskID = self._GetValueFromDict(taskVal, 'taskID')
taskPB.eType = gameregProtoc_pb2.TYPE_SHOOTBLOOD
# taskPB.nSkipFrame = self._GetValueFromDict(taskVal, 'skipFrame')
for element in self._GetValueFromDict(taskVal, 'elements'):
elementPB = taskPB.stPBAgentTaskElements.add()
rect = elementPB.stPBRect
self._SerialRect(rect, self._GetValueFromDict(element, 'ROI'))
elementPB.nFilterSize = self._GetValueFromDict(element, 'filterSize')
elementPB.nBloodLength = self._GetValueFromDict(element, 'bloodLength')
elementPB.nMaxPointNum = self._GetValueFromDict(element, 'maxPointNum')
elementPB.fMinScale = self._GetValueFromDict(element, 'minScale')
elementPB.fMaxScale = self._GetValueFromDict(element, 'maxScale')
elementPB.nScaleLevel = self._GetValueFromDict(element, 'scaleLevel')
templates = elementPB.stPBTemplates
for templ in self._GetValueFromDict(element, 'templates'):
template = templates.stPBTemplates.add()
self._SerialTemplate(template, templ)
def _SerialShootGameHurt(self, taskPB, taskVal):
"""
        ShootGameHurt: this recognizer method is only used by ShootGame
"""
taskPB.nTaskID = self._GetValueFromDict(taskVal, 'taskID')
taskPB.eType = gameregProtoc_pb2.TYPE_SHOOTHURT
# taskPB.nSkipFrame = self._GetValueFromDict(taskVal, 'skipFrame')
def __init__(self, *args):
this = _ghmmwrapper.new_ghmm_dpseq(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_dpseq
__del__ = lambda self : None;
def set_discrete(self, *args): return _ghmmwrapper.ghmm_dpseq_set_discrete(self, *args)
def set_continuous(self, *args): return _ghmmwrapper.ghmm_dpseq_set_continuous(self, *args)
def get_discrete(self, *args): return _ghmmwrapper.ghmm_dpseq_get_discrete(self, *args)
def get_continuous(self, *args): return _ghmmwrapper.ghmm_dpseq_get_continuous(self, *args)
def slice(self, *args): return _ghmmwrapper.ghmm_dpseq_slice(self, *args)
def get_char(self, *args): return _ghmmwrapper.ghmm_dpseq_get_char(self, *args)
def get_double(self, *args): return _ghmmwrapper.ghmm_dpseq_get_double(self, *args)
ghmm_dpseq_swigregister = _ghmmwrapper.ghmm_dpseq_swigregister
ghmm_dpseq_swigregister(ghmm_dpseq)
def ghmm_dpseq_free(*args):
return _ghmmwrapper.ghmm_dpseq_free(*args)
ghmm_dpseq_free = _ghmmwrapper.ghmm_dpseq_free
normal = _ghmmwrapper.normal
normal_right = _ghmmwrapper.normal_right
normal_approx = _ghmmwrapper.normal_approx
normal_left = _ghmmwrapper.normal_left
uniform = _ghmmwrapper.uniform
binormal = _ghmmwrapper.binormal
multinormal = _ghmmwrapper.multinormal
density_number = _ghmmwrapper.density_number
def density_array_alloc(*args):
return _ghmmwrapper.density_array_alloc(*args)
density_array_alloc = _ghmmwrapper.density_array_alloc
def density_array_getitem(*args):
return _ghmmwrapper.density_array_getitem(*args)
density_array_getitem = _ghmmwrapper.density_array_getitem
def density_array_setitem(*args):
return _ghmmwrapper.density_array_setitem(*args)
density_array_setitem = _ghmmwrapper.density_array_setitem
class ghmm_c_emission(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ghmm_c_emission, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ghmm_c_emission, name)
__repr__ = _swig_repr
__swig_setmethods__["type"] = _ghmmwrapper.ghmm_c_emission_type_set
__swig_getmethods__["type"] = _ghmmwrapper.ghmm_c_emission_type_get
if _newclass:type = _swig_property(_ghmmwrapper.ghmm_c_emission_type_get, _ghmmwrapper.ghmm_c_emission_type_set)
__swig_setmethods__["dimension"] = _ghmmwrapper.ghmm_c_emission_dimension_set
__swig_getmethods__["dimension"] = _ghmmwrapper.ghmm_c_emission_dimension_get
if _newclass:dimension = _swig_property(_ghmmwrapper.ghmm_c_emission_dimension_get, _ghmmwrapper.ghmm_c_emission_dimension_set)
__swig_getmethods__["mean"] = _ghmmwrapper.ghmm_c_emission_mean_get
if _newclass:mean = _swig_property(_ghmmwrapper.ghmm_c_emission_mean_get)
__swig_getmethods__["variance"] = _ghmmwrapper.ghmm_c_emission_variance_get
if _newclass:variance = _swig_property(_ghmmwrapper.ghmm_c_emission_variance_get)
__swig_setmethods__["sigmainv"] = _ghmmwrapper.ghmm_c_emission_sigmainv_set
__swig_getmethods__["sigmainv"] = _ghmmwrapper.ghmm_c_emission_sigmainv_get
if _newclass:sigmainv = _swig_property(_ghmmwrapper.ghmm_c_emission_sigmainv_get, _ghmmwrapper.ghmm_c_emission_sigmainv_set)
__swig_setmethods__["det"] = _ghmmwrapper.ghmm_c_emission_det_set
__swig_getmethods__["det"] = _ghmmwrapper.ghmm_c_emission_det_get
if _newclass:det = _swig_property(_ghmmwrapper.ghmm_c_emission_det_get, _ghmmwrapper.ghmm_c_emission_det_set)
__swig_setmethods__["sigmacd"] = _ghmmwrapper.ghmm_c_emission_sigmacd_set
__swig_getmethods__["sigmacd"] = _ghmmwrapper.ghmm_c_emission_sigmacd_get
if _newclass:sigmacd = _swig_property(_ghmmwrapper.ghmm_c_emission_sigmacd_get, _ghmmwrapper.ghmm_c_emission_sigmacd_set)
__swig_setmethods__["min"] = _ghmmwrapper.ghmm_c_emission_min_set
__swig_getmethods__["min"] = _ghmmwrapper.ghmm_c_emission_min_get
if _newclass:min = _swig_property(_ghmmwrapper.ghmm_c_emission_min_get, _ghmmwrapper.ghmm_c_emission_min_set)
__swig_setmethods__["max"] = _ghmmwrapper.ghmm_c_emission_max_set
__swig_getmethods__["max"] = _ghmmwrapper.ghmm_c_emission_max_get
if _newclass:max = _swig_property(_ghmmwrapper.ghmm_c_emission_max_get, _ghmmwrapper.ghmm_c_emission_max_set)
__swig_setmethods__["fixed"] = _ghmmwrapper.ghmm_c_emission_fixed_set
__swig_getmethods__["fixed"] = _ghmmwrapper.ghmm_c_emission_fixed_get
if _newclass:fixed = _swig_property(_ghmmwrapper.ghmm_c_emission_fixed_get, _ghmmwrapper.ghmm_c_emission_fixed_set)
def setDensity(self, *args): return _ghmmwrapper.ghmm_c_emission_setDensity(self, *args)
def getDensity(self): return _ghmmwrapper.ghmm_c_emission_getDensity(self)
def getMeanVec(self): return _ghmmwrapper.ghmm_c_emission_getMeanVec(self)
def __init__(self):
this = _ghmmwrapper.new_ghmm_c_emission()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_c_emission
__del__ = lambda self : None;
ghmm_c_emission_swigregister = _ghmmwrapper.ghmm_c_emission_swigregister
ghmm_c_emission_swigregister(ghmm_c_emission)
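# Each pair of *_get/*_set functions exported by _ghmmwrapper is wrapped into a
# Python property on the proxy class above, so attribute access such as
# emission.dimension reads and writes the underlying C struct field directly.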
class ghmm_c_emission_variance(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ghmm_c_emission_variance, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ghmm_c_emission_variance, name)
__repr__ = _swig_repr
__swig_setmethods__["val"] = _ghmmwrapper.ghmm_c_emission_variance_val_set
__swig_getmethods__["val"] = _ghmmwrapper.ghmm_c_emission_variance_val_get
if _newclass:val = _swig_property(_ghmmwrapper.ghmm_c_emission_variance_val_get, _ghmmwrapper.ghmm_c_emission_variance_val_set)
__swig_setmethods__["mat"] = _ghmmwrapper.ghmm_c_emission_variance_mat_set
__swig_getmethods__["mat"] = _ghmmwrapper.ghmm_c_emission_variance_mat_get
if _newclass:mat = _swig_property(_ghmmwrapper.ghmm_c_emission_variance_mat_get, _ghmmwrapper.ghmm_c_emission_variance_mat_set)
def __init__(self):
this = _ghmmwrapper.new_ghmm_c_emission_variance()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_c_emission_variance
__del__ = lambda self : None;
ghmm_c_emission_variance_swigregister = _ghmmwrapper.ghmm_c_emission_variance_swigregister
ghmm_c_emission_variance_swigregister(ghmm_c_emission_variance)
class ghmm_c_emission_mean(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ghmm_c_emission_mean, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ghmm_c_emission_mean, name)
__repr__ = _swig_repr
__swig_setmethods__["val"] = _ghmmwrapper.ghmm_c_emission_mean_val_set
__swig_getmethods__["val"] = _ghmmwrapper.ghmm_c_emission_mean_val_get
if _newclass:val = _swig_property(_ghmmwrapper.ghmm_c_emission_mean_val_get, _ghmmwrapper.ghmm_c_emission_mean_val_set)
__swig_setmethods__["vec"] = _ghmmwrapper.ghmm_c_emission_mean_vec_set
__swig_getmethods__["vec"] = _ghmmwrapper.ghmm_c_emission_mean_vec_get
if _newclass:vec = _swig_property(_ghmmwrapper.ghmm_c_emission_mean_vec_get, _ghmmwrapper.ghmm_c_emission_mean_vec_set)
def __init__(self):
this = _ghmmwrapper.new_ghmm_c_emission_mean()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_c_emission_mean
__del__ = lambda self : None;
ghmm_c_emission_mean_swigregister = _ghmmwrapper.ghmm_c_emission_mean_swigregister
ghmm_c_emission_mean_swigregister(ghmm_c_emission_mean)
def c_emission_array_alloc(*args):
return _ghmmwrapper.c_emission_array_alloc(*args)
c_emission_array_alloc = _ghmmwrapper.c_emission_array_alloc
def c_emission_array_getRef(*args):
return _ghmmwrapper.c_emission_array_getRef(*args)
c_emission_array_getRef = _ghmmwrapper.c_emission_array_getRef
def c_emission_ptr_array_alloc(*args):
return _ghmmwrapper.c_emission_ptr_array_alloc(*args)
c_emission_ptr_array_alloc = _ghmmwrapper.c_emission_ptr_array_alloc
def c_emission_ptr_array_getitem(*args):
return _ghmmwrapper.c_emission_ptr_array_getitem(*args)
c_emission_ptr_array_getitem = _ghmmwrapper.c_emission_ptr_array_getitem
def c_emission_ptr_array_setitem(*args):
return _ghmmwrapper.c_emission_ptr_array_setitem(*args)
c_emission_ptr_array_setitem = _ghmmwrapper.c_emission_ptr_array_setitem
def ighmm_invert_det(*args):
return _ghmmwrapper.ighmm_invert_det(*args)
ighmm_invert_det = _ghmmwrapper.ighmm_invert_det
class ghmm_cstate(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ghmm_cstate, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ghmm_cstate, name)
__repr__ = _swig_repr
__swig_setmethods__["M"] = _ghmmwrapper.ghmm_cstate_M_set
__swig_getmethods__["M"] = _ghmmwrapper.ghmm_cstate_M_get
if _newclass:M = _swig_property(_ghmmwrapper.ghmm_cstate_M_get, _ghmmwrapper.ghmm_cstate_M_set)
__swig_setmethods__["pi"] = _ghmmwrapper.ghmm_cstate_pi_set
__swig_getmethods__["pi"] = _ghmmwrapper.ghmm_cstate_pi_get
if _newclass:pi = _swig_property(_ghmmwrapper.ghmm_cstate_pi_get, _ghmmwrapper.ghmm_cstate_pi_set)
__swig_setmethods__["out_id"] = _ghmmwrapper.ghmm_cstate_out_id_set
__swig_getmethods__["out_id"] = _ghmmwrapper.ghmm_cstate_out_id_get
if _newclass:out_id = _swig_property(_ghmmwrapper.ghmm_cstate_out_id_get, _ghmmwrapper.ghmm_cstate_out_id_set)
__swig_setmethods__["in_id"] = _ghmmwrapper.ghmm_cstate_in_id_set
__swig_getmethods__["in_id"] = _ghmmwrapper.ghmm_cstate_in_id_get
if _newclass:in_id = _swig_property(_ghmmwrapper.ghmm_cstate_in_id_get, _ghmmwrapper.ghmm_cstate_in_id_set)
__swig_setmethods__["out_a"] = _ghmmwrapper.ghmm_cstate_out_a_set
__swig_getmethods__["out_a"] = _ghmmwrapper.ghmm_cstate_out_a_get
if _newclass:out_a = _swig_property(_ghmmwrapper.ghmm_cstate_out_a_get, _ghmmwrapper.ghmm_cstate_out_a_set)
__swig_setmethods__["in_a"] = _ghmmwrapper.ghmm_cstate_in_a_set
__swig_getmethods__["in_a"] = _ghmmwrapper.ghmm_cstate_in_a_get
if _newclass:in_a = _swig_property(_ghmmwrapper.ghmm_cstate_in_a_get, _ghmmwrapper.ghmm_cstate_in_a_set)
__swig_setmethods__["out_states"] = _ghmmwrapper.ghmm_cstate_out_states_set
__swig_getmethods__["out_states"] = _ghmmwrapper.ghmm_cstate_out_states_get
if _newclass:out_states = _swig_property(_ghmmwrapper.ghmm_cstate_out_states_get, _ghmmwrapper.ghmm_cstate_out_states_set)
__swig_setmethods__["in_states"] = _ghmmwrapper.ghmm_cstate_in_states_set
__swig_getmethods__["in_states"] = _ghmmwrapper.ghmm_cstate_in_states_get
if _newclass:in_states = _swig_property(_ghmmwrapper.ghmm_cstate_in_states_get, _ghmmwrapper.ghmm_cstate_in_states_set)
__swig_setmethods__["c"] = _ghmmwrapper.ghmm_cstate_c_set
__swig_getmethods__["c"] = _ghmmwrapper.ghmm_cstate_c_get
if _newclass:c = _swig_property(_ghmmwrapper.ghmm_cstate_c_get, _ghmmwrapper.ghmm_cstate_c_set)
__swig_setmethods__["fix"] = _ghmmwrapper.ghmm_cstate_fix_set
__swig_getmethods__["fix"] = _ghmmwrapper.ghmm_cstate_fix_get
if _newclass:fix = _swig_property(_ghmmwrapper.ghmm_cstate_fix_get, _ghmmwrapper.ghmm_cstate_fix_set)
__swig_setmethods__["e"] = _ghmmwrapper.ghmm_cstate_e_set
__swig_getmethods__["e"] = _ghmmwrapper.ghmm_cstate_e_get
if _newclass:e = _swig_property(_ghmmwrapper.ghmm_cstate_e_get, _ghmmwrapper.ghmm_cstate_e_set)
__swig_setmethods__["desc"] = _ghmmwrapper.ghmm_cstate_desc_set
__swig_getmethods__["desc"] = _ghmmwrapper.ghmm_cstate_desc_get
if _newclass:desc = _swig_property(_ghmmwrapper.ghmm_cstate_desc_get, _ghmmwrapper.ghmm_cstate_desc_set)
__swig_setmethods__["xPosition"] = _ghmmwrapper.ghmm_cstate_xPosition_set
__swig_getmethods__["xPosition"] = _ghmmwrapper.ghmm_cstate_xPosition_get
if _newclass:xPosition = _swig_property(_ghmmwrapper.ghmm_cstate_xPosition_get, _ghmmwrapper.ghmm_cstate_xPosition_set)
__swig_setmethods__["yPosition"] = _ghmmwrapper.ghmm_cstate_yPosition_set
__swig_getmethods__["yPosition"] = _ghmmwrapper.ghmm_cstate_yPosition_get
if _newclass:yPosition = _swig_property(_ghmmwrapper.ghmm_cstate_yPosition_get, _ghmmwrapper.ghmm_cstate_yPosition_set)
def alloc(self, *args): return _ghmmwrapper.ghmm_cstate_alloc(self, *args)
def setDensity(self, *args): return _ghmmwrapper.ghmm_cstate_setDensity(self, *args)
def setWeight(self, *args): return _ghmmwrapper.ghmm_cstate_setWeight(self, *args)
def setMean(self, *args): return _ghmmwrapper.ghmm_cstate_setMean(self, *args)
def setStdDev(self, *args): return _ghmmwrapper.ghmm_cstate_setStdDev(self, *args)
def getDensity(self, *args): return _ghmmwrapper.ghmm_cstate_getDensity(self, *args)
def getWeight(self, *args): return _ghmmwrapper.ghmm_cstate_getWeight(self, *args)
def getMean(self, *args): return _ghmmwrapper.ghmm_cstate_getMean(self, *args)
def getStdDev(self, *args): return _ghmmwrapper.ghmm_cstate_getStdDev(self, *args)
def setMin(self, *args): return _ghmmwrapper.ghmm_cstate_setMin(self, *args)
def setMax(self, *args): return _ghmmwrapper.ghmm_cstate_setMax(self, *args)
def getInState(self, *args): return _ghmmwrapper.ghmm_cstate_getInState(self, *args)
def getOutState(self, *args): return _ghmmwrapper.ghmm_cstate_getOutState(self, *args)
def getInProb(self, *args): return _ghmmwrapper.ghmm_cstate_getInProb(self, *args)
def getOutProb(self, *args): return _ghmmwrapper.ghmm_cstate_getOutProb(self, *args)
def setInProb(self, *args): return _ghmmwrapper.ghmm_cstate_setInProb(self, *args)
def setOutProb(self, *args): return _ghmmwrapper.ghmm_cstate_setOutProb(self, *args)
def getEmission(self, *args): return _ghmmwrapper.ghmm_cstate_getEmission(self, *args)
def calc_cmbm(self, *args): return _ghmmwrapper.ghmm_cstate_calc_cmbm(self, *args)
def calc_b(self, *args): return _ghmmwrapper.ghmm_cstate_calc_b(self, *args)
def calc_cmBm(self, *args): return _ghmmwrapper.ghmm_cstate_calc_cmBm(self, *args)
def calc_B(self, *args): return _ghmmwrapper.ghmm_cstate_calc_B(self, *args)
def getDesc(self): return _ghmmwrapper.ghmm_cstate_getDesc(self)
def setDesc(self, *args): return _ghmmwrapper.ghmm_cstate_setDesc(self, *args)
def __init__(self):
this = _ghmmwrapper.new_ghmm_cstate()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_cstate
__del__ = lambda self : None;
ghmm_cstate_swigregister = _ghmmwrapper.ghmm_cstate_swigregister
ghmm_cstate_swigregister(ghmm_cstate)
def cstate_array_alloc(*args):
return _ghmmwrapper.cstate_array_alloc(*args)
cstate_array_alloc = _ghmmwrapper.cstate_array_alloc
def cstate_array_getRef(*args):
return _ghmmwrapper.cstate_array_getRef(*args)
cstate_array_getRef = _ghmmwrapper.cstate_array_getRef
def cstate_ptr_array_alloc(*args):
return _ghmmwrapper.cstate_ptr_array_alloc(*args)
cstate_ptr_array_alloc = _ghmmwrapper.cstate_ptr_array_alloc
def cstate_ptr_array_getitem(*args):
return _ghmmwrapper.cstate_ptr_array_getitem(*args)
cstate_ptr_array_getitem = _ghmmwrapper.cstate_ptr_array_getitem
def cstate_ptr_array_setitem(*args):
return _ghmmwrapper.cstate_ptr_array_setitem(*args)
cstate_ptr_array_setitem = _ghmmwrapper.cstate_ptr_array_setitem
class ghmm_cmodel_class_change_context(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ghmm_cmodel_class_change_context, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ghmm_cmodel_class_change_context, name)
__repr__ = _swig_repr
__swig_setmethods__["python_module"] = _ghmmwrapper.ghmm_cmodel_class_change_context_python_module_set
__swig_getmethods__["python_module"] = _ghmmwrapper.ghmm_cmodel_class_change_context_python_module_get
if _newclass:python_module = _swig_property(_ghmmwrapper.ghmm_cmodel_class_change_context_python_module_get, _ghmmwrapper.ghmm_cmodel_class_change_context_python_module_set)
__swig_setmethods__["python_function"] = _ghmmwrapper.ghmm_cmodel_class_change_context_python_function_set
__swig_getmethods__["python_function"] = _ghmmwrapper.ghmm_cmodel_class_change_context_python_function_get
if _newclass:python_function = _swig_property(_ghmmwrapper.ghmm_cmodel_class_change_context_python_function_get, _ghmmwrapper.ghmm_cmodel_class_change_context_python_function_set)
__swig_setmethods__["k"] = _ghmmwrapper.ghmm_cmodel_class_change_context_k_set
__swig_getmethods__["k"] = _ghmmwrapper.ghmm_cmodel_class_change_context_k_get
if _newclass:k = _swig_property(_ghmmwrapper.ghmm_cmodel_class_change_context_k_get, _ghmmwrapper.ghmm_cmodel_class_change_context_k_set)
__swig_setmethods__["get_class"] = _ghmmwrapper.ghmm_cmodel_class_change_context_get_class_set
__swig_getmethods__["get_class"] = _ghmmwrapper.ghmm_cmodel_class_change_context_get_class_get
if _newclass:get_class = _swig_property(_ghmmwrapper.ghmm_cmodel_class_change_context_get_class_get, _ghmmwrapper.ghmm_cmodel_class_change_context_get_class_set)
__swig_setmethods__["user_data"] = _ghmmwrapper.ghmm_cmodel_class_change_context_user_data_set
__swig_getmethods__["user_data"] = _ghmmwrapper.ghmm_cmodel_class_change_context_user_data_get
if _newclass:user_data = _swig_property(_ghmmwrapper.ghmm_cmodel_class_change_context_user_data_get, _ghmmwrapper.ghmm_cmodel_class_change_context_user_data_set)
def __init__(self):
this = _ghmmwrapper.new_ghmm_cmodel_class_change_context()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_cmodel_class_change_context
__del__ = lambda self : None;
ghmm_cmodel_class_change_context_swigregister = _ghmmwrapper.ghmm_cmodel_class_change_context_swigregister
ghmm_cmodel_class_change_context_swigregister(ghmm_cmodel_class_change_context)
class ghmm_cmodel_baum_welch_context(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ghmm_cmodel_baum_welch_context, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ghmm_cmodel_baum_welch_context, name)
__repr__ = _swig_repr
__swig_setmethods__["smo"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_smo_set
__swig_getmethods__["smo"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_smo_get
if _newclass:smo = _swig_property(_ghmmwrapper.ghmm_cmodel_baum_welch_context_smo_get, _ghmmwrapper.ghmm_cmodel_baum_welch_context_smo_set)
__swig_setmethods__["sqd"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_sqd_set
__swig_getmethods__["sqd"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_sqd_get
if _newclass:sqd = _swig_property(_ghmmwrapper.ghmm_cmodel_baum_welch_context_sqd_get, _ghmmwrapper.ghmm_cmodel_baum_welch_context_sqd_set)
__swig_setmethods__["logp"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_logp_set
__swig_getmethods__["logp"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_logp_get
if _newclass:logp = _swig_property(_ghmmwrapper.ghmm_cmodel_baum_welch_context_logp_get, _ghmmwrapper.ghmm_cmodel_baum_welch_context_logp_set)
__swig_setmethods__["eps"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_eps_set
__swig_getmethods__["eps"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_eps_get
if _newclass:eps = _swig_property(_ghmmwrapper.ghmm_cmodel_baum_welch_context_eps_get, _ghmmwrapper.ghmm_cmodel_baum_welch_context_eps_set)
__swig_setmethods__["max_iter"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_max_iter_set
__swig_getmethods__["max_iter"] = _ghmmwrapper.ghmm_cmodel_baum_welch_context_max_iter_get
if _newclass:max_iter = _swig_property(_ghmmwrapper.ghmm_cmodel_baum_welch_context_max_iter_get, _ghmmwrapper.ghmm_cmodel_baum_welch_context_max_iter_set)
def __init__(self, *args):
this = _ghmmwrapper.new_ghmm_cmodel_baum_welch_context(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_cmodel_baum_welch_context
__del__ = lambda self : None;
ghmm_cmodel_baum_welch_context_swigregister = _ghmmwrapper.ghmm_cmodel_baum_welch_context_swigregister
ghmm_cmodel_baum_welch_context_swigregister(ghmm_cmodel_baum_welch_context)
class ghmm_cmodel(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ghmm_cmodel, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ghmm_cmodel, name)
__repr__ = _swig_repr
__swig_setmethods__["N"] = _ghmmwrapper.ghmm_cmodel_N_set
__swig_getmethods__["N"] = _ghmmwrapper.ghmm_cmodel_N_get
if _newclass:N = _swig_property(_ghmmwrapper.ghmm_cmodel_N_get, _ghmmwrapper.ghmm_cmodel_N_set)
__swig_setmethods__["M"] = _ghmmwrapper.ghmm_cmodel_M_set
__swig_getmethods__["M"] = _ghmmwrapper.ghmm_cmodel_M_get
if _newclass:M = _swig_property(_ghmmwrapper.ghmm_cmodel_M_get, _ghmmwrapper.ghmm_cmodel_M_set)
__swig_setmethods__["dim"] = _ghmmwrapper.ghmm_cmodel_dim_set
__swig_getmethods__["dim"] = _ghmmwrapper.ghmm_cmodel_dim_get
if _newclass:dim = _swig_property(_ghmmwrapper.ghmm_cmodel_dim_get, _ghmmwrapper.ghmm_cmodel_dim_set)
__swig_setmethods__["cos"] = _ghmmwrapper.ghmm_cmodel_cos_set
__swig_getmethods__["cos"] = _ghmmwrapper.ghmm_cmodel_cos_get
if _newclass:cos = _swig_property(_ghmmwrapper.ghmm_cmodel_cos_get, _ghmmwrapper.ghmm_cmodel_cos_set)
__swig_setmethods__["prior"] = _ghmmwrapper.ghmm_cmodel_prior_set
__swig_getmethods__["prior"] = _ghmmwrapper.ghmm_cmodel_prior_get
if _newclass:prior = _swig_property(_ghmmwrapper.ghmm_cmodel_prior_get, _ghmmwrapper.ghmm_cmodel_prior_set)
__swig_setmethods__["name"] = _ghmmwrapper.ghmm_cmodel_name_set
__swig_getmethods__["name"] = _ghmmwrapper.ghmm_cmodel_name_get
if _newclass:name = _swig_property(_ghmmwrapper.ghmm_cmodel_name_get, _ghmmwrapper.ghmm_cmodel_name_set)
__swig_setmethods__["model_type"] = _ghmmwrapper.ghmm_cmodel_model_type_set
__swig_getmethods__["model_type"] = _ghmmwrapper.ghmm_cmodel_model_type_get
if _newclass:model_type = _swig_property(_ghmmwrapper.ghmm_cmodel_model_type_get, _ghmmwrapper.ghmm_cmodel_model_type_set)
__swig_setmethods__["s"] = _ghmmwrapper.ghmm_cmodel_s_set
__swig_getmethods__["s"] = _ghmmwrapper.ghmm_cmodel_s_get
if _newclass:s = _swig_property(_ghmmwrapper.ghmm_cmodel_s_get, _ghmmwrapper.ghmm_cmodel_s_set)
__swig_setmethods__["class_change"] = _ghmmwrapper.ghmm_cmodel_class_change_set
__swig_getmethods__["class_change"] = _ghmmwrapper.ghmm_cmodel_class_change_get
if _newclass:class_change = _swig_property(_ghmmwrapper.ghmm_cmodel_class_change_get, _ghmmwrapper.ghmm_cmodel_class_change_set)
def __init__(self, *args):
this = _ghmmwrapper.new_ghmm_cmodel(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ghmmwrapper.delete_ghmm_cmodel
__del__ = lambda self : None;
def write_xml(self, *args): return _ghmmwrapper.ghmm_cmodel_write_xml(self, *args)
def forward(self, *args): return _ghmmwrapper.ghmm_cmodel_forward(self, *args)
def backward(self, *args): return _ghmmwrapper.ghmm_cmodel_backward(self, *args)
def logp(self, *args): return _ghmmwrapper.ghmm_cmodel_logp(self, *args)
def logp_joint(self, *args): return _ghmmwrapper.ghmm_cmodel_logp_joint(self, *args)
def class_change_alloc(self, *args): return _ghmmwrapper.ghmm_cmodel_class_change_alloc(self, *args)
def get_random_var(self, *args): return _ghmmwrapper.ghmm_cmodel_get_random_var(self, *args)
def generate_sequences(self, *args): return _ghmmwrapper.ghmm_cmodel_generate_sequences(self, *args)
def likelihood(self, *args): return _ghmmwrapper.ghmm_cmodel_likelihood(self, *args)
def individual_likelihoods(self, *args): return _ghmmwrapper.ghmm_cmodel_individual_likelihoods(self, *args)
def prob_distance(self, *args): return _ghmmwrapper.ghmm_cmodel_prob_distance(self, *args)
def get_interval_B(self, *args): return _ghmmwrapper.ghmm_cmodel_get_interval_B(self, *args)
def normalize(self): return _ghmmwrapper.ghmm_cmodel_normalize(self)
def get_transition(self, *args): return _ghmmwrapper.ghmm_cmodel_get_transition(self, *args)
def check_transition(self, *args): return _ghmmwrapper.ghmm_cmodel_check_transition(self, *args)
def set_transition(self, *args): return _ghmmwrapper.ghmm_cmodel_set_transition(self, *args)
def viterbi(self, *args): return _ghmmwrapper.ghmm_cmodel_viterbi(self, *args)
def | |
% (656 - 334)) + muddyShoeRect.x
mudSplatRect2.y = ((height * prn2) % (590 - 317)) + muddyShoeRect.y
mudArray.append(mudSplatRect2)
elif scoreMultiplier > 2:
prn1 = randomNumber()
mudSplatRect1 = mudSplat.get_rect()
mudSplatRect1.x = ((width * prn1) % (656 - 334)) + muddyShoeRect.x
mudSplatRect1.y = ((height * prn1) % (590 - 317)) + muddyShoeRect.y
mudArray.append(mudSplatRect1)
myGen.setSeed(prn1)
prn2 = (myGen.next_prn() % 100) + 1
mudSplatRect2 = mudSplat.get_rect()
mudSplatRect2.x = ((width * prn2) % (656 - 334)) + muddyShoeRect.x
mudSplatRect2.y = ((height * prn2) % (590 - 317)) + muddyShoeRect.y
mudArray.append(mudSplatRect2)
myGen.setSeed(prn2)
prn3 = (myGen.next_prn() % 100) + 1
mudSplatRect3 = mudSplat.get_rect()
mudSplatRect3.x = ((width * prn3) % (656 - 334)) + muddyShoeRect.x
mudSplatRect3.y = ((height * prn3) % (590 - 317)) + muddyShoeRect.y
mudArray.append(mudSplatRect3)
start = time.time()
while 1:
# if exit button is pressed, close program
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
#check for pause
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
state = PAUSED
pauseScreen()
# if mudSplat is clicked, remove from array
if event.type == pygame.MOUSEBUTTONDOWN and napkinButtonPressed == True:
x, y = event.pos
for item in mudArray:
if item.collidepoint(x, y):
click = pygame.mouse.get_pressed()
mudArray.remove(item)
hpoints += 1
#happyMeter()
        # if score drops below 0, game over!
if score < 0:
print("You Lose!")
gameOver()
        # load multiplier button
if score >= upgradeCost:
multiplier = pygame.image.load("x2Multiplier1.png")
else :
multiplier = pygame.image.load("x2Multiplier1Disabled.png")
        # create multiplier rectangle
multRect = multiplier.get_rect()
# set x,y position of multiplier image on screen
multRect.x = width * 0.83
multRect.y = height * 0.01
# draw images onto screen
screen.blit(background, backgroundRect)
screen.blit(muddyShoe, muddyShoeRect)
screen.blit(multiplier, multRect)
screen.blit(napkinButton, napkinButtonRect)
screen.blit(deodorButton, deodorButtonRect)
screen.blit(mood, moodRect) #new
# draw all mudSplats on screen
for item in mudArray:
screen.blit(mudSplat, item)
# render text on screen
scoretext = myfont.render("Score {0}".format(score),1,(0,0,0))
upgradetext = myfont.render("Upgrade Cost: {0}".format(upgradeCost),1,(0,0,0))
multipliertext = myfont.render("Multiplier: x{0}".format(scoreMultiplier),1,(0,0,0))
# draw text onto screen
screen.blit(scoretext, (5,10))
screen.blit(upgradetext, (5,30))
screen.blit(multipliertext, (5,50))
# object for getting mouse position and click value
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
        # if the napkin button has been pressed, render the napkin image as the cursor
if napkinButtonPressed == True:
# making the cursor invisible
pygame.mouse.set_visible( False )
# place the napkin image over the cursor
napkinCursorRect.center = pygame.mouse.get_pos()
# display the napkin image over the cursor
screen.blit(napkinCursor, napkinCursorRect)
# activate multiplier if button is pressed
if (multRect.x + 130 > mouse[0] > multRect.x and multRect.y + 130 > mouse[1] > multRect.y) and score >= upgradeCost:
if click[0] == 1:
score -= upgradeCost
scoreMultiplier *= 2
upgradeCost *= scoreMultiplier
clickValue *= scoreMultiplier
# reset left click[0] to 0
click = pygame.mouse.get_pressed()
# activate napkin if button is pressed
if (napkinButtonRect.x + 130 > mouse[0] > napkinButtonRect.x and napkinButtonRect.y + 130 > mouse[1] > napkinButtonRect.y):
if click[0] == 1:
napkinButtonPressed = True
# reset left click[0] to 0
click = pygame.mouse.get_pressed()
# if mudArray is empty, return to gameLoop()
if len(mudArray) == 0:
pygame.mouse.set_visible(True)
end = time.time()
lapse = end - start
print("end - start: ", end - start)
if lapse >= 1 and lapse < 1.5:
hpoints += 1
happyMeter()
if lapse >= 1.5 and lapse < 2:
hpoints -= 1
happyMeter()
if lapse >= 2:
hpoints -= 2
happyMeter()
return
# decrement score (medium decrease)
score -= int(((20 * scoreMultiplier) / 5))
# update the screen
pygame.display.update()
clock.tick(10)
# ===================== stinky() =============================
# stinky randomly generates fumes onto the shoe
# in this event, points decrease at an extremely fast rate
# and the amount of decrease scales with the x2 multiplier
# it is called during the gameLoop() and calls the PRNG
# for numbers
# ==============================================================
def stinky():
# reference global vars
global score
global scoreMultiplier
global upgradeCost
global clickValue
global hpoints
# list for storing stinky rects
stinkArray = []
# load background image
background = pygame.image.load("stinkyBG.jpg")
# create background rectangle
backgroundRect = background.get_rect()
# load deodorant Cursor Image
deodorCursor = pygame.image.load("spraycan.png")
# deodorant Cursor Rectangle
deodorCursorRect = deodorCursor.get_rect()
# set napkin cursor variable to not clicked yet
deodorButtonPressed = False
# load up stinky fumes
# stinkSplatRects will be instantiated later
stinkSplat = pygame.image.load("fume1.png")
stinkSplat2 = pygame.image.load("fume2.png")
# load stinky shoe image
stinkyShoe = pygame.image.load("sneaker.png")
# stinkyShoe rectangle
stinkyShoeRect = stinkyShoe.get_rect()
# load mood image
global mood
#create mood rectangle
moodRect = mood.get_rect() #new
# load napkin button
napkinButton = pygame.image.load("napkinButtonDisabled.png")
# create napkin rectangle
napkinButtonRect = napkinButton.get_rect()
# load deodorant button
deodorButton = pygame.image.load("spraycanButton.png")
# create napkin rectangle
deodorButtonRect = deodorButton.get_rect()
myfont = pygame.font.SysFont("monospace", 16)
# set x,y position of shoe image on screen
stinkyShoeRect.x = width * 0.07
stinkyShoeRect.y = height * 0.15
# set x,y position of napkin image on screen
napkinButtonRect.x = width * 0.83
napkinButtonRect.y = height * 0.25
# set x,y position of deodorant image on screen
deodorButtonRect.x = width * 0.83
deodorButtonRect.y = height * 0.49
# set x,y position of mood image on screen
moodRect.x = width * .33 #new
moodRect.y = height * .01 #new
# place a number of fumes on shoe based on multiplier
if scoreMultiplier == 1:
stinkSplatRect = stinkSplat.get_rect()
randNum = randomNumber()
stinkSplatRect.x = ((width * randNum) % (656 - 334)) + stinkyShoeRect.x
stinkSplatRect.y = ((height * randNum) % (590 - 317)) + stinkyShoeRect.y
stinkArray.append(stinkSplatRect)
elif scoreMultiplier == 2:
prn1 = randomNumber()
stinkSplatRect1 = stinkSplat2.get_rect()
stinkSplatRect1.x = ((width * prn1) % (656 - 334)) + stinkyShoeRect.x
stinkSplatRect1.y = ((height * prn1) % (590 - 317)) + stinkyShoeRect.y
stinkArray.append(stinkSplatRect1)
myGen.setSeed(prn1)
prn2 = (myGen.next_prn() % 100) + 1
stinkSplatRect2 = stinkSplat.get_rect()
stinkSplatRect2.x = ((width * prn2) % (656 - 334)) + stinkyShoeRect.x
stinkSplatRect2.y = ((height * prn2) % (590 - 317)) + stinkyShoeRect.y
stinkArray.append(stinkSplatRect2)
elif scoreMultiplier > 2:
prn1 = randomNumber()
stinkSplatRect1 = stinkSplat2.get_rect()
stinkSplatRect1.x = ((width * prn1) % (656 - 334)) + stinkyShoeRect.x
stinkSplatRect1.y = ((height * prn1) % (590 - 317)) + stinkyShoeRect.y
stinkArray.append(stinkSplatRect1)
myGen.setSeed(prn1)
prn2 = (myGen.next_prn() % 100) + 1
stinkSplatRect2 = stinkSplat2.get_rect()
stinkSplatRect2.x = ((width * prn2) % (656 - 334)) + stinkyShoeRect.x
stinkSplatRect2.y = ((height * prn2) % (590 - 317)) + stinkyShoeRect.y
stinkArray.append(stinkSplatRect2)
myGen.setSeed(prn2)
prn3 = (myGen.next_prn() % 100) + 1
stinkSplatRect3 = stinkSplat.get_rect()
stinkSplatRect3.x = ((width * prn3) % (656 - 334)) + stinkyShoeRect.x
stinkSplatRect3.y = ((height * prn3) % (590 - 317)) + stinkyShoeRect.y
stinkArray.append(stinkSplatRect3)
start = time.time()
while 1:
# if exit button is pressed, close program
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
#check for pause
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
state = PAUSED
pauseScreen()
# if stinkySplat is clicked, remove from array
if event.type == pygame.MOUSEBUTTONDOWN and deodorButtonPressed == True:
x, y = event.pos
for item in stinkArray:
if item.collidepoint(x, y):
click = pygame.mouse.get_pressed()
stinkArray.remove(item)
hpoints += 1
#happyMeter()
        # if score drops below 0, game over!
if score < 0:
print("You Lose!")
gameOver()
        # load multiplier button
if score >= upgradeCost:
multiplier = pygame.image.load("x2Multiplier1.png")
else :
multiplier = pygame.image.load("x2Multiplier1Disabled.png")
        # create multiplier rectangle
multRect = multiplier.get_rect()
# set x,y position of multiplier image on screen
multRect.x = width * 0.83
multRect.y = height * 0.01
# draw images onto screen
screen.blit(background, backgroundRect)
screen.blit(stinkyShoe, stinkyShoeRect)
screen.blit(multiplier, multRect)
screen.blit(napkinButton, napkinButtonRect)
screen.blit(deodorButton, deodorButtonRect)
screen.blit(mood, moodRect) #new
# draw all stinkSplats on screen
for item in stinkArray:
screen.blit(stinkSplat, item)
# render text on screen
scoretext = myfont.render("Score {0}".format(score),1,(0,0,0))
upgradetext = myfont.render("Upgrade Cost: {0}".format(upgradeCost),1,(0,0,0))
multipliertext = myfont.render("Multiplier: x{0}".format(scoreMultiplier),1,(0,0,0))
# draw text onto screen
screen.blit(scoretext, (5,10))
screen.blit(upgradetext, (5,30))
screen.blit(multipliertext, (5,50))
# object for getting mouse position and click value
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
        # render the deodorant mouse cursor
if deodorButtonPressed == True:
# making the cursor invisible
pygame.mouse.set_visible( False )
            # place the deodorant image over the cursor
deodorCursorRect.center = pygame.mouse.get_pos()
            # display the deodorant image over the cursor
screen.blit(deodorCursor, deodorCursorRect)
# activate multiplier if button is pressed
if (multRect.x + 130 > mouse[0] > multRect.x and multRect.y + 130 > mouse[1] > multRect.y) and score >= upgradeCost:
if click[0] == 1:
score -= upgradeCost
scoreMultiplier *= 2
upgradeCost *= scoreMultiplier
clickValue *= scoreMultiplier
# reset left click[0] to 0
click = pygame.mouse.get_pressed()
        # activate deodorant if button is pressed
if (deodorButtonRect.x + 130 > mouse[0] > deodorButtonRect.x and deodorButtonRect.y + 130 > mouse[1] > deodorButtonRect.y):
if click[0] == 1:
deodorButtonPressed = True
# reset left click[0] to 0
click = pygame.mouse.get_pressed()
        # if stinkArray is empty, return to gameLoop()
if len(stinkArray) == 0:
pygame.mouse.set_visible(True)
end = time.time()
lapse = end - start
print("end - start: ", end - start)
if lapse >= 1 and lapse < 1.5:
hpoints += 1
happyMeter()
if lapse >= 1.5 and lapse < 2:
hpoints -= 1
happyMeter()
if lapse >= 2:
hpoints -= 2
happyMeter()
return
# decrement score (extreme decrease)
score -= int(((50 * scoreMultiplier) / 5))
# update the screen
pygame.display.update()
clock.tick(10)
# ===================== randomNumber() =============================
# returns a random number. This is called by the events and the
# game loop.
# ==================================================================
def randomNumber():
# use current time as seed
seed = int(time.time() % 60) + 1
# seed the PRNG instance
myGen.setSeed(seed)
    # generate a pseudo-random number in the range 1 to 100
prn = (myGen.next_prn() % 100) + 1
return prn
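# randomNumber() relies on a generator object `myGen` that is created
# elsewhere in this program and exposes setSeed() and next_prn(). The real
# implementation is not shown here; the class below is only an illustrative
# sketch of that interface, using a simple linear congruential generator.
class _ExampleLCG:
    def __init__(self, seed=1):
        self.state = int(seed)
    def setSeed(self, seed):
        self.state = int(seed)
    def next_prn(self):
        # one LCG step using the common Numerical Recipes constants
        self.state = (1664525 * self.state + 1013904223) % (2 ** 32)
        return self.state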
# ======================== getChance() =========================
# getChance triggers the events based on a random number.
# This number is multiplied by chance, based on the happy meter.
# ===============================================================
def getChance():
# if score is a multiple of 50 (50 was picked arbitrarily),
# trigger random number generation
if score % 50 == 0:
randNum = randomNumber()
print("Random number: ", randNum)
# if random number is <= 50, trigger mudslide event
if(randNum*chance <= 50):
mudslide()
#if random number is >= 70, trigger stinky event
if(randNum*chance >= 70):
stinky()
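# Worked example of the thresholds above (assuming chance == 1): a draw of 40
# satisfies randNum*chance <= 50 and triggers mudslide(), a draw of 85
# satisfies randNum*chance >= 70 and triggers stinky(), and draws of 51-69
# trigger neither event.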
# ======================== happyMeter() =========================
# happyMeter changes the image of the faces based on the
# points. Chance is updated. Is called | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for healthcare.deploy.templates.data_project.
These tests check that the template is free from syntax errors and generates
the expected resources.
To run tests, run `python -m unittest tests.data_project_test` from the
templates directory.
"""
import unittest
from templates import data_project
class TestDataProject(unittest.TestCase):
def test_expansion_local_logging(self):
class FakeContext(object):
env = {
'deployment': 'my-deployment',
'project': 'my-project',
}
properties = {
'has_organization': True,
'remove_owner_user': '<EMAIL>',
'owners_group': '<EMAIL>',
'auditors_group': '<EMAIL>',
'data_readwrite_groups': [
'<EMAIL>',
'<EMAIL>',
],
'data_readonly_groups': [
'<EMAIL>',
'<EMAIL>',
],
'local_audit_logs': {
'logs_gcs_bucket': {
'location': 'US',
'storage_class': 'MULTI_REGIONAL',
'ttl_days': 365,
},
'logs_bigquery_dataset': {
'location': 'US',
},
},
'bigquery_datasets': [
{
'name': 'us_data',
'location': 'US',
},
{
'name': 'euro_data',
'location': 'EU',
},
],
'data_buckets': [
{
'name_suffix': '-nlp-bucket',
'location': 'US-CENTRAL1',
'storage_class': 'REGIONAL',
'expected_users': [
'<EMAIL>',
'<EMAIL>',
],
},
{
'name_suffix': '-other-bucket',
'location': 'US-EAST1',
'storage_class': 'REGIONAL',
},
{
'name_suffix': '-euro-bucket',
'location': 'EUROPE-WEST1',
'storage_class': 'REGIONAL',
'expected_users': ['<EMAIL>'],
},
],
'pubsub': {
'topic': 'test-topic',
'subscription': 'test-subscription',
'publisher_account': (
'<EMAIL>'),
'ack_deadline_sec': 100
},
'enabled_apis': [
'cloudbuild.googleapis.com',
'cloudresourcemanager.googleapis.com', # Ignored by script.
'containerregistry.googleapis.com',
'deploymentmanager.googleapis.com', # Ignored by script.
]
}
generated = data_project.generate_config(FakeContext())
expected = {
'resources': [{
'name': 'set-project-bindings-get-iam-policy',
'action': ('gcp-types/cloudresourcemanager-v1:'
'cloudresourcemanager.projects.getIamPolicy'),
'properties': {'resource': 'my-project'},
'metadata': {'runtimePolicy': ['UPDATE_ALWAYS']},
}, {
'name': 'set-project-bindings-patch-iam-policy',
'action': ('gcp-types/cloudresourcemanager-v1:'
'cloudresourcemanager.projects.setIamPolicy'),
'properties': {
'resource': 'my-project',
'policy': '$(ref.set-project-bindings-get-iam-policy)',
'gcpIamPolicyPatch': {
'add': [
{
'role': 'roles/iam.securityReviewer',
'members': [
'group:<EMAIL>'
],
}, {
'role': 'roles/owner',
'members': [
'group:<EMAIL>'
],
}
],
'remove': [{
'role': 'roles/owner',
'members': ['user:<EMAIL>'],
}],
},
},
}, {
'name': 'my-project-logs',
'type': 'storage.v1.bucket',
'accessControl': {
'gcpIamPolicy': {
'bindings': [{
'role': 'roles/storage.admin',
'members': [
'group:<EMAIL>'
]
}, {
'role': 'roles/storage.objectCreator',
'members': ['group:<EMAIL>']
}]
}
},
'properties': {
'location': 'US',
'storageClass': 'MULTI_REGIONAL',
'lifecycle': {
'rule': [{
'action': {'type': 'Delete'},
'condition': {
'isLive': True,
'age': 365
}
}]
}
}
}, {
'name': 'audit-logs-to-bigquery',
'type': 'logging.v2.sink',
'properties': {
'sink': 'audit-logs-to-bigquery',
'uniqueWriterIdentity': True,
'destination': ('bigquery.googleapis.com/projects/my-project/'
'datasets/audit_logs'),
'filter': 'logName:"logs/cloudaudit.googleapis.com"',
}
}, {
'name': 'create-big-query-dataset-us_data',
'type': 'bigquery.v2.dataset',
'properties': {
'datasetReference': {'datasetId': 'us_data'},
'location': 'US',
},
}, {
'name': 'update-big-query-dataset-us_data',
'action': 'gcp-types/bigquery-v2:bigquery.datasets.patch',
'properties': {
'projectId': 'my-project',
'datasetId': 'us_data',
'access': [{
'role': 'OWNER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'READER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'READER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'WRITER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'WRITER',
'groupByEmail': '<EMAIL>',
}],
},
'metadata': {'dependsOn': ['create-big-query-dataset-us_data']},
}, {
'name': 'create-big-query-dataset-euro_data',
'type': 'bigquery.v2.dataset',
'properties': {
'datasetReference': {'datasetId': 'euro_data'},
'location': 'EU',
},
}, {
'name': 'update-big-query-dataset-euro_data',
'action': 'gcp-types/bigquery-v2:bigquery.datasets.patch',
'properties': {
'projectId': 'my-project',
'datasetId': 'euro_data',
'access': [{
'role': 'OWNER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'READER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'READER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'WRITER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'WRITER',
'groupByEmail': '<EMAIL>',
}],
},
'metadata': {'dependsOn': ['create-big-query-dataset-euro_data']},
}, {
'name': 'my-project-nlp-bucket',
'type': 'storage.v1.bucket',
'metadata': {'dependsOn': ['my-project-logs']},
'accessControl': {
'gcpIamPolicy': {
'bindings': [{
'role': 'roles/storage.admin',
'members': ['group:<EMAIL>']
}, {
'role': 'roles/storage.objectAdmin',
'members': [
'group:<EMAIL>',
'group:<EMAIL>',
]
}, {
'role': 'roles/storage.objectViewer',
'members': [
'group:<EMAIL>',
'group:<EMAIL>',
]
}]
}
},
'properties': {
'location': 'US-CENTRAL1',
'versioning': {'enabled': True},
'storageClass': 'REGIONAL',
'logging': {
'logBucket': 'my-project-logs'
}
}
}, {
'name': 'unexpected-access-my-project-nlp-bucket',
'type': 'logging.v2.metric',
'properties': {
'filter': ('resource.type=gcs_bucket AND '
'logName=projects/my-project/logs/'
'cloudaudit.googleapis.com%2Fdata_access AND '
'protoPayload.resourceName=projects/_/buckets/'
'my-project-nlp-bucket AND '
'protoPayload.authenticationInfo.principalEmail!=('
'<EMAIL> AND '
'auth_<EMAIL>)'),
'description':
'Count of unexpected data access to my-project-nlp-bucket.',
'labelExtractors': {
'user': ('EXTRACT('
'protoPayload.authenticationInfo.principalEmail)'),
},
'metricDescriptor': {
'labels': [{
'description': 'Unexpected user',
'key': 'user',
'valueType': 'STRING'
}],
'unit': '1',
'metricKind': 'DELTA',
'valueType': 'INT64'
},
'metric': 'unexpected-access-my-project-nlp-bucket'
}
}, {
'name': 'my-project-other-bucket',
'type': 'storage.v1.bucket',
'metadata': {'dependsOn': ['my-project-logs']},
'accessControl': {
'gcpIamPolicy': {
'bindings': [{
'role': 'roles/storage.admin',
'members': ['group:<EMAIL>']
}, {
'role': 'roles/storage.objectAdmin',
'members': [
'group:<EMAIL>',
'group:<EMAIL>',
]
}, {
'role': 'roles/storage.objectViewer',
'members': [
'group:<EMAIL>',
'group:<EMAIL>',
]
}]
}
},
'properties': {
'location': 'US-EAST1',
'versioning': {'enabled': True},
'storageClass': 'REGIONAL',
'logging': {
'logBucket': 'my-project-logs'
}
}
}, {
'name': 'my-project-euro-bucket',
'type': 'storage.v1.bucket',
'metadata': {'dependsOn': ['my-project-logs']},
'accessControl': {
'gcpIamPolicy': {
'bindings': [{
'role': 'roles/storage.admin',
'members': ['group:<EMAIL>']
}, {
'role': 'roles/storage.objectAdmin',
'members': [
'group:<EMAIL>',
'group:<EMAIL>',
]
}, {
'role': 'roles/storage.objectViewer',
'members': [
'group:<EMAIL>',
'group:<EMAIL>',
]
}]
}
},
'properties': {
'location': 'EUROPE-WEST1',
'versioning': {'enabled': True},
'storageClass': 'REGIONAL',
'logging': {
'logBucket': 'my-project-logs'
}
}
}, {
'name': 'unexpected-access-my-project-euro-bucket',
'type': 'logging.v2.metric',
'properties': {
'filter': ('resource.type=gcs_bucket AND '
'logName=projects/my-project/logs/'
'cloudaudit.googleapis.com%2Fdata_access AND '
'protoPayload.resourceName=projects/_/buckets/'
'my-project-euro-bucket AND '
'protoPayload.authenticationInfo.principalEmail!=('
'<EMAIL>)'),
'description': ('Count of unexpected data access to '
'my-project-euro-bucket.'),
'labelExtractors': {
'user': ('EXTRACT('
'protoPayload.authenticationInfo.principalEmail)'),
},
'metricDescriptor': {
'labels': [{
'description': 'Unexpected user',
'key': 'user',
'valueType': 'STRING'
}],
'unit': '1',
'metricKind': 'DELTA',
'valueType': 'INT64'
},
'metric': 'unexpected-access-my-project-euro-bucket'
}
}, {
'name': 'test-topic',
'type': 'pubsub.v1.topic',
'properties': {
'topic': 'test-topic',
},
'accessControl': {
'gcpIamPolicy': {
'bindings': [
{
'role': 'roles/pubsub.publisher',
'members': [
('serviceAccount:cloud-healthcare-eng'
'@system.gserviceaccount.com')
],
},
],
},
},
}, {
'name': 'test-subscription',
'type': 'pubsub.v1.subscription',
'properties': {
'subscription': 'test-subscription',
'topic': 'projects/my-project/topics/test-topic',
'ackDeadlineSeconds': 100,
},
'accessControl': {
'gcpIamPolicy': {
'bindings': [{
'role':
'roles/pubsub.editor',
'members': [
'group:<EMAIL>',
('group:another-readwrite-<EMAIL>@googlegroups.'
'com'),
],
},],
},
},
'metadata': {
'dependsOn': ['test-topic'],
},
}, {
'name': 'iam-policy-change-count',
'type': 'logging.v2.metric',
'properties': {
'filter': ('\n'
' resource.type=project AND\n'
' protoPayload.serviceName='
'cloudresourcemanager.googleapis.com AND\n'
' protoPayload.methodName=SetIamPolicy'),
'description': 'Count of IAM policy changes.',
'labelExtractors': {
'user': ('EXTRACT('
'protoPayload.authenticationInfo.principalEmail)'),
},
'metricDescriptor': {
'labels': [{
'description': 'Unexpected user',
'key': 'user',
'valueType': 'STRING'
}],
'unit': '1',
'metricKind': 'DELTA',
'valueType': 'INT64'
},
'metric': 'iam-policy-change-count'
}
}, {
'name': 'bucket-permission-change-count',
'type': 'logging.v2.metric',
'properties': {
'filter': (
'\n'
' resource.type=gcs_bucket AND\n'
' protoPayload.serviceName=storage.googleapis.com '
'AND\n'
' (protoPayload.methodName=storage.setIamPermissions '
'OR\n'
' protoPayload.methodName=storage.objects.update)'),
'description':
'Count of GCS permissions changes.',
'labelExtractors': {
'user': ('EXTRACT('
'protoPayload.authenticationInfo.principalEmail)'),
},
'metricDescriptor': {
'labels': [{
'description': 'Unexpected user',
'key': 'user',
'valueType': 'STRING'
}],
'unit':
'1',
'metricKind':
'DELTA',
'valueType':
'INT64'
},
'metric':
'bucket-permission-change-count'
}
}, {
'name': 'audit-configs-get-iam-etag',
'action': ('gcp-types/cloudresourcemanager-v1:'
'cloudresourcemanager.projects.getIamPolicy'),
'properties': {
'resource': 'my-project',
},
'metadata': {
'runtimePolicy': ['UPDATE_ALWAYS'],
'dependsOn': ['set-project-bindings-patch-iam-policy'],
},
}, {
'name': 'audit-configs-patch-iam-policy',
'action': ('gcp-types/cloudresourcemanager-v1:'
'cloudresourcemanager.projects.setIamPolicy'),
'properties': {
'updateMask': 'auditConfigs,etag',
'resource': 'my-project',
'policy': {
'auditConfigs': [{
'auditLogConfigs': [
{'logType': 'ADMIN_READ'},
{'logType': 'DATA_WRITE'},
{'logType': 'DATA_READ'},
],
'service': 'allServices',
}],
'etag': '$(ref.audit-configs-get-iam-etag.etag)',
},
},
'metadata': {
'dependsOn': ['audit-configs-get-iam-etag'],
},
}, {
'name': 'enable-cloudbuild',
'action': ('gcp-types/servicemanagement-v1:'
'servicemanagement.services.enable'),
'properties': {
'consumerId': 'project:my-project',
'serviceName': 'cloudbuild.googleapis.com'
},
'metadata': {
'dependsOn': ['audit-configs-patch-iam-policy'],
},
}, {
'name': 'enable-containerregistry',
'action': ('gcp-types/servicemanagement-v1:'
'servicemanagement.services.enable'),
'properties': {
'consumerId': 'project:my-project',
'serviceName': 'containerregistry.googleapis.com'
},
'metadata': {
'dependsOn': ['audit-configs-patch-iam-policy'],
},
}]
}
self.assertEqual(generated, expected)
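  # Note: unittest truncates long assertion-failure diffs by default. When a
  # comparison against a nested structure as large as `expected` fails,
  # setting the standard TestCase attribute `self.maxDiff = None` (for
  # example in a setUp method) makes the full diff visible; this is only an
  # optional aid and is not required for the tests to pass.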
  def test_expansion_remote_logging_no_org(self):
class FakeContext(object):
env = {
'deployment': 'my-deployment',
'project': 'my-project',
}
properties = {
'has_organization': False,
'owners_group': '<EMAIL>',
'editors_group': '<EMAIL>',
'auditors_group': '<EMAIL>',
'additional_project_permissions': [
{
'roles': ['roles/editor',],
'members': ['serviceAccount:<EMAIL>',
'serviceAccount:<EMAIL>']
}, {
'roles': ['roles/bigquery.dataViewer',
'roles/storage.objectViewer'],
'members': ['group:<EMAIL>',
'user:<EMAIL>']
},
],
'data_readwrite_groups': ['<EMAIL>'],
'data_readonly_groups': ['<EMAIL>'],
'remote_audit_logs': {
'audit_logs_project_id': 'my-audit-logs',
'logs_gcs_bucket_name': 'some_remote_bucket',
'logs_bigquery_dataset_id': 'some_remote_dataset',
},
'bigquery_datasets': [
{
'name': 'us_data',
'location': 'US',
},
],
'data_buckets': [
{
'name_suffix': '-data',
'location': 'US',
'storage_class': 'MULTI_REGIONAL',
},
],
}
generated = data_project.generate_config(FakeContext())
expected = {
'resources': [{
'name': 'set-project-bindings-get-iam-policy',
'action': ('gcp-types/cloudresourcemanager-v1:'
'cloudresourcemanager.projects.getIamPolicy'),
'properties': {'resource': 'my-project'},
'metadata': {'runtimePolicy': ['UPDATE_ALWAYS']},
}, {
'name': 'set-project-bindings-patch-iam-policy',
'action': ('gcp-types/cloudresourcemanager-v1:'
'cloudresourcemanager.projects.setIamPolicy'),
'properties': {
'resource': 'my-project',
'policy': '$(ref.set-project-bindings-get-iam-policy)',
'gcpIamPolicyPatch': {
'add': [
{
'role': 'roles/bigquery.dataViewer',
'members': [
'group:<EMAIL>',
'user:<EMAIL>',
],
}, {
'role': 'roles/editor',
'members': [
'group:<EMAIL>',
'serviceAccount:<EMAIL>',
'serviceAccount:<EMAIL>',
],
}, {
'role': 'roles/iam.securityReviewer',
'members': [
'group:<EMAIL>',
],
}, {
'role': 'roles/resourcemanager.projectIamAdmin',
'members': [
'group:<EMAIL>',
],
}, {
'role': 'roles/storage.objectViewer',
'members': [
'group:<EMAIL>',
'user:<EMAIL>',
],
},
],
},
},
}, {
'name': 'audit-logs-to-bigquery',
'type': 'logging.v2.sink',
'properties': {
'sink': 'audit-logs-to-bigquery',
'uniqueWriterIdentity': True,
'destination': ('bigquery.googleapis.com/projects/'
'my-audit-logs/datasets/some_remote_dataset'),
'filter': 'logName:"logs/cloudaudit.googleapis.com"',
}
}, {
'name': 'create-big-query-dataset-us_data',
'type': 'bigquery.v2.dataset',
'properties': {
'datasetReference': {'datasetId': 'us_data'},
'location': 'US',
},
}, {
'name': 'update-big-query-dataset-us_data',
'action': 'gcp-types/bigquery-v2:bigquery.datasets.patch',
'properties': {
'projectId': 'my-project',
'datasetId': 'us_data',
'access': [{
'role': 'OWNER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'READER',
'groupByEmail': '<EMAIL>',
}, {
'role': 'WRITER',
'groupByEmail': '<EMAIL>',
}],
},
'metadata': {'dependsOn': ['create-big-query-dataset-us_data']},
}, | |
import sys
import os
import time
import errno
import signal
import select
import traceback
from multiprocessing.forking import Popen
JOIN_RESTART_POLICY = 0
TERMINATE_RESTART_POLICY = 1
def get_current_process():
class CurrentProcess(Process):
def __init__(self, *args, **kwargs):
self._child = None
self._parent = None
self._parent_pid = None
            self.name = 'MainProcess {0}'.format(os.getpid())
self.daemonic = False
@property
def pid(self):
return os.getpid()
return CurrentProcess()
class ProcessOpen(object):
"""ProcessOpen forks the current process and runs a Process object
create() method in the child process.
If the child process fork fails, the Process object cleanup routine method
is called to make sure the object _child attribute is set to None.
The ProcessOpen objects are not reusable and are only meant to be used as a
one shot process execution tracker.
:param process: Process whom create method should be called in the child process
:type process: pkit.process.Process
"""
READY_FLAG = "READY"
def __init__(self, process, wait=False, wait_timeout=1):
sys.stdout.flush()
sys.stderr.flush()
self.process = process
self.returncode = None
self.ready = None
read_pipe, write_pipe = os.pipe()
self.pid = os.fork()
if self.pid == 0:
signal.signal(signal.SIGTERM, self.on_sigterm)
            # Once the child process has its signal handler
            # bound, we notify the parent process through a pipe
if wait is True:
self._send_ready_flag(write_pipe, read_pipe)
returncode = self.process.create()
sys.stdout.flush()
sys.stderr.flush()
os._exit(returncode)
else:
if wait is True:
self.ready = self._poll_ready_flag(read_pipe, write_pipe, wait_timeout)
def _send_ready_flag(self, write_pipe, read_pipe=None):
"""Ran in the forked child process"""
if read_pipe is not None:
os.close(read_pipe)
write_pipe = os.fdopen(write_pipe, 'w', 128)
write_pipe.write(self.READY_FLAG)
write_pipe.close()
def _poll_ready_flag(self, read_pipe, write_pipe=None, timeout=0):
"""Polls the child process read-only pipe for incoming data"""
if write_pipe is not None:
os.close(write_pipe)
try:
read, _, _ = select.select([read_pipe], [], [], timeout)
except select.error as e:
if hasattr(e, 'errno') and e.errno == errno.EINTR:
return False # If select is interrupted, we don't care about ready flag
if len(read) > 0:
return True
return False
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
while True:
try:
pid, sts = os.waitpid(self.pid, flag)
except os.error as e:
if e.errno == errno.EINTR:
continue
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
else:
break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
"""Polls the forked process for it's status.
It uses os.waitpid under the hood, and checks for the
forked process exit code status.
Poll method source code: http://hg.python.org/cpython/file/ab05e7dd2788/Lib/multiprocessing/forking.py
:param timeout: time interval to poll the forked process status
:type timeout: float
:returns: the forked process exit code status
:rtype: int
"""
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
returncode = self.poll()
if returncode is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
self.process.clean()
return returncode
# def wait(self, timeout=None):
# """Polls the forked process for it's status.
# It uses os.waitpid under the hood, and checks for the
# forked process exit code status.
# Poll method source code: http://hg.python.org/cpython/file/ab05e7dd2788/Lib/multiprocessing/forking.py
# :param timeout: time interval to poll the forked process status
# :type timeout: float
# :returns: the forked process exit code status
# :rtype: int
# """
# returncode = super(ProcessOpen, self).wait(timeout)
# self.process.clean()
# return returncode
def terminate(self):
"""Kills the running forked process using the SIGTERM signal
The method checks if the process is actually running, and
will therefore send it a SIGTERM signal, and wait for it to
exit before it returns.
The process object cleanup routine method is then called
        to make sure the object's _child attribute is set to None
"""
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
if self.wait(timeout=0.1) is None:
raise
self.returncode = 1
return self.returncode
def on_sigterm(self, signum, sigframe):
"""Subprocess sigterm signal handler"""
self.returncode = 1
os._exit(1)
class Process(object):
"""Process objects represent activity that is run in a child process
:param target: callable object to be invoked in the run method
:type target: callable
:param name: sets the process name
:type name: str
:param on_exit: callback to be invoked on process exit,
will be provided with current process as first
argument. Should be of the form: lambda process: ...
:type on_exit: callable
:param args: arguments to provide to the target
:type args: tuple
:param kwargs: keyword arguments to provide to the target
:type kwargs: dict
"""
def __init__(self, target=None, name=None,
parent=False, on_exit=None, args=(), kwargs={}):
self._current = get_current_process()
self._parent_pid = self._current.pid
self._child = None
self._parent = None
self._exitcode = None
self._on_exit = on_exit
self.name = name or self.__class__.__name__
self.daemonic = False
self.target = target
self.target_args = tuple(args)
self.target_kwargs = dict(kwargs)
# Bind signals handlers
signal.signal(signal.SIGCHLD, self.on_sigchld)
signal.siginterrupt(signal.SIGCHLD, False)
def __str__(self):
return '<{0} {1}>'.format(self.name, self.pid)
def __repr__(self):
return self.__str__()
def on_sigchld(self, signum, sigframe):
if self._child is not None and self._child.pid:
pid, status = os.waitpid(self._child.pid, os.WNOHANG)
if pid == self._child.pid:
self._exitcode = os.WEXITSTATUS(status)
if self._on_exit:
self._on_exit(self)
self.clean()
def create(self):
"""Method to be called when the process child is forked"""
# Global try/except designed to catch
# SystemExit and any uncaught exceptions
        # during run() method execution.
try:
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
# Run the process target and cleanup
# the instance afterwards.
self._current = self
self.run()
returncode = 0
        except SystemExit as err:
if not err.args:
returncode = 1
elif isinstance(err.args[0], int):
returncode = err.args[0]
else:
sys.stderr.write(str(err.args[0]) + '\n')
sys.stderr.flush()
returncode = 0 if isinstance(err.args[0], str) else 1
except:
returncode = 1
sys.stderr.write('Process {} with pid {}:\n'.format(self.name, self.pid))
sys.stderr.flush()
traceback.print_exc()
return returncode
def clean(self):
"""Cleans up the object child process status"""
self._current = get_current_process()
if self._child is not None:
self._child = None
def run(self):
"""Runs the target with provided args and kwargs in a fork"""
if self.target:
self.target(*self.target_args, **self.target_kwargs)
def start(self, wait=False, wait_timeout=0):
"""Starts the Process"""
if os.getpid() != self._parent_pid:
raise RuntimeError(
"Can only start a process object created by current process"
)
if self._child is not None:
raise RuntimeError("Cannot start a process twice")
self._child = ProcessOpen(self, wait=wait, wait_timeout=wait_timeout)
child_pid = self._child.pid
self._current = self
return child_pid
def join(self, timeout=None):
"""Awaits on Process exit
:param timeout: Time to wait for the process exit
:type timeout: float
"""
if self._child is None:
raise RuntimeError("Can only join a started process")
try:
self._exitcode = self._child.wait(timeout)
except OSError:
pass
def terminate(self, wait=False):
"""Forces the process to stop
The method checks if the process is actually running, and
will therefore send it a SIGTERM signal, and wait for it to
exit before it returns.
"""
if self._child is None:
raise RuntimeError("Can only terminate a started process")
self._child.terminate()
if wait:
self.wait(until=lambda p, *args: p._child is None)
def restart(self, policy=JOIN_RESTART_POLICY):
        if policy not in (JOIN_RESTART_POLICY, TERMINATE_RESTART_POLICY):
            raise ValueError("Invalid restart policy supplied")
        if policy == JOIN_RESTART_POLICY:
            self.join()
        elif policy == TERMINATE_RESTART_POLICY:
pid_dump = self.pid
self.terminate()
os.waitpid(pid_dump, 0)
self.start()
def wait(self, until=None, args=(), timeout=None):
"""Wait until the provided predicate about the Process
object becomes true.
Default behavior (if until is not provided) is to call
wait on Process subprocess using the provided timeout.
The method can be useful in some specific case where you
would want to wait for specific process states before taking
any other actions.
Typically, it could be useful when you'd like to wait
for a sigterm to be taken in account by the process before
taking any other actions.
example:
p = Process(target=lambda: time.sleep(100))
p.start()
os.kill(p.pid, signal.SIGTERM)
p.wait()
:param until: Callable predicate to be evaluated against the
process. Takes a process obj and an args tuple
as input.
:type until: callable
:param args: Args to be supplied to the until predicate callable,
default value is an empty tuple.
:type args: tuple
:param timeout: timeout in seconds
:type timeout: float
"""
def default_until(self, *args):
if self._child is not None:
try:
self._child.wait(timeout)
except OSError:
pass
return True
if until is not None and not hasattr(until, '__call__'):
raise ValueError("Until parameter must be a callable")
timeout = timeout or 0.1
until = until or default_until
while until(self, *args) is False:
time.sleep(0.1)
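# Illustrative usage of Process (a minimal sketch based on the class
# docstring above; the target function and its message are hypothetical):
if __name__ == '__main__':
    def _demo_target(message):
        print(message)

    demo = Process(target=_demo_target, args=('hello from the child process',))
    demo.start()
    demo.join(timeout=1)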
| |
import re
from df_engine.core import Actor, Context
from scenario.qcfg import g
from nltk import grammar, parse
from lxml import etree
import urllib.request, gzip, io
import scenario.config as config
# Create the CFG grammar from a string saved in qcfg.py
gram = grammar.FeatureGrammar.fromstring(g)
# Get the exoplanet data and parse it into an XML tree
url = "https://github.com/OpenExoplanetCatalogue/oec_gzip/raw/master/systems.xml.gz"
oec = etree.parse(gzip.GzipFile(fileobj=io.BytesIO(urllib.request.urlopen(url).read())))
# Predefined response templates in German and English:
speech_acts = {'another_search':{'de':'Möchtest du eine neue Suche starten?','en':'Would you like to try another search?'},
               'no_understand':{'de':'Ich habe diese Anfrage nicht verstanden:','en':'I did not understand the query'},
'spelling':{'de':'Entschuldige, bitte achte auf korrekte Rechtschreibung. Drücke eine Taste, um es erneut zu versuchen.','en':'Sorry, please spell correctly. Press any key to try again.'},
'more_info':{'de':'Möchtest du mehr über einen dieser Planeten erfahren?','en':'Would you like more info on one of these planets?'},
'initiate':{'de':'Bitte gib deine Anfrage ein.','en':'Please enter your search query!'},
'follow_up':{'de':'OK, über welchen Planeten?','en':'Alright, for which planet?'},
'fail':{'de':'Oh, hier ist etwas schief gelaufen. Drücke eine Taste, um von Vorne zu beginnen.','en':'Oh, something went wrong. Press any key to start from the beginning.'},
'not_found':{'de':'Ich habe keine Planeten gefunden für die Anfrage ','en':'I did not find any planet for the query '}}
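# Example lookup (values taken directly from the table above):
#   speech_acts['initiate']['en']  ->  'Please enter your search query!'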
def find_planets(query,gram):
'''
Process the query and parse it with the CFG grammar.
If the query cannot be parsed the parser returns a ValueError or is empty
and the function will return an error. If the query can be parsed all created parse trees are iterated over
and their 'SEM' feature holding the created Xpath query is extracted. It is then verified that
    this Xpath query is well-formed (e.g., './/planet[radius=1]'), and if it is, then the XML exoplanet database is queried.
INPUT:
- query: string; the original input query, e.g., 'planets with a radius of 1'
- gram: NLTK FeatureGrammar object created from grammar defined in qcfg.py
OUTPUT: tuple:
- planet_names: list of lists; if the length of the outer list is 1, the query had 1 part, e.g. 'planet with a radius of 1';
if the length of the outer list is > 1, the query had multiple parts, e.g.
'2 planets with a radius of 1 and 1 planet with a mass of 1'
- language; either 'de' or 'en'; this is the CFG tree feature 'L'
- normalized_query; query processed with normalize() and serialize()
- xml_queries: the queries in Xpath format
'''
    # remove numbers from the query and substitute numbered '#NUM<i>' tags
query,number_dict = serialize(query)
query = normalize(query)
normalized_query = (f'Normalized NL Query: {query}')
# parse query
parser = parse.FeatureEarleyChartParser(gram)
try:
trees = list(parser.parse(query.split()))
except ValueError:
return('QueryError',0,0,0)
if not trees:
return('QueryError',0,0,0)
    planet_names = []
    xml_queries = 'XML Queries:[]'
for i,t in enumerate(trees):
answer = trees[i].label()['SEM']
language = trees[i].label()['L']
        # join the parts of the feature 'SEM' into one continuous string:
answer = ''.join([s for s in answer if s])
# substitute back in the numbers:
for tag in number_dict.keys():
answer = re.sub(tag,number_dict[tag],answer)
subqueries = answer.split(';')
if are_wellformed(subqueries):
planet_names = []
found = False
try:
for q in subqueries:
planets = oec.xpath(q)
if planets:
found = True
planet_names.append(get_names(planets))
xml_queries=f'XML Queries:{subqueries}'
if found:
return (planet_names,language,normalized_query,xml_queries)
else:
continue
except:
continue
return (0,language,normalized_query,xml_queries)
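# Illustrative call (mirrors how process_query() below uses this function;
# results depend on the live catalogue, so no concrete output is shown):
#   planet_names, language, normalized_query, xml_queries = \
#       find_planets('planets with a radius of 1', gram)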
def serialize(query):
'''
Replace the numbers in the query with numbered tags,
e.g., 'planets with a radius of 1' -> 'planets with a radius of #NUM0'.
Create a dictionary in order to later reassign the numbers to the tags.
INPUT: query; the original query
OUTPUT:
- query with replaced numbers, e.g., 'planets with a radius of #NUM0'
- number_dict: dictionary with number tags as keys (e.g., #NUM0)
and numbers as values (e.g., 1)
'''
number_dict = dict()
numbers = re.findall(r'(\d+\.*\d*)',query)
number_tags = ['#NUM'+str(i) for i in range(len(numbers))]
for i,tag in enumerate(number_tags):
number_dict[tag] = numbers[0]
        query = re.sub(r'(?<!NUM)(' + re.escape(numbers[0]) + r')', tag, query, count=1)
numbers = re.findall(r'(?<!NUM)(\d+\.*\d*)',query)
return (query,number_dict)
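# Illustrative call (behaviour follows the docstring above):
#   serialize('planets with a radius of 1.5')
#   ->  ('planets with a radius of #NUM0', {'#NUM0': '1.5'})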
def normalize(query):
'''
Normalize the input query and remove redundant material
that is not relevant for the parsing of the query.
'''
query = query.lower()
query = re.sub(r'[^\w\s#]','',query)
query = re.sub(
r'zeig mir|show me|are there any|are there|gibt es|can you show me|look for|search|suche?|finde?',
'',query)
return query
def are_wellformed(qs):
'''
Check if the given queries are in a well-formed Xpath format.
RETURN: true if all queries are well-formed, false otherwise
'''
    wf = re.compile(r'\.//.+?\[.+?\]|\(\.//.+?\[.+?\]\)\[.+?\]')
for q in qs:
if not wf.match(q):
return 0
return 1
def get_names(elements):
names = []
for e in elements:
names.append(e.xpath("name")[0].text)
return names
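# Illustrative use against the OEC tree loaded above (planet names depend on
# the live catalogue, so no concrete output is shown; the XPath shape is the
# one accepted by are_wellformed()):
#   matches = oec.xpath(".//planet[radius=1]")
#   get_names(matches)  ->  list of planet name strings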
def give_response(planet_names,query,language):
'''
Provide search result response in case the search yielded planets.
The language has been extracted from the parsed and processed query
and determines the language of the response.
INPUT:
- planet_names: list of lists; if the length of the outer list is 1, the query had 1 part, e.g. 'planet with a radius of 1';
if the length of the outer list is > 1, the query had multiple parts, e.g.
'2 planets with a radius of 1 and 1 planet with a mass of 1'
- query: string; the original input query
- language: string
OUTPUT: string built from the planet names embedded in language-specific response templates.
'''
if language == 'de':
response = give_de_response(planet_names,query)
else:
response = give_en_response(planet_names,query)
return response
def give_en_response(planet_names,query):
'''
See calling function give_response()
'''
if len(planet_names) == 1:
if len(planet_names[0]) == 1:
return f"I found the following {len(planet_names[0])} planet for the query '{query}':\n\n{', '.join(planet_names[0])}\n"
elif len(planet_names[0]) > 1:
return f"I found the following {len(planet_names[0])} planets for the query '{query}':\n\n{', '.join(planet_names[0])}\n"
elif len(planet_names) > 1:
response = ""
for sqi,names in enumerate(planet_names):
if len(names) == 1:
response += f"Here is 1 planet I found for part {sqi + 1} of the query '{query}':\n\n{names[0]}\n\n"
elif len(names) > 1:
response += f"Here are {len(names)} planets I found for part {sqi + 1} of the query '{query}':\n\n{', '.join(names)}\n\n"
else:
response += f"I did not find any planet for part {sqi + 1} of the query '{query}'.\n\n"
return response
def give_de_response(planet_names,query):
'''
See calling function give_response()
'''
if len(planet_names) == 1:
if len(planet_names[0]) == 1:
config.PLANET_FOUND = True
return f"Ich habe den folgenden Planeten gefunden für die Anfrage '{query}':\n\n{', '.join(planet_names[0])}\n"
elif len(planet_names[0]) > 1:
config.PLANET_FOUND = True
return f"Ich habe die folgenden {len(planet_names[0])} Planeten gefunden für die Anfrage '{query}:'\n\n{', '.join(planet_names[0])}\n"
elif len(planet_names) > 1:
response = ""
for sqi,names in enumerate(planet_names):
if len(names) == 1:
config.PLANET_FOUND = True
response += f"Hier ist 1 Planet, den ich für Teil {sqi + 1} der Anfrage '{query}' gefunden habe:\n\n{names[0]}\n\n"
elif len(names) > 1:
config.PLANET_FOUND = True
response += f"Hier sind {len(names)} Planeten, die ich für Teil {sqi + 1} der Anfrage '{query}' gefunden habe:\n\n{', '.join(names)}\n\n"
else:
response += f"Ich habe keine Planeten für Teil {sqi + 1} der Anfrage '{query}' gefunden.\n\n"
return response
def process_query(ctx: Context, actor: Actor, *args, **kwargs):
'''
Initiate the processing of the input query and return a presentation of the search result.
The processing and parsing of the input query is done within find_planets(). The language is extracted
from the parsed query and used to set the language of the response.
OUTPUT: String containing:
- normalized_query: numbers replaced by placeholders that are recognized by the CFG grammar,
lower-cased, punctuation removed
- xml_queries: the converted query in Xpath format.
- response: the presentation of the search result including
- question: follow-up question for the user
'''
config.PLANET_FOUND = False
query = ctx.last_request
planet_names,language,normalized_query,xml_queries = find_planets(query,gram)
if language:
config.LANGUAGE = language
if planet_names == 'QueryError':
response = speech_acts['no_understand'][config.LANGUAGE]
        question = speech_acts['another_search'][config.LANGUAGE]
return f"\n\n{normalized_query}\n{xml_queries}\n\n{response} '{query}'. {question}\n"
else:
if planet_names:
config.PLANET_FOUND = True
response = give_response(planet_names,query,language)
question = speech_acts['more_info'][config.LANGUAGE]
return f'\n\n{normalized_query}\n{xml_queries}\n\n{response}\n{question}\n'
else:
config.PLANET_FOUND = False
response = speech_acts['not_found'][config.LANGUAGE]
question = speech_acts['another_search'][config.LANGUAGE]
return f"\n\n{normalized_query}\n{xml_queries}\n\n{response}'{query}'\n\n{question}\n"
def planet_description(ctx: Context, actor: Actor, *args, **kwargs):
'''
    Retrieve a description from the database via XPath for the planet provided by the user.
OUTPUT: String with description or prompt to provide the correctly spelled name of a planet.
'''
planet = ctx.last_request
try:
description = | |
"""
This script loads in a trained policy neural network and uses it for inference.
Typically this script will be executed on the Nvidia Jetson TX2 board during an
experiment in the Spacecraft Robotics and Control Laboratory at Carleton
University.
Script created: June 12, 2019
@author: Kirk (<EMAIL>)
"""
import tensorflow as tf
import numpy as np
import socket
import time
import threading
from collections import deque
# import code # for debugging
#code.interact(local=dict(globals(), **locals())) # Ctrl+D or Ctrl+Z to continue execution
try:
from settings import Settings
except:
print("You must load the 'manipulator' environment in settings\n\nQuitting.")
raise SystemExit
from build_neural_networks import BuildActorNetwork
assert Settings.ENVIRONMENT == 'manipulator'
# Load an environment to use methods from
environment_file = __import__('environment_' + Settings.ENVIRONMENT) # importing the environment
"""
*# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #*
Deep guidance output in x and y are in the chaser body frame
"""
# Are we testing?
testing = False
CHECK_VELOCITY_LIMITS_IN_PYTHON = True
HARD_CODE_TARGET_SPIN = False
TARGET_SPIN_VALUE = -7*np.pi/180 # [rad/s]
SUCCESSFUL_DOCKING_RADIUS = 0.04 # [m] [default: 0.04] overwrite the successful docking radius defined in the environment
###############################
### User-defined parameters ###
###############################
offset_x = 0 # Position offset of the target in its body frame
offset_y = 0 # Position offset of the target in its body frame
offset_angle = 0 # Angle offset of the target in its body frame
# Do you want to debug with constant accelerations?
DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS = False
constant_Ax = 0 # [m/s^2] in inertial frame
constant_Ay = 0 # [m/s^2] in inertial frame
constant_alpha = 0 # [rad/s^2] in inertial frame
constant_alpha_shoulder = 0 # [rad/s^2]
constant_alpha_elbow = 0# [rad/s^2]
constant_alpha_wrist = 0# [rad/s^2]
def make_C_bI(angle):
    # 2D rotation matrix mapping inertial-frame components into the body
    # frame of a craft whose attitude angle is `angle` (in radians).
    C_bI = np.array([[ np.cos(angle), np.sin(angle)],
                     [-np.sin(angle), np.cos(angle)]]) # [2, 2]
    return C_bI
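# Quick check of the convention above (illustrative only): for a body rotated
# 90 degrees from inertial, an inertial +x unit vector has body-frame
# components [0, -1]:
#   np.matmul(make_C_bI(np.pi / 2), np.array([1.0, 0.0]))  ->  approx. [0., -1.]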
class MessageParser:
def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag):
print("Initializing Message Parser!")
self.client_socket = client_socket
self.messages_to_deep_guidance = messages_to_deep_guidance
self.stop_run_flag = stop_run_flag
self.testing = testing
# Items from the Pi
self.Pi_time = 0
self.Pi_red_x = 0
self.Pi_red_y = 0
self.Pi_red_theta = 0
self.Pi_red_Vx = 0
self.Pi_red_Vy = 0
self.Pi_red_omega = 0
self.Pi_black_x = 0
self.Pi_black_y = 0
self.Pi_black_theta = 0
self.Pi_black_Vx = 0
self.Pi_black_Vy = 0
self.Pi_black_omega = 0
self.shoulder_theta = 0
self.elbow_theta = 0
self.wrist_theta = 0
self.shoulder_omega = 0
self.elbow_omega = 0
self.wrist_omega = 0
print("Done initializing parser!")
def run(self):
print("Running Message Parser!")
# Run until we want to stop
while not self.stop_run_flag.is_set():
if self.testing:
# Assign test values
# Items from the Pi
self.Pi_time = 15
self.Pi_red_x = 3
self.Pi_red_y = 1
self.Pi_red_theta = 0.5
self.Pi_red_Vx = 0
self.Pi_red_Vy = 0
self.Pi_red_omega = 0
self.Pi_black_x = 1
self.Pi_black_y = 1
self.Pi_black_theta = 3.1
self.Pi_black_Vx = 0
self.Pi_black_Vy = 0
self.Pi_black_omega = 0
self.shoulder_theta = 1
self.elbow_theta = 1.2
self.wrist_theta = 0.5
self.shoulder_omega = 0
self.elbow_omega = 0
self.wrist_omega = 0
else:
# It's real
try:
data = self.client_socket.recv(4096) # Read the next value
except socket.timeout:
print("Socket timeout")
continue
data_packet = np.array(data.decode("utf-8").splitlines())
#print('Got message: ' + str(data.decode("utf-8")))
# We received a packet from the Pi
# input_data_array is: [time, red_x, red_y, red_angle, red_vx, red_vy, red_dangle, black_x, black_y, black_angle, black_vx, black_vy, black_dangle, shoulder_angle, elbow_angle, wrist_angle, shoulder_omega, elbow_omega, wrist_omega]
try:
self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega = data_packet.astype(np.float32)
except:
print("Failed data read from jetsonRepeater.py, continuing...")
continue
if HARD_CODE_TARGET_SPIN:
self.Pi_black_omega = TARGET_SPIN_VALUE
# Apply the offsets to the target
offsets_target_body = np.array([offset_x, offset_y])
offsets_target_inertial = np.matmul(make_C_bI(self.Pi_black_theta).T, offsets_target_body)
self.Pi_black_x = self.Pi_black_x - offsets_target_inertial[0]
self.Pi_black_y = self.Pi_black_y - offsets_target_inertial[1]
self.Pi_black_theta = self.Pi_black_theta - offset_angle
# Write the data to the queue for DeepGuidanceModelRunner to use!
""" This queue is thread-safe. If I append multiple times without popping, the data in the queue is overwritten. Perfect! """
#(self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega)
self.messages_to_deep_guidance.append((self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega))
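            # Note (assumption): messages_to_deep_guidance is treated like a collections.deque(maxlen=1),
            # i.e. appending while full silently drops the oldest entry, so DeepGuidanceModelRunner
            # always pops the freshest state (consistent with the docstring above).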
print("Message handler gently stopped")
class DeepGuidanceModelRunner:
def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag):
print("Initializing deep guidance model runner")
self.client_socket = client_socket
self.messages_to_deep_guidance = messages_to_deep_guidance
self.stop_run_flag = stop_run_flag
self.testing = testing
# Initializing a variable to check if we've docked
self.have_we_docked = 0.
# Holding the previous position so we know when SPOTNet gives a new update
self.previousSPOTNet_relative_x = 0.0
# Initialize an environment so we can use its methods
self.environment = environment_file.Environment()
self.environment.reset(False)
# Overwrite the successful docking radius
self.environment.SUCCESSFUL_DOCKING_RADIUS = SUCCESSFUL_DOCKING_RADIUS
# Uncomment this on TF2.0
#tf.compat.v1.disable_eager_execution()
# Clear any old graph
tf.reset_default_graph()
# Initialize Tensorflow, and load in policy
self.sess = tf.Session()
# Building the policy network
self.state_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.OBSERVATION_SIZE], name = "state_placeholder")
self.actor = BuildActorNetwork(self.state_placeholder, scope='learner_actor_main')
# Loading in trained network weights
print("Attempting to load in previously-trained model\n")
saver = tf.train.Saver() # initialize the tensorflow Saver()
# Try to load in policy network parameters
try:
ckpt = tf.train.get_checkpoint_state('../')
saver.restore(self.sess, ckpt.model_checkpoint_path)
print("\nModel successfully loaded!\n")
except (ValueError, AttributeError):
print("Model: ", ckpt.model_checkpoint_path, " not found... :(")
raise SystemExit
print("Done initializing model!")
def run(self):
print("Running Deep Guidance!")
counter = 1
# Parameters for normalizing the input
relevant_state_mean = np.delete(Settings.STATE_MEAN, Settings.IRRELEVANT_STATES)
relevant_half_range = np.delete(Settings.STATE_HALF_RANGE, Settings.IRRELEVANT_STATES)
# To log data
data_log = []
# Run zeros through the policy to ensure all libraries are properly loaded in
deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:np.zeros([1, Settings.OBSERVATION_SIZE])})[0]
# Run until we want to stop
        while not self.stop_run_flag.is_set():
# Total state is [relative_x, relative_y, relative_vx, relative_vy, relative_angle, relative_angular_velocity, chaser_x, chaser_y, chaser_theta, target_x, target_y, target_theta, chaser_vx, chaser_vy, chaser_omega, target_vx, target_vy, target_omega] *# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #*
# Network input: [relative_x, relative_y, relative_angle, chaser_theta, chaser_vx, chaser_vy, chaser_omega, target_omega] ** Normalize it first **
# Get data from Message Parser
try:
Pi_time, Pi_red_x, Pi_red_y, Pi_red_theta, \
Pi_red_Vx, Pi_red_Vy, Pi_red_omega, \
Pi_black_x, Pi_black_y, Pi_black_theta, \
Pi_black_Vx, Pi_black_Vy, Pi_black_omega, \
shoulder_theta, elbow_theta, wrist_theta, \
shoulder_omega, elbow_omega, wrist_omega = self.messages_to_deep_guidance.pop()
except IndexError:
                # Queue was empty, try again
continue
#############################
### Check if we've docked ###
#############################
# Check the reward function based off this state
self.environment.chaser_position = np.array([Pi_red_x, Pi_red_y, Pi_red_theta])
self.environment.chaser_velocity = np.array([Pi_red_Vx, Pi_red_Vy, Pi_red_omega])
self.environment.target_position = np.array([Pi_black_x, Pi_black_y, Pi_black_theta])
self.environment.target_velocity = np.array([Pi_black_Vx, Pi_black_Vy, Pi_black_omega])
self.environment.arm_angles = np.array([shoulder_theta, elbow_theta, wrist_theta])
self.environment.arm_angular_rates = np.array([shoulder_omega, elbow_omega, wrist_omega])
# Get environment to check for collisions
self.environment.update_end_effector_and_docking_locations()
self.environment.update_end_effector_location_body_frame()
self.environment.update_relative_pose_body_frame()
self.environment.check_collisions()
# Ask the environment whether docking occurred
self.have_we_docked = np.max([self.have_we_docked, float(self.environment.docked)])
# Extracting end-effector position and docking port position in the Inertial frame
end_effector_position = self.environment.end_effector_position
docking_port_position = self.environment.docking_port_position
# Calculating relative position between the docking port and the end-effector in the Target's body frame
docking_error_inertial = end_effector_position - docking_port_position
docking_error_target_body = np.matmul(make_C_bI(Pi_black_theta), docking_error_inertial)
print("Distance from cone to end-effector in target body frame: ", docking_error_target_body, " Environment thinks we've docked: ", self.have_we_docked)
#################################
### Building the Policy Input ###
#################################
total_state = self.environment.make_total_state()
policy_input = np.delete(total_state, Settings.IRRELEVANT_STATES)
# Normalizing
if Settings.NORMALIZE_STATE:
normalized_policy_input = (policy_input - relevant_state_mean)/relevant_half_range
else:
normalized_policy_input = policy_input
# Reshaping the input
normalized_policy_input = normalized_policy_input.reshape([-1, Settings.OBSERVATION_SIZE])
# Run processed state through the policy
deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:normalized_policy_input})[0] # [accel_x, accel_y, alpha]
# Rotating the command into the inertial frame
if not Settings.ACTIONS_IN_INERTIAL:
deep_guidance[0:2] = np.matmul(make_C_bI(Pi_red_theta).T,deep_guidance[0:2])
# Commanding constant values in the inertial frame for testing purposes
if DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS:
deep_guidance[0] = constant_Ax # [m/s^2]
deep_guidance[1] = constant_Ay # [m/s^2]
deep_guidance[2] = constant_alpha # [rad/s^2]
deep_guidance[3] = constant_alpha_shoulder # [rad/s^2]
                deep_guidance[4] = constant_alpha_elbow # [rad/s^2]
deep_guidance[5] = constant_alpha_wrist # [rad/s^2]
#################################################################
### Cap output if we are exceeding the max allowable velocity ###
#################################################################
# Stopping the command of additional velocity when we are already at our maximum
""" The check for arm velocity exceeding has been transferred to Simulink - June 1, 2021 """
if | |
column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
def Menu_Catawba(self):
self.mw.destroy()
self.mw = tk.Tk()
#Specify the attributes for all widgets simply like this.
self.mw.option_add("*Button.Background", "Teal")
self.mw.option_add("*Button.Foreground", "White")
self.mw.title('OP25 Repeater Selector GUI')
        #You can set the geometry attribute to change the root window's size
        self.mw.geometry("800x420") #Set the window size to 800x420 pixels (slightly smaller than the RPI 7" touch screens)
self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction
back = tk.Frame(master=self.mw,bg='Grey')
back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window
#Buttons
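        # Layout note (sketch): every menu page uses the same fixed grid - row 0 holds the navigation
        # buttons and rows 1-4 x columns 1-5 hold repeater buttons, with blank placeholder buttons
        # filling the unused cells so each page keeps identical geometry.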
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Anderson_Mountain = tk.Button(master=back, text='Anderson_Mountain', command=CMD_Anderson_Mountain, width=14, height=3)
Button_Anderson_Mountain.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Baker_Mtn = tk.Button(master=back, text='Baker_Mtn', command=CMD_Baker_Mtn, width=14, height=3)
Button_Baker_Mtn.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_River_Bend_Rd = tk.Button(master=back, text='River_Bend_Rd', command=CMD_River_Bend_Rd, width=14, height=3)
Button_River_Bend_Rd.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
def Menu_Chatham(self):
self.mw.destroy()
self.mw = tk.Tk()
#Specify the attributes for all widgets simply like this.
self.mw.option_add("*Button.Background", "Teal")
self.mw.option_add("*Button.Foreground", "White")
self.mw.title('OP25 Repeater Selector GUI')
        #You can set the geometry attribute to change the root window's size
        self.mw.geometry("800x420") #Set the window size to 800x420 pixels (slightly smaller than the RPI 7" touch screens)
self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction
back = tk.Frame(master=self.mw,bg='Grey')
back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window
#Buttons
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Chatham_Mountain = tk.Button(master=back, text='Chatham_Mountain', command=CMD_Chatham_Mountain, width=14, height=3)
Button_Chatham_Mountain.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Siler_City = tk.Button(master=back, text='Siler_City', command=CMD_Siler_City, width=14, height=3)
Button_Siler_City.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
def Menu_Cherokee(self):
self.mw.destroy()
self.mw = tk.Tk()
#Specify the attributes for all widgets simply like this.
self.mw.option_add("*Button.Background", "Teal")
self.mw.option_add("*Button.Foreground", "White")
self.mw.title('OP25 Repeater Selector GUI')
        #You can set the geometry attribute to change the root window's size
        self.mw.geometry("800x420") #Set the window size to 800x420 pixels (slightly smaller than the RPI 7" touch screens)
self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction
back = tk.Frame(master=self.mw,bg='Grey')
back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window
#Buttons
Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3)
Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3)
GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3)
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Joanna_Bald = tk.Button(master=back, text='Joanna_Bald', command=CMD_Joanna_Bald, width=14, height=3)
Button_Joanna_Bald.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_Murphy = tk.Button(master=back, text='Murphy', command=CMD_Murphy, width=14, height=3)
Button_Murphy.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5)
Button_ = tk.Button(master=back, text='', command='', width=14, height=3)
| |
<filename>src/mbed_cloud/_backends/iam/apis/aggregator_account_admin_api.py
# coding: utf-8
"""
Account Management API
API for managing accounts, users, creating API keys, uploading trusted certificates
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ..api_client import ApiClient
class AggregatorAccountAdminApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def add_account_api_key_to_groups(self, account_id, api_key, body, **kwargs): # noqa: E501
"""Add API key to a list of groups. # noqa: E501
An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apikey}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_account_api_key_to_groups(account_id, api_key, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str api_key: The ID of the API key to be added to the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.add_account_api_key_to_groups_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501
else:
(data) = self.add_account_api_key_to_groups_with_http_info(account_id, api_key, body, **kwargs) # noqa: E501
return data
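    # Usage sketch (hypothetical IDs, assuming an ApiClient configured with a valid `Bearer` API key):
    #   api = AggregatorAccountAdminApi()
    #   updated = api.add_account_api_key_to_groups(
    #       account_id="0162056a9a1586f30242590700000000",
    #       api_key="01612df56f3b0a580a010fc700000000",
    #       body=["0162056a9a1586f30242590700000000"])
    #   # `updated` is an UpdatedResponse; pass asynchronous=True to get a thread and call .get() on it.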
def add_account_api_key_to_groups_with_http_info(self, account_id, api_key, body, **kwargs): # noqa: E501
"""Add API key to a list of groups. # noqa: E501
An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apikey}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_account_api_key_to_groups_with_http_info(account_id, api_key, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str api_key: The ID of the API key to be added to the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'api_key', 'body'] # noqa: E501
all_params.append('asynchronous')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_account_api_key_to_groups" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `add_account_api_key_to_groups`") # noqa: E501
# verify the required parameter 'api_key' is set
if ('api_key' not in params or
params['api_key'] is None):
raise ValueError("Missing the required parameter `api_key` when calling `add_account_api_key_to_groups`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_account_api_key_to_groups`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountID'] = params['account_id'] # noqa: E501
if 'api_key' in params:
path_params['apiKey'] = params['api_key'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/v3/accounts/{accountID}/api-keys/{apiKey}/groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UpdatedResponse', # noqa: E501
auth_settings=auth_settings,
asynchronous=params.get('asynchronous'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_account_certificate(self, account_id, body, **kwargs): # noqa: E501
"""Upload new trusted certificate. # noqa: E501
An endpoint for uploading new trusted certificates. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates -d {\"name\": \"myCert1\", \"description\": \"very important cert\", \"certificate\": \"certificate_data\", \"service\": \"lwm2m\"} -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_account_certificate(account_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param TrustedCertificateRootReq body: A trusted certificate object with attributes, signature is optional. (required)
:return: TrustedCertificateResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.add_account_certificate_with_http_info(account_id, body, **kwargs) # noqa: E501
else:
(data) = self.add_account_certificate_with_http_info(account_id, body, **kwargs) # noqa: E501
return data
def add_account_certificate_with_http_info(self, account_id, body, **kwargs): # noqa: E501
"""Upload new trusted certificate. # noqa: E501
An endpoint for uploading new trusted certificates. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/trusted-certificates -d {\"name\": \"myCert1\", \"description\": \"very important cert\", \"certificate\": \"certificate_data\", \"service\": \"lwm2m\"} -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_account_certificate_with_http_info(account_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param TrustedCertificateRootReq body: A trusted certificate object with attributes, signature is optional. (required)
:return: TrustedCertificateResp
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'body'] # noqa: E501
all_params.append('asynchronous')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_account_certificate" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params or
params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `add_account_certificate`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `add_account_certificate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['accountID'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/v3/accounts/{accountID}/trusted-certificates', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrustedCertificateResp', # noqa: E501
auth_settings=auth_settings,
asynchronous=params.get('asynchronous'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def add_account_user_to_groups(self, account_id, user_id, body, **kwargs): # noqa: E501
"""Add user to a list of groups. # noqa: E501
An endpoint for adding user to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_account_user_to_groups(account_id, user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str user_id: The ID of the user to be added to the group. (required)
:param list[str] body: A list of IDs of the groups to be updated. (required)
:return: UpdatedResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.add_account_user_to_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501
else:
(data) = self.add_account_user_to_groups_with_http_info(account_id, user_id, body, **kwargs) # noqa: E501
return data
def add_account_user_to_groups_with_http_info(self, account_id, user_id, body, **kwargs): # noqa: E501
"""Add user to a list of groups. # noqa: E501
An endpoint for adding user to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/users/{user-id}/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.add_account_user_to_groups_with_http_info(account_id, user_id, body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str account_id: Account ID. (required)
:param str user_id: The ID of the user to be added to the group. (required)
| |
        # should fail if the FITS files in the directory
# are actually read.
bad_dir = tmpdir.mkdtemp()
not_really_fits = bad_dir.join('not_fits.fit')
not_really_fits.dump('I am not really a FITS file')
# Make sure an error will be generated if the FITS file is read
with pytest.raises(IOError):
fits.getheader(not_really_fits.strpath)
log = tmpdir.join('tmp.log')
self._setup_logger(log.strpath)
_ = ImageFileCollection(location=bad_dir.strpath, keywords=[])
with open(log.strpath) as f:
warnings = f.read()
        # If ImageFileCollection had tried to read the file it would have suppressed
        # the IOError but logged a warning, so check that the log has no warnings in
        # it (i.e. the file was never read).
assert (len(warnings) == 0)
def test_fits_summary_when_keywords_are_not_subset(self, triage_setup):
"""
Catch case when there is overlap between keyword list
passed to the ImageFileCollection and to files_filtered
but the latter is not a subset of the former.
"""
ic = ImageFileCollection(triage_setup.test_dir,
keywords=['imagetyp', 'exposure'])
n_files = len(ic.files)
files_missing_this_key = ic.files_filtered(imagetyp='*',
monkeys=None)
assert(n_files > 0)
assert(n_files == len(files_missing_this_key))
def test_duplicate_keywords_in_setting(self, triage_setup):
keywords_in = ['imagetyp', 'a', 'a']
ic = ImageFileCollection(triage_setup.test_dir,
keywords=keywords_in)
for key in set(keywords_in):
assert (key in ic.keywords)
# One keyword gets added: file
assert len(ic.keywords) < len(keywords_in) + 1
def test_keyword_includes_file(self, triage_setup):
keywords_in = ['file', 'imagetyp']
ic = ImageFileCollection(triage_setup.test_dir,
keywords=keywords_in)
assert 'file' in ic.keywords
file_keywords = [key for key in ic.keywords if key == 'file']
assert len(file_keywords) == 1
def test_setting_keywords_to_none(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp'])
ic.keywords = None
assert ic.summary == []
def test_getting_value_for_keyword(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp'])
# Does it fail if the keyword is not in the summary?
with pytest.raises(ValueError):
ic.values('filter')
# If I ask for unique values do I get them?
values = ic.values('imagetyp', unique=True)
assert values == list(set(ic.summary['imagetyp']))
assert len(values) < len(ic.summary['imagetyp'])
# Does the list of non-unique values match the raw column?
values = ic.values('imagetyp', unique=False)
assert values == list(ic.summary['imagetyp'])
# Does unique actually default to false?
values2 = ic.values('imagetyp')
assert values == values2
def test_collection_when_one_file_not_fits(self, triage_setup):
not_fits = 'foo.fit'
path_bad = os.path.join(triage_setup.test_dir, not_fits)
# Create an empty file...
with open(path_bad, 'w'):
pass
ic = ImageFileCollection(triage_setup.test_dir, keywords=['imagetyp'])
assert not_fits not in ic.summary['file']
os.remove(path_bad)
def test_data_type_mismatch_in_fits_keyword_values(self, triage_setup):
# If one keyword has an unexpected type, do we notice?
img = np.uint16(np.arange(100))
bad_filter = fits.PrimaryHDU(img)
bad_filter.header['imagetyp'] = 'LIGHT'
bad_filter.header['filter'] = 15.0
path_bad = os.path.join(triage_setup.test_dir, 'bad_filter.fit')
bad_filter.writeto(path_bad)
ic = ImageFileCollection(triage_setup.test_dir, keywords=['filter'])
# dtype is object when there is a mix of types
assert ic.summary['filter'].dtype == np.dtype('O')
os.remove(path_bad)
def test_filter_by_numerical_value(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
should_be_zero = ic.files_filtered(naxis=2)
assert len(should_be_zero) == 0
should_not_be_zero = ic.files_filtered(naxis=1)
assert len(should_not_be_zero) == triage_setup.n_test['files']
def test_files_filtered_with_full_path(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
files = ic.files_filtered(naxis=1, include_path=True)
for f in files:
assert f.startswith(triage_setup.test_dir)
def test_unknown_generator_type_raises_error(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
with pytest.raises(ValueError):
for foo in ic._generator('not a real generator'):
pass
def test_setting_write_location_to_bad_dest_raises_error(self, tmpdir,
triage_setup):
new_tmp = tmpdir.mkdtemp()
bad_directory = new_tmp.join('foo')
ic = ImageFileCollection(triage_setup.test_dir, keywords=['naxis'])
with pytest.raises(IOError):
for hdr in ic.headers(save_location=bad_directory.strpath):
pass
def test_initialization_with_no_keywords(self, triage_setup):
# This test is primarily historical -- the old default for
# keywords was an empty list (it is now the wildcard '*').
ic = ImageFileCollection(location=triage_setup.test_dir, keywords=[])
# Iteration below failed before bugfix...
execs = 0
for h in ic.headers():
execs += 1
assert not execs
def check_all_keywords_in_collection(self, image_collection):
lower_case_columns = [c.lower() for c in
image_collection.summary.colnames]
for h in image_collection.headers():
for k in h:
assert k.lower() in lower_case_columns
def test_tabulate_all_keywords(self, triage_setup):
ic = ImageFileCollection(location=triage_setup.test_dir, keywords='*')
self.check_all_keywords_in_collection(ic)
def test_summary_table_is_always_masked(self, triage_setup):
# First, try grabbing all of the keywords
ic = ImageFileCollection(location=triage_setup.test_dir, keywords='*')
assert ic.summary.masked
# Now, try keywords that every file will have
ic.keywords = ['bitpix']
assert ic.summary.masked
# What about keywords that include some that will surely be missing?
ic.keywords = ['bitpix', 'dsafui']
assert ic.summary.masked
def test_case_of_keywords_respected(self, triage_setup):
keywords_in = ['BitPix', 'instrume', 'NAXIS']
ic = ImageFileCollection(location=triage_setup.test_dir,
keywords=keywords_in)
for key in keywords_in:
assert key in ic.summary.colnames
def test_grabbing_all_keywords_and_specific_keywords(self, triage_setup):
keyword_not_in_headers = 'OIdn89!@'
ic = ImageFileCollection(triage_setup.test_dir,
keywords=['*', keyword_not_in_headers])
assert keyword_not_in_headers in ic.summary.colnames
self.check_all_keywords_in_collection(ic)
def test_grabbing_all_keywords_excludes_empty_key(self, triage_setup):
# This test needs a file with a blank keyword in it to ensure
# that case is handled correctly.
blank_keyword = fits.PrimaryHDU()
blank_keyword.data = np.zeros((100, 100))
blank_keyword.header[''] = 'blank'
blank_keyword.writeto(os.path.join(triage_setup.test_dir,
'blank.fits'))
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
assert 'col0' not in ic.summary.colnames
@pytest.mark.skipif("os.environ.get('APPVEYOR') or os.sys.platform == 'win32'",
reason="fails on Windows because of file permissions.")
def test_header_with_long_history_roundtrips_to_disk(self, triage_setup):
        # I tried combining several history comments into one table entry with
# '\n'.join(history), which resulted in a table that couldn't
# round trip to disk because on read the newline character was
# interpreted as...a new line! This test is a check against future
# foolishness.
from astropy.table import Table
img = np.uint16(np.arange(100))
long_history = fits.PrimaryHDU(img)
long_history.header['imagetyp'] = 'BIAS'
long_history.header['history'] = 'Something happened'
long_history.header['history'] = 'Then something else happened'
long_history.header['history'] = 'And then something odd happened'
path_history = os.path.join(triage_setup.test_dir, 'long_history.fit')
long_history.writeto(path_history)
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
with NamedTemporaryFile() as test_table:
ic.summary.write(test_table.name, format='ascii.csv',
overwrite=True)
table_disk = Table.read(test_table.name, format='ascii.csv')
assert len(table_disk) == len(ic.summary)
@pytest.mark.skipif("os.environ.get('APPVEYOR') or os.sys.platform == 'win32'",
reason="fails on Windows because file "
"overwriting fails")
def test_refresh_method_sees_added_keywords(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
# Add a keyword I know isn't already in the header to each file.
not_in_header = 'BARKARK'
for h in ic.headers(overwrite=True):
h[not_in_header] = True
assert not_in_header not in ic.summary.colnames
ic.refresh()
# After refreshing the odd keyword should be present.
assert not_in_header.lower() in ic.summary.colnames
def test_refresh_method_sees_added_files(self, triage_setup):
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
# Compressed files don't get copied. Not sure why...
original_len = len(ic.summary) - triage_setup.n_test['compressed']
# Generate additional files in this directory
for h in ic.headers(save_with_name="_foo"):
pass
ic.refresh()
new_len = len(ic.summary) - triage_setup.n_test['compressed']
assert new_len == 2 * original_len
def test_keyword_order_is_preserved(self, triage_setup):
keywords = ['imagetyp', 'exposure', 'filter']
ic = ImageFileCollection(triage_setup.test_dir, keywords=keywords)
assert ic.keywords == ['file'] + keywords
def test_sorting(self, triage_setup):
collection = ImageFileCollection(
location=triage_setup.test_dir,
keywords=['imagetyp', 'filter', 'object'])
all_elements = []
for hdu, fname in collection.hdus(return_fname=True):
all_elements.append((str(hdu.header), fname))
# Now sort
collection.sort(keys=['imagetyp', 'object'])
# and check it's all still right
for hdu, fname in collection.hdus(return_fname=True):
assert((str(hdu.header), fname) in all_elements)
for i in range(len(collection.summary)):
assert(collection.summary['file'][i] == collection.files[i])
def test_sorting_without_key_fails(self, triage_setup):
ic = ImageFileCollection(location=triage_setup.test_dir)
with pytest.raises(ValueError):
ic.sort(keys=None)
def test_duplicate_keywords(self, triage_setup):
# Make sure duplicated keywords don't make the imagefilecollection
# fail.
hdu = fits.PrimaryHDU()
hdu.data = np.zeros((5, 5))
hdu.header['stupid'] = 'fun'
hdu.header.append(('stupid', 'nofun'))
hdu.writeto(os.path.join(triage_setup.test_dir, 'duplicated.fits'))
with pytest.warns(UserWarning) as w:
ic = ImageFileCollection(triage_setup.test_dir, keywords='*')
assert len(w) == 1
assert 'stupid' in str(w[0].message)
assert 'stupid' in ic.summary.colnames
assert 'fun' in ic.summary['stupid']
assert 'nofun' not in ic.summary['stupid']
def test_ccds_generator_in_different_directory(self, triage_setup, tmpdir):
"""
Regression test for https://github.com/astropy/ccdproc/issues/421 in
which the ccds generator fails if the current working directory is
not the location of the ImageFileCollection.
"""
coll = ImageFileCollection(triage_setup.test_dir)
        # The temporary directory below should be different from the collection
        # location.
os.chdir(tmpdir.strpath)
# Let's make sure it is.
assert not os.path.samefile(os.getcwd(), coll.location)
# This generated an IOError before the issue was fixed.
for _ in coll.ccds(ccd_kwargs={'unit': 'adu'}):
pass
def test_ccds_generator_does_not_support_overwrite(self, triage_setup):
"""
CCDData objects have several attributes that make it hard to
reliably support overwriting. For example in what extension should
mask, uncertainty be written?
        Also CCDData doesn't explicitly support in-place operations, so it's too
        easy to create a new CCDData object inadvertently and all modifications
might be lost.
"""
ic = ImageFileCollection(triage_setup.test_dir)
with pytest.raises(NotImplementedError):
ic.ccds(overwrite=True)
with pytest.raises(NotImplementedError):
ic.ccds(clobber=True)
def test_glob_matching(self, triage_setup):
# We'll create two files with strange names to test glob
# includes / excludes
one = fits.PrimaryHDU()
one.data = np.zeros((5, 5))
one.header[''] = 'whatever'
one.writeto(os.path.join(triage_setup.test_dir, 'SPAM_stuff.fits'))
one.writeto(os.path.join(triage_setup.test_dir, 'SPAM_other_stuff.fits'))
coll = ImageFileCollection(triage_setup.test_dir, glob_include='SPAM*')
assert len(coll.files) == 2
coll = ImageFileCollection(triage_setup.test_dir, glob_include='SPAM*',
glob_exclude='*other*')
assert len(coll.files) == 1
# The glob attributes are readonly, so setting them raises an Exception.
with pytest.raises(AttributeError):
coll.glob_exclude = '*stuff*'
with pytest.raises(AttributeError):
coll.glob_include = '*stuff*'
def test_that_test_files_have_expected_properties(self, triage_setup):
expected_name = \
get_pkg_data_filename('data/expected_ifc_file_properties.csv')
expected = Table.read(expected_name)
# Make the comparison more reliable by sorting
expected.sort('file')
ic = ImageFileCollection(triage_setup.test_dir)
actual = ic.summary
        # Write the actual IFC summary out to disk and read it back to turn bools
        # into the strings "True" and "False" and to remove any other non-essential
        # differences between the tables.
tmp_file = 'actual.csv'
actual.write(tmp_file)
actual = Table.read(tmp_file)
# Make the comparison more reliable by sorting
actual.sort('file')
assert len(ic.summary) | |
= self.state_dict()
self._payout_pots()
if self.RETURN_PRE_TRANSITION_STATE_IN_INFO:
return state_before_transition
return
elif self.current_round == Poker.FLOP:
self._deal_flop()
elif self.current_round == Poker.TURN:
self._deal_turn()
elif self.current_round == Poker.RIVER:
self._deal_river()
else:
raise ValueError(self.current_round)
def _deal_next_round(self):
"""
Call this AFTER round+=1
"""
if self.current_round == Poker.PREFLOP:
self._deal_hole_cards()
elif self.current_round == Poker.FLOP:
self._deal_flop()
elif self.current_round == Poker.TURN:
self._deal_turn()
elif self.current_round == Poker.RIVER:
self._deal_river()
else:
raise ValueError(self.current_round)
def _next_round(self):
if self.IS_FIXED_LIMIT_GAME:
self.n_raises_this_round = 0
# refer to #ID_2 in docstring of this class for this.
self.capped_raise.reset()
# sort out mainpot, sidepots and p.currentbets
self._put_current_bets_into_main_pot_and_side_pots()
# This must be called BEFORE round += 1
self.current_player = self._get_first_to_act_post_flop()
# set has_acted_this_round = False and maybe others
for p in self.seats:
p.has_acted_this_round = False
        self.current_round += 1 # highly dependent on the PokerEnv round constants (PREFLOP, FLOP, ...) being sequential ints!
self._deal_next_round()
def _step(self, processed_action):
"""
        The action passed is considered to be for self.current_player and to come from its respective agent (if
        applicable to your algorithm).
        Actions are always of the form [action_idx, _raise_size].
        However, _raise_size is only considered when the action is Poker.BET_RAISE.
        raise_size is measured as the total chips to be placed in front (i.e. the new current_bet), not as an
        addition to the existing current_bet.
Args:
processed_action (tuple or list): (action_idx, raise_size)
Returns:
obs, rew_for_all_players, done?, info
"""
# After this call, this fn assumes that executing the action is legal.
processed_action = self._get_fixed_action(action=processed_action)
if processed_action[0] == Poker.CHECK_CALL:
self.current_player.check_call(total_to_call=processed_action[1])
elif processed_action[0] == Poker.FOLD:
self.current_player.fold()
elif processed_action[0] == Poker.BET_RAISE:
# This happens when someone has fewer chips than minraise and goes all in.
# The last raiser, if there is one, can't reopen in this case until someone else reraises!
if processed_action[1] < self._get_current_total_min_raise():
self.capped_raise.happened_this_round = True
self.capped_raise.player_that_raised = self.current_player
self.capped_raise.player_that_cant_reopen = self.last_raiser # might be None. Then everyone can raise.
elif self.capped_raise.happened_this_round is True:
                # If someone shoved under the minraise over someone else's raise, the original raiser can't reraise again!
                # But if a 3rd player raises over that under-min shove, everyone can raise again. This is handled here.
if self.capped_raise.player_that_cant_reopen is not self.current_player:
self.capped_raise.reset()
self.last_raiser = self.current_player # leave this line at the end of this function!!
self.current_player.bet_raise(total_bet_amount=processed_action[1])
self.n_actions_this_episode += 1
# If this is a limit poker game, increment the raise counter
if self.IS_FIXED_LIMIT_GAME:
self.n_raises_this_round += 1
else:
raise RuntimeError(processed_action[0], " is not legal")
self.last_action = [processed_action[0], processed_action[1], self.current_player.seat_id]
# ______________________________________________________________________________________________________________
        # check whether we should deal the next round, do a rundown, or continue to the next step in this episode of the poker game
all_non_all_in_and_non_fold_p = [p for p in self.seats if not p.folded_this_episode and not p.is_allin]
all_nonfold_p = [p for p in self.seats if not p.folded_this_episode]
# just let next player run in this round
info = None
if self._should_continue_in_this_round(all_non_all_in_and_non_fold_p=all_non_all_in_and_non_fold_p,
all_nonfold_p=all_nonfold_p):
self.current_player = self._get_player_that_has_to_act_next()
is_terminal = False
if self.RETURN_PRE_TRANSITION_STATE_IN_INFO:
info = {"chance_acts": False, "state_dict_before_money_move": None}
# next round
elif len(all_non_all_in_and_non_fold_p) > 1:
# payout if final round
if self.current_round == len(self.ALL_ROUNDS_LIST) - 1:
is_terminal = True
self._put_current_bets_into_main_pot_and_side_pots()
if self.RETURN_PRE_TRANSITION_STATE_IN_INFO:
info = {"chance_acts": False, "state_dict_before_money_move": self.state_dict()}
self._payout_pots()
# deal next round
else:
is_terminal = False
if self.RETURN_PRE_TRANSITION_STATE_IN_INFO:
info = {"chance_acts": True, "state_dict_before_money_move": self.state_dict()}
self._next_round()
# rundown
elif len(all_nonfold_p) > 1: # rundown only makes sense if >0 are allin and 1 is not or >2 are allin.
is_terminal = True
state_before_payouts = self._rundown()
if self.RETURN_PRE_TRANSITION_STATE_IN_INFO:
info = {"chance_acts": False, "state_dict_before_money_move": state_before_payouts}
# only one not folded, so pay all pots to him.
elif len(all_nonfold_p) == 1:
is_terminal = True
if self.RETURN_PRE_TRANSITION_STATE_IN_INFO:
self._put_current_bets_into_main_pot_and_side_pots()
info = {"chance_acts": False, "state_dict_before_money_move": self.state_dict()}
self._payout_pots()
            else: # more efficient, but doesn't give the info needed.
self._pay_all_to_one_player(all_nonfold_p[0])
else:
raise RuntimeError("There seems to be an edge-case not built into this")
return self._get_current_step_returns(is_terminal=is_terminal, info=info)
# _____________________________________________________ UTIL ______________________________________________________
def _get_winner_list(self, players_to_consider):
"""
Returns:
list: list of PokerPlayer instances that are winners
"""
best_rank = max([p.hand_rank for p in players_to_consider])
winners = [p for p in players_to_consider if p.hand_rank == best_rank]
return winners
def _get_current_total_min_raise(self):
"""
Taking the highest and 2nd highest and subtracting them gives us the minraise amount. If all bets are equal,
we return with a delta of 1 big blind.
"""
if self.N_SEATS == 2:
_sorted_ascending = sorted([p.current_bet for p in self.seats]) # 0 is small, 1 is big
delta = max(_sorted_ascending[1] - _sorted_ascending[0], self.BIG_BLIND)
return _sorted_ascending[1] + delta
else:
current_bets_sorted_descending = sorted([p.current_bet for p in self.seats], reverse=True)
current_to_call_total = max(current_bets_sorted_descending)
_largest_bet = current_bets_sorted_descending[0]
for i in range(1, self.N_SEATS):
if current_bets_sorted_descending[i] == _largest_bet:
continue
delta_between_last_and_before_last = _largest_bet - current_bets_sorted_descending[i]
delta = max(delta_between_last_and_before_last, self.BIG_BLIND)
return current_to_call_total + delta
# in cases where all bets are equal, the minraise delta is 1 big blind
return current_to_call_total + self.BIG_BLIND
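    # Worked example (sketch) for _get_current_total_min_raise: with current bets [250, 250, 100]
    # the biggest bet is 250 and the next distinct bet is 100, so the raise delta is
    # max(250 - 100, BIG_BLIND) = 150 (assuming BIG_BLIND <= 150) and the minimum total raise
    # amount is 250 + 150 = 400.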
def _get_new_board(self):
return np.full((self.N_TOTAL_BOARD_CARDS, 2), Poker.CARD_NOT_DEALT_TOKEN_1D, dtype=np.int8)
def _get_first_to_act_pre_flop(self):
if self.N_SEATS >= 4:
# left of BB
return self.seats[3]
else:
            # for n_players == 3 and 2, the btn acts first
return self.seats[0]
def _get_first_to_act_post_flop(self):
"""
        Btn has index 0. He is always last to act, with the exception of some HU rules where ``BTN_IS_FIRST_POSTFLOP``
        can be set to True. In multi-player games, we search for the smallest seat id in the list, while 0 (i.e. btn)
        is treated as inf.
"""
if self.N_SEATS == 2:
if self.BTN_IS_FIRST_POSTFLOP:
return self.seats[0]
else:
return self.seats[1]
else:
players_to_consider = [p for p in self.seats if not p.folded_this_episode and not p.is_allin]
            # since there will always be at least 2 players in the pot, the btn can NEVER be first!
first_p = players_to_consider[0]
for p in players_to_consider:
if p.seat_id < first_p.seat_id or first_p.seat_id == 0:
first_p = p
return first_p
def _get_biggest_bet_out_there_aka_total_to_call(self):
"""
chip count of max([p.current_bet for p in self.seats])
"""
return max([p.current_bet for p in self.seats])
def _get_player_that_has_to_act_next(self):
idx = self.seats.index(self.current_player) + 1
for i in range(self.N_SEATS):
mod_idx = idx % self.N_SEATS
p = self.seats[mod_idx]
if not p.is_allin and not p.folded_this_episode:
return self.seats[mod_idx]
idx += 1
raise RuntimeError("There is no next player. Seems like some more debugging is needed...")
def _get_fixed_action(self, action):
"""
This method is responsible for asserting that an action is valid at the current state and returns the
capped/changed action if not.
Args:
action (iterable): iterable of 2 ints - [PokerEnv.ACTIONTYPE, _raise_amount_in_chips]
Returns: [Poker.FOLD, -1]
or [Poker.CHECK_CALL, total_bet_to_be_placed_in_front_by_player]
or [Poker.BET_RAISE, total_bet_to_be_placed_in_front_by_player]
"""
#print("action", action)
_action_idx = 0
if type(action) is int:
_action_idx = action
else:
_action_idx = action[0]
total_to_call = self._get_biggest_bet_out_there_aka_total_to_call()
if _action_idx == Poker.FOLD:
if total_to_call <= self.current_player.current_bet:
return self._process_check_call(total_to_call=total_to_call)
else:
return [Poker.FOLD, -1]
elif _action_idx == Poker.CHECK_CALL:
if (self.FIRST_ACTION_NO_CALL
and (self.n_actions_this_episode == 0)
and self.current_round == Poker.PREFLOP):
return [Poker.FOLD, -1]
return self._process_check_call(total_to_call=total_to_call)
elif _action_idx == Poker.BET_RAISE:
# Limit Poker specific rule
if self.IS_FIXED_LIMIT_GAME:
if self.n_raises_this_round >= self.MAX_N_RAISES_PER_ROUND[self.current_round]:
return self._process_check_call(total_to_call=total_to_call)
if ((self.current_player.stack + self.current_player.current_bet <= total_to_call)
or (self.capped_raise.player_that_cant_reopen is self.current_player)):
return self._process_check_call(total_to_call=total_to_call)
else:
return self._process_raise(raise_total_amount_in_chips=action[1])
else:
raise RuntimeError('invalid action ({}) must be fold (0), call (1), or raise (2) '.format(_action_idx))
def _process_check_call(self, total_to_call):
delta_to_call = min(total_to_call - self.current_player.current_bet, self.current_player.stack)
total_bet_to_be_placed = int(delta_to_call + self.current_player.current_bet)
return [Poker.CHECK_CALL, total_bet_to_be_placed]
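# Illustrative example (values assumed): if total_to_call=100, the player's current_bet=40 and
# stack=50, then delta_to_call = min(60, 50) = 50, so the call is capped at an all-in total of 90.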
def _process_raise(self, raise_total_amount_in_chips):
raise_to = self._adjust_raise(raise_total_amount_in_chips=raise_total_amount_in_chips)
# lastly, if that amount is too much, raise all in
if self.current_player.current_bet + self.current_player.stack < raise_to:
raise_to = self.current_player.stack + self.current_player.current_bet
return [Poker.BET_RAISE, int(raise_to)]
def _should_continue_in_this_round(self, all_non_all_in_and_non_fold_p, all_nonfold_p):
""" util function used in ._step() """
# only 1 player did not fold yet
if len(all_nonfold_p) < 2:
return False
largest_bet = max([p.current_bet for p in self.seats])
if len([p for p in all_nonfold_p if p.is_allin or p.current_bet == largest_bet]) == len(all_nonfold_p) \
and len([p for p in all_non_all_in_and_non_fold_p if not p.has_acted_this_round]) == 0:
return False
return True
# _____________________________________________________ OUTPUT _____________________________________________________
def _get_current_step_returns(self, is_terminal, info):
obs = self.get_current_obs(is_terminal)
reward = self._get_step_reward(is_terminal)
return obs, reward, is_terminal, info
def _get_player_states_all_players(self, normalization_sum):
""" Public Information About Each Player """
if (self.N_SEATS == 2) and self._USE_SIMPLE_HU_OBS:
player_states = []
for player in self.seats:
player_states += [
player.stack / normalization_sum,
player.current_bet / normalization_sum,
player.is_allin
]
else:
player_states = []
for player in self.seats:
player_states += [
player.stack / normalization_sum,
player.current_bet / normalization_sum,
player.folded_this_episode,
player.is_allin
]
x = [0] * self.N_SEATS
if player.side_pot_rank > 0:
x[int(player.side_pot_rank)] = 1
player_states += x
return player_states
def eltwise_grad_z_likelihood(self, zs, thetas, baselines, t, subkeys):
"""
Computes batch of estimators for score :math:`\\nabla_Z \\log p(\\Theta, D | Z)`
Selects corresponding estimator used for the term :math:`\\nabla_Z E_{p(G|Z)}[ p(\\Theta, D | G) ]`
and executes it in batch.
Args:
zs (ndarray): batch of latent tensors :math:`Z` ``[n_particles, d, k, 2]``
thetas (Any): batch of parameters PyTree with ``n_particles`` as leading dim
baselines (ndarray): array of score function baseline values of shape ``[n_particles, ]``
Returns:
tuple batch of (gradient estimates, baselines) of shapes ``[n_particles, d, k, 2], [n_particles, ]``
"""
# select the chosen gradient estimator
if self.grad_estimator_z == 'score':
grad_z_likelihood = self.grad_z_likelihood_score_function
elif self.grad_estimator_z == 'reparam':
grad_z_likelihood = self.grad_z_likelihood_gumbel
else:
raise ValueError(f'Unknown gradient estimator `{self.grad_estimator_z}`')
# vmap
return vmap(grad_z_likelihood, (0, 0, 0, None, 0), (0, 0))(zs, thetas, baselines, t, subkeys)
def grad_z_likelihood_score_function(self, single_z, single_theta, single_sf_baseline, t, subk):
"""
Score function estimator (aka REINFORCE) for the score :math:`\\nabla_Z \\log p(\\Theta, D | Z)`
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
This does not use :math:`\\nabla_G \\log p(\\Theta, D | G)` and is hence applicable when
the gradient w.r.t. the adjacency matrix is not defined (as e.g. for the BGe score).
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_theta (Any): single parameter PyTree
single_sf_baseline (ndarray): ``[1, ]``
t (int): step
subk (ndarray): rng key
Returns:
tuple of gradient, baseline ``[d, k, 2], [1, ]``
"""
# [d, d]
p = self.edge_probs(single_z, t)
n_vars, n_dim = single_z.shape[0:2]
# [n_grad_mc_samples, d, d]
subk, subk_ = random.split(subk)
g_samples = self.sample_g(p, subk_, self.n_grad_mc_samples)
# same MC samples for numerator and denominator
n_mc_numerator = self.n_grad_mc_samples
n_mc_denominator = self.n_grad_mc_samples
# [n_grad_mc_samples, ]
subk, subk_ = random.split(subk)
logprobs_numerator = self.eltwise_log_joint_prob(g_samples, single_theta, subk_)
logprobs_denominator = logprobs_numerator
# variance_reduction
logprobs_numerator_adjusted = lax.cond(
self.score_function_baseline <= 0.0,
lambda _: logprobs_numerator,
lambda _: logprobs_numerator - single_sf_baseline,
operand=None)
# [d * k * 2, n_grad_mc_samples]
grad_z = self.eltwise_grad_latent_log_prob(g_samples, single_z, t)\
.reshape(self.n_grad_mc_samples, n_vars * n_dim * 2)\
.transpose((1, 0))
# stable computation of exp/log/divide
# [d * k * 2, ] [d * k * 2, ]
log_numerator, sign = logsumexp(a=logprobs_numerator_adjusted, b=grad_z, axis=1, return_sign=True)
# []
log_denominator = logsumexp(logprobs_denominator, axis=0)
# [d * k * 2, ]
stable_sf_grad = sign * jnp.exp(log_numerator - jnp.log(n_mc_numerator) - log_denominator + jnp.log(n_mc_denominator))
# [d, k, 2]
stable_sf_grad_shaped = stable_sf_grad.reshape(n_vars, n_dim, 2)
# update baseline
single_sf_baseline = (self.score_function_baseline * logprobs_numerator.mean(0) +
(1 - self.score_function_baseline) * single_sf_baseline)
return stable_sf_grad_shaped, single_sf_baseline
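# Note on the computation above (added for clarity, not in the original source): the returned
# gradient is a Monte Carlo estimate of the ratio
#   E_{p(G|Z)}[ p(Theta, D | G) * d/dZ log p(G|Z) ] / E_{p(G|Z)}[ p(Theta, D | G) ]
# evaluated entirely in log-space via logsumexp so that very small likelihoods do not underflow;
# the optional baseline simply subtracts single_sf_baseline from the numerator log-weights for
# variance reduction.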
def grad_z_likelihood_gumbel(self, single_z, single_theta, single_sf_baseline, t, subk):
"""
Reparameterization estimator for the score :math:`\\nabla_Z \\log p(\\Theta, D | Z)`
using the Gumbel-softmax / concrete distribution reparameterization trick.
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
This **does** require a well-defined gradient
:math:`\\nabla_G \\log p(\\Theta, D | G)` and is hence not applicable when
the gradient w.r.t. the adjacency matrix is not defined for Gumbel-relaxations
of the discrete adjacency matrix.
Any (marginal) likelihood expressible as a function of ``g[:, j]`` and ``theta`` ,
e.g. using the vector of (possibly soft) parent indicators as a mask, satisfies this.
Examples are: ``dibs.models.LinearGaussian`` and ``dibs.models.DenseNonlinearGaussian``
See also e.g. http://proceedings.mlr.press/v108/zheng20a/zheng20a.pdf
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_theta (Any): single parameter PyTree
single_sf_baseline (ndarray): ``[1, ]``
t (int): step
subk (ndarray): rng key
Returns:
tuple of gradient, baseline ``[d, k, 2], [1, ]``
"""
n_vars = single_z.shape[0]
# same MC samples for numerator and denominator
n_mc_numerator = self.n_grad_mc_samples
n_mc_denominator = self.n_grad_mc_samples
# sample Logistic(0,1) as randomness in reparameterization
subk, subk_ = random.split(subk)
eps = random.logistic(subk_, shape=(self.n_grad_mc_samples, n_vars, n_vars))
# [n_grad_mc_samples, ]
# since we don't backprop per se, it leaves us with the option of having
# `soft` and `hard` versions for evaluating the non-grad p(.)
subk, subk_ = random.split(subk)
# [d, k, 2], [d, d], [n_grad_mc_samples, d, d], [1,], [1,] -> [n_grad_mc_samples]
logprobs_numerator = vmap(self.log_joint_prob_soft, (None, None, 0, None, None), 0)(single_z, single_theta, eps, t, subk_)
logprobs_denominator = logprobs_numerator
# [n_grad_mc_samples, d, k, 2]
# d/dx log p(theta, D | G(x, eps)) for a batch of `eps` samples
# use the same minibatch of data as for other log prob evaluation (if using minibatching)
# [d, k, 2], [d, d], [n_grad_mc_samples, d, d], [1,], [1,] -> [n_grad_mc_samples, d, k, 2]
grad_z = vmap(grad(self.log_joint_prob_soft, 0), (None, None, 0, None, None), 0)(single_z, single_theta, eps, t, subk_)
# stable computation of exp/log/divide
# [d, k, 2], [d, k, 2]
log_numerator, sign = logsumexp(a=logprobs_numerator[:, None, None, None], b=grad_z, axis=0, return_sign=True)
# []
log_denominator = logsumexp(logprobs_denominator, axis=0)
# [d, k, 2]
stable_grad = sign * jnp.exp(log_numerator - jnp.log(n_mc_numerator) - log_denominator + jnp.log(n_mc_denominator))
return stable_grad, single_sf_baseline
#
# Estimators for score d/dtheta log p(theta, D | Z)
# (i.e. w.r.t the conditional distribution parameters)
#
def eltwise_grad_theta_likelihood(self, zs, thetas, t, subkeys):
"""
Computes batch of estimators for the score :math:`\\nabla_{\\Theta} \\log p(\\Theta, D | Z)`,
i.e. w.r.t the conditional distribution parameters.
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
This does not use :math:`\\nabla_G \\log p(\\Theta, D | G)` and is hence applicable when
the gradient w.r.t. the adjacency matrix is not defined (as e.g. for the BGe score).
Analogous to ``eltwise_grad_z_likelihood`` but gradient w.r.t :math:`\\Theta` instead of :math:`Z`
Args:
zs (ndarray): batch of latent tensors Z of shape ``[n_particles, d, k, 2]``
thetas (Any): batch of parameter PyTree with ``n_particles`` as leading dim
Returns:
batch of gradients in form of ``thetas`` PyTree with ``n_particles`` as leading dim
"""
return vmap(self.grad_theta_likelihood, (0, 0, None, 0), 0)(zs, thetas, t, subkeys)
def grad_theta_likelihood(self, single_z, single_theta, t, subk):
"""
Computes Monte Carlo estimator for the score :math:`\\nabla_{\\Theta} \\log p(\\Theta, D | Z)`
Uses hard samples of :math:`G`, but a soft reparameterization like for :math:`\\nabla_Z` is also possible.
Uses the same :math:`G \\sim p(G | Z)` samples for expectations in numerator and denominator.
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_theta (Any): single parameter PyTree
t (int): step
subk (ndarray): rng key
Returns:
parameter gradient PyTree
"""
# [d, d]
p = self.edge_probs(single_z, t)
# [n_grad_mc_samples, d, d]
g_samples = self.sample_g(p, subk, self.n_grad_mc_samples)
# same MC samples for numerator and denominator
n_mc_numerator = self.n_grad_mc_samples
n_mc_denominator = self.n_grad_mc_samples
# [n_mc_numerator, ]
subk, subk_ = random.split(subk)
logprobs_numerator = self.eltwise_log_joint_prob(g_samples, single_theta, subk_)
logprobs_denominator = logprobs_numerator
# PyTree shape of `single_theta` with additional leading dimension [n_mc_numerator, ...]
# d/dtheta log p(theta, D | G) for a batch of G samples
# use the same minibatch of data as for other log prob evaluation (if using minibatching)
grad_theta_log_joint_prob = grad(self.log_joint_prob, 1)
grad_theta = vmap(grad_theta_log_joint_prob, (0, None, None, None), 0)(g_samples, single_theta, self.x, subk_)
# stable computation of exp/log/divide and PyTree compatible
# sums over MC graph samples dimension to get MC gradient estimate of theta
# original PyTree shape of `single_theta`
log_numerator = tree_map(
lambda leaf_theta:
logsumexp(a=expand_by(logprobs_numerator, leaf_theta.ndim - 1), b=leaf_theta, axis=0, return_sign=True)[0],
grad_theta)
# original PyTree shape of `single_theta`
sign = tree_map(
lambda leaf_theta:
logsumexp(a=expand_by(logprobs_numerator, leaf_theta.ndim - 1), b=leaf_theta, axis=0, return_sign=True)[1],
grad_theta)
# []
log_denominator = logsumexp(logprobs_denominator, axis=0)
# original PyTree shape of `single_theta`
stable_grad = tree_multimap(
lambda sign_leaf_theta, log_leaf_theta:
(sign_leaf_theta * jnp.exp(log_leaf_theta - jnp.log(n_mc_numerator) - log_denominator + jnp.log(n_mc_denominator))),
sign, log_numerator)
return stable_grad
"""
Estimators for score d/dZ log p(Z)
"""
def constraint_gumbel(self, single_z, single_eps, t):
"""
Evaluates continuous acyclicity constraint using
Gumbel-softmax instead of Bernoulli samples
Args:
single_z (ndarray): single latent tensor ``[d, k, 2]``
single_eps (ndarray): i.i.d. Logistic noise of shape ``[d, d]`` for Gumbel-softmax
t (int): step
Returns:
constraint value of shape ``[1,]``
"""
n_vars = single_z.shape[0]
G = self.particle_to_soft_graph(single_z, single_eps, t)
h = acyclic_constr_nograd(G, n_vars)
return h
def grad_constraint_gumbel(self, single_z, key, t):
"""
Reparameterization estimator for the gradient :math:`\\nabla_Z E_{p(G|Z)} [ h(G) ]`
where :math:`h` is the acyclicity constraint penalty function.
Since :math:`h` is | |
# -*- coding: utf-8 -*-
""" p2p-streams (c) 2014 enen92 fightnight
This file contains the livestream addon engine. It is mostly based on divingmule's work on the livestreams addon!
Functions:
xml_lists_menu() -> main menu for the xml list category
addlista() -> add a new list. It'll ask for local or remote and processes the given input
remove_list(name) -> Remove a list
get_groups(url) -> First regex function to parse a given list. Sopcast type list
get_channels(name,url) -> Second regex function to parse a given list. Used for general livestreams xml type lists
getData(url,fanart) -> Get the item data such as iconimage, fanart, etc
getChannelItems(name,url,fanart) -> Function to grab the channel items
getItems(items,fanart) -> Function to grab the items from the xml
removeNonAscii(s) -> Function to remove non-ascii characters from the list
getSoup(url) -> uses beautifulsoup to parse a remote xml
addon_log(string) -> Simple log/print function
getRegexParsed(regexs, url) -> parse the regex expression
list_type(url) -> Checks if the list is xml or m3u
parse_m3u(url) -> Parses a m3u type list
"""
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,HTMLParser,time,datetime,os,xbmcvfs,sys
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
from peertopeerutils.pluginxbmc import *
from peertopeerutils.webutils import *
from peertopeerutils.directoryhandle import *
from peertopeerutils.iofile import *
"""
Main Menu
"""
def xml_lists_menu():
if settings.getSetting('sopcast-oficial') == "true":
addDir(translate(40116),"http://sopcast.org/chlist.xml",101,addonpath + art + 'xml_list_sopcast.png',2,True)
try:
if os.path.exists(os.path.join(pastaperfil,"Lists")):
dirs, files = xbmcvfs.listdir(os.path.join(pastaperfil,"Lists"))
for file in files:
f = open(os.path.join(pastaperfil,"Lists",file), "r")
string = f.read()
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg'))):addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True,fan_art=os.path.join(pastaperfil,"Lists-fanart",file.replace('.txt','.jpg')))
else: addDir("[B][COLOR orange]" + file.replace(".txt","") + "[/B][/COLOR]",string,101,addonpath + art + 'xml_lists.png',2,True)
except: pass
addDir(translate(40121),MainURL,107,addonpath + art + 'plus-menu.png',2,False)
#xbmc.executebuiltin("Container.SetViewMode(51)")
"""
Add a new list function
"""
def addlista():
opcao= xbmcgui.Dialog().yesno(translate(40000), translate(40123),"","",translate(40124),translate(40125))
if opcao:
dialog = xbmcgui.Dialog()
lista_xml = dialog.browse(int(1), translate(40186), 'myprograms','.xml|.m3u')
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if xbmcvfs.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),lista_xml)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
else:
keyb = xbmc.Keyboard("", translate(40127))
keyb.doModal()
if (keyb.isConfirmed()):
search = keyb.getText()
if search=='': sys.exit(0)
if "dropbox" in search and not "?dl=1" in search: search = search + '?dl=1'
if "xml" not in search.split(".")[-1] and "m3u" not in search.split(".")[-1]: mensagemok(translate(40000),translate(40128)); sys.exit(0)
else:
try:
code = get_page_source(search)
except:
mensagemok(translate(40000),translate(40128))
sys.exit(0)
keybdois = xbmc.Keyboard("", translate(40130))
keybdois.doModal()
if (keybdois.isConfirmed()):
searchname = keybdois.getText()
if searchname=='': sys.exit(0)
encode=urllib.quote(searchname)
if os.path.exists(os.path.join(pastaperfil,"Lists")): pass
else: xbmcvfs.mkdir(os.path.join(pastaperfil,"Lists"))
txt_name = searchname + ".txt"
save(os.path.join(pastaperfil,"Lists",txt_name),search)
mensagemok(translate(40000),translate(40129))
xbmc.executebuiltin("XBMC.Container.Refresh")
"""
Remove a List
"""
def remove_list(name):
xbmcvfs.delete(name)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % (translate(40000), translate(40150), 1,addonpath+"/icon.png"))
xbmc.executebuiltin("Container.Refresh")
"""
Parsing functions
"""
def list_type(url):
ltype = url.split('.')[-1]
if 'xml' in ltype: get_groups(url)
elif 'm3u' in ltype: parse_m3u(url)
else: pass
def parse_m3u(url):
if "http" in url: content = get_page_source(url)
else: content = readfile(url)
match = re.compile('#EXTINF:.+?,(.*?)\n(.*?)(?:\r|\n)').findall(content)
for channel_name,stream_url in match:
if 'plugin://' in stream_url:
stream_url = 'XBMC.RunPlugin('+stream_url+')'
addDir(channel_name,stream_url,106,'',1,False)
elif 'sop://' in stream_url:
addDir(channel_name,stream_url,2,'',1,False)
elif ('acestream://' in stream_url) or ('.acelive' in stream_url) or ('.torrent' in stream_url):
addDir(channel_name,stream_url,1,'',1,False)
else: addLink(channel_name,stream_url,'')
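# Illustrative input (assumed, not part of the original code) that the regex above matches:
#   #EXTINF:-1,Channel One
#   sop://broker.example.org:3912/6002
# -> channel_name='Channel One', stream_url='sop://broker.example.org:3912/6002', which is then
# routed to the sopcast branch (mode 2) by the checks above.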
def get_groups(url):
from xml.etree import ElementTree
try:
print("Sopcast xml-type list detected")
if "http" in url:
source = get_page_source(url)
save(os.path.join(pastaperfil,"working.xml"),source)
workingxml = os.path.join(pastaperfil,"working.xml")
else:
workingxml = url
groups = ElementTree.parse(workingxml).findall('.//group')
unname_group_index = 1
LANGUAGE = "en"
for group in groups:
if group.attrib[LANGUAGE] == "":
group.attrib[LANGUAGE] = str(unname_group_index)
unname_group_index = unname_group_index + 1
if re.sub('c','e',LANGUAGE) == LANGUAGE:
OTHER_LANG = re.sub('e','c',LANGUAGE)
else:
OTHER_LANG = re.sub('c','e',LANGUAGE)
if LANGUAGE == "cn":
try:
if len(group.attrib[OTHER_LANG]) > 0:
group.attrib[LANGUAGE] = group.attrib[OTHER_LANG]
unname_group_index = unname_group_index - 1
except:
pass
if (group.find('.//channel')==None): continue
group_name=group.attrib[LANGUAGE]
try:
addDir_livestreams_common(group_name,url,102,addonpath + art + 'xml_list_sopcast.png',True)
except: pass
#xbmc.executebuiltin("Container.SetViewMode(51)")
except:
print("Other type of xml list")
getData(url,"")
def get_channels(name,url):
from xml.etree import ElementTree
if url.startswith('http://'):
source = get_page_source(url)
else:
source = readfile(url)
save(os.path.join(pastaperfil,"working.xml"),source)
chlist_tree = ElementTree.parse(os.path.join(pastaperfil,"working.xml"))
LANGUAGE = "en"
groups = ElementTree.parse(os.path.join(pastaperfil,"working.xml")).findall('.//group')
for group in groups:
if group.attrib[LANGUAGE].encode('utf-8') == name:
channels = group.findall('.//channel')
for channel in channels:
try:
try:
title = channel.find('.//name').attrib['en'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').attrib['cn'].encode('utf-8')
except: title = ''
if not title:
try: title = channel.find('.//name').text
except: title = ''
tipo = channel.find('.//stream_type').text
sop_address = channel.find('.//item').text
if not tipo: tipo = "N/A"
if not title: title = "N/A"
thumbnail = ""
try:
thumbnail = channel.find('.//thumbnail').text
except: pass
if sop_address:
if thumbnail == "": thumbnail = addonpath + art + 'sopcast_link.png'
try: addDir_livestreams_common('[B][COLOR orange]' + title + ' [/B][/COLOR](' + tipo +')',sop_address,2,thumbnail,False)
except:pass
else: pass
except: pass
else: pass
def getData(url,fanart):
soup = getSoup(url)
if len(soup('channels')) > 0:
channels = soup('channel')
for channel in channels:
name = channel('name')[0].string
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),103,thumbnail,fanArt,desc,genre,date,credits,True)
except:
addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
else:
addon_log('No Channels: getItems')
getItems(soup('item'),fanart)
def getChannelItems(name,url,fanart):
soup = getSoup(url)
channel_list = soup.find('channel', attrs={'name' : name.decode('utf-8')})
items = channel_list('item')
try:
fanArt = channel_list('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
for channel in channel_list('subchannel'):
name = channel('name')[0].string
try:
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
pass
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url.encode('utf-8'),3,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding directory - '+name.encode('utf-8', 'ignore'))
getItems(items,fanArt)
def getItems(items,fanart):
total = len(items)
addon_log('Total Items: %s' %total)
for item in items:
try:
name = item('title')[0].string
if name is None:
name = 'unknown?'
except:
addon_log('Name Error')
name = ''
try:
if item('epg'):
if item.epg_url:
addon_log('Get EPG Regex')
epg_url = item.epg_url.string
epg_regex = item.epg_regex.string
epg_name = get_epg(epg_url, epg_regex)
if epg_name:
name += ' - ' + epg_name
elif item('epg')[0].string > 1:
name += getepg(item('epg')[0].string)
else:
pass
except:
addon_log('EPG Error')
try:
url = []
for i in item('link'):
if not i.string == None:
url.append(i.string)
if len(url) < 1:
raise
except:
addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore'))
continue
try:
thumbnail = item('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not item('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = item('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = item('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = item('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = item('date')[0].string
if date == None:
raise
except:
date = ''
regexs = None
if item('regex'):
try:
regexs = {}
for i in item('regex'):
regexs[i('name')[0].string] = {}
regexs[i('name')[0].string]['expre'] = i('expres')[0].string
regexs[i('name')[0].string]['page'] = i('page')[0].string
try:
regexs[i('name')[0].string]['refer'] = i('referer')[0].string
except:
addon_log("Regex: -- No Referer --")
try:
regexs[i('name')[0].string]['agent'] = i('agent')[0].string
except:
addon_log("Regex: -- No User Agent --")
regexs = urllib.quote(repr(regexs))
except:
regexs = None
addon_log('regex Error: '+name.encode('utf-8', 'ignore'))
try:
if "RunPlugin" in url[0]:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],106,thumbnail,fanArt,desc,genre,"credits",date)
except:
match = re.compile("&name=(.+?)\)").findall(url[0].replace(";",""))
if match:
try:
addDir_livestreams(name.encode('utf-8', 'ignore'),removeNonAscii(url[0]),106,thumbnail,fanArt,desc,genre,credits,date)
except:
try:
addDir_livestreams(removeNonAscii(name.encode('utf-8', 'ignore')),removeNonAscii(url[0].replace(";","")),106,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
else:
if ('acestream://' in url[0]) or ('.acelive' in url[0]) or ('.torrent' in url[0]):
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],1,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
elif 'sop://' in url[0]:
if 'plugin://' not in url[0]:
addDir_livestreams(name.encode('utf-8', 'ignore'),url[0],2,thumbnail,fanArt,desc,genre,"credits",date)
else:
addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
else: addLink_livestreams(url[0].replace(';',''),name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
def removeNonAscii(s): return "".join(filter(lambda x: ord(x)<128, s))
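# Example (illustrative): removeNonAscii('Café stream') -> 'Caf stream', since the accented byte(s)
# have ord() >= 128 and are filtered out.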
def getSoup(url):
if url.startswith('http://'):
data = makeRequest(url)
else:
if xbmcvfs.exists(url):
if url.startswith("smb://") or url.startswith("nfs://"):
copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt'))
if copy:
data = open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read()
xbmcvfs.delete(os.path.join(profile, | |
this is
# what top_k does.
# ---------------------------------------------------------------------
input_vector = tf.reshape(input_tensor, [batch_h_w_size, input_area])
_, top_indices = tf.nn.top_k(input_vector, k=k) # top_k per input_area
# Now the problem is that sparse_to_dense requires a 1d vector, or a
# vector of n-d indices. So we will need to give it a 1d vector, but
# offset the indices.
# Since we have k "winners" per batch item. All k need to be offset by
# b * cells.
# ---------------------------------------------------------------------
indices_offsets = np.empty([batch_h_w_size * k], dtype=int)
for b in range(batch_size): # foreach( batch of k winners )
for y in range(h):
for x in range(w):
for n in range(k): # foreach( winner in the batch )
#index = b * k + n # each b is a vector of k indices
#offset = b * input_area # when we offset
index = b * k * w * h \
+ y * k * w \
+ x * k \
+ n
offset = b * input_area * w * h \
+ y * input_area * w \
+ x * input_area
indices_offsets[index] = offset
indices_offsets = tf.convert_to_tensor(indices_offsets, dtype=tf.int32) # convert the vector to a TF vector
# Now that we have the batch x indices vector, make it a 1d vector.
# Then add the offsets
# ---------------------------------------------------------------------
indices_vector = tf.reshape(top_indices, [batch_h_w_size * k])
indices_vector = tf.add(indices_vector, indices_offsets)
# Finally, construct the mask. We need a default vector for the output
# which is 1s for each filled element.
# ---------------------------------------------------------------------
values_vector = tf.ones(batch_h_w_size * k, dtype=tf.float32)
mask_vector_dense = tf.sparse_to_dense(indices_vector, [batch_h_w_size * input_area], values_vector, default_value=0,
validate_indices=False)
batch_mask_vector_op = tf.reshape(mask_vector_dense, [batch_size, h, w, input_area]) #, name="rank-mask")
return batch_mask_vector_op
def tf_build_varying_top_k_mask_4d_op(input_tensor, k_max, batch_size, h, w, input_area, ranking_mask):
"""Build varying top K mask."""
batch_h_w_size = batch_size * h * w
logging.debug('encoding shape = (%s, %s, %s, %s)',
batch_size, h, w, input_area)
# Find the "winners". The top k elements in each batch sample. this is
# what top_k does.
# ---------------------------------------------------------------------
input_vector = tf.reshape(input_tensor, [batch_h_w_size, input_area])
_, top_indices = tf.nn.top_k(input_vector, k=k_max) # top_k per input_area
# Now the problem is that sparse_to_dense requires a 1d vector, or a
# vector of n-d indices. So we will need to give it a 1d vector, but
# offset the indices.
# Since we have k "winners" per batch item. All k need to be offset by
# b * cells.
# ---------------------------------------------------------------------
indices_offsets = np.empty([batch_h_w_size * k_max], dtype=int)
for b in range(batch_size): # foreach( batch of k winners )
for y in range(h):
for x in range(w):
for n in range(k_max): # foreach( winner in the batch )
#index = b * k + n # each b is a vector of k indices
#offset = b * input_area # when we offset
index = b * k_max * w * h \
+ y * k_max * w \
+ x * k_max \
+ n
offset = b * input_area * w * h \
+ y * input_area * w \
+ x * input_area
indices_offsets[index] = offset
indices_offsets = tf.convert_to_tensor(indices_offsets, dtype=tf.int32) # convert the vector to a TF vector
# Now that we have the batch x indices vector, make it a 1d vector.
# Then add the offsets
# ---------------------------------------------------------------------
indices_vector = tf.reshape(top_indices, [batch_h_w_size * k_max])
indices_vector = tf.add(indices_vector, indices_offsets)
# Finally, construct the mask. We need a default vector for the output
# which is 1s for each filled element.
# ---------------------------------------------------------------------
#values_vector = tf.ones(batch_h_w_size * k_max, dtype=tf.float32)
ranking_mask_tiled = tf.tile(ranking_mask, [batch_h_w_size, 1])
values_vector = tf.reshape(ranking_mask_tiled, [batch_h_w_size * k_max])
# The values vector is fiddled to adapt to varying k
#values_vector = self._dual.get_pl('top_k_mask') # shape: [1, k_max] where 1 or 0
mask_vector_dense = tf.sparse_to_dense(indices_vector, [batch_h_w_size * input_area], values_vector, default_value=0,
validate_indices=False)
batch_mask_vector_op = tf.reshape(mask_vector_dense, [batch_size, h, w, input_area]) #, name="rank-mask")
return batch_mask_vector_op
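# Sketch of intended usage (assumed shapes and placeholder names, not part of the original code):
#   inputs = tf.placeholder(tf.float32, [batch_size, h, w, input_area])
#   ranking_mask = tf.placeholder(tf.float32, [1, k_max])  # 1s for ranks to keep, 0s otherwise
#   mask = tf_build_varying_top_k_mask_4d_op(inputs, k_max, batch_size, h, w, input_area, ranking_mask)
#   sparse_encoding = inputs * mask  # keeps at most k_max winners per (batch, y, x) location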
def tf_reduce_var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Stolen from: https://stackoverflow.com/a/43409235
# Arguments
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
# Returns
A tensor with the variance of elements of `x`.
"""
m = tf.reduce_mean(x, axis=axis, keepdims=True)
devs_squared = tf.square(x - m)
return tf.reduce_mean(devs_squared, axis=axis, keepdims=keepdims)
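# Example (illustrative): for x = tf.constant([1., 2., 3.]), the mean is 2.0 and the squared
# deviations are [1., 0., 1.], so tf_reduce_var(x) evaluates to 2/3 (the population variance).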
def tf_build_stats_summaries(tensor, name_scope):
"""Build statistical summaries for a specific variable/tensor."""
with tf.name_scope(name_scope):
m_mean = tf.reduce_mean(tensor)
m_var = tf_reduce_var(tensor)
m_min = tf.reduce_min(tensor)
m_max = tf.reduce_max(tensor)
m_sum = tf.reduce_sum(tensor)
mean_op = tf.summary.scalar('mean', m_mean)
sd_op = tf.summary.scalar('sd', tf.sqrt(m_var))
min_op = tf.summary.scalar('min', m_min)
max_op = tf.summary.scalar('max', m_max)
sum_op = tf.summary.scalar('sum', m_sum)
stats_summaries = []
stats_summaries.append(mean_op)
stats_summaries.append(sd_op)
stats_summaries.append(min_op)
stats_summaries.append(max_op)
stats_summaries.append(sum_op)
return stats_summaries
def tf_build_stats_summaries_short(tensor, name_scope):
"""
Build a shorter version of statistical summaries for a specific variable/tensor.
Mean, StdDev, Min and Max
"""
with tf.name_scope(name_scope):
m_mean = tf.reduce_mean(tensor)
m_var = tf_reduce_var(tensor)
m_min = tf.reduce_min(tensor)
m_max = tf.reduce_max(tensor)
mean_op = tf.summary.scalar('mean', m_mean)
sd_op = tf.summary.scalar('sd', tf.sqrt(m_var))
min_op = tf.summary.scalar('min', m_min)
max_op = tf.summary.scalar('max', m_max)
stats_summaries = []
stats_summaries.append(mean_op)
stats_summaries.append(sd_op)
stats_summaries.append(min_op)
stats_summaries.append(max_op)
return stats_summaries
def tf_summary_scalar(name="name", tensor=None, mute=True):
"""Convenience method for creating scalar summary with mute option."""
if not mute:
tf.summary.scalar(name=name, tensor=tensor)
else:
pass
def tf_print(var, message="", summarize=10, mute=True):
"""Convenience function for printing tensors in graph at runtime, also better formatting, can be muted."""
if not mute:
message = "\n" + message + "\n\t"
return tf.Print(var, [var], message=message, summarize=summarize)
return var
def degrade_by_mask_per_bit(input_tensor, degrade_mask=None, degrade_factor=0.5, degrade_value=0.0, label=None):
"""
Randomly degrade a 'degrade_factor' proportion of the bits of the `degrade_mask`, resetting them to 'degrade_value'.
*** Currently - It does the exact same degradation pattern for every input in the batch.***
First dimension of input must be batch.
"""
input_shape = input_tensor.shape.as_list()
input_size = np.prod(input_shape[1:])
input_tensor = tf.reshape(input_tensor, [-1, input_size])
# generate random values between 0 and 1, for all positions in the mask, then use it to select approp. proportion
random_values = tf.random_uniform(shape=[input_size])
if degrade_mask is not None:
random_values = random_values * degrade_mask
preserve_mask = tf.greater(random_values, degrade_factor)
preserve_mask = tf.to_float(preserve_mask)
degrade_vec = degrade_value * preserve_mask
degrade_vec = degrade_value - degrade_vec # preserved bits = 0, else degraded_value (flipped)
degraded = tf.multiply(input_tensor, preserve_mask) # use broadcast to element-wise multiply batch with 'preserved'
degraded = degraded + degrade_vec # set non-preserved values to the 'degrade_value'
degraded = tf.reshape(degraded, input_shape)
if label is None:
return degraded
return degraded, label
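# Illustrative behaviour (values assumed): with degrade_factor=0.5, degrade_value=0.0 and a
# degrade_mask selecting 100 bits, roughly 50 of those bits are zeroed in every sample, and the
# same random degradation pattern is reused across the whole batch (see docstring above).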
def degrade_by_mask(input_tensor, num_active, degrade_mask=None, degrade_factor=0.5, degrade_value=0.0, label=None):
"""
WARNING - this version works with a mask, but only if the minimum value is 0 (i.e. not e.g. -1).
No point updating it now.
Randomly degrade degrade_factor bits, resetting them to 'degrade_value'.
*** Currently - It does the exact same degradation pattern for every input in the batch.***
First dimension of input must be batch.
"""
dbug = False
if dbug:
num_active = int(num_active)
print("num_active = " + str(num_active))
input_shape = input_tensor.shape.as_list()
input_size = np.prod(input_shape[1:])
input_tensor = tf.reshape(input_tensor, [-1, input_size])
input_tensor = tf_print(input_tensor, "input_tensor", mute=not dbug)
# make a compact version of the active bits, and randomly knockout half the bits
preserved_compact = np.ones(num_active)
preserved_compact[:int(degrade_factor * num_active)] = 0
preserved_compact = tf.convert_to_tensor(preserved_compact, dtype=tf.float32)
preserved_compact = tf.random_shuffle(preserved_compact)
preserved_compact = tf_print(preserved_compact, "preserved_compact", mute=not dbug)
# map back to the actual positions, use to clear all not preserved (i.e. to degrade)
_, indices_of_active = tf.nn.top_k(input=degrade_mask, k=num_active, sorted=False)
indices_of_active = tf_print(indices_of_active, "indices_of_active", mute=not dbug)
preserve_mask = tf.sparse_to_dense(sparse_indices=indices_of_active,
output_shape=[input_size],
sparse_values=preserved_compact,
default_value=0,
validate_indices=False)
preserve_mask = tf_print(preserve_mask, "preserve_mask", mute=not dbug)
degrade_value_vec = np.ones(input_size)
degrade_value_vec[:] = degrade_value
degrade_value_vec = tf.convert_to_tensor(degrade_value_vec, dtype=tf.float32)
degrade_value_vec = degrade_value_vec * preserve_mask # preserved bits = degrade_value
degrade_value_vec = degrade_value - degrade_value_vec # preserved bits = 0, else degraded_value (flipped)
degrade_value_vec = tf_print(degrade_value_vec, "degrade_value_vec", mute=not dbug)
degraded = tf.multiply(input_tensor, preserve_mask) # use broadcast to element-wise multiply batch with 'preserved'
degraded = degraded + degrade_value_vec # set non-preserved values to the 'degrade_value'
degraded | |
['CD', 'CD2'], ['OE1', 'HD11'], ['OE2', 'HD12'], ['HE2', 'HD13']],
['HD21', 'HD22', 'HD23'],
{'LEU-GLH': ['disappear', 1, 0], 'GLH-LEU': ['appear', 0, 1]},
],
'GLH-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HE2', 'HE2'], ['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'GLH-LYN': ['appear', 0, 1], 'LYN-GLH': ['disappear', 1, 0]},
],
'GLH-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HE2', 'HE2'], ['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-GLH': ['disappear', 1, 0], 'GLH-LYS': ['appear', 0, 1]},
],
'GLH-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG2'], ['HG3', 'HG3'], ['HE2', 'HE2'], ['CD', 'SD'], ['OE1', 'CE'], ['OE2', 'HE1']],
['HE3'],
{'GLH-MET': ['appear', 0, 1], 'MET-GLH': ['disappear', 1, 0]},
],
'GLH-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['CE2', 'HE1', 'CZ', 'HZ'],
{'PHE-GLH': ['disappear', 1, 0], 'GLH-PHE': ['appear', 0, 1]},
],
'GLH-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['OE1', 'HD2'],
['OE2', 'HD3']],
['H', 'HE2'],
{'GLH-PRO': ['disappear', 0, 1], 'PRO-GLH': ['appear', 1, 0]},
],
'GLH-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'OE1', 'OE2', 'HE2'],
{'SER-GLH': ['appear', 1, 0], 'GLH-SER': ['disappear', 0, 1]},
],
'GLH-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['OE1', 'HG23']],
['OE2', 'HE2'],
{'THR-GLH': ['appear', 1, 0], 'GLH-THR': ['disappear', 0, 1]},
],
'GLH-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'NE1'], ['OE2', 'CE3'], ['HE2', 'CE2']],
['HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'GLH-TRP': ['appear', 0, 1], 'TRP-GLH': ['disappear', 1, 0]},
],
'GLH-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['CE2', 'HE1', 'CZ', 'OH', 'HH'],
{'TYR-GLH': ['disappear', 1, 0], 'GLH-TYR': ['appear', 0, 1]},
],
'GLH-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['OE1', 'HG21'], ['OE2', 'HG22'], ['HE2', 'HG23']],
[],
{'VAL-GLH': ['', 1, 0], 'GLH-VAL': ['', 0, 1]},
],
'GLN-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'OE1'], ['NE2', 'OE2']],
['HE21', 'HE22'],
{'GLU-GLN': ['appear', 1, 0], 'GLN-GLU': ['disappear', 0, 1]},
],
'GLN-GLY': [
['N', 'H', 'CA', 'C', 'O'],
[['HA', 'HA2'], ['CB', 'HA3']],
['HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'GLY-GLN': ['appear', 1, 0], 'GLN-GLY': ['disappear', 0, 1]},
],
'GLN-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['NE2', 'NE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['HE21', 'HD2'],
['HE22', 'HE1']],
[],
{'GLN-HID': ['', 0, 1], 'HID-GLN': ['', 1, 0]},
],
'GLN-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['NE2', 'NE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'CE1'], ['OE1', 'HD2'], ['HE21', 'HE1'],
['HE22', 'HE2']],
[],
{'GLN-HIE': ['', 0, 1], 'HIE-GLN': ['', 1, 0]},
],
'GLN-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['NE2', 'NE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['HE21', 'HD2'],
['HE22', 'HE1']],
['HE2'],
{'HIP-GLN': ['disappear', 1, 0], 'GLN-HIP': ['appear', 0, 1]},
],
'GLN-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG12'], ['HG3', 'HG13'], ['CD', 'CD1'], ['OE1', 'HG21'],
['NE2', 'HG22'], ['HE21', 'HG23'], ['HE22', 'HD11']],
['HD12', 'HD13'],
{'GLN-ILE': ['appear', 0, 1], 'ILE-GLN': ['disappear', 1, 0]},
],
'GLN-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG'], ['HG3', 'CD1'], ['CD', 'CD2'], ['OE1', 'HD11'], ['NE2', 'HD12'], ['HE21', 'HD13'],
['HE22', 'HD21']],
['HD22', 'HD23'],
{'GLN-LEU': ['appear', 0, 1], 'LEU-GLN': ['disappear', 1, 0]},
],
'GLN-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['NE2', 'HD3'], ['HE21', 'CE'], ['HE22', 'HE2']],
['HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-GLN': ['disappear', 1, 0], 'GLN-LYN': ['appear', 0, 1]},
],
'GLN-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['NE2', 'HD3'], ['HE21', 'CE'], ['HE22', 'HE2']],
['HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'GLN-LYS': ['appear', 0, 1], 'LYS-GLN': ['disappear', 1, 0]},
],
'GLN-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'SD'], ['OE1', 'CE'], ['NE2', 'HE1'], ['HE21', 'HE2'], ['HE22', 'HE3']],
[],
{'MET-GLN': ['', 1, 0], 'GLN-MET': ['', 0, 1]},
],
'GLN-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['NE2', 'HD2'], ['HE21', 'CE2'],
['HE22', 'HE1']],
['CZ', 'HE2', 'HZ'],
{'PHE-GLN': ['disappear', 1, 0], 'GLN-PHE': ['appear', 0, 1]},
],
'GLN-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['OE1', 'HD2'],
['NE2', 'HD3']],
['H', 'HE21', 'HE22'],
{'GLN-PRO': ['disappear', 0, 1], 'PRO-GLN': ['appear', 1, 0]},
],
'GLN-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'GLN-SER': ['disappear', 0, 1], 'SER-GLN': ['appear', 1, 0]},
],
'GLN-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['OE1', 'HG23']],
['NE2', 'HE21', 'HE22'],
{'THR-GLN': ['appear', 1, 0], 'GLN-THR': ['disappear', 0, 1]},
],
'GLN-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'NE1'], ['NE2', 'CE3'], ['HE21', 'CE2'],
['HE22', 'HE1']],
['HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-GLN': ['disappear', 1, 0], 'GLN-TRP': ['appear', 0, 1]},
],
'GLN-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['NE2', 'HD2'], ['HE21', 'CE2'],
['HE22', 'HE1']],
['CZ', 'HE2', 'OH', 'HH'],
{'GLN-TYR': ['appear', 0, 1], 'TYR-GLN': ['disappear', 1, 0]},
],
'GLN-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['OE1', 'HG21'], ['NE2', 'HG22'], ['HE21', 'HG23']],
['HE22'],
{'GLN-VAL': ['disappear', 0, 1], 'VAL-GLN': ['appear', 1, 0]},
],
'GLU-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
['HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2'],
{'GLU-GLY': ['disappear', 0, 1], 'GLY-GLU': ['appear', 1, 0]},
],
'GLU-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['NE2', 'HE1'],
{'GLU-HID': ['appear', 0, 1], 'HID-GLU': ['disappear', 1, 0]},
],
'GLU-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'CE1'], ['OE1', 'HD2'], ['OE2', 'NE2']],
['HE1', 'HE2'],
{'GLU-HIE': ['appear', 0, 1], 'HIE-GLU': ['disappear', 1, 0]},
],
'GLU-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['NE2', 'HE1', 'HE2'],
{'GLU-HIP': ['appear', 0, 1], 'HIP-GLU': ['disappear', 1, 0]},
],
'GLU-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG12'], ['HG3', 'HG13'], ['CD', 'CD1'], ['OE1', 'HG21'],
['OE2', 'HG22']],
['HG23', 'HD11', 'HD12', 'HD13'],
{'GLU-ILE': ['appear', 0, 1], 'ILE-GLU': ['disappear', 1, 0]},
],
'GLU-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG'], ['HG3', 'CD1'], ['CD', 'CD2'], ['OE1', 'HD11'], ['OE2', 'HD12']],
['HD13', 'HD21', 'HD22', 'HD23'],
{'GLU-LEU': ['appear', 0, 1], 'LEU-GLU': ['disappear', 1, 0]},
],
'GLU-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-GLU': ['disappear', 1, 0], 'GLU-LYN': ['appear', 0, 1]},
],
'GLU-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'GLU-LYS': ['appear', 0, 1], 'LYS-GLU': ['disappear', 1, 0]},
],
'GLU-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'SD'], ['OE1', | |
#!/usr/bin/env python
# title :devops_manager.py
# description :Creating a DevOps like on Solaris
# author :<NAME>
# release date :20181018
# update date :20191127
# version :0.9.0
# usage :python devops_manager.py
# notes :
# python_version :2.7.14
# ==============================================================================
# RAD modules
import rad.bindings.com.oracle.solaris.rad.zonemgr as zonemgr
import rad.bindings.com.oracle.solaris.rad.smf_1 as smf
import rad.bindings.com.oracle.solaris.rad.kstat_1 as kbind
import rad.client as radc
import rad.connect as radcon
import rad.auth as rada
# import rad.bindings.com.oracle.solaris.rad.zonesbridge as zbind
# General modules
import os
import re
import sys
import pwd
import time
import datetime
import json
import ldap
import socket
import getpass
import logging
import configparser
import argparse
import pickledb
from subprocess import PIPE, Popen
from multiprocessing import Process
import requests
requests.packages.urllib3.disable_warnings()
# Argument Parser Options
parser = argparse.ArgumentParser(
description='Create VM(zone) with associated /apps1 clone'
)
parser.add_argument('-e', '--env', nargs='?', default='dev', type=str,
choices=['test', 'dev', 'stage'],
help='select environment dev, test, stage(default is dev)')
parser.add_argument('-u', '--user', default=False, type=str,
required=True,
help='create zone with give login credentials.')
parser.add_argument('-p', '--password', nargs='?', default=None, type=str,
help='password for give login credentials.')
parser.add_argument('-t', '--appType', nargs='?', default=False, type=str,
const='db',
choices=['app', 'db'],
help='select zone/VM type. app or db(default is app)')
parser.add_argument('-v', '--dbVersion', nargs='?', default=False, type=int,
help='create / rotate zone using given db version (default is db_version in versions.db, managed by -n flag).')
parser.add_argument('-vl', '--dbLastVersion', nargs='?', default=False, type=int,
help='create / rotate zone using given db version (default is latest_db_version in versions.db, managed by -nl flag).')
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('-s', '--imgStat', action='store_true', default=False,
help='returns VM(zone) live information, e.g. Global Zone, IP, Port, File System, details.')
group1.add_argument('-d', '--delete', action='store_true', default=False,
help='delete VM(zone) with associated snap(s)')
group1.add_argument('-r', '--rotateImg', default=False, type=str,
choices=['app', 'db'],
help='rotate / sync update /apps1. for informix DB: refresh to latest DB copy(/ifxsrv).')
group1.add_argument('-fr', '--fullRotate', default=False, type=str,
const='fullRotate', nargs='?',
help='rotate update /apps1, informix DB, refresh all to the default copy (unless otherwise noted with -v).')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('-U', '--userID', default=None, type=str,
help='returns zones created by given user ID.')
group2.add_argument('-a', '--all', nargs='?', default=None, type=str,
const='allUsers',
help='returns zones created by all users.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-i', '--jiraid', nargs='?', metavar='', required=False, type=str,
help='associated Jira ID')
group.add_argument('-l', '--listZones', nargs='?', const='listZones',
choices=['sum', 'det', 'listZones'],
default=None, required=False, type=str,
help='list all active zones, options are summary or details(sum, det)')
group.add_argument('-n', '--setDBVers', nargs='?', metavar='', required=False, type=int,
const='0', default=None,
help='Updated App or DB version default version')
args = parser.parse_args()
os.chdir("/export/home/confmgr")
# Get date and time
dt = datetime.datetime.now()
# Set working environment(defaults to dev).
work_env = args.env
def set_logging(logName):
"""Configure / set all logging related settings"""
global logger, handler, formatter, log_output
log_dir = '/sc_profile_src/logs/'
logger = logging.getLogger(logName)
logger.setLevel(logging.DEBUG)
# create a file handler
log_output = "zone_vm.log"
handler = logging.FileHandler(log_output)
handler.setLevel(logging.DEBUG)
# create formatter
extra = {'user_name': args.user}
formatter = logging.Formatter(
'%(asctime)s:%(name)s:%(user_name)s:%(levelname)s: %(message)s'
)
handler.setFormatter(formatter)
# add handler to logger
logger.addHandler(handler)
logger = logging.LoggerAdapter(logger, extra)
def get_config(section, item=None, zone_name=None, item_type=None, dc=None):
config = configparser.ConfigParser()
config.sections()
config.read('devops_config.ini')
config_details = dict(config.items(section))
val_list = []
val_dict = {}
rec = 0
if section == "ZFS_DST_SNAP":
return str(config_details[item]) + zone_name
elif item == "DICT_LIST":
for key, val in config.items(section):
if key.split(".")[2] == "1":
rec = str(key.split(".")[1])
file_name = str(val)
elif key.split(".")[2] == "2":
dst_file = str(val)
else:
if item_type == "link":
dst_data = str(val)
else:
dst_data = str(get_file_data(dst_file, dc))
d = {"file": file_name, "src_file": str(dst_file), "dst_val": dst_data}
val_dict[rec] = d
return val_dict
elif section == "HOST_LIST":
for key, val in config.items('HOST_LIST'):
if key.split(".")[0] == "ha":
rec += 1
hahost = str(key.split(".")[0])
haval = str(val)
d = {'id': rec, hahost: haval}
else:
drhost = str(key.split(".")[0])
drval = str(val)
d[drhost] = str(drval)
val_list.append(d)
return val_list
elif item == "ITEM_LIST":
for key, val in config.items(section):
if zone_name:
val_list.append(str(val) + str(zone_name))
else:
val_list.append(str(val))
return val_list
else:
return str(config_details[item])
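# Usage examples drawn from calls later in this script (illustrative):
#   get_config('ZFSSA', 'zfspool')                        -> plain string value from the [ZFSSA] section
#   get_config('ZFS_DST_SNAP', 'zfsdstsnap', dst_zone)    -> configured prefix with the zone name appended
#   get_config('ZFS_SRC_FS', 'ITEM_LIST')                 -> list of all values in the section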
def write_file_data(src, data):
try:
f = open(src, "w")
except IOError, e:
print e
f.write(data)
f.close()
def get_file_data(src, dc=None):
try:
f = open(src, "r")
except IOError, e:
print dc.upper(), e
logger.error("%s", e)
logger.error("%s", sys.exc_type)
sys.exit(1)
data = f.read()
f.close()
return data
# Lowest / first app / db versions.
db_min_version = int(get_config('CONFIG', 'db_min_version'))
app_min_version = int(get_config('CONFIG', 'app_min_version'))
def get_app_versions(app_type=None, default_version=None, latest_version=None):
"""Get versions db/app default or latest
accepts: app type (app, db), default_version, latest_version.
returns: version number
"""
db = pickledb.load('versions.db', False)
if default_version:
try:
version = db.dget('versions', app_type + '_version')['version']
except KeyError as error:
print "No version found for %s type" % (app_type.upper())
sys.exit(0)
print version
sys.exit(0)
if latest_version:
try:
version = db.dget('versions', 'latest_' + app_type + '_version')['version']
except KeyError as error:
print "No version found for %s type" % (app_type.upper())
sys.exit(0)
print version
sys.exit(0)
def app_versions(app_type=None, new_version=None, start_version=None, latest_version=None):
"""Updates DB version number
accepts: app type (app, db), min_version, start version.
returns: single port number.
"""
db = pickledb.load('versions.db', False)
lock_file = '/var/tmp/versions_pickle_db.lock'
lock_timeout = 1
if latest_version:
app_type = 'latest_' + app_type
if os.path.exists(lock_file):
print("Lock file found will retry in 1 Second(for a max of 10 tries).")
logger.error("Lock file found will retry in 1 Second(for a max of 10 tries).")
while lock_timeout < 10:
if os.path.exists(lock_file):
time.sleep(1)
lock_timeout += 1
else:
break
write_file_data(lock_file, "pickle_db lock file")
try:
db.dgetall('versions')
except KeyError as error:
db.dcreate('versions')
db.dump()
if db.dexists('versions', app_type + '_version'):
try:
db.dget('versions', app_type + '_version')
except KeyError as error:
db.dadd('versions', (app_type + '_version', {'version': ''}))
else:
db.dadd('versions', (app_type + '_version', {'version': ''}))
db.dump()
version = db.dget('versions', app_type + '_version')['version']
if version:
next_version = version
else:
next_version = start_version
db.dadd('versions', (app_type + '_version', {'version': next_version}))
db.dump()
if new_version is None:
if os.path.exists(lock_file):
os.remove(lock_file)
return next_version
else:
if version == new_version:
print "ERROR: New %s version is the same as current.. exiting." % (app_type.upper())
if os.path.exists(lock_file):
os.remove(lock_file)
else:
if latest_version:
if new_version > next_version:
next_version = new_version
print "Successfully updated %s version from %s: %s to: %s." % (app_type.upper(), app_type + '_version', version, next_version)
else:
print "ERROR: Not updating %s since new version: %s is less then the latest version: %s" % (app_type.upper(), new_version, next_version)
next_version = next_version
else:
next_version = new_version
db.dadd('versions', (app_type + '_version', {'version': next_version}))
if os.path.exists(lock_file):
os.remove(lock_file)
db.dump()
if latest_version is None:
if version != new_version:
print "Successfully updated %s version from %s: %s to: %s." % (app_type.upper(), app_type + '_version', version, next_version)
if latest_version:
sys.exit(0)
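# Usage as seen below (illustrative): app_versions('db', None, db_min_version) returns the current
# default db version, creating the 'versions' record in versions.db on first use; passing a new
# version number as the second argument updates the stored value instead.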
if args.dbVersion:
db_version = args.dbVersion
app_version = args.dbVersion
else:
try:
# db_version = int(get_file_data('db_version.ini'))
db_version = app_versions('db', None, db_min_version)
app_version = app_versions('app', None, app_min_version)
except Exception as error:
print("ERROR: No DB config file was found, generating one with db version of 5. \nPlease make sure version is set correctly.")
# write_file_data('db_version.ini', str('5'))
# db_version = int(get_file_data('db_version.ini'))
db_version = app_versions('db', None, db_min_version)
app_version = app_versions('app', None, app_min_version)
if (args.listZones is not None or args.setDBVers is not None):
if args.delete or args.imgStat or args.rotateImg or args.fullRotate:
d = {'app_name': sys.argv[0]}
print """usage: devops_manager.py [-h] [-e [{{test,dev,stage}}]] -u USER [-p [PASSWORD]]
[-t [{{app,db}}]] [-s | -d | -r {{app,db}}]
[-U USERID | -a [ALL]]
(-i | -l [{{sum,det,listZones}}])
{app_name}: error: argument -i/--jiraid is required""".format(**d)
sys.exit(0)
else:
# Set filesystem, zone-name
dst_zone = "z-" + dt.strftime("%s") + "-" + "status"
pass
else:
if args.jiraid is None:
while True:
try:
jira_id = raw_input("Please enter a Jira ID: ")
except Exception as error:
print('ERROR', error)
if jira_id:
break
else:
jira_id = args.jiraid
if (args.appType == "db"):
# Set db zone name
# dst_zone = "z-db-" + "v" + str(db_version + 1) + "-" + dt.strftime("%s") + "-" + args.jiraid
dst_zone = "z-db-" + "v" + str(int(db_version) + 1) + "-" + dt.strftime("%s") + "-" + jira_id
elif (args.appType == "app"):
dst_zone = "z-app-" + "v" + str(int(app_version) + 1) + "-" + dt.strftime("%s") + "-" + jira_id
else:
# Set app zone name
# dst_zone = "z-" + dt.strftime("%s") + "-" + args.jiraid
dst_zone = "z-" + dt.strftime("%s") + "-" + jira_id
# ====================== ZFS related ======================
# Set system proxy
if get_config('PROXY', 'http_proxy') != "None":
os.environ['http_proxy'] = get_config('PROXY', 'http_proxy')
if get_config('PROXY', 'https_proxy') != "None":
os.environ['https_proxy'] = get_config('PROXY', 'https_proxy')
# ZFSSA API URL
url = get_config('ZFSSA', 'url')
# ZFSSA API login
zfsuser = get_config('ZFSSA', 'zfsuser')
zfspass = get_config('ZFSSA', 'zfspass')
zfsauth = (zfsuser, zfspass)
# ZFS pool
zfspool = get_config('ZFSSA', 'zfspool')
# ZFS project
zfsproject = get_config('ZFSSA', 'zfsproject')
zfsprojecttmp = zfsproject + '_' + dt.strftime("%s")
# ZFS source filesystem
# zfssrcfslist = get_config('ZFS_SRC_FS', 'ITEM_LIST')
zfssrcfslist = []
# ZFS snap filesystem
zfsdstsnap = get_config('ZFS_DST_SNAP', 'zfsdstsnap', dst_zone)
# ZFS clone filesystem(s)
# zfsdstclonelist = get_config('ZFS_DST_FS', 'ITEM_LIST', dst_zone)
zfsdstclonelist = []
# ZFS replication target
replication_target = get_config('ZFSSA', 'replication_target')
# Headers
jsonheader = {'Content-Type': 'application/json'}
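# Hedged illustration (not part of the original script): the get_config() calls above
# imply an INI-style settings file with roughly the sections and keys below; the
# concrete values are made up for illustration only.
#
# [PROXY]
# http_proxy = http://proxy.example.com:8080
# https_proxy = http://proxy.example.com:8080
#
# [ZFSSA]
# url = https://zfssa.example.com:215/api
# zfsuser = admin
# zfspass = secret
# zfspool = pool-0
# zfsproject = project1
# replication_target = zfssa-dr.example.com
#
# [ZFS_DST_SNAP]
# zfsdstsnap = <snapshot name, derived from the destination zone name>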
# ====================== Global and Zone related =================
# Global zone - will be the one with the lowest CPU load.
# Define dr state, can be
from typing import Dict
from typing import List
from botocore.paginate import Paginator
class GetClassifiers(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Glue.Client.get_classifiers`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetClassifiers>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Classifiers': [
{
'GrokClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'GrokPattern': 'string',
'CustomPatterns': 'string'
},
'XMLClassifier': {
'Name': 'string',
'Classification': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'RowTag': 'string'
},
'JsonClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'JsonPath': 'string'
},
'CsvClassifier': {
'Name': 'string',
'CreationTime': datetime(2015, 1, 1),
'LastUpdated': datetime(2015, 1, 1),
'Version': 123,
'Delimiter': 'string',
'QuoteSymbol': 'string',
'ContainsHeader': 'UNKNOWN'|'PRESENT'|'ABSENT',
'Header': [
'string',
],
'DisableValueTrimming': True|False,
'AllowSingleColumn': True|False
}
},
],
}
**Response Structure**
- *(dict) --*
- **Classifiers** *(list) --*
The requested list of classifier objects.
- *(dict) --*
Classifiers are triggered during a crawl task. A classifier checks whether a given file is in a format it can handle, and if it is, the classifier creates a schema in the form of a ``StructType`` object that matches that data format.
You can use the standard classifiers that AWS Glue supplies, or you can write your own classifiers to best categorize your data sources and specify the appropriate schemas to use for them. A classifier can be a ``grok`` classifier, an ``XML`` classifier, a ``JSON`` classifier, or a custom ``CSV`` classifier as specified in one of the fields in the ``Classifier`` object.
- **GrokClassifier** *(dict) --*
A ``GrokClassifier`` object.
- **Name** *(string) --*
The name of the classifier.
- **Classification** *(string) --*
An identifier of the data format that the classifier matches, such as Twitter, JSON, Omniture logs, and so on.
- **CreationTime** *(datetime) --*
The time this classifier was registered.
- **LastUpdated** *(datetime) --*
The time this classifier was last updated.
- **Version** *(integer) --*
The version of this classifier.
- **GrokPattern** *(string) --*
The grok pattern applied to a data store by this classifier. For more information, see built-in patterns in `Writing Custom Classifiers <http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html>`__ .
- **CustomPatterns** *(string) --*
Optional custom grok patterns defined by this classifier. For more information, see custom patterns in `Writing Custom Classifiers <http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html>`__ .
- **XMLClassifier** *(dict) --*
An ``XMLClassifier`` object.
- **Name** *(string) --*
The name of the classifier.
- **Classification** *(string) --*
An identifier of the data format that the classifier matches.
- **CreationTime** *(datetime) --*
The time this classifier was registered.
- **LastUpdated** *(datetime) --*
The time this classifier was last updated.
- **Version** *(integer) --*
The version of this classifier.
- **RowTag** *(string) --*
The XML tag designating the element that contains each record in an XML document being parsed. Note that this cannot identify a self-closing element (closed by ``/>`` ). An empty row element that contains only attributes can be parsed as long as it ends with a closing tag (for example, ``<row item_a="A" item_b="B"></row>`` is okay, but ``<row item_a="A" item_b="B" />`` is not).
- **JsonClassifier** *(dict) --*
A ``JsonClassifier`` object.
- **Name** *(string) --*
The name of the classifier.
- **CreationTime** *(datetime) --*
The time this classifier was registered.
- **LastUpdated** *(datetime) --*
The time this classifier was last updated.
- **Version** *(integer) --*
The version of this classifier.
- **JsonPath** *(string) --*
A ``JsonPath`` string defining the JSON data for the classifier to classify. AWS Glue supports a subset of JsonPath, as described in `Writing JsonPath Custom Classifiers <https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html#custom-classifier-json>`__ .
- **CsvClassifier** *(dict) --*
A ``CSVClassifier`` object.
- **Name** *(string) --*
The name of the classifier.
- **CreationTime** *(datetime) --*
The time this classifier was registered.
- **LastUpdated** *(datetime) --*
The time this classifier was last updated.
- **Version** *(integer) --*
The version of this classifier.
- **Delimiter** *(string) --*
A custom symbol to denote what separates each column entry in the row.
- **QuoteSymbol** *(string) --*
A custom symbol to denote what combines content into a single column value. Must be different from the column delimiter.
- **ContainsHeader** *(string) --*
Indicates whether the CSV file contains a header.
- **Header** *(list) --*
A list of strings representing column names.
- *(string) --*
- **DisableValueTrimming** *(boolean) --*
Specifies not to trim values before identifying the type of column values. The default value is true.
- **AllowSingleColumn** *(boolean) --*
Enables the processing of files that contain only one column.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
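# Illustrative usage sketch (not part of the generated stub): a paginator like the one
# above is normally obtained from a boto3 Glue client via get_paginator(); the names
# below follow the standard boto3 pattern and the values are placeholders.
#
# import boto3
# glue = boto3.client('glue')
# paginator = glue.get_paginator('get_classifiers')
# for page in paginator.paginate(PaginationConfig={'PageSize': 50}):
#     for classifier in page.get('Classifiers', []):
#         print(classifier)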
class GetConnections(Paginator):
def paginate(self, CatalogId: str = None, Filter: Dict = None, HidePassword: bool = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Glue.Client.get_connections`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetConnections>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
CatalogId='string',
Filter={
'MatchCriteria': [
'string',
],
'ConnectionType': 'JDBC'|'SFTP'
},
HidePassword=True|False,
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ConnectionList': [
{
'Name': 'string',
'Description': 'string',
'ConnectionType': 'JDBC'|'SFTP',
'MatchCriteria': [
'string',
],
'ConnectionProperties': {
'string': 'string'
},
'PhysicalConnectionRequirements': {
'SubnetId': 'string',
'SecurityGroupIdList': [
'string',
],
'AvailabilityZone': 'string'
},
'CreationTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'LastUpdatedBy': 'string'
},
],
}
**Response Structure**
- *(dict) --*
- **ConnectionList** *(list) --*
A list of requested connection definitions.
- *(dict) --*
Defines a connection to a data source.
- **Name** *(string) --*
The name of the connection definition.
- **Description** *(string) --*
The description of the connection.
- **ConnectionType** *(string) --*
The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
- **MatchCriteria** *(list) --*
A list of criteria that can be used in selecting this connection.
- *(string) --*
- **ConnectionProperties** *(dict) --*
These key-value pairs define parameters for the connection:
* ``HOST`` - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host.
* ``PORT`` - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections.
* ``USER_NAME`` - The name under which to log in to the database. The value string for ``USER_NAME`` is "``USERNAME`` ".
* ``PASSWORD`` - A password, if one is used, for the user name.
* ``ENCRYPTED_PASSWORD`` - When you enable connection password protection by setting ``ConnectionPasswordEncryption`` in the Data Catalog encryption settings, this field stores the encrypted password.
* ``JDBC_DRIVER_JAR_URI`` - The Amazon S3 path of the JAR file that contains the JDBC driver to use.
* ``JDBC_DRIVER_CLASS_NAME`` - The class name of the JDBC driver to use.
* ``JDBC_ENGINE`` - The name of the JDBC engine to use.
* ``JDBC_ENGINE_VERSION`` - The version of the JDBC engine to use.
* ``CONFIG_FILES`` - (Reserved for future use).
* ``INSTANCE_ID`` - The instance ID to use.
* ``JDBC_CONNECTION_URL`` - The URL for the JDBC connection.
* ``JDBC_ENFORCE_SSL`` - A Boolean string (true, false)
<filename>main.py<gh_stars>0
from __future__ import print_function
from __future__ import division
from PACK import *
from torch.optim.lr_scheduler import StepLR
from model import Graph, adjust_learning_rate
from aggregator import MeanPoolAggregator, MaxPoolAggregator, naiveMeanAggr
from utils import buildTestData, configRefineDataset, evalRefineDataset, averageQueryExpansion
from collect_graph import collectGraphTrain, collectGraphTest
from sample_neighbors import SampleNeigh, collectNeighborFeatures
import numpy as np
import math
import time
from tqdm import tqdm
import os
import visdom
import argparse
import ast
eval_func = 'evaluation/compute_ap'
retrieval_result = 'retrieval'
test_dataset = {
'oxford5k': {
'node_num': 5063,
'img_testpath': '/path/to/images',
'feature_path': '/path/to/oxford5k',
'gt_path': '/path/to/oxford5k_groundTruth',
},
'paris6k': {
'node_num': 6392,
'img_testpath': '/path/to/images',
'feature_path': '/path/to/paris6k',
'gt_path': '/path/to/paris6k_groundTruth',
},
'roxford5k':{
'node_num': 4993,
'dataset_path': '/path/to/datasets',
'feature_path': '/path/to/roxford5k',
},
'rparis6k':{
'node_num': 6322,
'dataset_path': '/path/to/datasets',
'feature_path': '/path/to/rparis6k',
},
'oxford105k': {
'node_num': 105134,
'img_testpath': '/path/to/images',
'feature_path': '/path/to/oxford105k',
'gt_path': '/path/to/oxford5k_groundTruth',
},
'paris106k': {
'node_num': 106463,
'img_testpath': 'test_par/images',
'feature_path': 'test_feature_map/paris106k',
'gt_path': '/path/to/paris6k_groundTruth',
},
'roxford105k':{
'node_num': 105064,
'dataset_path': '/path/to/datasets',
'feature_path': '/path/to/roxford105k',
},
'rparis106k':{
'node_num': 106393,
'dataset_path': '/path/to/datasets',
'feature_path': '/path/to/rparis106k',
}
}
building_oxf = buildTestData(img_path=test_dataset['oxford5k']['img_testpath'], gt_path=test_dataset['oxford5k']['gt_path'], eval_func=eval_func)
building_par = buildTestData(img_path=test_dataset['paris6k']['img_testpath'], gt_path=test_dataset['paris6k']['gt_path'], eval_func=eval_func)
building_roxf = configRefineDataset(dataset='roxford5k', dir_main=test_dataset['roxford5k']['dataset_path'])
building_rpar = configRefineDataset(dataset='rparis6k', dir_main=test_dataset['rparis6k']['dataset_path'])
building_oxf_flk = buildTestData(img_path=test_dataset['oxford105k']['img_testpath'], gt_path=test_dataset['oxford105k']['gt_path'], eval_func=eval_func)
building_par_flk = buildTestData(img_path=test_dataset['paris106k']['img_testpath'], gt_path=test_dataset['paris106k']['gt_path'], eval_func=eval_func)
building_roxf_flk = configRefineDataset(dataset='roxford5k', dir_main=test_dataset['roxford105k']['dataset_path'])
building_rpar_flk = configRefineDataset(dataset='rparis6k', dir_main=test_dataset['rparis106k']['dataset_path'])
building = {
'oxford5k': building_oxf,
'paris6k': building_par,
'roxford5k': building_roxf,
'rparis6k': building_rpar,
'oxford105k': building_oxf_flk,
'paris106k': building_par_flk,
'roxford105k': building_roxf_flk,
'rparis106k': building_rpar_flk,
}
aggregators = {
'max': MaxPoolAggregator,
'mean': MeanPoolAggregator,
}
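# Hedged usage sketch (not from the original repository): the script is driven by the
# argparse options defined under __main__ below, e.g. something like
#   python main.py --suffix _gem.npy --knn 10 --aggre_layer_num 1 \
#       --embed_dims 2048 512 --aggre_type max --train_num 33000
# The exact flag set and defaults are those of the (partially truncated) parser.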
def train(args):
if args.suffix.startswith('_rmac') or args.suffix.startswith('_gem'):
node_num, class_num = 36460, 578
else:
raise ValueError("Wrong feature type.")
assert args.aggre_layer_num + 1 == len(args.embed_dims), "layer_num does not match embed_dims."
label, feature_map, adj_sets = collectGraphTrain(node_num, class_num, args.embed_dims[0], args.knn, args.suffix)
sampler = SampleNeigh(adj_sets, knn=args.knn, hop_num=args.aggre_layer_num)
model = Graph(args.aggre_layer_num, args.embed_dims, class_num, aggregators[args.aggre_type], args.combine, args.concate, args.activate, args.residue, args.weighted, args.margin)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
scheduler = StepLR(optimizer, step_size=args.step_size, gamma=args.learning_rate_decay)
if args.use_cuda:
model.cuda()
label = label.cuda()
feature_map = feature_map.cuda()
assert args.train_num < node_num, 'train_num > node_num.'
np.random.seed(2)
rand_indices = np.random.permutation(node_num)
train_nodes = rand_indices[:args.train_num]
val_nodes = rand_indices[args.train_num:]
val_num = val_nodes.shape[0]
## sample positive and negative for rank loss
positive_nodes, negative_nodes = [], []
if args.mode == 'classRank':
for anchor in train_nodes:
for ri in rand_indices:
if ri != anchor and label[ri] == label[anchor]:
positive_nodes.append(ri)
break
while True:
rand_node = np.random.choice(rand_indices)
if label[rand_node] != label[anchor]:
negative_nodes.append(rand_node)
break
positive_nodes = np.array(positive_nodes)
negative_nodes = np.array(negative_nodes)
elif args.mode == 'classBatch':
for anchor in train_nodes:
for ri in rand_indices:
if ri != anchor and label[ri] == label[anchor]:
positive_nodes.append(ri)
break
positive_nodes = np.array(positive_nodes)
batch_size = args.batch_size
iter_num = int(math.ceil(args.train_num/batch_size))
check_loss = []
val_accuracy = []
check_step = args.check_step
train_loss = 0.0
iter_cnt = 0
for e in range(args.epoch_num):
## train
model.train()
np.random.shuffle(train_nodes)
for batch in range(iter_num):
if args.mode == 'class':
batch_nodes = train_nodes[batch*batch_size: (batch+1)*batch_size]
batch_labels = label[batch_nodes]
batch_features = collectNeighborFeatures(sampler, batch_nodes, feature_map)
optimizer.zero_grad()
loss = model.classLoss(batch_features, batch_labels)
loss.backward()
optimizer.step()
elif args.mode == 'classRank':
batch_idx = range(batch*batch_size, min((batch+1)*batch_size, args.train_num))
batch_anchors = train_nodes[batch_idx]
positive_labels = label[batch_anchors]
batch_positives = positive_nodes[batch_idx]
batch_negatives = negative_nodes[batch_idx]
negative_labels = label[batch_negatives]
batch_anchor_features = collectNeighborFeatures(sampler, batch_anchors, feature_map)
batch_positive_features = collectNeighborFeatures(sampler, batch_positives, feature_map)
batch_negative_features = collectNeighborFeatures(sampler, batch_negatives, feature_map)
optimizer.zero_grad()
loss = model.classRankLoss(batch_anchor_features, batch_positive_features, batch_negative_features, positive_labels, negative_labels, args.omega)
loss.backward()
optimizer.step()
elif args.mode == 'classBatch':
batch_idx = range(batch*batch_size, min((batch+1)*batch_size, args.train_num))
batch_anchors = train_nodes[batch_idx]
batch_labels = label[batch_anchors]
batch_positives = positive_nodes[batch_idx]
batch_anchor_features = collectNeighborFeatures(sampler, batch_anchors, feature_map)
batch_positive_features = collectNeighborFeatures(sampler, batch_positives, feature_map)
optimizer.zero_grad()
loss = model.classBatchLoss(batch_anchor_features, batch_positive_features, batch_labels, args.omega)
loss.backward()
optimizer.step()
iter_cnt += 1
train_loss += loss.cpu().item()
if iter_cnt % check_step == 0:
check_loss.append(train_loss / check_step)
print(time.strftime('%Y-%m-%d %H:%M:%S'), "epoch: {}/{}, iter:{}, loss: {:.4f}".format(e, args.epoch_num-1, iter_cnt, train_loss/check_step))
train_loss = 0.0
## validation
model.eval()
group = int(math.ceil(val_num / batch_size))
accur_cnt = 0
for batch in range(group):
batch_nodes = val_nodes[batch*batch_size: (batch+1)*batch_size]
batch_label = label[batch_nodes].squeeze().cpu().detach().numpy()
batch_neighs = sampler(batch_nodes)
batch_features = []
batch_features.append(feature_map[batch_nodes])
for neigh in batch_neighs:
batch_features.append(feature_map[neigh])
_, scores = model(batch_features)
batch_predict = np.argmax(scores.cpu().data.numpy(), axis=1)
accur_cnt += np.sum(batch_predict == batch_label)
val_accuracy.append(accur_cnt / val_num)
print(time.strftime('%Y-%m-%d %H:%M:%S'), "Epoch: {}/{}, Validation Accuracy: {:.4f}".format(e, args.epoch_num-1, accur_cnt/val_num))
print("******" * 10)
## learning rate schedule
# scheduler.step()
adjust_learning_rate(optimizer, e)
## save model
checkpoint_path = 'checkpoint_{}.pth'.format(time.strftime('%Y%m%d%H%M%S'))
torch.save({
'train_num': args.train_num,
'epoch_num': args.epoch_num,
'learning_rate': args.learning_rate,
'knn': args.knn,
'embed_dims': args.embed_dims,
'optimizer_type': 'Adam',
'optimizer_state_dict': optimizer.state_dict(),
'graph_state_dict': model.state_dict(),
},
checkpoint_path
)
vis = visdom.Visdom(env='Graph', port='8099')
vis.line(
X = np.arange(1, len(check_loss)+1, 1) * check_step,
Y = np.array(check_loss),
opts = dict(
title=time.strftime('%Y-%m-%d %H:%M:%S'),
xlabel='itr.',
ylabel='loss'
)
)
vis.line(
X = np.arange(1, len(val_accuracy)+1, 1),
Y = np.array(val_accuracy),
opts = dict(
title=time.strftime('%Y-%m-%d %H:%M:%S'),
xlabel='epoch',
ylabel='accuracy'
)
)
np.save('check_loss_{}_{}_{}_{}_{}.npy'.format(str(args.aggre_layer_num), str(args.knn), str(args.combine), args.mode, time.strftime('%Y%m%d%H%M')), np.array(check_loss))
np.save('val_accuracy_{}_{}_{}_{}_{}.npy'.format(str(args.aggre_layer_num), str(args.knn), str(args.combine), args.mode, time.strftime('%Y%m%d%H%M')), np.array(val_accuracy))
return checkpoint_path, class_num
def test(checkpoint_path, class_num, args):
model = Graph(args.aggre_layer_num, args.embed_dims, class_num, aggregators[args.aggre_type], args.combine, args.concate, args.activate, args.residue, args.weighted, args.margin)
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['graph_state_dict'])
model.eval()
if args.use_cuda:
model.cuda()
for key in building.keys():
node_num = test_dataset[key]['node_num']
old_feature_map, adj_sets, old_query = collectGraphTest(test_dataset[key]['feature_path'], node_num, args.embed_dims[0], args.knn, args.suffix)
sampler = SampleNeigh(adj_sets, knn=args.knn, hop_num=args.aggre_layer_num)
if args.use_cuda:
old_feature_map = old_feature_map.cuda()
query = old_query.cuda()
## process query
new_query = model.queryForward(query)
new_query = new_query.cpu().detach().numpy()
old_query = old_query.detach().numpy()
batch_num = int(math.ceil(node_num/args.batch_size))
test_nodes = np.arange(node_num)
new_feature_map = torch.FloatTensor()
for batch in tqdm(range(batch_num)):
batch_nodes = test_nodes[batch*args.batch_size: (batch+1)*args.batch_size]
batch_neighs = sampler(batch_nodes)
batch_features = []
batch_features.append(old_feature_map[batch_nodes])
for neigh in batch_neighs:
batch_features.append(old_feature_map[neigh])
new_feature, _ = model(batch_features)
new_feature_map = torch.cat((new_feature_map, new_feature.cpu().data), dim=0)
new_feature_map = new_feature_map.numpy()
old_feature_map = old_feature_map.cpu().numpy()
np.save('new_feature_map_{}.npy'.format(key), new_feature_map)
print(time.strftime('%Y-%m-%d %H:%M:%S'), 'eval {}'.format(key))
if not key.startswith('r'):
mAP_old = building[key].evalQuery(old_feature_map, old_query, retrieval_result)
mAP_new = building[key].evalQuery(new_feature_map, new_query, retrieval_result)
print('base feature: {}, new feature: {}'.format(old_feature_map.shape, new_feature_map.shape))
print('base mAP: {:.4f}, new mAP: {:.4f}, improve: {:.4f}'.format(mAP_old, mAP_new, mAP_new - mAP_old))
else:
print('base feature: {}, new feature: {}'.format(old_feature_map.shape, new_feature_map.shape))
print(' --base--')
mapE, mapM, mapH, mprE, mprM, mprH = evalRefineDataset(building[key]['gnd'], old_feature_map, old_query)
print('mAP E: {:.4f}, M: {:.4f}, H: {:.4f}'.format(mapE, mapM, mapH))
print('mP@k [1 5 10]\n E: {}\n M: {}\n H: {}'.format(np.around(mprE, decimals=4), np.around(mprM, decimals=4), np.around(mprH, decimals=4)))
print(' --graph--')
mapE, mapM, mapH, mprE, mprM, mprH = evalRefineDataset(building[key]['gnd'], new_feature_map, new_query)
print('mAP E: {:.4f}, M: {:.4f}, H: {:.4f}'.format(mapE, mapM, mapH))
print('mP@k [1 5 10]\n E: {}\n M: {}\n H: {}'.format(np.around(mprE, decimals=4), np.around(mprM, decimals=4), np.around(mprH, decimals=4)))
## naive mean aggregation
sampler = SampleNeigh(adj_sets, knn=args.knn, hop_num=1)
mean_feature_map = np.zeros((node_num, args.embed_dims[0]))
for batch in range(batch_num):
batch_nodes = test_nodes[batch*args.batch_size: (batch+1)*args.batch_size]
batch_neighs = sampler(batch_nodes)
batch_features = [old_feature_map[batch_nodes], old_feature_map[batch_neighs[0]]]
new_feature = naiveMeanAggr(*batch_features, weighted=args.weighted)
mean_feature_map[batch_nodes] = new_feature
if not key.startswith('r'):
mAP_new = building[key].evalQuery(mean_feature_map, old_query, retrieval_result)
print('mean mAP: {:.4f}, improve: {:.4f}'.format(mAP_new, mAP_new - mAP_old))
else:
print(' --mean--')
mapE, mapM, mapH, mprE, mprM, mprH = evalRefineDataset(building[key]['gnd'], mean_feature_map, old_query)
print('mAP E: {:.4f}, M: {:.4f}, H: {:.4f}'.format(mapE, mapM, mapH))
print('mP@k [1 5 10]\n E: {}\n M: {}\n H: {}'.format(np.around(mprE, decimals=4), np.around(mprM, decimals=4), np.around(mprH, decimals=4)))
## average query expansion
base_aqe = []
graph_aqe = []
mean_aqe = []
for m in [1,3,5,7,9]:
old_aug_query = averageQueryExpansion(old_feature_map, old_query, m)
new_aug_query = averageQueryExpansion(new_feature_map, new_query, m)
mean_aug_query = averageQueryExpansion(mean_feature_map, old_query, m)
if not key.startswith('r'):
mAP_old = building[key].evalQuery(old_feature_map, old_aug_query, retrieval_result)
base_aqe.append((m, np.around(mAP_old, decimals=4)))
mAP_new = building[key].evalQuery(new_feature_map, new_aug_query, retrieval_result)
graph_aqe.append((m, np.around(mAP_new, decimals=4)))
mAP_mean = building[key].evalQuery(mean_feature_map, mean_aug_query, retrieval_result)
mean_aqe.append((m, np.around(mAP_mean, decimals=4)))
else:
mapE, mapM, mapH, mprE, mprM, mprH = evalRefineDataset(building[key]['gnd'], old_feature_map, old_aug_query)
base_aqe.append((m, np.around(mapE, decimals=4), np.around(mapM, decimals=4), np.around(mapH, decimals=4), np.around(mprE, decimals=4), np.around(mprM, decimals=4), np.around(mprH, decimals=4)))
mapE, mapM, mapH, mprE, mprM, mprH = evalRefineDataset(building[key]['gnd'], new_feature_map, new_aug_query)
graph_aqe.append((m, np.around(mapE, decimals=4), np.around(mapM, decimals=4), np.around(mapH, decimals=4), np.around(mprE, decimals=4), np.around(mprM, decimals=4), np.around(mprH, decimals=4)))
mapE, mapM, mapH, mprE, mprM, mprH = evalRefineDataset(building[key]['gnd'], mean_feature_map, mean_aug_query)
mean_aqe.append((m, np.around(mapE, decimals=4), np.around(mapM, decimals=4), np.around(mapH, decimals=4), np.around(mprE, decimals=4), np.around(mprM, decimals=4), np.around(mprH, decimals=4)))
print(' --base+aqe--')
print(base_aqe)
print(' --graph+aqe--')
print(graph_aqe)
print(' --mean+aqe--')
print(mean_aqe)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Graph Attention Network, train on Landmark_clean, test on Oxford5k and Paris6k.')
parser.add_argument('--epoch_num', type=int, default=70, required=False, help='training epoch number.')
parser.add_argument('--step_size', type=int, default=30, required=False, help='learning rate decay step_size.')
parser.add_argument('--learning_rate_decay', type=float, default=0.5, required=False, help='learning rate decay factor.')
parser.add_argument('--batch_size', type=int, default=64, required=False, help='training batch size.')
parser.add_argument('--check_step', type=int, default=100, required=False, help='loss check step.')
parser.add_argument('--use_cuda', type=ast.literal_eval, default=True, required=False, help='whether to use gpu (True) or not (False).')
parser.add_argument('--learning_rate', type=float, default=0.001, required=False, help='training learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-6, required=False, help='weight decay (L2 regularization).')
parser.add_argument('--knn', type=int, default=10, required=False, help='number of neighbors to aggregate.')
parser.add_argument('--suffix', type=str, default='_gem.npy', required=False, help='feature file suffix, e.g. \'_rmac.npy\' or \'_gem.npy\' (must start with \'_rmac\' or \'_gem\').')
parser.add_argument('--train_num', type=int, default=33000, required=False, help='number of training nodes (must be less than 36460); the remaining nodes are used for validation.')
parser.add_argument('--aggre_layer_num', type=int, default=1, required=False, help='number of aggregator layers.')
parser.add_argument('--aggre_type', type=str, default='max', required=False, help='aggregator function.')
parser.add_argument('--embed_dims', nargs='+', type=int, required=False, help='input dim and hidden layer dims.')
parser.add_argument('--combine', type=ast.literal_eval, default=False, required=False, help='combine(True) features of
<reponame>OSLL/apagescan<filename>src/main_view.py
import time
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5.QtGui import QBrush
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import QMainWindow, QVBoxLayout, QMessageBox, QAbstractItemView, QProgressBar, QTableWidgetItem, \
QColorDialog, QTableWidget
from src.custom_signals import CustomSignals
from src.device.interaction import *
from src.qt_dialogs.dynamics_dialog import DynamicsDialog
from src.graphics.barplot_graphics import barplot_pids_pagemap
from src.device.handler import DeviceHandler
from src.device.listener import Listener
from src.graphics.pages_graphics import plot_pids_pagemap
from src.picture_viewer import PictureViewer
from src.qt_ui.mainwindow_ui import Ui_MainWindow
from src.qt_dialogs.select_dialog import SelectDialog
from src.qt_dialogs.table_dialog import TableDialog
from src.qt_dialogs.tree_dialog_facade import TreeDialogFacade
from src.qt_dialogs.tree_dialog import TreeDialog
from src.utilities import *
class MainView(QMainWindow, Listener):
"""Main application class
:ivar devices_handler: DeviceHandler instance, handles connected devices
:ivar device_interaction: DeviceInteraction instance, provides data collection from connected device
:ivar signals: custom signals for interaction with dialogs
:ivar pages_stats_graph: widget for displaying pages percentage stats graph
:ivar pages_graph: widget for displaying memory state graph
:ivar timer: app clock, used for updating once per certain period
:ivar time: app time
:ivar active_pids: list containing pids for memory inspection
:ivar active_pids_len: length of active pids list
:ivar active_state: number of current iteration of memory state's data collection
:ivar iteration_time: time to wait between iterations of data collection
:ivar total_time: limit of time that all data collection would take
:ivar is_data_collected: flag indicating if data has been collected at the moment
"""
def __init__(self):
super().__init__()
self._ui = Ui_MainWindow()
self._ui.setupUi(self)
self.devices_handler = DeviceHandler()
self.devices_handler.add_listener(self)
self.device_interaction = DeviceInteraction()
self.signals = CustomSignals()
self._ui.tableWidget.verticalHeader().setVisible(False)
self.set_buttons(pid=False, data=False, nxt=False, prev=False, play=False, cgr=False, refc=False,
highlight=False)
self.pages_stats_graph = PictureViewer(need_zoom=False, parent=self._ui.graphicsBar)
layout = QVBoxLayout(self._ui.graphicsBar)
layout.addWidget(self.pages_stats_graph)
self._ui.graphicsBar.setStyleSheet("background-color: whitesmoke")
self.pages_graph = PictureViewer(need_zoom=True, parent=self._ui.graphicsPresent)
layout = QVBoxLayout(self._ui.graphicsPresent)
layout.addWidget(self.pages_graph)
self._ui.graphicsPresent.setStyleSheet("background-color: whitesmoke")
self._ui.dataButton.clicked.connect(self.collect_data)
self._ui.pidsButton.clicked.connect(self.select_processes)
self._ui.actionShow_CGroup_tree.triggered.connect(self.select_processes_cgroup)
self._ui.devicesButton.clicked.connect(self.select_devices)
self._ui.playButton.clicked.connect(self.mem_dynamics)
self._ui.prevButton.clicked.connect(self.mem_prev_state)
self._ui.nextButton.clicked.connect(self.mem_next_state)
self._ui.highlightButton.clicked.connect(self.highlight_pids)
self._ui.refreshColorsButton.clicked.connect(self.refresh_colors)
self._ui.tableWidget.customContextMenuRequested.connect(self.call_menu)
self._ui.tableWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.timer = QtCore.QTimer()
self.time = QtCore.QTime(0, 0, 0)
self.timer.timeout.connect(self.timer_event)
self.timer.start(1000)
self.devices_handler.update()
self.active_pids = []
self.active_state = -1
self.active_pids_len = 0
self.iteration_time = 0
self.total_time = 0
self.is_data_collected = False
def call_menu(self, point):
"""Calls context menu for a chosen pid in a table
:return: None
"""
if self.active_state == -1:
return
menu = QtWidgets.QMenu()
info_action = menu.addAction("Show full information")
color_action = menu.addAction("Change color")
info_action.triggered.connect(self.show_pid_info)
color_action.triggered.connect(self.change_pid_color)
menu.exec(self._ui.tableWidget.mapToGlobal(point))
def show(self):
"""Shows application widget
:return: None
"""
super().show()
self._ui.devicesButton.clicked.emit()
def update_data(self):
"""Updates data such as pid list from a device with a small time interval
:return: None
"""
self.device_interaction.clear()
if not self.devices_handler.is_device_selected():
return
try:
self.device_interaction.collect_pid_list_all()
self.device_interaction.collect_cgroups_list()
except CalledProcessError:
self.show_msg('Error', 'Check connection with device and tool presence')
except EmptyDataError:
self.show_msg('Error', 'Pid list unavailable')
finally:
clean_tmp_data_from_device(device=self.devices_handler.current_device, remove_page_data=False)
clean_tmp_data(remove_page_data=False, remove_pictures_data=False)
def generate_pid_colors(self, update_active_pids=True):
"""Generates colors for pid's representation on a plot
:param update_active_pids: true if active pids colors has to be re-generated, false if not
:return: None
"""
for i in range(self.active_pids_len):
if self.active_pids[i]['corrupted']:
self.active_pids[i]['color'] = QColor(Qt.transparent)
elif update_active_pids:
alpha, self.active_pids[i]['color'] = self.active_pids[i]['color'].alpha(), generate_color()
self.active_pids[i]['color'].setAlpha(alpha)
self.set_table_color(i)
def display_page_data(self):
"""Plots all memory state graphics
:return: None
"""
try:
iterations = self.device_interaction.iterations
if iterations is not None:
for i in range(iterations):
self.plot_page_data(i)
except AttributeError:
pass # no data collected yet
finally:
clean_tmp_data_from_device(device=self.devices_handler.current_device, remove_pids_data=False)
clean_tmp_data(remove_pictures_data=False, remove_pids_data=False)
def refresh_colors(self):
"""Generates new colors for pids on a plot
:return: None
"""
self.generate_pid_colors()
self.display_page_data()
self.show_state(self.active_state)
def view_checked_pids(self, checked_pids):
"""Handles checked pids for further actions
:param checked_pids: list of pids
"""
self._ui.tableWidget.clear()
row = len(checked_pids)
col = 2
self._ui.tableWidget.setColumnCount(col)
self._ui.tableWidget.setRowCount(row)
self.active_pids = []
for i in range(row):
self.active_pids.append({
'pid': checked_pids[i][0],
'title': checked_pids[i][1],
'corrupted': False,
'highlighted': True,
'color': generate_color()
})
for j in range(col):
item = QTableWidgetItem()
if j == 0:
item.setCheckState(Qt.Unchecked)
item.setText(str(checked_pids[i][j]))
self._ui.tableWidget.setItem(i, j, item)
self.active_pids_len = len(self.active_pids)
self._ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self._ui.tableWidget.setSelectionMode(QAbstractItemView.SingleSelection)
self._ui.tableWidget.horizontalHeader().setStretchLastSection(True)
self._ui.tableWidget.horizontalHeader().hide()
self._ui.tableWidget.setEditTriggers(QTableWidget.NoEditTriggers)
self._ui.tableWidget.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
self.is_data_collected = False
def timer_event(self):
"""Calls update method with small time interval
:return: None
"""
self.time = self.time.addSecs(1)
self.timer.start(1000)
self.devices_handler.update()
if self.time.second() % 30 == 0:
self.react()
def show_msg(self, msg_type, msg):
"""Shows custom message
:param msg_type: type of message to be shown
:param msg: text of message
:return: None
"""
QGuiApplication.restoreOverrideCursor()
QMessageBox.about(self, msg_type, msg)
def closeEvent(self, event):
"""Responds to window close request
:param event: close request
:return: None
"""
clean_tmp_data_from_device(device=self.devices_handler.current_device)
clean_tmp_data()
event.accept()
def react(self):
"""Updates internal data - pid lists, cgroup list, connected devices
and changes GUI state according to updated data
:return: None
"""
super().react()
if not self.devices_handler.is_device_selected():
self.view_checked_pids([])
self.set_buttons(pid=False, data=False, cgr=False, refc=False, highlight=False)
self.set_buttons()
self.update_data()
self.signals.pids_changed.emit(self.device_interaction.pid_list_all)
self.signals.devices_changed.emit(self.devices_handler.devices_list)
self.signals.cgroup_changed.emit(self.device_interaction.cgroups_list)
def show_state(self, state_index):
"""Displays current memory state visualization on pages_graph
:param state_index: index of memory state to be shown
:return: None
"""
self.pages_graph.set_item(QtGui.QPixmap(f'resources/data/pictures/offsets/p{state_index}.png'))
self.pages_stats_graph.set_item(QtGui.QPixmap(f'resources/data/pictures/barplot/b{state_index}.png'))
self.set_buttons(prev=(self.active_state > 0),
nxt=(self.active_state < self.device_interaction.iterations - 1))
@pyqtSlot()
def collect_data(self):
"""Runs scripts on a device, pulls data to application, plots and shows graphs
:return: None
"""
if not self.devices_handler.is_device_selected():
self.show_msg('Error', 'No attached devices')
return
self.pages_graph.set_content(False)
self.set_buttons(data=False, refc=False, highlight=False)
progress = QProgressBar(self)
progress.move(self._ui.centralWidget.geometry().center())
dynamics_dialog = DynamicsDialog()
dynamics_dialog.signals.send_data.connect(self.set_collection_time)
dynamics_dialog.exec_()
if self.iteration_time < 0 or self.total_time <= 0:
self.show_msg('Error', 'Please enter valid iteration and total collection times')
self.set_buttons(data=True, refc=True)
return
iterations = 0
self.device_interaction.set_iterations(iterations)
QGuiApplication.setOverrideCursor(Qt.WaitCursor)
progress.show()
start_time = time.time()
cur_time = start_time
error_pids = []
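# poll the device once per iteration_time until total_time has elapsed,
# collecting one page-data snapshot per iteration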
while cur_time - start_time <= self.total_time:
try:
pid_list = [pid['pid'] for pid in self.active_pids]
error_pids = self.device_interaction.collect_page_data(cur_iteration=iterations, pid_list=pid_list)
except Exception:
self.show_msg('Error', 'Either the process is a system process (no access) or it has already completed; '
'also check tool presence')
progress.hide()
self.set_buttons(data=False)
return
iterations += 1
self.device_interaction.set_iterations(iterations)
sleep(self.iteration_time)
cur_time = time.time()
progress.setValue(int((cur_time - start_time) * 100 // self.total_time))
for index, pid in enumerate(self.active_pids):
if pid['pid'] in error_pids:
pid['corrupted'] = True
self._ui.tableWidget.item(index, 0).setCheckState(Qt.Unchecked)
# draw all data only after collection has finished, so corrupted pids are
# known before plotting and all iterations are drawn at the same time
self.generate_pid_colors(update_active_pids=True if not self.is_data_collected else False)
self.display_page_data()
progress.setValue(100)
QGuiApplication.restoreOverrideCursor()
progress.hide()
# display first state after collecting data
self.active_state = 0
self.show_state(self.active_state)
self.set_buttons(data=True, refc=True, highlight=True)
self.set_buttons(prev=(self.active_state > 0),
nxt=(self.active_state < self.device_interaction.iterations - 1),
play=True)
self.is_data_collected = True
def plot_page_data(self, iteration):
"""Plots graphics of memory state for given iteration using collected data
:param iteration: iteration of memory state to be shown
:return: None
"""
color_list = []
highlighted_pid_list = []
for pid in list(filter(lambda el: el['corrupted'] is False, self.active_pids)):
if pid['highlighted'] is True:
highlighted_pid_list.append(pid['pid'])
color_list.append(pid['color'])
page_data = (self.device_interaction.get_page_data(iteration, present=True),
self.device_interaction.get_page_data(iteration, swapped=True))
plot_pids_pagemap(page_data, color_list, iteration)
barplot_pids_pagemap(self.device_interaction.get_page_data(iteration),
highlighted_pid_list,
str(iteration))
@pyqtSlot()
def mem_dynamics(self):
"""Shows graphics of all memory state iterations with a small interval
:return: None
"""
self.active_state = 0
for i in range(self.device_interaction.iterations): # simple implementation using sleep
self._ui.nextButton.clicked.emit()
self.set_buttons(prev=False, nxt=False)
sleep(0.5)
self.set_buttons(prev=(self.active_state > 0),
nxt=(self.active_state < self.device_interaction.iterations - 1),
play=True)
@pyqtSlot()
def mem_prev_state(self):
"""Shows previous iteration of memory state
:return: None
"""
if self.active_state > 0:
self.active_state -= 1
self.show_state(self.active_state)
@pyqtSlot()
def mem_next_state(self):
"""Shows next iteration of memory state
:return: None
"""
if self.active_state < self.device_interaction.iterations - 1:
self.active_state += 1
self.show_state(self.active_state)
@pyqtSlot(object)
def set_active_pids(self, data):
"""Sets active pid list from given data and updates GUI state
:param data: list of checked (pid, process name) pairs from the selection dialog
:return: None
"""
self.view_checked_pids(data)
self.set_buttons(data=True if self.active_pids_len > 0 else False,
refc=False,
highlight=False)
@pyqtSlot(object)
def set_collection_time(self, data):
"""Sets iteration_time and total_time with a given data
:param data: tuple(iteration_time, total_time)
:return: None
"""
self.iteration_time = data[0] if data is not None else -1
self.total_time = data[1] if data is not None else -1
@pyqtSlot(object)
def set_device_data(self, data):
"""Sets connected device's serial number
:param data: [[device_number]]
:return: None
"""
if len(data) > 0:
self.devices_handler.switch(str(data[0][0]))
self.device_interaction.set_device(self.devices_handler.current_device)
self.set_buttons(pid=True, cgr=True)
self._ui.statusBar.showMessage(f'{data[0][0] if len(data) > 0 else "No"} device was connected')
self.react()
@pyqtSlot()
def select_processes(self):
"""Opens menu for pids' selection for memory inspection
:return: None
"""
pids_dialog = SelectDialog(data_list=self.device_interaction.pid_list_all,
label='Select pids',
has_select_all=True,
parent=self)
self.signals.pids_changed.connect(pids_dialog.update)
pids_dialog.signals.send_data.connect(self.set_active_pids)
pids_dialog.exec_()
@pyqtSlot()
def select_devices(self):
"""Opens menu for selection of device to collect data from
:return: None
"""
devices_dialog = SelectDialog(data_list=self.devices_handler.devices_list,
label='Select devices',
close_on_detach=False,
parent=self)
self.signals.devices_changed.connect(devices_dialog.update)
devices_dialog.signals.send_data.connect(self.set_device_data)
devices_dialog.exec_()
@pyqtSlot()
def show_pid_info(self):
"""Shows pid's page by page memory information
:return: None
"""
pid_index = self._ui.tableWidget.selectedIndexes()[0].row()
pid = self.active_pids[pid_index]['pid']
if self.active_pids[pid_index]['corrupted']:
self.show_msg('Message', 'No access to the process data')
return
try:
data_frame = self.device_interaction.get_page_data(self.active_state).get(pid)
table_dialog = TableDialog(pid, data_frame.values)
table_dialog.exec_()
except Exception:
self.show_msg('Message', 'Data hasn\'t been collected yet')
@pyqtSlot()
def select_processes_cgroup(self):
"""Shows tree of processes from chosen cgroup
:return: None
"""
tree_dialog = TreeDialog(self.device_interaction.cgroups_list)
transfer_data_facade = TreeDialogFacade(self.device_interaction, tree_dialog)
self.signals.cgroup_changed.connect(tree_dialog.update)
tree_dialog.signals.send_data.connect(self.set_active_pids)
tree_dialog.signals.cgroup_data_request.connect(transfer_data_facade.transfer_data)
tree_dialog.exec_()
def set_buttons(self, pid=None, data=None, nxt=None, prev=None, play=None, cgr=None, refc=None, highlight=None):
"""Sets GUI buttons' state - enabled of disabled, according to given flags
:return: None
"""
self._ui.pidsButton.setEnabled(pid if pid is not None else self._ui.pidsButton.isEnabled())
self._ui.dataButton.setEnabled(data
all pages into a file. Must be a path to a file, example ${OUTPUT_DIR}/har.file.
If not specified, the HAR is not recorded. Make sure to await context to close for the HAR
to be saved.
`omitContent`: Optional setting to control whether to omit request content
from the HAR. Default is False `path`: Path on the filesystem to write the HAR file to.
The ${OUTPUTDIR}/browser/ is removed at the first suite startup.
``tracing`` is file name where the [https://playwright.dev/docs/api/class-tracing/|tracing]
file is saved. Example trace.zip will be saved to ${OUTPUT_DIR}/traces.zip. Temporary trace
files will be saved to ${OUTPUT_DIR}/Browser/traces. If file name is defined, tracing will
be enabled for all pages in the context. Tracing is automatically closed when context is
closed. Temporary trace files will be automatically deleted at start of each test
execution. Trace file can be opened after the test execution by running command from
shell: `rfbrowser show-trace -F /path/to/trace.zip`.
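Example (illustrative sketch based on the description above; the file name is arbitrary):
| `New Context`    tracing=trace.zip
| `New Page`    https://example.com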
``screen``
Emulates consistent window screen size available inside web page via window.screen.
Is only used when the viewport is set.
- Example {'width': 414, 'height': 896}
``storageState`` restores the storage stated created by the `Save Storage State`
keyword. Must be the full path to the file.
Example:
| Test an iPhone
| ${device}= `Get Device` iPhone X
| `New Context` &{device} # unpacking here with &
| `New Page` http://example.com
A BrowserContext is the Playwright object that controls a single browser profile.
Within a context caches and cookies are shared. See
[https://playwright.dev/docs/api/class-browser#browsernewcontextoptions|Playwright browser.newContext]
for a list of supported options.
If there's no open Browser this keyword will open one. Does not create pages.
"""
params = locals_to_params(locals())
params = self._set_video_path(params)
params = self._set_video_size_to_int(params)
if storageState and not Path(storageState).is_file():
raise ValueError(
f"storageState argument value '{storageState}' is not file, but it should be."
)
if "httpCredentials" in params and params["httpCredentials"] is not None:
secret = self.resolve_secret(
httpCredentials, params.get("httpCredentials"), "httpCredentials"
)
params["httpCredentials"] = secret
params = convert_typed_dict(self.new_context.__annotations__, params)
if not videosPath:
params.pop("videoSize", None)
trace_file = params.pop("tracing", None)
masked_params = self._mask_credentials(params.copy())
options = json.dumps(params, default=str)
logger.info(json.dumps(masked_params, default=str))
trace_file = Path(self.outputdir, trace_file) if tracing else ""
response = self._new_context(options, hideRfBrowser, trace_file)
context_options = self._mask_credentials(json.loads(response.contextOptions))
logger.info(response.log)
logger.info(context_options)
if response.newBrowser:
logger.info(
"No browser was open. New browser was automatically opened "
"when this context is created."
)
self.context_cache.add(response.id, self._get_video_size(params))
return response.id
# Only to ease unit test mocking.
def _new_context(self, options: str, hide_rf_browser: bool, trace_file: str):
with self.playwright.grpc_channel() as stub:
response = stub.NewContext(
Request().Context(
rawOptions=options,
hideRfBrowser=hide_rf_browser,
defaultTimeout=int(self.timeout),
traceFile=str(trace_file),
)
)
return response
def _mask_credentials(self, data: dict):
if "httpCredentials" in data:
data["httpCredentials"] = "XXX"
return data
def _set_video_path(self, params: dict) -> dict:
video_path = params.get("videosPath")
record_video = params.get("recordVideo", {})
if not video_path:
video_path = record_video.get("dir")
if not video_path:
return params
if Path(video_path).is_dir():
return params
if record_video:
params["recordVideo"]["dir"] = self.video_output / video_path
else:
params["videosPath"] = self.video_output / video_path
return params
def _get_record_video_size(self, params) -> Tuple[Optional[int], Optional[int]]:
width = params.get("recordVideo", {}).get("size", {}).get("width")
height = params.get("recordVideo", {}).get("size", {}).get("height")
return int(width) if width else None, int(height) if height else None
def _set_video_size_to_int(self, params: dict) -> dict:
width, height = self._get_record_video_size(params)
if width and height:
params["recordVideo"]["size"]["width"] = width
params["recordVideo"]["size"]["height"] = height
return params
def _get_video_size(self, params: dict) -> dict:
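# precedence: explicit recordVideo size, then videoSize, then viewport size,
# falling back to 1280x720 if none of these is set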
width, height = self._get_record_video_size(params)
if width and height:
return {"width": width, "height": height}
if "videoSize" in params:
return params["videoSize"]
if "viewport" in params:
return params["viewport"]
return {"width": 1280, "height": 720}
@keyword(tags=("Setter", "BrowserControl"))
def new_page(self, url: Optional[str] = None) -> str:
"""Open a new Page. A Page is the Playwright equivalent to a tab.
See `Browser, Context and Page` for more information about Page concept.
Returns a stable identifier for the created page.
When a `New Page` is called without an open browser, `New Browser`
and `New Context` are executed with default values first.
``url`` If specified it will open the new page to the specified URL.
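Example (illustrative; `Get Title` is assumed from this library's other keywords):
| `New Page`    https://example.com
| Get Title    ==    Example Domain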
"""
with self.playwright.grpc_channel() as stub:
response = stub.NewPage(
# '' will be treated as falsy on .ts side.
# TODO: Use optional url field instead once it stabilizes at upstream
# https://stackoverflow.com/a/62566052
Request().Url(url=(url or ""), defaultTimeout=int(self.timeout))
)
logger.info(response.log)
if response.newBrowser:
logger.info(
"No browser and context was open. New browser and context was "
"automatically opened when page is created."
)
if response.newContext and not response.newBrowser:
logger.info(
"No context was open. New context was automatically opened when "
"this page is created."
)
self._embed_video(json.loads(response.video))
return response.body
def _embed_video(self, video: dict):
if not video.get("video_path"):
logger.debug("Video is not enabled.")
return
relative_path = get_link_path(video.get("video_path"), self.outputdir)
video_size = self.context_cache.get(video["contextUuid"])
video_width = video_size["width"]
video_height = video_size["height"]
video_type = relative_path.split(".")[1]
logger.info(
'</td></tr><tr><td colspan="3">'
f'<video width="{video_width}" height="{video_height}" controls>'
f'<source src="{relative_path}" type="video/{video_type}"></video>',
html=True,
)
@keyword(tags=("Getter", "BrowserControl"))
@with_assertion_polling
def get_browser_catalog(
self,
assertion_operator: Optional[AssertionOperator] = None,
assertion_expected: Any = None,
message: Optional[str] = None,
) -> Dict:
"""Returns all browsers, open contexts in them and open pages in these contexts.
See `Browser, Context and Page` for more information about these concepts.
``message`` overrides the default error message.
The data is parsed into a python list containing data representing the open Objects.
On the root level the data contains a list of open browsers.
Data can be manipulated also with ``assertion_operator`` for example to find
a specific id based on index or page title with ``then`` operator.
Return value can also be asserted against expected value.
Sample:
| [
| {
| "type": "chromium",
| "id": "browser=96207191-8147-44e7-b9ac-5e04f2709c1d",
| "contexts": [
| {
| "type": "context",
| "id": "context=525d8e5b-3c4e-4baa-bfd4-dfdbc6e86089",
| "activePage": "page=f90c97b8-eaaf-47f2-98b2-ccefd3450f12",
| "pages": [
| {
| "type": "page",
| "title": "Robocorp",
| "url": "https://robocorp.com/",
| "id": "page=7ac15782-22d2-48b4-8591-ff17663fa737",
| "timestamp": 1598607713.858
| },
| {
| "type": "page",
| "title": "Home - Reaktor",
| "url": "https://www.reaktor.com/",
| "id": "page=f90c97b8-eaaf-47f2-98b2-ccefd3450f12",
| "timestamp": 1598607714.702
| }
| ]
| }
| ],
| "activeContext": "context=525d8e5b-3c4e-4baa-bfd4-dfdbc6e86089",
| "activeBrowser": false
| },
| {
| "type": "firefox",
| "id": "browser=ad99abac-17a9-472b-ac7f-d6352630834e",
| "contexts": [
| {
| "type": "context",
| "id": "context=bc64f1ba-5e76-46dd-9735-4bd344afb9c0",
| "activePage": "page=8baf2991-5eaf-444d-a318-8045f914e96a",
| "pages": [
| {
| "type": "page",
| "title": "Software-Qualit\u00e4tssicherung und Softwaretest",
| "url": "https://www.imbus.de/",
| "id": "page=8baf2991-5eaf-444d-a318-8045f914e96a",
| "timestamp": 1598607716.828
| }
| ]
| }
| ],
| "activeContext": "context=bc64f1ba-5e76-46dd-9735-4bd344afb9c0",
| "activeBrowser": true
| }
| ]
"""
with self.playwright.grpc_channel() as stub:
response = stub.GetBrowserCatalog(Request().Empty())
parsed = json.loads(response.json)
logger.info(json.dumps(parsed))
return verify_assertion(
parsed,
assertion_operator,
assertion_expected,
"Browser Catalog",
message,
)
@keyword(tags=("Setter", "BrowserControl"))
def switch_browser(self, id: str) -> str:
"""Switches the currently active Browser to another open Browser.
Returns a stable identifier for the previous browser.
See `Browser, Context and Page` for more information about Browser and related concepts.
``id`` Id of the browser to be changed to. Starting at 0.
"""
with self.playwright.grpc_channel() as stub:
response = stub.SwitchBrowser(Request().Index(index=id))
logger.info(response.log)
return response.body
@keyword(tags=("Setter", "BrowserControl"))
def switch_context(self, id: str, browser: str = "CURRENT") -> str:
"""Switches the active BrowserContext to another open context.
Returns a stable identifier for the previous context.
See `Browser, Context and Page` for more information about Context and related concepts.
``id`` Id of the context to be changed to. Randomly generated UUID.
``browser`` < ``CURRENT`` | str> Switch context in specified browser. If value is not "CURRENT"
it should be the id of the browser in which to switch the context.
Example:
| ${first_context} = `New Context`
| `New Page` ${URL1}
| ${second_context} = `New Context`
| `New Page` ${URL2}
| `Switch Context` ${first_context} # Switches back to first context and page.
"""
with self.playwright.grpc_channel() as stub:
self._correct_browser(browser)
response = stub.SwitchContext(Request().Index(index=id))
logger.info(response.log)
return response.body
@keyword(tags=("Setter", "BrowserControl"))
def switch_page(
self, id: str, context: str = "CURRENT", browser: str = "CURRENT"
) -> str:
"""Switches the active browser page to another open page by ``id`` or ``NEW``.
Returns a stable identifier ``id`` for the previous page.
See `Browser, Context and Page` for more information about Page and related concepts.
``id`` < ``CURRENT`` | ``NEW `` | str> Id of the page to be changed to or
``NEW`` for a page opened after the
if beamline is not None and len(beamline) > 0:
try:
beam = importlib.import_module('beamlines.' + beamline + '.beam_tabs')
except Exception as e:
print (e)
msg_window('cannot import beamlines.' + beamline + ' module' )
raise
self.prep_tab = beam.PrepTab()
self.format_tab = DataTab()
self.rec_tab = RecTab()
self.display_tab = beam.DispTab()
self.tabs = [self.prep_tab, self.format_tab, self.rec_tab, self.display_tab]
else:
self.format_tab = DataTab()
self.rec_tab = RecTab()
self.tabs = [self.format_tab, self.rec_tab]
for tab in self.tabs:
self.addTab(tab, tab.name)
tab.init(self, main_win)
def notify(self, **args):
try:
self.display_tab.update_tab(**args)
self.prep_tab.update_tab(**args)
except:
pass
def clear_configs(self):
for tab in self.tabs:
tab.clear_conf()
def run_all(self):
for tab in self.tabs:
tab.run_tab()
def run_prep(self):
import beamline_preprocess as prep
# this line is passing all parameters from command line to prep script.
# if there are other parameters, one can add some code here
prep.handle_prep(self.main_win.experiment_dir, self.main_win.args)
def run_viz(self):
import beamline_visualization as dp
dp.handle_visualization(self.main_win.experiment_dir)
def load_conf(self, load_dir, need_convert):
for tab in self.tabs:
tab.load_tab(load_dir, need_convert)
def save_conf(self):
for tab in self.tabs:
tab.save_conf()
class DataTab(QWidget):
def __init__(self, parent=None):
"""
Constructor, initializes the tabs.
"""
super(DataTab, self).__init__(parent)
self.name = 'Data'
def init(self, tabs, main_window):
"""
Creates and initializes the 'data' tab.
Parameters
----------
none
Returns
-------
nothing
"""
self.tabs = tabs
self.main_win = main_window
layout = QFormLayout()
self.alien_alg = QComboBox()
self.alien_alg.addItem("none")
self.alien_alg.addItem("block aliens")
self.alien_alg.addItem("alien file")
self.alien_alg.addItem("AutoAlien1")
layout.addRow("alien algorithm", self.alien_alg)
sub_layout = QFormLayout()
self.set_alien_layout(sub_layout)
layout.addRow(sub_layout)
self.intensity_threshold = QLineEdit()
layout.addRow("Intensity Threshold", self.intensity_threshold)
self.center_shift = QLineEdit()
layout.addRow("center_shift", self.center_shift)
self.adjust_dimensions = QLineEdit()
layout.addRow("pad, crop", self.adjust_dimensions)
self.binning = QLineEdit()
layout.addRow("binning", self.binning)
cmd_layout = QHBoxLayout()
self.set_data_conf_from_button = QPushButton("Load data conf from")
self.set_data_conf_from_button.setStyleSheet("background-color:rgb(205,178,102)")
self.config_data_button = QPushButton('format data', self)
self.config_data_button.setStyleSheet("background-color:rgb(175,208,156)")
cmd_layout.addWidget(self.set_data_conf_from_button)
cmd_layout.addWidget(self.config_data_button)
layout.addRow(cmd_layout)
self.setLayout(layout)
self.alien_alg.currentIndexChanged.connect(lambda: self.set_alien_layout(sub_layout))
# this will create config_data file and run data script
# to generate data ready for reconstruction
self.config_data_button.clicked.connect(self.run_tab)
self.set_data_conf_from_button.clicked.connect(self.load_data_conf)
def clear_conf(self):
self.alien_alg.setCurrentIndex(0)
self.intensity_threshold.setText('')
self.binning.setText('')
self.center_shift.setText('')
self.adjust_dimensions.setText('')
def load_tab(self, load_from, need_convert):
"""
It verifies the given configuration file, reads the parameters, and fills out the window.
Parameters
----------
load_from : str
configuration file (config_data) or experiment directory containing conf/config_data
need_convert : bool
True if the configuration needs conversion before being read
Returns
-------
nothing
"""
if os.path.isfile(load_from):
conf = load_from
else:
conf_dir = os.path.join(load_from, 'conf')
conf = os.path.join(conf_dir, 'config_data')
if not os.path.isfile(conf):
msg_window('info: the load directory does not contain config_data file')
return
if need_convert:
conf_map = conv.get_conf_dict(conf, 'config_data')
else:
conf_map = cohere.read_config(conf)
if conf_map is None:
msg_window('please check configuration file ' + conf)
return
if 'alien_alg' not in conf_map:
conf_map['alien_alg'] = 'random'
if conf_map['alien_alg'] == 'random':
self.alien_alg.setCurrentIndex(0)
elif conf_map['alien_alg'] == 'block_aliens':
self.alien_alg.setCurrentIndex(1)
if 'aliens' in conf_map:
self.aliens.setText(str(conf_map['aliens']).replace(" ", ""))
elif conf_map['alien_alg'] == 'alien_file':
self.alien_alg.setCurrentIndex(2)
if 'alien_file' in conf_map:
self.alien_file.setText(str(conf_map['alien_file']).replace(" ", ""))
elif conf_map['alien_alg'] == 'AutoAlien1':
self.alien_alg.setCurrentIndex(3)
if 'AA1_size_threshold' in conf_map:
self.AA1_size_threshold.setText(str(conf_map['AA1_size_threshold']).replace(" ", ""))
if 'AA1_asym_threshold' in conf_map:
self.AA1_asym_threshold.setText(str(conf_map['AA1_asym_threshold']).replace(" ", ""))
if 'AA1_min_pts' in conf_map:
self.AA1_min_pts.setText(str(conf_map['AA1_min_pts']).replace(" ", ""))
if 'AA1_eps' in conf_map:
self.AA1_eps.setText(str(conf_map['AA1_eps']).replace(" ", ""))
if 'AA1_amp_threshold' in conf_map:
self.AA1_amp_threshold.setText(str(conf_map['AA1_amp_threshold']).replace(" ", ""))
if 'AA1_save_arrs' in conf_map:
self.AA1_save_arrs.setChecked(conf_map['AA1_save_arrs'])
else:
self.AA1_save_arrs.setChecked(False)
if 'AA1_expandcleanedsigma' in conf_map:
self.AA1_expandcleanedsigma.setText(str(conf_map['AA1_expandcleanedsigma']).replace(" ", ""))
if 'intensity_threshold' in conf_map:
self.intensity_threshold.setText(str(conf_map['intensity_threshold']).replace(" ", ""))
if 'binning' in conf_map:
self.binning.setText(str(conf_map['binning']).replace(" ", ""))
if 'center_shift' in conf_map:
self.center_shift.setText(str(conf_map['center_shift']).replace(" ", ""))
if 'adjust_dimensions' in conf_map:
self.adjust_dimensions.setText(str(conf_map['adjust_dimensions']).replace(" ", ""))
def get_data_config(self):
"""
It reads parameters related to formatting data from the window and adds them to dictionary.
Parameters
----------
none
Returns
-------
conf_map : dict
contains parameters read from window
"""
conf_map = {}
if self.alien_alg.currentIndex() == 1:
conf_map['alien_alg'] = 'block_aliens'
if len(self.aliens.text()) > 0:
conf_map['aliens'] = str(self.aliens.text()).replace('\n', '')
if self.alien_alg.currentIndex() == 2:
conf_map['alien_alg'] = 'alien_file'
if len(self.alien_file.text()) > 0:
conf_map['alien_file'] = str(self.alien_file.text())
elif self.alien_alg.currentIndex() == 3:
conf_map['alien_alg'] = 'AutoAlien1'
if len(self.AA1_size_threshold.text()) > 0:
conf_map['AA1_size_threshold'] = ast.literal_eval(str(self.AA1_size_threshold.text()))
if len(self.AA1_asym_threshold.text()) > 0:
conf_map['AA1_asym_threshold'] = ast.literal_eval(str(self.AA1_asym_threshold.text()))
if len(self.AA1_min_pts.text()) > 0:
conf_map['AA1_min_pts'] = ast.literal_eval(str(self.AA1_min_pts.text()))
if len(self.AA1_eps.text()) > 0:
conf_map['AA1_eps'] = ast.literal_eval(str(self.AA1_eps.text()))
if len(self.AA1_amp_threshold.text()) > 0:
conf_map['AA1_amp_threshold'] = ast.literal_eval(str(self.AA1_amp_threshold.text()))
if self.AA1_save_arrs.isChecked():
conf_map['AA1_save_arrs'] = True
if len(self.AA1_expandcleanedsigma.text()) > 0:
conf_map['AA1_expandcleanedsigma'] = ast.literal_eval(str(self.AA1_expandcleanedsigma.text()))
if len(self.intensity_threshold.text()) > 0:
conf_map['intensity_threshold'] = ast.literal_eval(str(self.intensity_threshold.text()))
if len(self.binning.text()) > 0:
conf_map['binning'] = ast.literal_eval(str(self.binning.text()).replace('\n', ''))
if len(self.center_shift.text()) > 0:
conf_map['center_shift'] = ast.literal_eval(str(self.center_shift.text()).replace('\n', ''))
if len(self.adjust_dimensions.text()) > 0:
conf_map['adjust_dimensions'] = ast.literal_eval(str(self.adjust_dimensions.text()).replace('\n', ''))
return conf_map
def set_alien_layout(self, layout):
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
if self.alien_alg.currentIndex() == 1:
self.aliens = QLineEdit()
layout.addRow("aliens", self.aliens)
elif self.alien_alg.currentIndex() == 2:
self.alien_file = QPushButton()
layout.addRow("alien file", self.alien_file)
self.alien_file.clicked.connect(self.set_alien_file)
elif self.alien_alg.currentIndex() == 3:
self.AA1_size_threshold = QLineEdit()
layout.addRow("relative size threshold", self.AA1_size_threshold)
self.AA1_asym_threshold = QLineEdit()
layout.addRow("average asymmetry threshold", self.AA1_asym_threshold)
self.AA1_min_pts = QLineEdit()
layout.addRow("min pts in cluster", self.AA1_min_pts)
self.AA1_eps = QLineEdit()
layout.addRow("cluster alg eps", self.AA1_eps)
self.AA1_amp_threshold = QLineEdit()
layout.addRow("alien alg amp threshold", self.AA1_amp_threshold)
self.AA1_save_arrs = QCheckBox()
layout.addRow("save analysis arrs", self.AA1_save_arrs)
self.AA1_save_arrs.setChecked(False)
self.AA1_expandcleanedsigma = QLineEdit()
layout.addRow("expand cleaned sigma", self.AA1_expandcleanedsigma)
def set_alien_file(self):
"""
        It displays a select dialog for the user to select an alien file.
Parameters
----------
none
Returns
-------
nothing
"""
self.alien_filename = select_file(os.getcwd())
if self.alien_filename is not None:
self.alien_file.setStyleSheet("Text-align:left")
self.alien_file.setText(self.alien_filename)
else:
self.alien_file.setText('')
def run_tab(self):
"""
        Reads the parameters needed by the format data script, saves the config_data configuration file with parameters from the window, and runs the format script.
Parameters
----------
none
Returns
-------
nothing
"""
import standard_preprocess as run_dt
if not self.main_win.is_exp_exists():
msg_window('the experiment has not been created yet')
elif not self.main_win.is_exp_set():
            msg_window('the experiment has changed, press "set experiment" button')
elif len(self.intensity_threshold.text()) == 0:
msg_window('Please, enter Intensity Threshold parameter')
else:
found_file = False
for p, d, f in os.walk(self.main_win.experiment_dir):
if 'prep_data.tif' in f:
found_file = True
break
if found_file:
conf_map = self.get_data_config()
# verify that data configuration is ok
er_msg = cohere.verify('config_data', conf_map)
if len(er_msg) > 0:
msg_window(er_msg)
return
cohere.write_config(conf_map, os.path.join(self.main_win.experiment_dir, 'conf', 'config_data'))
run_dt.format_data(self.main_win.experiment_dir)
else:
msg_window('Please, run data preparation in previous tab to activate this function')
def save_conf(self):
# save data config
conf_map = self.get_data_config()
if len(conf_map) > 0:
er_msg = cohere.verify('config_data', conf_map)
if len(er_msg) > 0:
msg_window(er_msg)
return
cohere.write_config(conf_map, os.path.join(self.main_win.experiment_dir, 'conf', 'config_data'))
def load_data_conf(self):
"""
        It displays a select dialog for the user to select a configuration file. When selected, the parameters from that file will be loaded into the window.
Parameters
----------
none
Returns
-------
nothing
"""
data_file = select_file(os.getcwd())
if data_file is not None:
            # the selected file is assumed to already be in the current configuration format
            self.load_tab(data_file, False)
else:
msg_window('please select valid data config file')
class RecTab(QWidget):
def __init__(self, parent=None):
"""
Constructor, initializes the tabs.
"""
super(RecTab, self).__init__(parent)
self.name = 'Reconstruction'
def init(self, tabs, main_window):
"""
Creates and initializes the 'reconstruction' tab.
Parameters
----------
none
Returns
-------
nothing
"""
self.tabs = tabs
self.main_win = main_window
self.old_conf_id = ''
layout = QVBoxLayout()
ulayout = QFormLayout()
mlayout = QHBoxLayout()
self.init_guess = QComboBox()
        self.init_guess.setInsertPolicy(QComboBox.InsertAtBottom)
self.init_guess.addItem("random")
self.init_guess.addItem("continue")
self.init_guess.addItem("AI algorithm")
ulayout.addRow("initial guess", self.init_guess)
sub_layout = QFormLayout()
self.set_init_guess_layout(sub_layout)
ulayout.addRow(sub_layout)
self.add_conf_button = QPushButton('add configuration', self)
ulayout.addWidget(self.add_conf_button)
self.rec_id = QComboBox()
        self.rec_id.setInsertPolicy(QComboBox.InsertAtBottom)
self.rec_id.addItem("main")
ulayout.addWidget(self.rec_id)
self.rec_id.hide()
self.proc = QComboBox()
self.proc.addItem("auto")
if sys.platform != 'darwin':
self.proc.addItem("cp")
self.proc.addItem("np")
self.proc.addItem("af")
if sys.platform != 'darwin':
self.proc.addItem("cuda")
self.proc.addItem("opencl")
self.proc.addItem("cpu")
ulayout.addRow("processor type", self.proc)
self.device = QLineEdit()
ulayout.addRow("device(s)", self.device)
self.reconstructions = QLineEdit()
ulayout.addRow("number of reconstructions", self.reconstructions)
self.alg_seq = QLineEdit()
ulayout.addRow("algorithm sequence", self.alg_seq)
# TODO add logic to show this only if HIO is in sequence
self.hio_beta = QLineEdit()
ulayout.addRow("HIO beta", self.hio_beta)
self.initial_support_area = QLineEdit()
ulayout.addRow("initial support area", self.initial_support_area)
self.rec_default_button = QPushButton('set to defaults', self)
ulayout.addWidget(self.rec_default_button)
self.features = Features(self, mlayout)
llayout = QHBoxLayout()
self.set_rec_conf_from_button = QPushButton("Load rec conf from")
self.set_rec_conf_from_button.setStyleSheet("background-color:rgb(205,178,102)")
self.config_rec_button = QPushButton('run reconstruction', self)
self.config_rec_button.setStyleSheet("background-color:rgb(175,208,156)")
llayout.addWidget(self.set_rec_conf_from_button)
llayout.addWidget(self.config_rec_button)
spacer = QSpacerItem(0, 3)
llayout.addItem(spacer)
layout.addLayout(ulayout)
layout.addLayout(mlayout)
layout.addLayout(llayout)
self.setAutoFillBackground(True)
self.setLayout(layout)
self.config_rec_button.clicked.connect(self.run_tab)
self.init_guess.currentIndexChanged.connect(lambda: self.set_init_guess_layout(sub_layout))
self.rec_default_button.clicked.connect(self.set_defaults)
self.add_conf_button.clicked.connect(self.add_rec_conf)
self.rec_id.currentIndexChanged.connect(self.toggle_conf)
self.set_rec_conf_from_button.clicked.connect(self.load_rec_conf_dir)
def load_tab(self, load_dir, need_convert):
"""
It verifies given configuration file, reads the parameters, and fills out the window.
Parameters
----------
conf : str
configuration file (config_rec)
Returns
-------
nothing
"""
conf_dir = os.path.join(load_dir, 'conf')
conf = os.path.join(conf_dir, 'config_rec')
if not os.path.isfile(conf):
msg_window('info: the load directory does not contain config_rec file')
return
        if need_convert:
            conf_map = conv.get_conf_dict(conf, 'config_rec')
            # if the experiment is set, save the converted config_rec
            try:
                cohere.write_config(conf_map, os.path.join(conf_dir, 'config_rec'))
            except:
                pass
        else:
            conf_map = cohere.read_config(conf)
        if conf_map is None:
            msg_window('please check configuration file ' + conf)
            return
        self.load_tab_common(conf_map)
def load_tab_common(self, conf_map, update_rec_choice=True):
if 'init_guess' not in conf_map:
conf_map['init_guess'] = 'random'
if conf_map['init_guess'] == 'random':
self.init_guess.setCurrentIndex(0)
elif conf_map['init_guess'] == 'continue':
self.init_guess.setCurrentIndex(1)
if 'continue_dir' in conf_map:
self.cont_dir_button.setText(str(conf_map['continue_dir']).replace(" ", ""))
elif conf_map['init_guess'] == 'AI_guess':
self.init_guess.setCurrentIndex(2)
if 'AI_trained_model' in conf_map:
self.AI_trained_model.setText(str(conf_map['AI_trained_model']).replace(" ", ""))
self.AI_trained_model.setStyleSheet("Text-align:left")
        # this will update the configuration choices by reading the configuration file names
| |
"""
VirusTotal V3 - Premium API
Difference: https://developers.virustotal.com/v3.0/reference#public-vs-premium-api
"""
import copy
from typing import Tuple, Iterable
import urllib3
from dateparser import parse
from CommonServerPython import *
# Disable insecure warnings
urllib3.disable_warnings() # pylint: disable=no-member
# region Globals
INTEGRATION_NAME = "VirusTotal"
COMMAND_PREFIX = "vt-private"
INTEGRATION_ENTRY_CONTEXT = "VirusTotal"
# endregion
# region Helper functions
def convert_epoch_to_readable(
readable_inputs: dict,
keys: Iterable[str] = ('start_date', 'creation_date', 'finish_date')
) -> dict:
"""Gets the readable input from a function and converts it times to readable outputs
Args:
readable_inputs: a readable output with epoch times in it
keys: keys to convert
Returns:
epoch time in readable output
"""
for date_ in keys:
if creation_date := readable_inputs.get(date_):
if creation_date := parse(str(creation_date)):
readable_inputs[date_] = creation_date.replace(microsecond=0).isoformat()
return readable_inputs
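# Illustrative usage of convert_epoch_to_readable (values are hypothetical; the exact output
# depends on how dateparser resolves the epoch and on the configured timezone):
#   convert_epoch_to_readable({'creation_date': 1615199101, 'status': 'finished'})
#   -> {'creation_date': '2021-03-08T10:25:01', 'status': 'finished'}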
def decrease_data_size(data: Union[dict, list]) -> Union[dict, list]:
""" Minifying data size.
Args:
data: the data object from raw response
Returns:
the same data without:
data['attributes']['last_analysis_results']
data['attributes']['pe_info']
data['attributes']['crowdsourced_ids_results']
data['attributes']['autostart_locations']
data['attributes']['sandbox_verdicts']
data['attributes']['sigma_analysis_summary']
data['attributes']['popular_threat_classification']
data['attributes']['packers']
data['attributes']['malware_config']
"""
attributes_to_remove = [
'last_analysis_results', 'pe_info', 'crowdsourced_ids_results', 'autostart_locations', 'sandbox_verdicts',
'sigma_analysis_summary', 'popular_threat_classification', 'packers', 'malware_config'
]
if isinstance(data, list):
data = [decrease_data_size(item) for item in data]
else:
for attribute in attributes_to_remove:
try:
del data['attributes'][attribute]
except KeyError:
pass
return data
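# Minimal sketch of the effect of decrease_data_size (hypothetical payload): bulky analysis
# sub-objects are stripped while the remaining attributes are kept untouched:
#   decrease_data_size({'attributes': {'md5': 'abc', 'pe_info': {'sections': []}}})
#   -> {'attributes': {'md5': 'abc'}}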
def arg_to_boolean_can_be_none(arg: Optional[Union[bool, str]]) -> Optional[bool]:
"""A wrapper of argToBool that can return None if arg is None or an empty string"""
if arg in (None, ''):
return None
else:
return argToBoolean(arg)
def get_last_run_time(params: Optional[dict] = None, last_run: Optional[dict] = None) -> datetime:
"""getting the last run time.
Args:
params: Demisto params. Must contain the `first_fetch` key.
last_run: if exists, should contain the `date` key.
Returns:
A datetime object of the fetch time
"""
if last_run is None:
last_run = demisto.getLastRun()
if params is None:
params = demisto.params()
if last_run:
last_run_date = parse(last_run.get('date'))
else: # first run
first_fetch = params.get('first_fetch')
try:
last_run_date = parse(first_fetch)
if not last_run_date:
raise TypeError
except TypeError:
raise DemistoException(f'The first fetch time is invalid "{first_fetch=}"')
return last_run_date
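# Illustrative calls of get_last_run_time (values are hypothetical):
#   get_last_run_time(params={'first_fetch': '3 days'}, last_run={})        -> datetime roughly 3 days ago
#   get_last_run_time(params={}, last_run={'date': '2021-03-08T10:25:01'})  -> that exact datetime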
def get_time_range_object(start_time: Optional[str] = None, end_time: Optional[str] = None) -> Dict[str, int]:
"""Gets start and/or end times and converts them to time_range object.
Args:
start_time: A string represents time or date range (2018-01-01-18:00Z or 3 days)
end_time: A string represents time (2018-01-01-18:00Z). if not supplied, will use the current time.
Returns:
A dictionary with start and end time.
Examples:
>>> get_time_range_object('3 days')
{'start': 1615199101, 'end': 1615458301}
>>> get_time_range_object('2018-01-01')
{'start': 1514757600, 'end': 1615465632}
>>> get_time_range_object('2018-01-01', '2020-01-01T15:00Z')
"""
time_range = {}
start_date: datetime
end_date: datetime
if start_time and end_time:
start_date = parse(start_time)
assert start_date, f'Could not parse start_date argument. {start_time=}'
end_date = parse(end_time)
assert end_date, f'Could not parse end_time argument. {end_time=}'
time_range = {
'start': int(start_date.timestamp()),
'end': int(end_date.timestamp())
}
elif start_time:
start_date, end_date = parse(start_time), datetime.now()
assert start_date, f'Could not parse start_date argument. {start_time=}'
assert end_date, f'Could not parse end_time argument. {end_time=}'
time_range = {
'start': int(start_date.timestamp()),
'end': int(end_date.timestamp())
}
elif end_time:
raise AssertionError('Found end_time argument without start_time.')
return time_range
def arg_to_number_must_int(arg: Any, arg_name: Optional[str] = None, required: bool = False):
"""Wrapper of arg_to_number that must return int
For mypy fixes.
"""
arg_num = arg_to_number(arg, arg_name, required)
assert isinstance(arg_num, int)
return arg_num
def raise_if_hash_not_valid(
file_hash: str,
valid_hashes: Union[tuple, str] = ('sha256', 'sha1', 'md5')
):
"""Raises an error if file_hash is not valid
Args:
file_hash: file hash
valid_hashes: Valid hashes to not raise if file_hash is of its type
Raises:
ValueError: if hash is not sha256, sha1, md5
Examples:
>>> raise_if_hash_not_valid('not a hash')
Traceback (most recent call last):
...
ValueError: Hash "not a hash" is not of type sha256, sha1, md5
>>> raise_if_hash_not_valid('not a hash', valid_hashes='sha1')
Traceback (most recent call last):
...
ValueError: Hash "not a hash" is not of type sha1
>>> raise_if_hash_not_valid('7e641f6b9706d860baf09fe418b6cc87')
"""
if isinstance(valid_hashes, str):
valid_hashes = tuple([valid_hashes])
if get_hash_type(file_hash) not in valid_hashes:
raise ValueError(f'Hash "{file_hash}" is not of type {", ".join(valid_hashes)}')
def get_file_name(content_disposition: str) -> str:
    """Extracts the file name from the Content-Disposition header (the value between double quotes).
Args:
content_disposition: the content disposition from download header
Returns:
the file name
"""
if match := re.search(r'"(.*?)"', content_disposition):
file_name = match.group(1)
else:
file_name = demisto.uniqueFile()
return file_name
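# Doctest-style example for get_file_name, based on the regex above (header value is hypothetical):
#   get_file_name('attachment; filename="report_2021.zip"') -> 'report_2021.zip'
# When no quoted name is present, demisto.uniqueFile() supplies a fallback name.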
# endregion
class Client(BaseClient):
def __init__(self, params: dict):
self.api_key = params['credentials']['password']
super().__init__(
'https://www.virustotal.com/api/v3/',
verify=not params.get('insecure'),
proxy=params.get('proxy'),
headers={'x-apikey': self.api_key}
)
def download_file(self, file: str) -> requests.Response:
"""Download a file.
See Also:
https://developers.virustotal.com/v3.0/reference#files-download
"""
return self._http_request(
'GET',
f'files/{file}/download',
allow_redirects=True,
resp_type='response'
)
def create_zip(self, hashes: list, password: Optional[str] = None) -> dict:
"""Creates a password-protected ZIP file containing files from VirusTotal.
See Also:
https://developers.virustotal.com/v3.0/reference#zip_files
"""
body: dict = {
'hashes': hashes
}
if password:
body['password'] = password
return self._http_request(
'POST',
'intelligence/zip_files',
json_data={'data': body}
)
def get_zip(self, zip_id: str) -> dict:
"""Retrieve information about a ZIP file
See Also:
https://developers.virustotal.com/v3.0/reference#get-zip-file
"""
return self._http_request(
'GET',
f'intelligence/zip_files/{zip_id}'
)
def download_zip(self, zip_id: str) -> requests.Response:
"""Download a ZIP file.
See Also:
https://developers.virustotal.com/v3.0/reference#zip-files-download
"""
return self._http_request(
'GET',
f'intelligence/zip_files/{zip_id}/download',
allow_redirects=True,
            resp_type='response'
)
def get_pcap_beaviour(self, report_id) -> dict:
"""Extracted PCAP from a sandbox analysis.
See Also:
https://developers.virustotal.com/v3.0/reference#file_behaviours_pcap
"""
return self._http_request(
'GET',
f'file_behaviours/{report_id}/pcap',
resp_type='content'
)
def search_intelligence(
self,
query: str,
order: Optional[str] = None,
limit: Optional[int] = None,
cursor: Optional[str] = None,
descriptors_only: Optional[bool] = None
):
"""Search for files.
See Also:
https://developers.virustotal.com/v3.0/reference#intelligence-search
"""
return self._http_request(
'GET',
'intelligence/search',
params=assign_params(
query=query,
cursor=cursor,
order=order,
limit=limit,
descriptors_only=descriptors_only
)
)
def get_livehunt_rule_by_id(self, id_: str):
"""Retrieve a VT Hunting Livehunt ruleset.
See Also:
https://developers.virustotal.com/v3.0/reference#get-hunting-ruleset.
"""
return self._http_request(
'GET',
f'intelligence/hunting_rulesets/{id_}'
)
def list_livehunt_rules(
self,
/,
limit: int,
order: Optional[str] = None,
name: Optional[str] = None,
enabled: Optional[bool] = None,
rule_content: Optional[str] = None,
) -> dict:
"""Retrieve a VT Hunting Livehunt rulesets.
See Also:
https://developers.virustotal.com/v3.0/reference#list-hunting-rulesets
"""
filter_ = ''
if name:
filter_ += f'{name} '
if rule_content:
filter_ += f'rules:{rule_content} '
if enabled is not None:
filter_ += f'enabled:{enabled} '
return self._http_request(
'GET',
'intelligence/hunting_rulesets',
params=assign_params(
filter=filter_,
limit=limit,
order=order
)
)
def create_livehunt_rule(
self,
name: str,
yara_rule: str,
enabled: Optional[bool],
limit: Optional[int],
notification_emails: Optional[List[str]]
) -> dict:
"""Create a new VT Hunting Livehunt ruleset.
See Also:
https://developers.virustotal.com/v3.0/reference#create-hunting-ruleset
"""
return self._http_request(
'POST',
'intelligence/hunting_rulesets',
json_data={
'data': {
'type': 'hunting_ruleset',
'attributes': assign_params(
name=name,
enabled=enabled,
rules=yara_rule,
limit=limit,
notification_emails=notification_emails
)
}
}
)
def update_livehunt_rule(
self,
id_: str,
yara_rule: Optional[str],
enabled: Optional[bool],
limit: Optional[int],
notification_emails: Optional[List[str]]
) -> dict:
"""Update a VT Hunting Livehunt ruleset.
See Also:
https://developers.virustotal.com/v3.0/reference#create-hunting-ruleset
"""
params = assign_params(
enabled=enabled,
rules=yara_rule,
limit=limit,
notification_emails=notification_emails
)
assert params, 'Found nothing to update'
return self._http_request(
'PATCH',
f'intelligence/hunting_rulesets/{id_}',
json_data={
'data': {
'type': 'hunting_ruleset',
'id': id_,
'attributes': params
}
}
)
def delete_livehunt_rule(self, id_: str):
"""Delete a VT Hunting Livehunt ruleset.
See Also:
https://developers.virustotal.com/v3.0/reference#delete-hunting-ruleset
"""
self._http_request(
'DELETE',
f'intelligence/hunting_rulesets/{id_}',
resp_type='text'
)
def list_notifications(
self,
from_time: Optional[datetime] = None,
to_time: Optional[datetime] = None,
tag: Optional[str] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None
) -> dict:
"""Retrieve VT Hunting Livehunt notifications.
See Also:
https://developers.virustotal.com/v3.0/reference#list-hunting-notifications
"""
time_format = "%Y-%m-%dT%H:%M:%S"
filter_ = ''
if tag:
filter_ += f'{tag} '
if from_time:
filter_ += f'date:{from_time.strftime(time_format)}+ '
if to_time:
filter_ += f'date:{to_time.strftime(time_format)}- '
return self._http_request(
'GET',
'intelligence/hunting_notifications',
params=assign_params(
filter=filter_,
limit=limit,
cursor=cursor
)
)
def list_notifications_files(self, filter_: Optional[str], cursor: Optional[str] = None,
limit: Optional[int] = None):
"""Retrieve file objects for VT Hunting Livehunt notifications.
See Also:
https://developers.virustotal.com/v3.0/reference#hunting_notification_files
"""
return self._http_request(
'GET',
'intelligence/hunting_notification_files',
params=assign_params(
filter=filter_,
limit=limit,
cursor=cursor
)
)
def list_files_by_rule(self, id_: str, cursor: Optional[str] = None, limit: Optional[int] = None) -> dict:
"""Get a VT Hunting Livehunt ruleset by hunting notification files relationship.
See Also:
https://developers.virustotal.com/v3.0/reference#get-hunting-ruleset-relationship
"""
return self._http_request(
'GET',
f'intelligence/hunting_rulesets/{id_}/relationships/hunting_notification_files',
params=assign_params(
cursor=cursor,
limit=limit
)
)
def list_retrohunt_jobs(
self,
filter_: Optional[str] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None
) -> dict:
"""Retrieve retrohunt jobs.
See Also:
https://developers.virustotal.com/v3.0/reference#get-retrohunt-jobs
"""
return self._http_request(
'GET',
'intelligence/retrohunt_jobs',
params=assign_params(
filter=filter_,
limit=limit,
cursor=cursor
)
)
def create_retrohunt_job(
self,
rules: str,
notification_email: Optional[List[str]] = None,
corpus: Optional[str] = None,
time_range: Optional[Dict[str, int]] = None
) -> dict:
"""Create a new retrohunt job.
See Also:
https://developers.virustotal.com/v3.0/reference#create-retrohunt-job
"""
return self._http_request(
'POST',
'intelligence/retrohunt_jobs',
json_data={
"data": {
"type": "retrohunt_job",
"attributes": assign_params(
rules=rules,
notification_email=notification_email,
corpus=corpus,
time_range=time_range
)
}
}
)
def get_retrohunt_job_by_id(self, id_: str) -> dict:
"""Retrieve a retrohunt job.
See Also:
https://developers.virustotal.com/v3.0/reference#get-retrohunt-job
"""
return self._http_request(
'GET',
f'intelligence/retrohunt_jobs/{id_}'
)
def get_retrohunt_job_matching_files(self, id_: str) -> dict:
"""Retrieve matches for a retrohunt job matching file relationship..
See Also:
| |
value of the `users` property.
"""
return self._users
@users.setter
def users(self, value):
"""
Sets the value of the `users` property.
"""
self._users = value
@property
def network_configuration(self):
"""
Returns the value of the `network_configuration` property.
"""
return self._network_configuration
@network_configuration.setter
def network_configuration(self, value):
"""
Sets the value of the `network_configuration` property.
"""
Struct._check_type('network_configuration', value, NetworkConfiguration)
self._network_configuration = value
@property
def regenerate_ssh_keys(self):
"""
Returns the value of the `regenerate_ssh_keys` property.
"""
return self._regenerate_ssh_keys
@regenerate_ssh_keys.setter
def regenerate_ssh_keys(self, value):
"""
Sets the value of the `regenerate_ssh_keys` property.
"""
self._regenerate_ssh_keys = value
@property
def host(self):
"""
Returns the value of the `host` property.
"""
return self._host
@host.setter
def host(self, value):
"""
Sets the value of the `host` property.
"""
Struct._check_type('host', value, Host)
self._host = value
@property
def timezone(self):
"""
Returns the value of the `timezone` property.
"""
return self._timezone
@timezone.setter
def timezone(self, value):
"""
Sets the value of the `timezone` property.
"""
self._timezone = value
@property
def files(self):
"""
Returns the value of the `files` property.
"""
return self._files
@files.setter
def files(self, value):
"""
Sets the value of the `files` property.
"""
self._files = value
@property
def authorized_keys(self):
"""
Returns the value of the `authorized_keys` property.
"""
return self._authorized_keys
@authorized_keys.setter
def authorized_keys(self, value):
"""
Sets the value of the `authorized_keys` property.
"""
self._authorized_keys = value
class Configuration(Struct):
def __init__(
self,
data=None,
type=None,
):
super(Configuration, self).__init__(
)
self.data = data
self.type = type
@property
def data(self):
"""
Returns the value of the `data` property.
"""
return self._data
@data.setter
def data(self, value):
"""
Sets the value of the `data` property.
"""
self._data = value
@property
def type(self):
"""
Returns the value of the `type` property.
"""
return self._type
@type.setter
def type(self, value):
"""
Sets the value of the `type` property.
"""
Struct._check_type('type', value, ConfigurationType)
self._type = value
class Console(Struct):
def __init__(
self,
enabled=None,
):
super(Console, self).__init__(
)
self.enabled = enabled
@property
def enabled(self):
"""
Returns the value of the `enabled` property.
"""
return self._enabled
@enabled.setter
def enabled(self, value):
"""
Sets the value of the `enabled` property.
"""
self._enabled = value
class Core(Struct):
def __init__(
self,
index=None,
socket=None,
):
super(Core, self).__init__(
)
self.index = index
self.socket = socket
@property
def index(self):
"""
Returns the value of the `index` property.
"""
return self._index
@index.setter
def index(self, value):
"""
Sets the value of the `index` property.
"""
self._index = value
@property
def socket(self):
"""
Returns the value of the `socket` property.
"""
return self._socket
@socket.setter
def socket(self, value):
"""
Sets the value of the `socket` property.
"""
self._socket = value
class Cpu(Struct):
def __init__(
self,
architecture=None,
cores=None,
cpu_tune=None,
level=None,
mode=None,
name=None,
speed=None,
topology=None,
type=None,
):
super(Cpu, self).__init__(
)
self.architecture = architecture
self.cores = cores
self.cpu_tune = cpu_tune
self.level = level
self.mode = mode
self.name = name
self.speed = speed
self.topology = topology
self.type = type
@property
def mode(self):
"""
Returns the value of the `mode` property.
"""
return self._mode
@mode.setter
def mode(self, value):
"""
Sets the value of the `mode` property.
"""
Struct._check_type('mode', value, CpuMode)
self._mode = value
@property
def level(self):
"""
Returns the value of the `level` property.
"""
return self._level
@level.setter
def level(self, value):
"""
Sets the value of the `level` property.
"""
self._level = value
@property
def cpu_tune(self):
"""
Returns the value of the `cpu_tune` property.
"""
return self._cpu_tune
@cpu_tune.setter
def cpu_tune(self, value):
"""
Sets the value of the `cpu_tune` property.
"""
Struct._check_type('cpu_tune', value, CpuTune)
self._cpu_tune = value
@property
def cores(self):
"""
Returns the value of the `cores` property.
"""
return self._cores
@cores.setter
def cores(self, value):
"""
Sets the value of the `cores` property.
"""
self._cores = value
@property
def topology(self):
"""
Returns the value of the `topology` property.
"""
return self._topology
@topology.setter
def topology(self, value):
"""
Sets the value of the `topology` property.
"""
Struct._check_type('topology', value, CpuTopology)
self._topology = value
@property
def name(self):
"""
Returns the value of the `name` property.
"""
return self._name
@name.setter
def name(self, value):
"""
Sets the value of the `name` property.
"""
self._name = value
@property
def architecture(self):
"""
Returns the value of the `architecture` property.
"""
return self._architecture
@architecture.setter
def architecture(self, value):
"""
Sets the value of the `architecture` property.
"""
Struct._check_type('architecture', value, Architecture)
self._architecture = value
@property
def type(self):
"""
Returns the value of the `type` property.
"""
return self._type
@type.setter
def type(self, value):
"""
Sets the value of the `type` property.
"""
self._type = value
@property
def speed(self):
"""
Returns the value of the `speed` property.
"""
return self._speed
@speed.setter
def speed(self, value):
"""
Sets the value of the `speed` property.
"""
self._speed = value
class CpuTopology(Struct):
def __init__(
self,
cores=None,
sockets=None,
threads=None,
):
super(CpuTopology, self).__init__(
)
self.cores = cores
self.sockets = sockets
self.threads = threads
@property
def sockets(self):
"""
Returns the value of the `sockets` property.
"""
return self._sockets
@sockets.setter
def sockets(self, value):
"""
Sets the value of the `sockets` property.
"""
self._sockets = value
@property
def cores(self):
"""
Returns the value of the `cores` property.
"""
return self._cores
@cores.setter
def cores(self, value):
"""
Sets the value of the `cores` property.
"""
self._cores = value
@property
def threads(self):
"""
Returns the value of the `threads` property.
"""
return self._threads
@threads.setter
def threads(self, value):
"""
Sets the value of the `threads` property.
"""
self._threads = value
class CpuTune(Struct):
def __init__(
self,
vcpu_pins=None,
):
super(CpuTune, self).__init__(
)
self.vcpu_pins = vcpu_pins
@property
def vcpu_pins(self):
"""
Returns the value of the `vcpu_pins` property.
"""
return self._vcpu_pins
@vcpu_pins.setter
def vcpu_pins(self, value):
"""
Sets the value of the `vcpu_pins` property.
"""
self._vcpu_pins = value
class CpuType(Struct):
def __init__(
self,
architecture=None,
level=None,
name=None,
):
super(CpuType, self).__init__(
)
self.architecture = architecture
self.level = level
self.name = name
@property
def level(self):
"""
Returns the value of the `level` property.
"""
return self._level
@level.setter
def level(self, value):
"""
Sets the value of the `level` property.
"""
self._level = value
@property
def name(self):
"""
Returns the value of the `name` property.
"""
return self._name
@name.setter
def name(self, value):
"""
Sets the value of the `name` property.
"""
self._name = value
@property
def architecture(self):
"""
Returns the value of the `architecture` property.
"""
return self._architecture
@architecture.setter
def architecture(self, value):
"""
Sets the value of the `architecture` property.
"""
Struct._check_type('architecture', value, Architecture)
self._architecture = value
class CustomProperty(Struct):
def __init__(
self,
name=None,
regexp=None,
value=None,
):
super(CustomProperty, self).__init__(
)
self.name = name
self.regexp = regexp
self.value = value
@property
def regexp(self):
"""
Returns the value of the `regexp` property.
"""
return self._regexp
@regexp.setter
def regexp(self, value):
"""
Sets the value of the `regexp` property.
"""
self._regexp = value
@property
def name(self):
"""
Returns the value of the `name` property.
"""
return self._name
@name.setter
def name(self, value):
"""
Sets the value of the `name` property.
"""
self._name = value
@property
def value(self):
"""
Returns the value of the `value` property.
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of the `value` property.
"""
self._value = value
class Display(Struct):
def __init__(
self,
address=None,
allow_override=None,
certificate=None,
copy_paste_enabled=None,
disconnect_action=None,
disconnect_action_delay=None,
file_transfer_enabled=None,
keyboard_layout=None,
monitors=None,
port=None,
proxy=None,
secure_port=None,
single_qxl_pci=None,
smartcard_enabled=None,
type=None,
):
super(Display, self).__init__(
)
self.address = address
self.allow_override = allow_override
self.certificate = certificate
self.copy_paste_enabled = copy_paste_enabled
self.disconnect_action = disconnect_action
self.disconnect_action_delay = disconnect_action_delay
self.file_transfer_enabled = file_transfer_enabled
self.keyboard_layout = keyboard_layout
self.monitors = monitors
self.port = port
self.proxy = proxy
self.secure_port = secure_port
self.single_qxl_pci = single_qxl_pci
self.smartcard_enabled = smartcard_enabled
self.type = type
@property
def address(self):
"""
Returns the value of the `address` property.
"""
return self._address
@address.setter
def address(self, value):
"""
Sets the value of the `address` property.
"""
self._address = value
@property
def allow_override(self):
"""
Returns the value of the `allow_override` property.
"""
return self._allow_override
@allow_override.setter
def allow_override(self, value):
"""
Sets the value of the `allow_override` property.
"""
self._allow_override = value
@property
def disconnect_action(self):
"""
Returns the value of the `disconnect_action` property.
"""
return self._disconnect_action
@disconnect_action.setter
def disconnect_action(self, value):
"""
Sets the value of the `disconnect_action` property.
"""
self._disconnect_action = value
@property
def single_qxl_pci(self):
"""
Returns the value of the `single_qxl_pci` property.
"""
return self._single_qxl_pci
| |
# YousefMansy/Finding-Lane-Lines - P1.py
# coding: utf-8
# [//]: # (Image References)
#
# [preview]: ./test_images_output/solidWhiteCurveExtended.png "Preview"
#
# # **Finding Lane Lines on the Road**
#
# ![preview]
#
# ## The goal of this project is to set up a pipeline that finds lane lines on the road.
#
# ## Import Packages
# In[4]:
import os
import sys
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import HTML
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
get_ipython().magic('matplotlib inline')
# ## Original helper functions
# In[5]:
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
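# Illustrative call of region_of_interest (the triangle below is hypothetical, sized for a 960x540 frame):
#   vertices = np.int32([[(0, 540), (480, 320), (960, 540)]])
#   masked_edges = region_of_interest(edges, vertices)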
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
# ## Function for calculating region of interest on an image
# In[6]:
def calculate_region_of_interest(image):
bottom_left = (0,image.shape[0])
top = (image.shape[1]/2,(image.shape[0]/2)+50)
bottom_right = (image.shape[1],image.shape[0])
return bottom_left, bottom_right, top
# ## Lane detection pipeline
# [stage1]: ./pipeline_stages/1_grayscale.png "Grayscale"
# [stage2]: ./pipeline_stages/2_smoothed.png "Smoothed"
# [stage3]: ./pipeline_stages/3_canny.png "Canny"
# [stage4]: ./pipeline_stages/4_masked.png "Masked"
# [stage5]: ./pipeline_stages/5_hough.png "Hough"
# [stage6]: ./pipeline_stages/6_weighted.png "Weighted"
#
# ##### My initial pipeline consisted of the following stages:
#
# First, the image is converted to grayscale. This makes it easier to use the same parameters to find yellow and white lanes alike.
#
# ![Gray][stage1]
#
# Second, the image is smoothed out using an 11x11 Gaussian mask. This helps get rid of noise and small details and makes it easier to identify edges.
#
# ![Smoothed][stage2]
#
# Then the Canny algorithm is used for edge detection with a low threshold of 30 and a high threshold of 60. It is recommended to keep the low-to-high threshold ratio at 1:2 or 1:3.
#
# ![Canny][stage3]
#
# Then a mask is applied to define the region of interest. This allows for more accurate results and faster processing since it restricts the search to only the region where we know the lane lines should exist. I used a mask with a triangular shape, with its top vertex just above the vertical center of the photo where the lane lines meet by a safe margin.
#
# ![Masked][stage4]
#
# Finally, the Hough line transform is used for line detection.
#
# ![Hough][stage5]
#
# Then the lines are added on top of the original picture for comparison.
#
# ![Weighted][stage6]
# In[7]:
def draw_lane_lines(image):
path = './pipeline_stages/'
#transform image to grayscale
image_gray = grayscale(image)
# plt.imsave(path+'1_grayscale.png', image_gray, cmap='gray')
#smooth image out using gaussian blur with mask size of 11x11
image_smoothed = gaussian_blur(image_gray,11)
# plt.imsave(path+'2_smoothed.png', image_smoothed, cmap='gray')
#define parameters for canny edge detection
low_threshold = 30
high_threshold = 60
#perform canny edge detection
image_canny = canny(image_smoothed, low_threshold, high_threshold)
# plt.imsave(path+'3_canny.png', image_canny, cmap='gray')
#define vertices for region of interest triangle mask
bottom_left, bottom_right, top = calculate_region_of_interest(image)
mask_vertices = np.int32([[bottom_left, top, bottom_right]])
#apply region of interest mask
image_masked = region_of_interest(image_canny, mask_vertices)
# plt.imsave(path+'4_masked.png', image_masked, cmap='gray')
#define parameters for hough lines algorithm
rho = 1
theta = np.pi/180
threshold = 10
min_line_len = 12
max_line_gap = 5
#perform hough lines transformation
image_hough = hough_lines(image_masked, rho, theta, threshold, min_line_len, max_line_gap)
# plt.imsave(path+'5_hough.png', image_hough, cmap='gray')
#draw lines on the original image
image_weighted = weighted_img(image_hough, image)
# plt.imsave(path+'6_weighted.png', image_weighted, cmap='gray')
# plt.imsave(path+'7_extended.png', image_weighted, cmap='gray')
return image_weighted
def draw_lane_lines_and_save(image, path):
#call pipeline function
image_out = draw_lane_lines(image)
#save image
plt.imsave(path, image_out)
return image_out
# ## Run lane detection pipeline on test images
# In[8]:
fig = plt.figure(figsize=(12,12))
for i,test_image in enumerate(os.listdir("test_images/")):
im = plt.imread('./test_images/'+test_image)
im_w_lines = draw_lane_lines_and_save(im, './test_images_output/'+test_image[:-4]+'.png')
fig.add_subplot(3,2,i+1)
plt.imshow(im_w_lines)
# ## Run lane detection pipeline on test videos
# In[9]:
white_output = 'test_videos_output/solidWhiteRight.mp4'
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
white_clip = VideoFileClip("test_videos/solidWhiteRight.mp4").fl_image(draw_lane_lines)
yellow_clip = VideoFileClip("test_videos/solidYellowLeft.mp4").fl_image(draw_lane_lines)
get_ipython().magic('time white_clip.write_videofile(white_output, audio=False)')
get_ipython().magic('time yellow_clip.write_videofile(yellow_output, audio=False)')
# ## Play lane detection output videos inline
# In[10]:
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# In[11]:
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
# ## Rewrite the draw_lines() function to draw a single solid line over each lane
# [stage7]: ./pipeline_stages/7_extended.png "Extended"
#
# In order to draw a single line on the left and right lanes, I modified the draw_lines() function by having it calculate the slopes and y-intercepts for all lines generated by the Hough transform within the region of interest, then separating the lines by slope; lines with slope > 0 belonging to the left lane, and lines with slope < 0 belonging to the right lane.
#
# Then I find the median slope and range for each lane, and use those values to draw a single straight line that extends from the bottom of the image to the top of the mask area (region of interest).
#
# ![Extended][stage7]
# In[12]:
class Lane:
"""
Lane class defines a lane by its slope (m) and y-intercept (c)
Points (x1,y1) and (x2,y2) are
calculated based on the region of interest passed to the constructor
"""
def __init__(self, m, c, bottom, top):
self.m = m
self.c = c
self.x1 = int((bottom-c)/m)
self.y1 = int(bottom)
self.x2 = int((top-c)/m)
self.y2 = int(top)
def draw(self, img, color, thickness):
cv2.line(img, (self.x1, self.y1), (self.x2, self.y2), color, thickness)
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
right_lane_slopes = []
left_lane_slopes = []
right_lane_intercepts = []
left_lane_intercepts = []
for line in lines:
for x1,y1,x2,y2 in line:
m | |
#!/usr/bin/env python3
from argparse import ArgumentParser
from dataclasses import dataclass, field
from itertools import islice
from typing import Dict, List, TypeVar
import ijson
import os
import re
import subprocess
import sys
import tempfile
TSC_MAX = (1 << 64) - 1
@dataclass
class DTraceArgument:
"""Describes a DTrace probe (usdt) argument"""
name: str
pos: int
type: type
@dataclass
class DTraceProbe:
"""Describes a DTrace probe (usdt) point"""
name: str
args: Dict[str, DTraceArgument]
def __init__(self, name, args):
self.name = name
self.args = {a.name: a for a in args}
@dataclass
class DTraceEntry:
"""Describes a single DTrace probe invocation"""
name: str
args: Dict[str, TypeVar('ArgumentType', str, int)]
def __init__(self, probe, args):
valmap = {int: lambda x: int(x, 16),
str: lambda x: x.strip().strip("'")}
self.name = probe.name
self.args = {}
for name, value in args.items():
arg = probe.args.get(name)
if arg is None:
raise ValueError(f'Unexpected argument: {name}')
self.args[name] = valmap[arg.type](value)
class DTrace:
"""Generates bpftrace script based on the supplied probe points, parses its
    output and stores it as a list of DTraceEntry sorted by their tsc.
"""
def __init__(self, probes, file=None):
self._avail_probes = self._list_probes()
self._probes = {p.name: p for p in probes}
self.entries = self._parse(file) if file is not None else []
# Sanitize the probe definitions
for probe in probes:
if probe.name not in self._avail_probes:
raise ValueError(f'Couldn\'t find probe: "{probe.name}"')
for arg in probe.args.values():
if arg.pos >= self._avail_probes[probe.name]:
raise ValueError('Invalid probe argument position')
if arg.type not in (int, str):
raise ValueError('Invalid argument type')
def _parse(self, file):
regex = re.compile(r'(\w+): (.*)')
entries = []
for line in file.readlines():
match = regex.match(line)
if match is None:
continue
name, args = match.groups()
probe = self._probes.get(name)
# Skip the line if we don't recognize the probe name
if probe is None:
continue
entries.append(DTraceEntry(probe, args=dict(a.strip().split('=')
for a in args.split(','))))
entries.sort(key=lambda e: e.args['tsc'])
return entries
def _list_probes(self):
files = subprocess.check_output(['git', 'ls-files', '*.[ch]',
':!:include/spdk_internal/usdt.h'])
files = filter(lambda f: len(f) > 0, str(files, 'ascii').split('\n'))
regex = re.compile(r'SPDK_DTRACE_PROBE([0-9]*)\((\w+)')
probes = {}
for fname in files:
with open(fname, 'r') as file:
for match in regex.finditer(file.read()):
nargs, name = match.group(1), match.group(2)
nargs = int(nargs) if len(nargs) > 0 else 0
# Add one to accommodate for the tsc being the first arg
probes[name] = nargs + 1
return probes
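    # Illustrative mapping (the SPDK source form is assumed): a line such as
    #   SPDK_DTRACE_PROBE2(nvmf_poll_group_add_qpair, qpair, thread)
    # would be recorded here as probes['nvmf_poll_group_add_qpair'] = 3,
    # i.e. the two declared arguments plus the implicit leading tsc.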
def _gen_usdt(self, probe):
usdt = (f'usdt:__EXE__:{probe.name} {{' +
f'printf("{probe.name}: ')
args = probe.args
if len(args) > 0:
argtype = {int: '0x%lx', str: '\'%s\''}
argcast = {int: lambda x: x, str: lambda x: f'str({x})'}
argstr = [f'{a.name}={argtype[a.type]}' for a in args.values()]
argval = [f'{argcast[a.type](f"arg{a.pos}")}' for a in args.values()]
usdt += ', '.join(argstr) + '\\n", ' + ', '.join(argval)
else:
usdt += '\\n"'
usdt += ');}'
return usdt
def generate(self):
return '\n'.join([self._gen_usdt(p) for p in self._probes.values()])
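    # Sketch of what generate() emits for the nvmf_poll_group_add_qpair probe defined in
    # build_dtrace() below (all three arguments are ints, so each is formatted as 0x%lx):
    #   usdt:__EXE__:nvmf_poll_group_add_qpair {printf("nvmf_poll_group_add_qpair: tsc=0x%lx, qpair=0x%lx, thread=0x%lx\n", arg0, arg1, arg2);}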
def record(self, pid):
with tempfile.NamedTemporaryFile(mode='w+') as script:
script.write(self.generate())
script.flush()
try:
subprocess.run([f'{os.path.dirname(__file__)}/../bpftrace.sh',
f'{pid}', f'{script.name}'])
except KeyboardInterrupt:
pass
@dataclass
class TracepointArgument:
"""Describes an SPDK tracepoint argument"""
TYPE_INT = 0
TYPE_PTR = 1
TYPE_STR = 2
name: str
argtype: int
@dataclass
class Tracepoint:
"""Describes an SPDK tracepoint, equivalent to struct spdk_trace_tpoint"""
name: str
id: int
new_object: bool
object_type: int
owner_type: int
args: List[TracepointArgument]
@dataclass
class TraceEntry:
"""Describes an SPDK tracepoint entry, equivalent to struct spdk_trace_entry"""
lcore: int
tpoint: Tracepoint
tsc: int
poller: str
size: int
object_id: str
object_ptr: int
time: int
args: Dict[str, TypeVar('ArgumentType', str, int)]
class TraceProvider:
"""Defines interface for objects providing traces and tracepoint definitions"""
def tpoints(self):
"""Returns tracepoint definitions as a dict of (tracepoint_name, tracepoint)"""
raise NotImplementedError()
def entries(self):
"""Generator returning subsequent trace entries"""
raise NotImplementedError()
def tsc_rate(self):
"""Returns the TSC rate that was in place when traces were collected"""
raise NotImplementedError()
class JsonProvider(TraceProvider):
"""Trace provider based on JSON-formatted output produced by spdk_trace app"""
def __init__(self, file):
self._parser = ijson.parse(file)
self._tpoints = {}
self._parse_defs()
def _parse_tpoints(self, tpoints):
for tpoint in tpoints:
tpoint_id = tpoint['id']
self._tpoints[tpoint_id] = Tracepoint(
name=tpoint['name'], id=tpoint_id,
new_object=tpoint['new_object'], object_type=OBJECT_NONE,
owner_type=OWNER_NONE,
args=[TracepointArgument(name=a['name'],
argtype=a['type'])
for a in tpoint.get('args', [])])
def _parse_defs(self):
builder = None
for prefix, event, value in self._parser:
# If we reach entries array, there are no more tracepoint definitions
if prefix == 'entries':
break
elif prefix == 'tsc_rate':
self._tsc_rate = value
continue
if (prefix, event) == ('tpoints', 'start_array'):
builder = ijson.ObjectBuilder()
if builder is not None:
builder.event(event, value)
if (prefix, event) == ('tpoints', 'end_array'):
self._parse_tpoints(builder.value)
builder = None
def _parse_entry(self, entry):
tpoint = self._tpoints[entry['tpoint']]
obj = entry.get('object', {})
return TraceEntry(tpoint=tpoint, lcore=entry['lcore'], tsc=entry['tsc'],
size=entry.get('size'), object_id=obj.get('id'),
object_ptr=obj.get('value'), time=obj.get('time'),
poller=entry.get('poller'),
args={n.name: v for n, v in zip(tpoint.args, entry.get('args', []))})
def tsc_rate(self):
return self._tsc_rate
def tpoints(self):
return self._tpoints
def entries(self):
builder = None
for prefix, event, value in self._parser:
if (prefix, event) == ('entries.item', 'start_map'):
builder = ijson.ObjectBuilder()
if builder is not None:
builder.event(event, value)
if (prefix, event) == ('entries.item', 'end_map'):
yield self._parse_entry(builder.value)
builder = None
class Trace:
"""Stores, parses, and prints out SPDK traces"""
def __init__(self, file):
self._provider = JsonProvider(file)
self._objects = []
self._argfmt = {TracepointArgument.TYPE_PTR: lambda a: f'0x{a:x}'}
self.tpoints = self._provider.tpoints()
def _annotate_args(self, entry):
annotations = {}
for obj in self._objects:
current = obj.annotate(entry)
if current is None:
continue
annotations.update(current)
return annotations
def _format_args(self, entry):
annotations = self._annotate_args(entry)
args = []
for arg, (name, value) in zip(entry.tpoint.args, entry.args.items()):
annot = annotations.get(name)
if annot is not None:
args.append('{}({})'.format(name, ', '.join(f'{n}={v}' for n, v in annot.items())))
else:
args.append('{}: {}'.format(name, self._argfmt.get(arg.argtype,
lambda a: a)(value)))
return args
def register_object(self, obj):
self._objects.append(obj)
def print(self):
def get_us(tsc, off):
return ((tsc - off) * 10 ** 6) / self._provider.tsc_rate()
offset = None
for e in self._provider.entries():
offset = e.tsc if offset is None else offset
timestamp = get_us(e.tsc, offset)
diff = get_us(e.time, 0) if e.time is not None else None
args = ', '.join(self._format_args(e))
fields = [
f'{e.lcore:3}',
f'{timestamp:16.3f}',
f'{e.poller:3}' if e.poller is not None else ' ' * 3,
f'{e.tpoint.name:24}',
f'size: {e.size:6}' if e.size is not None else ' ' * (len('size: ') + 6),
f'id: {e.object_id:8}' if e.object_id is not None else None,
f'time: {diff:<8.3f}' if diff is not None else None,
args
]
print(' '.join([*filter(lambda f: f is not None, fields)]).rstrip())
class SPDKObject:
"""Describes a specific type of an SPDK objects (e.g. qpair, thread, etc.)"""
@dataclass
class Lifetime:
"""Describes a lifetime and properites of a particular SPDK object."""
begin: int
end: int
ptr: int
properties: dict = field(default_factory=dict)
def __init__(self, trace: Trace, tpoints: List[str]):
self.tpoints = {}
for name in tpoints:
tpoint = next((t for t in trace.tpoints.values() if t.name == name), None)
if tpoint is None:
                # Some tpoints might be undefined if configured without specific subsystems
continue
self.tpoints[tpoint.id] = tpoint
def _annotate(self, entry: TraceEntry):
"""Abstract annotation method to be implemented by subclasses."""
raise NotImplementedError()
def annotate(self, entry: TraceEntry):
"""Annotates a tpoint entry and returns a dict indexed by argname with values representing
        various object properties. For instance, {"qpair": {"qid": 1, "subnqn": "nqn"}} could be
returned to annotate an argument called "qpair" with two items: "qid" and "subnqn".
"""
if entry.tpoint.id not in self.tpoints:
return None
return self._annotate(entry)
class QPair(SPDKObject):
def __init__(self, trace: Trace, dtrace: DTrace):
super().__init__(trace, tpoints=[
'RDMA_REQ_NEW',
'RDMA_REQ_NEED_BUFFER',
'RDMA_REQ_TX_PENDING_C2H',
'RDMA_REQ_TX_PENDING_H2C',
'RDMA_REQ_TX_H2C',
'RDMA_REQ_RDY_TO_EXECUTE',
'RDMA_REQ_EXECUTING',
'RDMA_REQ_EXECUTED',
'RDMA_REQ_RDY_TO_COMPL',
'RDMA_REQ_COMPLETING_C2H',
'RDMA_REQ_COMPLETING',
'RDMA_REQ_COMPLETED'])
self._objects = []
self._find_objects(dtrace.entries)
def _find_objects(self, dprobes):
def probe_match(probe, other):
return probe.args['qpair'] == other.args['qpair']
for i, dprobe in enumerate(dprobes):
if dprobe.name != 'nvmf_poll_group_add_qpair':
continue
# We've found a new qpair, now find the probe indicating its destruction
last_idx, last = next((((i + j + 1), d) for j, d in enumerate(islice(dprobes, i, None))
if d.name == 'nvmf_poll_group_remove_qpair' and
probe_match(d, dprobe)), (None, None))
obj = SPDKObject.Lifetime(begin=dprobe.args['tsc'],
end=last.args['tsc'] if last is not None else TSC_MAX,
ptr=dprobe.args['qpair'],
properties={'ptr': hex(dprobe.args['qpair']),
'thread': dprobe.args['thread']})
for other in filter(lambda p: probe_match(p, dprobe), dprobes[i:last_idx]):
if other.name == 'nvmf_ctrlr_add_qpair':
for prop in ['qid', 'subnqn', 'hostnqn']:
obj.properties[prop] = other.args[prop]
self._objects.append(obj)
def _annotate(self, entry):
qpair = entry.args.get('qpair')
if qpair is None:
return None
for obj in self._objects:
if obj.ptr == qpair and obj.begin <= entry.tsc <= obj.end:
return {'qpair': obj.properties}
return None
def build_dtrace(file=None):
return DTrace([
DTraceProbe(
name='nvmf_poll_group_add_qpair',
args=[DTraceArgument(name='tsc', pos=0, type=int),
DTraceArgument(name='qpair', pos=1, type=int),
DTraceArgument(name='thread', pos=2, type=int)]),
DTraceProbe(
name='nvmf_poll_group_remove_qpair',
args=[DTraceArgument(name='tsc', pos=0, type=int),
DTraceArgument(name='qpair', pos=1, type=int),
DTraceArgument(name='thread', pos=2, type=int)]),
DTraceProbe(
name='nvmf_ctrlr_add_qpair',
args=[DTraceArgument(name='tsc', pos=0, type=int),
DTraceArgument(name='qpair', pos=1, type=int),
DTraceArgument(name='qid', pos=2, type=int),
DTraceArgument(name='subnqn', pos=3, type=str),
DTraceArgument(name='hostnqn', pos=4, type=str)])], file)
def print_trace(trace_file, dtrace_file):
dtrace = | |
# -*- coding: UTF-8 -*-
# Interstitial Error Detector
# Version 0.2, 2013-08-28
# Copyright (c) 2013 AudioVisual Preservation Solutions
# All rights reserved.
# Released under the Apache license, v. 2.0
# Created on May 14, 2014
# @author: <NAME> <<EMAIL>>
import numpy as np
from scikits.audiolab import Sndfile
from math import fabs, floor
from re import compile
from os import walk, path, stat
from time import strftime, time, sleep
import datetime
from Core import SharedApp
from Core import DAWDirsCore, ReferenceDirsCore
class DirsHandlerCore(object):
"""
Application Directories Handler Core Class
"""
def __init__(self):
"""
Constructor
"""
self.Interstitial = SharedApp.SharedApp.App
self.number_of_daw_core = 1
self.number_of_ref_core = 1
self.daw_dirs_core = {}
self.reference_dirs_core = {}
for index_daw in xrange(0, self.number_of_daw_core):
self.daw_dirs_core[index_daw] = DAWDirsCore.DAWDirsCore()
for index_ref in xrange(0, self.number_of_ref_core):
self.reference_dirs_core[index_ref] = ReferenceDirsCore.ReferenceDirsCore()
pass
def setNumberOfDawCore(self, number_of_dirs_daw):
"""
        Set Number Of Daw Dirs
        @return None
"""
self.number_of_daw_core = number_of_dirs_daw
def setNumberOfRefCore(self, number_of_dirs_ref):
"""
        Set Number Of Reference Dirs
        @return None
"""
self.number_of_ref_core = number_of_dirs_ref
def getDawDirsCore(self, index):
"""
        Get Daw Dirs Core
        @return DAWDirsCore
"""
return self.daw_dirs_core[index]
def setDawDirsCore(self, text, index):
"""
Set Daw Dirs Core
@return None
"""
new_daw = DAWDirsCore.DAWDirsCore()
new_daw.setCoreDawText(text)
new_daw.setCoreDawId(index)
self.daw_dirs_core[index] = new_daw
def setRefDirsCore(self, text, index):
"""
Set Ref Dirs Core
@return None
"""
new_ref = ReferenceDirsCore.ReferenceDirsCore()
new_ref.setCoreRefText(text)
new_ref.setCoreRefId(index)
self.reference_dirs_core[index] = new_ref
def getRefDirsCore(self, index):
"""
        Get Ref Dirs Core
        @return ReferenceDirsCore
"""
return self.reference_dirs_core[index]
def mono(self, numpy_matrix):
"""
mono(numpy matrix ar)
        reduces an n-dimensional matrix to a 1-dimensional array (its first channel) if n > 1
if n = 1, returns it
@param numpy_matrix: Numpy Matrix
@return: numpy_matrix
"""
if numpy_matrix.ndim > 1:
return numpy_matrix[:,0]
else:
return numpy_matrix
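    # Illustrative sketch (hypothetical shapes): a stereo buffer of shape (N, 2)
    # read from Sndfile is reduced by mono() to its first channel of shape (N,),
    # while a buffer that is already 1-dimensional is returned unchanged.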
def offs(self, track1, track2):
"""
offs(audiofile track1, audiofile track2)
        calculates the head offset between two (supposedly) otherwise identical audio files
this is achieved via finding the peak-to-peak difference of the waveform heads
"""
# opens files for reading
try:
track_one_file_obj = Sndfile(track1.encode('utf-8'), 'r')
except:
print('Corrupted File 1 : '+ track1)
return
pass
try:
track_two_file_obj = Sndfile(track2, 'r')
except:
print('Corrupted File 2 : '+ track2)
return
pass
# calculates the head of each file (first twentieth of the waveform)
# if this is less than 5 seconds of audio (that is, the waveform is under 100 seconds long)
        # then the entire waveform is used as the head
track_one_file_obj_head = floor(.05 * track_one_file_obj.nframes)
if track_one_file_obj_head < (track_one_file_obj.samplerate * 5):
track_one_file_obj_head = track_one_file_obj.nframes
track_two_file_obj_head = floor(.05 * track_two_file_obj.nframes)
if track_two_file_obj_head < (track_two_file_obj.samplerate * 5):
track_two_file_obj_head = track_two_file_obj.nframes
# reads the head of each file (as absolute values, accounting for reversed waveforms)
# into a 1-dimensional numpy matrix (via mono function)
numpy_matrix_of_track1 = self.mono(np.absolute(track_one_file_obj.read_frames(track_one_file_obj_head)))
numpy_matrix_of_track2 = self.mono(np.absolute(track_two_file_obj.read_frames(track_two_file_obj_head)))
# returns the difference between the peak of each list
return np.argmax(numpy_matrix_of_track1) - np.argmax(numpy_matrix_of_track2)
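    # Hedged usage sketch (file names are hypothetical): a positive result from
    # offs('daw/take01.wav', 'ref/take01.wav') means the first track's head peak
    # occurs later, so execute() below seeks that track forward by the returned
    # number of samples before running the null test.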
def populate(self, dir):
"""
Populate (File Path Dir)
walks the file tree under dir recursively and returns all .wav files in it
"""
populated_list = []
wav = compile('.[Ww][Aa][Vv]$')
for root, subFolders, files in walk(dir):
for singleFile in files:
if wav.search(singleFile):
populated_list.append(path.join(root, singleFile))
return populated_list
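    # Hedged example (hypothetical directory layout): populate('/sessions/day1')
    # would return every file whose name ends in .wav (any letter case) under
    # that tree, e.g. ['/sessions/day1/takes/take01.WAV', '/sessions/day1/mix.wav'].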
def specialCharacterHandler(self, string_to_be_handled):
"""
Method to handle all special characters
@param string_to_be_handled: String To Be Handled
@return: String - Fixed characters String
"""
try:self.Fixity = SharedApp.SharedApp.App
except:pass
try:
string_to_be_handled = string_to_be_handled.decode('cp1252')
except:
pass
try:
string_to_be_handled = string_to_be_handled.encode('utf8')
except:
pass
return string_to_be_handled
def run_executor(self, manifest_path, q_action=None, is_unit_test=False):
'''
Run Executor For all Directories
@param manifest_path: Manifest File Path
@param q_action: QCoreApplication Object
@param is_unit_test: Is call generated from Unit test
        @return manifest_file_path/{manifest_info, manifest_file_path}: String/List
'''
testers = 0
file_count = 0
values = ''
filename = self.Interstitial.Configuration.getManifestFileName()
columns = self.Interstitial.Configuration.getColumnsOfManifest()
timer = time()
initiated = self.Interstitial.Configuration.getCurrentTime()
current_date = strftime("%Y-%m-%d")
self.all_ref_files = []
self.all_daw_files = []
self.scanned_daw_files = []
self.scanned_ref_files = []
for index_daw in xrange(0, self.number_of_daw_core):
for index_ref in xrange(0, self.number_of_ref_core):
# Launch The Scanner to Test Audio Files
report_result = self.execute(index_daw, index_ref, q_action)
try:
testers += len(report_result['manifest_info']['testers'])
except: pass
try:
file_count += int(report_result['manifest_info']['file_count'])
except: pass
try:
values += report_result['manifest_info']['values']
except: pass
sleep(2)
if self.unmatched_flag:
values += path.abspath(self.daw_directories[index_daw]) + ", NONE " + ","
values += "," + "," + "," + ''
values += "\n"
print('')
print "COULD NOT MATCH FILES: " +self.daw_directories[index_daw]
print('')
self.scanned_daw_files.append(self.daw_directories[index_daw])
for single_daw_file in self.all_daw_files:
if single_daw_file not in self.scanned_daw_files:
values += path.abspath(single_daw_file) + ", NONE "
values += "," + "," + "," + "," + ''
values += "\n"
print('')
print "COULD NOT MATCH FILES: " + single_daw_file
print('')
self.scanned_daw_files.append(single_daw_file)
for single_ref_file in self.all_ref_files:
if single_ref_file not in self.scanned_ref_files:
values += "NONE ," + path.abspath(single_ref_file)
values += "," + "," + "," + "," + ''
values += "\n"
print('')
print "COULD NOT MATCH FILES: " + single_ref_file
print('')
self.scanned_ref_files.append(single_ref_file)
seconds_content = str(floor(time() - timer))
manifest_info = {'current_date': current_date, 'initiated': initiated, 'seconds_content': seconds_content,
'testers': testers, 'file_count': file_count, 'columns': columns, 'values': values}
# Open template file and get manifest template content to manifest file creation
template_of_manifest_file = open(self.Interstitial.Configuration.getManifestTemplatePath(), "r")
template_of_manifest_file_lines = template_of_manifest_file.readlines()
template_of_manifest_file.close()
manifest_content = self.generateManifestContent(template_of_manifest_file_lines, manifest_info)
# Do We Have Metadata? If So, Write A Manifest
# Write Manifest File
if len((values + columns)) > 110:
manifest_file_path = manifest_path + "/" + filename
self.writeManifestFile(manifest_file_path, manifest_content)
if is_unit_test:
return {'manifest_info': manifest_info, 'manifest_file_path':manifest_file_path}
else:
return manifest_file_path
def execute(self, index_daw, index_ref, q_action=None):
"""
        Execute (int index_daw, int index_ref, QAction q_action)
The heart of interstitial - performs a null test on two wav files and returns the first difference
"""
# initialize useful variables
values = ''
file_count = 0
test_done_for_files = []
targeted_done = []
# Ensures That We Have Legitimate Directories To Walk Down
# And Populates The List Of Files To Test
if not path.isdir(path.abspath(self.getDawDirsCore(index_daw).getCoreDawText())) or not path.isdir(path.abspath(self.getRefDirsCore(index_ref).getCoreRefText())):
print self.Interstitial.messages['illegalPaths']
return
self.daw_directories = []
self.ref_directories = []
self.daw_directories = self.populate(self.getDawDirsCore(index_daw).getCoreDawText())
print str(len(self.daw_directories)) + self.Interstitial.messages['WAV_found'] + path.abspath(self.getDawDirsCore(index_daw).getCoreDawText())
self.ref_directories = self.populate(self.getRefDirsCore(index_ref).getCoreRefText())
print str(len(self.ref_directories)) + self.Interstitial.messages['WAV_found'] + path.abspath(self.getRefDirsCore(index_ref).getCoreRefText())
try:
q_action.processEvents()
except:
pass
self.unmatched_flag = False
for index in xrange(len(self.daw_directories)):
self.all_daw_files.append(self.daw_directories[index])
for index in xrange(len(self.ref_directories)):
self.all_ref_files.append(self.ref_directories[index])
# Process Each File In The Tester Array
for index in xrange(len(self.daw_directories)):
found = False
if self.daw_directories[index] in self.scanned_daw_files:
continue
for e in xrange(len(self.ref_directories)):
if self.ref_directories[e] in self.scanned_ref_files:
continue
try:
q_action.processEvents()
except:
pass
# If We Haven't Already Processed This File, Process It
if self.ref_directories[e] not in targeted_done:
# find the offset and align the waveforms
toff = self.offs(self.daw_directories[index], self.ref_directories[e])
try:
tester_file_obj = Sndfile(self.daw_directories[index], 'r')
except:
print('Corrupted File : '+ self.daw_directories[index])
return
pass
try:
target_file_obj = Sndfile(self.ref_directories[e], 'r')
except:
                        print('Corrupted File : ' + self.ref_directories[e])
return
pass
if toff > 0:
tester_file_obj.seek(toff)
else:
target_file_obj.seek(fabs(toff))
# Read The First 1000 Samples Of Each File
                    # If The Two Heads Are Identical, We Have A Match And Can Begin Processing
numpy_matrix_of_track1 = self.mono(tester_file_obj.read_frames(1000))
numpy_matrix_of_track2 = self.mono(target_file_obj.read_frames(1000))
if np.array_equal(numpy_matrix_of_track1, numpy_matrix_of_track2):
print('')
print "MATCH: " + self.daw_directories[index] + " matches " + self.ref_directories[e]
try:
q_action.processEvents()
except:
pass
# mark files as done
test_done_for_files.append(self.daw_directories[index])
targeted_done.append(self.ref_directories[e])
# we can't read the entire file into RAM at once
# so instead we're breaking it into one-second parts
l = min((tester_file_obj.nframes - toff), (target_file_obj.nframes - toff)) / tester_file_obj.samplerate
for n in xrange(0, l, 1):
errs = 0
try:
# drop all but the first channel
track_one_response = self.mono(tester_file_obj.read_frames(tester_file_obj.samplerate))
track_two_response = self.mono(target_file_obj.read_frames(target_file_obj.samplerate))
# are these arrays equivalent? if not, there's an error
if not np.array_equal(track_one_response, track_two_response):
file_count += 1
# where's the error?
# we find it by comparing sample by sample across this second of audio
for m in xrange(len(track_one_response)):
if not np.array_equal(track_one_response[m], track_two_response[m]):
# We found it! print a message and we're done with these files
errs = (n * tester_file_obj.samplerate) + m + 1000
print self.Interstitial.messages['errorFoundBw'] +self.daw_directories[index] + " and " + self.ref_directories[e] + " at sample " + str(errs)
try:
q_action.processEvents()
except:
pass
break
if errs != 0:
break
except RuntimeError:
break
# Append Metadata For Output
values += path.abspath(self.daw_directories[index]) + "," + path.abspath(self.ref_directories[e]) + ","
values += datetime.datetime.fromtimestamp(stat(self.daw_directories[index]).st_ctime).strftime("%Y-%m-%d %H:%M:%S") + ","
values += str(stat(self.daw_directories[index]).st_size) + "," + str(tester_file_obj.channels) + "," + str(tester_file_obj.samplerate) + ","
                        values += str(datetime.timedelta(seconds=int(tester_file_obj.nframes / tester_file_obj.samplerate)))
"""
Managers for handling different ServiceChannels.
If a manager for a specific :class:`ServiceChannel` is attached,
incoming messages get forwarded there, otherwise they are discarded.
Managers can be attached by calling `add_manager()` on the :class:`Console`
object (see example)
Methods of manager are available through console-context.
Example:
How to add a manager::
discovered = await Console.discover(timeout=1)
if len(discovered):
console = discovered[0]
# Add manager, optionally passing initialization parameter
some_arg_for_manager_init = 'example'
console.add_manager(
MediaManager,
additional_arg=some_arg_for_manager_init
)
await console.connect()
if console.connection_state != ConnectionState.Connected:
print("Connection failed")
sys.exit(1)
console.wait(1)
# Call manager method
console.media_command(0x54321, MediaControlCommand.PlayPauseToggle, 0)
else:
print("No consoles discovered")
sys.exit(1)
"""
import asyncio
import time
import logging
from typing import Optional
from construct import Container
from xbox.sg import factory
from xbox.sg.enum import MessageType, ServiceChannel, AckStatus, TextResult, \
SoundLevel, MediaControlCommand, MediaPlaybackStatus, TextInputScope, \
MediaType, GamePadButton
from xbox.sg.utils.events import Event
from xbox.sg.utils.struct import XStruct
log = logging.getLogger(__name__)
class Manager(object):
__namespace__ = ''
def __init__(self, console, channel: ServiceChannel):
"""
Don't use directly!
INTERNALLY called by the parent :class:`Console`!
Args:
            console: Console object, internally passed by `Console.add_manager`
channel: Service channel
"""
self.console = console
self.console.on_message += self._pre_on_message
self.console.on_json += self._pre_on_json
self._channel = channel
def _pre_on_message(self, msg, channel):
if channel == self._channel:
self._on_message(msg, channel)
def _pre_on_json(self, data, channel):
if channel == self._channel:
self._on_json(data, channel)
def _on_message(self, msg, channel):
"""
Managers must implement this
"""
pass
def _on_json(self, data, channel):
"""
Managers must implement this
"""
pass
async def _send_message(self, msg: XStruct):
"""
Internal method to send messages to initialized Service Channel
Args:
msg (:class:`XStructObj`): Message
"""
return await self.console.send_message(msg, channel=self._channel)
async def _send_json(self, data: str) -> None:
"""
Internal method to send JSON messages to initialized Service Channel
Args:
data: JSON message
"""
return await self.console.json(data, channel=self._channel)
class InputManagerError(Exception):
"""
Exception thrown by InputManager
"""
pass
class InputManager(Manager):
__namespace__ = 'input'
def __init__(self, console):
"""
Input Manager (ServiceChannel.SystemInput)
Args:
            console: Console object, internally passed by `Console.add_manager`
"""
super(InputManager, self).__init__(console, ServiceChannel.SystemInput)
def _on_message(self, msg: XStruct, channel: ServiceChannel) -> None:
"""
Internal handler method to receive messages from SystemInput Channel
Args:
msg: Message
channel: Service channel
"""
raise InputManagerError("Unexpected message received on InputManager")
async def gamepad_input(
self,
buttons: GamePadButton,
l_trigger: int = 0,
r_trigger: int = 0,
l_thumb_x: int = 0,
l_thumb_y: int = 0,
r_thumb_x: int = 0,
r_thumb_y: int = 0
) -> None:
"""
Send gamepad input
Args:
buttons: Gamepad buttons bits
l_trigger: Left trigger value
r_trigger: Right trigger value
l_thumb_x: Left thumbstick X-axis value
l_thumb_y: Left thumbstick Y-axis value
r_thumb_x: Right thumbstick X-axis value
r_thumb_y: Right thumbstick Y-axis value
Returns: None
"""
ts = int(time.time())
msg = factory.gamepad(
ts, buttons, l_trigger, r_trigger, l_thumb_x, l_thumb_y,
r_thumb_x, r_thumb_y
)
return await self._send_message(msg)
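    # Hedged usage sketch: assuming an InputManager was attached with
    # console.add_manager(InputManager), the call is exposed on the console
    # context, e.g. `await console.gamepad_input(buttons)` where `buttons` is a
    # GamePadButton bitmask; triggers and thumbsticks default to the rest value 0.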
class MediaManagerError(Exception):
"""
Exception thrown by MediaManager
"""
pass
class MediaManager(Manager):
__namespace__ = 'media'
def __init__(self, console):
"""
Media Manager (ServiceChannel.SystemMedia)
        Args:
            console: Console object, internally passed by `Console.add_manager`
"""
super(MediaManager, self).__init__(console, ServiceChannel.SystemMedia)
self._media_state = None
self.on_media_state = Event()
self.on_media_command_result = Event()
self.on_media_controller_removed = Event()
def _on_message(self, msg: XStruct, channel: ServiceChannel) -> None:
"""
Internal handler method to receive messages from SystemMedia Channel
Args:
msg: Message
channel: Service channel
"""
msg_type = msg.header.flags.msg_type
payload = msg.protected_payload
if msg_type == MessageType.MediaState:
log.debug('Received MediaState message')
self._media_state = payload
self.on_media_state(self.media_state)
elif msg_type == MessageType.MediaCommandResult:
log.debug('Received MediaCommandResult message')
self.on_media_command_result(payload)
elif msg_type == MessageType.MediaControllerRemoved:
title_id = payload.title_id
log.debug('Received MediaControllerRemoved message, title id: 0x%x', title_id)
if self.title_id == title_id:
log.debug('Clearing MediaState')
self._media_state = None
self.on_media_controller_removed(payload)
else:
raise MediaManagerError(
"Unexpected message received on MediaManager"
)
@property
def media_state(self) -> Optional[Container]:
"""
Media state payload
Returns: Media state payload
"""
return self._media_state
@property
def active_media(self) -> Optional[bool]:
"""
Check whether console has active media
Returns: `True` if media is active, `False` if not
"""
return self.media_state is not None
@property
def title_id(self) -> Optional[int]:
"""
Title Id of active media
Returns: Title Id
"""
if self.media_state:
return self.media_state.title_id
@property
def aum_id(self) -> Optional[str]:
"""
Application user model Id of active media
Returns: Aum Id
"""
if self.media_state:
return self.media_state.aum_id
@property
def asset_id(self) -> Optional[str]:
"""
Asset Id of active media
Returns: Asset Id
"""
if self.media_state:
return self.media_state.asset_id
@property
def media_type(self) -> Optional[MediaType]:
"""
Media type of active media
Returns: Media type
"""
if self.media_state:
return self.media_state.media_type
@property
def sound_level(self) -> Optional[SoundLevel]:
"""
Sound level of active media
Returns: Sound level
"""
if self.media_state:
return self.media_state.sound_level
@property
def enabled_commands(self) -> Optional[MediaControlCommand]:
"""
Enabled MediaCommands bitmask
Returns: Bitmask of enabled commands
"""
if self.media_state:
return self.media_state.enabled_commands
@property
def playback_status(self) -> Optional[MediaPlaybackStatus]:
"""
Playback status of active media
Returns: Playback status
"""
if self.media_state:
return self.media_state.playback_status
@property
def rate(self) -> Optional[float]:
"""
Playback rate of active media
Returns: Playback rate
"""
if self.media_state:
return self.media_state.rate
@property
def position(self) -> Optional[int]:
"""
Playback position of active media
Returns: Playback position in microseconds
"""
if self.media_state:
return self.media_state.position
@property
def media_start(self) -> Optional[int]:
"""
Media start position of active media
Returns: Media start position in microseconds
"""
if self.media_state:
return self.media_state.media_start
@property
def media_end(self) -> Optional[int]:
"""
Media end position of active media
Returns: Media end position in microseconds
"""
if self.media_state:
return self.media_state.media_end
@property
def min_seek(self) -> Optional[int]:
"""
Minimum seek position of active media
Returns: Minimum position in microseconds
"""
if self.media_state:
return self.media_state.min_seek
@property
def max_seek(self) -> Optional[int]:
"""
Maximum seek position of active media
Returns: Maximum position in microseconds
"""
if self.media_state:
return self.media_state.max_seek
@property
def metadata(self) -> Container:
"""
Media metadata of active media
Returns: Media metadata
"""
if self.media_state:
return self.media_state.metadata
async def media_command(
self,
title_id: int,
command: MediaControlCommand,
request_id: int = 0,
seek_position: Optional[int] = None
) -> None:
"""
Send media command
Args:
title_id: Title Id
command: Media Command
request_id: Incrementing Request Id
seek_position: Seek position
Returns: None
"""
msg = factory.media_command(
request_id, title_id, command, seek_position
)
return await self._send_message(msg)
class TextManagerError(Exception):
"""
Exception thrown by TextManager
"""
pass
class TextManager(Manager):
__namespace__ = 'text'
def __init__(self, console):
"""
Text Manager (ServiceChannel.SystemText)
Args:
            console: Console object, internally passed by `Console.add_manager`
"""
super(TextManager, self).__init__(console, ServiceChannel.SystemText)
self.session_config = None
self.current_session_input = None
self.last_session_ack = None
self._current_text_version = 0
self.on_systemtext_configuration = Event()
self.on_systemtext_input = Event()
self.on_systemtext_done = Event()
def _on_message(self, msg: XStruct, channel: ServiceChannel):
"""
Internal handler method to receive messages from SystemText Channel
Args:
msg (:class:`XStructObj`): Message
channel (:class:`ServiceChannel`): Service channel
"""
msg_type = msg.header.flags.msg_type
payload = msg.protected_payload
session_id = payload.text_session_id
if msg_type == MessageType.SystemTextConfiguration:
self.reset_session()
self.session_config = payload
self.on_systemtext_configuration(payload)
elif msg_type == MessageType.SystemTextInput:
# Assign console input msg
self.current_session_input = payload
self.current_text_version = payload.submitted_version
asyncio.create_task(
self.send_systemtext_ack(
self.text_session_id,
self.current_text_version
)
)
self.on_systemtext_input(payload)
elif msg_type == MessageType.SystemTextAck:
self.current_text_version = payload.text_version_ack
elif msg_type == MessageType.SystemTextDone:
if session_id == self.text_session_id:
self.reset_session()
elif session_id == 0:
# SystemTextDone for session 0 is sent by console
# No clue what it means, if anything
pass
else:
pass
# log.debug('Received DONE msg for inactive session %i' % session_id)
self.on_systemtext_done(payload)
elif msg_type in [MessageType.TitleTextConfiguration,
MessageType.TitleTextInput,
MessageType.TitleTextSelection]:
raise TextManagerError('Received TitleTextConfiguration, unhandled')
else:
raise TextManagerError(
"Unexpected message received on TextManager"
)
@property
def got_active_session(self):
"""
Check whether a text session is active
Returns:
bool: Returns `True` if any text session is active, `False` otherwise
"""
return self.session_config is not None
@property
def current_text_version(self):
"""
Current Text version
Returns:
int: Current Text Version
"""
return self._current_text_version
@current_text_version.setter
def current_text_version(self, value):
if value > self.current_text_version:
self._current_text_version = value
@property
def text_session_id(self):
"""
Current Text session id
Returns:
int: Text session id if existing, `None` otherwise
"""
if self.session_config:
return self.session_config.text_session_id
@property
def text_options(self):
"""
Current Text options
Returns:
:class:`TextOption`: Text options if existing, `None` otherwise
"""
if self.session_config:
return self.session_config.text_options
@property
def text_input_scope(self) -> Optional[TextInputScope]:
"""
Current Text input scope
Returns: Text input scope if existing, `None` otherwise
"""
if self.session_config:
return self.session_config.input_scope
@property
def max_text_length(self) -> Optional[int]:
"""
Maximum Text length
Returns: Max text length if existing, `None` otherwise
"""
if self.session_config:
return self.session_config.max_text_length
@property
def text_locale(self) -> Optional[str]:
"""
        Text locale of the current session
Returns: Text locale if existing, `None` otherwise
"""
if self.session_config:
return self.session_config.locale
@property
def text_prompt(self) -> Optional[str]:
"""
        Text prompt of the current session
Returns: Text prompt if existing, `None` otherwise
"""
if self.session_config:
            return self.session_config.prompt
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import os
from os import path
import sys
import shutil
import tempfile
import boto3
import json
import time
import imp
import argparse
from botocore.exceptions import ClientError
from datetime import datetime
import base64
import ast
import textwrap
import fileinput
import subprocess
from subprocess import call
import fnmatch
import unittest
try:
from unittest.mock import MagicMock, patch, ANY
except ImportError:
import mock
from mock import MagicMock, patch, ANY
rdk_dir = '.rdk'
rules_dir = ''
tests_dir = ''
util_filename = 'rule_util'
rule_handler = 'rule_code'
rule_template = 'rdk-rule.template'
config_bucket_prefix = 'config-bucket-'
config_role_name = 'config-role'
assume_role_policy_file = 'configRuleAssumeRolePolicyDoc.json'
delivery_permission_policy_file = 'deliveryPermissionsPolicy.json'
code_bucket_prefix = 'config-rule-code-bucket-'
parameter_file_name = 'parameters.json'
example_ci_dir = 'example_ci'
test_ci_filename = 'test_ci.json'
event_template_filename = 'test_event_template.json'
class rdk():
def __init__(self, args):
self.args = args
def process_command(self):
method_to_call = getattr(self, self.args.command.replace('-','_'))
exit_code = method_to_call()
return(exit_code)
def init(self):
parser = argparse.ArgumentParser(
prog='rdk '+self.args.command,
            description = 'Sets up AWS Config and turns the current directory into an rdk working directory. This will enable configuration recording in AWS.')
self.args = parser.parse_args(self.args.command_args, self.args)
print ("Running init!")
#if the .rdk directory exists, delete it.
#if os.path.exists(rdk_dir):
# shutil.rmtree(rdk_dir)
#copy contents of template directory into .rdk directory
#src = os.path.join(path.dirname(__file__), 'template')
#dst = rdk_dir
#shutil.copytree(src, dst)
#create custom session based on whatever credentials are available to us
my_session = self.__get_boto_session()
#Create our ConfigService client
my_config = my_session.client('config')
#get accountID
my_sts = my_session.client('sts')
response = my_sts.get_caller_identity()
account_id = response['Account']
config_recorder_exists = False
config_recorder_name = "default"
config_role_arn = ""
delivery_channel_exists = False
config_bucket_exists = False
#Check to see if the ConfigRecorder has been created.
recorders = my_config.describe_configuration_recorders()
if len(recorders['ConfigurationRecorders']) > 0:
config_recorder_exists = True
config_recorder_name = recorders['ConfigurationRecorders'][0]['name']
config_role_arn = recorders['ConfigurationRecorders'][0]['roleARN']
print("Found Config Recorder: " + config_recorder_name)
print("Found Config Role: " + config_role_arn)
delivery_channels = my_config.describe_delivery_channels()
if len(delivery_channels['DeliveryChannels']) > 0:
delivery_channel_exists = True
config_bucket_name = delivery_channels['DeliveryChannels'][0]['s3BucketName']
print("Found Bucket: " + config_bucket_name)
config_bucket_exists = True
my_s3 = my_session.client('s3')
if not config_bucket_exists:
#create config bucket
config_bucket_name = config_bucket_prefix + account_id
response = my_s3.list_buckets()
bucket_exists = False
for bucket in response['Buckets']:
if bucket['Name'] == config_bucket_name:
bucket_exists = True
if not bucket_exists:
print('Creating Config bucket '+config_bucket_name )
if my_session.region_name == 'us-east-1':
my_s3.create_bucket(
Bucket=config_bucket_name
)
else:
my_s3.create_bucket(
Bucket=config_bucket_name,
CreateBucketConfiguration={
'LocationConstraint': my_session.region_name
}
)
if not config_role_arn:
#create config role
my_iam = my_session.client('iam')
response = my_iam.list_roles()
role_exists = False
for role in response['Roles']:
if role['RoleName'] == config_role_name:
role_exists = True
if not role_exists:
print('Creating IAM role config-role')
assume_role_policy = open(os.path.join(path.dirname(__file__), 'template', assume_role_policy_file), 'r').read()
my_iam.create_role(RoleName=config_role_name, AssumeRolePolicyDocument=assume_role_policy, Path="/rdk/")
#attach role policy
my_iam.attach_role_policy(RoleName=config_role_name, PolicyArn='arn:aws:iam::aws:policy/service-role/AWSConfigRole')
policy_template = open(os.path.join(path.dirname(__file__), 'template', delivery_permission_policy_file), 'r').read()
delivery_permissions_policy = policy_template.replace('ACCOUNTID', account_id)
my_iam.put_role_policy(RoleName=config_role_name, PolicyName='ConfigDeliveryPermissions', PolicyDocument=delivery_permissions_policy)
#wait for changes to propagate.
print('Waiting for IAM role to propagate')
time.sleep(16)
#create or update config recorder
if not config_role_arn:
config_role_arn = "arn:aws:iam::"+account_id+":role/rdk/config-role"
my_config.put_configuration_recorder(ConfigurationRecorder={'name':config_recorder_name, 'roleARN':config_role_arn, 'recordingGroup':{'allSupported':True, 'includeGlobalResourceTypes': True}})
if not delivery_channel_exists:
#create delivery channel
my_config.put_delivery_channel(DeliveryChannel={'name':'default', 's3BucketName':config_bucket_name, 'configSnapshotDeliveryProperties':{'deliveryFrequency':'Six_Hours'}})
#start config recorder
my_config.start_configuration_recorder(ConfigurationRecorderName=config_recorder_name)
print('Config Service is ON')
print('Config setup complete.')
#create code bucket
code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name
response = my_s3.list_buckets()
bucket_exists = False
for bucket in response['Buckets']:
if bucket['Name'] == code_bucket_name:
bucket_exists = True
print ("Found code bucket: " + code_bucket_name)
if not bucket_exists:
print('Creating Code bucket '+code_bucket_name )
bucket_configuration = {}
#Consideration for us-east-1 S3 API
if my_session.region_name == 'us-east-1':
my_s3.create_bucket(
Bucket=code_bucket_name
)
else:
my_s3.create_bucket(
Bucket=code_bucket_name,
CreateBucketConfiguration={
'LocationConstraint': my_session.region_name
}
)
return 0
def create(self):
#Parse the command-line arguments relevant for creating a Config Rule.
self.__parse_rule_args(True)
print ("Running create!")
if not self.args.runtime:
print("Runtime is required for 'create' command.")
return 1
extension_mapping = {'java8':'.java', 'python2.7':'.py', 'python3.6':'.py','nodejs4.3':'.js', 'dotnetcore1.0':'cs', 'dotnetcore2.0':'cs', 'python3.6-managed':'.py'}
if self.args.runtime not in extension_mapping:
print ("rdk does nto support that runtime yet.")
#if not self.args.maximum_frequency:
# self.args.maximum_frequency = "TwentyFour_Hours"
# print("Defaulting to TwentyFour_Hours Maximum Frequency.")
#create rule directory.
rule_path = os.path.join(os.getcwd(), rules_dir, self.args.rulename)
if os.path.exists(rule_path):
print("Local Rule directory already exists.")
return 1
try:
os.makedirs(os.path.join(os.getcwd(), rules_dir, self.args.rulename))
#copy rule template into rule directory
if self.args.runtime == 'java8':
self.__create_java_rule()
elif self.args.runtime in ['dotnetcore1.0', 'dotnetcore2.0']:
self.__create_dotnet_rule()
else:
src = os.path.join(path.dirname(__file__), 'template', 'runtime', self.args.runtime, rule_handler + extension_mapping[self.args.runtime])
dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, self.args.rulename + extension_mapping[self.args.runtime])
shutil.copyfile(src, dst)
src = os.path.join(path.dirname(__file__), 'template', 'runtime', self.args.runtime, 'rule_test' + extension_mapping[self.args.runtime])
if os.path.exists(src):
dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, self.args.rulename+"_test"+extension_mapping[self.args.runtime])
shutil.copyfile(src, dst)
#with fileinput.FileInput(dst, inplace=True) as file:
f = fileinput.input(files=dst, inplace=True)
for line in f:
print(line.replace('<%RuleName%>', self.args.rulename), end='')
f.close()
src = os.path.join(path.dirname(__file__), 'template', 'runtime', self.args.runtime, util_filename + extension_mapping[self.args.runtime])
if os.path.exists(src):
dst = os.path.join(os.getcwd(), rules_dir, self.args.rulename, util_filename + extension_mapping[self.args.runtime])
shutil.copyfile(src, dst)
#Write the parameters to a file in the rule directory.
self.__populate_params()
print ("Local Rule files created.")
except Exception as e:
print("Error during create: " + str(e))
print("Rolling back...")
shutil.rmtree(rule_path)
raise e
return 0
def modify(self):
#Parse the command-line arguments necessary for modifying a Config Rule.
self.__parse_rule_args(False)
print("Running modify!")
self.args.rulename = self.__clean_rule_name(self.args.rulename)
#Get existing parameters
old_params = self.__read_params_file(self.args.rulename)
if 'SourceEvents' in old_params['Parameters']:
if self.args.maximum_frequency and old_params['Parameters']['SourceEvents']:
print("Removing Source Events and changing to Periodic Rule.")
self.args.resource_types = ""
old_params['Parameters']['SourceEvents'] = ""
if not self.args.resource_types and old_params['Parameters']['SourceEvents']:
self.args.resource_types = old_params['Parameters']['SourceEvents']
if 'SourcePeriodic' in old_params['Parameters']:
if self.args.resource_types and old_params['Parameters']['SourcePeriodic']:
print("Removing Max Frequency and changing to Event-based Rule.")
self.args.maximum_frequency = ""
old_params['Parameters']['SourcePeriodic'] = ""
if not self.args.maximum_frequency and old_params['Parameters']['SourcePeriodic']:
self.args.maximum_frequency = old_params['Parameters']['SourcePeriodic']
if not self.args.runtime and old_params['Parameters']['SourceRuntime']:
self.args.runtime = old_params['Parameters']['SourceRuntime']
if not self.args.input_parameters and old_params['Parameters']['InputParameters']:
self.args.input_parameters = old_params['Parameters']['InputParameters']
if 'RuleSets' in old_params['Parameters']:
if not self.args.rulesets:
self.args.rulesets = old_params['Parameters']['RuleSets']
#Write the parameters to a file in the rule directory.
self.__populate_params()
print ("Modified Rule '"+self.args.rulename+"'. Use the `deploy` command to push your changes to AWS.")
def deploy(self):
parser = argparse.ArgumentParser(prog='rdk deploy')
parser.add_argument('rulename', metavar='<rulename>', nargs='*', help='Rule name(s) to deploy. Rule(s) will be pushed to AWS.')
parser.add_argument('--all','-a', action='store_true', help="All rules in the working directory will be deployed.")
parser.add_argument('-s','--rulesets', required=False, help='comma-delimited RuleSet names')
self.args = parser.parse_args(self.args.command_args, self.args)
if self.args.rulesets:
self.args.rulesets = self.args.rulesets.split(',')
#run the deploy code
print ("Running deploy!")
rule_names = self.__get_rule_list_for_command()
#create custom session based on whatever credentials are available to us
my_session = self.__get_boto_session()
#get accountID
my_sts = my_session.client('sts')
response = my_sts.get_caller_identity()
account_id = response['Account']
for rule_name in rule_names:
my_rule_params = self.__get_rule_parameters(rule_name)
s3_src = ""
if my_rule_params['SourceRuntime'] == "java8":
#Do java build and package.
print ("Running Gradle Build for "+rule_name)
working_dir = os.path.join(os.getcwd(), rules_dir, rule_name)
command = ["gradle","build"]
subprocess.call( command, cwd=working_dir)
#set source as distribution zip
s3_src = os.path.join(os.getcwd(), rules_dir, rule_name, 'build', 'distributions', rule_name+".zip")
elif my_rule_params['SourceRuntime'] in ["dotnetcore1.0","dotnetcore2.0"]:
print ("Packaging "+rule_name)
working_dir = os.path.join(os.getcwd(), rules_dir, rule_name)
commands = [["dotnet","restore"]]
app_runtime = "netcoreapp1.0"
if my_rule_params['SourceRuntime'] == "dotnetcore2.0":
app_runtime = "netcoreapp2.0"
commands.append(["dotnet","lambda","package","-c","Release","-f", app_runtime])
for command in commands:
subprocess.call( command, cwd=working_dir)
# Remove old zip file if it already exists
package_file_dst = os.path.join(rule_name, rule_name+".zip")
self.__delete_package_file(package_file_dst)
# Create new package in temp directory, copy to rule directory
# This copy avoids the archiver trying to include the output zip in itself
s3_src_dir = os.path.join(os.getcwd(),rules_dir, rule_name,'bin','Release', app_runtime, 'publish')
tmp_src = shutil.make_archive(os.path.join(tempfile.gettempdir(), rule_name), 'zip', s3_src_dir)
shutil.copy(tmp_src, package_file_dst)
s3_src = os.path.abspath(package_file_dst)
self.__delete_package_file(tmp_src)
else:
print ("Zipping " + rule_name)
#zip rule code files and upload to s3 bucket
# Remove old zip file if it already exists
package_file_dst = os.path.join(rule_name, rule_name+".zip")
self.__delete_package_file(package_file_dst)
s3_src_dir = os.path.join(os.getcwd(), rules_dir, rule_name)
tmp_src = shutil.make_archive(os.path.join(tempfile.gettempdir(), rule_name), 'zip', s3_src_dir)
shutil.copy(tmp_src, package_file_dst)
s3_src = os.path.abspath(package_file_dst)
self.__delete_package_file(tmp_src)
s3_dst = "/".join((rule_name, rule_name+".zip"))
code_bucket_name = code_bucket_prefix + account_id + "-" + my_session.region_name
my_s3 = my_session.resource('s3')
print ("Uploading " + rule_name)
my_s3.meta.client.upload_file(s3_src, code_bucket_name, s3_dst)
#create CFN Parameters
source_events = "NONE"
if 'SourceEvents' in my_rule_params:
source_events = my_rule_params['SourceEvents']
source_periodic = "NONE"
if 'SourcePeriodic' in my_rule_params:
source_periodic = my_rule_params['SourcePeriodic']
my_params = [
{
'ParameterKey': 'SourceBucket',
'ParameterValue': code_bucket_name,
},
{
'ParameterKey': 'SourcePath',
'ParameterValue': s3_dst,
},
{
'ParameterKey': 'SourceRuntime',
'ParameterValue': my_rule_params['SourceRuntime'],
},
{
'ParameterKey': 'SourceEvents',
'ParameterValue': source_events,
},
{
'ParameterKey': 'SourcePeriodic',
'ParameterValue': source_periodic,
},
{
'ParameterKey': 'SourceInputParameters',
'ParameterValue': my_rule_params['InputParameters'],
},
{
'ParameterKey': 'SourceHandler',
'ParameterValue': self.__get_handler(rule_name, my_rule_params)
}]
            #deploy config rule
"""
This module contains the core classes of the package that implement the three hyperparameter optimization methods
presented in Forward and Reverse Gradient-Based Hyperparameter Optimization (https://arxiv.org/abs/1703.01785).
"""
# TODO put tf.Session optional parameter in all the methods that require `run`
# import numpy as np
import tensorflow as tf
from rfho.optimizers import Optimizer, AdamOptimizer
from rfho.utils import dot, MergedVariable, VlMode, as_list, simple_name, GlobalStep, ZMergedMatrix, flatten_list
from rfho.utils import call_method_optional_param as cmo
class ReverseHG:
"""
Class to compute hyper-gradients in reverse mode
"""
# noinspection SpellCheckingInspection
def __init__(self, optimizer, hyper_dict, state_history=None, global_step=None):
"""
Creates a new object that computes the hyper-gradient of validation errors in reverse mode.
See section 3.1 of Forward and Reverse Gradient-Based Hyperparameter Optimization
(https://arxiv.org/abs/1703.01785)
Note that this class only computes the hyper-gradient and does not perform hyperparameter optimization.
        :param optimizer: instance of Optimizer class, which contains the dynamics with which the model parameters are
updated
:param hyper_dict: A dictionary of `{validation_error: hyperparameter or list_of_hyperparameters}` where
`validation_error` is a scalar tensor and `list_of_hyperparameters` is a list
of tensorflow variables that represents the hyperparameters
:param state_history: (default: empty list) state history manager:
should implement methods `clear`, `append`, `__getitem__`
:param global_step: optional instance of GlobalStep class
"""
assert isinstance(optimizer, Optimizer)
self.w = optimizer.raw_w # might be variable or MergedVariable
# TODO check if it works also with w as simple Variable
self.w_t = self.w # MergedVariable.get_tensor(self.w) # this is always a tensor
self.tr_dynamics = optimizer.dynamics
assert isinstance(hyper_dict, dict), '%s not allowed type. Should be a dict of ' \
'(tf.Tensor, hyperparameters)' % hyper_dict
self.val_error_dict = hyper_dict
self.hyper_list = []
for k, v in hyper_dict.items():
self.hyper_list += as_list(v)
self.val_error_dict[k] = as_list(v) # be sure that are all lists
self.w_hist = state_history or []
with self.w_t.graph.as_default():
# global step
self.global_step = global_step or GlobalStep()
self._fw_ops = optimizer.assign_ops # add here when hyper-parameters are sequence
# backward assign ops
with tf.name_scope('backward'):
# equation (9)
p_T = {ve: tf.gradients(ve, self.w_t)[0] for ve, hyp_list in self.val_error_dict.items()} # deltaE(s_t)
self.p_dict = {ve: tf.Variable(pt, name='p') for ve, pt in p_T.items()}
# for nullity check
self._abs_sum_p = tf.reduce_sum(tf.stack([tf.reduce_sum(tf.abs(p), name='l1_p')
for p in self.p_dict.values()]))
# build Lagrangian function
with tf.name_scope('lagrangian'):
self.lagrangians_dict = {ve: dot(p, self.tr_dynamics) for ve, p in self.p_dict.items()}
# TODO read below
'''
                In the following {if else} block there are two ways of computing the dynamics of the update
                of the Lagrangian multipliers. The procedures SHOULD produce the same result,
                however, for some strange reason, if w is indeed a state variable that contains auxiliary components
                (e.g. velocity in Momentum algorithm, ...) there is a difference in the two methods and
                the right one is the first one. This is possibly due to the order in which the derivatives are
                taken by tensorflow, but further investigation is necessary.
'''
# detects if some auxiliary variables are used.
if isinstance(self.w, MergedVariable) and \
any([isinstance(v, MergedVariable) for v in self.w.var_list(VlMode.RAW)]):
state_components = self.w.var_list(VlMode.TENSOR)
# equation (8)
self.p_dynamics = {ve: tf.concat(tf.gradients(lagrangian, state_components), 0)
for ve, lagrangian in self.lagrangians_dict.items()}
else:
# equation (8)
self.p_dynamics = {ve: tf.gradients(lagrangian, self.w_t)[0]
for ve, lagrangian in self.lagrangians_dict.items()} # equation (7)
self._bk_ops = [self.p_dict[ve].assign(self.p_dynamics[ve])
for ve in self.val_error_dict] # add here when hp are sequ.
with tf.name_scope('w_history_ops'):
self._w_placeholder = tf.placeholder(self.w_t.dtype)
self._back_hist_op = self.w.assign(self._w_placeholder)
with tf.name_scope('hyper_derivatives'):
# equation (10) without summation.
self.hyper_derivatives = [
(self.val_error_dict[ve], tf.gradients(lagrangian, self.val_error_dict[ve]))
for ve, lagrangian in self.lagrangians_dict.items()
] # list of couples (hyper_list, list of tensors hyper_gradients) (lists are unhashable!)
# check that all hyper-gradients are defined
assert all(e is not None for e in flatten_list(
[e[1] for e in self.hyper_derivatives])), 'Some gradient of the validation error is None!'
with tf.name_scope('hyper_gradients'): # ADDED 28/3/17 keeps track of hyper-gradients as tf.Variable
self._grad_wrt_hypers_placeholder = tf.placeholder(tf.float32, name='placeholder')
# TODO this placeholder is not really necessary... just added to minimize the changes needed
# (merge with RICCARDO)
self.hyper_gradient_vars = [tf.Variable(tf.zeros_like(hyp), name=simple_name(hyp))
for hyp in self.hyper_list]
self.hyper_gradients_dict = {hyp: hgv for hyp, hgv # redundant.. just for comfort ..
in zip(self.hyper_list, self.hyper_gradient_vars)}
self._hyper_assign_ops = {h: v.assign(self._grad_wrt_hypers_placeholder)
for h, v in self.hyper_gradients_dict.items()}
def initialize(self, session=None):
"""
Helper for initializing all the variables. Builds and runs model variables and global step initializers.
Note that dual variables are initialized only when calling `backward`.
:param session: optional tensorflow session (if None default session is used)
:return: None
"""
ss = session or tf.get_default_session()
assert ss, 'No default tensorflow session!'
if isinstance(self.w, MergedVariable):
self.w.initialize(session=session)
else:
ss.run(tf.variables_initializer([self.w]))
ss.run(tf.variables_initializer(self.hyper_gradient_vars + [self.global_step.var]))
def forward(self, T, train_feed_dict_supplier=None, summary_utils=None):
"""
Performs (forward) optimization of the parameters.
:param T: Total number of iterations
:param train_feed_dict_supplier: (optional) A callable with signature `t -> feed_dict`
or `() -> feed_dict` to pass to
`tf.Session.run` feed_dict argument
:param summary_utils: (optional) object that implements a method method `run(tf.Session, step)`
that is executed at every iteration (see for instance utils.PrintUtils
:return: None
"""
if not train_feed_dict_supplier:
train_feed_dict_supplier = lambda: None
# var_init = self.w.var_list(VlMode.BASE) if isinstance(self.w, MergedVariable) else [self.w]
# tf.variables_initializer(var_init + [self.global_step.var]).run()
ss = tf.get_default_session()
self.w_hist.clear()
for t in range(T):
self.w_hist.append(self.w_t.eval())
ss.run([self._fw_ops], feed_dict=cmo(train_feed_dict_supplier, self.global_step.eval()))
self.global_step.increase.eval()
if summary_utils:
summary_utils.run(ss, t)
def backward(self, T, val_feed_dict_suppliers=None, train_feed_dict_supplier=None, hyper_batch_step=None,
summary_utils=None, check_if_zero=False):
"""
Performs backward computation of hyper-gradients
:param hyper_batch_step: supports for stochastic sampling of validation set
:param T: Total number of iterations
:param val_feed_dict_suppliers: either a callable that returns a feed_dict
or a dictionary {validation_error tensor: callable
(signature `t -> feed_dict` or `() -> feed_dict`) that
is used to initialize the dual variables `p` (generally supplier of
validation example set).
:param train_feed_dict_supplier: (optional) A callable with signature `t -> feed_dict` or `() -> feed_dict` to
pass to `tf.Session.run`
feed_dict argument
:param summary_utils: (optional) object that implements a method method `run(tf.Session, step)`
that is executed at every iteration (see for instance utils.PrintUtils
:param check_if_zero: (optional) debug flag
:return: A dictionary of lists of step-wise hyper-gradients. In usual application the "true" hyper-gradients
can be obtained with method std_collect_hyper_gradients
"""
if not train_feed_dict_supplier:
# noinspection PyUnusedLocal
train_feed_dict_supplier = lambda: None
if not val_feed_dict_suppliers: # FIXME probably won't work with the current settings.
val_feed_dict_suppliers = lambda: None
else:
if not isinstance(val_feed_dict_suppliers, dict) and len(self.val_error_dict.keys()) == 1:
# cast validation supplier into a dict
val_feed_dict_suppliers = {list(self.val_error_dict.keys())[0]: val_feed_dict_suppliers}
# compute alpha_T using the validation set
[tf.variables_initializer([self.p_dict[ve]]).run(feed_dict=cmo(data_supplier, hyper_batch_step))
for ve, data_supplier in val_feed_dict_suppliers.items()]
# set hyper-derivatives to 0
hyper_derivatives = self._initialize_hyper_derivatives_res() # TODO deal better with the hyper-derivatives
ss = tf.get_default_session()
if summary_utils: summary_utils.run(ss, T)
for t in range(T - 1, -1, -1):
self.global_step.decrease.eval()
# revert w_t to w_(t-1)
ss.run(self._back_hist_op, feed_dict={self._w_placeholder: self.w_hist[t]})
# noinspection PyNoneFunctionAssignment
fds = cmo(train_feed_dict_supplier, self.global_step.eval())
# TODO read below (maybe use tf.control_dependencies ... would it speed this up?)
""" Unfortunately it looks like that the following two lines cannot be run together (will this
degrade the performances???"""
if check_if_zero: # debug
if self._abs_sum_p.eval() < 1.e-20:
# ss.run([self.bk_ops, self.global_step.decrease], feed_dict=fds)
print('exiting backward pass at iteration %d.' % t)
return {k: list(reversed(v)) for k, v in hyper_derivatives.items()}
# compute partial results for hyper_derivatives: alpha_t*B_t and concatenates them
mid_res = ss.run([e[1] for e in self.hyper_derivatives], feed_dict=fds)
for k in range(len(mid_res)):
hyper_list = self.hyper_derivatives[k][0]
mr = mid_res[k]
for j in range(len(hyper_list)):
hyper_derivatives[hyper_list[j]].append(mr[j])
# computes alpha_t = alpha_(t+1)*A_(t+1)
ss.run([self._bk_ops], feed_dict=fds) # check this global_step here.. (for Adam)
if summary_utils: summary_utils.run(ss, t)
hyper_derivatives = {k: list(reversed(v)) for k, v in hyper_derivatives.items()}
# updates also variables that keep track of hyper-gradients
[self._hyper_assign_ops[h].eval(feed_dict={self._grad_wrt_hypers_placeholder: ghv})
for h, ghv in ReverseHG.std_collect_hyper_gradients(hyper_derivatives).items()]
return hyper_derivatives
def _initialize_hyper_derivatives_res(self):
return {hyper: [] for hyper in self.hyper_list}
def run_all(self, T, train_feed_dict_supplier=None, val_feed_dict_suppliers=None, hyper_batch_step=None,
forward_su=None, backward_su=None, after_forward_su=None, check_if_zero=False):
"""
Performs both forward and backward step. See functions `forward` and `backward` for details.
:param hyper_batch_step: support for stochastic sampling of validation set
:param T: Total number of iterations
:param train_feed_dict_supplier: (feed_dict) supplier for training stage
:param val_feed_dict_suppliers: (feed_dict) supplier for validation stage
:param forward_su: (optional) utils object with function `run` passed to `forward`
:param backward_su: (optional) utils object with function `run` passed to `backward`
        :param after_forward_su: (optional) utils object with function `run` executed after the forward pass
        # Use the cross entropy loss function
self.criterion = nn.CrossEntropyLoss()
def forward(self, x, hidden_state=None, seq_lengths=None,
total_length=None, get_hidden_state=False,
prob_output=True, already_embedded=False):
if self.embed_features is not None and already_embedded is False:
# Run each embedding layer on each respective feature, adding the
# resulting embedding values to the tensor and removing the original,
# categorical encoded columns
x = du.embedding.embedding_bag_pipeline(x, self.embed_layers, self.embed_features,
model_forward=True, inplace=True)
# Make sure that the input data is of type float
x = x.float()
# Get the batch size (might not be always the same)
batch_size = x.shape[0]
if hidden_state is None:
# Reset the LSTM hidden state. Must be done before you run a new
# batch. Otherwise the LSTM will treat a new batch as a continuation
# of a sequence.
self.hidden = self.init_hidden(batch_size)
else:
# Use the specified hidden state
self.hidden = hidden_state
if seq_lengths is not None:
# pack_padded_sequence so that padded items in the sequence won't be
# shown to the LSTM
x = pack_padded_sequence(x, seq_lengths, batch_first=True, enforce_sorted=False)
# Get the outputs and hidden states from the LSTM layer(s)
lstm_output, self.hidden = self.lstm(x, self.hidden)
if seq_lengths is not None:
# Undo the packing operation
lstm_output, _ = pad_packed_sequence(lstm_output, batch_first=True,
total_length=self.total_length)
# Apply dropout to the last LSTM layer
lstm_output = self.dropout(lstm_output)
# Flatten LSTM output to fit into the fully connected layer
flat_lstm_output = lstm_output.contiguous().view(-1, self.n_hidden * (1 + self.bidir))
# Apply the final fully connected layer
output = self.fc(flat_lstm_output)
if prob_output is True:
# Get the outputs in the form of probabilities
if self.n_outputs == 1:
output = self.activation(output)
else:
# Normalize outputs on their last dimension
output = self.activation(output, dim=len(output.shape)-1)
if get_hidden_state is True:
return output, self.hidden
else:
return output
def loss(self, y_pred, y_labels):
# Flatten the data
y_pred = y_pred.reshape(-1)
y_labels = y_labels.reshape(-1)
# Find the indices that don't correspond to padding samples
non_pad_idx = y_labels != self.padding_value
# Remove the padding samples
y_labels = y_labels[non_pad_idx]
y_pred = y_pred[non_pad_idx]
# Compute cross entropy loss which ignores all padding values
ce_loss = self.criterion(y_pred, y_labels)
return ce_loss
def init_hidden(self, batch_size):
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
# Check if GPU is available
train_on_gpu = torch.cuda.is_available()
if train_on_gpu is True:
hidden = (weight.new(self.n_lstm_layers * (1 + self.bidir), batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_lstm_layers * (1 + self.bidir), batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_lstm_layers * (1 + self.bidir), batch_size, self.n_hidden).zero_(),
weight.new(self.n_lstm_layers * (1 + self.bidir), batch_size, self.n_hidden).zero_())
return hidden
class CustomLSTM(BaseRNN):
def __init__(self, n_inputs, n_hidden, n_outputs, n_lstm_layers=1, p_dropout=0,
embed_features=None, n_embeddings=None, embedding_dim=None,
bidir=False, padding_value=999999):
if bidir is True:
rnn_module = lambda *cell_args: BidirLSTMLayer(LSTMCell, *cell_args)
else:
rnn_module = lambda *cell_args: LSTMLayer(LSTMCell, *cell_args)
super(CustomLSTM, self).__init__(rnn_module=rnn_module, n_inputs=n_inputs,
n_hidden=n_hidden, n_outputs=n_outputs,
n_lstm_layers=n_lstm_layers, p_dropout=p_dropout,
embed_features=embed_features,
n_embeddings=n_embeddings,
embedding_dim=embedding_dim,
bidir=bidir, is_lstm=True,
padding_value=padding_value)
class TLSTM(BaseRNN):
def __init__(self, n_inputs, n_hidden, n_outputs, n_rnn_layers=1, p_dropout=0,
embed_features=None, n_embeddings=None, embedding_dim=None,
bidir=False, padding_value=999999,
delta_ts_col=None, elapsed_time='small', no_small_delta=True):
if delta_ts_col is None:
if embed_features is None:
self.delta_ts_col = n_inputs
else:
# Have into account the new embedding columns that will be added,
# as well as the removal of the originating categorical columns
# NOTE: This only works assuming that the delta_ts column is the
# last one on the dataframe, standing to the left of all the
# embedding features
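                # Illustrative arithmetic (hypothetical sizes): with n_inputs=10 and
                # embed_features=[[7, 8], [9]], delta_ts_col = 10 - 2 - 1 = 7; with a
                # flat list such as [7, 8, 9] it is n_inputs - len(embed_features) = 7.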
if all([isinstance(feature, int) for feature in embed_features]):
self.delta_ts_col = n_inputs - len(embed_features)
elif (all([isinstance(feat_list, list) for feat_list in embed_features])
and all([isinstance(feature, int) for feat_list in embed_features
for feature in feat_list])):
self.delta_ts_col = n_inputs
for i in range(len(embed_features)):
self.delta_ts_col = self.delta_ts_col - len(embed_features[i])
else:
self.delta_ts_col = delta_ts_col
self.elapsed_time = elapsed_time
self.no_small_delta = no_small_delta
TLSTMCell_prtl = partial(TLSTMCell, delta_ts_col=self.delta_ts_col,
elapsed_time=self.elapsed_time,
no_small_delta=self.no_small_delta)
if bidir is True:
rnn_module = lambda *cell_args: BidirTLSTMLayer(TLSTMCell_prtl, *cell_args)
else:
rnn_module = lambda *cell_args: TLSTMLayer(TLSTMCell_prtl, *cell_args)
super(TLSTM, self).__init__(rnn_module=rnn_module, n_inputs=n_inputs,
n_hidden=n_hidden, n_outputs=n_outputs,
n_rnn_layers=n_rnn_layers,
p_dropout=p_dropout,
embed_features=embed_features,
n_embeddings=n_embeddings,
embedding_dim=embedding_dim,
bidir=bidir, is_lstm=True,
padding_value=padding_value)
def forward(self, x, hidden_state=None, get_hidden_state=False,
prob_output=True, already_embedded=False):
if self.embed_features is not None and already_embedded is False:
# Run each embedding layer on each respective feature, adding the
# resulting embedding values to the tensor and removing the original,
# categorical encoded columns
x = du.embedding.embedding_bag_pipeline(x, self.embed_layers, self.embed_features,
model_forward=True, inplace=True)
# Make sure that the input data is of type float
x = x.float()
# Get the batch size (might not be always the same)
batch_size = x.shape[0]
# Isolate the delta_ts feature
delta_ts = x[:, :, self.delta_ts_col].clone()
left_to_delta = x[:, :, :self.delta_ts_col]
right_to_delta = x[:, :, self.delta_ts_col+1:]
x = torch.cat([left_to_delta, right_to_delta], 2)
if hidden_state is None:
# Reset the LSTM hidden state. Must be done before you run a new
# batch. Otherwise the LSTM will treat a new batch as a continuation
# of a sequence.
self.hidden = self.init_hidden(batch_size)
else:
# Use the specified hidden state
self.hidden = hidden_state
# Make sure that the data is input in the format of (timestamp x sample x features)
x = x.permute(1, 0, 2)
# Get the outputs and hidden states from the RNN layer(s)
if self.n_rnn_layers == 1:
if self.bidir is False:
                # Since there's only one layer and the model is not bidirectional,
                # we only need a single set of hidden states
hidden_state = (self.hidden[0][0], self.hidden[1][0])
# Run the RNN layer on the data
rnn_output, self.hidden = self.rnn_layer(x, hidden_state, delta_ts=delta_ts)
else:
            # One (hidden state, cell state) pair per RNN layer
output_states = (torch.zeros(self.hidden[0].shape), torch.zeros(self.hidden[1].shape))
i = 0
# The first RNN layer's input is the original input;
# the following layers will use their respective previous layer's
# output as input
rnn_output = x
for rnn_layer in self.rnn_layers:
hidden_state = (self.hidden[0][i], self.hidden[1][i])
# Run the RNN layer on the data
rnn_output, out_state = rnn_layer(rnn_output, hidden_state, delta_ts=delta_ts)
                # Apply the dropout layer, except after the last layer
if i < self.n_rnn_layers - 1:
rnn_output = self.dropout(rnn_output)
output_states[0][i] = out_state[0]
output_states[1][i] = out_state[1]
i += 1
# Update the hidden states variable
self.hidden = output_states
# Reconvert the data to the format of (sample x timestamp x features)
rnn_output = rnn_output.permute(1, 0, 2)
# Flatten RNN output to fit into the fully connected layer
flat_rnn_output = rnn_output.contiguous().view(-1, self.n_hidden * (1 + self.bidir))
# Apply the final fully connected layer
output = self.fc(flat_rnn_output)
if prob_output is True:
# Get the outputs in the form of probabilities
if self.n_outputs == 1:
output = self.activation(output)
else:
# Normalize outputs on their last dimension
output = self.activation(output, dim=len(output.shape)-1)
if get_hidden_state is True:
return output, self.hidden
else:
return output
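# Minimal sketch (illustrative only, not from the original code) of the column
# handling done in TLSTM.forward above: the elapsed-time feature is isolated and
# the remaining features are concatenated back together before being fed to the
# recurrent layers. Shapes and the column index are placeholder assumptions.
def _example_delta_ts_split():
    import torch
    x = torch.randn(8, 20, 11)              # (samples, timestamps, features)
    delta_ts_col = 10                       # assume delta_ts is the last feature
    delta_ts = x[:, :, delta_ts_col].clone()
    x = torch.cat([x[:, :, :delta_ts_col], x[:, :, delta_ts_col + 1:]], 2)
    return x.shape, delta_ts.shape          # torch.Size([8, 20, 10]), torch.Size([8, 20])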
class MF1LSTM(BaseRNN):
def __init__(self, n_inputs, n_hidden, n_outputs, n_rnn_layers=1, p_dropout=0,
embed_features=None, n_embeddings=None, embedding_dim=None,
bidir=False, padding_value=999999,
delta_ts_col=None, elapsed_time='small', no_small_delta=True):
if delta_ts_col is None:
if embed_features is None:
self.delta_ts_col = n_inputs
else:
                # Take into account the new embedding columns that will be added,
                # as well as the removal of the originating categorical columns.
                # NOTE: This only works assuming that the delta_ts column is the
                # last one in the dataframe, sitting just to the left of all the
                # embedding features.
if all([isinstance(feature, int) for feature in embed_features]):
self.delta_ts_col = n_inputs - len(embed_features)
elif (all([isinstance(feat_list, list) for feat_list in embed_features])
and all([isinstance(feature, int) for feat_list in embed_features
for feature in feat_list])):
self.delta_ts_col = n_inputs
for i in range(len(embed_features)):
self.delta_ts_col = self.delta_ts_col - len(embed_features[i])
else:
self.delta_ts_col = delta_ts_col
self.elapsed_time = elapsed_time
self.no_small_delta = no_small_delta
MF1LSTMCell_prtl = partial(MF1LSTMCell, delta_ts_col=self.delta_ts_col,
elapsed_time=self.elapsed_time,
no_small_delta=self.no_small_delta)
if bidir is True:
rnn_module = lambda *cell_args: BidirTLSTMLayer(MF1LSTMCell_prtl, *cell_args)
else:
rnn_module = lambda *cell_args: TLSTMLayer(MF1LSTMCell_prtl, *cell_args)
super(MF1LSTM, self).__init__(rnn_module=rnn_module, n_inputs=n_inputs,
n_hidden=n_hidden, n_outputs=n_outputs,
n_rnn_layers=n_rnn_layers,
p_dropout=p_dropout,
embed_features=embed_features,
n_embeddings=n_embeddings,
embedding_dim=embedding_dim,
bidir=bidir, is_lstm=True,
padding_value=padding_value)
def forward(self, x, hidden_state=None, get_hidden_state=False,
prob_output=True, already_embedded=False):
if self.embed_features is not None and already_embedded is False:
# Run each embedding layer on each respective feature, adding the
# resulting embedding values to the tensor and removing the original,
# categorical encoded columns
x = du.embedding.embedding_bag_pipeline(x, self.embed_layers, self.embed_features,
model_forward=True, inplace=True)
# Make sure that the input data is of type float
x = x.float()
        # Get the batch size (might not always be the same)
(with the outcomes ordered to be the same as the wires in the circuit).
"""
if isinstance(structure, str):
assert(structure == '1Q'), "The only default `structure` option is the string '1Q'"
structure = tuple([(q,) for q in pspec.qubit_labels])
n = pspec.number_of_qubits
else:
assert(isinstance(structure, list) or isinstance(structure, tuple)
), "If not a string, `structure` must be a list or tuple."
qubits_used = []
for qubit_labels in structure:
assert(isinstance(qubit_labels, list) or isinstance(
qubit_labels, tuple)), "SubsetQs must be a list or a tuple!"
qubits_used = qubits_used + list(qubit_labels)
assert(len(set(qubits_used)) == len(qubits_used)
), "The qubits in the tuples/lists of `structure must all be unique!"
assert(set(qubits_used).issubset(set(pspec.qubit_labels))
), "The qubits to benchmark must all be in the ProcessorSpec `pspec`!"
n = len(qubits_used)
    # Create an empty circuit over no wires
circuit = _cir.Circuit(num_lines=0, editable=True)
s_rc_dict = {}
p_rc_dict = {}
circuit_dict = {}
if isinstance(length, int):
length_per_subset = [length for i in range(len(structure))]
else:
length_per_subset = length
assert(len(length) == len(structure)), "If `length` is a list it must be the same length as `structure`"
for ssQs_ind, qubit_labels in enumerate(structure):
qubit_labels = tuple(qubit_labels)
# Sample a random circuit of "native gates" over this set of qubits, with the
# specified sampling.
subset_circuit = random_circuit(pspec=pspec, length=length_per_subset[ssQs_ind], qubit_labels=qubit_labels,
sampler=sampler, samplerargs=samplerargs, addlocal=addlocal, lsargs=lsargs)
circuit_dict[qubit_labels] = subset_circuit
# find the symplectic matrix / phase vector this circuit implements.
s_rc_dict[qubit_labels], p_rc_dict[qubit_labels] = _symp.symplectic_rep_of_clifford_circuit(
subset_circuit, pspec=pspec)
# Tensors this circuit with the current circuit
circuit.tensor_circuit(subset_circuit)
circuit.done_editing()
# Find the expected outcome of the circuit.
s_out, p_out = _symp.symplectic_rep_of_clifford_circuit(circuit, pspec=pspec)
s_inputstate, p_inputstate = _symp.prep_stabilizer_state(n, zvals=None)
s_outstate, p_outstate = _symp.apply_clifford_to_stabilizer_state(s_out, p_out, s_inputstate, p_inputstate)
idealout = []
for qubit_labels in structure:
subset_idealout = []
for q in qubit_labels:
qind = circuit.line_labels.index(q)
measurement_out = _symp.pauli_z_measurement(s_outstate, p_outstate, qind)
subset_idealout.append(measurement_out[1])
idealout.append(tuple(subset_idealout))
idealout = tuple(idealout)
return circuit, idealout
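# Illustrative usage sketch (assumes `pspec` is an existing ProcessorSpec for the
# device being benchmarked; the depth and sampler below are placeholder choices):
#
#   circuit, idealout = simultaneous_random_circuit(pspec, 5, structure='1Q',
#                                                   sampler='Qelimination')
#   # `idealout` is a tuple with one outcome tuple per entry of `structure`,
#   # ordered the same way as `structure`.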
def _get_setting(l, circuitindex, substructure, depths, circuits_per_length, structure):
lind = depths.index(l)
settingDict = {}
for s in structure:
if s in substructure:
settingDict[s] = len(depths) + lind * circuits_per_length + circuitindex
else:
settingDict[s] = lind
return settingDict
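# Small runnable sketch (not part of the original module) showing the setting
# indices produced by the helper above for made-up inputs: subsets listed in
# `substructure` get a per-circuit index offset by len(depths), while the
# remaining subsets just get the index of their depth.
def _example_get_setting():
    depths = [2, 4, 8]
    structure = (('Q0',), ('Q1',))
    # Circuit index 3 at depth 4, with only the ('Q0',) subset "active":
    return _get_setting(4, 3, (('Q0',),), depths, 10, structure)
    # -> {('Q0',): 3 + 1 * 10 + 3 = 16, ('Q1',): 1}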
def simultaneous_random_circuits_experiment(pspec, depths, circuits_per_length, structure='1Q', sampler='Qelimination',
samplerargs=[], addlocal=False, lsargs=[], set_isolated=True,
setcomplement_isolated=False,
descriptor='A set of simultaneous random circuits', verbosity=1):
"""
Generates a set of simultaneous random circuits of the specified depths.
Parameters
----------
pspec : ProcessorSpec
The ProcessorSpec for the device that the circuit is being sampled for, which defines the
"native" gate-set and the connectivity of the device. The returned circuit will be over
the gates in `pspec`, and will respect the connectivity encoded by `pspec`. Note that `pspec`
is always handed to the sampler, as the first argument of the sampler function (this is only
of importance when not using an in-built sampler).
    depths : list of ints
        The set of depths for the circuits (one group of `circuits_per_length`
        circuits is sampled at each depth).
circuits_per_length : int
The number of (possibly) different circuits sampled at each length.
structure : str or tuple.
Defines the "structure" of the simultaneous circuit. TODO : more details.
sampler : str or function, optional
If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}.
Except for 'local', this corresponds to sampling layers according to the sampling function
in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this
corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates.
If `sampler` is a function, it should be a function that takes as the first argument a
ProcessorSpec, and returns a random circuit layer as a list of gate Label objects. Note that
the default 'Qelimination' is not necessarily the most useful in-built sampler, but it is the
only sampler that requires no parameters beyond the ProcessorSpec *and* works for arbitrary
connectivity devices. See the docstrings for each of these samplers for more information.
samplerargs : list, optional
A list of arguments that are handed to the sampler function, specified by `sampler`.
The first argument handed to the sampler is `pspec`, the second argument is `qubit_labels`,
and `samplerargs` lists the remaining arguments handed to the sampler. This is not
optional for some choices of `sampler`.
addlocal : bool, optional
Whether to follow each layer in the "core" circuits, sampled according to `sampler` with
a layer of 1-qubit gates.
lsargs : list, optional
Only used if addlocal is True. A list of optional arguments handed to the 1Q gate
layer sampler circuit_layer_by_oneQgate(). Specifies how to sample 1Q-gate layers.
set_isolated : bool, optional
Todo
setcomplement_isolated : bool, optional
Todo
descriptor : str, optional
A description of the experiment being generated. Stored in the output dictionary.
verbosity : int, optional
If > 0 the number of circuits generated so far is shown.
Returns
-------
dict
A dictionary containing the generated random circuits, the error-free outputs of the circuit,
and the specification used to generate the circuits. The keys are:
- 'circuits'. A dictionary of the sampled circuits. The circuit with key(l,k) is the kth circuit
at length l.
- 'probs'. A dictionary of the error-free *marginalized* probabilities for the "1" outcome of
a computational basis measurement at the end of each circuit, with the standard input state.
The ith element of this tuple corresponds to this probability for the qubit on the ith wire of
the output circuit.
- 'qubitordering'. The ordering of the qubits in the 'target' tuples.
- 'spec'. A dictionary containing all of the parameters handed to this function, except `pspec`.
        This then specifies how the circuits were generated.
"""
experiment_dict = {}
experiment_dict['spec'] = {}
experiment_dict['spec']['depths'] = depths
experiment_dict['spec']['circuits_per_length'] = circuits_per_length
experiment_dict['spec']['sampler'] = sampler
experiment_dict['spec']['samplerargs'] = samplerargs
experiment_dict['spec']['addlocal'] = addlocal
experiment_dict['spec']['lsargs'] = lsargs
experiment_dict['spec']['descriptor'] = descriptor
experiment_dict['spec']['createdby'] = 'extras.rb.sample.simultaneous_random_circuits_experiment'
if isinstance(structure, str):
assert(structure == '1Q'), "The only default `structure` option is the string '1Q'"
structure = tuple([(q,) for q in pspec.qubit_labels])
else:
assert(isinstance(structure, list) or isinstance(structure, tuple)), \
"If not a string, `structure` must be a list or tuple."
qubits_used = []
for qubit_labels in structure:
assert(isinstance(qubit_labels, list) or isinstance(
qubit_labels, tuple)), "SubsetQs must be a list or a tuple!"
qubits_used = qubits_used + list(qubit_labels)
assert(len(set(qubits_used)) == len(qubits_used)), \
"The qubits in the tuples/lists of `structure must all be unique!"
assert(set(qubits_used).issubset(set(pspec.qubit_labels))), \
"The qubits to benchmark must all be in the ProcessorSpec `pspec`!"
experiment_dict['spec']['structure'] = structure
experiment_dict['circuits'] = {}
experiment_dict['probs'] = {}
experiment_dict['settings'] = {}
for lnum, l in enumerate(depths):
if verbosity > 0:
print('- Sampling {} circuits at length {} ({} of {} depths)'.format(circuits_per_length, l,
lnum + 1, len(depths)))
print(' - Number of circuits sampled = ', end='')
for j in range(circuits_per_length):
circuit, idealout = simultaneous_random_circuit(pspec, l, structure=structure, sampler=sampler,
samplerargs=samplerargs, addlocal=addlocal, lsargs=lsargs)
if (not set_isolated) and (not setcomplement_isolated):
experiment_dict['circuits'][l, j] = circuit
experiment_dict['probs'][l, j] = idealout
experiment_dict['settings'][l, j] = {
s: len(depths) + lnum * circuits_per_length + j for s in tuple(structure)}
else:
experiment_dict['circuits'][l, j] = {}
experiment_dict['probs'][l, j] = {}
experiment_dict['settings'][l, j] = {}
experiment_dict['circuits'][l, j][tuple(structure)] = circuit
experiment_dict['probs'][l, j][tuple(structure)] = idealout
experiment_dict['settings'][l, j][tuple(structure)] = _get_setting(l, j, structure, depths,
circuits_per_length, structure)
if set_isolated:
for subset_ind, subset in enumerate(structure):
subset_circuit = circuit.copy(editable=True)
#print(subset)
for q in circuit.line_labels:
if q not in subset:
#print(subset_circuit, q)
subset_circuit.replace_with_idling_line(q)
subset_circuit.done_editing()
experiment_dict['circuits'][l, j][(tuple(subset),)] = subset_circuit
experiment_dict['probs'][l, j][(tuple(subset),)] = idealout[subset_ind]
# setting = {}
# for s in structure:
# if s in subset:
# setting[s] = len(depths) + lnum*circuits_per_length + j
# else:
# setting[s] = lnum
experiment_dict['settings'][l, j][(tuple(subset),)] = _get_setting(l, j, (tuple(subset),), depths,
circuits_per_length, structure)
# print(subset)
# print(_get_setting(l, j, subset, depths, circuits_per_length, structure))
if setcomplement_isolated:
for subset_ind, subset in enumerate(structure):
subsetcomplement_circuit = circuit.copy(editable=True)
for q in circuit.line_labels:
if q in subset:
subsetcomplement_circuit.replace_with_idling_line(q)
subsetcomplement_circuit.done_editing()
subsetcomplement = list(_copy.copy(structure))
subsetcomplement_idealout = list(_copy.copy(idealout))
del subsetcomplement[subset_ind]
del subsetcomplement_idealout[subset_ind]
subsetcomplement = tuple(subsetcomplement)
subsetcomplement_idealout = tuple(subsetcomplement_idealout)
experiment_dict['circuits'][l, j][subsetcomplement] = subsetcomplement_circuit
experiment_dict['probs'][l, j][subsetcomplement] = subsetcomplement_idealout
# for s in structure:
# if s in subsetcomplement:
# setting[s] = len(depths) + lnum*circuits_per_length + j
# else:
# setting[s] = lnum
experiment_dict['settings'][l, j][subsetcomplement] = _get_setting(l, j, subsetcomplement, depths,
circuits_per_length, structure)
if verbosity > 0: print(j + 1, end=',')
        if verbosity > 0: print('')
# chapter/5/problems_5_14_x.py
from resources.GF2 import one
from resources.vecutil import list2vec
from resources.vec import Vec
from math import pi, sqrt
from resources.vecutil import zero_vec
def zero_vec_n(n):
return zero_vec(set(list(range(n))))
def lin_comb_sum(scalars, vectors):
return sum([s*v for s, v in zip(scalars, vectors)])
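# Small runnable sketch (not part of the original exercises) of the helper above:
# the linear combination 2*[1, 0] + 3*[0, 1] equals [2, 3]. It reuses the
# list2vec helper already imported at the top of this file.
def _example_lin_comb_sum():
    u = list2vec([1, 0])
    v = list2vec([0, 1])
    return lin_comb_sum([2, 3], [u, v]) == list2vec([2, 3])  # True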
def problem_5_14_1(a, b, c):
"""
    Multiply the vectors in V by the scalars a, b, c
:param a: scalar
:param b: scalar
:param c: scalar
:return: sum of the span vectors multiplied by a, b and c
>>> problem_5_14_1(1,1,0) == list2vec([2,1,4,1]) # a
True
>>> problem_5_14_1(1/2, 1, 1) == list2vec([1,1,1,0]) # b
True
>>> problem_5_14_1(0,1,-1) == list2vec([0,1,1,2]) # c
True
"""
V = [list2vec([2,0,4,0]), list2vec([0,1,0,1]), list2vec([0,0,-1,-1])]
#return a * V[0] + b * V[1] + c * V[2]
return lin_comb_sum([a, b, c], V)
def problem_5_14_2(a, b, c):
"""
    Multiply the vectors in V by the scalars a, b, c
:param a: scalar
:param b: scalar
:param c: scalar
:return: sum of the span vectors multiplied by a, b and c
>>> problem_5_14_2(3,-1,1) == list2vec([2,1,4]) # a
True
>>> problem_5_14_2(1/2,-3/2,1) == list2vec([1,1,1]) # b
True
>>> problem_5_14_2(1/2,-11/2,4) == list2vec([5,4,3]) # c
True
>>> problem_5_14_2(1,-2,1) == list2vec([0,1,1]) # d
True
"""
V = [list2vec([0,0,1]),list2vec([2,0,1]),list2vec([4,1,2])]
# return a * V[0] + b * V[1] + c * V[2]
return lin_comb_sum([a, b, c], V)
def problem_5_14_3(a, b, c, d):
"""
    Multiply the vectors in V by the scalars a, b, c and d
:param a: scalar
:param b: scalar
:param c: scalar
:param d: scalar
:return: sum of the span vectors multiplied by a, b, c and d
>>> problem_5_14_3(one,0,one,0) == list2vec([one,one,0,0]) # a
True
>>> problem_5_14_3(one,0,0,one) == list2vec([one,0,one,0]) # b
True
>>> problem_5_14_3(one,one,0,one) == list2vec([one,0,0,0]) # c
True
"""
V = [list2vec([0,one,0,one]), list2vec([0,0,one,0]), list2vec([one,0,0,one]), list2vec([one,one,one,one])]
# return a * V[0] + b * V[1] + c * V[2] + d * V[3]
return lin_comb_sum([a, b, c, d], V)
D_5_14_4 = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'}
V_5_14_4 = {
'v1': Vec(D_5_14_4, {'a':one, 'b':one}),
'v2': Vec(D_5_14_4, {'b':one, 'c':one}),
'v3': Vec(D_5_14_4, {'a':one, 'd':one}),
'v4': Vec(D_5_14_4, {'b':one, 'e':one}),
'v5': Vec(D_5_14_4, {'c':one, 'e':one}),
'v6': Vec(D_5_14_4, {'d':one, 'e':one}),
'v7': Vec(D_5_14_4, {'f':one, 'h':one}),
'v8': Vec(D_5_14_4, {'g':one, 'h':one})
}
def problem_5_14_4(edges):
"""
    :param edges: set of edge keys to include
    :return: linear combination one * V[edge] summed over the given edges
>>> problem_5_14_4({'v5', 'v6'}) == Vec(D_5_14_4, {'c': one, 'd': one})
True
>>> problem_5_14_4({'v7', 'v8'}) == Vec(D_5_14_4, {'f': one, 'g': one})
True
>>> problem_5_14_4({'v3', 'v6'}) == Vec(D_5_14_4, {'a': one, 'e': one})
True
>>> problem_5_14_4({'v1', 'v3'}) == Vec(D_5_14_4, {'b': one, 'd': one})
True
"""
V = V_5_14_4
return sum([one * V[edge] for edge in edges])
zero_vec_3 = zero_vec_n(3)
def problem_5_14_5(scalars, vectors):
"""
Linear combination of scalars and vectors
:param scalars: n list of scalars
:param vectors: n list of vectors
:return: linear combination of scalar[i] * vector[i]
>>> a_vectors = [list2vec([1,2,0]), list2vec([2,4,1]), list2vec([0,0,-1])]
>>> problem_5_14_5([-2,1,1], a_vectors) == zero_vec_3
True
>>> b_vectors = [list2vec([2,4,0]), list2vec([8,16,4]), list2vec([0,0,7])]
>>> problem_5_14_5([-4, 1, -4/7], b_vectors) == zero_vec_3
True
>>> c_vectors = [list2vec([0,0,5]), list2vec([1,34,2]), list2vec([123,456,789]), list2vec([-3,-6,0]), list2vec([1,2,0.5])]
>>> problem_5_14_5([-3/10,0,0,1,3], c_vectors) == zero_vec_3
True
"""
return lin_comb_sum(scalars, vectors)
def problem_5_14_6(scalars, vectors):
"""
Linear combination of scalars and vectors
:param scalars: n list of scalars
:param vectors: n list of vectors
:return: linear combination of scalar[i] * vector[i]
>>> a_vectors = [list2vec([1,2,3]), list2vec([4,5,6]), list2vec([1,1,1])]
>>> problem_5_14_6([-1,1,-3], a_vectors) == zero_vec_3
True
>>> b_vectors = [list2vec([0,-1,0,-1]), list2vec([pi, pi, pi, pi]), list2vec([-sqrt(2), sqrt(2), -sqrt(2), sqrt(2)])]
>>> problem_5_14_6([2*sqrt(2), sqrt(2)/pi, 1], b_vectors) == zero_vec_n(4)
True
>>> c_vectors = [list2vec([1,-1,0,0,0]), list2vec([0,1,-1,0,0]), list2vec([0,0,1,-1,0]), list2vec([0,0,0,1,-1]), list2vec([-1,0,0,0,1])]
>>> problem_5_14_6([1,1,1,1,1], c_vectors) == zero_vec_n(5)
True
"""
return lin_comb_sum(scalars, vectors)
def problem_5_14_7(scalars, vectors):
"""
>>> u = list2vec([3, 9, 6, 5, 5])
>>> v = list2vec([4, 10, 6, 6, 8])
>>> w = list2vec([1, 1, 0, 1, 3])
>>> problem_5_14_6([-1, 1], [u, v]) == w
True
"""
return lin_comb_sum(scalars, vectors)
def problem_5_14_8(scalars, vectors):
"""
>>> u = list2vec([1,1,0,0])
>>> v = list2vec([0,1,1,0])
>>> w = list2vec([0,0,1,1])
>>> x = list2vec([1,0,0,1])
>>> problem_5_14_8([1, -1, 1], [u, v, w]) == x
True
>>> problem_5_14_8([-1, 1, 1], [u, v, x]) == w
True
>>> problem_5_14_8([1, 1, -1], [u, w, x]) == v
True
>>> problem_5_14_8([1, -1, 1], [v, w, x]) == u
True
"""
return lin_comb_sum(scalars, vectors)
def list_of_lists_to_vecs(list_of_lists):
return [list2vec(l) for l in list_of_lists]
def problem_5_14_9(scalars, vectors):
"""
Problem c cutoff in Kindle edition. Using value from Coursera course:
https://github.com/jimcarson/Coursera/blob/master/CodingTheMatrix/hw12_basis_problems/The_Basis_problems.py#L118
>>> zero_vec_4 = zero_vec_n(4)
>>> a_vectors = list_of_lists_to_vecs([[one, one, one, one], [one, 0, one, 0], [0, one, one, 0], [0, one, 0, one]])
>>> problem_5_14_9([one, one, 0, one], a_vectors) == zero_vec_4
True
>>> b_vectors = list_of_lists_to_vecs([[0, 0, 0, one], [0, 0, one, 0], [one, one, 0, one], [one, one, one, one]])
>>> problem_5_14_9([0, one, one, one], b_vectors) == zero_vec_4
True
>>> c_vectors = list_of_lists_to_vecs([[one,one,0,one,one], [0,0,one,0,0], [0,0,one,one,one], [one,0,one,one,one], [one,one,one,one,one]])
>>> problem_5_14_9([one, one, 0, 0, one], c_vectors) == list2vec([0, 0, 0, 0, 0])
True
"""
return lin_comb_sum(scalars, vectors)
#        a    b    c    d    e    f    g    h
# v1    one  one
# v2         one  one
# v3    one            one
# v4         one            one
# v5              one       one
# v6                   one  one
# v7                             one       one
# v8                                  one  one
def problem_5_4_10(scalars, vectors):
"""
>>> zero_vec_D_5_14_4 = zero_vec(D_5_14_4)
>>> a_vectors = [V_5_14_4[key] for key in ['v1', 'v2', 'v3', 'v4', 'v5']]
>>> problem_5_4_10([0, one, 0, one, one], a_vectors) == zero_vec_D_5_14_4
True
>>> b_vectors = [V_5_14_4[key] for key in ['v1', 'v2', 'v3', 'v4', 'v5', 'v7', 'v8']]
>>> problem_5_4_10([0, one, 0, one, one, 0, 0], b_vectors) == zero_vec_D_5_14_4
True
>>> c_vectors = [V_5_14_4[key] for key in ['v1', 'v2', 'v3', 'v4', 'v6']]
>>> problem_5_4_10([one, 0, one, one, one], c_vectors) == zero_vec_D_5_14_4
True
>>> d_vectors = [V_5_14_4[key] for key in ['v1', 'v2', 'v3', 'v5', 'v6', 'v7', 'v8']]
>>> problem_5_4_10([one, one, one, one, one, 0, 0], d_vectors) == zero_vec_D_5_14_4
True
"""
return lin_comb_sum(scalars, vectors)
S_5_14_11 = {ix:list2vec(l) for ix, l in enumerate([
# w
[1, 0, 0, 0, 0], # 0
[0, 1, 0, 0, 0], # 1
[0, 0, 1, 0, 0], # 2
[0, 0, 0, 1, 0], # 3
[0, 0, 0, 0, 1] # 4
])}
def problem_5_14_11(scalars, w, z):
"""
The Exchange Lemma is defined in 5.11
:param scalars: list of ints
:param w: vector key in S, but not in A, to replace with z
:param z: vector to replace vector in position w
:return: linear combination of scalars over the union of S and {z} minus {w}
>>> a_z = list2vec([1, 1, 1, 1, 1])
>>> a_w = 4
>>> problem_5_14_11([-1, -1, -1, -1, 1], a_w, a_z) == S_5_14_11[a_w] # [0, 0, 0, 0, 1]
True
>>> b_z = list2vec([0, 1, 0, 1, 0])
>>> b_w = 3
>>> problem_5_14_11([0, -1, 0, 1, 0], b_w, b_z) == S_5_14_11[b_w] # [0, 0, 0, 1, 0]
True
>>> c_z = list2vec([1, 0, 1, 0, 1])
>>> c_w = 2
>>> problem_5_14_11([-1, 0, 1, 0, -1], c_w, c_z) == S_5_14_11[c_w] # [0, 0, 1, 0, 0]
True
"""
S = S_5_14_11.copy()
A = {0, 1}
assert w in S
assert w not in A
S[w] = z
return lin_comb_sum(scalars, [S[key] for key in sorted(S.keys())])
#        a    b    c    d    e    f    g    h
# v1    one  one
# v2         one  one
# v3    one            one
# v4         one            one
# v5              one       one
# v6                   one  one
# v7                             one       one
# v8                                  one  one
def problem_5_14_12(scalars, w, z, A, S=None):
"""
:param scalars: S len list of scalars to multiple S by
:param w: key in S to replace with z
:param z: vector to replace key w in S with
:param A: set of keys in S to retain
:param S: dictionary of vectors
:return: linear combination of scalars * S in dictionary key order
>>> a_A = {'v1', 'v4'}
>>> a_w = 'v6'
>>> a_z = Vec(D_5_14_4, {'d': one, 'e': one})
>>> problem_5_14_12([0, 0, 0, 0, 0, one, 0, 0], a_w, a_z, a_A) == V_5_14_4[a_w] # {'d': one, 'e': one}
True
>>> b_A = {'v2', 'v3'}
>>> b_w = 'v5'
>>> b_z = Vec(D_5_14_4, {'c': one, 'd': one})
    >>> problem_5_14_12([0, one, 0, one, 0, 0, 0, 0], b_w, b_z, b_A) == V_5_14_4[b_w] # {'c': one, 'e': one}
    True
    """
    if S is None:
        S = V_5_14_4
    S = dict(S)
    assert w in S
    assert w not in A
    S[w] = z
    return lin_comb_sum(scalars, [S[key] for key in sorted(S.keys())])
# TODO
# Add state callbacks - Try to attach to post fork loop
#import gc
import signal
import asyncio
import traceback
import socket
import os, sys, random, mrpacker
import multiprocessing
import faulthandler
import functools
from wsgiref.handlers import format_date_time
import inspect, copy
#from inspect import signature #getmodulename, isawaitable, signature, stack
#from prof import profiler_start,profiler_stop
import uuid, http.cookies
import mrhttp
from mrhttp import Protocol
from mrhttp.request import Request
from mrhttp import Response
from mrhttp import router
from mrhttp import MemcachedClient
from mrhttp import MrqClient
from mrhttp import MrcacheClient
try:
import mrjson as json
except ImportError:
try:
import ujson as json
except:
pass
#import mrmemcache
#import uvloop
#asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
signames = {
int(v): v.name for k, v in signal.__dict__.items()
if isinstance(v, signal.Signals)}
class Application(mrhttp.CApp):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Application, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
self._loop = None
self._connections = set()
self._error_handlers = []
self._request_extensions = {}
self._log_request = None
self._protocol_factory = Protocol
self._debug = False
self.request = Request()
self.requests = None
self.response = Response()
self.router = router.Router()
self.tasks = []
self.config = {}
self.listeners = { "at_start":[], "at_end":[], "after_start":[]}
self._mc = None
self._mrq = None
self._mrc = None
self.session_backend = "memcached"
self.uses_session = False
self.uses_mrq = False
self.err404 = "<html><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested page was not found</p></body></html>"
def setup(self, log_request=None, protocol_factory=None, debug=False):
self._log_request = log_request
self._protocol_factory = protocol_factory or Protocol
self._debug = debug
@property
def loop(self):
if not self._loop:
self._loop = asyncio.get_event_loop()
return self._loop
def expand_requests(self):
for x in range(len(self.requests)):
self.requests.append(Request())
def prehandler(self):
pass
# Decorator
def on(self, event):
"""Call the decorated function on one of the following events:
["at_start","at_end", "after_start"]
"""
def decorator(func):
self.listeners[event].append(func)
return func
return decorator
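  # Illustrative usage sketch (assumed application and listener names, not part
  # of the original module): registering a coroutine to run when the server
  # starts, using the decorator above.
  #
  #   app = Application()
  #
  #   @app.on("at_start")
  #   async def connect_to_db():
  #       ...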
def trigger_event(self, event):
for func in self.listeners[event]:
result = func()
if inspect.isawaitable(result):
self.loop.run_until_complete(result)
# Decorator
def route(self, uri, methods=["GET"], options=[], _type="html"):
if "session" in options:
self.uses_session = True
if "mrq" in options:
self.uses_mrq = True
if not uri.startswith('/'): uri = '/' + uri
#params = {}
#params["methods"] = methods
#params["options"] = options
#params["type"] = _type
#params["mrq"] = None
#for o in options:
#if o.startswith("mrq"):
#
#self.uses_mrq = True
#if self._mrq == None:
#srvs = self.config.get("mrq", None)
#print(srvs)
#if type(srvs) != list or type(srvs[0]) != tuple or len(srvs) == 0:
#print("When using MrQ app.config['mrq'] must be set to a list of (host,port) tuple pairs. Exiting")
#exit(1)
#self._mrq = []
#if type(srvs) == list and type(srvs[0]) == list:
#for s in srvs:
#self._mrq.append( MrqClient( s, self.loop) )
#else:
#self._mrq.append( MrqClient( srvs, self.loop) )
#if o == "mrq":
#o = "mrq0"
#l = []
#try:
#for n in o[3:]:
#l.append( self._mrq[int(n)] )
#params["mrq"] = l
#except:
#print("Error mrq route specifies a cluster that doesn't exist")
#print("uri:", uri, "mrq", o)
#exit(1)
def response(func):
self.router.add_route( func, uri, methods, options, _type )
#self.router.add_route( func, uri, params )
return func
return response
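  # Illustrative usage sketch (handler name, URI and return value are placeholder
  # assumptions): registering a GET handler with the decorator above.
  #
  #   @app.route('/hello', methods=["GET"])
  #   def hello(r):
  #       return "Hello, world"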
def add_routes(self, rs):
for r in rs:
params = {}
params["methods"] = r[2]
params["options"] = r[3]
params["type"] = r[4]
self.router.add_route( r[0], r[1], params )
def _get_idle_and_busy_connections(self):
return \
[c for c in self._connections if c.pipeline_empty], \
[c for c in self._connections if not c.pipeline_empty]
async def drain(self):
#await asyncio.sleep(0.1)
idle, busy = self._get_idle_and_busy_connections()
for c in idle:
c.transport.close()
if idle or busy:
print('Draining connections...')
else:
return
if idle:
print('{} idle connections closed immediately'.format(len(idle)))
if busy:
print('{} connections busy, read-end closed'.format(len(busy)))
for x in range(5, 0, -1):
await asyncio.sleep(1)
idle, busy = self._get_idle_and_busy_connections()
for c in idle:
c.transport.close()
if not busy:
break
else:
print( "{} seconds remaining, {} connections still busy" .format(x, len(busy)))
_, busy = self._get_idle_and_busy_connections()
if busy:
print('Forcefully killing {} connections'.format(len(busy)))
for c in busy:
c.pipeline_cancel()
#await asyncio.sleep(2.3)
def extend_request(self, handler, *, name=None, property=False):
if not name:
name = handler.__name__
self._request_extensions[name] = (handler, property)
def serve(self, *, sock, host, port, loop, run_async=False):
faulthandler.enable()
#pr = cProfile.Profile()
#pr.enable()
#cProfile.runctx('test(num)', globals(), locals(), 'prof%d.prof' %num)
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 0) #TODO uvloop .9.1 sets this
#profiler_start(b"mrhttp.log")
if not loop:
loop = self.loop
asyncio.set_event_loop(loop)
else:
self._loop = loop
self.session_backend_type = 1
if self.session_backend == "mrworkserver":
self.session_backend_type = 2
elif self.session_backend == "mrcache":
self.session_backend_type = 3
self.requests = [Request() for x in range(128)]
self.cinit()
self.router.finalize_routes()
self.router.setupRoutes()
self._appStart()
if self.uses_mrq:
mrqconf = self.config.get("mrq", None)
if not mrqconf:
print("When using MrQ app.config['mrq'] must be set. Exiting")
exit(1)
srvs = self.config.get("mrq", None)
if type(srvs) != list or len(srvs) == 0 or type(srvs[0]) != tuple:
print("When using MrQ app.config['mrq'] must be set to a list of (host,port) tuple pairs. Exiting")
exit(1)
self._mrq = MrqClient( srvs, self.loop)
if self.uses_session:
self.setupSessionClient()
self.trigger_event("at_start")
server_coro = loop.create_server( lambda: self._protocol_factory(self), sock=sock)
if run_async:
return server_coro
# Try except here?
server = loop.run_until_complete(server_coro)
print('Accepting connections on http://{}:{}'.format(host, port))
self.trigger_event("after_start")
loop.add_signal_handler(signal.SIGTERM, loop.stop)
#loop.add_signal_handler(signal.SIGINT, loop.stop)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(self.drain())
self._connections.clear()
server.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(server.wait_closed())
self.trigger_event("at_end")
loop.close()
for r in self.requests:
r.cleanup()
self.requests = None
#for ref in gc.get_referrers(self.requests[0]):
#if type(ref) == list:
#print("list")
#else:
#print(ref)
#print("DELME refcnt ", sys.getrefcount(self.requests[0]))
#r = self.requests[0]
#print("id requests ", id(self.requests))
#rs = self.requests
#self.requests = None
#gc.collect()
#print (gc.get_referrers(rs))
#print("DELME refcnt ", sys.getrefcount(r))
#for ref in gc.get_referrers(r):
#if type(ref) == list:
#print("list")
#print("id ref ", id(ref))
#else:
#print(ref)
# Update the response date string every few seconds
def updateDateString(self):
self.updateDate( format_date_time(None) )
self.loop.call_later(10, self.updateDateString)
def _appStart(self):
self.loop.call_soon(self.updateDateString)
def _run(self, *, host, port, num_workers=None, debug=None):
self._debug = debug or self._debug
if self._debug and not self._log_request:
self._log_request = self._debug
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
os.set_inheritable(sock.fileno(), True)
workers = set()
terminating = False
def stop(sig, frame):
nonlocal terminating
#if reloader_pid and sig == signal.SIGHUP:
#print('Reload request received')
if not terminating:
terminating = True
print('Termination request received')
for worker in workers:
worker.terminate()
def appstop():
nonlocal terminating
terminating = True
for worker in workers:
worker.terminate()
self.stop = appstop
signal.signal(signal.SIGTERM, stop)
#signal.signal(signal.SIGHUP, stop)
for _ in range(num_workers or 1):
worker = multiprocessing.Process( target=self.serve, kwargs=dict(sock=sock, host=host, port=port, loop=None))
worker.daemon = True
worker.start()
workers.add(worker)
sock.close() # Only the kids access the socket
for worker in workers:
try:
worker.join()
print("worker stopped")
if worker.exitcode > 0:
print('Worker exited with code {}'.format(worker.exitcode))
elif worker.exitcode < 0:
try:
signame = signames[-worker.exitcode]
except KeyError:
print( 'Worker crashed with unknown code {}!' .format(worker.exitcode))
else:
print('Worker crashed on signal {}!'.format(signame))
except KeyboardInterrupt:
pass
def run(self, host='0.0.0.0', port=8080, *, cores=None, debug=False):
# TODO reloader?
self._run( host=host, port=port, num_workers=cores, debug=debug)
def start_server(self, *, host='0.0.0.0', port=8080, loop=None):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
os.set_inheritable(sock.fileno(), True)
if not loop:
loop = self.loop
return self.serve(sock=sock, host=host, port=port, loop=loop, run_async=True)
def logoutUser(self, request):
c = http.cookies.SimpleCookie()
c['mrsession'] = ""
c['mrsession']['max-age'] = 0
request.response.cookies = c
# TODO LOL
  def setUserSessionAndCookies(self, request, user_id, user, cookies=None,
                               expiration=12*30*24*60*60, as_json=False):
if self.session_backend_type == 1 and self._mc == None:
raise ValueError("setUserSession called without memcached being setup")
if self.session_backend_type == 2 and self._mrq== None:
raise ValueError("setUserSession called without mrworkserver being setup")
if self.session_backend_type == 3 and self._mrc== None:
raise ValueError("setUserSession called without mrcache being setup")
a = random.getrandbits(64)
b = random.getrandbits(64)
c = random.getrandbits(64)
k = mrhttp.to64(a) + mrhttp.to64(b) + mrhttp.to64(c)
k = k[:32]
while len(k) < 32:
k += mrhttp.to64( random.getrandbits(6) )
userk = ""
numbits = user_id.bit_length()
if numbits == 0:
numbits += 1
while numbits > 0:
userk = mrhttp.to64( user_id & 0x1F ) + userk
user_id >>= 5
numbits -= 5
userk = userk + mrhttp.to64( 0x20 | random.getrandbits(5) )
skey = userk + k[len(userk):]
# TODO We could have user id be optional and do this if not given
#skey = uuid.uuid4().hex
# Send the session cookie back to the user
    c = cookies if cookies is not None else http.cookies.SimpleCookie()
c['mrsession'] = skey
c['mrsession']['path'] = '/'
c['mrsession']['expires'] = expiration
request.response.cookies = c
    if self.session_backend_type == 1: # Memcached
      if as_json:
        self._mc.set( skey, json.dumpb(user) )
      else:
        self._mc.set( skey, mrpacker.pack(user) )
elif self.session_backend_type == 2: # MrWorkServer
self._mrq.set( user_id, mrpacker.pack( [skey, user]) )
elif self.session_backend_type == 3: # MrCache
self._mrc.set( skey, mrpacker.pack( user ) )
return skey
def setupSessionClient(self):
if self.session_backend == "memcached":
srvs = self.config.get("memcache", None)
      if type(srvs) != list or len(srvs) == 0 or type(srvs[0]) != tuple:
        print("When using memcached sessions app.config['memcache'] must be set to a list of (host,port) tuple pairs. Exiting")
        exit(1)
# dhoebbel/USIO-Automation
#import dependencies
import pandas as pd
import numpy as np
import datetime
import math
#read in CSVs
full_lens = "Dave Files (raw)//2018 H1 Raw Full Lens.csv"
buyer_lens = "Dave Files (raw)//2018 H1 Final Buyer Lens.csv"
region_map = "region_map.csv"
josh_volumes = "2018Q2_Josh_Volume.csv"
ncreif = "ncreif_v1.csv"
full_df = pd.read_csv(full_lens, encoding = "ISO-8859-1")
buyer_df = pd.read_csv(buyer_lens, encoding = "ISO-8859-1")
region_map_df = pd.read_csv(region_map, encoding = "ISO-8859-1")
josh_volumes_df = pd.read_csv(josh_volumes, encoding = "ISO-8859-1")
cap_rates = pd.read_csv(ncreif, encoding = "ISO-8859-1")
## Data Transformation # NON Portfolio Datafile
#convert data to numeric for parsing
full_df.apply(pd.to_numeric,errors = 'ignore')
buyer_df.apply(pd.to_numeric,errors = 'ignore')
#create quarter column
full_df['Quarter'] = full_df['Quarter of transaction'].str[5:]
full_df
#Full lens Dataframe excluding portfolio headers rows
full_df_no_port = full_df[~full_df['Portfolio sale?'].isin(['Port.'])]
full_df_no_port['Portfolio sale?'].value_counts()
#Add in column that concatenates Quarter and Market Name for better table parsing
full_df_no_port["QQ_Market"] = full_df_no_port["Quarter"].map(str) + "_" + full_df_no_port["JLL Market"]
#Add in column that concatenates Quarter and Product Type for better table parsing
full_df_no_port["QQ_Product"] = full_df_no_port["Quarter"].map(str) + "_" + full_df_no_port["JLL Product Type"]
#Office
office_full_df = full_df_no_port[~full_df_no_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Industrial & Flex-R&D
industrial_full_df = full_df_no_port[~full_df_no_port['JLL Sector'].isin(['Office', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Multifamily
multifamily_full_df = full_df_no_port[~full_df_no_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Office', 'Seniors Housing', 'Retail'])]
#Retail
retail_full_df = full_df_no_port[~full_df_no_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Office'])]
######### Port. ONLY Data file ##################
#full DF filtered only for Port.
full_df_port = full_df[~full_df['Portfolio sale?'].isin(['Yes', 'No'])]
#Office
office_full_df_port = full_df_port[~full_df_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Industrial & Flex-R&D
industrial_full_df_port = full_df_port[~full_df_port['JLL Sector'].isin(['Office', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Multifamily
multifamily_full_df_port = full_df_port[~full_df_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Office', 'Seniors Housing', 'Retail'])]
#Retail
retail_full_df_port = full_df_port[~full_df_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Office'])]
############# Buyer Lens Transformation with No PORTFOLIO #######################
#create quarter column
buyer_df['Quarter'] = buyer_df['Quarter of transaction'].str[5:]
# buyer_df.head()
#buyer lens Dataframe excluding portfolio headers rows
buyer_df_no_port = buyer_df[~buyer_df['Portfolio sale?'].isin(['Port.'])]
#Office
office_buyer_df = buyer_df_no_port[~buyer_df_no_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Industrial & Flex-R&D
industrial_buyer_df = buyer_df_no_port[~buyer_df_no_port['JLL Sector'].isin(['Office', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Multifamily
multifamily_buyer_df = buyer_df_no_port[~buyer_df_no_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Office', 'Seniors Housing', 'Retail'])]
#Retail
retail_buyer_df = buyer_df_no_port[~buyer_df_no_port['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Office'])]
############################ REMOVES NULL FROM COUNTRY ##############################
buyer_df_no_port_no_null = buyer_df_no_port[buyer_df_no_port['Country'].notnull()]
#Office
office_buyer_df_no_null = buyer_df_no_port_no_null[~buyer_df_no_port_no_null['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Industrial & Flex-R&D
industrial_buyer_df_no_null = buyer_df_no_port_no_null[~buyer_df_no_port_no_null['JLL Sector'].isin(['Office', 'Multifamily', 'Seniors Housing', 'Retail'])]
#Multifamily
multifamily_buyer_df_no_null = buyer_df_no_port_no_null[~buyer_df_no_port_no_null['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Office', 'Seniors Housing', 'Retail'])]
#Retail
retail_buyer_df_no_null = buyer_df_no_port_no_null[~buyer_df_no_port_no_null['JLL Sector'].isin(['Industrial', 'Flex-R&D', 'Multifamily', 'Seniors Housing', 'Office'])]
############################# NCREIF Cap Rate Visuals ###################################
#convert cap rate df to useable format
cap_cols = cap_rates.columns.drop(cap_rates[['Market', 'Sector', 'Market Type']])
cap_rates[cap_cols] = cap_rates[cap_cols].apply(pd.to_numeric, errors='coerce')
cap_rates.fillna(0)
#consolidate to just overall metrics
cap_rates_overall = cap_rates[cap_rates['Market'].isin(['Primary', 'Secondary', 'Overall', 'Ten-Year Treasury'])]
#office overall cap rates
office_cap_rates_overall = cap_rates_overall[cap_rates_overall['Sector'].isin(['Office', 'Ten-Year Treasury'])]
#industrial overall cap rates
industrial_cap_rates_overall = cap_rates_overall[cap_rates_overall['Sector'].isin(['Industrial', 'Ten-Year Treasury'])]
#multifamily overall cap rates
multifamily_cap_rates_overall = cap_rates_overall[cap_rates_overall['Sector'].isin(['Multifamily', 'Ten-Year Treasury'])]
#retail overall cap rates
retail_cap_rates_overall = cap_rates_overall[cap_rates_overall['Sector'].isin(['Retail', 'Ten-Year Treasury'])]
#calculating retail investment volumes
retail_volumes = retail_full_df.pivot_table(index=["Year of transaction"],values=["Price ($)"],
columns=["Quarter"],aggfunc=[np.sum],fill_value=0)
retail_volumes_billions = retail_volumes / 1000000000
####Shift pivot to normal dataframe
retail_volumes_billions.columns = retail_volumes_billions.columns.droplevel(0)
retail_volumes_billions.columns = retail_volumes_billions.columns.droplevel(0)
retail_volumes_billions.columns.name = None #remove categories
retail_volumes_final = retail_volumes_billions.reset_index()
##Final DFs
#retail_volumes_final
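# Hypothetical helper (not in the original script) collecting the pivot-to-flat
# pattern that is repeated throughout this file: drop the two column levels added
# by the single aggfunc / single values column, clear the columns name and reset
# the index. Shown only as a sketch of the transformation used below.
def flatten_pivot(pivot_df):
    flat = pivot_df.copy()
    flat.columns = flat.columns.droplevel(0)
    flat.columns = flat.columns.droplevel(0)
    flat.columns.name = None  # remove categories
    return flat.reset_index()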
#calculating retail investment volumes by transaction type (i.e. Single Asset, Portfolio and Recapitalization)
#Annual single asset volume
retail_single_asset_volume = retail_full_df[retail_full_df['Portfolio sale?'] == 'No'].pivot_table(index=["Year of transaction"],
values=["Price ($)"], columns=["Portfolio sale?"], aggfunc=[np.sum],fill_value=0)
#Annual Recap Volume
retail_recap_volume = retail_full_df[retail_full_df['Sales Type'] == 'Recapitalization'].pivot_table(index=["Year of transaction"],
values=["Price ($)"], columns=["Sales Type"], aggfunc=[np.sum],fill_value=0)
#Shift single asset volumes to normal DF
retail_single_asset_volume.columns = retail_single_asset_volume.columns.droplevel(0)
retail_single_asset_volume.columns = retail_single_asset_volume.columns.droplevel(0)
retail_single_asset_volume.columns.name = None #remove categories
retail_single_asset_volumes_final = retail_single_asset_volume.reset_index()
#Shift Recap volumes to normal DF
retail_recap_volume.columns = retail_recap_volume.columns.droplevel(0)
retail_recap_volume.columns = retail_recap_volume.columns.droplevel(0)
retail_recap_volume.columns.name = None #remove categories
retail_recap_volumes_final = retail_recap_volume.reset_index()
##Final DFs
#retail_single_asset_volumes_final
#retail_recap_volumes_final
#retail_port_volume_final
#calculate portfolio volumes
#Annual Portfolio Volume - Portfolio Sale? = "Port"
retail_port_volume = retail_full_df_port[retail_full_df_port['Portfolio sale?'] == 'Port.'].pivot_table(index=["Year of transaction"],
values=["Price ($)"], columns=["Portfolio sale?"], aggfunc=[np.sum],fill_value=0)
#Shift Annual Portfolio Volume to normal DF
retail_port_volume.columns = retail_port_volume.columns.droplevel(0)
retail_port_volume.columns = retail_port_volume.columns.droplevel(0)
retail_port_volume.columns.name = None #remove categories
retail_port_volume_final = retail_port_volume.reset_index()
##final DF
#retail_port_volume_final
# Market by Market Quarterly Summary
retail_market_volumes_qtrly = retail_full_df.pivot_table(index=["JLL Market", "Quarter", "QQ_Market", "MarketType"],values=["Price ($)"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Market by Market Annual Summary
retail_market_volumes_annual = retail_full_df.pivot_table(index=["JLL Market", "MarketType"],values=["Price ($)"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Product by Product Quarterly Summary
retail_product_volumes_qtrly = retail_full_df.pivot_table(index=["JLL Product Type", "Quarter", "QQ_Product"],values=["Price ($)"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Product by Product Annual Summary
retail_product_volumes_annual = retail_full_df.pivot_table(index=["JLL Product Type"],values=["Price ($)"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
#Convert market by market quarterly summary to normal DF
retail_market_volumes_qtrly.columns = retail_market_volumes_qtrly.columns.droplevel(0)
retail_market_volumes_qtrly.columns = retail_market_volumes_qtrly.columns.droplevel(0)
retail_market_volumes_qtrly.columns.name = None #remove categories
retail_market_volumes_qtrly_final = retail_market_volumes_qtrly.reset_index()
#Convert market by market annual summary to normal DF
retail_market_volumes_annual.columns = retail_market_volumes_annual.columns.droplevel(0)
retail_market_volumes_annual.columns = retail_market_volumes_annual.columns.droplevel(0)
retail_market_volumes_annual.columns.name = None #remove categories
retail_market_volumes_annual_final = retail_market_volumes_annual.reset_index()
#Convert product by product annual summary
retail_product_volumes_annual.columns = retail_product_volumes_annual.columns.droplevel(0)
retail_product_volumes_annual.columns = retail_product_volumes_annual.columns.droplevel(0)
retail_product_volumes_annual.columns.name = None #remove categories
retail_product_volumes_annual_final = retail_product_volumes_annual.reset_index()
##Final DFs
# retail_market_volumes_qtrly_final
# retail_market_volumes_annual_final
# retail_product_volumes_annual_final
#Domestic Investment by Buyer Type
buyer_type_domestic_volume = retail_buyer_df_no_null[(retail_buyer_df_no_null['Country'] == 'United States') & (retail_buyer_df_no_null['Country'] != '<unknown>') & (retail_buyer_df_no_null['Country'] != 'NULL')].pivot_table(index=["Type"],
values=["JV_Price"], columns=["Year of transaction"], aggfunc=[np.sum],fill_value=0)
#Foreign Investment by Buyer Type
buyer_type_foreign_volume = retail_buyer_df_no_null[(retail_buyer_df_no_null['Country'] != 'United States') & (retail_buyer_df_no_null['Country'] != '<unknown>') & (retail_buyer_df_no_null['Country'] != 'NULL')].pivot_table(index=["Type"],
values=["JV_Price"], columns=["Year of transaction"], aggfunc=[np.sum],fill_value=0)
#append a total column to foreign buyer type data frame
total = buyer_type_foreign_volume.apply(np.sum)
total['Year of transaction'] = 'Total'
buyer_type_foreign_volume_transform = buyer_type_foreign_volume.append(pd.DataFrame(total.values, index=total.keys()).T, ignore_index=False)
#Editing TOTAL row in DF to read 'total' instead of '0'
buyer_type_list = buyer_type_foreign_volume_transform.index.tolist()
total = buyer_type_list.index(0)
buyer_type_list[total] = 'foreign total'
buyer_type_foreign_volume_transform.index = buyer_type_list
#Convert buyer_type_foreign_volume_transform to normal DF
buyer_type_foreign_volume_transform.columns = buyer_type_foreign_volume_transform.columns.droplevel(0)
buyer_type_foreign_volume_transform.columns = buyer_type_foreign_volume_transform.columns.droplevel(0)
buyer_type_foreign_volume_transform.columns.name = None #remove categories
buyer_type_foreign_volume_update = buyer_type_foreign_volume_transform.reset_index()
#remove blank column from DF
buyer_type_foreign_volume_final = buyer_type_foreign_volume_update.drop([''], axis=1)
buyer_type_foreign_volume_final
#Convert buyer_type_domestic_volume to normal DF
buyer_type_domestic_volume.columns = buyer_type_domestic_volume.columns.droplevel(0)
buyer_type_domestic_volume.columns = buyer_type_domestic_volume.columns.droplevel(0)
buyer_type_domestic_volume.columns.name = None #remove categories
buyer_type_domestic_volume_final = buyer_type_domestic_volume.reset_index()
#filter out CMBS, unknown, sovereign wealth fund, and null values
buyer_type_domestic_volume_final_v2 = buyer_type_domestic_volume_final[(buyer_type_domestic_volume_final['Type'] != 'CMBS')
& (buyer_type_domestic_volume_final['Type'] != 'Unknown')
& (buyer_type_domestic_volume_final['Type'] != 'Sovereign Wealth Fund')
& (buyer_type_domestic_volume_final['Type'] != '(null)')]
#filter out unknown, null and CMBS from foreign figures
buyer_type_foreign_volume_final_v2 = buyer_type_foreign_volume_final[(buyer_type_foreign_volume_final['index'] != 'CMBS')
& (buyer_type_foreign_volume_final['index'] != 'Unknown')
& (buyer_type_foreign_volume_final['index'] != '(null)')]
#append foreign total to combined/summarized DF
buyer_volumes_final_orig = buyer_type_domestic_volume_final_v2.append(buyer_type_foreign_volume_final_v2[buyer_type_foreign_volume_final_v2['index'] == 'foreign total'])
# replace NaN with 'foreign total'
buyer_volumes_final = buyer_volumes_final_orig.replace(np.nan, 'foreign total', regex=True)
# buyer_volumes_final, replaces NaN with Total
total_v3 = buyer_volumes_final.apply(np.sum)
total_v3['Type'] = 'Total'
buyer_volumes_final_v2 = buyer_volumes_final.append(pd.DataFrame(total_v3.values, index=total_v3.keys()).T, ignore_index=False)
#set Type as INDEX
buyer_volumes_final_v2.set_index('Type', inplace=True)
#drop 'index' column - not sure why this was created
buyer_volumes_final_v3 = buyer_volumes_final_v2.drop('index', 1)
#do division to calc percentages
buyer_volumes_final_v3.loc['DevOp_percent'] = buyer_volumes_final_v3.loc['Developer-Operator'] / buyer_volumes_final_v3.loc['Total']
buyer_volumes_final_v3.loc['HNW_percent'] = buyer_volumes_final_v3.loc['High Net Worth'] / buyer_volumes_final_v3.loc['Total']
buyer_volumes_final_v3.loc['Instit_percent'] = buyer_volumes_final_v3.loc['Institution-Advisor'] / buyer_volumes_final_v3.loc['Total']
buyer_volumes_final_v3.loc['REIT_percent'] = buyer_volumes_final_v3.loc['REIT-REOC'] / buyer_volumes_final_v3.loc['Total']
buyer_volumes_final_v3.loc['User_percent'] = buyer_volumes_final_v3.loc['User-Other'] / buyer_volumes_final_v3.loc['Total']
buyer_volumes_final_v3.loc['Foreign_percent'] = buyer_volumes_final_v3.loc['foreign total'] / buyer_volumes_final_v3.loc['Total']
buyer_volumes_final_v3.loc['Total_percent'] = buyer_volumes_final_v3.loc['Total'] / buyer_volumes_final_v3.loc['Total']
# #final DF
# buyer_volumes_final_v3
################### Mapping major regions by country #######################
region_map_df
region_df = retail_buyer_df.merge(region_map_df, on='Country', how='left')
#################### Concatenate Quarter/Country for Granular Breakout #####################
#Add in column that concatenates Quarter and Market Name for better table parsing
region_df["QQ_Country"] = region_df["Quarter"].map(str) + "_" + region_df["Country"]
#Add in column that concatenates Quarter and Product Type for better table parsing
region_df["QQ_Region"] = region_df["Quarter"].map(str) + "_" + region_df["Region"]
#################### Quarterly & Annual Breakouts by Region/Country PIVOT #####################
# Regional/Country Volume Breakout - ANNUAL
regional_country_volumes = region_df.pivot_table(index=["Region", "Country"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Regional/Country Volume Breakout - QUARTERLY
regional_country_volumes_QQ = region_df.pivot_table(index=["Region", "QQ_Country"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Regional Volume - Annual
regional_volumes = region_df.pivot_table(index=["Region"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Regional Volume Breakout - QUARTERLY
regional_volumes_QQ = region_df.pivot_table(index=["Region", "QQ_Region"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
#################### Quarterly & Annual Breakouts by Region/Country & Market PIVOT #####################
# Regional/Country Volume Breakout - ANNUAL
regional_country_volumes_market = region_df.pivot_table(index=["Region", "Country", "JLL Market"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Regional/Country Volume Breakout - QUARTERLY
regional_country_volumes_QQ_market = region_df.pivot_table(index=["Region", "QQ_Country", "JLL Market"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Regional Volume - Annual
regional_volumes_market = region_df.pivot_table(index=["Region", "JLL Market"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
# Regional Volume Breakout - QUARTERLY
regional_volumes_QQ_market = region_df.pivot_table(index=["Region", "QQ_Region", "JLL Market"],values=["JV_Price"],
columns=["Year of transaction"],aggfunc=[np.sum],fill_value=0)
#################### Quarterly & Annual Breakouts by Region/Country & Market 'conversion to normal DF' ###########################
#Convert regional/country volume annual summary to normal DF - ANNUAL
regional_country_volumes_market.columns = regional_country_volumes_market.columns.droplevel(0)
regional_country_volumes_market.columns = regional_country_volumes_market.columns.droplevel(0)
regional_country_volumes_market.columns.name = None #remove categories
regional_country_volumes_market_final = regional_country_volumes_market.reset_index()
#Convert regional/country volume annual summary to normal DF QUARTERLY
regional_country_volumes_QQ_market.columns = regional_country_volumes_QQ_market.columns.droplevel(0)
regional_country_volumes_QQ_market.columns = regional_country_volumes_QQ_market.columns.droplevel(0)
regional_country_volumes_QQ_market.columns.name = None #remove categories
regional_country_volumes_QQ_market_final = regional_country_volumes_QQ_market.reset_index()
#Convert regional volume annual summary to normal DF - ANNUAL
regional_volumes_market.columns = regional_volumes_market.columns.droplevel(0)
regional_volumes_market.columns = regional_volumes_market.columns.droplevel(0)
regional_volumes_market.columns.name = None #remove categories
regional_volumes_market_final = regional_volumes_market.reset_index()
#Convert regional volume annual summary to normal DF - QUARTERLY
regional_volumes_QQ_market.columns = regional_volumes_QQ_market.columns.droplevel(0)
regional_volumes_QQ_market.columns = regional_volumes_QQ_market.columns.droplevel(0)
regional_volumes_QQ_market.columns.name = None #remove categories
regional_volumes_QQ_market_final = regional_volumes_QQ_market.reset_index()
#################### Quarterly & Annual Breakouts by Region/Country 'conversion to normal DF'###########################
#Convert regional/country volume annual summary to normal DF - ANNUAL
regional_country_volumes.columns = regional_country_volumes.columns.droplevel(0)
regional_country_volumes.columns = regional_country_volumes.columns.droplevel(0)
regional_country_volumes.columns.name = None #remove categories
regional_country_volumes_final = regional_country_volumes.reset_index()
#Convert regional/country volume annual summary to normal DF QUARTERLY
regional_country_volumes_QQ.columns = regional_country_volumes_QQ.columns.droplevel(0)
regional_country_volumes_QQ.columns = regional_country_volumes_QQ.columns.droplevel(0)
regional_country_volumes_QQ.columns.name = None #remove categories
regional_country_volumes_QQ_final = regional_country_volumes_QQ.reset_index()
#Convert regional volume annual summary to normal DF - ANNUAL
regional_volumes.columns = regional_volumes.columns.droplevel(0)
regional_volumes.columns = regional_volumes.columns.droplevel(0)
regional_volumes.columns.name = None #remove categories
regional_volumes_final = regional_volumes.reset_index()
#Convert regional volume annual summary to normal DF - QUARTERLY
regional_volumes_QQ.columns = regional_volumes_QQ.columns.droplevel(0)
regional_volumes_QQ.columns = regional_volumes_QQ.columns.droplevel(0)
regional_volumes_QQ.columns.name = None #remove categories
regional_volumes_QQ_final = regional_volumes_QQ.reset_index()
########################## Annual Percetage of overall investment by REGION ###################################
#create offshore dataframe that removes domestic
domestic = regional_volumes.index.isin(['Domestic'])
offshore_regional_total = regional_volumes[~domestic]
#calculate total row and append to regional volumes data frame
regional_total = offshore_regional_total.apply(np.sum)
regional_volumes_final_transform = offshore_regional_total.append(pd.DataFrame(regional_total.values, index=regional_total.keys()).T, ignore_index=False)
#Editing TOTAL row in DF to read 'total' instead of '0'
regional_volumes_list = regional_volumes_final_transform.index.tolist()
r_total = regional_volumes_list.index(0)
regional_volumes_list[r_total] = 'Off-shore total'
regional_volumes_final_transform.index = regional_volumes_list
# #do division to calc percentages
regional_volumes_final_transform.loc['Africa_percent'] = regional_volumes_final_transform.loc['Africa'] / regional_volumes_final_transform.loc['Off-shore total']
regional_volumes_final_transform.loc['Americas_percent'] = regional_volumes_final_transform.loc['Americas'] / regional_volumes_final_transform.loc['Off-shore total']
regional_volumes_final_transform.loc['Asia_percent'] = regional_volumes_final_transform.loc['Asia'] / regional_volumes_final_transform.loc['Off-shore total']
keys 'id' and 'shape' are not set.
'size' [scalar] Diameter of the telescope dish (in
meters) if the key 'shape' is set to 'dish' or
length of the dipole if key 'shape' is set to
'dipole'. Will be ignored if key 'shape' is set to
'delta'. Will be ignored if key 'id' is set and a
preset value is used for the diameter or dipole length.
'orientation' [list or numpy array] If key 'shape' is set to
dipole, it refers to the orientation of the dipole
element unit vector whose magnitude is specified
by length. If key 'shape' is set to 'dish', it
refers to the position on the sky to which the
dish is pointed. For a dipole, this unit vector
must be provided in the local ENU coordinate
system aligned with the direction cosines
coordinate system or in the Alt-Az coordinate
system. This will be used only when key 'shape'
is set to 'dipole'. This could be a 2-element
vector (transverse direction cosines) where the
third (line-of-sight) component is determined,
or a 3-element vector specifying all three
direction cosines or a two-element coordinate in
Alt-Az system. If not provided it defaults to an
eastward pointing dipole. If key
'shape' is set to 'dish', the orientation refers
to the pointing center of the dish on the sky. It
can be provided in Alt-Az system as a two-element
vector or in the direction cosine coordinate
system as a two- or three-element vector. If not
set in the case of a dish element, it defaults to
zenith. This is not to be confused with the key
'pointing_center' in dictionary 'pointing_info'
which refers to the beamformed pointing center of
the array. The coordinate system is specified by
the key 'ocoords'
'ocoords' [scalar string] specifies the coordinate system
for key 'orientation'. Accepted values are 'altaz'
and 'dircos'.
'element_locs'
[2- or 3-column array] Element locations that
constitute the tile. Each row specifies
location of one element in the tile. The
locations must be specified in local ENU
coordinate system. First column specifies along
local east, second along local north and the
third along local up. If only two columns are
specified, the third column is assumed to be
zeros. If 'element_locs' is not provided, it is
assumed to be a one-element system and not a
phased array as far as determination of primary
beam is concerned.
'groundplane' [scalar] height of telescope element above the
ground plane (in meters). Default = None will
denote no ground plane effects.
'ground_modify'
[dictionary] contains specifications to modify
the analytically computed ground plane pattern. If
absent, the ground plane computed will not be
modified. If set, it may contain the following
keys:
'scale' [scalar] positive value to scale the
modifying factor with. If not set, the
scale factor to the modification is unity.
'max' [scalar] positive value to clip the
modified and scaled values to. If not set,
there is no upper limit
'latitude' [scalar] specifies latitude of the telescope site
(in degrees). Default = None, otherwise should
equal the value specified during initialization
of the instance
'longitude' [scalar] specifies longitude of the telescope site
(in degrees). Default = None, otherwise should
equal the value specified during initialization
of the instance
'altitude' [scalar] specifies altitude of the telescope site
(in m). Default = None, otherwise should
equal the value specified during initialization
of the instance
'pol' [string] specifies polarization when using
MWA_Tools for primary beam computation. Value of
key 'id' in attribute dictionary telescope must be
set to 'mwa_tools'. 'X' or 'x' denotes
X-polarization. Y-polarization is specified by 'Y'
or 'y'. If polarization is not specified when 'id'
of telescope is set to 'mwa_tools', it defaults
to X-polarization.
------------------------------------------------------------------------
"""
try:
skymodel, freq, pinfo
except NameError:
raise NameError('skymodel, freq, and pinfo must be specified.')
if self.freq is None:
if freq is None:
raise ValueError('freq must be specified using a numpy array')
elif not isinstance(freq, NP.ndarray):
raise TypeError('freq must be specified using a numpy array')
self.freq = freq.ravel()
if (freq_scale is None) or (freq_scale == 'Hz') or (freq_scale == 'hz'):
self.freq = NP.asarray(freq)
elif freq_scale == 'GHz' or freq_scale == 'ghz':
self.freq = NP.asarray(freq) * 1.0e9
elif freq_scale == 'MHz' or freq_scale == 'mhz':
self.freq = NP.asarray(freq) * 1.0e6
elif freq_scale == 'kHz' or freq_scale == 'khz':
self.freq = NP.asarray(freq) * 1.0e3
else:
raise ValueError('Frequency units must be "GHz", "MHz", "kHz" or "Hz". If not set, it defaults to "Hz"')
self.freq_scale = 'Hz'
if self.telescope is None:
if isinstance(telescope, dict):
self.telescope = telescope
else:
raise TypeError('Input telescope must be a dictionary.')
if skymodel is None:
self.info['pbeam'] += [NP.asarray([])]
self.info['ind'] += [NP.asarray([])]
self.pinfo += [None]
elif not isinstance(skymodel, SM.SkyModel):
raise TypeError('skymodel should be an instance of class SkyModel.')
else:
self.skymodel = skymodel
if self.freq is None:
if freq is None:
raise ValueError('freq must be specified using a numpy array')
elif not isinstance(freq, NP.ndarray):
raise TypeError('freq must be specified using a numpy array')
self.freq = freq.ravel()
if (freq_scale is None) or (freq_scale == 'Hz') or (freq_scale == 'hz'):
self.freq = NP.asarray(freq)
elif freq_scale == 'GHz' or freq_scale == 'ghz':
self.freq = NP.asarray(freq) * 1.0e9
elif freq_scale == 'MHz' or freq_scale == 'mhz':
self.freq = NP.asarray(freq) * 1.0e6
elif freq_scale == 'kHz' or freq_scale == 'khz':
self.freq = NP.asarray(freq) * 1.0e3
else:
raise ValueError('Frequency units must be "GHz", "MHz", "kHz" or "Hz". If not set, it defaults to "Hz"')
self.freq_scale = 'Hz'
if roi_info is None:
raise ValueError('roi_info dictionary must be set.')
pbeam_input = False
if 'ind' in roi_info:
if roi_info['ind'] is not None:
self.info['ind'] += [roi_info['ind']]
if roi_info['ind'].size > 0:
if 'pbeam' in roi_info:
if roi_info['pbeam'] is not None:
try:
pb = roi_info['pbeam'].reshape(-1,self.freq.size)
except ValueError:
raise ValueError('Number of columns of primary beam in key "pbeam" of dictionary roi_info must be equal to number of frequency channels.')
if NP.asarray(roi_info['ind']).size == pb.shape[0]:
self.info['pbeam'] += [roi_info['pbeam'].astype(NP.float32)]
else:
raise ValueError('Number of elements in values in key "ind" and number of rows of values in key "pbeam" must be identical.')
pbeam_input = True
if not pbeam_input: # Will require sky positions in Alt-Az coordinates
if skymodel.coords == 'radec':
skycoords = SkyCoord(ra=skymodel.location[:,0]*units.deg, dec=skymodel.location[:,1]*units.deg, frame='fk5', equinox=Time(skymodel.epoch, format='jyear_str', scale='utc'))
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
if lst is None:
raise ValueError('LST must be provided.')
if time_jd is None:
raise ValueError('Time in JD must be provided')
skycoords_altaz = skycoords.transform_to(AltAz(obstime=Time(time_jd, format='jd', scale='utc'), location=EarthLocation(lon=self.telescope['longitude']*units.deg, lat=self.telescope['latitude']*units.deg, height=self.telescope['altitude']*units.m)))
skypos_altaz = NP.hstack((skycoords_altaz.alt.deg.reshape(-1,1), skycoords_altaz.az.deg.reshape(-1,1)))
# skypos_altaz = GEOM.hadec2altaz(NP.hstack((NP.asarray(lst-skymodel.location[:,0]).reshape(-1,1), skymodel.location[:,1].reshape(-1,1))), self.telescope['latitude'], units='degrees') # Need to accurately take ephemeris into account
elif skymodel.coords == 'hadec':
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
skypos_altaz = GEOM.hadec2altaz(skymodel.location, self.telescope['latitude'], units='degrees')
elif skymodel.coords == 'dircos':
skypos_altaz = GEOM.dircos2altaz(skymodel.location, units='degrees')
elif skymodel.coords == 'altaz':
skypos_altaz = skymodel.location
else:
raise KeyError('skycoords invalid or unspecified in skymodel')
if 'radius' in roi_info:
self.info['radius'] += [roi_info['radius']]
if 'center' in roi_info:
self.info['center'] += [roi_info['center']]
else:
if roi_info['radius'] is None:
roi_info['radius'] = 90.0
else:
roi_info['radius'] = max(0.0, min(roi_info['radius'], 90.0))
self.info['radius'] += [roi_info['radius']]
if roi_info['center'] is None:
self.info['center'] += [NP.asarray([90.0, 270.0]).reshape(1,-1)]
else:
roi_info['center'] = NP.asarray(roi_info['center']).reshape(1,-1)
if roi_info['center_coords'] == 'dircos':
self.info['center'] += [GEOM.dircos2altaz(roi_info['center'], units='degrees')]
elif roi_info['center_coords'] == 'altaz':
self.info['center'] += [roi_info['center']]
elif roi_info['center_coords'] == 'hadec':
self.info['center'] += [GEOM.hadec2altaz(roi_info['center'], self.telescope['latitude'], units='degrees')]
elif roi_info['center_coords'] == 'radec':
if lst is None:
raise KeyError('LST not provided for coordinate conversion')
hadec = NP.asarray([lst-roi_info['center'][0,0], roi_info['center'][0,1]]).reshape(1,-1)
self.info['center'] += [GEOM.hadec2altaz(hadec, self.telescope['latitude'], units='degrees')]
elif roi_info['center_coords'] == 'dircos':
self.info['center'] += [GEOM.dircos2altaz(roi_info['center'], units='degrees')]
else:
raise ValueError('Invalid coordinate system specified for center')
if skymodel.coords == 'radec':
if self.telescope['latitude'] is None:
raise ValueError('Latitude of the observatory must be provided.')
if lst is None:
raise ValueError('LST must be provided.')
if time_jd is None:
raise ValueError('Time in JD must be provided')
skycoords = SkyCoord(ra=skymodel.location[:,0]*units.deg, dec=skymodel.location[:,1]*units.deg, frame='fk5', equinox=Time(skymodel.epoch, format='jyear_str', scale='utc'))
skycoords_altaz = skycoords.transform_to(AltAz(obstime=Time(time_jd, format='jd', scale='utc'), location=EarthLocation(lon=self.telescope['longitude']*units.deg, lat=self.telescope['latitude']*units.deg, height=self.telescope['altitude']*units.m)))
skypos_altaz = NP.hstack((skycoords_altaz.alt.deg.reshape(-1,1), skycoords_altaz.az.deg.reshape(-1,1)))
import pygame
import os
from math import *
from memory_pic import *
from base64 import *
def get_pic(pic_code, pic_name):
image = open(pic_name, 'wb')
image.write(b64decode(pic_code))
image.close()
os.makedirs('_', exist_ok=True)  # avoid crashing if the image cache directory already exists
get_pic(earthfromnorthpole_png, '_\\earthfromnorthpole.png')
get_pic(fire_png, '_\\fire.png')
get_pic(ship_png, '_\\ship.png')
get_pic(icon_ico, '_\\icon.ico')
get_pic(moon_png, '_\\moon.png')
def main():
def trans_x2u(x):
return int((x - x0) * Kshow + u0)
def trans_y2v(y):
return int(-(y - y0) * Kshow + v0)
def trans_xy2uv(x, y):
return int((x - x0) * Kshow + u0), int(-(y - y0) * Kshow + v0)
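# trans_x2u / trans_y2v / trans_xy2uv map simulation coordinates (meters,
# y pointing up) to screen pixels: scale by Kshow, translate so that the
# world point (x0, y0) lands at the screen centre (u0, v0), and flip the
# y axis because pygame's vertical coordinate grows downwards.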
def is_in_rect(pos, rect): # whether pos lies inside the rectangle
x, y = pos
rx, ry, rw, rh = rect
if (rx <= x <= rx + rw) and (ry <= y <= ry + rh):
return True
return False
def calc_Kshow_x0_y0(zoom_type):
#global Kshow
#global x0
#global y0
if zoom_type == 0: # Earth & spaceship
Kshow = height / 2.0 / max(Re, Dse) / Cshow
x0 = 0
y0 = 0
elif zoom_type == 1: # Earth & spaceship & MOON
Kshow = height / 2.0 / max(Re, max(Dse, Dem)) / Cshow
x0 = 0
y0 = 0
elif zoom_type == 2: # moon & spaceship
Kshow = height / 2.0 / max(Rm, Dsm) / Cshow
x0 = Xm
y0 = Ym
return (Kshow,x0,y0)
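# Kshow is the meters-to-pixels scale: it is chosen so that the largest
# relevant distance for the current zoom mode (Earth/ship, Earth/ship/Moon,
# or Moon/ship) fits in half the window height with a Cshow margin, while
# (x0, y0) picks which world point sits at the centre of the screen.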
#################################### var.py ####################################
dt = base_dt = 3
# Earth
Re = 6371.4e3
Me = 5.972e24
Xe = 0
Ye = 0
# Moon
Rm = 1731.1e3
Mm = 7.36e22
Vm_0 = 1.023e3 # m/s
Vmx = 0
Vmy = Vm_0
Sta_m = 0
Dem = 384400e3 * 0.99 # Re + 29000e3
Xm = Dem
Ym = 0
# Spaceship
Rs = 300e3
Ms = 1000
Vsx = 0
Vsy = 0 # 7.6e3 # 7.6
Xs = Re#+300e3#200e3
##Vsy=8e3
##Xs=Re+300e3
Ys = 0
##Vsy = 3.0746311898429766e3 # geosynchronous-orbit parameters
##Xs = 42163.772928313745e3 #
## Xs = Xm + Rm * 1.5
## Ys = 0
Dse = 0
Dsm = 0
# universal gravitational constant
G = 6.67430e-11
Fsex = 0
Fsmx = 0
Fsey = 0
Fsmy = 0
Fsx = 0
Fsy = 0
###############################################################################
pygame.init()
width = 1000 # 800
height = 1000 # 800
size = [width, height]
Cshow = 1.2
Kshow = height / 2.0 / Re / Cshow
Zoom_Type = 0
Kfire = 10000
u0 = width / 2
v0 = height / 2
x0 = 0
y0 = 0
Erotate = 0
SecondPerDegree = 240 # 8
Edrotate = dt / SecondPerDegree
Sscale = 0.2
Srotate = -90
Fscale = 0.1
Sdrotate = 1 # d_r
Fhigh = 0
Fdhigh = 0.05
FhighMax = 1
total_time = 0
historytemp = 0
historyid = 0
historyN = 500
history_s_XY = [(0, 0)] * historyN
history_m_XY = [(0, 0)] * historyN
screen = pygame.display.set_mode(size)
pygame.display.set_caption("“飞向太空”——万有引力仿真程序")  # window title: "Fly to Space" -- a universal-gravitation simulator
icon = pygame.image.load("_\\icon.ico").convert_alpha()
pygame.display.set_icon(icon)
keepGoing = True
fonts = pygame.font.SysFont('consolas', 20)
earthpic0 = pygame.image.load("_\\earthfromnorthpole.png").convert_alpha()
earthpic = earthpic0
earthRect = earthpic.get_rect()
moonpic0 = pygame.image.load("_\\moon.png").convert_alpha()
moonpic = moonpic0
moonRect = moonpic.get_rect()
ship0 = pygame.image.load("_\\ship.png").convert_alpha()
ship0 = pygame.transform.smoothscale(ship0, (
int(ship0.get_rect().width * Sscale), int(ship0.get_rect().height * Sscale))) # Unrotate Ship
fire0 = pygame.image.load("_\\fire.png").convert_alpha()
fire0 = pygame.transform.smoothscale(fire0, (
int(fire0.get_rect().width * Fscale), int(fire0.get_rect().height * Fscale))) # Unrotate Fire
ship = ship0
shipRect = ship0.get_rect()
fire = fire1 = fire0 # fire1: Scaled but not rotate
fireRect = fire0.get_rect()
IsFireAllowed = True
IsTurnAllowed = True
IsOnEarth = True
IsOnMoon = False
IsFireReverse = False
picx = 0
picy = 0
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
EBLUE = (14, 80, 164)
timer = pygame.time.Clock()
while keepGoing: # Game loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
keepGoing = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
keepGoing = False
elif event.key == pygame.K_r: ## 'R'
if IsFireAllowed and (not IsOnEarth) and (Fhigh == 0):
IsTurnAllowed = not IsTurnAllowed
elif event.key == pygame.K_z: ## 'Z'
Zoom_Type = (Zoom_Type + 1) % 3
## if event.type == pygame.MOUSEBUTTONDOWN:
## if is_in_rect(event.pos,[width-140,height-40,135,35]):
## file = '火箭发射.mp4'
## os.system('%ProgramFiles(x86)%\Windows Media Player\wmplayer.exe '+ file)
if IsOnEarth:
IsTurnAllowed = True
keys = pygame.key.get_pressed()
if keys[pygame.K_1]:
dt = base_dt
IsFireAllowed = True
elif keys[pygame.K_2]:
dt = base_dt * 2
IsFireAllowed = False
## IsTurnAllowed=False
elif keys[pygame.K_3]:
dt = base_dt * 4
IsFireAllowed = False
## IsTurnAllowed=False
elif keys[pygame.K_4]:
dt = base_dt * 8
IsFireAllowed = False
## IsTurnAllowed=False
elif keys[pygame.K_5]:
dt = base_dt * 16
IsFireAllowed = False
## IsTurnAllowed=False
elif keys[pygame.K_6]:
dt = base_dt * 32
IsFireAllowed = False
## IsTurnAllowed=False
elif keys[pygame.K_7]:
dt = base_dt * 64
IsFireAllowed = False
## IsTurnAllowed=False
elif keys[pygame.K_8]:
dt = base_dt * 128
IsFireAllowed = False
## IsTurnAllowed=False
elif keys[pygame.K_9]:
dt = base_dt * 256
IsFireAllowed = False
## IsTurnAllowed=False
if (not IsFireAllowed) and (not IsOnEarth):
IsTurnAllowed = False
if keys[pygame.K_LEFT]:
Srotate += Sdrotate
if keys[pygame.K_RIGHT]:
Srotate -= Sdrotate
## ship=pygame.transform.rotate(ship0, Srotate)
## if Fhigh > 1e-5:
## fire=pygame.transform.rotate(fire1, Srotate)
if keys[pygame.K_DOWN]:
Fhigh += Fdhigh
if Fhigh > FhighMax:
Fhigh = FhighMax
IsFireReverse = True
elif not keys[pygame.K_UP]:
Fhigh = 0
IsFireReverse = False
if keys[pygame.K_UP]:
Fhigh += Fdhigh
if Fhigh > FhighMax:
Fhigh = FhighMax
elif not keys[pygame.K_DOWN]:
Fhigh = 0
Dse = sqrt((Xs - Xe) * (Xs - Xe) + (Ys - Ye) * (Ys - Ye))
Dsm = sqrt((Xs - Xm) * (Xs - Xm) + (Ys - Ym) * (Ys - Ym))
Fse = G * Me * Ms / Dse / Dse
Fsex = -Fse * (Xs - Xe) / Dse
Fsey = -Fse * (Ys - Ye) / Dse
Fsm = G * Mm * Ms / Dsm / Dsm
Fsmx = -Fsm * (Xs - Xm) / Dsm
Fsmy = -Fsm * (Ys - Ym) / Dsm
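# Newton's law of universal gravitation: the pull from a body of mass M at
# distance d has magnitude F = G*M*Ms/d**2, and multiplying by the unit
# vector -(Xs - Xbody)/d, -(Ys - Ybody)/d points it from the ship towards
# that body; the Earth and Moon contributions are summed with the thrust
# below to get the net force on the ship.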
if IsFireAllowed:
Ffire = Kfire * Fhigh
else:
Ffire = 0
if IsFireReverse:
Ffire = -Ffire
Ffirex = -Ffire * sin(Srotate / 180 * pi)
Ffirey = Ffire * cos(Srotate / 180 * pi)
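# Thrust decomposition: Srotate = 0 means the ship points along +y and the
# K_LEFT/K_RIGHT keys rotate it counter-clockwise/clockwise, so the thrust
# components are (-sin(Srotate), cos(Srotate)) * Ffire; a negative Ffire
# (reverse burn) simply flips the vector.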
Fsx = Fsex + Fsmx + Ffirex
Fsy = Fsey + Fsmy + Ffirey
Vsx += dt * Fsx / Ms
Vsy += dt * Fsy / Ms
Xs += Vsx * dt
Ys += Vsy * dt
# Moon's motion
Sta_m += Vm_0 / Dem * dt
if Sta_m > (2*pi):
Sta_m -= (2*pi)
Xm = Dem * cos(Sta_m)
Ym = Dem * sin(Sta_m)
Vmx = - Vm_0 * sin(Sta_m)
Vmy = Vm_0 * cos(Sta_m)
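# The Moon moves on a fixed circle of radius Dem around the Earth: Sta_m is
# the orbital angle, advanced each step by the angular speed Vm_0 / Dem, and
# (Vmx, Vmy) is the tangential velocity, reused below when the ship lands on
# the Moon so that it keeps moving with it.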
(Kshow, x0, y0) = calc_Kshow_x0_y0(Zoom_Type)
# Earth's rotation
Edrotate = dt / SecondPerDegree
Erotate += Edrotate
Erotate %= 360
if sqrt((Xs - Xe) * (Xs - Xe) + (Ys - Ye) * (Ys - Ye)) <= Re:
IsOnEarth = True
Xs -= Vsx * dt
Ys -= Vsy * dt
Vsx, Vsy = 0, 0
Xt = Xs - Xe
Yt = Ys - Ye
theta = asin(Yt / Dse)
if Xt <= 0:
theta = pi - theta
theta += Edrotate * (pi / 180)
Vsx = -(pi / SecondPerDegree / 180) * Re * sin(theta)
Vsy = (pi / SecondPerDegree / 180) * Re * cos(theta)
Ys = Ye + Re * sin(theta)
Xs = Xe + Re * cos(theta)
Srotate += Edrotate
else:
IsOnEarth = False
if sqrt((Xs - Xm) * (Xs - Xm) + (Ys - Ym) * (Ys - Ym)) <= Rm:
Xs -= Vsx * dt
Ys -= Vsy * dt
if not IsOnMoon:
XXms = Xs - Xm
YYms = Ys - Ym
Vsx, Vsy = Vmx, Vmy
Xs = Xm + XXms
Ys = Ym + YYms
IsOnMoon = True
# Vsx = Vsy = Vs = 0
## Xt = Xs - Xm
## Yt = Ys - Ym
## theta = asin(Yt / Dse)
## if Xt <= 0:
## theta = pi - theta
## theta += Edrotate * (pi / 180)
## Vsx = -(pi / SecondPerDegree / 180) * Re * sin(theta)
## Vsy = (pi / SecondPerDegree / 180) * Re * cos(theta)
## Ys = Ye + Re * sin(theta)
## Xs = Xe + Re * cos(theta)
## | |
import numpy as np
import regreg.api as rr
import regreg.affine as ra
from .query import query
from .randomization import split
class M_estimator(query):
def __init__(self, loss, epsilon, penalty, randomization, solve_args={'min_its':50, 'tol':1.e-10}):
"""
Fits the logistic regression to a candidate active set, without penalty.
Calls the method bootstrap_covariance() to bootstrap the covariance matrix.
Computes $\bar{\beta}_E$ which is the restricted
M-estimator (i.e. subject to the constraint $\beta_{-E}=0$).
Parameters:
-----------
active: np.bool
The active set from fitting the logistic lasso
solve_args: dict
Arguments to be passed to regreg solver.
Returns:
--------
None
Notes:
------
Sets self._beta_unpenalized which will be used in the covariance matrix calculation.
Also computes Hessian of loss at restricted M-estimator as well as the bootstrap covariance.
"""
query.__init__(self, randomization)
(self.loss,
self.epsilon,
self.penalty,
self.randomization,
self.solve_args) = (loss,
epsilon,
penalty,
randomization,
solve_args)
# Methods needed for subclassing a query
def solve(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}, nboot=2000):
self.randomize()
(loss,
randomized_loss,
epsilon,
penalty,
randomization,
solve_args) = (self.loss,
self.randomized_loss,
self.epsilon,
self.penalty,
self.randomization,
self.solve_args)
# initial solution
problem = rr.simple_problem(randomized_loss, penalty)
self.initial_soln = problem.solve(**solve_args)
# find the active groups and their direction vectors
# as well as unpenalized groups
groups = np.unique(penalty.groups)
active_groups = np.zeros(len(groups), np.bool)
unpenalized_groups = np.zeros(len(groups), np.bool)
active_directions = []
active = np.zeros(loss.shape, np.bool)
unpenalized = np.zeros(loss.shape, np.bool)
initial_scalings = []
active_directions_list = [] ## added for group lasso
active_penalty = []
for i, g in enumerate(groups):
group = penalty.groups == g
active_groups[i] = (np.linalg.norm(self.initial_soln[group]) > 1.e-6 * penalty.weights[g]) and (penalty.weights[g] > 0)
unpenalized_groups[i] = (penalty.weights[g] == 0)
if active_groups[i]:
active[group] = True
z = np.zeros(active.shape, np.float)
z[group] = self.initial_soln[group] / np.linalg.norm(self.initial_soln[group])
active_directions.append(z)
active_directions_list.append(z[group]) ## added for group lasso
active_penalty.append(penalty.weights[g]) ## added
initial_scalings.append(np.linalg.norm(self.initial_soln[group]))
if unpenalized_groups[i]:
unpenalized[group] = True
self.active_penalty = active_penalty
# solve the restricted problem
self._overall = active + unpenalized > 0
self._inactive = ~self._overall
self._unpenalized = unpenalized
self.active_directions_list = active_directions_list ## added for group lasso
self._active_directions = np.array(active_directions).T
self._active_groups = np.array(active_groups, np.bool)
self._unpenalized_groups = np.array(unpenalized_groups, np.bool)
self.selection_variable = {'groups':self._active_groups,
'variables':self._overall,
'directions':self._active_directions}
# initial state for opt variables
initial_subgrad = -(self.randomized_loss.smooth_objective(self.initial_soln, 'grad') +
self.randomized_loss.quadratic.objective(self.initial_soln, 'grad'))
# the quadratic of a smooth_atom is not included in computing the smooth_objective
self.initial_subgrad = initial_subgrad
initial_subgrad = initial_subgrad[self._inactive]
initial_unpenalized = self.initial_soln[self._unpenalized]
self.observed_opt_state = np.concatenate([initial_scalings,
initial_unpenalized,
initial_subgrad], axis=0)
# set the _solved bit
self._solved = True
# Now setup the pieces for linear decomposition
(loss,
epsilon,
penalty,
initial_soln,
overall,
inactive,
unpenalized,
active_groups,
active_directions) = (self.loss,
self.epsilon,
self.penalty,
self.initial_soln,
self._overall,
self._inactive,
self._unpenalized,
self._active_groups,
self._active_directions)
# scaling should be chosen to be Lipschitz constant for gradient of Gaussian part
# we are implicitly assuming that
# loss is a pairs model
_sqrt_scaling = np.sqrt(scaling)
_beta_unpenalized = restricted_Mest(loss, overall, solve_args=solve_args)
beta_full = np.zeros(overall.shape)
beta_full[overall] = _beta_unpenalized
_hessian = loss.hessian(beta_full)
self._beta_full = beta_full
# observed state for score
self.observed_score_state = np.hstack([_beta_unpenalized * _sqrt_scaling,
-loss.smooth_objective(beta_full, 'grad')[inactive] / _sqrt_scaling])
# form linear part
self.num_opt_var = self.observed_opt_state.shape[0]
p = loss.shape[0] # shorthand for p
# (\bar{\beta}_{E \cup U}, N_{-E}, c_E, \beta_U, z_{-E})
# E for active
# U for unpenalized
# -E for inactive
_opt_linear_term = np.zeros((p, self._active_groups.sum() + unpenalized.sum() + inactive.sum()))
_score_linear_term = np.zeros((p, p))
# \bar{\beta}_{E \cup U} piece -- the unpenalized M estimator
Mest_slice = slice(0, overall.sum())
_Mest_hessian = _hessian[:,overall]
_score_linear_term[:,Mest_slice] = -_Mest_hessian / _sqrt_scaling
# N_{-(E \cup U)} piece -- inactive coordinates of score of M estimator at unpenalized solution
null_idx = range(overall.sum(), p)
inactive_idx = np.nonzero(inactive)[0]
for _i, _n in zip(inactive_idx, null_idx):
_score_linear_term[_i,_n] = -_sqrt_scaling
# c_E piece
scaling_slice = slice(0, active_groups.sum())
if len(active_directions)==0:
_opt_hessian=0
else:
_opt_hessian = (_hessian + epsilon * np.identity(p)).dot(active_directions)
_opt_linear_term[:,scaling_slice] = _opt_hessian / _sqrt_scaling
self.observed_opt_state[scaling_slice] *= _sqrt_scaling
# beta_U piece
unpenalized_slice = slice(active_groups.sum(), active_groups.sum() + unpenalized.sum())
unpenalized_directions = np.identity(p)[:,unpenalized]
if unpenalized.sum():
_opt_linear_term[:,unpenalized_slice] = (_hessian + epsilon * np.identity(p)).dot(unpenalized_directions) / _sqrt_scaling
self.observed_opt_state[unpenalized_slice] *= _sqrt_scaling
# subgrad piece
subgrad_idx = range(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
subgrad_slice = slice(active_groups.sum() + unpenalized.sum(), active_groups.sum() + inactive.sum() + unpenalized.sum())
for _i, _s in zip(inactive_idx, subgrad_idx):
_opt_linear_term[_i,_s] = _sqrt_scaling
self.observed_opt_state[subgrad_slice] /= _sqrt_scaling
# form affine part
_opt_affine_term = np.zeros(p)
idx = 0
groups = np.unique(penalty.groups)
for i, g in enumerate(groups):
if active_groups[i]:
group = penalty.groups == g
_opt_affine_term[group] = active_directions[:,idx][group] * penalty.weights[g]
idx += 1
# two transforms that encode score and optimization
# variable roles
# later, we will modify `score_transform`
# in `linear_decomposition`
self.opt_transform = (_opt_linear_term, _opt_affine_term)
self.score_transform = (_score_linear_term, np.zeros(_score_linear_term.shape[0]))
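# Roughly speaking, the two transforms encode the affine reconstruction of
# the randomization omega from the data and the optimization variables,
# omega = score_linear . s + opt_linear . o + opt_affine, which is the
# decomposition the randomized-inference sampler works with: it only moves
# the optimization variables o and projects them back onto the selection
# event (see `projection` below).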
# now store everything needed for the projections
# the projection acts only on the optimization
# variables
self.scaling_slice = scaling_slice
# weights are scaled here because the linear terms scales them by scaling
new_groups = penalty.groups[inactive]
new_weights = dict([(g, penalty.weights[g] / _sqrt_scaling) for g in penalty.weights.keys() if g in np.unique(new_groups)])
# we form a dual group lasso object
# to do the projection
self.group_lasso_dual = rr.group_lasso_dual(new_groups, weights=new_weights, bound=1.)
self.subgrad_slice = subgrad_slice
self._setup = True
self._marginalize_subgradient = False
self.scaling_slice = scaling_slice
self.unpenalized_slice = unpenalized_slice
self.ndim = loss.shape[0]
self.Q = ((_hessian + epsilon * np.identity(p))[:,active])[active,:]
self.Qinv = np.linalg.inv(self.Q)
self.form_VQLambda()
self.nboot = nboot
def form_VQLambda(self):
nactive_groups = len(self.active_directions_list)
nactive_vars = sum([self.active_directions_list[i].shape[0] for i in range(nactive_groups)])
V = np.zeros((nactive_vars, nactive_vars-nactive_groups))
#U = np.zeros((nvariables, ngroups))
Lambda = np.zeros((nactive_vars,nactive_vars))
temp_row, temp_col = 0, 0
for g in range(len(self.active_directions_list)):
size_curr_group = self.active_directions_list[g].shape[0]
#U[temp_row:(temp_row+size_curr_group),g] = self._active_directions[g]
Lambda[temp_row:(temp_row+size_curr_group),temp_row:(temp_row+size_curr_group)] \
= self.active_penalty[g]*np.identity(size_curr_group)
import scipy
from scipy import linalg, matrix
def null(A, eps=1e-12):
u, s, vh = scipy.linalg.svd(A)
padding = max(0, np.shape(A)[1] - np.shape(s)[0])
null_mask = np.concatenate(((s <= eps), np.ones((padding,), dtype=bool)), axis=0)
null_space = scipy.compress(null_mask, vh, axis=0)
return scipy.transpose(null_space)
V_g = null(matrix(self.active_directions_list[g]))
V[temp_row:(temp_row+V_g.shape[0]), temp_col:(temp_col+V_g.shape[1])] = V_g
temp_row += V_g.shape[0]
temp_col += V_g.shape[1]
self.VQLambda = np.dot(np.dot(V.T,self.Qinv), Lambda.dot(V))
return self.VQLambda
def derivative_logdet_jacobian(self, scalings):
nactive_groups = len(self.active_directions_list)
nactive_vars = np.sum([self.active_directions_list[i].shape[0] for i in range(nactive_groups)])
from scipy.linalg import block_diag
matrix_list = [scalings[i]*np.identity(self.active_directions_list[i].shape[0]-1) for i in range(scalings.shape[0])]
Gamma_minus = block_diag(*matrix_list)
jacobian_inv = np.linalg.inv(Gamma_minus+self.VQLambda)
group_sizes = [self._active_directions[i].shape[0] for i in range(nactive_groups)]
group_sizes_cumsum = np.concatenate(([0], np.array(group_sizes).cumsum()))
jacobian_inv_blocks = [jacobian_inv[group_sizes_cumsum[i]:group_sizes_cumsum[i+1],group_sizes_cumsum[i]:group_sizes_cumsum[i+1]]
for i in range(nactive_groups)]
der = np.zeros(self.observed_opt_state.shape[0])
der[self.scaling_slice] = np.array([np.matrix.trace(jacobian_inv_blocks[i]) for i in range(scalings.shape[0])])
return der
def setup_sampler(self, scaling=1, solve_args={'min_its':20, 'tol':1.e-10}):
pass
def projection(self, opt_state):
"""
Full projection for Langevin.
The state here will be only the state of the optimization variables.
"""
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
if ('subgradient' not in self.selection_variable and
'scaling' not in self.selection_variable): # have not conditioned on any thing else
new_state = opt_state.copy() # not really necessary to copy
new_state[self.scaling_slice] = np.maximum(opt_state[self.scaling_slice], 0)
new_state[self.subgrad_slice] = self.group_lasso_dual.bound_prox(opt_state[self.subgrad_slice])
elif ('subgradient' not in self.selection_variable and
'scaling' in self.selection_variable): # conditioned on the initial scalings
# only the subgradient in opt_state
new_state = self.group_lasso_dual.bound_prox(opt_state)
elif ('subgradient' in self.selection_variable and
'scaling' not in self.selection_variable): # conditioned on the subgradient
# only the scaling in opt_state
new_state = np.maximum(opt_state, 0)
else:
new_state = opt_state
return new_state
# optional things to condition on
def decompose_subgradient(self, conditioning_groups=None, marginalizing_groups=None):
"""
Condition on and/or marginalize over the subgradients of the chosen inactive groups.
conditioning_groups and marginalizing_groups should be disjoint
"""
groups = np.unique(self.penalty.groups)
condition_inactive_groups = np.zeros_like(groups, dtype=bool)
if conditioning_groups is None:
conditioning_groups = np.zeros_like(groups, dtype=np.bool)
if marginalizing_groups is None:
marginalizing_groups = np.zeros_like(groups, dtype=np.bool)
if np.any(conditioning_groups * marginalizing_groups):
raise ValueError("cannot simultaneously condition and marginalize over a group's subgradient")
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
condition_inactive_variables = np.zeros_like(self._inactive, dtype=bool)
moving_inactive_groups = np.zeros_like(groups, dtype=bool)
moving_inactive_variables = np.zeros_like(self._inactive, dtype=bool)
self._inactive_groups = ~(self._active_groups+self._unpenalized)
inactive_marginal_groups = np.zeros_like(self._inactive, dtype=bool)
limits_marginal_groups = np.zeros_like(self._inactive)
for i, g in enumerate(groups):
if (self._inactive_groups[i]) and conditioning_groups[i]:
group = self.penalty.groups == g
condition_inactive_groups[i] = True
condition_inactive_variables[group] = True
elif (self._inactive_groups[i]) and (~conditioning_groups[i]) and (~marginalizing_groups[i]):
group = self.penalty.groups == g
moving_inactive_groups[i] = True
moving_inactive_variables[group] = True
if (self._inactive_groups[i]) and marginalizing_groups[i]:
group = self.penalty.groups == g
inactive_marginal_groups[i] = True
limits_marginal_groups[i] = self.penalty.weights[g]
if inactive_marginal_groups is not None:
if inactive_marginal_groups.sum()>0:
self._marginalize_subgradient = True
self.inactive_marginal_groups = inactive_marginal_groups
self.limits_marginal_groups = limits_marginal_groups
opt_linear, opt_offset = self.opt_transform
new_linear = np.zeros((opt_linear.shape[0], (self._active_groups.sum() +
self._unpenalized_groups.sum() +
moving_inactive_variables.sum())))
new_linear[:, self.scaling_slice] = opt_linear[:, self.scaling_slice]
new_linear[:, self.unpenalized_slice] = opt_linear[:, self.unpenalized_slice]
inactive_moving_idx = np.nonzero(moving_inactive_variables)[0]
subgrad_idx = range(self._active_groups.sum() | |
for corp in [self.state.unknownCorpus,
self.state.hamCorpus,
self.state.spamCorpus]:
for k in corp.keys():
if len(keys) >= max_results:
break
msg = corp[k]
msg.load()
if 'subject' in params:
subj = str(msg['Subject'])
if self._contains(subj, key, ic):
push((k, corp))
if 'body' in params:
# For [ 906581 ] Assertion failed in search
# subject. Can the headers be a non-string?
msg_body = msg.as_string()
msg_body = msg_body[msg_body.index('\r\n\r\n'):]
if self._contains(msg_body, key, ic):
push((k, corp))
if 'headers' in params:
for nm, val in msg.items():
# For [ 906581 ] Assertion failed in
# search subject. Can the headers be
# a non-string?
nm = str(nm)
val = str(val)
if self._contains(nm, key, ic) or \
self._contains(val, key, ic):
push((k, corp))
if len(keys):
if len(keys) == 1:
title = _("Found message")
else:
title = _("Found messages")
keys = list(keys)
else:
page = _("<p>Could not find any matching messages. " \
"Maybe they expired?</p>")
title = _("Did not find message")
box = self._buildBox(title, 'status.gif', page)
self.write(box)
self.write(self._buildBox(_('Find message'),
'query.gif',
self.html.findMessage))
self._writePostamble()
return
# Else show the most recent day's page, as decided by _buildReviewKeys.
else:
start = 0
# Build the lists of messages: spams, hams and unsure.
if len(keys) == 0:
keys, date, prior, this, next = self._buildReviewKeys(start)
keyedMessageInfo = {options["Headers", "header_unsure_string"]: [],
options["Headers", "header_ham_string"]: [],
options["Headers", "header_spam_string"]: [],
}
invalid_keys = []
for key in keys:
if isinstance(key, tuple):
key, sourceCorpus = key
else:
sourceCorpus = self.state.unknownCorpus
# Parse the message, get the judgement header and build a message
# info object for each message.
message = sourceCorpus[key]
try:
message.load()
except IOError:
# Someone has taken this file away from us. It was
# probably a virus protection program, so that's ok.
# Don't list it in the review, though.
invalid_keys.append(key)
continue
judgement = message[options["Headers",
"classification_header_name"]]
if judgement is None:
judgement = options["Headers", "header_unsure_string"]
else:
judgement = judgement.split(';')[0].strip()
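# e.g. a classification header value like "spam; 0.98" (an illustrative
# value -- the score, if any, follows a semicolon) reduces to just "spam"
# here before being used as a grouping key.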
messageInfo = self._makeMessageInfo(message)
keyedMessageInfo[judgement].append((key, messageInfo))
for key in invalid_keys:
keys.remove(key)
# Present the list of messages in their groups in reverse order of
# appearance, by default, or according to the specified sort order.
if keys:
page = self.html.reviewtable.clone()
if prior:
page.prior.value = prior
del page.priorButton.disabled
if next:
page.next.value = next
del page.nextButton.disabled
templateRow = page.reviewRow.clone()
# The decision about whether to reverse the sort
# order has to go here, because _sortMessages gets called
# thrice, and so the ham list would end up sorted backwards.
sort_order = params.get('sort')
if self.previous_sort == sort_order:
reverse = True
self.previous_sort = None
else:
reverse = False
self.previous_sort = sort_order
page.table = "" # To make way for the real rows.
for header, label in ((options["Headers",
"header_unsure_string"], 'Unsure'),
(options["Headers",
"header_ham_string"], 'Ham'),
(options["Headers",
"header_spam_string"], 'Spam')):
messages = keyedMessageInfo[header]
if messages:
sh = self.html.reviewSubHeader.clone()
# Setup the header row
sh.optionalHeaders = ''
h = self.html.headerHeader.clone()
for disp_header in options["html_ui", "display_headers"]:
h.headerLink.href = 'review?sort=%sHeader' % \
(disp_header.lower(),)
h.headerName = disp_header.title()
sh.optionalHeaders += h
if not options["html_ui", "display_score"]:
del sh.score_header
if not options["html_ui", "display_received_time"]:
del sh.received_header
subHeader = str(sh)
subHeader = subHeader.replace('TYPE', label)
page.table += self.html.blankRow
page.table += subHeader
self._appendMessages(page.table, messages, label,
sort_order, reverse)
page.table += self.html.trainRow
if title == "":
title = _("Untrained messages received on %s") % date
box = self._buildBox(title, None, page) # No icon, to save space.
else:
page = _("<p>There are no untrained messages to display. " \
"Return <a href='home'>Home</a>, or " \
"<a href='review'>check again</a>.</p>")
title = _("No untrained messages")
box = self._buildBox(title, 'status.gif', page)
self.write(box)
self._writePostamble(help_topic="review")
def onView(self, key, corpus):
"""View a message - linked from the Review page."""
self._writePreamble(_("View message"),
parent=('review', _('Review')))
sourceCorpus = None
message = None
if self.state.unknownCorpus.get(key) is not None:
sourceCorpus = self.state.unknownCorpus
elif self.state.hamCorpus.get(key) is not None:
sourceCorpus = self.state.hamCorpus
elif self.state.spamCorpus.get(key) is not None:
sourceCorpus = self.state.spamCorpus
if sourceCorpus is not None:
message = sourceCorpus.get(key)
if message is not None:
self.write("<pre>%s</pre>" % cgi.escape(message.as_string()))
else:
self.write(_("<p>Can't find message %r. Maybe it expired.</p>") %
key)
self._writePostamble()
def onShowclues(self, key, subject, tokens='0'):
"""Show clues for a message - linked from the Review page."""
tokens = bool(int(tokens)) # needs the int, as bool('0') is True
self._writePreamble(_("Message clues"),
parent=('review', _('Review')))
sourceCorpus = None
message = None
if self.state.unknownCorpus.get(key) is not None:
sourceCorpus = self.state.unknownCorpus
elif self.state.hamCorpus.get(key) is not None:
sourceCorpus = self.state.hamCorpus
elif self.state.spamCorpus.get(key) is not None:
sourceCorpus = self.state.spamCorpus
if sourceCorpus is not None:
message = sourceCorpus.get(key).as_string()
if message is not None:
# For Macs?
message = message.replace('\r\n', '\n').replace('\r', '\n')
results = self._buildCluesTable(message, subject, tokens)
del results.classifyAnother
self.write(results)
else:
self.write(_("<p>Can't find message %r. Maybe it expired.</p>") %
key)
self._writePostamble()
def onPluginconfig(self):
html = self._buildConfigPage(self.plugin.plugin_map)
html.title = _('Home > Plugin Configuration')
html.pagename = _('> Plugin Configuration')
html.plugin_button.name.value = _("Back to basic configuration")
html.plugin_button.action = "config"
html.config_submit.value = _("Save plugin options")
html.restore.value = _("Restore plugin options defaults")
del html.exp_button
del html.adv_button
self.writeOKHeaders('text/html')
self.write(html)
def close_database(self):
self.state.close()
def reReadOptions(self):
"""Called by the config page when the user saves some new options, or
restores the defaults."""
load_options()
# Recreate the state.
self.state = self.state.recreate_state()
self.classifier = self.state.bayes
def verifyInput(self, parms, pmap):
'''Check that the given input is valid.'''
# Most of the work here is done by the parent class, but
# we have a few extra checks
errmsg = UserInterface.UserInterface.verifyInput(self, parms, pmap)
if pmap != parm_ini_map:
return errmsg
return errmsg
def readUIResources(self):
"""Returns ui.html and a dictionary of Gifs."""
if self.lang_manager:
ui_html = self.lang_manager.import_ui_html()
else:
from spambayes.core_resources import ui_html
images = {}
for baseName in UserInterface.IMAGES:
moduleName = '%s.%s_gif' % ('spambayes.core_resources', baseName)
module = __import__(moduleName, {}, {}, ('spambayes',
'core_resources'))
images[baseName] = module.data
return ui_html.data, images
class CoreState:
"""This keeps the global state of the module - the command-line options,
statistics like how many mails have been classified, the handle of the
log file, the Classifier and FileCorpus objects, and so on."""
def __init__(self):
"""Initialises the State object that holds the state of the app.
The default settings are read from Options.py and bayescustomize.ini
and are then overridden by the command-line processing code in the
__main__ code below."""
self.log_file = None
self.bayes = None
self.mutex = None
self.prepared = False
self.can_stop = True
self.plugin = None
# Unique names for cached messages - see `getNewMessageName()` below.
self.last_base_message_name = ''
self.uniquifier = 2
# Set up the statistics.
self.numSpams = 0
self.numHams = 0
self.numUnsure = 0
self.servers = ""
# Load up the other settings from Option.py / bayescustomize.ini
self.ui_port = options["html_ui", "port"]
self.launch_ui = options["html_ui", "launch_browser"]
self.gzip_cache = options["Storage", "cache_use_gzip"]
self.run_test_server = False
self.is_test = False
self.spamCorpus = self.hamCorpus = self.unknownCorpus = None
self.spam_trainer = self.ham_trainer = None
self.init()
def init(self):
assert not self.prepared, "init after prepare, but before close"
## no i18n yet...
## # Load the environment for translation.
## self.lang_manager = i18n.LanguageManager()
## # Set the system user default language.
## self.lang_manager.set_language(\
## self.lang_manager.locale_default_lang())
## # Set interface to use the user language in the configuration file.
## for language in reversed(options["globals", "language"]):
## # We leave the default in there as the last option, to fall
## # back on if necessary.
## self.lang_manager.add_language(language)
## if options["globals", "verbose"]:
## print "Asked to add languages: " + \
## ", ".join(options["globals", "language"])
## print "Set language to " + \
## str(self.lang_manager.current_langs_codes)
self.lang_manager = None
# Open the log file.
if options["globals", "verbose"]:
self.log_file = open('_core_server.log', 'wb', 0)
# Remember reported errors.
self.reported_errors = {}
def close(self):
assert self.prepared, "closed without being prepared!"
if self.bayes is not None:
# Only store a non-empty db.
if self.bayes.nham != 0 and self.bayes.nspam != 0:
self.bayes.store()
self.bayes.close()
self.bayes = None
spambayes.message.Message().message_info_db = None
self.spamCorpus = self.hamCorpus = self.unknownCorpus = None
self.spam_trainer = self.ham_trainer = None
self.prepared = False
self.close_platform_mutex()
def prepare(self, can_stop=True):
"""Do whatever needs to be done to prepare for running. If
can_stop is False, then we may not let the user shut down the
proxy - for example, running as a Windows service this should
be the case."""
self.init()
# If we can, prevent multiple servers from running at the same time.
assert self.mutex is None, "Should | |
selected_jobs )
if queue_paused:
parser_status = 'paused'
self._parser_status.setText( parser_status )
if current_action == '' and files_paused:
current_action = 'paused'
self._current_action.setText( current_action )
if files_paused:
ClientGUIFunctions.SetBitmapButtonBitmap( self._pause_files_button, CC.global_pixmaps().file_play )
else:
ClientGUIFunctions.SetBitmapButtonBitmap( self._pause_files_button, CC.global_pixmaps().file_pause )
if queue_paused:
ClientGUIFunctions.SetBitmapButtonBitmap( self._pause_queue_button, CC.global_pixmaps().gallery_play )
else:
ClientGUIFunctions.SetBitmapButtonBitmap( self._pause_queue_button, CC.global_pixmaps().gallery_pause )
( file_network_job, page_network_job ) = self._simple_downloader_import.GetNetworkJobs()
self._file_download_control.SetNetworkJob( file_network_job )
self._page_download_control.SetNetworkJob( page_network_job )
def CheckAbleToClose( self ):
if self._simple_downloader_import.CurrentlyWorking():
raise HydrusExceptions.VetoException( 'This page is still importing.' )
def EventAdvance( self ):
selected_jobs = self._pending_jobs_listbox.GetData( only_selected = True )
for job in selected_jobs:
self._simple_downloader_import.AdvanceJob( job )
if len( selected_jobs ) > 0:
self._UpdateImportStatus()
def EventDelay( self ):
selected_jobs = list( self._pending_jobs_listbox.GetData( only_selected = True ) )
selected_jobs.reverse()
for job in selected_jobs:
self._simple_downloader_import.DelayJob( job )
if len( selected_jobs ) > 0:
self._UpdateImportStatus()
def EventDelete( self ):
selected_jobs = self._pending_jobs_listbox.GetData( only_selected = True )
message = 'Delete {} jobs?'.format( HydrusData.ToHumanInt( len( selected_jobs ) ) )
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result != QW.QDialog.Accepted:
return
for job in selected_jobs:
self._simple_downloader_import.DeleteJob( job )
if len( selected_jobs ) > 0:
self._UpdateImportStatus()
def EventFormulaChanged( self ):
formula = self._formulae.GetValue()
formula_name = formula.GetName()
self._simple_downloader_import.SetFormulaName( formula_name )
self._controller.new_options.SetString( 'favourite_simple_downloader_formula', formula_name )
def PauseQueue( self ):
self._simple_downloader_import.PausePlayQueue()
self._UpdateImportStatus()
def PauseFiles( self ):
self._simple_downloader_import.PausePlayFiles()
self._UpdateImportStatus()
def SetSearchFocus( self ):
ClientGUIFunctions.SetFocusLater( self._page_url_input )
def Start( self ):
self._simple_downloader_import.Start( self._page_key )
management_panel_types_to_classes[ MANAGEMENT_TYPE_IMPORT_SIMPLE_DOWNLOADER ] = ManagementPanelImporterSimpleDownloader
class ManagementPanelImporterURLs( ManagementPanelImporter ):
def __init__( self, parent, page, controller, management_controller ):
ManagementPanelImporter.__init__( self, parent, page, controller, management_controller )
#
self._url_panel = ClientGUICommon.StaticBox( self, 'url downloader' )
#
self._import_queue_panel = ClientGUICommon.StaticBox( self._url_panel, 'imports' )
self._pause_button = ClientGUICommon.BetterBitmapButton( self._import_queue_panel, CC.global_pixmaps().file_pause, self.Pause )
self._pause_button.setToolTip( 'pause/play files' )
self._file_download_control = ClientGUINetworkJobControl.NetworkJobControl( self._import_queue_panel )
self._urls_import = self._management_controller.GetVariable( 'urls_import' )
self._file_seed_cache_control = ClientGUIFileSeedCache.FileSeedCacheStatusControl( self._import_queue_panel, self._controller, page_key = self._page_key )
#
self._gallery_panel = ClientGUICommon.StaticBox( self._url_panel, 'search' )
self._gallery_download_control = ClientGUINetworkJobControl.NetworkJobControl( self._gallery_panel )
self._gallery_seed_log_control = ClientGUIGallerySeedLog.GallerySeedLogStatusControl( self._gallery_panel, self._controller, False, False, 'search', page_key = self._page_key )
#
self._url_input = ClientGUIControls.TextAndPasteCtrl( self._url_panel, self._PendURLs )
self._url_input.setPlaceholderText( 'any url hydrus recognises, or a raw file url' )
( file_import_options, tag_import_options ) = self._urls_import.GetOptions()
show_downloader_options = True
self._file_import_options = ClientGUIImport.FileImportOptionsButton( self._url_panel, file_import_options, show_downloader_options, self._urls_import.SetFileImportOptions )
self._tag_import_options = ClientGUIImport.TagImportOptionsButton( self._url_panel, tag_import_options, show_downloader_options, update_callable = self._urls_import.SetTagImportOptions, allow_default_selection = True )
#
self._import_queue_panel.Add( self._pause_button, CC.FLAGS_ON_RIGHT )
self._import_queue_panel.Add( self._file_seed_cache_control, CC.FLAGS_EXPAND_PERPENDICULAR )
self._import_queue_panel.Add( self._file_download_control, CC.FLAGS_EXPAND_PERPENDICULAR )
self._gallery_panel.Add( self._gallery_seed_log_control, CC.FLAGS_EXPAND_PERPENDICULAR )
self._gallery_panel.Add( self._gallery_download_control, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.Add( self._import_queue_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.Add( self._gallery_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.Add( self._url_input, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.Add( self._file_import_options, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.Add( self._tag_import_options, CC.FLAGS_EXPAND_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._media_sort, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, self._url_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
self._MakeCurrentSelectionTagsBox( vbox )
self.widget().setLayout( vbox )
#
file_seed_cache = self._urls_import.GetFileSeedCache()
self._file_seed_cache_control.SetFileSeedCache( file_seed_cache )
gallery_seed_log = self._urls_import.GetGallerySeedLog()
self._gallery_seed_log_control.SetGallerySeedLog( gallery_seed_log )
self._UpdateImportStatus()
def _PendURLs( self, urls, filterable_tags = None, additional_service_keys_to_tags = None ):
if filterable_tags is None:
filterable_tags = set()
if additional_service_keys_to_tags is None:
additional_service_keys_to_tags = ClientTags.ServiceKeysToTags()
urls = [ url for url in urls if url.startswith( 'http' ) ]
self._urls_import.PendURLs( urls, filterable_tags = filterable_tags, additional_service_keys_to_tags = additional_service_keys_to_tags )
self._UpdateImportStatus()
def _UpdateImportStatus( self ):
paused = self._urls_import.IsPaused()
if paused:
ClientGUIFunctions.SetBitmapButtonBitmap( self._pause_button, CC.global_pixmaps().file_play )
else:
ClientGUIFunctions.SetBitmapButtonBitmap( self._pause_button, CC.global_pixmaps().file_pause )
( file_network_job, gallery_network_job ) = self._urls_import.GetNetworkJobs()
self._file_download_control.SetNetworkJob( file_network_job )
self._gallery_download_control.SetNetworkJob( gallery_network_job )
def CheckAbleToClose( self ):
if self._urls_import.CurrentlyWorking():
raise HydrusExceptions.VetoException( 'This page is still importing.' )
def Pause( self ):
self._urls_import.PausePlay()
self._UpdateImportStatus()
def PendURL( self, url, filterable_tags = None, additional_service_keys_to_tags = None ):
if filterable_tags is None:
filterable_tags = set()
if additional_service_keys_to_tags is None:
additional_service_keys_to_tags = ClientTags.ServiceKeysToTags()
self._PendURLs( ( url, ), filterable_tags = filterable_tags, additional_service_keys_to_tags = additional_service_keys_to_tags )
def SetSearchFocus( self ):
ClientGUIFunctions.SetFocusLater( self._url_input )
def Start( self ):
self._urls_import.Start( self._page_key )
management_panel_types_to_classes[ MANAGEMENT_TYPE_IMPORT_URLS ] = ManagementPanelImporterURLs
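# As with the panels above, registering the class in
# management_panel_types_to_classes lets the rest of the client look the
# panel implementation up by its MANAGEMENT_TYPE_* constant.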
class ManagementPanelPetitions( ManagementPanel ):
TAG_DISPLAY_TYPE = ClientTags.TAG_DISPLAY_STORAGE
def __init__( self, parent, page, controller, management_controller ):
self._petition_service_key = management_controller.GetKey( 'petition_service' )
ManagementPanel.__init__( self, parent, page, controller, management_controller )
self._service = self._controller.services_manager.GetService( self._petition_service_key )
self._can_ban = self._service.HasPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_MODERATE )
service_type = self._service.GetServiceType()
self._num_petition_info = None
self._current_petition = None
self._last_petition_type_fetched = None
#
self._petitions_info_panel = ClientGUICommon.StaticBox( self, 'petitions info' )
self._refresh_num_petitions_button = ClientGUICommon.BetterButton( self._petitions_info_panel, 'refresh counts', self._FetchNumPetitions )
self._petition_types_to_controls = {}
content_type_hboxes = []
petition_types = []
if service_type == HC.FILE_REPOSITORY:
petition_types.append( ( HC.CONTENT_TYPE_FILES, HC.CONTENT_STATUS_PETITIONED ) )
elif service_type == HC.TAG_REPOSITORY:
petition_types.append( ( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_STATUS_PETITIONED ) )
petition_types.append( ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_STATUS_PENDING ) )
petition_types.append( ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_STATUS_PETITIONED ) )
petition_types.append( ( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_STATUS_PENDING ) )
petition_types.append( ( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_STATUS_PETITIONED ) )
for ( content_type, status ) in petition_types:
func = HydrusData.Call( self._FetchPetition, content_type, status )
st = ClientGUICommon.BetterStaticText( self._petitions_info_panel )
button = ClientGUICommon.BetterButton( self._petitions_info_panel, 'fetch ' + HC.content_status_string_lookup[ status ] + ' ' + HC.content_type_string_lookup[ content_type ] + ' petition', func )
button.setEnabled( False )
self._petition_types_to_controls[ ( content_type, status ) ] = ( st, button )
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, st, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
QP.AddToLayout( hbox, button, CC.FLAGS_CENTER_PERPENDICULAR )
content_type_hboxes.append( hbox )
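# Each (content type, status) pair gets its own count label and fetch
# button, stored in _petition_types_to_controls keyed by that pair so the
# count text and the button's enabled state can be updated once the
# petition counts have been fetched.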
#
self._petition_panel = ClientGUICommon.StaticBox( self, 'petition' )
self._num_files_to_show = ClientGUICommon.NoneableSpinCtrl( self._petition_panel, message = 'number of files to show', min = 1 )
self._num_files_to_show.SetValue( 256 )
self._action_text = ClientGUICommon.BetterStaticText( self._petition_panel, label = '' )
self._reason_text = QW.QTextEdit( self._petition_panel )
self._reason_text.setReadOnly( True )
self._reason_text.setMinimumHeight( 80 )
check_all = ClientGUICommon.BetterButton( self._petition_panel, 'check all', self._CheckAll )
flip_selected = ClientGUICommon.BetterButton( self._petition_panel, 'flip selected', self._FlipSelected )
check_none = ClientGUICommon.BetterButton( self._petition_panel, 'check none', self._CheckNone )
self._sort_by_left = ClientGUICommon.BetterButton( self._petition_panel, 'sort by left', self._SortBy, 'left' )
self._sort_by_right = ClientGUICommon.BetterButton( self._petition_panel, 'sort by right', self._SortBy, 'right' )
self._sort_by_left.setEnabled( False )
self._sort_by_right.setEnabled( False )
self._contents = QP.CheckListBox( self._petition_panel )
self._contents.setSelectionMode( QW.QAbstractItemView.ExtendedSelection )
self._contents.itemDoubleClicked.connect( self.EventContentDoubleClick )
( min_width, min_height ) = ClientGUIFunctions.ConvertTextToPixels( self._contents, ( 16, 20 ) )
self._contents.setMinimumHeight( min_height )
self._process = QW.QPushButton( 'process', self._petition_panel )
self._process.clicked.connect( self.EventProcess )
self._process.setObjectName( 'HydrusAccept' )
self._copy_account_key_button = ClientGUICommon.BetterButton( self._petition_panel, 'copy petitioner account id', self._CopyAccountKey )
self._modify_petitioner = QW.QPushButton( 'modify petitioner', self._petition_panel )
self._modify_petitioner.clicked.connect( self.EventModifyPetitioner )
self._modify_petitioner.setEnabled( False )
if not self._can_ban: self._modify_petitioner.setVisible( False )
#
self._petitions_info_panel.Add( self._refresh_num_petitions_button, CC.FLAGS_EXPAND_PERPENDICULAR )
for hbox in content_type_hboxes:
self._petitions_info_panel.Add( hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
check_hbox = QP.HBoxLayout()
QP.AddToLayout( check_hbox, check_all, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
QP.AddToLayout( check_hbox, flip_selected, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
QP.AddToLayout( check_hbox, check_none, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
sort_hbox = QP.HBoxLayout()
QP.AddToLayout( sort_hbox, self._sort_by_left, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
QP.AddToLayout( sort_hbox, self._sort_by_right, CC.FLAGS_CENTER_PERPENDICULAR_EXPAND_DEPTH )
self._petition_panel.Add( ClientGUICommon.BetterStaticText( self._petition_panel, label = 'Double-click a petition to see its files, if it has them.' ), CC.FLAGS_EXPAND_PERPENDICULAR )
self._petition_panel.Add( self._num_files_to_show, CC.FLAGS_EXPAND_PERPENDICULAR )
self._petition_panel.Add( self._action_text, CC.FLAGS_EXPAND_PERPENDICULAR )
self._petition_panel.Add( self._reason_text, CC.FLAGS_EXPAND_PERPENDICULAR )
self._petition_panel.Add( check_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._petition_panel.Add( sort_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._petition_panel.Add( self._contents, CC.FLAGS_EXPAND_BOTH_WAYS )
self._petition_panel.Add( self._process, CC.FLAGS_EXPAND_PERPENDICULAR )
self._petition_panel.Add( self._copy_account_key_button, CC.FLAGS_EXPAND_PERPENDICULAR )
self._petition_panel.Add( self._modify_petitioner, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._media_sort, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._media_collect, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._petitions_info_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._petition_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self._MakeCurrentSelectionTagsBox( vbox )
self.widget().setLayout( vbox )
| |
#!/usr/bin/env python
# encoding: utf-8
#
# test_map.py
#
# @Author: <NAME> <andrews>
# @Date: 2017-07-02 13:08:00
# @Last modified by: andrews
# @Last modified time: 2018-03-01 11:03:79
from copy import deepcopy
import numpy as np
from astropy import units as u
import matplotlib
import pytest
from marvin import config
from marvin.core.exceptions import MarvinError
from marvin.tools.maps import Maps
from marvin.tools.quantities import Map, EnhancedMap
from marvin.tests import marvin_test_if
from marvin.utils.datamodel.dap import datamodel
from marvin.utils.datamodel.dap.plotting import get_default_plot_params
from marvin.utils.general.maskbit import Maskbit
value1 = np.array([[16.35, 0.8],
[0, -10.]])
value2 = np.array([[591., 1e-8],
[4., 10]])
value_prod12 = np.array([[9.66285000e+03, 8e-9],
[0, -100]])
ivar1 = np.array([[4, 1],
[6.97789734e+36, 1e8]])
ivar2 = np.array([[10, 1e-8],
[5.76744385e+36, 0]])
ivar_sum12 = np.array([[2.85714286e+00, 9.99999990e-09],
[3.15759543e+36, 0]])
ivar_prod12 = np.array([[1.10616234e-05, 1.56250000e-08],
[0, 0.]])
ivar_pow_2 = np.array([[5.23472002e-08, 9.53674316e-01],
[0, 25]])
ivar_pow_05 = np.array([[3.66072168e-03, 7.81250000e+00],
[0, 0]])
ivar_pow_0 = np.array([[0, 0],
[0, 0]])
ivar_pow_m1 = np.array([[4, 1.],
[0, 1e+08]])
ivar_pow_m2 = np.array([[2.67322500e+02, 1.6e-01],
[0, 2.5e+09]])
ivar_pow_m05 = np.array([[0.97859327, 5],
[0, 0]])
u_flux = u.erg / u.cm**2 / u.s / u.def_unit('spaxel')
u_flux2 = u_flux * u_flux
def _get_maps_kwargs(galaxy, data_origin):
if data_origin == 'file':
maps_kwargs = dict(filename=galaxy.mapspath)
else:
maps_kwargs = dict(plateifu=galaxy.plateifu, release=galaxy.release,
bintype=galaxy.bintype, template_kin=galaxy.template,
mode='local' if data_origin == 'db' else 'remote')
return maps_kwargs
@pytest.fixture(scope='function', params=[('emline_gflux', 'ha_6564'),
('emline_gvel', 'oiii_5008'),
('stellar_vel', None),
('stellar_sigma', None)])
def map_(request, galaxy, data_origin):
maps = Maps(**_get_maps_kwargs(galaxy, data_origin))
map_ = maps.getMap(property_name=request.param[0], channel=request.param[1])
map_.data_origin = data_origin
return map_
class TestMap(object):
def test_map(self, map_, galaxy):
assert map_.getMaps().release == galaxy.release
assert tuple(map_.shape) == tuple(galaxy.shape)
assert map_.value.shape == tuple(galaxy.shape)
assert map_.ivar.shape == tuple(galaxy.shape)
assert map_.mask.shape == tuple(galaxy.shape)
assert (map_.masked.data == map_.value).all()
assert (map_.masked.mask == map_.mask.astype(bool)).all()
assert map_.snr == pytest.approx(np.abs(map_.value * np.sqrt(map_.ivar)))
assert datamodel[map_.getMaps()._dapver][map_.datamodel.full()].unit == map_.unit
def test_plot(self, map_):
fig, ax = map_.plot()
assert isinstance(fig, matplotlib.figure.Figure)
assert isinstance(ax, matplotlib.axes._subplots.Subplot)
assert 'Make single panel map or one panel of multi-panel map plot.' in map_.plot.__doc__
@marvin_test_if(mark='skip', map_={'data_origin': ['db']})
def test_save_and_restore(self, temp_scratch, map_):
fout = temp_scratch.join('test_map.mpf')
map_.save(str(fout))
assert fout.check() is True
map_restored = Map.restore(str(fout), delete=True)
assert tuple(map_.shape) == tuple(map_restored.shape)
@pytest.mark.parametrize('property_name, channel',
[('emline_gflux', 'ha_6564'),
('stellar_vel', None)])
def test_deepcopy(self, galaxy, property_name, channel):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property_name, channel=channel)
map2 = deepcopy(map1)
for attr in vars(map1):
if not attr.startswith('_'):
value = getattr(map1, attr)
value2 = getattr(map2, attr)
if isinstance(value, np.ndarray):
assert np.isclose(value, value2).all()
elif isinstance(value, np.ma.core.MaskedArray):
assert (np.isclose(value.data, value2.data).all() and
(value.mask == value2.mask).all())
elif isinstance(value, Maskbit) or isinstance(value[0], Maskbit):
if isinstance(value, Maskbit):
value = [value]
value2 = [value2]
for mb, mb2 in zip(value, value2):
for it in ['bits', 'description', 'labels', 'mask', 'name']:
assert getattr(mb, it) == getattr(mb2, it)
assert (mb.schema == mb2.schema).all().all()
elif isinstance(value, Maps):
pass
else:
assert value == value2, attr
def test_getMap_invalid_property(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
with pytest.raises(ValueError) as ee:
maps.getMap(property_name='mythical_property')
assert 'Your input value is too ambiguous.' in str(ee.value)
def test_getMap_invalid_channel(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
with pytest.raises(ValueError) as ee:
maps.getMap(property_name='emline_gflux', channel='mythical_channel')
assert 'Your input value is too ambiguous.' in str(ee.value)
@marvin_test_if(mark='include', maps={'plateifu': '8485-1901',
'release': 'MPL-6',
'mode': 'local',
'data_origin': 'file'})
    def test_quantities_reorder(self, maps):
"""Asserts the unit survives a quantity reorder (issue #374)."""
ha = maps['emline_gflux_ha']
assert ha is not None
assert ha.unit is not None
reordered_ha = np.moveaxis(ha, 0, -1)
assert reordered_ha.unit is not None
class TestMapArith(object):
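    # Arithmetic on Map objects should propagate the value, the inverse variance
    # and the mask; each test compares against a manually propagated expectation.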
def test_add_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha + 10.
assert ha10.value == pytest.approx(ha.value + 10.)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_subtract_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha - 10.
assert ha10.value == pytest.approx(ha.value - 10.)
assert ha10.ivar == pytest.approx(ha.ivar)
assert ha10.mask == pytest.approx(ha.mask)
def test_multiply_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha * 10.
assert ha10.value == pytest.approx(ha.value * 10.)
assert ha10.ivar == pytest.approx(ha.ivar / 10.**2)
assert ha10.mask == pytest.approx(ha.mask)
def test_divide_constant(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
ha10 = ha / 10.
assert ha10.value == pytest.approx(ha.value / 10.)
assert ha10.ivar == pytest.approx(ha.ivar * 10.**2)
assert ha10.mask == pytest.approx(ha.mask)
@pytest.mark.parametrize('ivar1, ivar2, expected',
[(ivar1, ivar2, ivar_sum12)])
def test_add_ivar(self, ivar1, ivar2, expected):
assert Map._add_ivar(ivar1, ivar2) == pytest.approx(expected)
@pytest.mark.parametrize('ivar1, ivar2, value1, value2, value_prod12, expected',
[(ivar1, ivar2, value1, value2, value_prod12, ivar_prod12)])
def test_mul_ivar(self, ivar1, ivar2, value1, value2, value_prod12, expected):
ivar = Map._mul_ivar(ivar1, ivar2, value1, value2, value_prod12)
ivar[np.isnan(ivar)] = 0
ivar[np.isinf(ivar)] = 0
assert ivar == pytest.approx(expected)
@pytest.mark.parametrize('power, expected',
[(2, ivar_pow_2),
(0.5, ivar_pow_05),
(0, ivar_pow_0),
(-1, ivar_pow_m1),
(-2, ivar_pow_m2),
(-0.5, ivar_pow_m05)])
@pytest.mark.parametrize('ivar, value,',
[(ivar1, value1)])
def test_pow_ivar(self, ivar, value, power, expected):
ivar = Map._pow_ivar(ivar, value, power)
ivar[np.isnan(ivar)] = 0
ivar[np.isinf(ivar)] = 0
assert ivar == pytest.approx(expected)
@pytest.mark.parametrize('power', [2, 0.5, 0, -1, -2, -0.5])
def test_pow_ivar_none(self, power):
assert Map._pow_ivar(None, np.arange(4), power) == pytest.approx(np.zeros(4))
@pytest.mark.parametrize('unit1, unit2, op, expected',
[(u_flux, u_flux, '+', u_flux),
(u_flux, u_flux, '-', u_flux),
(u_flux, u_flux, '*', u_flux2),
(u_flux, u_flux, '/', u.dimensionless_unscaled),
(u.km, u.s, '*', u.km * u.s),
(u.km, u.s, '/', u.km / u.s)])
def test_unit_propagation(self, unit1, unit2, op, expected):
assert Map._unit_propagation(unit1, unit2, op) == expected
@pytest.mark.parametrize('unit1, unit2, op',
[(u_flux, u.km, '+'),
(u_flux, u.km, '-')])
def test_unit_propagation_mismatch(self, unit1, unit2, op):
with pytest.warns(UserWarning):
assert Map._unit_propagation(unit1, unit2, op) is None
@pytest.mark.parametrize('property1, channel1, property2, channel2',
[('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
('emline_gvel', 'ha_6564', 'stellar_vel', None)])
def test_add_maps(self, galaxy, property1, channel1, property2, channel2):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property1, channel=channel1)
map2 = maps.getMap(property_name=property2, channel=channel2)
map12 = map1 + map2
assert map12.value == pytest.approx(map1.value + map2.value)
assert map12.ivar == pytest.approx(map1._add_ivar(map1.ivar, map2.ivar))
assert map12.mask == pytest.approx(map1.mask | map2.mask)
@pytest.mark.parametrize('property1, channel1, property2, channel2',
[('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
('emline_gvel', 'ha_6564', 'stellar_vel', None)])
def test_subtract_maps(self, galaxy, property1, channel1, property2, channel2):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property1, channel=channel1)
map2 = maps.getMap(property_name=property2, channel=channel2)
map12 = map1 - map2
assert map12.value == pytest.approx(map1.value - map2.value)
assert map12.ivar == pytest.approx(map1._add_ivar(map1.ivar, map2.ivar))
assert map12.mask == pytest.approx(map1.mask | map2.mask)
@pytest.mark.parametrize('property1, channel1, property2, channel2',
[('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
('emline_gvel', 'ha_6564', 'stellar_vel', None)])
def test_multiply_maps(self, galaxy, property1, channel1, property2, channel2):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property1, channel=channel1)
map2 = maps.getMap(property_name=property2, channel=channel2)
map12 = map1 * map2
ivar = map1._mul_ivar(map1.ivar, map2.ivar, map1.value, map2.value, map12.value)
ivar[np.isnan(ivar)] = 0
ivar[np.isinf(ivar)] = 0
assert map12.value == pytest.approx(map1.value * map2.value)
assert map12.ivar == pytest.approx(ivar)
assert map12.mask == pytest.approx(map1.mask | map2.mask)
@pytest.mark.parametrize('property1, channel1, property2, channel2',
[('emline_gflux', 'ha_6564', 'emline_gflux', 'nii_6585'),
('emline_gvel', 'ha_6564', 'stellar_vel', None)])
def test_divide_maps(self, galaxy, property1, channel1, property2, channel2):
maps = Maps(plateifu=galaxy.plateifu)
map1 = maps.getMap(property_name=property1, channel=channel1)
map2 = maps.getMap(property_name=property2, channel=channel2)
map12 = map1 / map2
ivar = map1._mul_ivar(map1.ivar, map2.ivar, map1.value, map2.value, map12.value)
ivar[np.isnan(ivar)] = 0
ivar[np.isinf(ivar)] = 0
mask = map1.mask | map2.mask
bad = np.isnan(map12.value) | np.isinf(map12.value)
mask[bad] = mask[bad] | map12.pixmask.labels_to_value('DONOTUSE')
with np.errstate(divide='ignore', invalid='ignore'):
assert map12.value == pytest.approx(map1.value / map2.value, nan_ok=True)
assert map12.ivar == pytest.approx(ivar)
assert map12.mask == pytest.approx(mask)
@pytest.mark.runslow
@pytest.mark.parametrize('power', [2, 0.5, 0, -1, -2, -0.5])
@pytest.mark.parametrize('property_name, channel',
[('emline_gflux', 'ha_6564'),
('stellar_vel', None)])
def test_pow(self, galaxy, property_name, channel, power):
maps = Maps(plateifu=galaxy.plateifu)
map_orig = maps.getMap(property_name=property_name, channel=channel)
map_new = map_orig**power
sig_orig = np.sqrt(1. / map_orig.ivar)
sig_new = map_new.value * power * sig_orig * map_orig.value
ivar_new = 1 / sig_new**2.
ivar_new[np.isnan(ivar_new)] = 0
ivar_new[np.isinf(ivar_new)] = 0
assert map_new.value == pytest.approx(map_orig.value**power, nan_ok=True)
assert map_new.ivar == pytest.approx(ivar_new)
assert (map_new.mask == map_orig.mask).all()
@marvin_test_if(mark='skip', galaxy=dict(release=['MPL-4']))
def test_stellar_sigma_correction(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
stsig = maps['stellar_sigma']
stsigcorr = maps['stellar_sigmacorr']
expected = (stsig**2 - stsigcorr**2)**0.5
actual = stsig.inst_sigma_correction()
assert actual.value == pytest.approx(expected.value, nan_ok=True)
assert actual.ivar == pytest.approx(expected.ivar)
assert (actual.mask == expected.mask).all()
@marvin_test_if(mark='include', galaxy=dict(release=['MPL-4']))
def test_stellar_sigma_correction_MPL4(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
stsig = maps['stellar_sigma']
with pytest.raises(MarvinError) as ee:
stsig.inst_sigma_correction()
assert 'Instrumental broadening correction not implemented for MPL-4.' in str(ee.value)
def test_stellar_sigma_correction_invalid_property(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
ha = maps['emline_gflux_ha_6564']
with pytest.raises(MarvinError) as ee:
ha.inst_sigma_correction()
assert ('Cannot correct {0}_{1} '.format(ha.datamodel.name, ha.datamodel.channel) +
'for instrumental broadening.') in str(ee.value)
def test_emline_sigma_correction(self, galaxy):
maps = Maps(plateifu=galaxy.plateifu)
hasig = maps['emline_gsigma_ha_6564']
emsigcorr = maps['emline_instsigma_ha_6564']
expected = (hasig**2 - emsigcorr**2)**0.5
actual = hasig.inst_sigma_correction()
assert actual.value == pytest.approx(expected.value, nan_ok=True)
assert actual.ivar == pytest.approx(expected.ivar)
assert (actual.mask == expected.mask).all()
class TestMaskbit(object):
def test_masked(self, maps_release_only):
__, dapver = config.lookUpVersions(maps_release_only.release)
params = get_default_plot_params(dapver)
ha = maps_release_only['emline_gflux_ha_6564']
expected = ha.pixmask.get_mask(params['default']['bitmasks'], dtype=bool)
assert ha.masked.data == pytest.approx(ha.value)
assert (ha.masked.mask == expected).all()
@marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
def test_values_to_bits_mpl4(self, maps_release_only):
ha = maps_release_only['emline_gflux_ha_6564']
assert ha.pixmask.values_to_bits(1) == [0]
@marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
def test_values_to_bits(self, maps_release_only):
ha = maps_release_only['emline_gflux_ha_6564']
assert ha.pixmask.values_to_bits(3) == [0, 1]
@marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
def test_values_to_labels_mpl4(self, maps_release_only):
ha = maps_release_only['emline_gflux_ha_6564']
assert ha.pixmask.values_to_labels(1) == ['DONOTUSE']
@marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
def test_values_to_labels(self, maps_release_only):
ha = maps_release_only['emline_gflux_ha_6564']
assert ha.pixmask.values_to_labels(3) == ['NOCOV', 'LOWCOV']
@marvin_test_if(mark='include', maps_release_only=dict(release=['MPL-4']))
def test_labels_to_value_mpl4(self, maps_release_only):
ha = maps_release_only['emline_gflux_ha_6564']
assert ha.pixmask.labels_to_value('DONOTUSE') == 1
    @marvin_test_if(mark='skip', maps_release_only=dict(release=['MPL-4']))
# Repository: Sketos/PyAutoArray
import autoarray as aa
import autoarray.plot as aplt
from os import path
import matplotlib.pyplot as plt
import os
import pytest
import numpy as np
import shutil
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(name="plot_path")
def make_plotter_setup():
return "{}/..//test_files/plot/".format(os.path.dirname(os.path.realpath(__file__)))
@pytest.fixture(autouse=True)
def set_config_path():
aa.conf.instance = aa.conf.Config(
path.join(directory, "../test_files/plot"), path.join(directory, "output")
)
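# The "from config" halves of the tests below check the default values that
# Plotter and SubPlotter read from the config files under test_files/plot.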
class TestAbstractPlotterAttributes:
def test__units__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.units.use_scaled == True
assert plotter.units.in_kpc == False
assert plotter.units.conversion_factor == None
plotter = aplt.Plotter(units=aplt.Units(in_kpc=True, conversion_factor=2.0))
assert plotter.units.use_scaled == True
assert plotter.units.in_kpc == True
assert plotter.units.conversion_factor == 2.0
sub_plotter = aplt.SubPlotter()
assert sub_plotter.units.use_scaled == True
assert sub_plotter.units.in_kpc == False
assert sub_plotter.units.conversion_factor == None
sub_plotter = aplt.SubPlotter(
units=aplt.Units(use_scaled=False, conversion_factor=2.0)
)
assert sub_plotter.units.use_scaled == False
assert sub_plotter.units.in_kpc == False
assert sub_plotter.units.conversion_factor == 2.0
def test__figure__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.figure.figsize == (7, 7)
assert plotter.figure.aspect == "auto"
plotter = aplt.Plotter(figure=aplt.Figure(aspect="auto"))
assert plotter.figure.figsize == (7, 7)
assert plotter.figure.aspect == "auto"
sub_plotter = aplt.SubPlotter()
assert sub_plotter.figure.figsize == None
assert sub_plotter.figure.aspect == "square"
sub_plotter = aplt.SubPlotter(figure=aplt.Figure.sub(figsize=(6, 6)))
assert sub_plotter.figure.figsize == (6, 6)
assert sub_plotter.figure.aspect == "square"
def test__colormap__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.cmap.cmap == "jet"
assert plotter.cmap.norm == "linear"
assert plotter.cmap.norm_min == None
assert plotter.cmap.norm_max == None
assert plotter.cmap.linthresh == 1.0
assert plotter.cmap.linscale == 2.0
plotter = aplt.Plotter(
cmap=aplt.ColorMap(
cmap="cold",
norm="log",
norm_min=0.1,
norm_max=1.0,
linthresh=1.5,
linscale=2.0,
)
)
assert plotter.cmap.cmap == "cold"
assert plotter.cmap.norm == "log"
assert plotter.cmap.norm_min == 0.1
assert plotter.cmap.norm_max == 1.0
assert plotter.cmap.linthresh == 1.5
assert plotter.cmap.linscale == 2.0
sub_plotter = aplt.SubPlotter()
assert sub_plotter.cmap.cmap == "jet"
assert sub_plotter.cmap.norm == "linear"
assert sub_plotter.cmap.norm_min == None
assert sub_plotter.cmap.norm_max == None
assert sub_plotter.cmap.linthresh == 1.0
assert sub_plotter.cmap.linscale == 2.0
sub_plotter = aplt.SubPlotter(
cmap=aplt.ColorMap.sub(
cmap="cold", norm="log", norm_min=0.1, norm_max=1.0, linscale=2.0
)
)
assert sub_plotter.cmap.cmap == "cold"
assert sub_plotter.cmap.norm == "log"
assert sub_plotter.cmap.norm_min == 0.1
assert sub_plotter.cmap.norm_max == 1.0
assert sub_plotter.cmap.linthresh == 1.0
assert sub_plotter.cmap.linscale == 2.0
def test__colorbar__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.cb.ticksize == 1
assert plotter.cb.fraction == 3.0
assert plotter.cb.pad == 4.0
assert plotter.cb.tick_values == None
assert plotter.cb.tick_labels == None
plotter = aplt.Plotter(
cb=aplt.ColorBar(
ticksize=20,
fraction=0.001,
pad=10.0,
tick_values=(1.0, 2.0),
tick_labels=(3.0, 4.0),
)
)
assert plotter.cb.ticksize == 20
assert plotter.cb.fraction == 0.001
assert plotter.cb.pad == 10.0
assert plotter.cb.tick_values == (1.0, 2.0)
assert plotter.cb.tick_labels == (3.0, 4.0)
sub_plotter = aplt.SubPlotter()
assert sub_plotter.cb.ticksize == 1
assert sub_plotter.cb.fraction == 3.0
assert sub_plotter.cb.pad == 4.0
sub_plotter = aplt.SubPlotter(cb=aplt.ColorBar.sub(fraction=0.001, pad=10.0))
assert sub_plotter.cb.ticksize == 1
assert sub_plotter.cb.fraction == 0.001
assert sub_plotter.cb.pad == 10.0
def test__ticks__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.ticks.ysize == 14
assert plotter.ticks.xsize == 15
assert plotter.ticks.y_manual == None
assert plotter.ticks.x_manual == None
plotter = aplt.Plotter(
ticks=aplt.Ticks(
ysize=24, xsize=25, y_manual=[1.0, 2.0], x_manual=[3.0, 4.0]
)
)
assert plotter.ticks.ysize == 24
assert plotter.ticks.xsize == 25
assert plotter.ticks.y_manual == [1.0, 2.0]
assert plotter.ticks.x_manual == [3.0, 4.0]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.ticks.ysize == 24
assert sub_plotter.ticks.xsize == 25
assert sub_plotter.ticks.y_manual == None
assert sub_plotter.ticks.x_manual == None
sub_plotter = aplt.SubPlotter(
ticks=aplt.Ticks.sub(xsize=25, y_manual=[1.0, 2.0], x_manual=[3.0, 4.0])
)
assert sub_plotter.ticks.ysize == 24
assert sub_plotter.ticks.xsize == 25
assert sub_plotter.ticks.y_manual == [1.0, 2.0]
assert sub_plotter.ticks.x_manual == [3.0, 4.0]
def test__labels__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.labels.title == None
assert plotter.labels._yunits == None
assert plotter.labels._xunits == None
assert plotter.labels.titlesize == 11
assert plotter.labels.ysize == 12
assert plotter.labels.xsize == 13
plotter = aplt.Plotter(
labels=aplt.Labels(
title="OMG", yunits="hi", xunits="hi2", titlesize=1, ysize=2, xsize=3
)
)
assert plotter.labels.title == "OMG"
assert plotter.labels._yunits == "hi"
assert plotter.labels._xunits == "hi2"
assert plotter.labels.titlesize == 1
assert plotter.labels.ysize == 2
assert plotter.labels.xsize == 3
sub_plotter = aplt.SubPlotter()
assert sub_plotter.labels.title == None
assert sub_plotter.labels._yunits == None
assert sub_plotter.labels._xunits == None
assert sub_plotter.labels.titlesize == 15
assert sub_plotter.labels.ysize == 22
assert sub_plotter.labels.xsize == 23
sub_plotter = aplt.SubPlotter(
labels=aplt.Labels.sub(
title="OMG", yunits="hi", xunits="hi2", ysize=2, xsize=3
)
)
assert sub_plotter.labels.title == "OMG"
assert sub_plotter.labels._yunits == "hi"
assert sub_plotter.labels._xunits == "hi2"
assert sub_plotter.labels.titlesize == 15
assert sub_plotter.labels.ysize == 2
assert sub_plotter.labels.xsize == 3
def test__legend__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.legend.include == True
assert plotter.legend.fontsize == 12
plotter = aplt.Plotter(legend=aplt.Legend(include=False, fontsize=11))
assert plotter.legend.include == False
assert plotter.legend.fontsize == 11
sub_plotter = aplt.SubPlotter()
assert sub_plotter.legend.include == False
assert sub_plotter.legend.fontsize == 13
sub_plotter = aplt.SubPlotter(legend=aplt.Legend.sub(include=True))
assert sub_plotter.legend.include == True
assert sub_plotter.legend.fontsize == 13
def test__origin_scatterer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.origin_scatterer.size == 80
assert plotter.origin_scatterer.marker == "x"
assert plotter.origin_scatterer.colors == ["k"]
plotter = aplt.Plotter(
origin_scatterer=aplt.OriginScatterer(size=1, marker=".", colors="k")
)
assert plotter.origin_scatterer.size == 1
assert plotter.origin_scatterer.marker == "."
assert plotter.origin_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.origin_scatterer.size == 81
assert sub_plotter.origin_scatterer.marker == "."
assert sub_plotter.origin_scatterer.colors == ["r"]
sub_plotter = aplt.SubPlotter(
origin_scatterer=aplt.OriginScatterer.sub(marker="o", colors=["r", "b"])
)
assert sub_plotter.origin_scatterer.size == 81
assert sub_plotter.origin_scatterer.marker == "o"
assert sub_plotter.origin_scatterer.colors == ["r", "b"]
def test__mask_scatterer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.mask_scatterer.size == 12
assert plotter.mask_scatterer.marker == "."
assert plotter.mask_scatterer.colors == ["g"]
plotter = aplt.Plotter(
mask_scatterer=aplt.MaskScatterer(size=1, marker="x", colors="k")
)
assert plotter.mask_scatterer.size == 1
assert plotter.mask_scatterer.marker == "x"
assert plotter.mask_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.mask_scatterer.size == 8
assert sub_plotter.mask_scatterer.marker == "."
assert sub_plotter.mask_scatterer.colors == ["w"]
sub_plotter = aplt.SubPlotter(
mask_scatterer=aplt.MaskScatterer.sub(marker="o", colors=["r", "b"])
)
assert sub_plotter.mask_scatterer.size == 8
assert sub_plotter.mask_scatterer.marker == "o"
assert sub_plotter.mask_scatterer.colors == ["r", "b"]
def test__border_scatterer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.border_scatterer.size == 13
assert plotter.border_scatterer.marker == "+"
assert plotter.border_scatterer.colors == ["c"]
plotter = aplt.Plotter(
border_scatterer=aplt.BorderScatterer(size=1, marker="x", colors="k")
)
assert plotter.border_scatterer.size == 1
assert plotter.border_scatterer.marker == "x"
assert plotter.border_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.border_scatterer.size == 7
assert sub_plotter.border_scatterer.marker == "."
assert sub_plotter.border_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter(
border_scatterer=aplt.BorderScatterer.sub(marker="o", colors=["r", "b"])
)
assert sub_plotter.border_scatterer.size == 7
assert sub_plotter.border_scatterer.marker == "o"
assert sub_plotter.border_scatterer.colors == ["r", "b"]
def test__grid_scatterer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.grid_scatterer.size == 14
assert plotter.grid_scatterer.marker == "x"
assert plotter.grid_scatterer.colors == ["y"]
plotter = aplt.Plotter(
grid_scatterer=aplt.GridScatterer(size=1, marker="x", colors="k")
)
assert plotter.grid_scatterer.size == 1
assert plotter.grid_scatterer.marker == "x"
assert plotter.grid_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.grid_scatterer.size == 6
assert sub_plotter.grid_scatterer.marker == "."
assert sub_plotter.grid_scatterer.colors == ["r"]
sub_plotter = aplt.SubPlotter(
grid_scatterer=aplt.GridScatterer.sub(marker="o", colors=["r", "b"])
)
assert sub_plotter.grid_scatterer.size == 6
assert sub_plotter.grid_scatterer.marker == "o"
assert sub_plotter.grid_scatterer.colors == ["r", "b"]
def test__positions_scatterer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.positions_scatterer.size == 15
assert plotter.positions_scatterer.marker == "o"
assert plotter.positions_scatterer.colors == ["r", "g", "b"]
plotter = aplt.Plotter(
positions_scatterer=aplt.PositionsScatterer(size=1, marker="x", colors="k")
)
assert plotter.positions_scatterer.size == 1
assert plotter.positions_scatterer.marker == "x"
assert plotter.positions_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.positions_scatterer.size == 5
assert sub_plotter.positions_scatterer.marker == "."
assert sub_plotter.positions_scatterer.colors == ["c", "g", "b"]
sub_plotter = aplt.SubPlotter(
positions_scatterer=aplt.PositionsScatterer.sub(
marker="o", colors=["r", "b"]
)
)
assert sub_plotter.positions_scatterer.size == 5
assert sub_plotter.positions_scatterer.marker == "o"
assert sub_plotter.positions_scatterer.colors == ["r", "b"]
def test__index_scatterer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.index_scatterer.size == 20
assert plotter.index_scatterer.marker == "."
assert plotter.index_scatterer.colors == ["r", "g", "b", "y", "k", "w"]
plotter = aplt.Plotter(
index_scatterer=aplt.IndexScatterer(size=1, marker="x", colors="k")
)
assert plotter.index_scatterer.size == 1
assert plotter.index_scatterer.marker == "x"
assert plotter.index_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.index_scatterer.size == 21
assert sub_plotter.index_scatterer.marker == "+"
assert sub_plotter.index_scatterer.colors == ["r", "g", "b", "y", "w", "k"]
sub_plotter = aplt.SubPlotter(
index_scatterer=aplt.IndexScatterer.sub(marker="o", colors="r")
)
assert sub_plotter.index_scatterer.size == 21
assert sub_plotter.index_scatterer.marker == "o"
assert sub_plotter.index_scatterer.colors == ["r"]
def test__pixelization_grid_scatterer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.pixelization_grid_scatterer.size == 5
assert plotter.pixelization_grid_scatterer.marker == "."
assert plotter.pixelization_grid_scatterer.colors == ["r"]
plotter = aplt.Plotter(
pixelization_grid_scatterer=aplt.PixelizationGridScatterer(
size=1, marker="x", colors="k"
)
)
assert plotter.pixelization_grid_scatterer.size == 1
assert plotter.pixelization_grid_scatterer.marker == "x"
assert plotter.pixelization_grid_scatterer.colors == ["k"]
sub_plotter = aplt.SubPlotter()
assert sub_plotter.pixelization_grid_scatterer.size == 6
assert sub_plotter.pixelization_grid_scatterer.marker == "o"
assert sub_plotter.pixelization_grid_scatterer.colors == ["g"]
sub_plotter = aplt.SubPlotter(
pixelization_grid_scatterer=aplt.PixelizationGridScatterer.sub(
marker="o", colors="r"
)
)
assert sub_plotter.pixelization_grid_scatterer.size == 6
assert sub_plotter.pixelization_grid_scatterer.marker == "o"
assert sub_plotter.pixelization_grid_scatterer.colors == ["r"]
def test__liner__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.liner.width == 3
assert plotter.liner.style == "-"
assert plotter.liner.colors == ["k", "w"]
assert plotter.liner.pointsize == 2
plotter = aplt.Plotter(
liner=aplt.Liner(width=1, style=".", colors=["k", "b"], pointsize=3)
)
assert plotter.liner.width == 1
assert plotter.liner.style == "."
assert plotter.liner.colors == ["k", "b"]
assert plotter.liner.pointsize == 3
sub_plotter = aplt.SubPlotter()
assert sub_plotter.liner.width == 1
assert sub_plotter.liner.style == "-"
assert sub_plotter.liner.colors == ["k"]
assert sub_plotter.liner.pointsize == 20
sub_plotter = aplt.SubPlotter(
liner=aplt.Liner.sub(style=".", colors="r", pointsize=21)
)
assert sub_plotter.liner.width == 1
assert sub_plotter.liner.style == "."
assert sub_plotter.liner.colors == ["r"]
assert sub_plotter.liner.pointsize == 21
def test__voronoi_drawer__from_config_or_via_manual_input(self):
plotter = aplt.Plotter()
assert plotter.voronoi_drawer.edgewidth == 0.3
assert plotter.voronoi_drawer.edgecolor == "k"
assert plotter.voronoi_drawer.alpha == 0.7
plotter = aplt.Plotter(
voronoi_drawer=aplt.VoronoiDrawer(edgewidth=0.5, edgecolor="r", alpha=1.0)
)
assert plotter.voronoi_drawer.edgewidth == 0.5
assert plotter.voronoi_drawer.edgecolor == "r"
assert plotter.voronoi_drawer.alpha == 1.0
        sub_plotter = aplt.SubPlotter()
import ipaddress
import netaddr
import numpy as np
import pandas as pd
from networkml.featurizers.features import Features
MAC_BCAST = netaddr.EUI('FF-FF-FF-FF-FF-FF')
ETH_TYPE_ARP = 0x806
ETH_TYPE_IP = 0x800
ETH_TYPE_IPV6 = 0x86DD
ETH_TYPE_IPX = 0x8137
ETH_IP_TYPES = frozenset((ETH_TYPE_ARP, ETH_TYPE_IP, ETH_TYPE_IPV6))
WK_IP_PROTOS = ('tcp', 'udp', 'icmp', 'arp', 'icmpv6', 'gre', 'esp', 'ah')
WK_IP_PROTOS_INDEX = {WK_IP_PROTOS.index(i): i for i in WK_IP_PROTOS}
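# Each well-known protocol above gets a bit position, so a host seen using e.g.
# tcp (index 0) and icmp (index 2) is encoded as 2**0 + 2**2 == 5 in the single
# '_protos_int' column, and later decoded back into per-protocol indicator
# features by _get_flags() via _tshark_wk_ip_protocol().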
TCP_UDP_PROTOS = {
6: 'tcp',
17: 'udp',
}
class HostBase:
CALC_COL_NAMES = (
('frame.len', 'frame_len'),
('frame.time_delta_displayed', 'time_delta'))
CALC_COL_FUNCS = (
('max', lambda x: x.max()),
('min', lambda x: x.min()),
('count', lambda x: x.count()),
('total', lambda x: x.sum()),
('average', lambda x: x.mean()),
('median', lambda x: x.median()),
('variance', lambda x: x.var()),
('25q', lambda x: x.quantile(0.25)),
('75q', lambda x: x.quantile(0.75)))
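    # Every (column, function) pair above becomes one feature per traffic
    # direction, e.g. 'tshark_max_frame_len_out' or 'tshark_median_time_delta_in'.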
# https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml
# TODO: enumerate most common ports from survey (complete indicator matrix too expensive)
WK_PRIV_TCPUDP_PORTS = frozenset(
[22, 23, 25, 53, 67, 68, 69, 80, 88, 110, 123, 137, 138, 139, 143, 161, 443, 631])
WK_NONPRIV_TCPUDP_PORTS = frozenset(
        [1900, 2375, 2376, 5222, 5349, 5353, 5354, 5357, 6653])
DROP_PROTOS = frozenset(
['frame', 'data', 'eth', 'ip', 'ipv6'])
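    # Layers present in practically every frame; they carry no discriminating
    # information, so they are excluded from the per-host protocol features.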
def _mac(self, mac):
return netaddr.EUI(int(mac), dialect=netaddr.mac_unix_expanded)
def _is_unicast(self, mac):
mac_val = self._mac(mac)
if mac_val == MAC_BCAST or mac_val.packed[0] & 1:
return False
return True
def _numericintset(self, nums):
if nums is not None:
return frozenset(int(x) for x in nums if x is not None and pd.notna(x))
return frozenset()
def _get_ip(self, row, cols):
ipv = row['ip.version']
if not pd.isnull(ipv):
ipv = int(ipv)
if ipv == 4:
prefix = 'ip'
else:
prefix = 'ipv6'
for col in cols:
val = row['.'.join((prefix, col))]
if not pd.isnull(val):
return ipaddress.ip_address(int(val))
return None
def _get_src_ip(self, row):
return self._get_ip(row, ('src', 'src_host'))
def _get_dst_ip(self, row):
return self._get_ip(row, ('dst', 'dst_host'))
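    # Decodes a bitmask column (e.g. 'tcp.flags') into one 0/1 indicator per
    # named bit, keyed 'tshark_<field>_<flag>' (plus an optional suffix).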
def _get_flags(self, mac_df, col_name, decode_map, suffix=None, field_name=None):
try:
col = mac_df[col_name]
unique_flags = self._numericintset(col.unique())
except KeyError:
unique_flags = [0]
decoded_flags = set()
for bit, decoded_flag in decode_map.items():
bitval = 2**bit
for flags in sorted(filter(lambda x: x >= bitval, unique_flags)):
if flags & bitval:
decoded_flags.add(decoded_flag)
if field_name is None:
field_name = col_name.replace('.', '_')
if suffix is not None:
return {'tshark_%s_%s_%s' % (
field_name, decoded_flag, suffix): int(decoded_flag in decoded_flags)
for decoded_flag in decode_map.values()}
return {'tshark_%s_%s' % (
field_name, decoded_flag): int(decoded_flag in decoded_flags)
for decoded_flag in decode_map.values()}
def _tshark_flags(self, suffix, mac_df):
mac_row_flags = {}
for func in (
lambda x, y: self._get_flags(x, 'ip.dsfield', {
0: 'ecn0', 1: 'ecn1', 2: 'dscp0', 3: 'dscp1', 4: 'dscp2', 5: 'dscp3', 6: 'dscp4', 7: 'dscp5'}, suffix=y),
lambda x, y: self._get_flags(x, 'ip.flags', {
0: 'fin', 1: 'syn', 2: 'rst', 3: 'psh', 4: 'ack', 5: 'urg', 6: 'ece', 7: 'cwr', 8: 'ns'}, suffix=y),
lambda x, y: self._get_flags(x, 'tcp.flags', {
0: 'fin', 1: 'syn', 2: 'rst', 3: 'psh', 4: 'ack', 5: 'urg', 6: 'ece', 7: 'cwr', 8: 'ns'}, suffix=y),
):
mac_row_flags.update(func(mac_df, suffix))
return mac_row_flags
def _lowest_ip_proto_port(self, mac_df, ip_proto):
if not mac_df.empty:
src = mac_df['%s.srcport' % ip_proto]
dst = mac_df['%s.dstport' % ip_proto]
if src.count() and dst.count():
return self._numericintset(np.minimum(src, dst).unique()) # pylint: disable=no-member
return frozenset()
def _tshark_ports(self, suffix, mac_df):
mac_row_ports = {}
def port_priv(port):
return port < 1024
for ip_proto_num, ip_proto in TCP_UDP_PROTOS.items():
proto_df = mac_df[mac_df['ip.proto']==ip_proto_num]
lowest_ports = self._lowest_ip_proto_port(proto_df, ip_proto)
for field_name, ports, wk_ports in (
('priv', {port for port in lowest_ports if port_priv(
port)}, self.WK_PRIV_TCPUDP_PORTS),
('nonpriv', {port for port in lowest_ports if not port_priv(
port)}, self.WK_NONPRIV_TCPUDP_PORTS),
):
port_flags = {port: int(port in ports) for port in wk_ports}
port_flags.update(
{'other': int(bool(lowest_ports) and not ports.issubset(wk_ports))})
mac_row_ports.update({
'tshark_%s_%s_port_%s_%s' % (ip_proto, field_name, port, suffix): present for port, present in port_flags.items()})
return mac_row_ports
def _tshark_ratio_ports(self, mac_df):
mac_row_ports = {}
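        # Ratio of packets sent from a port to packets received on it: 0 when
        # nothing (or only inbound traffic) was seen, 1 when there was outbound
        # traffic but none inbound.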
def calc_ratio(src_count, dst_count):
packet_ratio = 0
if src_count is not None and dst_count is not None:
if dst_count > 0:
packet_ratio = src_count / dst_count
elif src_count > 0:
packet_ratio = 1
return packet_ratio
for ip_proto_num, ip_proto in TCP_UDP_PROTOS.items():
proto_df = mac_df[mac_df['ip.proto']==ip_proto_num]
src = pd.DataFrame(columns=['%s.srcport' % ip_proto])
dst = pd.DataFrame(columns=['%s.dstport' % ip_proto])
if not proto_df.empty:
try:
src = proto_df['%s.srcport' % ip_proto]
dst = proto_df['%s.dstport' % ip_proto]
except KeyError:
pass
for field_name, wk_ports, port_src, port_dst in (
('priv', self.WK_PRIV_TCPUDP_PORTS,
src[src <= 1023], dst[dst <= 1023]),
('nonpriv', self.WK_NONPRIV_TCPUDP_PORTS,
src[src > 1023], dst[dst > 1023])):
src_values = port_src[src.isin(wk_ports)]
dst_values = port_dst[dst.isin(wk_ports)]
src_counts = {}
if not src_values.empty:
src_counts = src_values.value_counts()
dst_counts = {}
if not dst_values.empty:
dst_counts = dst_values.value_counts()
for port in wk_ports:
src_count = src_counts.get(port, None)
dst_count = dst_counts.get(port, None)
mac_row_ports.update({
'tshark_%s_%s_packet_ratio_io_port_%s' % (ip_proto, field_name, port): calc_ratio(src_count, dst_count)})
src_values = port_src[~port_src.isin(wk_ports)]
src_count = 0
if not src_values.empty:
src_count = src_values.value_counts().sum()
dst_values = port_dst[~port_dst.isin(wk_ports)]
dst_count = 0
if not dst_values.empty:
dst_count = dst_values.value_counts().sum()
mac_row_ports.update({
'tshark_%s_%s_packet_ratio_io_port_%s' % (ip_proto, field_name, 'other'): calc_ratio(src_count, dst_count)})
return mac_row_ports
def _tshark_ipversions(self, mac_df):
try:
ip_versions = self._numericintset(mac_df['ip.version'].unique())
except AttributeError:
ip_versions = frozenset()
return {'tshark_ipv%u' % v: int(v in ip_versions) for v in (4, 6)}
def _tshark_non_ip(self, mac_df):
try:
eth_types = self._numericintset(mac_df['eth.type'].unique())
except AttributeError:
eth_types = frozenset()
return {
'tshark_ipx': int(ETH_TYPE_IPX in eth_types),
'tshark_nonip': int(bool(eth_types - ETH_IP_TYPES)),
}
def _tshark_both_private_ip(self, mac_df):
try:
both_private_ip = int(mac_df['_both_private_ip'].max() == 1)
except KeyError:
both_private_ip = 0
return {
'tshark_both_private_ip': both_private_ip,
}
def _tshark_ipv4_multicast(self, mac_df):
try:
ipv4_multicast = int(mac_df['_ipv4_multicast'].max() == 1)
except KeyError:
ipv4_multicast = 0
return {
'tshark_ipv4_multicast': ipv4_multicast,
}
def _tshark_wk_ip_protocol(self, mac_df):
return self._get_flags(mac_df, '_protos_int', WK_IP_PROTOS_INDEX, suffix=None, field_name='wk_ip_protocol')
def _tshark_vlan_id(self, mac_df):
return {
'tshark_tagged_vlan': int(pd.notna(mac_df['vlan.id'].max()))
}
def _tshark_frame_epoch(self, mac_df):
return {
'tshark_frame_epoch': float(mac_df['frame.time_epoch'].max())
}
def _tshark_unique_ips(self, mac, mac_df):
srcips = mac_df[mac_df['eth.src'] == mac]['_srcip']
dstips = mac_df[mac_df['eth.src'] == mac]['_dstip']
return {
'tshark_srcips': list(set(srcips.unique().tolist()) - {'None'}),
'tshark_unique_srcips': srcips.nunique(),
'tshark_unique_dstips': dstips.nunique(),
}
def _calc_cols(self, mac, mac_df):
mac_row = {}
for suffix, suffix_func in (
('out', lambda x: mac_df[mac_df['eth.src'] == x]),
('in', lambda x: mac_df[mac_df['eth.src'] != x])):
try:
suffix_df = suffix_func(mac)
except KeyError:
continue
for col_name, field_name in self.CALC_COL_NAMES:
col = suffix_df[col_name]
for calc_name, calc_func in self.CALC_COL_FUNCS:
calc_col = 'tshark_%s_%s_%s' % (
calc_name, field_name, suffix)
val = calc_func(col)
if pd.isnull(val):
val = 0
mac_row.update({calc_col: val})
for func in (
self._tshark_flags,
self._tshark_ports):
mac_row.update(func(suffix, suffix_df))
for func in (
self._tshark_ipversions,
self._tshark_non_ip,
self._tshark_both_private_ip,
self._tshark_ipv4_multicast,
self._tshark_wk_ip_protocol,
self._tshark_vlan_id,
self._tshark_frame_epoch,
self._tshark_ratio_ports):
mac_row.update(func(mac_df))
mac_row.update(self._tshark_unique_ips(mac, mac_df))
return mac_row
def _calc_mac_row(self, mac, mac_df):
mac_row = {'host_key': str(self._mac(mac))}
mac_row.update(self._calc_cols(mac, mac_df))
return mac_row
def _host_key(self, row):
raise NotImplementedError
def _df_ip_flags(self, ip_src, ip_dst):
both_private_ip = 0
ipv4_multicast = 0
if not pd.isnull(ip_src) and not pd.isnull(ip_dst):
both_private_ip = int(ip_src.is_private and ip_dst.is_private)
ipv4_multicast = int(ip_dst.version == 4 and ip_dst.is_multicast)
return (both_private_ip, ipv4_multicast)
def _encode_df_proto_flags(self, short_row_keys, frame_protocols):
if frame_protocols:
short_frame_protocols = frozenset(frame_protocols.split(':'))
else:
short_frame_protocols = {}
all_protos = short_row_keys.union(
short_frame_protocols) - self.DROP_PROTOS
all_protos_int = 0
for proto in all_protos.intersection(WK_IP_PROTOS):
index = WK_IP_PROTOS.index(proto)
all_protos_int += 2**index
return all_protos_int
def _df_proto_flags(self, row):
short_row_keys = frozenset(x.split('.')[0] for x, y in row.items(
) if not pd.isnull(y) and not x.startswith('_'))
return self._encode_df_proto_flags(short_row_keys, row['frame.protocols'])
def _tshark_all(self, df, srcmacid):
print('calculating intermediates', end='', flush=True)
df['_host_key'], df['_srcip'], df['_dstip'], df['_both_private_ip'], df['_ipv4_multicast'], df['_protos_int'] = zip(
*df.apply(self._host_key, axis=1))
eth_srcs = frozenset(df['eth.src'].unique())
eth_dsts = frozenset(df['eth.dst'].unique())
all_unicast_macs = frozenset(
mac for mac in eth_srcs.union(eth_dsts) if self._is_unicast(mac))
host_keys = df['_host_key'].unique()
host_keys_count = len(host_keys)
print('.%u MACs, %u sessions' %
(len(all_unicast_macs), host_keys_count), end='', flush=True)
if srcmacid:
minsrcipmac = df.groupby(['eth.src'])[
'_srcip'].nunique().idxmin(axis=0)
assert minsrcipmac in all_unicast_macs
print('.MAC %s has minimum number of source IPs, selected as canonical source' %
self._mac(minsrcipmac), end='', flush=True)
all_unicast_macs = {minsrcipmac}
mac_rows = []
for i, mac in enumerate(all_unicast_macs, start=1):
mac_df = df[(df['eth.src'] == mac) | (df['eth.dst'] == mac)]
# If just one MAC, don't need groupby on host key.
if len(all_unicast_macs) == 1:
mac_rows.append(self._calc_mac_row(mac, mac_df))
else:
s = 0
for _, key_df in mac_df.groupby('_host_key'):
s += 1
if s % 100 == 0:
print('.MAC %u/%u %.1f%%' % (i, len(all_unicast_macs),
s / len(host_keys) * 100), end='', flush=True)
mac_rows.append(self._calc_mac_row(mac, key_df))
print('.MAC %u/%u 100%%.' %
(i, len(all_unicast_macs)), end='', flush=True)
return mac_rows
class Host(HostBase, Features):
def _host_key(self, row):
ip_src = self._get_src_ip(row)
ip_dst = self._get_dst_ip(row)
both_private_ip, ipv4_multicast = self._df_ip_flags(ip_src, ip_dst)
protos_int = self._df_proto_flags(row)
return (0, str(ip_src), str(ip_dst), both_private_ip, ipv4_multicast, protos_int)
def host_tshark_all(self, df, parsed_args):
return self._tshark_all(df, parsed_args.srcmacid)
class SessionHost(HostBase, Features):
def _host_key(self, row):
eth_src = row['eth.src']
eth_dst = row['eth.dst']
ip_src = self._get_src_ip(row)
ip_dst = self._get_dst_ip(row)
both_private_ip, ipv4_multicast = self._df_ip_flags(ip_src, ip_dst)
protos_int = self._df_proto_flags(row)
if not pd.isnull(ip_src) and not pd.isnull(ip_dst):
ip_proto = TCP_UDP_PROTOS.get(row['ip.version'], None)
if ip_proto:
src_port = row['%s.srcport' % ip_proto]
dst_port = row['%s.dstport' % ip_proto]
if ip_src > ip_dst:
                # in case the relation is of type "to-one"
if relation_type == TO_ONE_RELATION:
# tries to retrieve the "current" target
# entity to be used for the application
target_entity = self.get_value(item_name)
# in case there is no item (target entity)
# in the entity one must be created
if target_entity == None:
# "resolves" the target to one relation, loading or creating
# the required model and sets the retrieved (target) entity
# in the current model instance
target_entity = self.resolve_to_one(
item_value,
target_model,
permissive
)
setattr(self, item_name, target_entity)
# otherwise the entity already contains the
# item it must be "re-used" and merged
# with the item value
else:
# updates the item in the entity with
# the map containing the value
target_entity.apply(
item_value,
permissive = permissive
)
# in case the relation is of type "to-many"
elif relation_type == TO_MANY_RELATION:
# "resolves" the target to many relation, loading or
# creating the required models and sets the target entities
# list in the current model instance
target_entitites_list = self.resolve_to_many(
item_value,
target_model,
permissive
)
setattr(self, item_name, target_entitites_list)
# otherwise it's a single attribute relation
# the normal setting should apply
else:
# sets the attribute in the current model
self._set_attribute(item_name, item_value)
except BaseException as exception:
# tries to call the fail apply method, in order to notify the
# current instance about the failure of the apply procedure
if hasattr(self, "fail_apply"): self.fail_apply(exception)
# re-raises the exception back in the stack so that it can
# be properly handled by the upper layers
raise
finally:
# attaches the entity back to the data source
# for correct persistence structures
self.attach(force = False)
# tries to call the post apply method, in order to notify the
# current instance about the finishing of the apply procedure
if hasattr(self, "post_apply"): self.post_apply()
def get_system(self):
"""
Retrieves the current (associated) system instance
reference that can be used to retrieve the plugin
internal state and global data reference.
:rtype: Object
:return: The system instance associated with the current
entity model.
"""
return self._system_instance
def get_plugin(self):
"""
Retrieves the current (associated) plugin instance
reference that can be used to retrieve the plugin
internal state and global data reference.
:rtype: Object
:return: The plugin instance associated with the current
entity model.
"""
return self._system_instance.plugin
def get_attribute_name(self, attribute_name):
"""
Retrieves the attribute from the given composite
attribute name.
The attribute is retrieved using a composite approach
and the name is separated by dots.
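        For example, a (hypothetical) composite name such as
        "customer.address.street" would resolve "customer" on this entity,
        then "address" on that value, and finally "street".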
:type attribute_name: String
:param attribute_name: The name of the attribute
to be retrieved.
:rtype: Object
:return: The attribute for the given attribute name.
"""
# splits the attribute name into tokens
attribute_name_tokens = attribute_name and attribute_name.split(".") or []
# sets the initial current attribute value
current_attribute = self
# iterates over all the attribute name tokens
for attribute_name_token in attribute_name_tokens:
# updates the current attribute with the attribute name token
current_attribute = current_attribute.get_value(attribute_name_token)
# returns the current attribute
return current_attribute
def dumps(self, serializer):
"""
Serializes (dumps) the current object with
the given serializer object.
:type serializer: Serializer
:param serializer: The serializer object to be used
to serialize the current object.
:rtype: String
:return: The serialized value.
"""
# serializes the object (dumps)
data = serializer.dumps(self)
# returns the serialized value (data)
return data
def loads(self, serializer, data):
"""
Unserializes (loads) converting and loading
the given data into the current object.
:type serializer: Serializer
:param serializer: The serializer object to be used
to unserialize the given data.
:rtype: String
:return: The serialized data to be loaded.
"""
# unserializes the data (loads)
# retrieving the model
model = serializer.loads(data)
# iterates over all the dictionary items
# to load the values (from the model)
for key, value in colony.legacy.items(model):
# loads the given value in the current object
self._load_value(key, value)
def _load_value(self, key, value):
"""
Loads the value with the given key in the
current object.
:type key: String
:param key: The key to be used to refer to the value
in the current object.
:type value: Object
:param value: The value to be set in the current object.
"""
# in case the current object does not contain
# an attribute with the key name must return
# immediately, nothing to be set
if not hasattr(self, key): return
# sets the value in the current object
setattr(self, key, value)
def is_lazy_loaded(self, attribute_name):
"""
Indicates if the specified attribute
is lazy loaded.
:type attribute_name: String
:param attribute_name: The attribute name.
:rtype: bool
:return: The lazy loaded flag.
"""
# sets the lazy loaded flag in case
# the attribute value is not present
lazy_loaded = not self.has_value(attribute_name)
# returns the lazy loaded flag
return lazy_loaded
def lock_session(self):
"""
Locks the session associated with the current request,
subsequent accesses to the session will be blocked until the
session is released.
"""
# tries to retrieve the request session
request_session = self.request.get_session()
# in case the request session
# is invalid
if not request_session:
# start a session if none is started and then
# retrieves it from the request
self.request.start_session()
request_session = self.request.get_session()
# locks the "just" retrieved (or created) request
# session (blocks it)
request_session.lock()
def release_session(self):
"""
Releases the session associated with the current request,
allowing further requests to access the session to be passed.
"""
# tries to retrieve the request session
request_session = self.request.get_session()
# in case the request session is invalid
# an exception should be raised (invalid situation)
if not request_session: raise RuntimeError("problem releasing session, no session available")
# releases the "just" retrieved request
# session (unblocks it)
request_session.release()
def get_session_attribute(
self,
session_attribute_name,
namespace_name = None,
unset_session_attribute = False
):
"""
Retrieves the session attribute from the current request
with the given name and for the given namespace.
Optionally it may be unset from session after retrieval.
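        :type session_attribute_name: String
        :param session_attribute_name: The name of the session
        attribute to be retrieved.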
:type namespace_name: String
:param namespace_name: The name of the namespace for the
attribute to be retrieved.
:type unset_session_attribute: bool
:param unset_session_attribute: If the session attribute should
be unset after retrieval.
:rtype: Object
:return The retrieved session attribute.
"""
# retrieves the currently available request to try
# to access the session variables
request = self.get_request()
# tries to retrieve the request session
request_session = request.get_session()
# in case the request session
# is invalid, must return invalid
if not request_session: return None
# resolves the complete session attribute name
session_attribute_name = _get_complete_name(session_attribute_name, namespace_name)
# retrieves the attribute from the session
session_attribute = request_session.get_attribute(session_attribute_name)
# in case the unset the session attribute flag is set
# the session attribute is unset
unset_session_attribute and request_session.unset_attribute(session_attribute_name)
# returns the session attribute
return session_attribute
def set_session_attribute(
self,
session_attribute_name,
session_attribute_value,
namespace_name = None
):
"""
Sets the session attribute in the current request
with the given name and for the given namespace.
The session attribute value may be of any type.
:type session_attribute_name: String
:param session_attribute_name: The name of the session
attribute to be set.
:type session_attribute_value: Object
:param session_attribute_value: The value of the session
attribute to be set.
:type namespace_name: String
:param namespace_name: The name of the namespace for the
attribute to be set.
"""
# retrieves the currently available request to try
# to access the session variables
request = self.get_request()
# tries to retrieve the request session
request_session = request.get_session()
# in case the request session
# is invalid
if not request_session:
# start a session if none is started and then
# retrieves it from the request
request.start_session()
request_session = request.get_session()
# resolves the complete session attribute name
session_attribute_name = _get_complete_name(session_attribute_name, namespace_name)
# sets the attribute in the session
request_session.set_attribute(session_attribute_name, session_attribute_value)
def unset_session_attribute(self, session_attribute_name, namespace_name = None):
"""
Unsets the session attribute from the current request
with the given name and for the given namespace.
:type session_attribute_name: String
:param session_attribute_name: The name of the session
attribute to be unset.
:type namespace_name: String
:param namespace_name: The name of the namespace for the
attribute to be unset.
"""
# Repository: boffman/grizzly (grizzly/users/restapi.py)
'''Communicates with HTTP and HTTPS, with built-in support for Azure authenticated endpoints.
## Request methods
Supports the following request methods:
* get
* put
* post
## Format
Format of `host` is the following:
```plain
http[s]://<hostname>
```
## Examples
Example of how to use it in a scenario:
```gherkin
Given a user of type "RestApi" load testing "https://api.example.com"
Then post request "test/request.j2.json" to endpoint "/api/test"
Then get request from endpoint "/api/test"
```
To change how often the token should be refreshed (the default is 3000 seconds):
```gherkin
And set context variable "auth.refresh_time" to "3500"
```
### Authentication
#### Client secret
```gherkin
Given a user of type "RestApi" load testing "https://api.example.com"
And set context variable "auth.client.tenant" "<tenant name/guid>"
And set context variable "auth.client.id" to "<client id>"
And set context variable "auth.client.secret" to "<client secret>"
And set context variable "auth.client.resource" to "<resource url/guid>"
```
#### Username and password
`auth.user.redirect_uri` needs to correspond to the endpoint that the client secret is registered for.
```gherkin
Given a user of type "RestApi" load testing "https://api.example.com"
And set context variable "auth.client.id" to "<client id>"
And set context variable "auth.user.username" to "<EMAIL>"
And set context variable "auth.user.password" to "<PASSWORD>!"
And set context variable "auth.user.redirect_uri" to "/app-registrered-redirect-uri"
```
'''
import json
import re
from typing import Dict, Optional, Any, Tuple, List, Union, cast
from time import time, perf_counter as time_perf_counter
from functools import wraps
from enum import Enum
from urllib.parse import parse_qs, urlparse
from uuid import uuid4
from locust.contrib.fasthttp import FastHttpSession
from locust.exception import StopUser
from locust.env import Environment
import requests
from ..types import GrizzlyResponse, RequestType, WrappedFunc, GrizzlyResponseContextManager
from ..utils import merge_dicts
from ..types import RequestMethod
from ..tasks import RequestTask
from ..clients import ResponseEventSession
from .base import RequestLogger, ResponseHandler, GrizzlyUser, HttpRequests, AsyncRequests
from . import logger
from urllib3 import disable_warnings as urllib3_disable_warnings
urllib3_disable_warnings()
class AuthMethod(Enum):
NONE = 1
CLIENT = 2
USER = 3
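# Method decorator for RestApiUser: before the wrapped call runs it checks which
# authentication flow is configured (client secret vs. username/password) and
# fetches a fresh token when the session is older than 'auth.refresh_time' or no
# Authorization header has been set yet; with no auth configured it strips the header.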
class refresh_token:
def __call__(self, func: WrappedFunc) -> WrappedFunc:
@wraps(func)
def refresh_token(cls: 'RestApiUser', *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> Any:
auth_context = cls._context['auth']
use_auth_client = (
auth_context.get('client', {}).get('id', None) is not None
and auth_context.get('client', {}).get('secret', None) is not None
)
use_auth_user = (
auth_context.get('client', {}).get('id', None) is not None
and auth_context.get('user', {}).get('username', None) is not None
and auth_context.get('user', {}).get('password', None) is not None
and auth_context.get('user', {}).get('redirect_uri', None) is not None
)
if use_auth_client:
auth_method = AuthMethod.CLIENT
elif use_auth_user:
auth_method = AuthMethod.USER
else:
auth_method = AuthMethod.NONE
if auth_method is not AuthMethod.NONE and cls.session_started is not None:
session_now = time()
session_duration = session_now - cls.session_started
# refresh token if session has been alive for at least refresh_time
if session_duration >= auth_context.get('refresh_time', 3000) or cls.headers.get('Authorization', None) is None:
cls.get_token(auth_method)
else:
try:
del cls.headers['Authorization']
except KeyError:
pass
return func(cls, *args, **kwargs)
return cast(WrappedFunc, refresh_token)
class RestApiUser(ResponseHandler, RequestLogger, GrizzlyUser, HttpRequests, AsyncRequests):
session_started: Optional[float]
headers: Dict[str, Optional[str]]
host: str
_context: Dict[str, Any] = {
'verify_certificates': True,
'auth': {
'refresh_time': 3000,
'url': None,
'client': {
'id': None,
'secret': None,
'resource': None,
'tenant': None,
},
'user': {
'username': None,
                'password': None,
'redirect_uri': None,
},
},
'metadata': None,
}
def __init__(self, environment: Environment, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> None:
super().__init__(environment, *args, **kwargs)
self.headers = {
'Authorization': None,
'Content-Type': 'application/json',
'x-grizzly-user': f'{self.__class__.__name__}',
}
self.session_started = None
self._context = merge_dicts(
super().context(),
            # this is needed since we create a new class with this class as a sub class; the context would be messed up otherwise
            # in other words, don't use RestApiUser._context; it should only be used in classes which are directly created
            # in grizzly
self.__class__._context,
)
headers = self._context.get('metadata', None)
if headers is not None:
self.headers.update(headers)
def on_start(self) -> None:
self.session_started = time()
def get_token(self, auth_method: AuthMethod) -> None:
if auth_method == AuthMethod.CLIENT:
self.get_client_token()
elif auth_method == AuthMethod.USER:
self.get_user_token()
else:
pass
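    # Emulates the interactive Azure AD sign-in flow with plain requests: it walks
    # the same page sequence a browser would, scraping the "Config={...}" state
    # blob from each response to carry flow tokens forward so that an id_token can
    # be obtained for the configured client and redirect URI.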
def get_user_token(self) -> None:
def _parse_response_config(response: requests.Response) -> Dict[str, Any]:
match = re.search(r'Config={(.*?)};', response.text, re.MULTILINE)
if not match:
raise ValueError(f'no config found in response from {response.url}')
return cast(Dict[str, Any], json.loads(f'{{{match.group(1)}}}'))
def update_state(state: Dict[str, str], response: requests.Response) -> Dict[str, Any]:
config = _parse_response_config(response)
for key in state.keys():
if key in config:
state[key] = str(config[key])
elif key in response.headers:
state[key] = str(response.headers[key])
else:
raise ValueError(f'unexpected response body from {response.url}: missing "{key}" in config')
return config
def generate_uuid() -> str:
uuid = uuid4().hex
return '{}-{}-{}-{}-{}'.format(
uuid[0:8],
uuid[8:12],
uuid[12:16],
uuid[16:20],
uuid[20:]
)
headers_ua: Dict[str, str] = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0'
}
auth_user_context = self._context['auth']['user']
start_time = time_perf_counter()
total_response_length = 0
exception: Optional[Exception] = None
try:
if self._context['auth']['url'] is None:
try:
[_, tenant] = auth_user_context['username'].rsplit('@', 1)
if tenant is None or len(tenant) < 1:
raise RuntimeError()
except Exception:
raise ValueError(f'auth.url was not set and could not find tenant part in {auth_user_context["username"]}')
self._context['auth']['url'] = f'https://login.microsoftonline.com/{tenant}/oauth2/authorize'
auth_url_parsed = urlparse(self._context['auth']['url'])
total_response_length = 0
with requests.Session() as client:
headers: Dict[str, str]
payload: Dict[str, Any]
data: Dict[str, Any]
state: Dict[str, str] = {
'hpgact': '',
'hpgid': '',
'sFT': '',
'sCtx': '',
'apiCanary': '',
'canary': '',
'correlationId': '',
'sessionId': '',
'x-ms-request-id': '',
'country': '',
}
# <!-- request 1: GET the authorize endpoint and scrape the initial state (flow token, canaries, session id) from the embedded config
client_id = self._context['auth']['client']['id']
client_request_id = generate_uuid()
username_lowercase = auth_user_context['username'].lower()
redirect_uri_parsed = urlparse(auth_user_context['redirect_uri'])
if len(redirect_uri_parsed.netloc) == 0:
redirect_uri = f"{self._context['host']}{auth_user_context['redirect_uri']}"
else:
redirect_uri = auth_user_context['redirect_uri']
params: Dict[str, List[str]] = {
'response_type': ['id_token'],
'client_id': [client_id],
'redirect_uri': [redirect_uri],
'state': [generate_uuid()],
'client-request-id': [client_request_id],
'x-client-SKU': ['Js'],
'x-client-Ver': ['1.0.18'],
'nonce': [generate_uuid()],
}
headers = {
'Host': str(auth_url_parsed.netloc),
**headers_ua,
}
response = client.get(cast(str, self._context['auth']['url']), headers=headers, params=params)
logger.debug(f'user auth request 1: {response.url} ({response.status_code})')
total_response_length += int(response.headers.get('content-length', '0'))
if response.status_code != 200:
raise RuntimeError(f'user auth request 1: {response.url} had unexpected status code {response.status_code}')
referer = response.url
config = update_state(state, response)
# // request 1 -->
# <!-- request 2: POST the username to the GetCredentialType endpoint to validate the account
url_parsed = urlparse(config['urlGetCredentialType'])
params = parse_qs(url_parsed.query)
url = f'{url_parsed.scheme}://{url_parsed.netloc}{url_parsed.path}'
params['mkt'] = ['sv-SE']
headers = {
'Accept': 'application/json',
'Host': str(auth_url_parsed.netloc),
'ContentType': 'application/json; charset=UTF-8',
'canary': state['apiCanary'],
'client-request-id': client_request_id,
'hpgact': state['hpgact'],
'hpgid': state['hpgid'],
'hpgrequestid': state['sessionId'],
**headers_ua,
}
payload = {
'username': username_lowercase,
'isOtherIdpSupported': True,
'checkPhones': False,
'isRemoteNGCSupported': True,
'isCookieBannerShown': False,
'isFidoSupported': True,
'originalRequest': state['sCtx'],
'country': state['country'],
'forceotclogin': False,
'isExternalFederationDisallowed': False,
'isRemoteConnectSupported': False,
'federationFlags': 0,
'isSignup': False,
'flowToken': state['sFT'],
'isAccessPassSupported': True,
}
response = client.post(url, headers=headers, params=params, json=payload)
total_response_length += int(response.headers.get('content-length', '0'))
logger.debug(f'user auth request 2: {response.url} ({response.status_code})')
if response.status_code != 200:
raise RuntimeError(f'user auth request 2: {response.url} had unexpected status code {response.status_code}')
data = cast(Dict[str, Any], json.loads(response.text))
if 'error' in data:
error = data['error']
raise RuntimeError(f'error response from {url}: code={error["code"]}, message={error["message"]}')
state['apiCanary'] = data['apiCanary']
assert state['sFT'] == data['FlowToken'], 'flow token between user auth request 1 and 2 differed'
# // request 2 -->
# <!-- request 3: POST username and password to the login form (urlPost) and parse the returned config
assert config['urlPost'].startswith('https://'), f"response from {response.url} contained unexpected value '{config['urlPost']}'"
url = config['urlPost']
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': str(auth_url_parsed.netloc),
'Referer': referer,
**headers_ua,
}
payload = {
'i13': '0',
'login': username_lowercase,
'loginfmt': username_lowercase,
'type': '11',
'LoginOptions': '3',
'lrt': '',
'lrtPartition': '',
'hisRegion': '',
'hisScaleUnit': '',
'passwd': auth_user_context['password'],
'ps': '2', # postedLoginStateViewId
'psRNGCDefaultType': '',
'psRNGCEntropy': '',
'psRNGCSLK': '',
'canary': state['canary'],
'ctx': state['sCtx'],
'hpgrequestid': state['sessionId'],
'flowToken': state['sFT'],
'PPSX': '',
'NewUser': '1',
'FoundMSAs': '',
'fspost': '0',
'i21': '0', # wasLearnMoreShown
'CookieDisclosure': '0',
'IsFidoSupported': '1',
'isSignupPost': '0',
'i19': '16369', # time on page
}
response = client.post(url, headers=headers, data=payload)
total_response_length += int(response.headers.get('content-length', '0'))
logger.debug(f'user auth request 3: {response.url} ({response.status_code})')
if response.status_code != 200:
raise RuntimeError(f'user auth request 3: {response.url} had unexpected status code {response.status_code}')
config = _parse_response_config(response)
exception_message = config.get('strServiceExceptionMessage', None)
if exception_message is not None and len(exception_message.strip()) > 0:
logger.error(exception_message)
raise RuntimeError(exception_message)
user_proofs = config.get('arrUserProofs', [])
if len(user_proofs) > 0:
user_proof = user_proofs[0]
error_message = f'{username_lowercase} requires MFA for login: {user_proof["authMethodId"]} = {user_proof["display"]}'
logger.error(error_message)
raise RuntimeError(error_message)
# update state
state['sessionId'] = config['sessionId']
state['sFT'] = config['sFT']
# // request 3 -->
# <!-- request 4: POST the follow-up form from request 3; expect a 302 redirect back to redirect_uri with the token in the Location header
assert not config['urlPost'].startswith('https://'), f"unexpected response from {response.url}, incorrect username and/or password?"
url = f'{str(auth_url_parsed.scheme)}://{str(auth_url_parsed.netloc)}{config["urlPost"]}'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': str(auth_url_parsed.netloc),
'Referer': referer,
**headers_ua,
}
payload = {
'LoginOptions': '3',
'type': '28',
'ctx': state['sCtx'],
'hpgrequestid': state['sessionId'],
'flowToken': state['sFT'],
'canary': state['canary'],
'i2': '',
'i17': '',
'i18': '',
'i19': '1337',
}
response = client.post(url, headers=headers, data=payload, allow_redirects=False)
total_response_length += int(response.headers.get('content-length', '0'))
logger.debug(f'user auth request 4: {response.url} ({response.status_code})')
if response.status_code != 302:
raise RuntimeError(f'user auth request 4: {response.url} had unexpected status code {response.status_code}')
assert 'Location' in response.headers, f'Location header was not found in response from {response.url}'
token_url = response.headers['Location']
assert token_url.startswith(f'{redirect_uri}'), f'unexpected redirect URI, got {token_url} but expected | |
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for Anal Active (%s)" % str(jobs))
# kill too long pending jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'pending'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Pending (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
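# Note: the "kill in batches of 100" pattern above is repeated several times below.
# A minimal sketch of a helper that could factor it out; the name is hypothetical and
# it assumes the script's existing Client and _logger objects:
def _kill_in_chunks(panda_ids, code, chunk_size=100, **kwargs):
    # issue Client.killJobs in fixed-size batches so each call stays small
    for i in range(0, len(panda_ids), chunk_size):
        chunk = panda_ids[i:i + chunk_size]
        _logger.debug("killJobs chunk (%s)" % str(chunk))
        Client.killJobs(chunk, code, **kwargs)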
# kick waiting ES merge jobs which were generated from fake co-jumbo
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':esMerge'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID,computingSite FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND eventService=:esMerge ORDER BY jediTaskID "
status,res = taskBuffer.querySQLS(sql, varMap)
jobsMap = {}
if res is not None:
for id,site in res:
if site not in jobsMap:
jobsMap[site] = []
jobsMap[site].append(id)
# kick
if len(jobsMap):
for site in jobsMap:
jobs = jobsMap[site]
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("kick waiting ES merge (%s)" % str(jobs[iJob:iJob+nJob]))
Client.reassignJobs(jobs[iJob:iJob+nJob], )
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND (eventService IS NULL OR eventService<>:coJumbo) "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Waiting (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
# kill too long running ES jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esJob'] = EventServiceUtils.esJobFlagNumber
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService IN (:esJob,:coJumbo) AND currentPriority>=900 "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2, keepUnmerged=True, jobSubStatus='es_toolong')
iJob += nJob
# kill too long running ES merge jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esMergeJob'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService=:esMergeJob "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES merge jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2)
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE ((creationTime<:timeLimit AND (eventService IS NULL OR eventService<>:coJumbo)) "
sql += "OR modificationTime<:timeLimit) "
varMap = {}
varMap[':timeLimit'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,4)
_logger.debug("killJobs in jobsWaiting (%s)" % str(jobs))
# rebrokerage
_logger.debug("Rebrokerage start")
# get timeout value
timeoutVal = taskBuffer.getConfigValue('rebroker','ANALY_TIMEOUT')
if timeoutVal is None:
timeoutVal = 12
_logger.debug("timeout value : {0}h".format(timeoutVal))
try:
normalTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeoutVal)
sortTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
sql = "WITH p AS ("\
"SELECT MIN(PandaID) PandaID,jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType,workingGroup "\
"FROM ATLAS_PANDA.jobsActive4 "\
"WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "\
"AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3) "\
"AND jobsetID IS NOT NULL AND lockedBy=:lockedBy "\
"GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType,workingGroup "\
") "\
"SELECT /*+ INDEX (s JOBS_STATUSLOG_PANDAID_IDX) */ "\
"p.jobDefinitionID,p.prodUserName,p.prodUserID,p.computingSite,s.modificationTime,p.jediTaskID,p.processingType,p.workingGroup " \
"FROM p, ATLAS_PANDA.jobs_statuslog s "\
"WHERE s.PandaID=p.PandaID AND s.jobStatus=:s_jobStatus AND s.modificationTime<:modificationTime "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'dummy'
varMap[':jobStatus3'] = 'starting'
varMap[':s_jobStatus'] = 'activated'
# get jobs older than threshold
ret,res = taskBuffer.querySQLS(sql, varMap)
resList = []
keyList = set()
if res is not None:
for tmpItem in res:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType,workingGroup\
= tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
keyList.add(tmpKey)
resList.append(tmpItem)
# get stalled assigned job
sqlA = "SELECT jobDefinitionID,prodUserName,prodUserID,computingSite,MAX(creationTime),jediTaskID,processingType,workingGroup "
sqlA += "FROM ATLAS_PANDA.jobsDefined4 "
sqlA += "WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) AND jobStatus IN (:jobStatus1,:jobStatus2) "
sqlA += "AND creationTime<:modificationTime AND lockedBy=:lockedBy "
sqlA += "GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType,workingGroup "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'assigned'
varMap[':jobStatus2'] = 'defined'
retA,resA = taskBuffer.querySQLS(sqlA, varMap)
if resA is not None:
for tmpItem in resA:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType,workingGroup\
= tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
if tmpKey not in keyList:
keyList.add(tmpKey)
resList.append(tmpItem)
# sql to check recent activity
sql = "SELECT PandaID,stateChangeTime,jobStatus FROM %s "
sql += "WHERE prodUserName=:prodUserName AND jobDefinitionID=:jobDefinitionID "
sql += "AND computingSite=:computingSite AND jediTaskID=:jediTaskID "
sql += "AND jobStatus NOT IN (:jobStatus1,:jobStatus2,:jobStatus3) "
sql += "AND stateChangeTime>:modificationTime "
sql += "AND rownum <= 1"
# sql to get associated jobs with jediTaskID
sqlJJ = "SELECT PandaID FROM %s "
sqlJJ += "WHERE jediTaskID=:jediTaskID AND jobStatus IN (:jobS1,:jobS2,:jobS3,:jobS4,:jobS5) "
sqlJJ += "AND jobDefinitionID=:jobDefID AND computingSite=:computingSite "
timeoutMap = {}
if resList != []:
recentRuntimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# loop over all user/jobID combinations
iComb = 0
nComb = len(resList)
_logger.debug("total combinations = %s" % nComb)
for jobDefinitionID,prodUserName,prodUserID,computingSite,maxModificationTime,jediTaskID,\
processingType, workingGroup in resList:
# check if jobs with the jobID have run recently
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
varMap[':jobDefinitionID'] = jobDefinitionID
varMap[':modificationTime'] = recentRuntimeLimit
varMap[':jobStatus1'] = 'closed'
varMap[':jobStatus2'] = 'failed'
varMap[':jobStatus3'] = 'starting'
_logger.debug(" rebro:%s/%s:ID=%s:%s jediTaskID=%s site=%s" % (iComb, nComb, jobDefinitionID,
prodUserName, jediTaskID,
computingSite))
iComb += 1
hasRecentJobs = False
# check site
if not siteMapper.checkSite(computingSite):
_logger.debug(" -> skip unknown site=%s" % computingSite)
continue
# check site status
tmpSiteStatus = siteMapper.getSite(computingSite).status
if tmpSiteStatus not in ['offline','test']:
if workingGroup:
if workingGroup not in timeoutMap:
tmp_timeoutVal = taskBuffer.getConfigValue('rebroker',
'ANALY_TIMEOUT_{}'.format(workingGroup))
if tmp_timeoutVal:
timeoutMap[workingGroup] = datetime.datetime.utcnow() - \
datetime.timedelta(hours=tmp_timeoutVal)
else:
timeoutMap[workingGroup] = normalTimeLimit
tmp_normalTimeLimit = timeoutMap[workingGroup]
else:
tmp_normalTimeLimit = normalTimeLimit
# use normal time limit for normal site status
if maxModificationTime > tmp_normalTimeLimit:
_logger.debug(" -> skip wait for normal timelimit=%s<maxModTime=%s" % (tmp_normalTimeLimit,
maxModificationTime))
continue
for tableName in ['ATLAS_PANDA.jobsActive4','ATLAS_PANDA.jobsArchived4']:
retU,resU = taskBuffer.querySQLS(sql % tableName, varMap)
if resU is None:
# database error
raise RuntimeError("failed to check modTime")
if resU != []:
# found recent jobs
hasRecentJobs = True
_logger.debug(" -> skip due to recent activity %s to %s at %s" % (resU[0][0],
resU[0][2],
resU[0][1]))
break
else:
_logger.debug(" -> immediate rebro due to site status=%s" % tmpSiteStatus)
if hasRecentJobs:
# skip since some jobs have run recently
continue
else:
if jediTaskID is None:
_logger.debug(" -> rebro for normal task : no action")
else:
_logger.debug(" -> rebro for JEDI task")
killJobs = []
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':jobDefID'] = jobDefinitionID
varMap[':computingSite'] = computingSite
varMap[':jobS1'] = 'defined'
varMap[':jobS2'] = 'assigned'
varMap[':jobS3'] = 'activated'
varMap[':jobS4'] = 'dummy'
varMap[':jobS5'] = 'starting'
for tableName in ['ATLAS_PANDA.jobsDefined4','ATLAS_PANDA.jobsActive4']:
retJJ,resJJ = taskBuffer.querySQLS(sqlJJ % tableName, varMap)
for tmpPandaID, in resJJ:
killJobs.append(tmpPandaID)
# reverse sort to kill buildJob in the end
killJobs.sort()
killJobs.reverse()
# kill to reassign
taskBuffer.killJobs(killJobs,'JEDI','51',True)
except Exception as e:
_logger.error("rebrokerage failed with {0} : {1}".format(str(e), traceback.format_exc()))
# kill too long running jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=21)
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
# set tobekill
_logger.debug('killJobs for Running (%s)' % jobs[iJob:iJob+nJob])
Client.killJobs(jobs[iJob:iJob+nJob],2)
# run watcher
for id in jobs[iJob:iJob+nJob]:
thr = Watcher(taskBuffer,id,single=True,sitemapper=siteMapper,sleepTime=60*24*21)
thr.start()
thr.join()
time.sleep(1)
iJob += nJob
time.sleep(10)
# kill too long waiting ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=5)
varMap = {}
varMap[':prodSourceLabel'] = 'ddm'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM | |
"D312",
"D314",
"D315",
"D316",
"D317",
"D318",
"D319",
"D321",
"D323",
"D324",
"D325",
"D327",
"D328",
"D329",
"D330",
"D331",
"D332",
"D333",
"D334",
"D338",
"D339",
"D341",
"D344",
"D345",
"D346",
"D347",
"D348",
"D350",
"D351",
"D352",
"D355",
"D356",
"D357",
"D358",
"D360",
"D361",
"D364",
"D365",
"D366",
"D367",
"D371",
"D372",
"D373",
"D374",
"D376",
"D377",
"D379",
"D380",
"D383",
"D384",
"D385",
"D386",
"D388",
"D390",
"D391",
"D392",
"D394",
"D395",
"D398",
"D399",
"D401",
"D402",
"D403",
"D406",
"D407",
"D408",
"D410",
"D411",
"D412",
"D414",
"D415",
"D416",
"D419",
"D420",
"D421",
"D422",
"D423",
"D424",
"D426",
"D428",
"D429",
"D430",
"D431",
"D433",
"D434",
"D436",
"D440",
"D441",
"D442",
"D443",
"D444",
"D445",
"D447",
"D450",
"D451",
"D452",
"D453",
"D454",
"D455",
"D456",
"D458",
"D459",
"D461",
"D462",
"D463",
"D464",
"D465",
"D467",
"D468",
"D469",
"D470",
"D471",
"D472",
"D473",
"D474",
"D475",
"D476",
"D477",
"D480",
"D482",
"D483",
"D484",
"D486",
"D487",
"D488",
"D489",
"D490",
"D491",
"D492",
"D493",
"D494",
"D495",
"D496",
"D497",
"D499",
"D501",
"D502",
"D503",
"D504",
"D505",
"D508",
"D509",
"D510",
"D511",
"D512",
"D513",
"D514",
"D518",
"D520",
"D522",
"D523",
"D524",
"D526",
"D527",
"D528",
"D530",
"D531",
"D532",
"D537",
"D538",
"D539",
"D540",
"D541",
"D542",
"D543",
"D544",
"D545",
"D546",
"D547",
"D548",
"D549",
"D550",
"D551",
"D552",
"D553",
"D554",
"D555",
"D557",
"D559",
"D560",
"D561",
"D562",
"D564",
"D565",
"D566",
"D567",
"D568",
"D569",
"D570",
"D571",
"D573",
"D574",
"D575",
"D576",
"D577",
"D578",
"D579",
"D582",
"D585",
"D586",
"D587",
"D588",
"D589",
"D590",
"D591",
"D592",
"D593",
"D594",
"D595",
"D596",
"D597",
"D599",
"D600",
"D604",
"D605",
"D606",
"D607",
"D608",
"D611",
"D612",
"D613",
"D614",
"D615",
"D617",
"D619",
"D621",
"D622",
"D623",
"D624",
"D628",
"D629",
"D630",
"D634",
"D635",
"D636",
"D637",
"D638",
"D639",
"D640",
"D641",
"D643",
"D644",
"D645",
"D646",
"D649",
"D650",
"D651",
"D652",
"D653",
"D654",
"D655",
"D656",
"D660",
"D661",
"D662",
"D665",
"D666",
"D667",
"D668",
"D670",
"D671",
"D672",
"D673",
"D674",
"D675",
"D676",
"D677",
"D678",
"D679",
"D680",
"D681",
"D682",
"D683",
"D684",
"D685",
"D686",
"D688",
"D689",
"D690",
"D691",
"D693",
"D694",
"D695",
"D696",
"D697",
"D700",
"D701",
"D702",
"D703",
"D704",
"D705",
"D706",
"D707",
"D708",
"D709",
"D710",
"D711",
"D712",
"D714",
"D715",
"D717",
"D718",
"D719",
"D720",
"D725",
"D727",
"D728",
"D730",
"D731",
"D732",
"D733",
"D734",
"D735",
"D736",
"D737",
"D738",
"D740",
"D741",
"D742",
"D744",
"D745",
"D746",
"D748",
"D749",
"D750",
"D751",
"D752",
"D754",
"D755",
"D756",
"D757",
"D758",
"D759",
"D760",
"D761",
"D762",
"D763",
"D764",
"D765",
"D766",
"D767",
"D768",
"D769",
"D770",
"D771",
"D773",
"D774",
"D775",
"D776",
"D777",
"D780",
"D781",
"D782",
"D783",
"D784",
"D785",
"D786",
"D787",
"D788",
"D789",
"D790",
"D791",
"D793",
"D794",
"D796",
"D797",
"D798",
"D799",
"D801",
"D802",
"D803",
"D804",
"D805",
"D807",
"D808",
"D810",
"D811",
"D812",
"D813",
"D814",
"D815",
"D817",
"D818",
"D819",
"D821",
"D823",
"D824",
"D825",
"D826",
"D827",
"D828",
"D829",
"D830",
"D832",
"D834",
"D835",
"D836",
"D839",
"D841",
"D842",
"D843",
"D844",
"D845",
"D847",
"D848",
"D849",
"D850",
"D851",
"D852",
"D853",
"D854",
"D855",
"D856",
"D858",
"D859",
"D860",
"D861",
"D862",
"D863",
"D864",
"D865",
"D867",
"D868",
"D869",
"D870",
"D871",
"D872",
"D873",
"D874",
"D875",
"D876",
"D878",
"D879",
"D881",
"D882",
"D883",
"D884",
"D885",
"D886",
"D888",
"D889",
"D890",
"D891",
"D892",
"D894",
"D895",
"D896",
"D897",
"D898",
"D899",
"D901",
"D902",
"D903",
"D905",
"D906",
"D907",
"D909",
"D910",
"D911",
"D912",
"D913",
"D915",
"D917",
"D918",
"D920",
"D921",
"D923",
"D924",
"D925",
"D926",
"D927",
"D928",
"D930",
"D931",
"D932",
"D933",
"D934",
"D935",
"D938",
"D940",
"D942",
"D943",
"D944",
"D945",
"D946",
"D947",
"D948",
"D949",
"D951",
"D952",
"D956",
"D957",
"D958",
"D959",
"D960",
"D961",
"D962",
"D963",
"D964",
"D965",
"D966",
"D967",
"D968",
"D969",
"D970",
"D971",
"D972",
"D974",
"D975",
"D976",
"D977",
"D978",
"D980",
"D981",
"D982",
"D983",
"D984",
"D987",
"D988",
"D990",
"D993",
"D994",
"D995",
"D996",
"D997",
"D998",
"D999",
"E001",
"E003",
"E004",
"E006",
"E007",
"E008",
"E009",
"E010",
"E011",
"E012",
"E013",
"E014",
"E015",
"E016",
"E017",
"E019",
"E020",
"E021",
"E022",
"E023",
"E024",
"E025",
"E026",
"E027",
"E028",
"E029",
"E030",
"E031",
"E033",
"E034",
"E036",
"E037",
"E038",
"E039",
"E040",
"E041",
"E043",
"E044",
"E045",
"E047",
"E048",
"E049",
"E050",
"E052",
"E053",
"E054",
"E055",
"E056",
"E057",
"E058",
"E060",
"E061",
"E062",
"E063",
"E064",
"E065",
"E066",
"E067",
"E068",
"E069",
"E070",
"E071",
"E072",
"E074",
"E078",
"E079",
"E081",
"E082",
"E083",
"E084",
"E085",
"E086",
"E087",
"E088",
"E089",
"E090",
"E091",
"E092",
"E093",
"E094",
"E096",
"E098",
"E100",
"E101",
"E102",
"E103",
"E104",
"E106",
"E107",
"E109",
"E111",
"E113",
"E114",
"E115",
"E116",
"E118",
"E120",
"E122",
"E124",
"E125",
"E126",
"E127",
"E128",
"E130",
"E131",
"E132",
"E133",
"E134",
"E136",
"E139",
"E141",
"E142",
"E143",
"E144",
"E145",
"E146",
"E147",
"E148",
"E149",
"E152",
"E153",
"E154",
"E155",
"E156",
"E158",
"E159",
"E160",
"E161",
"E163",
"E164",
"E165",
"E167",
"E168",
"E169",
"E170",
"E171",
"E172",
"E173",
"E177",
"E178",
"E179",
"E180",
"E182",
"E184",
"E185",
"E187",
"E188",
"E189",
"E191",
"E192",
"E193",
"E195",
"E196",
"E199",
"E200",
"E201",
"E202",
"E203",
"E204",
"E205",
"E206",
"E207",
"E208",
"E209",
"E210",
"E212",
"E213",
"E214",
"E215",
"E216",
"E217",
"E219",
"E221",
"E223",
"E224",
"E226",
"E227",
"E228",
"E229",
"E230",
"E232",
"E233",
"E234",
"E235",
"E236",
"E237",
"E238",
"E239",
"E240",
"E241",
"E242",
"E243",
"E244",
"E245",
"E246",
"E248",
"E249",
"E250",
"E251",
"E252",
"E253",
"E255",
"E256",
"E258",
"E259",
"E261",
"E263",
"E264",
"E265",
"E266",
"E269",
"E270",
"E271",
"E272",
"E273",
"E274",
"E280",
"E281",
"E282",
"E283",
"E284",
"E285",
"E287",
"E288",
"E289",
"E290",
"E291",
"E292",
"E295",
"E297",
"E299",
"E301",
"E304",
"E305",
"E306",
"E307",
"E309",
"E310",
"E311",
"E313",
"E314",
"E317",
"E320",
"E321",
"E323",
"E325",
"E326",
"E327",
"E328",
"E329",
"E330",
"E332",
"E333",
"E334",
"E335",
"E336",
"E337",
"E338",
"E339",
"E340",
"E341",
"E342",
"E343",
"E345",
"E346",
"E348",
"E349",
"E350",
"E351",
"E353",
"E354",
"E356",
"E358",
"E360",
"E363",
"E364",
"E365",
"E366",
"E367",
"E368",
"E369",
"E370",
"E371",
"E372",
"E373",
"E374",
"E375",
"E376",
"E377",
"E379",
"E380",
"E381",
"E382",
"E386",
"E387",
"E388",
"E389",
"E390",
"E391",
"E392",
"E393",
"E394",
"E395",
"E396",
"E397",
"E398",
"E400",
"E401",
"E402",
"E403",
"E405",
"E406",
"E407",
"E409",
"E410",
"E412",
"E413",
"E414",
"E415",
"E416",
"E417",
"E419",
"E420",
"E421",
"E422",
"E423",
"E424",
"E425",
"E426",
"E428",
"E429",
"E430",
"E431",
"E432",
"E433",
"E434",
"E435",
"E436",
"E437",
"E438",
"E439",
"E441",
"E443",
"E445",
"E447",
"E448",
"E450",
"E451",
"E454",
"E456",
"E457",
"E458",
"E459",
"E462",
"E463",
"E464",
"E465",
"E466",
"E467",
"E469",
"E470",
"E471",
"E472",
"E473",
"E474",
"E475",
"E476",
"E479",
"E480",
"E481",
"E482",
"E483",
"E484",
"E485",
"E486",
"E487",
"E488",
"E489",
"E490",
"E491",
"E492",
"E493",
"E494",
"E496",
"E497",
"E498",
"E500",
"E502",
"E504",
"E505",
"E506",
"E507",
"E509",
"E510",
"E512",
"E514",
"E515",
"E517",
"E518",
"E519",
"E520",
"E522",
"E523",
"E524",
"E526",
"E527",
"E528",
"E530",
"E531",
"E532",
"E535",
"E536",
"E537",
"E538",
"E539",
"E540",
"E541",
"E542",
"E543",
"E544",
"E546",
"E547",
"E548",
"E549",
"E550",
"E551",
"E553",
"E554",
"E555",
"E557",
"E558",
"E559",
"E560",
"E562",
"E563",
"E564",
"E565",
"E566",
"E569",
"E570",
"E571",
"E573",
"E574",
"E576",
"E578",
"E581",
"E583",
"E584",
"E587",
"E588",
"E589",
"E590",
"E591",
"E592",
"E593",
"E594",
"E596",
"E597",
"E599",
"E600",
"E602",
"E605",
"E606",
"E607",
"E608",
"E610",
"E611",
"E613",
"E615",
"E617",
"E618",
"E620",
"E621",
"E622",
"E623",
"E624",
"E625",
"E626",
"E627",
"E629",
"E630",
"E632",
"E633",
"E635",
"E638",
"E639",
"E640",
"E644",
"E645",
"E646",
"E647",
"E648",
"E649",
"E651",
"E652",
"E654",
"E655",
"E656",
"E659",
"E660",
"E661",
"E662",
"E664",
"E665",
"E666",
"E668",
"E669",
"E671",
"E673",
"E674",
"E675",
"E677",
"E678",
"E679",
"E680",
"E681",
"E682",
"E683",
"E684",
"E685",
"E687",
"E689",
"E690",
"E691",
"E692",
"E693",
"E694",
"E695",
"E698",
"E700",
"E704",
"E705",
"E706",
"E707",
"E708",
"E709",
"E711",
"E713",
"E714",
"E715",
"E716",
"E718",
"E719",
"E722",
"E723",
"E724",
"E726",
"E729",
"E730",
"E731",
"E734",
"E735",
"E736",
"E737",
"E738",
"E742",
"E743",
"E745",
"E746",
"E747",
"E748",
"E749",
"E750",
"E751",
"E752",
"E753",
"E754",
"E757",
"E758",
"E759",
"E760",
"E761",
"E763",
"E764",
"E767",
"E769",
"E770",
"E772",
"E773",
"E777",
"E778",
"E779",
"E780",
"E782",
"E783",
"E784",
"E785",
"E786",
"E787",
"E788",
"E789",
"E790",
"E791",
"E793",
"E794",
"E795",
"E798",
"E799",
"E800",
"E801",
"E803",
"E804",
"E805",
"E806",
"E807",
"E808",
"E809",
"E810",
"E811",
"E812",
"E813",
"E814",
"E815",
"E816",
"E817",
"E818",
"E819",
"E820",
"E821",
"E825",
"E829",
"E830",
"E833",
"E834",
"E835",
"E836",
"E837",
"E838",
"E839",
"E840",
"E841",
"E842",
"E843",
"E844",
"E847",
"E848",
"E850",
"E851",
"E852",
"E853",
"E854",
"E855",
"E856",
"E858",
"E859",
"E860",
"E862",
"E863",
"E864",
"E865",
"E868",
"E869",
"E870",
"E872",
"E873",
"E874",
"E875",
"E876",
"E877",
"E878",
"E879",
"E880",
"E882",
"E883",
"E884",
"E885",
"E887",
"E888",
"E889",
"E891",
"E892",
"E893",
"E894",
"E896",
"E897",
"E899",
"E900",
"E901",
"E902",
"E903",
"E904",
"E905",
"E906",
"E907",
"E908",
"E910",
"E911",
"E912",
"E914",
"E915",
"E917",
"E919",
"E921",
"E922",
"E923",
"E924",
"E925",
"E927",
"E928",
"E929",
"E930",
"E931",
"E932",
"E933",
"E934",
"E936",
"E938",
"E939",
"E940",
"E941",
"E944",
"E945",
"E946",
"E947",
"E949",
"E951",
"E952",
"E953",
"E954",
"E955",
"E956",
"E957",
"E958",
"E959",
"E960",
"E961",
"E962",
"E963",
"E965",
"E967",
"E968",
"E970",
"E971",
"E972",
"E973",
"E974",
"E975",
"E976",
"E977",
"E978",
"E979",
"E980",
"E981",
"E982",
"E983",
"E984",
"E986",
"E987",
"E988",
"E989",
"E990",
"E991",
"E992",
| |
# Convert Font Awesome, Fork Awesome, Google Material Design, Material Design Icons, Kenney Game, Ionicons and Fontaudio
# icon font parameters to C89, C++11 and C# compatible formats.
#
#------------------------------------------------------------------------------
# 1 - Source material
#
# 1.1 - Font Awesome
# 1.1.1 - version 4
# https://raw.githubusercontent.com/FortAwesome/Font-Awesome/fa-4/src/icons.yml
# https://github.com/FortAwesome/Font-Awesome/blob/fa-4/fonts/fontawesome-webfont.ttf
# 1.1.2 - version 5
# https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/metadata/icons.yml
# https://github.com/FortAwesome/Font-Awesome/blob/master/webfonts/fa-brands-400.ttf
# https://github.com/FortAwesome/Font-Awesome/blob/master/webfonts/fa-regular-400.ttf
# https://github.com/FortAwesome/Font-Awesome/blob/master/webfonts/fa-solid-900.ttf
# 1.1.3 - version 5 Pro
# Download files from https://fontawesome.com
# \fontawesome-pro-n.n.n-web\metadata\icons.yml
# \fontawesome-pro-n.n.n-web\webfonts\fa-brands-400.ttf
# \fontawesome-pro-n.n.n-web\webfonts\fa-light-300.ttf
# \fontawesome-pro-n.n.n-web\webfonts\fa-regular-400.ttf
# \fontawesome-pro-n.n.n-web\webfonts\fa-solid-900.ttf
# 1.2 - Fork Awesome
# https://raw.githubusercontent.com/ForkAwesome/Fork-Awesome/master/src/icons/icons.yml
# https://github.com/ForkAwesome/Fork-Awesome/blob/master/fonts/forkawesome-webfont.ttf
# 1.3 - Google Material Design
# https://raw.githubusercontent.com/google/material-design-icons/master/iconfont/codepoints
# https://github.com/google/material-design-icons/blob/master/iconfont/MaterialIcons-Regular.ttf
# 1.4 - Material Design Icons
# https://raw.githubusercontent.com/Templarian/MaterialDesign-Webfont/master/css/materialdesignicons.css
# https://github.com/Templarian/MaterialDesign-Webfont/blob/master/fonts/materialdesignicons-webfont.ttf
# 1.5 - Kenney Game icons
# https://raw.githubusercontent.com/nicodinh/kenney-icon-font/master/css/kenney-icons.css
# https://github.com/nicodinh/kenney-icon-font/blob/master/fonts/kenney-icon-font.ttf
# 1.6 - Ionicons
# https://raw.githubusercontent.com/ionic-team/ionicons/master/src/docs/archived/v2/css/ionicons.css
# https://github.com/ionic-team/ionicons/blob/master/src/docs/archived/v2/fonts/ionicons.ttf
# 1.7 - Fontaudio
# https://raw.githubusercontent.com/fefanto/fontaudio/master/font/fontaudio.css
# https://github.com/fefanto/fontaudio/blob/master/font/fontaudio.ttf
#------------------------------------------------------------------------------
# 2 - Data sample
#
# Font Awesome example:
# - input: music:
# changes:
# - '1'
# - 5.0.0
# label: Music
# search:
# terms:
# - note
# - sound
# styles:
# - solid
# unicode: f001
# - output C++11: #define ICON_FA_MUSIC u8"\uf001"
# - output C89: #define ICON_FA_MUSIC "\xEF\x80\x81"
# - output C#: public const string Music = "\uf001";
#
# All fonts have computed min and max unicode codepoints, ICON_MIN and ICON_MAX
# - output C89, C++11: #define ICON_MIN_FA 0xf000
# #define ICON_MAX_FA 0xf2e0
# Exception for Font Awesome brands: we use ICON_MIN_FAB and ICON_MAX_FAB
# to differentiate between brand and non-brand icons so they can be used together
# (the defines must be unique in C and C++).
# - output C#: public const int IconMin = 0xf000;
# public const int IconMax = 0xf2e0;
#
#------------------------------------------------------------------------------
# 3 - Script dependencies
#
# 3.1 - Fonts source material online
# 3.2 - Python 2.7 - https://www.python.org/download/releases/2.7/
# 3.3 - Requests - http://docs.python-requests.org/
# 3.4 - PyYAML - http://pyyaml.org/
#
#------------------------------------------------------------------------------
# 4 - References
#
# GitHub repository: https://github.com/juliettef/IconFontCppHeaders/
#
#------------------------------------------------------------------------------
import requests
import yaml
import os
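# A minimal sketch (not part of the original script; Python 3 assumed for brevity,
# while the script itself targets Python 2.7) illustrating the conversion described in
# "2 - Data sample" above: an icon entry such as ( 'music', 'f001' ) maps onto the
# C++11, C89 and C# output lines. The helper name is hypothetical.
def codepoint_to_outputs( icon_name, unicode_hex, abbr = 'FA' ):
    define = 'ICON_{0}_{1}'.format( abbr, icon_name.upper().replace( '-', '_' ))
    # C++11: \u escape inside a u8 (UTF-8) string literal
    cpp11 = '#define {0} u8"\\u{1}"'.format( define, unicode_hex )
    # C89: no \u escapes in narrow string literals, so emit the UTF-8 bytes as \x escapes
    utf8 = chr( int( unicode_hex, 16 )).encode( 'utf-8' )
    c89 = '#define {0} "{1}"'.format( define, ''.join( '\\x{0:02X}'.format( b ) for b in utf8 ))
    # C#: \u escape inside a const string
    csharp = 'public const string {0} = "\\u{1}";'.format( icon_name.title().replace( '-', '' ), unicode_hex )
    return cpp11, c89, csharp
# e.g. codepoint_to_outputs( 'music', 'f001' ) gives
# ( '#define ICON_FA_MUSIC u8"\uf001"', '#define ICON_FA_MUSIC "\xEF\x80\x81"', 'public const string Music = "\uf001";' )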
# Fonts
class Font:
font_name = '[ ERROR - missing font name ]'
font_abbr = '[ ERROR - missing font abbreviation ]'
font_minmax_abbr = '' # optional - use if min and max defines must be differentiated. See Font Awesome Brand for example.
font_data = '[ ERROR - missing font data file or url ]'
font_ttf = '[ ERROR - missing ttf file or url ]'
font_file_name_ttf = '[ ERROR - missing ttf file name ]'
@classmethod
def get_icons( cls, input ):
# intermediate representation of the fonts data, identify the min and max
print( '[ ERROR - missing implementation of class method get_icons for {!s} ]'.format( cls.font_name ))
icons_data = {}
icons_data.update({ 'font_min' : '[ ERROR - missing font min ]',
'font_max' : '[ ERROR - missing font max ]',
'icons' : '[ ERROR - missing list of pairs [ font icon name, code ]]' })
return icons_data
@classmethod
def get_intermediate_representation( cls ):
font_ir = {}
input_raw = ''
if 'http' in cls.font_data: # if url, download data
response = requests.get( cls.font_data, timeout = 2 )
if response.status_code == 200:
input_raw = response.content
print( 'Downloaded - ' + cls.font_name )
else:
raise Exception( 'Download failed - ' + cls.font_name )
else: # read data from file if present
if os.path.isfile( cls.font_data ):
with open( cls.font_data, 'r' ) as f:
input_raw = f.read()
print( 'File read - ' + cls.font_name )
else:
raise Exception( 'File ' + cls.font_data + ' missing - ' + cls.font_name )
if input_raw:
icons_data = cls.get_icons( input_raw )
font_ir.update( icons_data )
font_ir.update({ 'font_ttf' : cls.font_ttf,
'font_data' : cls.font_data,
'font_file_name_ttf' : cls.font_file_name_ttf,
'font_name' : cls.font_name,
'font_abbr' : cls.font_abbr,
'font_minmax_abbr' : cls.font_minmax_abbr })
print( 'Generated intermediate data - ' + cls.font_name )
return font_ir
class FontFA4( Font ): # legacy Font Awesome version 4
font_name = 'Font Awesome 4'
font_abbr = 'FA'
font_data = 'https://raw.githubusercontent.com/FortAwesome/Font-Awesome/fa-4/src/icons.yml'
font_ttf = 'https://github.com/FortAwesome/Font-Awesome/blob/fa-4/fonts/fontawesome-webfont.ttf'
font_file_name_ttf = [[ font_abbr, font_ttf[ font_ttf.rfind('/')+1: ]]]
@classmethod
def get_icons( self, input ):
icons_data = { }
data = yaml.safe_load(input)
font_min = 'ffff'
font_max = '0'
icons = []
for item in data[ 'icons' ]:
if item[ 'unicode' ] < font_min:
font_min = item[ 'unicode' ]
if item[ 'unicode' ] >= font_max:
font_max = item[ 'unicode' ]
icons.append([ item[ 'id' ], item[ 'unicode' ]])
icons_data.update({ 'font_min' : font_min,
'font_max' : font_max,
'icons' : icons })
return icons_data
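# Example of the resulting intermediate data for Font Awesome 4 (values as in the header
# comment above): { 'font_min': 'f000', 'font_max': 'f2e0', 'icons': [[ 'music', 'f001' ], ... ] }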
class FontFK( FontFA4 ): # Fork Awesome, based on Font Awesome 4
font_name = 'Fork Awesome'
font_abbr = 'FK'
font_data = 'https://raw.githubusercontent.com/ForkAwesome/Fork-Awesome/master/src/icons/icons.yml'
font_ttf = 'https://github.com/ForkAwesome/Fork-Awesome/blob/master/fonts/forkawesome-webfont.ttf'
font_file_name_ttf = [[ font_abbr, font_ttf[ font_ttf.rfind('/')+1: ]]]
class FontFA5( Font ): # Font Awesome version 5 - Regular and Solid styles
font_name = 'Font Awesome 5'
font_abbr = 'FA'
font_data = 'https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/metadata/icons.yml'
font_ttf = 'https://github.com/FortAwesome/Font-Awesome/blob/master/webfonts/fa-solid-900.ttf, ' +\
'https://github.com/FortAwesome/Font-Awesome/blob/master/webfonts/fa-regular-400.ttf, '
font_file_name_ttf = [[ 'FAR', 'fa-regular-400.ttf' ], [ 'FAS', 'fa-solid-900.ttf' ]]
font_fa_style = [ 'regular', 'solid' ]
@classmethod
def get_icons( self, input ):
icons_data = { }
data = yaml.safe_load(input)
if data:
font_min = 'ffff'
font_max = '0'
icons = []
for key in data:
item = data[ key ]
for style in item[ 'styles' ]:
if style in self.font_fa_style:
if [ key, item[ 'unicode' ]] not in icons:
if item[ 'unicode' ] < font_min:
font_min = item[ 'unicode' ]
if item[ 'unicode' ] >= font_max:
font_max = item[ 'unicode' ]
icons.append([ key, item[ 'unicode' ] ])
icons_data.update({ 'font_min':font_min, 'font_max':font_max, 'icons':icons })
return icons_data
class FontFA5Brands( FontFA5 ): # Font Awesome version 5 - Brand style
font_name = 'Font Awesome 5 Brands'
font_minmax_abbr = 'FAB'
font_ttf = 'https://github.com/FortAwesome/Font-Awesome/blob/master/webfonts/fa-brands-400.ttf'
font_file_name_ttf = [[ 'FAB', 'fa-brands-400.ttf' ]]
font_fa_style = [ 'brands' ]
class FontFA5Pro( FontFA5 ): # Font Awesome version 5 Pro - Light, Regular and Solid styles
font_name = 'Font Awesome 5 Pro'
font_data = 'icons.yml'
font_ttf = 'fa-light-300.ttf, fa-regular-400.ttf, fa-solid-900.ttf'
font_file_name_ttf = [[ 'FAL', 'fa-light-300.ttf' ], [ 'FAR', 'fa-regular-400.ttf' ], [ 'FAS', 'fa-solid-900.ttf' ]]
font_fa_style = [ 'light', 'regular', 'solid' ]
class FontFA5ProBrands( FontFA5 ): # Font Awesome version 5 Pro - Brand style
font_name = 'Font Awesome 5 Pro Brands'
font_minmax_abbr = 'FAB'
font_data = 'icons.yml'
font_ttf = 'fa-brands-400.ttf'
font_file_name_ttf = [[ 'FAB', 'fa-brands-400.ttf' ]]
font_fa_style = [ 'brands' ]
class FontMD( Font ): # Material Design
font_name = 'Material Design'
font_abbr = 'MD'
font_data = 'https://raw.githubusercontent.com/google/material-design-icons/master/iconfont/codepoints'
font_ttf = 'https://github.com/google/material-design-icons/blob/master/iconfont/MaterialIcons-Regular.ttf'
font_file_name_ttf = [[ font_abbr, font_ttf[ font_ttf.rfind('/')+1: ]]]
@classmethod
def get_icons( self, input ):
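# the codepoints file has one "<icon name> <hex codepoint>" pair per line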
icons_data = {}
lines = str.split( input, '\n' )
if lines:
font_min = 'ffff'
font_max = '0'
icons = []
for line in lines :
words = str.split(line)
if words and len( words ) >= 2:
if words[ 1 ] < font_min:
font_min = words[ 1 ]
if words[ 1 ] >= font_max:
font_max = words[ 1 ]
icons.append( words )
icons_data.update({ 'font_min' : font_min,
'font_max' : font_max,
'icons' : icons })
return icons_data
class FontMDI( Font ): # Material Design Icons
font_name = 'Material Design Icons'
font_abbr = 'MDI'
font_data = 'https://raw.githubusercontent.com/Templarian/MaterialDesign-Webfont/master/css/materialdesignicons.css'
font_ttf = 'https://github.com/Templarian/MaterialDesign-Webfont/blob/master/fonts/materialdesignicons-webfont.ttf'
font_file_name_ttf = [[ font_abbr, font_ttf[ font_ttf.rfind('/')+1: ]]]
@classmethod
def get_icons( self, input ):
icons_data = {}
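# keep only the per-icon rules: everything between the base ".mdi" rule and the ".mdi-18px" size/set helpers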
input_trimmed = input[ input.find( '-moz-osx-font-smoothing: grayscale;\n}\n\n' ) + len( '-moz-osx-font-smoothing: grayscale;\n}\n\n' ) : input.find( '.mdi-18px.mdi-set,' )]
lines = str.split( input_trimmed, '}\n\n' )
if lines:
font_min = 'ffff'
font_max = '0'
icons = []
for line in lines :
if '.mdi-' in line:
words = str.split(line)
if words and '.mdi-' in words[ 0 ]:
font_id = words[ 0 ].partition( '.mdi-' )[2].partition( '::before' )[0]
font_code = words[ 3 ].partition( '"\\' )[2].partition( '";' )[0].zfill(4)
if font_code < font_min:
font_min = font_code
if font_code >= font_max:
font_max = font_code
icons.append([ font_id, font_code ])
icons_data.update({ 'font_min' : font_min,
'font_max' : font_max,
'icons' : icons })
return icons_data
class FontKI( Font ): # Kenney Game icons
font_name = 'Kenney'
font_abbr = 'KI'
font_data = 'https://raw.githubusercontent.com/nicodinh/kenney-icon-font/master/css/kenney-icons.css'
font_ttf = 'https://github.com/nicodinh/kenney-icon-font/blob/master/fonts/kenney-icon-font.ttf'
font_file_name_ttf = [[ font_abbr, font_ttf[ font_ttf.rfind('/')+1: ]]]
@classmethod
def get_icons( self, input ):
icons_data = {}
lines = str.split( input, '\n' )
if lines:
font_min = 'ffff'
font_max = '0'
icons = []
for line in lines :
if '.ki-' in line:
words = str.split(line)
if words and '.ki-' in words[ 0 ]:
font_id = words[ 0 ].partition( '.ki-' )[2].partition( ':before' )[0]
font_code = words[ 2 ].partition( '"\\' )[2].partition( '";' )[0]
if font_code < font_min:
font_min = font_code
if font_code >= font_max:
font_max = font_code
icons.append([ font_id, font_code ])
icons_data.update({ |