code (string, 114-1.05M chars) | path (string, 3-312 chars) | quality_prob (float64, 0.5-0.99) | learning_prob (float64, 0.2-1) | filename (string, 3-168 chars) | kind (stringclasses: 1 value) |
---|---|---|---|---|---|
from typing import Tuple, List
from pddl.pddl import Domain, Problem
from sam_learner.sam_models.comparable_predicate import ComparablePredicate
from sam_learner.sam_models.parameter_binding import ParameterBinding
class StateLiteral:
"""Represent the connection between a lifted and a grounded predicate."""
fact: str
domain: Domain
def __init__(self, fact: str, domain: Domain):
self.fact = fact
self.domain = domain
@classmethod
def generate_problem_literal(cls, fact: str, domain: Domain, problem: Problem) -> Tuple[
ComparablePredicate, List[ParameterBinding]]:
"""Lift the grounded fact.
:param fact: the fact to lift.
:param domain: the domain containing all the data about the world.
:param problem: the problem object containing the information about the concrete objects.
:return: the mapping between the grounded fact and the lifted predicate.
"""
fact_data = fact.strip("()").split(" ")
predicate_name = fact_data[0]
predicate_objects = fact_data[1:]
domain_predicate: ComparablePredicate = domain.predicates[predicate_name]
bindings = []
for (parameter_name, _), object_name in zip(domain_predicate.signature, predicate_objects):
bindings.append(ParameterBinding(parameter_name,
(problem.objects[object_name],),
object_name))
return domain_predicate, bindings
@staticmethod
def ground(grounding_data: Tuple[ComparablePredicate, List[ParameterBinding]]) -> str:
"""ground lifted predicate data back to its fact form.
:param grounding_data: the data of the object to ground.
:return: the fact as a string form.
"""
predicate, bindings = grounding_data
return f"({predicate.name} [{str(binding for binding in bindings)}])"
class State:
"""Represent a state containing lifted predicates and their binding."""
facts: List[ComparablePredicate]
domain: Domain
def __init__(self, facts: List[ComparablePredicate], domain: Domain):
self.domain = domain
self.facts = facts
@classmethod
def generate_problem_state(cls, facts: frozenset, domain: Domain, problem: Problem) -> List[
Tuple[ComparablePredicate, List[ParameterBinding]]]:
"""Lift the grounded facts to and returns the lifted facts and their object binding."""
return [StateLiteral.generate_problem_literal(fact, domain, problem) for fact in facts]
def __str__(self):
return str(self.facts)
def __eq__(self, other):
return self.facts == other.facts
def ground_facts(self) -> frozenset:
"""Ground the facts to a form that the planner can use.
:returns the grounded facts as a frozen set.
"""
grounded_facts = set()
for fact in self.facts:
if len(fact.signature) == 0:
grounded_facts.add(f"({fact.name})")
else:
grounded_facts.add(f"({fact.name} {' '.join(item[0] for item in fact.signature)})")
return grounded_facts
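# Minimal sketch of the grounded-fact string convention that StateLiteral and State rely on
# (hypothetical fact string; no pddl Domain/Problem objects are constructed here).
def _example_fact_string_round_trip():
    fact = "(on block-a block-b)"
    # The parsing performed in StateLiteral.generate_problem_literal:
    fact_data = fact.strip("()").split(" ")
    predicate_name, object_names = fact_data[0], fact_data[1:]
    # The formatting performed in State.ground_facts for a predicate with parameters:
    assert f"({predicate_name} {' '.join(object_names)})" == fact
    return predicate_name, object_names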
| /sam_learner-2.1.9-py3-none-any.whl/sam_learner/sam_models/state.py | 0.921468 | 0.540742 | state.py | pypi |
import csv
import logging
import sys
from pathlib import Path
from typing import NoReturn, List, Dict, Any, Union
from pddl.parser import Parser
from pddl.pddl import Domain, Action
from sam_learner import SAMLearner, ESAMLearner
from sam_learner.core import DomainExporter
from sam_learner.core.trajectories_manager import TrajectorySerializationManager
from sam_learner.model_validation.action_precision_recall_calculator import PrecisionRecallCalculator
from sam_learner.model_validation.domain_comparison import validate_action_model
from sam_learner.sam_models.types import Trajectory
COLUMN_NAMES = [
"domain_name",
"num_trajectories",
"num_domain_actions",
"num_known_actions",
"learned_actions_length",
"domains_equal",
"avg_trajectories_length",
"learned_action_names",
"error"]
LEARNED_ACTIONS_STATS_COLUMNS = [
"domain_name",
"num_trajectories",
"num_trajectory_triplets",
"learned_action_name",
"num_triplets_action_appeared",
"num_triplets_with_duplicate_parameters",
"learned_preconditions",
"learned_add_effects",
"learned_delete_effects",
"ground_truth_preconditions",
"ground_truth_add_effects",
"ground_truth_delete_effects",
"preconditions_precision",
"add_effects_precision",
"delete_effects_precision",
"preconditions_recall",
"add_effects_recall",
"delete_effects_recall",
]
SAM_LEARNING = "SAMLearning"
ESAM_LEARNING = "ESAMLearning"
def validate_no_redundant_actions(known_actions: List[str],
learner: Union[SAMLearner, ESAMLearner]) -> NoReturn:
"""Validate that the initial state of the domain does not contain any redundant actions.
:param known_actions: the names of the actions that the agent already knows.
:param learner: the learning algorithm.
"""
if known_actions is None:
assert learner.learned_domain.actions == {}
else:
assert len(learner.learned_domain.actions) == len(known_actions)
class AMStatisticsExtractor:
"""Class that is used to extract statistics about the action model learner.
Attributes:
logger: the logger for the class.
working_dir_path: the path to the directory containing the domain and the relevant files.
expected_domain: the domain that contains all of the actions and the preconditions and
effects.
domain_exporter: the class that is able to export the domain to a PDDL file.
"""
logger: logging.Logger
workdir_path: str
expected_domain: Domain
domain_exporter: DomainExporter
expected_domain_file_name: str
trajectories_manager: TrajectorySerializationManager
precision_recall_calculator: PrecisionRecallCalculator
learning_algorithm: str
def __init__(self, working_dir_path: str, expected_domain_path: str,
learning_algorithm: str = SAM_LEARNING):
self.logger = logging.getLogger(__name__)
self.workdir_path = working_dir_path
self.expected_domain = Parser(expected_domain_path).parse_domain(read_from_file=True)
self.expected_domain_file_name = f"{Path(expected_domain_path).stem}.pddl"
self.domain_exporter = DomainExporter()
self.trajectories_manager = TrajectorySerializationManager(
workdir_path=Path(working_dir_path), domain_path=Path(expected_domain_path))
self.precision_recall_calculator = PrecisionRecallCalculator(self.expected_domain)
self.learning_algorithm = learning_algorithm
def create_trajectories(self) -> List[Trajectory]:
"""Create the trajectories for the learner.
:return: the trajectories needed to learn the action model.
"""
self.logger.info("Creating the trajectories for the learner to use.")
return self.trajectories_manager.create_trajectories()
def _understand_learner_type(self, known_actions_map: Dict[str, Action]) -> Union[SAMLearner, ESAMLearner]:
"""
:param known_actions_map:
:return:
"""
if self.learning_algorithm == SAM_LEARNING:
return SAMLearner(working_directory_path=self.workdir_path, known_actions=known_actions_map,
domain_file_name=self.expected_domain_file_name)
return ESAMLearner(working_directory_path=self.workdir_path, known_actions=known_actions_map,
domain_file_name=self.expected_domain_file_name)
def run_learner(
            self, complete_statistics: List[Dict[str, Any]], learned_actions_statistics: List[Dict[str, Any]],
known_actions: List[str]) -> NoReturn:
"""Runs the learner and accumulates the statistics from the run.
:param complete_statistics: the list of statistics that is to be accumulated.
:param learned_actions_statistics: the statistics objects containing information about the learned actions.
        :param known_actions: the actions that the agent already knows and thus do not need to be learned.
"""
available_trajectories = []
domain_trajectories = self.create_trajectories()
(Path(self.workdir_path) / "generated_domains").mkdir(exist_ok=True)
for trajectory in domain_trajectories:
statistics = {}
available_trajectories.append(trajectory)
known_actions_map = {name: self.expected_domain.actions[name] for name in known_actions}
learner = self._understand_learner_type(known_actions_map)
validate_no_redundant_actions(known_actions, learner)
self.logger.debug(
f"Trying to learn the action model with {len(available_trajectories)} trajectories.")
try:
learned_model = learner.learn_action_model(available_trajectories)
actions_observation_histogram = learner.get_actions_appearance_histogram()
actions_with_duplicated_params_histogram = learner.get_actions_with_duplicated_parameters_histogram()
self.write_statistics_data(available_trajectories, learned_model, statistics, known_actions)
self.precision_recall_calculator.write_learned_actions_statistics(
available_trajectories, learned_model, learned_actions_statistics, known_actions,
actions_observation_histogram, actions_with_duplicated_params_histogram)
self.domain_exporter.export_domain(
learned_model,
Path(f"{self.workdir_path}/generated_domains/learned_domain-"
f"{len(available_trajectories)}-trajectories.pddl"))
except ValueError as error:
self.write_learning_error(error, statistics)
available_trajectories.pop()
complete_statistics.append(statistics)
def write_learning_error(self, error: Exception, statistics: dict) -> NoReturn:
"""Write the appropriate statistics data in case that there is an error in the learning process.
:param error: the error that was received while trying to learn the action model.
:param statistics: the dictionary containing the statistics about the current run.
"""
self.logger.warning(error)
statistics["domain_name"] = self.expected_domain.name
statistics["error"] = error
def write_statistics_data(
self, available_trajectories: List[Trajectory], learned_model: Domain, statistics: dict,
known_actions: List[str]) -> NoReturn:
"""Write the statistics of the current execution of the code.
:param available_trajectories: the available trajectories for the current execution.
:param learned_model: the domain that was learned with the action model.
:param statistics: the dictionary that contains the statistics collected during the
entire program's execution.
:param known_actions: the actions that were known to the model before the learning process started.
:return:
"""
num_trajectories = len(available_trajectories)
statistics["domain_name"] = self.expected_domain.name
statistics["num_trajectories"] = num_trajectories
statistics["num_domain_actions"] = len(self.expected_domain.actions)
statistics["num_known_actions"] = len(known_actions)
statistics["learned_actions_length"] = len(learned_model.actions) - len(known_actions)
statistics["domains_equal"] = validate_action_model(learned_model, self.expected_domain)
statistics["avg_trajectories_length"] = sum([
len(trajec) for trajec in available_trajectories]) / num_trajectories
statistics["learned_action_names"] = "/".join([
action for action in learned_model.actions if action not in known_actions])
statistics["error"] = None
def extract_action_model_statistics(self, stats_file_path: str, actions_stats_file_path: str,
known_actions: List[str] = []) -> NoReturn:
"""Extract the statistics and saves them into a CSV file for future usage.
:param stats_file_path: the path to the file that will contain the statistics.
:param actions_stats_file_path: the path to the file containing data about the learned actions.
:param known_actions: the names of the actions that the agent already knows.
"""
domain_statistics = []
learned_actions_statistics = []
self.run_learner(domain_statistics, learned_actions_statistics, known_actions)
with open(stats_file_path, 'wt', newline='') as domain_stats_csv_file, \
open(actions_stats_file_path, 'wt', newline='') as actions_stats_csv_file:
domain_data_writer = csv.DictWriter(domain_stats_csv_file, fieldnames=COLUMN_NAMES)
actions_data_writer = csv.DictWriter(actions_stats_csv_file, fieldnames=LEARNED_ACTIONS_STATS_COLUMNS)
domain_data_writer.writeheader()
actions_data_writer.writeheader()
for data in domain_statistics:
domain_data_writer.writerow(data)
for data in learned_actions_statistics:
actions_data_writer.writerow(data)
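# Minimal sketch of how a statistics row is serialized (illustrative values only;
# csv.DictWriter fills columns that are missing from the row with empty strings).
def _example_write_statistics_row() -> str:
    import io
    buffer = io.StringIO()
    writer = csv.DictWriter(buffer, fieldnames=COLUMN_NAMES)
    writer.writeheader()
    writer.writerow({"domain_name": "blocksworld", "num_trajectories": 3, "error": None})
    return buffer.getvalue()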
if __name__ == '__main__':
try:
logging.basicConfig(level=logging.DEBUG)
args = sys.argv
workdir_path = args[1]
complete_domain_path = args[2]
statistics_output_path = args[3]
actions_statistics_output_path = args[4]
stats_extractor = AMStatisticsExtractor(workdir_path, complete_domain_path)
stats_extractor.extract_action_model_statistics(statistics_output_path, actions_statistics_output_path)
except Exception as e:
print(e)
| /sam_learner-2.1.9-py3-none-any.whl/sam_learner/model_validation/action_model_statistics_extractor.py | 0.677794 | 0.447883 | action_model_statistics_extractor.py | pypi |
import csv
import logging
import os
import re
import sys
from pathlib import Path
from typing import Optional, NoReturn, Dict, List, Any
from task import Operator
from sam_learner.core import TrajectoryGenerator
from sam_learner.sam_models import Trajectory
from .fast_downward_solver import FastDownwardSolver
PLAN_SEARCH_TIME_LIMIT = int(os.environ.get('SEARCH_TIME', "300"))
VALID_STATUS = "valid plan!"
INVALID_PLAN_STATUS = "invalid plan!"
FAILED_ACTION_NAME_RE = re.compile(r".* - (\w+-?\w*)")
NOT_POSSIBLE_GROUNDED_ACTION = re.compile(r"KeyError: - '(\w+-?\w*)'")
TIMEOUT_WHILE_SOLVING_ERROR = "Solver could not solve the problem within the given time " \
f"limitation ({PLAN_SEARCH_TIME_LIMIT} seconds)."
PROBLEM_NOT_SOLVABLE_ERROR = "Solver could not find a solution to the given problem."
POTENTIAL_PLANNING_ERRORS = [TIMEOUT_WHILE_SOLVING_ERROR, PROBLEM_NOT_SOLVABLE_ERROR]
class ValidationResult:
"""Representation of the plan validation result."""
status: str
error: Optional[str]
failed_action_name: Optional[str]
def __init__(self, status: str, error: Optional[str] = None, failed_action_name: Optional[str] = None):
self.status = status
self.error = error
self.failed_action_name = failed_action_name
def extract_failed_action_name(error_message: str) -> str:
    """Extract the failed action name from the error message.
    :param error_message: the error message raised when trying to create a trajectory.
:return: the name of the failed action.
"""
match = FAILED_ACTION_NAME_RE.match(error_message)
return match.group(1)
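# Minimal sketch of the extraction above, applied to a hypothetical error message
# shaped like "<description> - <failed-action-name>".
def _example_extract_failed_action_name() -> str:
    return extract_failed_action_name("The next action is not applicable - pick-up")  # -> "pick-up"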
class DomainValidator:
"""Class that validates the correctness of the domains that were learned by the action model learner.
The validation process works as follows:
* Using the learned domain we create plans for the test set problems.
    * For each of the created plans, we validate its correctness against the expected (complete) domain.
* We need to validate that the plan that was created is safe, i.e. every action is applicable according to the
preconditions and effects allowed by the original domain.
Attributes:
expected_domain_path: the path to the complete domain containing all of the actions and their preconditions and effects.
"""
STATISTICS_COLUMNS_NAMES = ["domain_name", "generated_domain_file_name", "problem_file_name", "plan_generated",
"plan_validation_status", "validation_error"]
logger: logging.Logger
expected_domain_path: str
fast_downward_solver: FastDownwardSolver
def __init__(self, expected_domain_path: str):
self.logger = logging.getLogger(__name__)
self.expected_domain_path = expected_domain_path
self.fast_downward_solver = FastDownwardSolver()
def write_plan_file(self, plan_actions: List[Operator], solution_path: Path) -> NoReturn:
"""Write the plan that was created using the learned domain file.
:param plan_actions: the actions that were executed in the plan.
:param solution_path: the path to the file to export the plan to.
"""
self.logger.info(f"Writing the plan file - {solution_path}")
with open(solution_path, "wt") as solution_file:
solution_file.writelines([f"{operator.name}\n" for operator in plan_actions])
def generate_test_set_plans(
self, tested_domain_file_path: str, test_set_directory: str) -> Dict[str, Optional[str]]:
"""Generate plans for the given domain file and the given test set directory.
:param tested_domain_file_path: the path to the tested domain in which we want to create actions for.
:param test_set_directory: the path to the directory containing the test set problems.
:return: dictionary containing a mapping between the problem paths and their corresponding solutions.
"""
successful_plans = {}
self.logger.info("Solving the test set problems using the learned domain!")
fast_downward_execution_script_path = Path(test_set_directory) / "solver_execution_script.sh"
self.fast_downward_solver.write_batch_and_execute_solver(
output_file_path=fast_downward_execution_script_path,
test_set_directory_path=Path(test_set_directory),
problems_regex="*.pddl",
domain_file_path=tested_domain_file_path
)
for problem_file_path in Path(test_set_directory).glob("*.pddl"):
solution_file_path = problem_file_path.parent / f"{problem_file_path.stem}_plan.solution"
if solution_file_path.exists():
successful_plans[str(problem_file_path)] = str(solution_file_path)
else:
successful_plans[str(problem_file_path)] = PROBLEM_NOT_SOLVABLE_ERROR
return successful_plans
def validate_plan(self, problem_path: str, plan_path: str) -> ValidationResult:
"""Validate the correctness of the learned domain against the domain learned using the learner algorithm.
:param problem_path: the to the test set problem.
:param plan_path: the path to the plan generated by a solver.
:return: an object representing the validation status of the plan.
"""
self.logger.info(f"Validating the plan generated for the problem - {problem_path}")
trajectory_generator = TrajectoryGenerator(self.expected_domain_path, problem_path)
try:
trajectory_generator.generate_trajectory(plan_path)
return ValidationResult(VALID_STATUS)
except AssertionError as error:
self.logger.warning(f"The plan received is not applicable! {error}")
# Extracting the failed action name
error_message = str(error)
failed_action_name = extract_failed_action_name(error_message)
return ValidationResult(INVALID_PLAN_STATUS, str(error), failed_action_name)
except KeyError as error:
grounded_action = str(error)
action_name = grounded_action.strip("'()").split(" ")[0]
self.logger.warning(f"The plan received is not applicable! "
f"The operation {grounded_action} is not applicable! The failed action - {action_name}")
return ValidationResult(INVALID_PLAN_STATUS, grounded_action, action_name)
def extract_applicable_plan_components(self, problem_path: str, plan_path: str) -> Trajectory:
"""Extract the partial trajectory from the failed plan.
:param problem_path: the path to the problem file.
:param plan_path: the path to the failed plan.
:return: the partial applicable trajectory.
"""
self.logger.info(f"Extracting the applicable trajectory from the plan file - {plan_path}")
trajectory_generator = TrajectoryGenerator(self.expected_domain_path, problem_path)
return trajectory_generator.generate_trajectory(plan_path, should_return_partial_trajectory=True)
def write_statistics(self, statistics: List[Dict[str, Any]], output_statistics_path: str) -> NoReturn:
"""Writes the statistics of the learned model into a CSV file.
:param statistics: the object containing the statistics about the learning process.
:param output_statistics_path: the path to the output file.
"""
with open(output_statistics_path, 'wt', newline='') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=self.STATISTICS_COLUMNS_NAMES)
writer.writeheader()
for data in statistics:
writer.writerow(data)
@staticmethod
def clear_plans(created_plans: List[str], test_set_directory_path: str) -> NoReturn:
"""Clears the directory from the plan files so that the next iterations could continue.
:param created_plans: the paths to the plans that were created in this iteration.
:param test_set_directory_path: the path to the test set directory containing the solver execution log.
"""
for plan_path in created_plans:
if plan_path is not None:
os.remove(plan_path)
for solver_output_path in Path(test_set_directory_path).glob("*.out"):
os.remove(solver_output_path)
def on_validation_success(
self, output_statistics_path: str,
test_set_statistics: List[Dict[str, Any]]) -> NoReturn:
"""Write the needed statistics if the plans were all validated and were approved.
:param output_statistics_path: the path to the output file.
:param test_set_statistics: the statistics object containing the data about the learning process.
"""
self.logger.info("All plans are valid!")
self.write_statistics(test_set_statistics, output_statistics_path)
self.logger.info("Done!")
def log_model_safety_report(
self, domain_name: str, generated_domains_directory_path: str, test_set_directory_path: str,
output_statistics_path: str, is_learning_process_safe: bool = True,
should_stop_after_first_success: bool = True) -> NoReturn:
"""The main entry point that runs the validation process for the plans that were generated using the learned
domains.
:param domain_name: the name of the domain that is being validated.
:param generated_domains_directory_path: the path to the directory containing the generated domains.
:param test_set_directory_path: the directory containing the test set problems.
:param output_statistics_path: the path to the output statistics file.
:param is_learning_process_safe: an indicator on whether the learning algorithm used creates only safe plans.
If so, there is no need to validate the created plans of the algorithm.
:param should_stop_after_first_success: whether or not the algorithm should stop once all of the test set
        problems were solved. This is relevant in cases where the learning process is not monotonically improving.
"""
test_set_statistics = []
for generated_domain_path in Path(generated_domains_directory_path).glob("*.pddl"):
domain_successful_plans = self.generate_test_set_plans(
str(generated_domain_path), test_set_directory_path)
plan_paths = [plan for plan in domain_successful_plans.values() if plan not in POTENTIAL_PLANNING_ERRORS]
validated_plans = []
for problem_path in domain_successful_plans:
plan_path = domain_successful_plans[problem_path]
domain_statistics = {
"domain_name": domain_name,
"generated_domain_file_name": generated_domain_path.stem,
"problem_file_name": Path(problem_path).stem}
if domain_successful_plans[problem_path] in POTENTIAL_PLANNING_ERRORS:
domain_statistics["plan_generated"] = False
domain_statistics["plan_validation_status"] = domain_successful_plans[problem_path]
validated_plans.append(False)
test_set_statistics.append(domain_statistics)
continue
validation_status = ValidationResult(VALID_STATUS) if is_learning_process_safe \
else self.validate_plan(problem_path, plan_path)
domain_statistics["plan_generated"] = True
domain_statistics["plan_validation_status"] = validation_status.status
domain_statistics["validation_error"] = validation_status.error
validated_plans.append(validation_status.status == VALID_STATUS)
test_set_statistics.append(domain_statistics)
self.clear_plans(plan_paths, test_set_directory_path)
if all(validated_plans) and should_stop_after_first_success:
self.on_validation_success(output_statistics_path, test_set_statistics)
return
self.write_statistics(test_set_statistics, output_statistics_path)
self.logger.info("Done!")
if __name__ == '__main__':
try:
logging.basicConfig(level=logging.DEBUG)
args = sys.argv
validator = DomainValidator(expected_domain_path=args[1])
validator.log_model_safety_report(
domain_name=args[2],
generated_domains_directory_path=args[3],
test_set_directory_path=args[4],
output_statistics_path=args[5])
except Exception as e:
print(e)
| /sam_learner-2.1.9-py3-none-any.whl/sam_learner/model_validation/learned_domain_validator.py | 0.770724 | 0.305141 | learned_domain_validator.py | pypi |
import csv
import logging
from pathlib import Path
from typing import List, Set, NoReturn
from pddl.parser import Parser
from pddl.pddl import Domain, Action
from sam_learner.sam_models import ComparablePredicate
STATISTICS_COLUMNS_NAMES = ["domain_name", "domain_path", "action_name", "number_consistent_models"]
def calculate_number_consistent_models(
maybe_preconditions: List[ComparablePredicate],
maybe_effects: Set[ComparablePredicate] = frozenset()) -> int:
"""calculates the number of consistent models that are available based on the learned preconditions and effects.
:param maybe_preconditions: the possible preconditions for the learned action.
:param maybe_effects: the possible effects for the learned action.
:return: the total number of possible action models that could be learned for the specific action.
"""
return pow(2, len(maybe_preconditions)) * pow(2, len(maybe_effects))
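# Worked sketch of the count above: 3 candidate preconditions and 2 candidate effects yield
# 2 ** 3 * 2 ** 2 = 32 consistent models (plain strings stand in for ComparablePredicate
# instances, since only len() is used by the function).
def _example_consistent_models_count() -> int:
    maybe_preconditions = ["(at ?x ?loc)", "(clear ?loc)", "(empty ?x)"]
    maybe_effects = {"(at ?x ?to)", "(visited ?to)"}
    return calculate_number_consistent_models(maybe_preconditions, maybe_effects)  # -> 32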
class SafeConsistentModelsCalculator:
def __init__(self):
self.logger = logging.getLogger(__name__)
def calculate_safe_models_consistency(
self, generated_domains_directory_path: Path, output_statistics_path: str,
known_actions: List[str]) -> NoReturn:
"""
:param generated_domains_directory_path:
:param output_statistics_path:
:param known_actions:
:return:
"""
self.logger.info("Logging the number of consistent models for the safe action models that was learned.")
consistency_statistics = []
for generated_domain_path in generated_domains_directory_path.glob("*.pddl"):
self.logger.debug(f"Logging the consistency statistics for the domain - {generated_domain_path.stem}")
domain: Domain = Parser(generated_domain_path).parse_domain()
learned_actions = [action_name for action_name in domain.actions if action_name not in known_actions]
for action_name in learned_actions:
action_data: Action = domain.actions[action_name]
action_preconditions = action_data.precondition
action_consistency = {
"domain_name": domain.name,
"domain_path": generated_domain_path,
"action_name": action_name,
"number_consistent_models": calculate_number_consistent_models(action_preconditions)
}
consistency_statistics.append(action_consistency)
        with open(output_statistics_path, "wt", newline="") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=STATISTICS_COLUMNS_NAMES)
writer.writeheader()
for data in consistency_statistics:
writer.writerow(data)
| /sam_learner-2.1.9-py3-none-any.whl/sam_learner/model_validation/consistent_models_calculator.py | 0.756537 | 0.324971 | consistent_models_calculator.py | pypi |
import logging
from typing import List, Any, Dict, NoReturn
from pddl.pddl import Domain
from sam_learner.sam_models import Trajectory, ComparablePredicate
def calculate_true_positive_value(
learned_predicates: List[ComparablePredicate], expected_predicates: List[ComparablePredicate]) -> int:
"""
:param learned_predicates:
:param expected_predicates:
:return:
"""
return len([predicate for predicate in learned_predicates if predicate in expected_predicates])
def calculate_false_positive_value(
learned_predicates: List[ComparablePredicate], expected_predicates: List[ComparablePredicate]) -> int:
"""
:param learned_predicates:
:param expected_predicates:
:return:
"""
return len(learned_predicates) - calculate_true_positive_value(learned_predicates, expected_predicates)
def calculate_false_negative_value(
learned_predicates: List[ComparablePredicate], expected_predicates: List[ComparablePredicate]) -> int:
"""
:param learned_predicates:
:param expected_predicates:
:return:
"""
return len(expected_predicates) - calculate_true_positive_value(learned_predicates, expected_predicates)
def calculate_recall(
learned_predicates: List[ComparablePredicate], actual_predicates: List[ComparablePredicate]) -> float:
"""
:param learned_predicates:
:param actual_predicates:
:return:
"""
if len(learned_predicates) == 0:
if len(actual_predicates) == 0:
return 1
return 0
true_positives = calculate_true_positive_value(learned_predicates, actual_predicates)
false_negatives = calculate_false_negative_value(learned_predicates, actual_predicates)
return true_positives / (true_positives + false_negatives)
def calculate_precision(
learned_predicates: List[ComparablePredicate], actual_predicates: List[ComparablePredicate]) -> float:
"""
:param learned_predicates:
:param actual_predicates:
:return:
"""
if len(learned_predicates) == 0:
if len(actual_predicates) == 0:
return 1
return 0
true_positives = calculate_true_positive_value(learned_predicates, actual_predicates)
false_positives = calculate_false_positive_value(learned_predicates, actual_predicates)
return true_positives / (true_positives + false_positives)
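# Minimal sketch of the helpers above with placeholder strings instead of
# ComparablePredicate objects (only membership tests and len() are used).
def _example_precision_recall():
    learned = ["(on ?x ?y)", "(clear ?x)"]
    actual = ["(on ?x ?y)", "(clear ?x)", "(handempty)"]
    precision = calculate_precision(learned, actual)  # 2 true positives, 0 false positives -> 1.0
    recall = calculate_recall(learned, actual)  # 2 true positives, 1 false negative -> 2 / 3
    return precision, recall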
class PrecisionRecallCalculator:
logger: logging.Logger
expected_domain: Domain
def __init__(self, expected_domain: Domain):
self.logger = logging.getLogger(__name__)
self.expected_domain = expected_domain
def calculate_precision_recall_for_unknown_actions(
self, action_name: str, num_trajectories: int, num_triplets: int) -> Dict[str, float]:
"""
:param action_name:
:param num_trajectories:
:param num_triplets:
:return:
"""
actual_action_preconditions = self.expected_domain.actions[action_name].precondition
actual_action_add_effects = list(self.expected_domain.actions[action_name].effect.addlist)
actual_action_delete_effects = list(self.expected_domain.actions[action_name].effect.dellist)
return {
"domain_name": self.expected_domain.name,
"num_trajectories": num_trajectories,
"num_trajectory_triplets": num_triplets,
"learned_action_name": action_name,
"learned_preconditions": None,
"learned_add_effects": None,
"learned_delete_effects": None,
"ground_truth_preconditions": ",".join([str(precondition) for precondition in actual_action_preconditions]),
"ground_truth_add_effects": ",".join([str(add_effect) for add_effect in actual_action_add_effects]),
"ground_truth_delete_effects": ",".join([str(del_effect) for del_effect in actual_action_delete_effects]),
"preconditions_precision": 0,
"add_effects_precision": 1,
"delete_effects_precision": 1,
"preconditions_recall": 1,
"add_effects_recall": 0,
"delete_effects_recall": 0,
}
def calculate_action_precision_recall(
self, learned_model: Domain, action_name: str) -> Dict[str, float]:
"""
:param learned_model:
:param action_name:
:return:
"""
learned_action_preconditions = learned_model.actions[action_name].precondition
learned_action_add_effects = list(learned_model.actions[action_name].effect.addlist)
learned_action_delete_effects = list(learned_model.actions[action_name].effect.dellist)
actual_action_preconditions = [ComparablePredicate(predicate=p) for p in self.expected_domain.actions[action_name].precondition]
actual_action_add_effects = [ComparablePredicate(predicate=p) for p in self.expected_domain.actions[action_name].effect.addlist]
actual_action_delete_effects = [ComparablePredicate(predicate=p) for p in self.expected_domain.actions[action_name].effect.dellist]
return {
"preconditions_precision": calculate_precision(learned_action_preconditions, actual_action_preconditions),
"add_effects_precision": calculate_precision(learned_action_add_effects, actual_action_add_effects),
"delete_effects_precision": calculate_precision(learned_action_delete_effects,
actual_action_delete_effects),
"preconditions_recall": calculate_recall(learned_action_preconditions, actual_action_preconditions),
"add_effects_recall": calculate_recall(learned_action_add_effects, actual_action_add_effects),
"delete_effects_recall": calculate_recall(learned_action_delete_effects, actual_action_delete_effects),
}
def extract_learned_action_data_vs_ground_truth(
self, learned_model: Domain, action_name: str) -> Dict[str, str]:
"""
:param learned_model:
:param action_name:
:return:
"""
learned_action_preconditions = learned_model.actions[action_name].precondition
learned_action_add_effects = list(learned_model.actions[action_name].effect.addlist)
learned_action_delete_effects = list(learned_model.actions[action_name].effect.dellist)
actual_action_preconditions = self.expected_domain.actions[action_name].precondition
actual_action_add_effects = list(self.expected_domain.actions[action_name].effect.addlist)
actual_action_delete_effects = list(self.expected_domain.actions[action_name].effect.dellist)
return {
"learned_preconditions": ",".join([str(precondition) for precondition in learned_action_preconditions]),
"learned_add_effects": ",".join([str(add_effect) for add_effect in learned_action_add_effects]),
"learned_delete_effects": ",".join([str(del_effect) for del_effect in learned_action_delete_effects]),
"ground_truth_preconditions": ",".join([str(precondition) for precondition in actual_action_preconditions]),
"ground_truth_add_effects": ",".join([str(add_effect) for add_effect in actual_action_add_effects]),
"ground_truth_delete_effects": ",".join([str(del_effect) for del_effect in actual_action_delete_effects])
}
def write_learned_actions_statistics(
self, available_trajectories: List[Trajectory], learned_model: Domain,
learned_actions_statistics: List[Dict[str, Any]], known_actions: List[str],
actions_observation_histogram: Dict[str, int],
actions_with_duplicated_objects_observations: Dict[str, int],
) -> NoReturn:
"""Writes the statistics about the learned actions. This adds additional information about the learned actions.
:param available_trajectories: the trajectories used in the learning process.
:param learned_model: the action model that was learned.
:param learned_actions_statistics: the dictionary containing the data about the learned actions.
        :param known_actions: the actions that the agent already knows.
        :param actions_observation_histogram: a mapping between each action and the number of triplets in which it was observed.
:param actions_with_duplicated_objects_observations: a mapping between the action and the number of times it was
observed with duplicated objects.
"""
num_trajectories = len(available_trajectories)
num_trajectory_triplets = sum([len(trajectory) for trajectory in available_trajectories])
learned_actions = [
action for action in learned_model.actions if action not in known_actions]
for action_name in learned_actions:
if action_name not in self.expected_domain.actions:
self.logger.debug(f"Action - {action_name} is probably a proxy action. Real action not learned.")
continue
action_statistics = {
"domain_name": self.expected_domain.name,
"num_trajectories": num_trajectories,
"num_trajectory_triplets": num_trajectory_triplets,
"learned_action_name": action_name,
"num_triplets_action_appeared": actions_observation_histogram[action_name],
"num_triplets_with_duplicate_parameters": actions_with_duplicated_objects_observations[action_name]
}
action_statistics.update(
self.extract_learned_action_data_vs_ground_truth(learned_model=learned_model, action_name=action_name))
action_statistics.update(self.calculate_action_precision_recall(
learned_model=learned_model, action_name=action_name))
learned_actions_statistics.append(action_statistics)
# writing the statistics for the actions that were not yet learned.
not_yet_learned_action_names = [action_name for action_name in self.expected_domain.actions if
action_name not in learned_model.actions]
for not_learned_action in not_yet_learned_action_names:
learned_actions_statistics.append(
self.calculate_precision_recall_for_unknown_actions(
not_learned_action, num_trajectories, num_trajectory_triplets))
| /sam_learner-2.1.9-py3-none-any.whl/sam_learner/model_validation/action_precision_recall_calculator.py | 0.821331 | 0.650883 | action_precision_recall_calculator.py | pypi |
import os
import numpy as np
import cv2
import math
from skimage.feature import peak_local_max
from scipy.cluster.vq import kmeans
def find_max_subarray(array: np.ndarray, window_w: int, threshold: float) -> tuple:
assert len(array.shape) == 1
best_sum = -1
start_idx = None
array_cum = np.pad(np.cumsum(array), pad_width=[(1, 0)])
max_start_idx = array.shape[0] - window_w
for idx in range(max_start_idx + 1):
cumsum_upto_windowend = array_cum[idx + window_w]
cumsum_before_windowstart = array_cum[idx]
subarray_sum = cumsum_upto_windowend - cumsum_before_windowstart
if subarray_sum > threshold and subarray_sum > best_sum:
best_sum = subarray_sum
start_idx = idx
return start_idx, best_sum
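# Minimal sketch of find_max_subarray on a hypothetical 1-D attention profile:
# the densest window of width 3 starts at index 2 with a sum of 9.0.
def _example_find_max_subarray():
    attention_profile = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 1.0])
    return find_max_subarray(attention_profile, window_w=3, threshold=5.0)  # -> (2, 9.0)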
def find_rectangle(img_array, asp_ratio, keep_attention):
img_h, img_w = img_array.shape
if img_h > img_w:
transpose = True
array2d = img_array.T
img_h, img_w = img_w, img_h
else:
transpose = False
array2d = img_array
array2d_hcum = np.pad(np.cumsum(array2d, axis=0), pad_width=[(1, 0), (0, 0)])
img_h, img_w = array2d.shape
img_area = img_h * img_w
total_attention = np.sum(array2d)
threshold_attention = keep_attention * total_attention
# initialize
y_start = 0
min_height = 1
y_finish = y_start + min_height
best_area = img_area
failedToFind = True
while True:
window_h = y_finish - y_start
window_w = math.ceil(asp_ratio * window_h)
if not (
y_finish <= img_h
and window_h >= min_height
and window_h <= img_h
and window_w <= img_w
):
break
subarray2d = array2d_hcum[y_finish] - array2d_hcum[y_start]
x_start, attention_kept = find_max_subarray(
subarray2d, window_w, threshold_attention
)
if attention_kept > 0:
box_area = window_w * window_h
if (box_area < best_area) or (
box_area == best_area and attention_kept > best_attention
):
best_area = box_area
x, y, w, h = x_start, y_start, window_w, window_h
best_attention = attention_kept
failedToFind = False
y_start += 1
else:
y_finish += 1
if failedToFind:
return {}
else:
attention_factor = best_attention / total_attention
area_factor = w * h / img_area
density_factor = attention_factor / area_factor
return {
"coords": (y, x, h, w) if transpose else (x, y, w, h),
"area": area_factor,
"attention": attention_factor,
"density": density_factor,
}
def find_best_rectangle(
salient_ndimage, a_r, min_attention, step=0.02, alpha=10, beta=10, gamma=0.1
):
results = {}
attention = 1
count = 0
while attention >= min_attention:
attention -= step * (2**count)
count += 1
result = find_rectangle(salient_ndimage, a_r, attention)
if result:
score = (
-alpha * math.log10(1 - result["attention"])
- beta * math.log10(1 - result["area"])
+ gamma ** (result["density"])
)
results[score] = result.pop("coords")
if results:
sorted_scores = sorted(results)
print(sorted_scores)
x, y, w, h = results[sorted_scores[-1]]
return x, y, w, h
else:
raise Exception(
f"Failed to crop with aspect ratio: {a_r}, minimum attention: {min_attention}"
)
def get_centroids(array2d, maximum_gap=0.2, peak_threshold=0.5):
    maximum_distortion = array2d.shape[0] * maximum_gap
    for _k in [1, 2, 3, 4]:
        peaks = peak_local_max(array2d, threshold_rel=peak_threshold).astype(np.float32)
k_peaks, distortion = kmeans(peaks.astype(float), _k)
if distortion < maximum_distortion:
return k_peaks.astype(np.uint32)
def descend_from_hilltop(array2d, cent_ij, alpha=1.5, beta=0.5, asp_ratio=1.44):
cent_i, cent_j = cent_ij
image_h, image_w = array2d.shape
    _5_pct_height = int(image_h * 0.05)  # step size: 5% of the image height
total_area = image_h * image_w
total_attention = array2d.sum()
scores = []
attentions = []
densities = []
coords = []
    pad_top = _5_pct_height
    pad_bottom = _5_pct_height
while True:
pad_right = asp_ratio * pad_bottom
pad_left = asp_ratio * pad_top
start_i = int(cent_i - pad_top)
start_j = int(cent_j - pad_left)
finish_i = int(cent_i + pad_bottom)
finish_j = int(cent_j + pad_right)
if start_i < 0 or finish_i >= image_h or start_j < 0 or finish_j >= image_w:
break
else:
attention = array2d[start_i:finish_i, start_j:finish_j].sum()
attention_factor = attention / total_attention
attentions.append(attention_factor)
area = (finish_i - start_i + 1) * (finish_j - start_j + 1)
area_factor = area / total_area
density_factor = attention_factor / area_factor
densities.append(density_factor)
coords.append([start_i, start_j, finish_i, finish_j])
            pad_bottom += _5_pct_height
            pad_top += _5_pct_height
attentions = np.array(attentions)
densities = np.array(densities)
scores = np.tanh(densities**alpha) * (attentions**beta)
start_i, start_j, finish_i, finish_j = coords[np.argmax(scores)]
start_x, start_y, finish_x, finish_y = start_j, start_i, finish_j, finish_i
return start_x, start_y, finish_x, finish_y
def crop(
original_image_path,
saliency_map_path,
cropped_image_path,
boxed_image_path,
a_r,
attention,
**kwargs,
):
BLUE = (255, 0, 0)
THICKNESS = 5
original_ndimage = cv2.imread(original_image_path, cv2.IMREAD_COLOR)
boxed = np.copy(original_ndimage)
salient_ndimage = cv2.imread(saliency_map_path, cv2.IMREAD_GRAYSCALE)
# x, y, w, h = find_best_rectangle(salient_ndimage, a_r, attention, **kwargs)
# cropped_ndimage = original_ndimage[y : y + h, x : x + w, :]
# crop_success = cv2.imwrite(cropped_image_path, cropped_ndimage.astype(int))
# Draw a diagonal blue line with thickness of 5 px
# cv2.rectangle(original_ndimage, (x, y), (x + w, y + h), blue, thickness)
for i, cent_ij in enumerate(get_centroids(salient_ndimage)):
if i > 0:
name, ext = cropped_image_path.rsplit(".", 1)
cropped_image_path = f"{name}_{i}.{ext}"
start_x, start_y, finish_x, finish_y = descend_from_hilltop(
salient_ndimage, cent_ij
)
cropped_ndimage = original_ndimage[start_y:finish_y, start_x:finish_x, :]
cv2.imwrite(cropped_image_path, cropped_ndimage)
cv2.rectangle(boxed, (start_x, start_y), (finish_x, finish_y), BLUE, THICKNESS)
cv2.imwrite(boxed_image_path, boxed)
def batch_crop_images(
originals_folder,
maps_folder,
crops_folder,
boxes_folder,
aspect_ratio,
retained_attention,
**kwargs,
):
for _dir in [crops_folder, boxes_folder]:
if not os.path.exists(_dir):
os.mkdir(_dir)
for fname in os.listdir(maps_folder):
if fname in os.listdir(originals_folder):
original_file = os.path.join(originals_folder, fname)
mapping_file = os.path.join(maps_folder, fname)
crop_file = os.path.join(crops_folder, fname)
box_file = os.path.join(boxes_folder, fname)
try:
crop_success = crop(
original_file,
mapping_file,
crop_file,
box_file,
aspect_ratio,
retained_attention,
**kwargs,
)
except Exception as e:
print(f"{e} for {fname}")
continue
else:
print(f"Cropped and boxed {fname} successfully")
| /sam_lstm-1.0.1.tar.gz/sam_lstm-1.0.1/sam_lstm/cropping.py | 0.587352 | 0.424054 | cropping.py | pypi |
import keras.backend as K
from keras.layers import (
add,
Input,
Activation,
Conv2D,
MaxPooling2D,
ZeroPadding2D,
BatchNormalization,
)
from keras.models import Model
from keras.utils import get_file
from sam_lstm.config import TH_WEIGHTS_PATH_NO_TOP
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""
UPGRADED - @SheikSadi
"""
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 1
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
x = Conv2D(
nb_filter1, kernel_size=(1, 1), name=conv_name_base + "2a", trainable=False
)(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2a", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(
nb_filter2,
kernel_size,
padding="same",
name=conv_name_base + "2b",
trainable=False,
)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2b", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(
nb_filter3, kernel_size=(1, 1), name=conv_name_base + "2c", trainable=False
)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2c", trainable=False)(x)
x = add([x, input_tensor])
x = Activation("relu", trainable=False)(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
"""
UPGRADED - @SheikSadi
"""
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 1
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
x = Conv2D(
nb_filter1,
kernel_size=(1, 1),
strides=strides,
name=conv_name_base + "2a",
trainable=False,
)(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2a", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(
nb_filter2,
kernel_size,
padding="same",
name=conv_name_base + "2b",
trainable=False,
)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2b", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(
nb_filter3, kernel_size=(1, 1), name=conv_name_base + "2c", trainable=False
)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2c", trainable=False)(x)
shortcut = Conv2D(
nb_filter3,
kernel_size=(1, 1),
strides=strides,
name=conv_name_base + "1",
trainable=False,
)(input_tensor)
shortcut = BatchNormalization(
axis=bn_axis, name=bn_name_base + "1", trainable=False
)(shortcut)
x = add([x, shortcut])
x = Activation("relu", trainable=False)(x)
return x
def conv_block_atrous(
input_tensor, kernel_size, filters, stage, block, dilation_rate=(2, 2)
):
"""
UPGRADED: @SheikSadi
"""
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 1
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + "2a", trainable=False)(
input_tensor
)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2a", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(
nb_filter2,
kernel_size,
padding="same",
dilation_rate=dilation_rate,
name=conv_name_base + "2b",
trainable=False,
)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2b", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + "2c", trainable=False)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2c", trainable=False)(x)
shortcut = Conv2D(nb_filter3, (1, 1), name=conv_name_base + "1", trainable=False)(
input_tensor
)
shortcut = BatchNormalization(
axis=bn_axis, name=bn_name_base + "1", trainable=False
)(shortcut)
x = add([x, shortcut])
x = Activation("relu", trainable=False)(x)
return x
def identity_block_atrous(
input_tensor, kernel_size, filters, stage, block, dilation_rate=(2, 2)
):
"""
UPGRADED - @SheikSadi
"""
nb_filter1, nb_filter2, nb_filter3 = filters
bn_axis = 1
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
x = Conv2D(
nb_filter1, kernel_size=(1, 1), name=conv_name_base + "2a", trainable=False
)(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2a", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(
nb_filter2,
kernel_size,
dilation_rate=dilation_rate,
padding="same",
name=conv_name_base + "2b",
trainable=False,
)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2b", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + "2c", trainable=False)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + "2c", trainable=False)(x)
x = add([x, input_tensor])
x = Activation("relu", trainable=False)(x)
return x
def dcn_resnet(input_tensor=None):
input_shape = (3, None, None)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor)
else:
img_input = input_tensor
bn_axis = 1
# conv_1
x = ZeroPadding2D((3, 3), trainable=False)(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1", trainable=False)(x)
x = BatchNormalization(axis=bn_axis, name="bn_conv1", trainable=False)(x)
x = Activation("relu", trainable=False)(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same", trainable=False)(x)
# conv_2
x = conv_block(x, 3, [64, 64, 256], stage=2, block="a", strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block="b")
x = identity_block(x, 3, [64, 64, 256], stage=2, block="c")
# conv_3
x = conv_block(x, 3, [128, 128, 512], stage=3, block="a", strides=(2, 2))
x = identity_block(x, 3, [128, 128, 512], stage=3, block="b")
x = identity_block(x, 3, [128, 128, 512], stage=3, block="c")
x = identity_block(x, 3, [128, 128, 512], stage=3, block="d")
# conv_4
x = conv_block_atrous(
x, 3, [256, 256, 1024], stage=4, block="a", dilation_rate=(2, 2)
)
x = identity_block_atrous(
x, 3, [256, 256, 1024], stage=4, block="b", dilation_rate=(2, 2)
)
x = identity_block_atrous(
x, 3, [256, 256, 1024], stage=4, block="c", dilation_rate=(2, 2)
)
x = identity_block_atrous(
x, 3, [256, 256, 1024], stage=4, block="d", dilation_rate=(2, 2)
)
x = identity_block_atrous(
x, 3, [256, 256, 1024], stage=4, block="e", dilation_rate=(2, 2)
)
x = identity_block_atrous(
x, 3, [256, 256, 1024], stage=4, block="f", dilation_rate=(2, 2)
)
# conv_5
x = conv_block_atrous(
x, 3, [512, 512, 2048], stage=5, block="a", dilation_rate=(4, 4)
)
x = identity_block_atrous(
x, 3, [512, 512, 2048], stage=5, block="b", dilation_rate=(4, 4)
)
x = identity_block_atrous(
x, 3, [512, 512, 2048], stage=5, block="c", dilation_rate=(4, 4)
)
# Create model
model = Model(img_input, x)
# Load weights
weights_path = get_file(
"resnet50_weights_th_dim_ordering_th_kernels_notop.h5",
TH_WEIGHTS_PATH_NO_TOP,
cache_subdir="weights",
file_hash="f64f049c92468c9affcd44b0976cdafe",
)
model.load_weights(weights_path)
return model
| /sam_lstm-1.0.1.tar.gz/sam_lstm-1.0.1/sam_lstm/dcn_resnet.py | 0.875282 | 0.436442 | dcn_resnet.py | pypi |
import tensorflow as tf
import keras.backend as K
from keras.layers import Layer, InputSpec
from keras import initializers
class AttentiveConvLSTM(Layer):
"""
att_convlstm = AttentiveConvLSTM(
nb_filters_in=512, nb_filters_out=512, nb_filters_att=512, nb_cols=3, nb_rows=3
)(att_convlstm)
"""
def __init__(
self,
nb_filters_in,
nb_filters_out,
nb_filters_att,
nb_rows,
nb_cols,
init="normal",
inner_init="orthogonal",
attentive_init="zero",
**kwargs,
):
self.nb_filters_in = nb_filters_in
self.nb_filters_out = nb_filters_out
self.nb_filters_att = nb_filters_att
self.nb_rows = nb_rows
self.nb_cols = nb_cols
self.init = init
self.inner_init = inner_init
self.attentive_init = attentive_init
super().__init__(**kwargs)
def get_output_shape_for(self, input_shape):
return input_shape[:1] + (self.nb_filters_out,) + input_shape[3:]
def compute_mask(self, input, mask):
return None
def get_initial_states(self, x):
# we will skip the time-axis (axis=1)
initial_state_shape = (x.shape[0], x.shape[2], x.shape[3], x.shape[4])
initial_state = tf.zeros(initial_state_shape)
initial_states = [initial_state for _ in self.states]
return initial_states
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
self.states = [None, None]
in_channels = input_shape[2]
def init_conv_weights(out_channels, name, init, bias=True):
_kernel = self.add_weight(
f"{name}_kernel",
shape=[out_channels, in_channels, self.nb_rows, self.nb_cols],
trainable=True,
initializer=initializers.get(init),
)
if bias:
_bias = self.add_weight(
f"{name}_bias",
shape=[out_channels],
trainable=True,
initializer=initializers.get(self.init),
)
else:
_bias = tf.zeros(shape=[out_channels])
return _kernel, _bias
self.Wa = init_conv_weights(self.nb_filters_att, "Wa", self.init)
self.Ua = init_conv_weights(self.nb_filters_att, "Ua", self.init)
self.Va = init_conv_weights(1, "Va", self.attentive_init, bias=False)
self.Wi = init_conv_weights(self.nb_filters_out, "Wi", self.init)
self.Ui = init_conv_weights(self.nb_filters_out, "Ui", self.inner_init)
self.Wf = init_conv_weights(self.nb_filters_out, "Wf", self.init)
self.Uf = init_conv_weights(self.nb_filters_out, "Uf", self.inner_init)
self.Wc = init_conv_weights(self.nb_filters_out, "Wc", self.init)
self.Uc = init_conv_weights(self.nb_filters_out, "Uc", self.inner_init)
self.Wo = init_conv_weights(self.nb_filters_out, "Wo", self.init)
self.Uo = init_conv_weights(self.nb_filters_out, "Uo", self.inner_init)
def preprocess_input(self, x):
return x
@staticmethod
def conv(kernel_bias, input):
kernel, bias_ = kernel_bias
kernelT = tf.transpose(kernel, perm=[3, 2, 1, 0])
kernelT_x_input = tf.nn.conv2d(
input, kernelT, strides=1, padding="SAME", data_format="NCHW"
)
bias_ = tf.expand_dims(tf.expand_dims(bias_, axis=1), axis=2)
bias = tf.repeat(
tf.repeat(bias_, repeats=input.shape[2], axis=1),
repeats=input.shape[3],
axis=2,
)
return kernelT_x_input + bias
def step(self, X, states):
sigmoid = tf.math.sigmoid
tanh = tf.math.tanh
conv = self.conv
Ht_1 = states[0]
Ct_1 = states[1]
Zt = conv(self.Va, tanh(conv(self.Wa, X) + conv(self.Ua, Ht_1)))
At = tf.repeat(
tf.reshape(
tf.nn.softmax(K.batch_flatten(Zt)),
(X.shape[0], 1, X.shape[2], X.shape[3]),
),
repeats=X.shape[1],
axis=1,
)
Xt_ = X * At
It = sigmoid(conv(self.Wi, Xt_) + conv(self.Ui, Ht_1))
Ft = sigmoid(conv(self.Wf, Xt_) + conv(self.Uf, Ht_1))
Ot = sigmoid(conv(self.Wo, Xt_) + conv(self.Uo, Ht_1))
Gt = tanh(conv(self.Wc, Xt_) + conv(self.Uc, Ht_1))
Ct = Ft * Ct_1 + It * Gt
Ht = Ot * tanh(Ct)
return Ht, [Ht, Ct]
def get_constants(self, x):
return []
def call(self, x, mask=None):
initial_states = self.get_initial_states(x)
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
last_output, outputs, states = K.rnn(
self.step,
preprocessed_input,
initial_states,
go_backwards=False,
mask=mask,
constants=constants,
unroll=False,
input_length=x.shape[1],
)
if len(last_output.shape) == 3:
last_output = tf.expand_dims(last_output, axis=0)
return last_output
def get_config(self):
config_to_serialize = dict(
nb_filters_in=self.nb_filters_in,
nb_filters_out=self.nb_filters_out,
nb_filters_att=self.nb_filters_att,
nb_rows=self.nb_rows,
nb_cols=self.nb_cols,
init=self.init,
inner_init=self.inner_init,
attentive_init=self.attentive_init,
)
config = super().get_config()
config.update(config_to_serialize)
return config
| /sam_lstm-1.0.1.tar.gz/sam_lstm-1.0.1/sam_lstm/attentive_convlstm.py | 0.858615 | 0.492798 | attentive_convlstm.py | pypi |
import cv2
import numpy as np
import scipy.io
import scipy.ndimage
from sam_lstm.config import gaussina_sigma
def padding(img, shape_r=240, shape_c=320, channels=3):
img_padded = np.zeros((shape_r, shape_c, channels), dtype=np.uint8)
if channels == 1:
img_padded = np.zeros((shape_r, shape_c), dtype=np.uint8)
original_shape = img.shape
rows_rate = original_shape[0] / shape_r
cols_rate = original_shape[1] / shape_c
if rows_rate > cols_rate:
new_cols = (original_shape[1] * shape_r) // original_shape[0]
img = cv2.resize(img, (new_cols, shape_r))
if new_cols > shape_c:
new_cols = shape_c
img_padded[
:,
((img_padded.shape[1] - new_cols) // 2) : (
(img_padded.shape[1] - new_cols) // 2 + new_cols
),
] = img
else:
new_rows = (original_shape[0] * shape_c) // original_shape[1]
img = cv2.resize(img, (shape_c, new_rows))
if new_rows > shape_r:
new_rows = shape_r
img_padded[
((img_padded.shape[0] - new_rows) // 2) : (
(img_padded.shape[0] - new_rows) // 2 + new_rows
),
:,
] = img
return img_padded
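# Minimal sketch of padding(): a hypothetical 100x200 BGR image is resized to keep
# its aspect ratio and centred on a 240x320 black canvas.
def _example_padding():
    dummy_image = np.full((100, 200, 3), 255, dtype=np.uint8)
    padded = padding(dummy_image, shape_r=240, shape_c=320, channels=3)
    return padded.shape  # -> (240, 320, 3)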
def resize_fixation(img, rows=480, cols=640):
out = np.zeros((rows, cols))
factor_scale_r = rows / img.shape[0]
factor_scale_c = cols / img.shape[1]
coords = np.argwhere(img)
for coord in coords:
r = int(np.round(coord[0] * factor_scale_r))
c = int(np.round(coord[1] * factor_scale_c))
if r == rows:
r -= 1
if c == cols:
c -= 1
out[r, c] = 1
return out
def padding_fixation(img, shape_r=480, shape_c=640):
img_padded = np.zeros((shape_r, shape_c))
original_shape = img.shape
if original_shape == img_padded.shape:
return img
else:
rows_rate = original_shape[0] / shape_r
cols_rate = original_shape[1] / shape_c
if rows_rate > cols_rate:
new_cols = (original_shape[1] * shape_r) // original_shape[0]
img = resize_fixation(img, rows=shape_r, cols=new_cols)
if new_cols > shape_c:
new_cols = shape_c
img_padded[
:,
((img_padded.shape[1] - new_cols) // 2) : (
(img_padded.shape[1] - new_cols) // 2 + new_cols
),
] = img
else:
new_rows = (original_shape[0] * shape_c) // original_shape[1]
img = resize_fixation(img, rows=new_rows, cols=shape_c)
if new_rows > shape_r:
new_rows = shape_r
img_padded[
((img_padded.shape[0] - new_rows) // 2) : (
(img_padded.shape[0] - new_rows) // 2 + new_rows
),
:,
] = img
return img_padded
def preprocess_images(paths, shape_r, shape_c):
ims = np.zeros((len(paths), shape_r, shape_c, 3))
for i, path in enumerate(paths):
original_image = cv2.imread(path, cv2.IMREAD_COLOR)
padded_image = padding(original_image, shape_r, shape_c, 3)
ims[i] = padded_image
ims[:, :, :, 0] -= 103.939
ims[:, :, :, 1] -= 116.779
ims[:, :, :, 2] -= 123.68
ims = ims.transpose((0, 3, 1, 2))
return ims
def preprocess_maps(paths, shape_r, shape_c):
ims = np.zeros((len(paths), 1, shape_r, shape_c))
for i, path in enumerate(paths):
original_map = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
padded_map = padding(original_map, shape_r, shape_c, 1)
ims[i, 0] = padded_map.astype(np.float32)
ims[i, 0] /= 255.0
return ims
def preprocess_fixmaps(paths, shape_r, shape_c):
ims = np.zeros((len(paths), 1, shape_r, shape_c))
for i, path in enumerate(paths):
matdata = scipy.io.loadmat(path)
imshape = matdata["resolution"][0]
fix_map = np.zeros(imshape)
fixations = np.vstack([fixation[0][2] for fixation in matdata["gaze"]])
        # The MATLAB fixation coordinates are 1-based and may load as floats; cast them for indexing.
        xs = (fixations[:, 0] - 1).astype(int)
        ys = (fixations[:, 1] - 1).astype(int)
fix_map[ys, xs] = 1
ims[i, 0] = padding_fixation(fix_map, shape_r=shape_r, shape_c=shape_c)
return ims
def postprocess_predictions(pred, shape_r, shape_c, sigma=gaussina_sigma):
predictions_shape = pred.shape
rows_rate = shape_r / predictions_shape[0]
cols_rate = shape_c / predictions_shape[1]
pred = pred / np.max(pred) * 255
if rows_rate > cols_rate:
new_cols = (predictions_shape[1] * shape_r) // predictions_shape[0]
pred = cv2.resize(pred, (new_cols, shape_r))
img = pred[
:,
((pred.shape[1] - shape_c) // 2) : (
(pred.shape[1] - shape_c) // 2 + shape_c
),
]
else:
new_rows = (predictions_shape[0] * shape_c) // predictions_shape[1]
pred = cv2.resize(pred, (shape_c, new_rows))
img = pred[
((pred.shape[0] - shape_r) // 2) : (
(pred.shape[0] - shape_r) // 2 + shape_r
),
:,
]
img = scipy.ndimage.gaussian_filter(img, sigma=sigma)
img = img / np.max(img) * 255
return img.clip(0, 255).astype(np.int32)
|
/sam_lstm-1.0.1.tar.gz/sam_lstm-1.0.1/sam_lstm/utilities.py
| 0.453262 | 0.358241 |
utilities.py
|
pypi
|
import tensorflow as tf
import numpy as np
import keras.backend as K
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
floatX = K.floatx()
class LearningPrior(Layer):
def __init__(
self,
nb_gaussian,
init="normal",
weights=None,
W_regularizer=None,
activity_regularizer=None,
W_constraint=None,
**kwargs,
):
self.nb_gaussian = nb_gaussian
self.init = initializers.get(init)
self.W_regularizer = regularizers.get(W_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.input_spec = [InputSpec(ndim=4)]
self.initial_weights = weights
super(LearningPrior, self).__init__(**kwargs)
def build(self, input_shape):
if self.initial_weights is None:
self.W = self.add_weight(
name=f"{self.name}_W",
shape=(self.nb_gaussian * 4,),
initializer=self.init,
regularizer=self.W_regularizer,
trainable=True,
constraint=self.W_constraint,
)
else:
self.set_weights(self.initial_weights)
del self.initial_weights
def get_output_shape_for(self, input_shape):
self.b_s = input_shape[0]
self.height = input_shape[2]
self.width = input_shape[3]
return self.b_s, self.nb_gaussian, self.height, self.width
def call(self, x, mask=None):
mu_x = K.clip(self.W[: self.nb_gaussian], 0.25, 0.75)
mu_y = K.clip(self.W[self.nb_gaussian : self.nb_gaussian * 2], 0.35, 0.65)
sigma_x = K.clip(self.W[self.nb_gaussian * 2 : self.nb_gaussian * 3], 0.1, 0.9)
sigma_y = K.clip(self.W[self.nb_gaussian * 3 :], 0.2, 0.8)
self.b_s = x.shape[0] if x.shape[0] else 1
self.height = x.shape[2]
self.width = x.shape[3]
e = self.height / self.width
e1 = (1 - e) / 2
e2 = (1 + e) / 2
x_t, y_t = tf.meshgrid(
tf.cast(tf.linspace(0, 1, self.width), dtype=tf.float32),
tf.cast(tf.linspace(e1, e2, self.height), dtype=tf.float32),
)
x_t = tf.repeat(tf.expand_dims(x_t, axis=2), self.nb_gaussian, axis=2)
y_t = tf.repeat(tf.expand_dims(y_t, axis=2), self.nb_gaussian, axis=2)
gaussian = (
1
/ (2 * np.pi * sigma_x * sigma_y + K.epsilon())
* tf.math.exp(
-(
(x_t - mu_x) ** 2 / (2 * sigma_x**2 + K.epsilon())
+ (y_t - mu_y) ** 2 / (2 * sigma_y**2 + K.epsilon())
)
)
)
gaussian /= tf.math.reduce_sum(gaussian, axis=[0, 1])
gaussian = tf.repeat(tf.expand_dims(gaussian, axis=0), self.b_s, axis=0)
output = tf.transpose(gaussian, perm=[0, 3, 1, 2]) # To NCHW format
return output
def get_config(self):
config = {
"nb_gaussian": self.nb_gaussian,
"init": self.init.__name__,
"W_regularizer": self.W_regularizer.get_config()
if self.W_regularizer
else None,
"activity_regularizer": self.activity_regularizer.get_config()
if self.activity_regularizer
else None,
"W_constraint": self.W_constraint.get_config()
if self.W_constraint
else None,
}
base_config = super(LearningPrior, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
/sam_lstm-1.0.1.tar.gz/sam_lstm-1.0.1/sam_lstm/gaussian_prior.py
| 0.865636 | 0.422326 |
gaussian_prior.py
|
pypi
|
import keras.backend as K
import numpy as np
from sam_lstm.config import *
from sam_lstm.dcn_resnet import dcn_resnet
from sam_lstm.gaussian_prior import LearningPrior
from sam_lstm.attentive_convlstm import AttentiveConvLSTM
from keras.layers import Lambda, concatenate, Conv2D, UpSampling2D
def repeat(x):
return K.reshape(
K.repeat(K.batch_flatten(x), nb_timestep),
(b_s, nb_timestep, 512, shape_r_gt, shape_c_gt),
)
def repeat_shape(s):
return (s[0], nb_timestep) + s[1:]
def upsampling(x):
return UpSampling2D(
size=upsampling_factor,
interpolation="bilinear",
)(x)
def upsampling_shape(s):
return s[:2] + (s[2] * upsampling_factor, s[3] * upsampling_factor)
# KL-Divergence Loss
def kl_divergence(y_true, y_pred):
max_y_pred = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
shape_r_out,
axis=-1,
)
),
shape_c_out,
axis=-1,
)
y_pred /= max_y_pred
sum_y_true = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.sum(K.sum(y_true, axis=2), axis=2)),
shape_r_out,
axis=-1,
)
),
shape_c_out,
axis=-1,
)
sum_y_pred = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.sum(K.sum(y_pred, axis=2), axis=2)),
shape_r_out,
axis=-1,
)
),
shape_c_out,
axis=-1,
)
y_true /= sum_y_true + K.epsilon()
y_pred /= sum_y_pred + K.epsilon()
return 10 * K.sum(
K.sum(y_true * K.log((y_true / (y_pred + K.epsilon())) + K.epsilon()), axis=-1),
axis=-1,
)
# Correlation Coefficient Loss
def correlation_coefficient(y_true, y_pred):
max_y_pred = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
shape_r_out,
axis=-1,
)
),
shape_c_out,
axis=-1,
)
y_pred /= max_y_pred
sum_y_true = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.sum(K.sum(y_true, axis=2), axis=2)),
shape_r_out,
axis=-1,
)
),
shape_c_out,
axis=-1,
)
sum_y_pred = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.sum(K.sum(y_pred, axis=2), axis=2)),
shape_r_out,
axis=-1,
)
),
shape_c_out,
axis=-1,
)
y_true /= sum_y_true + K.epsilon()
y_pred /= sum_y_pred + K.epsilon()
N = shape_r_out * shape_c_out
sum_prod = K.sum(K.sum(y_true * y_pred, axis=2), axis=2)
sum_x = K.sum(K.sum(y_true, axis=2), axis=2)
sum_y = K.sum(K.sum(y_pred, axis=2), axis=2)
sum_x_square = K.sum(K.sum(K.square(y_true), axis=2), axis=2)
sum_y_square = K.sum(K.sum(K.square(y_pred), axis=2), axis=2)
num = sum_prod - ((sum_x * sum_y) / N)
den = K.sqrt(
(sum_x_square - K.square(sum_x) / N) * (sum_y_square - K.square(sum_y) / N)
)
return -2 * num / den
# Normalized Scanpath Saliency Loss
def nss(y_true, y_pred):
max_y_pred = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
shape_r_out,
axis=-1,
)
),
shape_c_out,
axis=-1,
)
y_pred /= max_y_pred
y_pred_flatten = K.batch_flatten(y_pred)
y_mean = K.mean(y_pred_flatten, axis=-1)
y_mean = K.repeat_elements(
K.expand_dims(
K.repeat_elements(
K.expand_dims(K.expand_dims(y_mean)), shape_r_out, axis=-1
)
),
shape_c_out,
axis=-1,
)
y_std = K.std(y_pred_flatten, axis=-1)
y_std = K.repeat_elements(
K.expand_dims(
K.repeat_elements(K.expand_dims(K.expand_dims(y_std)), shape_r_out, axis=-1)
),
shape_c_out,
axis=-1,
)
y_pred = (y_pred - y_mean) / (y_std + K.epsilon())
return -(
K.sum(K.sum(y_true * y_pred, axis=2), axis=2)
/ K.sum(K.sum(y_true, axis=2), axis=2)
)
# Gaussian priors initialization
def gaussian_priors_init(shape, name=None, **kwargs):
means = np.random.uniform(low=0.3, high=0.7, size=shape[0] // 2)
covars = np.random.uniform(low=0.05, high=0.3, size=shape[0] // 2)
return K.variable(np.concatenate((means, covars), axis=0), name=name)
def sam_resnet(x):
# Dilated Convolutional Network
dcn = dcn_resnet(input_tensor=x[0])
conv_feat = Conv2D(512, (3, 3), padding="same", activation="relu")(dcn.output)
# Attentive Convolutional LSTM
att_convlstm = Lambda(repeat, repeat_shape)(conv_feat)
att_convlstm = AttentiveConvLSTM(
nb_filters_in=512, nb_filters_out=512, nb_filters_att=512, nb_cols=3, nb_rows=3
)(att_convlstm)
# Learned Prior (1)
priors1 = LearningPrior(nb_gaussian=nb_gaussian, init=gaussian_priors_init)(x[1])
concateneted = concatenate([att_convlstm, priors1], axis=1)
learned_priors1 = Conv2D(
512, (5, 5), padding="same", activation="relu", dilation_rate=(4, 4)
)(concateneted)
# Learned Prior (2)
priors2 = LearningPrior(nb_gaussian=nb_gaussian, init=gaussian_priors_init)(x[1])
concateneted = concatenate([learned_priors1, priors2], axis=1)
learned_priors2 = Conv2D(
512, (5, 5), padding="same", activation="relu", dilation_rate=(4, 4)
)(concateneted)
# Final Convolutional Layer
outs = Conv2D(1, (1, 1), padding="same", activation="relu")(learned_priors2)
outs_up = Lambda(upsampling, upsampling_shape)(outs)
return [outs_up, outs_up, outs_up]
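# --- Hedged usage sketch (not part of the original module) ---
# Evaluates the three loss terms defined above on random maps shaped like the
# network output (1, 1, shape_r_out, shape_c_out from sam_lstm.config). Purely
# illustrative; real training feeds ground-truth density and fixation maps.
if __name__ == "__main__":
    _y_true = K.constant(np.random.rand(1, 1, shape_r_out, shape_c_out).astype("float32"))
    _y_pred = K.constant(np.random.rand(1, 1, shape_r_out, shape_c_out).astype("float32"))
    print("KL :", float(K.eval(K.mean(kl_divergence(_y_true, _y_pred)))))
    print("CC :", float(K.eval(K.mean(correlation_coefficient(_y_true, _y_pred)))))
    print("NSS:", float(K.eval(K.mean(nss(_y_true, _y_pred)))))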
|
/sam_lstm-1.0.1.tar.gz/sam_lstm-1.0.1/sam_lstm/models.py
| 0.816882 | 0.390708 |
models.py
|
pypi
|
import os
import sys
import time
import warnings
from datetime import timedelta
import numpy as np
import pandas as pd
# deactivate the pygame support prompt
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import pygame
from pkg_resources import resource_filename
from tqdm.auto import tqdm
from sam_ml.config import (
get_avg,
get_pos_label,
get_scoring,
get_secondary_scoring,
get_sound_on,
get_strength,
setup_logger,
)
from sam_ml.data.preprocessing import (
Embeddings_builder,
Sampler,
SamplerPipeline,
Scaler,
Selector,
)
from .AdaBoostClassifier import ABC
from .BaggingClassifier import BC
from .BernoulliNB import BNB
from .DecisionTreeClassifier import DTC
from .ExtraTreesClassifier import ETC
from .GaussianNB import GNB
from .GaussianProcessClassifier import GPC
from .GradientBoostingMachine import GBM
from .KNeighborsClassifier import KNC
from .LinearDiscriminantAnalysis import LDA
from .LinearSupportVectorClassifier import LSVC
from .LogisticRegression import LR
from .main_classifier import Classifier
from .main_pipeline import Pipeline
from .MLPClassifier import MLPC
from .QuadraticDiscriminantAnalysis import QDA
from .RandomForestClassifier import RFC
from .SupportVectorClassifier import SVC
from .XGBoostClassifier import XGBC
logger = setup_logger(__name__)
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore" # Also affects subprocesses
class CTest:
""" AutoML class """
def __init__(self, models: str | list[Classifier] = "all", vectorizer: str | Embeddings_builder | None | list[str | Embeddings_builder | None] = None, scaler: str | Scaler | None | list[str | Scaler | None] = None, selector: str | tuple[str, int] | Selector | None | list[str | tuple[str, int] | Selector | None] = None, sampler: str | Sampler | SamplerPipeline | None | list[str | Sampler | SamplerPipeline | None] = None):
"""
@params:
models:
- list of Wrapperclass models from this library
- 'all': use all Wrapperclass models (18+ models)
- 'big_data': use all Wrapperclass models except the ones that take too much space or time on big data (>200.000 data points)
- 'basic': use basic Wrapperclass models (8 models) (LogisticRegression, MLP Classifier, LinearSVC, DecisionTreeClassifier, RandomForestClassifier, SVC, Gradientboostingmachine, KNeighborsClassifier)
- 'basic2': use basic (mostly tree-based) Wrapperclass models
vectorizer: type of "data.embeddings.Embeddings_builder" or Embeddings_builder class object for automatic string column vectorizing (None for no vectorizing)
scaler: type of "data.scaler.Scaler" or Scaler class object for scaling the data (None for no scaling)
selector: type of "data.feature_selection.Selector" or Selector class object for feature selection (None for no selecting)
            sampler: type of "data.sampling.Sampler" or Sampler class object for sampling the train data (None for no sampling)
"""
self.__models_input = models
if type(models) == str:
models = self.model_combs(models)
if type(vectorizer) in (str, Embeddings_builder) or vectorizer is None:
vectorizer = [vectorizer]
if type(scaler) in (str, Scaler) or scaler is None:
scaler = [scaler]
if type(selector) in (str, tuple, Selector) or selector is None:
selector = [selector]
if type(sampler) in (str, Sampler) or sampler is None:
sampler = [sampler]
self._vectorizer = vectorizer
self._scaler = scaler
self._selector = selector
self._sampler = sampler
self.models: dict = {}
for model in models:
self.add_model(model)
self.best_model: Pipeline
self.scores: dict = {}
def __repr__(self) -> str:
params: str = ""
if type(self.__models_input) == str:
params += f"models='{self.__models_input}', "
else:
params += "models=["
for model in self.__models_input:
params += f"\n {model.__str__()},"
params += "],\n"
if type(self._vectorizer) == str:
params += f"vectorizer='{self._vectorizer}'"
elif type(self._vectorizer) == Embeddings_builder:
params += f"vectorizer={self._vectorizer.__str__()}"
else:
params += f"vectorizer={self._vectorizer}"
params += ", "
if type(self._scaler) == str:
params += f"scaler='{self._scaler}'"
elif type(self._scaler) == Scaler:
params += f"scaler={self._scaler.__str__()}"
else:
params += f"scaler={self._scaler}"
params += ", "
if type(self._selector) == str:
params += f"selector='{self._selector}'"
elif type(self._selector) == Selector:
params += f"selector={self._selector.__str__()}"
else:
params += f"selector={self._selector}"
params += ", "
if type(self._sampler) == str:
params += f"sampler='{self._sampler}'"
elif type(self._sampler) == Sampler:
params += f"sampler={self._sampler.__str__()}"
else:
params += f"sampler={self._sampler}"
return f"CTest({params})"
def remove_model(self, model_name: str):
del self.models[model_name]
def add_model(self, model: Classifier):
for vec in self._vectorizer:
for scal in self._scaler:
for sel in self._selector:
for sam in self._sampler:
model_pipe_name = model.model_name+f" (vec={vec}, scaler={scal}, selector={sel}, sampler={sam})"
self.models[model_pipe_name] = Pipeline(vec, scal, sel, sam, model, model_pipe_name)
def model_combs(self, kind: str):
"""
@params:
kind:
"all": use all models
"basic": use a simple combination (LogisticRegression, MLP Classifier, LinearSVC, DecisionTreeClassifier, RandomForestClassifier, SVC, Gradientboostingmachine, AdaboostClassifier, KNeighborsClassifier)
"""
if kind == "all":
models = [
LR(),
QDA(),
LDA(),
MLPC(),
LSVC(),
DTC(),
RFC(),
SVC(),
GBM(),
ABC(estimator="DTC"),
ABC(estimator="RFC"),
ABC(estimator="LR"),
KNC(),
ETC(),
GNB(),
BNB(),
GPC(),
BC(estimator="DTC"),
BC(estimator="RFC"),
BC(estimator="LR"),
XGBC(),
]
elif kind == "basic":
models = [
LR(),
MLPC(),
LSVC(),
DTC(),
RFC(),
SVC(),
GBM(),
KNC(),
]
elif kind == "big_data":
models = [
LR(),
QDA(),
LDA(),
LSVC(),
DTC(),
RFC(),
GBM(),
ABC(estimator="DTC"),
ABC(estimator="RFC"),
ABC(estimator="LR"),
ETC(),
GNB(),
BNB(),
BC(estimator="DTC"),
BC(estimator="RFC"),
BC(estimator="LR"),
XGBC(),
]
elif kind == "basic2":
models = [
LR(),
RFC(),
ABC(estimator="DTC"),
ABC(estimator="RFC"),
ABC(estimator="LR"),
BC(estimator="DTC"),
BC(estimator="RFC"),
BC(estimator="LR"),
XGBC(),
]
else:
print(f"Cannot find model combination '{kind}' --> using all models")
models = self.model_combs("all")
return models
def __finish_sound(self):
""" little function to play a microwave sound """
if get_sound_on():
filepath = resource_filename(__name__, 'microwave_finish_sound.mp3')
pygame.mixer.init()
pygame.mixer.music.load(filepath)
pygame.mixer.music.play()
def output_scores_as_pd(self, sort_by: str | list[str] = "index", console_out: bool = True) -> pd.DataFrame:
"""
@param:
sorted_by:
'index': sort index ascending=True
'precision'/'recall'/'accuracy'/'train_score'/'train_time': sort by these columns ascending=False
e.g. ['precision', 'recall'] - sort first by 'precision' and then by 'recall'
"""
if self.scores != {}:
if sort_by == "index":
scores = pd.DataFrame.from_dict(self.scores, orient="index").sort_index(ascending=True)
else:
scores = (
pd.DataFrame.from_dict(self.scores, orient="index")
.sort_values(by=sort_by, ascending=False)
)
if console_out:
print(scores)
else:
logger.warning("no scores are created -> use 'eval_models()'/'eval_models_cv()' to create scores")
scores = None
return scores
def eval_models(
self,
x_train: pd.DataFrame,
y_train: pd.Series,
x_test: pd.DataFrame,
y_test: pd.Series,
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
) -> dict[str, dict]:
"""
@param:
x_train, y_train, x_test, y_test: Data to train and evaluate models
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. pos_label is used by s_score/l_score
secondary_scoring: weights the scoring (only for 's_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for 's_score'/'l_score')
@return:
saves metrics in dict self.scores and also outputs them
"""
try:
for key in tqdm(self.models.keys(), desc="Evaluation"):
tscore, ttime = self.models[key].train(x_train, y_train, console_out=False)
score = self.models[key].evaluate(
x_test, y_test, avg=avg, pos_label=pos_label, console_out=False, secondary_scoring=secondary_scoring, strength=strength,
)
score["train_score"] = tscore
score["train_time"] = ttime
self.scores[key] = score
self.__finish_sound()
return self.scores
except KeyboardInterrupt:
logger.info("KeyboardInterrupt - output interim result")
return self.scores
def eval_models_cv(
self,
X: pd.DataFrame,
y: pd.Series,
cv_num: int = 5,
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
small_data_eval: bool = False,
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
) -> dict[str, dict]:
"""
@param:
X, y: Data to train and evaluate models on
cv_num: number of different splits (ignored if small_data_eval=True)
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. pos_label is used by s_score/l_score
small_data_eval: if True: trains model on all datapoints except one and does this for all datapoints (recommended for datasets with less than 150 datapoints)
secondary_scoring: weights the scoring (only for 's_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for 's_score'/'l_score')
@return:
saves metrics in dict self.scores and also outputs them
"""
try:
for key in tqdm(self.models.keys(), desc="Crossvalidation"):
if small_data_eval:
self.models[key].cross_validation_small_data(
X, y, avg=avg, pos_label=pos_label, console_out=False, leave_loadbar=False, secondary_scoring=secondary_scoring, strength=strength,
)
else:
self.models[key].cross_validation(
X, y, cv_num=cv_num, avg=avg, pos_label=pos_label, console_out=False, secondary_scoring=secondary_scoring, strength=strength,
)
self.scores[key] = self.models[key].cv_scores
self.__finish_sound()
return self.scores
except KeyboardInterrupt:
logger.info("KeyboardInterrupt - output interim result")
return self.scores
def find_best_model_randomCV(
self,
x_train: pd.DataFrame,
y_train: pd.Series,
x_test: pd.DataFrame,
y_test: pd.Series,
n_trails: int = 5,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
small_data_eval: bool = False,
cv_num: int = 3,
leave_loadbar: bool = True,
) -> dict:
"""
@params:
x_train: DataFrame with train features
y_train: Series with train labels
x_test: DataFrame with test features
y_test: Series with test labels
n_trails: number of parameter sets to test per modeltype
scoring: metrics to evaluate the models
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. Else pos_label is ignored (except scoring='s_score'/'l_score')
secondary_scoring: weights the scoring (only for scoring='s_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for scoring='s_score'/'l_score')
small_data_eval: if True: trains model on all datapoints except one and does this for all datapoints (recommended for datasets with less than 150 datapoints)
cv_num: number of different splits per crossvalidation (only used when small_data_eval=False)
leave_loadbar: shall the loading bar of the randomCVsearch of each individual model be visible after training (True - load bar will still be visible)
"""
for key in tqdm(self.models.keys(), desc="randomCVsearch"):
best_hyperparameters, best_score = self.models[key].randomCVsearch(x_train, y_train, n_trails=n_trails, scoring=scoring, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength, small_data_eval=small_data_eval, cv_num=cv_num, leave_loadbar=leave_loadbar)
logger.info(f"{self.models[key].model_name} - score: {best_score} ({scoring}) - parameters: {best_hyperparameters}")
if best_hyperparameters:
model_best = self.models[key].get_deepcopy()
model_best.set_params(**best_hyperparameters)
train_score, train_time = model_best.train(x_train, y_train, console_out=False)
scores = model_best.evaluate(x_test, y_test, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength, console_out=False)
scores["train_time"] = train_time
scores["train_score"] = train_score
scores["best_score (rCVs)"] = best_score
scores["best_hyperparameters (rCVs)"] = best_hyperparameters
self.scores[key] = scores
sorted_scores = self.output_scores_as_pd(sort_by=[scoring, "s_score", "train_time"], console_out=False)
best_model_type = sorted_scores.iloc[0].name
best_model_value = sorted_scores.iloc[0][scoring]
best_model_hyperparameters = sorted_scores.iloc[0]["best_hyperparameters (rCVs)"]
logger.info(f"best model type {best_model_type} - {scoring}: {best_model_value} - parameters: {best_model_hyperparameters}")
self.__finish_sound()
return self.scores
def find_best_model_smac(
self,
x_train: pd.DataFrame,
y_train: pd.Series,
x_test: pd.DataFrame,
y_test: pd.Series,
n_trails: int = 5,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
small_data_eval: bool = False,
cv_num: int = 3,
smac_log_level: int = 30,
walltime_limit_per_modeltype: int = 600,
) -> dict:
"""
@params:
x_train: DataFrame with train features
y_train: Series with train labels
x_test: DataFrame with test features
y_test: Series with test labels
n_trails: max number of parameter sets to test per modeltype
scoring: metrics to evaluate the models
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. Else pos_label is ignored (except scoring='s_score'/'l_score')
secondary_scoring: weights the scoring (only for scoring='s_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for scoring='s_score'/'l_score')
small_data_eval: if True: trains model on all datapoints except one and does this for all datapoints (recommended for datasets with less than 150 datapoints)
cv_num: number of different splits per crossvalidation (only used when small_data_eval=False)
smac_log_level: 10 - DEBUG, 20 - INFO, 30 - WARNING, 40 - ERROR, 50 - CRITICAL (SMAC3 library log levels)
walltime_limit_per_modeltype: the maximum time in seconds that SMAC is allowed to run for each modeltype
"""
for key in tqdm(self.models.keys(), desc="smac_search"):
best_hyperparameters = self.models[key].smac_search(x_train, y_train, n_trails=n_trails, scoring=scoring, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength, small_data_eval=small_data_eval, cv_num=cv_num, walltime_limit=walltime_limit_per_modeltype, log_level=smac_log_level)
logger.info(f"{self.models[key].model_name} - parameters: {best_hyperparameters}")
model_best = self.models[key].get_deepcopy()
model_best.set_params(**best_hyperparameters)
train_score, train_time = model_best.train(x_train, y_train, console_out=False)
scores = model_best.evaluate(x_test, y_test, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength, console_out=False)
scores["train_time"] = train_time
scores["train_score"] = train_score
scores["best_hyperparameters"] = dict(best_hyperparameters)
self.scores[key] = scores
sorted_scores = self.output_scores_as_pd(sort_by=[scoring, "s_score", "train_time"], console_out=False)
best_model_type = sorted_scores.iloc[0].name
best_model_value = sorted_scores.iloc[0][scoring]
best_model_hyperparameters = sorted_scores.iloc[0]["best_hyperparameters"]
logger.info(f"best model type {best_model_type} - {scoring}: {best_model_value} - parameters: {best_model_hyperparameters}")
self.__finish_sound()
return self.scores
def find_best_model_mass_search(self,
x_train: pd.DataFrame,
y_train: pd.Series,
x_test: pd.DataFrame,
y_test: pd.Series,
n_trails: int = 10,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
leave_loadbar: bool = True,
save_results_path: str | None = "find_best_model_mass_search_results.csv",
) -> dict:
"""
@params:
x_train: DataFrame with train features
y_train: Series with train labels
x_test: DataFrame with test features
y_test: Series with test labels
n_trails: number of parameter sets to test per modeltype
scoring: metrics to evaluate the models
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. Else pos_label is ignored (except scoring='s_score'/'l_score')
secondary_scoring: weights the scoring (only for scoring='s_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for scoring='s_score'/'l_score')
leave_loadbar: shall the loading bar of the randomCVsearch of each individual model be visible after training (True - load bar will still be visible)
            save_results_path: path to use for saving the results after each step
"""
model_dict = {}
for key in self.models.keys():
model = self.models[key]
configs = model.get_random_configs(n_trails)
try:
for config in configs:
model_new = model.get_deepcopy()
model_new = model_new.set_params(**config)
if model_new.model_type != "XGBC":
model_new = model_new.set_params(**{"warm_start": True})
model_name = f"{key} {dict(config)}"
model_dict[model_name] = model_new
except:
logger.warning(f"modeltype in '{key}' is not supported for this search -> will be skipped")
total_model_num = len(model_dict)
logger.info(f"total number of models: {total_model_num}")
split_num = int(np.log2(total_model_num))+1
        split_size = int(1 / split_num * len(x_train))
logger.info(f"split number: {split_num-1}, split_size (x_train): {split_size}")
if split_size < 300:
raise RuntimeError(f"not enough data for the amout of models. Data per split should be over 300, but {split_size} < 300")
# shuffle x_train/y_train
x_train = x_train.sample(frac=1, random_state=42)
y_train = y_train.sample(frac=1, random_state=42)
for split_idx in tqdm(range(split_num-1), desc="splits"):
x_train_train = x_train[split_idx*split_size:(split_idx+1)*split_size]
x_train_test = x_train[(split_idx+1)*split_size:]
y_train_train = y_train[split_idx*split_size:(split_idx+1)*split_size]
y_train_test = y_train[(split_idx+1)*split_size:]
logger.info(f"split {split_idx+1}: length x_train/y_train {len(x_train_train)}/{len(y_train_train)}, length x_test/y_test {len(x_train_test)}/{len(y_train_test)}")
split_scores: dict = {}
best_score: float = -1
# train models in model_dict
for key in tqdm(model_dict.keys(), desc=f"split {split_idx+1}", leave=leave_loadbar):
# train data classes in first split on all train data
if split_idx == 0:
pre_x, _ = model_dict[key]._Pipeline__data_prepare(x_train, y_train)
logger.debug(f"total length of train data after pipeline pre-processing: {len(pre_x)} ({key})")
# XGBoostClassifier has different warm_start implementation
if model_dict[key].model_type != "XGBC" or split_idx==0:
tscore, ttime = model_dict[key].train_warm_start(x_train_train, y_train_train, scoring=scoring, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength, console_out=False)
else:
start = time.time()
model_dict[key].fit_warm_start(x_train_train, y_train_train, xgb_model=model_dict[key].model)
end = time.time()
tscore, ttime = model_dict[key].evaluate_score(x_train_train, y_train_train, scoring=scoring, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength), str(timedelta(seconds=int(end-start)))
score = model_dict[key].evaluate(x_train_test, y_train_test, avg=avg, pos_label=pos_label, console_out=False, secondary_scoring=secondary_scoring, strength=strength)
score["train_score"] = tscore
score["train_time"] = ttime
split_scores[key] = score
sorted_split_scores = dict(sorted(split_scores.items(), key=lambda item: (item[1][scoring], item[1]["s_score"], item[1]["train_time"]), reverse=True))
if score[scoring] > best_score:
best_model_name = list(sorted_split_scores.keys())[0]
logger.info(f"new best {scoring}: {best_score} -> {score[scoring]} ({best_model_name})")
best_score = score[scoring]
sorted_split_scores_pd = pd.DataFrame(sorted_split_scores).transpose()
# save model scores
if save_results_path is not None:
sorted_split_scores_pd.to_csv(save_results_path.split(".")[0]+f"_split{split_idx+1}."+save_results_path.split(".")[1])
logger.info(f"Split scores (top 5): \n{sorted_split_scores_pd.head(5)}")
# only keep better half of the models
for key in list(sorted_split_scores.keys())[int(len(sorted_split_scores)/2):]:
del model_dict[key]
logger.info(f"removed {len(sorted_split_scores)-len(model_dict)} models")
best_model_name = list(sorted_split_scores.keys())[0]
best_model = model_dict[list(sorted_split_scores.keys())[0]]
logger.info(f"Evaluating best model: \n\n{best_model_name}\n")
x_train_train = x_train[int(split_idx*1/split_num*len(x_train)):]
y_train_train = y_train[int(split_idx*1/split_num*len(y_train)):]
tscore, ttime = best_model.train_warm_start(x_train_train, y_train_train, console_out=False)
score = best_model.evaluate(x_test, y_test, avg=avg, pos_label=pos_label, console_out=True, secondary_scoring=secondary_scoring, strength=strength)
score["train_score"] = tscore
score["train_time"] = ttime
return best_model_name, score
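# --- Hedged usage sketch (not part of the original module) ---
# Minimal end-to-end example of CTest on a synthetic binary dataset; the feature
# names, split sizes and the "basic" model combination are illustrative choices.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    X, y = make_classification(n_samples=500, n_features=10, random_state=42)
    X = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(10)])
    y = pd.Series(y)
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
    ctest = CTest(models="basic")
    ctest.eval_models(x_train, y_train, x_test, y_test)
    ctest.output_scores_as_pd(sort_by="accuracy")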
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/ClassifierTest.py
| 0.417509 | 0.168754 |
ClassifierTest.py
|
pypi
|
from ConfigSpace import ConfigurationSpace, Float, Integer, Normal
from xgboost import XGBClassifier
from sam_ml.config import get_n_jobs
from .main_classifier import Classifier
class XGBC(Classifier):
""" SupportVectorClassifier Wrapper class """
def __init__(
self,
model_name: str = "XGBClassifier",
        n_jobs: int = get_n_jobs(),
random_state: int = 42,
**kwargs,
):
"""
@param (important one):
random_state: random_state for model
n_jobs: how many cores shall be used (-1 means all)
"""
model_type = "XGBC"
model = XGBClassifier(
n_jobs=n_jobs,
random_state=random_state,
**kwargs,
)
grid = ConfigurationSpace(
seed=42,
space={
"max_depth": Integer("max_depth", (3, 10), default=6),
"gamma": Float('gamma', (0, 9), default=0),
'reg_alpha' : Integer('reg_alpha', (0, 180), default=0),
'reg_lambda' : Float('reg_lambda', (0, 1), default=1),
'colsample_bytree' : Float('colsample_bytree', (0.5, 1), default=1),
'min_child_weight' : Integer('min_child_weight', (0, 10), default=1),
'n_estimators': Integer("n_estimators", bounds=(50, 750), distribution=Normal(150, 100), default=100),
"learning_rate": Float("learning_rate", bounds=(0.001, 0.30), log=True, default=0.1),
})
# workaround for now -> Problems with Normal distribution (in smac_search) (04/07/2023)
self.smac_grid = ConfigurationSpace(
seed=42,
space={
"max_depth": Integer("max_depth", (3, 10), default=6),
"gamma": Float('gamma', (0, 9), default=0),
'reg_alpha' : Integer('reg_alpha', (0, 180), default=0),
'reg_lambda' : Float('reg_lambda', (0, 1), default=1),
'colsample_bytree' : Float('colsample_bytree', (0.5, 1), default=1),
'min_child_weight' : Integer('min_child_weight', (0, 10), default=1),
'n_estimators': Integer("n_estimators", bounds=(50, 750), default=100),
"learning_rate": Float("learning_rate", bounds=(0.001, 0.30), log=True, default=0.1),
})
super().__init__(model, model_name, model_type, grid)
def feature_importance(self):
super().feature_importance()
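# --- Hedged usage sketch (not part of the original module) ---
# Trains the XGBoost wrapper on a small synthetic dataset; the hyperparameter
# values and column names are illustrative only.
if __name__ == "__main__":
    import pandas as pd
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=8, random_state=42)
    X = pd.DataFrame(X, columns=[f"f{i}" for i in range(8)])
    y = pd.Series(y)
    model = XGBC(n_estimators=50, max_depth=3)
    model.train(X, y, scoring="accuracy")  # prints train score and train time
    print(model.predict(X)[:10])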
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/XGBoostClassifier.py
| 0.697197 | 0.232779 |
XGBoostClassifier.py
|
pypi
|
from ConfigSpace import Beta, Categorical, ConfigurationSpace, Float, Integer
from sklearn.base import ClassifierMixin
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from .main_classifier import Classifier
class ABC(Classifier):
""" AdaBoostClassifier Wrapper class """
def __init__(
self,
model_name: str = "AdaBoostClassifier",
random_state: int = 42,
estimator: str | ClassifierMixin = "DTC",
**kwargs,
):
"""
@param (important one):
            estimator: base estimator from which the boosted ensemble is built (default: DecisionTreeClassifier with max_depth=1); alternatively one of the strings 'DTC', 'RFC', or 'LR'
            n_estimators: number of boosting stages to perform
learning_rate: shrinks the contribution of each tree by learning rate
algorithm: boosting algorithm
random_state: random_state for model
"""
model_type = "ABC"
kwargs_estimator = {}
kwargs_ABC = {}
for key in kwargs:
if key.startswith("estimator__"):
new_key = key.removeprefix("estimator__")
kwargs_estimator[new_key] = kwargs[key]
else:
kwargs_ABC[key] = kwargs[key]
if type(estimator) == str:
model_name += f" ({estimator} based)"
if estimator == "DTC":
if not kwargs_estimator:
estimator = DecisionTreeClassifier(max_depth=1)
else:
estimator = DecisionTreeClassifier(**kwargs_estimator)
elif estimator == "RFC":
if not kwargs_estimator:
estimator = RandomForestClassifier(max_depth=5, n_estimators=50, random_state=42)
else:
if not "random_state" in kwargs_estimator:
estimator = RandomForestClassifier(**kwargs_estimator, random_state=42)
else:
estimator = RandomForestClassifier(**kwargs_estimator)
elif estimator == "LR":
if not kwargs_estimator:
estimator = LogisticRegression(random_state=42)
else:
if not "random_state" in kwargs_estimator:
estimator = LogisticRegression(**kwargs_estimator, random_state=42)
else:
estimator = LogisticRegression(**kwargs_estimator)
else:
raise ValueError(f"invalid string input ('{estimator}') for estimator -> use 'DTC', 'RFC', or 'LR'")
model = AdaBoostClassifier(
random_state=random_state,
estimator=estimator,
**kwargs_ABC,
)
grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (10, 3000), log=True, default=50),
"learning_rate": Float("learning_rate", (0.005, 2), distribution=Beta(10, 5), default=1),
"algorithm": Categorical("algorithm", ["SAMME.R", "SAMME"], default="SAMME.R"),
})
if type(model.estimator) == RandomForestClassifier:
grid.add_hyperparameter(Integer("estimator__max_depth", (1, 11), default=5))
grid.add_hyperparameter(Integer("estimator__n_estimators", (5, 100), log=True, default=50))
elif type(model.estimator) == DecisionTreeClassifier:
grid.add_hyperparameter(Integer("estimator__max_depth", (1, 11), default=1))
super().__init__(model, model_name, model_type, grid)
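# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates how "estimator__"-prefixed kwargs are routed to the base estimator
# while the remaining kwargs go to AdaBoostClassifier itself; values are
# illustrative only.
if __name__ == "__main__":
    abc_default = ABC()  # DecisionTreeClassifier(max_depth=1) as base estimator
    abc_custom = ABC(estimator="RFC", n_estimators=100, estimator__max_depth=3)
    print(abc_default)
    print(abc_custom.model)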
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/AdaBoostClassifier.py
| 0.807043 | 0.263289 |
AdaBoostClassifier.py
|
pypi
|
import warnings
from ConfigSpace import Beta, Categorical, ConfigurationSpace, Float, Integer
from sklearn.base import ClassifierMixin
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sam_ml.config import get_n_jobs
from .main_classifier import Classifier
warnings.filterwarnings('ignore')
class BC(Classifier):
""" BaggingClassifier Wrapper class """
def __init__(
self,
model_name: str = "BaggingClassifier",
random_state: int = 42,
n_jobs: int = get_n_jobs(),
estimator: str | ClassifierMixin = "DTC",
**kwargs,
):
"""
@param (important one):
            estimator: base estimator from which the bagging ensemble is built (default: DecisionTreeClassifier with max_depth=1); alternatively one of the strings 'DTC', 'RFC', or 'LR'
            n_estimators: number of base estimators in the ensemble
max_samples: the number of samples to draw from X to train each base estimator
max_features: the number of features to draw from X to train each base estimator
bootstrap: whether samples are drawn with replacement. If False, sampling without replacement is performed
bootstrap_features: whether features are drawn with replacement
"""
model_type = "BC"
kwargs_estimator = {}
kwargs_BC = {}
for key in kwargs:
if key.startswith("estimator__"):
new_key = key.removeprefix("estimator__")
kwargs_estimator[new_key] = kwargs[key]
else:
kwargs_BC[key] = kwargs[key]
if type(estimator) == str:
model_name += f" ({estimator} based)"
if estimator == "DTC":
if not kwargs_estimator:
estimator = DecisionTreeClassifier(max_depth=1)
else:
estimator = DecisionTreeClassifier(**kwargs_estimator)
elif estimator == "RFC":
if not kwargs_estimator:
estimator = RandomForestClassifier(max_depth=5, n_estimators=50, random_state=42)
else:
if not "random_state" in kwargs_estimator:
estimator = RandomForestClassifier(**kwargs_estimator, random_state=42)
else:
estimator = RandomForestClassifier(**kwargs_estimator)
elif estimator == "LR":
if not kwargs_estimator:
estimator = LogisticRegression(random_state=42)
else:
if not "random_state" in kwargs_estimator:
estimator = LogisticRegression(**kwargs_estimator, random_state=42)
else:
estimator = LogisticRegression(**kwargs_estimator)
else:
raise ValueError(f"invalid string input ('{estimator}') for estimator -> use 'DTC', 'RFC', or 'LR'")
model = BaggingClassifier(
random_state=random_state,
n_jobs=n_jobs,
estimator=estimator,
**kwargs_BC,
)
grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (3, 3000), distribution=Beta(1, 15), default=10),
"max_samples": Float("max_samples", (0.1, 1), default=1),
"max_features": Categorical("max_features", [0.5, 0.9, 1.0, 2, 4], default=1.0),
"bootstrap": Categorical("bootstrap", [True, False], default=True),
"bootstrap_features": Categorical("bootstrap_features", [True, False], default=False),
})
if type(model.estimator) == RandomForestClassifier:
grid.add_hyperparameter(Integer("estimator__max_depth", (1, 11), default=5))
grid.add_hyperparameter(Integer("estimator__n_estimators", (5, 100), log=True, default=50))
elif type(model.estimator) == DecisionTreeClassifier:
grid.add_hyperparameter(Integer("estimator__max_depth", (1, 11), default=1))
super().__init__(model, model_name, model_type, grid)
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/BaggingClassifier.py
| 0.760651 | 0.248067 |
BaggingClassifier.py
|
pypi
|
import pickle
import time
from copy import deepcopy
from datetime import timedelta
import pandas as pd
from sam_ml.config import setup_logger
logger = setup_logger(__name__)
class Model:
""" Model parent class """
def __init__(self, model_object = None, model_name: str = "model", model_type: str = "Model"):
"""
@params:
model_object: model with 'fit', 'predict', 'set_params', and 'get_params' method (see sklearn API)
model_name: name of the model
model_type: kind of estimator (e.g. 'RFC' for RandomForestClassifier)
"""
self.model = model_object
self.model_name = model_name
self.model_type = model_type
self.train_score: float = None
self.train_time: str = None
self.feature_names: list = []
def __repr__(self) -> str:
return f"Model(model_object={self.model.__str__()}, model_name='{self.model_name}', model_type='{self.model_type}')"
def train(self, x_train: pd.DataFrame, y_train: pd.Series, console_out: bool = True, **kwargs) -> tuple[float, str]:
"""
@return:
tuple of train score and train time
"""
logger.debug(f"training {self.model_name} - started")
start_time = time.time()
self.fit(x_train, y_train)
end_time = time.time()
self.train_score = self.evaluate_score(x_train, y_train, **kwargs)
self.train_time = str(timedelta(seconds=int(end_time-start_time)))
if console_out:
print("Train score: ", self.train_score, " - Train time: ", self.train_time)
logger.debug(f"training {self.model_name} - finished")
return self.train_score, self.train_time
def train_warm_start(self, x_train: pd.DataFrame, y_train: pd.Series, console_out: bool = True, **kwargs) -> tuple[float, str]:
"""
@return:
tuple of train score and train time
"""
logger.debug(f"training {self.model_name} - started")
start_time = time.time()
self.fit_warm_start(x_train, y_train)
end_time = time.time()
self.train_score = self.evaluate_score(x_train, y_train, **kwargs)
self.train_time = str(timedelta(seconds=int(end_time-start_time)))
if console_out:
print("Train score: ", self.train_score, " - Train time: ", self.train_time)
logger.debug(f"training {self.model_name} - finished")
return self.train_score, self.train_time
def fit(self, x_train: pd.DataFrame, y_train: pd.Series, **kwargs):
self.feature_names = list(x_train.columns)
self.model.fit(x_train, y_train, **kwargs)
return self
def fit_warm_start(self, x_train: pd.DataFrame, y_train: pd.Series, **kwargs):
self.feature_names = list(x_train.columns)
self.model.fit(x_train, y_train, **kwargs)
return self
def predict(self, x_test: pd.DataFrame) -> list:
"""
@return:
list with predictions
"""
return list(self.model.predict(x_test))
def get_params(self, deep: bool = True):
return self.model.get_params(deep)
def set_params(self, **params):
self.model.set_params(**params)
return self
def evaluate_score(self, x_test: pd.DataFrame, y_test: pd.Series, **kwargs) -> float:
score = self.model.score(x_test, y_test)
return score
def get_deepcopy(self):
"""function to create a deepcopy of object"""
return deepcopy(self)
def save_model(self, path: str, only_estimator: bool = False):
"""
function to pickle and save the Class object
@params:
path: path to save the model with suffix '.pkl'
only_estimator: if True, only the estimator of the class object will be saved
"""
logger.debug(f"saving {self.model_name} - started")
with open(path, "wb") as f:
if only_estimator:
pickle.dump(self.model, f)
else:
pickle.dump(self, f)
logger.debug(f"saving {self.model_name} - finished")
@staticmethod
def load_model(path: str):
""" function to load a pickled model class object """
logger.debug("loading model - started")
with open(path, "rb") as f:
model = pickle.load(f)
logger.debug("loading model - finished")
return model
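# --- Hedged usage sketch (not part of the original module) ---
# Wraps a plain sklearn estimator in Model, trains it, and round-trips it through
# save_model()/load_model(). The file name and toy data are illustrative.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X, y = make_classification(n_samples=100, n_features=5, random_state=0)
    X = pd.DataFrame(X, columns=[f"f{i}" for i in range(5)])
    y = pd.Series(y)
    m = Model(LogisticRegression(max_iter=200), model_name="LR demo", model_type="LR")
    m.train(X, y)  # prints train score and train time
    m.save_model("lr_demo.pkl")
    restored = Model.load_model("lr_demo.pkl")
    print(restored.predict(X)[:5])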
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/main_model.py
| 0.723016 | 0.254416 |
main_model.py
|
pypi
|
from ConfigSpace import Categorical, ConfigurationSpace, Float, Integer, Normal
from sklearn.ensemble import GradientBoostingClassifier
from .main_classifier import Classifier
class GBM(Classifier):
""" GradientBoostingMachine Wrapper class """
def __init__(
self,
model_name: str = "GradientBoostingMachine",
random_state: int = 42,
**kwargs,
):
"""
@param (important one):
            n_estimators: number of boosting stages to perform
criterion: function to measure the quality of a split
max_depth: Maximum number of levels in tree
min_samples_split: Minimum number of samples required to split a node
min_samples_leaf: Minimum number of samples required at each leaf node
max_features: number of features to consider when looking for the best split
subsample: fraction of samples to be used for fitting the individual base learners
loss: The loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recovers the AdaBoost algorithm
learning_rate: shrinks the contribution of each tree by learning rate
warm_start: work with previous fit and add more estimator
random_state: random_state for model
"""
model_type = "GBM"
model = GradientBoostingClassifier(random_state=random_state, **kwargs,)
grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (20, 1500), log=True, default=100),
"max_depth": Integer("max_depth", (1, 15), distribution=Normal(5, 3), default=3),
"min_samples_split": Integer("min_samples_split", (2, 100), log=True, default=2),
"min_samples_leaf": Integer("min_samples_leaf", (1, 100), log=True, default=1),
"max_features": Categorical("max_features", [1.0, "sqrt", "log2"], default=1.0),
"subsample": Float("subsample", (0.7, 1), default=1),
"criterion": Categorical("criterion", ["friedman_mse", "squared_error"], default="friedman_mse"),
"learning_rate": Float("learning_rate", (0.005, 0.3), log=True, default=0.1),
})
# workaround for now -> Problems with Normal distribution (in smac_search) (04/07/2023)
self.smac_grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (20, 1500), log=True, default=100),
"max_depth": Integer("max_depth", (1, 15), default=3),
"min_samples_split": Integer("min_samples_split", (2, 100), log=True, default=2),
"min_samples_leaf": Integer("min_samples_leaf", (1, 100), log=True, default=1),
"max_features": Categorical("max_features", [1.0, "sqrt", "log2"], default=1.0),
"subsample": Float("subsample", (0.7, 1), default=1),
"criterion": Categorical("criterion", ["friedman_mse", "squared_error"], default="friedman_mse"),
"learning_rate": Float("learning_rate", (0.005, 0.3), log=True, default=0.1),
})
super().__init__(model, model_name, model_type, grid)
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/GradientBoostingMachine.py
| 0.923463 | 0.560132 |
GradientBoostingMachine.py
|
pypi
|
from ConfigSpace import Categorical, ConfigurationSpace, Integer, Normal
from sklearn.ensemble import RandomForestClassifier
from sam_ml.config import get_n_jobs
from .main_classifier import Classifier
class RFC(Classifier):
""" RandomForestClassifier Wrapper class """
def __init__(
self,
model_name: str = "RandomForestClassifier",
n_jobs: int = get_n_jobs(),
random_state: int = 42,
**kwargs,
):
"""
@param (important one):
n_estimators: Number of trees in random forest
max_depth: Maximum number of levels in tree
n_jobs: how many cores shall be used (-1 means all)
random_state: random_state for model
verbose: log level (higher number --> more logs)
warm_start: work with previous fit and add more estimator
max_features: Number of features to consider at every split
min_samples_split: Minimum number of samples required to split a node
min_samples_leaf: Minimum number of samples required at each leaf node
bootstrap: Method of selecting samples for training each tree
criterion: function to measure the quality of a split
"""
model_type = "RFC"
model = RandomForestClassifier(
n_jobs=n_jobs,
random_state=random_state,
**kwargs,
)
grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (10, 1000), log=True, default=100),
"max_depth": Integer("max_depth", (3, 15), distribution=Normal(5, 3), default=5),
"min_samples_split": Integer("min_samples_split", (2, 10), default=2),
"min_samples_leaf": Integer("min_samples_leaf", (1, 4), default=1),
"bootstrap": Categorical("bootstrap", [True, False], default=True),
"criterion": Categorical("criterion", ["gini", "entropy"], default="gini"),
})
# workaround for now -> Problems with Normal distribution (in smac_search) (04/07/2023)
self.smac_grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (10, 1000), log=True, default=100),
"max_depth": Integer("max_depth", (3, 15), default=5),
"min_samples_split": Integer("min_samples_split", (2, 10), default=2),
"min_samples_leaf": Integer("min_samples_leaf", (1, 4), default=1),
"bootstrap": Categorical("bootstrap", [True, False], default=True),
"criterion": Categorical("criterion", ["gini", "entropy"], default="gini"),
})
super().__init__(model, model_name, model_type, grid)
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/RandomForestClassifier.py
| 0.87464 | 0.329257 |
RandomForestClassifier.py
|
pypi
|
from ConfigSpace import Categorical, ConfigurationSpace, Float
from sklearn.neural_network import MLPClassifier
from .main_classifier import Classifier
class MLPC(Classifier):
""" MLP Classifier Wrapper class """
def __init__(
self,
model_name: str = "MLP Classifier",
random_state: int = 42,
**kwargs,
):
"""
@param (important one):
hidden_layer_sizes: the ith element represents the number of neurons in the ith hidden layer
activation: activation function for the hidden layer
solver: solver for weight optimization
alpha: l2 penalty (regularization term) parameter
learning_rate: learning rate schedule for weight updates
warm_start: work with previous fit and add more estimator
tol: Tolerance for stopping criteria
max_iter: Maximum number of iterations taken for the solvers to converge
random_state: random_state for model
verbose: logging (True/False)
batch_size: Size of minibatches for stochastic optimizers
            early_stopping: if True, sets aside 10% of the training data as a validation set and stops when the score does not improve for 'n_iter_no_change' consecutive iterations
"""
model_type = "MLPC"
model = MLPClassifier(
random_state=random_state,
**kwargs,
)
grid = ConfigurationSpace(
seed=42,
space={
"hidden_layer_sizes": Categorical("hidden_layer_sizes", ((10, 30, 10), (20,), (10,), (100,), (50,50,50), (50,100,50)), default=(100, )),
"activation": Categorical("activation", ["tanh", "relu", "logistic"], default="relu"),
"solver": Categorical("solver", ["sgd", "adam"], default="adam"),
"alpha": Float("alpha", (0.0001, 0.05), log=True, default=0.0001),
"learning_rate": Categorical("learning_rate", ["constant", "adaptive"], default="constant"),
})
super().__init__(model, model_name, model_type, grid)
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/MLPClassifier.py
| 0.898204 | 0.387864 |
MLPClassifier.py
|
pypi
|
import math
from sklearn.metrics import precision_score, recall_score
def samuel_function(x: float) -> float:
return math.sqrt(1/(1 + math.e**(12*(0.5-x))))
def lewis_function(x: float) -> float:
return 1-(0.5-0.5*math.cos((x-1)*math.pi))**4
def s_scoring(y_true: list, y_pred: list, scoring: str = None, pos_label: int = -1, strength: int = 3, score_func = samuel_function) -> float:
"""
@param:
y_true, y_pred: data to evaluate on
scoring:
None: no preference between precision and recall
'precision': take precision more into account
'recall': take recall more into account
pos_label:
pos_label > 0: take <scoring> in class <pos_label> more into account
pos_label = -1: handle all classes the same
strength: higher strength means a higher weight for the preferred scoring/pos_label
score_func: function to use for scoring (default: samuel_function)
@return:
score as float between 0 and 1
"""
if strength < 1:
raise ValueError(f"strength has to be positiv integer greater-equal 1, but strength={strength}")
prec = precision_score(y_true, y_pred, average=None)
rec = recall_score(y_true, y_pred, average=None)
score = 1.0
for i in range(len(prec)):
if (scoring=='precision' and pos_label==i) or (scoring=='precision' and type(pos_label)==int and pos_label<=0) or (scoring==None and pos_label==i):
score *= score_func(prec[i])**strength
else:
score *= score_func(prec[i])
for i in range(len(rec)):
if (scoring=='recall' and pos_label==i) or (scoring=='recall' and type(pos_label)==int and pos_label<=0) or (scoring==None and pos_label==i):
score *= score_func(rec[i])**strength
else:
score *= score_func(rec[i])
return score
def l_scoring(y_true: list, y_pred: list, scoring: str = None, pos_label: int = -1, strength: int = 2, score_func = lewis_function) -> float:
return s_scoring(y_true, y_pred, scoring, pos_label, strength, score_func)
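# --- Hedged usage sketch (not part of the original module) ---
# Compares the combined precision/recall score with and without a preference for
# recall on class 1; the label vectors are illustrative.
if __name__ == "__main__":
    y_true = [0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = [0, 0, 1, 1, 1, 1, 0, 0]
    print("neutral:        ", s_scoring(y_true, y_pred))
    print("favour recall/1:", s_scoring(y_true, y_pred, scoring="recall", pos_label=1, strength=3))
    print("lewis variant:  ", l_scoring(y_true, y_pred, scoring="recall", pos_label=1))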
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/scorer.py
| 0.71123 | 0.558568 |
scorer.py
|
pypi
|
import inspect
import os
import sys
import warnings
from datetime import timedelta
from statistics import mean
import numpy as np
import pandas as pd
from ConfigSpace import Configuration, ConfigurationSpace
from matplotlib import pyplot as plt
from sklearn.exceptions import NotFittedError
from sklearn.metrics import (
accuracy_score,
classification_report,
make_scorer,
precision_score,
recall_score,
)
from sklearn.model_selection import cross_validate
from tqdm.auto import tqdm
from sam_ml.config import (
get_avg,
get_n_jobs,
get_pos_label,
get_scoring,
get_secondary_scoring,
get_strength,
setup_logger,
)
from .main_model import Model
from .scorer import l_scoring, s_scoring
SMAC_INSTALLED: bool
try:
from smac import HyperparameterOptimizationFacade, Scenario
SMAC_INSTALLED = True
except:
SMAC_INSTALLED = False
logger = setup_logger(__name__)
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.environ["PYTHONWARNINGS"] = "ignore" # Also affects subprocesses
class Classifier(Model):
""" Classifier parent class """
def __init__(self, model_object = None, model_name: str = "classifier", model_type: str = "Classifier", grid: ConfigurationSpace = ConfigurationSpace()):
"""
@params:
model_object: model with 'fit', 'predict', 'set_params', and 'get_params' method (see sklearn API)
model_name: name of the model
model_type: kind of estimator (e.g. 'RFC' for RandomForestClassifier)
grid: hyperparameter grid for the model
"""
super().__init__(model_object, model_name, model_type)
self._grid = grid
self.cv_scores: dict[str, float] = {}
self.rCVsearch_results: pd.DataFrame|None = None
def __repr__(self) -> str:
params: str = ""
param_dict = self._changed_parameters()
for key in param_dict:
if type(param_dict[key]) == str:
params+= key+"='"+str(param_dict[key])+"', "
else:
params+= key+"="+str(param_dict[key])+", "
params += f"model_name='{self.model_name}'"
return f"{self.model_type}({params})"
def _changed_parameters(self):
params = self.get_params(deep=False)
init_params = inspect.signature(self.__init__).parameters
init_params = {name: param.default for name, param in init_params.items()}
init_params_estimator = inspect.signature(self.model.__init__).parameters
init_params_estimator = {name: param.default for name, param in init_params_estimator.items()}
def has_changed(k, v):
if k not in init_params: # happens if k is part of a **kwargs
if k not in init_params_estimator: # happens if k is part of a **kwargs
return True
else:
if v != init_params_estimator[k]:
return True
else:
return False
if init_params[k] == inspect._empty: # k has no default value
return True
elif init_params[k] != v:
return True
return False
return {k: v for k, v in params.items() if has_changed(k, v)}
@property
def grid(self):
"""
@return:
hyperparameter tuning grid of the model
"""
return self._grid
def get_random_config(self):
"""
        @return:
            set of random parameters from the grid
"""
return dict(self.grid.sample_configuration(1))
def get_random_configs(self, n_trails: int) -> list:
"""
        @return:
            list with up to n_trails sets of random parameters from the grid
            NOTE: duplicates are filtered out -> the result can contain fewer than n_trails configurations
"""
        if n_trails < 1:
            raise ValueError(f"n_trails has to be greater than 0, but {n_trails} < 1")
configs = [self._grid.get_default_configuration()]
if n_trails == 2:
configs += [self._grid.sample_configuration(n_trails-1)]
else:
configs += self._grid.sample_configuration(n_trails-1)
# remove duplicates
configs = list(dict.fromkeys(configs))
return configs
def replace_grid(self, new_grid: ConfigurationSpace):
"""
function to replace self.grid
e.g.:
ConfigurationSpace(
seed=42,
space={
"solver": Categorical("solver", ["lsqr", "eigen", "svd"]),
"shrinkage": Float("shrinkage", (0, 1)),
})
"""
self._grid = new_grid
def train(
self,
x_train: pd.DataFrame,
y_train: pd.Series,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
console_out: bool = True
) -> tuple[float, str]:
"""
@return:
tuple of train score and train time
"""
return super().train(x_train, y_train, console_out, scoring=scoring, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength)
def train_warm_start(
self,
x_train: pd.DataFrame,
y_train: pd.Series,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
console_out: bool = True
) -> tuple[float, str]:
"""
@return:
tuple of train score and train time
"""
return super().train_warm_start(x_train, y_train, console_out, scoring=scoring, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength)
def evaluate(
self,
x_test: pd.DataFrame,
y_test: pd.Series,
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
console_out: bool = True,
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
) -> dict[str, float]:
"""
@param:
x_test, y_test: Data to evaluate model
avg: average to use for precision and recall score (e.g. "micro", None, "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. pos_label is used by s_score/l_score
console_out: shall the result be printed into the console
secondary_scoring: weights the scoring (only for 's_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for 's_score'/'l_score')
@return: dictionary with keys with scores: 'accuracy', 'precision', 'recall', 's_score', 'l_score'
"""
pred = self.predict(x_test)
# Calculate Accuracy, Precision and Recall Metrics
accuracy = accuracy_score(y_test, pred)
precision = precision_score(y_test, pred, average=avg, pos_label=pos_label)
recall = recall_score(y_test, pred, average=avg, pos_label=pos_label)
s_score = s_scoring(y_test, pred, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
l_score = l_scoring(y_test, pred, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
if console_out:
print("accuracy: ", accuracy)
print("precision: ", precision)
print("recall: ", recall)
print("s_score: ", s_score)
print("l_score: ", l_score)
print()
print("classification report: ")
print(classification_report(y_test, pred))
scores = {
"accuracy": accuracy,
"precision": precision,
"recall": recall,
"s_score": s_score,
"l_score": l_score,
}
return scores
def evaluate_score(
self,
x_test: pd.DataFrame,
y_test: pd.Series,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
) -> float:
"""
@param:
x_test, y_test: Data to evaluate model
scoring: metrics to evaluate the models ("accuracy", "precision", "recall", "s_score", "l_score")
avg: average to use for precision and recall score (e.g. "micro", None, "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. pos_label is used by s_score/l_score
secondary_scoring: weights the scoring (only for 's_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for 's_score'/'l_score')
@return: score as float
"""
pred = self.predict(x_test)
# Calculate score
if scoring == "accuracy":
score = accuracy_score(y_test, pred)
elif scoring == "precision":
score = precision_score(y_test, pred, average=avg, pos_label=pos_label)
elif scoring == "recall":
score = recall_score(y_test, pred, average=avg, pos_label=pos_label)
elif scoring == "s_score":
score = s_scoring(y_test, pred, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
elif scoring == "l_score":
score = l_scoring(y_test, pred, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
else:
raise ValueError(f"scoring='{scoring}' is not supported -> only 'accuracy', 'precision', 'recall', 's_score', or 'l_score' ")
return score
def cross_validation(
self,
X: pd.DataFrame,
y: pd.Series,
cv_num: int = 10,
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
console_out: bool = True,
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
) -> dict[str, float]:
"""
@param:
X, y: data to cross validate on
cv_num: number of different splits
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. pos_label is used by s_score/l_score
console_out: shall the result be printed into the console
secondary_scoring: weights the scoring (only for 's_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for 's_score'/'l_score')
@return:
dictionary with "accuracy", "precision", "recall", "s_score", "l_score", train_score", "train_time"
"""
logger.debug(f"cross validation {self.model_name} - started")
precision_scorer = make_scorer(precision_score, average=avg, pos_label=pos_label)
recall_scorer = make_scorer(recall_score, average=avg, pos_label=pos_label)
s_scorer = make_scorer(s_scoring, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
l_scorer = make_scorer(l_scoring, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
if avg == "binary":
scorer = {
f"precision ({avg}, label={pos_label})": precision_scorer,
f"recall ({avg}, label={pos_label})": recall_scorer,
"accuracy": "accuracy",
"s_score": s_scorer,
"l_score": l_scorer,
}
else:
scorer = {
f"precision ({avg})": precision_scorer,
f"recall ({avg})": recall_scorer,
"accuracy": "accuracy",
"s_score": s_scorer,
"l_score": l_scorer,
}
cv_scores = cross_validate(
self,
X,
y,
scoring=scorer,
cv=cv_num,
return_train_score=True,
n_jobs=get_n_jobs(),
)
pd_scores = pd.DataFrame(cv_scores).transpose()
pd_scores["average"] = pd_scores.mean(numeric_only=True, axis=1)
score = pd_scores["average"]
self.cv_scores = {
"accuracy": score[list(score.keys())[6]],
"precision": score[list(score.keys())[2]],
"recall": score[list(score.keys())[4]],
"s_score": score[list(score.keys())[8]],
"l_score": score[list(score.keys())[10]],
"train_score": score[list(score.keys())[7]],
"train_time": str(timedelta(seconds = round(score[list(score.keys())[0]]))),
}
logger.debug(f"cross validation {self.model_name} - finished")
if console_out:
print()
print(pd_scores)
return self.cv_scores
def cross_validation_small_data(
self,
X: pd.DataFrame,
y: pd.Series,
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
leave_loadbar: bool = True,
console_out: bool = True,
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
) -> dict[str, float]:
"""
Cross validation for small datasets (recommended for datasets with less than 150 datapoints)
@param:
X, y: data to cross validate on
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. pos_label is used by s_score/l_score
leave_loadbar: shall the loading bar of the training be visible after training (True - load bar will still be visible)
console_out: shall the result be printed into the console
secondary_scoring: weights the scoring (only for 's_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for 's_score'/'l_score')
@return:
dictionary with "accuracy", "precision", "recall", "s_score", "l_score", train_score", "train_time"
"""
logger.debug(f"cross validation {self.model_name} - started")
predictions = []
true_values = []
t_scores = []
t_times = []
for idx in tqdm(X.index, desc=self.model_name, leave=leave_loadbar):
x_train = X.drop(idx)
y_train = y.drop(idx)
x_test = X.loc[[idx]]
y_test = y.loc[idx]
train_score, train_time = self.train(x_train, y_train, console_out=False)
prediction = self.predict(x_test)
predictions.append(prediction)
true_values.append(y_test)
t_scores.append(train_score)
t_times.append(train_time)
accuracy = accuracy_score(true_values, predictions)
precision = precision_score(true_values, predictions, average=avg, pos_label=pos_label)
recall = recall_score(true_values, predictions, average=avg, pos_label=pos_label)
s_score = s_scoring(true_values, predictions, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
l_score = l_scoring(true_values, predictions, strength=strength, scoring=secondary_scoring, pos_label=pos_label)
        avg_train_score = mean(t_scores)
        # average the "H:MM:SS" train-time strings in seconds and format the result back
        t_times_seconds = [int(h) * 3600 + int(m) * 60 + int(s) for h, m, s in (t.split(":") for t in t_times)]
        avg_train_time = str(timedelta(seconds=round(sum(t_times_seconds) / len(t_times_seconds))))
self.cv_scores = {
"accuracy": accuracy,
"precision": precision,
"recall": recall,
"s_score": s_score,
"l_score": l_score,
"train_score": avg_train_score,
"train_time": avg_train_time,
}
logger.debug(f"cross validation {self.model_name} - finished")
if console_out:
print()
print("classification report:")
print(classification_report(true_values, predictions))
return self.cv_scores
    def feature_importance(self) -> None:
"""
feature_importance() generates a matplotlib plot of the feature importance from self.model
"""
if not self.feature_names:
raise NotFittedError("You have to first train the classifier before getting the feature importance (with train-method)")
if self.model_type == "MLPC":
importances = [np.mean(i) for i in self.model.coefs_[0]] # MLP Classifier
elif self.model_type in ("DTC", "RFC", "GBM", "CBC", "ABC", "ETC", "XGBC"):
importances = self.model.feature_importances_
elif self.model_type in ("KNC", "GNB", "BNB", "GPC", "QDA", "BC"):
logger.warning(f"{self.model_type} does not have a feature importance")
return
else:
importances = self.model.coef_[0] # "normal"
feature_importances = pd.Series(importances, index=self.feature_names)
fig, ax = plt.subplots()
if self.model_type in ("RFC", "GBM", "ETC"):
if self.model_type in ("RFC", "ETC"):
std = np.std(
[tree.feature_importances_ for tree in self.model.estimators_], axis=0,
)
elif self.model_type == "GBM":
std = np.std(
[tree[0].feature_importances_ for tree in self.model.estimators_], axis=0,
)
feature_importances.plot.bar(yerr=std, ax=ax)
else:
feature_importances.plot.bar(ax=ax)
ax.set_title("Feature importances of " + str(self.model_name))
ax.set_ylabel("use of coefficients as importance scores")
fig.tight_layout()
plt.show()
def smac_search(
self,
x_train: pd.DataFrame,
y_train: pd.Series,
n_trails: int = 50,
cv_num: int = 5,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
small_data_eval: bool = False,
walltime_limit: float = 600,
log_level: int = 20,
) -> Configuration:
"""
@params:
x_train: DataFrame with train features
y_train: Series with labels
n_trails: max number of parameter sets to test
cv_num: number of different splits per crossvalidation (only used when small_data_eval=False)
scoring: metrics to evaluate the models ("accuracy", "precision", "recall", "s_score", "l_score")
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. Else pos_label is ignored (except scoring='s_score'/'l_score')
secondary_scoring: weights the scoring (only for scoring='s_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for scoring='s_score'/'l_score')
small_data_eval: if True: trains model on all datapoints except one and does this for all datapoints (recommended for datasets with less than 150 datapoints)
walltime_limit: the maximum time in seconds that SMAC is allowed to run
log_level: 10 - DEBUG, 20 - INFO, 30 - WARNING, 40 - ERROR, 50 - CRITICAL (SMAC3 library log levels)
@return: ConfigSpace.Configuration with best hyperparameters (can be used like dict)
"""
if not SMAC_INSTALLED:
raise ImportError("SMAC3 library is not installed -> follow instructions in Repo to install SMAC3 (https://github.com/Priapos1004/SAM_ML)")
logger.debug("starting smac_search")
# NormalInteger and EqualsCondition in grid are not supported (using workaround for now) (04/07/2023), EqualsCondition will be fixed with SMAC3 version >2.0.1
if self.model_type in ("RFC", "ETC", "GBM", "XGBC", "LR"):
grid = self.smac_grid
else:
grid = self.grid
scenario = Scenario(
grid,
n_trials=n_trails,
deterministic=True,
walltime_limit=walltime_limit,
)
initial_design = HyperparameterOptimizationFacade.get_initial_design(scenario, n_configs=5)
logger.debug(f"initial_design: {initial_design.select_configurations()}")
# define target function
def grid_train(config: Configuration, seed: int) -> float:
logger.debug(f"config: {config}")
model = self.get_deepcopy()
model.set_params(**config)
if small_data_eval:
score = model.cross_validation_small_data(x_train, y_train, console_out=False, leave_loadbar=False, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength)
else:
score = model.cross_validation(x_train, y_train, console_out=False, cv_num=cv_num, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength)
return 1 - score[scoring] # SMAC always minimizes (the smaller the better)
# use SMAC to find the best hyperparameters
smac = HyperparameterOptimizationFacade(
scenario,
grid_train,
initial_design=initial_design,
overwrite=True, # If the run exists, we overwrite it; alternatively, we can continue from last state
logging_level=log_level,
)
incumbent = smac.optimize()
logger.debug("finished smac_search")
return incumbent
def randomCVsearch(
self,
x_train: pd.DataFrame,
y_train: pd.Series,
n_trails: int = 10,
cv_num: int = 5,
scoring: str = get_scoring(),
avg: str = get_avg(),
pos_label: int | str = get_pos_label(),
secondary_scoring: str = get_secondary_scoring(),
strength: int = get_strength(),
small_data_eval: bool = False,
leave_loadbar: bool = True,
) -> tuple[dict, float]:
"""
@params:
x_train: DataFrame with train features
y_train: Series with labels
n_trails: number of parameter sets to test
scoring: metrics to evaluate the models ("accuracy", "precision", "recall", "s_score", "l_score")
avg: average to use for precision and recall score (e.g. "micro", "weighted", "binary")
pos_label: if avg="binary", pos_label says which class to score. Else pos_label is ignored (except scoring='s_score'/'l_score')
secondary_scoring: weights the scoring (only for scoring='s_score'/'l_score')
strength: higher strength means a higher weight for the preferred secondary_scoring/pos_label (only for scoring='s_score'/'l_score')
small_data_eval: if True: trains model on all datapoints except one and does this for all datapoints (recommended for datasets with less than 150 datapoints)
cv_num: number of different splits per crossvalidation (only used when small_data_eval=False)
leave_loadbar: shall the loading bar of the different parameter sets be visible after training (True - load bar will still be visible)
@return: dictionary with best hyperparameters and float of best_score
"""
logger.debug("starting randomCVsearch")
results = []
configs = self.get_random_configs(n_trails)
at_least_one_run: bool = False
try:
for config in tqdm(configs, desc=f"randomCVsearch ({self.model_name})", leave=leave_loadbar):
logger.debug(f"config: {config}")
model = self.get_deepcopy()
model.set_params(**config)
if small_data_eval:
score = model.cross_validation_small_data(x_train, y_train, console_out=False, leave_loadbar=False, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength)
else:
score = model.cross_validation(x_train, y_train, cv_num=cv_num, console_out=False, avg=avg, pos_label=pos_label, secondary_scoring=secondary_scoring, strength=strength)
config_dict = dict(config)
config_dict[scoring] = score[scoring]
results.append(config_dict)
at_least_one_run = True
except KeyboardInterrupt:
logger.info("KeyboardInterrupt - output interim result")
if not at_least_one_run:
return {}, -1
self.rCVsearch_results = pd.DataFrame(results, dtype=object).sort_values(by=scoring, ascending=False)
# for-loop to keep dtypes of columns
best_hyperparameters = {}
for col in self.rCVsearch_results.columns:
value = self.rCVsearch_results[col].iloc[0]
if str(value) != "nan":
best_hyperparameters[col] = value
best_score = best_hyperparameters[scoring]
best_hyperparameters.pop(scoring)
logger.debug("finished randomCVsearch")
return best_hyperparameters, best_score
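# Minimal usage sketch for the tuning workflow defined above (randomCVsearch -> set_params -> train).
# Assumptions: the concrete wrapper import path is inferred from the package layout
# (sam_ml/models/RandomForestClassifier.py) and the data below is synthetic, for illustration only.
if __name__ == "__main__":
    import pandas as pd
    from sklearn.datasets import make_classification

    from sam_ml.models.RandomForestClassifier import RFC  # assumed import path

    X, y = make_classification(n_samples=200, n_features=10, random_state=42)
    X, y = pd.DataFrame(X), pd.Series(y)

    model = RFC()
    best_hyperparameters, best_score = model.randomCVsearch(X, y, n_trails=5, cv_num=3)
    model.set_params(**best_hyperparameters)
    train_score, train_time = model.train(X, y, console_out=False)
    print(best_hyperparameters, best_score, train_score, train_time)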
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/main_classifier.py
| 0.516352 | 0.194578 |
main_classifier.py
|
pypi
|
from ConfigSpace import Categorical, ConfigurationSpace, Integer, Normal
from sklearn.ensemble import ExtraTreesClassifier
from sam_ml.config import get_n_jobs
from .main_classifier import Classifier
class ETC(Classifier):
""" ExtraTreesClassifier Wrapper class """
def __init__(
self,
model_name: str = "ExtraTreesClassifier",
n_jobs: int = get_n_jobs(),
random_state: int = 42,
**kwargs,
):
"""
@param (important one):
n_estimators: Number of trees
max_depth: Maximum number of levels in tree
n_jobs: how many cores shall be used (-1 means all)
random_state: random_state for model
verbose: log level (higher number --> more logs)
            warm_start: reuse the previous fit and add more estimators
max_features: Number of features to consider at every split
min_samples_split: Minimum number of samples required to split a node
min_samples_leaf: Minimum number of samples required at each leaf node
bootstrap: Method of selecting samples for training each tree
criterion: function to measure the quality of a split
"""
model_type = "ETC"
model = ExtraTreesClassifier(
n_jobs=n_jobs,
random_state=random_state,
**kwargs,
)
grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (10, 1000), log=True, default=100),
"max_depth": Integer("max_depth", (3, 15), distribution=Normal(5, 3), default=5),
"min_samples_split": Integer("min_samples_split", (2, 10), default=2),
"min_samples_leaf": Integer("min_samples_leaf", (1, 4), default=1),
"bootstrap": Categorical("bootstrap", [True, False], default=False),
"criterion": Categorical("criterion", ["gini", "entropy"], default="gini"),
})
# workaround for now -> Problems with Normal distribution (in smac_search) (04/07/2023)
self.smac_grid = ConfigurationSpace(
seed=42,
space={
"n_estimators": Integer("n_estimators", (10, 1000), log=True, default=100),
"max_depth": Integer("max_depth", (3, 15), default=5),
"min_samples_split": Integer("min_samples_split", (2, 10), default=2),
"min_samples_leaf": Integer("min_samples_leaf", (1, 4), default=1),
"bootstrap": Categorical("bootstrap", [True, False], default=False),
"criterion": Categorical("criterion", ["gini", "entropy"], default="gini"),
})
super().__init__(model, model_name, model_type, grid)
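# Minimal usage sketch for the ETC wrapper defined above: train on a small synthetic,
# pandas-typed dataset and print the evaluation scores. train/evaluate come from the
# Classifier base class; the data is for illustration only (run as a module so the
# relative import above resolves).
if __name__ == "__main__":
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split

    X, y = make_classification(n_samples=300, n_features=8, random_state=42)
    X, y = pd.DataFrame(X), pd.Series(y)
    x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=42)

    etc = ETC(n_estimators=50)
    etc.train(x_train, y_train, console_out=False)
    print(etc.evaluate(x_test, y_test, console_out=False))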
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/ExtraTreesClassifier.py
| 0.869119 | 0.325306 |
ExtraTreesClassifier.py
|
pypi
|
import copy
import pandas as pd
from sam_ml.config import setup_logger
from sam_ml.data.preprocessing import (
Embeddings_builder,
Sampler,
SamplerPipeline,
Scaler,
Selector,
)
from .main_classifier import Classifier
from .RandomForestClassifier import RFC
logger = setup_logger(__name__)
class Pipeline(Classifier):
""" classifier pipeline class """
def __init__(self, vectorizer: str | Embeddings_builder | None = None, scaler: str | Scaler | None = None, selector: str | tuple[str, int] | Selector | None = None, sampler: str | Sampler | SamplerPipeline | None = None, model: Classifier = RFC(), model_name: str = "pipe"):
"""
@params:
vectorizer: type of "data.embeddings.Embeddings_builder" or Embeddings_builder class object for automatic string column vectorizing (None for no vectorizing)
scaler: type of "data.scaler.Scaler" or Scaler class object for scaling the data (None for no scaling)
selector: type of "data.feature_selection.Selector" or Selector class object for feature selection (None for no selecting)
sampling: type of "data.sampling.Sampler" or Sampler class object for sampling the train data (None for no sampling)
model: Classifier class object
model_name: name of the model
"""
if issubclass(type(model), Classifier):
super().__init__(model_object=model.model, model_name=model_name, model_type=model.model_type, grid=model.grid)
# Inherit methods and attributes from model
for attribute_name in dir(model):
attribute_value = getattr(model, attribute_name)
# Check if the attribute is a method or a variable (excluding private attributes)
if callable(attribute_value) and not attribute_name.startswith("__"):
if not hasattr(self, attribute_name):
setattr(self, attribute_name, attribute_value)
elif not attribute_name.startswith("__"):
if not hasattr(self, attribute_name):
self.__dict__[attribute_name] = attribute_value
self.__classifier = model
else:
raise ValueError(f"wrong input '{model}' for model")
if vectorizer in Embeddings_builder.params()["vec"]:
self.vectorizer = Embeddings_builder(algorithm=vectorizer)
elif type(vectorizer) == Embeddings_builder or vectorizer is None:
self.vectorizer = vectorizer
else:
raise ValueError(f"wrong input '{vectorizer}' for vectorizer")
if scaler in Scaler.params()["scaler"]:
self.scaler = Scaler(algorithm=scaler)
elif type(scaler) == Scaler or scaler is None:
self.scaler = scaler
else:
raise ValueError(f"wrong input '{scaler}' for scaler")
if selector in Selector.params()["algorithm"]:
self.selector = Selector(algorithm=selector)
elif type(selector) == tuple and len(selector) == 2:
if selector[0] in Selector.params()["algorithm"] and type(selector[1])==int:
if selector[1] > 0:
self.selector = Selector(algorithm=selector[0], num_features=selector[1])
else:
raise ValueError(f"wrong input '{selector}' for selector -> integer in tuple has to be greater 0")
else:
raise ValueError(f"wrong input '{selector}' for selector -> tuple incorrect")
elif type(selector) == Selector or selector is None:
self.selector = selector
else:
raise ValueError(f"wrong input '{selector}' for selector")
if sampler in Sampler.params()["algorithm"]:
self.sampler = Sampler(algorithm=sampler)
elif type(sampler) ==str and SamplerPipeline.check_is_valid_algorithm(sampler):
self.sampler = SamplerPipeline(algorithm=sampler)
elif type(sampler) in (Sampler, SamplerPipeline) or sampler is None:
self.sampler = sampler
else:
raise ValueError(f"wrong input '{sampler}' for sampler")
self.vectorizer_dict: dict[str, Embeddings_builder] = {}
# keep track if model was trained for warm_start
self._data_classes_trained: bool = False
def __repr__(self) -> str:
params: str = ""
for step in self.steps:
params += step[0]+"="+step[1].__str__()+", "
params += f"model_name='{self.model_name}'"
return f"Pipeline({params})"
@property
def steps(self) -> list[tuple[str, any]]:
return [("vectorizer", self.vectorizer), ("scaler", self.scaler), ("selector", self.selector), ("sampler", self.sampler), ("model", self.__classifier)]
def __auto_vectorizing(self, X: pd.DataFrame, train_on: bool = True) -> pd.DataFrame:
""" detects string columns, creates a vectorizer for each, and vectorizes them """
if train_on:
X = X.convert_dtypes()
string_columns = list(X.select_dtypes(include="string").columns)
self._string_columns = string_columns
self.vectorizer_dict = dict(zip(self._string_columns, [copy.deepcopy(self.vectorizer) for i in range(len(string_columns))]))
for col in self._string_columns:
X = pd.concat([X, self.vectorizer_dict[col].vectorize(X[col], train_on=train_on)], axis=1)
X_vec = X.drop(columns=self._string_columns)
return X_vec
def __data_prepare(self, X: pd.DataFrame, y: pd.Series, train_on: bool = True) -> tuple[pd.DataFrame, pd.Series]:
""" runs data class objects on data to prepare them for the model """
if self.vectorizer is not None:
X = self.__auto_vectorizing(X, train_on=train_on)
if self.scaler is not None:
X = self.scaler.scale(X, train_on=train_on)
if self.selector is not None:
X = self.selector.select(X, y, train_on=train_on)
if self.sampler is not None and train_on:
X, y = self.sampler.sample(X, y)
self._data_classes_trained = True
return X, y
def fit(self, x_train: pd.DataFrame, y_train: pd.Series, **kwargs):
x_train_pre, y_train_pre = self.__data_prepare(x_train, y_train, train_on=True)
self.feature_names = list(x_train_pre.columns)
return super().fit(x_train_pre, y_train_pre, **kwargs)
def fit_warm_start(self, x_train: pd.DataFrame, y_train: pd.Series, **kwargs):
x_train_pre, y_train_pre = self.__data_prepare(x_train, y_train, train_on = not self._data_classes_trained)
self.feature_names = list(x_train_pre.columns)
return super().fit(x_train_pre, y_train_pre, **kwargs)
def predict(self, x_test: pd.DataFrame) -> list:
x_test_pre, _ = self.__data_prepare(x_test, None, train_on=False)
return super().predict(x_test_pre)
def get_params(self, deep: bool = True) -> dict[str, any]:
return dict(self.steps)
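# Minimal usage sketch for the Pipeline class defined above: standard scaling plus random
# oversampling in front of the default RFC model. The string shortcuts ("standard", "ros")
# resolve through Scaler.params()/Sampler.params(); the data is synthetic, for illustration only.
if __name__ == "__main__":
    from sklearn.datasets import make_classification

    X, y = make_classification(n_samples=200, n_features=6, weights=[0.8], random_state=42)
    X, y = pd.DataFrame(X), pd.Series(y)

    pipe = Pipeline(scaler="standard", sampler="ros", model=RFC(), model_name="scaled_ros_rfc")
    pipe.fit(X, y)
    print(pipe.predict(X)[:10])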
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/main_pipeline.py
| 0.727298 | 0.265202 |
main_pipeline.py
|
pypi
|
from ConfigSpace import (
Categorical,
ConfigurationSpace,
EqualsCondition,
Float,
ForbiddenAndConjunction,
ForbiddenEqualsClause,
ForbiddenInClause,
)
from sklearn.linear_model import LogisticRegression
from .main_classifier import Classifier
class LR(Classifier):
""" LogisticRegression Wrapper class """
def __init__(
self,
model_name: str = "LogisticRegression",
random_state: int = 42,
**kwargs,
):
"""
@param (important one):
            n_jobs: how many cores shall be used (-1 means all) (n_jobs > 1 does not have any effect when 'solver' is set to 'liblinear')
            random_state: random_state for model
            verbose: log level (higher number --> more logs)
            warm_start: reuse the previous fit and add more estimators
tol: Tolerance for stopping criteria
C: Inverse of regularization strength
max_iter: Maximum number of iterations taken for the solvers to converge
solver: Algorithm to use in the optimization problem
penalty: Specify the norm of the penalty
"""
model_type = "LR"
model = LogisticRegression(
random_state=random_state,
**kwargs,
)
grid = ConfigurationSpace(
seed=42,
space={
"solver": Categorical("solver", ["newton-cg", "lbfgs", "liblinear", "sag", "saga"], weights=[0.15, 0.15, 0.15, 0.15, 0.4], default="lbfgs"),
"penalty": Categorical("penalty", ["l2", "elasticnet"], default="l2"),
"C": Float("C", (0.01, 100), log=True, default=1),
"l1_ratio": Float("l1_ratio", (0.01, 1), default=0.1),
})
solver_and_penalty = ForbiddenAndConjunction(
ForbiddenEqualsClause(grid["penalty"], "elasticnet"),
ForbiddenInClause(grid["solver"], ["newton-cg", "lbfgs", "liblinear", "sag"]),
)
l1_ratio_cond = EqualsCondition(grid["l1_ratio"], grid["penalty"], "elasticnet")
grid.add_forbidden_clause(solver_and_penalty)
grid.add_condition(l1_ratio_cond)
# workaround for now -> Problems with EqualsCondition (KeyError: 'l1_ratio') (04/07/2023)
self.smac_grid = ConfigurationSpace(
seed=42,
space={
"solver": Categorical("solver", ["newton-cg", "lbfgs", "liblinear", "sag", "saga"], default="lbfgs"),
"penalty": Categorical("penalty", ["l2"], default="l2"),
"C": Float("C", (0.01, 100), log=True, default=1),
})
super().__init__(model, model_name, model_type, grid)
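# Minimal sketch of how the hyperparameter grid defined above behaves: sampled configurations
# respect the forbidden solver/penalty combinations, and l1_ratio only appears when
# penalty="elasticnet". get_random_configs comes from the Classifier base class
# (run as a module so the relative import above resolves).
if __name__ == "__main__":
    lr = LR()
    for config in lr.get_random_configs(5):
        print(dict(config))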
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/LogisticRegression.py
| 0.884962 | 0.340485 |
LogisticRegression.py
|
pypi
|
import pandas as pd
from sklearn.preprocessing import (
MaxAbsScaler,
MinMaxScaler,
Normalizer,
PowerTransformer,
QuantileTransformer,
RobustScaler,
StandardScaler,
)
from sam_ml.config import setup_logger
from .main_data import DATA
logger = setup_logger(__name__)
class Scaler(DATA):
""" Scaler Wrapper class """
def __init__(self, algorithm: str = "standard", **kwargs):
"""
@param:
algorithm: kind of scaler to use
'standard': StandardScaler
'minmax': MinMaxScaler
'maxabs': MaxAbsScaler
'robust': RobustScaler
'normalizer': Normalizer
'power': PowerTransformer with method="yeo-johnson"
'quantile': QuantileTransformer (default of QuantileTransformer)
'quantile_normal': QuantileTransformer with output_distribution="normal" (gaussian pdf)
**kwargs:
additional parameters for scaler
"""
if algorithm == "standard":
scaler = StandardScaler(**kwargs)
elif algorithm == "minmax":
scaler = MinMaxScaler(**kwargs)
elif algorithm == "maxabs":
scaler = MaxAbsScaler(**kwargs)
elif algorithm == "robust":
scaler = RobustScaler(**kwargs)
elif algorithm == "normalizer":
scaler = Normalizer(**kwargs)
elif algorithm == "power":
scaler = PowerTransformer(**kwargs)
elif algorithm == "quantile":
scaler = QuantileTransformer(**kwargs)
elif algorithm == "quantile_normal":
scaler = QuantileTransformer(output_distribution="normal", **kwargs)
else:
raise ValueError(f"algorithm='{algorithm}' is not supported")
super().__init__(algorithm, scaler)
@staticmethod
def params() -> dict:
"""
@return:
possible values for the parameters of the Scaler class
"""
param = {"scaler": ["standard", "minmax", "maxabs", "robust", "normalizer", "power", "quantile", "quantile_normal"]}
return param
def scale(self, data: pd.DataFrame, train_on: bool = True) -> pd.DataFrame:
"""
@param:
train_on: if True, the scaler will fit_transform. Otherwise just transform
@return:
Dataframe with scaled data
"""
columns = data.columns
logger.debug("scaling - started")
if train_on:
scaled_ar = self.transformer.fit_transform(data)
else:
scaled_ar = self.transformer.transform(data)
scaled_df = pd.DataFrame(scaled_ar, columns=columns)
logger.debug("scaling - finished")
return scaled_df
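# Minimal usage sketch for the Scaler wrapper defined above: fit_transform on training data
# (train_on=True), then transform-only on new data (train_on=False). The data is illustrative.
if __name__ == "__main__":
    train_df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0], "b": [10.0, 20.0, 30.0, 40.0]})
    new_df = pd.DataFrame({"a": [2.5], "b": [25.0]})

    scaler = Scaler(algorithm="standard")
    print(scaler.scale(train_df, train_on=True))
    print(scaler.scale(new_df, train_on=False))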
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/data/preprocessing/scaler.py
| 0.797833 | 0.312422 |
scaler.py
|
pypi
|
import concurrent.futures
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from tqdm.auto import tqdm
from sam_ml.config import setup_logger
from .main_data import DATA
logger = setup_logger(__name__)
class Embeddings_builder(DATA):
""" Vectorizer Wrapper class """
def __init__(self, algorithm: str = "tfidf", **kwargs):
"""
@param:
algorithm:
'count': CountVectorizer (default)
'tfidf': TfidfVectorizer
'bert': SentenceTransformer("quora-distilbert-multilingual")
**kwargs:
additional parameters for CountVectorizer or TfidfVectorizer
"""
if algorithm == "bert":
vectorizer = SentenceTransformer("quora-distilbert-multilingual")
elif algorithm == "count":
vectorizer = CountVectorizer(**kwargs)
elif algorithm == "tfidf":
vectorizer = TfidfVectorizer(**kwargs)
else:
raise ValueError(f"algorithm='{algorithm}' is not supported")
super().__init__(algorithm, vectorizer)
@staticmethod
def params() -> dict:
"""
@return:
possible values for the parameters
"""
param = {"vec": ["bert", "count", "tfidf"]}
return param
def get_params(self, deep: bool = True):
class_params = {"vec": self.algorithm}
if self.algorithm != "bert":
return class_params | self.transformer.get_params(deep)
return class_params
def set_params(self, **params):
if self.algorithm == "bert":
self.transformer = SentenceTransformer("quora-distilbert-multilingual", **params)
else:
self.transformer.set_params(**params)
return self
def create_parallel_bert_embeddings(self, content: list) -> list:
logger.debug("Going to parallel process embedding creation")
# Create a progress bar
pbar = tqdm(total=len(content), desc="Bert Embeddings")
# Define a new function that updates the progress bar after each embedding
def get_embedding_and_update(text: str) -> list:
pbar.update()
return self.transformer.encode(text)
# Parallel processing
with concurrent.futures.ThreadPoolExecutor() as executor:
content_embeddings = list(executor.map(get_embedding_and_update, content))
# Close the progress bar
pbar.close()
return content_embeddings
def vectorize(self, data: pd.Series, train_on: bool = True) -> pd.DataFrame:
"""
@params:
data: pandas Series
train_on: shall the vectorizer fit before transform
@return:
pandas Dataframe with vectorized data
"""
indices = data.index
logger.debug("creating embeddings - started")
if self.algorithm == "bert":
message_embeddings = self.create_parallel_bert_embeddings(list(data))
emb_ar = np.asarray(message_embeddings)
else:
if train_on:
emb_ar = self.transformer.fit_transform(data).toarray()
else:
emb_ar = self.transformer.transform(data).toarray()
emb_df = pd.DataFrame(emb_ar, index=indices).add_suffix("_"+data.name)
logger.debug("creating embeddings - finished")
return emb_df
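# Minimal usage sketch for the Embeddings_builder wrapper defined above using the tfidf backend
# (the "bert" backend downloads a SentenceTransformer model, so it is not used here).
# The toy Series is for illustration only.
if __name__ == "__main__":
    texts = pd.Series(["the cat sat", "the dog barked", "a cat and a dog"], name="text")

    vectorizer = Embeddings_builder(algorithm="tfidf")
    print(vectorizer.vectorize(texts, train_on=True))
    print(vectorizer.vectorize(pd.Series(["another cat"], name="text"), train_on=False))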
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/data/preprocessing/embeddings.py
| 0.76882 | 0.341116 |
embeddings.py
|
pypi
|
import pandas as pd
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, RandomOverSampler
from imblearn.under_sampling import (
ClusterCentroids,
NearMiss,
OneSidedSelection,
RandomUnderSampler,
TomekLinks,
)
from sam_ml.config import setup_logger
from .main_data import DATA
logger = setup_logger(__name__)
class Sampler(DATA):
""" sample algorithm Wrapper class """
def __init__(self, algorithm: str = "ros", random_state: int = 42, sampling_strategy="auto", **kwargs):
"""
@param:
algorithm: which sampling algorithm to use:
SMOTE: Synthetic Minority Oversampling Technique (upsampling)
BSMOTE: BorderlineSMOTE (upsampling)
ros: RandomOverSampler (upsampling) (default)
rus: RandomUnderSampler (downsampling)
tl: TomekLinks (cleaning downsampling)
nm: NearMiss (downsampling)
cc: ClusterCentroids (downsampling)
oss: OneSidedSelection (cleaning downsampling)
random_state: seed for random sampling
            sampling_strategy: desired minority class size as a fraction of the majority class size
**kwargs:
additional parameters for sampler
"""
if algorithm == "SMOTE":
sampler = SMOTE(random_state=random_state, sampling_strategy=sampling_strategy, **kwargs)
elif algorithm == "BSMOTE":
sampler = BorderlineSMOTE(random_state=random_state, sampling_strategy=sampling_strategy, **kwargs)
elif algorithm == "rus":
sampler = RandomUnderSampler(random_state=random_state, sampling_strategy=sampling_strategy, **kwargs)
elif algorithm == "ros":
sampler = RandomOverSampler(random_state=random_state, sampling_strategy=sampling_strategy, **kwargs)
elif algorithm == "tl":
sampler = TomekLinks(**kwargs)
elif algorithm == "nm":
sampler = NearMiss(sampling_strategy=sampling_strategy, **kwargs)
elif algorithm == "cc":
sampler = ClusterCentroids(sampling_strategy=sampling_strategy, random_state=random_state, **kwargs)
elif algorithm == "oss":
sampler = OneSidedSelection(random_state=random_state, **kwargs)
else:
raise ValueError(f"algorithm='{algorithm}' is not supported")
super().__init__(algorithm, sampler)
@staticmethod
def params() -> dict:
"""
@return:
possible values for the parameters
"""
param = {"algorithm": ["SMOTE", "BSMOTE", "rus", "ros", "tl", "nm", "cc", "oss"]}
return param
def sample(self, x_train: pd.DataFrame, y_train: pd.Series) -> tuple[pd.DataFrame, pd.Series]:
"""
Function for up- and downsampling
@return:
tuple x_train_sampled, y_train_sampled
"""
x_train_sampled, y_train_sampled = self.transformer.fit_resample(x_train, y_train)
return x_train_sampled, y_train_sampled
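# Minimal usage sketch for the Sampler wrapper defined above: random oversampling of a small,
# imbalanced toy dataset (illustrative data only).
if __name__ == "__main__":
    X = pd.DataFrame({"f1": range(8), "f2": [0, 1, 0, 1, 0, 1, 0, 1]})
    y = pd.Series([0, 0, 0, 0, 0, 0, 1, 1])

    sampler = Sampler(algorithm="ros")
    x_sampled, y_sampled = sampler.sample(X, y)
    print(y.value_counts(), y_sampled.value_counts(), sep="\n")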
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/data/preprocessing/sampling.py
| 0.794185 | 0.428652 |
sampling.py
|
pypi
|
import pandas as pd
import statsmodels.api as sm
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import (
RFE,
RFECV,
SelectFromModel,
SelectKBest,
SequentialFeatureSelector,
chi2,
)
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sam_ml.config import setup_logger
from .main_data import DATA
logger = setup_logger(__name__)
class Selector(DATA):
""" feature selection algorithm Wrapper class """
def __init__(self, algorithm: str = "kbest", num_features: int = 10, estimator = LinearSVC(penalty="l1", dual=False), **kwargs):
"""
@params:
algorithm:
'kbest': SelectKBest
'kbest_chi2': SelectKBest with score_func=chi2 (only non-negative values)
'pca': PCA (new column names after transformation)
                'wrapper': uses p-values of an ordinary least squares (OLS) model from the statsmodels library (ignores num_features -> can be problematic with too many features)
'sequential': SequentialFeatureSelector
'select_model': SelectFromModel (meta-transformer for selecting features based on importance weights)
'rfe': RFE (recursive feature elimination)
'rfecv': RFECV (recursive feature elimination with cross-validation)
estimator:
parameter is needed for SequentialFeatureSelector, SelectFromModel, RFE, RFECV (default: LinearSVC)
**kwargs:
additional parameters for selector
"""
self.num_features = num_features
if algorithm == "kbest":
selector = SelectKBest(k=num_features, **kwargs)
elif algorithm == "kbest_chi2":
selector = SelectKBest(k=num_features, score_func=chi2, **kwargs)
elif algorithm == "pca":
selector = PCA(n_components=num_features, random_state=42, **kwargs)
elif algorithm == "wrapper":
selector = {"pvalue_limit": 0.5}
elif algorithm == "sequential":
selector = SequentialFeatureSelector(estimator, n_features_to_select=num_features, **kwargs)
elif algorithm == "select_model":
selector = SelectFromModel(estimator, max_features=num_features, **kwargs)
elif algorithm == "rfe":
selector = RFE(estimator, n_features_to_select=num_features, **kwargs)
elif algorithm == "rfecv":
selector = RFECV(estimator, min_features_to_select=num_features, **kwargs)
else:
raise ValueError(f"algorithm='{algorithm}' is not supported")
super().__init__(algorithm, selector)
@staticmethod
def params() -> dict:
"""
@return:
possible/recommended values for the parameters
"""
param = {
"algorithm": ["kbest", "kbest_chi2", "pca", "wrapper", "sequential", "select_model", "rfe", "rfecv"],
"estimator": [LinearSVC(penalty="l1", dual=False), LogisticRegression(), ExtraTreesClassifier(n_estimators=50)]
}
return param
def get_params(self, deep: bool = True):
class_params = {"algorithm": self.algorithm, "num_features": self.num_features}
if self.algorithm == "wrapper":
return class_params | self.transformer
else:
selector_params = self.transformer.get_params(deep)
if self.algorithm in ("kbest", "kbest_chi2"):
selector_params.pop("k")
elif self.algorithm in ("pca"):
selector_params.pop("n_components")
elif self.algorithm in ("sequential", "rfe"):
selector_params.pop("n_features_to_select")
elif self.algorithm in ("select_model"):
selector_params.pop("max_features")
elif self.algorithm in ("rfecv"):
selector_params.pop("min_features_to_select")
return class_params | selector_params
def set_params(self, **params):
if self.algorithm == "wrapper":
self.transformer = params
else:
self.transformer.set_params(**params)
return self
def select(self, X: pd.DataFrame, y: pd.DataFrame = None, train_on: bool = True) -> pd.DataFrame:
"""
for training: the y data is also needed
"""
if len(X.columns) < self.num_features:
logger.warning("the number of features that shall be selected is greater than the number of features in X --> return X")
self.selected_features = X.columns
return X
logger.debug("selecting features - started")
if train_on:
if self.algorithm == "wrapper":
self.selected_features = self.__wrapper_select(X, y, **self.transformer)
else:
self.transformer.fit(X.values, y)
self.selected_features = self.transformer.get_feature_names_out(X.columns)
if self.algorithm == "wrapper":
X_selected = X[self.selected_features]
else:
X_selected = pd.DataFrame(self.transformer.transform(X), columns=self.selected_features)
logger.debug("selecting features - finished")
return X_selected
def __wrapper_select(self, X: pd.DataFrame, y: pd.DataFrame, pvalue_limit: float = 0.5, **kwargs) -> list:
selected_features = list(X.columns)
y = list(y)
pmax = 1
while selected_features:
            p = []
            X_new = X[selected_features]
            X_new = sm.add_constant(X_new)
            model = sm.OLS(y, X_new).fit()
            p = pd.Series(model.pvalues.values[1:], index=selected_features)
            pmax = max(p)
            feature_pmax = p.idxmax()
            if pmax > pvalue_limit:
                selected_features.remove(feature_pmax)
else:
break
if len(selected_features) == len(X.columns):
logger.warning("the wrapper algorithm selected all features")
return selected_features
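# Minimal usage sketch for the Selector wrapper defined above: keep the 2 best features
# according to SelectKBest, then inspect which columns were kept (illustrative data only).
if __name__ == "__main__":
    X = pd.DataFrame({
        "informative": [0, 5, 1, 4, 0, 5, 1, 6],
        "noise_1": [3, 1, 2, 2, 1, 3, 2, 1],
        "noise_2": [1, 2, 1, 2, 2, 1, 2, 1],
    })
    y = pd.Series([0, 1, 0, 1, 0, 1, 0, 1])

    selector = Selector(algorithm="kbest", num_features=2)
    print(selector.select(X, y, train_on=True))
    print(list(selector.selected_features))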
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/data/preprocessing/feature_selection.py
| 0.762998 | 0.45538 |
feature_selection.py
|
pypi
|
import pandas as pd
from sam_ml.config import setup_logger
from .sampling import Sampler
logger = setup_logger(__name__)
class SamplerPipeline:
def __init__(self, algorithm: str | list[Sampler] = "SMOTE_rus_20_50"):
"""
        Class that uses multiple up- and down-sampling algorithms instead of only one
@param:
- algorithm = "A1_A2_..._An_x1_x2_..._xn": first, use Sampler A1 with sampling_strategy x1% on data, then Sampler A2 with sampling_strategy x2% until Sampler An with sampling_strategy xn on data (only works for binary data!!!)
- algorithm = list[Sampler]: use each Sampler in list one after the other on data
@Note:
- sampling_strategy is the percentage of minority class size of majority class size
@example:
- ros_rus_10_50: RandomOverSampler for minority class to 10% of majority class and then RandomUnderSampler for majority class to 2*minority class
- SMOTE_rus_20_50: SMOTE for minority class to 20% of majority class and then RandomUnderSampler for majority class to 2*minority class
"""
if type(algorithm) == str:
self.algorithm = algorithm
samplers_ratios = algorithm.split("_")
            if len(samplers_ratios) % 2 == 1:
                raise ValueError(f"The string has to contain a sampling_strategy for every Sampler, but got {samplers_ratios}")
samplers = samplers_ratios[:int(len(samplers_ratios)/2)]
ratios = samplers_ratios[int(len(samplers_ratios)/2):]
ratios_float = [int(ratio)/100 for ratio in ratios]
self.sampler = [Sampler(algorithm=samplers[idx], sampling_strategy=ratios_float[idx]) for idx in range(len(samplers))]
else:
self.sampler = algorithm
self.algorithm = "custom"
def __repr__(self) -> str:
return f"SamplerPipeline{tuple(self.sampler)}"
@staticmethod
def check_is_valid_algorithm(algorithm: str) -> bool:
"""
@return:
True if algorithm is valid
"""
samplers_ratios = algorithm.split("_")
        if len(samplers_ratios) % 2 == 1:
            logger.warning(f"The string has to contain a sampling_strategy for every Sampler, but got {samplers_ratios}")
return False
samplers = samplers_ratios[:int(len(samplers_ratios)/2)]
ratios = samplers_ratios[int(len(samplers_ratios)/2):]
ratios_float = [int(ratio)/100 for ratio in ratios]
for idx in range(len(samplers)):
if not (samplers[idx] in Sampler.params()["algorithm"] and 0<ratios_float[idx]<=1):
logger.warning(f"invalid sampler-sampling_strategy pair: '{samplers[idx]}' with {ratios_float[idx]}")
return False
return True
def get_params(self, deep: bool = True):
return {"algorithm": self.sampler}
    def set_params(self, **params):
        self.sampler = params["algorithm"]
        return self
def sample(self, x_train: pd.DataFrame, y_train: pd.Series) -> tuple[pd.DataFrame, pd.Series]:
"""
Function for up- and downsampling
@return:
tuple x_train_sampled, y_train_sampled
"""
for sampler_idx in range(len(self.sampler)):
if sampler_idx == 0:
x_train_sampled, y_train_sampled = self.sampler[sampler_idx].sample(x_train, y_train)
else:
x_train_sampled, y_train_sampled = self.sampler[sampler_idx].sample(x_train_sampled, y_train_sampled)
return x_train_sampled, y_train_sampled
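# Minimal usage sketch for the SamplerPipeline defined above: "SMOTE_rus_50_80" first
# oversamples the minority class with SMOTE until it reaches 50% of the majority class,
# then undersamples the majority class with RandomUnderSampler until the minority class
# is 80% of it (illustrative data only).
if __name__ == "__main__":
    X = pd.DataFrame({"f1": range(32), "f2": [i % 5 for i in range(32)]})
    y = pd.Series([0] * 24 + [1] * 8)

    sampler_pipe = SamplerPipeline(algorithm="SMOTE_rus_50_80")
    x_sampled, y_sampled = sampler_pipe.sample(X, y)
    print(y.value_counts(), y_sampled.value_counts(), sep="\n")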
|
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/data/preprocessing/sampling_pipeline.py
| 0.77928 | 0.566798 |
sampling_pipeline.py
|
pypi
|
# Overview
If you author an [AWS Serverless Application Model (SAM)](https://aws.amazon.com/serverless/sam/) template you may wish to publish this as an [AWS CloudFormation](https://docs.aws.amazon.com/cloudformation/index.html) template to allow the user to deploy the solution from the console and remove the need for the user to install the [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/install-sam-cli.html).
Much of this can be achieved by using commands such as `sam package` to package your template and upload the assets to S3 and [aws-sam-translator](https://pypi.org/project/aws-sam-translator/) to transform the SAM template into a AWS CloudFormation template. sam-publish allows you to further transform your CloudFormation template in three ways:
* Inlining of [AWS Lambda](https://docs.aws.amazon.com/lambda/latest/dg/welcome.html) functions into the CloudFormation template to allow the user to see the functions in the main template
* Control of the buckets where the assets are stored e.g. [AWS Step Functions](https://aws.amazon.com/step-functions/) and [Lambda Layers](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html). This can be useful if you would like to deploy the assets to a separate AWS account which may have publicly accessible buckets available specifically for sharing assets with users.
* Removal of the metadata and tags that are added to resources when converted using [aws-sam-translator](https://pypi.org/project/aws-sam-translator/)
# Command Line Arguments
`--working-folder WORKING_FOLDER` - Working folder for the input and output files. Normally a local temp folder.
`--cfn-input-template CFN_INPUT_TEMPLATE` - Name of JSON template to transform [default: template.json]. Normally the output from `sam package` command
`--cfn-output-template CFN_OUTPUT_TEMPLATE` - Name of YAML template to output [default: template.yaml].
`--target-asset-folder TARGET_ASSET_FOLDER` - Local folder the assets should be stored [default: ./Assets/].
`--lambda-folder LAMBDA_FOLDER` - Location the lambda assets should be stored, this is appended to the target-asset-folder [default: lambda].
`--layer-folder LAYER_FOLDER` - Location the layer assets should be stored, this is appended to the target-asset-folder [default: layer].
`--statemachine-folder STATEMACHINE_FOLDER` - Location the statemachine assets should be stored, this is appended to the target-asset-folder [default: statemachine].
`--target-asset-bucket TARGET_ASSET_BUCKET` - Bucket the assets will be stored in. This is used to update the references in the CloudFormation template. The assets are not actually copied to this bucket. Typically this will be done using an `aws s3 sync` command
`--target-prefix TARGET_PREFIX` - Prefix that should be applied to the updated references in the CloudFormation template if the assets are not going to uploaded to the root [default: ''].
`--move-assets` - Should references to the assets in the CloudFormation template be updated to a different bucket [default: False]
`--tidy-tags-metadata` - Should SAM tags and metadata be tidied up [Default: True]?
`--add-layout-gaps` - Should a new line be added between each resource for readability [Default: True]?
`--debug` - Enables debug logging [Default: False]
`--verbose` - Enables verbose logging [Default: True]
# Example uses
Assuming that you have a SAM Template in the current folder e.g. https://github.com/peterjdavis/sam-publish/blob/main/samples/sam-template.yaml then the following commands could be used to transform this to the CloudFormation template shown at https://github.com/peterjdavis/sam-publish/blob/main/samples/cfn-template.yaml
```bash
#!/bin/bash
# Create a virtual environment
python3 -m venv .venv
source .venv/bin/activate
# Install sam-publish
pip3 install sam-publish
# Get some environment variables
AWSAccount=$(aws sts get-caller-identity --query Account --output text)
AWSRegion=$(aws configure get region)
export tmpCFNDir=$(mktemp -d)
# Build the SAM project
sam build -t sam-template.yaml
# Check to make sure the bucket for the SAM assets exists
if aws s3api head-bucket --bucket sam-${AWSAccount}-${AWSRegion} 2>/dev/null; \
then echo Bucket sam-${AWSAccount}-${AWSRegion} exists; \
else echo Creating bucket sam-${AWSAccount}-${AWSRegion} && \
aws s3 mb s3://sam-${AWSAccount}-${AWSRegion} --region ${AWSRegion} ; \
fi
# Package the SAM template so the assets are uploaded to the S3 bucket and the updated template is available
sam package -t sam-template.yaml \
--output-template-file ${tmpCFNDir}/sam-template.tmp.yaml \
--s3-bucket sam-${AWSAccount}-${AWSRegion}
# Update the CloudFormation template so Lambda functions with an InlineSAMFunction: true metadata tag are inlined,
# assets are referenced from a parameter called AssetBucket and the layer and lambda are referenced from a default prefix
sam-publish \
--working-folder ${tmpCFNDir} \
--cfn-input-template ${tmpCFNDir}/sam-template.tmp.yaml \
--cfn-output-template cfn-template.yaml \
--target-asset-folder assets/cfn \
--target-asset-bucket AssetBucket \
--move-assets \
--verbose
# Tidy up the temporary folder
rm -rf ${tmpCFNDir}
```
|
/sam-publish-0.2.1.tar.gz/sam-publish-0.2.1/README.md
| 0.828211 | 0.986442 |
README.md
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sam_s_distributions-0.1.tar.gz/sam_s_distributions-0.1/distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
from abc import ABC, abstractmethod
from typing import Any, Optional, Set
class BaseArgType(ABC):
help: Optional[str] = None
@abstractmethod
def parse(self, value) -> Any:
pass
@abstractmethod
def help_repr(self) -> str:
pass
@abstractmethod
def global_help_repr(self, name: str) -> str:
pass
class FloatType(BaseArgType):
help: Optional[str] = None
minimum: Optional[float] = None
maximum: Optional[float] = None
def __init__(
self,
help: Optional[str] = None,
minimum: Optional[float] = None,
maximum: Optional[float] = None,
):
self.help = help
self.minimum = minimum
self.maximum = maximum
def parse(self, value):
try:
parsed_value = float(value)
except ValueError:
return None
except TypeError:
return None
if (self.maximum is not None and parsed_value > self.maximum) or (
self.minimum is not None and parsed_value < self.minimum
):
return None
return parsed_value
def help_repr(self) -> str:
qualifiers = ""
        if self.minimum is not None and self.maximum is not None:
            qualifiers = f" ({self.maximum} > val > {self.minimum})"
        elif self.minimum is not None:
            qualifiers = f" (val > {self.minimum})"
        elif self.maximum is not None:
            qualifiers = f" ({self.maximum} > val)"
return f"float{qualifiers}"
def global_help_repr(self, name: str) -> str:
return f"{name}:float"
class IntType(BaseArgType):
help: Optional[str] = None
minimum: Optional[int] = None
maximum: Optional[int] = None
def __init__(
self,
help: Optional[str] = None,
minimum: Optional[int] = None,
maximum: Optional[int] = None,
):
self.help = help
self.minimum = minimum
self.maximum = maximum
def parse(self, value):
try:
parsed_value = int(value)
except ValueError:
return None
except TypeError:
return None
if (self.maximum is not None and parsed_value > self.maximum) or (
self.minimum is not None and parsed_value < self.minimum
):
return None
return parsed_value
def help_repr(self) -> str:
qualifiers = ""
if self.minimum is not None and self.maximum is not None:
qualifiers = f" ({self.maximum} > val > {self.minimum})"
elif self.minimum is not None:
qualifiers = f" (val > {self.minimum})"
        elif self.maximum is not None:
            qualifiers = f" ({self.maximum} > val)"
return f"int{qualifiers}"
def global_help_repr(self, name: str) -> str:
return f"{name}:int"
class StringType(BaseArgType):
help: Optional[str] = None
minimum_length: Optional[int] = None
maximum_length: Optional[int] = None
# regex: Optional[str] = None
def __init__(
self,
help: Optional[str] = None,
minimum_length: Optional[int] = None,
maximum_length: Optional[int] = None,
):
self.help = help
self.minimum_length = minimum_length
self.maximum_length = maximum_length
def parse(self, value):
if value is None:
return None
try:
parsed_value = str(value)
except ValueError:
return None
except TypeError:
return None
if (
self.maximum_length is not None and len(parsed_value) > self.maximum_length
) or (
self.minimum_length is not None and len(parsed_value) < self.minimum_length
):
return None
return parsed_value
def help_repr(self) -> str:
qualifiers = ""
if self.minimum_length is not None and self.maximum_length is not None:
qualifiers = f" ({self.maximum_length} > length > {self.minimum_length})"
elif self.minimum_length is not None:
qualifiers = f" (length > {self.minimum_length})"
        elif self.maximum_length is not None:
            qualifiers = f" ({self.maximum_length} > length)"
return f"string{qualifiers}"
def global_help_repr(self, name: str) -> str:
return f"{name}:text"
class EnumType(BaseArgType):
help: Optional[str] = None
values: Set[str]
def __init__(
self,
values: Set[str],
help: Optional[str] = None,
):
self.help = help
self.values = values
def parse(self, value):
value = str(value)
if value not in self.values:
return None
return value
def help_repr(self) -> str:
return f"enum ({'|'.join(self.values)})"
def global_help_repr(self, name: str) -> str:
return f"{name}:{'|'.join(self.values)}"
class UnknownLengthListType(BaseArgType):
help: Optional[str] = None
arg_type: BaseArgType
def __init__(self, arg_type: BaseArgType, help: Optional[str] = None):
self.arg_type = arg_type
self.help = help
def parse(self, value):
l = []
if isinstance(value, str):
value = [a for a in value.split(" ") if a != ""]
for arg in value:
l.append(self.arg_type.parse(arg))
if None in l:
return None
return l
def help_repr(self) -> str:
return f"{self.arg_type.help_repr()} [...]"
def global_help_repr(self, name: str) -> str:
return f"{self.arg_type.global_help_repr(name)} [...]"
class FlagType:
help: Optional[str] = None
def __init__(self, help: Optional[str] = None):
self.help = help
def global_help_repr(self, name: str) -> str:
return f"[--{name}]"
|
/sam_slash_slack-0.1.2-py3-none-any.whl/sam_slash_slack/arg_types.py
| 0.912592 | 0.221793 |
arg_types.py
|
pypi
|
import logging
from typing import Any, Callable, List, Optional, Set, Tuple, Union
import aiohttp
from sam_slash_slack.arg_types import (
BaseArgType,
FlagType,
StringType,
UnknownLengthListType,
)
from sam_slash_slack.blocks import _make_block_message
from sam_slash_slack.slash_slack_request import SlashSlackRequest
_NL = "\n"
logger = logging.getLogger("slash_slack")
class SlashSlackCommand:
"""
Abstraction for running command functions. Input parsing, command execution, and command help live here.
"""
command: str
help: Optional[str] = None
summary: Optional[str] = None
func: Callable
flags: List[Tuple[str, FlagType, int]]
args_type: List[Tuple[str, BaseArgType, int]]
request_arg: Optional[Tuple[str, int]] = None
is_async: bool
acknowledge_response: Optional[dict] = None
def __init__(
self,
command: str,
func: Callable,
flags: List[Tuple[str, FlagType, int]],
args_type: List[Tuple[str, BaseArgType, int]],
request_arg: Optional[Tuple[str, int]],
help: Optional[str] = None,
summary: Optional[str] = None,
is_async: bool = False,
acknowledge_response: Union[None, str, dict] = None,
):
self.command = command
self.func = func
self.flags = flags
self.args_type = args_type
self.request_arg = request_arg
self.help = help
self.summary = summary
self.is_async = is_async
if acknowledge_response is not None:
self.acknowledge_response = _make_block_message(
acknowledge_response, visible_in_channel=False
)
def parse_args(self, args: str):
"""
Parse input text for this command utilizing this commands flag and arg schema.
"""
if len(self.args_type) == 1 and isinstance(self.args_type[0][1], StringType):
p = self.args_type[0][1].parse(args)
if p is None:
return None
return [p]
split_args = [arg for arg in args.split(" ") if arg != ""]
l = []
for i, value in enumerate(split_args):
if i >= len(self.args_type):
return None
if isinstance(self.args_type[i][1], UnknownLengthListType):
l.append(self.args_type[i][1].parse(split_args[i:]))
break
l.append(self.args_type[i][1].parse(value))
if len(self.args_type) != len(l):
return None
if None in l:
return None
return l
def _hydrate_func_args(
self,
args: List[Any],
flags: Set[str],
slash_slack_request: SlashSlackRequest,
):
f_args: List[Any] = [None for _ in range(self.func.__code__.co_argcount)]
if self.request_arg is not None:
_, i = self.request_arg
f_args[i] = slash_slack_request
for i, value in enumerate(args):
f_args[self.args_type[i][2]] = value
for name, _, i in self.flags:
f_args[i] = name in flags
return f_args
async def execute(
self,
args: List[Any],
flags: Set[str],
global_flags: Set[str],
slash_slack_request: SlashSlackRequest,
):
"""
Executes this command given already parsed args, flags, and global_flags.
"""
f_args = self._hydrate_func_args(args, flags, slash_slack_request)
if self.is_async:
response = await self.func(*f_args)
else:
response = self.func(*f_args)
async with aiohttp.ClientSession() as session:
async with session.post(
slash_slack_request.response_url,
json=_make_block_message(
response, visible_in_channel="visible" in global_flags
),
) as resp:
if resp.status != 200:
logger.error(
f"Received an error when sending request to callback ({resp.status}): {await resp.text()}"
)
def _help(
self, slash_slack_request: SlashSlackRequest, visible_in_channel: bool = False
):
"""
Generates the help text response for this command. Returns Slack Block Kit.
"""
_HELP = f"""
`{slash_slack_request.command}` `{self.command}` help.
To view this message run `{slash_slack_request.command} {self.command} --help`
{f"*{self.summary}*" if self.summary else ""}
{f"> {self.help}" if self.help else ""}
`{slash_slack_request.command}` {self._generate_command_signature()}
Parameters:
{self._generate_parameter_help()}
{"Flags:" if len(self.flags) > 0 else ""}
{self._generate_flag_help()}
""".strip()
return _make_block_message(
_HELP,
visible_in_channel=visible_in_channel,
)
def _generate_command_signature(self) -> str:
return f"""
`{self.command}` {" ".join(f"`{t.global_help_repr(name)}`" for name, t, _ in self.args_type + self.flags)}
""".strip()
def _generate_parameter_help(self) -> str:
parameter_help_contents = []
for name, arg, index in self.args_type:
parameter_help_contents.append(
f"""
{f"> {arg.help}" if arg.help else ""}
> `{name}` `{arg.help_repr()}`
""".strip()
)
return (_NL * 2).join(parameter_help_contents)
def _generate_flag_help(self) -> str:
flag_help_contents = []
for name, flag, index in self.flags:
flag_help_contents.append(
f"""
> `--{name}` {flag.help if flag.help else ''}
""".strip()
)
return _NL.join(flag_help_contents)
|
/sam_slash_slack-0.1.2-py3-none-any.whl/sam_slash_slack/slash_slack_command.py
| 0.793146 | 0.207014 |
slash_slack_command.py
|
pypi
|
import hashlib
import hmac
from time import time
from typing import Dict, Optional, Union
class Clock:
def now(self) -> float:
return time()
class SignatureVerifier:
def __init__(self, signing_secret: str, clock: Clock = Clock()):
"""Slack request signature verifier
Slack signs its requests using a secret that's unique to your app.
With the help of signing secrets, your app can more confidently verify
whether requests from us are authentic.
https://api.slack.com/authentication/verifying-requests-from-slack
"""
self.signing_secret = signing_secret
self.clock = clock
def is_valid_request(
self,
body: Union[str, bytes],
headers: Dict[str, str],
) -> bool:
"""Verifies if the given signature is valid"""
if headers is None:
return False
normalized_headers = {k.lower(): v for k, v in headers.items()}
request_timestamp = normalized_headers.get("x-slack-request-timestamp", None)
request_signature = normalized_headers.get("x-slack-signature", None)
if not request_timestamp or not request_signature:
return False
return self.is_valid(
body=body,
timestamp=request_timestamp,
signature=request_signature,
)
def is_valid(
self,
body: Union[str, bytes],
timestamp: str,
signature: str,
) -> bool:
"""Verifies if the given signature is valid"""
if timestamp is None or signature is None:
return False
if abs(self.clock.now() - int(timestamp)) > 60 * 5:
return False
calculated_signature = self.generate_signature(timestamp=timestamp, body=body)
if calculated_signature is None:
return False
return hmac.compare_digest(calculated_signature, signature)
def generate_signature(
self, *, timestamp: str, body: Union[str, bytes]
) -> Optional[str]:
"""Generates a signature"""
if timestamp is None:
return None
if body is None:
body = ""
if isinstance(body, bytes):
body = body.decode("utf-8")
format_req = str.encode(f"v0:{timestamp}:{body}")
encoded_secret = str.encode(self.signing_secret)
request_hash = hmac.new(encoded_secret, format_req, hashlib.sha256).hexdigest()
calculated_signature = f"v0={request_hash}"
return calculated_signature
|
/sam_slash_slack-0.1.2-py3-none-any.whl/sam_slash_slack/signature_verifier.py
| 0.931058 | 0.266947 |
signature_verifier.py
|
pypi
|
# sam_subseq - Extract GFF Features From Aligned Reads
`sam_subseq` takes two inputs:
1. SAM file with reads (or sequences in general) aligned to one or more references
2. GFF file defining features for the reference(s)
`sam_subseq` will project the GFF coordinates (which refer to the reference)
onto the reads, extract subsequences corresponding to the GFF features (with
deletions, insertions, truncations, etc.), and output these subsequences in
FASTA format.
## Installation
```bash
pip install sam_subseq
```
## Usage
```
$ sam_subseq -h
usage: sam_subseq [-h] --gff GFF [infile] [outfile]
Extract features (subsequences) from aligned reads in a SAM file,
using annotations for the reference sequence.
sam_subseq parses the CIGAR string to determine which part of the
read sequence (the query) to output.
The SAM file must be sorted by coordinate (default for samtools sort)!
Example:
80 180 290
|---CDS----|
|----exon------------|
REF: -------------------------------
QRY: xxxxxxxxxxyyyyyyy--z
The reference has an exon annotation from position 80-290.
Extracting this feature from the query will yield: xxxxxxxxxxyyyyyyyz
The CDS in the query shows a deletion and is incompletely represented.
Extracting the CDS from 180-290 will yield yyyyyyyz.
Some information from the gff file is written into the header of each
output sequence. Coordinates conform to Python conventions, ie.
zero-based and end-exclusive.
These fields are of the form 'label=value;'. Currently, the following
information is output:
- the original sequence header
- qry_start: The start coordinate of the extracted feature in the
query (ie. aligned, non-reference sequence)
- qry_stop: The end coordinate of the extracted feature in the query
- qry_len: The length of the extracted feature in the query
The length can be zero, for example if a feature spans positions
50-100, but the alignment of the query spans only positions 10-40
- gff_id: The ID of the gff record
- gff_type: The type of the gff record
- gff_start: The start coordinate as defined in the GFF (ie. for the
reference)
- gff_end: The end coordinate as defined in the GFF
- gff_phase: The phase as defined in the GFF
- gff_name: If a 'Name' annotation is present in the GFF attribute
field, it is output. If it is not available, this is set to NA.
The output is a FASTA file with one extracted feature per record.
positional arguments:
infile Input file (.sam). Default: stdin
outfile Output file (.fasta) Default: stdout
options:
-h, --help show this help message and exit
--gff GFF GFF files with features to extract. GFF SEQIDs (field 1) must correspond to SAM RNAMEs (field 1), or they will not be found.
```
## Example
**SAM input**
```
@HD VN:1.6 SO:coordinate
@SQ SN:ref1 LN:67
@SQ SN:ref2 LN:67
@PG ID:minimap2 PN:minimap2 VN:2.26-r1175 CL:minimap2 -a -s1 -m1 -w1 -E1,0 refs.fa queries.fa
@PG ID:samtools PN:samtools PP:minimap2 VN:1.17 CL:samtools sort -O sam
qry1 0 ref1 1 60 67M * 0 0 ATCGAGTCGTAGCAGGCTGAGCGATGCGAGGCAGCGACGGACGAGTAGCAGCTAAAGCTAAGGAGCA * NM:i:0 ms:i:134 AS:i:134 nn:i:0 tp:A:P cm:i:53 s1:i:67 s2:i:0 de:f:0 rl:i:0
qry3 0 ref1 1 46 25M19D23M * 0 0 ATCGAGTCGTAGCAGGCTGAGCGATGTAGCAGCTAAAGCTAAGGAGCA * NM:i:19 ms:i:88 AS:i:73 nn:i:0 tp:A:P cm:i:21 s1:i:44 s2:i:0 de:f:0.0204 rl:i:0
qry2 0 ref1 33 48 35M * 0 0 AGCGACGGACGAGTAGCAGCTAAAGCTAAGGAGCA * NM:i:0 ms:i:70 AS:i:70 nn:i:0 tp:A:P cm:i:21 s1:i:35 s2:i:0 de:f:0 rl:i:0
qry4 0 ref2 29 55 39M * 0 0 GAGCTGATGCACGACACGACGATCGATCGACTGTATGTA * NM:i:0 ms:i:78 AS:i:78 nn:i:0 tp:A:P cm:i:25 s1:i:39 s2:i:0 de:f:0 rl:i:0
```
**Alignment**
```
<< Alignments to ref1 >>
1 11 21 31 41 51 61
ATCGAGTCGTAGCAGGCTGAGCGATGCGAGGCAGCGACGGACGAGTAGCAGCTAAAGCTAAGGAGCA
...................................................................
.........................*******************.......................
...................................
<< Alignments to ref2 >>
1 11 21 31 41 51 61
ACGACGTACGTAGCGAACGACGATCGACGAGCTGATGCACGACACGACGATCGATCGACTGTATGTA
.......................................
```
**GFF input**
```
##gff-version 3
##sequence-region ref1 1 67
ref1 . gene 1 67 . + . ID=ref1
ref1 . exon 10 62 . + . ID=ref1:exon;=ref1-exon;Parent=ref1
ref1 . CDS 20 62 . + 0 ID=ref1:CDS;Name=ref1-cds;Parent=ref1
##sequence-region ref2 1 67
ref2 . gene 1 67 . + . ID=ref2
ref2 . exon 10 62 . + . ID=ref2:exon;=ref2-exon;Parent=ref2
ref2 . CDS 20 62 . + 0 ID=ref2:CDS;Name=ref2-cds;Parent=ref2
```
**FASTA output**
```
>qry1;qry_start=0;qry_stop=67;qry_len=67;gff_id=ref1;gff_type=gene;gff_start=0;gff_end=67;gff_phase=.;gff_name=NA
ATCGAGTCGTAGCAGGCTGAGCGATGCGAGGCAGCGACGGACGAGTAGCAGCTAAAGCTAAGGAGCA
>qry1;qry_start=9;qry_stop=62;qry_len=53;gff_id=ref1;gff_type=exon;gff_start=9;gff_end=62;gff_phase=.;gff_name=NA
TAGCAGGCTGAGCGATGCGAGGCAGCGACGGACGAGTAGCAGCTAAAGCTAAG
>qry1;qry_start=19;qry_stop=62;qry_len=43;gff_id=ref1;gff_type=CDS;gff_start=19;gff_end=62;gff_phase=0;gff_name=ref1-cds
AGCGATGCGAGGCAGCGACGGACGAGTAGCAGCTAAAGCTAAG
>qry3;qry_start=0;qry_stop=48;qry_len=48;gff_id=ref1;gff_type=gene;gff_start=0;gff_end=67;gff_phase=.;gff_name=NA
ATCGAGTCGTAGCAGGCTGAGCGATGTAGCAGCTAAAGCTAAGGAGCA
>qry3;qry_start=9;qry_stop=43;qry_len=34;gff_id=ref1;gff_type=exon;gff_start=9;gff_end=62;gff_phase=.;gff_name=NA
TAGCAGGCTGAGCGATGTAGCAGCTAAAGCTAAG
>qry3;qry_start=19;qry_stop=43;qry_len=24;gff_id=ref1;gff_type=CDS;gff_start=19;gff_end=62;gff_phase=0;gff_name=ref1-cds
AGCGATGTAGCAGCTAAAGCTAAG
>qry2;qry_start=0;qry_stop=35;qry_len=35;gff_id=ref1;gff_type=gene;gff_start=0;gff_end=67;gff_phase=.;gff_name=NA
AGCGACGGACGAGTAGCAGCTAAAGCTAAGGAGCA
>qry2;qry_start=0;qry_stop=30;qry_len=30;gff_id=ref1;gff_type=exon;gff_start=9;gff_end=62;gff_phase=.;gff_name=NA
AGCGACGGACGAGTAGCAGCTAAAGCTAAG
>qry2;qry_start=0;qry_stop=30;qry_len=30;gff_id=ref1;gff_type=CDS;gff_start=19;gff_end=62;gff_phase=0;gff_name=ref1-cds
AGCGACGGACGAGTAGCAGCTAAAGCTAAG
>qry4;qry_start=0;qry_stop=39;qry_len=39;gff_id=ref2;gff_type=gene;gff_start=0;gff_end=67;gff_phase=.;gff_name=NA
GAGCTGATGCACGACACGACGATCGATCGACTGTATGTA
>qry4;qry_start=0;qry_stop=34;qry_len=34;gff_id=ref2;gff_type=exon;gff_start=9;gff_end=62;gff_phase=.;gff_name=NA
GAGCTGATGCACGACACGACGATCGATCGACTGT
>qry4;qry_start=0;qry_stop=34;qry_len=34;gff_id=ref2;gff_type=CDS;gff_start=19;gff_end=62;gff_phase=0;gff_name=ref2-cds
GAGCTGATGCACGACACGACGATCGATCGACTGT
```
|
/sam_subseq-0.1.0.tar.gz/sam_subseq-0.1.0/README.md
| 0.68215 | 0.870212 |
README.md
|
pypi
|
import re
class IndexMap:
"""
Build an index map, mapping reference coordinates to query coordinates.
This allows retrieval of mapped read segments using reference positions.
The index map is a list of tuples. Each list element corresponds to a
reference position. The tuple at this list element contains the interval of
the query positions corresponding to this reference position.
Example:
Reference sequence: AATTA
Query sequence: ATTCA
CIGAR: 1M1D2M1I1M
The resulting alignment:
REF: AATT-A
QRY: A-TTCA
CIG: 1M1D2M1I1M
The resulting index map:
[(0, 1), (1, 1), (1, 2), (2, 4), (4, 5)].
ref[0] : "A" -> qry[0:1] : "A" (match)
ref[1] : "A" -> qry[1:1] : "" (deletion)
ref[2] : "T" -> qry[1:2] : "T" (match)
ref[3] : "T" -> qry[2:4] : "TC" (insertion)
ref[4] : "A" -> qry[4:5] : "A" (match)
"""
# Which CIGAR operations consume reference/query bases?
C_CONSUME_REF = "MDN=X"
C_CONSUME_QRY = "MIS=X"
C_CONSUME_NONE = "HP"
C_IS_CLIP = "HS"
def __init__(self, cigar, offset, allow_oob = False):
"""
        Initialize an index map.
Parameters:
cigar : str
The cigar string for the alignment.
offset : int
The start position of the alignment in the reference.
This corresponds to the SAM field 'POS' minus 1.
allow_oob : bool
Allow indexing with out-of-bounds limits? If this is False,
requesting a reference range that is not covered by the query
is an error. If it is True, the out-of-bounds parts will be
empty, and no error is raised.
"""
self._cigar = cigar
self._offset = offset
self.allow_oob = allow_oob
self._parse_cigar(cigar)
# The reference range covered
self.limits = (self._offset, len(self) + self._offset)
self._qry_cur = 0
self._index_map = []
self.make_index_map()
def qry_range(self, ref_start, ref_stop):
"""
Return the range of the query sequence corresponding to specified
reference range.
Parameters:
ref_start : int
First reference position to include for the query
(0-based, inclusive)
ref_stop : int
Last reference position for the query (0-based, exclusive)
Returns:
range : tuple
Range (qry_start, qry_end) that corresponds to the requested
reference range.
Suitable to be used as qry_seq[qry_start:qry_end]
"""
if ref_start is None:
ref_start = self._offset
if ref_stop is None:
ref_stop = len(self) + self._offset
if ref_start < 0 or ref_stop < 0:
raise IndexError("Indices cannot be negative")
if ref_start > ref_stop:
raise IndexError("Start cannot be larger than stop")
width = ref_stop - ref_start
start = ref_start - self._offset
if width > 0:
stop = start + width - 1
else:
stop = start
if not self.allow_oob:
# Bounds checking
for want in (start, stop):
if want < 0 or want > len(self):
msg = "Requested reference range "
msg += f"{ref_start} - {ref_stop}, "
msg += f"but only {self.limits[0]} - {self.limits[1]} "
msg += "is covered by the query"
raise IndexError(msg)
else:
# Fix bounds
# Both ends out of bounds to one side:
# REF -----------------
# QRY xxxxxxxxxxx xxxxxxxxxxxx
            if start < 0 and stop < 0:
                # Fully out of bounds to the left:
                # return an empty interval at the earliest query position.
                return (self[0][0], self[0][0])
            elif start >= len(self) and stop >= len(self):
                # Fully out of bounds to the right:
                # return an empty interval at the latest query position.
                return (self[-1][0], self[-1][0])
# Clamp bounds to maxima
# REF ------------------------
# QRY xxxxxxxxxxx
else:
if start < 0:
# half oob left
start = 0
if stop >= len(self):
# half oob right
stop = len(self) - 1
if width == 0:
return (self[start][0], self[start][0])
elif width == 1:
return (self[start][0], self[start][1])
else:
return (self[start][0], self[stop][1])
def __getitem__(self, index):
return self._index_map[index]
def __len__(self):
"""
The length of the index map is the length of the underlying reference
sequence, ie. the sum of all cigar operations that consume the reference.
"""
length = 0
for count, op in self._cigar_ops:
if op in self.C_CONSUME_REF:
length += count
return length
def __repr__(self):
return repr(self._index_map)
def __str__(self):
return str(self._index_map)
def _parse_cigar(self, cigar):
"""
Decompose a CIGAR string into its components, ie. count and operation
"""
split_on = re.compile("([^0-9])")
cigar = re.sub(split_on, r"\1 ", cigar).strip().split()
out = []
for c in cigar:
count, op = re.split(split_on, c)[:2]
if not count:
count = 1
count = int(count)
out.append((count, op))
# Sanity checks
ops = [x[1] for x in out]
for i, op in enumerate(ops):
if op == "H" and i != 0 and i != len(ops) - 1:
raise ValueError("Invalid CIGAR: H is only allowed first or last")
if op == "S":
if i != 0 and i != len(out) - 1 and (ops[i - 1] != "H" and ops[i + 1] != "H"):
raise ValueError("Invalid CIGAR: S can only be preceded/followed by H")
if op not in set(self.C_CONSUME_QRY + self.C_CONSUME_REF + self.C_CONSUME_NONE):
raise ValueError(f"Invalid CIGAR: Unknown operation '{op}'")
self._cigar_ops = out
def advance_ref(self, by):
# Place zero-length intervals at advanced ref indices
addon = [(self._qry_cur, self._qry_cur)] * by
self._index_map += addon
def advance_qry(self, by, is_clip = False):
# Extend interval of qry at current ref index
# If there is no previous corresponding match position (ie, we're still
# consuming clipping operations), just advance the query position.
if is_clip:
self._qry_cur += by
return
before = self._index_map[-1]
after = (before[0], before[1] + by)
self._index_map[-1] = after
self._qry_cur += by
def advance_both(self, by):
addon = [(i, i + 1) for i in range(self._qry_cur, self._qry_cur + by)]
self._index_map += addon
self._qry_cur += by
def advance(self, cigar_op):
count, op = cigar_op
is_clip = op in self.C_IS_CLIP
if op in self.C_CONSUME_REF and op in self.C_CONSUME_QRY:
self.advance_both(by = count)
elif op in self.C_CONSUME_QRY:
self.advance_qry(by = count, is_clip = is_clip)
elif op in self.C_CONSUME_REF:
self.advance_ref(by = count)
elif op in self.C_CONSUME_NONE:
pass
else:
raise ValueError(f"Unexpected CIGAR operation: '{op}'")
def make_index_map(self, until = None):
for cigar_op in self._cigar_ops:
self.advance(cigar_op)
|
/sam_subseq-0.1.0.tar.gz/sam_subseq-0.1.0/src/sam_subseq/IndexMap.py
| 0.873498 | 0.652546 |
IndexMap.py
|
pypi
|
import sys
import argparse
import textwrap
from sam_subseq import io
from sam_subseq.SamRefAlignment import SamRefAlignment
def parse_args():
argparser = argparse.ArgumentParser(
formatter_class = argparse.RawTextHelpFormatter,
description = textwrap.dedent("""
Extract features (subsequences) from aligned reads in a SAM file,
using annotations for the reference sequence.
sam_subseq parses the CIGAR string to determine which part of the
read sequence (the query) to output.
The SAM file must be sorted by coordinate (default for samtools sort)!
Example:
80 180 290
|---CDS----|
|----exon------------|
REF: -------------------------------
QRY: xxxxxxxxxxyyyyyyy--z
The reference has an exon annotation from position 80-290.
Extracting this feature from the query will yield: xxxxxxxxxxyyyyyyyz
The CDS in the query shows a deletion and is incompletely represented.
Extracting the CDS from 180-290 will yield yyyyyyyz.
Some information from the gff file is written into the header of each
output sequence. Coordinates conform to Python conventions, ie.
zero-based and end-exclusive.
These fields are of the form 'label=value;'. Currently, the following
information is output:
- the original sequence header
- qry_start: The start coordinate of the extracted feature in the
query (ie. aligned, non-reference sequence)
- qry_stop: The end coordinate of the extracted feature in the query
- qry_len: The length of the extracted feature in the query
The length can be zero, for example if a feature spans positions
50-100, but the alignment of the query spans only positions 10-40
- gff_id: The ID of the gff record
- gff_type: The type of the gff record
- gff_start: The start coordinate as defined in the GFF (ie. for the
reference)
- gff_end: The end coordinate as defined in the GFF
- gff_phase: The phase as defined in the GFF
- gff_name: If a 'Name' annotation is present in the GFF attribute
field, it is output. If it is not available, this is set to NA.
The output is a FASTA file with one extracted feature per record.
""")
)
argparser.add_argument(
"infile", nargs = "?",
type = argparse.FileType("r"), default = sys.stdin,
help = "Input file (.sam). Default: stdin")
argparser.add_argument(
"outfile", nargs = "?",
type = argparse.FileType("w"), default = sys.stdout,
help = "Output file (.fasta) Default: stdout")
argparser.add_argument(
"--gff", required = True,
help = "GFF files with features to extract. GFF SEQIDs (field 1) must "
"correspond to SAM RNAMEs (field 1), or they will not be found."
)
args = argparser.parse_args()
return args
def extract_features(record, record_features, header_delim = ";"):
"""
Extract features from SAM sequence records.
Yields:
tuple : (header, subsequence)
a header with information from the feature and sequence record and
the extracted subsequence.
"""
offset = record["pos"]
cigar = record["cigar"]
seq = record["seq"]
alignment = SamRefAlignment(
seq = seq,
cigar = cigar,
offset = offset,
allow_oob = True)
for feature in record_features:
# Extract subsequence
start, stop = feature["start"], feature["end"]
qry_start, qry_stop = alignment._index_map.qry_range(start, stop)
subseq = alignment[start:stop]
# Prepare header for output
header = record["qname"]
if not header.endswith(header_delim):
header += header_delim
header += header_delim.join([
# Feature information for record, ie. actual start/end positions
f"qry_start={qry_start}",
f"qry_stop={qry_stop}",
f"qry_len={qry_stop - qry_start}",
# Feature information from the gff
io.gff_to_header(feature, delim = header_delim)
])
yield (header, subseq)
def main(samfile, gfffile, outfile):
features = {}
# Store features for each reference sequence
for feature in io.parse_gff(gfffile):
if not feature["seqid"] in features.keys():
features[feature["seqid"]] = []
features[feature["seqid"]].append(feature)
# Look for features and extract subsequences from alignments
for record in io.parse_sam(samfile):
try:
record_features = features[record["rname"]]
for header, subseq in extract_features(record, record_features):
io.write_fasta(outfile, header, subseq)
except KeyError:
# No features to extract were found
continue
def main_cmdline():
args = parse_args()
try:
main(args.infile, args.gff, args.outfile)
except BrokenPipeError:
sys.exit(0)
if __name__ == "__main__":
    main_cmdline()
|
/sam_subseq-0.1.0.tar.gz/sam_subseq-0.1.0/src/sam_subseq/main.py
| 0.486088 | 0.475423 |
main.py
|
pypi
|
import sys
import io
def stdin_or_fh(f):
"""
    Yield lines from stdin, an open file handle, or a path to a file on disk.
"""
if f is sys.stdin:
for line in f:
yield line
elif isinstance(f, str):
with open(f, "r") as fh:
for line in fh:
yield line
elif isinstance(f, io.TextIOWrapper):
for line in f:
yield line
else:
raise TypeError(f"Unrecognized input file of type {f.__class__.__name__}")
def parse_sam(src):
"""
Read a SAM file.
The one-based offset position is converted to zero-based.
Parameters:
src : Either sys.stdin or a fpath to a file.
Yields:
record : dict
An alignment record from the SAM file, with the keys:
qname, flag, rname, pos, mapq, cigar, rnext, pnext, tlen, seq, qual
'pos' is converted to zero-based!
Raises:
ValueError if the SAM file is not sorted by coordinate.
"""
is_sorted = False
for line in stdin_or_fh(src):
line = line.strip()
fields = line.split("\t")
if line.startswith("@"):
# Still in the header block, no sort order defined
            if not is_sorted and line.startswith("@HD"):
                # The @HD header line declares the sort order, e.g. "SO:coordinate"
                is_sorted = "SO:coordinate" in fields
continue
else:
# Not in the header block
if not is_sorted:
raise ValueError("SAM file must be sorted by coordinate!")
else:
record = {
"qname": fields[0],
"flag": fields[1],
"rname": fields[2],
# Convert one-based position to zero-based
"pos": int(fields[3]) - 1,
"mapq": fields[4],
"cigar": fields[5].upper(),
"rnext": fields[6],
"pnext": fields[7],
"tlen": int(fields[8]),
"seq": fields[9],
"qual": fields[10]
}
yield record
def parse_gff(src):
is_gff = False
for line in stdin_or_fh(src):
line = line.strip()
if line.startswith("#"):
# In the comment block
if not is_gff:
is_gff = "gff-version 3" in line
continue
else:
# Not in the comment block
fields = line.split("\t")
attributes = {}
for attribute in fields[8].split(";"):
k, v = [x.strip() for x in attribute.split("=")]
attributes[k] = v
record = {
"seqid": fields[0],
"source": fields[1],
"type": fields[2],
# One-based to zero-based
"start": int(fields[3]) - 1,
# GFF stop is one-based and inclusive. Python indexing is
                # zero-based and end-exclusive -> no adjustment necessary
"end": int(fields[4]),
"score": fields[5],
"strand": fields[6],
"phase": fields[7],
"attributes": attributes
}
yield record
def gff_to_header(gff_record, delim):
"""
Format fields of a gff record for inclusion as a FASTX header.
"""
header = [
("gff_id", gff_record["seqid"]),
("gff_type", gff_record["type"]),
("gff_start", gff_record["start"]),
("gff_end", gff_record["end"]),
("gff_phase", gff_record["phase"])
]
if "Name" in gff_record["attributes"]:
header.append(("gff_name", gff_record["attributes"]["Name"]))
else:
header.append(("gff_name", "NA"))
header = ([f"{k}={v}" for k, v in header])
header = ";".join([x for x in header])
return header
def write_fasta(dst, header, seq):
dst.write(f">{header}\n{seq}\n")
|
/sam_subseq-0.1.0.tar.gz/sam_subseq-0.1.0/src/sam_subseq/io.py
| 0.433022 | 0.452838 |
io.py
|
pypi
|
from typing import List, Tuple
from template_creator.util.constants import EVENT_TYPES
def create_lambda_function(name: str, handler: str, uri: str, variables, events, api) -> dict:
generic = {
'Type': 'AWS::Serverless::Function',
'Properties': {
'CodeUri': uri,
'Handler': handler,
'Role': {
'Fn::GetAtt': [
create_role_name(name),
'Arn'
]
}
}
}
add_variables(generic, variables)
add_events(generic, events, name)
add_api(generic, api)
return generic
def create_event_name(lambda_name: str, event: str) -> str:
return '{}{}Event'.format(lambda_name, event)
def create_role_name(lambda_name: str) -> str:
return '{}Role'.format(lambda_name)
def add_events(generic: dict, events: List[str], name: str) -> None:
events_with_value = dict()
if events:
for event in events:
events_with_value[create_event_name(name, event)] = EVENT_TYPES[event]
generic['Properties'].update({'Events': events_with_value})
def add_variables(generic: dict, variables: List[str]) -> None:
variables_with_value = dict()
if variables:
for variable in variables:
variables_with_value[variable] = 'Fill in value or delete if not needed'
generic['Properties'].update({
'Environment': {
'Variables': variables_with_value
}
})
def add_api(generic: dict, api: List[str]) -> None:
if api:
method = api[0]
path = api[1]
generic['Properties'].update({
'Events': {
method.upper(): {
'Type': 'Api',
'Properties': {
'Path': path,
'Method': method
}
}
}
})
def create_role(name: str, permissions: List[str]) -> Tuple[str, dict]:
role_name = create_role_name(name)
actions = ['logs:CreateLogStream', 'logs:CreateLogGroup', 'logs:PutLogEvents']
actions.extend(permissions)
role = {
'Type': 'AWS::IAM::Role',
'Properties': {
'AssumeRolePolicyDocument': {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Principal': {
'Service': ['lambda.amazonaws.com']
},
'Action': ['sts:AssumeRole']
}
]
},
'Path': '/',
'Policies': [
{
'PolicyName': 'LambdaPolicy',
'PolicyDocument': {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': actions,
'Resource': '*'
}
]
}
}
]
}
}
return role_name, role
|
/sam-template-creator-0.1.3.tar.gz/sam-template-creator-0.1.3/template_creator/writer/lambda_writer.py
| 0.80456 | 0.371308 |
lambda_writer.py
|
pypi
|
import logging
import sys
from types import FrameType
from typing import List, cast
from loguru import logger
from pydantic import AnyHttpUrl, BaseSettings
class LoggingSettings(BaseSettings):
LOGGING_LEVEL: int = logging.INFO # logging levels are type int
class Settings(BaseSettings):
API_V1_STR: str = "/api/v1"
# Meta
logging: LoggingSettings = LoggingSettings()
# BACKEND_CORS_ORIGINS is a comma-separated list of origins
# e.g: http://localhost,http://localhost:4200,http://localhost:3000
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = [
"http://localhost:3000", # type: ignore
"http://localhost:8000", # type: ignore
"https://localhost:3000", # type: ignore
"https://localhost:8000", # type: ignore
]
PROJECT_NAME: str = "SALES Prediction API"
class Config:
case_sensitive = True
# See: https://loguru.readthedocs.io/en/stable/overview.html#entirely-compatible-with-standard-logging # noqa
class InterceptHandler(logging.Handler):
def emit(self, record: logging.LogRecord) -> None: # pragma: no cover
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = str(record.levelno)
# Find caller from where originated the logged message
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__: # noqa: WPS609
frame = cast(FrameType, frame.f_back)
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level,
record.getMessage(),
)
def setup_app_logging(config: Settings) -> None:
"""Prepare custom logging for our application."""
LOGGERS = ("uvicorn.asgi", "uvicorn.access")
logging.getLogger().handlers = [InterceptHandler()]
for logger_name in LOGGERS:
logging_logger = logging.getLogger(logger_name)
logging_logger.handlers = [InterceptHandler(level=config.logging.LOGGING_LEVEL)]
logger.configure(
handlers=[{"sink": sys.stderr, "level": config.logging.LOGGING_LEVEL}]
)
settings = Settings()
|
/sam_tid_regression_model-0.0.6-py3-none-any.whl/api/app/config.py
| 0.535827 | 0.163646 |
config.py
|
pypi
|
from typing import Any, List, Optional
from pydantic import BaseModel
from regression_model.processing.validation import SalesDataInputSchema
class PredictionResults(BaseModel):
errors: Optional[Any]
version: str
predictions: Optional[List[float]]
class MultipleSalesDataInputs(BaseModel):
inputs: List[SalesDataInputSchema]
class Config:
schema_extra = {
"example": {
"inputs": [
{
'date': '2021-11-10',
'feature_1': 1.9714,
'feature_2': 30.99,
'feature_3': 27.0,
'feature_4': 0.0,
'feature_5': 11.2857,
'feature_6': 16.0,
'feature_7': 1.105,
'feature_8': 29.7143,
'feature_9': 30.99,
'feature_10': 26.1135,
'feature_11': 27379.1909,
'feature_12': 28.3667,
'feature_13': 27869.5214,
'feature_14': 30.99,
'feature_15': 1.449,
'feature_16': 6.6053,
'feature_17': 30.99,
'feature_18': 1.0,
'feature_19': 1.0,
'feature_20': 30.99,
'feature_21': 30.99,
'feature_22': 29662.7762,
'feature_23': 68.6626,
'feature_24': 2.0,
'feature_25': 30.99,
'feature_26': 37.0,
'feature_27': 1.0,
'feature_28': 22216.2167,
'feature_29': 30.99,
'feature_30': 1.586,
'feature_31': 60.1477,
'feature_32': 25.2857,
'feature_33': 11.1429,
'feature_34': 27.1493,
'feature_35': 50319.25,
'feature_36': 27.5714,
'feature_37': 45.6976,
'feature_38': 1.0,
'feature_39': 1.0417,
'feature_40': 9.0,
'feature_41': 160432.8226,
'feature_42': 30739.3095,
'feature_43': 24.1429,
'feature_44': 0.9829,
'feature_45': 23.0,
'feature_46': 3.8338,
'feature_47': 1.916,
'feature_48': 273131.4524,
'feature_49': 0.5824,
'feature_50': 7.5333,
'feature_51': 2.5714,
'feature_52': 47.0,
'feature_53': 0.7985,
'feature_54': 97.1123,
'feature_55': 30.99,
'feature_56': 25.0,
'feature_57': 30978.5595,
'feature_58': 30.99,
'feature_59': 33.8571,
'feature_60': 1.0357,
'feature_61': 7.3457,
'feature_62': 30.99,
'feature_63': 1.0,
'feature_64': 5.0,
'feature_65': 30.99,
'feature_66': 28.1429,
'feature_67': 25047.881,
'feature_68': 30.99,
'feature_69': 26.8571,
'feature_70': 2.0,
'feature_71': 244645.1429,
'feature_72': 1.0,
'feature_73': 32042.7476,
'feature_74': 1.5234,
'feature_75': 26758.9609,
'feature_76': 17.9381,
'feature_77': 30.99,
'feature_78': 9.2373,
'feature_79': 30.99,
'feature_80': 57250.3946,
'feature_81': 1.2721,
'feature_82': 30.99,
'feature_83': 22.2903,
'feature_84': 96.8513,
'feature_85': 2.0,
'feature_86': 23.8058,
'feature_87': 49.0,
'feature_88': 67.0,
'feature_89': 104949.5357,
'feature_90': 30263.1028,
'feature_91': 4.9378,
'feature_92': 65.6412,
'feature_93': 1.1302,
'feature_94': 25.8333,
'feature_95': 26.0,
'feature_96': 2.0,
'feature_97': 51.0,
'feature_98': 59.5747,
'feature_99': 26.0,
}
]
}
}
|
/sam_tid_regression_model-0.0.6-py3-none-any.whl/api/app/schemas/predict.py
| 0.783947 | 0.27492 |
predict.py
|
pypi
|
from pathlib import Path
from typing import Dict, List, Sequence
from pydantic import BaseModel
from strictyaml import YAML, load
import regression_model
# Project Directories
PACKAGE_ROOT = Path(regression_model.__file__).resolve().parent
ROOT = PACKAGE_ROOT.parent
CONFIG_FILE_PATH = PACKAGE_ROOT / "config.yml"
DATASET_DIR = PACKAGE_ROOT / "datasets"
TRAINED_MODEL_DIR = PACKAGE_ROOT / "trained_models"
class AppConfig(BaseModel):
"""
Application-level config.
"""
package_name: str
training_data_file: str
test_data_file: str
pipeline_save_file: str
class ModelConfig(BaseModel):
"""
All configuration relevant to model
training and feature engineering.
"""
target: str
features: List[str]
test_size: float
random_state: int
alpha: float
# categorical_vars_with_na_missing: List[str]
numerical_vars_with_na: List[str]
# temporal_vars: List[str]
# binarize_vars: Sequence[str]
categorical_vars: Sequence[str]
class Config(BaseModel):
"""Master config object."""
app_config: AppConfig
model_config: ModelConfig
def find_config_file() -> Path:
"""Locate the configuration file."""
if CONFIG_FILE_PATH.is_file():
return CONFIG_FILE_PATH
raise Exception(f"Config not found at {CONFIG_FILE_PATH!r}")
def fetch_config_from_yaml(cfg_path: Path = None) -> YAML:
"""Parse YAML containing the package configuration."""
if not cfg_path:
cfg_path = find_config_file()
if cfg_path:
with open(cfg_path, "r") as conf_file:
parsed_config = load(conf_file.read())
return parsed_config
raise OSError(f"Did not find config file at path: {cfg_path}")
def create_and_validate_config(parsed_config: YAML = None) -> Config:
"""Run validation on config values."""
if parsed_config is None:
parsed_config = fetch_config_from_yaml()
# specify the data attribute from the strictyaml YAML type.
_config = Config(
app_config=AppConfig(**parsed_config.data),
model_config=ModelConfig(**parsed_config.data),
)
return _config
config = create_and_validate_config()
|
/sam_tid_regression_model-0.0.6-py3-none-any.whl/regression_model/config/core.py
| 0.816736 | 0.282116 |
core.py
|
pypi
|
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from pydantic import BaseModel, ValidationError
from regression_model.config.core import config
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model inputs for na values and filter."""
validated_data = input_data.copy()
new_vars_with_na = [
var
for var in config.model_config.features
if var
not in config.model_config.categorical_vars_with_na_frequent
+ config.model_config.categorical_vars_with_na_missing
+ config.model_config.numerical_vars_with_na
and validated_data[var].isnull().sum() > 0
]
validated_data.dropna(subset=new_vars_with_na, inplace=True)
return validated_data
def validate_inputs(*, input_data: pd.DataFrame) -> Tuple[pd.DataFrame, Optional[dict]]:
"""Check model inputs for unprocessable values."""
# convert syntax error field names (beginning with numbers)
# input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
# input_data["MSSubClass"] = input_data["MSSubClass"].astype("O")
# relevant_data = input_data[config.model_config.features].copy()
# validated_data = drop_na_inputs(input_data=relevant_data)
validated_data = input_data[config.model_config.features].copy()
errors = None
try:
# replace numpy nans so that pydantic can validate
MultipleSalesDataInputs(
inputs=validated_data.replace({np.nan: None}).to_dict(orient="records")
)
except ValidationError as error:
errors = error.json()
return validated_data, errors
class SalesDataInputSchema(BaseModel):
date: Optional[str]
    feature_1: Optional[float]
feature_2: Optional[float]
feature_3: Optional[float]
feature_4: Optional[float]
feature_5: Optional[float]
feature_6: Optional[float]
feature_7: Optional[float]
feature_8: Optional[float]
feature_9: Optional[float]
feature_10: Optional[float]
feature_11: Optional[float]
feature_12: Optional[float]
feature_13: Optional[float]
feature_14: Optional[float]
feature_15: Optional[float]
feature_16: Optional[float]
feature_17: Optional[float]
feature_18: Optional[float]
feature_19: Optional[float]
feature_20: Optional[float]
feature_21: Optional[float]
feature_22: Optional[float]
feature_23: Optional[float]
feature_24: Optional[float]
feature_25: Optional[float]
feature_26: Optional[float]
feature_27: Optional[float]
feature_28: Optional[float]
feature_29: Optional[float]
feature_30: Optional[float]
feature_31: Optional[float]
feature_32: Optional[float]
feature_33: Optional[float]
feature_34: Optional[float]
feature_35: Optional[float]
feature_36: Optional[float]
feature_37: Optional[float]
feature_38: Optional[float]
feature_39: Optional[float]
feature_40: Optional[float]
feature_41: Optional[float]
feature_42: Optional[float]
feature_43: Optional[float]
feature_44: Optional[float]
feature_45: Optional[float]
feature_46: Optional[float]
feature_47: Optional[float]
feature_48: Optional[float]
feature_49: Optional[float]
feature_50: Optional[float]
feature_51: Optional[float]
feature_52: Optional[float]
feature_53: Optional[float]
feature_54: Optional[float]
feature_55: Optional[float]
feature_56: Optional[float]
feature_57: Optional[float]
feature_58: Optional[float]
feature_59: Optional[float]
feature_60: Optional[float]
feature_61: Optional[float]
feature_62: Optional[float]
feature_63: Optional[float]
feature_64: Optional[float]
feature_65: Optional[float]
feature_66: Optional[float]
feature_67: Optional[float]
feature_68: Optional[float]
feature_69: Optional[float]
feature_70: Optional[float]
feature_71: Optional[float]
feature_72: Optional[float]
feature_73: Optional[float]
feature_74: Optional[float]
feature_75: Optional[float]
feature_76: Optional[float]
feature_77: Optional[float]
feature_78: Optional[float]
feature_79: Optional[float]
feature_80: Optional[float]
feature_81: Optional[float]
feature_82: Optional[float]
feature_83: Optional[float]
feature_84: Optional[float]
feature_85: Optional[float]
feature_86: Optional[float]
feature_87: Optional[float]
feature_88: Optional[float]
feature_89: Optional[float]
feature_90: Optional[float]
feature_91: Optional[float]
feature_92: Optional[float]
feature_93: Optional[float]
feature_94: Optional[float]
feature_95: Optional[float]
feature_96: Optional[float]
feature_97: Optional[float]
feature_98: Optional[float]
feature_99: Optional[float]
class MultipleSalesDataInputs(BaseModel):
inputs: List[SalesDataInputSchema]
|
/sam_tid_regression_model-0.0.6-py3-none-any.whl/regression_model/processing/validation.py
| 0.831964 | 0.474936 |
validation.py
|
pypi
|
tsfresh
=========
tsfresh is a package that can be used to calculate a large number of time-series features, many of them rooted in physics, and use them as features in your models. It is straightforward to use, because it has built-in functions that both calculate all these features and select only the relevant ones. To do this, you need the following:
* Some train data, with missing values removed/imputed
* Some train target, to see which features are relevant and which aren't
* Optionally, some validation/test data, to calculate the same features on
tsfresh cannot handle missing values. It is recommended to always be careful with your missing values. For example: ask yourself if missing values contain any extra information, like a sensor failure. If the missing values are mostly random, it is recommended to use the pandas `interpolate` or `fillna` function to interpolate the missing values. Try not to set all the missing values to 0 or -1: this means the time series loses meaning and tsfresh will have worse results!
## Using the tsfresh transformer
The main tsfresh approach looks like this: Given some data `X` and target `y`:
* Keep the original `X` that will later be used to add the created features to
* Make a copy of `X` where we remove unneeded columns and missing values. We will refer to this as `tsdata`.
* Transform `tsdata` to the format that tsfresh requires
* Fit a transformer that calculates all the timefeatures, and adds them to `X`
* Optionally: Use the transformer to also add the same timefeatures to your testdata `X_test`
### Step 0: Obtaining data
For this tutorial, we are going to use synthetic Nereda data. The function `create_synthetic_nereda` is not part of the SAM package, but can be found at https://dev.ynformed.nl/P9 .
If you don't want to use this data, just know that this data contains the specific reactor in the `UnitName` column. One dataframe can contain multiple reactors, whose data is separate and should not be mixed. Also, the time is stored in the `HistBatchStartDate` column. The rest of the columns simply contain data, in numeric format.
```python
X = create_synthetic_nereda(units=['test'], start='2010-01-01', end='2017-05-01')
X = X.dropna(subset=['NH4']) # drop rows with missing target
y = X['NH4']
```
### Step 1: Preparing tsdata
tsfresh does not support missing data in the `X` dataframe. Furthermore, tsfresh calculation can take quite long, so we want to remove all columns that are not relevant to tsfresh. To do this, we make a copy of `X`, called `X_copy`. Importantly, the output from tsfresh is later added to `X` by looking at the row index. Therefore, we should not change the row index of `X_copy` during this process.
We do keep two meta-data columns: the column with the timestamps (`HistBatchStartDate`), and the column that specifies which timeseries each row belongs to, in case there are multiple timeseries. If there is only one timeseries in the data, such a column will have to be added, for example by saying: `X_copy['UnitName'] = 1`.
```python
# We are careful to not change the row index of tsdata
# Drop columns we don't want tsfresh to calculate on
X_copy = X.drop(['Id', 'HistBatchEndDate'], axis=1)
# We get rid of all missings. Just use simple mean imputation for now.
X_copy = X_copy.fillna(X_copy.mean())
```
### Step 2: Bring into format usable by tsfresh
tsfresh supports a data format where each id corresponds to a single instance. This means that for each instance, the entire rolling window has to be given its own id. For more information about this format, [check the official documentation here](https://tsfresh.readthedocs.io/en/latest/text/forecasting.html). Luckily, there is a convenience function to do exactly this.
```python
from tsfresh.utilities.dataframe_functions import roll_time_series
tsdata = roll_time_series(
X_copy, # the normal data where 1 row corresponds to one time measurement
column_id='UnitName', # An id that identifies different timeseries
column_sort='HistBatchStartDate', # A time id that sorts the data
column_kind=None, # optional, for if you want to group certain columns
rolling_direction=1, # 1 for rolling windows
max_timeshift=10 # max rolling window size. To save computation time
)
```
The `max_timeshift` option sets the rolling window size that tsfresh will use to calculate features for a single time point. This should be high enough that tsfresh can calculate interesting features, but the computation time increases quadratically with this parameter, so it should not be set too high.
We can now proceed to calling the transformer described in the previous section. We just have to correctly choose the `column_id` and `column_sort`: in this example: `UnitName` and `HistBatchStartDate`.
### Step 3: Actually calculating the features
To actually calculate all the features, it is recommended to use the sklearn transformer that comes with tsfresh:
```python
from tsfresh.transformers import RelevantFeatureAugmenter
from tsfresh.feature_extraction import EfficientFCParameters
transformer = RelevantFeatureAugmenter(
column_id='UnitName',
column_sort='HistBatchStartDate',
timeseries_container=tsdata,
default_fc_parameters=EfficientFCParameters()
)
# transformer.set_params(timeseries_container=tsdata) # optional
X_new = transformer.fit_transform(X, y)
```
The result will be that `X_new` will have the same columns as `X`, but with extra columns added, as calculated by tsfresh. This sklearn transformer can be used in combination with other sklearn transformers, such as pipelines and column transformers. You will just have to be careful to calculate `tsdata` beforehand, and make sure that it corresponds to `X`.
### Step 4: Applying the transformer to test data
After all that, to apply this transformation to the test data (to make a prediction), we need to do this same process, except with `transform` instead of `fit_transform`. Here we calculate some `testtsdata`, which has had the same preprocessing as `tsdata`.
Imagine we have some data `X_test` with the same format as `X`:
```python
# The same preprocessing as was done to tsdata, but now for the test data instead
X_test_copy = X_test.drop(['Id', 'HistBatchEndDate'], axis=1)
X_test_copy = X_test_copy.fillna(X_test_copy.mean())
tstestdata = roll_time_series(X_test_copy, ...) # same parameters as earlier
# apply the transformer to the test data
transformer.set_params(timeseries_container=tstestdata)
X_test_new = transformer.transform(X_test)
```
This will only calculate the features that were previously selected, and add them all to `X_test`.
## More details about tsfresh
### fc_parameters
In the transformer, there was the variable `default_fc_parameters`. There are three possible values for `default_fc_parameters`:
* ComprehensiveFCParameters()
* EfficientFCParameters()
* MinimalFCParameters()
The only difference is in how many features they compute: Comprehensive computes all features available in tsfresh, but will take a long time to compute. Efficient computes all but the most computationally intensive. Minimal only computes a few.
Minimal is only for testing purposes, because it only adds simple features like the mean, median, maximum, etcetera. However, because tsfresh can take very long if you have more than a few thousand rows, it is recommended to always start with Minimal! That way, you can see if your pipeline works, before you step up to Efficient or Comprehensive and find out after a long computation that you had a bug in your code.
### Feature selection
tsfresh computes many, many features, in the order of hundreds. Furthermore, it computes those hundreds of features for each input feature. For example, if `tsdata` contains 7 features, it will compute 7 sets of hundreds of features. The output will therefore contain thousands of columns. This is far too much, and most of these features will be completely irrelevant. tsfresh already filters out the irrelevant features by using `RelevantFeatureAugmenter` (Hence the word `relevant`). It does this by comparing each of the thousands of features to the target variable, and removing those that have absolutely no relation.
Therefore, the transformer will not return thousands of features, but perhaps only a few dozen. There is no hard rule on this. There are multiple significance tests applied, depending on the type of feature: either the Fisher test, the Mann-Whitney test, or the Kendall test. Only the features with the lowest p-values are kept, with the exact threshold decided by the Benjamini Hochberg procedure. Therefore, the number of returned features can differ depending on the data: if you have purely random noise data, there might be no new features returned at all.
Furthermore, because of the complexity of this entire process, it is not guaranteed that running the code with 99% similar data will return 99% similar feature columns. However, we haven't done enough experimentation to know exactly how much this varies.
### Obtaining specific features
Knowing that tsfresh is not guaranteed to return the same features, we might want to ask it to calculate specific features, instead of leaving it all up to the selection algorithm described above. There are two ways of doing this:
* The first, and probably easiest way is when you just want to use the same columns as were selected in a previous run. This can be done by using the sklearn-transformer as described in the first section. By using `transform`, only the features selected during the `fit` will be calculated. This means the `transform` call will be much faster than the `fit` call, since only a few features are calculated, instead of hundreds.
* The second method applies if you want even finer control over which features are calculated. In this case, [check the tsfresh documentation here](https://tsfresh.readthedocs.io/en/latest/text/feature_extraction_settings.html). This page describes creating a dictionary that lists all the features you want to calculate. This is obviously more labor-intensive. Although it is not mentioned on this page, this approach can also be combined with the sklearn-transformer: just pass the dictionary to the transformer, in the `default_fc_parameters` argument. A minimal sketch of such a dictionary is shown below.
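As a minimal sketch of that second approach: the calculator names below (`mean`, `maximum`, `autocorrelation`) are just examples taken from the settings page above, and `X`, `y` and `tsdata` are assumed to be the same objects as in the earlier sections.
```python
from tsfresh.transformers import RelevantFeatureAugmenter

# Keys are tsfresh feature calculator names, values are their parameters
# (None for calculators that take no parameters).
custom_fc_parameters = {
    "mean": None,
    "maximum": None,
    "autocorrelation": [{"lag": 1}, {"lag": 5}],
}

transformer = RelevantFeatureAugmenter(
    column_id='UnitName',
    column_sort='HistBatchStartDate',
    timeseries_container=tsdata,
    default_fc_parameters=custom_fc_parameters
)
X_new = transformer.fit_transform(X, y)
```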
### Troubleshooting: ZeroDivisionError
During development, we encountered the following error: `ZeroDivisionError: integer division or modulo by zero`. We have not (yet) found out exactly what triggers this error, but we know how to fix it.
The key is the `time` column. When this column is converted to a simple integer range, the problem disappears. This code is run in the 'Step 1: Preparing tsdata' section.
```python
X_copy['HistBatchStartDate'] = range(X_copy.shape[0])
```
This should not change the result, because tsfresh does not actually use the content of the `time` column: it's only used for sorting the data correctly. It is important though, to double check that this does not mess up the sorting. To be safe, you should always check if your dataframe is already sorted by the `time` column. If it's not, the above operation could destroy the meaning of your time column!
|
/sam-3.1.9.tar.gz/sam-3.1.9/docs/source/general_documents/tsfresh.md
| 0.638497 | 0.991032 |
tsfresh.md
|
pypi
|
Project approach
==================
In this document we describe the typical steps to take in a sensor analysis project.
## Before the project
Use the SAM package as much as possible. If relevant functionality is missing, let us add that and extend the package.
Tip: read the tips!
### General notes, tips and tricks
* Events that we're trying to predict are typically rare. That means
* high performance, like 90% precision, is unrealistic. Always compare performance to a current situation without the model.
* supervised learning will generally be difficult if there's not a ton of data (containing incidents/anomalies).
* Often someone responsible for asset operation will have definitions/thoughts on what is an anomaly/incident. Can also be
* a machine not functioning for a while: an outage
* an output sensor value going over a threshold
* a sensor value acting differently than a related sensor (in the same pipe for example)
* Anomalies found by unsupervised methods are often too hard to interpret by the end user and thus less valuable.
* SAM has built-in logging capabilities; use those to keep track of the processing steps from data ingestion to model outcome.
## Preprocessing
Data from sensors should be nicer to work with than data typed in by humans, but preprocessing still requires attention. Important to note is that some anomalies in the data are the stuff that we're trying to find/predict, so we should not take those out. Think of sensor values becoming "too low" due to sensor drift, or "missing data" due to machine failure.
### Picking the target variable
The choice of target variable is of course crucial to the success of the model. You could
* Predict the future value of a variable by shifting it backward (see the pandas sketch after this list)
* When predicting a future variable value, consider _predicting the derivative instead_. This might make the model more robust to, for example, shifts in the whole dataset (e.g. recalibrating a sensor), or to application in a different situation (e.g. another Nereda reactor with overall lower values).
* Predict the current value of a variable for anomaly detection
* Create a binary variable indicating another variable being above (1) or below (0) a threshold. Then shift that variable backwards and predict it.
* Create a multiclass variable for bins of the target variable, e.g. <0, 0-1, 1-2, 3-4, >4, shift that backward and predict it.
* Predict the time (continuous) to a next event by adding a column containing time to a specific future event (like failure).
* Predict the absence of data in a specific variable (binary) due to down time. Can also be shifted backwards for early warning.
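The shifting and binarising options above come down to a few lines of pandas. A minimal sketch, assuming a dataframe `df` sorted by time with an `NH4` column, a prediction horizon of 3 time steps, and a made-up threshold of 5.0:
```python
# df is an existing dataframe, sorted by time, with an 'NH4' column.

# Predict the value of NH4 three time steps ahead: shift the target backward.
df["target_future"] = df["NH4"].shift(-3)

# Or predict the change instead of the absolute value.
df["target_derivative"] = df["NH4"].diff().shift(-3)

# Or a binary target: will NH4 exceed a threshold three steps from now?
df["target_exceeds"] = (df["NH4"] > 5.0).astype(int).shift(-3)

# The last rows have no future value; drop them before training.
df = df.dropna(subset=["target_future"])
```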
### Labelling data
We've joined RHDHV to make use of their domain expertise; one way to do that is to have domain experts label data for us. This could also be anyone outside of RHDHV with domain knowledge of course ;-) We have good experience with using Grafana for labelling time series data. Typically you would create a Grafana dashboard from the data that's in Influx or MongoDB, create IPA-accounts for the colleagues doing the labelling, and have them use the annotation tool in Grafana to do the actual labelling in one of the graphs. Ask Ruben, Rutger or Fenno to get you up to speed on this.
Grafana is part of the SAM platform and can be found [here](https://grafana-sam.ynformed.nl).
## Sampling
Data from sensors is usually timestamped. When sampling data into train/test/validation sets it is often important to **keep the time order intact**. After all, training on a set that is chronologically after the test set can lead to information leakage: you are predicting the past. This is especially the case when predicting processes that evolve over time, and thus have interdependent events, e.g. the biological Nereda process.
### Cross validating with sliding window

If we have enough data, we can cross validate using a sliding window over time.
### Cross validating with present holdout set

Here we divide the data into multiple blocks, use the most recent as holdout/validation set and randomly assign the other blocks to train/test. The latter are used for model optimisation, with the holdout set providing a representative view of actual performance.
_Note that this performance on the holdout set is heavily influenced by the amount of (or lack of) events happening in that set._
**References**
* [Sklearn's GroupShuffleSplit](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GroupShuffleSplit.html)
* [Sklearn's TimeSeriesSplit](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html)
* https://towardsdatascience.com/time-series-nested-cross-validation-76adba623eb9
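As a minimal illustration of the sliding-window scheme above, scikit-learn's `TimeSeriesSplit` (see the references) produces chronologically ordered folds; the model and scoring metric below are placeholders:
```python
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import TimeSeriesSplit, cross_val_score

# X and y must be sorted by time, otherwise the folds leak information.
tscv = TimeSeriesSplit(n_splits=5)
model = RandomForestRegressor(n_estimators=100, random_state=42)

scores = cross_val_score(model, X, y, cv=tscv, scoring="neg_mean_absolute_error")
print(scores)
```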
### Discarding old data
Imagine a process that is evolving over time, e.g. a biological process. Origin of events might change over time, making it less useful to include "old" events in your training data when predicting events tomorrow. Test this by discarding "old" data from training and see if performance improves.
### Up/down sampling
When doing classification, the events to predict are usually rare. You could therefore try to (randomly) down-sample the majority class to e.g. a 50-50 distribution in the train set, or synthetically generate new minority class samples.
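A random down-sample of the majority class takes a few lines of pandas; a minimal sketch, assuming a `train` dataframe with a binary `incident` column (1 = the rare event):
```python
import pandas as pd

minority = train[train["incident"] == 1]
majority = train[train["incident"] == 0]

# Down-sample the majority class to the size of the minority class (~50-50).
majority_down = majority.sample(n=len(minority), random_state=42)
train_balanced = pd.concat([minority, majority_down]).sample(frac=1, random_state=42)
```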
## Feature engineering
Whereas time series models (and RNN's) extract time-related features from the data themselves, classification methods don't. We have to generate these ourselves. Obvious features are lags of current values, aggregated lags like `min`, `max`, `avg`, but we can also use for example [Fourier transforms](https://en.wikipedia.org/wiki/Fourier_transform) or the like. These have mostly been implemented in SAM's `sam.feature_engineering.BuildRollingFeatures` class, see [the docs](http://10.2.0.20/sam/feature_engineering.html#rolling-features).
Loes has done some more extensive research on these features, which can be found [here](/sam/general_documents/feature_extraction.html). An extensive set of these features can also be implemented using the `tsfresh` Python package.
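The SAM `BuildRollingFeatures` class linked above handles this for you; as a library-agnostic sketch of the same kind of lag and rolling aggregate features in plain pandas (the column name and window sizes are illustrative only):
```python
import pandas as pd

def add_rolling_features(df: pd.DataFrame, col: str, windows=(3, 6, 12)) -> pd.DataFrame:
    """Add lag and rolling aggregate features for a single column."""
    out = df.copy()
    for w in windows:
        out[f"{col}_lag_{w}"] = out[col].shift(w)
        out[f"{col}_rollmean_{w}"] = out[col].rolling(w).mean()
        out[f"{col}_rollmin_{w}"] = out[col].rolling(w).min()
        out[f"{col}_rollmax_{w}"] = out[col].rolling(w).max()
    return out

df = add_rolling_features(df, "NH4")
```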
** References **
* [tsfresh documentation](https://tsfresh.readthedocs.io/en/latest/)
## Feature selection
The feature space in a typical time series classification project grows quickly due to all these lag features etc. That might not matter in most cases when you aim for maximum performance. When you need an explainable model though, you'll want fewer features. There are several visualisations/analyses that can be used to select only the relevant features:
* Autocorrelation plots: see how a (target) features correlates with itself over time, and only select high correlating lags as features
* Cross-correlation plots: see how much a feature correlates with the target over time, and only select high correlating lags of the feature
** References **
* See http://10.2.0.20/sam/feature_selection.html
### Using the target variable as feature
Using historic values of the target variable as a feature improves predictions, but is not always valid.
** Valid **
* When predicting a variable going over threshold. Be careful with evaluation though: if the model just learns that once a value is > threshold the next value is also > threshold, it might get really good performance without actually making any useful prediction.
** Not valid **
* When building a model for explanation of for example high values in the target variable.
## Modelling: classification
You're typically doing analysis on time series data. That can be done using time series models like ARIMA, using RNNs, or using general classification techniques like Random Forest.
Time series models are good at trends and seasonalities, but can also encompass other confounding variables. They are limited to being trained on a single time series though, a limitation that we can overcome by using other classification techniques.
Also keep in mind that a target variable might not be continuous: it can be boolean for surpassing
a threshold yes/no, or maybe even categorical.
**In general**
* Single time series, not many confounding variables, continuous outcome, heavy trends and
seasonalities: **time series analysis** like ARIMA. Try packages like [Facebook's Prophet](https://github.com/facebook/prophet).
Example usage:
* Visitors on a webpage
* Temperature in our office
* Power usage in a building
* Learning from one/multiple time series, confounding variables, no macro trends (gradual upward
shift), continuous/discrete outcome: **classification models** like XGB.
* Being the cool kid on the block: RNNs.
## Modelling: anomaly detection
Using anomaly detection techniques we can detect non-predefined anomalies. Imagine a sensor measuring a water level in a river: it is hard to set fixed thresholds for this sensor as the value is heavily influenced by e.g. the amount of rainfall. High water might be a good thing when it has rained, but an anomaly when it hasn't. Better than setting thresholds would be a dynamic way of detecting anomalies.
### Quantile regression
With quantile regression we build a regression model that predicts both the sensor value and a confidence interval around it. After "predicting" the current value we check whether the actual value falls within the confidence interval; if it does not, it is an outlier.
These ranges of outliers might form specific patterns. When sensor drift is occurring, for example, the measured values will slowly drift below the confidence interval. If we calculate summary statistics on the range of outliers, we could **classify what is going on** and possibly prioritise the outlier accordingly.
_Note that you'll want to do some filtering on the anomalies found, often a single minimal anomaly is not enough to warn a user. More on that later when Lieke has progressed her research._
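A minimal sketch of this idea using scikit-learn's quantile loss (the toy data and the 90% interval are illustrative; SAM has its own helpers for this):

```python
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

# Toy data: X are features (e.g. rainfall lags), y is the sensor value
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 3))
y = X @ np.array([1.0, 0.5, -0.3]) + rng.normal(scale=0.5, size=1000)

# Two quantile models together form a 90% confidence interval
lower = GradientBoostingRegressor(loss="quantile", alpha=0.05).fit(X, y)
upper = GradientBoostingRegressor(loss="quantile", alpha=0.95).fit(X, y)

# A point is an outlier when it falls outside the predicted interval
outliers = (y < lower.predict(X)) | (y > upper.predict(X))
```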
**References**
* [Quantile regression tree implementations in Python](https://scikit-garden.github.io/examples/QuantileRegressionForests/)
* Finding outliers in quantile regression predictions [`sam.train_models.find_outlier_curves`](http://10.2.0.20/sam/train_models.html?highlight=find_outlier_curves#sam.train_models.find_outlier_curves)
### Isolation Forest
An example of unsupervised anomaly detection capable of handling non-numeric features. The model attempts to predict whether an instance is "odd" compared to what it has seen in the training set.
The difficult thing here, when doing asset management projects, is that it is often unclear what exactly causes something to be an anomaly. That makes it hard to grasp for the end user and even harder to act upon.
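A minimal sklearn sketch (the toy data and contamination value are illustrative assumptions):

```python
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.default_rng(0)
X_train = rng.normal(size=(5000, 4))            # "normal" behaviour
X_new = np.vstack([rng.normal(size=(95, 4)),    # mostly normal...
                   rng.normal(loc=6, size=(5, 4))])  # ...plus a few odd points

model = IsolationForest(contamination=0.01, random_state=42).fit(X_train)
labels = model.predict(X_new)        # +1 = normal, -1 = anomaly
scores = model.score_samples(X_new)  # lower = more anomalous
```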
**References**
* [IsolationForest implementation in Sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.IsolationForest.html)
## Evaluation
In general the cases that we're trying to predict or identify are **rare**. It is important to keep
that in mind when evaluating model performance: 30% precision predicting an incident that happens
0.5% of the time is very good!
There are a number of metrics that make sense when evaluating predictive models for e.g. predictive maintenance.
### Incident recall
If we predict incidents X time steps in advance, all these time steps will have a positive target variable. Recall with regard to all these positives is not representative of performance: if we predict half of the time steps correctly, we might flag every incident yet still only score 50% recall.
Therefore we want to measure the fraction of incidents that we're able to flag: the incident recall.
This combined with precision gives a good indication of the model's usability in practice. For
example:
> If I get a warning, it is right 30% of the time (precision). By acting on warnings I'm able to
prevent up to 90% of incidents (incident recall).
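A rough sketch of the definition; the real, sklearn-compatible implementation lives in `sam.metrics.incident_recall`, so treat this only as an illustration:

```python
import numpy as np

def incident_recall_sketch(incident_ids: np.ndarray, y_pred: np.ndarray) -> float:
    """Fraction of incidents with at least one flagged time step.

    incident_ids: 0 for 'no incident', otherwise an id shared by all time
    steps belonging to the same upcoming incident.
    y_pred: boolean warnings per time step.
    """
    incidents = np.unique(incident_ids[incident_ids != 0])
    if len(incidents) == 0:
        return np.nan
    flagged = [y_pred[incident_ids == i].any() for i in incidents]
    return float(np.mean(flagged))

# Example: two incidents, the first is flagged once, the second never
ids = np.array([0, 0, 1, 1, 1, 0, 2, 2])
pred = np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool)
print(incident_recall_sketch(ids, pred))  # 0.5
```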
**References**
* Metric implementation (sklearn compatible) `sam.metrics.incident_recall`
* Visualisation of precision vs. incident recall for multiple thresholds `sam...`
## Other good reads
* https://datascopeanalytics.com/blog/unevenly-spaced-time-series/
/sam-3.1.9.tar.gz/sam-3.1.9/docs/source/general_documents/project_approach.md
Weather data
============
Often, weather features are important predictors. For training the model, historic data might be relevant, but when making predictions, weather forecasts can also be useful. However, there is a big difference in the availability of historic weather data versus forecasts: the resolution and frequency of forecasts, for example, are often lower.
Weather forecasts are never saved, so it is not possible to train/validate a model with authentic weather forecasts. In this case, historic data can be used to model both the historic weather features and the forecast weather features. However, to make these forecast features realistic, they should be modeled according to the availability of the weather forecast data: if forecasts have a lower resolution or frequency than historic weather data, the features should resemble that same low resolution or frequency, even though they are constructed from historical data.
## Historic
Historic weather data is available through KNMI. It can be exported either [hourly](https://projects.knmi.nl/klimatologie/uurgegevens/selectie.cgi) or [daily](http://projects.knmi.nl/klimatologie/daggegevens/selectie.cgi). Depending on the chosen resolution, there are about 30 different variables available; the most interesting are often temperature (e.g. T, T10N), sunshine (e.g. SQ, Q), precipitation (e.g. DR, RH) and wind (e.g. DD, FH). The measurements are available for around 50 weather stations across the Netherlands, so given a coordinate the closest station should be determined and used.
The KNMI data can be accessed through multiple Python packages, for example [knmy](https://pypi.org/project/knmy/) (only exports entire years at a time) or [knmi-py](https://pypi.org/project/knmi-py/) (only daily export, not hourly), but it is also available in Galileo. The SAM package uses knmy; the data can be retrieved with the `sam.data_sources.read_knmi` function.
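A hedged example of what this typically looks like; the parameter names of `read_knmi` below are assumptions, so check the SAM documentation for the exact signature:

```python
from sam.data_sources import read_knmi

# Parameter names are illustrative assumptions; see the SAM docs for the
# real signature. The closest KNMI station is resolved from the coordinates.
knmi = read_knmi(
    start_date="2019-01-01",
    end_date="2019-12-31",
    latitude=52.11,
    longitude=5.18,
    freq="hourly",
)
```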
### Precipitation historic
Precipitation is often the most important weather feature and is available in much more detail through the [Nationale Regenradar](https://nationaleregenradar.nl/), which is made by Royal HaskoningDHV and Nelen & Schuurmans. It combines weather stations with Dutch, German and Belgian radar images, resulting in a resolution of 1x1 km. The data is available since 2010, with a frequency of 5 minutes. More information can be found [here](https://nationaleregenradar.nl/pdfs/hoofdrapport_NRR_definitief.pdf).
The data is available through [Lizard](https://rhdhv.lizard.net), but will also be in Galileo in the future (for big queries, it might be better to use the raw NetCDF files). In SAM, the Lizard data can be retrieved with the `sam.data_sources.read_regenradar` function. This function requires a `user` and `password` to be supplied in the `regenradar` section of the .config file; Loes has such an account. (See .config.example for an example.)
## Forecast
KNMI does not currently have an API to export its forecasts for weather features other than precipitation. If variables like temperature, sunshine or wind are needed as forecast features, [OpenWeatherMap](https://openweathermap.org/api) can be used. It offers a free 5-day forecast with a value every 3 hours, available by city name, geographical coordinates or zip code, for any location in the world. A list of all variables that can be exported can be found [here](https://openweathermap.org/forecast5) and includes, for example, temperature, wind and precipitation. In SAM, this forecast can be obtained with the `sam.data_sources.read_openweathermap` function, which requires an `apikey` to be supplied in the `openweathermap` section of the .config file. (See .config.example for an example.)
### Precipitation forecast
Precipitation forecasts are also available in the Nationale Regenradar. There is a nowcast with a 3-hour horizon at the same frequency (5 minutes) and resolution (1x1 km) as the historic data. Forecasts with a longer horizon are made by KNMI, which distinguishes between midterm predictions up to 48 hours ([HIRLAM](https://data.knmi.nl/datasets/hirlam_p5/0.2?q=hirlam)) and longterm predictions up to 10 days ([ECMWF](http://projects.knmi.nl/datacentrum/catalogus/catalogus/content/nl-ecm-eps-ts-surf.htm)). The midterm prediction is available with a maximum resolution of 10x10 km and a frequency of 6 hours, the longterm prediction with 50x50 km and a frequency of 12 hours. Nationale Regenradar directly forwards the KNMI data without any improvements/adaptations. More information can be found [here](https://nationaleregenradar.nl/pdfs/hoofdrapport_NRR_definitief.pdf).
The nowcast is available through [Lizard](https://rhdhv.lizard.net). The KNMI data is not in Lizard, but can be obtained directly through KNMI (see [HIRLAM](http://projects.knmi.nl/datacentrum/catalogus/catalogus/content/nl-nwp-lam-grid-p5.htm) & [ECMWF](http://projects.knmi.nl/datacentrum/catalogus/catalogus/content/nl-ecm-eps-ts-surf.htm)). As of now, this information is not yet available in SAM.
| Type | Data | Source | Frequency | Horizon | Resolution |
|---|---|---|---|---|---|
| Historic | Weather | KNMI | 1 hour / 1 day | - | 50 weather stations |
| Prediction | Weather | OpenWeatherMap | 3 hours | 5 days | city or coordinate |
| Historic | Rain | Nationale Regenradar | 5 minutes | - | 1x1 km |
| Prediction | Rain | Nationale Regenradar | 5 minutes | 3 hours | 1x1 km |
| Prediction | Rain | Nationale Regenradar | 6 hours | 48 hours | 10x10 km |
| Prediction | Rain | KNMI | 12 hours | 10 days | 50x50 km |
/sam-3.1.9.tar.gz/sam-3.1.9/docs/source/general_documents/weather_features.md
Feature Extraction
==================
This is the start of the documentation on which features to use when.
## Transforms
Best practices on types of transforms that you can apply.
### Summarizing
Summarizing a window prior to the prediction moment with some basic functions is always a good starting point:
- Basic: min, max, median, sum, number of positive values
- Distribution: mean, std, var, skewness, kurtosis
- Miscellaneous: crossing points (the number of times the mean is crossed)
Forecasting techniques traditionally used for time series can also be used as features to summarize this window, e.g.:
- Weighted average
- Exponential smoothing (double/triple)
- Holt-Winters
- ...
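A pandas sketch of these summarizing features, assuming a Series of sensor values and a window of 24 steps (both illustrative):

```python
import numpy as np
import pandas as pd

def mean_crossings(window_values: np.ndarray) -> float:
    """Number of times the values cross their own mean within the window."""
    centered = window_values - window_values.mean()
    return float(np.sum(np.diff(np.sign(centered)) != 0))

def summarize_window(s: pd.Series, window: int = 24) -> pd.DataFrame:
    """Summaries of the window just before each prediction moment."""
    w = s.shift(1).rolling(window)  # shift so the window ends before 'now'
    return pd.DataFrame({
        "min": w.min(), "max": w.max(), "median": w.median(), "sum": w.sum(),
        "n_positive": s.shift(1).gt(0).rolling(window).sum(),
        "mean": w.mean(), "std": w.std(), "var": w.var(),
        "skew": w.skew(), "kurt": w.kurt(),
        "mean_crossings": w.apply(mean_crossings, raw=True),
    })
```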
### Deviation
Next to using a metric itself, we can use several measures of deviation between a metric and its expected value (based on recent historical data). Historically relative properties can be more important than the properties themselves and can sometimes capture physical properties.
Such deviations should be calculated if:
- We expect (relatively) constant values.
- Especially useful for monitoring the behaviour of pumps.
### Autocorrelation / lag
Calculating lagged features can help model autocorrelation. However, when using these there is a risk of simply predicting the previous value when changes in the response variable are mostly gradual over time. Because of this, accuracy metrics can be very misleading, especially when we are looking for anomalies.
To test the predictive power in the presence of autocorrelation, you can define the model to predict the difference in values between time steps rather than the value itself (see the sketch after the list below).
Using lagged features is:
- Not recommended when the focus is on anomalies and understanding the prediction.
- Recommended when the focus is on predicting as precisely as possible.
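A short sketch of this differencing check with pandas (the toy series is illustrative):

```python
import numpy as np
import pandas as pd

# Toy response variable with strong autocorrelation (illustrative only)
rng = np.random.default_rng(0)
y = pd.Series(np.cumsum(rng.normal(size=500)))

# Predict the change between time steps rather than the level itself
y_diff = y.diff()

# Lag features; a naive persistence model (predicting the previous value)
# is the baseline that any lag-based model has to beat
X = pd.DataFrame({
    "lag_1": y.shift(1),
    "lag_2": y.shift(2),
    "change_lag_1": y.diff().shift(1),
}).dropna()
y_diff = y_diff.loc[X.index]
```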
### Fourier transform (FFT)
Decompose the signal into frequencies. The output we use is a column with an amplitude for each possible frequency: the higher the column value, the more strongly that frequency is present. More information on the [Fourier transform](https://en.wikipedia.org/wiki/Fourier_transform).
Fourier transforms are especially useful in case:
- We predict *whether* something will happen, rather than *when*.
- We classify a time series or look for the presence of specific patterns.
- There is strong periodicity in the data.
Extension: you can also add the positions of the peaks in the frequency spectrum as features. This adds information on which frequency is most important.
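A numpy sketch of amplitude-per-frequency features, including the peak-frequency extension (window length and sampling step are illustrative):

```python
import numpy as np

def fft_features(window_values: np.ndarray, sample_spacing: float = 1.0) -> dict:
    """Amplitude per frequency for one window, plus the peak frequency."""
    amplitudes = np.abs(np.fft.rfft(window_values))
    freqs = np.fft.rfftfreq(len(window_values), d=sample_spacing)
    features = {f"amp_{f:.3f}": a for f, a in zip(freqs, amplitudes)}
    features["peak_freq"] = freqs[np.argmax(amplitudes[1:]) + 1]  # skip DC component
    return features

# Example: a 2-hour sine pattern sampled every 5 minutes
t = np.arange(0, 24 * 12) * 5  # minutes
window = np.sin(2 * np.pi * t / 120)
print(fft_features(window, sample_spacing=5)["peak_freq"])  # ~1/120 per minute
```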
### Wavelet transform (WT)
Wavelet transforms are similar to Fourier transforms; the key difference is the temporal resolution: a wavelet captures both frequency *and* location information. This not only gives us the frequencies that are present, but also the times at which these frequencies occur.
Wavelets are better suited in case:
- We want to capture how the frequencies change over time
- Abrupt changes in frequency for a short period of time (peaks) should influence our prediction heavily.
The same extension as for Fourier transforms holds for wavelets: you can add additional features for the locations of the peaks.
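A sketch using PyWavelets, assuming the `pywt` package is available (wavelet choice and decomposition level are illustrative):

```python
import numpy as np
import pywt

# Toy signal: a low frequency with a short high-frequency burst in the middle
t = np.linspace(0, 1, 512)
signal = np.sin(2 * np.pi * 5 * t)
signal[240:272] += np.sin(2 * np.pi * 60 * t[240:272])

# Multi-level discrete wavelet decomposition
coeffs = pywt.wavedec(signal, wavelet="db4", level=4)

# Energy per level indicates which frequency bands are present...
energies = [float(np.sum(c ** 2)) for c in coeffs]
# ...and the finest detail coefficients show where in time the burst occurs
burst_location = int(np.argmax(np.abs(coeffs[-1])))
```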
### Symbolic Aggregate Approximation (SAX)
Discretization of time series into one symbolic string. First we convert the time series to PAA (Piecewise Aggregate Approximation) representation and then we convert to symbols. This means that we can summarize an entire time series (or window of a time series) into one value. See an [example](https://jmotif.github.io/sax-vsm_site/morea/algorithm/SAX.html).
Advantages:
- Only one value instead of multiple columns
- Good for visualisation
- Useful when comparing/finding patterns in a time series
### Miscellaneous: tsfresh
The Python library [tsfresh](https://github.com/blue-yonder/tsfresh) calculates a comprehensive number (approximately 65) of features for time series (Feature Calculation). The calculated features are very diverse, ranging from 'time reversal asymmetric statistic' to 'entropy', etc. There is some overlap with the summarizing functions in this document. It also contains automatic feature selection (Feature Filtering) based on significant correlation with the target.
Since many of the features are not very intuitive, it is difficult to know which ones to use in a specific project; however, there might be very powerful features in there. It is therefore recommended to use both the Feature Calculation *and* the Feature Filtering of tsfresh: the filtering tests the significance of the features, and only the most important ones should be added to the model. These features should be added next to the other features mentioned in this document, not as a replacement. This way we keep the intuitive features that might not be significant based on the target variable alone, but do have an interaction effect with other variables.
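A hedged sketch of that workflow (the toy input is illustrative; see the tsfresh documentation for the exact expected input format):

```python
import numpy as np
import pandas as pd
from tsfresh import extract_features, select_features
from tsfresh.utilities.dataframe_functions import impute

# Long-format input: one row per (id, time) pair
timeseries = pd.DataFrame({
    "id": np.repeat(np.arange(20), 50),
    "time": np.tile(np.arange(50), 20),
    "value": np.random.randn(1000),
})
y = pd.Series(np.random.randint(0, 2, size=20), index=np.arange(20))

X = extract_features(timeseries, column_id="id", column_sort="time")
impute(X)                           # tsfresh features may contain NaNs/inf
X_selected = select_features(X, y)  # keep only significantly related features

# Use X_selected next to (not instead of) the hand-crafted features
```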
## Type of variables
Best practices for types of variables that you might have in your model.
### Seasonality
There are many ways to capture seasonality; to capture its cyclic nature, using more than one might be the best option. Which one(s) to use also depends on the model: some features might work better for tree-based models and others for linear models. In tree-based models we try to group similar time periods together by giving them a similar feature value.
The following uses months as an example, but the same applies at other levels, like seasons, weeks, days, hours, etc.
- Categorical encoding (or dummy encoding in Python)
- When: All possible groupings are possible.
- How: A column with the name of the month is added.
- Sine encoding
- When: Months that are close to each other are easily grouped.
- How: We add a column with the discrete values of a sine corresponding to the months (see the sketch after this list). The frequency depends on the level of seasonality we want to capture (for yearly patterns like seasons and months the frequency is 1, for weekly patterns the frequency is 52, etc.)
- Target encoding
- When: Months that have a similar value in the target are easily grouped.
- How: Each month gets the average value of the target over all samples in that month.
- Numeric encoding
- When: Months that are close to each other are very easily grouped, however the cyclic nature isn't accounted for.
- How: To partly account for the cyclic nature, multiple columns can be added, e.g. 1: Jan to 12: Dec, but also 1: Jul to 12: Jun.
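A short sketch of the sine encoding mentioned above, for month-of-year and hour-of-day; adding a cosine next to the sine avoids mapping two different months to the same value:

```python
import numpy as np
import pandas as pd

idx = pd.date_range("2019-01-01", "2020-12-31 23:00", freq="H")
df = pd.DataFrame(index=idx)

# Month of year, frequency 1 per year
df["month_sin"] = np.sin(2 * np.pi * (idx.month - 1) / 12)
df["month_cos"] = np.cos(2 * np.pi * (idx.month - 1) / 12)

# Hour of day, frequency 1 per day
df["hour_sin"] = np.sin(2 * np.pi * idx.hour / 24)
df["hour_cos"] = np.cos(2 * np.pi * idx.hour / 24)
```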
*Another option would be to first remove the seasonality and not include any seasonal features in the model.*
The year should be added as a numeric variable if we suspect a trend in the data; however, if the chosen model cannot extrapolate values, the trend should instead be removed from the data separately by decomposition.
Holidays should be included as a categorical or boolean variable. For this we need a standardised list of holidays.
### Pumps
When working with pumps (or other discrete processes that take some time), some features that can be used are:
- Features based on the number of pumping events per day.
- The pump's flow rate.
- The duration of the pumping events.
- The ratio of a pump's flow rate to amount of handle motion (i.e volume of water per human effort).
For all these categories, using the deviations as well as the values themselves also captures the physical properties of the pumps and is highly recommended.
### Water level
Since the water level is fairly constant or changes gradually, there is often high autocorrelation present. Using this in our features usually improves performance, but might also lead to missing sensor defects/degradation. Also see the section on autocorrelation.
/sam-3.1.9.tar.gz/sam-3.1.9/docs/source/general_documents/feature_extraction.md
# Feature engineering examples
This notebook contains some examples of feature engineering using SAM.
We use the following example dataset:
```
import pandas as pd
from sam.datasets import load_rainbow_beach
data = load_rainbow_beach()
```
## Simple feature engineering for timeseries data
The class `sam.feature_engineering.SimpleFeatureEngineer` is used to create common features for timeseries data: rolling features and time components.
A rolling feature can be parameterized by a tuple of the form `(column_name, method, rolling_window)`, where `column_name` is the name of the column to be used as the time series, `method` is the type of rolling feature (e.g. "mean", "lag", "max"), and `rolling_window` is the size of the rolling window.
A time component can either be described by a dummy variable (one-hot encoding) or a cyclic variable (sin/cos). To parameterize a time component, a tuple of the form `(period, component_type)` is used.
For this example we will create the following set of features:
- Mean water_temperature of the past day
- Mean water_temperature of the past two days
- Maximum turbidity of the past day
- Maximum turbidity of the past two days
- Cyclical features (sin/cos) of hour of the day
- Cyclical features (sin/cos) of day of the week
The following code shows how SAM can be used to create a feature engineering pipeline.
```
from sam.feature_engineering import SimpleFeatureEngineer
sfe = SimpleFeatureEngineer(
rolling_features=[
("water_temperature", "mean", "1D"),
("water_temperature", "mean", "2D"),
("turbidity", "max", "1D"),
("turbidity", "max", "2D"),
],
time_features=[
("hour_of_day", "cyclical"),
("day_of_week", "cyclical"),
],
)
sfe.fit_transform(data).head()
```
## Custom feature engineering function
If you want more freedom and want to customize your feature engineering, you can use `sam.feature_engineering.FeatureEngineer` to create your own feature engineering transformer from a feature engineering function. This class provides methods to make sure the interface is compatible with sam models.
```
from sam.feature_engineering import FeatureEngineer
def my_feature_engineering(X, y=None):
"""Don't forget documentation
"""
X_out = X.copy()
X_out = X_out[["water_temperature", "turbidity"]]
X_out['my_feature'] = X_out['water_temperature'].rolling(window=24).mean().pow(2)
return X_out
my_fe = FeatureEngineer(my_feature_engineering)
my_fe.fit_transform(data)
```
## Customized feature engineering class
If a single function does not fit your needs, you can create your own feature engineering class. By creating a subclass of `sam.feature_engineering.BaseFeatureEngineer`, you can implement your own feature engineering function. You only need to implement the `feature_engineer_` method. If you want to fit certain parameters, you can implement the `fit` method as well. Check the current implementation of `BaseFeatureEngineer` or `SimpleFeatureEngineer` for an example.
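A hedged sketch of such a subclass, assuming `BaseFeatureEngineer` only requires the `feature_engineer_` method described above (check the actual base class for the exact hooks):
```
from sam.feature_engineering import BaseFeatureEngineer

class MyFeatureEngineer(BaseFeatureEngineer):
    """Example: squared 24-step rolling mean of the water temperature."""

    def feature_engineer_(self, X):
        # Same logic as the function-based example above
        X_out = X[["water_temperature", "turbidity"]].copy()
        X_out["my_feature"] = X_out["water_temperature"].rolling(window=24).mean().pow(2)
        return X_out

my_fe = MyFeatureEngineer()
my_fe.fit_transform(data)
```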
/sam-3.1.9.tar.gz/sam-3.1.9/examples/feature_engineering.ipynb
# sam4onnx
A very simple tool to rewrite parameters such as attributes and constants for OPs in ONNX models. **S**imple **A**ttribute and Constant **M**odifier for **ONNX**.
https://github.com/PINTO0309/simple-onnx-processing-tools
[](https://pepy.tech/project/sam4onnx)  [](https://pypi.org/project/sam4onnx/) [](https://github.com/PINTO0309/sam4onnx/actions?query=workflow%3ACodeQL)
<p align="center">
<img src="https://user-images.githubusercontent.com/33194443/170155850-65e2f103-baa9-4061-a268-020f0c8bc6f8.png" />
</p>
# Key concept
- [x] Specify an arbitrary OP name and Constant type INPUT name or an arbitrary OP name and Attribute name, and pass the modified constants to rewrite the parameters of the relevant OP.
- [x] Two types of input are accepted: .onnx file input and onnx.ModelProto format objects.
- [x] To design the operation to be simple, only a single OP can be specified.
- [x] Attributes and constants are forcibly rewritten, so the integrity of the entire graph is not checked in detail.
- [x] Support for recursive search of subgraphs of `If` OP.
## 1. Setup
### 1-1. HostPC
```bash
### option
$ echo export PATH="~/.local/bin:$PATH" >> ~/.bashrc \
&& source ~/.bashrc
### run
$ pip install -U onnx \
&& python3 -m pip install -U onnx_graphsurgeon --index-url https://pypi.ngc.nvidia.com \
&& pip install -U sam4onnx
```
### 1-2. Docker
https://github.com/PINTO0309/simple-onnx-processing-tools#docker
## 2. CLI Usage
```
$ sam4onnx -h
usage:
sam4onnx [-h]
-if INPUT_ONNX_FILE_PATH
-of OUTPUT_ONNX_FILE_PATH
[-on OP_NAME]
[-a NAME DTYPE VALUE]
[-da DELETE_ATTRIBUTES [DELETE_ATTRIBUTES ...]]
[-ic NAME DTYPE VALUE]
[-os OUTPUT_NAME OUTPUT_SHAPE]
[-n]
optional arguments:
-h, --help
show this help message and exit
-if INPUT_ONNX_FILE_PATH, --input_onnx_file_path INPUT_ONNX_FILE_PATH
Input onnx file path.
-of OUTPUT_ONNX_FILE_PATH, --output_onnx_file_path OUTPUT_ONNX_FILE_PATH
Output onnx file path.
-on OP_NAME, --op_name OP_NAME
OP name of the attributes to be changed.
When --attributes is specified, --op_name must always be specified.
e.g. --op_name aaa
-a ATTRIBUTES ATTRIBUTES ATTRIBUTES, --attributes ATTRIBUTES ATTRIBUTES ATTRIBUTES
Parameter to change the attribute of the OP specified in --op_name.
If the OP specified in --op_name has no attributes,
it is ignored. attributes can be specified multiple times.
--attributes name dtype value dtype is one of
"float32" or "float64" or "int32" or "int64" or "str".
https://github.com/onnx/onnx/blob/main/docs/Operators.md
e.g.
--attributes alpha float32 [[1.0]]
--attributes beta float32 [1.0]
--attributes transA int64 0
--attributes transB int64 0
-da DELETE_ATTRIBUTES [DELETE_ATTRIBUTES ...], --delete_attributes DELETE_ATTRIBUTES [DELETE_ATTRIBUTES ...]
Parameter to delete the attribute of the OP specified in --op_name.
If the OP specified in --op_name has no attributes,
it is ignored. delete_attributes can be specified multiple times.
--delete_attributes name1 name2 name3
https://github.com/onnx/onnx/blob/main/docs/Operators.md
e.g. --delete_attributes alpha beta
-ic INPUT_CONSTANTS INPUT_CONSTANTS INPUT_CONSTANTS, --input_constants INPUT_CONSTANTS INPUT_CONSTANTS INPUT_CONSTANTS
Specifies the name of the constant to be changed.
If you want to change only the constant,
you do not need to specify --op_name and --attributes.
input_constants can be specified multiple times.
--input_constants constant_name numpy.dtype value
e.g.
--input_constants constant_name1 int64 0
--input_constants constant_name2 float32 [[1.0,2.0,3.0],[4.0,5.0,6.0]]
--input_constants constant_name3 float32 [\'-Infinity\']
-os OUTPUT_SHAPES OUTPUT_SHAPES, --output_shapes OUTPUT_SHAPES OUTPUT_SHAPES
Specifies the name of the output to be changed. output_shapes can be specified multiple times.
--output_shapes output_name1 shape1
--output_shapes output_name2 shape2
e.g.
--output_shapes output_name1 [1]
--output_shapes output_name2 [1,3,224,224]
-n, --non_verbose
Do not show all information logs. Only error logs are displayed.
```
## 3. In-script Usage
```python
$ python
>>> from sam4onnx import modify
>>> help(modify)
Help on function modify in module sam4onnx.onnx_attr_const_modify:
modify(
input_onnx_file_path: Union[str, NoneType] = '',
output_onnx_file_path: Union[str, NoneType] = '',
onnx_graph: Union[onnx.onnx_ml_pb2.ModelProto, NoneType] = None,
op_name: Union[str, NoneType] = '',
attributes: Union[dict, NoneType] = None,
delete_attributes: Union[List[str], NoneType] = None,
input_constants: Union[dict, NoneType] = None,
output_shapes: Optional[List] = None,
non_verbose: Union[bool, NoneType] = False
) -> onnx.onnx_ml_pb2.ModelProto
Parameters
----------
input_onnx_file_path: Optional[str]
Input onnx file path.
Either input_onnx_file_path or onnx_graph must be specified.
output_onnx_file_path: Optional[str]
Output onnx file path.
If output_onnx_file_path is not specified, no .onnx file is output.
onnx_graph: Optional[onnx.ModelProto]
onnx.ModelProto.
Either input_onnx_file_path or onnx_graph must be specified.
onnx_graph If specified, ignore input_onnx_file_path and process onnx_graph.
op_name: Optional[str]
OP name of the attributes to be changed.
When --attributes is specified, --op_name must always be specified.
Default: ''
https://github.com/onnx/onnx/blob/main/docs/Operators.md
attributes: Optional[dict]
Specify output attributes for the OP to be generated.
See below for the attributes that can be specified.
{"attr_name1": numpy.ndarray, "attr_name2": numpy.ndarray, ...}
e.g. attributes =
{
"alpha": np.asarray(1.0, dtype=np.float32),
"beta": np.asarray(1.0, dtype=np.float32),
"transA": np.asarray(0, dtype=np.int64),
"transB": np.asarray(0, dtype=np.int64),
}
Default: None
https://github.com/onnx/onnx/blob/main/docs/Operators.md
delete_attributes: Optional[List[str]]
Parameter to delete the attribute of the OP specified in --op_name.
If the OP specified in --op_name has no attributes, it is ignored.
delete_attributes can be specified multiple times.
--delete_attributes name1 name2 name3
https://github.com/onnx/onnx/blob/main/docs/Operators.md
e.g.
--delete_attributes alpha beta
input_constants: Optional[dict]
Specifies the name of the constant to be changed.
If you want to change only the constant,
you do not need to specify --op_name and --attributes.
{"constant_name1": numpy.ndarray, "constant_name2": numpy.ndarray, ...}
e.g.
input_constants =
{
"constant_name1": np.asarray(0, dtype=np.int64),
"constant_name2": np.asarray([[1.0,2.0,3.0],[4.0,5.0,6.0]], dtype=np.float32),
"constant_name3": np.asarray([-np.inf], dtype=np.float32),
}
Default: None
https://github.com/onnx/onnx/blob/main/docs/Operators.md
output_shapes: Optional[List[int]]
Specifies the name of the output_shapes to be changed.
output_shapes can be specified multiple times.
output_shapes = [
['output_name1', shape1],
['output_name2', shape2],
:
]
e.g.
output_shapes = [
['aaa', [1]],
['bbb', [1,3,224,224]],
]
non_verbose: Optional[bool]
Do not show all information logs. Only error logs are displayed.
Default: False
Returns
-------
modified_graph: onnx.ModelProto
Modified onnx ModelProto
```
## 4. CLI Execution
```bash
$ sam4onnx \
--input_onnx_file_path input.onnx \
--output_onnx_file_path output.onnx \
--op_name Transpose_17 \
--attributes perm int64 [0,1]
```
## 5. In-script Execution
```python
import numpy as np
import onnx

from sam4onnx import modify

# Load the model to be modified (any onnx.ModelProto can be passed instead)
graph = onnx.load("hitnet_sf_finalpass_720x1280_nonopt.onnx")

modified_graph = modify(
    onnx_graph=graph,
    op_name="Reshape_17",
    input_constants={"241": np.asarray([1], dtype=np.int64)},
    non_verbose=True,
)
```
## 6. Sample
### 6-1. Transpose - update **`perm`** and **`output_shapes`**

```bash
$ sam4onnx \
--input_onnx_file_path hitnet_sf_finalpass_720x1280_nonopt.onnx \
--output_onnx_file_path hitnet_sf_finalpass_720x1280_nonopt_mod.onnx \
--op_name Transpose_17 \
--attributes perm int64 [0,1] \
--output_shapes [128,256]
```

### 6-2. Mul - update **`Constant (170)`** - From: **`2`**, To: **`1`**

```bash
$ sam4onnx \
--input_onnx_file_path hitnet_sf_finalpass_720x1280_nonopt.onnx \
--output_onnx_file_path hitnet_sf_finalpass_720x1280_nonopt_mod.onnx \
--op_name Mul_5 \
--input_constants 170 float32 1
```

### 6-3. Reshape - update **`Constant (241)`** - From: **`[-1]`**, To: **`[1]`**

```bash
$ sam4onnx \
--input_onnx_file_path hitnet_sf_finalpass_720x1280_nonopt.onnx \
--output_onnx_file_path hitnet_sf_finalpass_720x1280_nonopt_mod.onnx \
--op_name Reshape_34 \
--input_constants 241 int64 [1]
```

## 7. Issues
https://github.com/PINTO0309/simple-onnx-processing-tools/issues
/sam4onnx-1.0.14.tar.gz/sam4onnx-1.0.14/README.md
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
/sama_probability-0.1.tar.gz/sama_probability-0.1/sama_probability/Gaussiandistribution.py
describe("map_events.js file", function () {
describe("deselectText", function () {});
describe("distanceSquared", function () {
it("is accurate", function () {
expect(distanceSquared(0, 0, 0, 10)).toEqual(100);
expect(distanceSquared(0, 0, 10, 0)).toEqual(100);
expect(distanceSquared(0, 10, 0, 0)).toEqual(100);
expect(distanceSquared(10, 0, 0, 0)).toEqual(100);
expect(distanceSquared(3, 4, 6, 8)).toEqual(25);
});
});
describe("contains", function () {
it("works", function () {
let n = {abs_x: 50, abs_y: 500, radius: 10}
expect(contains(n, 10, 10)).toBe(false);
expect(contains(n, 50, 489)).toBe(false);
expect(contains(n, 50, 490)).toBe(true);
expect(contains(n, 50, 510)).toBe(true);
expect(contains(n, 50, 511)).toBe(false);
expect(contains(n, 61, 500)).toBe(false);
expect(contains(n, 60, 500)).toBe(true);
expect(contains(n, 40, 500)).toBe(true);
expect(contains(n, 39, 500)).toBe(false);
});
});
describe("pick", function () {
it("works", function () {
let nodeA = {subnet: 32, abs_x: 20, abs_y: 20, radius: 5}
let nodeB = {subnet: 32, abs_x: 20, abs_y: 40, radius: 5}
let nodeC = {subnet: 32, abs_x: 40, abs_y: 20, radius: 5}
let nodeD = {subnet: 32, abs_x: 40, abs_y: 40, radius: 5}
renderCollection = [nodeA, nodeB, nodeC, nodeD];
expect(pick(20, 20, 1)).toBe(nodeA);
expect(pick(20, 40, 1)).toBe(nodeB);
expect(pick(40, 40, 1)).toBe(nodeD);
expect(pick(40, 20, 1)).toBe(nodeC);
expect(pick(30, 30, 1)).toBeNull();
expect(pick(24, 20, 1)).toBe(nodeA);
expect(pick(20, 24, 1)).toBe(nodeA);
expect(pick(24, 24, 1)).toBeNull();
});
it("skips subnets when zoomed in", function () {
let nodeA = {subnet: 32, abs_x: 20, abs_y: 20, radius: 5}
let nodeB = {subnet: 16, abs_x: 20, abs_y: 20, radius: 50}
renderCollection = [nodeA, nodeB];
expect(pick(20, 20, 1)).toBe(nodeA);
expect(pick(20, 26, 1)).toBe(nodeB);
nodeB = {subnet: 8, abs_x: 20, abs_y: 20, radius: 50}
renderCollection = [nodeA, nodeB];
expect(pick(20, 26, 1)).toBe(null);
});
});
//these do not lend themselves well to unit testing...
describe("mouseup", function () {});
describe("mousemove", function () {});
describe("wheel", function () {});
describe("keydown", function () {});
describe("applyfilter", function () {});
describe("onfilter", function () {});
describe("applysearch", function () {});
describe("onsearch", function () {});
describe("applyProtocolFilter", function () {});
describe("onProtocolFilter", function () {});
describe("onResize", function () {});
});
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/map_events_spec.js
describe("table_filters.js file", function () {
beforeEach(function () {
g_known_tags = ["tag1", "tag2"];
g_known_envs = ["production", "dev", "inherit"];
});
describe("members", function () {
it("has filter types", function () {
expect(Object.keys(filters.private.types)).toContain("connections");
expect(Object.keys(filters.private.types)).toContain("env");
expect(Object.keys(filters.private.types)).toContain("mask");
expect(Object.keys(filters.private.types)).toContain("port");
expect(Object.keys(filters.private.types)).toContain("protocol");
expect(Object.keys(filters.private.types)).toContain("role");
expect(Object.keys(filters.private.types)).toContain("tags");
expect(Object.keys(filters.private.types)).toContain("target");
expect(Object.keys(filters.private.types)).toContain("subnet");
Object.keys(filters.private.types).forEach(function (k) {
let f = filters.private.types[k];
expect(typeof(f[0])).toEqual("function");
expect(typeof(f[1])).toEqual("string");
expect(typeof(f[2][0])).toEqual("string");
});
});
});
describe("addFilter", function () {
it("adds a filter", function () {
filters.filters = [];
filters.addFilter("env", ["production"]);
expect(filters.filters.length).toEqual(1);
expect(filters.filters[0].type).toEqual("env");
});
it("fails on bad params", function () {
filters.filters = [];
spyOn(filters.private, "createFilter");
filters.addFilter("env", ["production", "dev"]);
expect(filters.private.createFilter).not.toHaveBeenCalled();
expect(filters.filters.length).toEqual(0);
});
it("fails on bad filter", function () {
filters.filters = [];
spyOn(filters.private, "createFilter");
filters.addFilter("envy", ["production"]);
expect(filters.private.createFilter).not.toHaveBeenCalled();
expect(filters.filters.length).toEqual(0);
});
});
describe("deleteFilter", function () {
it("removes the filter in question", function () {
spyOn(filters.private, "updateSummary");
filters.filters = [0,1,2,3,4];
filters.deleteFilter(2);
expect(filters.filters).toEqual([0,1,3,4])
expect(filters.private.updateSummary).toHaveBeenCalled();
});
it("doesn't run on out-of-bounds", function () {
spyOn(filters.private, "updateSummary");
filters.filters = [0,1,2,3,4];
filters.deleteFilter(7);
expect(filters.filters).toEqual([0,1,2,3,4])
expect(filters.private.updateSummary).not.toHaveBeenCalled();
});
});
describe("updateDisplay", function () {});
describe("getFilters", function () {
it("works on all filter types", function () {
filters.ds = "ds5";
filters.filters = [];
filters.addFilter("connections", [">", "i", "300"]);
filters.addFilter("env", ["production"]);
filters.addFilter("mask", ["192.168.0.0/16"]);
filters.addFilter("port", ["1", "443"]);
filters.addFilter("protocol", ["0", "TCP"]);
filters.addFilter("role", [">", "0.75"]);
filters.addFilter("tags", ["1", "64GB"]);
filters.addFilter("target", ["10.20.30.40", "0"]);
filters.addFilter("subnet", ["24"]);
let expected = "ds5|0;1;>;i;300|1;1;production|2;1;192.168.0.0/16|3;1;1;443|4;1;0;TCP|5;1;>;0.75|7;1;1;64GB|8;1;10.20.30.40;0|6;1;24";
let actual = filters.getFilters();
expect(actual).toEqual(expected);
});
});
describe("setFilters", function () {
it("works on all filter types", function () {
let filterstring = "ds1|0;1;>;i;300|1;1;production|2;1;192.168.0.0/16|3;1;1;443|4;1;0;TCP|5;1;>;0.75|7;1;1;64GB|8;1;10.20.30.40;0|6;1;24";
filters.filters = [];
filters.setFilters(filterstring);
expect(filters.filters.length).toEqual(9);
})
});
describe("private.createFilter", function () {
it("works for each type", function () {
function rmkr(p) {
return [document.createElement("DIV")];
}
let f1 = filters.private.createFilter(true, "connections", [">", "i", "300"], rmkr);
let f2 = filters.private.createFilter(false, "env", ["production"], rmkr);
let f3 = filters.private.createFilter(true, "mask", ["192.168.0.0/16"], rmkr);
let f4 = filters.private.createFilter(false, "port", ["1", "443"], rmkr);
let f5 = filters.private.createFilter(true, "protocol", ["0", "TCP"], rmkr);
let f6 = filters.private.createFilter(false, "role", [">", "0.75"], rmkr);
let f7 = filters.private.createFilter(true, "tags", ["1", "64GB"], rmkr);
let f8 = filters.private.createFilter(false, "target", ["10.20.30.40", "0"], rmkr);
let f9 = filters.private.createFilter(true, "subnet", ["24"], rmkr);
let enableds = [f1.enabled, f2.enabled, f3.enabled, f4.enabled, f5.enabled, f6.enabled, f7.enabled, f8.enabled, f9.enabled];
let expected_enableds = [true, false, true, false, true, false, true, false, true];
expect(enableds).toEqual(expected_enableds);
let types = [f1.type, f2.type, f3.type, f4.type, f5.type, f6.type, f7.type, f8.type, f9.type];
let expected_types = ["connections", "env", "mask", "port", "protocol", "role", "tags", "target", "subnet"];
expect(types).toEqual(expected_types);
});
});
describe("private.createSubnetFilterRow", function () {});
describe("private.createMaskFilterRow", function () {});
describe("private.createRoleFilterRow", function () {});
describe("private.createEnvFilterRow", function () {});
describe("private.createPortFilterRow", function () {});
describe("private.createProtocolFilterRow", function () {});
describe("private.createTagFilterRow", function () {});
describe("private.createTargetFilterRow", function () {});
describe("private.createConnectionsFilterRow", function () {});
describe("private.markupBoilerplate", function () {});
describe("private.markupSelection", function () {});
describe("private.markupTags", function () {});
describe("private.markupInput", function () {});
describe("private.markupSpan", function () {});
describe("private.dsCallback", function () {});
describe("private.addCallback", function () {
it("works normally", function () {
let div = document.createElement("DIV");
let button = document.createElement("BUTTON");
let icon = document.createElement("I");
let selector_div = document.createElement("DIV");
let selector_input = document.createElement("INPUT");
let garbage_div = document.createElement("DIV");
let garbage_input = document.createElement("INPUT");
button.appendChild(icon);
selector_div.appendChild(selector_input);
garbage_div.appendChild(garbage_input);
div.appendChild(button);
div.appendChild(selector_div);
div.appendChild(garbage_div);
selector_input.value = "protocol";
garbage_input.value = "wrong";
mock_event = {target: icon};
mock_event2 = {target: button};
spyOn(filters.private, "extractRowValues").and.returnValue(["mock_params"]);
spyOn(filters, "addFilter");
spyOn(filters, "updateDisplay");
filters.private.addCallback(mock_event);
expect(filters.private.extractRowValues).toHaveBeenCalled();
expect(filters.addFilter).toHaveBeenCalledWith("protocol", ["mock_params"]);
filters.private.addCallback(mock_event2);
expect(filters.private.extractRowValues).toHaveBeenCalled();
expect(filters.addFilter).toHaveBeenCalledWith("protocol", ["mock_params"]);
});
});
describe("private.deleteCallback", function () {});
describe("private.updateEvent", function () {});
describe("private.getRowIndex", function () {});
describe("private.extractRowValues", function () {});
describe("private.writeSummary", function () {
it("summarizes connections", function () {
filter = {type: "connections", direction: "i", comparator: "<", limit: "50"};
expect(filters.private.writeSummary(filter).innerText).toEqual("<50 conns/s (in)");
filter = {type: "connections", direction: "o", comparator: ">", limit: "60"};
expect(filters.private.writeSummary(filter).innerText).toEqual(">60 conns/s (out)");
filter = {type: "connections", direction: "c", comparator: ">", limit: "60"};
expect(filters.private.writeSummary(filter).innerText).toEqual(">60 conns/s (in+out)");
});
it("summarizes subnet", function () {
filter = {type: "subnet", subnet: "24"};
expect(filters.private.writeSummary(filter).innerText).toEqual("subnet /24");
});
it("summarizes mask", function () {
filter = {type: "mask", mask: "192.168.0.0/16"};
expect(filters.private.writeSummary(filter).innerText).toEqual("subnet 192.168.0.0/16");
});
it("summarizes port", function () {
filter = {type: "port", port: "443", connection: "0"};
expect(filters.private.writeSummary(filter).innerText).toEqual("conn to (443)");
filter = {type: "port", port: "443", connection: "1"};
expect(filters.private.writeSummary(filter).innerText).toEqual("no conn to (443)");
filter = {type: "port", port: "443", connection: "2"};
expect(filters.private.writeSummary(filter).innerText).toEqual("conn from (443)");
filter = {type: "port", port: "443", connection: "3"};
expect(filters.private.writeSummary(filter).innerText).toEqual("no conn from (443)");
});
it("summarizes protocol", function () {
filter = {type: "protocol", protocol: "TCP", handles: "0"};
expect(filters.private.writeSummary(filter).innerText).toEqual("TCP in");
filter = {type: "protocol", protocol: "TCP", handles: "1"};
expect(filters.private.writeSummary(filter).innerText).toEqual("no TCP in");
filter = {type: "protocol", protocol: "TCP", handles: "2"};
expect(filters.private.writeSummary(filter).innerText).toEqual("TCP out");
filter = {type: "protocol", protocol: "TCP", handles: "3"};
expect(filters.private.writeSummary(filter).innerText).toEqual("no TCP out");
});
it("summarizes target", function () {
filter = {type: "target", target: "10.20.30.40", to: "0"};
expect(filters.private.writeSummary(filter).innerText).toEqual("to 10.20.30.40");
filter = {type: "target", target: "10.20.30.40", to: "1"};
expect(filters.private.writeSummary(filter).innerText).toEqual("not to 10.20.30.40");
filter = {type: "target", target: "10.20.30.40", to: "2"};
expect(filters.private.writeSummary(filter).innerText).toEqual("from 10.20.30.40");
filter = {type: "target", target: "10.20.30.40", to: "3"};
expect(filters.private.writeSummary(filter).innerText).toEqual("not from 10.20.30.40");
});
it("summarizes tags", function () {
filter = {type: "tags", tags: "A,B", has: "1"};
expect(filters.private.writeSummary(filter).innerText).toEqual("tagged (A,B)");
filter = {type: "tags", tags: "A,B", has: "0"};
expect(filters.private.writeSummary(filter).innerText).toEqual("no tag (A,B)");
});
it("summarizes env", function () {
filter = {type: "env", env: "spleen"};
expect(filters.private.writeSummary(filter).innerText).toEqual("env: spleen");
});
it("summarizes role", function () {
filter = {type: "role", comparator: ">", ratio: "0.248"};
expect(filters.private.writeSummary(filter).innerText).toEqual(">25% server");
});
});
describe("private.updateSummary", function () {});
describe("private.dropKeys", function () {
it("doesn't affect the original object list", function () {
let test = [
{a: 11, b: 21, c: 31},
{a: 12, b: 22, c: 32},
{a: 13, b: 23, c: 33},
{a: 14, b: 24, c: 34}
];
let actual = filters.private.dropKeys(test, ["b"]);
expect(Object.keys(actual[0])).not.toContain("b");
expect(Object.keys(actual[1])).not.toContain("b");
expect(Object.keys(actual[2])).not.toContain("b");
expect(Object.keys(actual[3])).not.toContain("b");
expect(Object.keys(test[0])).toContain("b");
expect(Object.keys(test[1])).toContain("b");
expect(Object.keys(test[2])).toContain("b");
expect(Object.keys(test[3])).toContain("b");
});
it("works with multiple drop keys", function () {
let test = [
{a: 11, b: 21, c: 31},
{a: 12, b: 22, c: 32},
{a: 13, b: 23, c: 33},
{a: 14, b: 24, c: 34}
];
actual = filters.private.dropKeys(test, ["b", "c"]);
expect(actual[0]).toEqual({a: 11});
expect(actual[1]).toEqual({a: 12});
expect(actual[2]).toEqual({a: 13});
expect(actual[3]).toEqual({a: 14});
});
});
describe("private.encodeFilters", function () {
it("encodes all types", function () {
filters.ds = "ds5";
filters.filters = [];
filters.addFilter("connections", [">", "i", "300"]);
let input = filters.private.dropKeys(filters.filters, ["html"]);
let expected = "ds5|0;1;>;i;300";
let actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("env", ["production"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|1;1;production";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("mask", ["192.168.0.0/16"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|2;1;192.168.0.0/16";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("port", ["1", "443"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|3;1;1;443";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("protocol", ["0", "TCP"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|4;1;0;TCP";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("role", [">", "0.75"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|5;1;>;0.75";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("tags", ["1", "64GB"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|7;1;1;64GB";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("target", ["10.20.30.40", "0"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|8;1;10.20.30.40;0";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
filters.filters = [];
filters.addFilter("subnet", ["24"]);
input = filters.private.dropKeys(filters.filters, ["html"]);
expected = "ds5|6;1;24";
actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
});
it("encodes multiple filters", function () {
filters.ds = "ds5";
filters.filters = [];
filters.addFilter("connections", [">", "i", "300"]);
filters.addFilter("target", ["10.20.30.40", "0"]);
filters.addFilter("subnet", ["24"]);
let input = filters.private.dropKeys(filters.filters, ["html"]);
let expected = "ds5|0;1;>;i;300|8;1;10.20.30.40;0|6;1;24";
let actual = filters.private.encodeFilters(input);
expect(actual).toEqual(expected);
})
});
describe("private.decodeFilters", function () {});
describe("private.createFilterCreator", function () {});
});
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/table_filters_spec.js
describe("map_render.js file", function () {
describe("fadeFont", function () {
it("works", function () {
expect(fadeFont("#FFFFFF", 1.0)).toEqual("rgba(255,255,255,1)");
expect(fadeFont("#706050", 0.25)).toEqual("rgba(112,96,80,0.25)");
});
});
describe("color_links", function () {
it("works", function () {
let linkU = {"protocols": "UDP"};
let linkT = {"protocols": "TCP"};
let linkUT = {"protocols": "UDP,TCP"};
let linkS = {"protocols": "SCP"};
color_links([linkU, linkT, linkUT, linkS]);
expect(linkU.color).toEqual(renderConfig.linkColorUdp);
expect(linkU.color_faded).toEqual(renderConfig.linkColorUdpFaded);
expect(linkT.color).toEqual(renderConfig.linkColorTcp);
expect(linkT.color_faded).toEqual(renderConfig.linkColorTcpFaded);
expect(linkUT.color).toEqual(renderConfig.linkColorUdpTcp);
expect(linkUT.color_faded).toEqual(renderConfig.linkColorUdpTcpFaded);
expect(linkS.color).toEqual(renderConfig.linkColorOther);
expect(linkS.color_faded).toEqual(renderConfig.linkColorOtherFaded);
});
});
describe("opacity", function () {
it("works for nodes", function () {
expect(opacity(8, "node", 0.0007)).toEqual(1);
expect(opacity(8, "node", zLinks16)).toEqual(1);
expect(opacity(8, "node", zLinks16*2)).toEqual(0);
expect(opacity(24, "node", zNodes24)).toEqual(0);
expect(opacity(24, "node", zNodes24*2)).toEqual(1);
expect(opacity(24, "node", zLinks32)).toEqual(1);
expect(opacity(24, "node", zLinks32*2)).toEqual(0);
expect(opacity(32, "node", zNodes32)).toEqual(0);
expect(opacity(32, "node", zNodes32*2)).toEqual(1);
});
it("works for links", function () {
expect(opacity(8, "link", 0.0007)).toEqual(1);
expect(opacity(8, "link", zLinks16)).toEqual(1);
expect(opacity(8, "link", zLinks16*2)).toEqual(0);
expect(opacity(24, "link", zLinks24)).toEqual(0);
expect(opacity(24, "link", zLinks24*2)).toEqual(1);
expect(opacity(24, "link", zLinks32)).toEqual(1);
expect(opacity(24, "link", zLinks32*2)).toEqual(0);
expect(opacity(32, "link", zLinks32)).toEqual(0);
expect(opacity(32, "link", zLinks32*2)).toEqual(1);
});
});
describe("trapezoidInterpolation", function () {
it("matches trapezoid shape", function () {
let start = 2;
let peak1 = 4;
let peak2 = 6;
let cease = 8;
expect(trapezoidInterpolation(start, peak1, peak2, cease, 1)).toEqual(0);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 2)).toEqual(0);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 2.5)).toEqual(0.25);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 3)).toEqual(0.5);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 3.5)).toEqual(0.75);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 4)).toEqual(1);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 5)).toEqual(1);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 6)).toEqual(1);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 6.5)).toEqual(0.75);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 7)).toEqual(0.5);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 7.5)).toEqual(0.25);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 8)).toEqual(0);
expect(trapezoidInterpolation(start, peak1, peak2, cease, 9)).toEqual(0);
})
})
describe("magnitudeSquared", function () {
it("is accurate", function () {
expect(magnitudeSquared(3, 3)).toEqual(18);
expect(magnitudeSquared(4, 2)).toEqual(20);
expect(magnitudeSquared(10, 0)).toEqual(100);
expect(magnitudeSquared(0, 10)).toEqual(100);
});
});
//Depends on nodes in the render collection, controller.rect, tx, ty, and g_scale.
describe("getSubnetLabel", function () {});
//TODO: these two are difficult to test
describe("onScreenRecursive", function () {});
describe("onScreen", function () {});
describe("get_bbox", function () {
it("works on a single node", function () {
let collection = {abs_x: 100, abs_y: 50, radius_orig: 10};
let expected = {
"left": 90,
"right": 110,
"top": 40,
"bottom": 60
};
expect(get_bbox({"a": collection})).toEqual(expected);
});
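//Editor sketch (left pending via xit): assumes get_bbox unions the boxes of
//every node in the collection; that assumed behavior is unverified against
//the map_render.js implementation.
xit("works on multiple nodes", function () {
let a = {abs_x: 100, abs_y: 50, radius_orig: 10};
let b = {abs_x: 200, abs_y: 80, radius_orig: 20};
let expected = {
"left": 90,
"right": 220,
"top": 40,
"bottom": 100
};
expect(get_bbox({"a": a, "b": b})).toEqual(expected);
});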
});
//TODO: unit testing is a poor fit for the following.
describe("resetViewport", function () {});
describe("updateRenderRoot", function () {});
describe("drawLoopArrow", function () {});
describe("renderLinks", function () {});
describe("renderSubnetLabel", function () {});
describe("renderLabels", function () {});
describe("renderNode", function () {});
describe("renderClusters", function () {});
describe("render_axis", function () {});
describe("render", function () {});
describe("render_all", function () {});
});
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/map_render_spec.js
| 0.764452 | 0.862352 |
map_render_spec.js
|
pypi
|
describe("metadata.js file", function () {
describe("normalizeIP", function () {
it("works with short IPs", function () {
expect(normalizeIP("110")).toEqual("110.0.0.0/8");
expect(normalizeIP("110.23")).toEqual("110.23.0.0/16");
expect(normalizeIP("110.23.45")).toEqual("110.23.45.0/24");
expect(normalizeIP("110.23.45.67")).toEqual("110.23.45.67/32");
});
it("works with subnets", function () {
expect(normalizeIP("110.23.45.67/8")).toEqual("110.0.0.0/8");
expect(normalizeIP("110.23.45.67/16")).toEqual("110.23.0.0/16");
expect(normalizeIP("110.23.45.67/24")).toEqual("110.23.45.0/24");
expect(normalizeIP("110.23.45.67/32")).toEqual("110.23.45.67/32");
});
});
describe("getIP_Subnet", function () {});
describe("minimizeIP", function () {
it("drops extras", function () {
expect(minimizeIP("12.34.56.78")).toEqual("12.34.56.78");
expect(minimizeIP("12.34.56.78/32")).toEqual("12.34.56.78");
expect(minimizeIP("12.34.56.78/24")).toEqual("12.34.56");
expect(minimizeIP("12.34.56.78/16")).toEqual("12.34");
expect(minimizeIP("12.34.56.78/8")).toEqual("12");
});
});
describe("dsCallback", function () {});
describe("writeHash", function () {});
describe("readHash", function () {});
describe("buildKeyValueRow", function () {
it("creates a table row", function () {
let tr = buildKeyValueRow("key1", "value1");
expect(tr.tagName).toEqual("TR");
expect(tr.childNodes.length).toEqual(2);
});
it("handles values", function () {
let tr = buildKeyValueRow("key1", "value1");
expect(tr.children[0].innerText).toEqual("key1");
expect(tr.children[1].innerText).toEqual("value1");
tr = buildKeyValueRow("key1", null);
expect(tr.children[0].innerText).toEqual("key1");
expect(tr.children[1].innerText).toEqual("null");
tr = buildKeyValueRow("key1", undefined);
expect(tr.children[0].innerText).toEqual("key1");
expect(tr.children[1].innerText).toEqual("undefined");
tr = buildKeyValueRow("key1", 405);
expect(tr.children[0].innerText).toEqual("key1");
expect(tr.children[1].innerText).toEqual("405");
let child = document.createElement("BUTTON");
let td = document.createElement("TD");
td.appendChild(child);
tr = buildKeyValueRow("key1", td);
expect(tr.children[0].innerText).toEqual("key1");
expect(tr.children[1].children[0].tagName).toEqual("BUTTON");
});
});
describe("buildKeyMultiValueRows", function () {
it("sets rowSpan", function () {
let key = "k1";
let val = [12, 34, 56, 78];
let rows = buildKeyMultiValueRows(key, val);
expect(rows[0].children[0].rowSpan).toEqual(val.length);
});
});
describe("build_link", function () {
it("returns an anchor", function () {
g_ds = 6;
let anchor = build_link("189.59.134.0", 24);
expect(anchor.tagName).toEqual("A");
let expected = "/metadata#ip=189.59.134.0/24&ds=6";
expect(anchor.href.endsWith(expected)).toBe(true);
});
});
describe("build_role_text", function () {
it("matchs spec", function () {
expect(build_role_text(0)).toEqual("0.00 (" + strings.meta_role_cc + ")");
expect(build_role_text(0.1)).toEqual("0.10 (" + strings.meta_role_c + ")");
expect(build_role_text(0.2)).toEqual("0.20 (" + strings.meta_role_c + ")");
expect(build_role_text(0.3)).toEqual("0.30 (" + strings.meta_role_c + ")");
expect(build_role_text(0.4)).toEqual("0.40 (" + strings.meta_role_cs + ")");
expect(build_role_text(0.5)).toEqual("0.50 (" + strings.meta_role_cs + ")");
expect(build_role_text(0.6)).toEqual("0.60 (" + strings.meta_role_cs + ")");
expect(build_role_text(0.7)).toEqual("0.70 (" + strings.meta_role_s + ")");
expect(build_role_text(0.8)).toEqual("0.80 (" + strings.meta_role_s + ")");
expect(build_role_text(0.9)).toEqual("0.90 (" + strings.meta_role_s + ")");
expect(build_role_text(1)).toEqual("1.00 (" + strings.meta_role_ss + ")");
});
});
describe("build_label_packetrate", function () {
it("matches spec", function () {
let b = build_label_packetrate;
expect(b(1)).toEqual("1.00 p/s");
expect(b(100)).toEqual("100.00 p/s");
expect(b(10000)).toEqual("10.00 Kp/s");
expect(b(1000000)).toEqual("1.00 Mp/s");
expect(b(100000000)).toEqual("100.00 Mp/s");
expect(b(10000000000)).toEqual("10.00 Gp/s");
expect(b(1000000000000)).toEqual("1000.00 Gp/s");
});
});
describe("build_table_children", function () {});
describe("build_pagination", function () {});
describe("build_label", function () {
it("produces a label", function () {
let label = build_label("mytext", "blue", false);
expect(label.tagName).toEqual("SPAN");
expect(label.innerText).toEqual("mytext");
expect(label.classList.contains("blue")).toBe(true);
expect(label.classList.contains("disabled")).toBe(false);
label = build_label("mytext", "green", true);
expect(label.tagName).toEqual("SPAN");
expect(label.innerText).toEqual("mytext");
expect(label.classList.contains("green")).toBe(true);
expect(label.classList.contains("disabled")).toBe(true);
});
});
describe("present_quick_info", function () {});
describe("present_detailed_info", function () {});
describe("clear_detailed_info", function () {});
describe("clear_quick_info", function () {});
describe("generic_ajax_failure", function () {});
describe("header_sort_callback", function () {});
describe("hostname_edit_callback", function () {});
describe("tag_change_callback", function () {});
describe("env_change_callback", function () {});
describe("POST_tags", function () {});
describe("GET_data", function () {});
describe("GET_page_callback", function () {});
describe("GET_page", function () {});
describe("abortRequests", function () {});
describe("StateChangeEvent", function () {
it("creates a new object with state", function () {
let evt = new StateChangeEvent({"s": "state1"});
expect(evt.type).toEqual("stateChange");
expect(evt.newState).toEqual({"s": "state1"});
});
});
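//As asserted below, dispatcher routes by event type: a non-stateChange event
//goes to the current g_state handler, while a stateChange event is delivered
//to its newState handler; it returns false when that handler is not callable.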
describe("dispatcher", function () {
it("filters by type", function () {
let foo = {
bar: function () {
return true;
},
old: function () {
return true;
}
}
spyOn(foo, "bar");
spyOn(foo, "old");
let good = {type: "stateChange", newState: foo.bar};
let bad = {type: "notStateChange", newState: foo.bar};
g_state = foo.old;
dispatcher(bad);
// no state change so the old state is the recipient of the event.
expect(foo.old).toHaveBeenCalledTimes(1);
expect(foo.bar).toHaveBeenCalledTimes(0);
dispatcher(good);
// state change, so the new state is the recipient of the event.
expect(foo.old).toHaveBeenCalledTimes(1);
expect(foo.bar).toHaveBeenCalledTimes(1);
});
it("executes or errors", function () {
let foo = {
bar: function () {
return true;
},
baz: "not a function"
}
spyOn(foo, "bar");
let good = {type: "stateChange", newState: foo.bar};
let bad = {type: "stateChange", newState: foo.baz};
expect(dispatcher(good)).toBe(true);
expect(dispatcher(bad)).toBe(false);
});
});
describe("restartTypingTimer", function () {});
describe("scanForPorts", function () {
it("reads inputs", function () {
let response = {
outputs: {headers: [], rows: []},
ports: {headers: [], rows: []},
inputs: {
headers: [["notport", "Not Port"], ["port", "Port"], ["alsonotport", "Also Not Port"]],
rows: [
[123, 456, 789],
[122, 455, 788],
[121, 454, 787]
]
}
};
spyOn(ports, "request_submit");
ports.private.requests = [];
scanForPorts(response);
expect(ports.private.requests).toEqual([456, 455, 454]);
expect(ports.request_submit).toHaveBeenCalled();
});
it("reads outputs", function () {
let response = {
inputs: {headers: [], rows: []},
ports: {headers: [], rows: []},
outputs: {
headers: [["notport", "Not Port"], ["port", "Port"], ["alsonotport", "Also Not Port"]],
rows: [
[123, 456, 789],
[122, 455, 788],
[121, 454, 787]
]
}
};
spyOn(ports, "request_submit");
ports.private.requests = [];
scanForPorts(response);
expect(ports.private.requests).toEqual([456, 455, 454]);
expect(ports.request_submit).toHaveBeenCalled();
});
it("reads ports", function () {
let response = {
inputs: {headers: [], rows: []},
outputs: {headers: [], rows: []},
ports: {
headers: [["notport", "Not Port"], ["port", "Port"], ["alsonotport", "Also Not Port"]],
rows: [
[123, 456, 789],
[122, 455, 788],
[121, 454, 787]
]
}
};
spyOn(ports, "request_submit");
ports.private.requests = [];
scanForPorts(response);
expect(ports.private.requests).toEqual([456, 455, 454]);
expect(ports.request_submit).toHaveBeenCalled();
});
});
describe("requestMoreDetails", function () {});
describe("requestQuickInfo", function () {});
describe("init", function () {});
});
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/metadata_spec.js
| 0.82994 | 0.784567 |
metadata_spec.js
|
pypi
|
describe("map_node.js file", function () {
describe("Node", function () {
beforeEach(function () {
n1 = new Node("bob", "192.168", 168, 24, 1, 1, 1, 10);
});
it("prepares details member", function () {
expect(n1.hasOwnProperty("details")).toEqual(true);
expect(n1.details.hasOwnProperty("loaded")).toEqual(true);
expect(n1.details.loaded).toBe(false);
});
it("prepares children member", function () {
expect(typeof(n1.children)).toEqual("object");
expect(n1.childrenLoaded).toBe(false);
});
});
describe("find_by_addr", function () {
beforeEach(function () {
nodes.nodes = get_mock_node_tree();
});
it("can find addresses", function () {
let n = nodes.find_by_addr("110");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.0.0.0");
n = nodes.find_by_addr("110.145");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.0.0");
n = nodes.find_by_addr("110.145.200");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.0");
n = nodes.find_by_addr("110.145.200.79");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.79");
});
it("works with subnets", function () {
let n = nodes.find_by_addr("110/8");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.0.0.0");
n = nodes.find_by_addr("110.0/8");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.0.0.0");
n = nodes.find_by_addr("110.0.0/8");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.0.0.0");
n = nodes.find_by_addr("110.0.0.0/8");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.0.0.0");
n = nodes.find_by_addr("110.145/16");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.0.0");
n = nodes.find_by_addr("110.145.0/16");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.0.0");
n = nodes.find_by_addr("110.145.0.0/16");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.0.0");
n = nodes.find_by_addr("110.145.200/24");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.0");
n = nodes.find_by_addr("110.145.200.0/24");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.0");
n = nodes.find_by_addr("110.145.200.79/32");
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.79");
});
});
describe("find_by_range", function () {
beforeEach(function () {
nodes.nodes = get_mock_node_tree();
});
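//The integer arguments are inclusive ipstart/ipend ranges:
//1845493760-1862270975 is 110.0.0.0/8, 1854996480-1855062015 is 110.145.0.0/16,
//1855047680-1855047935 is 110.145.200.0/24, and 1855047759 is 110.145.200.79.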
it("finds nodes", function () {
let n = nodes.find_by_range(1845493760, 1862270975);
expect(n).not.toBeNull();
expect(n.address).toEqual("110.0.0.0");
n = nodes.find_by_range(1854996480, 1855062015);
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.0.0");
n = nodes.find_by_range(1855047680, 1855047935);
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.0");
n = nodes.find_by_range(1855047759, 1855047759);
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.79");
});
it("finds nearest node when missing", function () {
let n = nodes.find_by_range(1855062016, 1855127551);
expect(n).not.toBeNull();
expect(n.address).toEqual("110.0.0.0");
n = nodes.find_by_range(1855047936, 1855048191);
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.0.0");
n = nodes.find_by_range(1855047760, 1855047760);
expect(n).not.toBeNull();
expect(n.address).toEqual("110.145.200.0");
});
it("returns null when nothing available", function () {
let n = nodes.find_by_range(2147483648, 2164260863)
expect(n).toBeNull();
});
});
describe("find_common_root", function () {
beforeEach(function () {
nodes.nodes = get_mock_node_tree();
});
it("finds parents", function () {
let nodeA = nodes.find_by_addr("110.145.200.77");
let nodeB = nodes.find_by_addr("110.145.216.179");
let nodeC = nodes.find_by_addr("110.145.200.146");
let parent;
parent = nodes.find_common_root(nodeA, nodeB);
expect(parent.address).toEqual("110.145.0.0")
parent = nodes.find_common_root(nodeA, nodeC);
expect(parent.address).toEqual("110.145.200.0")
parent = nodes.find_common_root(nodeB, nodeC);
expect(parent.address).toEqual("110.145.0.0")
})
});
describe("insert", function () {
beforeEach(function () {
nodes.nodes = get_mock_node_tree();
});
it("works in normal case", function () {
let n = nodes.find_by_addr("136.164");
expect(n.address).toEqual("136.0.0.0");
let record = {
"subnet":16,
"ipstart":2292449280,
"ipend":2292514815,
"alias":null,
"radius":864,
"env":null,
"y":29030.4,
"x":12441.6
};
let flat = false;
nodes.insert(record, flat);
n = nodes.find_by_addr("136.164");
expect(n.address).toEqual("136.164.0.0");
});
});
describe("GET_response", function () {
it("works in the root case", function () {
let root_response = {"_":[
{"subnet":8,"ipstart":352321536,"ipend":369098751,"alias":null,"radius":20736,"env":null,"y":-287539.188,"x":-110592},
{"subnet":8,"ipstart":889192448,"ipend":905969663,"alias":null,"radius":20736,"env":null,"y":-199065.594,"x":-110592},
{"subnet":8,"ipstart":1325400064,"ipend":1342177279,"alias":null,"radius":20736,"env":null,"y":-154828.797,"x":331776},
{"subnet":8,"ipstart":1845493760,"ipend":1862270975,"alias":null,"radius":20736,"env":null,"y":-66355.203,"x":287539.188},
{"subnet":8,"ipstart":2030043136,"ipend":2046820351,"alias":null,"radius":20736,"env":null,"y":-22118.4,"x":66355.203},
{"subnet":8,"ipstart":2281701376,"ipend":2298478591,"alias":null,"radius":20736,"env":null,"y":22118.4,"x":22118.4},
{"subnet":8,"ipstart":3170893824,"ipend":3187671039,"alias":null,"radius":20736,"env":null,"y":154828.797,"x":243302.406},
{"subnet":8,"ipstart":3489660928,"ipend":3506438143,"alias":null,"radius":20736,"env":null,"y":243302.406,"x":-331776}]};
nodes.nodes = {};
nodes.GET_response(root_response);
let expected = ["1325400064","1845493760","2030043136","2281701376","3170893824","3489660928","352321536","889192448"];
let real = Object.keys(nodes.nodes).sort();
expect(real).toEqual(expected);
});
it("works in the child case", function () {
nodes.nodes = get_mock_node_tree();
let response = {"136.0.0.0/8":[{"subnet":16,"ipstart":2292449280,"ipend":2292514815,"alias":null,"radius":864,"env":null,"y":29030.4,"x":12441.6}]};
let n = nodes.find_by_addr("136.164");
expect(n.address).toEqual("136.0.0.0");
nodes.GET_response(response);
n = nodes.find_by_addr("136.164");
expect(n.address).toEqual("136.164.0.0");
});
});
describe("determine_number", function () {
beforeEach(function () {
nodes.nodes = get_mock_node_tree();
});
it("/8", function () {
let n = nodes.find_by_addr("110");
expect(nodes.determine_number(n)).toEqual(110);
});
it("/16", function () {
let n = nodes.find_by_addr("110.145");
expect(nodes.determine_number(n)).toEqual(145);
});
it("/24", function () {
let n = nodes.find_by_addr("110.145.200");
expect(nodes.determine_number(n)).toEqual(200);
});
it("/32", function () {
let n = nodes.find_by_addr("110.145.200.79");
expect(nodes.determine_number(n)).toEqual(79);
});
});
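//Port position codes name a side and a slot on that side (e.g. 't-l' is the
//left slot on the top edge); the asserted coordinates sit just outside the
//node's radius on the named side.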
describe("port_to_pos", function () {
it("matches correctly", function () {
let n = {"abs_x": 970, "abs_y": 50, "radius": 15};
expect(nodes.port_to_pos(n, 't-l')).toEqual([965, 29]);
expect(nodes.port_to_pos(n, 't-r')).toEqual([975, 29]);
expect(nodes.port_to_pos(n, 'b-l')).toEqual([965, 71]);
expect(nodes.port_to_pos(n, 'b-r')).toEqual([975, 71]);
expect(nodes.port_to_pos(n, 'l-t')).toEqual([949, 45]);
expect(nodes.port_to_pos(n, 'l-b')).toEqual([949, 55]);
expect(nodes.port_to_pos(n, 'r-t')).toEqual([991, 45]);
expect(nodes.port_to_pos(n, 'r-b')).toEqual([991, 55]);
});
});
describe("nearest_corner", function() {
it("matches corners", function() {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15};
expect(nodes.nearest_corner(n, 400, 30)).toEqual([485, 35]);
expect(nodes.nearest_corner(n, 400, 50)).toEqual([485, 65]);
expect(nodes.nearest_corner(n, 400, 70)).toEqual([485, 65]);
expect(nodes.nearest_corner(n, 500, 30)).toEqual([515, 35]);
expect(nodes.nearest_corner(n, 500, 50)).toEqual([515, 65]);
expect(nodes.nearest_corner(n, 500, 70)).toEqual([515, 65]);
expect(nodes.nearest_corner(n, 600, 30)).toEqual([515, 35]);
expect(nodes.nearest_corner(n, 600, 50)).toEqual([515, 65]);
expect(nodes.nearest_corner(n, 600, 70)).toEqual([515, 65]);
});
});
describe("delta_to_dest", function () {
it("follows normal path", function () {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15};
expect(nodes.delta_to_dest(n, 400, 30)).toEqual([485, 53]);
expect(nodes.delta_to_dest(n, 400, 50)).toEqual([485, 53]);
expect(nodes.delta_to_dest(n, 400, 70)).toEqual([485, 53]);
expect(nodes.delta_to_dest(n, 500, 30)).toEqual([497, 35]);
expect(nodes.delta_to_dest(n, 500, 50)).toEqual([497, 35]);
expect(nodes.delta_to_dest(n, 500, 70)).toEqual([503, 65]);
expect(nodes.delta_to_dest(n, 600, 30)).toEqual([515, 47]);
expect(nodes.delta_to_dest(n, 600, 50)).toEqual([515, 47]);
expect(nodes.delta_to_dest(n, 600, 70)).toEqual([515, 47]);
});
});
describe("delta_to_src", function () {
it("follows normal path", function () {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15};
expect(nodes.delta_to_src(n, 400, 30)).toEqual([485, 47]);
expect(nodes.delta_to_src(n, 400, 50)).toEqual([485, 47]);
expect(nodes.delta_to_src(n, 400, 70)).toEqual([485, 47]);
expect(nodes.delta_to_src(n, 500, 30)).toEqual([503, 35]);
expect(nodes.delta_to_src(n, 500, 50)).toEqual([503, 35]);
expect(nodes.delta_to_src(n, 500, 70)).toEqual([497, 65]);
expect(nodes.delta_to_src(n, 600, 30)).toEqual([515, 53]);
expect(nodes.delta_to_src(n, 600, 50)).toEqual([515, 53]);
expect(nodes.delta_to_src(n, 600, 70)).toEqual([515, 53]);
});
});
describe("get_inbound_link_point", function () {
it("lines up to existing ports", function () {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 32, "ports": {123: "t-l", 456: "r-b"}};
expect(nodes.get_inbound_link_point(n, 400, 30, 123)).toEqual(nodes.port_to_pos(n, "t-l"));
expect(nodes.get_inbound_link_point(n, 400, 30, 456)).toEqual(nodes.port_to_pos(n, "r-b"));
});
it("uses normal connection points when subnet is not 32", function () {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 8, ports: {}};
let x = 400;
let y = 30;
let port = 123;
expect(nodes.get_inbound_link_point(n, x, y, port)).toEqual(nodes.delta_to_dest(n, x, y));
n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 16, ports: {}};
x = 800;
y = 30;
expect(nodes.get_inbound_link_point(n, x, y, port)).toEqual(nodes.delta_to_dest(n, x, y));
n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 24, ports: {}};
x = 800;
y = 90;
expect(nodes.get_inbound_link_point(n, x, y, port)).toEqual(nodes.delta_to_dest(n, x, y));
n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 32, ports: {}};
x = 400;
y = 90;
expect(nodes.get_inbound_link_point(n, x, y, port)).not.toEqual(nodes.delta_to_dest(n, x, y));
});
it("picks a corner if subnet is 32 but no port match", function () {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 32, "ports": {123: "t-l", 456: "r-b"}};
let x = 400;
let y = 30;
let port = 80;
expect(nodes.get_inbound_link_point(n, x, y, port)).toEqual(nodes.nearest_corner(n, x, y));
port = 123;
expect(nodes.get_inbound_link_point(n, x, y, port)).not.toEqual(nodes.nearest_corner(n, x, y));
x = 600;
y = 70;
port = 443;
expect(nodes.get_inbound_link_point(n, x, y, port)).toEqual(nodes.nearest_corner(n, x, y));
port = 456;
expect(nodes.get_inbound_link_point(n, x, y, port)).not.toEqual(nodes.nearest_corner(n, x, y));
});
});
describe("get_outbound_link_point", function () {
it("uses normal connection points when subnet is not 32", function () {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 8, ports: {}};
let x = 400;
let y = 30;
let port = 123;
expect(nodes.get_outbound_link_point(n, x, y, port)).toEqual(nodes.delta_to_src(n, x, y));
n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 16, ports: {}};
x = 800;
y = 30;
expect(nodes.get_outbound_link_point(n, x, y, port)).toEqual(nodes.delta_to_src(n, x, y));
n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 24, ports: {}};
x = 800;
y = 90;
expect(nodes.get_outbound_link_point(n, x, y, port)).toEqual(nodes.delta_to_src(n, x, y));
n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 32, ports: {}};
x = 400;
y = 90;
expect(nodes.get_outbound_link_point(n, x, y, port)).not.toEqual(nodes.delta_to_src(n, x, y));
});
it("picks a corner if subnet is 32", function () {
let n = {"abs_x": 500, "abs_y": 50, "radius": 15, "subnet": 32, "ports": {123: "t-l", 456: "r-b"}};
let x = 400;
let y = 30;
let port = 80;
expect(nodes.get_outbound_link_point(n, x, y, port)).toEqual(nodes.nearest_corner(n, x, y));
port = 123;
expect(nodes.get_outbound_link_point(n, x, y, port)).toEqual(nodes.nearest_corner(n, x, y));
x = 600;
y = 70;
port = 443;
expect(nodes.get_outbound_link_point(n, x, y, port)).toEqual(nodes.nearest_corner(n, x, y));
port = 456;
expect(nodes.get_outbound_link_point(n, x, y, port)).toEqual(nodes.nearest_corner(n, x, y));
});
});
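//The two suites below assert that absolute positions cascade down the tree:
//each child's abs_x/abs_y is its parent's absolute position plus its own
//rel_x/rel_y.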
describe("update_pos_tree", function () {
beforeEach(function () {
a = {address: "10.0.0.0", subnet: 8, children: {}, rel_x: 1000, rel_y: 1};
b = {address: "10.20.0.0", subnet: 16, children: {}, rel_x: 100, rel_y: 2};
c = {address: "10.20.30.0", subnet: 24, children: {}, rel_x: 10, rel_y: 4};
d = {address: "10.20.30.40", subnet: 32, children: {}, rel_x: 1, rel_y: 8};
a.children[169082880] = b;
b.children[169090560] = c;
c.children[169090600] = d;
b.parent = a;
c.parent = b;
d.parent = c;
});
it("calculates absolute pos, cascading", function () {
nodes.update_pos_tree(a, null);
expect(a.abs_x).toEqual(1000);
expect(a.abs_y).toEqual(1);
expect(b.abs_x).toEqual(1100);
expect(b.abs_y).toEqual(3);
expect(c.abs_x).toEqual(1110);
expect(c.abs_y).toEqual(7);
expect(d.abs_x).toEqual(1111);
expect(d.abs_y).toEqual(15);
});
});
describe("set_relative_pos", function () {
beforeEach(function () {
a = {address: "10.0.0.0", subnet: 8, children: {}, rel_x: 1000, rel_y: 1};
b = {address: "10.20.0.0", subnet: 16, children: {}, rel_x: 100, rel_y: 2};
c = {address: "10.20.30.0", subnet: 24, children: {}, rel_x: 10, rel_y: 4};
d = {address: "10.20.30.40", subnet: 32, children: {}, rel_x: 1, rel_y: 8};
a.children[169082880] = b;
b.children[169090560] = c;
c.children[169090600] = d;
b.parent = a;
c.parent = b;
d.parent = c;
});
it("cascades", function () {
nodes.set_relative_pos(a, 2000, 17);
expect(a.abs_x).toEqual(2000);
expect(a.abs_y).toEqual(17);
expect(b.abs_x).toEqual(2100);
expect(b.abs_y).toEqual(19);
expect(c.abs_x).toEqual(2110);
expect(c.abs_y).toEqual(23);
expect(d.abs_x).toEqual(2111);
expect(d.abs_y).toEqual(31);
});
});
describe("get_name", function () {
it("prefers an alias", function () {
let alias = "test1";
let n = {"alias": alias, address: "10.20.30.40", subnet: 32, children: {}, rel_x: 1, rel_y: 8};
expect(nodes.get_name(n)).toEqual(alias);
expect(typeof(nodes.get_name(n))).toEqual("string");
});
it("returns a number string otherwise", function () {
let addr = "10.20.30.40";
nodes.layout_flat = true;
let n = {alias: "", address: addr, subnet: 16, ipstart: 169082880, ipend: 169148415};
expect(nodes.get_name(n)).toEqual(addr);
expect(typeof(nodes.get_name(n))).toEqual("string");
nodes.layout_flat = false;
expect(nodes.get_name(n)).toEqual("20");
expect(typeof(nodes.get_name(n))).toEqual("string");
});
});
describe("flat_scale", function () {});
describe("get_address", function () {
it("appends subnet (unless =32)", function () {
let n = {address: "1.2.3.4", subnet: 32};
expect(nodes.get_address(n)).toEqual("1.2.3.4");
n = {address: "1.2.3.4", subnet: 24};
expect(nodes.get_address(n)).toEqual("1.2.3.4/24");
n = {address: "1.2.3.4", subnet: 16};
expect(nodes.get_address(n)).toEqual("1.2.3.4/16");
n = {address: "1.2.3.4", subnet: 8};
expect(nodes.get_address(n)).toEqual("1.2.3.4/8");
});
it("pads with zeroes", function () {
let n = {address: "1.2.3", subnet: 24};
expect(nodes.get_address(n)).toEqual("1.2.3.0/24");
n = {address: "1.2", subnet: 16};
expect(nodes.get_address(n)).toEqual("1.2.0.0/16");
n = {address: "1", subnet: 8};
expect(nodes.get_address(n)).toEqual("1.0.0.0/8");
});
});
describe("do_layout", function () {});
describe("set_layout", function () {
it("only works with valid styles", function () {
spyOn(nodes, "do_layout");
expect(nodes.set_layout("Circle")).toBe(true);
expect(nodes.set_layout("Square")).toBe(false);
expect(nodes.set_layout("Grid")).toBe(true);
expect(nodes.set_layout("Litmus")).toBe(false);
expect(nodes.set_layout("Address")).toBe(true);
expect(nodes.do_layout).toHaveBeenCalledTimes(3);
});
});
});
describe("address layout", function () {
beforeEach(function () {
address = nodes.layouts.Address;
});
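//recursive_placement is asserted to place octet values on a 16x16 grid of
//points spanning a square of the given size: value 0 maps to the top-left
//point and 255 to the bottom-right, with the last array element choosing the
//coarse position and earlier elements refining it.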
describe("recursive_placement", function () {
it("base case", function () {
expect(address.recursive_placement(150, [0])).toEqual({x: -75, y: -75});
expect(address.recursive_placement(150, [1])).toEqual({x: -65, y: -75});
expect(address.recursive_placement(150, [14])).toEqual({x: 65, y: -75});
expect(address.recursive_placement(150, [15])).toEqual({x: 75, y: -75});
expect(address.recursive_placement(150, [16])).toEqual({x: -75, y: -65});
expect(address.recursive_placement(150, [239])).toEqual({x: 75, y: 65});
expect(address.recursive_placement(150, [240])).toEqual({x: -75, y: 75});
expect(address.recursive_placement(150, [241])).toEqual({x: -65, y: 75});
expect(address.recursive_placement(150, [254])).toEqual({x: 65, y: 75});
expect(address.recursive_placement(150, [255])).toEqual({x: 75, y: 75});
});
it("recursive case", function () {
expect(address.recursive_placement(36000, [0, 0])).toEqual({x: -19125, y: -19125});
expect(address.recursive_placement(36000, [15, 0])).toEqual({x: -16875, y: -19125});
expect(address.recursive_placement(36000, [240, 0])).toEqual({x: -19125, y: -16875});
expect(address.recursive_placement(36000, [255, 0])).toEqual({x: -16875, y: -16875});
expect(address.recursive_placement(36000, [0, 15])).toEqual({x: 16875, y: -19125});
expect(address.recursive_placement(36000, [15, 15])).toEqual({x: 19125, y: -19125});
expect(address.recursive_placement(36000, [240, 15])).toEqual({x: 16875, y: -16875});
expect(address.recursive_placement(36000, [255, 15])).toEqual({x: 19125, y: -16875});
expect(address.recursive_placement(36000, [0, 240])).toEqual({x: -19125, y: 16875});
expect(address.recursive_placement(36000, [15, 240])).toEqual({x: -16875, y: 16875});
expect(address.recursive_placement(36000, [240, 240])).toEqual({x: -19125, y: 19125});
expect(address.recursive_placement(36000, [255, 240])).toEqual({x: -16875, y: 19125});
expect(address.recursive_placement(36000, [0, 255])).toEqual({x: 16875, y: 16875});
expect(address.recursive_placement(36000, [15, 255])).toEqual({x: 19125, y: 16875});
expect(address.recursive_placement(36000, [240, 255])).toEqual({x: 16875, y: 19125});
expect(address.recursive_placement(36000, [255, 255])).toEqual({x: 19125, y: 19125});
});
});
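//169090600 is 10.20.30.40; get_segment_difference is asserted to return the
//dotted-quad octets that lie between the two subnet depths (exclusive of the
//first depth, inclusive of the second).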
describe("get_segment_difference", function () {
it("base case", function () {
expect(address.get_segment_difference(0, 8, 169090600)).toEqual(["10"]);
expect(address.get_segment_difference(0, 16, 169090600)).toEqual(["10", "20"]);
expect(address.get_segment_difference(0, 24, 169090600)).toEqual(["10", "20", "30"]);
expect(address.get_segment_difference(0, 32, 169090600)).toEqual(["10", "20", "30", "40"]);
expect(address.get_segment_difference(8, 16, 169090600)).toEqual(["20"]);
expect(address.get_segment_difference(8, 24, 169090600)).toEqual(["20", "30"]);
expect(address.get_segment_difference(8, 32, 169090600)).toEqual(["20", "30", "40"]);
expect(address.get_segment_difference(16, 24, 169090600)).toEqual(["30"]);
expect(address.get_segment_difference(16, 32, 169090600)).toEqual(["30", "40"]);
expect(address.get_segment_difference(24, 32, 169090600)).toEqual(["40"]);
expect(address.get_segment_difference(0, 0, 169090600)).toEqual([]);
expect(address.get_segment_difference(8, 8, 169090600)).toEqual([]);
expect(address.get_segment_difference(16, 16, 169090600)).toEqual([]);
expect(address.get_segment_difference(24, 24, 169090600)).toEqual([]);
expect(address.get_segment_difference(32, 32, 169090600)).toEqual([]);
});
});
describe("arrange_collection", function () {});
describe("layout", function () {});
});
describe("grid layout", function () {
beforeEach(function () {
grid = nodes.layouts.Grid;
});
describe("arrange_collection", function () {});
describe("layout", function () {});
});
describe("circle layout", function () {
beforeEach(function () {
circle = nodes.layouts.Circle;
});
describe("find_center_node", function () {
it("finds the most-connected node", function () {
let tree = get_mock_node_tree();
let fake_tree = {
b: {inputs: [1,2], outputs: [4,5,6]},
a: {inputs: [1,2,3], outputs: [4,5,6]},
c: {inputs: [1,2,3], outputs: [4,5]},
};
expect(circle.find_center_node(fake_tree)).toEqual(fake_tree['a']);
expect(circle.find_center_node(tree)).toEqual(tree[352321536])
});
});
//Cannot test because the test data has input/output connections broken.
describe("get_all_attached_nodes", function () {});
describe("sorted_unique", function () {
it("sorts", function () {
sorter = function(a, b) {return a-b};
expect(circle.sorted_unique([3,2,7,6,8], sorter)).toEqual([2,3,6,7,8]);
});
it("uniquifies", function () {
sorter = function(a, b) {return a.ipstart-b.ipstart};
nodelist = [
{address: "30", ipstart: 30, subnet: 5},
{address: "10", ipstart: 10, subnet: 5},
{address: "20", ipstart: 20, subnet: 5},
{address: "30", ipstart: 30, subnet: 5},
{address: "20", ipstart: 20, subnet: 5},
{address: "10", ipstart: 10, subnet: 5},
{address: "10", ipstart: 10, subnet: 5},
{address: "30", ipstart: 30, subnet: 5},
{address: "20", ipstart: 20, subnet: 5},
]
expected = [
{address: "10", ipstart: 10, subnet: 5},
{address: "20", ipstart: 20, subnet: 5},
{address: "30", ipstart: 30, subnet: 5},
]
expect(circle.sorted_unique(nodelist, sorter)).toEqual(expected);
})
});
describe("remove_item", function () {
it("removes an item", function () {
numlist = [3,2,7,6,8];
circle.remove_item(numlist, 6);
expect(numlist).toEqual([3,2,7,8]);
});
});
describe("move_to_center", function () {});
describe("arrange_nodes_recursion", function () {});
describe("arrange_nodes_evenly", function () {});
describe("node_sorter", function () {
it("sorts", function () {
nodelist = [
{ipstart: 30, subnet: 5},
{ipstart: 25, subnet: 10},
{ipstart: 31, subnet: 1},
{ipstart: 20, subnet: 5},
{ipstart: 25, subnet: 5},
{ipstart: 10, subnet: 5},
{ipstart: 20, subnet: 10},
{ipstart: 15, subnet: 10},
]
expected = [
{ipstart: 10, subnet: 5},
{ipstart: 15, subnet: 10},
{ipstart: 20, subnet: 5},
{ipstart: 20, subnet: 10},
{ipstart: 25, subnet: 5},
{ipstart: 25, subnet: 10},
{ipstart: 30, subnet: 5},
{ipstart: 31, subnet: 1},
]
expect(circle.sorted_unique(nodelist, circle.node_sorter)).toEqual(expected);
});
});
describe("layout", function () {});
});
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/map_node_spec.js
| 0.880463 | 0.74704 |
map_node_spec.js
|
pypi
|
describe("map.js file", function () {
describe("zoom levels", function() {
it("defined", function () {
expect(zNodes16).toBeDefined();
expect(zNodes24).toBeDefined();
expect(zNodes32).toBeDefined();
expect(zLinks16).toBeDefined();
expect(zLinks24).toBeDefined();
expect(zLinks32).toBeDefined();
});
it("ascending order", function () {
expect(zLinks16).toBeLessThan(zLinks24);
expect(zLinks24).toBeLessThan(zLinks32);
expect(zNodes16).toBeLessThan(zNodes24);
expect(zNodes24).toBeLessThan(zNodes32);
});
});
describe("config", function () {
it("defined", function() {
let cfg = Object.keys(config);
expect(cfg).toContain('filter');
expect(cfg).toContain('tmin');
expect(cfg).toContain('tmax');
expect(cfg).toContain('tstart');
expect(cfg).toContain('tend');
expect(cfg).toContain('protocol');
expect(cfg).toContain('initial_zoom');
});
});
describe("currentSubnet", function () {
beforeEach(function () {
epsilon = 0.00001;
});
it("matches 8", function () {
expect(currentSubnet(zNodes16 - epsilon)).toEqual(8);
});
it("matches 16", function () {
expect(currentSubnet(zNodes16)).toEqual(16);
expect(currentSubnet(zNodes24 - epsilon)).toEqual(16);
});
it("matches 24", function () {
expect(currentSubnet(zNodes24)).toEqual(24);
expect(currentSubnet(zNodes32 - epsilon)).toEqual(24);
});
it("matches 24", function () {
expect(currentSubnet(zNodes32)).toEqual(32);
});
});
describe("get_view_center", function () {
beforeEach(function () {
viewrect = { x: 0, y: 58, width: 959, height: 555, top: 58, right: 959, bottom: 613, left: 0 };
});
it("untransformed", function () {
expect(get_view_center(viewrect, 0, 0, 1)).toEqual({x: viewrect.width / 2, y: viewrect.height / 2});
});
it("translated", function () {
let x = viewrect.width / 2;
let y = viewrect.height / 2;
expect(get_view_center(viewrect, x, y, 1)).toEqual({x: 0, y: 0});
expect(get_view_center(viewrect, x - 100, y - 100, 1)).toEqual({x: 100, y: 100});
expect(get_view_center(viewrect, x + 100, y + 100, 1)).toEqual({x: -100, y: -100});
});
it("scaled", function () {
let x = viewrect.width / 2;
let y = viewrect.height / 2;
let scale = 0.5;
expect(get_view_center(viewrect, x, y, 1)).toEqual({x: 0, y: 0});
expect(get_view_center(viewrect, x - 100, y - 100, scale)).toEqual({x: 200, y: 200});
expect(get_view_center(viewrect, x + 100, y + 100, scale)).toEqual({x: -200, y: -200});
scale = 2;
expect(get_view_center(viewrect, x, y, 1)).toEqual({x: 0, y: 0});
expect(get_view_center(viewrect, x - 100, y - 100, scale)).toEqual({x: 50, y: 50});
expect(get_view_center(viewrect, x + 100, y + 100, scale)).toEqual({x: -50, y: -50});
});
});
describe("removeChildren", function () {
it("works without children", function () {
var div = document.createElement("div");
removeChildren(div);
expect(div.childElementCount).toEqual(0);
});
it("works with children", function () {
var div = document.createElement("div");
div.appendChild(document.createElement("p"));
div.appendChild(document.createElement("p"));
div.appendChild(document.createElement("p"));
removeChildren(div);
expect(div.childElementCount).toEqual(0);
});
it("works with grandchildren", function () {
var i;
var div = document.createElement("div");
var cdiv;
for (i = 0; i < 10; i += 1) {
cdiv = document.createElement("div");
cdiv.appendChild(document.createElement("p"));
cdiv.appendChild(document.createElement("p"));
cdiv.appendChild(document.createElement("p"));
div.appendChild(cdiv)
}
removeChildren(div);
expect(div.childElementCount).toEqual(0);
});
});
describe("normalize_addr", function () {
it("doesn't change good addrs", function () {
expect(normalize_addr("192.168.100.101/32")).toEqual("192.168.100.101/32");
expect(normalize_addr("192.168.0.0/32")).toEqual("192.168.0.0/32");
expect(normalize_addr("192.168.0.0/16")).toEqual("192.168.0.0/16");
});
it("pads zeroes", function () {
expect(normalize_addr("192/32")).toEqual("192.0.0.0/32");
expect(normalize_addr("192/24")).toEqual("192.0.0.0/24");
expect(normalize_addr("192/16")).toEqual("192.0.0.0/16");
expect(normalize_addr("192/8")).toEqual("192.0.0.0/8");
expect(normalize_addr("192")).toEqual("192.0.0.0/8");
});
it("works without subnet", function () {
expect(normalize_addr("192")).toEqual("192.0.0.0/8");
expect(normalize_addr("192.168")).toEqual("192.168.0.0/16");
expect(normalize_addr("192.168.0")).toEqual("192.168.0.0/24");
expect(normalize_addr("192.168.0.1")).toEqual("192.168.0.1/32");
});
});
describe("generic_ajax_failure", function () {});
describe("generic_ajax_success", function () {});
describe("ip_ntos", function () {
it("converts", function () {
expect(ip_ntos(16909060)).toEqual("1.2.3.4");
expect(ip_ntos(4026531840)).toEqual("240.0.0.0");
expect(ip_ntos(4042388211)).toEqual("240.241.242.243");
});
});
describe("checkLoD", function () {});
});
describe("controller", function () {
describe("init", function () {});
describe("init_buttons", function () {});
describe("init_demo", function () {});
describe("init_window", function () {});
describe("GET_settings", function () {});
describe("import_settings", function () {
it("sets variables", function () {
settings = {
"color_bg":11206621,
"color_node":5592524,
"color_tcp":5592524,
"color_udp":13391189,
"color_error":10053222,
"color_label":0,
"color_label_bg":16777215,
"datasources":{
"3":{
"flat":0,
"name":"default",
"ar_active":0,
"ar_interval":300,
"id":3,
"subscription":1
},
"7":{
"flat":0,
"name":"seventh",
"ar_active":1,
"ar_interval":300,
"id":7,
"subscription":1
}
},
"datasource":7,
"subscription":1
};
controller.import_settings(settings);
expect(controller.dsid).toEqual(7);
expect(controller.datasource).toEqual(settings["datasources"]["7"]);
expect(controller.autorefresh).toBe(true);
settings["datasource"] = 3;
controller.import_settings(settings);
expect(controller.dsid).toEqual(3);
expect(controller.datasource).toEqual(settings["datasources"]["3"]);
expect(controller.autorefresh).toBe(false);
})
});
describe("GET_timerange", function () {});
describe("import_timerange", function () {
beforeEach(function () {
spyOn(window, "slider_init");
})
it("window is 300s", function () {
controller.import_timerange({min: 1000, max: 1000});
expect(config.tend - config.tstart).toEqual(300);
controller.import_timerange({min: 1000, max: 2000});
expect(config.tend - config.tstart).toEqual(300);
expect(slider_init).toHaveBeenCalledTimes(2);
});
it("range is at least 300s", function () {
range = {min: 1000, max: 1000}
controller.import_timerange(range);
expect(config.tmax).toEqual(1000);
expect(config.tmin).toEqual(700);
range = {min: 1000, max: 2000}
controller.import_timerange(range);
expect(config.tmax).toEqual(2000);
expect(config.tmin).toEqual(1000);
expect(slider_init).toHaveBeenCalledTimes(2);
});
});
describe("event_to_tag", function () {
it("works both ways", function () {
let div = {tagName: "DIV"};
let button = {tagName: "BUTTON", parentElement: div};
let header = {tagName: "H4", parentElement: button};
let optA = {target: button};
let optB = {target: header};
let objA = controller.event_to_tag(optA, "BUTTON");
let objB = controller.event_to_tag(optB, "BUTTON");
expect(objA.tagName).toEqual("BUTTON");
expect(objB.tagName).toEqual("BUTTON");
});
});
describe("event_datasource", function () {});
describe("event_auto_refresh", function () {});
describe("event_line_width", function () {});
describe("event_show_buttons", function () {});
describe("event_layout_mode", function () {});
describe("event_layout_arrangement", function () {});
});
describe("map", function () {
beforeEach(function () {
map = map_settings;
});
describe("reset", function () {
it("resets the object", function () {
map.structure = {"any": "thing", "can": "go", "in": "here"};
map.reset();
let keys = Object.keys(map.structure);
expect(keys).toContain("objects");
expect(map.structure.objects.length).toEqual(0);
expect(keys).toContain("children");
expect(Object.keys(map.structure.children).length).toEqual(0);
})
});
describe("clear_html", function () {});
describe("make_html", function () {});
describe("init_accordion", function () {});
describe("rebuild", function () {});
describe("add_category", function () {
it("adds a category if missing", function () {
map.reset();
map.add_category("cat1");
map.add_category("cat2");
map.add_category("cat3");
map.add_category("cat2");
map.add_category("cat1");
map.add_category("cat2");
let cats = Object.keys(map.structure.children);
cats.sort();
expect(cats).toEqual(["cat1", "cat2", "cat3"]);
});
});
describe("add_subcategory", function () {
it("adds when needed", function () {
map.reset();
map.add_subcategory("cat1", "sc1");
map.add_subcategory("cat2", "sc2");
map.add_subcategory("cat1", "sc3");
map.add_subcategory("cat2", "sc4");
map.add_subcategory("cat1", "sc1");
map.add_subcategory("cat2", "sc2");
let cats = Object.keys(map.structure.children).sort();
expect(cats).toEqual(["cat1", "cat2"]);
let subcat1 = Object.keys(map.structure.children.cat1.children).sort();
expect(subcat1).toEqual(["sc1", "sc3"]);
let subcat2 = Object.keys(map.structure.children.cat2.children).sort();
expect(subcat2).toEqual(["sc2", "sc4"]);
});
});
describe("add_object", function () {
it("creates heirarchy", function () {
map.reset();
let objA = {"a": 1};
let objB = {"b": 3};
let objC = {"c": 9};
let objD = {"d": 27};
map.add_object("cat1", "sc1", objA);
map.add_object("cat2", "sc2", objB);
map.add_object("cat1", "sc3", objC);
map.add_object("cat2", "sc4", objD);
let cats = Object.keys(map.structure.children).sort();
expect(cats).toEqual(["cat1", "cat2"]);
let subcat1 = Object.keys(map.structure.children.cat1.children).sort();
expect(subcat1).toEqual(["sc1", "sc3"]);
let subcat2 = Object.keys(map.structure.children.cat2.children).sort();
expect(subcat2).toEqual(["sc2", "sc4"]);
expect(map.structure.children.cat1.children.sc1.objects).toEqual([objA]);
expect(map.structure.children.cat1.children.sc3.objects).toEqual([objC]);
expect(map.structure.children.cat2.children.sc2.objects).toEqual([objB]);
expect(map.structure.children.cat2.children.sc4.objects).toEqual([objD]);
});
it("if needed", function () {
map.reset();
let objA = {"a": 1};
let objB = {"b": 3};
let objC = {"c": 9};
let objD = {"d": 27};
map.add_object(null, null, objA);
map.add_object(null, "sc1", objB);
map.add_object("cat1", null, objC);
map.add_object("cat1", "sc1", objD);
expect(map.structure.objects).toEqual([objA]);
expect(map.structure.children.sc1.objects).toEqual([objB]);
expect(map.structure.children.cat1.objects).toEqual([objC]);
expect(map.structure.children.cat1.children.sc1.objects).toEqual([objD]);
})
});
describe("create_labeliconbutton", function () {
it("returns a button", function () {
let btn = map.create_labeliconbutton("id1", "lock", "Secure", "click me", true, null);
expect(btn.tagName).toEqual("BUTTON");
})
});
describe("create_iconbutton", function () {
it("returns a button", function () {
let btn = map.create_iconbutton("id1", "lock", "click me", true, null);
expect(btn.tagName).toEqual("BUTTON");
})
});
describe("create_labelbutton", function () {
it("returns a button", function () {
let btn = map.create_labelbutton("id1", "Secure", "click me", true, null);
expect(btn.tagName).toEqual("BUTTON");
})
});
describe("create_divider", function () {});
describe("btn_toggleable", function () {});
describe("create_buttongroup", function () {});
describe("create_input", function () {});
describe("init", function () {});
});
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/map_spec.js
| 0.854171 | 0.822332 |
map_spec.js
|
pypi
|
describe("map_links file", function () {
describe("link_request_add", function () {
it("adds to the queue", function () {
m_link_requests = [];
link_request_add("1.2.3.4");
link_request_add("2.3.4.5");
link_request_add("3.4.5.6");
link_request_add("4.5.6.7");
let expected = [
"1.2.3.4",
"2.3.4.5",
"3.4.5.6",
"4.5.6.7"
]
expect(m_link_requests).toEqual(expected);
})
});
describe("link_request_add_all", function () {
it("adds all nodes", function () {
nodeE = {address: "110.169.200.144", subnet: 32, children: []};
nodeD = {address: "110.169.200.0", subnet: 24, children: [nodeE]};
nodeC = {address: "110.169.0.0", subnet: 16, children: [nodeD]};
nodeB = {address: "110.0.0.0", subnet: 8, children: [nodeC]};
nodeA = {address: "21.0.0.0", subnet: 8, children: []};
let coll = {
a: nodeA,
b: nodeB
}
m_link_requests = [];
link_request_add_all(coll);
let expected = ['21.0.0.0/8', '110.0.0.0/8', '110.169.0.0/16',
'110.169.200.0/24', '110.169.200.144'];
expect(m_link_requests).toEqual(expected);
});
});
describe("dist_between_squared", function () {
it("is accurate", function () {
expect(dist_between_squared(0, 0, 0, 10)).toEqual(100);
expect(dist_between_squared(0, 0, 10, 0)).toEqual(100);
expect(dist_between_squared(0, 10, 0, 0)).toEqual(100);
expect(dist_between_squared(10, 0, 0, 0)).toEqual(100);
expect(dist_between_squared(3, 4, 6, 8)).toEqual(25);
});
});
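//The specs below assert that link_comparator sorts pending requests so the
//"middlemost" on-screen nodes come first, while requests whose nodes are not
//in renderCollection are delayed to the end.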
describe("link_comparator", function () {
it("finds the middlemost", function () {
let nodeA = {ipstart: 168427520, ipend: 168493055, address: "10.10.0.0", abs_x: 0, abs_y: 0, subnet: 16};
let nodeB = {ipstart: 169082880, ipend: 169148415, address: "10.20.0.0", abs_x: 10, abs_y: 0, subnet: 16};
let nodeC = {ipstart: 169738240, ipend: 169803775, address: "10.30.0.0", abs_x: 14, abs_y: 14, subnet: 16};
let nodeD = {ipstart: 167772160, ipend: 184549375, address: "10.0.0.0", abs_x: 20, abs_y: 0, subnet: 8};
let nodeE = {ipstart: 170393600, ipend: 170459135, address: "10.40.0.0", abs_x: 0, abs_y: 20, subnet: 16};
let nodeF = {ipstart: 171048960, ipend: 171114495, address: "10.50.0.0", abs_x: 15, abs_y: 15, subnet: 16};
let nodeG = {ipstart: 171704320, ipend: 171769855, address: "10.60.0.0", abs_x: 20, abs_y: 20, subnet: 16};
nodeD.children = {
168427520: nodeA,
169082880: nodeB,
169738240: nodeC,
170393600: nodeE,
171048960: nodeF,
171704320: nodeG,
};
nodes.nodes = {
"167772160": nodeD
};
renderCollection = [nodeA, nodeB, nodeC, nodeD, nodeE, nodeF, nodeG];
controller.rect = { x: 0, y: 58, width: 959, height: 555, top: 58, right: 959, bottom: 613, left: 0 };
tx = controller.rect.width / 2;
ty = controller.rect.height / 2;
g_scale = 1;
let request = ["10.20.0.0/16", "10.50.0.0/16", "10.30.0.0/16", "10.60.0.0/16", "10.40.0.0/16", "10.0.0.0/8", "10.10.0.0/16"];
request.sort(link_comparator);
let expected = ["10.0.0.0/8", "10.10.0.0/16", "10.20.0.0/16", "10.30.0.0/16", "10.40.0.0/16", "10.50.0.0/16", "10.60.0.0/16"];
expect(request).toEqual(expected);
});
it("delays nodes that aren't in the render collection", function () {
let nodeA = {ipstart: 168427520, ipend: 168493055, address: "10.10.0.0", abs_x: 0, abs_y: 0, subnet: 16};
let nodeB = {ipstart: 169082880, ipend: 169148415, address: "10.20.0.0", abs_x: 10, abs_y: 0, subnet: 16};
let nodeC = {ipstart: 169738240, ipend: 169803775, address: "10.30.0.0", abs_x: 14, abs_y: 14, subnet: 16};
let nodeD = {ipstart: 167772160, ipend: 184549375, address: "10.0.0.0", abs_x: 20, abs_y: 0, subnet: 8};
let nodeE = {ipstart: 170393600, ipend: 170459135, address: "10.40.0.0", abs_x: 0, abs_y: 20, subnet: 16};
let nodeF = {ipstart: 171048960, ipend: 171114495, address: "10.50.0.0", abs_x: 15, abs_y: 15, subnet: 16};
let nodeG = {ipstart: 171704320, ipend: 171769855, address: "10.60.0.0", abs_x: 20, abs_y: 20, subnet: 16};
nodeD.children = {
168427520: nodeA,
169082880: nodeB,
169738240: nodeC,
170393600: nodeE,
171048960: nodeF,
171704320: nodeG,
};
nodes.nodes = {
"167772160": nodeD
};
renderCollection = [nodeA, nodeB, nodeC, nodeE, nodeF, nodeG];
controller.rect = { x: 0, y: 58, width: 959, height: 555, top: 58, right: 959, bottom: 613, left: 0 };
tx = controller.rect.width / 2;
ty = controller.rect.height / 2;
g_scale = 1;
let request = ["10.20.0.0/16", "10.50.0.0/16", "10.30.0.0/16", "10.60.0.0/16", "10.40.0.0/16", "10.0.0.0/8", "10.10.0.0/16"];
request.sort(link_comparator);
let expected = ["10.10.0.0/16", "10.20.0.0/16", "10.30.0.0/16", "10.40.0.0/16", "10.50.0.0/16", "10.60.0.0/16", "10.0.0.0/8"];
expect(request).toEqual(expected);
})
});
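//link_request_submit is asserted to de-duplicate and sort the queued
//requests, send at most g_chunkSize of them to GET_links, and skip the call
//entirely when the queue is empty.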
describe("link_request_submit", function () {
it("submits a limited number", function () {
spyOn(window, "GET_links")
//100 requests
m_link_requests = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'a2', 'b2', 'c2', 'd2', 'e2', 'f2', 'g2', 'h2', 'i2', 'j2', 'k2', 'l2', 'm2', 'n2', 'o2', 'p2', 'q2', 'r2', 's2', 't2',
'a3', 'b3', 'c3', 'd3', 'e3', 'f3', 'g3', 'h3', 'i3', 'j3', 'k3', 'l3', 'm3', 'n3', 'o3', 'p3', 'q3', 'r3', 's3', 't3',
'a4', 'b4', 'c4', 'd4', 'e4', 'f4', 'g4', 'h4', 'i4', 'j4', 'k4', 'l4', 'm4', 'n4', 'o4', 'p4', 'q4', 'r4', 's4', 't4',
'a5', 'b5', 'c5', 'd5', 'e5', 'f5', 'g5', 'h5', 'i5', 'j5', 'k5', 'l5', 'm5', 'n5', 'o5', 'p5', 'q5', 'r5', 's5', 't5',
];
g_chunkSize = 20;
let expected = [ 'a', 'a2', 'a3', 'a4', 'a5', 'b', 'b2', 'b3', 'b4', 'b5', 'c', 'c2', 'c3', 'c4', 'c5', 'd', 'd2', 'd3', 'd4', 'd5' ];
link_request_submit();
clearTimeout(m_link_timer);
expect(window.GET_links).toHaveBeenCalledTimes(1);
expect(window.GET_links).toHaveBeenCalledWith(expected);
});
it("doesn't fire if empty", function () {
spyOn(window, "GET_links")
m_link_requests = [];
link_request_submit();
expect(window.GET_links).not.toHaveBeenCalled();
});
it("skips duplicates", function () {
spyOn(window, "GET_links")
m_link_requests = ['a', 'b', 'c', 'b', 'a', 'b', 'c', 'b', 'a'];
let expected = ['a', 'b', 'c'];
link_request_submit();
clearTimeout(m_link_timer);
expect(window.GET_links).toHaveBeenCalledTimes(1);
expect(window.GET_links).toHaveBeenCalledWith(expected);
});
});
describe("link_remove_all", function () {
it("removes inputs, outputs, server, client", function () {
let coll = get_mock_node_tree();
node21 = coll["352321536"];
node110 = coll["1845493760"];
node110_145 = node110.children["1854996480"];
node110_145_200 = node110_145.children["1855047680"]
expect(node21.inputs.length).toEqual(4);
expect(node21.outputs.length).toEqual(3);
expect(node21.server).toBe(true);
expect(node21.client).toBe(true);
expect(node110_145_200.inputs.length).toEqual(0);
expect(node110_145_200.outputs.length).toEqual(1);
expect(node110_145_200.server).toBe(false);
expect(node110_145_200.client).toBe(true);
link_remove_all(coll);
expect(node21.inputs.length).toEqual(0);
expect(node21.outputs.length).toEqual(0);
expect(node21.server).toBe(false);
expect(node21.client).toBe(false);
expect(node110_145_200.inputs.length).toEqual(0);
expect(node110_145_200.outputs.length).toEqual(0);
expect(node110_145_200.server).toBe(false);
expect(node110_145_200.client).toBe(false);
})
});
describe("links_reset", function () {});
describe("GET_links", function () {});
describe("GET_links_callback", function () {});
describe("fix_link_pointers", function () {
it("fixes inputs and outputs", function () {
let node1 = {"address": "10.0.0.0", subnet: 8, ipstart: 167772160, ipend: 184549375};
let node11 = {"address": "10.10.0.0", subnet: 16, ipstart: 168427520, ipend: 168493055};
let node111 = {"address": "10.10.10.0", subnet: 24, ipstart: 168430080, ipend: 168430335};
let node1111 = {"address": "10.10.10.10", subnet: 32, ipstart: 168430090, ipend: 168430090};
let node112 = {"address": "10.10.20.0", subnet: 24, ipstart: 168432640, ipend: 168432895};
let node1122 = {"address": "10.10.20.20", subnet: 32, ipstart: 168432660, ipend: 168432660};
let node13 = {"address": "10.30.0.0", subnet: 16, ipstart: 169738240, ipend: 169803775};
let node133 = {"address": "10.30.30.0", subnet: 24, ipstart: 169745920, ipend: 169746175};
let node1333 = {"address": "10.30.30.30", subnet: 32, ipstart: 169745950, ipend: 169745950};
node1.children = {
168427520: node11,
169738240: node13
};
node11.children = {
168430080: node111,
168432640: node112,
};
node111.children = {168430090: node1111};
node112.children = {168432660: node1122};
node13.children = {169745920: node133};
node133.children = {169745950: node1333};
node1333.parent = node133;
node133.parent = node13;
node13.parent = node1;
node1122.parent = node112;
node112.parent=node11;
node1111.parent = node111;
node111.parent = node11
node11.parent = node1;
node1111.inputs = [
{src_start: 168432660, src_end: 168432660}
];
node1111.outputs = [
{dst_start: 169745950, dst_end: 169745950}
];
nodes.nodes = {
167772160: node1
};
fix_link_pointers(node1111);
expect(node1111.inputs[0].src.address).toEqual("10.10.20.0");
expect(node1111.outputs[0].dst.address).toEqual("10.30.0.0");
})
});
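//Each node is asserted to expose eight link ports (indices 0-7); the function
//returns the port nearest the peer node that is not already marked used.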
describe("link_closestEmptyPort", function () {
it("chooses closest port", function () {
let nodeLeft = {abs_x: -10, abs_y: 0};
let nodeRight = {abs_x: 10, abs_y: 0};
let nodeBottom = {abs_x: 0, abs_y: 10};
let nodeTop = {abs_x: 0, abs_y: -10};
let used = [false, false, false, false, false, false, false, false];
expect(link_closestEmptyPort(nodeLeft, nodeRight, used)).toEqual(1);
expect(link_closestEmptyPort(nodeRight, nodeLeft, used)).toEqual(4);
expect(link_closestEmptyPort(nodeTop, nodeBottom, used)).toEqual(6);
expect(link_closestEmptyPort(nodeBottom, nodeTop, used)).toEqual(3);
});
it("skips used ports", function () {
let nodeLeft = {abs_x: -10, abs_y: 0};
let nodeRight = {abs_x: 10, abs_y: 0};
let nodeBottom = {abs_x: 0, abs_y: 10};
let nodeTop = {abs_x: 0, abs_y: -10};
let used = [true, true, false, false, false, false, false, false];
expect(link_closestEmptyPort(nodeLeft, nodeRight, used)).toEqual(2);
used = [false, false, false, false, true, true, false, false];
expect(link_closestEmptyPort(nodeRight, nodeLeft, used)).toEqual(3);
used = [false, false, false, false, false, false, true, true];
expect(link_closestEmptyPort(nodeTop, nodeBottom, used)).toEqual(5);
used = [false, false, true, true, false, false, false, false];
expect(link_closestEmptyPort(nodeBottom, nodeTop, used)).toEqual(4);
});
});
describe("link_processPorts", function () {
it("adds ports as needed", function () {
let node1 = {"address": "10.0.0.0", subnet: 8, ipstart: 167772160, ipend: 184549375, abs_x: 0, abs_y: 0};
let node11 = {"address": "10.10.0.0", subnet: 16, ipstart: 168427520, ipend: 168493055, abs_x: 0, abs_y: 0};
let node111 = {"address": "10.10.10.0", subnet: 24, ipstart: 168430080, ipend: 168430335, abs_x: 0, abs_y: 0};
let node1111 = {"address": "10.10.10.10", subnet: 32, ipstart: 168430090, ipend: 168430090, abs_x: 10, abs_y: 0, ports: {}};
let node112 = {"address": "10.10.20.0", subnet: 24, ipstart: 168432640, ipend: 168432895, abs_x: 0, abs_y: 0};
let node1122 = {"address": "10.10.20.20", subnet: 32, ipstart: 168432660, ipend: 168432660, abs_x: 0, abs_y: 0};
let node13 = {"address": "10.30.0.0", subnet: 16, ipstart: 169738240, ipend: 169803775, abs_x: 0, abs_y: 0};
let node133 = {"address": "10.30.30.0", subnet: 24, ipstart: 169745920, ipend: 169746175, abs_x: 0, abs_y: 0};
let node1333 = {"address": "10.30.30.30", subnet: 32, ipstart: 169745950, ipend: 169745950, abs_x: -10, abs_y: 0};
node1.children = {
168427520: node11,
169738240: node13
};
node11.children = {
168430080: node111,
168432640: node112,
};
node111.children = {168430090: node1111};
node112.children = {168432660: node1122};
node13.children = {169745920: node133};
node133.children = {169745950: node1333};
node1333.parent = node133;
node133.parent = node13;
node13.parent = node1;
node1122.parent = node112;
node112.parent=node11;
node1111.parent = node111;
node111.parent = node11
node11.parent = node1;
nodes.nodes = {
167772160: node1
};
let dest = node1111;
expect(Object.keys(dest.ports)).toEqual([]);
let links = [
{port: 27, src_start: 169745950, src_end: 169745950},
{port: 28, src_start: 169745950, src_end: 169745950},
{port: 29, src_start: 169745950, src_end: 169745950},
{port: 30, src_start: 169745950, src_end: 169745950},
{port: 31, src_start: 169745950, src_end: 169745950}
];
link_processPorts(links, dest);
expect(Object.keys(dest.ports).sort()).toEqual(["27", "28", "29", "30", "31"]);
link_processPorts(links, dest);
expect(Object.keys(dest.ports).sort()).toEqual(["27", "28", "29", "30", "31"]);
links = [
{port: 26, src_start: 169745950, src_end: 169745950},
];
link_processPorts(links, dest);
expect(Object.keys(dest.ports).sort()).toEqual(["26", "27", "28", "29", "30", "31"]);
links = [
{port: 32, src_start: 169745950, src_end: 169745950},
{port: 33, src_start: 169745950, src_end: 169745950},
{port: 34, src_start: 169745950, src_end: 169745950},
{port: 35, src_start: 169745950, src_end: 169745950},
{port: 36, src_start: 169745950, src_end: 169745950},
{port: 37, src_start: 169745950, src_end: 169745950},
{port: 38, src_start: 169745950, src_end: 169745950},
];
link_processPorts(links, dest);
expect(Object.keys(dest.ports).sort()).toEqual(["26", "27", "28", "29", "30", "31", "32", "33"]);
});
});
});
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/map_links_spec.js
| 0.859826 | 0.712401 |
map_links_spec.js
|
pypi
|
describe("map_ports.js file", function () {
describe("ports.loaded", function () {
beforeEach(function () {
get_mock_m_ports();
});
it("exists", function () {
expect(ports.loaded(443)).toEqual(true)
});
it("doesn't exist", function () {
expect(ports.loaded(444)).toEqual(false)
});
it("is disabled", function () {
expect(ports.loaded(8081)).toEqual(true)
});
});
describe("ports.get_name", function () {
beforeEach(function () {
get_mock_m_ports();
});
it("doesn't exist", function () {
expect(ports.get_name(444)).toEqual("444");
});
it("exists", function () {
expect(ports.get_name(443)).toEqual("443 - https");
});
it("has an alias", function () {
expect(ports.get_name(3268)).toEqual("3268 - other name");
});
it("is disabled", function () {
expect(ports.get_name(8081)).toEqual("8081")
});
});
describe("ports.get_alias", function () {
beforeEach(function () {
get_mock_m_ports();
});
it("doesn't exist", function () {
expect(ports.get_alias(444)).toEqual("444");
});
it("exists", function () {
expect(ports.get_alias(443)).toEqual("https");
});
it("has an alias", function () {
expect(ports.get_alias(3268)).toEqual("other name");
});
it("is disabled", function () {
expect(ports.get_alias(8081)).toEqual("8081")
});
});
describe("ports.get_description", function () {
beforeEach(function () {
get_mock_m_ports();
});
it("doesn't exist", function () {
expect(ports.get_description(444)).toEqual("");
});
it("exists", function () {
expect(ports.get_description(443)).toEqual("http protocol over TLS/SSL");
});
it("has an alias", function () {
expect(ports.get_description(3268)).toEqual("other description");
});
it("is disabled", function () {
expect(ports.get_description(8081)).toEqual("")
});
});
describe("ports.get_protocols", function () {
beforeEach(function () {
get_mock_m_ports();
});
it("doesn't exist", function () {
expect(ports.get_protocols(444)).toEqual("");
});
it("exists", function () {
expect(ports.get_protocols(443)).toEqual("UDP,SCTP,TCP")
});
});
describe("ports.get_presentation", function () {
beforeEach(function () {
get_mock_m_ports();
});
it("doesn't exist", function () {
let link = ports.get_presentation(444);
expect(link.outerHTML).toEqual("<a>444</a>");
expect(typeof(link.onclick)).toBe("function");
});
it("exists", function () {
let link = ports.get_presentation(443);
expect(link.outerHTML).toEqual("<a data-tooltip=\"http protocol over TLS/SSL\">443 - https</a>")
expect(typeof(link.onclick)).toBe("function");
});
it("has alias", function () {
let link = ports.get_presentation(3268);
expect(link.outerHTML).toEqual("<a data-tooltip=\"other description\">3268 - other name</a>")
expect(typeof(link.onclick)).toBe("function");
});
it("is disabled", function () {
let link = ports.get_presentation(8081);
expect(link.outerHTML).toEqual("<a>8081</a>")
expect(typeof(link.onclick)).toBe("function");
});
    });
describe("ports.set", function () {
beforeEach(function () {
get_mock_m_ports();
});
it("creates new port", function () {
expect(ports.get_alias(4)).toEqual("4");
expect(ports.get_description(4)).toEqual("");
var new_port = {
active: 1,
name: "name1",
description: "desc1",
alias_name: "name2",
alias_description: "desc2",
};
ports.set(4, new_port);
expect(ports.get_alias(4)).toEqual("name2");
expect(ports.get_description(4)).toEqual("desc2");
});
it("update existing port", function () {
expect(ports.get_alias(443)).toEqual("https");
expect(ports.get_description(443)).toEqual("http protocol over TLS/SSL");
var new_port = {
active: 1,
name: "name1",
description: "desc1",
alias_name: "name2",
alias_description: "desc2",
};
ports.set(443, new_port);
expect(ports.get_alias(443)).toEqual("name2");
expect(ports.get_description(443)).toEqual("desc2");
});
});
describe("ports.request_add", function () {
beforeEach(function () {
get_mock_m_ports();
ports.private.requests = [];
});
it("add simple", function () {
ports.request_add(47);
ports.request_add(48);
ports.request_add(49);
ports.request_add(50);
expect(ports.private.requests).toEqual([47,48,49,50]);
});
it("add duplicates", function () {
ports.request_add(47);
ports.request_add(47);
ports.request_add(47);
ports.request_add(50);
expect(ports.private.requests).toEqual([47,47,47,50]);
});
it("add existing", function () {
ports.request_add(7680);
ports.request_add(3268);
ports.request_add(443);
ports.request_add(50);
expect(ports.private.requests).toEqual([50]);
});
});
describe("ports.request_submit", function () {
beforeEach(function () {
get_mock_m_ports();
spyOn(ports, "GET_portinfo");
});
it("removes duplicates mid", function () {
ports.private.requests = [40, 47, 47, 47, 50];
ports.request_submit();
expect(ports.GET_portinfo).toHaveBeenCalledTimes(1);
expect(ports.GET_portinfo).toHaveBeenCalledWith([40, 47, 50], undefined);
});
it("removes duplicates start", function () {
ports.private.requests = [47, 47, 47, 40, 50];
ports.request_submit();
expect(ports.GET_portinfo).toHaveBeenCalledTimes(1);
expect(ports.GET_portinfo).toHaveBeenCalledWith([40, 47, 50], undefined);
});
it("removes duplicates end", function () {
ports.private.requests = [40, 50, 47, 47, 47];
ports.request_submit();
expect(ports.GET_portinfo).toHaveBeenCalledTimes(1);
expect(ports.GET_portinfo).toHaveBeenCalledWith([40, 47, 50], undefined);
});
it("removes already loaded ports", function () {
ports.private.requests = [443, 3268, 50, 7680, 8081];
ports.request_submit();
expect(ports.GET_portinfo).toHaveBeenCalledTimes(1);
expect(ports.GET_portinfo).toHaveBeenCalledWith([50], undefined);
});
it("doesn't fire empty requests", function () {
ports.private.requests = [443, 3268, 7680, 8081];
ports.request_submit();
ports.private.requests = [443, 443, 443, 443];
ports.request_submit();
expect(ports.GET_portinfo).not.toHaveBeenCalled();
});
});
describe("show_edit_window", function () {
beforeEach(function () {
get_mock_m_ports();
pa_id = document.createElement("input");
pa_id.type = "checkbox";
pnum_id = document.createElement("span");
p_proto = document.createElement("span");
pn_id = document.createElement("td");
pd_id = document.createElement("td");
pan_id = document.createElement("input");
pan_id.type = "text";
pad_id = document.createElement("input");
pad_id.type = "text";
document.getElementById = function(query) {
if (query === "port_name") { return pn_id; }
if (query === "port_active") { return pa_id; }
if (query === "port_number") { return pnum_id; }
if (query === "port_protocols") { return p_proto; }
if (query === "port_alias_name") { return pan_id; }
if (query === "port_description") { return pd_id; }
if (query === "port_alias_description") { return pad_id; }
};
spyOn(ports, "GET_portinfo");
spyOn(ports.private, "edit");
});
it("loaded", function () {
ports.show_edit_window(443);
expect(ports.GET_portinfo).not.toHaveBeenCalled();
expect(ports.private.edit).toHaveBeenCalled();
});
it("not loaded", function () {
ports.show_edit_window(444);
expect(ports.GET_portinfo).toHaveBeenCalled();
expect(ports.private.edit).not.toHaveBeenCalled();
});
});
describe("ports.POST_portinfo", function () {
beforeEach(function () {
spyOn($, "ajax");
});
it("sends all request data", function () {
ports.POST_portinfo({'a': '123', 'b': '456'})
let expected = {
url: "./portinfo",
type: "POST",
data: {'a': '123', 'b': '456'},
error: generic_ajax_failure,
success: generic_ajax_success
};
expect($.ajax).toHaveBeenCalledWith(expected);
});
});
//don't know how to test this.
describe("ports.GET_portinfo", function () {
beforeEach(function () {
            spyOn($, "ajax");
        });
it("doesn't run with zero ports", function () {
ports.GET_portinfo([]);
expect($.ajax).not.toHaveBeenCalled();
});
it("runs ajax", function () {
ports.GET_portinfo([443]);
expect($.ajax).toHaveBeenCalledTimes(1);
});
});
describe("ports.private.GET_response", function () {
beforeEach(function () {
get_mock_m_ports();
ports.private.requests = [];
});
it("adds new ports", function () {
results = {
"389": {"protocols": "UDP,TCP", "description": "Lightweight Directory Access Protocol", "active": 1, "alias_name": "ldap", "port": 389, "alias_description": "Lightweight Directory Access Protocol", "name": "ldap"},
};
expect(ports.ports[389]).toBe(undefined);
ports.private.GET_response(results)
expect(ports.ports[389]).toEqual(results["389"]);
});
it("updates existing ports", function () {
results = {
"445": {"protocols": "UDP,TCP", "description": "Microsoft-DS", "active": 1, "alias_name": "microsoft-", "port": 445, "alias_description": "Microsoft-DS", "name": "microsoft-"}
};
expect(ports.ports[445]).toBe(undefined);
ports.private.GET_response(results)
expect(ports.ports[445]).toEqual(results["445"]);
results2 = {
"445": {"protocols": "AB", "description": "CD", "active": 1, "alias_name": "EF", "port": 445, "alias_description": "GH", "name": "IJ"}
};
ports.private.GET_response(results2)
expect(ports.ports[445]).toEqual(results2["445"]);
});
});
describe("ports.private.click", function () {
beforeEach(function () {
link = document.createElement("a");
link.onclick = ports.private.click;
spyOn(ports, "show_edit_window");
link.innerHTML = "443 - https";
link.click();
link.innerHTML = "444";
link.click();
link.innerHTML = "12345 - rainbow";
link.click();
});
it("show_window is called", function () {
expect(ports.show_edit_window).toHaveBeenCalledTimes(3);
});
it("extracts args", function () {
expect(ports.show_edit_window).toHaveBeenCalledWith(443);
expect(ports.show_edit_window).toHaveBeenCalledWith(444);
expect(ports.show_edit_window).toHaveBeenCalledWith(12345);
});
});
describe("ports.private.save", function () {
beforeEach(function () {
get_mock_m_ports();
m_portinfo = {};
pa_id = document.createElement("input");
pa_id.type = "checkbox";
pnum_id = document.createElement("span");
p_proto = document.createElement("span");
pn_id = document.createElement("td");
pd_id = document.createElement("td");
pan_id = document.createElement("input");
pan_id.type = "text";
pad_id = document.createElement("input");
pad_id.type = "text";
document.getElementById = function(query) {
if (query === "port_name") { return pn_id; }
if (query === "port_active") { return pa_id; }
if (query === "port_number") { return pnum_id; }
if (query === "port_protocols") { return p_proto; }
if (query === "port_alias_name") { return pan_id; }
if (query === "port_description") { return pd_id; }
if (query === "port_alias_description") { return pad_id; }
};
set_to = function(test_port) {
"use strict";
pnum_id.innerHTML = test_port;
ports.private.edit(test_port, ports.ports[test_port]);
};
spyOn(ports, "POST_portinfo");
});
it("doesn't get called with no changes.", function () {
set_to(3268);
ports.private.save();
expect(ports.POST_portinfo).not.toHaveBeenCalled();
});
it("gets called when 'active' changes.", function () {
set_to(3268);
pa_id.checked = false;
ports.private.save();
expect(ports.POST_portinfo).toHaveBeenCalled();
var changes = {port: 3268, active: 0};
expect(ports.ports[3268].active).toEqual(0);
expect(ports.POST_portinfo).toHaveBeenCalledWith(changes);
});
it("gets called when 'alias_name' changes.", function () {
set_to(3268);
pan_id.value = "new_alias";
ports.private.save();
expect(ports.POST_portinfo).toHaveBeenCalled();
var changes = {port: 3268, alias_name: "new_alias"};
expect(ports.ports[3268].alias_name).toEqual("new_alias");
expect(ports.POST_portinfo).toHaveBeenCalledWith(changes);
});
it("gets called when 'alias_description' changes.", function () {
set_to(3268);
pad_id.value = "new_desc";
ports.private.save();
expect(ports.POST_portinfo).toHaveBeenCalled();
var changes = {port: 3268, alias_description: "new_desc"};
expect(ports.ports[3268].alias_description).toEqual("new_desc");
expect(ports.POST_portinfo).toHaveBeenCalledWith(changes);
});
});
describe("ports.private.edit", function () {
beforeEach(function () {
get_mock_m_ports();
pa_id = document.createElement("input");
pa_id.type = "checkbox";
pnum_id = document.createElement("span");
p_proto = document.createElement("span");
pn_id = document.createElement("td");
pd_id = document.createElement("td");
pan_id = document.createElement("input");
pan_id.type = "text";
pad_id = document.createElement("input");
pad_id.type = "text";
document.getElementById = function(query) {
if (query === "port_name") { return pn_id; }
if (query === "port_active") { return pa_id; }
if (query === "port_number") { return pnum_id; }
if (query === "port_protocols") { return p_proto; }
if (query === "port_alias_name") { return pan_id; }
if (query === "port_description") { return pd_id; }
if (query === "port_alias_description") { return pad_id; }
};
set_to = function(test_port) {
"use strict";
pnum_id.innerHTML = test_port;
ports.private.edit(test_port, ports.ports[test_port]);
};
});
it("sets div values (exists)", function () {
"use strict";
ports.private.edit(3268, ports.ports[3268]);
expect(pa_id.checked).toEqual(true);
expect(pn_id.innerHTML).toEqual("msft-gc");
expect(pd_id.innerHTML).toEqual("Microsoft Global Catalog");
expect(pan_id.value).toEqual("other name");
expect(pad_id.value).toEqual("other description");
});
it("sets div values (unaliased)", function () {
"use strict";
ports.private.edit(8081, ports.ports[8081]);
expect(pa_id.checked).toEqual(false);
expect(pn_id.innerHTML).toEqual("sunproxyad");
expect(pd_id.innerHTML).toEqual("Sun Proxy Admin Service");
expect(pan_id.value).toEqual("");
expect(pad_id.value).toEqual("");
});
it("sets div values (not exists)", function () {
"use strict";
ports.private.edit(4, ports.ports[4]);
expect(pa_id.checked).toEqual(true);
expect(pn_id.innerHTML).toEqual("none");
expect(pd_id.innerHTML).toEqual("none");
expect(pan_id.value).toEqual("");
expect(pad_id.value).toEqual("");
});
});
});
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/spec/javascripts/map_ports_spec.js
| 0.782455 | 0.743075 |
map_ports_spec.js
|
pypi
|
from sam import common, integrity
class DBPlugin(object):
@staticmethod
def checkIntegrity(db):
"""
        Check whether the database is consistent; return a falsy value if it is.
        If the db is healthy, return something falsy, e.g. False, 0, [] or {}.
        If the db is unhealthy, return something useful for the fixIntegrity function,
        such as a list of missing tables.
        example: {'missing': ['s1_mytable', 's3_mytable'], 'malformed': ['s2_mytable']}
        :param db: database connection
        :return: False or equivalent iff consistent.
"""
raise NotImplementedError
@staticmethod
def fixIntegrity(db, errors):
"""
:param db: database connection
:param errors: the result of checkIntegrity function, ideally with helpful info on what to fix.
:return: True if fixed, False if failed to fix.
"""
raise NotImplementedError
@staticmethod
def simple_sub_table_check(*table_format_strings):
@staticmethod
def check(db):
all_tables = set(integrity.get_table_names(db))
subs = integrity.get_all_subs(db)
missing = []
for sub_id in subs:
for fstring in table_format_strings:
if fstring.format(acct=sub_id) not in all_tables:
missing.append(sub_id)
if not missing:
return {}
return {'missing': missing}
return check
@staticmethod
def simple_sub_table_fix(**sql_scripts):
@staticmethod
def fix(db, errors):
missing_table_subs = errors['missing']
for sub_id in missing_table_subs:
replacements = {
'acct': sub_id
}
with db.transaction():
if db.dbname in sql_scripts:
common.exec_sql(db, sql_scripts[db.dbname], replacements)
return True
return fix
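# Illustrative (hypothetical) usage of the two factories above: because the returned inner
# functions are wrapped in staticmethod, they can be assigned directly as class attributes of a
# plugin subclass. The sql_scripts keyword names must match db.dbname values; whether exec_sql
# takes a script path or raw SQL is an assumption here, not confirmed by this file.
#
#   class MyPlugin(DBPlugin):
#       checkIntegrity = DBPlugin.simple_sub_table_check('s{acct}_MyTable')
#       fixIntegrity = DBPlugin.simple_sub_table_fix(
#           mysql='sam/sql/create_mytable_mysql.sql',
#           sqlite='sam/sql/create_mytable_sqlite.sql')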
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/models/base.py
| 0.69946 | 0.284843 |
base.py
|
pypi
|
import web
from sam import common
from sam.models.links import Links
class Nodes(object):
default_environments = {'production', 'dev', 'inherit'}
def __init__(self, db, subscription):
"""
:type db: web.DB
:type subscription: int
:param db:
:param subscription:
"""
self.db = db
self.sub = subscription
self.table_nodes = 's{acct}_Nodes'.format(acct=self.sub)
self.table_tags = 's{acct}_Tags'.format(acct=self.sub)
if self.db.dbname == 'mysql':
self.divop = 'DIV'
else:
self.divop = '/'
def set_alias(self, address, alias):
"""
:param address: node address to edit. e.g. '10.20.30.40', '50.60', '192.0.0.0/24'
:type address: unicode
:param alias: name to use. string or none
:type alias: unicode or None
:return:
"""
r = common.determine_range_string(address)
where = {"ipstart": r[0], "ipend": r[1]}
self.db.update(self.table_nodes, where, alias=alias)
def get(self, address):
r = common.determine_range_string(address)
qvars = {"start": r[0], "end": r[1]}
rows = self.db.select(self.table_nodes, where="ipstart=$start and ipend=$end", vars=qvars)
return rows.first()
def set_tags(self, address, new_tags):
"""
Assigns a new set of tags to an address overwriting any existing tag assignments.
:param address: A string dotted-decimal IP address such as "192.168.2.100" or "21.66" or "1.2.0.0/16"
:param new_tags: A list of tag strings. e.g. ['tag_one', 'tag_two', 'tag_three']
:return: None
"""
what = "ipstart, ipend, tag"
r = common.determine_range_string(address)
row = {"ipstart": r[0], "ipend": r[1]}
where = "ipstart = $ipstart AND ipend = $ipend"
existing = list(self.db.select(self.table_tags, vars=row, what=what, where=where))
new_tags = set(new_tags)
old_tags = {x.tag for x in existing}
removals = old_tags - new_tags
additions = new_tags - old_tags
for tag in additions:
row['tag'] = tag
self.db.insert(self.table_tags, **row)
for tag in removals:
row['tag'] = tag
where = "ipstart = $ipstart AND ipend = $ipend AND tag = $tag"
self.db.delete(self.table_tags, where=where, vars=row)
def get_tags(self, address):
"""
        Gets all directly assigned tags and inherited parent tags for a given address.
:param address: A string dotted-decimal IP address such as "192.168.2.100" or "21.66" or "1.2.0.0/16"
:return: A dict of lists of strings, with keys 'tags' and 'p_tags'
where p_tags are inherited tags from parent nodes
"""
ipstart, ipend = common.determine_range_string(address)
where = 'ipstart <= $start AND ipend >= $end'
qvars = {'start': ipstart, 'end': ipend}
data = self.db.select(self.table_tags, vars=qvars, where=where)
parent_tags = []
tags = []
for row in data:
if row.ipend == ipend and row.ipstart == ipstart:
tags.append(row.tag)
else:
parent_tags.append(row.tag)
return {"p_tags": parent_tags, "tags": tags}
def get_tag_list(self):
return [row.tag for row in self.db.select(self.table_tags, what="DISTINCT tag") if row.tag]
def set_env(self, address, env):
r = common.determine_range_string(address)
where = {"ipstart": r[0], "ipend": r[1]}
self.db.update(self.table_nodes, where, env=env)
def get_env(self, address):
ipstart, ipend = common.determine_range_string(address)
where = 'ipstart <= $start AND ipend >= $end'
qvars = {'start': ipstart, 'end': ipend}
data = self.db.select(self.table_nodes, vars=qvars, where=where, what="ipstart, ipend, env")
parent_env = "production"
env = "inherit"
nearest_distance = -1
for row in data:
if row.ipend == ipend and row.ipstart == ipstart:
if row.env:
env = row.env
else:
dist = row.ipend - ipend + ipstart - row.ipstart
if nearest_distance == -1 or dist < nearest_distance:
if row.env and row.env != "inherit":
parent_env = row.env
return {"env": env, "p_env": parent_env}
def get_env_list(self):
envs = set(row.env for row in self.db.select(self.table_nodes, what="DISTINCT env", where="env IS NOT NULL") if row.env)
envs |= self.default_environments
return envs
    def delete_custom_tags(self):
        self.db.delete(self.table_tags, "1")
    def delete_custom_envs(self):
        self.db.update(self.table_nodes, "1", env=web.sqlliteral("NULL"))
    def delete_custom_hostnames(self):
        self.db.update(self.table_nodes, "1", alias=web.sqlliteral("NULL"))
def get_hostnames_preview(self):
return [row.alias for row in self.db.select(self.table_nodes, what="DISTINCT alias", where="alias IS NOT NULL", limit=10) if row.alias]
def get_root_nodes(self):
return list(self.db.select(self.table_nodes, where="subnet=8"))
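    # get_flat_nodes builds a temporary SQL view of the nodes that actually appear in the links
    # table (single hosts, or any node with an alias), selects those rows, and also selects
    # /8, /16 and /24 aggregates whose hosts all share a single alias. The two result sets are
    # combined by merge_groups() at the bottom of this module.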
def get_flat_nodes(self, ds):
link_model = Links(self.db, self.sub, ds)
view_name = "s{sub}_ds{ds}_nodes_view".format(sub=self.sub, ds=ds)
create_view = """
CREATE VIEW {v_nodes} AS SELECT DISTINCT ipstart, ipend, subnet, alias, env, x, y, radius
FROM {t_nodes} AS `n`
JOIN (SELECT src AS 'ip' from {t_links}
UNION
SELECT dst AS 'ip' from {t_links}) AS `lnks`
ON `lnks`.ip BETWEEN `n`.ipstart AND `n`.ipend
WHERE (`n`.ipstart=`n`.ipend OR alias IS NOT NULL)
ORDER BY ipstart ASC, ipend ASC;""".format(t_nodes=self.table_nodes,
t_links=link_model.table_links, v_nodes=view_name)
q_main = """SELECT ipstart, ipend, subnet, alias, env, x, y, radius
FROM {v_nodes} AS `n`;""".format(v_nodes=view_name)
q_groups = """
SELECT `us`.ipstart, `us`.ipend, `us`.subnet, `us`.alias, n.env, n.x, n.y, n.radius
FROM (SELECT u8.ipstart * 16777216 AS 'ipstart', u8.ipstart * 16777216 + 16777215 AS 'ipend', 8 AS 'subnet', u8.alias AS 'alias'
FROM (SELECT ipstart {div} 16777216 AS 'ipstart', MAX(alias) AS 'alias', COUNT(1) AS 'hosts', COUNT(DISTINCT alias) AS 'aliases'
FROM {v_nodes}
GROUP BY ipstart {div} 16777216
HAVING aliases = 1 AND hosts > 1
) AS `u8`
UNION
SELECT u16.ipstart * 65536 AS 'ipstart', u16.ipstart * 65536 + 65535 AS 'ipend', 16 AS 'subnet', u16.alias AS 'alias'
FROM (SELECT ipstart {div} 65536 AS 'ipstart', MAX(alias) AS 'alias', COUNT(1) AS 'hosts', COUNT(DISTINCT alias) AS 'aliases'
FROM {v_nodes}
GROUP BY ipstart {div} 65536
HAVING aliases = 1 AND hosts > 1
) AS `u16`
UNION
SELECT u24.ipstart * 256 AS 'ipstart', u24.ipstart * 256 + 255 AS 'ipend', 24 AS 'subnet', u24.alias AS 'alias'
FROM (SELECT ipstart {div} 256 AS 'ipstart', MAX(alias) AS 'alias', COUNT(1) AS 'hosts', COUNT(DISTINCT alias) AS 'aliases'
FROM {v_nodes}
GROUP BY ipstart {div} 256
HAVING aliases = 1 AND hosts > 1
) AS `u24`
) AS `us`
JOIN {t_nodes} AS `n`
ON n.ipstart = us.ipstart AND n.ipend = us.ipend
ORDER BY `us`.subnet ASC;""".format(v_nodes=view_name, t_nodes=self.table_nodes, div=self.divop)
q_drop = "DROP VIEW IF EXISTS {v_nodes}".format(v_nodes=view_name)
t = self.db.transaction()
try:
# drop view
self.db.query(q_drop)
# create view
self.db.query(create_view)
# run selects
main_rows = list(self.db.query(q_main))
group_rows = list(self.db.query(q_groups))
# drop view
self.db.query(q_drop)
except:
t.rollback()
raise
else:
t.commit()
return merge_groups(main_rows, group_rows)
def get_children(self, address):
ip_start, ip_end = common.determine_range_string(address)
diff = ip_end - ip_start
if diff > 16777215:
subnet = 8
elif diff > 65536:
subnet = 16
elif diff > 255:
subnet = 24
elif diff > 0:
subnet = 32
else:
return []
where = "subnet={2} AND ipstart BETWEEN {0} AND {1}".format(ip_start, ip_end, subnet)
rows = self.db.select(self.table_nodes, where=where)
return list(rows)
def get_all_endpoints(self):
rows = self.db.select(self.table_nodes, what='ipstart', where='subnet=32')
return [row['ipstart'] for row in rows]
def get_all(self):
rows = self.db.select(self.table_nodes, what='ipstart')
return [row['ipstart'] for row in rows]
def delete_hosts(self, hostlist):
"""
:param hostlist: list of host numbers (32-bit int) to delete
:type hostlist: list[int]
:return: number of deleted nodes.
"""
collection = "({})".format(','.join(map(str, map(int, hostlist))))
        # the int cast forces an exception if any value is not an integer (e.g. a SQL injection attempt).
        # Roughly equivalent to: collection = str(tuple(map(int, hostlist)))
deleted = self.db.delete(self.table_nodes, where='ipstart=ipend and ipstart IN {hosts}'.format(hosts=collection))
# the deleted endpoints may have (now childless) aggregate parents
# TODO: delete childless parent nodes (e.g. subnets like /24)
return deleted
def delete_collection(self, nodes):
"""
:type self: Nodes
:type nodes: list[str]
:param nodes: list of nodes to delete ("192"|"127.0.0.0/16"|"1.2.3"). (their children will not be deleted)
:return: The number of nodes deleted
:rtype: int
"""
deleted = 0
for node in nodes:
low, high = common.determine_range_string(node)
where = 'ipstart={low} and ipend={high}'.format(low=low, high=high)
deleted += self.db.delete(self.table_nodes, where=where)
return deleted
def merge_groups(main, groups):
    """
    Merge individual host rows with the aliased aggregate rows produced by get_flat_nodes.
    All /32 hosts from `main` are kept, and larger nodes from `main` are indexed by ipstart;
    rows from `groups` are added only if they neither sit inside an already-kept aggregate
    nor contain an aggregate that was kept from `main`.
    """
keepers = {}
nodes = []
for node in main:
if node['subnet'] == 32:
nodes.append(node)
else:
keepers[node['ipstart']] = node
keys = [v['ipstart'] for v in keepers.values()]
for node in groups:
if (node['ipstart'] & 0xff000000) in keepers:
continue
elif (node['ipstart'] & 0xffff0000) in keepers:
continue
elif (node['ipstart'] & 0xffffff00) in keepers:
continue
if any([node['ipstart'] <= k <= node['ipend'] for k in keys]):
continue
keepers[node['ipstart']] = node
nodes.extend(keepers.values())
return nodes
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/models/nodes.py
| 0.679072 | 0.262605 |
nodes.py
|
pypi
|
import os
import cPickle
import web
from sam.models.security import rule_template, rule
class Rules():
TABLE_FORMAT = "s{}_Rules"
def __init__(self, db, sub_id):
"""
:param db: database connection
:type db: web.DB
:param sub_id: subscription id
:type sub_id: int
"""
self.db = db
self.sub = sub_id
self.table = Rules.TABLE_FORMAT.format(self.sub)
self.rules = []
def clear(self):
return self.db.delete(self.table, where="1")
def count(self):
rows = self.db.select(self.table, what="COUNT(0) AS 'count'")
return rows.first()['count']
def decode_row(self, row):
if 'active' in row:
row['active'] = row['active'] == 1
if 'params' in row:
row['params'] = cPickle.loads(str(row['params']))
return row
def row_to_rule(self, row):
rule_obj = rule.Rule(row['id'], row['active'], row['name'], row['description'], row['rule_path'])
params = row.get('params', {})
action_params = params.get('actions', {})
exposed_params = params.get('exposed', {})
rule_obj.set_action_params(action_params)
rule_obj.set_exposed_params(exposed_params)
return rule_obj
def add_rule(self, path, name, description, params):
"""
:param path: file name: 'compromised.yml', 'plugin: compromised.yml', 'custom: compromised.yml'
:type path: str
:param name: Short rule name
:type name: str
:param description: Long description of rule
:type description: str
:param params: default parameters to use for rule customization
:type params: dict
"""
valid_path = rule_template.abs_rule_path(path)
if valid_path is None:
print("Rule definition path cannot be verified. Saving anyway.")
if not isinstance(name, (str, unicode)) or len(name) == 0:
raise ValueError("Name cannot be empty.")
name = name.strip()
if not isinstance(description, (str, unicode)):
raise ValueError("Description must be a string.")
description = description.strip()
r_id = self.db.insert(self.table, active=False, rule_path=path,
name=name, description=description,
params=cPickle.dumps(params))
return r_id
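    # Illustrative usage (hypothetical values; `params` follows the
    # {'actions': {...}, 'exposed': {...}} shape consumed by row_to_rule):
    #   rid = Rules(db, sub_id).add_rule('compromised.yml', 'Compromised hosts',
    #                                    'Flags traffic involving known-bad hosts',
    #                                    {'actions': {}, 'exposed': {}})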
def get_all_rules(self):
"""
:return: All the security rules, briefly
:rtype: list[ rule.Rule ]
"""
rows = list(self.db.select(self.table, what="id, rule_path, active, name, description"))
decoded = map(self.decode_row, rows)
rule_objs = list(map(self.row_to_rule, decoded))
return rule_objs
def get_ruleset(self):
"""
:return: All the active security rules, fully
:rtype: list[ rule.Rule ]
"""
rows = list(self.db.select(self.table, where="active=1"))
decoded = map(self.decode_row, rows)
rule_objs = list(map(self.row_to_rule, decoded))
return rule_objs
def get_rule(self, rule_id):
qvars = {
'rid': rule_id
}
rows = self.db.select(self.table, where='id=$rid', vars=qvars, limit=1)
row = rows.first()
if row is None:
return None
self.decode_row(row)
rule_obj = self.row_to_rule(row)
return rule_obj
def delete_rule(self, rule_id):
qvars = {
'rid': rule_id
}
num_rows_deleted = self.db.delete(self.table, where='id=$rid', vars=qvars)
return num_rows_deleted
def edit_rule(self, rule_id, edits):
qvars = {
'rid': rule_id
}
if 'actions' in edits or 'exposed' in edits:
actions = edits.pop('actions', {})
exposed = edits.pop('exposed', {})
old_rule = self.get_rule(rule_id)
old_rule.set_action_params(actions)
old_rule.set_exposed_params(exposed)
params = old_rule.export_params()
edits['params'] = cPickle.dumps(params)
q = self.db.update(self.table, where='id=$rid', vars=qvars, _test=True, **edits)
self.db.update(self.table, where='id=$rid', vars=qvars, **edits)
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/models/security/rules.py
| 0.442637 | 0.151216 |
rules.py
|
pypi
|
from sam import common
import re
from sam import errors
import base
import sam.models.details
import sam.models.nodes
import sam.models.links
# This class is for getting the main selection details, such as ins, outs, and ports.
def nice_protocol(strings, p_in, p_out):
"""
    :param strings: localized strings object providing the table_proto_i/_o/_io labels
    :param p_in: comma-separated protocol list for inbound connections
    :type p_in: unicode
    :param p_out: comma-separated protocol list for outbound connections
    :type p_out: unicode
    :return: user-friendly string describing in-/outbound protocols
    :rtype: unicode
"""
pin = p_in.split(u',') if p_in else []
pout = p_out.split(u',') if p_out else []
protocols = set(pin) | set(pout)
protocols.discard(u'')
ins = []
outs = []
both = []
for p in protocols:
if p in pin and p in pout:
both.append(u'{} {}'.format(p, strings.table_proto_io))
elif p in pin:
ins.append(u'{} {}'.format(p, strings.table_proto_i))
else:
outs.append(u'{} {}'.format(p, strings.table_proto_o))
return u', '.join(ins+both+outs)
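# Illustrative example (assuming strings.table_proto_i == u'in', strings.table_proto_o == u'out'
# and strings.table_proto_io == u'in/out'):
#   nice_protocol(strings, u'TCP,UDP', u'TCP')  ->  u'UDP in, TCP in/out'
# (protocol order may vary because the protocols are collected into a set)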
def si_formatting(strings, f, places=2):
"""
    :param strings: localized strings object providing the units_kilo/mega/giga prefixes
    :param f: real number, the value to express
    :type f: float
    :param places: number of decimal places to keep
    :type places: int
    :return: string with a K/M/G-style postfix
    :rtype: unicode
"""
format_string = u'{{val:.{places}f}}{{prefix}}'.format(places=places)
f = float(f)
if f < 1000:
return format_string.format(val=f, prefix=u'')
f /= 1000
if f < 1000:
return format_string.format(val=f, prefix=strings.units_kilo)
f /= 1000
if f < 1000:
return format_string.format(val=f, prefix=strings.units_mega)
f /= 1000
return format_string.format(val=f, prefix=strings.units_giga)
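# Illustrative example (assuming strings.units_kilo == u'K' and strings.units_mega == u'M'):
#   si_formatting(strings, 950)      ->  u'950.00'
#   si_formatting(strings, 1234567)  ->  u'1.23M'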
class Details(base.headless):
"""
The expected GET data includes:
'address': dotted-decimal IP addresses.
Each address is only as long as the subnet,
so 12.34.0.0/16 would be written as 12.34
'ds': string, specify the data source, ex: "ds_19_"
'tstart': optional. Used with 'tend'. The start of the time range to report links during.
'tend': optional. Used with 'tstart'. The end of the time range to report links during.
    :return: A JSON-encoded dictionary where
        the keys are ['inputs', 'outputs', 'ports', 'unique_in', 'unique_out', 'unique_ports']
        (or just the requested components) and the values are numbers or objects
"""
default_page = 1
default_page_size = 50
def __init__(self):
base.Headless.__init__(self)
# self.ip_range = (0, 4294967295)
self.page_size = Details.default_page_size
self.detailsModel = None
self.nodesModel = sam.models.nodes.Nodes(common.db, self.page.user.viewing)
self.linksModel = None # set during decode_get_request()
def decode_get_request(self, data):
# data source
if "ds" in data:
ds_match = re.search("(\d+)", data['ds'])
if ds_match:
ds = int(ds_match.group())
else:
raise errors.MalformedRequest("Could not read data source ('ds')")
else:
raise errors.RequiredKey('data source', 'ds')
self.linksModel = sam.models.links.Links(common.db, self.page.user.viewing, ds)
# port filter
port = data.get('port')
# time range
try:
tstart = int(data.get('tstart'))
tend = int(data.get('tend'))
except ValueError:
raise errors.MalformedRequest("Time range ({0} .. {1}) cannot be read. Check formatting"
.format(data.get('tstart'), data.get('tend')))
except (KeyError, TypeError):
t_range = self.linksModel.get_timerange()
tstart = t_range['min']
tend = t_range['max']
# address
address = data.get('address')
if not address:
raise errors.RequiredKey('address', 'address')
# pagination
try:
page = int(data.get('page', self.default_page))
except ValueError:
raise errors.MalformedRequest("Could not read page number: {0}".format(data.get('page')))
try:
page_size = int(data.get('page_size', self.default_page_size))
except ValueError:
raise errors.MalformedRequest("Could not read page size: {0}".format(data.get('page_size')))
order = data.get('order')
simple = data.get('simple', False) == "true"
components = data.get('component', [])
if components:
components = components.split(',')
self.page_size = page_size
self.detailsModel = sam.models.details.Details(common.db, self.page.user.viewing, ds, address, (tstart, tend), port, page_size)
request = {
'ds': ds,
'address': address,
'page': page,
'page_size': page_size,
'order': order,
'simple': simple,
'components': components,
'time_range': (tstart, tend)
}
return request
def perform_get_command(self, request):
"""
request = {
'ds': ds,
'address': ips "10.20",
'page': page,
'page_size': page_size,
'order': order,
'simple': simple,
'components': components,
'time_range': (tstart, tend)
}
:param request:
:return:
"""
self.page.require_group('read')
details = {}
if request['components']:
for c_name in request['components']:
if c_name == 'quick_info':
details[c_name] = self.quick_info(request['address'])
elif c_name == 'inputs':
details[c_name] = self.inputs(request['page'],
request['order'],
request['simple'])
elif c_name == 'outputs':
details[c_name] = self.outputs(request['page'],
request['order'],
request['simple'])
elif c_name == 'ports':
details[c_name] = self.ports(request['page'],
request['order'])
elif c_name == 'children':
details[c_name] = self.children(request['page'],
request['order'])
elif c_name == 'summary':
details[c_name] = self.summary()
else:
details[c_name] = {"result": "No component matches request for {0}".format(c_name)}
else:
details = self.selection_info(request['page'], request['order'], request['simple'])
return details
def encode_get_response(self, response):
return response
@staticmethod
def nice_ip_address(address):
ip_start, ip_end = common.determine_range_string(address)
subnet = 32 - (ip_end - ip_start).bit_length()
return "{0}/{1}".format(common.IPtoString(ip_start), subnet)
def quick_info(self, address):
info = {}
node_info = self.detailsModel.get_metadata()
info['address'] = self.nice_ip_address(address)
if node_info:
tags = self.nodesModel.get_tags(address)
envs = self.nodesModel.get_env(address)
# node_info has:
# hostname
# unique_out_ip
# unique_out_conn
# overall_bps
# total_out
# out_bytes_sent
# out_bytes_received
# out_packets_sent
# out_packets_received
# unique_in_ip
# unique_in_conn
# total_in
# in_bytes_sent
# in_bytes_received
# in_packets_sent
# in_packets_received
# ports_used
# endpoints
# seconds
info['name'] = node_info.hostname
info['tags'] = tags
info['envs'] = envs
info['protocols'] = nice_protocol(self.page.strings, node_info.in_protocols, node_info.out_protocols)
info['bps'] = node_info.overall_bps
info['in'] = {}
info['in']['total'] = node_info.total_in
info['in']['u_ip'] = node_info.unique_in_ip
info['in']['u_conn'] = node_info.unique_in_conn
info['in']['seconds'] = node_info.seconds
if not node_info.in_bytes_sent and not node_info.in_bytes_received:
info['in']['bytes_sent'] = 0
info['in']['bytes_received'] = 0
else:
info['in']['bytes_sent'] = node_info.in_bytes_sent
info['in']['bytes_received'] = node_info.in_bytes_received
info['in']['max_bps'] = node_info.in_max_bps if node_info.in_max_bps else 0
info['in']['avg_bps'] = node_info.in_avg_bps if node_info.in_avg_bps else 0
if not node_info.in_packets_sent and not node_info.in_packets_received:
info['in']['packets_sent'] = 0
info['in']['packets_received'] = 0
else:
info['in']['packets_sent'] = node_info.in_packets_sent
info['in']['packets_received'] = node_info.in_packets_received
info['in']['duration'] = node_info.in_duration
info['out'] = {}
info['out']['total'] = node_info.total_out
info['out']['u_ip'] = node_info.unique_out_ip
info['out']['u_conn'] = node_info.unique_out_conn
info['out']['seconds'] = node_info.seconds
if not node_info.out_bytes_sent and not node_info.out_bytes_received:
info['out']['bytes_sent'] = 0
info['out']['bytes_received'] = 0
else:
info['out']['bytes_sent'] = node_info.out_bytes_sent
info['out']['bytes_received'] = node_info.out_bytes_received
info['out']['max_bps'] = node_info.out_max_bps if node_info.out_max_bps else 0
info['out']['avg_bps'] = node_info.out_avg_bps if node_info.out_avg_bps else 0
if not node_info.out_packets_sent and not node_info.out_packets_received:
info['out']['packets_sent'] = 0
info['out']['packets_received'] = 0
else:
info['out']['packets_sent'] = node_info.out_packets_sent
info['out']['packets_received'] = node_info.out_packets_received
info['out']['duration'] = node_info.out_duration
        info['role'] = float(node_info.total_in) / max(1, node_info.total_in + node_info.total_out)  # share of connections that are inbound
info['ports'] = node_info.ports_used
info['endpoints'] = int(node_info.endpoints)
else:
info['error'] = self.page.strings.meta_none
return info
def inputs(self, page, order, simple):
inputs = self.detailsModel.get_details_connections(inbound=True, page=page, order=order, simple=simple)
if simple:
headers = [
['src', self.page.strings.meta_src],
['port', self.page.strings.meta_port],
['links', self.page.strings.meta_links]
]
else:
headers = [
['src', self.page.strings.meta_src],
['dst', self.page.strings.meta_dst],
['port', self.page.strings.meta_port],
['links', self.page.strings.meta_links],
# ['protocols', self.page.strings.meta_protocols],
['sum_bytes', self.page.strings.meta_sum_bytes],
# ['avg_bytes', self.page.strings.meta_avg_bytes],
['sum_packets', self.page.strings.meta_sum_packets],
# ['avg_packets', self.page.strings.meta_avg_packets],
['avg_duration', self.page.strings.meta_avg_duration],
]
# convert list of dicts to ordered list of values
minutes = float(self.request['time_range'][1] - self.request['time_range'][0]) / 60.0
minutes = max(minutes, 1.0)
conn_in = []
for row in inputs:
conn_row = []
for h in headers:
if h[0] == 'links':
conn_row.append(si_formatting(self.page.strings, float(row['links']) / minutes))
else:
conn_row.append(row[h[0]])
conn_in.append(conn_row)
response = {
"page": page,
"page_size": self.request['page_size'],
"order": order,
"direction": "desc",
"component": "inputs",
"headers": headers,
"rows": conn_in
}
return response
def outputs(self, page, order, simple):
outputs = self.detailsModel.get_details_connections(inbound=False, page=page, order=order, simple=simple)
if simple:
headers = [
['dst', self.page.strings.meta_dst],
['port', self.page.strings.meta_port],
['links', self.page.strings.meta_links]
]
else:
headers = [
['src', self.page.strings.meta_src],
['dst', self.page.strings.meta_dst],
['port', self.page.strings.meta_port],
['links', self.page.strings.meta_links],
# ['protocols', self.page.strings.meta_protocols],
['sum_bytes', self.page.strings.meta_sum_bytes],
# ['avg_bytes', self.page.strings.meta_avg_bytes],
['sum_packets', self.page.strings.meta_sum_packets],
# ['avg_packets', self.page.strings.meta_avg_packets],
['avg_duration', self.page.strings.meta_avg_duration],
]
minutes = float(self.request['time_range'][1] - self.request['time_range'][0]) / 60.0
minutes = max(minutes, 1.0)
conn_out = []
for row in outputs:
conn_row = []
for h in headers:
if h[0] == 'links':
conn_row.append(si_formatting(self.page.strings, float(row['links']) / minutes))
else:
conn_row.append(row[h[0]])
conn_out.append(conn_row)
response = {
"page": page,
"page_size": self.request['page_size'],
"order": order,
"direction": "desc",
"component": "outputs",
"headers": headers,
"rows": conn_out
}
return response
def ports(self, page, order):
ports = self.detailsModel.get_details_ports(page, order)
headers = [
['port', self.page.strings.meta_ports],
['links', self.page.strings.meta_links]
]
minutes = float(self.request['time_range'][1] - self.request['time_range'][0]) / 60.0
minutes = max(minutes, 1.0)
ports_in = []
for row in ports:
conn_row = []
for h in headers:
if h[0] == 'links':
conn_row.append(si_formatting(self.page.strings, float(row['links']) / minutes))
else:
conn_row.append(row[h[0]])
ports_in.append(conn_row)
response = {
"page": page,
"page_size": self.request['page_size'],
"order": order,
"component": "ports",
"headers": headers,
"rows": ports_in
}
return response
def children(self, page, order):
children = self.detailsModel.get_details_children(order)
first = (page - 1) * self.page_size
last = first + self.page_size
response = {
"page": page,
"page_size": self.request['page_size'],
"order": order,
"count": len(children),
"component": "children",
"headers": [
['ipstart', self.page.strings.meta_child_ip],
['hostname', self.page.strings.meta_child_name],
['endpoints', self.page.strings.meta_child_count],
['ratio', self.page.strings.meta_child_ratio]
],
"rows": children[first:last]
}
return response
def summary(self):
summary = self.detailsModel.get_details_summary()
return summary
def selection_info(self, page, order, simple):
# called for selections in the map pane
summary = self.summary()
details = {'unique_out': summary.unique_out,
'unique_in': summary.unique_in,
'unique_ports': summary.unique_ports,
'inputs': self.inputs(page, order, simple),
'outputs': self.outputs(page, order, simple),
'ports': self.ports(page, order)}
return details
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/pages/details.py
| 0.621426 | 0.314169 |
details.py
|
pypi
|
import base
import sam.models.ports
from sam import errors
from sam import common
# This class is for getting the aliases for a port number
class Portinfo(base.headless_post):
"""
The expected GET data includes:
        'port': comma-separated list of port numbers
A request for ports 80, 443, and 8080
would look like: "80,443,8080"
:return: A JSON-encoded dictionary where
the keys are the requested ports and
the values are dictionaries describing the port's attributes
The expected POST data includes:
'port': The port to set data upon
'alias_name': optional, the new short name to give that port
'alias_description': optional, the new long name to give that port
'active': optional, (1 or 0) where 1 means use the name and 0 means use the number for display.
:return: A JSON-encoded dictionary with one key "result" and a value of success or error.
"""
def __init__(self):
super(Portinfo, self).__init__()
self.portModel = sam.models.ports.Ports(common.db, self.page.user.viewing)
def decode_get_request(self, data):
port_string = data.get('port')
if not port_string:
raise errors.RequiredKey('port', 'port')
try:
ports = [int(port) for port in port_string.split(',') if port]
except ValueError:
raise errors.MalformedRequest("Could not read port ('port') number. Use comma delimited list.")
return {'ports': ports}
def perform_get_command(self, request):
self.page.require_group('read')
portModel = sam.models.ports.Ports(common.db, self.page.user.viewing)
ports = portModel.get(request['ports'])
return ports
def encode_get_response(self, response):
return {str(i['port']): i for i in response}
def decode_post_request(self, data):
port_string = data.get('port')
if not port_string:
raise errors.RequiredKey('port', 'port')
try:
request = {'port': int(port_string)}
except (ValueError, TypeError):
raise errors.MalformedRequest("Could not read port ('port') number.")
if 'alias_name' in data:
request['alias_name'] = data['alias_name']
if 'alias_description' in data:
request['alias_description'] = data['alias_description']
if 'active' in data:
request['active'] = data['active']
return request
def perform_post_command(self, request):
self.page.require_group('write')
port = request.pop('port')
self.portModel.set(port, request)
return 'success'
def encode_post_response(self, response):
return {'result': response}
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/pages/portinfo.py
| 0.615435 | 0.375477 |
portinfo.py
|
pypi
|
from sam import errors
import re
import base64
import base
import sam.models.settings
import sam.models.datasources
import sam.models.livekeys
import sam.models.nodes
import sam.models.links
import sam.models.upload
from sam import common
def nice_name(s):
s = re.sub("([a-z])([A-Z]+)", lambda x: "{0} {1}".format(x.group(1), x.group(2)), s)
s = s.replace("_", " ")
s = re.sub("\s+", ' ', s)
return s.title()
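# Illustrative example: nice_name("live_updateInterval") -> "Live Update Interval"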
class Settings(base.headless_post):
recognized_commands = ["ds_name", "ds_live", "ds_interval", "ds_flat",
"ds_new", "ds_rm", "ds_select", "rm_hosts",
"rm_tags", "rm_envs", "rm_conns", "upload",
"del_live_key", "add_live_key"]
def __init__(self):
super(Settings, self).__init__()
self.settingsModel = sam.models.settings.Settings(common.db, self.page.session, self.page.user.viewing)
self.dsModel = sam.models.datasources.Datasources(common.db, self.page.session, self.page.user.viewing)
self.livekeyModel = sam.models.livekeys.LiveKeys(common.db, self.page.user.viewing)
self.nodesModel = sam.models.nodes.Nodes(common.db, self.page.user.viewing)
self.linksModel = None
self.uploadModel = None
def decode_get_request(self, data):
return None
def perform_get_command(self, request):
settings = self.settingsModel.copy()
datasources = self.dsModel.datasources
response = {'settings': settings,
'datasources': datasources}
return response
def encode_get_response(self, response):
result = response['settings']
result['datasources'] = response['datasources']
return result
@staticmethod
def decode_datasource(param):
ds = None
if param is None:
return None
ds_match = re.search("(\d+)", param)
if ds_match:
try:
ds = int(ds_match.group())
except (ValueError, TypeError):
pass
return ds
def decode_post_request(self, data):
request = {}
command = data.get('command')
if not command:
raise errors.RequiredKey('command', 'command')
request['command'] = command
if command not in self.recognized_commands:
raise errors.MalformedRequest("Unrecognized command: '{0}'".format(command))
if command in ('ds_rm', 'ds_select', 'rm_conns', 'ds_name', 'ds_live', 'ds_interval',
'ds_flat', 'upload', 'add_live_key'):
ds = self.decode_datasource(data.get('ds'))
if not ds:
raise errors.RequiredKey('datasource', 'ds')
request['ds'] = ds
if command == 'ds_name':
request['name'] = data.get('name')
elif command == 'ds_live':
active = data.get('is_active')
if active is None:
request['is_active'] = None
else:
request['is_active'] = active == 'true'
elif command == 'ds_interval':
try:
request['interval'] = int(data.get('interval', 'e'))
except ValueError:
raise errors.MalformedRequest("Could not interpret auto-refresh interval from '{0}'"
.format(repr(data.get('interval'))))
elif command == 'ds_flat':
flat = data.get('is_flat')
if flat is None:
request['is_flat'] = None
else:
request['is_flat'] = flat == 'true'
elif command == 'ds_new':
request['name'] = data.get('name')
elif command == 'upload':
request['format'] = data.get('format')
request['file'] = data.get('file')
elif command == "del_live_key":
request['key'] = data.get('key')
if None in request.values():
raise errors.MalformedRequest("Could not parse arguments for command.")
return request
def perform_post_command(self, request):
"""
        Action                   Command           Variables
        ------                   -------           ---------
        rename data source       "ds_name"         (ds, name)
        toggle auto-refresh      "ds_live"         (ds, is_active)
        auto-refresh interval    "ds_interval"     (ds, interval)
        toggle flat view         "ds_flat"         (ds, is_flat)
        new data source          "ds_new"          (name)
        remove data source       "ds_rm"           (ds)
        select data source       "ds_select"       (ds)
        delete hostnames         "rm_hosts"        ()
        delete tags              "rm_tags"         ()
        delete environments      "rm_envs"         ()
        delete connections       "rm_conns"        (ds)
        upload log               "upload"          (ds, format, file)
        add live key             "add_live_key"    (ds)
        delete live key          "del_live_key"    (key)
        see also: self.recognized_commands
"""
self.page.require_group('write')
command = request['command']
if command == 'ds_name':
self.dsModel.set(request['ds'], name=request['name'])
elif command == 'ds_live':
db_active = 1 if request['is_active'] else 0
self.dsModel.set(request['ds'], ar_active=db_active)
elif command == 'ds_interval':
self.dsModel.set(request['ds'], ar_interval=request['interval'])
elif command == 'ds_flat':
ds_flat = 1 if request['is_flat'] else 0
self.dsModel.set(request['ds'], flat=ds_flat)
elif command == 'ds_new':
self.dsModel.create_datasource(request['name'])
elif command == 'ds_rm':
self.dsModel.remove_datasource(request['ds'])
elif command == 'ds_select':
self.settingsModel['datasource'] = request['ds']
elif command == 'rm_hosts':
self.nodesModel.delete_custom_hostnames()
elif command == 'rm_tags':
self.nodesModel.delete_custom_tags()
elif command == 'rm_envs':
self.nodesModel.delete_custom_envs()
elif command == 'rm_conns':
self.linksModel = sam.models.links.Links(common.db, self.page.user.viewing, request['ds'])
self.linksModel.delete_connections()
elif command == 'upload':
b64start = request['file'].find(",")
if b64start == -1:
raise errors.MalformedRequest("Could not decode file")
log_file = base64.b64decode(request['file'][b64start + 1:])
self.uploadModel = sam.models.upload.Uploader(common.db, self.page.user.viewing,
request['ds'], request['format'])
self.uploadModel.import_log(log_file)
elif command == 'add_live_key':
self.livekeyModel.create(request['ds'])
elif command == 'del_live_key':
self.livekeyModel.delete(request['key'])
return "success"
def encode_post_response(self, response):
encoded = {'result': response,
'settings': self.settingsModel.copy(),
'datasources': self.dsModel.sorted_list(),
'livekeys': self.livekeyModel.read()}
return encoded
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/pages/settings.py
| 0.503906 | 0.156201 |
settings.py
|
pypi
|
import re
import base
import sam.models.nodes
from sam import errors
from sam import common
# This class is for getting the child nodes of all nodes in a node list, for the map
class Nodes(base.headless_post):
"""
The expected GET data includes:
        'address': comma-separated list of dotted-decimal IP addresses.
Each address is only as long as the subnet,
so 12.34.0.0/16 would be written as 12.34
A request for 1.2.3.0/24, 192.168.0.0/16, and 21.0.0.0/8
would be "1.2.3,192.168,21"
:return: A JSON-encoded dictionary where
the keys are the supplied addresses (or _ if no address) and
the values are a list of child nodes.
POST Expects a query string including:
node: ip address
like "189.179.4.0/24"
or "189.179" ( == 189.179.0.0/16)
or "189.2.3/8" ( == 189.0.0.0/8)
alias: (optional) new alias string for the node
tags: (optional) comma separated string of tags to associate with this node
env: (optional) string, this host's environment category
:return:
"""
def __init__(self):
base.HeadlessPost.__init__(self)
self.flatmode_tolerance = 256
self.nodesModel = sam.models.nodes.Nodes(common.db, self.page.user.viewing)
def check_flat_tolerance(self):
endpoints = self.nodesModel.get_all_endpoints()
count = len(endpoints)
return count <= self.flatmode_tolerance
def decode_get_request(self, data):
addresses = []
address_str = data.get('address')
if address_str:
addresses = address_str.split(',')
addresses = filter(lambda x: bool(x), addresses)
flat = data.get('flat', 'false').lower() == 'true'
if 'ds' in data:
ds_match = re.search('(\d+)', data['ds'])
if ds_match:
ds = int(ds_match.group())
else:
raise errors.MalformedRequest("Could not read data source ('ds')")
else:
raise errors.RequiredKey('data source', 'ds')
return {'addresses': addresses, 'flat': flat, 'ds': ds}
def perform_get_command(self, request):
self.page.require_group('read')
if request['flat']:
if self.check_flat_tolerance():
response = {'flat': self.nodesModel.get_flat_nodes(request['ds'])}
else:
response = {'error': 'Flat mode is not supported once a graph has exceeded {} hosts.'.format(self.flatmode_tolerance)}
elif len(request['addresses']) == 0:
response = {'_': self.nodesModel.get_root_nodes()}
else:
response = {address: self.nodesModel.get_children(address) for address in request['addresses']}
return response
def encode_get_response(self, response):
return response
def decode_post_request(self, data):
node = data.get('node')
if not node:
raise errors.RequiredKey('node', 'node')
alias = data.get('alias')
tags = data.get('tags')
env = data.get('env')
request = {'node': node}
if alias is not None:
request['alias'] = alias
if tags is not None:
request['tags'] = tags
if env is not None:
request['env'] = env
return request
def perform_post_command(self, request):
self.page.require_group('write')
node = request.pop('node')
for key, value in request.iteritems():
if key == 'alias':
self.nodesModel.set_alias(node, value)
elif key == 'tags':
tags = filter(lambda x: bool(x), value.split(','))
self.nodesModel.set_tags(node, tags)
elif key == 'env':
if value:
self.nodesModel.set_env(node, value)
else:
self.nodesModel.set_env(node, None)
else:
print("Error in nodeinfo, unrecognized assignment {0} = {1}".format(key, value))
return 0, "Success"
def encode_post_response(self, response):
return {'code': response[0], 'message': response[1]}
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/pages/nodes.py
| 0.555797 | 0.423696 |
nodes.py
|
pypi
|
import math
import re
from datetime import datetime
from sam import errors, common
from sam.pages import base
from sam.models.security import alerts
def time_to_seconds(tstring):
"""
Converts a period of time (expressed as a string) to seconds.
    :param tstring: time period as a string; use counts of years/weeks/days/hours/minutes/seconds
        examples: "1 year", "3 min, 6 hr, 1 second 1 year 20 weeks", "500 seconds"
:type tstring: str
:return: number of seconds
:rtype: int
"""
matches = Alerts.TIME_REGEX.findall(tstring)
timespan = 0
    for quantity, period in matches:
        period = period.lower()  # the regex is case-insensitive, so normalize before comparing
        if period == 'y':
factor = 31556926
elif period == 'w':
factor = 604800
elif period == 'd':
factor = 86400
elif period == 'h':
factor = 3600
elif period == 'm':
factor = 60
else:
factor = 1
timespan += int(quantity) * factor
return timespan
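# Illustrative examples:
#   time_to_seconds("1 h 30 m")  ->  5400
#   time_to_seconds("2 weeks")   ->  1209600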
def iprange_to_string(ipstart, ipend):
ip = common.IPtoString(ipstart)
if ipend == ipstart:
return ip
else:
subnet = 32 - (ipend - ipstart).bit_length()
return "{}/{}".format(ip, subnet)
def fuzzy_time(seconds):
if seconds < 120:
return "{:.0f} seconds".format(seconds)
seconds /= 60.0
if seconds < 10:
return "{:.1f} minutes".format(seconds)
if seconds < 120:
return "{:.0f} minutes".format(seconds)
seconds /= 60.0
if seconds < 12:
return "{:.1f} hours".format(seconds)
if seconds < 48:
return "{:.0f} hours".format(seconds)
seconds /= 24.0
if seconds < 5:
return "{:.0f} days".format(seconds)
if seconds < 14:
return "{:.0f} days".format(seconds)
seconds /= 7.0
if seconds < 10:
return "{:.1f} weeks".format(seconds)
if seconds < 106:
return "{:.0f} weeks".format(seconds)
seconds /= 52.177457
if seconds < 10:
return "{:.1f} years".format(seconds)
return "{:.0f} years".format(seconds)
class Alerts(base.headless_post):
TIME_REGEX = re.compile(r'(\d+)\s*([ywdhms])', re.I)
# ------------------- GET ---------------------
def decode_get_request(self, data):
# request should (but doesn't need to) include: 'subnet', 'severity', 'time', 'sort', 'sort_dir', 'page_size', 'page_num'
subnet = data.get('subnet', None)
try:
severity = int(data.get('severity', 1))
except:
severity = 1
time = time_to_seconds(data.get('time', '1 week'))
sort = data.get('sort', 'id')
sort_dir = 'ASC' if data.get('sort_dir', 'DESC').upper() == 'ASC' else 'DESC'
try:
page_size = int(data['page_size'])
except:
page_size = 50
try:
page_num = int(data['page_num'])
except:
page_num = 1
request = {
'subnet': subnet,
'severity': severity,
'time': time,
'sort': sort,
'sort_dir': sort_dir,
'page_size': page_size,
'page_num': page_num
}
return request
def perform_get_command(self, request):
# request keys are: subnet, severity, time, sort, sort_dir, page_size, page_num
response = {}
m_alerts = alerts.Alerts(common.db, self.page.user.viewing)
page_offset = (request['page_num'] - 1) * request['page_size']
alert_filters = alerts.AlertFilter(min_severity=request['severity'], sort=request['sort'], order=request['sort_dir'], age_limit=request['time'], limit=request['page_size'], offset=page_offset)
if request['subnet'] is None:
response['alerts'] = m_alerts.get(alert_filters)
total_alerts = m_alerts.count(alert_filters)
else:
ipstart, ipend = common.determine_range_string(request['subnet'])
response['alerts'] = m_alerts.get_by_host(alert_filters, ipstart, ipend)
total_alerts = m_alerts.count(alert_filters, ipstart, ipend)
response['results'] = total_alerts
response['page'] = request['page_num']
response['pages'] = int(math.ceil(float(total_alerts) / request['page_size']))
return response
def encode_get_response(self, response):
encoded = {}
alert_list = []
for alert in response['alerts']:
alert_list.append({
'id': str(alert['id']),
'host': iprange_to_string(alert['ipstart'], alert['ipend']),
'log_time': datetime.fromtimestamp(alert['log_time']).strftime('%Y-%m-%d %H:%M:%S'),
'report_time': datetime.fromtimestamp(alert['report_time']).strftime('%Y-%m-%d %H:%M:%S'),
'severity': "sev{}".format(alert['severity']),
'label': alert['label'],
'rule_name': alert['rule_name']
})
encoded['alerts'] = alert_list
encoded['results'] = response['results']
encoded['page'] = response['page']
encoded['pages'] = response['pages']
return encoded
# ------------------- POST ---------------------
def decode_post_request(self, data):
method = data.get('method', None)
if method not in ('delete_all', 'delete'):
raise errors.MalformedRequest("method must be either 'delete' or 'delete_all'")
alert_id = None
if method == 'delete':
try:
alert_id = int(data.get('id'))
except:
raise errors.RequiredKey('alert id', 'id')
request = {
'method': method,
'id': alert_id
}
return request
def perform_post_command(self, request):
m_alerts = alerts.Alerts(common.db, self.page.user.viewing)
if request['method'] == 'delete_all':
m_alerts.clear()
elif request['method'] == 'delete':
m_alerts.delete(request['id'])
else:
raise errors.MalformedRequest('Method not understood.')
return "success"
def encode_post_response(self, response):
encoded = {'result': response}
return encoded
class AlertDetails(base.headless_post):
# ------------------- GET ---------------------
def decode_get_request(self, data):
try:
request = {'id': int(data.get('id'))}
except:
raise errors.RequiredKey('alert id', 'id')
return request
def perform_get_command(self, request):
response = {}
m_alerts = alerts.Alerts(common.db, self.page.user.viewing)
response['for'] = request['id']
response['details'] = m_alerts.get_details(request['id'])
return response
def encode_get_response(self, response):
details = response['details']
if details is None:
encoded = {
'for': response['for'],
'time': None,
'host': None,
'severity': None,
'label': None,
'rule_name': None,
'details': None,
'description': None,
}
return encoded
raw_metadata = details['details']
metadata = {}
if isinstance(raw_metadata, (str, unicode)):
metadata['data'] = raw_metadata
elif isinstance(raw_metadata, dict):
metadata.update(raw_metadata)
elif isinstance(raw_metadata, list):
for i, value in enumerate(raw_metadata):
metadata['Value {}'.format(i + 1)] = value
else:
metadata['data'] = str(raw_metadata)
# prettify some values
if 'timestamp' in metadata:
metadata['timestamp'] = metadata['timestamp'].strftime('%Y-%m-%d %H:%M:%S')
if 'src' in metadata:
metadata['src'] = common.IPtoString(metadata['src'])
if 'dst' in metadata:
metadata['dst'] = common.IPtoString(metadata['dst'])
if 'duration' in metadata:
metadata['duration'] = fuzzy_time(metadata['duration'])
host = iprange_to_string(details['ipstart'], details['ipend'])
encoded = {
'for': response['for'],
'time': datetime.fromtimestamp(details['report_time']).strftime('%Y-%m-%d %H:%M:%S'),
'host': host,
'severity': details['severity'],
'label': details['label'],
'rule_name': details['rule_name'],
'details': metadata,
'description': 'Rule "{}" triggered on {}'.format(details['rule_name'], host)
}
return encoded
# ------------------- POST ---------------------
def decode_post_request(self, data):
# Queries include updating alert label (and adding notes?)
method = data.get('method', None)
if method != 'update_label':
raise errors.RequiredKey("method (must be 'update_label')", "method")
request = {
'method': method
}
if method == "update_label":
try:
request['id'] = int(data.get('id'))
except:
raise errors.RequiredKey('alert id', 'id')
try:
request['label'] = data.get('label', '')
assert len(request['label']) > 0
except:
raise errors.RequiredKey('label', 'label')
return request
def perform_post_command(self, request):
if request['method'] == 'update_label':
a_model = alerts.Alerts(common.db, self.page.user.viewing)
a_model.set_label(request['id'], request['label'])
return "success"
def encode_post_response(self, response):
encoded = {'result': response}
return encoded
|
/samapper-0.3.2.tar.gz/samapper-0.3.2/sam/pages/alerts.py
| 0.608594 | 0.368491 |
alerts.py
|
pypi
|
from importlib import import_module
import inspect
# Republishing for easy serialization
from pickle import load, loads, dump, dumps # noqa
def import_string(dotted_path):
"""
Import a dotted module path or a element from it if a `:` separator
is provided
:arg dotted_path: path to import (e.g. 'my_module.my_package:MyClass')
"""
try:
module_path, class_name = dotted_path.rsplit(':', 1)
except ValueError:
return import_module(dotted_path)
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
raise ImportError(msg)
def signature_factory(target):
"""
Create a :class Signature: of the given object
"""
if inspect.ismodule(target):
return ModuleSignature(target)
elif inspect.isclass(target):
return ClassSignature(target)
elif inspect.isfunction(target):
return FunctionSignature(target)
elif inspect.isgenerator(target) or inspect.isgeneratorfunction(target):
return GeneratorSignature(target)
else:
return AttributeSignature(target)
class ValidationError(Exception):
pass
class Signature:
"""
Representation of a public API
"""
def __init__(self, target=None):
if target:
self.build_signature(target)
def __str__(self):
raise NotImplementedError
def build_signature(self, target):
self._name = target.__name__
def validate(self, signature):
if self.__class__ != signature.__class__:
            return ('type mismatch (original: %s, actual: %s)' %
(self, signature))
def __eq__(self, other):
return not self.validate(other)
class LeafSignature(Signature):
pass
class NodeSignature(Signature):
def __init__(self, *args, **kwargs):
self._signature = {}
super().__init__(*args, **kwargs)
def build_signature(self, target):
super().build_signature(target)
public_attrs = (m for m in dir(target) if not m.startswith('_'))
for attr in public_attrs:
self._signature[attr] = signature_factory(getattr(target, attr))
def validate(self, original):
errors = super().validate(original)
if errors:
return errors
errors = {}
original_keys = original._signature.keys()
keys = self._signature.keys()
errors.update({str(original._signature[m]): 'missing element'
for m in original_keys - keys})
errors.update({str(self._signature[u]): 'unknown element'
for u in keys - original_keys})
for key in original_keys & keys:
err = self._signature[key].validate(original._signature[key])
if err:
errors[str(self._signature[key])] = err
if errors:
return errors
class ModuleSignature(NodeSignature):
def __str__(self):
return 'Module %s' % self._name
class ClassSignature(NodeSignature):
def __str__(self):
return 'Class %s' % self._name
class FunctionSignature(LeafSignature):
def __init__(self, *args, **kwargs):
self._built_in_function = False
self._signature = None
super().__init__(*args, **kwargs)
def build_signature(self, target):
super().build_signature(target)
self._built_in_function = False
self._signature = None
try:
argspec = inspect.getfullargspec(target)
self._signature = {
"args": argspec.args,
"varargs": argspec.varargs,
"varkw": argspec.varkw,
"kwonlyargs": argspec.kwonlyargs
}
            if argspec.defaults:
                # argspec.defaults is a tuple of default values; store one
                # signature per default so changes to them are detected
                self._signature["defaults"] = [
                    signature_factory(d) for d in argspec.defaults]
if argspec.kwonlydefaults:
self._signature["kwonlydefaults"] = {
k: signature_factory(v)
for k, v in argspec.kwonlydefaults.items()}
if argspec.annotations:
self._signature["annotations"] = {
k: signature_factory(v)
for k, v in argspec.annotations.items()}
# Serialize params
except TypeError:
# Cannot use metaprogramming on C functions
self._built_in_function = True
def __str__(self):
if self._built_in_function:
return 'Function %s <built-in function>' % self._name
else:
return 'Function %s (%s)' % (self._name, self._signature)
def validate(self, original):
errors = super().validate(original)
if errors:
return errors
if (self._built_in_function != original._built_in_function or
self._signature != original._signature):
return ("Function signature has changed, original: %s, actual %s" %
(self, original))
class AttributeSignature(LeafSignature):
def __init__(self, *args, **kwargs):
self._type = None
super().__init__(*args, **kwargs)
def build_signature(self, target):
self._type = type(target).__name__
def validate(self, original):
errors = super().validate(original)
if errors:
return errors
if self._type != original._type:
return ("Attribute type has changed: original %s, actual %s" %
(self._type, original._type))
def __str__(self):
return 'Attribute'
class GeneratorSignature(LeafSignature):
def __str__(self):
return 'Generator'
def build_signature(target_path):
"""
Generate a :class Signature: representing the element at target_path
:arg target_path: dotted path to the element, can contain a final `:`
to point on a package attribute
"""
target = import_string(target_path)
return signature_factory(target)
def check_signature(target_path, signature):
"""
Try to validate the given target object against the :class Signature:
or raise a :class ValidationError: exception
:arg target_path: dotted path to the element, can contain a final `:`
to point on a package attribute
"""
current = build_signature(target_path)
errors = current.validate(signature)
if errors:
raise ValidationError(errors)
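# Minimal usage sketch (the module name "my_package" and the signature file
# name are hypothetical examples, not part of this library):
#
#     # When cutting a release, record the public API signature:
#     with open("my_package.signature", "wb") as fp:
#         dump(build_signature("my_package"), fp)
#
#     # Later, before the next release, verify nothing public has changed:
#     with open("my_package.signature", "rb") as fp:
#         check_signature("my_package", load(fp))  # raises ValidationError on mismatch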
|
/samarche-0.0.1.tar.gz/samarche-0.0.1/samarche.py
| 0.708918 | 0.185892 |
samarche.py
|
pypi
|
import os
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist
from sewar.full_ref import mse, sam
def load_img(folder_path, tag=None):
"""
Reads all the images saved in a certain folder path and in the tag file
:param folder_path: Path of the folder where the images from micro2matrix are saved (str)
    :param tag: Default is None; if we want to work only on images for which we have their tag,
                a tag dataframe or series should be passed too (pandas)
:return: (1) final array with all the loaded images from the folder (list)
(2) names list with the loaded images names (list)
"""
arrays = []
names = []
for file in os.listdir(folder_path):
if file.endswith(".npy"):
if file == "bact_names.npy":
continue
file_path = os.path.join(folder_path, file)
if tag is None:
arrays.append(np.load(file_path, allow_pickle=True, mmap_mode='r'))
else:
if file_path.split("\\")[-1].replace(".npy", "") in tag.index:
arrays.append(np.load(file_path, allow_pickle=True, mmap_mode='r'))
names.append(file_path.split("\\")[-1].replace(".npy", ""))
final_array = np.stack(arrays, axis=0)
return final_array, names
def fft_process(x, cutoff):
"""
Apply FFT on each images with the cutoff given.
:param x: A single image (ndarray)
:param cutoff: Cutoff frequency as a fraction of the maximum possible frequency (float)
:return: A filtered image (ndarray)
"""
fft = np.fft.fft2(x)
# Shift the zero-frequency component to the center of the array
fft_shifted = np.fft.fftshift(fft)
# Define the cutoff frequency (as a fraction of the maximum possible frequency)
cutoff_freq = cutoff
# Create a mask to keep only the low-frequency components
rows, cols = fft_shifted.shape
crow, ccol = int(rows / 2), int(cols / 2)
mask = np.zeros((rows, cols), dtype=bool)
mask[crow - int(cutoff_freq * crow):crow + int(cutoff_freq * crow),
ccol - int(cutoff_freq * ccol):ccol + int(cutoff_freq * ccol)] = True
# Apply the mask to the FFT
fft_cutoff = np.copy(fft_shifted)
fft_cutoff[~mask] = 0
# Inverse FFT to get the filtered image
img_filtered_x = np.fft.ifft2(np.fft.ifftshift(fft_cutoff))
return img_filtered_x
def final_distance(output1, output2, f):
"""
Calculate the distance between 2 filtered images.
:param output1: Filtered image 1 (ndarray)
:param output2: Filtered image 2 (ndarray)
:param f: Metric to calculate the distance according to
One of "d1","d2","d3","sam","mse"
    :return: The distance between the two filtered images under the chosen metric (float)
"""
if f == "d1":
# Euclidean distance
return np.linalg.norm(output1 - output2)
elif f == "d2":
# Absolute difference
return np.sum(np.abs(np.abs(output1) - np.abs(output2)))
elif f == "d3":
# Difference of angles
return np.sum(np.abs(np.angle(output1) - np.angle(output2)))
elif f == "sam":
return sam(output1, output2)
elif f == "mse":
return mse(output1, output2)
def build_SAMBA_distance_matrix(folder_path, metric="sam", cutoff=0.8, tag=None):
"""
Build SAMBA distance matrix of the FFT processed images using the metric as the
final distance metric between the processed images, and the cutoff as the FFT cutoff
:param folder_path: Path of the folder where the images from micro2matrix are saved (str)
:param metric: Metric to calculate the distance according to.
One of "d1","d2","d3","sam","mse"
:param cutoff: Cutoff frequency as a fraction of the maximum possible frequency (float)
    :param tag: Default is None; if we want to work only on images for which we have their tag,
                a tag dataframe or series should be passed too (pandas)
:return: Distance matrix dataframe (pandas)
"""
# Load images from the folder
imgs, names = load_img(folder_path, tag)
# Image shape
x_axis = imgs.shape[-1]
y_axis = imgs.shape[-2]
# Function for images adjusting (FFT) and calculating the pairwise distance
def fft_dist(x, y):
x = x.reshape(x_axis, y_axis)
y = y.reshape(x_axis, y_axis)
x_after = fft_process(x, cutoff)
y_after = fft_process(y, cutoff)
return final_distance(x_after, y_after, metric)
# Build the SAMBA distance matrix
dm = cdist(imgs.reshape(imgs.shape[0], -1), imgs.reshape(imgs.shape[0], -1), metric=fft_dist)
if tag is None:
dm = pd.DataFrame(dm, index=names, columns=names)
else:
dm = pd.DataFrame(dm, index=tag.index, columns=tag.index)
return dm
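# Minimal usage sketch (the folder and tag file below are hypothetical
# examples; micro2matrix is assumed to have written the .npy images already):
#
#     tag = pd.read_csv("tag.csv", index_col=0)  # optional labels, indexed by image name
#     dm = build_SAMBA_distance_matrix("images_folder", metric="sam", cutoff=0.8, tag=tag)
#     dm.to_csv("samba_distance_matrix.csv")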
|
/samba_metric-0.0.8.tar.gz/samba_metric-0.0.8/src/samba/SAMBA_metric.py
| 0.731155 | 0.66238 |
SAMBA_metric.py
|
pypi
|
from collections import defaultdict
from pathlib import Path
from typing import List, Tuple, Union, Optional, Sequence
import array
import bz2
import csv
import functools
import itertools
import logging
import math
import pickle
import re
# Import local modules
from .newick import Node
# Define the path to the 'etc' directory, with supplementary material
ETC_PATH = Path(__file__).parent / "etc"
class DistanceMatrix:
"""
This class provides a memory-efficient implementation of a symmetric distance matrix.
Rather than storing all elements, it only keeps half of the elements (excluding the diagonal),
leveraging the symmetry of the matrix. The diagonal elements are assumed to be zeros.
The matrix elements are flattened into a one-dimensional array.
"""
data: array.array
def __init__(
self,
keys: Optional[List[str]] = None,
filename: Optional[Union[str, Path]] = None,
datatype: str = "f",
):
"""
Initializes the matrix. The matrix can be populated in one of two ways:
1. By providing a list of keys. An empty matrix is initialized with these keys.
2. By providing a filename to load the matrix from.
Exactly one of 'keys' or 'filename' must be provided.
@param keys: A list of unique keys that represent the elements of the matrix.
@param filename: The name (or Path object) of the file from which to load the matrix.
@param datatype: The datatype of the elements in the array. Must be a valid type code.
"""
# Ensuring that either keys or filename is provided
if (keys is None) == (filename is None):
raise ValueError(
"Either 'keys' or 'filename' must be provided, but not both."
)
# If filename is provided, read the matrix from file
if filename:
self.keys, self.data = self.read(filename)
else: # Else, create an empty matrix with given keys
self.keys = sorted(keys)
# Initializing the half-matrix with zeroes
self.data = array.array(
datatype, [0] * (len(self.keys) * (len(self.keys) - 1) // 2)
)
# Generate indices for each key for quick lookup
self.indices = {key: i for i, key in enumerate(self.keys)}
def read(self, filename: Union[str, Path]) -> Tuple[List[str], array.array]:
"""
Reads a matrix from a compressed file.
The method opens the compressed file, deserializes it, and returns the keys and data.
@param filename: The filename or Path object to read the matrix from.
@return: A tuple containing the keys and data of the matrix.
"""
# Ensuring filename is a Path object
filename = Path(filename) if isinstance(filename, str) else filename
with bz2.open(filename, "rb") as file:
keys, data = pickle.load(file)
return keys, data
def save(self, filename: Union[str, Path]):
"""
Writes the matrix to a compressed file.
This method serializes the keys and data and writes them to a compressed file.
@param filename: The filename or Path object to write the matrix to.
"""
# Ensuring filename is a Path object
filename = Path(filename) if isinstance(filename, str) else filename
with bz2.open(filename, "wb") as file:
pickle.dump((self.keys, self.data), file)
@functools.cache # Caching results for repeated calls with the same arguments
def _get_index(self, i: str, j: str) -> int:
"""
Computes the index in the flattened array for the given pair of keys (i, j).
This method calculates the index of each element in the one-dimensional array based on the keys.
The computation is performed based on the lower-left triangle of the matrix,
excluding the diagonal.
@param i: The first key.
@param j: The second key.
@return: The index in the array corresponding to the pair of keys.
"""
if self.indices[i] > self.indices[j]:
i, j = j, i # Ensuring i <= j for the calculation
return self.indices[j] * (self.indices[j] - 1) // 2 + self.indices[i]
def __setitem__(self, key: Sequence[str], value: float):
"""
Sets the value for a specific pair of keys (i, j) in the distance matrix.
Note that the method does not check if the keys are valid (in particular, if
only two keys are provided and if they are in the matrix).
@param key: A sequence containing the two keys.
@param value: The value to be set for the pair of keys.
"""
self.data[self._get_index(key[0], key[1])] = value
def __getitem__(self, item: Sequence[str]) -> float:
"""
Returns the value for a specific pair of keys (i, j) in the distance matrix.
Note that the diagonal values are assumed to be 0. Also note that the
method does not check if the keys are valid (in particular, if
only two keys are provided and if they are in the matrix).
@param item: A sequence containing the two keys.
@return: The value for the pair of keys.
"""
if item[0] == item[1]:
return 0 # Diagonal values are assumed to be 0
return self.data[self._get_index(item[0], item[1])]
def rescale(
self, scale_range: Tuple[float, float] = (0.0, 1.0), factor: float = 1.0
):
"""
Rescales the distance matrix to a given range and by the given factor.
@param scale_range: The range to which the values should be rescaled.
@param factor: The factor by which the values should be multiplied.
"""
# Calculating the minimum and maximum values
min_value = min(self.data)
max_value = max(self.data)
# Build a new temporary array of the same size of self.data, but
# always of floating point type, to hold the new values
temp_array = array.array("f", [0] * len(self.data))
# Rescaling the values
for i, value in enumerate(self.data):
temp_array[i] = (value - min_value) / (
max_value - min_value
) * factor + scale_range[0]
# Use the new array as the data
self.data = temp_array
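# Illustrative example of the half-matrix storage (not part of the original
# module): for keys ["A", "B", "C"] only the three below-diagonal pairs are
# stored, so the backing array has 3 * (3 - 1) // 2 == 3 entries.
#
#     dm = DistanceMatrix(keys=["A", "B", "C"])
#     dm["A", "B"] = 0.5
#     dm["B", "C"] = 1.25
#     assert dm["B", "A"] == 0.5   # symmetric lookup
#     assert dm["A", "A"] == 0     # diagonal assumed zero
#     dm.save("distances.matrix.bz2")
#     restored = DistanceMatrix(filename="distances.matrix.bz2")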
def tree2matrix(tree: Node) -> DistanceMatrix:
"""
Converts a Newick tree into a symmetric distance matrix.
@param tree: The input Newick tree to be converted.
@return: The resulting distance matrix.
"""
def most_recent_common_ancestor(
anc_list1: List[Node], anc_list2: List[Node]
) -> Node:
"""
Finds the most recent common ancestor of two lists of ancestors.
@param anc_list1: The first list of ancestors.
@param anc_list2: The second list of ancestors.
@return: The label of the most recent common ancestor.
@raise ValueError: Raised when no common ancestor is found.
"""
for anc in anc_list1:
if anc in anc_list2:
return anc
raise ValueError("No common ancestor found")
@functools.cache
def compute_distance(leaf1: str, leaf2: str) -> float:
"""
Computes the distance between two leaves in the tree.
The distance is computed as the sum of lengths from each leaf to their most recent common ancestor (MRCA).
@param leaf1: The first leaf.
@param leaf2: The second leaf.
@return: The computed distance.
"""
# Get the most recent common ancestor of the two leaves
mrca = most_recent_common_ancestor(ancestors[leaf1], ancestors[leaf2])
# Compute the lengths between leaves and the MRCA
leaf1_length = sum(
[n.length for n in ancestors[leaf1][: ancestors[leaf1].index(mrca)]]
)
leaf2_length = sum(
[n.length for n in ancestors[leaf2][: ancestors[leaf2].index(mrca)]]
)
return leaf1_length + leaf2_length
# Extract all leaves from the tree
leaves = tree.get_leaves()
# Initialize the distance matrix
matrix = DistanceMatrix([leaf.name for leaf in leaves])
# Build a dictionary mapping leaves to their ancestors; note that this currently
# requires a complete traversal of the tree for each leaf, which is not efficient
# (this could be improved by storing the ancestors in the tree nodes, but involves
# changes to the `Node` class that are not urgent)
ancestors = {leaf: leaf.ancestors for leaf in leaves}
# Compute pairwise distances for each combination of leaves
num_comb = math.comb(len(leaves), 2)
for idx, (leaf1, leaf2) in enumerate(itertools.combinations(leaves, 2)):
if idx % 1000 == 0:
logging.info(
f"Processed {idx} pairs of leaves (at `{leaf1.name},{leaf2.name}`) [{(idx/num_comb)*100:.2f}%]..."
)
matrix[leaf1.name, leaf2.name] = compute_distance(leaf1, leaf2)
return matrix
def dst2matrix(filename: Union[Path, str]) -> DistanceMatrix:
"""
Read a distance matrix in the SplitsTree format from a file.
    The distances are returned as a DistanceMatrix object indexed by pairs of
    taxa. The function takes care of parsing the SplitsTree header and the
    whitespace-separated rows.
Parameters
----------
filename
The file to read.
Returns
-------
    matrix
        A DistanceMatrix object where the distance between two taxa can be
        looked up as matrix[taxon_a, taxon_b].
"""
# Read raw data
header = True
taxa = []
matrix = {}
with open(Path(filename), encoding="utf-8") as handler:
for line in handler.readlines():
if header:
header = False
else:
line = re.sub(r"\s+", " ", line)
tokens = line.split()
taxon = tokens[0]
taxa.append(taxon)
dists = [float(dist) for dist in tokens[1:]]
matrix[taxon] = dists
# Make an actual dictionary matrix
mtx = DistanceMatrix(taxa)
for taxon_a, dists in matrix.items():
for dist, taxon_b in zip(dists, taxa):
mtx[taxon_a, taxon_b] = dist
return mtx
def build_table_from_file(
filename: Union[str, Path],
key: str,
value: str,
encoding: str = "utf-8",
multiple: str = "average",
) -> dict:
"""
Build a dictionary from a tabular file.
@param filename: The name of the file to read.
@param key: The name of the column to use as key.
@param value: The name of the column to use as value.
@param encoding: The encoding of the file.
@param multiple: How to handle multiple values for the same key.
@return: A dictionary with the values from the file.
"""
# Make sure filename is a Path object
filename = Path(filename)
# Check that multiple is a valid option
if multiple not in ["average", "max", "min"]:
raise ValueError(f"Invalid value for 'multiple': {multiple}")
# Open the file
with filename.open(newline="", encoding=encoding) as csvfile:
# Let the csv library sniff the dialect of the file
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
# Create a csv reader object
reader = csv.DictReader(csvfile, dialect=dialect)
# Create a defaultdict of lists to store all values associated with each key
result_dict = defaultdict(list)
for row in reader:
# Append each value to the list of values for the appropriate key
result_dict[row[key]].append(float(row[value]))
# Calculate the final value to be associated with each key, based on the value of 'multiple'
for key, values in result_dict.items():
if multiple == "average":
result_dict[key] = sum(values) / len(values)
elif multiple == "max":
result_dict[key] = max(values)
elif multiple == "min":
result_dict[key] = min(values)
return dict(result_dict)
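# Usage sketch (the CSV file and column names are hypothetical): given
# "areas.csv" with columns "glottocode" and "area_km2", where a key may
# appear on several rows, duplicates are collapsed according to 'multiple':
#
#     table = build_table_from_file("areas.csv", key="glottocode",
#                                   value="area_km2", multiple="average")
#     # table maps each glottocode to the mean of its area_km2 values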
|
/samba_sampler-0.3.tar.gz/samba_sampler-0.3/src/samba_sampler/common.py
| 0.941506 | 0.640854 |
common.py
|
pypi
|
# Import Python standard libraries
import argparse
import sys
# Import our library to leverage functions and classes
import samba_sampler as samba
# Define a dictionary for models and their parameters
models = {
"tiago1": {
"algorithm": "standard",
"freq_weight": 1.0,
"matrices": "gled.matrix.bz2,haversine.matrix.bz2",
"matrix_weights": "1.0,0.75",
"tables": None,
"table_weights": None,
},
"tiago2": {
"algorithm": "progressive",
"freq_weight": 0.5,
"matrices": "gled.matrix.bz2,haversine.matrix.bz2",
"matrix_weights": "1.0,0.5",
"tables": None,
"table_weights": None,
},
}
def main():
parser = argparse.ArgumentParser(
description="Interface with the samba_sampler library"
)
parser.add_argument(
"--model", default=None, help="Model to use. Overrides other parameters."
)
parser.add_argument(
"--algorithm",
default="standard",
help='Algorithm to use: "standard" or "progressive"',
)
parser.add_argument("k", type=int, help="Size of samples")
parser.add_argument("n", type=int, help="Number of samples")
parser.add_argument(
"--freq_weight", type=float, default=1.0, help="A frequency weight factor"
)
parser.add_argument(
"--matrices",
default=None,
help="List of one or more filenames separated by commas",
)
parser.add_argument(
"--matrix_weights",
default=None,
help="List of matrix weights, given as floating points separated by commas",
)
parser.add_argument(
"--tables",
default=None,
help="List of one or more filenames separated by commas",
)
parser.add_argument(
"--table_weights",
default=None,
help="List of table weights, given as floating points separated by commas",
)
parser.add_argument("-s", "--seed", type=int, help="Random seed")
args = parser.parse_args()
# If a model is specified, update the default parameters
if args.model:
if args.model in models:
model_params = models[args.model]
for key, value in model_params.items():
if getattr(args, key) == parser.get_default(key):
setattr(args, key, value)
else:
parser.error(
f"Invalid model. Available models are: {', '.join(models.keys())}"
)
# Convert string arguments into corresponding Python types
matrices = args.matrices.split(",") if args.matrices else None
tables = args.tables.split(",") if args.tables else None
matrix_weights = (
list(map(float, args.matrix_weights.split(",")))
if args.matrix_weights
else None
)
table_weights = (
list(map(float, args.table_weights.split(","))) if args.table_weights else None
)
# If matrices or tables were provided, make sure the corresponding weights have the same length
if matrices and matrix_weights and len(matrices) != len(matrix_weights):
parser.error("The number of matrices and matrix weights must be the same")
if tables and table_weights and len(tables) != len(table_weights):
parser.error("The number of tables and table weights must be the same")
# Create an instance of the sampler
sampler = samba.GenericSampler(
matrix_files=matrices,
table_files=tables,
matrix_weights=matrix_weights,
table_weights=table_weights,
)
# Print tuples yielded by method sample
for taxa in sampler.sample(
args.k,
args.n,
algorithm=args.algorithm,
freq_weight=args.freq_weight,
seed=args.seed,
):
sys.stdout.write(str(",".join(sorted(taxa))) + "\n")
if __name__ == "__main__":
main()
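# Example invocations (hypothetical matrix file names, matching the bundled
# model definitions above):
#
#     python -m samba_sampler 5 10 --model tiago1 --seed 42
#     python -m samba_sampler 5 10 --matrices gled.matrix.bz2,haversine.matrix.bz2 \
#         --matrix_weights 1.0,0.5 --seed 42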
|
/samba_sampler-0.3.tar.gz/samba_sampler-0.3/src/samba_sampler/__main__.py
| 0.550849 | 0.553083 |
__main__.py
|
pypi
|
# Samba
An extremely tiny PaaS (platform as a service) to deploy multiple apps on a single server with Git, similar to Heroku or Dokku.
It is simple and compatible with your current infrastructure.
It supports Python (Flask/Django), Nodejs, PHP and Static HTML.
### Features
- Easy command line setup
- Instant deploy with Git
- Multi applications deployment
- App management: deploy, stop, delete, scale, logs apps
- Simple and straight forward
- SSL/HTTPS with LetsEncrypt
- Any languages: Python, Nodejs, PHP, HTML/Static
- Supports any Shell script, therefore any other languages are supported
- Metrics to see app's health
- Create static sites
- Support Flask, Django, Express, etc...
- Easy configuration with app.json
- Nginx
- Logs
### Requirements
- Fresh server
- SSH to server with root access
- Ubuntu 18.04
### Languages Supported
- [x] Python
- [x] Nodejs
- [x] Static HTML
- [x] PHP
- [x] Any shell script
---
## Setup
### 1. Install On Server/Remote machine
To start off, install Samba on the server/remote machine.
The Samba install.sh script creates a **samba** user on the system and installs all the necessary packages.
Download the install script from Samba github, then run the script:
```
curl https://raw.githubusercontent.com/mardix/samba/master/install.sh > install.sh
chmod 755 install.sh
./install.sh
```
### 2. Prepare the application on the local environment
### Git Remote
1. Make sure you have Git on your machine, then initialize the application repo
```
git init
git add .
git commit
```
2. Add a remote named **samba** with the username **samba**, substituting example.com with the public IP address of your server
format: `git remote add samba samba@[HOST]:[APP_NAME]`
Example
```
git remote add samba [email protected]:flask-example
```
### 3. Edit app.json
At a minimum, the `app.json` should look like this.
If the root directory contains `requirements.txt` it will use Python; if it contains `package.json` it will use Node; otherwise it will be treated as a STATIC site serving HTML & PHP.
```js
// app.json
{
"domain_name": "mysite.com",
"runtime": "python",
"run": {
"web": "app:app"
}
}
```
### 4. Deploy application
Once you are ready to deploy, push your code to master
`git push samba master`
---
## Commands
Samba communicates with your server via SSH, with the user name: `samba`
ie: `ssh [email protected]`
### General
#### List all commands
List all commands
```
ssh [email protected]
```
#### apps
List all apps
```
ssh [email protected] apps
```
#### deploy
Deploy app. `$app_name` is the app name
```
ssh [email protected] deploy $app_name
```
#### reload
Reload an app
```
ssh [email protected] reload $app_name
```
#### stop
Stop an app
```
ssh [email protected] stop $app_name
```
#### destroy
Delete an app
```
ssh [email protected] destroy $app_name
```
#### reload-all
Reload all apps on the server
```
ssh [email protected] reload-all
```
#### stop-all
Stop all apps on the server
```
ssh [email protected] stop-all
```
### Scaling
To scale the application
#### ps
Show the process count
```
ssh [email protected] ps $app_name
```
#### scale
Scale processes
```
ssh [email protected] scale $app_name $proc=$count $proc2=$count2
```
ie: `ssh [email protected] scale site.com web=4`
### Environment
To edit application's environment variables
#### env
Show ENV configuration for app
```
ssh [email protected] env $app_name
```
#### set
Set ENV config
```
ssh [email protected] set $app_name $KEY=$VAL $KEY2=$VAL2
```
#### del
Delete a key from the environment var
```
ssh [email protected] del $app_name $KEY
```
### Log
To view application's log
```
ssh [email protected] log $app_name
```
### Update
To update Samba to the latest from Github
```
ssh [email protected] update
```
### Version
To get Samba's version
```
ssh [email protected] version
```
---
## app.json
`app.json` is a manifest format for describing web apps. It declares environment variables, scripts, and other information required to run an app on your server. This document describes the schema in detail.
*(scroll down for a full app.json without the comments)*
```js
// app.json
{
"name": "", // name
"version": "", // version
"description": "", // description
// samba: Samba-specific configuration
"samba": {
// domain_name (string): the server name without http
"domain_name": "",
// runtime: python|node|static|shell
// python for wsgi application (default python)
// node: for node applications, where the command should be e.g. 'node index.js 2>&1 | cat'
// static: for HTML/Static page and PHP
// shell: for any script that can be executed via the shell script, ie: command 2>&1 | cat
"runtime": "python",
// runtime_version: python : 3(default)|2, node: node version
"runtime_version": "3",
// auto_restart (bool): to force server restarts when deploying
"auto_restart": false,
// static_paths (array): specify list of static path to expose, [/url:path, ...]
"static_paths": ["/url:path", "/url2:path2"],
// https_only (bool): when true (default), it will redirect http to https
"https_only": true,
// threads (int): The total threads to use
"threads": "4",
// wsgi (bool): if runtime is python by default it will use wsgi, if false it will fallback to the command provided
"wsgi": true,
// letsencrypt (bool) true(default)
"ssl_letsencrypt": true,
// nginx (object): nginx specific config. can be omitted
"nginx": {
"cloudflare_acl": false,
"include_file": ""
},
// uwsgi (object): uwsgi specific config. can be omitted
"uwsgi": {
"gevent": false,
"asyncio": false
},
// env, custom environment variable
"env": {
},
// scripts to run during application lifecycle
"scripts": {
// release (array): commands to execute each time the application is released/pushed
"release": [],
// destroy (array): commands to execute when the application is being deleted
"destroy": [],
// predeploy (array): commands to execute before spinning the app
"predeploy": [],
// postdeploy (array): commands to execute after spinning the app
"postdeploy": []
},
// run: processes to run.
// 'web' is special, it’s the only process type that can receive external HTTP traffic
// all other process name will be regular worker. The name doesn't matter
"run": {
// web (string): it’s the only process type that can receive external HTTP traffic
// -> app:app (for python using wsgi)
// -> node server.js 2>&1 cat (For other web app which requires a server command)
// -> /web-root-dir-name (for static html+php)
"web": "",
// worker* (string): command to run, with a name. The name doesn't matter.
// it can be named anything
"worker": ""
}
}
}
```
### [app.json] without the comments:
Copy and edit the config below in your `app.json` file.
```json
{
"name": "",
"version": "",
"description": "",
"samba": {
"domain_name": "",
"runtime": "static",
"runtime_version": "3",
"auto_restart": true,
"static_paths": [],
"https_only": true,
"threads": 4,
"wsgi": true,
"ssl_letsencrypt": true,
"nginx": {
"cloudflare_acl": false,
"include_file": ""
},
"uwsgi": {
"gevent": false,
"asyncio": false
},
"env": {
},
"scripts": {
"release": [],
"destroy": [],
"predeploy": [],
"postdeploy": []
},
"run": {
"web": "/",
"worker": ""
}
}
}
```
---
## Multiple Apps Deployment
**Samba** allows multiple sites deployment on a single repo.
If you have a mono repo and want to deploy multiple applications based on the domain name, you can do so by having *app.json:samba* as an array instead of an object. The `app_name` must match the `domain_name` from the *app.json:samba[array]*
### Examples
#### Config
Add multiple domains
```json
[
{
"domain_name": "mysite.com",
...
},
{
"domain_name": "myothersite.com",
...
},
...
]
```
#### Setup GIT
```
git remote add samba-mysite [email protected]:mysite.com
```
```
git remote add samba-myothersite [email protected]:myothersite.com
```
#### Deploy app
`git push samba-mysite master` will deploy *mysite.com*
`git push samba-myothersite master` will deploy *myothersite.com*
---
## Upgrade Samba
If you're already using Samba, you can upgrade Samba with:
```
ssh [email protected] update
```
---
## Credit
Samba is a fork of **Piku** https://github.com/piku/piku. Great work and Thank you
---
## Alternatives
- [Dokku](https://github.com/dokku/dokku)
- [Piku](https://github.com/piku/piku)
- [Caprover](https://github.com/CapRover/CapRover)
---
## CHANGELOG
- 0.2.0
- Multiple domain name deployment.
    Sites in a mono repo can now rely on a different config based on the app name,
    by having app.samba as a list of dicts; it will test 'domain_name' against the app_name
```
samba : [
{"domain_name": "abc.com", ...},
{"domain_name": "xyz.com", ...},
]
```
- 0.1.0
- Initial
- app.json contains the application configuration
- 'app.run.web' is set for static/web/wsgi command. Static accepts one path
- added 'cli.upgrade' to upgrade to the latest version
- 'app.json' can now have scripts to run
- 'uwsgi' and 'nginx' are hidden, 'app.env' can contain basic key
- 'app.static_paths' is an array
- Fixed python virtualenv setup, if the repo was used for a different runtime
- Simplifying "web" worker. No need for static or wsgi.
- Python default to wsgi worker, to force to a standalone set env.wsgi: false
- reformat uwsgi file name '{app-name}___{kind}.{index}.ini' (3 underscores)
- static sites have their own directives
- combined static html & php
- Support languages: Python(2, 3), Node, Static HTML, PHP
- simplify command name
- added metrics
- Letsencrypt
- ssl default
- https default
---
## TODO
- [x] (0.2.0) Allow multiple site deployment. For multiple sites, instead of app.samba being an object, it will be an array with all the sites. 'domain_name' should match the app in git.
ie: [email protected]:api.com, [email protected]:dev.api.com; with api.com and dev.api.com being two domain_name in the config
---
License: MIT - Copyright 2020-Forever Mardix
|
/samba-0.0.0.tar.gz/samba-0.0.0/README.md
| 0.572484 | 0.808275 |
README.md
|
pypi
|
from cobra.flux_analysis import flux_variability_analysis
import logging
import time
log = logging.getLogger(__name__)
def run_fva(model, rxnsOfInterest, proc, fraction_opt):
log.info("Starting FVA...")
start_time = time.time()
s = flux_variability_analysis(model, reaction_list=rxnsOfInterest, fraction_of_optimum=fraction_opt,
processes=proc)
elapsed_time = time.time() - start_time
log.info("Total elapsed FVA time: {:.2f} sec".format(elapsed_time))
return s
def calculate_score(fva_wt_mut_merged):
rxns_of_interest = fva_wt_mut_merged.index
# Format the flux ranges to be more usable:
WTint = {}
mutantint = {}
for rn in rxns_of_interest:
WTint[rn] = [round(fva_wt_mut_merged.loc[rn]["minWT"], 3), round(fva_wt_mut_merged.loc[rn]["maxWT"], 3)]
mutantint[rn] = [round(fva_wt_mut_merged.loc[rn]["minKO"], 3), round(fva_wt_mut_merged.loc[rn]["maxKO"], 3)]
# Calculate a score for each pair of flux ranges (WT and mutant):
score = [] # Score to be compared to the score threshold
flux_change = {} # Flux change direction
for r in rxns_of_interest: # For the one KO or for each reaction in the group of KOs
lb = [WTint[r][0], mutantint[r][0]] # Store the two lower bounds (WT and mutant)
ub = [WTint[r][1], mutantint[r][1]] # Store the two upper bounds (WT and mutant)
if lb == [0, 0]:
change_lower_bound = 0
else:
# Calculate the difference between the lower bounds divided by the biggest absolute lower
            # bound. Ex: WT = [-20, 50], mutant = [1, 30]: change_lower_bound = abs(1 - (-20)) / 20 = 1.05
            # change_upper_bound = abs(50 - 30) / 50 = 0.4 ==> score will be 1.05
change_lower_bound = abs(max(lb) - min(lb)) / max([abs(el) for el in lb])
if ub == [0, 0]:
change_upper_bound = 0
else:
change_upper_bound = abs(max(ub) - min(ub)) / max([abs(el) for el in ub])
score.append(max(change_lower_bound, change_upper_bound)) # Choose the max change as the score
# Determine direction of change
# If both lower bounds are the same, and both upper bounds are the same ==> no change
if WTint[r][0] == mutantint[r][0] and WTint[r][1] == mutantint[r][1]:
flux_change[r] = 0
# If the WT upper bound is lower than the mutant lower bound ==> significant change
elif WTint[r][1] < mutantint[r][0]:
flux_change[r] = 1
elif WTint[r][0] > mutantint[r][1]:
flux_change[r] = -1
elif ((WTint[r][0] <= mutantint[r][0])
and (WTint[r][1] <= mutantint[r][1])
and (max(abs(WTint[r][0] - mutantint[r][0]),
abs(WTint[r][1] - mutantint[r][1])) > 0)):
flux_change[r] = 1
elif ((WTint[r][0] >= mutantint[r][0])
and (WTint[r][1] >= mutantint[r][1])
and (abs(WTint[r][0] - mutantint[r][0]) > 0
or abs(WTint[r][1] - mutantint[r][1]) > 0)):
flux_change[r] = -1
else:
flux_change[r] = 0
score_dict = dict(zip(rxns_of_interest, score))
return score_dict, flux_change
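# Minimal usage sketch (toy flux ranges, not real FVA output):
#
#     import pandas as pd
#     merged = pd.DataFrame(
#         {"minWT": [0.0, 2.0], "maxWT": [10.0, 8.0],
#          "minKO": [5.0, 2.0], "maxKO": [20.0, 8.0]},
#         index=["RXN1", "RXN2"])
#     scores, directions = calculate_score(merged)
#     # scores == {"RXN1": 1.0, "RXN2": 0.0}
#     # directions == {"RXN1": 1, "RXN2": 0}  (RXN1's flux range shifts upwards)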
|
/sambaflux-0.1.8-py3-none-any.whl/samba/fva/fva_functions.py
| 0.73678 | 0.463687 |
fva_functions.py
|
pypi
|
from scipy.interpolate import interp1d
import numpy as np
class CorrelationIntegrands():
"""
Class to compute different integrands of correlation
functions for a specified x. In particular, the
integrands:
B2(x,f) = w(x)*x**2*f(x)*g2(x), (1)
B3(x,f) = x*f(x)*\int dv v**2*w(v)*g3(v,x), (2)
P2(x,f) = x**2*f(x)*g2(x), (3)
C2(x,f) = x**3*f(x)*w(x)**2*g2(x) (4)
xi2 = \int [V'(x)]^2 g2(x)*x*dx (5)
xi3 = \int\int V'(x)*V'(y)*x*y*g3(x,y)*dx*dy (6)
and the 2D integrand (no interpolating option here)
    Q3(u,v) = v**2*w(v)*g3(u,v). (7)
In the above, f(x) is some arbitrary function (typically
either WCA potential or delta function), and w(x) is
some function (usually the wfunc of our abp system).
g2 and g3 are the two-body and
angle-integrated three-body correlation functions,
respectively.
Note: If wanting to compute \int dx B(x,f) where
f(x) = delta^2(x-y)*g(x) is a 2D delta function, just
set f = lambda x : 1.0 and don't integrate over x. So
\int dx B(x,delta^2(x-y)*g(x)) = B(y,g(y)).
Attributes
----------
w : callable function
Typically should be the function w from the wBessel or
wPerturb class.
r2s : np.array of shape (bins_2,)
Array of points where two-body correlation function is
sampled.
g2s : np.array of shape (bins_2,)
Values of two-body correlation function, sampled at
the points self.r2s.
r3s : np.array of shape (bins_3,)
Array of points such that np.meshgrid(r3s,r3s) will
give the domain where three-body correlation function
is sampled.
G3s : np.array of shape (bins_3,bins_3)
Values of three-body correlation function, sampled at
the points np.meshgrid(self.r3s,self.r3s)
Pi : float (optional)
Value of D_r/D_t. Default is 3.0.
epsilon : float (optional)
Value of the WCA interaction potential strength.
Default is 1.0.
Methods
-------
B2(self,x,f,interpkind='cubic')
B3(self,x,f,interpkind='cubic')
Q3(self,insert_zeros=False)
P2(self,x,f,interpkind='cubic')
xi2(self,Vprime)
xi3(self, Vprime)
"""
def __init__(self,wfunc,r2s,g2s,r3s,G3s):
"""
Initialise attributes.
Parameters
----------
w : callable function
Typically should be the function w from the wBessel or
wPerturb class.
r2s : np.array of shape (bins_2,)
Array of points where two-body correlation function is
sampled.
g2s : np.array of shape (bins_2,)
Values of two-body correlation function, sampled at
the points self.r2s.
r3s : np.array of shape (bins_3,)
Array of points such that np.meshgrid(r3s,r3s) will
give the domain where three-body correlation function
is sampled.
G3s : np.array of shape (bins_3,bins_3)
Values of three-body correlation function, sampled at
the points np.meshgrid(self.r3s,self.r3s)
"""
self.w = wfunc
self.r2s = r2s
self.g2s = g2s
self.r3s = r3s
self.G3s = G3s
return
def B2(self,x,f,interpkind='cubic'):
"""
Compute the function
B2(x,f) = w(x)*x**2*f(x)*g2(x).
Parameters
----------
x : float or np.array of shape(lenx,)
Points to evaluate B2 at.
f : callable function
Function to compute B2 with.
interpkind : string (optional)
What kind of interpolation is used. Default
is 'cubic'.
Returns
-------
out : float or np.array of shape(lenx,)
Value of B2 at the points in x.
"""
raw = self.r2s**2*f(self.r2s)*self.w(self.r2s)*self.g2s
raw[np.isnan(raw)] = 0.0
fl = interp1d(self.r2s,raw,kind=interpkind)
return fl(x)
def B3(self,x,f,interpkind='cubic'):
"""
Compute the function
B3(x,f) = x*f(x)*\int dv v**2*w(v)*g3(v,x).
Parameters
----------
x : float or np.array of shape(lenx,)
Points to evaluate B3 at.
f : callable function
Function to compute B3 with.
interpkind : string (optional)
What kind of interpolation is used. Default
is 'cubic'.
Returns
-------
out : float or np.array of shape(lenx,)
Value of B3 at the points in x.
"""
UU,VV,integrand = self.Q3(insert_zeros=True)
dv = VV[1,0]-VV[0,0]
vals = np.sum(integrand,axis=0)*dv
raw = self.r3s*f(self.r3s)*vals
fl = interp1d(self.r3s,raw,kind=interpkind)
return fl(x)
def P2(self,x,f,interpkind='cubic'):
"""
Compute the function
P2(x,f) = x**2*f(x)*g2(x).
Parameters
----------
x : float or np.array of shape(lenx,)
Points to evaluate P2 at.
f : callable function
Function to compute P2 with.
interpkind : string (optional)
What kind of interpolation is used. Default
is 'cubic'.
Returns
-------
out : float or np.array of shape(lenx,)
Value of P2 at the points in x.
"""
raw = self.r2s**2*f(self.r2s)*self.g2s
raw[np.isnan(raw)] = 0.0
fl = interp1d(self.r2s,raw,kind=interpkind)
return fl(x)
def C2(self,x,f,interpkind='cubic'):
"""
Compute the function
C2(x,f) = x**3*f(x)*w(x)**2*g2(x).
Parameters
----------
x : float or np.array of shape(lenx,)
Points to evaluate C2 at.
f : callable function
Function to compute C2 with.
interpkind : string (optional)
What kind of interpolation is used. Default
is 'cubic'.
Returns
-------
out : float or np.array of shape(lenx,)
Value of C2 at the points in x.
"""
raw = self.r2s**3*f(self.r2s)*self.w(self.r2s)**2*self.g2s
raw[np.isnan(raw)] = 0.0
fl = interp1d(self.r2s,raw,kind=interpkind)
return fl(x)
def Q3(self,insert_zeros=False):
"""
Compute the function
Q3(u,v) = v**2*w(v)*g3(u,v)
at the discrete points u,v = np.meshgrid(self.rs,self.rs)
(no interpolation option for this function).
Parameters
----------
insert_zeros : bool (optional)
Insert zeros where the nans of self.G3s are.
Should be set to true if interpolating this
function. Default is False.
Returns
-------
out : list of 3 np.arrays
First two items of list are
np.meshgrid(self.rs,self.rs), third item of list is
the value of Q3 at the discrete points
"""
XX,YY = np.meshgrid(self.r3s,self.r3s)
prod = self.G3s*self.w(YY)*YY*YY
if insert_zeros:
prod[np.isnan(prod)] = 0.0
return XX, YY, prod
def xi2(self,Vprime):
"""
Compute the constant
xi2 = \int [V'(x)]^2 g2(x)*x*dx.
Parameters
----------
Vprime : callable function
Derivative of pairwise WCA potential.
Returns
-------
out : float
Value of constant for specified Vprime.
"""
raw = self.r2s* Vprime(self.r2s) * Vprime(self.r2s)*self.g2s
raw[np.isnan(raw)] = 0.0
dx = self.r2s[1]-self.r2s[0]
return np.sum(raw)*dx
def xi3(self,Vprime):
"""
Compute the function
xi3 = \int\int V'(x)*V'(y)*x*y*g3(x,y)*dx*dy
Parameters
----------
Vprime : callable function
Derivative of pairwise WCA potential.
Returns
-------
out : float
Value of constant for specified Vprime.
"""
UU,VV = np.meshgrid(self.r3s,self.r3s)
integrand = self.G3s*Vprime(UU)*Vprime(VV)*UU*VV
integrand[np.isnan(integrand)] = 0.0
dv = VV[1,0]-VV[0,0]
du = UU[0,1]-UU[0,0]
return np.sum(np.sum(integrand,axis=0))*dv*du
if __name__ == "__main__":
import matplotlib.pyplot as plt
import pickle
from sambristol_ssabp_w import wBessel,wPerturb
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/correlationintegrands.py
| 0.825379 | 0.780077 |
correlationintegrands.py
|
pypi
|
import numpy as np
def meshODE(t,sol):
"""
Given a 2D meshgrid array input and an OdeSolution
object, output the ODE solution held in OdeSolution
as a meshgrid compatible with the input.
Parameters
----------
t : 2D np.array
Form of the array should be like either XX or YY in
XX,YY = np.meshgrid(xs,ys) for arbitrary xs and ys.
sol : scipy.integrate.OdeSolution
Continous ODE solution that you would like to extend
to a second dimension.
Returns
-------
out : 2D np.array
The equivalent of calling sol(t) for the 2D input
array, if that were an allowed operation.
Example
-------
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
def fun(t,y):
return [y[1],-6*y[1]-8*y[0]]
def exact(t,y0):
A = - (y0[1]+ 2*y0[0])/2.0
B = (y0[1]+4*y0[0])/2.0
return [A*np.exp(-4*t) + B*np.exp(-2*t),
-4*A*np.exp(-4*t) - 2*B*np.exp(-2*t)]
t_span = (0,1)
y0 = (3.0,-4.0)
sol = solve_ivp(fun,t_span,y0,dense_output=True).sol
ts = np.linspace(t_span[0],t_span[1],num=101,endpoint=True)
TT,QQ = np.meshgrid(ts,ts)
plt.plot(QQ[:,0],meshODE(QQ,sol)[0][:,0],'ro')
plt.plot(TT[0,:],meshODE(TT,sol)[0][0,:],'bo')
plt.plot(ts,exact(ts,y0)[0],'k--')
plt.show()
"""
rows = len(t[:,0])
cols = len(t[0,:])
if np.abs(t[1,0]-t[0,0])>1e-15:
dum = sol(t[:,0])
outputdimensions = dum.ndim
if outputdimensions == 1:
dum_y = np.repeat(dum, repeats=cols)
out_y = dum_y.reshape(rows,cols)
else:
dum_y = np.repeat(dum[0], repeats=cols)
dum_yp = np.repeat(dum[1], repeats=cols)
out_y = dum_y.reshape(rows,cols)
out_yp = dum_yp.reshape(rows,cols)
elif np.abs(t[0,1]-t[0,0])>1e-15:
dum = sol(t[0,:])
outputdimensions = dum.ndim
if outputdimensions == 1:
dum_y = np.repeat(dum, repeats=rows)
out_y = dum_y.reshape(cols,rows).T
else:
dum_y = np.repeat(dum[0], repeats=rows)
dum_yp = np.repeat(dum[1], repeats=rows)
out_y = dum_y.reshape(cols,rows).T
out_yp = dum_yp.reshape(cols,rows).T
else:
raise ValueError('Array input does not have the form '
'XX or YY from XX,YY = np.meshgrid(...).')
if outputdimensions == 1:
return out_y
else:
return out_y,out_yp
if __name__ == "__main__":
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
def fun(t,y):
return [y[1],-6*y[1]-8*y[0]]
def exact(t,y0):
A = - (y0[1]+ 2*y0[0])/2.0
B = (y0[1]+4*y0[0])/2.0
return [A*np.exp(-4*t) + B*np.exp(-2*t),
-4*A*np.exp(-4*t) - 2*B*np.exp(-2*t)]
t_span = (0,1)
y0 = (3.0,-4.0)
sol = solve_ivp(fun,t_span,y0,dense_output=True).sol
ts = np.linspace(t_span[0],t_span[1],num=101,endpoint=True)
TT,QQ = np.meshgrid(ts,ts)
plt.plot(QQ[:,0],meshODE(QQ,sol)[0][:,0],'ro')
plt.plot(TT[0,:],meshODE(TT,sol)[0][0,:],'bo')
plt.plot(ts,exact(ts,y0)[0],'k--')
plt.show()
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/meshode.py
| 0.681939 | 0.739834 |
meshode.py
|
pypi
|
import numpy as np
import scipy.special as special
from .effectivepotential import twobody_value, twobody_derivative
from .effectivepotential import threebody_value, threebody_derivative_u
class wLowDt():
"""
Evaluate the w(r) function when D_t goes to 0 (is much smaller than D_r*sigma**2)
Attributes
----------
    Vderiv : callable
        derivative of the conservative pair potential, taken from the
        potential class passed to the constructor.
    Vdoublederiv : callable
        second derivative of the conservative pair potential.
    cutoff : float (optional)
        distance beyond which w(r) and w'(r) are set to zero. Default is 1.
Methods
-------
__init__(self,potentialclass)
Initialise attributes.
w(self,r)
Compute the solution to the ode mentioned in the class doc.
w_prime(self,r)
Compute the derivative of the solution to the ode mentioned in
the class doc.
"""
def __init__(self,potentialclass,cutoff=1):
"""
Initialise attributes.
Parameters
----------
potentialclass : potential
class with potential that has attribute self.V_derivative(r) and
self.V_doublederivative(r)
"""
        self.Vderiv = potentialclass.V_derivative
        self.Vdoublederiv = potentialclass.V_doublederivative
        # Keep the potential value itself as well, since Effective2Bod below
        # needs it (assumes the potential class also exposes V_value).
        self.Vvalue = potentialclass.V_value
        self.cutoff = cutoff
        return
def w(self,r):
"""
Compute the solution to the ode mentioned in the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function.
"""
return np.where(r<self.cutoff,-self.Vderiv(r)/(2*r),0)
def w_prime(self,r):
"""
Compute the derivative of the solution to the ode mentioned in
the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function derivative.
"""
return np.where(r<self.cutoff,-self.Vdoublederiv(r)/(2*r)+self.Vderiv(r)/(2*r*r),0)
def Effective2Bod(self,r,fp):
"""
Compute the effective two-body interaction.
Parameters
----------
r : float or np.array
Distance between particles.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Value of the effective interaction at r.
"""
        return twobody_value(r, fp, self.w, self.Vvalue)
def Effective2Bod_prime(self,r,fp):
"""
Compute the derivative of the effective
two-body interaction.
Parameters
----------
r : float or np.array
Distance between particles.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Derivative of the effective interaction at r.
"""
        return twobody_derivative(r, fp, self.w,
                                  self.w_prime,
                                  self.Vderiv)
def Effective3Bod(self,u,v,fp):
"""
Compute the effective three-body interaction aside
from a cosine factor.
Parameters
----------
u : float or np.array
Distance between particles 1 and 2.
v: float or np.array
Distance between particles 1 and 3.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Value of the effective interaction at u,v.
"""
        return threebody_value(u, v, fp, self.w)
def Effective3Bod_prime_u(self,u,v,fp):
"""
Compute the effective three-body interaction aside
from a cosine factor.
Parameters
----------
u : float or np.array
Distance between particles 1 and 2.
v: float or np.array
Distance between particles 1 and 3.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Derivative of the effective interaction at u,v.
"""
        return threebody_derivative_u(u, v, fp,
                                      self.w, self.w_prime)
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/wlowDt.py
| 0.891832 | 0.732687 |
wlowDt.py
|
pypi
|
import numpy as np
"""
Calculate effective potentials for ABP system, given a
specific w-function.
Methods
-------
twobody_value(r)
twobody_derivative(r)
threebody_value(u,v)
threebody_derivative_u(u,v)
"""
def twobody_value(r,fp,w,V_value):
"""
Compute effective two-body potential.
Parameters
----------
r : float or np.array
Position to compute effective two-body potential at.
fp : float
Self-propulsion force.
w : scalar function with call signature w(r)
Calculated via the perturbative FP equation.
V_value : scalar function with call signature V_value(r)
Conservative potential (e.g. WCA interaction).
Returns
-------
out : float or np.array
out is V_value(r)-0.5*fp**2*r**2*w(r)**2 when r<2**(1./6.),
and -0.5*fp**2*r**2*w(r)**2 otherwise.
"""
out2 = -0.5*fp**2*w(r)**2*r**2
out1 = V_value(r) + out2
return np.where(r<2**(1./6.),out1,out2)
def twobody_derivative(r,fp,w,w_prime,V_derivative):
"""
Compute derivative of effective two-body potential.
Parameters
----------
r : float or np.array
Position to compute effective two-body potential at.
fp : float
Self-propulsion force.
w : scalar function with call signature w(r)
Calculated via the perturbative FP equation.
w_prime : scalar function with call signature w_prime(r)
Derivative of w(r).
V_derivative : scalar function with with call signature V_derivative(r)
Derivative of V_value(r).
Returns
-------
out : float or np.array
out is V_derivative(r) - fp**2*(r*w(r)**2 + r**2*w(r)*w_prime(r))
when r < 2**(1./6.), and
-fp**2*(r*w(r)**2 + r**2*w(r)*w_prime(r)) otherwise.
"""
out2 = -fp**2*(r*w(r)**2 + r**2*w(r)*w_prime(r))
out1 = V_derivative(r) + out2
return np.where(r<2**(1./6.),out1,out2)
def threebody_value(u,v,fp,w):
"""
Compute effective three-body potential without the
cosine factor.
Parameters
----------
u : float or np.array
Position of first particle
v : float or np.array
Position of second particle
fp : float
Self-propulsion force.
w : scalar function with call signature w(r)
Calculated via the perturbative FP equation.
Returns
-------
out : float or np.array
out is -3./2.*fp**2*u*v*w(u)*w(v)
"""
return -3./2.*fp**2*u*v*w(u)*w(v)
def threebody_derivative_u(u,v,fp,w,w_prime):
"""
    Compute the derivative with respect to u of the effective three-body
    potential (without the cosine factor).
Parameters
----------
u : float or np.array
Position of first particle
v : float or np.array
Position of second particle
fp : float
Self-propulsion force.
w : scalar function with call signature w(r)
Calculated via the perturbative FP equation.
w_prime : scalar function with call signature w_prime(r)
Derivative of w(r).
Returns
-------
out : float or np.array
out is -3./2.*fp**2*(v*w(u)*w(v) + u*v*w_prime(u)*w(v)).
"""
return -3./2.*fp**2*(v*w(u)*w(v) + u*v*w_prime(u)*w(v))
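# A minimal, self-contained demo in the style of the other modules in this
# package; the exponential w below is a toy stand-in (in practice w and
# w_prime come from one of the package's w* classes), used only to show the
# call signatures.
if __name__ == "__main__":
    def toy_w(r):
        # toy decay, not a physical w-function
        return np.exp(-r)

    def toy_w_prime(r):
        return -np.exp(-r)

    rs = np.linspace(1.0, 2.0, 5)
    print(threebody_value(rs, 1.1, fp=2.0, w=toy_w))
    print(threebody_derivative_u(rs, 1.1, fp=2.0, w=toy_w, w_prime=toy_w_prime))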
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/effectivepotential.py
| 0.917307 | 0.740667 |
effectivepotential.py
|
pypi
|
import numpy as np
import scipy.special as special
from scipy.integrate import solve_ivp, solve_bvp
from scipy.interpolate import interp1d
from .trimerbc import shiftedLJ
from .effectivepotential import twobody_value,twobody_derivative
from .meshode import meshODE
from .whardsphere import wHardSphere
class wXi(shiftedLJ):
"""
Evaluate the average w(r) function which satisfies the
stochastic differential equation
w''(r) + (3/r - V'(r)-xi)w'(r)
- (Pi/2 + V'(r)/r+xi/r)*w(r) = V'(r)/(2r), (1)
with Pi being a constant, and xi being a random variable,
which is assumed to be Gaussian.
w(r) satisfies the boundary conditions w(r=infty)=0 and
r_0 w'(r_0) + w(r_0) = -1/denom, (2)
where r_0 = 2**(1./6.) to first order in f_P, and denom SHOULD
be 3.0 but could be treated as a fitting parameter.
Child class of shiftedLJ.
Attributes
----------
xi_variance : float
variance of the distribution for generating xi values.
xis : float
list of all xi samples used to generate average w and w'
Pi : float (optional)
value of the parameter Pi in eqn 1 above. Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
rinfty : float (optional)
large r value which approximates infinity in the BVP for w.
Default value is 60.
bvpmesh : float (optional)
number of mesh points between r0 and rinfty. Default is 10001.
rlowlim : float (optional)
lower value of w(r) domain. Default is 0.7.
noisetype : string (optional)
choose the type of noise used to generate xi values. Only
acceptable inputs are "gaussian" or "uniform". Default
is "gaussian".
num_rns : int (optional)
number of times to sample xi from distribution. Default is 1000.
as well as inherited attributes epsilon from parent class.
Methods
-------
__init__(self,xi_variance,epsilon=1,Pi=3.0,denom=3.0,
rinfty = 60)
Initialise attributes.
w(self,r)
Compute the solution to the ode mentioned in the class doc.
w_prime(self,r)
Compute the derivative of the solution to the ode mentioned in
the class doc.
"""
def __init__(self,xi_variance,epsilon=1,Pi=3.0,denom=3.0,
rinfty = 60,bvpmesh=10001,rlowlim=0.7,
noisetype="gaussian",
num_rns=1000):
"""
Initialise attributes.
Parameters
----------
xi_variance: float
variance of the distribution for generating xi values.
epsilon : float (optional)
strength of the potential V(r). Default value is 1.
Pi : float (optional)
value of the parameter Pi (=D^r*sigma**2/D_t).
Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
rinfty : float (optional)
large r value which approximates infinity in the BVP for w.
Default value is 60.
        bvpmesh : int (optional)
number of mesh points between r0 and rinfty. Default is 10001.
rlowlim : float (optional)
lower value of w(r) domain. Default is 0.7.
noisetype : string (optional)
choose the type of noise used to generate xi values. Only
acceptable inputs are "gaussian" or "uniform". Default
is "gaussian".
num_rns : int (optional)
number of times to sample xi from distribution. Default is 1000.
"""
super().__init__(epsilon=epsilon)
self.xivar = xi_variance
self.Pi = Pi
self.r0 = 2**(1./6.)
self.denom = denom
self.rinfty = rinfty
self.bvpmesh = bvpmesh
self.rlowlim = rlowlim
self.noisetype = noisetype
self.num_rns = num_rns
self.hs = wHardSphere(epsilon=epsilon,Pi=Pi,denom=denom)
        # choose number of points to sample for r < r0 (does not
# affect solution convergence).
self.ivppoints = 1001
# compute interpolations for w and w' right away to avoid
# having to compute every time e.g. self.w(rs) is called.
self.__interp_y,self.__interp_prime = self.__find_av_w()
return
def __ivpfun(self,r,y,xi):
"""
RHS of y' = f(r,y) for IVP ODE to be solved, with y[0] = w, y[1] = w'.
Parameters
----------
r : array-like
independent variable (distance between particles). MUST BE
LESS THAN 2**(1./6.).
y : array-like
dependent variables with y[0] = w, y[1] = w'.
xi: float
xi value.
Returns
------
fun : np.array
RHS of y' = f(r,y) (ODE at r) for r < 2**(1./6.).
"""
        # note the V'(r)/r factor in the w-coefficient, matching eqn (1)
        # in the class docstring (cf. wPerturb.__fun)
        return np.vstack((y[1],-(3/r -self.V_derivative(r) -xi)*y[1]
                          + (self.Pi/2 + self.V_derivative(r)/r + xi/r)*y[0]
                          + self.V_derivative(r)/(2*r)))
def __bvpfun(self,r,y,xi):
"""
RHS of y' = f(r,y) for BVP ODE to be solved, with y[0] = w, y[1] = w'.
Parameters
----------
r : array-like
independent variable (distance between particles). MUST BE
GREATER THAN 2**(1./6.).
y : array-like
dependent variables with y[0] = w, y[1] = w'.
xi: float
xi value.
Returns
------
fun : np.array
RHS of y' = f(r,y) (ODE at r) for r > 2**(1./6.).
"""
return np.vstack((y[1],-(3/r -xi)*y[1] + (self.Pi/2 + xi/r)*y[0]))
def __bvpfun_jac(self,r,y,xi):
"""
Jacobian of f(r,y) where y' = f(r,y) for BVP ODE to be solved,
with y[0] = w, y[1] = w'.
Parameters
----------
r : array-like
independent variable (distance between particles). MUST BE
GREATER THAN 2**(1./6.).
y : array-like
dependent variables with y[0] = w, y[1] = w'.
xi: float
xi value.
Returns
------
jac : np.array
Jacobian of BVP ODE.
"""
return np.array([[r*0,r*0+1],[self.Pi/2 + xi/r,-(3/r -xi)]])
def __bc(self,ya,yb):
"""
Boundary conditions at self.r0 and self.rinfty, the former
being r0*w'(r0) + w(r0) + 1/self.denom = 0, the latter
being w(rinfty) = 0.
Parameters
----------
ya : array-like
dependent variables at r0.
yb : array-like
dependent variables at rinfty.
Returns
------
bc : np.array
boundary conditions at the two endpoints.
"""
return np.array([self.r0*ya[1]+ya[0]+1./self.denom,yb[0]])
def __bc_jac(self,ya,yb):
"""
The boundary condition Jacobians at self.r0 and self.rinfty.
Parameters
----------
ya : array-like
dependent variables at r0.
yb : array-like
dependent variables at rinfty.
Returns
------
bc_jacs : list
boundary condition Jacobians at the two endpoints.
"""
return [np.array([[1,self.r0],[0,0]]),np.array([[0,0],[1,0]])]
def __above_r0(self,xi):
"""
Solve the BVP for r > 2**(1./6.) at a specified xi value.
Parameters
----------
xi: float
xi value.
Returns
-------
wr : array
values of w on meshpoints for r >= 2**(1./6.)
"""
f0 = lambda r,y : self.__bvpfun(r,y,xi)
rs = np.linspace(self.r0,self.rinfty,num=self.bvpmesh,
endpoint=True)
# use hard sphere values as guess.
ys = np.array([self.hs.w(rs),self.hs.w_prime(rs)])
f0_jac = lambda r,y : self.__bvpfun_jac(r,y,xi)
res = solve_bvp(f0,self.__bc,rs,ys,fun_jac = f0_jac,bc_jac=self.__bc_jac)
return res.sol(rs)
def __below_r0(self,xi,y0):
"""
Solve the IVP for r < 2**(1./6.) at a specified xi value.
Parameters
----------
xi: float
xi value.
y0 : array like
array of initial values w(r0) and w'(r0).
Returns
-------
wr : array
values of w on ivppoints for r <= 2**(1./6.)
"""
t_span = (self.r0,self.rlowlim)
rs = np.linspace(self.r0,self.rlowlim,num=self.ivppoints,
endpoint=True)
res = solve_ivp(self.__ivpfun,t_span,y0,args=(xi,),
t_eval = rs,
vectorized=True,dense_output=True)
return res.y[:,::-1]
def __find_av_w(self):
"""
Find the average value of w(r) by sampling a num_rns
randomly generated xi values from the rng.
Returns
-------
interp_y : interp1d object
interpolation of w in range rlowlim to rinfty.
interp_prime : interp1d object
interpolation of w' in range rlowlim to rinfty.
"""
# generate a sample of xi values
if self.noisetype == "uniform":
            xis = np.random.uniform(-0.5,0.5,size=self.num_rns)
            # uniform on [-0.5,0.5] has variance 1/12, so scale by
            # sqrt(12*xi_variance) to obtain the requested variance
            xis = xis*np.sqrt(12*self.xivar)
elif self.noisetype == "gaussian":
xis = np.random.normal(loc=0.0,scale=np.sqrt(self.xivar),
size=self.num_rns)
else:
raise ValueError("noisetype variable must be either "
"'gaussian' or 'uniform'")
self.xis = xis
# these arrays will store the solutions w and w'
av_uppers = np.zeros([2,self.bvpmesh],float)
av_lowers = np.zeros([2,self.ivppoints],float)
# calculate w and w' for each xi, then average
for xi in xis:
tmp = self.__above_r0(xi)
av_uppers += tmp
y0 = tmp[:,0]
av_lowers += self.__below_r0(xi,y0)
av_uppers /= self.num_rns
av_lowers /= self.num_rns
# generate x,y data for building interpolations via concatenation
rlows = np.linspace(self.rlowlim,self.r0,num=self.ivppoints,
endpoint=True)
rupps = np.linspace(self.r0,self.rinfty,num=self.bvpmesh,
endpoint=True)
rs = np.concatenate((rlows,rupps[1:]))
ys = np.concatenate((av_lowers[0,:],av_uppers[0,1:]))
yprimes = np.concatenate((av_lowers[1,:],av_uppers[1,1:]))
return (interp1d(rs,ys,fill_value="extrapolate"),
interp1d(rs,yprimes,fill_value="extrapolate"))
def w(self,r):
"""
Compute the solution to the ode mentioned in the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function.
"""
r = np.asarray(r)
if r.ndim == 2:
out = meshODE(r,self.__interp_y)
else:
out = self.__interp_y(r)
return out
def w_prime(self,r):
"""
Compute the derivative of the solution to the ode mentioned in
the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function derivative.
"""
r = np.asarray(r)
if r.ndim == 2:
out = meshODE(r,self.__interp_prime)
else:
out = self.__interp_prime(r)
return out
def Effective2Bod(self,r,fp):
"""
Compute the effective two-body interaction.
Parameters
----------
r : float or np.array
Distance between particles.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Value of the effective interaction at r.
"""
return twobody_value(r,fp,self.w,self.V_value)
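if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): build a wXi
    # instance with a small number of noise samples and compare the averaged
    # w(r) with the deterministic hard-sphere solution used as the BVP guess.
    # Parameter values below are arbitrary examples.
    import matplotlib.pyplot as plt
    wxi = wXi(xi_variance=0.1, epsilon=1.0, num_rns=50)
    rs = np.linspace(1.0, 4.0, num=301)
    plt.plot(rs, wxi.w(rs), 'r-', label='noise-averaged w(r)')
    plt.plot(rs, wxi.hs.w(rs), 'k--', label='hard-sphere w(r)')
    plt.xlabel('r')
    plt.ylabel('w(r)')
    plt.legend()
    plt.show()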
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/wxi.py
| 0.836521 | 0.727044 |
wxi.py
|
pypi
|
import numpy as np
import scipy.optimize as optimize
class shiftedLJ():
"""
Simple class which allows for evaluation of Lennard-Jones (LJ)
potential with value of 0 at 2**(1./6.). For
r<2**(1./6.), this potential is equivalent to the
Weeks-Chandler-Anderson potential. Note that length
is measured in units of sigma. The explicit form of the
potential is
V(r) = epsilon*(4*((r)**(-12)-(r)**(-6))+1).
Attributes
----------
epsilon : float (optional)
value of the potential strength. Default is 1.
Methods
-------
__init__(self,epsilon=1)
Initialise attributes.
V_value(self,r)
Compute value of potential at r (could be array or scalar).
V_derivative(self,r)
Compute derivative of potential at r (could be array or
scalar).
"""
def __init__(self,epsilon=1):
"""
Initialise potential strength epsilon.
Parameters
----------
epsilon : float (optional)
value of the potential strength. Default is 1.
"""
self.epsilon = epsilon
return
def V_value(self,r):
"""
Compute value of shiftedLJ potential at r.
Parameters
----------
r : float or np.array
distance between particles.
Returns
-------
Value of shiftedLJ potential
"""
return self.epsilon*(4*((r)**(-12)-(r)**(-6))+1)
def V_derivative(self,r):
"""
Compute derivative value of shiftedLJ potential at r.
Parameters
----------
r : float or np.array
distance between particles.
Returns
-------
Derivative value of shiftedLJ potential
"""
return -24*self.epsilon*(2*r**(-13)-r**(-7))
def V_doublederivative(self,r):
"""
Compute second derivative value of shiftedLJ potential at r.
Parameters
----------
r : float or np.array
distance between particles.
Returns
-------
Derivative value of shiftedLJ potential
"""
        # V''(r) = 24*epsilon*(26*r**(-14) - 7*r**(-8))
        return 24*self.epsilon*(26/r**14-7/r**8)
class expPot():
"""
    Simple class implementing a smooth, compactly supported
    (bump-function) potential. Note that length
    is measured in units of 'a'. The explicit form of the
    potential is
    V(r) = epsilon*exp(-1/(1-(r/a)**2)) for r < a, and 0 otherwise.
    Attributes
    ----------
    epsilon : float (optional)
        value of the potential strength. Default is 500.
Methods
-------
__init__(self,epsilon=1)
Initialise attributes.
V_value(self,r)
Compute value of potential at r (could be array or scalar).
V_derivative(self,r)
Compute derivative of potential at r (could be array or
scalar).
"""
def __init__(self,epsilon=500):
"""
Initialise potential strength epsilon.
Parameters
----------
epsilon : float (optional)
            value of the potential strength. Default is 500.
"""
self.epsilon = epsilon
return
def V_value(self,r):
"""
Compute value of expPot potential at r.
Parameters
----------
r : float or np.array
distance between particles.
Returns
-------
Value of expPot potential
"""
r = np.asarray(r)
mask = (r < 1.0)
x = np.empty_like(r)
x[mask] = self.epsilon*np.exp(-1/(1-r[mask]**2))
x[~mask] = 0.0
return x
def V_derivative(self,r):
"""
Compute derivative value of expPot potential at r.
Parameters
----------
r : float or np.array
distance between particles.
Returns
-------
Derivative value of expPot potential
"""
r = np.asarray(r)
mask = (r < 1.0)
x = np.empty_like(r)
x[mask] = -2*self.epsilon*r[mask]*np.exp(-1/(1-r[mask]**2))/(1-r[mask]**2)**2
x[~mask] = 0.0
return x
def V_doublederivative(self,r):
"""
Compute second derivative value of expPot potential at r.
Parameters
----------
r : float or np.array
distance between particles.
Returns
-------
Derivative value of expPot potential
"""
r = np.asarray(r)
mask = (r < 1.0)
x = np.empty_like(r)
        # V''(r) = -2*epsilon*(1-3*r**4)*exp(-1/(1-r**2))/(1-r**2)**4 for r < 1
        x[mask] = -2*self.epsilon*(1-3*r[mask]**4)*np.exp(-1/(1-r[mask]**2))/(1-r[mask]**2)**4
x[~mask] = 0.0
return x
class TrimerBC(shiftedLJ):
"""
Class to compute the zero-force distance r0 for a trimer of active
brownian particles in an equilateral configuration. The
competition between active and passive forces allows for
this configuration to be stable. Therefore, both epsilon
(potential strength) and fp (active force) must be non-zero.
r0 satisfies
fp + sqrt(3)*V'(r0) = 0. (1)
The potential well is a shifted LJ potential, with the form
V(r) = epsilon*(4*((r)**(-12)-(r)**(-6))+1), r < 2**(1./6.), (2)
and zero otherwise. This potential is inherited from the parent
class shiftedLJ().
Child class of shiftedLJ.
Attributes
----------
fp : float
value of active force
r0 : float
zero-force distance between particles.
as well as inherited attribute epsilon from parent class.
Methods
-------
__init__(self,epsilon=1)
Initialise fp and compute r0.
as well as inherited methods V_value and V_derivative from
parent class.
"""
def __init__(self,fp,epsilon=1):
"""
Initialise fp and compute r0.
Parameters
----------
fp : float
active force strength.
epsilon : float
potential well strength.
"""
super().__init__(epsilon=epsilon)
self.fp = fp
self.__a0 = self.__a_0()
if fp <= 0:
raise ValueError("Active force must be greater than zero.")
self.r0 = self.__r_0()
return
def __a_0(self):
"""
Returns
-------
a0 : float
Dimensionless constant fp/(24*np.sqrt(3)*epsilon).
"""
return self.fp/(24*np.sqrt(3)*self.epsilon)
def __r_0_large_a_0(self):
"""
Returns
-------
r_0 : float
Approximate value of r0 when fp >> epsilon.
"""
q = (2**(7./6.)*self.__a0)**(-1/13)
p = (2**(7./6.)*self.__a0)**(-6/13)
f7 = 13-7*p
g4 = 13-4*p
return 2**(1./6.)*q*(1+1/14*f7/g4*(1-np.sqrt(1+28*p*g4/f7**2)))
def __r_0_small_a_0(self):
"""
Returns
-------
r_0 : float
Approximate value of r0 when fp << epsilon.
"""
return 2**(1./6.)*(1+1/21*(1-np.sqrt(1+7*(2**(7./6.)*self.__a0))))
def __r_0_approx(self):
"""
Returns
-------
r_0 : float
Approximate value of r0 to first order in either fp/epsilon
or epsilon/fp depending on which is the small parameter.
"""
if self.__a0 < 2**(-7./6.):
r0 = self.__r_0_small_a_0()
else:
r0 = self.__r_0_large_a_0()
return r0
def __forcebalance(self,r):
"""
Compute force on a trimer of particles in an equilateral configuration,
where the force is given by
fp + sqrt(3)*V'(r0).
Returns
-------
force : float
force on trimer
"""
return self.fp + np.sqrt(3)*self.V_derivative(r)
def __r_0(self):
"""
Compute the distance between particles in an equilateral trimer
configuration where the force is zero.
Returns
-------
r0 : float
distance between particles in the trimer where force is zero.
"""
r0 = self.__r_0_approx()
if abs(r0-2**(1./6.))>1e-14:
r0 = optimize.newton(self.__forcebalance,r0)
return r0
if __name__ == "__main__":
# test shiftedLJ derivative
import matplotlib.pyplot as plt
import numpy as np
rs = np.logspace(-0.2,0.2,num=10000,endpoint=True)
cp = shiftedLJ()
fig,axarr = plt.subplots(3,sharex=True)
fig.set_size_inches(4,4*2)
axarr[0].plot(rs,cp.V_value(rs))
true_p = cp.V_derivative(rs)
num_p = np.gradient(cp.V_value(rs),rs)
axarr[1].plot(rs,true_p,'.')
axarr[1].plot(rs,num_p,'k-')
axarr[2].plot(rs[1:],np.abs(true_p-num_p)[1:],'o')
axarr[2].set_yscale('log')
plt.show()
# test shift
fp = 1.0
tr = TrimerBC(fp)
fig,ax = plt.subplots()
fig.set_size_inches(4,4)
ax.plot(rs,tr._TrimerBC__forcebalance(rs))
ax.plot(tr.r0,tr._TrimerBC__forcebalance(tr.r0),'ko')
plt.show()
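    # Additional illustrative check (not in the original file): verify the
    # expPot derivative against a finite-difference approximation.
    ep = expPot(epsilon=500)
    rs_bump = np.linspace(0.01, 0.99, num=5001)
    fd = np.gradient(ep.V_value(rs_bump), rs_bump)
    print("max |V_derivative - finite difference| =",
          np.max(np.abs(ep.V_derivative(rs_bump) - fd)))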
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/trimerbc.py
| 0.942784 | 0.852045 |
trimerbc.py
|
pypi
|
import numpy as np
import scipy.special as special
from scipy.integrate import solve_ivp, solve_bvp
from .trimerbc import expPot
from .effectivepotential import twobody_value,twobody_derivative
from .meshode import meshODE
class wdiffPot(expPot):
"""
Evaluate the w(r) function which satisfies the differential
equation
w''(r) + (3/r - V'(r))w'(r) - (Pi/2 + V'(r)/r)*w(r)
= V'(r)/(2r), (1)
with Pi being a constant. w(r) satisfies the boundary conditions
w(r=infty)=0 and
r_0 w'(r_0) + w(r_0) = -1/denom, (2)
where r_0 = 1 to first order in f_P, and denom SHOULD
be 3.0 but could be treated as a fitting parameter.
The full details of the calculation can be found in the doc/
folder, but essentially, eqn (1) with eqn (2) can
be written as integrals over bessel functions. This class then
computes these integrals using some help from special functions.
Child class of expPot.
Attributes
----------
Pi : float (optional)
value of the parameter Pi in eqn 1 above. Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
as well as inherited attributes epsilon from parent class.
Methods
-------
__init__(self,epsilon=1,Pi=3.0,denom=3.0)
Initialise attributes.
w(self,r)
Compute the solution to the ode mentioned in the class doc.
w_prime(self,r)
Compute the derivative of the solution to the ode mentioned in
the class doc.
"""
def __init__(self,epsilon=1,Pi=3.0,denom=3.0):
"""
Initialise attributes.
Parameters
----------
epsilon : float (optional)
strength of the potential V(r). Default value is 1.
Pi : float (optional)
value of the parameter Pi (=D^r*sigma**2/D_t).
Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
"""
super().__init__(epsilon=epsilon)
self.Pi = Pi
self.r0 = 1.0
self.denom = denom
pre = np.sqrt(Pi/2.0)
tw = self.r0
# storing a couple local variables to reduce func evaluations.
self.__k1_tw = special.k1(pre*tw)
self.__k1p_tw = special.kvp(1,pre*tw)
self.__c0 = -1./(self.denom*np.sqrt(self.Pi/2.))/special.kvp(1,np.sqrt(self.Pi/2.)*self.r0,1)
self.__w_lower = self.__below_r0()
return
def __fun(self,r,y):
"""
RHS of y' = f(r,y) for ODE to be solved, with y[0] = w, y[1] = w'.
Parameters
----------
r : array-like
independent variable (distance between particles). MUST BE
LESS THAN r0.
y : array-like
dependent variables with y[0] = w, y[1] = w'.
Returns
------
fun : np.array
RHS of y' = f(r,y) (ODE at r).
"""
y0 = y[0]
y1 = y[1]
y0p = y1
y1p = (-3*y1/r+self.Pi/2.*y0+
+self.V_derivative(r)*(y1+y0/r+0.5/r))
return np.vstack((y0p,y1p))
def __y0(self):
"""
At r = r0, continuity enforces that the prefactor c0
of the r > r0 solution to the ODE, namely,
w_{+}(r) = special.k1(np.sqrt(self.Pi/2.)*r)/r), be related
to the prefactors of the solution w_{-}(r) for r<r0.
This function enforces that relationship for arbitrary c0.
Returns
-------
out : list
Values of w_{-}(r=r0) and w'_{-}(r=r0).
"""
tw = self.r0
pre = np.sqrt(self.Pi/2.0)
y0 = self.__c0*special.k1(pre*tw)/tw
y1 = self.__c0*(pre*special.kvp(1,pre*tw)/tw-special.k1(pre*tw)/tw**2)
return [y0,y1]
def __below_r0(self,rf = 0.7):
y0 = self.__y0()
t_span = (self.r0,rf)
        # integrate inward from r0 using the matching values at r0
res = solve_ivp(self.__fun,t_span,y0,
vectorized=True,dense_output=True)
return res.sol
def __w_minus_both(self,r):
r = np.asarray(r)
if r.ndim == 2:
lowout = meshODE(r,self.__w_lower)
else:
lowout = self.__w_lower(r)
return lowout
def __w_plus(self,r):
return self.__c0*special.k1(np.sqrt(self.Pi/2.)*r)/r
def __w_plus_prime(self,r):
sPi = np.sqrt(self.Pi/2.)
return (self.__c0*sPi*special.kvp(1,sPi*r,1)/r
- self.__w_plus(r)/r)
def w(self,r):
"""
Compute the solution to the ode mentioned in the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function.
"""
return np.where(r>=self.r0,self.__w_plus(r),
self.__w_minus_both(r)[0])
def w_prime(self,r):
"""
Compute the derivative of the solution to the ode mentioned in
the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function derivative.
"""
a = np.where(r>=self.r0,self.__w_plus_prime(r),
self.__w_minus_both(r)[1])
return a
def Effective2Bod(self,r,fp):
"""
Compute the effective two-body interaction.
Parameters
----------
r : float or np.array
Distance between particles.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Value of the effective interaction at r.
"""
return twobody_value(r,fp,self.w,self.V_value)
if __name__ == "__main__":
import matplotlib.pyplot as plt
from sambristol_ssabp_w import wBessel
from sambristol_ssabp_w import wPerturb
# solve ODE using class defined above
fp = 1.0
epsilon = 5.0
    ss = wBessel(epsilon=epsilon)
    wperturb = wPerturb(epsilon=epsilon)
    wexpPot = wdiffPot(epsilon=epsilon)
    rs = np.linspace(1.0,1.2,num=201)
    plt.plot(rs,wperturb.Effective2Bod(rs,fp),'ro-')
    plt.plot(rs,ss.Effective2Bod(rs),'k--')
    plt.plot(rs,wexpPot.Effective2Bod(rs,fp),'b:')
    plt.show()
    plt.plot(rs,twobody_derivative(rs,fp,wperturb.w,wperturb.w_prime,
                                   wperturb.V_derivative),'ro-')
    plt.plot(rs,twobody_derivative(rs,fp,ss.w,ss.w_prime,
                                   ss.V_derivative),'ko-')
plt.show()
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/wdiffpot.py
| 0.829734 | 0.575021 |
wdiffpot.py
|
pypi
|
import numpy as np
import scipy.special as special
from scipy.integrate import solve_ivp, solve_bvp
from .trimerbc import shiftedLJ
from .effectivepotential import twobody_value,twobody_derivative
from .meshode import meshODE
class wPerturb(shiftedLJ):
"""
Evaluate the w(r) function which satisfies the differential
equation
w''(r) + (3/r - V'(r))w'(r) - (Pi/2 + V'(r)/r)*w(r)
= V'(r)/(2r), (1)
with Pi being a constant. w(r) satisfies the boundary conditions
w(r=infty)=0 and
r_0 w'(r_0) + w(r_0) = -1/denom, (2)
where r_0 = 2**(1./6.) to first order in f_P, and denom SHOULD
be 3.0 but could be treated as a fitting parameter.
The full details of the calculation can be found in the doc/
folder, but essentially, eqn (1) with eqn (2) can
be written as integrals over bessel functions. This class then
computes these integrals using some help from special functions.
Child class of shiftedLJ.
Attributes
----------
Pi : float (optional)
value of the parameter Pi in eqn 1 above. Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
as well as inherited attributes epsilon from parent class.
Methods
-------
__init__(self,epsilon=1,Pi=3.0,denom=3.0)
Initialise attributes.
w(self,r)
Compute the solution to the ode mentioned in the class doc.
w_prime(self,r)
Compute the derivative of the solution to the ode mentioned in
the class doc.
"""
def __init__(self,epsilon=1,Pi=3.0,denom=3.0):
"""
Initialise attributes.
Parameters
----------
epsilon : float (optional)
strength of the potential V(r). Default value is 1.
Pi : float (optional)
value of the parameter Pi (=D^r*sigma**2/D_t).
Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
"""
super().__init__(epsilon=epsilon)
self.Pi = Pi
self.r0 = 2**(1./6.)
self.denom = denom
pre = np.sqrt(Pi/2.0)
tw = 2**(1./6.)
# storing a couple local variables to reduce func evaluations.
self.__k1_tw = special.k1(pre*tw)
self.__k1p_tw = special.kvp(1,pre*tw)
self.__c0 = -1./(self.denom*np.sqrt(self.Pi/2.))/special.kvp(1,np.sqrt(self.Pi/2.)*self.r0,1)
self.__w_lower = self.__below_r0()
return
def __fun(self,r,y):
"""
RHS of y' = f(r,y) for ODE to be solved, with y[0] = w, y[1] = w'.
Parameters
----------
r : array-like
independent variable (distance between particles). MUST BE
LESS THAN 2**(1./6.).
y : array-like
dependent variables with y[0] = w, y[1] = w'.
Returns
------
fun : np.array
RHS of y' = f(r,y) (ODE at r).
"""
y0 = y[0]
y1 = y[1]
y0p = y1
y1p = (-3*y1/r+self.Pi/2.*y0+
+self.V_derivative(r)*(y1+y0/r+0.5/r))
return np.vstack((y0p,y1p))
def __y0(self):
"""
At r = 2**(1./6.), continuity enforces that the prefactor c0
of the r > 2**(1./6.) solution to the ODE, namely,
w_{+}(r) = special.k1(np.sqrt(self.Pi/2.)*r)/r), be related
to the prefactors of the solution w_{-}(r) for r<2**(1./6.).
This function enforces that relationship for arbitrary c0.
Returns
-------
out : list
Values of w_{-}(r=2**(1./6.)) and w'_{-}(r=2**(1./6.)).
"""
tw = 2**(1./6.)
pre = np.sqrt(self.Pi/2.0)
y0 = self.__c0*special.k1(pre*tw)/2**(1./6.)
y1 = self.__c0*(pre*special.kvp(1,pre*tw)/tw-special.k1(pre*tw)/tw**2)
return [y0,y1]
def __below_r0(self,rf = 0.7):
y0 = self.__y0()
t_span = (self.r0,rf)
        # integrate inward from r0 using the matching values at r0
res = solve_ivp(self.__fun,t_span,y0,
vectorized=True,dense_output=True)
return res.sol
def __w_minus_both(self,r):
r = np.asarray(r)
if r.ndim == 2:
lowout = meshODE(r,self.__w_lower)
else:
lowout = self.__w_lower(r)
return lowout
def __w_plus(self,r):
return self.__c0*special.k1(np.sqrt(self.Pi/2.)*r)/r
def __w_plus_prime(self,r):
sPi = np.sqrt(self.Pi/2.)
return (self.__c0*sPi*special.kvp(1,sPi*r,1)/r
- self.__w_plus(r)/r)
def w(self,r):
"""
Compute the solution to the ode mentioned in the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function.
"""
return np.where(r>=2**(1./6.),self.__w_plus(r),
self.__w_minus_both(r)[0])
def w_prime(self,r):
"""
Compute the derivative of the solution to the ode mentioned in
the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function derivative.
"""
a = np.where(r>=2**(1./6.),self.__w_plus_prime(r),
self.__w_minus_both(r)[1])
return a
def Effective2Bod(self,r,fp):
"""
Compute the effective two-body interaction.
Parameters
----------
r : float or np.array
Distance between particles.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Value of the effective interaction at r.
"""
return twobody_value(r,fp,self.w,self.V_value)
if __name__ == "__main__":
import matplotlib.pyplot as plt
from sambristol_ssabp_w import wBessel
# solve ODE using class defined above
fp = 1.0
epsilon = 5.0
ss = wBessel(epsilon=epsilon)
wperturb = wPerturb(epsilon=epsilon)
rs = np.linspace(1.0,1.2,num=201)
    plt.plot(rs,wperturb.Effective2Bod(rs,fp),'ro-')
    plt.plot(rs,ss.Effective2Bod(rs),'k--')
    plt.show()
    plt.plot(rs,twobody_derivative(rs,fp,wperturb.w,wperturb.w_prime,
                                   wperturb.V_derivative),'ro-')
    plt.plot(rs,twobody_derivative(rs,fp,ss.w,ss.w_prime,
                                   ss.V_derivative),'ko-')
plt.show()
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/wperturb.py
| 0.81309 | 0.714827 |
wperturb.py
|
pypi
|
import numpy as np
import scipy.special as special
from .trimerbc import shiftedLJ
from .effectivepotential import twobody_value, twobody_derivative
from .effectivepotential import threebody_value, threebody_derivative_u
class wHardSphere(shiftedLJ):
"""
Evaluate the w(r) function which satisfies the differential
equation
w''(r) + 3w'(r)/r - Pi/2*w(r) = 0, (1)
with Pi being a constant. w(r) satisfies the boundary conditions
w(r=infty)=0 and
r_0 w'(r_0) + w(r_0) = -1/denom, (2)
where r_0 = 2**(1./6.) to first order in f_P, and denom SHOULD
be 3.0 but could be treated as a fitting parameter.
The above equations can be solved analytically.
Child class of shiftedLJ.
Attributes
----------
Pi : float (optional)
value of the parameter Pi in eqn 1 above. Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
as well as inherited attributes epsilon from parent class.
Methods
-------
    __init__(self,epsilon=1,Pi=3.0,denom=3.0)
Initialise attributes.
w(self,r)
Compute the solution to the ode mentioned in the class doc.
w_prime(self,r)
Compute the derivative of the solution to the ode mentioned in
the class doc.
"""
def __init__(self,epsilon=1,Pi=3.0,denom=3.0):
"""
Initialise attributes.
Parameters
----------
epsilon : float (optional)
strength of the potential V(r). Default value is 1.
Pi : float (optional)
value of the parameter Pi (=D^r*sigma**2/D_t).
Default value is 3.
denom : float (optional)
value of fitting parameter. No-fit parameter defaults to 3.
"""
super().__init__(epsilon=epsilon)
self.Pi = Pi
self.r0 = 2**(1./6.)
self.denom = denom
return
def w(self,r):
"""
Compute the solution to the ode mentioned in the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function.
"""
return np.where(r>=2**(1./6.),
-np.sqrt(2/self.Pi)*special.k1(np.sqrt(self.Pi/2.)*r)
/(self.denom*special.kvp(1,np.sqrt(self.Pi/2.)*self.r0,1)*r),
0)
def w_prime(self,r):
"""
Compute the derivative of the solution to the ode mentioned in
the class doc.
Parameters
----------
r : float or np.array
Distance between particles.
Returns
-------
value : float or np.array
Value of the w-function derivative.
"""
        # analytic derivative of w(r) = c*K1(a*r)/r with a = sqrt(Pi/2) and
        # c = -sqrt(2/Pi)/(denom*K1'(a*r0)); cf. wPerturb.__w_plus_prime
        return np.where(r>=2**(1./6.),
                        1.0/(self.denom*special.kvp(1,np.sqrt(self.Pi/2.)*self.r0,1))
                        *(-special.kvp(1,np.sqrt(self.Pi/2.)*r,1)/r
                          +np.sqrt(2/self.Pi)*special.k1(np.sqrt(self.Pi/2.)*r)/r**2),0)
def Effective2Bod(self,r,fp):
"""
Compute the effective two-body interaction.
Parameters
----------
r : float or np.array
Distance between particles.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Value of the effective interaction at r.
"""
return twobody_value(r,fp,self.w,self.V_value)
def Effective2Bod_prime(self,r,fp):
"""
Compute the derivative of the effective
two-body interaction.
Parameters
----------
r : float or np.array
Distance between particles.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Derivative of the effective interaction at r.
"""
return twobody_derivative(r,fp,self.w,
self.w_prime,
self.V_derivative)
def Effective3Bod(self,u,v,fp):
"""
Compute the effective three-body interaction aside
from a cosine factor.
Parameters
----------
u : float or np.array
Distance between particles 1 and 2.
v: float or np.array
Distance between particles 1 and 3.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Value of the effective interaction at u,v.
"""
        return threebody_value(u,v,fp,self.w)
def Effective3Bod_prime_u(self,u,v,fp):
"""
Compute the effective three-body interaction aside
from a cosine factor.
Parameters
----------
u : float or np.array
Distance between particles 1 and 2.
v: float or np.array
Distance between particles 1 and 3.
fp : float
value of the active force.
Returns
-------
value : float or np.array
Derivative of the effective interaction at u,v.
"""
        return threebody_derivative_u(u,v,fp,
                                      self.w,self.w_prime)
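if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): numerically check
    # that the analytic hard-sphere solution satisfies the boundary condition
    # r0*w'(r0) + w(r0) = -1/denom at r0 = 2**(1/6).
    hs = wHardSphere(epsilon=1.0, Pi=3.0, denom=3.0)
    r0 = hs.r0
    bc = r0 * hs.w_prime(r0) + hs.w(r0)
    print("r0*w'(r0) + w(r0) =", bc, " (expected", -1.0 / hs.denom, ")")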
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/whardsphere.py
| 0.893626 | 0.697648 |
whardsphere.py
|
pypi
|
import numpy as np
import math
from scipy.integrate import romb
class MayerInt():
"""
This class is focused on computing the integral of the first
correction in density to the radial distribution function for
the 2D active brownian particle system with effective two-body
potential V(r).
The radial distribution function has the form
g(r12) = exp(-V(r12))*(1+ rho*c_2(r12) + ...)
where
c_2(r12) = \int fmayer(r13)*fmayer(r23) d^2r_3
and fmayer(r) = (exp(-V(r))-1) is the Mayer function for
the two-body potential V(r). This class mainly outputs
the integrand fmayer(r13)*fmayer(r23), and the integral
c_2(r12) but has other features as well.
Attributes
----------
twobody : callable function with one argument (position)
two body potential to use in mayer functions.
Methods
-------
    c_2_integral(self,r12s,x1=(0.1,0.6),r3_domain=(0,3.0),
                 r3num = 513, theta3num = 513,
                 check_invariance=False,rtol=1e-5,
                 atol=1e-8,x1_inv=(0.3,0.0)):
Compute the integral c_2(r12s) from the product
of two mayer functions.
g_2(self,rs,rho)
Simple function to compute approximation to radial
distribution function accurate to first order in
density.
integrand(self,r3,theta_3,x1,x2)
Compute the integrand of c_2(r12) for polar coordinate
inputs. Can take np.array inputs and will always output a
np.array.
theta_2(self,r12,x1,r2):
Simple helper function to compute the angle coordinate
theta2 of particle 2 given that one wants the total
distance between particles 1 and 2 to be r12.
Examples
--------
This first example shows how to visualise the integrand using polar
coordinates plot.
import matplotlib.pyplot as plt
from sambristol_ssabp_w.wbessel import wBessel
fp = 0.5
epsilon = 1.0
Pi = 3.0
ss = wBessel(fp=fp,epsilon=epsilon,Pi=Pi)
mi = MayerInt(ss.Effective2Bod)
r12 = 1.0
r1 = 0.3
theta1 = 0.0
r2 = 1.0
theta2 = mi.theta_2(r12,(r1,theta1),r2)
r3s = np.linspace(0,3.0,num=513,endpoint=True)
theta3s = np.linspace(0,2*np.pi,num=513,endpoint=True)
rs,thetas = np.meshgrid(r3s,theta3s)
vals = mi.integrand(rs,thetas,(r1,theta1),(r2,theta2))
fig,ax = plt.subplots(subplot_kw=dict(projection='polar'))
s = ax.contourf(thetas,rs,vals)
fig.colorbar(s,ax=ax)
plt.show()
The following script demonstrates how one would compute the
c(r12) integral with this class.
from scipy.integrate import romb
from sambristol_ssabp_w.wbessel import wBessel
fp = 0.5
epsilon = 1.0
Pi = 3.0
ss = wBessel(fp=fp,epsilon=epsilon,Pi=Pi)
mi = MayerInt(ss.Effective2Bod)
r12 = 1.0
r1 = 0.3
theta1 = 0.0
r2 = 1.0
theta2 = mi.theta_2(r12,(r1,theta1),r2)
r3s = np.linspace(0,3.0,num=513,endpoint=True)
theta3s = np.linspace(0,2*np.pi,num=513,endpoint=True)
rs,thetas = np.meshgrid(r3s,theta3s)
vals = mi.integrand(rs,thetas,(r1,theta1),(r2,theta2))
integrals = romb(vals,dx = thetas[1,0]-thetas[0,0],axis=0)
int_final = romb(integrals,dx=rs[0,1]-rs[0,0],show=True)
print(int_final)
Note that the integral should be invariant with respect to
translations and rotations, so that as long as r12 stays
the same, the integral printed by ''print(int_final)''
should be the same. This can be confirmed by running the
following script and comparing to the above one.
from scipy.integrate import romb
from sambristol_ssabp_w.wbessel import wBessel
fp = 0.5
epsilon = 1.0
Pi = 3.0
ss = wBessel(fp=fp,epsilon=epsilon,Pi=Pi)
mi = MayerInt(ss.Effective2Bod)
r12 = 1.0
r1 = 0.1
theta1 = 0.0
r2 = 0.9
theta2 = mi.theta_2(r12,(r1,theta1),r2)
r3s = np.linspace(0,3.0,num=513,endpoint=True)
theta3s = np.linspace(0,2*np.pi,num=513,endpoint=True)
rs,thetas = np.meshgrid(r3s,theta3s)
vals = mi.integrand(rs,thetas,(r1,theta1),(r2,theta2))
integrals = romb(vals,dx = thetas[1,0]-thetas[0,0],axis=0)
int_final = romb(integrals,dx=rs[0,1]-rs[0,0],show=True)
print(int_final)
Finally, since this is all function based, one could use dbl quad
integration to compute the integral. This is usually MUCH slower
than using romb above. Example script is:
from scipy.integrate import dblquad
from scipy.integrate import romb
from sambristol_ssabp_w.wbessel import wBessel
fp = 0.5
epsilon = 1.0
Pi = 3.0
ss = wBessel(fp=fp,epsilon=epsilon,Pi=Pi)
mi = MayerInt(ss.Effective2Bod)
r12 = 1.0
r1 = 0.3
theta1 = 0.0
r2 = 1.0
theta2 = mi.theta_2(r12,(r1,theta1),r2)
func = lambda y,x : mi.integrand(x,y,(r1,theta1),(r2,theta2))
int_final = dblquad(func,0,3.0,0,2*np.pi)
print(int_final)
All three of these examples should output the same value when
calling ''print(int_final)''.
"""
def __init__(self,twobody):
"""
Initialise class attributes.
"""
self.twobody = twobody
return
def _fmayer(self,rs):
"""
        Compute the Mayer function exp(-Veff(r))-1 for the effective
        two-body potential self.twobody(r). Values at r < 0.8 are set
        to -1 (hard-core region) to avoid overflow in exp(-Veff).
Parameters
----------
rs : float or np.array
Position(s) to evaluate Mayer function at.
Returns
-------
out : np.array
Mayer function exp(-Veff(r))-1.
"""
Vs = self.twobody(rs)
return np.where(rs < 0.8,-1,np.exp(-Vs)-1)
def g_2(self,rs,rho):
"""
Compute the first two terms in the density expansion
of the radial distribution function. This function
is very limited in what it is doing and ignores a lot
of keyword arguments. The code is just
prefac = np.exp(- self.twobody(rs))
corr = self.c_2_integral(rs)
return prefac*(1+rho*corr).
It may be worth writing custom g_2 function with more
details and e.g. tests of integrand domain using the
functions self.c_2_integral, self.integrand, etc.
Parameters
----------
rs : np.array
Points to evaluate g2 at.
rho : float
density of the system.
Returns
-------
out : np.array
Values exp(-V(rs))*(1+rho*c_2(rs)).
"""
prefac = np.exp(- self.twobody(rs))
corr = self.c_2_integral(rs)
return prefac*(1+rho*corr)
def c_2_integral(self,r12s,x1=(0.1,0.6),r3_domain=(0,3.0),
r3num = 513, theta3num = 513,
check_invariance=False,rtol=1e-5,
atol=1e-8,x1_inv=(0.3,0.0)):
"""
Compute the correction factor c_2(r) to the radial
distribution function at a set of points r12. Useful
for linear interpolation. c_2(r) is defined as
g(r) = exp(-V(r))(1+rho*c_2(r))
where V is the (effective) potential of the system and
rho is the density. Explicitly,
c_2(|r12|) = int f(|r13|)*f(|r23|)|r3|d|r3|dtheta3.
where f(|r|) = e^{-V(|r|)}-1 is the mayer function.
Parameters
----------
r12s : np.array
array of distances between particles 1 and 2. should
not be too close to zero.
x1 : two-tuple of floats (optional)
x1=(r1,theta1) is the position of particle 1 when
computing the integrand. The output should be
invariant of this positioning, so no need to use
this option except for in special cases/debugging.
r3_domain : two-tuple of floats (optional)
domain of radial integrand. Default is (0,3.0). Should
check (by plotting integrand) that this domain is large
enough.
r3num : int (optional)
number of points to use in r3 domain when integrating
(romberg integration, so r3num-1 = 2**k is required).
Default is 513.
theta3num : int (optional)
number of points to use in theta3 domain when integrating
(romberg integration, so theta3num-1 = 2**k is required).
Default is 513.
check_invariance : bool (optional)
if True, then integrate for different sets of particle
positions (x1,x2) for each r12 = |x1-x2| to check that
the resulting integral is the same (it should be).
Default is false.
rtol : float (optional)
positive floating point number to pass to np.allclose
(only used when check_invariance=True). Default value
is 1e-5.
atol : float (optional)
positive floating point number to pass to np.allclose
(only used when check_invariance=True). Default value
is 1e-8.
x1_inv : two-tuple of floats (optional)
x1_inv=(r1,theta1) is the position of particle 1 when
computing the integrand. This is only used when
check_invariance=True.
Returns
------
out : np.array
c_2 evaluated at r12s
Also, prints a warning if check_invariance=True and the
integral at each point r12 is not the same for the two
sets of (x1,x2) positions.
"""
hss1 = np.empty([len(r12s)],float)
if check_invariance:
hss2 = np.empty([len(r12s)],float)
if np.allclose(x1,x1_inv):
raise ValueError("The two tuples of points x1 and x1_inv "
"are identical, so check for "
"translational/rotational invariance of "
"the integral is unhelpful.")
for i,r12 in enumerate(r12s):
r1 = x1[0]
theta1 = x1[1]
r2 = r12+0.99*r1
theta2 = self.theta_2(r12,(r1,theta1),r2)
r3s = np.linspace(r3_domain[0],r3_domain[1],num=r3num,
endpoint=True)
theta3s = np.linspace(0,2*np.pi,num=theta3num,
endpoint=True)
rs,thetas = np.meshgrid(r3s,theta3s)
vals = self.integrand(rs,thetas,(r1,theta1),(r2,theta2))
integrals = romb(vals,dx = thetas[1,0]-thetas[0,0],axis=0)
int_final = romb(integrals,dx=rs[0,1]-rs[0,0])
hss1[i] = int_final
if check_invariance:
# look at another point in the (r1,theta1),(r2,theta2)
# plane to see if the answer to the integral is different
# (it shouldn't be, since it should only be a function of
# |x1-x2| (where x1 and x2 are vectors).
r1 = x1_inv[0]
theta1 = x1_inv[1]
r2 = r12+0.8*r1
theta2 = self.theta_2(r12,(r1,theta1),r2)
vals = self.integrand(rs,thetas,(r1,theta1),(r2,theta2))
integrals = romb(vals,dx = thetas[1,0]-thetas[0,0],axis=0)
int_final = romb(integrals,dx=rs[0,1]-rs[0,0])
hss2[i] = int_final
if check_invariance:
if not np.allclose(hss1,hss2,rtol=rtol,atol=atol):
print("error! the integral is not translationally "
"invariant!")
index = np.argmax(np.abs(hss1-hss2))
rerr = r12s[index]
maxerr = hss1[index]-hss2[index]
print(f"maximum error is at r12 = {rerr} "
f"taking the value {maxerr}.")
return hss1
def integrand(self,r3,theta_3,x1,x2):
"""
Compute the integrand of c_2(r12) (as described in
MayerInt.__doc__ string) for polar coordinate inputs
(includes factor of r3 since d^2r3 = r3*dr3*dtheta_3).
I.e.
fmayer(|r13|)*fmayer(|r23|)*r3
with
|rij| = sqrt(ri**2+rj**2-2*ri*rj*cos(theta_i-theta_j)).
Parameters
----------
r3 : float or np.array
radial distance of particle 3 to some arbitrary
origin (is one of the variables to integrate over).
theta_3 : float or np.array
polar angle of particle 3 with respect to arbitrary
origin (is the second variable to integrate over).
x1 : two-tuple of floats
polar coordinates x1=(r1,theta1) of particle 1.
x2 : two-tuple of floats
polar coordinates x2=(r2,theta2) of particle 2.
Returns
-------
out : np.array
Product fmayer(|r13|)*fmayer(|r23|)*r3 with
out.shape having the same shape as the tensor
product r3\times theta_3 if r3 and theta_3 are
1D arrays, and having the same shape as r3
(or equivalently theta_3) if r3,theta_3 are generated
from a np.meshgrid.
"""
r1,theta_1 = x1
r2,theta_2 = x2
r23 = np.sqrt(r3*r3+r2*r2-2*r3*r2*np.cos(theta_3-theta_2))
r13 = np.sqrt(r3*r3+r1*r1-2*r3*r1*np.cos(theta_3-theta_1))
return self._fmayer(r23)*self._fmayer(r13)*r3
def theta_2(self,r12,x1,r2):
"""
Simple helper function to compute the angle coordinate
theta2 of particle 2 given that one wants the total
distance between particles 1 and 2 to be r12.
np.arccos(-(r12**2-r1**2-r2**2)/(2*r1*r2))+theta1.
Parameters
----------
r12 : float or np.array
specified distance between particles 1 and 2.
x1 : two-tuple of floats or np.arrays
polar coordinates x1=(r1,theta1) of particle 1.
r2 : float or np.array
radial coordinate of particle 2.
Returns
-------
theta2 : float or np.array
angle coordinate of particle 2 such that the
distance between particles 1 and 2 is r12.
"""
r1,theta1=x1
return np.arccos(-(r12**2-r1**2-r2**2)/(2*r1*r2))+theta1
if __name__ == "__main__":
import matplotlib.pyplot as plt
from scipy.integrate import romb
from sambristol_ssabp_w.wbessel import wBessel
fp = 0.5
epsilon = 1.0
Pi = 3.0
ss = wBessel(fp=fp,epsilon=epsilon,Pi=Pi)
mi = MayerInt(ss.Effective2Bod)
r12 = 1.0
r1 = 0.3
theta1 = 0.0
r2 = 1.0
theta2 = mi.theta_2(r12,(r1,theta1),r2)
r3s = np.linspace(0,3.0,num=513,endpoint=True)
theta3s = np.linspace(0,2*np.pi,num=513,endpoint=True)
rs,thetas = np.meshgrid(r3s,theta3s)
vals = mi.integrand(rs,thetas,(r1,theta1),(r2,theta2))
integrals = romb(vals,dx = thetas[1,0]-thetas[0,0],axis=0)
int_final = romb(integrals,dx=rs[0,1]-rs[0,0],show=True)
print(int_final)
fig,ax = plt.subplots(subplot_kw=dict(projection='polar'))
s = ax.contourf(thetas,rs,vals)
fig.colorbar(s,ax=ax)
r12 = 1.0
r1 = 0.1
theta1 = 0.0
r2 = 0.9
theta2 = mi.theta_2(r12,(r1,theta1),r2)
#rs,thetas = np.meshgrid(r3s,theta3s)
vals = mi.integrand(rs,thetas,(r1,theta1),(r2,theta2))
print(mi.integrand(0.4,1.0,(r1,theta1),(r2,theta2)))
integrals = romb(vals,dx = thetas[1,0]-thetas[0,0],axis=0)
int_final = romb(integrals,dx=rs[0,1]-rs[0,0],show=True)
print(int_final)
fig,ax = plt.subplots(subplot_kw=dict(projection='polar'))
s = ax.contourf(thetas,rs,vals)
fig.colorbar(s,ax=ax)
from scipy.integrate import dblquad
func = lambda y,x : mi.integrand(x,y,(r1,theta1),(r2,theta2))
int_final = dblquad(func,0,3.0,0,2*np.pi)
print(int_final)
plt.show()
|
/sambristol_ssabp_w-0.0.15.tar.gz/sambristol_ssabp_w-0.0.15/sambristol_ssabp_w/mayerint.py
| 0.722625 | 0.751101 |
mayerint.py
|
pypi
|
import networkx as nx
import itertools
import numpy as np
from math import factorial
def gen_expected_crick_angles(P, rep_len, start_ph1, ap=False):
step = 360 / P
if ap:
sign=-1
else:
sign=1
return [adj(start_ph1+(sign * i * float(step))) for i in range(rep_len)]
def adj2(ang):
ang = adj(ang)
if ang < -180:
ang += 360
return ang
def adj(ang):
ang += 180.0
if ang>360:
c = int(ang / 360.0)
return ang - (360*c) - 180.0
else:
return ang - 180.0
def adjustangle(angle):
if abs(angle)>180 and angle > 0:
angle = angle - 360.0
elif abs(angle)>180 and angle < 0:
angle += 360.0
return angle
def diffangle(targetA, sourceA):
a = targetA - sourceA
a = (a + 180) % 360 - 180
return a
def crick_to_pos(start_Cr_ang, exp_helix_crick):
diff = [abs(adj(start_Cr_ang-i)) for i in exp_helix_crick]
mindiff = min(diff)
start_Cr_ang_pos = diff.index(mindiff)
try:
name = chr(97+start_Cr_ang_pos)
except ValueError:
name = '?'
return start_Cr_ang_pos, name, mindiff
def calc_crick_ang_dev(twister, exp_helix_crick, firstpos, lastpos, force_start_Cr_ang_pos=None):
if firstpos == lastpos:
assert lastpos == None
# define Crick angle of the starting position
if force_start_Cr_ang_pos==None:
if lastpos==None:
firstpos = 0
if type(twister) is list:
start_Cr_ang = twister[firstpos]
else:
start_Cr_ang = twister.iloc[firstpos]['Cr_ang']
start_Cr_ang_pos, name, _ = crick_to_pos(start_Cr_ang, exp_helix_crick)
else:
start_Cr_ang_pos = force_start_Cr_ang_pos
cr = itertools.cycle(exp_helix_crick)
# skip to the first repeat pos
for n in range(start_Cr_ang_pos):
next(cr)
if lastpos==None:
        lastpos = len(twister)
if type(twister) is list:
data = twister[firstpos:lastpos]
else:
data = twister['Cr_ang'].iloc[firstpos:lastpos]
Cr_ang_dev = [adj2(c - next(cr)) for c in data]
m,b = np.polyfit(range(len(Cr_ang_dev)), Cr_ang_dev, 1)
return Cr_ang_dev, m, b
def window(seq, n=2):
"""Returns a sliding window (of width n) over data from the iterable
s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...
"""
it = iter(seq)
result = tuple(itertools.islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
"""
try:
        window_size = abs(int(window_size))
        order = abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
    b = np.array([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
# SOCKET helper functions
def detect_helices_orientation(indices, orientation_data):
"""
Arguments
indices: dict of bundle helices from Socket
orientation_data: relative orientation of the helices from indices
Returns
a dict helix_id -> orientation
this function uses a graph to convert relative orientations between the helices
to parallel/anti-parallel labels for each helix
"""
G=nx.Graph()
for o in orientation_data:
G.add_edge(o[0], o[1], o=o[2])
res = {}
    # nx.connected_component_subgraphs was removed in networkx 2.4, so build
    # the per-component subgraphs explicitly; g.node is likewise g.nodes now
    for c in nx.connected_components(G):
        g = G.subgraph(c).copy()
        first=True
        for e in list(nx.dfs_edges(g)):
            if first:
                g.nodes[e[0]]['o'] = 1
                first = False
            if g[e[0]][e[1]]['o']=='antiparallel':
                g.nodes[e[1]]['o'] = g.nodes[e[0]]['o']*-1
            else:
                g.nodes[e[1]]['o'] = g.nodes[e[0]]['o']
        for n in g.nodes():
            res[n] = g.nodes[n]['o']
    return res
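if __name__ == "__main__":
    # Illustrative smoke test (not part of the original module): smooth a noisy
    # sine wave with the Savitzky-Golay filter defined above and report the
    # residual with respect to the clean signal.
    t = np.linspace(0, 2 * np.pi, 201)
    noisy = np.sin(t) + np.random.normal(scale=0.1, size=t.size)
    smoothed = savitzky_golay(noisy, window_size=11, order=3)
    print("max |smoothed - sin(t)| =", np.max(np.abs(smoothed - np.sin(t))))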
|
/samcc-turbo-0.0.2.tar.gz/samcc-turbo-0.0.2/samcc/helper_functions.py
| 0.505859 | 0.593315 |
helper_functions.py
|
pypi
|
import itertools
import heapq
import numpy as np
import scipy.spatial.distance as distance
import scipy.optimize
import functools
from operator import attrgetter
from Bio import PDB
from .bundle import get_local_axis
def create_pymol_selection_from_socket_results(indices):
"""create pymol-readable selection from socket data (converted to bundleDesc)
input: list of helices, each helix represented as a tuple in format (first_residue_number, last_residue_number, chain_id)
output: list of pymol-readable selections, each as a string
"""
selection = []
for idx in indices:
selection.append('(chain {} and resi {}-{})'.format(idx[2], idx[0], idx[1]))
return selection
def extract_CA_coords(helices, model, verbose=False):
"""get CA coordinates for rach residue in each helix
input: helix list, each helix is list of its residues in format (chain, ('', resnum, ''))
output: list of helices, each helix is list of CA coords (Biopython Vector class) of its residues
"""
print('Extracting CA coordinates...')
helices_CA = []
for helix in enumerate(helices):
#ignore KeyError structures (i.e. those with HETATM)
try:
helix_CA = [ model[res[0]][res[1]]['CA'].get_vector() for res in helix[1] ]
except KeyError:
return KeyError
if verbose:
print('CA COORDS: HELIX ' + str(helix[0]))
for h in helix_CA:
print(h)
helices_CA.append(helix_CA)
return helices_CA
def convert_Vector_data_to_np_array(helices_CA_Vector):
"""converts list of helices format from Biopython Vector to lists of 3 points
input: list of helices, each helix is list of Vectors, each is coords for one residue
output: list of helices, each helix is list of 3-element lists describing residue coords
"""
return [ np.array([ (v[0], v[1], v[2]) for v in helix_CA ]) for helix_CA in helices_CA_Vector ]
def select_minimal_angle_layer_set2(layers_sets, best_layer_nb=1):
def check_layers_shape(layers_set):
def calculate_plane_equation_from_points(points):
def plane(x, y, params):
a = params[0]
b = params[1]
c = params[2]
z = a*x + b*y + c
return z
def error(params, points):
result = 0
for (x,y,z) in points:
plane_z = plane(x, y, params)
diff = abs(plane_z - z)
result += diff**2
return result
def cross(a, b):
return [a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0]]
fun = functools.partial(error, points=points)
params0 = [0, 0, 0]
res = scipy.optimize.minimize(fun, params0)
return res
def calculate_angle_between_planes(plane1, plane2, silent_warning=True):
"""calculate angle between two planes defined with equation ax+bx+cx+d
cos = |a1a2 + b1b2 + c1c2| / (sqrt(a1^2 + b1^2 + c1^2) * sqrt(a2^2 + b2^2 + c2^2))
input: (plane1/2) plane equation in form of dicionary where keys correspond to coefficients of equation
output: angle between planes in degrees (float)
"""
numerator = abs(sum([ plane1[coef]*plane2[coef] for coef in 'abc' ]))
len_plane1 = np.sqrt(sum([ np.power(plane1[coef], 2) for coef in 'abc' ]))
len_plane2 = np.sqrt(sum([ np.power(plane2[coef], 2) for coef in 'abc' ]))
cos_phi = numerator / (len_plane1 * len_plane2)
if (1+1e-8 > cos_phi > 1.0):
cos_phi = 1.0
if not silent_warning:
print('Warning: calculated cosine slightly beyound function codomain(max 1e-8 beyound) - corrected to 1.0.')
phi_deg = np.rad2deg(np.arccos(cos_phi))
return phi_deg
layer_equations = []
layer_angles = []
for layer in layers_set.iterlayer():
points = [ tuple(point.coords) for point in layer ]
plane_equation = calculate_plane_equation_from_points(points)
layer_equations.append({'a':plane_equation.x[0], 'b':plane_equation.x[1], 'c':plane_equation.x[2]})
for layer in range(len(layer_equations)-1):
# calculate angle between two planes
layer_angles.append(calculate_angle_between_planes(layer_equations[layer], layer_equations[layer+1]))
layers_set.average_dist_angle = np.median(layer_angles) #FIXME np.mean(layer_angles)
print('V2', layers_set.average_dist_angle)
return layers_set
# this will return same layer sets list but with layers with set avg angle attribute
layer_set_angles = (list(map(check_layers_shape, layers_sets)))
if best_layer_nb == 1:
best_layer_set_angle = min(layer_set_angles, key=attrgetter('average_dist_angle'))
elif best_layer_nb == 'rank':
best_layer_set_angle = sorted(layer_set_angles, key=attrgetter('average_dist_angle'))
else:
best_layer_set_angle = heapq.nsmallest(best_layer_nb, layer_set_angles, key=attrgetter('average_dist_angle'))
return best_layer_set_angle
def select_minimal_angle_layer_set(layers_sets, best_layer_nb=1):
"""find layer set with minimal angle between layers
input: (CA_layers_list) list of lists of layers, each layer represented as n-element list of 3-element lists of coords (float) of CAs
n=number of helices, topmost list comprise of lists of layers generated by scanning with different methods (from top or bottom of the bundle)
input: (layer_ids_list) list of lists of identifiers (int) of CAs used to define first layer, each identifier represents one helix
topmost list elements correspond to the elements of topmost list from CA_layers_list
output: (best_layer_set) list of layers with minimal angle between them in format identical to elements of topmost list of CA_layers_list
output: (best_layer_ids) element of layer_ids_list corresponding to selected best_layer_set element
"""
def check_layers_shape(layers_set):
"""compute average angle between layers in bundle
for each layer find all possible planes and compute angles between planes in vertical groups
result is averaged angle from list of average angles from groups of planes
input: list of layers, each layer is n-element list of 3-element lists (coords), n=oligomerization state
output: average angle between layers (float)
"""
def calculate_plane_equation_from_points(x, y, z):
a = np.column_stack((x, y, z))
return np.linalg.lstsq(a, np.ones_like(x), rcond=None)[0]
def calculate_angle_between_planes(plane1, plane2, silent_warning=True):
"""calculate angle between two planes defined with equation ax+bx+cx+d
cos = |a1a2 + b1b2 + c1c2| / (sqrt(a1^2 + b1^2 + c1^2) * sqrt(a2^2 + b2^2 + c2^2))
input: (plane1/2) plane equation in form of dicionary where keys correspond to coefficients of equation
output: angle between planes in degrees (float)
"""
numerator = abs(sum([ plane1[coef]*plane2[coef] for coef in 'abc' ]))
len_plane1 = np.sqrt(sum([ np.power(plane1[coef], 2) for coef in 'abc' ]))
len_plane2 = np.sqrt(sum([ np.power(plane2[coef], 2) for coef in 'abc' ]))
cos_phi = numerator / (len_plane1 * len_plane2)
if (1+1e-8 > cos_phi > 1.0):
cos_phi = 1.0
if not silent_warning:
print('Warning: calculated cosine slightly beyound function codomain(max 1e-8 beyound) - corrected to 1.0.')
phi_deg = np.rad2deg(np.arccos(cos_phi))
return phi_deg
layer_equations = []
layer_angles = []
for layer in layers_set.iterlayer():
x = [ point.coords[0] for point in layer ] # all x coords from layer
y = [ point.coords[1] for point in layer ]
z = [ point.coords[2] for point in layer ]
plane_equation = calculate_plane_equation_from_points(x,y,z)
layer_equations.append({'a':plane_equation[0], 'b':plane_equation[1], 'c':plane_equation[2]})
for layer in range(len(layer_equations)-1):
# calculate angle between two planes
layer_angles.append(calculate_angle_between_planes(layer_equations[layer], layer_equations[layer+1]))
layers_set.average_dist_angle = np.median(layer_angles)
return layers_set
# this will return same layer sets list but with layers with set avg angle attribute
layer_set_angles = (list(map(check_layers_shape, layers_sets)))
if best_layer_nb == 1:
best_layer_set_angle = min(layer_set_angles, key=attrgetter('average_dist_angle'))
elif best_layer_nb == 'rank':
best_layer_set_angle = sorted(layer_set_angles, key=attrgetter('average_dist_angle'))
else:
best_layer_set_angle = heapq.nsmallest(best_layer_nb, layer_set_angles, key=attrgetter('average_dist_angle'))
return best_layer_set_angle
def find_bundle_boundry_layer_from_all(boundry_layers, distance_threshold, search_layer_setting_num):
min_distance_set = heapq.nsmallest(search_layer_setting_num, boundry_layers)
return [ layer for layer in min_distance_set if layer.total_distance <= distance_threshold ]
def select_minimal_distance_layer_set(layers_sets):
"""find set of layers with minimal distance between 2 helices (dimers only)
input/output same as described in select_minimal_angle_layer_set function
only difference is that this function search for minimal distance between helices, not angle between layers
"""
def check_layers_distances(layers_set):
layer_distances = []
for layer in layers_set.iterlayer():
point1 = layer[0].coords
point2 = layer[1].coords
layer_distances.append(distance.euclidean(point1, point2))
layers_set.average_dist_angle = np.mean(layer_distances)
return layers_set
layer_set_distances = (list(map(check_layers_distances, layers_sets)))
best_layer_set_distance = min(layer_set_distances, key=attrgetter('average_dist_angle'))
return best_layer_set_distance
def convert_to_bundleClass_format(samcc_selection):
"""converts samcc selection from this library to original format used by bundleClass
input: samcc-ready selection, list of helices, each helix a list of format [ chain_id(string), start_residue(int), stop_residue(int) ]
output: BundleClass-ready selection, list of helices, each helix a tuple of format ( start_residue(int), stop_residue(int), chain_id(string), antiparallel(bool) )
"""
input_helices = []
for helix in samcc_selection:
if helix[1] < helix[2]:
input_helices.append((helix[1], helix[2], helix[0], False))
else:
input_helices.append((helix[2], helix[1], helix[0], True))
return input_helices
def select_minimal_dist_to_plane_layer_set(layers_sets, best_layer_nb=1):
def check_layers_dist_to_plane(layers_set):
def calculate_plane_equation_from_points(x, y, z):
a = np.column_stack((x, y, z))
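# least-squares fit of a plane of the form a*x + b*y + c*z = 1 through the layer points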
return np.linalg.lstsq(a, np.ones_like(x), rcond=None)[0]
def calculate_point_to_plane_distance(plane_equation, point):
# plane coefficients
A = plane_equation[0]
B = plane_equation[1]
C = plane_equation[2]
# point coords
x = point.coords[0]
y = point.coords[1]
z = point.coords[2]
numerator = abs(A*x + B*y + C*z)
denominator = np.sqrt(A**2 + B**2 + C**2)
return numerator / denominator
layer_distances = []
for layer in layers_set.iterlayer():
x = [ point.coords[0] for point in layer ] # all x coords from layer
y = [ point.coords[1] for point in layer ]
z = [ point.coords[2] for point in layer ]
plane_equation = calculate_plane_equation_from_points(x,y,z)
points_in_layer = []
for point in layer:
points_in_layer.append(calculate_point_to_plane_distance(plane_equation, point))
layer_distances.append(np.mean(points_in_layer))
layers_set.average_dist_to_plane = np.mean(layer_distances)
return layers_set
layer_set_dist_to_plane = (list(map(check_layers_dist_to_plane, layers_sets)))
if best_layer_nb == 1:
best_layer_set_dist_to_plane = min(layer_set_dist_to_plane, key=attrgetter('average_dist_to_plane'))
elif best_layer_nb == 'rank':
best_layer_set_dist_to_plane = sorted(layer_set_dist_to_plane, key=attrgetter('average_dist_to_plane'))
else:
best_layer_set_dist_to_plane = heapq.nsmallest(best_layer_nb, layer_set_dist_to_plane, key=attrgetter('average_dist_to_plane'))
return best_layer_set_dist_to_plane
def select_minimal_total_distance_layer_set(layers_sets, best_layer_nb=1):
def check_layers_total_distances(layers_set):
def calculate_total_distance(layer, neighbour_interactions):
"""Calculate total distance between neighbouring points in layer
neighbourhood is determined according to neighbour_interactions list
"""
total_distance = 0
for axis_point1 in layer:
for axis_point2 in layer:
if (axis_point1.helix_id, axis_point2.helix_id) in neighbour_interactions:
total_distance += distance.euclidean(axis_point1.CA_coords, axis_point2.CA_coords)
return total_distance
layers_all_distances = []
for layer in layers_set.iterlayer():
layers_all_distances.append(calculate_total_distance(layer, layers_set.neighbour_interactions))
layers_set.average_layers_dist = np.mean(layers_all_distances)
return layers_set
layer_set_total_distances = list(map(check_layers_total_distances, layers_sets))
if best_layer_nb == 1:
best_layer_set_total_distances = min(layer_set_total_distances, key=attrgetter('average_layers_dist'))
elif best_layer_nb == 'rank':
best_layer_set_total_distances = sorted(layer_set_total_distances, key=attrgetter('average_layers_dist'))
else:
best_layer_set_total_distances = heapq.nsmallest(best_layer_nb, layer_set_total_distances, key=attrgetter('average_layers_dist'))
return best_layer_set_total_distances
def get_layers_set_with_best_ranks(layers_sets):
best_layer_set_angle = sorted(layers_sets, key=attrgetter('average_dist_angle'))
best_layer_set_dist_to_plane = sorted(layers_sets, key=attrgetter('average_dist_to_plane'))
best_layer_set_total_distances = sorted(layers_sets, key=attrgetter('average_layers_dist'))
for pos, layer_set in enumerate(best_layer_set_angle):
layer_set.ranks += pos
for pos, layer_set in enumerate(best_layer_set_total_distances):
layer_set.ranks += pos
return min(layers_sets, key=attrgetter('ranks'))
def find_closest_CA_to_point(boundry_layers, helices_axis_all):
def get_closest_CA_to_axis_point(point, helix_pts):
helix_point = helix_pts[0]
dst_best = distance.euclidean(helix_pts[0].CA_coords, point.coords)
for helix_pt in helix_pts:
dst = distance.euclidean(helix_pt.CA_coords, point.coords)
if dst < dst_best:
helix_point = helix_pt
dst_best = dst
return helix_point
boundry_layers_CA = []
for layer, helix_axis in zip(boundry_layers, helices_axis_all):
layer_adjusted = []
for point in layer:
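# consider axis points within +/-2 positions of this layer point (positive indices only) and pick the one whose CA atom is closest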
closest_CA_helix_point = get_closest_CA_to_axis_point(point, [helix_axis[point.point_id+i] for i in range(-2,3) if point.point_id+i>0])
layer_adjusted.append(closest_CA_helix_point)
layer.axis_points = layer_adjusted
boundry_layers_CA.append(layer)
return boundry_layers_CA
|
/samcc-turbo-0.0.2.tar.gz/samcc-turbo-0.0.2/samcc/layer_detection.py
| 0.510252 | 0.5047 |
layer_detection.py
|
pypi
|
import argparse
import os
import json
from random import shuffle
from pathlib import Path
from flattenKern import flatten_gpos_kerning
from typing import Union
from fontTools.ttLib import TTFont
from defcon import Font
__all__ = ["SameWidther", "TTFont", "Font"]
class SameWidther:
def __init__(self, font: Union[TTFont, Font], language: str) -> None:
self.font = font
if type(font) == TTFont:
self.uni_name = self.TTF_OTF_unicodeMap
self.name_uni = {v: k for k, v in self.uni_name.items()}
self.kerning = self.TTF_OTF_kerning
self.metrics = self.TTF_OTF_metrics
unitsPerEm = self.font["head"].unitsPerEm
if type(font) == Font:
self.uni_name = self.UFO_unicodeMap
self.name_uni = {v: k for k, v in self.uni_name.items()}
self.kerning = self.UFO_kerning
self.metrics = self.UFO_metrics
unitsPerEm = self.font.info.unitsPerEm
self.scale = unitsPerEm / 1000
self.database = self.loadDatabase(language)
shuffle(self.database)
def loadDatabase(self, language: str) -> list:
""" loads database, either from inbuilt or your own if existing path provided """
customDatabase = Path(language)
if customDatabase.exists() and customDatabase.suffix == ".json":
path = customDatabase.absolute()
else:
path = Path(__file__).parent / "databases" / f"{language.upper()}.json"
assert (
path.exists()
), f"database {language} doesn't exist as inbuilt database"
with open(path, encoding="utf-8") as inputFile:
data = json.load(inputFile)
return data
@property
def UFO_kerning(self) -> dict:
return self.font.kerning
@property
def UFO_metrics(self) -> dict:
return {glyph.unicode: glyph.width for glyph in self.font}
@property
def UFO_unicodeMap(self) -> dict:
return {glyph.unicode: glyph.name for glyph in self.font}
@property
def TTF_OTF_kerning(self) -> dict:
flattened = flatten_gpos_kerning(self.font)
if flattened:
kerning = {
(self.name_uni.get(left), self.name_uni.get(right)): value
for left, right, value in flattened
}
return kerning
else:
return {}
@property
def TTF_OTF_metrics(self) -> dict:
hmtx = self.font.get("hmtx")
widths = {self.name_uni.get(k): hmtx[k][0] for k in self.font.getGlyphOrder()}
return widths
@property
def TTF_OTF_unicodeMap(self) -> dict:
return self.font.getBestCmap()
def getWords(
self, wordWidth: float, wordCount: int, threshold: int = 10, case: str = "lower"
) -> list:
sameLongLetters = []
for word in self.database:
word = getattr(str, case)(word)
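# total advance width of the word in font units; pair kerning is added below when available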
currentWordWidth = sum(map(lambda x: self.metrics[ord(x)], word))
pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
if self.kerning:
pairsKerning = sum(
map(lambda x: self.kerning.get(tuple(map(ord, x)), 0), pairs)
)
totalWidth = currentWordWidth + pairsKerning
else:
totalWidth = currentWordWidth
totalWidth /= self.scale
if wordWidth - threshold < totalWidth <= wordWidth + threshold:
sameLongLetters.append(word)
if len(sameLongLetters) == wordCount:
break
else:
print("not enough matches")
return sameLongLetters
class Args:
def __init__(self) -> None:
parser = argparse.ArgumentParser(
description="Get words of the same width, nice for specimens."
)
parser.add_argument(
"font", type=Path, help="OTF/TTF/UFO file which you want to use"
)
parser.add_argument(
"language",
type=str,
help="three letter short for language of word database, currently available [ENG, GER]. Or provide existing path to a existing database. It must be list a list in JSON file. With such structure: [\"word\", \"house\", \"apple\", ...]",
)
parser.add_argument(
"width",
type=float,
help="""How wide should the words be? Width of font's units. (400-500) is normally width of a letter""",
)
parser.add_argument("wordCount", type=int, help="How many words you need?")
parser.add_argument(
"-t",
"--threshold",
type=float,
default=10,
help="(optional, default:10) Threshold for the width",
)
parser.add_argument(
"-c",
"--case",
type=str,
default="lower",
help="(optional, default:lower) change case of the words [upper, lower, capitalize]",
)
self.parser = parser.parse_args()
def run(args) -> None:
suffix = args.font.suffix.lower()
if suffix == ".ufo":
font = Font(args.font)
if suffix in [".ttf", ".otf"]:
font = TTFont(args.font)
sameWidther = SameWidther(font, args.language)
print(
"\n".join(
sameWidther.getWords(
args.width, args.wordCount, threshold=args.threshold, case=args.case
)
)
)
def main() -> None:
args = Args()
run(args.parser)
if __name__ == "__main__":
main()
|
/sameWidther-0.0.5.tar.gz/sameWidther-0.0.5/Lib/sameWidther.py
| 0.762336 | 0.291321 |
sameWidther.py
|
pypi
|
import dash
from dash import dcc, html, Input, Output
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
import matplotlib.font_manager as fm
def get_system_fonts():
font_list = fm.findSystemFonts(fontpaths=None, fontext='ttf')
font_names = [fm.FontProperties(fname=font_file).get_name() for font_file in font_list]
return font_names
def stacked_bar(data, px_kwargs={}, layout_kwargs={}):
"""Creates an interactive stacked bar chart using Plotly Dash.
The chart allows the user to select the x, y, and color dimension from dropdown menus.
It also allows selecting the font and font size for the chart.
Parameters:
- data (dict): The data to plot as a dictionary of columns
- px_kwargs (dict): Keyword arguments passed to plotly express
- layout_kwargs (dict): Keyword arguments passed to plotly layout
The Dash app is launched with app.run_server() when the function is called.
"""
df = pd.DataFrame(data)
available_fonts = get_system_fonts()
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
"""
- A dropdown is created for selecting x, y, and color columns
- A dropdown is created for selecting font
- An input is created for selecting font size
"""
app.layout = dbc.Container([
dbc.Row([
dbc.Col(dcc.Dropdown(id='x-dropdown', placeholder='Select X Column')),
dbc.Col(dcc.Dropdown(id='y-dropdown', placeholder='Select Y Column')),
dbc.Col(dcc.Dropdown(id='z-dropdown', placeholder='Select coloring Column')),
], style={'margin': 20}),
dbc.Row([
dbc.Col(dcc.Dropdown(id='font-dropdown',
options=[{'label': font, 'value': font} for font in available_fonts],
#value=available_fonts[0],
placeholder='Select Font')),
dbc.Col(dcc.Input(id='font-size-input', type='number', value=12)),
]),
dcc.Graph(id='bar-chart'),
],style={'margin': 20})
"""
Callbacks update the options and figure based on selections.
A stacked bar chart is produced using plotly express and customized with:
- x, y, color dimensions from dropdowns
- Font size and family from inputs
- kwargs passed to px and layout
"""
@app.callback(
Output('x-dropdown', 'options'),
Output('y-dropdown', 'options'),
Output('z-dropdown', 'options'),
Input('bar-chart', 'figure')
)
def update_dropdown_options(figure):
x_options = [{'label': col, 'value': col} for col in df.columns]
y_options = [{'label': col, 'value': col} for col in df.columns]
z_options = [{'label': col, 'value': col} for col in df.columns]
return x_options, y_options, z_options
@app.callback(
Output('bar-chart', 'figure'),
Input('x-dropdown', 'value'),
Input('y-dropdown', 'value'),
Input('z-dropdown', 'value'),
Input('font-dropdown', 'value'), # Add font dropdown input
Input('font-size-input','value')
)
def update_bar_chart(x_col, y_col, z_col, selected_font, font_size):
if x_col and y_col and z_col:
font = {'family': selected_font, 'size': font_size}
fig = px.bar(df, x=x_col, y=y_col, color=z_col,**px_kwargs)
fig.update_layout(barmode='stack', font=font, **layout_kwargs)
return fig
else:
return {}
app.run_server(mode='inline')
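# Example usage (illustrative data, not part of the original module; starts a local Dash server):
# stacked_bar({'year': [2020, 2020, 2021, 2021],
#              'sales': [10, 15, 12, 18],
#              'region': ['north', 'south', 'north', 'south']})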
|
/sameh-stirling-0.0.6.tar.gz/sameh-stirling-0.0.6/sameh_stirling/stacked_bar.py
| 0.68616 | 0.253476 |
stacked_bar.py
|
pypi
|
import dash
from dash import dcc, html, Input, Output
import dash_bootstrap_components as dbc
import plotly.express as px
import pandas as pd
import matplotlib.font_manager as fm
def get_system_fonts():
font_list = fm.findSystemFonts(fontpaths=None, fontext='ttf')
font_names = [fm.FontProperties(fname=font_file).get_name() for font_file in font_list]
return font_names
def bubble_chart(data, px_kwargs={}, layout_kwargs={}):
"""
Creates an interactive bubble chart Dash app that allows users to customize
the plot.
Parameters:
- data: Pandas DataFrame containing the data to plot
- px_kwargs: Keyword arguments passed to plotly express
- layout_kwargs: Keyword arguments passed to plotly layout
Outputs:
A Dash app, launched via app.run_server(), that generates an interactive bubble chart based on user input.
The app allows the user to select the x, y, color, size, and hover text
variables to control the bubble chart plot. Additional customization like
font family and size are also provided.
The app uses Dash callbacks to update the figure in response to user input.
"""
if not isinstance(data, pd.DataFrame):
raise ValueError("The input data must be a pandas DataFrame.")
# Initialize the Dash app
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
# Define the app layout
app.layout = html.Div([
html.H1("Custom Bubble Chart"),
# First row with four dropdowns
dbc.Row([
# Dropdown for x variable
dbc.Col([
dcc.Dropdown(
id='x-variable-dropdown',
options=[{'label': col, 'value': col} for col in data.columns],
placeholder='Select X Variable'
)
], width=3),
# Dropdown for y variable
dbc.Col([
dcc.Dropdown(
id='y-variable-dropdown',
options=[{'label': col, 'value': col} for col in data.columns],
placeholder='Select Y Variable'
)
], width=3),
# Dropdown for color variable
dbc.Col([
dcc.Dropdown(
id='color-variable-dropdown',
options=[{'label': col, 'value': col} for col in data.columns],
placeholder='Select Color Variable'
)
], width=3),
# Dropdown for hover name variable
dbc.Col([
dcc.Dropdown(
id='hover-name-variable-dropdown',
options=[{'label': col, 'value': col} for col in data.columns],
placeholder="Select Hover Name Variable"
)
], width=3)
], justify='center',style={'margin': '20px'}),
# Second row with two dropdowns and one input
dbc.Row([
# Dropdown for size variable
dbc.Col([
dcc.Dropdown(
id='size-variable-dropdown',
options=[{'label': col, 'value': col} for col in data.columns],
placeholder="Select Size Variable"
)
], width=4),
# Dropdown for font family
dbc.Col([
dcc.Dropdown(
id='font-family-dropdown',
options=[{'label':font, 'value':font} for font in get_system_fonts()],
placeholder="Select Font Family"
)
], width=4),
# Font size input
dbc.Col([
dbc.Input(id='font-size-input', type='number', placeholder='Enter font size')
], width=4)
], justify='center',style={'margin': '20px'}),
# Bubble chart
dcc.Graph(id='bubble-chart')
])
# Define callback to update the bubble chart based on user input
@app.callback(
Output('bubble-chart', 'figure'),
Input('x-variable-dropdown', 'value'),
Input('y-variable-dropdown', 'value'),
Input('color-variable-dropdown', 'value'),
Input('size-variable-dropdown', 'value'),
Input('hover-name-variable-dropdown', 'value'),
Input('font-family-dropdown', 'value'),
Input('font-size-input', 'value')
)
def update_bubble_chart(x_variable, y_variable, color_variable, size_variable, hover_name_variable,
font_family, font_size):
if x_variable is None or y_variable is None:
return "please choose variable"
fig = px.scatter(
data,
x=x_variable,
y=y_variable,
size=size_variable,
color=color_variable,
hover_name=hover_name_variable,
**px_kwargs
)
fig.update_layout(
font={'family': font_family, 'size': font_size},
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
**layout_kwargs
)
fig.update_traces(selector=dict(type='scatter'))
return fig
app.run_server(mode='inline')
|
/sameh-stirling-0.0.6.tar.gz/sameh-stirling-0.0.6/sameh_stirling/bubble_chart.py
| 0.646349 | 0.301683 |
bubble_chart.py
|
pypi
|
# Check clients that are known to be incompatible with `SameSite=None`.
import re
def should_send_same_site_none(useragent):
return useragent is None or not is_same_site_none_incompatible(useragent)
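# Illustrative example (hypothetical user-agent string): an iOS 12 Safari UA such as
# "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1"
# matches has_web_kit_same_site_bug(), so should_send_same_site_none() returns False for it.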
# Classes of browsers known to be incompatible.
def is_same_site_none_incompatible(useragent):
return has_web_kit_same_site_bug(useragent) or drops_unrecognized_same_site_cookies(
useragent
)
def has_web_kit_same_site_bug(useragent):
return is_ios_version(major=12, useragent=useragent) or (
is_macosx_version(major=10, minor=14, useragent=useragent)
and (is_safari(useragent) or is_mac_embedded_browser(useragent))
)
def drops_unrecognized_same_site_cookies(useragent):
if is_uc_browser(useragent):
return not is_uc_browser_version_at_least(
major=12, minor=13, build=2, useragent=useragent
)
return (
is_chromium_based(useragent)
and is_chromium_version_at_least(major=51, useragent=useragent)
and not is_chromium_version_at_least(major=67, useragent=useragent)
)
# Regex parsing of User-Agent string. (See note above!)
def is_ios_version(major, useragent):
regex = re.compile(r"\(iP.+; CPU .*OS (\d+)[_\d]*.*\) AppleWebKit/")
match = regex.search(useragent)
# Extract digits from first capturing group.
return match and match.group(1) == str(major)
def is_macosx_version(major, minor, useragent):
regex = re.compile(r"\(Macintosh;.*Mac OS X (\d+)_(\d+)[_\d]*.*\) AppleWebKit/")
match = regex.search(useragent)
# Extract digits from first and second capturing groups.
return match and match.group(1) == str(major) and match.group(2) == str(minor)
def is_safari(useragent):
safari_regex = re.compile(r"Version/.* Safari/")
return safari_regex.search(useragent) and not is_chromium_based(useragent)
def is_mac_embedded_browser(useragent):
regex = re.compile(
r"^Mozilla/[\.\d]+ \(Macintosh;.*Mac OS X [_\d]+\) "
+ r"AppleWebKit/[\.\d]+ \(KHTML, like Gecko\)$"
)
return regex.search(useragent)
def is_chromium_based(useragent):
regex = re.compile(r"Chrom(e|ium)")
return regex.search(useragent)
def is_chromium_version_at_least(major, useragent):
regex = re.compile(r"Chrom[^ /]+/(\d+)[\.\d]* ")
match = regex.search(useragent)
if not match:
return False
# Extract digits from first capturing group.
version = int(match.group(1))
return version >= major
def is_uc_browser(useragent):
regex = re.compile(r"UCBrowser/")
return regex.search(useragent)
def is_uc_browser_version_at_least(major, minor, build, useragent):
regex = re.compile(r"UCBrowser/(\d+)\.(\d+)\.(\d+)[\.\d]* ")
match = regex.search(useragent)
if not match:
return False
# Extract digits from three capturing groups.
major_version = int(match.group(1))
minor_version = int(match.group(2))
build_version = int(match.group(3))
if major_version != major:
return major_version > major
if minor_version != minor:
return minor_version > minor
return build_version >= build
|
/samesite-compat-check-0.2.0.tar.gz/samesite-compat-check-0.2.0/samesite_compat_check/check.py
| 0.767864 | 0.416856 |
check.py
|
pypi
|
import sys
import subprocess
import argparse
import re
from collections import defaultdict
parser = argparse.ArgumentParser(
description='This script parses the BAM file, looks for reads overlapping with the target genes, and reports the pileup.')
parser.add_argument('sample_id', help='sample ID')
parser.add_argument('bam_file', help='an aligned bam file')
parser.add_argument(
'gene_file', help='a tab-delimited file of six columns in this order: contigId, geneId, begin, end, strand, DNA transcript seq. (Note: begin<end)')
parser.add_argument('min_bq', type=int,
help='the minimum base quality score of a sequenced base')
parser.add_argument('min_mq', type=int,
help='the minimum MQ mapping score of the aligned reads')
parser.add_argument('min_d', type=int, help='the minimum depth, an integer.')
args = parser.parse_args()
usage = f"""{sys.argv[0]} <sampleId> <bam file> <gene file> <minimum BQ> <minimum MQ> <minimum D> <maximum D>
{args.sample_id}: sample ID
{args.bam_file}: an aligned bam file
{args.gene_file}: a tab-delimited file of six columns in this order: contigId, geneId, begin, end, strand, DNA transcript seq. (Note: begin<end)
{args.min_bq}: the minimum base quality score of a sequenced base
{args.min_mq}: the minimum MQ mapping score of the aligned reads
{args.min_d}: the minimum depth, an integer.
This script parses the BAM file, looks for reads overlapping with the target genes, and reports the pileup.
Reads must pass the minimum MQ threshold.
For each position, an allele must have a depth >= <minimum D>.
The frequency of each allele is calculated after the filterings.
Note: the quality score must be Sanger Phred score (ascii 33).
Dependencies:
Use Samtools
CIGAR parser: the embedded cigar parser is not versatile, please review it to make sure that it is handling the cigar code appropriately.
The script generates a tab-delimited table directly to STDOUT.
Each row is a base of the queried genes.
A base can be reported up to four times, as four rows, when polymorphisms are observed: A, T, G or C.
The columns are:
sample ID
contig ID
This Base Position
Gene ID
Ref allele of this Base
Condon Position (if coding gene)
Observed Consensus Allele of this Base (in this BAM)
Observed Allele
Coverage Depth of the Observed Allele
Allele Frequency of the Observed Allele
"""
def parse_gene(file):
"""
Parse the input gene file
Args:
file (str): the gene file name
Returns:
dict: a dictionary containing the gene name as key and the contig, start, end, strand, and sequence as values
"""
data = {}
with open(file, "r") as f:
for line in f:
line = line.strip()
contig_id, gene_id, begin, end, strand, seq = line.split("\t")
data[gene_id] = {
'contig': contig_id,
'begin': int(begin),
'end': int(end),
'strand': strand,
'seq': seq
}
return data
def parse_bases(genes):
"""
Go through each gene and add the nucleotide positions to a dictionary indexed by contigs
Args:
genes (dict): a dictionary containing gene data
Returns:
dict: a dictionary indexed by contigs and containing the gene name, reference nucleotide, and codon position as values
"""
nuc = defaultdict(dict)
for g, gene_data in genes.items():
begin = gene_data['begin']
end = gene_data['end']
c = gene_data['contig']
strand = gene_data['strand']
temp = list(gene_data['seq'])
for i in range(begin, end + 1):
codon_pos = (i - begin + 1) % 3
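# codon position cycles 1,2,3 along the gene; the block below reverses it (1 <-> 3) for minus-strand genes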
if strand == '-' and codon_pos != 2:
codon_pos = 1 if codon_pos == 0 else 0
codon_pos = 3 if codon_pos == 0 else codon_pos
nuc[c][i] = f"{g}\t{temp[i-begin]}\t{codon_pos}"
return nuc
def decode_cigar(cigar):
"""
Decode the cigar string
Args:
cigar (str): the cigar string
Returns:
str: the decoded cigar string
"""
cigar_parts = re.findall(r'(\d+)([MIDNSHP])', cigar)
new_string = ''.join(c * int(n) for n, c in cigar_parts)
return new_string
def convert_qual(qual_string):
"""
Convert the quality string to a list of quality scores
Args:
qual_string (str): the quality string
Returns:
list: a list of quality scores
"""
scores = [ord(q) - 33 for q in qual_string]
return scores
def pileup(sample_id, bam_file, gene_file, min_bq, min_mq, min_depth):
"""
Parse the BAM file and look for reads overlapping with the target genes and report the pileup
Args:
sample_id (str): the sample ID
bam_file (str): the BAM file name
gene_file (str): the gene file name
min_bq (int): the minimum base quality score of a sequenced base
min_mq (int): the minimum MQ mapping score of the aligned reads
min_depth (int): the minimum depth, an integer.
Returns:
None
"""
genes = parse_gene(gene_file)
bases = parse_bases(genes)
f_table = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
with subprocess.Popen(["samtools", "view", bam_file], stdout=subprocess.PIPE, universal_newlines=True) as bam_process:
for line in bam_process.stdout:
qname, flag, rname, begin, mapq, cigar, mrnm, mpos, isize, seq, qual, *info = line.strip().split('\t')
if rname == "*" or int(mapq) < min_mq or rname not in bases:
continue
begin = int(begin)
end = begin + len(seq) - 1
qual_scores = convert_qual(qual)
s = decode_cigar(cigar)
b = list(seq)
ci = list(s)
new = []
read_i = 0
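# walk the expanded CIGAR and the read in parallel: deletions become gaps, insertions/soft-clips are skipped, hard-clips are ignored, low-quality bases are masked as gaps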
for cigar_i in range(len(ci)):
base = "-"
if ci[cigar_i] == "D":
base = "-"
elif ci[cigar_i] == "H":
continue
elif ci[cigar_i] in ["I", "S"]:
read_i += 1
continue
elif qual_scores[read_i] < min_bq:
base = "-"
read_i += 1
else:
base = b[read_i]
read_i += 1
new.append(base)
b = new
for i in range(begin, begin + len(b)):
nuc = b[i - begin]
if bases[rname].get(i):
if nuc != "-":
f_table[rname][i][nuc] += 1
for c in f_table:
print(f"{c}===")
print("Sample\tContig\tPosition\tGene\tRef\tCodon\tConsensus\tAllele\tCounts\tFrequency")
for c in f_table:
for pos in sorted(f_table[c]):
total = 0
major = ""
for nuc in sorted(f_table[c][pos]):
counts = f_table[c][pos][nuc]
if counts < min_depth:
continue
total += counts
if major == "":
major = nuc
elif f_table[c][pos][major] < counts:
major = nuc
for nuc in sorted(f_table[c][pos]):
counts = f_table[c][pos][nuc]
if counts < min_depth:
continue
percent = 100 * counts / total
print(f"{sample_id}\t{c}\t{pos}\t{bases[c][pos]}\t{major}\t{nuc}\t{counts}\t{percent:.0f}")
def main():
pileup(args.sample_id,
args.bam_file,
args.gene_file,
args.min_bq,
args.min_mq,
args.min_d)
if __name__ == "__main__":
main()
|
/samestr-1.2023.4-py3-none-any.whl/samestr-1.2023.4.data/scripts/kpileup.py
| 0.640636 | 0.37777 |
kpileup.py
|
pypi
|
import argparse
import numpy as np
from os.path import isdir, basename
from os import makedirs
# Input arguments
# ---------------
parser = argparse.ArgumentParser()
parser.add_argument('--kp', help='Kpileup alignments (.kp.txt)')
parser.add_argument('--map', help='Map of genomes to contigs (tab-delimited)')
parser.add_argument('--sample', help='Sample Name', type=str, required=True)
parser.add_argument('--gene-file', help='kpileup gene file')
parser.add_argument('--output-dir', help='Output dir', default='./')
args = parser.parse_args()
# Read data
# ---------
# Numpy alignments
# x[contig] = numpy alignment
# y[genome] = concatenated numpy alignments
# Merge kpileups from multiple samples. Write dictionary of (M, N, 4) numpy arrays where:
# M = samples
# N = alignment sites
# 4 = nucleotides (ACGT)
# Entry (i,j,k) of this array corresponds to the count of nucleotide k at position j of sample i
# Initialize data
nts = 'ACGT'
M = 1
# Initialize numpy arrays for each contig
x = {}
for line in open(args.gene_file):
line = line.rstrip().split()
contig = line[0]
beg = int(line[2])
end = int(line[3])
x[contig] = np.zeros([M, end, 4])
# Add kpileup results to numpy arrays
with open(args.kp, 'r') as f:
for line in f.readlines():
line = line.rstrip().split()
if len(line) == 10 and line[0] != 'Sample':
sample = line[0]
i = 0
contig = line[1]
j = int(line[2])
nt = line[7]
k = nts.index(nt)
count = int(line[8])
x[contig][i, j - 1, k] = count
# Sample list
# M = [sample1]
M = np.array([args.sample])
# Contig map
# cmap[genome] = [contig1, contig2, ...]
cmap = {}
for line in open(args.map):
line = line.strip().split()
genome = line[0]
contig = line[1]
if genome not in cmap:
cmap[genome] = []
cmap[genome].append(contig)
# Create dir if not exists
if not isdir(args.output_dir):
makedirs(args.output_dir)
# Mapping stats
cols = '\t'.join([
'alignment', 'genome', 'mean_cov', 'median_cov', 'n_sites', 'n_gaps',
'n_covered', 'n_mono', 'n_duo', 'n_tri', 'n_quat', 'n_poly', 'f_covered',
'f_mono', 'f_duo', 'f_tri', 'f_quat', 'f_poly'
])
stats = [cols]
# Concatenate contigs
# -------------------
y = {}
for genome in cmap:
contigs = cmap[genome]
# Initialize array
m = len(M)
n = sum([np.shape(x[c])[1] for c in contigs])
k = 4
y[genome] = np.zeros([m, n, k])
# Add alignment data
beg = 0
end = 0
for contig in contigs:
end += np.shape(x[contig])[1]
y[genome][0, beg:end, :] = x[contig]
beg = end
np_filepath = '%s/%s.%s.npy' % (args.output_dir, genome, args.sample)
np.save(np_filepath, y[genome], allow_pickle=True)
species = y[genome]
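# species has shape (n_samples, n_sites, 4); summing over the nucleotide axis gives per-site coverage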
cov = species.sum(axis=2)
# coverage [depth]
mean_cov = round(np.mean(cov), 4)
median_cov = round(np.median(cov), 4)
# coverage [width]
n_sites = species.shape[1]
n_gaps = (cov == 0).sum()
n_covered = n_sites - n_gaps
# n of variant sites, monomorphic, .., polymorphic
n_mono = ((species > 0).sum(axis=2) == 1).sum()
n_duo = ((species > 0).sum(axis=2) == 2).sum()
n_tri = ((species > 0).sum(axis=2) == 3).sum()
n_quat = ((species > 0).sum(axis=2) == 4).sum()
n_poly = ((species > 0).sum(axis=2) > 1).sum()
# fraction of covered sites,
# fraction of covered sites with variant, monomorphic, .., polymorphic
if not n_covered == 0:
f_covered = round(n_covered / n_sites, 4)
f_mono = round(n_mono / n_covered, 4)
f_duo = round(n_duo / n_covered, 4)
f_tri = round(n_tri / n_covered, 4)
f_quat = round(n_quat / n_covered, 4)
f_poly = round(n_poly / n_covered, 4)
else:
f_covered, f_mono, f_duo, \
f_tri, f_quat, f_poly = 0, 0, 0, 0, 0, 0
stat = [
basename(np_filepath), genome, mean_cov, median_cov, n_sites, n_gaps,
n_covered, n_mono, n_duo, n_tri, n_quat, n_poly, f_covered, f_mono,
f_duo, f_tri, f_quat, f_poly
]
stat = [str(s) for s in stat]
stats.append('\t'.join(stat))
with open('%s/%s.aln_stats.txt' % (args.output_dir, args.sample), 'w') as file:
file.write('\n'.join(stats))
|
/samestr-1.2023.4-py3-none-any.whl/samestr-1.2023.4.data/scripts/kp2np.py
| 0.488527 | 0.307969 |
kp2np.py
|
pypi
|
# Samil Power inverter tool
[](https://pypi.org/project/samil/)
Get model and status data from Samil Power inverters over the network.
If you just need PVOutput.org uploading, you can also try the
[old version](https://github.com/mhvis/solar/tree/v1).
## Supported inverter series
* SolarRiver TL
* SolarRiver TL-D
* SolarLake TL
The inverter needs to be equipped with a network connection and connected to the same network; the serial port is not supported.
If you have a SolarLake TL-PM series inverter, check out this fork!
->
[semonet/solar](https://github.com/semonet/solar)
## Features
* View inverter data
* Upload to PVOutput.org
* Publish to MQTT broker
The following features are not implemented but can be easily implemented upon request:
* Filter inverter based on IP or serial number
* Support for multiple PVOutput.org systems
## Getting started
### Docker
You can run any of the available commands with Docker.
Make sure to use host networking because the app relies on UDP broadcasts.
The image is currently not built for ARM platforms like Raspberry Pi,
so for these platforms you need to build it yourself or install via pip.
```
docker run --network host mhvis/samil monitor
```
Here is a sample `compose.yaml`:
```yaml
name: "samil"
services:
samil:
image: mhvis/samil
command: monitor # Adapt as desired
network_mode: host
restart: unless-stopped
```
### Ubuntu/Debian/Raspberry Pi
```
$ sudo apt install python3-pip
$ pip3 install --user samil
```
After installing, invoke `samil --help` for usage info.
If the `samil` command can't be found, first try logging out and back in.
If that doesn't help, you need to change the `PATH` variable
with the following command and log in again to apply the change.
```
$ echo 'PATH="$HOME/.local/bin:$PATH"' >> ~/.profile
```
### Other
```
$ pip install samil
```
## Usage
#### Monitor
The command `samil monitor` will search for an inverter in the network and print model and status info.
It will connect to the first inverter it finds and print status data every 5 seconds.
See `samil monitor --help` for additional options.
#### MQTT
The command `samil mqtt` connects to one or more inverters and sends status
messages to an MQTT broker continuously. These messages include inverter data
like input power, output power, energy and temperature.
Example: `samil mqtt -h 192.168.1.2 -p 1883 --username user --password pw --inverters 2 --interval 10`.
This command connects to the MQTT broker at address `192.168.1.2`, and
authenticates with the given username `user` and password `pw`. It will
connect to 2 inverters in the network and send an MQTT message continuously every 10 seconds.
For full usage info, run `samil mqtt --help`.
To run this command at startup, [see below](#run-command-at-boot).
#### PVOutput.org uploading
The command `samil pvoutput` gathers status data from 1 or more inverters and uploads it to your PVOutput.org system.
If you have multiple inverters, the data of each inverter is aggregated before uploading.
For full usage info, run `samil pvoutput --help`.
By default, the script uploads once and then stops. You can use cron to execute the script every 5 minutes.
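For example, a crontab entry along these lines runs the upload every 5 minutes (the install path, system ID and API key are placeholders, adjust them to your setup):
```
*/5 * * * * /home/pi/.local/bin/samil pvoutput YOUR_SYSTEM_ID YOUR_API_KEY
```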
#### Fetch historical data
*Todo*
## Run command at boot
Follow the instructions here to run the MQTT or PVOutput command automatically at startup.
If you run PVOutput using cron, you don't need this!
The instructions are based on [this post](https://raspberrypi.stackexchange.com/a/108723)
and tested on Raspberry Pi OS Lite version May 2020.
Create a new service:
```
$ sudo systemctl edit --force --full samil.service
```
In the empty file that opened, insert the following statements, adjust as necessary, save and close.
```
[Unit]
Description=Samil
After=multi-user.target
[Service]
# Adjust the command to your needs! Keep the path as is unless you installed to somewhere else.
ExecStart=/home/pi/.local/bin/samil mqtt --host 192.168.1.2
# Adjust if you have a different user account
User=pi
Group=pi
# Automatically restart on crashes after 30 seconds
Restart=on-failure
RestartSec=30
Environment="PYTHONUNBUFFERED=1" # Leave as is
[Install]
WantedBy=multi-user.target
```
Enable and start the service:
```
$ sudo systemctl enable --now samil.service
```
Check if the service has successfully started:
```
$ sudo systemctl status samil.service
```
#### Disabling
If you want to stop the script, run:
```
$ sudo systemctl stop samil.service
```
If you want to disable the script from starting on boot:
```
$ sudo systemctl disable samil.service
```
## Background info
The protocol used by these inverters is described
[here](https://mhvis.github.io/solar/).
The following units are used for the status values:
* Voltage in volts
* Current in amperes
* Energy in kilowatt hours
* Power in watts
* Temperature in degrees Celsius
* Operating time in hours
This project was originally a fork of [zombiekipling/solriv](https://github.com/zombiekipling/solriv)
but is now completely rewritten to implement new requirements.
## As a library
You can use this project as a library.
For documentation you will need to read through the source code.
To get started, I recommend reading the `monitor` function in `samil.cli`.
## CLI reference
The following commands and options are available:
```
$ samil monitor --help
Usage: samil monitor [OPTIONS]
Print model and status info for an inverter.
When you have multiple inverters, run this command multiple times to
connect to all inverters.
Options:
--interval FLOAT Status interval. [default: 5.0]
--interface TEXT IP address of local network interface to bind to.
--help Show this message and exit.
```
```
$ samil mqtt --help
Usage: samil mqtt [OPTIONS]
Publish inverter data to an MQTT broker.
The default topic format is inverter/<serial number>/status, e.g.
inverter/DW413B8080/status. The message value is a JSON object with all
status data from the inverter. Example message value:
{"operation_mode":"Normal","total_operation_time":45,
"pv1_input_power":2822.0,"pv2_input_power":0.0,"pv1_voltage":586.5,
"pv2_voltage":6.7,"pv1_current":4.8,"pv2_current":0.1,
"output_power":2589.0,"energy_today":21.2,"energy_total":77.0,
"grid_voltage":242.6,"grid_current":3.6,"grid_frequency":50.01,
"internal_temperature":35.0}
Options:
-n, --inverters INTEGER Number of inverters. [default: 1]
-i, --interval FLOAT Interval between status messages in seconds.
[default: 10.0]
-h, --host TEXT MQTT broker hostname or IP. [default: localhost]
-p, --port INTEGER MQTT broker port. [default: 1883]
--client-id TEXT MQTT client ID. If not provided, one will be
randomly generated.
--tls Enable MQTT SSL/TLS support.
--username TEXT MQTT username.
--password TEXT MQTT password.
--topic-prefix TEXT MQTT topic prefix. [default: inverter]
--interface TEXT IP address of local network interface to bind to.
--help Show this message and exit.
```
```
$ samil pvoutput --help
Usage: samil pvoutput [OPTIONS] SYSTEM_ID API_KEY
Upload inverter status to a PVOutput.org system.
Specify the PVOutput system using the SYSTEM_ID and API_KEY arguments. The
command will connect to the inverter, upload the current status data and
exit. Use something like cron to upload status data every 5 minutes.
If you have multiple inverters, specify -n with the number of inverters.
Data of all inverters will be aggregated before uploading to PVOutput,
energy is summed, voltage and temperature are averaged. For temperature,
the internal temperature is used, not the heatsink temperature. If the
inverter uses three phases, the voltage of each phase is averaged.
If you don't want to use cron, specify the --interval option to make the
application upload status data on the specified interval. With this mode
the application will stay connected to the inverters in between uploads,
this is less recommended.
Options:
-n INTEGER Connect to n inverters. [default: 1]
--dc-voltage By default, AC voltage is uploaded, specify this if
you want to upload DC (panel) voltage instead.
-i, --interval INTEGER Interval between status uploads in minutes, should
be 5, 10 or 15. If not specified, only does a single
upload.
--dry-run Do not upload data to PVOutput.org.
--interface TEXT IP address of local network interface to bind to.
--help Show this message and exit.
```
## Development info
Development installation (usually in a virtual environment):
```commandline
pip install -e .
pip install -r dev-requirements.txt
```
Lint code: `flake8`
Run testcases: `python -m unittest`
## License
MIT
|
/samil-2.2.1.tar.gz/samil-2.2.1/README.md
| 0.435661 | 0.943504 |
README.md
|
pypi
|
<div align="center">
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/logo.png" width=400 height=400>
<br/>
<h1>Samila</h1>
<br/>
<a href="https://www.python.org/"><img src="https://img.shields.io/badge/built%20with-Python3-green.svg" alt="built with Python3" /></a>
<a href="https://codecov.io/gh/sepandhaghighi/samila">
<img src="https://codecov.io/gh/sepandhaghighi/samila/branch/master/graph/badge.svg" />
</a>
<a href="https://badge.fury.io/py/samila"><img src="https://badge.fury.io/py/samila.svg" alt="PyPI version" height="18"></a>
<a href="https://anaconda.org/sepandhaghighi/samila"><img src="https://anaconda.org/sepandhaghighi/samila/badges/version.svg"></a>
<a href="https://colab.research.google.com/github/sepandhaghighi/samila/blob/master">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Samila-Colab"/>
</a>
<a href="https://discord.com/invite/94bz5QGZWb">
<img src="https://img.shields.io/discord/900055829225562162.svg" alt="Discord Channel">
</a>
</div>
----------
## Table of contents
* [Overview](https://github.com/sepandhaghighi/samila#overview)
* [Installation](https://github.com/sepandhaghighi/samila#installation)
* [Usage](https://github.com/sepandhaghighi/samila#usage)
* [Mathematical Details](https://github.com/sepandhaghighi/samila#mathematical-details)
* [Try Samila in Your Browser](https://github.com/sepandhaghighi/samila#try-samila-in-your-browser)
* [Issues & Bug Reports](https://github.com/sepandhaghighi/samila#issues--bug-reports)
* [Social Media](https://github.com/sepandhaghighi/samila#social-media)
* [Contribution](https://github.com/sepandhaghighi/samila/blob/master/.github/CONTRIBUTING.md)
* [References](https://github.com/sepandhaghighi/samila#references)
* [Acknowledgments](https://github.com/sepandhaghighi/samila#acknowledgments)
* [Authors](https://github.com/sepandhaghighi/samila/blob/master/AUTHORS.md)
* [License](https://github.com/sepandhaghighi/samila/blob/master/LICENSE)
* [Show Your Support](https://github.com/sepandhaghighi/samila#show-your-support)
* [Changelog](https://github.com/sepandhaghighi/samila/blob/master/CHANGELOG.md)
* [Code of Conduct](https://github.com/sepandhaghighi/samila/blob/master/.github/CODE_OF_CONDUCT.md)
## Overview
<p align="justify">
Samila is a generative art generator written in Python. Samila lets you create images based on many thousands of points. The position of every single point is calculated by a formula, which has random parameters. Because of the random numbers, every image looks different.
</p>
<table>
<tr>
<td align="center">Open Hub</td>
<td align="center"><a href="https://www.openhub.net/p/samila"><img src="https://www.openhub.net/p/samila/widgets/project_thin_badge.gif"></a></td>
</tr>
<tr>
<td align="center">PyPI Counter</td>
<td align="center"><a href="http://pepy.tech/project/samila"><img src="http://pepy.tech/badge/samila"></a></td>
</tr>
<tr>
<td align="center">Github Stars</td>
<td align="center"><a href="https://github.com/sepandhaghighi/samila"><img src="https://img.shields.io/github/stars/sepandhaghighi/samila.svg?style=social&label=Stars"></a></td>
</tr>
</table>
<table>
<tr>
<td align="center">Branch</td>
<td align="center">master</td>
<td align="center">dev</td>
</tr>
<tr>
<td align="center">CI</td>
<td align="center"><img src="https://github.com/sepandhaghighi/samila/workflows/CI/badge.svg?branch=master"></td>
<td align="center"><img src="https://github.com/sepandhaghighi/samila/workflows/CI/badge.svg?branch=dev"></td>
</tr>
</table>
<table>
<tr>
<td align="center">Code Quality</td>
<td><a href="https://www.codacy.com/gh/sepandhaghighi/samila/dashboard?utm_source=github.com&utm_medium=referral&utm_content=sepandhaghighi/samila&utm_campaign=Badge_Grade"><img src="https://app.codacy.com/project/badge/Grade/14df8ed5f8434aaea85889555b0182a9"/></a></td>
<td><a href="https://codebeat.co/projects/github-com-sepandhaghighi-samila-dev"><img alt="codebeat badge" src="https://codebeat.co/badges/01e6aa48-4cc2-4d9c-8288-c9fb490ad371" /></a></td>
<td><a href="https://www.codefactor.io/repository/github/sepandhaghighi/samila"><img src="https://www.codefactor.io/repository/github/sepandhaghighi/samila/badge" alt="CodeFactor" /></a></td>
</tr>
</table>
## Installation
### Source code
- Download [Version 1.1](https://github.com/sepandhaghighi/samila/archive/v1.1.zip) or [Latest Source](https://github.com/sepandhaghighi/samila/archive/dev.zip)
- Run `pip install -r requirements.txt` or `pip3 install -r requirements.txt` (Need root access)
- Run `python3 setup.py install` or `python setup.py install` (Need root access)
### PyPI
- Check [Python Packaging User Guide](https://packaging.python.org/installing/)
- Run `pip install samila==1.1` or `pip3 install samila==1.1` (Need root access)
### Easy install
- Run `easy_install --upgrade samila` (Need root access)
### Conda
- Check [Conda Managing Package](https://conda.io)
- `conda install -c sepandhaghighi samila` (Need root access)
## Usage
### Magic
```pycon
>>> import matplotlib.pyplot as plt
>>> from samila import GenerativeImage
>>> g = GenerativeImage()
>>> g.generate()
>>> g.plot()
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/7.png">
### Basic
```pycon
>>> import random
>>> import math
>>> def f1(x, y):
result = random.uniform(-1,1) * x**2 - math.sin(y**2) + abs(y-x)
return result
>>> def f2(x, y):
result = random.uniform(-1,1) * y**3 - math.cos(x**2) + 2*x
return result
>>> g = GenerativeImage(f1, f2)
>>> g.generate()
>>> g.plot()
>>> g.seed
188781
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/1.png">
### Projection
```pycon
>>> from samila import Projection
>>> g = GenerativeImage(f1, f2)
>>> g.generate()
>>> g.plot(projection=Projection.POLAR)
>>> g.seed
829730
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/2.png">
* Supported projections : `RECTILINEAR`, `POLAR`, `AITOFF`, `HAMMER`, `LAMBERT`, `MOLLWEIDE` and `RANDOM`
* Default projection is `RECTILINEAR`
### Marker
```pycon
>>> from samila import Marker
>>> g = GenerativeImage(f1, f2)
>>> g.generate()
>>> g.plot(marker=Marker.CIRCLE, spot_size=10)
>>> g.seed
448742
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/9.png">
* Supported markers : `POINT`, `PIXEL`, `CIRCLE`, `TRIANGLE_DOWN`, `TRIANGLE_UP`, `TRIANGLE_LEFT`, `TRIANGLE_RIGHT`, `TRI_DOWN`, `TRI_UP`, `TRI_LEFT`, `TRI_RIGHT`, `OCTAGON`, `SQUARE`, `PENTAGON`, `PLUS`, `PLUS_FILLED`, `STAR`, `HEXAGON_VERTICAL`, `HEXAGON_HORIZONTAL`, `X`, `X_FILLED`, `DIAMOND`, `DIAMON_THIN`, `VLINE`, `HLINE` and `RANDOM`
* Default marker is `POINT`
### Rotation
You can even rotate your art by using the `rotation` parameter. Enter your desired rotation for the image in degrees and you will have it.
```pycon
>>> g = GenerativeImage(f1, f2)
>>> g.generate()
>>> g.plot(rotation=45)
```
* Default rotation is 0
### Range
```pycon
>>> g = GenerativeImage(f1, f2)
>>> g.generate(start=-2*math.pi, step=0.01, stop=0)
>>> g.plot()
>>> g.seed
234752
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/3.png">
### Color
```pycon
>>> g = GenerativeImage(f1, f2)
>>> g.generate()
>>> g.plot(color="yellow", bgcolor="black", projection=Projection.POLAR)
>>> g.seed
1018273
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/4.png">
* Supported colors are available in `VALID_COLORS` list
* `color` and `bgcolor` parameters supported formats:
1. Color name (example: `color="yellow"`)
2. RGB/RGBA (example: `color=(0.1,0.1,0.1)`, `color=(0.1,0.1,0.1,0.1)`)
3. Hex (example: `color="#eeefff"`)
4. Random (example: `color="random"`)
5. Complement (example: `color="complement", bgcolor="blue"`)
6. Transparent (example: `bgcolor="transparent"`)
7. List (example: `color=["black", "#fffeef",...]`)
⚠️ **Transparent** mode is only available for background
⚠️ **List** mode is only available for color
⚠️ In **List** mode, the length of this list must be equal to the lengths of data1 and data2
#### Point Color
You can make your custom color map and use it in Samila
```pycon
>>> colorarray = [
... [0.7, 0.2, 0.2, 1],
... [0.6, 0.3, 0.2, 1],
... "black",
... [0.4, 0.4, 0.3, 1],
... [0.3, 0.4, 0.4, 1],
... "#ff2561"]
>>> g.generate()
>>> g.seed
454893
>>> g.plot(cmap=colorarray, color=g.data2, projection=Projection.POLAR)
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/8.png">
### Regeneration
```pycon
>>> g = GenerativeImage(f1, f2)
>>> g.generate(seed=1018273)
>>> g.plot(projection=Projection.POLAR)
>>> plt.show()
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/5.png">
### NFT.storage
Upload generated image directly to [NFT.storage](https://NFT.storage)
```pycon
>>> g.nft_storage(api_key="YOUR_API_KEY", timeout=5000)
{'status': True, 'message': 'FILE_LINK'}
```
You can also upload your config/data to nft storage as follows:
```pycon
>>> g.nft_storage(api_key="API_KEY", upload_config=True)
{'status': {'image': True, 'config':True}, 'message': {'image':'IMAGE_FILE_LINK', 'config':'CONFIG_FILE_LINK'}}
```
or
```pycon
>>> g.nft_storage(api_key="API_KEY", upload_data=True)
{'status': {'image': True, 'data':True}, 'message': {'image':'IMAGE_FILE_LINK', 'data':'DATA_FILE_LINK'}}
```
* Default timeout is **3000** seconds
### Save image
Save generated image
```pycon
>>> g.save_image(file_adr="test.png")
{'status': True, 'message': 'FILE_PATH'}
```
Save generated image in higher resolutions
```pycon
>>> g.save_image(file_adr="test.png", depth=5)
{'status': True, 'message': 'FILE_PATH'}
```
### Save data
Save generated image data
```pycon
>>> g.save_data(file_adr="data.json")
{'status': True, 'message': 'FILE_PATH'}
```
So you can load it into a `GenerativeImage` instance later by
```pycon
>>> g = GenerativeImage(data=open('data.json', 'r'))
```
Data structure:
```JSON
{
"plot": {
"projection": "polar",
"bgcolor": "black",
"color": "snow",
"spot_size": 0.01
},
"matplotlib_version": "3.0.3",
"data1": [
0.3886741692042526,
22.57390286376703,
-0.1646310981668766,
66.23632344600155
],
"data2": [
-0.14588750183600108,
20.197945942677833,
0.5485453260942901,
-589.3284610518896
]
}
```
### Save config
Save the generated image config. It contains string representations of the functions, which are also human readable.
```pycon
>>> g.save_config(file_adr="config.json")
{'status': True, 'message': 'FILE_PATH'}
```
So you can load it into a `GenerativeImage` instance later by
```pycon
>>> g = GenerativeImage(config=open('config.json', 'r'))
```
Config structure:
```JSON
{
"matplotlib_version": "3.0.3",
"generate": {
"seed": 379184,
"stop": 3.141592653589793,
"step": 0.01,
"start": -3.141592653589793
},
"f2": "random.uniform(-1,1)*math.cos(x*(y**3))+random.uniform(-1,1)*math.ceil(y-x)",
"f1": "random.uniform(-1,1)*math.ceil(y)-random.uniform(-1,1)*y**2+random.uniform(-1,1)*abs(y-x)",
"plot": {
"color": "snow",
"bgcolor": "black",
"projection": "polar",
"spot_size": 0.01
}
}
```
## Mathematical details
Samila is simply a transformation from a square-shaped space in the Cartesian coordinate system to any arbitrary coordinate system such as the [Polar coordinate system](https://en.wikipedia.org/wiki/Polar_coordinate_system).
### Example
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/mathematical_details/transformation.png">
We have a set of points in the first space (left square), which can be defined as follows:
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/mathematical_details/S1.jpg">
And the functions below are used for the transformation:
```pycon
>>> def f1(x, y):
result = random.uniform(-1,1) * x**2 - math.sin(y**2) + abs(y-x)
return result
>>> def f2(x, y):
result = random.uniform(-1,1) * y**3 - math.cos(x**2) + 2*x
return result
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/mathematical_details/S2.jpg">
Here we use `Projection.POLAR`, so the target space will be the polar space, and we have:
```pycon
>>> g = GenerativeImage(f1, f2)
>>> g.generate(seed=10)
>>> g.plot(projection=Projection.POLAR)
```
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/mathematical_details/S2_.jpg">
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/images/6.png">
## Try Samila in your browser!
Samila can be used online in interactive Jupyter Notebooks via the Binder or Colab services! Try it out now:
[](https://mybinder.org/v2/gh/sepandhaghighi/samila/master)
[](https://colab.research.google.com/github/sepandhaghighi/samila/blob/master)
* Check `examples` folder
## Issues & bug reports
Just file an issue and describe it. We'll check it ASAP, or send an email to [[email protected]](mailto:[email protected] "[email protected]").
- Please complete the issue template
You can also join our discord server
<a href="https://discord.com/invite/94bz5QGZWb">
<img src="https://img.shields.io/discord/900055829225562162.svg?style=for-the-badge" alt="Discord Channel">
</a>
## Social media
1. [Instagram](https://www.instagram.com/samila_arts)
2. [Telegram](https://t.me/samila_arts)
3. [Twitter](https://twitter.com/samila_arts)
4. [Discord](https://discord.com/invite/94bz5QGZWb)
## References
<blockquote>1- Schönlieb, Carola-Bibiane, and Franz Schubert. "Random simulations for generative art construction–some examples." Journal of Mathematics and the Arts 7.1 (2013): 29-39.</blockquote>
<blockquote>2- <a href="https://github.com/cutterkom/generativeart">Create Generative Art with R</a></blockquote>
<blockquote>3- <a href="https://nft.storage/">NFT.storage : Free decentralized storage and bandwidth for NFTs</a></blockquote>
## Acknowledgments
This project was funded through the **Next Step Microgrant**, a program established by [Protocol Labs](https://protocol.ai/).
## Show your support
<h3>Star this repo</h3>
Give a ⭐️ if this project helped you!
<h3>Donate to our project</h3>
If you do like our project, and we hope that you do, can you please support us? Our project is not, and never will be, run for profit. We need the money just so we can continue doing what we do ;-).
<h4>Bitcoin</h4>
1KtNLEEeUbTEK9PdN6Ya3ZAKXaqoKUuxCy
<h4>Ethereum</h4>
0xcD4Db18B6664A9662123D4307B074aE968535388
<h4>Litecoin</h4>
Ldnz5gMcEeV8BAdsyf8FstWDC6uyYR6pgZ
<h4>Doge</h4>
DDUnKpFQbBqLpFVZ9DfuVysBdr249HxVDh
<h4>Tron</h4>
TCZxzPZLcJHr2qR3uPUB1tXB6L3FDSSAx7
<h4>Ripple</h4>
rN7ZuRG7HDGHR5nof8nu5LrsbmSB61V1qq
<h4>Binance Coin</h4>
bnb1zglwcf0ac3d0s2f6ck5kgwvcru4tlctt4p5qef
<h4>Tether</h4>
0xcD4Db18B6664A9662123D4307B074aE968535388
<h4>Dash</h4>
Xd3Yn2qZJ7VE8nbKw2fS98aLxR5M6WUU3s
<h4>Stellar</h4>
GALPOLPISRHIYHLQER2TLJRGUSZH52RYDK6C3HIU4PSMNAV65Q36EGNL
<h4>Zilliqa</h4>
zil1knmz8zj88cf0exr2ry7nav9elehxfcgqu3c5e5
<h4>Coffeete</h4>
<a href="http://www.coffeete.ir/opensource">
<img src="http://www.coffeete.ir/images/buttons/lemonchiffon.png" style="width:260px;" />
</a>
<h4>Gitcoin</h4>
<a href="https://gitcoin.co/grants/3915/samila-generative-art-generator">
<img src="https://github.com/sepandhaghighi/samila/raw/master/otherfiles/gitcoin_btn.png" style="width:260px;" />
</a>
|
/samila-1.1.tar.gz/samila-1.1/README.md
| 0.523177 | 0.900836 |
README.md
|
pypi
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DonationSuggestion'
db.create_table('samklang_payment_donationsuggestion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campaign', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['samklang_payment.DonationCampaign'])),
('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
))
db.send_create_signal('samklang_payment', ['DonationSuggestion'])
# Adding field 'DonationCampaign.default_amount'
db.add_column('samklang_payment_donationcampaign', 'default_amount', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=9, decimal_places=2, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting model 'DonationSuggestion'
db.delete_table('samklang_payment_donationsuggestion')
# Deleting field 'DonationCampaign.default_amount'
db.delete_column('samklang_payment_donationcampaign', 'default_amount')
models = {
'samklang_payment.donation': {
'Meta': {'ordering': "('-created',)", 'object_name': 'Donation'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '7', 'decimal_places': '2'}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samklang_payment.DonationCampaign']", 'null': 'True', 'blank': 'True'}),
'captured': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'transaction': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'blank': 'True'})
},
'samklang_payment.donationcampaign': {
'Meta': {'object_name': 'DonationCampaign'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'other_donations': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2', 'blank': 'True'}),
'payment_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samklang_payment.PaymentSite']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'started': ('django.db.models.fields.DateTimeField', [], {}),
'target_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2', 'blank': 'True'}),
'test_mode': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'thank_you_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'thank_you_text_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'total_donations': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'samklang_payment.donationsuggestion': {
'Meta': {'ordering': "('amount',)", 'object_name': 'DonationSuggestion'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samklang_payment.DonationCampaign']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'samklang_payment.paymentsite': {
'Meta': {'object_name': 'PaymentSite'},
'default_currency': ('django.db.models.fields.CharField', [], {'default': "'NOK'", 'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merchant_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'production_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'site': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'test_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['samklang_payment']
|
/samklang-payment-0.6.0.tar.gz/samklang-payment-0.6.0/samklang_payment/migrations/0003_auto__add_donationsuggestion__add_field_donationcampaign_default_amoun.py
| 0.400867 | 0.150559 |
0003_auto__add_donationsuggestion__add_field_donationcampaign_default_amoun.py
|
pypi
|
# SAML Reader
## **IMPORTANT**
Please **DO NOT** add any personally identifiable information (PII) when reporting an issue.
This means **DO NOT** upload any SAML data, even if it is yours. I don't want to be responsible
for it. :)
## Table of Contents
- [SAML Reader](#saml-reader)
- [**IMPORTANT**](#important)
- [Table of Contents](#table-of-contents)
- [What is this tool?](#what-is-this-tool)
- [Installation](#installation)
- [Dependencies for `xmlsec`](#dependencies-for-xmlsec)
- [Installing from PyPI](#installing-from-pypi)
- [Installing from GitHub source](#installing-from-github-source)
- [Updating the package](#updating-the-package)
- [From PyPI](#from-pypi)
- [From GitHub source](#from-github-source)
- [Running the web app](#running-the-web-app)
- [Running the CLI](#running-the-cli)
- [Data Sources](#data-sources)
- [**Reading from a file**](#reading-from-a-file)
- [**Reading from clipboard**](#reading-from-clipboard)
- [**Reading from pipe**](#reading-from-pipe)
- [Other command line options](#other-command-line-options)
- [`--summary`](#--summary)
- [`--summary-only`](#--summary-only)
- [`--compare`](#--compare)
- [Reporting issues](#reporting-issues)
- [Contributing](#contributing)
## What is this tool?
This tool parses SAML responses, gathering relevant info for diagnosing issues with federated authentication for MongoDB Cloud.
---
## Installation
### Dependencies for `xmlsec`
One of the tools used in this package requires `xmlsec`, which requires some libraries be installed on your system. See [this page](https://pypi.org/project/xmlsec/) for details on the required packages. For Mac, they can be installed by running [Homebrew](https://brew.sh/):
```
brew install libxml2 libxmlsec1 pkg-config
```
On Windows, the `xmlsec` package on PyPI has these dependencies pre-built into its installation process, so there should be no need to install them separately.
### Installing from PyPI
To install SAML Reader from PyPI:
1. It is **highly recommended** that this package be run in a Python virtual environment such as [virtualenv](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/) or [Anaconda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html). Please follow one of the previous links to learn how to create a Python environment of your choice. Create the environment with Python 3.9+ and activate it. I do not recommend installing this directly into your system's global environment. There is just so much that can go wrong.
2. Install the package from PyPI:
```bash
pip install saml_reader
```
3. Run the command line interface by running `saml_reader` with options specified below.
### Installing from GitHub source
If you wish to install from the GitHub source:
1. Clone the repository locally with `git clone`.
2. Create a virtual environment such as [virtualenv](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/) or [Anaconda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) using Python 3.9+ and activate it.
3. In the root directory of the repository, run `pip install .` to install the package. If you are planning to make changes to the package, run `pip install -e .` instead to install the package in editable mode.
4. Run the command line interface by running `saml_reader` with options specified below.
## Updating the package
As this software is in its infancy, updates will be made quickly as bugs are discovered and improvements are made.
### From PyPI
To get the latest version, run:
```
pip install --upgrade saml_reader
```
This should uninstall the old version and install the new.
### From GitHub source
To pull down the latest version:
1. Checkout `master` branch with `git checkout master`.
2. Run `git pull` to pull down the latest changes.
3. If you installed in editable mode, you should be good to go. If you did not install in editable mode, run `pip install .` in the root directory of the repository.
---
## Running the web app
This tool can be run locally as a web app. You simply need to run:
```
saml_web_app
```
This will run the web app, serving it on `localhost` and port `8050`. Your default browser will
open automatically to http://localhost:8050. The web app accepts the following arguments
(an example invocation follows the list):
- `--host <host>`: this lets you specify host/IP address where the web app is listening. Default is `localhost`
- `--port <port>`: this lets you specify port where the web app is listening. Default is `8050`
- `--no-open-browser`: suppresses opening the web browser automatically
- `--keep-alive`: keeps the web server running indefinitely, or until killed with Ctrl+C. The server will time out after 30 minutes otherwise.
- `--version`: returns the installed version and exits
- `--help`: displays the help menu
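For example, a hypothetical invocation that listens on all interfaces, uses a different port, skips opening the browser, and keeps the server alive indefinitely would be:
```bash
saml_web_app --host 0.0.0.0 --port 8080 --no-open-browser --keep-alive
```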
When you navigate to the web app, the `Analyze SAML` link is the only one that currently has any functionality. You enter the SAML data on the left side and specify any comparison values you wish to include on the right side. Once you do that, click `Analyze` and the output will appear.
When you are done using the web app, please be sure to close the web server by pressing Ctrl+C in the terminal where you ran the web app. If you did not specify `--keep-alive`, the server will automatically terminate after 30 minutes.
---
## Running the CLI
This tool can accept a SAML response as properly-formatted XML or
a base64-encoded string, or it can be extracted directly from a HAR file dump.
The data can be input from a file, from the system clipboard,
or from a Unix pipe.
### Data Sources
You can read from a number of different sources in a number of different formats.
#### **Reading from a file**
You can read a file containing data in any of the supported formats:
```bash
saml_reader /path/to/file.xml # XML is default type
saml_reader /path/to/base64.txt --type base64 # base64 requires flag
saml_reader /path/to/harfile.har --type har # har requires flag
```
#### **Reading from clipboard**
If you have the xml, base64, or har data in your system clipboard, run:
```bash
saml_reader --clip --type <xml, base64, har>
```
The `--type` flag can be omitted when the clipboard contains XML, since that is the default type.
#### **Reading from pipe**
If you prefer piping or have been doing your own parsing on the command line:
```
cat file.xml | saml_reader
cat base64.txt | saml_reader --type base64
cat file.har | saml_reader --type har
```
You can specify `saml_reader --stdin` but it is not required.
### Other command line options
By default, the application will only output the results of validation
tests. There are some extra options to expand the tests and the information
that is output by the program.
#### `--summary`
This flag will print a full summary of key parameters pulled directly from the SAML
response and certificate.
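For instance, to run the validation tests and print the summary for a hypothetical XML file:
```bash
saml_reader /path/to/file.xml --summary
```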
#### `--summary-only`
This will only print the summary and skip any validation tests. It cannot be combined
with `--compare`.
#### `--compare`
This will allow the user to input expected values to compare with the SAML response.
SAML Reader will prompt for each value in the terminal. Values can
be skipped by pressing Enter without inputting a value. Example:
```
Customer First Name: Sam
Customer Last Name: Ell
Customer Email Address: [email protected]
MongoDB Assertion Consumer Service URL: https://auth.mongodb.com/sso/saml2/01234abcDE56789ZyXwv
MongoDB Audience URL: https://www.okta.com/saml2/service-provider/abcdefghijklmnopqrst
Domain(s) associated with IdP:
1. foo.com
2. bar.net
3. mydomain.com
4.
IdP Issuer URI: Issuer_URI_Here
Signing Certificate Expiration Date (MM/DD/YYYY): 01/31/2021
Encryption Algorithm (SHA1 or SHA256): SHA256
Is customer expecting role mapping (y/N): y
Expected role mapping group names (if unknown, leave blank):
1. Test Group Name
2.
```
All values will be validated to see if they match expected values for MongoDB Cloud.
If an attribute does not pass validation, you will be asked to re-enter it or skip it.
Alternatively, this option will accept a single argument as a path to a JSON file containing the
comparison values in the format:
```javascript
{
"firstName": "Sam",
"lastName": "Ell",
"email": "[email protected]",
"issuer": "Issuer URI here",
"cert_expiration": "Date in MM/DD/YYYY format",
"acs": "Assertion Consumer Service URL here",
"audience": "Audience URL here",
"encryption": "Must be 'SHA1' or 'SHA256'",
"domains": ["foo.com", "bar.net", "mydomain.com"],
"role_mapping_expected": "Must be 'Y' or 'N'",
"memberOf": ["Test Group Name"]
}
```
Note that `domains` and `memberOf` must be lists. Any value can be omitted or substituted with `null` to be ignored.
An empty string (`""`) or empty list (`[]`) will be interpreted as an invalid value.
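For example, a hypothetical invocation that reads a HAR file and compares it against a saved JSON file would be:
```bash
saml_reader /path/to/harfile.har --type har --compare /path/to/comparison.json
```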
---
## Reporting issues
Because this tool inherently deals with personally identifiable information (PII)
and security information, this bears repeating...
**IMPORTANT: Please DO NOT add any personally**
**identifiable information (PII) when reporting an issue.**
This means **DO NOT** upload any SAML data, even if it is yours.
That said, thank you in advance for reporting any issues that you find while using
this tool. This tool is in its infancy, so it's sure to have issues and non-graceful
handling of errors. To report an issue, please open an issue on this repository,
describing the issue you are experiencing and one of the maintainers will look into the issue.
## Contributing
I do not have any specific requirements for contributing at this time, other than
that I am using Google-style docstrings. Please feel free to open a pull request!
As the architecture has evolved, I plan to create a document with more information on
the structure of the application and how to contribute.
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/README.md
| 0.494385 | 0.754146 |
README.md
|
pypi
|
import json
from urllib.parse import unquote
import haralyzer
class HarParsingError(Exception):
"""
Custom exception raised when we get any error from the HAR parser
"""
pass
class NoSAMLResponseFound(Exception):
"""
Custom exception if we don't find a SAML response
"""
pass
class HarParser(object):
"""
Wrapper around haralyzer package to read HAR file contents and retrieve SAML responses.
"""
def __init__(self, data):
"""
Create object containing raw HAR data.
Args:
data (basestring): Raw HAR data as JSON-string
"""
# TODO: Consider parsing this upon creation and writing a getter for SAML response(s)
# to wrap the haralyzer package more thoroughly
try:
self.data = json.loads(data)
except json.JSONDecodeError:
raise HarParsingError("Problem reading HAR JSON data")
self.parsed_data = None
self.errors = []
def parse(self):
"""
Parses the raw HAR data and stores it in the object.
Returns:
(basestring): SAML response as base64 string
"""
try:
parsed_har = haralyzer.HarParser(self.data)
except Exception:
# This is a wide catch-all
raise HarParsingError("Could not parse the HAR data")
responses = []
for page in parsed_har.pages:
for post in page.post_requests:
for param in post.get('request', {}).get('postData', {}).get('params', []):
if param['name'] == 'SAMLResponse':
responses.append(param['value'])
if len(responses) > 1:
self.errors.append("Multiple SAML responses found. Using the first one.")
if not responses:
raise NoSAMLResponseFound("No SAML response found in the HAR file")
self.parsed_data = unquote(responses[0])
return self.parsed_data
@classmethod
def from_file(cls, filename):
"""
Read HAR file to create parser object
Args:
filename (basestring): path to HAR file
Returns:
(HarParser) parser object
"""
with open(filename, 'r') as f:
return cls(f.read())
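# Hedged usage sketch (not part of the original module): "auth_flow.har" is a
# hypothetical capture file. parse() raises NoSAMLResponseFound when the HAR
# contains no SAMLResponse POST parameter; non-fatal warnings accumulate in
# the parser's `errors` list.
if __name__ == "__main__":
    har_parser = HarParser.from_file("auth_flow.har")
    saml_base64 = har_parser.parse()
    for warning in har_parser.errors:
        print(warning)
    print(saml_base64[:40] + "...")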
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/har.py
| 0.450359 | 0.330924 |
har.py
|
pypi
|
from itertools import zip_longest
from cryptography import x509
from cryptography.hazmat.backends import default_backend
class Certificate(object):
"""
Wrapper around cryptography's x509 parser for PEM certificates with
helper functions to retrieve relevant data from the certificate
"""
def __init__(self, cert_string):
"""
Creates certificate object from raw certificate content (without header/footer)
Args:
cert_string: certificate contents
"""
# This formats the raw certificate string into lines of 64 characters
cert_string = cert_string.replace('\n', '')
cert_string = "\n".join(["".join(v) for v in zip_longest(*[iter(cert_string)] * 64, fillvalue='')])
# This adds the header and footer
cert_string = '-----BEGIN CERTIFICATE-----\n' + cert_string + \
'\n-----END CERTIFICATE-----'
decoded_cert = x509.load_pem_x509_certificate(bytes(cert_string, 'utf-8'),
default_backend())
self._certificate = decoded_cert
def get_subject(self, as_string=False):
"""
Gets the certificate subject contents
Args:
as_string (bool): True returns the subject as a string,
False returns the subject as a dict keyed by field name
Returns:
(string, dict) value of subject field
"""
if as_string:
return self._certificate.subject.rfc4514_string()
subject_dict = dict()
for subject in self._certificate.subject.rdns:
field, value = subject.rfc4514_string().split("=", 1)
subject_dict[field] = value
return subject_dict
def get_organization_name(self):
"""
Get organization section of certificate subject
Returns:
(basestring) subject organization
"""
return self.get_subject().get("O")
def get_common_name(self):
"""
Get common name section of certificate subject
Returns:
(basestring) subject common name
"""
return self.get_subject().get("CN")
def get_expiration_date(self):
"""
Get expiration date for certificate
Returns:
datetime.date: the expiration date for the certificate
"""
return self._certificate.not_valid_after.date()
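# Hedged usage sketch (not part of the original module): summarizes a certificate
# extracted from a SAML response, where `raw_cert` is the base64 certificate body
# without the PEM header/footer (the form the SAML parsers in this package return).
def summarize_certificate(raw_cert):
    cert = Certificate(raw_cert)
    return {
        "common_name": cert.get_common_name(),
        "organization": cert.get_organization_name(),
        "expires": cert.get_expiration_date(),
    }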
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/cert.py
| 0.841565 | 0.249979 |
cert.py
|
pypi
|
import sys
import pyperclip
from saml_reader.cert import Certificate
from saml_reader.saml.parser import RegexSamlParser, StandardSamlParser
from saml_reader.saml.errors import SamlParsingError, SamlResponseEncryptedError, IsASamlRequest, DataTypeInvalid
from saml_reader.har import HarParser, HarParsingError, NoSAMLResponseFound
class TextReader:
"""
Parses raw SAML and certificate data from various input sources
Attributes:
VALID_INPUT_TYPES (set): set of strings of the valid input types for this tool
"""
VALID_INPUT_TYPES = {'base64', 'xml', 'har'}
def __init__(self, input_type, raw_data):
"""
Parses input for SAML response and displays summary and analysis
Args:
input_type (basestring): data type of `data`, must be
`'base64'`, `'xml'`, or `'har'`
raw_data (basestring): raw data to be parsed for SAML data
Returns:
None
Raises:
(DataTypeInvalid) if the `input_type` is invalid
"""
self._errors = []
input_type = input_type.lower()
if input_type not in self.VALID_INPUT_TYPES:
raise DataTypeInvalid(f"Invalid input type: {input_type}")
self._valid_cert = False
self._cert = None
self._saml = None
self._valid_saml = True
self._parser_used = 'strict'
is_encrypted = False
        is_a_request = False
try:
self._saml = self._parse_raw_data(input_type, raw_data)
if self._saml.used_relaxed_parser():
self._parser_used = 'relaxed'
except SamlParsingError:
self._parser_used = 'regex'
except SamlResponseEncryptedError as e:
is_encrypted = True
self._valid_saml = False
self._parser_used = e.parser
except IsASamlRequest as e:
            is_a_request = True
self._valid_saml = False
self._parser_used = e.parser
except NoSAMLResponseFound:
self._valid_saml = False
self._errors.append("Could not find a SAML response in the HAR data.\n"
"Please verify the input type and data is correct.")
return
if self._parser_used == 'regex':
try:
self._saml = self._parse_raw_data(input_type, raw_data,
parser=RegexSamlParser)
except SamlResponseEncryptedError:
is_encrypted = True
self._saml = None
self._valid_saml = False
except IsASamlRequest:
                is_a_request = True
self._saml = None
self._valid_saml = False
if self._parser_used != 'strict':
self._errors.append(f"WARNING: XML parsing failed. Using fallback '{self._parser_used}' parser. "
f"Some values may not parse correctly.\n")
if is_encrypted:
self._errors.append(
"SAML response is encrypted. Cannot parse.\n"
"Advise customer to update their identity provider "
"to send an unencrypted SAML response."
)
return
        if is_a_request:
self._errors.append(
"The input data appears to be a SAML request instead of a SAML response.\n"
"Please ask the customer for the SAML response instead of the request."
)
return
if not self._saml.found_any_values():
self._errors.append(
"Could not parse any relevant information from the input data.\n"
"Please make sure that your input contains SAML data."
)
self._valid_saml = False
if self._valid_saml:
raw_cert = self._saml.get_certificate()
self._cert = None
if raw_cert:
try:
self._cert = Certificate(raw_cert)
except ValueError as e:
if not e.args[0].startswith("Unable to load certificate"):
raise e
if not self._cert:
self._errors.append(
"Could not locate certificate. Identity provider info will not be available."
)
self._valid_cert = self._cert is not None
@classmethod
def from_clipboard(cls, data_type):
"""
Read data from the clipboard.
Args:
data_type (basestring): data type of `data`, must be
`'base64'`, `'xml'`, or `'har'`
Returns:
(TextReader) parsed SAML data
"""
raw_data = cls._read_clipboard()
return cls(data_type, raw_data)
@classmethod
def from_stdin(cls, data_type):
"""
Read data from the stdin.
Args:
data_type (basestring): data type of `data`, must be
`'base64'`, `'xml'`, or `'har'`
Returns:
(TextReader) parsed SAML data
"""
raw_data = cls._read_stdin()
return cls(data_type, raw_data)
@classmethod
def from_file(cls, data_type, filename):
"""
        Read data from a file.
Args:
data_type (basestring): data type of `data`, must be
`'base64'`, `'xml'`, or `'har'`
filename (basestring): path to file
Returns:
(TextReader) parsed SAML data
"""
raw_data = cls._read_file(filename)
return cls(data_type, raw_data)
@staticmethod
def _read_file(filename):
"""
Reads data from a file
Args:
filename (basestring): path of file to read
Returns:
(basestring) contents of file
Raises:
(FileNotFoundError) if the file does not exist or cannot be read
"""
try:
with open(filename, 'r') as f:
data = f.read()
except FileNotFoundError:
raise FileNotFoundError(f"Cannot find file specified: {filename}")
return data
@staticmethod
def _read_clipboard():
"""
Reads data from the system clipboard
Returns:
(basestring) contents of clipboard
"""
data = pyperclip.paste()
return data
@staticmethod
def _read_stdin():
"""
Reads contents of stdin (standard in) to get piped data
Returns:
(basestring) concatenated contents of stdin
"""
data = "".join(sys.stdin.readlines())
return data
def _parse_raw_data(self, input_type, data, parser=StandardSamlParser):
"""
Parse various data types to return SAML response
Args:
input_type (basestring): data type of `data`, must be
`'base64'`, `'xml'`, or `'har'`
data (basestring): data to parse for SAML response
parser (BaseSamlParser): parser class. Default: StandardSamlParser
Returns:
(BaseSamlParser) Object containing SAML data
Raises:
(DataTypeInvalid) if an invalid `input_type` is specified
"""
if input_type == 'base64':
return parser.from_base64(data)
if input_type == 'xml':
return parser.from_xml(data)
if input_type == 'har':
try:
# TODO: Do the HAR parsing in the constructor?
har_parser = HarParser(data)
data = har_parser.parse()
except HarParsingError as e:
raise DataTypeInvalid(*e.args)
self._errors.extend(har_parser.errors)
return parser.from_base64(data)
raise DataTypeInvalid(f"Invalid data type specified: {input_type}")
def get_saml(self):
"""
Gets parsed SAML object
Returns:
(BaseSamlParser) Object containing SAML data. Returns None if the SAML
data could not be parsed because it was encrypted
"""
return self._saml
def get_certificate(self):
"""
Gets certificate object
Returns:
(Certificate) Object containing certificate data. Returns None if
certificate could not be parsed from SAML data
"""
return self._cert
def saml_is_valid(self):
"""
Indicates if SAML response was successfully parsed
Returns:
(bool) True if the SAML response was successfully parsed, False otherwise.
Call `Parser.get_errors()` to see errors.
"""
return self._valid_saml
def cert_is_valid(self):
"""
Indicates if certificate was successfully parsed
Returns:
(bool) True if the certificate was successfully parsed, False otherwise.
Call `Parser.get_errors()` to see errors.
"""
return self._valid_cert
def get_errors(self):
"""
Returns errors encountered during parsing process.
Returns:
(`list` of `basestring`) If there were errors, will contain text explaining
errors. Empty list if no errors were encountered.
"""
return self._errors
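# Hedged usage sketch (not part of the original module): loads SAML data from a
# hypothetical XML file, prints any parser warnings or errors, and returns the
# parsed SAML and certificate objects (either may be None if parsing failed).
def load_saml_from_file(path):
    reader = TextReader.from_file('xml', path)
    for message in reader.get_errors():
        print(message)
    saml = reader.get_saml() if reader.saml_is_valid() else None
    cert = reader.get_certificate() if reader.cert_is_valid() else None
    return saml, cert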
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/text_reader.py
| 0.575588 | 0.239061 |
text_reader.py
|
pypi
|
from abc import ABC, abstractmethod
class BaseSamlParser(ABC):
"""
Generalized SAML response parser
"""
def __init__(self):
"""
        Base constructor for SAML response parsers.
        Subclasses are expected to parse the raw SAML response passed to their
        own constructors and to raise SamlResponseEncryptedError when the
        response is encrypted.
"""
pass
@classmethod
@abstractmethod
def from_xml(cls, xml):
"""
Instantiates the class using XML input.
Args:
xml (basestring): SAML response as stringified XML document
Returns:
(BaseSamlParser) parsed SAML response object
"""
pass
@classmethod
@abstractmethod
def from_base64(cls, base64):
"""
Instantiates the class using base64-encoded XML input.
Args:
base64 (basestring): SAML response as base64-encoded XML document
Returns:
(BaseSamlParser) parsed SAML response object
"""
pass
@abstractmethod
def get_certificate(self):
"""
Retrieves text of X.509 public certificate included in the SAML response.
Returns:
(basestring) Certificate contents as string
Raises:
(ValueError) Raised when the certificate entry is not found in the data
"""
pass
@abstractmethod
def get_subject_name_id(self):
"""
Retrieves the Name ID value from the subject section.
Returns:
(basestring) Value of the Name ID
Raises:
(ValueError) Raised when the Name ID entry is not found in the data
"""
pass
@abstractmethod
def get_subject_name_id_format(self):
"""
Retrieves the Name ID format from the subject section.
Returns:
(basestring) Format attribute of Name ID
Raises:
(ValueError) Raised when the Name ID entry is not found in the data
"""
pass
@abstractmethod
def get_assertion_consumer_service_url(self):
"""
Retrieves the service provider's Assertion Consumer Service URL.
Returns:
(basestring) Value of Assertion Consumer Service URL
Raises:
(ValueError) Raised when the Assertion Consumer Service
entry is not found in the data
"""
pass
@abstractmethod
def get_encryption_algorithm(self):
"""
Retrieves the encryption algorithm used for certificate. Should be
"sha1" or "sha256".
Returns:
(basestring) Value of encryption algorithm
Raises:
(ValueError) Raised when the encryption algorithm
entry is not found in the data
"""
pass
@abstractmethod
def get_audience_url(self):
"""
Retrieves the service provider's Audience URL.
Returns:
            (basestring) Value of Audience URL
Raises:
(ValueError) Raised when the Audience URL
entry is not found in the data
"""
pass
@abstractmethod
def get_issuer_uri(self):
"""
        Retrieves the identity provider's Issuer URI.
        Returns:
            (basestring) Value of Issuer URI
Raises:
(ValueError) Raised when the Issuer URI
entry is not found in the data
"""
pass
@abstractmethod
def get_attributes(self):
"""
Retrieves the identity provider's claim attributes.
Returns:
            (dict) Claim attribute values keyed by attribute name
Raises:
(ValueError) Raised when the attributes
are not found in the data
"""
pass
@abstractmethod
def is_assertion_found(self):
"""
Checks if the response contains exactly one assertion.
Returns:
(bool): True if the response contains one assertion, False otherwise
"""
pass
@abstractmethod
def get_xml(self, pretty=False):
"""
Return raw XML of SAML response
Args:
pretty (bool): Pretty-prints XML if True. False is XML in one line.
Default: False.
Returns:
(basestring) SAML response as XML string
"""
pass
@abstractmethod
def found_any_values(self):
"""
Checks to see if we were able to parse any values at all
Returns:
(bool) True if any values were able to be parsed, False otherwise
"""
pass
@abstractmethod
def get_duplicate_attribute_names(self):
"""Return any attribute names that were duplicated in the
attribute statement.
Returns:
set: set of duplicated attribute names
"""
pass
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/saml/base.py
| 0.92297 | 0.535584 |
base.py
|
pypi
|
from collections import defaultdict
import re
from onelogin.saml2.utils import OneLogin_Saml2_Utils as utils
from urllib.parse import unquote
from lxml import etree
from saml_reader.saml.base import BaseSamlParser
from saml_reader.saml.oli import OLISamlParser
from saml_reader.saml.errors import SamlResponseEncryptedError, IsASamlRequest, DataTypeInvalid
class StandardSamlParser(BaseSamlParser):
"""
Wrapper around OneLogin SAML response parser, adding functionality to
grab fields other than what is supported by default.
"""
def __init__(self, response):
"""
Parses SAML response from XML input.
Args:
response (basestring): SAML response as a stringified XML document
Raises:
(SamlResponseEncryptedError) Raised when SAML response is encrypted
"""
self._saml = OLISamlParser(response)
self._saml_values = dict()
self._duplicate_attributes = set()
super().__init__()
self._parse_saml_values()
def used_relaxed_parser(self):
"""
Determine if the parser had to fall back on an XML parser that
attempts to correct syntax errors. If the relaxed parser was used,
may indicate there were errors in the SAML response data (did you copy-paste
correctly?)
Returns:
(bool) True if we used the syntax-correcting parser,
False for standard, strict parser
"""
return self._saml.used_relaxed_parser
def _parse_saml_values(self):
"""
Pre-parse SAML values and cache them
Returns:
None
"""
value_by_field = {
'certificate': [
self._saml.query_assertion(
'/ds:Signature/ds:KeyInfo/ds:X509Data/ds:X509Certificate'
),
self._saml.query(
'/samlp:Response/ds:Signature/ds:KeyInfo/ds:X509Data/ds:X509Certificate'
)
],
'name_id': self._saml.query_assertion(
'/saml:Subject/saml:NameID'
),
'name_id_format': self._saml.query_assertion(
'/saml:Subject/saml:NameID'
),
'acs': [
self._saml.query('/samlp:Response'),
self._saml.query_assertion(
'/saml:Subject/saml:SubjectConfirmation/saml:SubjectConfirmationData'
)
],
'encryption':
self._saml.query_assertion('/ds:Signature/ds:SignedInfo/ds:SignatureMethod') or
self._saml.query('/samlp:Response/ds:Signature/ds:SignedInfo/ds:SignatureMethod'),
'audience': self._saml.query_assertion('/saml:Conditions/saml:AudienceRestriction/saml:Audience'),
'issuer': self._saml.query_assertion('/saml:Issuer'),
'attributes': self._saml.get_attributes(mark_duplicate_attributes=True)
}
transform_by_field = {
'certificate': lambda x: None if not x else
x[0][0].text if x[0] else
x[1][0].text if x[1] else None,
'name_id': lambda x: x[0].text if x else None,
'name_id_format': lambda x: x[0].attrib.get('Format') if x else None,
            'acs': lambda x: (x[0][0].attrib.get('Destination') if x[0] else None) or
                             (x[1][0].attrib.get('Recipient') if x[1] else None),
'encryption': self.__parse_encryption,
'audience': lambda x: x[0].text if x else None,
'issuer': lambda x: x[0].text if x else None,
'attributes': self.__parse_attributes
}
for field, value in value_by_field.items():
self._saml_values[field] = transform_by_field[field](value)
self._duplicate_attributes = set(
k for k, v in value_by_field['attributes'].items()
if v['is_duplicate']
)
@staticmethod
def __parse_attributes(attribute_data):
"""
Apply specific transformations to claim attributes.
Args:
attribute_data (dict): attribute data from SAML response
Returns:
(dict) transformed attributes
"""
if not attribute_data:
return None
# No special transforms at this time
special_transform_by_attribute = {}
transformed_attributes = dict()
for attribute_name, value_dict in attribute_data.items():
value = value_dict['values']
if attribute_name in special_transform_by_attribute:
transformed_attributes[attribute_name] = (
special_transform_by_attribute[attribute_name](value)
)
elif len(value) > 1:
transformed_attributes[attribute_name] = value
else:
transformed_attributes[attribute_name] = (
value[0] if value else ""
)
return transformed_attributes
@staticmethod
def __parse_encryption(result):
"""
Parse encryption values from URI
Args:
result (lxml.etree.Element): signature method query result
Returns:
(basestring) encryption algorithm, None if not found
"""
if not result:
return None
uri = result[0].attrib.get('Algorithm') or ""
algorithm = re.findall(r"(?i)sha(1|256)$", uri)
if algorithm:
return "SHA" + algorithm[0]
return None
@classmethod
def from_xml(cls, xml):
"""
Instantiates the class using XML input.
Args:
xml (basestring): SAML response as stringified XML document
Returns:
(BaseSamlParser) parsed SAML response object
"""
rx = r'[<>]'
if not re.search(rx, xml):
raise DataTypeInvalid("This does not appear to be XML")
return cls(xml)
@classmethod
def from_base64(cls, base64, url_decode=False):
"""
Instantiates the class using base64-encoded XML input.
Args:
base64 (basestring): SAML response as base64-encoded XML string
url_decode (bool): True performs url decoding before parsing. Default: False.
Returns:
(BaseSamlParser) parsed SAML response object
"""
value = base64 if not url_decode else unquote(base64)
# Check to see if this is valid base64
rx = r'[^a-zA-Z0-9/+=]'
if re.search(rx, value):
raise DataTypeInvalid("This does not appear to be valid base64")
return cls(utils.b64decode(value))
def get_certificate(self):
"""
Retrieves text of X.509 public certificate included in the SAML response.
Returns:
(basestring) Certificate contents as string, None if value not found
"""
return self._saml_values.get('certificate')
def get_subject_name_id(self):
"""
Retrieves the Name ID value from the subject section.
Returns:
(basestring) Value of the Name ID, None if value not found
"""
return self._saml_values.get('name_id')
def get_subject_name_id_format(self):
"""
Retrieves the Name ID format from the subject section.
Returns:
(basestring) Format attribute of Name ID format, None if value not found
"""
return self._saml_values.get('name_id_format')
def get_assertion_consumer_service_url(self):
"""
Retrieves the service provider's Assertion Consumer Service URL.
Returns:
(basestring) Value of Assertion Consumer Service URL, None if value not found
"""
return self._saml_values.get('acs')
def get_encryption_algorithm(self):
"""
Retrieves the encryption algorithm used for certificate. Should be
"sha1" or "sha256".
Returns:
(basestring) Value of encryption algorithm, None if value not found
"""
return self._saml_values.get('encryption')
def get_audience_url(self):
"""
Retrieves the service provider's Audience URL.
Returns:
(basestring) Value of Audience URL, None if value not found
"""
return self._saml_values.get('audience')
def get_issuer_uri(self):
"""
Retrieves the identity provider's Issuer URI.
Returns:
(basestring) Value of Issuer URI, None if value not found
"""
return self._saml_values.get('issuer')
def get_attributes(self):
"""
Retrieves the identity provider's claim attributes.
Returns:
(dict) Claim attribute values keyed by attribute name, empty dict if no attributes were found
"""
return self._saml_values.get('attributes') or dict()
def is_assertion_found(self):
"""
Checks if the response contains exactly one assertion.
Returns:
(bool): True if the response contains one assertion, False otherwise
"""
return self._saml.validate_num_assertions()
def get_xml(self, pretty=False):
"""
Return raw XML of SAML response
Args:
pretty (bool): Pretty-prints XML if True. False is XML in one line.
Default: False.
Returns:
(basestring) SAML response as XML string
"""
if pretty:
try:
pretty_xml = etree.tostring(self._saml.document, pretty_print=True)
return str(pretty_xml)
except etree.XMLSyntaxError:
raise ValueError("Cannot pretty print")
return str(self._saml.response)
def found_any_values(self):
"""
Checks to see if we were able to parse any values at all
Returns:
(bool) True if any values were able to be parsed, False otherwise
"""
return any(self._saml_values.values())
def get_duplicate_attribute_names(self):
"""Return any attribute names that were duplicated in the
attribute statement.
Returns:
set: set of duplicated attribute names
"""
return self._duplicate_attributes
class RegexSamlParser(BaseSamlParser):
"""
SAML parser which will be a little more forgiving to XML syntax errors by
relying on regex instead of an XML parser
"""
def __init__(self, response):
"""
Parses SAML response from XML input.
Args:
response (basestring): SAML response as stringified XML document
Raises:
(SamlResponseEncryptedError) Raised when SAML response is encrypted
"""
self._saml = str(response)
self._saml_values = dict()
self._duplicate_attributes = set()
if self._is_encrypted():
raise SamlResponseEncryptedError("SAML response is encrypted. Cannot parse without key", 'regex')
if self._is_saml_request():
raise IsASamlRequest("The SAML data contains a request and not a response", 'regex')
super().__init__()
self._parse_saml_values()
def _parse_saml_values(self):
"""
Pre-parse SAML values and cache them
Returns:
None
"""
# TODO: Let's use named groups instead, where we can
regex_by_field = {
'certificate': re.compile(r"(?s)<(?:ds:)?X509Certificate.*?>(.*?)</(?:ds:)?X509Certificate>"),
'name_id': re.compile(r"(?s)<(?:saml.?:)?NameID.*?>(.*?)</(?:saml.?:)?NameID>"),
'name_id_format': re.compile(r"(?s)<(?:saml.?:)?NameID.*?Format=\"(.+?)\".*?>"),
# This is a pretty relaxed regex because it occurs right at the beginning of the
# SAML response where there could be syntax errors if someone copy-pasted poorly
'acs': re.compile(
r"(?s)((?:<saml.*?:Response)?.*?Destination=\"(?P<acs>.+?)\".*?>|"
r"<(?:saml.?:)?SubjectConfirmationData.*?Recipient=\"(?P<acs_alt>.+?)\".*?)"
),
'encryption': re.compile(r"(?s)<(?:ds:)?SignatureMethod.*?Algorithm=\".+?sha(1|256)\".*?>"),
'audience': re.compile(r"(?s)<(?:saml.?:)?Audience(?:\s.*?>|>)(.*?)</(?:saml.?:)?Audience>"),
'issuer': re.compile(r"(?s)<(?:saml.?:)?Issuer.*?>(.*?)<\/(?:saml.?:)?Issuer>"),
'attributes': re.compile(
r"(?s)<(?:saml.?:)?Attribute.*?Name=\"(.+?)\".*?>\s*(.*?)\s*</(?:saml.?:)?Attribute>"
)
}
transform_by_field = {
'certificate': lambda x: x[0] if x else None,
'name_id': lambda x: x[0] if x else None,
'name_id_format': lambda x: x[0] if x else None,
            'acs': lambda x: x[0][1] if x and x[0][1] else x[0][2]
                             if x and x[0][2] else None,
'encryption': lambda x: "SHA" + x[0] if x else None,
'audience': lambda x: x[0] if x else None,
'issuer': lambda x: x[0] if x else None,
'attributes': self.__transform_attributes
}
for field, regex in regex_by_field.items():
result = regex.findall(self._saml)
result = transform_by_field[field](result)
self._saml_values[field] = result
def __transform_attributes(self, raw_data):
"""
Apply specific transformations to claim attributes.
Args:
raw_data (dict): attribute data from SAML response
Returns:
(dict) transformed attributes
"""
if not raw_data:
return None
value_regex = re.compile(r"(?s)<(?:saml.?:)?AttributeValue.*?>(.*?)</(?:saml.?:)?AttributeValue>")
special_transform_by_attribute = {}
self._duplicate_attributes = set()
transformed_attributes = defaultdict(list)
for name, value in raw_data:
if name in transformed_attributes:
self._duplicate_attributes.add(name)
value = value_regex.findall(value)
if not value:
# findall() returns a list with an empty string if there was a match but the group was empty
# but returns an empty list if there were no matches
value = ['(could not parse)']
if name in special_transform_by_attribute:
transformed_attributes[name].append(
special_transform_by_attribute[name](value)
)
elif len(value) > 1:
transformed_attributes[name].extend(value)
else:
transformed_attributes[name].append(
value[0] if value else ""
)
return {
k: "" if not v else v if len(v) > 1 else v[0]
for k, v in transformed_attributes.items()
}
def _is_encrypted(self):
"""
Determines if the SAML response is encrypted.
Returns:
(bool) True if encrypted, False otherwise
"""
rx = r"(?s)<\/?(?:saml.?:)?EncryptedAssertion"
result = re.findall(rx, self._saml)
return bool(result)
def _is_saml_request(self):
"""
Determines if received SAML data is actually a SAML request instead of response
Returns:
(bool) True if it is a request, False otherwise
"""
rx = r"<\/?(?:saml.{0,2}:)?AuthnRequest"
result = re.findall(rx, self._saml)
return bool(result)
@classmethod
def from_xml(cls, xml):
"""
Instantiates the class using XML input.
Args:
xml (basestring): SAML response as stringified XML document
Returns:
(BaseSamlParser) parsed SAML response object
"""
# Check to see if this couldn't be XML
rx = r'[<>]'
if not re.search(rx, xml):
raise DataTypeInvalid("This does not appear to be XML")
return cls(xml)
@classmethod
def from_base64(cls, base64, url_decode=False):
"""
Instantiates the class using base64-encoded XML input.
Args:
base64 (basestring): SAML response as base64-encoded XML string
url_decode (bool): True performs url decoding before parsing. Default: False.
Returns:
(BaseSamlParser) parsed SAML response object
"""
value = base64 if not url_decode else unquote(base64)
# Check to see if this is valid base64
        rx = r'[^a-zA-Z0-9/+=]'
if re.search(rx, value):
raise DataTypeInvalid("This does not appear to be valid base64")
return cls(utils.b64decode(value))
def get_certificate(self):
"""
Retrieves text of X.509 public certificate included in the SAML response.
Returns:
(basestring) Certificate contents as string, None if value not found
"""
return self._saml_values.get('certificate')
def get_subject_name_id(self):
"""
Retrieves the Name ID value from the subject section.
Returns:
(basestring) Value of the Name ID, None if value not found
"""
return self._saml_values.get('name_id')
def get_subject_name_id_format(self):
"""
Retrieves the Name ID format from the subject section.
Returns:
(basestring) Format attribute of Name ID, None if value not found
"""
return self._saml_values.get('name_id_format')
def get_assertion_consumer_service_url(self):
"""
Retrieves the service provider's Assertion Consumer Service URL.
Returns:
(basestring) Value of Assertion Consumer Service URL, None if value not found
"""
return self._saml_values.get('acs')
def get_encryption_algorithm(self):
"""
Retrieves the encryption algorithm used for certificate. Should be
"sha1" or "sha256".
Returns:
(basestring) Value of encryption algorithm, None if value not found
"""
return self._saml_values.get('encryption')
def get_audience_url(self):
"""
Retrieves the service provider's Audience URL.
Returns:
            (basestring) Value of Audience URL, None if value not found
"""
return self._saml_values.get('audience')
def get_issuer_uri(self):
"""
Retrieves the identity provider's Issuer URI.
Returns:
(basestring) Value of Issuer URI, None if value not found
"""
return self._saml_values.get('issuer')
def get_attributes(self):
"""
Retrieves the identity provider's claim attributes.
Returns:
(dict) Claim attribute values keyed by attribute name, empty dict if no values found
"""
return self._saml_values.get('attributes') or dict()
def is_assertion_found(self):
"""
Checks if the response contains exactly one assertion.
Returns:
(bool): True if the response contains one assertion, False otherwise
"""
rx = r"(?s)<(?:saml.?:)?Assertion.*?ID=\"(.+?)\".*?>"
result = re.findall(rx, self._saml)
return len(result) == 1
def get_xml(self, pretty=False):
"""
Return raw XML of SAML response
Args:
pretty (bool): Pretty-prints XML if True. False is XML in one line.
Default: False.
Returns:
(basestring) SAML response as XML string
"""
raw_xml = self._saml
if pretty:
# If we had to rely on this parser, there's not an easy way to
# pretty-print this badly-formed XML
return raw_xml
return raw_xml
def found_any_values(self):
"""
Checks to see if we were able to parse any values at all
Returns:
(bool) True if any values were able to be parsed, False otherwise
"""
return any(self._saml_values.values())
def get_duplicate_attribute_names(self):
"""Return any attribute names that were duplicated in the
attribute statement.
Returns:
set: set of duplicated attribute names
"""
return self._duplicate_attributes
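# Hedged usage sketch (not part of the original module): pulls a few key fields
# from a SAML response given as an XML string, using the strict parser. For
# base64-encoded input, StandardSamlParser.from_base64() could be used instead.
def summarize_response(xml_string):
    parsed = StandardSamlParser.from_xml(xml_string)
    return {
        "issuer": parsed.get_issuer_uri(),
        "audience": parsed.get_audience_url(),
        "acs": parsed.get_assertion_consumer_service_url(),
        "name_id": parsed.get_subject_name_id(),
        "encryption": parsed.get_encryption_algorithm(),
    }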
|
/saml_reader-0.0.6.tar.gz/saml_reader-0.0.6/saml_reader/saml/parser.py
| 0.817502 | 0.362715 |
parser.py
|
pypi
|