{
"source": "johnjajeh/Sage",
"score": 4
}
#### File: Sage/src/dealer.py
```python
from random import randint
def deal():
cards = [0, 0]
while cards[1] - cards[0] <= 1:
cards[0] = randint(1, 10)
cards[1] = randint(1, 10)
cards.sort()
card3 = randint(1, 10)
return (cards, card3)
```
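The deal above re-draws until the two cards differ by at least two, so a strictly-between third card is always possible. A minimal sketch (not part of the repository, and assuming `deal()` from `dealer.py` is importable) that estimates the win rate empirically:

```python
# Hypothetical simulation harness built on deal() above.
def estimate_win_rate(trials: int = 100_000) -> float:
    wins = 0
    for _ in range(trials):
        (low, high), card3 = deal()
        if low < card3 < high:  # third card strictly between the first two
            wins += 1
    return wins / trials

# estimate_win_rate() converges to about 1/3 for cards drawn from 1..10,
# since deal() guarantees high - low >= 2.
```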
#### File: Sage/src/player_factory.py
```python
from players.always_bet import PlayerAlwaysBet
from players.difference_plus import PlayerDifferencePlus
from players.difference import PlayerDifference
from players.hard_splits import PlayerHardSplits
from players.spread import PlayerSpread
from players.player import Player
class PlayerFactory:
def getPlayer1(self) -> Player:
return self.player1
def getPlayer2(self) -> Player:
return self.player2
def determinePlayers(self):
self.printIntroduction()
self.player1 = self.queryPlayer(1)
self.player2 = self.queryPlayer(2)
    def queryPlayer(self, playerNum: int) -> Player:
switcher = {
'1': lambda: PlayerAlwaysBet(),
'2': lambda: PlayerDifference(),
'3': lambda: PlayerDifferencePlus(),
'4': lambda: PlayerHardSplits(),
'5': lambda: PlayerSpread(),
}
selection = input('Select Player #{}: '.format(playerNum))
        # Invoke the selected factory; default to PlayerAlwaysBet for unknown input.
        return switcher.get(selection, lambda: PlayerAlwaysBet())()
def printIntroduction(self):
print('-------------------------------')
print('-- Pick Players --')
print('-- Input a number from below --')
print('-------------------------------')
print('')
print(' [1] Always bet')
print(' [2] Difference')
print(' [3] Difference plus 1')
print(' [4] Hard splits')
print(' [5] Spread')
print('')
```
#### File: src/players/player.py
```python
class Player:
def __init__(self):
self.score = 100
def getName(self):
return self.__class__.__name__
def changeScore(self, change: int):
self.score += change
    def play(self, low: int, high: int) -> int:
        # TODO: add more info if needed (e.g., round number and the other player's score)
        raise NotImplementedError
```
#### File: src/players/spread.py
```python
from players.player import Player
class PlayerSpread(Player):
def play(self, low: int, high: int) -> int:
if high - low >= 6:
return 9
        return 0
```
#### File: Sage/src/sage_game.py
```python
from players.player import Player
from dealer import deal
from reporter import Reporter
class Sage:
def __init__(self, player1: Player, player2: Player, reporter: Reporter):
self.player1 = player1
self.player2 = player2
self.reporter = reporter
def run(self): #Probably pass something in to record data for each round
for roundNum in range(10):
self.__playRound(self.player1)
self.__playRound(self.player2)
self.reporter.recordData(self.player1.score, self.player2.score)
return (self.player1.score, self.player2.score)
def __playRound(self, player: Player):
cards, card3 = deal()
bet = 1 + player.play(cards[0], cards[1])
        if cards[0] < card3 < cards[1]:
player.changeScore(bet)
else:
player.changeScore(-bet)
```
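Wiring the pieces together; a minimal sketch, assuming a stub `Reporter` (the real `reporter.Reporter` class is not included in this excerpt):

```python
# Hypothetical driver; StubReporter stands in for the Reporter class.
class StubReporter:
    def recordData(self, score1: int, score2: int) -> None:
        print("Round scores:", score1, score2)

factory = PlayerFactory()
factory.determinePlayers()            # prompts for two player strategies
game = Sage(factory.getPlayer1(), factory.getPlayer2(), StubReporter())
scores = game.run()                   # plays 10 rounds, each player once per round
```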
{
"source": "john-james-ai/cvr",
"score": 2
} |
#### File: cvr/core/profile.py
```python
import pandas as pd
from pandas.api.types import is_categorical_dtype
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import numpy as np
from typing import Union
# plotting modules
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
sns.set_palette("mako")
from cvr.data import categorical_columns, numeric_columns, criteo_columns
from cvr.utils.printing import Printer
from cvr.utils.format import titlelize, titlelize_df
from cvr.visualize.features import (
CategoricalFeatureVisualizer,
NumericFeatureVisualizer,
)
# ---------------------------------------------------------------------------- #
class DataProfiler:
"""Data profiler for Dataset objects."""
def __init__(self, data: pd.DataFrame) -> None:
self._data = data
self._title = "Criteo Sponsored Search Conversion Log Dataset"
self._printer = Printer()
self._categorical_visualizer = CategoricalFeatureVisualizer()
self._numeric_visualizer = NumericFeatureVisualizer()
# ------------------------------------------------------------------------ #
    def summary(self, verbose: bool = True) -> Union[dict, None]:
        """Prints the dataset summary, or returns it when verbose is False."""
        subtitle = "Dataset Summary"
        summary = self._data.summary
        if verbose:
            self._printer.print_title(self._title, subtitle)
            self._printer.print_dictionary(content=summary)
        else:
            return summary
# ------------------------------------------------------------------------ #
    def info(self, verbose: bool = False) -> Union[dict, None]:
        """Prints the column overview, or returns it when verbose is False."""
        subtitle = "Overview of Columns"
        info = self._data.info
        if verbose:
            self._printer.print_title(self._title, subtitle)
            self._printer.print_dictionary(content=info)
        else:
            return info
# ------------------------------------------------------------------------ #
# DESCRIPTIVE STATISTICS #
# ------------------------------------------------------------------------ #
    def describe(self, column: str, verbose: bool = False) -> pd.DataFrame:
        return self._data.describe(column)
# ------------------------------------------------------------------------ #
    def topn_plot(self, column: str) -> None:
        self._categorical_visualizer.fit(self._data[column])
        self._categorical_visualizer.topn()
def cfd_plot(self, column: str) -> None:
self._categorical_visualizer.fit(self._data[column])
self._categorical_visualizer.cfd()
def zipf_plot(self, column: str) -> None:
self._categorical_visualizer.fit(self._data[column])
self._categorical_visualizer.zipf()
```
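A usage sketch (hypothetical; `ds` stands in for a cvr Dataset-like object exposing the `summary`, `info`, and `describe` attributes the profiler relies on, and `"product_brand"` is an assumed column name):

```python
profiler = DataProfiler(data=ds)
profiler.summary()                       # print a formatted dataset summary
overview = profiler.info(verbose=False)  # retrieve the overview dictionary instead
profiler.topn_plot("product_brand")      # top-n frequencies for a categorical column
profiler.zipf_plot("product_brand")      # rank-frequency (Zipf) plot
```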
#### File: cvr/core/task.py
```python
from abc import ABC, abstractmethod
import pandas as pd
import inspect
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from cvr.utils.printing import Printer
from cvr.core.asset import AssetPassport
from cvr.core.dataset import Dataset
# ---------------------------------------------------------------------------- #
# TASK RESULT #
# ---------------------------------------------------------------------------- #
@dataclass
class TaskResult(ABC):
"""Standardized result object for all tasks"""
executed: bool = field(default=False)
passed: bool = field(default=False)
    complete: bool = field(default=False)
    completed: datetime = field(init=False, default=None)
comment: str = field(default="")
def __post_init__(self) -> None:
self._printer = Printer()
def todict(self) -> dict:
d = {
"Executed": self.executed,
"Passed": self.passed,
"Complete": self.complete,
"Completed": self.completed,
"Comment": self.comment,
}
return d
def print(self) -> None:
d = self.todict()
self._printer.print_title("Task Result")
self._printer.print_dictionary(d)
# ---------------------------------------------------------------------------- #
# TASK RESPONSE #
# ---------------------------------------------------------------------------- #
@dataclass
class TaskResponse(ABC):
"""Task specific metrics to be overridden by subclasses."""
    start: datetime = field(init=False, default=None)
    end: datetime = field(init=False, default=None)
    duration: timedelta = field(init=False, default=None)
    def __post_init__(self) -> None:
        self._printer = Printer()
    def begin(self) -> None:
        self.start = datetime.now()
    def stop(self) -> None:
        self.end = datetime.now()
        self.duration = self.end - self.start
def todict(self) -> dict:
d = {"Start": self.start, "End": self.end, "Duration": self.duration}
return d
def print(self) -> None:
title = "Task Response"
self._printer.print_title(title)
d = self.todict()
self._printer.print_dictionary(d)
# ---------------------------------------------------------------------------- #
# TASK SUMMARY #
# ---------------------------------------------------------------------------- #
@dataclass
class TaskSummary(ABC):
"""Summarizes a Task.
    Args:
        passport (AssetPassport): Identity object
        response (TaskResponse): Task-specific response, including the start,
            end, and duration of the task
        result (TaskResult): Task result, including whether the task was
            executed, passed, and completed
    """
passport: AssetPassport
response: TaskResponse
result: TaskResult
def __post_init__(self) -> None:
self._printer = Printer()
def print(self) -> None:
self.passport.print()
self.response.print()
self.result.print()
# ---------------------------------------------------------------------------- #
# TASK #
# ---------------------------------------------------------------------------- #
class Task(ABC):
"""Defines interface for task classes."""
def __init__(self, passport: AssetPassport, **kwargs) -> None:
self._passport = passport
self._config = None
@property
def config(self):
return self._config
@property
@abstractmethod
def passport(self):
pass
@property
@abstractmethod
def response(self) -> TaskResponse:
pass
@property
@abstractmethod
def result(self) -> TaskResult:
pass
@config.setter
def config(self, config) -> None:
self._config = config
def setup(self, **kwargs) -> None:
# Logging facility
self._logger = self._config.logger
# Subclass specific setup
self._setup()
def _setup(self) -> None:
pass
def teardown(self, **kwargs) -> None:
# Subclass specific teardown.
self._teardown()
# Base class gets last word
self._result.executed = "No" if self._result.executed is False else "Yes"
self._result.passed = "No" if self._result.passed is False else "Yes"
self._result.complete = "No" if self._result.complete is False else "Yes"
self._summary = TaskSummary(
passport=self.passport,
response=self.response,
result=self.result,
)
def _teardown(self, **kwargs) -> None:
pass
@abstractmethod
def run(self, data: pd.DataFrame = None) -> pd.DataFrame:
"""Runs the task through delegation to a private member on the subclass
Args:
df (pd.DataFrame): Input DataFrame object.
Returns:
df (pd.DataFrame): DataFrame object
response (dict): Dictionary containing task response information.
"""
pass
@abstractmethod
def passed(self) -> bool:
"""Checks success of task. Returns True if conditions pass."""
pass
def summary(self) -> TaskSummary:
return self._summary
def summarize(self) -> None:
self._summary.print()
# ============================================================================ #
# DATASET FACTORY #
# ============================================================================ #
class DatasetFactory(Task):
"""Creates Dataset objects."""
def __init__(self, passport: AssetPassport, dataset_passport: AssetPassport) -> None:
super(DatasetFactory, self).__init__(passport=passport)
self._dataset_passport = dataset_passport
def run(self, data: pd.DataFrame) -> Dataset:
self.setup()
self._logger.debug("\tStarted {} {}".format(self.__class__.__name__, inspect.stack()[0][3]))
aid = self._config.dataset_repo.aid_gen()
self._dataset_passport.aid = aid
dataset = Dataset(
passport=self._dataset_passport,
data=data,
)
dataset = self._config.dataset_repo.set_version(dataset)
self._logger.debug(
"\tCompleted {} {}".format(self.__class__.__name__, inspect.stack()[0][3])
)
self.teardown()
return dataset
def _setup(self) -> None:
self._response = TaskResponse()
self._response.begin()
self._result = TaskResult()
def _teardown(self) -> None:
self._response.stop()
self._result.executed = True
self._result.passed = self.passed()
self._result.complete = True
self._result.completed = datetime.now()
@property
def passport(self) -> AssetPassport:
return self._passport
@property
def response(self) -> TaskResponse:
return self._response
@property
def result(self) -> TaskResult:
return self._result
def passed(self) -> bool:
return True
```
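A minimal sketch (not from the repository) of a concrete Task, showing the setup/run/teardown lifecycle and the `_setup`/`_teardown` hooks that `DatasetFactory` also uses. The passport and config objects are stubbed for illustration:

```python
import logging
from types import SimpleNamespace

class NoOpTask(Task):
    """Does nothing; exists to illustrate the Task lifecycle."""
    def _setup(self) -> None:
        self._response = TaskResponse()
        self._response.begin()
        self._result = TaskResult()
    def _teardown(self) -> None:
        self._response.stop()
        self._result.executed = True
        self._result.passed = self.passed()
        self._result.complete = True
        self._result.completed = datetime.now()
    def run(self, data: pd.DataFrame = None) -> pd.DataFrame:
        self.setup()
        # ... the actual work on `data` would happen here ...
        self.teardown()
        return data
    def passed(self) -> bool:
        return True
    @property
    def passport(self):
        return self._passport
    @property
    def response(self):
        return self._response
    @property
    def result(self):
        return self._result

# task = NoOpTask(passport=None)
# task.config = SimpleNamespace(logger=logging.getLogger(__name__))
# task.run(pd.DataFrame())
# task.summary().print()  # a real AssetPassport (with a print() method) is needed here
```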
#### File: cvr/data/outliers.py
```python
from abc import ABC, abstractmethod
import os
import pandas as pd
import numpy as np
import logging
import math
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from cvr.utils.printing import Printer
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------------------------------------------------ #
DEFAULT_N_JOBS = 18
# ======================================================================================================================== #
# OUTLIER DETECTION #
# ======================================================================================================================== #
class OutlierDetector:
"""Outlier detection with selected outlier detection algorithms.
Args:
criterion (str): Indicates criterion for final determination of an observation, given results
from various outlier detection algorithms. Values include 'any', 'all', 'vote' for
majority vote.
numeric_algorithms(dict): Dictionary of instantiated numeric outlier detection algorithms
categorical_algorithms(dict): Dictionary of instantiated categorical outlier detection algorithms
random_state (int): Pseudo random generator seed for Isolation Forest
    Attributes:
        results_: Nested dictionary holding numeric, categorical, and combined
            (ensemble) outlier labels and summaries
    Returns:
        Numpy array containing the outlier labels
    """
def __init__(
self,
criterion="vote",
numeric_algorithms: dict = None,
categorical_algorithms: dict = None,
random_state=None,
) -> None:
self._criterion = criterion
self._random_state = random_state
self.results_ = {}
self._n = None
# Numeric Outlier Detection Algorithms
self._numeric_algorithms = (
numeric_algorithms
if numeric_algorithms
else {
"Z-Score": OutlierZScore(),
"IQR": OutlierIQR(),
"Robust Covariance": OutlierEllipticEnvelope(random_state=random_state),
"Isolation Forest": OutlierIsolationForest(random_state=random_state),
"Local Outlier Factor": OutlierLocalOutlierFactor(),
}
)
# Categorical Outlier Detection Algorithms
self._categorical_algorithms = (
categorical_algorithms
if categorical_algorithms
else {
"Attribute Value Frequency": OutlierAVF(),
"Square of Complement Frequency": OutlierSCF(),
"Weighted Attribute Value Frequency": OutlierWAVF(),
}
)
# Algorithms for numeric and categorical (object) data outlier detection
self._detectors = {
"number": self._numeric_algorithms,
"object": self._categorical_algorithms,
}
def fit(self, X, y=None):
"""Fits several outlier detection algorithms.
Args:
X (pd.DataFrame): Input
"""
self._n = len(X)
labels_ensemble = {}
for datatype, algorithms in self._detectors.items():
labels_datatype = {}
X_datatype = X.select_dtypes(include=datatype)
for name, algorithm in algorithms.items():
name_datatype = name + " (" + datatype + ")"
print(
"Currently fitting outlier detector {}.".format(name_datatype),
end=" ",
)
algorithm.fit(X_datatype)
labels = algorithm.predict(X_datatype)
o = labels.sum()
p = round(o / self._n * 100, 2)
print("Detected {} outliers, {}% of the data.".format(str(o), str(p)))
labels_datatype[name] = labels
labels_ensemble[name_datatype] = labels
self.results_[datatype] = self._compute_results(labels_datatype, datatype)
# Combine results for numeric and categorical outlier labels
self.results_["ensemble"] = self._compute_results(labels_ensemble, "combined")
def predict(self, X) -> pd.DataFrame:
o = self.results_["ensemble"]["labels"].sum()
p = round(o / self._n * 100, 2)
print(
"\nThe ensemble detected {} outliers constituting {}% of the data using the {} criterion.".format(
str(o), str(p), str(self._criterion)
)
)
return self.results_["ensemble"]["labels"].to_frame().reset_index()
def _compute_results(self, labels: dict, datatype: str) -> dict:
"""Aggregates results for several outlier detection algorithms."""
d = {}
# Store labels by algorithm
d["labels_by_algorithm"] = pd.DataFrame.from_dict(labels, orient="columns")
# Store aggregated labels based upon the criteria
d["labels_any"] = d["labels_by_algorithm"].any(axis=1)
d["labels_all"] = d["labels_by_algorithm"].all(axis=1)
d["labels_vote"] = d["labels_by_algorithm"].mean(axis=1) > 0.5
# Store the labels according to the selected criterion
if self._criterion == "any":
d["labels"] = d["labels_any"]
elif self._criterion == "all":
d["labels"] = d["labels_all"]
else:
d["labels"] = d["labels_vote"]
# Update labels by algorithms to include the labels aggregated by the three criteria
all_desc = self._get_label_description(datatype, " (All)")
any_desc = self._get_label_description(datatype, " (Any)")
vote_desc = self._get_label_description(datatype, " (Majority Vote)")
ensemble_desc = self._get_label_description(datatype, "")
d["labels_by_algorithm"][all_desc] = d["labels_all"]
d["labels_by_algorithm"][any_desc] = d["labels_any"]
d["labels_by_algorithm"][vote_desc] = d["labels_vote"]
d["labels_by_algorithm"][ensemble_desc] = d["labels"]
        # Aggregate the total outlier counts for all algorithms and criteria
d["summary"] = d["labels_by_algorithm"].sum()
return d
def _get_label_description(self, datatype: str, criterion: str) -> str:
if datatype == "number":
return "Numeric Ensemble" + criterion
elif datatype == "object":
return "Categorical Ensemble" + criterion
else:
return "Combined Ensemble" + criterion
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS Z-SCORE #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierZScore:
def __init__(self, threshold: int = 3) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Computes the zscores for a 2 dimensional array.
Args:
X (pd.DataFrame): Input
"""
# Convert dataframe to numpy array.
X = X.select_dtypes(include="number").values
z = stats.zscore(X)
labels = np.where(np.abs(z) > self._threshold, 1, 0)
self._labels = np.any(labels, axis=1)
def predict(self, X):
"""Returns the prediction
Args:
X (np.array): Input
"""
return self._labels
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS IQR #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierIQR:
def __init__(self, threshold: float = 1.5) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Computes the zscores for a 2 dimensional array.
X (pd.DataFrame): Input
"""
# Convert dataframe to numpy array.
X = X.select_dtypes(include="number").values
q1, q3 = np.percentile(a=X, q=[25, 75], axis=0)
iqr = q3 - q1
lower = q1 - (iqr * self._threshold)
upper = q3 + (iqr * self._threshold)
labels = np.where(np.greater(X, upper) | np.less(X, lower), 1, 0)
self._labels = np.any(labels, axis=1)
def predict(self, X) -> np.array:
return self._labels
# ======================================================================================================================== #
# SKLEARN OUTLIER DETECTOR WRAPPERS #
# ======================================================================================================================== #
class OutliersSKLearn(ABC):
"""Abstract base class for sklearn outlier detectors wrappers.
The SKLearn classifiers cannot handle NaNs. Hence, NaNs were replaced as follows:
- Numeric variables replace NaNs with the mean.
- Categorical variables replace NaNs with -1
"""
def __init__(
self,
contamination: float = None,
n_jobs: int = DEFAULT_N_JOBS,
random_state: int = None,
**kwargs
) -> None:
self._contamination = contamination
self._n_jobs = n_jobs
self._random_state = random_state
self._clf = self.get_clf()
@abstractmethod
def get_clf(self) -> None:
pass
def fit(self, X: pd.DataFrame, y: np.ndarray = None) -> None:
X = X.select_dtypes(include="number")
X = self._impute(X).values
self._clf.fit(X)
def predict(self, X: pd.DataFrame) -> np.ndarray:
X = X.select_dtypes(include="number")
X = self._impute(X).values
labels = self._clf.predict(X)
return np.where(labels == -1, 1, 0)
def _impute(self, X) -> pd.DataFrame:
"""Imputes missing numerics with their means and missing categoricals with '-1'"""
imputer = {
"sale": 0,
"sales_amount": X["sales_amount"].mean(),
"conversion_time_delay": X["conversion_time_delay"].mean(),
"click_ts": X["click_ts"].mean(),
"n_clicks_1week": X["n_clicks_1week"].mean(),
"product_price": X["product_price"].mean(),
"product_age_group": "-1",
"device_type": "-1",
"audience_id": "-1",
"product_gender": "-1",
"product_brand": "-1",
"product_category_1": "-1",
"product_category_2": "-1",
"product_category_3": "-1",
"product_category_4": "-1",
"product_category_5": "-1",
"product_category_6": "-1",
"product_category_7": "-1",
"product_country": "-1",
"product_id": "-1",
"product_title": "-1",
"partner_id": "-1",
"user_id": "-1",
}
X.fillna(value=imputer, inplace=True)
return X
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS ELLIPTIC ENVELOPE #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierEllipticEnvelope(OutliersSKLearn):
"""Wrapper for sklearn's Elliptic Envelope class which accepts dataframes as input.
Args:
        support_fraction (float): The proportion of points to be included in the support of the raw MCD estimate. If None, the minimum value of support_fraction will be used within the algorithm: [n_sample + n_features + 1] / 2. Range is (0, 1). Default is 0.6.
contamination (float): The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Range is (0, 0.5]. Default is 0.1
random_state (int): Pseudo random generator seed. Default is None.
"""
def __init__(
self,
support_fraction: float = 0.6,
contamination: float = 0.1,
random_state: int = None,
) -> None:
self._support_fraction = support_fraction
super(OutlierEllipticEnvelope, self).__init__(
contamination=contamination, random_state=random_state
)
def get_clf(self):
return EllipticEnvelope(
support_fraction=self._support_fraction,
contamination=self._contamination,
random_state=self._random_state,
)
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS ISOLATION FOREST #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierIsolationForest(OutliersSKLearn):
"""Wrapper for sklearn's Isolation Forest class which accepts dataframes as input.
Args:
        contamination (float or "auto"): The amount of contamination of the data set, i.e. the proportion
            of outliers in the data set. Range is (0, 0.5]. Default is "auto".
        n_jobs (int): The number of jobs to run in parallel.
random_state (int): Pseudo random generator seed. Default is None.
"""
def __init__(
self,
contamination="auto",
n_jobs: int = DEFAULT_N_JOBS,
random_state: int = None,
) -> None:
super(OutlierIsolationForest, self).__init__(
contamination=contamination, n_jobs=n_jobs, random_state=random_state
)
def get_clf(self):
return IsolationForest(
contamination=self._contamination,
n_jobs=self._n_jobs,
random_state=self._random_state,
)
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS LOCAL OUTLIER FACTOR                                                                                    #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierLocalOutlierFactor(OutliersSKLearn):
"""Wrapper for sklearn's Local Outlier Factor class which accepts dataframes as input.
Args:
        contamination (float or "auto"): The amount of contamination of the data set, i.e. the proportion
            of outliers in the data set. Range is (0, 0.5]. Default is "auto".
        n_jobs (int): The number of jobs to run in parallel.
"""
def __init__(self, contamination="auto", n_jobs: int = DEFAULT_N_JOBS) -> None:
super(OutlierLocalOutlierFactor, self).__init__(
contamination=contamination, n_jobs=n_jobs
)
def get_clf(self):
return LocalOutlierFactor(
contamination=self._contamination, n_jobs=self._n_jobs
)
def predict(self, X: pd.DataFrame) -> None:
X = X.select_dtypes(include="number")
X = self._impute(X).values
labels = self._clf.fit_predict(X)
return np.where(labels == -1, 1, 0)
# ======================================================================================================================== #
# OUTLIER CATEGORICAL ANALYSIS #
# ======================================================================================================================== #
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS ATTRIBUTE VALUE FREQUENCY #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierAVF:
"""Detects outliers using the Attribute Value Frequency method.
Args:
        threshold (float): The fraction of observations with the lowest AVF
            scores to flag as outliers. Default is 0.1.
"""
def __init__(self, threshold: float = 0.1) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Fits the model
X (pd.DataFrame): Input
"""
X = X.select_dtypes(include="object")
df = pd.DataFrame()
        # Iterate over columns and build a dataframe containing the value frequencies.
for col in X.columns:
# Create a one column dataframe
df1 = X[col].to_frame()
# Compute value counts and convert series to frame
df2 = df1.value_counts().to_frame().reset_index()
df2.columns = ["value", "count"]
# Merge the two dataframes and extract the column with the frequencies and add to new dataframe
merged = pd.merge(df1, df2, how="left", left_on=col, right_on="value")
df[col] = merged["count"]
        # Determine a cutoff in terms of the observations with the M lowest AVF scores:
        # flag the `threshold` fraction of observations with the lowest average
        # value frequencies as outliers.
avf = df.mean(axis=1)
n = len(df)
k = math.ceil(n * self._threshold)
threshold = avf.sort_values().head(k).max()
self._labels = avf < threshold
def predict(self, X) -> np.array:
# Convert the dataframe to a numpy array to comport with the other estimators.
return self._labels.values
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS WEIGHTED ATTRIBUTE VALUE FREQUENCY #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierWAVF:
"""Detects outliers using the Weighted Attribute Value Frequency method.
Args:
        threshold (float): The fraction of observations with the lowest WAVF
            scores to flag as outliers. Default is 0.1.
"""
def __init__(self, threshold: float = 0.1) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Fits the model
X (pd.DataFrame): Input
"""
X = X.select_dtypes(include="object")
weights = self._compute_weights(X)
df = pd.DataFrame()
        # Iterate over columns and build a dataframe containing the value frequencies.
for col in X.columns:
# Create a one column dataframe
df1 = X[col].to_frame()
# Compute value counts and convert series to frame
df2 = df1.value_counts().to_frame().reset_index()
df2.columns = ["value", "count"]
# Merge the two dataframes and extract the column with the frequencies and add to new dataframe
merged = pd.merge(df1, df2, how="left", left_on=col, right_on="value")
df[col] = merged["count"] * weights[col]
        # Determine a cutoff in terms of the observations with the M lowest WAVF scores:
        # flag the `threshold` fraction of observations with the lowest weighted
        # average value frequencies as outliers.
avf = df.mean(axis=1)
n = len(df)
k = math.ceil(n * self._threshold)
threshold = avf.sort_values().head(k).max()
self._labels = avf < threshold
def predict(self, X) -> np.array:
# Convert the dataframe to a numpy array to comport with the other estimators.
return self._labels.values
def _compute_weights(self, X: pd.DataFrame) -> dict:
"""Computes the weights as the range of frequencies for each variable."""
weights = {}
for col in X.columns:
counts = X[col].value_counts()
weights[col] = max(counts) - min(counts)
return weights
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS SQUARE OF THE COMPLEMENT FREQUENCY #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierSCF:
"""Detects outliers using the Square of the Complement Frequency (SCF).
Args:
        threshold (float): The number of standard deviations above the mean SCF
            score beyond which an observation is flagged as an outlier. Default is 3.
"""
def __init__(self, threshold: float = 3) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Fits the model
X (pd.DataFrame): Input
"""
X = X.select_dtypes(include="object")
df = pd.DataFrame()
n = X.shape[0]
        # Iterate over columns and build a dataframe containing the value frequencies.
for col in X.columns:
# Create a one column dataframe
df1 = X[col].to_frame()
# Get the number of categories in col
c = X[col].nunique()
# Compute the marginal relative frequency (value counts / number of observations)
p = df1.value_counts().to_frame() / n
# Compute the square complement frequency
df2 = (1 - p) ** 2 / c
# Merge the two dataframes and extract the column with the frequencies and add to new dataframe
df[col] = pd.merge(df1, df2, on=[col], how="left")[0]
# Take the sum across columns
scf = df.sum(axis=1)
        # Designate scores more than `threshold` standard deviations above the mean as outliers
upper_bound = scf.mean() + scf.std() * self._threshold
self._labels = scf > upper_bound
def predict(self, X) -> np.array:
# Convert the dataframe to a numpy array to comport with the other estimators.
return self._labels.values
```
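To make the ensemble criteria concrete, here is a small, self-contained sketch (not from the repository) of the any/all/majority-vote aggregation that `_compute_results` performs:

```python
import pandas as pd

# Toy label matrix: rows are observations, columns are detector votes (1 = outlier).
labels = pd.DataFrame({
    "Z-Score": [1, 0, 0, 1],
    "IQR":     [1, 0, 1, 1],
    "LOF":     [0, 0, 1, 1],
})
any_crit = labels.any(axis=1)          # outlier if any detector flags it
all_crit = labels.all(axis=1)          # outlier only if every detector agrees
vote_crit = labels.mean(axis=1) > 0.5  # outlier if a majority of detectors agree
print(pd.DataFrame({"any": any_crit, "all": all_crit, "vote": vote_crit}))
```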
#### File: cvr/utils/data.py
```python
import pandas as pd
# ---------------------------------------------------------------------------- #
def to_list(x, ignore: str = None) -> list:
    """Wraps a value in a list if it isn't already a list, unless it matches `ignore`."""
if x is ignore:
result = x
else:
result = x if isinstance(x, list) else [x]
return result
def sample(
data: pd.DataFrame, frac: float, stratify: str = None, random_state: int = None
) -> pd.DataFrame:
"""Sample from the current DataFrame.
Args:
data (pd.DataFrame): The data to sample
        frac (float): The fraction of observations to draw
stratify (str): None or the column to stratify
random_state (int): Pseudo random generator seed
Returns:
DataFrame containing the requested sample.
"""
# Sample if stratify
if stratify is not None:
df = data.groupby(by=[stratify]).sample(frac=frac, random_state=random_state)
else:
df = data.sample(frac=frac, random_state=random_state)
return df
```
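A quick usage sketch of `sample` with hypothetical data:

```python
import pandas as pd

df = pd.DataFrame({
    "sale": [0, 1, 0, 1, 0, 1, 0, 0],
    "price": [9.99, 20.0, 5.5, 13.0, 7.25, 30.0, 2.0, 11.0],
})
# Draw 50% of rows while preserving the class balance of the `sale` column.
s = sample(df, frac=0.5, stratify="sale", random_state=42)
print(s)
```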
#### File: cvr/tests/test_etl.py
```python
import os
import pytest
import logging
import pandas as pd
from datetime import datetime
import inspect
import time
import shutil
from cvr.core.lab import Project
from cvr.core.pipeline import PipelineCommand
from cvr.core.dataset import DatasetRequest
from cvr.data.etl import Extract, TransformETL, LoadDataset
from cvr.core.pipeline import (
DataPipelineBuilder,
DataPipeline,
DataPipelineCommand,
)
from cvr.data import criteo_columns, criteo_dtypes
from cvr.utils.config import CriteoConfig
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------------------------------------------------ #
class ETLTests:
def __init__(self):
# Create Test Lab if it doesn't already exist
wsm = Project()
if wsm.exists("test_etl"):
self._lab = wsm.get_lab("test_etl")
else:
self._lab = wsm.create_lab(
name="test_etl", description="Test ETL", current=True
)
# Get configuration for data source
self._config = CriteoConfig()
def test_tasks(self):
logger.info(
"\tStarted {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
self._extract = Extract(datasource_config=self._config)
self._transform = TransformETL(value=[-1, "-1"])
self._load = LoadDataset()
logger.info(
"\tSuccessfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
def test_builder(self):
logger.info(
"\tStarted {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
dataset_request = DatasetRequest(
name="test_etl_dataset",
description="Sample Dataset for ETL Test",
stage="test",
sample_size=1000,
lab_name=self._lab.name,
lab_directory=self._lab.directory,
)
data_pipeline_request = DataPipelineCommand(
name="test_etl_pipeline",
description="Testing The Sax Man",
stage="mezzanine",
lab_name=self._lab.name,
lab_directory=self._lab.directory,
logging_level="info",
force=True,
verbose=True,
progress=False,
random_state=602,
dataset_request=dataset_request,
)
self._builder = DataPipelineBuilder()
self._pipeline = (
self._builder.make_request(data_pipeline_request)
.add_task(self._extract)
.add_task(self._transform)
.add_task(self._load)
.build()
.pipeline
)
logger.info(
"\tSuccessfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
def test_pipeline(self):
logger.info(
"\tStarted {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
self.dataset = self._pipeline.run()
self._pipeline.summary
logger.info(
"\tSuccessfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
def data(self):
logger.info(
"\tStarted {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
self.dataset.info()
self.dataset.profile.summary
self.dataset.profile.datatypes
self.dataset.profile.missing
self.dataset.profile.cardinality
self.dataset.profile.frequency_stats
self.dataset.profile.analyze("product_brand")
self.dataset.profile.analyze("sales_amount")
logger.info(
"\tSuccessfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]
)
)
if __name__ == "__main__":
logger.info(" Started ETL Pipeline Tests ")
t = ETLTests()
t.test_tasks()
t.test_builder()
t.test_pipeline()
t.data()
logger.info(" Completed ETL Pipeline Tests ")
#%%
```
{
"source": "john-james-ai/DeepCVR",
"score": 2
}
#### File: deepcvr/data/dag.py
```python
from abc import ABC, abstractmethod
import importlib
from typing import Any
# ------------------------------------------------------------------------------------------------ #
class AbstractDAG(ABC):
"""Abstract base class for directed acyclic graph of operations.
Args:
dag_id (str): Identifier for the dag
dag_description (str): Brief description
tasks (list): List of tasks to execute
"""
def __init__(self, dag_id: str, dag_description: str, tasks: list, context: Any = None) -> None:
self._dag_id = dag_id
self._dag_description = dag_description
self._tasks = tasks
self._context = context
@abstractmethod
def run(self) -> None:
pass
# ------------------------------------------------------------------------------------------------ #
class Dag(AbstractDAG):
"""Directed acyclic graph of operations.
Args:
dag_id (str): Identifier for the dag
dag_description (str): Brief description
tasks (list): List of tasks to execute
"""
def __init__(self, dag_id: str, dag_description: str, tasks: list, context: Any = None) -> None:
super(Dag, self).__init__(
dag_id=dag_id, dag_description=dag_description, tasks=tasks, context=context
)
def run(self) -> None:
data = None
for task in self._tasks:
result = task.execute(data=data, context=self._context)
data = result if result is not None else data
# ------------------------------------------------------------------------------------------------ #
class DagBuilder:
"""Constructs a DAG from a configuration dictionary
Args:
config (dict): Nested dictionary of tasks defined by a dag_id, dag_description and
a nested dictionary of tasks, where each task is defined by:
task_id: Sequence number of task
task: Name of the class that executes the task
module: The module containing the task
task_name: A name for the task
task_params: Any parameters required by the task
"""
def __init__(self, config: dict, context: dict = None) -> None:
self._config = config
self._context = context
self._dag = None
@property
def dag(self) -> Dag:
return self._dag
def build(self) -> Dag:
dag_id = self._config["dag_id"]
dag_description = self._config["dag_description"]
tasks = []
for _, task_config in self._config["tasks"].items():
# Create task object from string using importlib
module = importlib.import_module(task_config["module"])
task = getattr(module, task_config["task"])
task_instance = task(
task_id=task_config["task_id"],
task_name=task_config["task_name"],
params=task_config["task_params"],
)
tasks.append(task_instance)
self._dag = Dag(
dag_id=dag_id, dag_description=dag_description, tasks=tasks, context=self._context
)
return self._dag
```
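An illustrative `DagBuilder` configuration, following the structure described in the docstring. The module path and class names match `deepcvr.data.transform` shown below, but the task names, column names, and dtypes are assumptions:

```python
config = {
    "dag_id": "etl",
    "dag_description": "Extract, transform, and load the dataset",
    "tasks": {
        1: {
            "task_id": 1,
            "task": "ColumnLabeler",             # class resolved via getattr
            "module": "deepcvr.data.transform",  # module loaded via importlib
            "task_name": "label_columns",
            "task_params": {"names": ["sample_id", "num_features", "features"]},
        },
        2: {
            "task_id": 2,
            "task": "DataTyper",
            "module": "deepcvr.data.transform",
            "task_name": "cast_types",
            "task_params": {"dtype": {"sample_id": "int64"}},
        },
    },
}
dag = DagBuilder(config=config, context={}).build()
# dag.run()  # in a real DAG the first task would load the data it hands downstream
```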
#### File: deepcvr/data/operator.py
```python
from abc import ABC, abstractmethod
import pandas as pd
from typing import Any
# ------------------------------------------------------------------------------------------------ #
class Operator(ABC):
"""Abstract class for operator classes
Args:
task_id (int): A number, typically used to indicate the sequence of the task within a DAG
task_name (str): String name
params (Any): Parameters for the task
"""
def __init__(self, task_id: int, task_name: str, params: list) -> None:
self._task_id = task_id
self._task_name = task_name
self._params = params
def __str__(self) -> str:
return str(
"Task id: {}\tTask name: {}\tParams: {}".format(
self._task_id, self._task_name, self._params
)
)
@property
def task_id(self) -> int:
return self._task_id
@property
def task_name(self) -> str:
return self._task_name
@property
def params(self) -> Any:
return self._params
@abstractmethod
def execute(self, data: pd.DataFrame = None, context: dict = None) -> Any:
pass
```
#### File: deepcvr/data/transform.py
```python
import re
import logging
from pyspark.sql import SparkSession
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import StringType, DoubleType, StructField, StructType, LongType
import pandas as pd
import numpy as np
from typing import Any
from deepcvr.base.operator import Operator
from deepcvr.utils.decorators import operator
# ------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------------------------ #
MAX_PARTITION_SIZE = 1024 * 1024 * 100
# ------------------------------------------------------------------------------------------------ #
# COLUMN LABELER #
# ------------------------------------------------------------------------------------------------ #
class ColumnLabeler(Operator):
"""Adds column names to a DataFrame. Includes support for threaded concurrency."""
def __init__(self, task_id: int, task_name: str, params: list) -> None:
super(ColumnLabeler, self).__init__(task_id=task_id, task_name=task_name, params=params)
@operator
def execute(self, data: pd.DataFrame = None, context: Any = None) -> Any:
"""Executes the labeling task
Args:
data (pd.DataFrame): Data from the previous step
context (dict): Context data shared by operators within a dag
"""
data.columns = self._params["names"]
return data
# ------------------------------------------------------------------------------------------------ #
# TARGET LABELER #
# ------------------------------------------------------------------------------------------------ #
class TargetLabeler(Operator):
"""Adds the target labels for view, clicks, and conversions."""
def __init__(self, task_id: int, task_name: str, params: list) -> None:
super(TargetLabeler, self).__init__(task_id=task_id, task_name=task_name, params=params)
@operator
def execute(self, data: pd.DataFrame = None, context: Any = None) -> Any:
"""Executes the preprocessing task
Adds a target label variable
Args:
data (pd.DataFrame): Data from the previous step
context (dict): Context data shared by operators within a dag
"""
return self._add_label(data=data)
    def _add_label(self, data: pd.DataFrame) -> pd.DataFrame:
        """Adds the appropriate label, 'View', 'Click', or 'Convert', to the core dataset"""
        # Nest the conditions so that a conversion is not overwritten by the click test.
        data["target_label"] = np.where(
            data["conversion_label"] == 1,
            "Convert",
            np.where(data["click_label"] == 1, "Click", "View"),
        )
        return data
# ------------------------------------------------------------------------------------------------ #
# DATA TYPER #
# ------------------------------------------------------------------------------------------------ #
class DataTyper(Operator):
"""Casts the datatypes in a DataFrame. Includes support for threaded concurrency."""
def __init__(self, task_id: int, task_name: str, params: list) -> None:
super(DataTyper, self).__init__(task_id=task_id, task_name=task_name, params=params)
@operator
def execute(self, data: pd.DataFrame = None, context: Any = None) -> Any:
"""Executes the data typing task
Args:
data (pd.DataFrame): Data from the previous step
context (dict): Context data shared by operators within a dag
"""
        for k, v in self._params["dtype"].items():
            # astype returns a new Series; assign it back so the cast takes effect.
            data[k] = data[k].astype(v, errors="ignore")
return data
# ------------------------------------------------------------------------------------------------ #
# DATA SELECTOR #
# ------------------------------------------------------------------------------------------------ #
class DataSelector(Operator):
"""Reads and returns selected columns from the input DataFrame ."""
def __init__(self, task_id: int, task_name: str, params: list) -> None:
super(DataSelector, self).__init__(task_id=task_id, task_name=task_name, params=params)
@operator
def execute(self, data: pd.DataFrame = None, context: Any = None) -> Any:
"""Executes the data typing task
Args:
data (pd.DataFrame): Data from the previous step
context (dict): Context data shared by operators within a dag
"""
return data[self._params["names"]]
# ------------------------------------------------------------------------------------------------ #
# FEATURE TRANSFORMER #
# ------------------------------------------------------------------------------------------------ #
class FeatureTransformer(Operator):
"""Extracts, reformats and stores core features in a format for loading. """
def __init__(self, task_id: int, task_name: str, params: list) -> None:
super(FeatureTransformer, self).__init__(
task_id=task_id, task_name=task_name, params=params
)
@operator
    def execute(self, data: pd.DataFrame = None, context: Any = None) -> pd.DataFrame:
"""Core feature extraction and conversion
Args:
data (pd.DataFrame): Context data for the DAG
context (dict): Context data shared by operators within a dag
"""
logger.info(data.head())
data = self._partition_data(data, self._params["n_partitions"])
        cores = "local[{}]".format(self._params["cores"])
spark = (
SparkSession.builder.master(cores).appName("DeepCVR Core Features ETL").getOrCreate()
)
spark.sparkContext.setLogLevel("ERROR")
sdf = spark.createDataFrame(data)
if self._is_sample_feature_data(data):
logger.info("Sample data")
logger.info(data.head())
result = sdf.groupby("partition").apply(sample_features)
else:
logger.info("Not sample data")
logger.info(data.head())
result = sdf.groupby("partition").apply(common_features)
df = result.toPandas()
return df
def _is_sample_feature_data(self, data: pd.DataFrame) -> bool:
return "sample_id" in data.columns
def _partition_data(self, data: pd.DataFrame, n_partitions: int) -> pd.DataFrame:
"""Partitions the dataset"""
reindexed = data.reset_index()
data["partition"] = reindexed.index % n_partitions
return data
# ------------------------------------------------------------------------------------------------ #
sample_schema = StructType(
[
StructField("sample_id", LongType(), False),
StructField("feature_name", StringType(), False),
StructField("feature_id", LongType(), False),
StructField("feature_value", DoubleType(), False),
]
)
# ------------------------------------------------------------------------------------------------ #
@pandas_udf(sample_schema, PandasUDFType.GROUPED_MAP)
def sample_features(partition):
output = pd.DataFrame()
for _, row in partition.iterrows():
sample_id = int(row[0])
num_features = int(row[1])
feature_string = row[2]
df = parse_feature_string(
id_name="sample_id",
id_value=sample_id,
num_features=num_features,
feature_string=feature_string,
)
output = pd.concat([output, df], axis=0)
return output
# ------------------------------------------------------------------------------------------------ #
common_schema = StructType(
[
StructField("common_features_index", StringType(), False),
StructField("feature_name", StringType(), False),
StructField("feature_id", LongType(), False),
StructField("feature_value", DoubleType(), False),
]
)
# ------------------------------------------------------------------------------------------------ #
@pandas_udf(common_schema, PandasUDFType.GROUPED_MAP)
def common_features(partition):
output = pd.DataFrame()
for _, row in partition.iterrows():
common_features_index = row[0]
num_features = int(row[1])
feature_string = row[2]
df = parse_feature_string(
id_name="common_features_index",
id_value=common_features_index,
num_features=num_features,
feature_string=feature_string,
)
output = pd.concat([output, df], axis=0)
return output
# ------------------------------------------------------------------------------------------------ #
def parse_feature_string(
id_name: str, id_value: Any, num_features: int, feature_string: str
) -> dict:
"""Parses a feature string from a single observation in the dataset.
    Feature strings contain one or more feature structures, each delimited by the ASCII character
    '\x01'. Each feature structure contains three components,
- feature_name (string),
- feature_id (int), and
- feature_value (float)
    This function parses the feature string and its feature structures into a DataFrame
    comprised of one row per feature structure, with an id column plus the three feature columns.
Args:
id_name (str): The column name for the id
id_value (str,int): The column value for the id
num_features (int): The number of feature structures in the list
feature_string (str): String containing feature structures
"""
feature_names = []
feature_ids = []
feature_values = []
# Expand into a list of feature structures
feature_structures = re.split("\x01", str(feature_string))
for structure in feature_structures:
name, id, value = re.split("\x02|\x03", str(structure))
feature_names.append(name)
feature_ids.append(int(id))
feature_values.append(float(value))
d = {
id_name: id_value,
"feature_name": feature_names,
"feature_id": feature_ids,
"feature_value": feature_values,
}
df = pd.DataFrame(data=d)
    # Confirm the number of rows equals the expected number of features.
    assert df.shape[0] == num_features, "Feature count does not match num_features"
return df
```
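A small sketch (not from the repository) showing the feature-string format that `parse_feature_string` expects, with `\x02`/`\x03` separating name, id, and value inside each `\x01`-delimited structure; the feature names are hypothetical:

```python
# Two feature structures, delimited by \x01; fields split by \x02 and \x03.
feature_string = "age_level\x021\x032.0" + "\x01" + "gender\x022\x031.0"
df = parse_feature_string(
    id_name="sample_id",
    id_value=42,
    num_features=2,
    feature_string=feature_string,
)
print(df)
#    sample_id feature_name  feature_id  feature_value
# 0         42    age_level           1            2.0
# 1         42       gender           2            1.0
```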
#### File: tests/test_data/test_decompress.py
```python
import os
import pytest
import logging
import inspect
from deepcvr.data.extract import Decompress
# ---------------------------------------------------------------------------- #
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------- #
@pytest.mark.decompress
class TestDecompress:
def test_decompress(self) -> None:
logger.info("\tStarted {} {}".format(self.__class__.__name__, inspect.stack()[0][3]))
source = "tests/data/development/external/taobao_test.tar.gz"
destination = "tests/data/development/raw"
target_1 = os.path.join(destination, "sample_skeleton_test.csv")
target_2 = os.path.join(destination, "common_features_test.csv")
decompress = Decompress(source=source, destination=destination)
decompress.execute()
assert os.path.exists(target_1), logger.error("Failure. {} is missing".format(target_1))
assert os.path.exists(target_2), logger.error("Failure. {} is missing".format(target_2))
source = "tests/data/development/external/taobao_train.tar.gz"
target_1 = os.path.join(destination, "sample_skeleton_train.csv")
target_2 = os.path.join(destination, "common_features_train.csv")
decompress = Decompress(source=source, destination=destination)
decompress.execute()
assert os.path.exists(target_1), logger.error("Failure. {} is missing".format(target_1))
assert os.path.exists(target_2), logger.error("Failure. {} is missing".format(target_2))
logger.info(
"\tSuccessfully completed {} {}".format(self.__class__.__name__, inspect.stack()[0][3])
)
if __name__ == "__main__":
t = TestDecompress()
t.test_decompress()
#%%
```
{
"source": "john-james-ai/ml-studio",
"score": 2
}
#### File: supervised_learning/training/metrics.py
```python
from abc import ABC, abstractmethod
import numpy as np
from ml_studio.utils.data_manager import decode
class Metric(ABC):
"""Abstract base class for all metrics."""
@abstractmethod
def __init__(self):
raise NotImplementedError("This method is not implemented for "
"this Abstract Base Class.")
@abstractmethod
def __call__(self, y, y_pred):
raise NotImplementedError("This method is not implemented for "
"this Abstract Base Class.")
class RegressionMetric(Metric):
"""Base class for regression metrics."""
@abstractmethod
def __init__(self):
raise NotImplementedError("This method is not implemented for "
"this Abstract Base Class.")
@abstractmethod
def __call__(self, y, y_pred):
raise NotImplementedError("This method is not implemented for "
"this Abstract Base Class.")
class ClassificationMetric(Metric):
"""Base class for classification metrics."""
@abstractmethod
def __init__(self):
raise NotImplementedError("This method is not implemented for "
"this Abstract Base Class.")
@abstractmethod
def __call__(self, y, y_pred):
raise NotImplementedError("This method is not implemented for "
"this Abstract Base Class.")
# --------------------------------------------------------------------------- #
# REGRESSION METRICS #
# --------------------------------------------------------------------------- #
class SSR(RegressionMetric):
"""Computes sum squared residuals given"""
def __init__(self):
self.mode = 'min'
self.name = 'residual_sum_squared_error'
self.label = "Residual Sum Squared Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
e = y - y_pred
return np.sum(e**2)
class SST(RegressionMetric):
"""Computes total sum of squares"""
def __init__(self):
self.mode = 'min'
self.name = 'total_sum_squared_error'
self.label = "Total Sum Squared Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
y_avg = np.mean(y)
e = y-y_avg
return np.sum(e**2)
class R2(RegressionMetric):
"""Computes coefficient of determination."""
def __init__(self):
self.mode = 'max'
self.name = 'R2'
self.label = r"$R^2$"
self.stateful = False
self.best = np.max
self.better = np.greater
self.worst = -np.Inf
self.precision_factor = 1
def __call__(self, y, y_pred):
self._ssr = SSR()
self._sst = SST()
r2 = 1 - (self._ssr(y, y_pred)/self._sst(y, y_pred))
return r2
class VarExplained(RegressionMetric):
"""Computes proportion of variance explained."""
def __init__(self):
self.mode = 'max'
self.name = 'percent_variance_explained'
self.label = "Percent Variance Explained"
self.stateful = False
self.best = np.max
self.better = np.greater
self.worst = -np.Inf
self.precision_factor = 1
def __call__(self, y, y_pred):
var_explained = 1 - (np.var(y-y_pred) / np.var(y))
return var_explained
class MAE(RegressionMetric):
"""Computes mean absolute error given data and parameters."""
def __init__(self):
self.mode = 'min'
self.name = 'mean_absolute_error'
self.label = "Mean Absolute Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
e = abs(y-y_pred)
return np.mean(e)
class MSE(RegressionMetric):
"""Computes mean squared error given data and parameters."""
def __init__(self):
self.mode = 'min'
self.name = 'mean_squared_error'
self.label = "Mean Squared Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
e = y-y_pred
return np.mean(e**2)
class NMSE(RegressionMetric):
"""Computes negative mean squared error given data and parameters."""
def __init__(self):
self.mode = 'max'
self.name = 'neg_mean_squared_error'
self.label = "Negative Mean Squared Error"
self.stateful = False
self.best = np.max
self.better = np.greater
self.worst = -np.Inf
self.precision_factor = 1
def __call__(self, y, y_pred):
e = y-y_pred
return -np.mean(e**2)
class RMSE(RegressionMetric):
"""Computes root mean squared error given data and parameters."""
def __init__(self):
self.mode = 'min'
self.name = 'root_mean_squared_error'
self.label = "Root Mean Squared Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
e = y-y_pred
return np.sqrt(np.mean(e**2))
class NRMSE(RegressionMetric):
"""Computes negative root mean squared error given data and parameters."""
def __init__(self):
self.mode = 'max'
self.name = 'neg_root_mean_squared_error'
self.label = "Negative Root Mean Squared Error"
self.stateful = False
self.best = np.max
self.better = np.greater
self.worst = -np.Inf
self.precision_factor = 1
def __call__(self, y, y_pred):
e = y-y_pred
return -np.sqrt(np.mean(e**2))
class MSLE(RegressionMetric):
"""Computes mean squared log error given data and parameters."""
def __init__(self):
self.mode = 'min'
self.name = 'mean_squared_log_error'
self.label = "Mean Squared Log Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
e = np.log(y+1)-np.log(y_pred+1)
return np.mean(e**2)
class RMSLE(RegressionMetric):
"""Computes root mean squared log error given data and parameters."""
def __init__(self):
self.mode = 'min'
self.name = 'root_mean_squared_log_error'
self.label = "Root Mean Squared Log Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
e = np.log(y+1)-np.log(y_pred+1)
return np.sqrt(np.mean(e**2))
class MEDAE(RegressionMetric):
"""Computes median absolute error given data and parameters."""
def __init__(self):
self.mode = 'min'
self.name = 'median_absolute_error'
self.label = "Median Absolute Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
return np.median(np.abs(y_pred-y))
class MAPE(RegressionMetric):
"""Computes mean absolute percentage given data and parameters."""
def __init__(self):
self.mode = 'min'
self.name = 'mean_absolute_percentage_error'
self.label = "Mean Absolute Percentage Error"
self.stateful = False
self.best = np.min
self.better = np.less
self.worst = np.Inf
self.precision_factor = -1
def __call__(self, y, y_pred):
return 100*np.mean(np.abs((y-y_pred)/y))
class RegressionMetricFactory:
"""Returns the requested score class."""
def __call__(self, metric='mse'):
dispatcher = {'r2': R2(),
'var_explained': VarExplained(),
'mae': MAE(),
'mse': MSE(),
'nmse': NMSE(),
'rmse': RMSE(),
'nrmse': NRMSE(),
'msle': MSLE(),
'rmsle': RMSLE(),
'medae': MEDAE(),
'mape': MAPE()}
        return dispatcher.get(metric, False)
# --------------------------------------------------------------------------- #
# CLASSIFICATION METRICS #
# --------------------------------------------------------------------------- #
class Accuracy(ClassificationMetric):
"""Computes accuracy."""
def __init__(self):
self.mode = 'max'
self.name = 'accuracy'
self.label = "Accuracy"
self.stateful = False
self.best = np.max
self.better = np.greater
self.worst = -np.Inf
self.precision_factor = 1
def __call__(self, y, y_pred):
"""Computes accuracy as correct over total."""
# If scoring multinomial logistical regression with one-hot vectors,
# convert them back to 1d integers.
if len(y.shape) > 1:
y = decode(y)
if len(y_pred.shape) > 1:
y_pred = decode(y_pred)
return np.sum(np.equal(y,y_pred)) / y.shape[0]
class ClassificationMetricFactory:
"""Returns the requested score class."""
def __call__(self, metric='accuracy'):
dispatcher = {'accuracy': Accuracy()}
        return dispatcher.get(metric, False)
```
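The factory returns ready-to-call metric instances keyed by short names, and each metric exposes its optimization direction through `mode`, `best`, and `better`. A minimal usage sketch against the classes above; the import path matches the one used in the tests later in this dump, and the arrays are illustrative:

```python
import numpy as np
from ml_studio.supervised_learning.training.metrics import RegressionMetricFactory

factory = RegressionMetricFactory()
rmse = factory(metric='rmse')            # returns an RMSE instance, or False for unknown keys
y = np.array([3.0, 5.0, 7.0])
y_pred = np.array([2.5, 5.5, 7.5])
print(rmse.label, rmse(y, y_pred))       # Root Mean Squared Error 0.5
print(rmse.mode, rmse.better(0.4, 0.5))  # min True -- lower is better for RMSE
```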
#### File: ml_studio/utils/data_operations.py
```python
import numpy as np
import pandas as pd
def is_array(a):
    return isinstance(a, (pd.Series, np.ndarray, list, tuple))
def is_simple_array(a):
    return isinstance(a, (list, tuple))
def is_homogeneous_array(a):
    return isinstance(a, (pd.Series, pd.Index, np.ndarray))
def is_numpy_convertable(a):
"""Evaluate whether an array can be converted to a numpy array."""
return hasattr(a, "__array__") or hasattr(a, "__array_interface__")
def to_native_type(a):
"""Converts a numpy scalar or array to python native scalar or list."""
# Handle scalar
if np.isscalar(a) and hasattr(a, "item"):
return a.item()
# Handle simplearray
if is_simple_array(a):
return [to_native_type(e) for e in a]
# Handle numpy array
if isinstance(a, np.ndarray) and a.ndim == 0:
return a.item()
# Handle pandas Series
if isinstance(a, (pd.Series, pd.Index)):
return [to_native_type(e) for e in a]
# Handle convertable array
if is_numpy_convertable(a):
return to_native_type(np.array(a))
return a
def coerce_homogeneous_array(value, kind=None, force_numeric=False):
"""Coerces homogeneous array to numeric numpy array if possible.
This code was inspired by:
******************************************************************
* Title : copy_to_readonly_numpy_array
* Author(s) : <NAME>, <NAME>
* Date : December 26, 2019
* Version 4.4.1
* Availability : https://github.com/plotly/plotly.py/blob/90f237060092d86d5b8bd9ec8cf158e0e5a7f728/packages/python/plotly/_plotly_utils/basevalidators.py#L56
******************************************************************
"""
# Initialize kind
if not kind:
kind = ()
elif isinstance(kind, str):
kind = (kind,)
# Extract first kind
first_kind = kind[0] if kind else None
# Designate numeric kinds and default types
numeric_kinds = {"u", "i", "f"}
    kind_default_dtypes = {"u": "uint32", "i": "int32", "f": "float64",
                           "O": "object", "S": "string"}
# Coerce pandas Series and Index objects
if isinstance(value, (pd.Series, pd.Index)):
        if value.dtype.kind in numeric_kinds:
# Extract the numeric numpy array
value = value.values
# If value is not a numpy array, then attempt to convert
if not isinstance(value, np.ndarray):
if is_numpy_convertable(value):
return(coerce_homogeneous_array(value, kind,
force_numeric=force_numeric))
else:
# value is a simple array
value = [to_native_type(e) for e in value]
# Lookup dtype for the requested kind, if any
dtype=kind_default_dtypes.get(first_kind, None)
# Construct new array from list
value = np.array(value, order="C", dtype=dtype)
elif value.dtype.kind in numeric_kinds:
# value is a homogeneous array
if kind and value.dtype.kind not in kind:
# Kind(s) specified but not matched
# Convert to the default type for the first kind
dtype = kind_default_dtypes.get(first_kind, None)
value = np.ascontiguousarray(value.astype(dtype))
else:
# Either no kind was requested or requested kind is satisfied
value = np.ascontiguousarray(value.copy())
else:
# value is a non-numeric homogeneous array
value = value.copy()
# Handle force numeric parameter
if force_numeric and value.dtype.kind not in numeric_kinds:
raise ValueError("Unable to force non-numeric to numeric")
# Force non-numeric arrays to object type
if "U" not in kind and value.dtype.kind \
not in kind_default_dtypes.keys():
value = np.array(value, dtype="object")
return value
```
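A short usage sketch for the converters above; the list input and `kind` value are illustrative:

```python
import numpy as np
import pandas as pd
from ml_studio.utils.data_operations import to_native_type, coerce_homogeneous_array

s = pd.Series([1, 2, 3])
print(to_native_type(s))                           # [1, 2, 3] as native Python scalars
a = coerce_homogeneous_array([1, 2, 3], kind="f")  # coerce a simple list to float64
print(a.dtype, a)                                  # float64 [1. 2. 3.]
```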
#### File: ml_studio/utils/file_manager.py
```python
import os
import time
from matplotlib import animation, rc
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.offline as py
from ml_studio.utils.misc import snake
def save_fig(fig, directory, filename):
    """Saves a matplotlib figure, creating the directory if needed."""
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(os.path.abspath(directory), filename)
    fig.savefig(path, facecolor='w', bbox_inches=None)
def save_gif(ani, directory, filename, fps):
    """Saves a matplotlib animation as a gif via imagemagick."""
    os.makedirs(directory, exist_ok=True)
    face_edge_colors = {'facecolor': 'w', 'edgecolor': 'w'}
    path = os.path.join(os.path.abspath(directory), filename)
    ani.save(path, writer='imagemagick', fps=fps, savefig_kwargs=face_edge_colors)
def save_csv(df, directory, filename):
    """Saves a pandas DataFrame to csv, creating the directory if needed."""
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(os.path.abspath(directory), filename)
    df.to_csv(path, index=False)
def save_numpy(a, directory, filename):
    """Saves a numpy array, creating the directory if needed."""
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(os.path.abspath(directory), filename)
    np.save(file=path, arr=a)
def save_plotly(a, directory, filename):
    """Saves a plotly figure as offline html, creating the directory if needed."""
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(os.path.abspath(directory), filename)
    py.plot(a, filename=path, auto_open=False, include_mathjax='cdn')
def get_filename(instance, fileext, element=None):
"""Creates a standard format filename for saving plots."""
# Obtain user id, class name and date time
project = "ml_studio_"
userhome = os.path.expanduser('~')
username = os.path.split(userhome)[-1] + "_"
clsname = instance.__class__.__name__ + "_"
if element:
element = element + "_"
else:
element = ""
timestr = time.strftime("%Y%m%d-%H%M%S")
# Snake case format filename
filename = project + username + clsname + element + timestr + fileext
filename = snake(filename)
return filename
```
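A usage sketch for the helpers above; the directory and column names are illustrative, and each save helper creates the target directory when it is missing:

```python
import pandas as pd
from ml_studio.utils.file_manager import save_csv, get_filename

df = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
filename = get_filename(df, fileext=".csv", element="scores")  # snake_cased, timestamped name
save_csv(df, directory="./reports", filename=filename)
```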
#### File: tests/test_operations/test_callbacks.py
```python
import datetime
import math
import numpy as np
import pytest
from pytest import mark
from ml_studio.supervised_learning.training.monitor import History
from ml_studio.supervised_learning.training.metrics import RegressionMetricFactory
class HistoryTests:
@mark.history
def test_history_on_train_begin(self, get_history):
history = get_history
history.on_train_begin()
assert history.total_epochs == 0, "total_epochs is not zero"
assert history.total_batches == 0, "total_batches is not zero"
assert isinstance(
history.start, datetime.datetime), "start is not a datetime object"
assert isinstance(history.epoch_log,
dict), "history.epoch_log is not a dictionary object"
assert isinstance(history.batch_log,
dict), "history.batch_log is not a dictionary object"
assert len(
history.epoch_log) == 0, "history.epoch_log has a non-zero length"
assert len(
history.batch_log) == 0, "history.batch_log has a non-zero length"
@mark.history
def test_history_on_batch_end(self, get_history):
history = get_history
history.on_train_begin()
batch = 1
batch_size = 32
theta = np.random.normal(size=10)
train_cost = 100 * np.random.random_sample((1))
batch_log = {'batch': batch, 'batch_size': batch_size,
'theta': theta, 'train_cost': train_cost}
history.on_batch_end(batch, batch_log)
assert history.total_batches == 1, "total_batches 1st iteration not equal 1"
assert history.batch_log['batch'][0] == 1, "batch number 1st iteration not 1"
assert history.batch_log['batch_size'][0] == 32, "batch_size not correct"
assert history.batch_log['theta'][0].shape == (
10,), "theta shape not correct"
assert isinstance(
history.batch_log['theta'][0], (list, np.ndarray)), "theta is not a list or ndarray"
assert history.batch_log['train_cost'][0] == train_cost, "train_cost not valid"
batch = 2
batch_size = 32
theta = np.random.normal(size=10)
train_cost = 100 * np.random.random_sample((1))
batch_log = {'batch': batch, 'batch_size': batch_size,
'theta': theta, 'train_cost': train_cost}
history.on_batch_end(batch, batch_log)
        assert history.total_batches == 2, "total_batches 2nd iteration not equal 2"
        assert history.batch_log['batch'][1] == 2, "batch number 2nd iteration not 2"
assert history.batch_log['batch_size'][1] == 32, "batch_size not correct"
assert history.batch_log['theta'][1].shape == (
10,), "theta shape not correct"
assert isinstance(
history.batch_log['theta'][1], (list, np.ndarray)), "theta is not a list or ndarray"
assert history.batch_log['train_cost'][1] == train_cost, "train_cost not valid"
@mark.history
def test_history_on_epoch_end_w_validation(self, get_history):
# Evaluate batches
history = get_history
history.on_train_begin()
total_costs = 0
for i in np.arange(1, 11):
batch = i
batch_size = 32
theta = np.random.normal(size=10)
train_cost = 100 * np.random.random_sample((1))
total_costs += train_cost
batch_log = {'batch': batch, 'batch_size': batch_size,
'theta': theta, 'train_cost': train_cost}
history.on_batch_end(batch, batch_log)
assert history.total_batches == 10, "number of batches is incorrect"
assert np.sum(history.batch_log['batch']) == np.sum(
np.arange(1, 11)), "batch number list incorrect"
assert len(history.batch_log['batch']) == 10, "batch number is wrong shape"
assert np.sum(history.batch_log['batch_size']) == np.sum(
np.repeat(32, 10)), "batch size list incorrect"
assert len(history.batch_log['batch_size']) == 10, "batch size is wrong shape"
assert len(history.batch_log['theta']) == 10, "theta is wrong length"
        assert np.isclose(np.sum(history.batch_log['train_cost']), total_costs[0], rtol=1e-4), "train costs don't sum"
assert len(history.batch_log['train_cost']) ==10, "train_cost not correct shape"
assert isinstance(
history.batch_log['batch'], list), "batch number is not a list"
assert isinstance(
history.batch_log['batch_size'], list), "batch size is not a list"
assert isinstance(
history.batch_log['theta'], list), "theta is not an ndarray"
for theta in history.batch_log['theta']:
assert isinstance(theta, np.ndarray), "thetas are not np.ndarrays"
assert isinstance(
history.batch_log['train_cost'], list), "train_cost is not a list"
# Evaluate epochs
epoch = 1
train_cost = 1000*np.random.random_sample((1))
train_score = 1000*np.random.random_sample((1))
val_cost = 1000*np.random.random_sample((1))
val_score = 1000*np.random.random_sample((1))
theta = np.random.normal(size=10)
log = {'epoch':epoch, 'train_cost': train_cost, 'train_score': train_score,
'val_cost': val_cost, 'val_score': val_score, 'theta': theta}
history.on_epoch_end(epoch, logs=log)
assert history.epoch_log['train_cost'][0] == train_cost, "train_cost 1st iteration not correct"
assert history.epoch_log['train_score'][0] == train_score, "train_score 1st iteration not correct"
assert history.epoch_log['val_cost'][0] == val_cost, "val_cost 1st iteration not correct"
assert history.epoch_log['val_score'][0] == val_score, "val_score 1st iteration not correct"
assert (history.epoch_log['theta'][0]==theta).all(), "theta 1st iteration not correct"
assert isinstance(
history.epoch_log['epoch'], list), "epochs is not a list"
assert isinstance(
history.epoch_log['train_cost'], list), "train_cost is not a list"
assert isinstance(
history.epoch_log['train_score'], list), "train_score is not a list"
assert isinstance(
history.epoch_log['val_cost'], list), "val_cost is not a list"
assert isinstance(
history.epoch_log['val_score'], list), "val_score is not a list"
epoch = 2
train_cost = 1000*np.random.random_sample((1))
train_score = 1000*np.random.random_sample((1))
val_cost = 1000*np.random.random_sample((1))
val_score = 1000*np.random.random_sample((1))
theta = np.random.normal(size=10)
log = {'epoch':epoch, 'train_cost': train_cost, 'train_score': train_score,
'val_cost': val_cost, 'val_score': val_score, 'theta': theta}
history.on_epoch_end(epoch, logs=log)
        assert history.epoch_log['epoch'][1] == 2, "epoch number is not 2 on second iteration"
        assert history.epoch_log['train_cost'][1] == train_cost, "train_cost 2nd iteration not correct"
        assert history.epoch_log['train_score'][1] == train_score, "train_score 2nd iteration not correct"
        assert history.epoch_log['val_cost'][1] == val_cost, "val_cost 2nd iteration not correct"
        assert history.epoch_log['val_score'][1] == val_score, "val_score 2nd iteration not correct"
assert (history.epoch_log['theta'][1]==theta).all(), "theta 2nd iteration not correct"
assert len(history.epoch_log['epoch']) == 2, "epochs shape not correct on second iteration"
assert len(history.epoch_log['train_cost']) == 2, "train_cost length not correct on second iteration"
assert len(history.epoch_log['train_score']) == 2, "train_score shape not correct on second iteration"
assert len(history.epoch_log['val_cost']) == 2, "val_cost shape not correct on second iteration"
assert len(history.epoch_log['val_score']) == 2, "val_score shape not correct on second iteration"
@mark.history
def test_history_on_train_end(self, get_history):
history = get_history
history.on_train_begin()
history.on_train_end()
assert isinstance(
history.end, datetime.datetime), "end is not a datetime object"
assert isinstance(history.duration, float), "duration is not a float"
```
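From these assertions, the `History` callback accumulates dict-of-lists logs keyed by the fields shown. A minimal driver for that protocol, assuming `History()` takes no required constructor arguments (the tests obtain one from a fixture):

```python
import numpy as np
from ml_studio.supervised_learning.training.monitor import History

history = History()          # assumption: no required constructor arguments
history.on_train_begin()
for epoch in range(1, 3):
    for batch in range(1, 4):
        history.on_batch_end(batch, {'batch': batch, 'batch_size': 32,
                                     'theta': np.zeros(10), 'train_cost': 1.0})
    history.on_epoch_end(epoch, logs={'epoch': epoch, 'train_cost': 1.0,
                                      'train_score': 0.5, 'val_cost': 1.2,
                                      'val_score': 0.4, 'theta': np.zeros(10)})
history.on_train_end()
print(history.total_epochs, history.total_batches)  # counters accumulated by the callback
```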
#### File: tests/test_operations/test_cost.py
```python
import math
import numpy as np
import pytest
from pytest import mark
from ml_studio.supervised_learning.training.cost import Quadratic
from ml_studio.supervised_learning.training.cost import BinaryCrossEntropy
from ml_studio.supervised_learning.training.cost import CategoricalCrossEntropy
class QuadraticCostTests:
@mark.cost
def test_quadratic_cost(self, get_quadratic_y, get_quadratic_y_pred, get_quadratic_cost):
y = get_quadratic_y
y_pred = get_quadratic_y_pred
J = get_quadratic_cost
J_test = 1/2 * np.mean((y_pred-y)**2)
assert math.isclose(J, J_test, abs_tol=1)
@mark.cost
def test_quadratic_cost_gradient(self, get_quadratic_X, get_quadratic_y, get_quadratic_y_pred, get_quadratic_gradient):
y = get_quadratic_y
y_pred = get_quadratic_y_pred
X = get_quadratic_X
grad = get_quadratic_gradient
grad_test = 1/y.shape[0] * (y_pred- y).dot(X)
for a,b in zip(grad, grad_test):
assert math.isclose(a, b, abs_tol=1.0)
class BinaryCostTests:
@mark.cost
def test_binary_cost(self, get_binary_cost_y, get_binary_cost_y_pred, get_binary_cost):
y = get_binary_cost_y
y_pred = get_binary_cost_y_pred
J = get_binary_cost
        J_test = -1 * (1/y.shape[0] * np.sum(y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred)))
        assert math.isclose(J, J_test, abs_tol=10**-4)
@mark.cost
def test_binary_cost_gradient(self, get_binary_cost_X, get_binary_cost_y, get_binary_cost_y_pred, get_binary_cost_gradient):
X = get_binary_cost_X
y = get_binary_cost_y
y_pred = get_binary_cost_y_pred
grad = get_binary_cost_gradient
grad_test = X.T.dot(y_pred-y)
for a,b in zip(grad, grad_test):
assert math.isclose(a, b, abs_tol=1.0)
class CategoricalCostTests:
@mark.cost
def test_categorical_cost(self, get_categorical_cost_y, get_categorical_cost_y_pred, get_categorical_cost):
y = get_categorical_cost_y
y_pred = get_categorical_cost_y_pred
J = get_categorical_cost
        # Categorical cross entropy over one-hot targets: -1/N * sum(y * log(y_pred))
        J_test = -1 * (1/y.shape[0] * np.sum(np.multiply(y, np.log(y_pred))))
        assert math.isclose(J, J_test, abs_tol=10**-4)
@mark.cost
def test_categorical_cost_gradient(self, get_categorical_cost_X, get_categorical_cost_y, get_categorical_cost_y_pred,
get_categorical_cost_gradient):
X = get_categorical_cost_X
y = get_categorical_cost_y
y_pred = get_categorical_cost_y_pred
grad = get_categorical_cost_gradient
grad_test = 1/y.shape[0] * X.T.dot(y_pred-y)
for array_a,array_b in zip(grad, grad_test):
for a, b in zip(array_a, array_b):
assert math.isclose(a, b, abs_tol=1.0)
```
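For reference, the cost expressions these tests compute are the standard quadratic, binary cross-entropy, and categorical cross-entropy forms (the per-test gradient scaling varies, as seen above):

```latex
J_{\text{quad}} = \frac{1}{2N}\sum_{i=1}^{N}(\hat{y}_i - y_i)^2, \qquad
J_{\text{bce}}  = -\frac{1}{N}\sum_{i=1}^{N}\bigl[y_i\log\hat{y}_i + (1-y_i)\log(1-\hat{y}_i)\bigr], \qquad
J_{\text{cce}}  = -\frac{1}{N}\sum_{i=1}^{N}\sum_{k=1}^{K} y_{ik}\log\hat{y}_{ik}
```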
#### File: tests/test_operations/test_learning_rate_schedules.py
```python
import math
import numpy as np
import pytest
from pytest import mark
from ml_studio.supervised_learning.training.learning_rate_schedules import TimeDecay
from ml_studio.supervised_learning.training.learning_rate_schedules import StepDecay
from ml_studio.supervised_learning.training.learning_rate_schedules import NaturalExponentialDecay
from ml_studio.supervised_learning.training.learning_rate_schedules import ExponentialDecay
from ml_studio.supervised_learning.training.learning_rate_schedules import InverseScaling
from ml_studio.supervised_learning.training.learning_rate_schedules import PolynomialDecay
from ml_studio.supervised_learning.training.learning_rate_schedules import Adaptive
from ml_studio.supervised_learning.regression import LinearRegression
class LearningRateScheduleTests:
# ----------------------------------------------------------------------- #
# Time Decay #
# ----------------------------------------------------------------------- #
@mark.learning_rate_schedules
@mark.learning_rate_schedules_time
def test_time_decay_learning_rate_schedule_wo_staircase(self, get_regression_data):
exp_result = [0.0909090909, 0.0833333333, 0.0769230769, 0.0714285714, 0.0666666667]
act_result = []
lrs = TimeDecay(learning_rate=0.1, decay_rate=0.5, decay_steps=5)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Time decay not working"
@mark.learning_rate_schedules
@mark.learning_rate_schedules_time_staircase
def test_time_decay_learning_rate_schedule_w_staircase(self, get_regression_data):
exp_result = [0.1000000000, 0.1000000000, 0.1000000000, 0.1000000000, 0.0666666667]
act_result = []
lrs = TimeDecay(learning_rate=0.1, decay_steps=5, decay_rate=0.5, staircase=True)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Time decay with step not working"
# ----------------------------------------------------------------------- #
# Step Decay #
# ----------------------------------------------------------------------- #
@mark.learning_rate_schedules
@mark.learning_rate_schedules_step
def test_step_decay_learning_rate_schedule(self, get_regression_data):
exp_result = [0.1000000000, 0.1000000000, 0.1000000000, 0.0500000000, 0.0500000000]
act_result = []
lrs = StepDecay(learning_rate=0.1, decay_rate=0.5, decay_steps=5)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Step decay not working"
# ----------------------------------------------------------------------- #
# Natural Exponential Decay #
# ----------------------------------------------------------------------- #
@mark.learning_rate_schedules
@mark.learning_rate_schedules_nat_exp_no_staircase
def test_nat_exp_decay_learning_rate_schedule_wo_staircase(self, get_regression_data):
exp_result = [0.0904837418,0.0818730753,0.0740818221,0.0670320046,0.0606530660]
act_result = []
lrs = NaturalExponentialDecay(learning_rate=0.1, decay_rate=0.5, decay_steps=5)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Natural exponential decay not working"
@mark.learning_rate_schedules
@mark.learning_rate_schedules_nat_exp_staircase
def test_nat_exp_decay_learning_rate_schedule_w_staircase(self, get_regression_data):
exp_result = [0.1000000000, 0.1000000000, 0.1000000000, 0.1000000000, 0.0606530660]
act_result = []
lrs = NaturalExponentialDecay(learning_rate=0.1, decay_steps=5, decay_rate=0.5,
staircase=True)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Natural exponential decay with steps not working"
# ----------------------------------------------------------------------- #
# Exponential Decay #
# ----------------------------------------------------------------------- #
@mark.learning_rate_schedules
def test_exp_decay_learning_rate_schedule_wo_staircase(self, get_regression_data):
exp_result = [0.0870550563, 0.0757858283, 0.0659753955, 0.0574349177, 0.0500000000]
act_result = []
lrs = ExponentialDecay(learning_rate=0.1, decay_rate=0.5, decay_steps=5)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Exponential decay not working"
@mark.learning_rate_schedules
def test_exp_decay_learning_rate_schedule_w_staircase(self, get_regression_data):
exp_result = [0.1,0.1,0.1,0.1,0.05]
act_result = []
lrs = ExponentialDecay(learning_rate=0.1, decay_rate=0.5, decay_steps=5, staircase=True)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Exponential decay with steps and staircase not working"
# ----------------------------------------------------------------------- #
# Inverse Scaling #
# ----------------------------------------------------------------------- #
@mark.learning_rate_schedules
def test_inv_scaling_learning_rate_schedule(self, get_regression_data):
exp_result = [0.1,0.070710678,0.057735027,0.05,0.04472136]
act_result = []
lrs = InverseScaling(learning_rate=0.1, power=0.5)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Inverse scaling not working"
# ----------------------------------------------------------------------- #
# Polynomial Decay #
# ----------------------------------------------------------------------- #
@mark.learning_rate_schedules
def test_polynomial_decay_learning_rate_schedule_wo_cycle(self, get_regression_data):
exp_result = [0.0895,0.0775,0.0633,0.0448,0.0001]
act_result = []
lrs = PolynomialDecay(learning_rate=0.1, decay_steps=5, power=0.5,
end_learning_rate=0.0001)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Polynomial decay not working"
@mark.learning_rate_schedules
def test_polynomial_decay_learning_rate_schedule_w_cycle(self, get_regression_data):
exp_result = [0.0895,0.0775,0.0633,0.0448,0.0001]
act_result = []
lrs = PolynomialDecay(learning_rate=0.1, decay_steps=5, power=0.5,
end_learning_rate=0.0001, cycle=True)
lrs.model = LinearRegression()
iterations = [i+1 for i in range(5)]
for i in iterations:
lrs.on_epoch_end(i)
act_result.append(lrs.model.eta)
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Polynomial decay with cycle not working"
# ----------------------------------------------------------------------- #
# Adaptive #
# ----------------------------------------------------------------------- #
@mark.learning_rate_schedules
@mark.learning_rate_schedules_adaptive
def test_adaptive_learning_rate_schedule(self, get_regression_data):
logs = {}
exp_result = [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.05,0.05,0.05]
act_result = []
lrs = Adaptive(learning_rate=0.1, decay_rate=0.5, precision=0.01, patience=5)
lrs.model = LinearRegression()
lrs.model.eta = 0.1
logs['learning_rate'] = 0.1
cost = [5,5,5,5,4,4,4,4,4,4,4, 3]
iterations = [i+1 for i in range(12)]
for i in iterations:
logs['train_cost'] = cost[i-1]
lrs.on_epoch_end(i, logs)
act_result.append(lrs.model.eta)
logs['learning_rate'] = lrs.model.eta
assert all(np.isclose(exp_result,act_result,rtol=1e-1)), "Adaptive decay with cycle not working"
```
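The expected values in these tests are consistent with the schedule formulas below, inferred from the expected outputs rather than from the library source. Here $\eta_0$ is the initial learning rate, $r$ the decay rate, $s$ the decay steps, $p$ the power, and $t$ the epoch; `staircase=True` replaces $t/s$ with $\lfloor t/s \rfloor$:

```latex
\eta_t^{\text{time}} = \frac{\eta_0}{1 + r\,t/s}, \qquad
\eta_t^{\text{nat-exp}} = \eta_0\, e^{-r\,t/s}, \qquad
\eta_t^{\text{exp}} = \eta_0\, r^{t/s}, \qquad
\eta_t^{\text{inv}} = \frac{\eta_0}{t^{p}}, \qquad
\eta_t^{\text{poly}} = (\eta_0 - \eta_{\min})\Bigl(1 - \frac{t}{s}\Bigr)^{p} + \eta_{\min}
```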
#### File: tests/test_operations/test_regularizers.py
```python
import math
import numpy as np
import pytest
from pytest import mark
from ml_studio.supervised_learning.training.regularizers import L1, L2, ElasticNet
class L1Tests:
@mark.regularization
def test_l1_cost(self, get_alpha, get_weights, get_l1_cost):
alpha = get_alpha
w = get_weights
l1_cost = get_l1_cost
with pytest.raises(AssertionError):
l1 = L1(alpha='f')
l1_cost_test = l1(w)
l1 = L1(alpha=alpha)
l1_cost_test = l1(w)
assert math.isclose(l1_cost, l1_cost_test, abs_tol=10**-4)
@mark.regularization
def test_l1_gradient(self, get_alpha, get_weights, get_l1_grad):
alpha = get_alpha
w = get_weights
l1_grad = get_l1_grad
with pytest.raises(AssertionError):
l1 = L1(alpha='f')
l1_grad_test = l1.gradient(w)
l1 = L1(alpha=alpha)
l1_grad_test = l1.gradient(w)
for a,b in zip(l1_grad, l1_grad_test):
assert math.isclose(a, b, abs_tol=10**-4)
class L2Tests:
@mark.regularization
def test_l2_cost(self, get_alpha, get_weights, get_l2_cost):
alpha = get_alpha
w = get_weights
l2_cost = get_l2_cost
with pytest.raises(AssertionError):
l2 = L2(alpha='f')
l2_cost_test = l2(w)
l2 = L2(alpha=alpha)
l2_cost_test = l2(w)
assert math.isclose(l2_cost, l2_cost_test, abs_tol=10**-4)
@mark.regularization
def test_l2_gradient(self, get_alpha, get_weights, get_l2_grad):
alpha = get_alpha
w = get_weights
l2_grad = get_l2_grad
with pytest.raises(AssertionError):
l2 = L2(alpha='f')
l2_grad_test = l2.gradient(w)
l2 = L2(alpha=alpha)
l2_grad_test = l2.gradient(w)
for a,b in zip(l2_grad, l2_grad_test):
assert math.isclose(a, b, abs_tol=10**-4)
class ElasticNetTests:
@mark.regularization
def test_elasticnet_cost(self, get_alpha, get_ratio, get_weights, get_elasticnet_cost):
alpha = get_alpha
ratio = get_ratio
w = get_weights
elasticnet_cost = get_elasticnet_cost
with pytest.raises(AssertionError):
elasticnet = ElasticNet(alpha='f')
elasticnet_cost_test = elasticnet(w)
elasticnet = ElasticNet(alpha=alpha, ratio=ratio)
elasticnet_cost_test = elasticnet(w)
assert math.isclose(elasticnet_cost, elasticnet_cost_test, abs_tol=10**-4)
@mark.regularization
def test_elasticnet_gradient(self, get_alpha, get_ratio, get_weights, get_elasticnet_grad):
alpha = get_alpha
w = get_weights
ratio = get_ratio
elasticnet_grad = get_elasticnet_grad
with pytest.raises(AssertionError):
elasticnet = ElasticNet(alpha='f')
elasticnet_grad_test = elasticnet.gradient(w)
elasticnet = ElasticNet(alpha=alpha, ratio=ratio)
elasticnet_grad_test = elasticnet.gradient(w)
for a,b in zip(elasticnet_grad, elasticnet_grad_test):
assert math.isclose(a, b, abs_tol=10**-4)
```
#### File: test_services/test_validation/test_rules.py
```python
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pytest import mark
from ml_studio.services.validation.rules import Rule, RuleSet
class SyntacticRuleTests:
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_none
def test_validation_rules_none(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true
answer = rule.on(test_object)\
.when('a_n')\
.is_none\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of none rule"
        # Evaluates to false
answer = rule.on(test_object)\
.when('a_xn')\
.is_none\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of none rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_not_none
def test_validation_rules_not_none(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
        # Evaluates to false
answer = rule.on(test_object)\
.when('a_n')\
.is_not_none\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of not none rule"
        # Evaluates to true
answer = rule.on(test_object)\
.when('a_xn')\
.is_not_none\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of not none rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_empty
def test_validation_rules_empty(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true
answer = rule.on(test_object)\
.when('e')\
.is_empty\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of empty rule"
# Evaluates to false
answer = rule.on(test_object)\
.when('s')\
.is_empty\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of empty rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_not_empty
def test_validation_rules_not_empty(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
        # Evaluates to false
answer = rule.on(test_object)\
.when('e')\
.is_not_empty\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of not empty rule"
        # Evaluates to true
answer = rule.on(test_object)\
.when('s')\
.is_not_empty\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of not empty rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_bool
def test_validation_rules_bool(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true
answer = rule.on(test_object)\
.when('b')\
.is_bool\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of bool rule"
# Evaluates to false
answer = rule.on(test_object)\
.when('s')\
.is_bool\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of bool rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_integer
def test_validation_rules_integer(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true
answer = rule.on(test_object)\
.when('i')\
.is_integer\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of integer rule"
# Evaluates to false
answer = rule.on(test_object)\
.when('s')\
.is_integer\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of integer rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_number
def test_validation_rules_number(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true
answer = rule.on(test_object)\
.when('f')\
.is_number\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of number rule"
# Evaluates to false
answer = rule.on(test_object)\
.when('s')\
.is_number\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of number rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_syntactic
@mark.validation_rules_string
def test_validation_rules_string(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true
answer = rule.on(test_object)\
.when('a_s')\
.is_string\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of string rule"
# Evaluates to false
answer = rule.on(test_object)\
.when('a_l')\
.is_string\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of string rule"
class SemanticRuleTests:
@mark.validation
@mark.validation_rules
@mark.validation_rules_equal
@mark.validation_rules_semantic
@mark.validation_rules_equal_number
def test_validation_rules_equal_number(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('i')\
.is_equal(5)\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('a_g')\
.is_equal(50)\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_ge')\
.is_equal('a_g')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('f')\
.is_equal('i')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_equal
@mark.validation_rules_semantic
@mark.validation_rules_equal_string
def test_validation_rules_equal_string(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('s')\
.is_equal("hats")\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('s')\
.is_equal("fott")\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_equal('a_s')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_equal('a_sg')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_not_equal
@mark.validation_rules_semantic
@mark.validation_rules_not_equal_number
def test_validation_rules_not_equal_number(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('i')\
.is_not_equal(6)\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of not_equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('i')\
.is_not_equal(5)\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of not_equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('f')\
.is_not_equal('i')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of not_equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_ge')\
.is_not_equal('a_g')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of not_equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_not_equal
@mark.validation_rules_semantic
@mark.validation_rules_not_equal_string
def test_validation_rules_not_equal_string(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('s')\
.is_not_equal("disc")\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of not_equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('s')\
.is_not_equal('hats')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of not_equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_not_equal('a_sg')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of not_equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_not_equal('a_s')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of not_equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_less
@mark.validation_rules_semantic
@mark.validation_rules_less_numbers
def test_validation_rules_less_numbers(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('i')\
.is_less(6)\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of less rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('a_g')\
.is_less(4)\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of less rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('f')\
.is_less('i')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of less rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('i')\
.is_less('f')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of less rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_less
@mark.validation_rules_semantic
@mark.validation_rules_less_strings
def test_validation_rules_less_strings(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('s')\
.is_less('z')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of less rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('s')\
.is_less('a')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of less rule"
        # Evaluates to true with constant ('z' is a literal here, not an attribute)
answer = rule.on(test_object)\
.when('s')\
.is_less('z')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of less rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('s')\
.is_less('a_s')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of less rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_less_equal
@mark.validation_rules_semantic
@mark.validation_rules_less_equal_numbers
def test_validation_rules_less_equal_numbers(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('i')\
.is_less_equal(5)\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of less_equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('a_g')\
.is_less_equal(7)\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of less_equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_g')\
.is_less_equal('a_ge')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of less_equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_ge')\
.is_less_equal('a_le')\
.evaluate\
.is_valid
assert answer is False, "Invalid evaluation of less_equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_less_equal
@mark.validation_rules_semantic
@mark.validation_rules_less_equal_strings
def test_validation_rules_less_equal_strings(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('s')\
.is_less_equal('hats')\
.evaluate\
.is_valid
assert answer is True, "Invalid evaluation of less_equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('s')\
.is_less_equal('a')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of less_equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_less_equal('a_s')\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of less_equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_less_equal('s')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of less_equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_greater
@mark.validation_rules_semantic
@mark.validation_rules_greater_numbers
def test_validation_rules_greater_numbers(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('i')\
.is_greater(3)\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('a_g')\
.is_greater(3)\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('i')\
.is_greater('f')\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('f')\
.is_greater('i')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_greater
@mark.validation_rules_semantic
@mark.validation_rules_greater_strings
def test_validation_rules_greater_strings(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('s')\
.is_greater('a')\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('s')\
.is_greater('z')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_sg')\
.is_greater('a_s')\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_greater('a_sg')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_greater_equal
@mark.validation_rules_semantic
@mark.validation_rules_greater_equal_numbers
def test_validation_rules_greater_equal_numbers(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('i')\
.is_greater_equal(5)\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater_equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('a_g')\
.is_greater_equal(50)\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater_equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_g')\
.is_greater_equal('a_ge')\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater_equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_le')\
.is_greater_equal('a_ge')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater_equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_greater_equal
@mark.validation_rules_semantic
@mark.validation_rules_greater_equal_strings
def test_validation_rules_greater_equal_strings(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('s')\
.is_greater_equal("hats")\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater_equal rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('s')\
.is_greater_equal('z')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater_equal rule"
# Evaluates to true with attribute
answer = rule.on(test_object)\
.when('a_sg')\
.is_greater_equal('a_s')\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of greater_equal rule"
# Evaluates to false with attribute
answer = rule.on(test_object)\
.when('a_s')\
.is_greater_equal('a_sg')\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of greater_equal rule"
@mark.validation
@mark.validation_rules
@mark.validation_rules_semantic
@mark.validation_rules_match
def test_validation_rules_match(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
rule = Rule()
# Evaluates to true with constant
answer = rule.on(test_object)\
.when('a_s')\
.is_match("[a-zA-Z]+")\
.evaluate.is_valid
assert answer is True, "Invalid evaluation of match rule"
# Evaluates to false with constant
answer = rule.on(test_object)\
.when('a_s')\
.is_match("[0-9]+")\
.evaluate.is_valid
assert answer is False, "Invalid evaluation of match rule"
class RuleSetTests:
@mark.validation
@mark.validation_rules
@mark.validation_rules_semantic
@mark.validation_rules_set
@mark.validation_rules_set_and
def test_validation_rules_set_and(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
# Create True
rule1 = Rule().on(test_object)\
.when('i')\
.is_equal(5)
assert rule1.evaluate.is_valid is True, "Failed Rule 1 before assigning to set."
# Create another True rule
rule2 = Rule().on(test_object)\
.when('a_ge')\
.is_equal('a_g')
assert rule2.evaluate.is_valid is True, "Failed Rule 2 before assigning to set."
# Create a false rule
rule3 = Rule().on(test_object)\
.when('a_g')\
.is_equal(50)
assert rule3.evaluate.is_valid is False, "Failed Rule 3 before assigning to set."
# Create another false rule
rule4 = Rule().on(test_object)\
.when('a_g')\
.is_greater(50)
assert rule4.evaluate.is_valid is False, "Failed Rule 4 before assigning to set."
# Create True/True rule set where all must be true
rs = RuleSet()
rs.when_all_rules_are_true
rs.add_rule(rule1).add_rule(rule2)
# Check evaluation
answer = rs.evaluate.is_valid
assert answer is True, "Invalid evaluation of rules 1 and 2"
# Create True/False rule set where all must be true
rs.remove_rule(rule2).add_rule(rule3)
# Check evaluation
answer = rs.evaluate.is_valid
assert answer is False, "Invalid evaluation of rules 1 and 3"
# Create False/False rule where all must be true
rs.remove_rule(rule3).add_rule(rule4)
# Check evaluation
answer = rs.evaluate.is_valid
assert answer is False, "Invalid evaluation of rules 1 and 4"
@mark.validation
@mark.validation_rules
@mark.validation_rules_semantic
@mark.validation_rules_set
@mark.validation_rules_set_or
def test_validation_rules_set_or(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
# Create True
rule1 = Rule().on(test_object)\
.when('i')\
.is_equal(5)
# Create another True rule
rule2 = Rule().on(test_object)\
.when('a_ge')\
.is_equal('a_g')
# Create a false rule
rule3 = Rule().on(test_object)\
.when('a_g')\
.is_equal(50)
# Create another false rule
rule4 = Rule().on(test_object)\
.when('a_g')\
.is_greater(50)
# Create True/True rule set where all must be true
rs = RuleSet()
rs.when_any_rule_is_true
rs.add_rule(rule1)
rs.add_rule(rule2)
answer = rs.evaluate.is_valid
assert answer is True, "Invalid evaluation of rules 1 and 2"
# Create True/False rule set where all must be true
rs.remove_rule(rule2)
rs.add_rule(rule3)
answer = rs.evaluate.is_valid
rs.print_rule_set
assert answer is True, "Invalid evaluation of rules 1 and 3"
# Create False/False rule where all must be true
rs.remove_rule(rule1)
rs.add_rule(rule4)
answer = rs.evaluate.is_valid
rs.print_rule_set
assert answer is False, "Invalid evaluation of rules 3 and 4"
@mark.validation
@mark.validation_rules
@mark.validation_rules_semantic
@mark.validation_rules_set
@mark.validation_rules_set_none
def test_validation_rules_set_none(self, get_validation_rule_test_object):
test_object = get_validation_rule_test_object
# Create True
rule1 = Rule().on(test_object)\
.when('i')\
.is_equal(5)
# Create another True rule
rule2 = Rule().on(test_object)\
.when('a_ge')\
.is_equal('a_g')
# Create a false rule
rule3 = Rule().on(test_object)\
.when('a_g')\
.is_equal(50)
# Create another false rule
rule4 = Rule().on(test_object)\
.when('a_g')\
.is_greater(50)
# Create True/True rule set where all must be true
rs = RuleSet()
rs.when_no_rules_are_true
rs.add_rule(rule1)
rs.add_rule(rule2)
answer = rs.evaluate.is_valid
assert answer is False, "Invalid evaluation of rules 1 and 2"
# Create True/False rule set where all must be true
rs.remove_rule(rule2)
rs.add_rule(rule3)
answer = rs.evaluate.is_valid
rs.print_rule_set
assert answer is False, "Invalid evaluation of rules 1 and 3"
# Create False/False rule where all must be true
rs.remove_rule(rule1)
rs.add_rule(rule4)
answer = rs.evaluate.is_valid
rs.print_rule_set
assert answer is True, "Invalid evaluation of rules 3 and 4"
class ChildNodeTests:
@mark.validation
@mark.validation_rules
@mark.validation_rules_printing
def test_validation_rules_printing(self, get_validation_rule_test_object):
"""Testing propagation of data down through child nodes."""
test_object = get_validation_rule_test_object
# Create some rules
# True Test Rule
rule1 = Rule().on(test_object)\
.when('i')\
.is_equal(5)
# Create another True rule
rule2 = Rule().on(test_object)\
.when('a_ge')\
.is_equal('a_g')
# Create a false rule
rule3 = Rule().on(test_object)\
.when('a_g')\
.is_equal(50)
# Create another false rule
rule4 = Rule().on(test_object)\
.when('a_g')\
.is_greater(50)
# Add 1 and 2 to a new Rule Set
rs1 = RuleSet()
rs1.add_rule(rule1).add_rule(rule2)
# Add RS1 and rules 3 and 4 to new RuleSet
rs2 = RuleSet()
rs2.add_rule(rs1).add_rule(rule3).add_rule(rule4)
rs2.when_no_rules_are_true
# Print to see how it looks in the hierarchy
rs2.print_rule_set
@mark.validation
@mark.validation_rules
@mark.validation_rules_propagation
def test_validation_rules_propagation(self, get_validation_rule_test_object):
"""Testing propagation of data down through child nodes."""
test_object = get_validation_rule_test_object
# Create some rules
# True Test Rule
rule1 = Rule().on(test_object)\
.when('i')\
.is_equal(5)
# Create another True rule
rule2 = Rule().on(test_object)\
.when('a_ge')\
.is_equal('a_g')
# Create a false rule
rule3 = Rule().on(test_object)\
.when('a_g')\
.is_equal(50)
# Create another false rule
rule4 = Rule().on(test_object)\
.when('a_g')\
.is_greater(50)
# Add 1 and 2 to a new Rule Set
rs1 = RuleSet()
rs1.add_rule(rule1).add_rule(rule2)
# Add RS1 and rules 3 and 4 to new RuleSet
rs2 = RuleSet()
rs2.add_rule(rs1).add_rule(rule3).add_rule(rule4)
rs2.when_no_rules_are_true
# Set the target object and attribute
rs2.on(test_object)
rs2.attribute('a_g')
# Traverse through confirming updates
        def traverse(rule):
            assert rule._evaluated_instance == test_object, "Test object not set"
            assert rule._evaluated_attribute == "a_g", "Attribute not set"
            if isinstance(rule, RuleSet):
                for _, child in rule._rules.items():
                    traverse(child)
traverse(rs2)
class ErrorHandlingTests:
@mark.validation
@mark.validation_rules
@mark.validation_rules_error_handling
def test_validation_rules_error_handling(self, get_validation_rule_test_object):
"""Testing propagation of data down through child nodes."""
test_object = get_validation_rule_test_object
# Indicate action on error
rule = Rule().on(test_object)\
.when('i')\
.is_equal(5)\
.on_fail_report_error
assert rule._action_on_fail == "report", "Action assignment failed"
# Report Error Message ForRule
rule.error_message = "Dangerous for i to be 5."
assert "Dangerous" in rule._error_message, "Error message assignment didn't work"
```
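A condensed sketch of the fluent API these tests exercise; `Config` is a stand-in target object for illustration:

```python
from ml_studio.services.validation.rules import Rule, RuleSet

class Config:
    """Stand-in object to validate; any object with attributes works."""
    def __init__(self):
        self.x = 3
        self.limit = 5

obj = Config()
rule1 = Rule().on(obj).when('x').is_greater_equal(0)  # compare against a constant
rule2 = Rule().on(obj).when('x').is_less('limit')     # compare against another attribute
rs = RuleSet()
rs.when_all_rules_are_true
rs.add_rule(rule1).add_rule(rule2)
print(rs.evaluate.is_valid)                           # True when both rules hold
```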
#### File: tests/test_supervised_learning/test_classification.py
```python
import math
import numpy as np
import pandas as pd
import pytest
from pytest import mark
from ml_studio.supervised_learning.classification import LogisticRegression
from ml_studio.supervised_learning.classification import MultinomialLogisticRegression
from ml_studio.supervised_learning.training.early_stop import EarlyStopImprovement
from ml_studio.supervised_learning.training.early_stop import EarlyStopStrips
from ml_studio.supervised_learning.training.metrics import Metric
# --------------------------------------------------------------------------- #
#%%
# --------------------------------------------------------------------------- #
# LOGISTIC REGRESSION #
# --------------------------------------------------------------------------- #
class LogisticRegressionTests:
@mark.logistic_regression
@mark.logistic_regression_name
def test_logistic_regression_name(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=50)
clf.fit(X,y)
assert clf.name == 'Logistic Regression with Batch Gradient Descent'
clf = LogisticRegression(epochs=50, batch_size=1)
clf.fit(X,y)
assert clf.name == 'Logistic Regression with Stochastic Gradient Descent'
clf = LogisticRegression(epochs=50, batch_size=32)
clf.fit(X,y)
assert clf.name == 'Logistic Regression with Minibatch Gradient Descent'
@mark.logistic_regression
@mark.logistic_regression_val
def test_logistic_regression_validation(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=50, metric='mean')
with pytest.raises(ValueError):
clf.fit(X,y)
clf = LogisticRegression(epochs=50, cost='quadratic')
with pytest.raises(ValueError):
clf.fit(X,y)
@mark.logistic_regression
@mark.logistic_regression_predict
def test_logistic_regression_predict(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=100, learning_rate=0.01, checkpoint=10)
clf.fit(X,y)
y_pred = clf._predict(X)
assert y_pred.shape == (y.shape[0],), "y_pred has wrong shape for binary problem"
y_pred = clf.predict(X)
score = clf.score(X,y)
assert y_pred.shape == (y.shape[0],), "y_pred has wrong shape for binary problem"
assert score > 0.3, "Accuracy below 0.3"
assert score < 1, "Accuracy is greater than or equal to 1"
@mark.logistic_regression
@mark.logistic_regression_history
def test_logistic_regression_history_w_early_stop(self, get_binary_classification_data):
X, y = get_binary_classification_data
es = EarlyStopImprovement()
clf = LogisticRegression(epochs=10, early_stop=es)
clf.fit(X, y)
# Test epoch history
assert clf.history.total_epochs == len(clf.history.epoch_log.get('epoch')), "number of epochs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('learning_rate')), "number of learning rates in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('theta')), "number of thetas in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_cost')), "number of train costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_cost')), "number of val costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_score')), "number of train score in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_score')), "number of val score in log doesn't match epochs"
assert all(np.equal(clf.theta, clf.history.epoch_log.get('theta')[-1])), "Last theta in log doesn't equal final theta."
assert clf.history.epoch_log.get('train_cost')[0] > clf.history.epoch_log.get('train_cost')[-1], "train_cost does not decrease"
#assert clf.history.epoch_log.get('train_score')[0] > clf.history.epoch_log.get('train_score')[-1], "train_score does not decrease"
assert clf.history.epoch_log.get('val_cost')[0] > clf.history.epoch_log.get('val_cost')[-1], "val_cost does not decrease"
#assert clf.history.epoch_log.get('val_score')[0] > clf.history.epoch_log.get('val_score')[-1], "val_score does not decrease"
# Test batch history
assert clf.history.total_batches == len(clf.history.batch_log.get('batch')), "number of batches in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('batch_size')), "number of batch sizes in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('theta')), "number of thetas in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('train_cost')), "number of train_costs in log doesn't match total batches"
@mark.logistic_regression
@mark.logistic_regression_learning_rate_schedules
def test_logistic_regression_learning_rate_schedules(self, learning_rate_schedules, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=200, checkpoint=10, learning_rate=learning_rate_schedules, patience=40)
clf.fit(X, y)
# Confirm learning rates decreased
assert clf.history.epoch_log.get('learning_rate')[0] > clf.history.epoch_log.get('learning_rate')[-1], "Learning rate didn't decrease"
assert clf.history.epoch_log.get('learning_rate')[0] != clf.eta, "Learning rate didn't change"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_train_cost(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_train_score(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_score', "Estimator is not sending correct metric"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_val_cost(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
def test_logistic_regression_early_stop_from_estimator_val_score(self, get_binary_classification_data):
X, y = get_binary_classification_data
clf = LogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_score', "Estimator is not sending correct metric"
# --------------------------------------------------------------------------- #
# MULTINOMIAL LOGISTIC REGRESSION #
# --------------------------------------------------------------------------- #
class MultinomialLogisticRegressionTests:
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_name
def test_multinomial_logistic_regression_name(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, cost='categorical_cross_entropy')
clf.fit(X,y)
assert clf.name == 'Multinomial Logistic Regression with Batch Gradient Descent'
clf = MultinomialLogisticRegression(epochs=50, batch_size=1,cost='categorical_cross_entropy')
clf.fit(X,y)
assert clf.name == 'Multinomial Logistic Regression with Stochastic Gradient Descent'
clf = MultinomialLogisticRegression(epochs=50, batch_size=32, cost='categorical_cross_entropy')
clf.fit(X,y)
assert clf.name == 'Multinomial Logistic Regression with Minibatch Gradient Descent'
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_val
def test_multinomial_logistic_regression_validation(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, metric='mse')
with pytest.raises(ValueError):
clf.fit(X,y)
clf = MultinomialLogisticRegression(epochs=50, cost='binary_cross_entropy')
with pytest.raises(ValueError):
clf.fit(X,y)
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_prep_data
def test_multinomial_logistic_regression_prep_data(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, cost='categorical_cross_entropy',
val_size=0, early_stop=False)
clf.fit(X,y)
assert X.shape[0] == clf.X.shape[0], "X.shape[0] incorrect in prep data"
assert X.shape[1]+1 == clf._X_design.shape[1], "X.shape[1] incorrect in prep data"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_init_weights
def test_multinomial_logistic_regression_init_weights(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
n_features = X.shape[1]+1
n_classes = len(np.unique(y))
clf = MultinomialLogisticRegression(epochs=50)
clf.fit(X,y)
assert clf.theta.shape == (n_features,n_classes), "theta shape incorrect for multi classification"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_predict
def test_multinomial_logistic_regression_predict(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=1000, cost='categorical_cross_entropy', patience=40)
clf.fit(X,y)
y_pred = clf._predict(X)
assert y_pred.shape == (y.shape[0],3), "Shape of prediction is not correct."
y_pred = clf.predict(X)
score = clf.score(X,y)
assert y_pred.shape == (y.shape[0],), "Shape of prediction is not correct."
assert clf.history.epoch_log.get('train_cost')[0] > clf.history.epoch_log.get('train_cost')[-1], "Training costs didn't decrease"
assert clf.history.epoch_log.get('train_score')[0] < clf.history.epoch_log.get('train_score')[-1], "Training score didn't increase"
assert score >= 0.5, "Accuracy below 0.5"
assert score < 1, "Accuracy is greater than or equal to 1"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_early_stop
def test_multinomial_logistic_regression_early_stop(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
es = EarlyStopImprovement(precision=0.001, patience=5)
clf = MultinomialLogisticRegression(epochs=100, early_stop=es, checkpoint=10)
clf.fit(X, y)
# Confirm early stop happened
assert clf.history.total_epochs < clf.epochs, "Early stop didn't happen."
# Test epoch history
assert clf.history.total_epochs == len(clf.history.epoch_log.get('epoch')), "number of epochs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('learning_rate')), "number of learning rates in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('theta')), "number of thetas in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_cost')), "number of train costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_cost')), "number of val costs in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('train_score')), "number of train score in log doesn't match epochs"
assert clf.history.total_epochs == len(clf.history.epoch_log.get('val_score')), "number of val score in log doesn't match epochs"
assert np.array_equal(clf.theta, clf.history.epoch_log.get('theta')[-1]), "Last theta in log doesn't equal final theta."
# Test Performance Trends
assert clf.history.epoch_log.get('train_cost')[0] > clf.history.epoch_log.get('train_cost')[-1], "Training costs didn't decrease"
#assert clf.history.epoch_log.get('train_score')[0] < clf.history.epoch_log.get('train_score')[-1], "Training score didn't increase"
assert clf.history.epoch_log.get('val_cost')[0] > clf.history.epoch_log.get('val_cost')[-1], "Validation costs didn't decrease"
#assert clf.history.epoch_log.get('val_score')[0] < clf.history.epoch_log.get('val_score')[-1], "Validation score didn't increase"
# Test batch history
assert clf.history.total_batches == len(clf.history.batch_log.get('batch')), "number of batches in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('batch_size')), "number of batch sizes in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('theta')), "number of thetas in log doesn't match total batches"
assert clf.history.total_batches == len(clf.history.batch_log.get('train_cost')), "number of train_costs in log doesn't match total batches"
@mark.logistic_regression
@mark.multinomial_logistic_regression
@mark.multinomial_logistic_regression_learning_rate_schedules
def test_multinomial_logistic_regression_learning_rate_schedules(self, learning_rate_schedules, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=50, checkpoint=10, learning_rate=learning_rate_schedules)
clf.fit(X, y)
# Confirm learning rates decreased
assert clf.history.epoch_log.get('learning_rate')[0] > clf.history.epoch_log.get('learning_rate')[-1], "Learning rate didn't decrease"
assert clf.history.epoch_log.get('learning_rate')[0] != clf.eta, "Learning rate didn't change"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_train_cost(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_train_score(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=False, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'train_score', "Estimator is not sending correct metric"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_val_cost(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric=None)
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_cost', "Estimator is not sending correct metric"
@mark.logistic_regression
@mark.multinomial_logistic_regression
def test_multinomial_logistic_regression_early_stop_from_estimator_val_score(self, get_multinomial_classification_data):
X, y = get_multinomial_classification_data
clf = MultinomialLogisticRegression(epochs=5000, early_stop=True, val_size=0.3, metric='accuracy')
clf.fit(X, y)
assert clf.convergence_monitor.monitor == 'val_score', "Estimator is not sending correct metric"
``` |
{
"source": "john-james-ai/xrec",
"score": 2
} |
#### File: tests/test_data/test_data_source.py
```python
import os
import pytest
import time
import logging
import inspect
import pandas as pd
import numpy as np
from datetime import datetime
from xrec.data.source import AmazonSource
from xrec.utils.config import Config
from xrec.data.extract import download_callback
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class AmazonSourceTests:
def test_setup(self):
logger.info("Started {}".format(self.__class__.__name__))
amazon = AmazonSource()
amazon.reset_extract()
def test_create_metadata(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
c = Config()
url = c.read('DATA', 'url')
amazon = AmazonSource()
amazon.create_metadata(url)
assert isinstance(amazon.metadata, pd.DataFrame), \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert amazon.metadata.shape[0] == 58, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert amazon.metadata.shape[1] == 12, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert amazon.n_files == 58, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert amazon.n_files_downloaded == 0, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_read_metadata(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
c = Config()
url = c.read('DATA', 'url')
amazon = AmazonSource()
# Test without parameters
metadata = amazon.read_metadata()
assert isinstance(metadata, pd.DataFrame), \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert metadata.shape[0] == 58, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert metadata.shape[1] == 12, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
# Test with key only
metadata = amazon.read_metadata(key='books')
assert isinstance(metadata, pd.DataFrame), \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert metadata.shape[0] == 2, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert metadata.shape[1] == 12, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
# Test with key and kind
metadata = amazon.read_metadata(key='books', kind='p')
assert isinstance(metadata, pd.DataFrame), \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert metadata.shape[0] == 1, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert metadata.shape[1] == 12, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_update_metadata(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
amazon = AmazonSource()
key = 'video'
kind = 'r'
download_date = np.datetime64(datetime.now())
download_duration = 2398
download_size = 126543
downloaded = True
amazon.update_metadata(key, kind, downloaded, download_date,
download_duration, download_size)
result = amazon.read_metadata(key, kind)
assert result['downloaded'].values[0] == downloaded,\
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert result['download_date'].values[0] == download_date,\
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert result['download_duration'].values[0] == download_duration,\
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert result['download_size'].values[0] == download_size,\
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_reset_extract(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
amazon = AmazonSource()
amazon.reset_extract()
assert amazon.n_files_downloaded == 0,\
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_get_extract(self):
self._test_get_extract_tasks_all()
self._test_get_extract_tasks_max_tasks()
def _test_get_extract_tasks_all(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
amazon = AmazonSource()
tasks = amazon.get_extract_tasks()
assert isinstance(tasks, list), \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert len(tasks) == 58, \
logger.error(" Failure in {}. Expected length=56, actual={}".format(
inspect.stack()[0][3], len(tasks)))
for task in tasks:
assert isinstance(task, dict), \
logger.error(" Failure in {}.".format(
inspect.stack()[0][3]))
assert 'url' in task.keys(), \
logger.error(" Failure in {}.".format(
inspect.stack()[0][3]))
assert 'filepath' in task.keys(), \
logger.error(" Failure in {}.".format(
inspect.stack()[0][3]))
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def _test_get_extract_tasks_max_tasks(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
max_tasks = 2
amazon = AmazonSource()
tasks = amazon.get_extract_tasks(max_tasks=max_tasks)
assert len(tasks) == 2,\
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert tasks[0].get('kind') == 'reviews', \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert tasks[1].get('kind') == 'products', \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_extract(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
max_tasks = 2
amazon = AmazonSource()
amazon.reset_extract()
assert amazon.n_files == 58, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
assert amazon.n_files_downloaded == 0, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
n_downloads = 0
tasks = amazon.get_extract_tasks(max_tasks=max_tasks)
while len(tasks) > 0:
for task in tasks:
result = self._simulate_download(task)
download_callback(result)
n_downloads += 1
print(amazon.describe(task['key'], task['kind']))
assert amazon.n_files_downloaded == n_downloads, \
logger.error(" Failure in {}.".format(
inspect.stack()[0][3]))
tasks = amazon.get_extract_tasks(max_tasks=max_tasks)
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def _simulate_download(self, task):
start = datetime.now()
end = datetime.now()
duration = end - start
task['downloaded'] = True
task['download_date'] = np.datetime64(end)
task['download_duration'] = duration
task['download_size'] = np.random.randint(1000000, 99999999)
return task
def test_delete_metadata(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
c = Config()
url = c.read('DATA', 'url')
amazon = AmazonSource()
amazon.delete_metadata()
assert amazon.metadata is None, \
logger.error(" Failure in {}.".format(inspect.stack()[0][3]))
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_teardown(self):
logger.info("Successfully completed {}".format(
self.__class__.__name__))
if __name__ == "__main__":
t = AmazonSourceTests()
t.test_setup()
t.test_create_metadata()
t.test_read_metadata()
t.test_update_metadata()
t.test_reset_extract()
t.test_get_extract()
t.test_extract()
t.test_delete_metadata()
t.test_teardown()
# %%
``` |
{
"source": "john-james-sf/lab",
"score": 2
} |
#### File: experiments/parallel/pool_apply_with_log.py
```python
import logging
import logging.handlers
import numpy as np
import time
import multiprocessing
import pandas as pd
log_file = 'log_file.log'
def listener_configurer():
root = logging.getLogger()
h = logging.FileHandler(log_file)
f = logging.Formatter(
'%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
h.setFormatter(f)
root.addHandler(h)
# This is the listener process top-level loop: wait for logging events
# (LogRecords) on the queue and handle them, quit when you get a None for a
# LogRecord.
def listener_process(queue, configurer):
configurer()
while True:
try:
record = queue.get()
if record is None: # We send this as a sentinel to tell the listener to quit.
break
logger = logging.getLogger(record.name)
# No level or filter logic applied - just do it!
logger.handle(record)
except Exception:
import sys
import traceback
print('Whoops! Problem:', file=sys.stderr)
traceback.print_exc(file=sys.stderr)
def worker_configurer_old(queue):
h = logging.handlers.QueueHandler(queue) # Just the one handler needed
root = logging.getLogger()
root.addHandler(h)
# send all messages, for demo; no other level or filter logic applied.
root.setLevel(logging.DEBUG)
def worker_configurer(queue):
root = logging.getLogger()
# print(f'{root.handlers=}')
if len(root.handlers) == 0:
h = logging.handlers.QueueHandler(queue)
root.addHandler(h)
root.setLevel(logging.DEBUG)
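# Unlike worker_configurer_old above, this version first checks root.handlers
# so that a pooled worker process reused for several tasks does not attach a
# second QueueHandler, which would duplicate every log record it emits.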
# This is the worker process top-level function, which logs a start message,
# sleeps for the requested time, then logs a finish message.
# The log messages are just so you know it's doing something!
def worker_function(sleep_time, name, queue, configurer):
configurer(queue)
start_message = 'Worker {} started and will now sleep for {}s'.format(
name, sleep_time)
logging.info(start_message)
time.sleep(sleep_time)
success_message = 'Worker {} has finished sleeping for {}s'.format(
name, sleep_time)
logging.info(success_message)
def main_with_pool():
start_time = time.time()
queue = multiprocessing.Manager().Queue(-1)
listener = multiprocessing.Process(target=listener_process,
args=(queue, listener_configurer))
listener.start()
pool = multiprocessing.Pool(processes=3)
job_list = [np.random.randint(10) / 2 for i in range(10)]
single_thread_time = np.sum(job_list)
for i, sleep_time in enumerate(job_list):
name = str(i)
pool.apply_async(worker_function,
args=(sleep_time, name, queue, worker_configurer))
pool.close()
pool.join()
queue.put_nowait(None)
listener.join()
end_time = time.time()
print("Script execution time was {}s, but single-thread time was {}s".format(
(end_time - start_time),
single_thread_time
))
if __name__ == "__main__":
main_with_pool()
# %%
``` |
{
"source": "john-james-sf/nlr",
"score": 2
} |
#### File: nlr/data/builder.py
```python
from abc import ABC, abstractmethod
import logging
from nlr.data.base import Director
# ------------------------------------------------------------------------------------------------------------------------ #
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------------------------------------------------ #
class DatasetBuilder(ABC):
"""Abstract dataset builder class.
The DatasetBuilder class specifies the methods for obtaining, ingesting, processing, and presenting datasets for modeling.
"""
def __init__(self, dao: DatasetMetadataDAO)
@property
@abstractmethod
def build_metadata(self) -> None:
pass
@abstractmethod
def extract(self) -> None:
pass
@abstractmethod
def explore(self) -> None:
pass
@abstractmethod
def clean(self) -> None:
pass
@abstractmethod
def transform(self) -> None:
pass
@abstractmethod
def combine(self) -> None:
pass
@abstractmethod
def split(self) -> None:
pass
@abstractmethod
def combine(self) -> None:
pass
# ------------------------------------------------------------------------------------------------------------------------ #
class DatasetBuilderRatings(DatasetBuilder):
    """Builds rating datasets."""
    def build_metadata(self) -> None:
        pass
    def extract(self) -> None:
        pass
    def explore(self) -> None:
        pass
    def clean(self) -> None:
        pass
    def transform(self) -> None:
        pass
    def combine(self) -> None:
        pass
    def split(self) -> None:
        pass
```
#### File: nlr/lab/profiling.py
```python
import collections
import time
from pprint import pprint
from datetime import datetime
import psutil
p = psutil.Process()
# print(datetime.now())
X = p.as_dict(attrs=['pid', 'create_time', 'cpu_percent', 'cpu_times', 'num_threads', 'num_handles',
'name', 'memory_percent'])
T = ['io_counters', 'memory_full_info', 'num_ctx_switches']
X['resources'] = {
'num_open_files': len(p.open_files()),
'num_open_connections': len(p.connections())
}
X['create_time_format'] = datetime.fromtimestamp(
p.create_time()).strftime("%Y-%m-%d %H:%M:%S")
def flatten(d, parent_key='', sep='_'):
    """Recursively flatten nested dicts and psutil namedtuples into a flat dict."""
    items = {}
    # psutil returns namedtuples (e.g. cpu_times); anything exposing _asdict()
    # is treated like a dict so nested fields get flattened too.
    pairs = d._asdict().items() if hasattr(d, '_asdict') else d.items()
    for k, v in pairs:
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, dict) or hasattr(v, '_asdict'):
            items.update(flatten(v, new_key, sep=sep))
        else:
            items[new_key] = v
    return items
pprint(X)
final = flatten(X)
pprint(final)
print(len(p.open_files()))
print(len(p.connections()))
print(p.name())
print(datetime.fromtimestamp(
p.create_time()).strftime("%Y-%m-%d %H:%M:%S"))
def flatten(d, parent_key='', sep='_'):
    items = []
    if hasattr(d, '_asdict'):
        # namedtuples expose _asdict(); convert so the dict branch handles them
        d = d._asdict()
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, dict) or hasattr(v, '_asdict'):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
print(flatten(X))
# %%
```
#### File: nlr/nlr/nlr.py
```python
import time
from nlr.setup.folders import setup_folders
from nlr.setup.database import setup_database
from nlr.database.sequel import TABLES
from nlr.database import DBNAME, SERVER, HOST, PORT
# %%
def authorize():
ok = input(
"Do we have your authorization to download and organize data on your machine? [y/n]") or 'y'
if 'y' in ok or 'Y' in ok:
print("Most excellent. Let's get started.\n\n")
time.sleep(1)
return True
else:
print("Not worries. Please reference the README.md for this project and data setup instructions.\n\n")
time.sleep(1)
return False
def main():
"""Main entry point."""
config = {}
print("Welcome to Natural Language Recommendation Project!\nLet's jump right in. We'll need to:\n\t1. Designate your project home and data directories,\n\t2. Setup your MySQL Database\n\t3. Select the data sources that we will be using on the project, and \n\t4. Launch a potentially time-intensive data extraction process. \n\n")
time.sleep(1)
print("This will obviously require us to create directories and download data onto your machine.")
time.sleep(1)
if authorize():
setup_folders()
time.sleep(1)
setup_database(SERVER, DBNAME, HOST, PORT, TABLES)
if __name__ == "__main__":
main()
# %%
```
#### File: tests/test_database/test_admin.py
```python
import os
import pytest
import pandas as pd
import logging
import inspect
from nlr.database.connect import MySQLDatabase, MySQLServer
from nlr.database.admin import DatabaseAdmin, TableAdmin
from nlr.database import DBNAME
from nlr.database.ddl import TABLES
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DatabaseAdminTests:
def test_drop(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
db = DatabaseAdmin()
server_con = MySQLServer()
with server_con() as connection:
db.drop(DBNAME, connection)
assert not db.exists(DBNAME, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_create(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
db = DatabaseAdmin()
server_conn = MySQLServer()
with server_conn() as connection:
db.create(DBNAME, connection, exist_ok=False)
assert db.exists(DBNAME, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_create_exist(self):
# Should log an error
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
db = DatabaseAdmin()
server_conn = MySQLServer()
with server_conn() as connection:
db.create(DBNAME, connection, exist_ok=False)
assert db.exists(DBNAME, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_create_exist_ok(self):
# Should log at information level
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
db = DatabaseAdmin()
server_conn = MySQLServer()
with server_conn() as connection:
db.create(DBNAME, connection, exist_ok=True)
assert db.exists(DBNAME, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
class TableAdminTests:
TABLE = 'datasources'
def test_drop(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
table_admin = TableAdmin()
dbconn = MySQLDatabase()
with dbconn(DBNAME) as connection:
            table_admin.drop(TABLES, connection)
            assert not table_admin.exists(self.TABLE, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_create(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
table_admin = TableAdmin()
dbconn = MySQLDatabase()
with dbconn(DBNAME) as connection:
table_admin.create(TABLES, connection)
for name in TABLES.keys():
                assert table_admin.exists(name, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_create_exist(self):
# Should log error.
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
table_admin = TableAdmin()
dbconn = MySQLDatabase()
with dbconn(DBNAME) as connection:
table_admin.create(TABLES, connection, exist_ok=False)
for name in TABLES.keys():
                assert table_admin.exists(name, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_create_exist_ok(self):
# Should log information
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
table_admin = TableAdmin()
dbconn = MySQLDatabase()
with dbconn(DBNAME) as connection:
table_admin.create(TABLES, connection, exist_ok=True)
for name in TABLES.keys():
                assert table_admin.exists(name, connection), "Failure in {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
if __name__ == "__main__":
t = DatabaseAdminTests()
t.test_drop()
t.test_create()
t.test_create_exist()
t.test_create_exist_ok()
t = TableAdminTests()
t.test_drop()
t.test_create()
t.test_create_exist()
t.test_create_exist_ok()
# %%
```
#### File: test_data/test__datasources/test_datasource.py
```python
import os
import pytest
import pandas as pd
import logging
import inspect
# from importlib import reload
# logging.shutdown()
# reload(logging)
from nlr.data.sources import DataSource
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DataSourceTests:
def test_get_filenames(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
ds = DataSource()
filenames = ds.get_filenames()
assert isinstance(filenames, list), "Failure in {}".format(
inspect.stack()[0][3])
assert len(filenames) > 7, "Failure in {} ".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_bucket_exists(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
ds = DataSource()
assert ds.s3_bucket_exists(), "Failure in {} S3 bucket doesn't exist".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_local_metadata(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
ds = DataSource()
meta = ds.get_metadata(local=True)
assert isinstance(meta, dict), "Failure in {} local data metadata {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
def test_remote_metadata(self):
logger.info(" Started {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
ds = DataSource()
meta = ds.get_metadata(local=False)
assert isinstance(meta, dict), "Failure in {} local data metadata {}".format(
inspect.stack()[0][3])
logger.info(" Successfully completed {} {}".format(
self.__class__.__name__, inspect.stack()[0][3]))
if __name__ == "__main__":
t = DataSourceTests()
t.test_get_filenames()
t.test_bucket_exists()
t.test_local_metadata()
t.test_remote_metadata()
# %%
``` |
{
"source": "john-james-sf/predict-fda",
"score": 2
} |
#### File: predict-fda/src/main.py
```python
from pathlib import Path
import logging
from src.infrastructure.setup import PlatformBuilder
from src.infrastructure.data.config import pg_pg_login, DBCredentials
# import click
def setup_database(dbname: str, credentials: DBCredentials) -> None:
print(credentials)
mdata = "platform/metadata/metadata_table_create.sql"
dsources = "..data/metadata/datasources.csv"
builder = PlatformBuilder(pg_pg_login)
builder.build_user(credentials)
builder.build_database(dbname)
builder.build_metadata(mdata)
builder.build_datasources(dsources)
def get_user_config():
dbname = 'rx2m'
# username = input(
# "Welcome. Please enter your credentials in the config file and enter\
# your username? ['j2']"
# )
config = DBCredentials()
credentials = config.get_config('j2')
setup_database(dbname, credentials)
# @click.command()
# @click.argument('input_filepath', type=click.Path(exists=True))
# @click.argument('output_filepath', type=click.Path())
def main():
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
get_user_config()
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
main()
``` |
{
"source": "johnjarman/octopus-energy-display",
"score": 3
} |
#### File: johnjarman/octopus-energy-display/octopusenergy.py
```python
import logging
import json
import requests
from datetime import datetime
from dateutil import tz
def load_api_key_from_file(filename):
with open(filename) as f:
s = f.read()
return s.strip()
class OctopusEnergy:
def __init__(self, api_key, api_url='https://api.octopus.energy/v1/products/AGILE-18-02-21/electricity-tariffs/E-1R-AGILE-18-02-21-J/standard-unit-rates/'):
self.api_key = api_key
self.api_url = api_url
self.date_format = '%Y-%m-%dT%H:%M:%SZ'
self.data = None
def get_elec_price(self, cache=True):
""" Get current electricity price
@param bool cache: Allow caching of fetched values
"""
price = None
if cache:
try:
# RAM cache
price = self._get_current_price_from_data(self.data)
            except Exception:
pass
if price is None:
logging.info('Loading price data over HTTP')
# Get price via HTTP
self.data = self._get_data_http()
price = self._get_current_price_from_data(self.data)
return price
def _get_data_http(self):
r = requests.get(self.api_url, auth=(self.api_key + ':', ''))
return json.loads(r.text)
def _get_current_price_from_data(self, data):
current_time = datetime.now()
utc = tz.tzutc()
local_tz = tz.tzlocal()
        # datetime.replace returns a new object, so the result must be reassigned
        current_time = current_time.replace(tzinfo=local_tz)
current_time = current_time.astimezone(utc)
price = None
try:
for val in data['results']:
valid_from = datetime.strptime(val['valid_from'], self.date_format).replace(tzinfo=utc)
valid_to = datetime.strptime(val['valid_to'], self.date_format).replace(tzinfo=utc)
if (valid_from <= current_time and
valid_to > current_time):
price = val['value_inc_vat']
except KeyError:
try:
logging.error("Could not get price data: " + data['detail'])
            except Exception:
logging.error("Could not get price data or error info.")
except json.JSONDecodeError as err:
logging.error('JSON decode error: {}'.format(err))
return price
if __name__ == '__main__':
api_key = load_api_key_from_file('api_key.txt')
oe = OctopusEnergy(api_key)
print(oe.get_elec_price())
``` |
{
"source": "johnjarmitage/flem",
"score": 3
} |
#### File: flem/flem/read_dem.py
```python
import numpy as np
import elevation as elv
import os
from osgeo import gdal
from fenics import FunctionSpace, Function, Point
from mshr import Rectangle, generate_mesh
from scipy import interpolate
def read_dem(bounds, res):
"""
Function to read in a DEM from SRTM amd interplolate it onto a dolphyn mesh. This function uses the python package
'elevation' (http://elevation.bopen.eu/en/stable/) and the gdal libraries.
I will assume you want the 30m resolution SRTM model.
:param bounds: west, south, east, north coordinates
:return u_n, lx, ly: the elevation interpolated onto the dolphyn mesh and the lengths of the domain
"""
west, south, east, north = bounds
# Create a temporary file to store the DEM and go get it using elevation
dem_path = 'tmp.tif'
output = os.getcwd() + '/' + dem_path
elv.clip(bounds=bounds, output=output, product='SRTM1')
# read in the DEM into a numpy array
gdal_data = gdal.Open(output)
    data_array = gdal_data.ReadAsArray().astype(float)  # np.float is deprecated in recent numpy
# The DEM is 30m per pixel, so lets make a array for x and y at 30 m
ny, nx = np.shape(data_array)
lx = nx*30
ly = ny*30
x, y = np.meshgrid(np.linspace(0, lx/ly, nx), np.linspace(1, 0, ny))
# Create mesh and define function space
domain = Rectangle(Point(0, 0), Point(lx/ly, 1))
mesh = generate_mesh(domain, res)
V = FunctionSpace(mesh, 'P', 1)
u_n = Function(V)
# Get the global coordinates
gdim = mesh.geometry().dim()
gc = V.tabulate_dof_coordinates().reshape((-1, gdim))
# Interpolate elevation into the initial condition
elevation = interpolate.griddata((x.flatten(), y.flatten()), data_array.flatten(), (gc[:, 0], gc[:, 1]),
method='nearest')
u_n.vector()[:] = elevation/ly
# remove tmp DEM
os.remove(output)
return u_n, lx, ly, mesh, V
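# A minimal usage sketch; the bounds below are hypothetical example
# coordinates, not values taken from the original project.
if __name__ == '__main__':
    example_bounds = (5.0, 44.0, 6.0, 45.0)  # west, south, east, north (hypothetical)
    u_n, lx, ly, mesh, V = read_dem(example_bounds, res=50)
    print(lx, ly, mesh.num_cells())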
``` |
{
"source": "johnjasa/CADRE",
"score": 2
} |
#### File: CADRE/attitude_dymos/ode_rate_comp.py
```python
from __future__ import print_function, division
from six import string_types, iteritems
import numpy as np
from scipy.sparse import csr_matrix
from openmdao.api import ExplicitComponent
from dymos.phases.grid_data import GridData
from dymos.utils.misc import get_rate_units
class ODERateComp(ExplicitComponent):
"""
Compute the approximated rates of a variable in the ODE.
Currently this component only works in the ODE of Radau Pseudospectral phases since it
requires input values to be provided at all nodes.
Notes
-----
.. math::
\\dot{p}_a = \\frac{d\\tau_s}{dt} \\left[ D \\right] p_a
where
:math:`p_a` are the values of the variable at all nodes,
:math:`\\dot{p}_a` are the time-derivatives of the variable at all nodes,
:math:`D` is the Lagrange differentiation matrix,
and :math:`\\frac{d\\tau_s}{dt}` is the ratio of segment duration in segment tau space
[-1 1] to segment duration in time.
"""
def initialize(self):
self.options.declare(
'time_units', default=None, allow_none=True, types=string_types,
desc='Units of time')
self.options.declare(
'grid_data', types=GridData,
desc='Container object for grid info')
# Save the names of the dynamic controls/parameters
self._dynamic_names = []
self._input_names = {}
self._output_rate_names = {}
self._var_options = {}
def _setup_variables(self):
num_nodes = self.num_nodes
time_units = self.options['time_units']
for name, options in iteritems(self._var_options):
self._input_names[name] = name
self._output_rate_names[name] = 'dYdt:{0}'.format(name)
shape = options['shape']
input_shape = (num_nodes,) + shape
output_shape = (num_nodes,) + shape
units = options['units']
rate_units = get_rate_units(units, time_units)
self._dynamic_names.append(name)
self.add_input(self._input_names[name], val=np.ones(input_shape), units=units)
self.add_output(self._output_rate_names[name], shape=output_shape, units=rate_units)
size = np.prod(shape)
self.rate_jacs[name] = np.zeros((num_nodes, size, num_nodes, size))
for i in range(size):
self.rate_jacs[name][:, i, :, i] = self.D
self.rate_jacs[name] = self.rate_jacs[name].reshape((num_nodes * size,
num_nodes * size),
order='C')
self.rate_jac_rows[name], self.rate_jac_cols[name] = \
np.where(self.rate_jacs[name] != 0)
self.sizes[name] = size
cs = np.tile(np.arange(num_nodes, dtype=int), reps=size)
rs = np.concatenate([np.arange(0, num_nodes * size, size, dtype=int) + i
for i in range(size)])
self.declare_partials(of=self._output_rate_names[name],
wrt='dt_dstau',
rows=rs, cols=cs)
self.declare_partials(of=self._output_rate_names[name],
wrt=self._input_names[name],
rows=self.rate_jac_rows[name], cols=self.rate_jac_cols[name])
def setup(self):
num_nodes = self.options['grid_data'].num_nodes
time_units = self.options['time_units']
gd = self.options['grid_data']
self.add_input('dt_dstau', shape=num_nodes, units=time_units)
# self._var_options = {}
self.rate_jacs = {}
self.rate_jac_rows = {}
self.rate_jac_cols = {}
self.sizes = {}
self.num_nodes = num_nodes
# The control interpolation matrix L is the product of M_index_to_disc and the nominal
# pseudospectral interpolation matrix from the discretization nodes to all nodes.
_, self.D = gd.phase_lagrange_matrices('all', 'all')
self._setup_variables()
self.set_check_partial_options('*', method='cs')
def add_rate(self, name, shape, units):
self._var_options[name] = {'shape': shape,
'units': units}
def compute(self, inputs, outputs):
var_options = self._var_options
for name, options in iteritems(var_options):
u = inputs[self._input_names[name]]
a = np.tensordot(self.D, u, axes=(1, 0)).T
# divide each "row" by dt_dstau or dt_dstau**2
outputs[self._output_rate_names[name]] = (a / inputs['dt_dstau']).T
def compute_partials(self, inputs, partials):
var_options = self._var_options
num_nodes = self.options['grid_data'].subset_num_nodes['all']
for name, options in iteritems(var_options):
control_name = self._input_names[name]
size = self.sizes[name]
rate_name = self._output_rate_names[name]
# Unroll matrix-shaped controls into an array at each node
u_d = np.reshape(inputs[control_name], (num_nodes, size))
dt_dstau = inputs['dt_dstau']
dt_dstau_tile = np.tile(dt_dstau, size)
partials[rate_name, 'dt_dstau'] = \
(-np.dot(self.D, u_d).ravel(order='F') / dt_dstau_tile ** 2)
dt_dstau_x_size = np.repeat(dt_dstau, size)[:, np.newaxis]
r_nz, c_nz = self.rate_jac_rows[name], self.rate_jac_cols[name]
partials[rate_name, control_name] = \
(self.rate_jacs[name] / dt_dstau_x_size)[r_nz, c_nz]
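if __name__ == '__main__':
    # A minimal numeric sanity check of the rate relation in the class
    # docstring, independent of dymos: for nodes tau = [-1, 0, 1] the Lagrange
    # differentiation matrix is known in closed form, and for p(tau) = tau**2
    # the nodal rates in time should be 2*tau divided by dt_dstau.
    D_check = np.array([[-1.5, 2.0, -0.5],
                        [-0.5, 0.0, 0.5],
                        [0.5, -2.0, 1.5]])
    p = np.array([1.0, 0.0, 1.0])   # p(tau) = tau**2 sampled at tau = -1, 0, 1
    dt_dstau = 2.0                  # a segment spanning 4 time units
    print(D_check.dot(p) / dt_dstau)  # expected: [-1.  0.  1.]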
```
#### File: CADRE/attitude_dymos/VectorUnitizeComp.py
```python
from six import string_types
import numpy as np
from openmdao.core.explicitcomponent import ExplicitComponent
class VectorUnitizeComp(ExplicitComponent):
"""
Computes the unitized vector
math::
\hat{a} = \bar{a} / np.sqrt(np.dot(a, a))
where a is of shape (vec_size, n)
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('vec_size', types=int, default=1,
desc='The number of points at which the vector magnitude is computed')
self.options.declare('length', types=int, default=3,
desc='The length of the input vector at each point')
self.options.declare('in_name', types=string_types, default='a',
desc='The variable name for input vector.')
self.options.declare('units', types=string_types, default=None, allow_none=True,
desc='The units for the input and output vector.')
self.options.declare('out_name', types=string_types, default='a_mag',
desc='The variable name for output unitized vector.')
def setup(self):
"""
Declare inputs, outputs, and derivatives for the vector magnitude component.
"""
opts = self.options
vec_size = opts['vec_size']
m = opts['length']
self.add_input(name=opts['in_name'],
shape=(vec_size, m),
units=opts['units'])
self.add_output(name=opts['out_name'],
val=np.zeros(shape=(vec_size, m)),
units=opts['units'])
row_idxs = np.repeat(np.arange(vec_size * m, dtype=int), m)
temp = np.reshape(np.arange(vec_size * m, dtype=int), newshape=(vec_size, m))
col_idxs = np.repeat(temp, m, axis=0).ravel()
self.declare_partials(of=opts['out_name'], wrt=opts['in_name'],
rows=row_idxs, cols=col_idxs, val=1.0)
def compute(self, inputs, outputs):
"""
Compute the vector magnitude of input.
Parameters
----------
inputs : Vector
unscaled, dimensional input variables read via inputs[key]
outputs : Vector
unscaled, dimensional output variables read via outputs[key]
"""
opts = self.options
a = inputs[opts['in_name']]
a_mag = np.sqrt(np.einsum('ni,ni->n', a, a))
outputs[opts['out_name']] = a / a_mag[:, np.newaxis]
    def compute_partials(self, inputs, partials):
        """
        Compute the sparse partials for the unitized vector w.r.t. the inputs.
        Parameters
        ----------
        inputs : Vector
            unscaled, dimensional input variables read via inputs[key]
        partials : Jacobian
            sub-jac components written to partials[output_name, input_name]
        """
        opts = self.options
        m = opts['length']
        a = inputs[opts['in_name']]
        a_mag = np.sqrt(np.einsum('ni,ni->n', a, a))
        # Quotient rule for a_hat = a / |a| at each node:
        # d(a_i/|a|)/da_j = delta_ij / |a| - a_i * a_j / |a|**3
        eye = np.eye(m)
        jac = eye[np.newaxis, :, :] / a_mag[:, np.newaxis, np.newaxis] \
            - np.einsum('ni,nj->nij', a, a) / a_mag[:, np.newaxis, np.newaxis] ** 3
        # jac has shape (vec_size, m, m); raveling in C order matches the
        # (node, output, input) ordering of the declared sparsity pattern.
        partials[opts['out_name'], opts['in_name']] = jac.ravel()
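if __name__ == '__main__':
    # A hedged sanity check, not part of the component: compare the analytic
    # unitize Jacobian at one node against a finite-difference estimate.
    rng = np.random.default_rng(0)
    a = rng.random(3) + 0.5
    mag = np.sqrt(a.dot(a))
    jac = np.eye(3) / mag - np.outer(a, a) / mag ** 3
    eps = 1.0e-7
    jac_fd = np.empty((3, 3))
    for j in range(3):
        da = np.zeros(3)
        da[j] = eps
        ap = a + da
        jac_fd[:, j] = (ap / np.sqrt(ap.dot(ap)) - a / mag) / eps
    assert np.allclose(jac, jac_fd, atol=1.0e-5)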
```
#### File: CADRE/comm_dymos/comm_group.py
```python
from __future__ import print_function, division, absolute_import
from openmdao.api import Group, VectorMagnitudeComp, MatrixVectorProductComp
from .comm_ant_quaternion_comp import CommAntQuaternionComp
from .comm_ant_rotation_matrix_comp import CommAntRotationMatrixComp
from .comm_data_rate_comp import CommDataRateComp
from .comm_earth_rotation_quaternion_comp import CommEarthRotationQuaternionComp
from .comm_earth_rotation_matrix_comp import CommEarthRotationMatrixComp
from .comm_gain_pattern_comp import CommGainPatternComp
from .comm_gs_pos_eci_comp import CommGSPosECIComp
from .comm_los_comp import CommLOSComp
from .comm_vector_eci_comp import CommVectorECIComp
from .comm_vector_spherical_comp import CommVectorSphericalComp
class CommGroup(Group):
"""
The Comm subsystem for CADRE.
Externally sourced inputs:
t - time
r_e2b_I - state
antAngle - parameter
P_comm - parameter
"""
def initialize(self):
self.options.declare('num_nodes', types=(int,))
self.options.declare('lat_gs', types=(float,), default=42.2708,
desc='ground station latitude (degrees)')
self.options.declare('lon_gs', types=(float,), default=-83.7264,
desc='ground station longitude (degrees)')
self.options.declare('alt_gs', types=(float,), default=0.256,
desc='ground station altitude (km)')
self.options.declare('Re', types=(float,), default=6378.137,
desc='Earth equatorial radius (km)')
def setup(self):
nn = self.options['num_nodes']
lat_gs = self.options['lat_gs']
lon_gs = self.options['lon_gs']
alt_gs = self.options['alt_gs']
self.add_subsystem('ant_quaternion_comp',
CommAntQuaternionComp(num_nodes=nn),
promotes_inputs=['antAngle'], promotes_outputs=['q_AB'])
self.add_subsystem('ant_rotation_matrix_comp',
CommAntRotationMatrixComp(num_nodes=nn),
promotes_inputs=['q_AB'], promotes_outputs=['O_AB'])
self.add_subsystem('comm_earth_rotation_quaternion_comp',
CommEarthRotationQuaternionComp(num_nodes=nn, gha0=0.0),
promotes_inputs=['t'], promotes_outputs=['q_IE'])
self.add_subsystem('comm_earth_rotation_matrix_comp',
CommEarthRotationMatrixComp(num_nodes=nn),
promotes_inputs=['q_IE'], promotes_outputs=['O_IE'])
self.add_subsystem('comm_gs_pos_eci_comp',
CommGSPosECIComp(num_nodes=nn, lat=lat_gs, lon=lon_gs, alt=alt_gs),
promotes_inputs=['O_IE'], promotes_outputs=['r_e2g_I'])
self.add_subsystem('comm_vector_eci_comp', CommVectorECIComp(num_nodes=nn),
promotes_inputs=['r_e2g_I', 'r_e2b_I'], promotes_outputs=['r_b2g_I'])
self.add_subsystem('comm_vector_body_comp',
MatrixVectorProductComp(vec_size=nn, A_name='O_BI', x_name='r_b2g_I',
b_name='r_b2g_B', x_units='km', b_units='km'),
promotes_inputs=['O_BI', 'r_b2g_I'], promotes_outputs=['r_b2g_B'])
self.add_subsystem('comm_vector_ant_comp',
MatrixVectorProductComp(vec_size=nn, A_name='O_AB', x_name='r_b2g_B',
b_name='r_b2g_A', x_units='km', b_units='km'),
promotes_inputs=['O_AB', 'r_b2g_B'], promotes_outputs=['r_b2g_A'])
self.add_subsystem('comm_vector_spherical_comp',
CommVectorSphericalComp(num_nodes=nn),
promotes_inputs=['r_b2g_A'],
promotes_outputs=['elevationGS', 'azimuthGS'])
self.add_subsystem('comm_distance_comp',
VectorMagnitudeComp(vec_size=nn, in_name='r_b2g_A', mag_name='GSdist',
units='km'),
promotes_inputs=['r_b2g_A'], promotes_outputs=['GSdist'])
self.add_subsystem('comm_gain_pattern_comp',
CommGainPatternComp(num_nodes=nn),
promotes_inputs=['elevationGS', 'azimuthGS'],
promotes_outputs=['gain'])
self.add_subsystem('comm_los_comp', CommLOSComp(num_nodes=nn),
promotes_inputs=['r_b2g_I', 'r_e2g_I'], promotes_outputs=['CommLOS'])
self.add_subsystem('data_rate_comp',
CommDataRateComp(num_nodes=nn),
promotes_inputs=['P_comm', 'gain', 'CommLOS', 'GSdist'],
promotes_outputs=['dXdt:data'])
```
#### File: CADRE/comm_dymos/comm_vector_spherical_comp.py
```python
from __future__ import print_function, division, absolute_import
import numpy as np
from openmdao.api import ExplicitComponent
from ..kinematics import computepositionspherical, computepositionsphericaljacobian
class CommVectorSphericalComp(ExplicitComponent):
"""
Convert satellite-ground vector into Az-El.
"""
def initialize(self):
self.options.declare('num_nodes', types=(int,))
def setup(self):
nn = self.options['num_nodes']
# Inputs
self.add_input('r_b2g_A', np.zeros((nn, 3)), units='km',
desc='Position vector from satellite to ground station '
'in antenna angle frame over time')
# Outputs
self.add_output('azimuthGS', np.zeros(nn), units='rad',
desc='Azimuth angle from satellite to ground station in '
'Earth-fixed frame over time')
self.add_output('elevationGS', np.zeros(nn), units='rad',
desc='Elevation angle from satellite to ground station '
'in Earth-fixed frame over time')
rows = np.tile(np.array([0, 0, 0]), nn) + np.repeat(np.arange(nn), 3)
cols = np.arange(nn*3)
self.declare_partials('elevationGS', 'r_b2g_A', rows=rows, cols=cols)
self.declare_partials('azimuthGS', 'r_b2g_A', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
azimuthGS, elevationGS = computepositionspherical(self.options['num_nodes'], inputs['r_b2g_A'])
outputs['azimuthGS'] = azimuthGS
outputs['elevationGS'] = elevationGS
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
nn = self.options['num_nodes']
Ja1, Ja2 = computepositionsphericaljacobian(nn, 3*nn, inputs['r_b2g_A'])
partials['azimuthGS', 'r_b2g_A'] = Ja1
partials['elevationGS', 'r_b2g_A'] = Ja2
```
#### File: CADRE/odes_dymos/cadre_orbit_ode.py
```python
from __future__ import print_function, division, absolute_import
from openmdao.api import Group, VectorMagnitudeComp
from dymos import declare_state, declare_time, declare_parameter
from CADRE.orbit_dymos.orbit_eom import OrbitEOMComp
from CADRE.orbit_dymos.gravity_perturbations_comp import GravityPerturbationsComp
from CADRE.orbit_dymos.ori_comp import ORIComp
from CADRE.orbit_dymos.obr_comp import OBRComp
from CADRE.orbit_dymos.obi_comp import OBIComp
@declare_time(units='s')
@declare_state('r_e2b_I', rate_source='orbit_eom_comp.dXdt:r_e2b_I', targets=['r_e2b_I'],
units='km', shape=(3,))
@declare_state('v_e2b_I', rate_source='orbit_eom_comp.dXdt:v_e2b_I', targets=['v_e2b_I'],
units='km/s', shape=(3,))
@declare_parameter('Gamma', targets=['Gamma'], units='rad')
class CadreOrbitODE(Group):
def initialize(self):
self.options.declare('num_nodes', types=(int,))
def setup(self):
nn = self.options['num_nodes']
self.add_subsystem('rmag_comp',
VectorMagnitudeComp(vec_size=nn, length=3, in_name='r_e2b_I',
mag_name='rmag_e2b_I', units='km'),
promotes_inputs=['r_e2b_I'], promotes_outputs=['rmag_e2b_I'])
self.add_subsystem('grav_pert_comp', GravityPerturbationsComp(num_nodes=nn),
promotes_inputs=['r_e2b_I', 'rmag_e2b_I'],
promotes_outputs=[('a_pert:J2', 'a_pert_I')])
self.add_subsystem('ori_comp',
ORIComp(num_nodes=nn),
promotes_inputs=['r_e2b_I', 'v_e2b_I'],
promotes_outputs=['O_RI'])
self.add_subsystem('obr_comp',
OBRComp(num_nodes=nn),
promotes_inputs=['Gamma'],
promotes_outputs=['O_BR'])
self.add_subsystem('obi_comp',
OBIComp(num_nodes=nn),
promotes_inputs=['O_BR', 'O_RI'],
promotes_outputs=['O_BI'])
self.add_subsystem('orbit_eom_comp', OrbitEOMComp(num_nodes=nn),
promotes_inputs=['rmag_e2b_I', 'r_e2b_I', 'v_e2b_I', 'a_pert_I'])
```
#### File: odes_dymos/test/test_cadre_orbit_ode.py
```python
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, Group, pyOptSparseDriver
from openmdao.utils.assert_utils import assert_rel_error, assert_check_partials
from dymos import Phase
from CADRE.odes_dymos.cadre_orbit_ode import CadreOrbitODE
GM = 398600.44
rmag = 7000.0
period = 2 * np.pi * np.sqrt(rmag ** 3 / GM)
vcirc = np.sqrt(GM / rmag)
duration = period / 1
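# For a circular orbit of radius r about a body with gravitational parameter
# GM, the circular speed is v = sqrt(GM / r) and the period is
# T = 2 * pi * sqrt(r**3 / GM), which is what the constants above encode.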
class TestCadreOrbitODE(unittest.TestCase):
@classmethod
def setUpClass(cls):
p = cls.p = Problem(model=Group())
p.driver = pyOptSparseDriver()
p.driver.options['optimizer'] = 'SNOPT'
p.driver.options['dynamic_simul_derivs'] = True
p.driver.opt_settings['Major iterations limit'] = 100
p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-6
p.driver.opt_settings['Major optimality tolerance'] = 1.0E-6
p.driver.opt_settings['iSumm'] = 6
phase = Phase('radau-ps',
ode_class=CadreOrbitODE,
num_segments=10,
transcription_order=7,
compressed=False)
p.model.add_subsystem('phase0', phase)
phase.set_time_options(fix_initial=True, duration_bounds=(duration, duration))
phase.set_state_options('r_e2b_I', defect_scaler=1000, fix_initial=True, units='km')
phase.set_state_options('v_e2b_I', defect_scaler=1000, fix_initial=True, units='km/s')
# phase.set_state_options('SOC', defect_scaler=1, fix_initial=True, units=None)
# phase.add_design_parameter('P_bat', opt=False, units='W')
phase.add_design_parameter('Gamma', opt=False, units='rad')
phase.add_objective('time', loc='final', scaler=10)
# p.model.options['assembled_jac_type'] = 'csc'
# p.model.linear_solver = DirectSolver(assemble_jac=True)
p.setup(check=True, force_alloc_complex=True)
p['phase0.t_initial'] = 0.0
p['phase0.t_duration'] = duration
# print(phase.grid_data.num_nodes)
#
p['phase0.states:r_e2b_I'][:, 0] = rmag
p['phase0.states:r_e2b_I'][:, 1] = 0.0
p['phase0.states:r_e2b_I'][:, 2] = 0.0
p['phase0.states:v_e2b_I'][:, 0] = 0.0
p['phase0.states:v_e2b_I'][:, 1] = vcirc
p['phase0.states:v_e2b_I'][:, 2] = 0.0
# p['phase0.design_parameters:P_bat'] = 2.0
p['phase0.design_parameters:Gamma'] = 0.0
p.run_model()
p.run_driver()
def test_results(self):
r_e2b_I = self.p.model.phase0.get_values('r_e2b_I')
v_e2b_I = self.p.model.phase0.get_values('v_e2b_I')
rmag_e2b = self.p.model.phase0.get_values('rmag_e2b_I')
assert_rel_error(self, rmag_e2b, rmag * np.ones_like(rmag_e2b), tolerance=1.0E-9)
delta_trua = 2 * np.pi * (duration / period)
assert_rel_error(self, r_e2b_I[-1, :], rmag * np.array([np.cos(delta_trua), np.sin(delta_trua), 0]), tolerance=1.0E-9)
assert_rel_error(self, v_e2b_I[-1, :], vcirc * np.array([-np.sin(delta_trua), np.cos(delta_trua), 0]), tolerance=1.0E-9)
print(self.p.model.phase0.get_values('O_BI'))
def test_partials(self):
np.set_printoptions(linewidth=10000, edgeitems=1024)
cpd = self.p.check_partials(compact_print=True)
assert_check_partials(cpd, atol=1.0E-4, rtol=1.0)
def test_simulate(self):
phase = self.p.model.phase0
exp_out = phase.simulate(times=500)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
plt.plot(phase.get_values('r_e2b_I')[:, 0], phase.get_values('r_e2b_I')[:, 1], 'ro')
# plt.figure()
# plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
plt.show()
```
#### File: CADRE/orbit_dymos/obi_comp.py
```python
from __future__ import print_function, division, absolute_import
import numpy as np
from openmdao.api import ExplicitComponent
class OBIComp(ExplicitComponent):
"""
    Calculates the rotation matrix O_BI as the product of O_RI and O_BR.
"""
def initialize(self):
self.options.declare('num_nodes', types=(int,))
def setup(self):
nn = self.options['num_nodes']
# Inputs
self.add_input('O_BR', np.zeros((nn, 3, 3)), units=None,
desc='Rotation matrix from body-fixed frame to rolled body-fixed '
'frame over time')
self.add_input('O_RI', np.zeros((nn, 3, 3)), units=None,
desc='Rotation matrix from rolled body-fixed to inertial '
'frame over time')
        self.add_output('O_BI', np.zeros((nn, 3, 3)), units=None,
                        desc='Rotation matrix between the body-fixed and inertial '
                             'frames over time')
template = np.kron(np.eye(nn, dtype=int), np.kron(np.ones((3, 3), dtype=int), np.eye(3, dtype=int)))
rs, cs = template.nonzero()
self.declare_partials('O_BI', 'O_BR', rows=rs, cols=cs, val=1.0)
template = np.kron(np.eye(nn * 3, dtype=int), np.ones((3, 3), dtype=int))
cs, rs = template.nonzero() # Note we're transposing the matrix at each node here
self.declare_partials('O_BI', 'O_RI', rows=rs, cols=cs)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
O_RI = inputs['O_RI']
O_BR = inputs['O_BR']
outputs['O_BI'] = np.matmul(O_RI, O_BR)
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
nn = self.options['num_nodes']
partials['O_BI', 'O_BR'] = np.tile(inputs['O_RI'], 3).ravel()
partials['O_BI', 'O_RI'] = np.tile(np.reshape(inputs['O_BR'], (nn, 9)), 3).ravel()
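if __name__ == '__main__':
    # A hedged sanity check, not part of the component: for C = A @ B at a
    # single node, the row-major-vectorized Jacobians are
    # d vec(C)/d vec(B) = kron(A, I3) and d vec(C)/d vec(A) = kron(I3, B.T).
    rng = np.random.default_rng(0)
    A = rng.random((3, 3))  # stands in for O_RI at one node
    B = rng.random((3, 3))  # stands in for O_BR at one node
    J_B = np.kron(A, np.eye(3))
    J_A = np.kron(np.eye(3), B.T)
    eps = 1.0e-7
    J_B_fd = np.empty((9, 9))
    J_A_fd = np.empty((9, 9))
    for k in range(9):
        dM = np.zeros(9)
        dM[k] = eps
        J_B_fd[:, k] = ((A @ (B + dM.reshape(3, 3)) - A @ B) / eps).ravel()
        J_A_fd[:, k] = (((A + dM.reshape(3, 3)) @ B - A @ B) / eps).ravel()
    assert np.allclose(J_B, J_B_fd, atol=1.0e-5)
    assert np.allclose(J_A, J_A_fd, atol=1.0e-5)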
```
#### File: orbit_dymos/test/test_obi_comp.py
```python
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, IndepVarComp, Group
from openmdao.utils.assert_utils import assert_check_partials
from CADRE.orbit_dymos.obi_comp import OBIComp
class TestOBIComp(unittest.TestCase):
@classmethod
def setUpClass(cls):
nn = 10
cls.p = Problem(model=Group())
ivc = cls.p.model.add_subsystem('ivc', IndepVarComp(), promotes_outputs=['*'])
ivc.add_output('O_BR', val=np.ones((nn, 3, 3)))
ivc.add_output('O_RI', val=np.ones((nn, 3, 3)))
cls.p.model.add_subsystem('obi_comp', OBIComp(num_nodes=nn),
promotes_inputs=['*'], promotes_outputs=['*'])
cls.p.setup(check=True, force_alloc_complex=True)
cls.p['O_BR'] = np.random.rand(nn, 3, 3)
cls.p['O_RI'] = np.random.rand(nn, 3, 3)
cls.p.run_model()
def test_results(self):
pass
def test_partials(self):
np.set_printoptions(linewidth=1024, edgeitems=1000)
cpd = self.p.check_partials(method='cs')
assert_check_partials(cpd)
```
#### File: CADRE/power_dymos/power_cell_voltage.py
```python
from __future__ import print_function, division, absolute_import
from six.moves import range
import os
import numpy as np
from openmdao.api import ExplicitComponent
from MBI import MBI
class PowerCellVoltage(ExplicitComponent):
"""
Compute the output voltage of the solar panels.
"""
def initialize(self):
fpath = os.path.dirname(os.path.realpath(__file__))
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
self.options.declare('filename', fpath + '/../data/Power/curve.dat',
desc="File containing surrogate model for voltage.")
def setup(self):
nn = self.options['num_nodes']
filename = self.options['filename']
dat = np.genfromtxt(filename)
nT, nA, nI = dat[:3]
nT = int(nT)
nA = int(nA)
nI = int(nI)
T = dat[3:3 + nT]
A = dat[3 + nT:3 + nT + nA]
I = dat[3 + nT + nA:3 + nT + nA + nI] # noqa: E741
V = dat[3 + nT + nA + nI:].reshape((nT, nA, nI), order='F')
self.MBI = MBI(V, [T, A, I], [6, 6, 15], [3, 3, 3])
self.x = np.zeros((84 * nn, 3), order='F')
self.xV = self.x.reshape((nn, 7, 12, 3), order='F')
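# xV is a no-copy Fortran-ordered view of x, so filling xV in setx populates the flattened MBI evaluation points in place.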
# Inputs
self.add_input('LOS', np.zeros((nn, )), units=None,
desc='Line of Sight over Time')
self.add_input('temperature', np.zeros((nn, 5)), units='degK',
desc='Temperature of solar cells over time')
self.add_input('exposed_area', np.zeros((nn, 7, 12)), units='m**2',
desc='Exposed area to sun for each solar cell over time')
self.add_input('Isetpt', np.zeros((nn, 12)), units='A',
desc='Currents of the solar panels')
# Outputs
self.add_output('V_sol', np.zeros((nn, 12)), units='V',
desc='Output voltage of solar panel over time')
rows = np.arange(nn*12)
cols = np.tile(np.repeat(0, 12), nn) + np.repeat(np.arange(nn), 12)
self.declare_partials('V_sol', 'LOS', rows=rows, cols=cols)
row = np.tile(np.repeat(0, 5), 12) + np.repeat(np.arange(12), 5)
rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 60)
col = np.tile(np.arange(5), 12)
cols = np.tile(col, nn) + np.repeat(5*np.arange(nn), 60)
self.declare_partials('V_sol', 'temperature', rows=rows, cols=cols)
row = np.tile(np.arange(12), 7)
rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 84)
cols = np.arange(nn*7*12)
self.declare_partials('V_sol', 'exposed_area', rows=rows, cols=cols)
row_col = np.arange(nn*12)
self.declare_partials('V_sol', 'Isetpt', rows=row_col, cols=row_col)
def setx(self, inputs):
temperature = inputs['temperature']
LOS = inputs['LOS']
exposed_area = inputs['exposed_area']
Isetpt = inputs['Isetpt']
for p in range(12):
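# map panel p to its temperature node: panels 0-3 share node 4, panels 4-11 cycle through nodes 0-3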
i = 4 if p < 4 else (p % 4)
for c in range(7):
self.xV[:, c, p, 0] = temperature[:, i]
self.xV[:, c, p, 1] = LOS * exposed_area[:, c, p]
self.xV[:, c, p, 2] = Isetpt[:, p]
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
nn = self.options['num_nodes']
self.setx(inputs)
self.raw = self.MBI.evaluate(self.x)[:, 0].reshape((nn, 7, 12), order='F')
outputs['V_sol'] = np.zeros((nn, 12))
for c in range(7):
outputs['V_sol'] += self.raw[:, c, :]
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
nn = self.options['num_nodes']
exposed_area = inputs['exposed_area']
LOS = inputs['LOS']
raw1 = self.MBI.evaluate(self.x, 1)[:, 0].reshape((nn, 7, 12), order='F')
raw2 = self.MBI.evaluate(self.x, 2)[:, 0].reshape((nn, 7, 12), order='F')
raw3 = self.MBI.evaluate(self.x, 3)[:, 0].reshape((nn, 7, 12), order='F')
dV_dL = np.zeros((nn, 12))
dV_dT = np.zeros((nn, 12, 5))
dV_dA = np.zeros((nn, 7, 12))
dV_dI = np.zeros((nn, 12))
for p in range(12):
i = 4 if p < 4 else (p % 4)
for c in range(7):
dV_dL[:, p] += raw2[:, c, p] * exposed_area[:, c, p]
dV_dT[:, p, i] += raw1[:, c, p]
dV_dA[:, c, p] += raw2[:, c, p] * LOS
dV_dI[:, p] += raw3[:, c, p]
partials['V_sol', 'LOS'] = dV_dL.flatten()
partials['V_sol', 'temperature'] = dV_dT.flatten()
partials['V_sol', 'exposed_area'] = dV_dA.flatten()
partials['V_sol', 'Isetpt'] = dV_dI.flatten()
```
#### File: CADRE/power_dymos/power_group.py
```python
from __future__ import print_function, division, absolute_import
import os
from openmdao.api import Group
from CADRE.power_dymos.power_cell_voltage import PowerCellVoltage
from CADRE.power_dymos.power_solar_power import PowerSolarPower
from CADRE.power_dymos.power_total import PowerTotal
class PowerGroup(Group):
"""
The Power subsystem for CADRE.
Externally sourced inputs:
LOS - Line of sight with the sun.
temperature - Solar cell temperatures.
exposed_area - Exposed cell area to the sun.
Isetpt - Solar panel current setpoints.
P_comm, P_RW - Communication and reaction wheel power draws.
"""
def initialize(self):
fpath = os.path.dirname(os.path.realpath(__file__))
self.options.declare('num_nodes', types=(int,),
desc="Number of time points.")
self.options.declare('filename', fpath + '/../data/Power/curve.dat',
desc="File containing surrogate model for voltage.")
def setup(self):
nn = self.options['num_nodes']
self.add_subsystem('cell_voltage_comp',
PowerCellVoltage(num_nodes=nn, filename=self.options['filename']),
promotes_inputs=['LOS', 'temperature', 'exposed_area', 'Isetpt'],
promotes_outputs=['V_sol'])
self.add_subsystem('solar_power_comp',
PowerSolarPower(num_nodes=nn),
promotes_inputs=['Isetpt', 'V_sol'], promotes_outputs=['P_sol'])
self.add_subsystem('total_power_comp',
PowerTotal(num_nodes=nn),
promotes_inputs=['P_sol', 'P_comm', 'P_RW'],
promotes_outputs=['P_bat'])
```
#### File: CADRE/power_dymos/power_solar_power.py
```python
from __future__ import print_function, division, absolute_import
from six.moves import range
import numpy as np
from openmdao.api import ExplicitComponent
class PowerSolarPower(ExplicitComponent):
"""
Compute the output power of the solar panels.
"""
def initialize(self):
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
def setup(self):
nn = self.options['num_nodes']
# Inputs
self.add_input('Isetpt', np.zeros((nn, 12)), units='A',
desc='Currents of the solar panels')
self.add_input('V_sol', np.zeros((nn, 12)), units='V',
desc='Output voltage of solar panel over time')
self.add_output('P_sol', np.zeros((nn, )), units='W',
desc='Solar panels power over time')
rows = np.tile(np.repeat(0, 12), nn) + np.repeat(np.arange(nn), 12)
cols = np.arange(12*nn)
self.declare_partials('P_sol', 'Isetpt', rows=rows, cols=cols)
self.declare_partials('P_sol', 'V_sol', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
V_sol = inputs['V_sol']
Isetpt = inputs['Isetpt']
outputs['P_sol'] = np.einsum('ij,ij->i', V_sol, Isetpt)
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
V_sol = inputs['V_sol']
Isetpt = inputs['Isetpt']
partials['P_sol', 'Isetpt'] = V_sol.flatten()
partials['P_sol', 'V_sol'] = Isetpt.flatten()
```
#### File: CADRE/rw_dymos/rw_power.py
```python
from __future__ import print_function, division, absolute_import
from six.moves import range
import numpy as np
from openmdao.api import ExplicitComponent
class ReactionWheelPowerComp(ExplicitComponent):
"""
Compute reaction wheel power.
"""
def initialize(self):
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
# Curve fit from manufacturer-provided data.
# I = I_0 + (a*w + b*T)**2
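# Power then follows as P = V*I with a constant bus voltage V (see compute).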
self.options.declare('I0', 0.017,
desc="Base Current for zero torque and angular velocity.")
self.options.declare('a', 4.9e-4,
desc="Current coefficient for angular velocity.")
self.options.declare('b', 4.5e2,
desc="Current coefficient for torque.")
self.options.declare('V', 4.0,
desc='Reaction wheel voltage.')
def setup(self):
nn = self.options['num_nodes']
# Inputs
self.add_input('w_RW', np.zeros((nn, 3)), units='1/s',
desc='Angular velocity vector of reaction wheel over time')
self.add_input('T_RW', np.zeros((nn, 3)), units='N*m',
desc='Torque vector of reaction wheel over time')
# Outputs
self.add_output('P_RW', np.ones((nn, 3)), units='W',
desc='Reaction wheel power over time')
row_col = np.arange(3*nn)
self.declare_partials('P_RW', 'w_RW', rows=row_col, cols=row_col)
self.declare_partials('P_RW', 'T_RW', rows=row_col, cols=row_col)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
a = self.options['a']
b = self.options['b']
V = self.options['V']
I0 = self.options['I0']
w_RW = inputs['w_RW']
T_RW = inputs['T_RW']
outputs['P_RW'] = V * ((a * w_RW + b * T_RW)**2 + I0)
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
a = self.options['a']
b = self.options['b']
V = self.options['V']
I0 = self.options['I0']
w_RW = inputs['w_RW']
T_RW = inputs['T_RW']
prod = 2.0 * V * (a * w_RW + b * T_RW)
dP_dw = a * prod
dP_dT = b * prod
partials['P_RW', 'w_RW'] = dP_dw.flatten()
partials['P_RW', 'T_RW'] = dP_dT.flatten()
```
#### File: CADRE/CADRE/solar_dymos.py
```python
from __future__ import print_function, division, absolute_import
from six.moves import range
import os
import numpy as np
from openmdao.core.explicitcomponent import ExplicitComponent
from CADRE.kinematics import fixangles
from MBI import MBI
try:
from postprocessing.MultiView.MultiView import MultiView
multiview_installed = True
except:
multiview_installed = False
from smt.surrogate_models import RMTB, RMTC, KRG
USE_SMT = True
class SolarExposedAreaComp(ExplicitComponent):
"""
Exposed area calculation for a given solar cell
p: panel ID [0,11]
c: cell ID [0,6]
a: fin angle [0,90]
z: azimuth [0,360]
e: elevation [0,180]
LOS: line of sight with the sun [0,1]
"""
def initialize(self):
fpath = os.path.dirname(os.path.realpath(__file__))
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
self.options.declare('raw1_file', fpath + '/data/Solar/Area10.txt',
desc="angle, azimuth, elevation points for exposed area interpolation.")
self.options.declare('raw2_file', fpath + '/data/Solar/Area_all.txt',
desc="exposed area at points in raw1_file for exposed area interpolation.")
def setup(self):
nn = self.options['num_nodes']
raw1_file = self.options['raw1_file']
raw2_file = self.options['raw2_file']
raw1 = np.genfromtxt(raw1_file)
raw2 = np.loadtxt(raw2_file)
nc = self.nc = 7
self.np = 12
ncp = self.nc * self.np
self.na = 10
self.nz = 73
self.ne = 37
angle = np.zeros(self.na)
azimuth = np.zeros(self.nz)
elevation = np.zeros(self.ne)
index = 0
for i in range(self.na):
angle[i] = raw1[index]
index += 1
for i in range(self.nz):
azimuth[i] = raw1[index]
index += 1
index -= 1
azimuth[self.nz - 1] = 2.0 * np.pi
for i in range(self.ne):
elevation[i] = raw1[index]
index += 1
angle[0] = 0.0
angle[-1] = np.pi / 2.0
azimuth[0] = 0.0
azimuth[-1] = 2 * np.pi
elevation[0] = 0.0
elevation[-1] = np.pi
counter = 0
data = np.zeros((self.na, self.nz, self.ne, self.np * self.nc))
flat_size = self.na * self.nz * self.ne
for p in range(self.np):
for c in range(nc):
data[:, :, :, counter] = \
raw2[nc * p + c][119:119 + flat_size].reshape((self.na,
self.nz,
self.ne))
counter += 1
# self.MBI = MBI(data, [angle, azimuth, elevation],
# [4, 10, 8],
# [4, 4, 4])
angles, azimuths, elevations = np.meshgrid(angle, azimuth, elevation, indexing='ij')
xt = np.array([angles.flatten(), azimuths.flatten(), elevations.flatten()]).T
yt = np.zeros((flat_size, ncp))
counter = 0
for p in range(self.np):
for c in range(nc):
yt[:, counter] = data[:, :, :, counter].flatten()
counter += 1
xlimits = np.array([
[angle[0], angle[-1]],
[azimuth[0], azimuth[-1]],
[elevation[0], elevation[-1]],
])
this_dir = os.path.split(__file__)[0]
# Create the _smt_cache directory if it doesn't exist
if not os.path.exists(os.path.join(this_dir, '_smt_cache')):
os.makedirs(os.path.join(this_dir, '_smt_cache'))
self.interp = interp = RMTB(
xlimits=xlimits,
num_ctrl_pts=8,
order=4,
approx_order=4,
nonlinear_maxiter=2,
solver_tolerance=1.e-20,
energy_weight=1.e-4,
regularization_weight=1.e-14,
# smoothness=np.array([1., 1., 1.]),
extrapolate=False,
print_global=True,
data_dir=os.path.join(this_dir, '_smt_cache'),
)
interp.set_training_values(xt, yt)
interp.train()
if multiview_installed:
info = {'nx':3,
'ny':ncp,
'user_func':interp.predict_values,
'resolution':100,
'plot_size':8,
'dimension_names':[
'Angle',
'Azimuth',
'Elevation'],
'bounds':xlimits.tolist(),
'X_dimension':0,
'Y_dimension':1,
'scatter_points':[xt, yt],
'dist_range': 0.0,
}
# Initialize display parameters and draw GUI
MultiView(info)
self.x = np.zeros((nn, 3))
# Inputs
self.add_input('fin_angle', 0.0, units='rad',
desc='Fin angle of solar panel')
self.add_input('azimuth', np.zeros((nn, )), units='rad',
desc='Azimuth angle of the sun in the body-fixed frame over time')
self.add_input('elevation', np.zeros((nn, )), units='rad',
desc='Elevation angle of the sun in the body-fixed frame over time')
# Outputs
self.add_output('exposed_area', np.zeros((nn, self.nc, self.np)),
desc='Exposed area to sun for each solar cell over time',
units='m**2', lower=-5e-3, upper=1.834e-1)
self.declare_partials('exposed_area', 'fin_angle')
rows = np.tile(np.arange(ncp), nn) + np.repeat(ncp*np.arange(nn), ncp)
cols = np.tile(np.repeat(0, ncp), nn) + np.repeat(np.arange(nn), ncp)
self.declare_partials('exposed_area', 'azimuth', rows=rows, cols=cols)
self.declare_partials('exposed_area', 'elevation', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
nn = self.options['num_nodes']
self.setx(inputs)
if USE_SMT:
P = self.interp.predict_values(self.x)
else:
P = self.MBI.evaluate(self.x)
outputs['exposed_area'] = P.reshape(nn, self.nc, self.np, order='F')
def setx(self, inputs):
"""
Sets our state array
"""
nn = self.options['num_nodes']
result = fixangles(nn, inputs['azimuth'], inputs['elevation'])
self.x[:, 0] = inputs['fin_angle']
self.x[:, 1] = result[0]
self.x[:, 2] = result[1]
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
nn = self.options['num_nodes']
if USE_SMT:
Jfin = self.interp.predict_derivatives(self.x, 0).reshape(nn, self.nc, self.np, order='F')
Jaz = self.interp.predict_derivatives(self.x, 1).reshape(nn, self.nc, self.np, order='F')
Jel = self.interp.predict_derivatives(self.x, 2).reshape(nn, self.nc, self.np, order='F')
else:
Jfin = self.MBI.evaluate(self.x, 1).reshape(nn, self.nc, self.np, order='F')
Jaz = self.MBI.evaluate(self.x, 2).reshape(nn, self.nc, self.np, order='F')
Jel = self.MBI.evaluate(self.x, 3).reshape(nn, self.nc, self.np, order='F')
partials['exposed_area', 'fin_angle'] = Jfin.flatten()
partials['exposed_area', 'azimuth'] = Jaz.flatten()
partials['exposed_area', 'elevation'] = Jel.flatten()
```
#### File: CADRE/CADRE/sun.py
```python
from six.moves import range
import numpy as np
import scipy.sparse
from openmdao.core.explicitcomponent import ExplicitComponent
from CADRE.kinematics import computepositionspherical, computepositionsphericaljacobian
class Sun_LOS(ExplicitComponent):
"""
Compute the satellite-to-sun line of sight.
"""
def __init__(self, n=2):
super(Sun_LOS, self).__init__()
self.n = n
# Earth's radius is 6378 km. 0.85 is the alpha in John Hwang's paper
self.r1 = 6378.137 * 0.85
self.r2 = 6378.137
def setup(self):
n = self.n
self.add_input('r_e2b_I', np.zeros((n, 6), order='F'), units=None,
desc='Position and velocity vectors from '
'Earth to satellite in Earth-centered '
'inertial frame over time.')
self.add_input('r_e2s_I', np.zeros((n, 3), order='F'), units='km',
desc='Position vector from Earth to sun in Earth-centered '
'inertial frame over time.')
self.add_output('LOS', np.zeros((n, ), order='F'), units=None,
desc='Satellite to sun line of sight over time')
rows = np.tile(np.repeat(0, 3), n) + np.repeat(np.arange(n), 3)
cols = np.tile(np.arange(3), n) + np.repeat(6*np.arange(n), 3)
self.declare_partials('LOS', 'r_e2b_I', rows=rows, cols=cols)
rows = np.tile(np.repeat(0, 3), n) + np.repeat(np.arange(n), 3)
cols = np.arange(n*3)
self.declare_partials('LOS', 'r_e2s_I', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
n = self.n
r_e2b_I = inputs['r_e2b_I']
r_e2s_I = inputs['r_e2s_I']
LOS = outputs['LOS']
for i in range(n):
r_b = r_e2b_I[i, :3]
r_s = r_e2s_I[i, :3]
dot = np.dot(r_b, r_s)
cross = np.cross(r_b, r_s)
dist = np.sqrt(cross.dot(cross))
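# dist is the perpendicular distance of the satellite from the Earth-sun line (r_e2s_I is unit length);
# LOS blends smoothly from 0 to 1 via the cubic smoothstep 3x**2 - 2x**3 as dist moves from r1 (full shadow) to r2 (full visibility).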
if dot >= 0.0:
LOS[i] = 1.0
elif dist <= self.r1:
LOS[i] = 0.0
elif dist >= self.r2:
LOS[i] = 1.0
else:
x = (dist - self.r1) / (self.r2 - self.r1)
LOS[i] = 3*x**2 - 2*x**3
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
r_e2b_I = inputs['r_e2b_I']
r_e2s_I = inputs['r_e2s_I']
nj = 3*self.n
Jab = np.zeros(shape=(nj, ), dtype=float)
Jas = np.zeros(shape=(nj, ), dtype=float)
for i in range(self.n):
r_b = r_e2b_I[i, :3]
r_s = r_e2s_I[i, :3]
dot = np.dot(r_b, r_s)
cross = np.cross(r_b, r_s)
dist = np.sqrt(np.dot(cross, cross))
if dot >= 0.0:
continue
elif dist <= self.r1:
continue
elif dist >= self.r2:
continue
else:
x = (dist-self.r1)/(self.r2-self.r1)
# LOS = 3*x**2 - 2*x**3
ddist_dcross = cross/dist
dcross_drb = crossMatrix(-r_s)
dcross_drs = crossMatrix(r_b)
dx_ddist = 1.0/(self.r2 - self.r1)
dLOS_dx = 6*x - 6*x**2
dLOS_drb = dLOS_dx * dx_ddist * np.dot(ddist_dcross, dcross_drb)
dLOS_drs = dLOS_dx * dx_ddist * np.dot(ddist_dcross, dcross_drs)
Jab[i*3:i*3+3] = dLOS_drb
Jas[i*3:i*3+3] = dLOS_drs
partials['LOS', 'r_e2b_I'] = Jab
partials['LOS', 'r_e2s_I'] = Jas
def crossMatrix(v):
# so m[1,0] is v[2], for example
m = np.array([[0.0, -v[2], v[1]],
[v[2], 0.0, -v[0]],
[-v[1], v[0], 0.0]])
return m
class Sun_PositionBody(ExplicitComponent):
"""
Position vector from Earth to Sun in the body-fixed frame.
"""
def __init__(self, n=2):
super(Sun_PositionBody, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('O_BI', np.zeros((n, 3, 3)), units=None,
desc='Rotation matrix from the Earth-centered inertial frame '
'to the satellite frame.')
self.add_input('r_e2s_I', np.zeros((n, 3)), units='km',
desc='Position vector from Earth to Sun in Earth-centered '
'inertial frame over time.')
# Outputs
self.add_output('r_e2s_B', np.zeros((n, 3)), units='km',
desc='Position vector from Earth to Sun in body-fixed '
'frame over time.')
row = np.tile(np.repeat(0, 3), 3) + np.repeat(np.arange(3), 3)
col = np.tile(np.arange(3), 3)
rows = np.tile(row, n) + np.repeat(3*np.arange(n), 9)
cols = np.tile(col, n) + np.repeat(3*np.arange(n), 9)
self.declare_partials('r_e2s_B', 'r_e2s_I', rows=rows, cols=cols)
row = np.tile(np.array([0, 0, 0]), n) + np.repeat(3*np.arange(n), 3)
col = np.tile(np.arange(3), n) + np.repeat(9*np.arange(n), 3)
rows = np.concatenate([row, row+1, row+2])
cols = np.concatenate([col, col+3, col+6])
self.declare_partials('r_e2s_B', 'O_BI', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
outputs['r_e2s_B'] = np.einsum('ijk,ik->ij', inputs['O_BI'], inputs['r_e2s_I'])
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
n = self.n
O_BI = inputs['O_BI']
r_e2s_I = inputs['r_e2s_I']
partials['r_e2s_B', 'r_e2s_I'] = O_BI.flatten()
nn = 3*n
dO_AB = r_e2s_I.flatten()
partials['r_e2s_B', 'O_BI'][:nn] = dO_AB
partials['r_e2s_B', 'O_BI'][nn:2*nn] = dO_AB
partials['r_e2s_B', 'O_BI'][2*nn:3*nn] = dO_AB
class Sun_PositionECI(ExplicitComponent):
"""
Compute the position vector from Earth to Sun in Earth-centered inertial frame.
"""
# constants
d2r = np.pi/180.
def __init__(self, n=2):
super(Sun_PositionECI, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('LD', 0.0, units=None)
self.add_input('t', np.zeros((n, )), units='s', desc='Time')
# Outputs
self.add_output('r_e2s_I', np.zeros((n, 3)), units='km',
desc='Position vector from Earth to Sun in Earth-centered '
'inertial frame over time.')
self.declare_partials('r_e2s_I', 'LD')
rows = np.arange(n*3)
cols = np.tile(np.repeat(0, 3), n) + np.repeat(np.arange(n), 3)
self.declare_partials('r_e2s_I', 't', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
n = self.n
r_e2s_I = outputs['r_e2s_I']
T = inputs['LD'] + inputs['t'][:]/3600./24.
for i in range(0, n):
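# low-precision solar ephemeris: L = mean longitude, g = mean anomaly, Lambda = ecliptic longitude, eps = obliquity of the ecliptic (T in days)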
L = self.d2r*280.460 + self.d2r*0.9856474*T[i]
g = self.d2r*357.528 + self.d2r*0.9856003*T[i]
Lambda = L + self.d2r*1.914666*np.sin(g) + self.d2r*0.01999464*np.sin(2*g)
eps = self.d2r*23.439 - self.d2r*3.56e-7*T[i]
r_e2s_I[i, 0] = np.cos(Lambda)
r_e2s_I[i, 1] = np.sin(Lambda)*np.cos(eps)
r_e2s_I[i, 2] = np.sin(Lambda)*np.sin(eps)
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
n = self.n
tconv = (1.0 / 3600. / 24.)
T = inputs['LD'] + inputs['t'][:] * tconv
dr_dt = np.empty(3)
Ja = np.zeros(3*n)
dL_dt = self.d2r * 0.9856474
dg_dt = self.d2r * 0.9856003
deps_dt = -self.d2r*3.56e-7
for i in range(n):
L = self.d2r*280.460 + self.d2r*0.9856474*T[i]
g = self.d2r*357.528 + self.d2r*0.9856003*T[i]
Lambda = L + self.d2r*1.914666*np.sin(g) + self.d2r*0.01999464*np.sin(2*g)
eps = self.d2r*23.439 - self.d2r*3.56e-7*T[i]
dlambda_dt = (dL_dt + self.d2r*1.914666*np.cos(g)*dg_dt +
self.d2r*0.01999464*np.cos(2*g)*2*dg_dt)
dr_dt[0] = -np.sin(Lambda)*dlambda_dt
dr_dt[1] = np.cos(Lambda)*np.cos(eps)*dlambda_dt - np.sin(Lambda)*np.sin(eps)*deps_dt
dr_dt[2] = np.cos(Lambda)*np.sin(eps)*dlambda_dt + np.sin(Lambda)*np.cos(eps)*deps_dt
Ja[i*3:i*3+3] = dr_dt
dr_e2s = Ja.flatten()
partials['r_e2s_I', 'LD'] = dr_e2s
partials['r_e2s_I', 't'] = dr_e2s * tconv
class Sun_PositionSpherical(ExplicitComponent):
"""
Compute the azimuth and elevation angles of the Sun in the body-fixed frame.
"""
def __init__(self, n=2):
super(Sun_PositionSpherical, self).__init__()
self.n = n
def setup(self):
n = self.n
# Inputs
self.add_input('r_e2s_B', np.zeros((n, 3)), units='km',
desc='Position vector from Earth to Sun in body-fixed '
'frame over time.')
# Outputs
self.add_output('azimuth', np.zeros((n, )), units='rad',
desc='Azimuth angle of the Sun in the body-fixed frame '
'over time.')
self.add_output('elevation', np.zeros((n, )), units='rad',
desc='Elevation angle of the Sun in the body-fixed frame '
'over time.')
rows = np.tile(np.array([0, 0, 0]), n) + np.repeat(np.arange(n), 3)
cols = np.arange(n*3)
self.declare_partials('elevation', 'r_e2s_B', rows=rows, cols=cols)
self.declare_partials('azimuth', 'r_e2s_B', rows=rows, cols=cols)
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
azimuth, elevation = computepositionspherical(self.n, inputs['r_e2s_B'])
outputs['azimuth'] = azimuth
outputs['elevation'] = elevation
def compute_partials(self, inputs, partials):
"""
Calculate and save derivatives. (i.e., Jacobian)
"""
n = self.n
Ja1, Ja2 = computepositionsphericaljacobian(n, 3*n, inputs['r_e2s_B'])
partials['azimuth', 'r_e2s_B'] = Ja1
partials['elevation', 'r_e2s_B'] = Ja2
```
#### File: CADRE/examples/cadre_dymos.py
```python
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, Group, pyOptSparseDriver, DirectSolver, SqliteRecorder
from dymos import Phase
from dymos.utils.indexing import get_src_indices_by_row
from dymos.phases.components import ControlInterpComp
from CADRE.odes_dymos.cadre_orbit_ode import CadreOrbitODE
from CADRE.attitude_dymos.angular_velocity_comp import AngularVelocityComp
from CADRE.odes_dymos.cadre_systems_ode import CadreSystemsODE
GM = 398600.44
rmag = 7000.0
period = 2 * np.pi * np.sqrt(rmag ** 3 / GM)
vcirc = np.sqrt(GM / rmag)
duration = period
duration = 6 * 3600.0
p = Problem(model=Group())
p.driver = pyOptSparseDriver()
p.driver.options['optimizer'] = 'SNOPT'
p.driver.options['dynamic_simul_derivs'] = True
p.driver.opt_settings['Major iterations limit'] = 1000
p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-4
p.driver.opt_settings['Major optimality tolerance'] = 1.0E-4
p.driver.opt_settings['Major step limit'] = 0.1
p.driver.opt_settings['iSumm'] = 6
p.driver.recording_options['includes'] = ['*']
p.driver.recording_options['record_objectives'] = True
p.driver.recording_options['record_constraints'] = True
p.driver.recording_options['record_desvars'] = True
recorder = SqliteRecorder("cases.sql")
p.driver.add_recorder(recorder)
NUM_SEG = 30
TRANSCRIPTION_ORDER = 3
orbit_phase = Phase('radau-ps',
ode_class=CadreOrbitODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('orbit_phase', orbit_phase)
orbit_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
orbit_phase.set_state_options('r_e2b_I', defect_scaler=1000, fix_initial=True, units='km')
orbit_phase.set_state_options('v_e2b_I', defect_scaler=1000, fix_initial=True, units='km/s')
# orbit_phase.set_state_options('SOC', defect_scaler=1, fix_initial=True, units=None)
# orbit_phase.add_design_parameter('P_bat', opt=False, units='W')
orbit_phase.add_control('Gamma', opt=True, lower=-90, upper=90, units='deg', ref0=-90, ref=90,
continuity=True, rate_continuity=True)
# Add a control interp comp to interpolate the rates of O_BI from the orbit phase.
faux_control_options = {'O_BI': {'units': None, 'shape': (3, 3)}}
p.model.add_subsystem('obi_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:O_BI_rate', 'Odot_BI')])
control_input_nodes_idxs = orbit_phase.grid_data.subset_node_indices['control_input']
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'obi_rate_interp_comp.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('orbit_phase.time.dt_dstau',
('obi_rate_interp_comp.dt_dstau', 'w_B_rate_interp_comp.dt_dstau'))
# Use O_BI and Odot_BI to compute the angular velocity vector
p.model.add_subsystem('angular_velocity_comp',
AngularVelocityComp(num_nodes=orbit_phase.grid_data.num_nodes))
p.model.connect('orbit_phase.rhs_all.O_BI', 'angular_velocity_comp.O_BI')
p.model.connect('Odot_BI', 'angular_velocity_comp.Odot_BI')
# Add another interpolation comp to compute the rate of w_B
faux_control_options = {'w_B': {'units': '1/s', 'shape': (3,)}}
p.model.add_subsystem('w_B_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:w_B_rate', 'wdot_B')])
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('angular_velocity_comp.w_B', 'w_B_rate_interp_comp.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
# Now add the systems phase
systems_phase = Phase('radau-ps',
ode_class=CadreSystemsODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('systems_phase', systems_phase)
systems_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
systems_phase.set_state_options('SOC', defect_ref=10, lower=0.2, fix_initial=True, units=None)
systems_phase.set_state_options('w_RW', defect_ref=10000, fix_initial=True, units='1/s')
systems_phase.set_state_options('data', defect_ref=10, fix_initial=True, units='Gibyte')
systems_phase.set_state_options('temperature', ref0=273, ref=373, defect_ref=1000,
fix_initial=True, units='degK')
systems_phase.add_design_parameter('LD', opt=False, units='d')
systems_phase.add_design_parameter('fin_angle', opt=True, lower=0., upper=np.pi / 2.)
systems_phase.add_design_parameter('antAngle', opt=True, lower=-np.pi / 4, upper=np.pi / 4)
systems_phase.add_design_parameter('cellInstd', opt=True, lower=0.0, upper=1.0, ref=1.0)
# Add r_e2b_I and O_BI as non-optimized controls, allowing them to be connected to external sources
systems_phase.add_control('r_e2b_I', opt=False, units='km')
systems_phase.add_control('O_BI', opt=False)
systems_phase.add_control('w_B', opt=False)
systems_phase.add_control('wdot_B', opt=False)
systems_phase.add_control('P_comm', opt=True, lower=0.0, upper=30.0, units='W')
systems_phase.add_control('Isetpt', opt=True, lower=1.0E-4, upper=0.4, units='A')
systems_phase.add_objective('data', loc='final', ref=-1.0)
# Connect r_e2b_I and O_BI values from all nodes in the orbit phase to the input values
# in the attitude phase.
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('orbit_phase.states:r_e2b_I', 'systems_phase.controls:r_e2b_I',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('angular_velocity_comp.w_B', 'systems_phase.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('wdot_B', 'systems_phase.controls:wdot_B',
src_indices=src_idxs, flat_src_indices=True)
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'systems_phase.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.options['assembled_jac_type'] = 'csc'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.setup(check=True)
# from openmdao.api import view_model
# view_model(p.model)
# Initialize values in the orbit phase
p['orbit_phase.t_initial'] = 0.0
p['orbit_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
# Default starting orbit
# [ 2.89078958e+03 5.69493134e+03 -2.55340189e+03 2.56640460e-01
# 3.00387409e+00 6.99018448e+00]
p['orbit_phase.states:r_e2b_I'][:, 0] = 2.89078958e+03
p['orbit_phase.states:r_e2b_I'][:, 1] = 5.69493134e+03
p['orbit_phase.states:r_e2b_I'][:, 2] = -2.55340189e+03
p['orbit_phase.states:v_e2b_I'][:, 0] = 2.56640460e-01
p['orbit_phase.states:v_e2b_I'][:, 1] = 3.00387409e+00
p['orbit_phase.states:v_e2b_I'][:, 2] = 6.99018448e+00
# Initialize values in the systems phase
p['systems_phase.t_initial'] = 0.0
p['systems_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
p['systems_phase.states:SOC'] = systems_phase.interpolate(ys=[1, .5], nodes='state_input')
p['systems_phase.states:w_RW'] = 100.0
p['systems_phase.states:data'] = systems_phase.interpolate(ys=[0, 10], nodes='state_input')
p['systems_phase.states:temperature'] = 273.0
# p['systems_phase.states:v_e2b_I'][:, 0] = 0.0
# p['systems_phase.states:v_e2b_I'][:, 1] = vcirc
# p['systems_phase.states:v_e2b_I'][:, 2] = 0.0
p['systems_phase.controls:P_comm'] = 0.01
p['systems_phase.controls:Isetpt'] = 0.1
p['systems_phase.design_parameters:LD'] = 5233.5
p['systems_phase.design_parameters:fin_angle'] = np.radians(70.0)
p['systems_phase.design_parameters:cellInstd'] = 0.0
p.run_model()
# Simulate the orbit phase to get a (exact) guess to the orbit history solution.
exp_out = orbit_phase.simulate()
# import matplotlib.pyplot as plt
# from mpl_toolkits import mplot3d
#
# plt.figure()
# ax = plt.axes(projection='3d')
# # plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# ax.plot3D(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], exp_out.get_values('r_e2b_I')[:, 2], 'b-')
# plt.show()
p['orbit_phase.states:r_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('r_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p['orbit_phase.states:v_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('v_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p.run_driver()
r_e2b_I = p.model.orbit_phase.get_values('r_e2b_I')
v_e2b_I = p.model.orbit_phase.get_values('v_e2b_I')
rmag_e2b = p.model.orbit_phase.get_values('rmag_e2b_I')
# exp_out = systems_phase.simulate(times=500)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(orbit_phase.get_values('r_e2b_I')[:, 0], orbit_phase.get_values('r_e2b_I')[:, 1], 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('data'), 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_comm'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_sol'), 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_RW'), 'g-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_bat'), 'k-')
plt.figure()
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('SOC'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('dXdt:SOC'), 'r--')
plt.show()
# plt.figure()
# plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
# assert_rel_error(self, rmag_e2b, rmag * np.ones_like(rmag_e2b), tolerance=1.0E-9)
# delta_trua = 2 * np.pi * (duration / period)
# assert_rel_error(self, r_e2b_I[-1, :],
# rmag * np.array([np.cos(delta_trua), np.sin(delta_trua), 0]),
# tolerance=1.0E-9)
# assert_rel_error(self, v_e2b_I[-1, :],
# vcirc * np.array([-np.sin(delta_trua), np.cos(delta_trua), 0]),
# tolerance=1.0E-9)
# def test_partials(self):
# np.set_printoptions(linewidth=10000, edgeitems=1024)
# cpd = self.p.check_partials(compact_print=True, out_stream=None)
# assert_check_partials(cpd, atol=1.0E-4, rtol=1.0)
#
# def test_simulate(self):
# phase = self.p.model.orbit_phase
# exp_out = phase.simulate(times=500)
#
# import matplotlib.pyplot as plt
#
# plt.figure()
# plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# plt.plot(phase.get_values('r_e2b_I')[:, 0], phase.get_values('r_e2b_I')[:, 1], 'ro')
#
# # plt.figure()
# # plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# # plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
#
# plt.show()
``` |
{
"source": "johnjasa/dt-qp-py-project",
"score": 2
} |
#### File: Linear-Quadratic/fowt/f_dtqp_fowt.py
```python
from mat4py import loadmat
from scipy.signal import filtfilt
import numpy as np
from scipy.interpolate import interp1d,PchipInterpolator
import matplotlib.pyplot as plt
import os
import sys
from dtqpy.src.classes.DTQPy_CLASS_OPTS import *
from dtqpy.src.classes.DTQPy_CLASS_SETUP import *
from dtqpy.src.DTQPy_solve import DTQPy_solve
def f_dtqp_fowt(LinearModels,disturbance):
# load linear models
Chan = LinearModels['Chan']
#breakpoint()
# obtain the size of the arrays
nl = len(Chan)
nx,nx = np.shape(Chan[0]['A'])
nx,nu = np.shape(Chan[0]['B'])
ny = len(LinearModels['OutName'])
OutputName = LinearModels['OutName']
# initialize
Aw = np.zeros((nl,nx,nx))
Bw = np.zeros((nl,nx,nu))
Cw = np.zeros((nl,ny,nx))
Dw = np.zeros((nl,ny,nu))
xw = np.zeros((nx,nl))
uw = np.zeros((nu,nl))
yw = np.zeros((nl,ny))
ws = np.zeros((nl))
# collect
for i in range(nl):
Aw[i,:,:] = np.array(Chan[i]['A'])
Bw[i,:,:] = np.array(Chan[i]['B'])
Cw[i,:,:] = np.array(Chan[i]['C'])
Dw[i,:,:] = np.array(Chan[i]['D'])
xw[:,i] = np.squeeze(np.array(Chan[i]['xop']))
uw[:,i] = np.squeeze(np.array(Chan[i]['uop']))
yw[i,:] = np.squeeze(np.array(Chan[i]['yop']))
ws[i] = Chan[i]['WindSpeed']
# construct LPV models
# A matrix
A_op_pp = PchipInterpolator(ws, Aw, axis = 0)
A_op = lambda w: A_op_pp(w)
# Bmatrix
B_op_pp = PchipInterpolator(ws, Bw, axis = 0)
B_op = lambda w: B_op_pp(w)
# Cmatrix
C_op_pp = PchipInterpolator(ws,Cw,axis = 0)
C_op = lambda w: C_op_pp(w)
# Dmatrix
D_op_pp = PchipInterpolator(ws,Dw,axis = 0)
D_op = lambda w: D_op_pp(w)
# control operating points
Uo_pp = PchipInterpolator(ws,uw,axis = 1)
Uo_fun = lambda w: Uo_pp(w)
# state operating points
Xo_pp = PchipInterpolator(ws, xw, axis = 1)
Xo_fun = lambda w: Xo_pp(w)
# outputs
Yo_pp = PchipInterpolator(ws, yw, axis = 0)
Yo_fun = lambda w: Yo_pp(w)
# first time derivative of state operating points
DXo_pp = Xo_pp.derivative(nu=1)
DXo_fun = lambda w: DXo_pp(w)
Wind_o = disturbance['Chan']
Wind_speed = np.array(Wind_o['RtVAvgxh'])
tt = np.array(Wind_o['tt'])
filterflag = 1
if filterflag:
t_f = 1
dt = tt[2,0]-tt[1,0]
nb = int(np.floor(t_f/dt))
b = np.ones((nb,))/nb
Wind_speed = filtfilt(b,1,Wind_speed,axis = 0)
opts = options()
opts.dt.nt = 1000
opts.solver.tolerence = 1e-16
opts.solver.maxiters = 1000000
opts.solver.function = 'pyoptsparse'
time = np.linspace(tt[0],tt[-1],opts.dt.nt)
W_pp = PchipInterpolator(np.squeeze(tt),np.squeeze(Wind_speed))
dW_pp = W_pp.derivative(nu=1)
DW_fun = lambda t: dW_pp(t)
W_fun = lambda t: W_pp(t)
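# chain rule: d/dt Xo(w(t)) = (dXo/dw) * (dw/dt); negated because the operating-point drift
# enters the deviation-state (LPV) dynamics as the disturbance d = -dXo/dt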
DXoDt_fun = lambda t: (-DXo_fun(W_fun(t)).T*DW_fun(t)).T
def BuildLambda(Ax):
return lambda t: Ax(t)
def TVmat2cell(f,time):
"""
function to convert an nt x m x n time-varying matrix into an m x n object array of time functions
"""
# evaluate function
At = f(time)
s = np.shape(At)
if len(s) ==4:
At = np.squeeze(At)
elif len(s) == 3:
At = np.squeeze(At)
At= At.T
# get size
try:
null,m,n = np.shape(At)
except:
null,m = np.shape(At)
n = 1
# initialize storage
A = np.empty((m,n),dtype = 'O')
for i in range(m):
for j in range(n):
try:
Ax = PchipInterpolator(np.squeeze(time),At[:,i,j],axis = 0)
except:
Ax = PchipInterpolator(np.squeeze(time),At[:,i],axis = 0)
# work around, as defining lambda functions in a loop in python is tricky
A[i,j] = BuildLambda(Ax)
return A
## Disc2 cont
def BuildFunction(w_ops,X):
Xpp = PchipInterpolator(w_ops,X)
return lambda w: Xpp(w)
# Generator speed function
GS_fun = BuildFunction(ws,xw[4,:])
# -1*GS function
GSn_fun = BuildFunction(ws,-xw[4,:])
# Generator torque
GT_fun = BuildFunction(ws,uw[1,:])
# -Generator torque
GTn_fun = BuildFunction(ws,-uw[1,:])
# Blade pitch
BP_fun = BuildFunction(ws,uw[2,:])
# Generator power
GP_fun = BuildFunction(ws,-uw[1,:]*xw[4,:])
# State operating point values
r = Xo_fun(ws)
# helper to pick the i-th row from an evaluated operating-point array
indexat = lambda expr,index: expr[index,:]
# get shape
nws,nx,nu = np.shape(Bw)
# initialize
ub = np.ones((nx,1))*np.inf
lb = -np.ones((nx,1))*np.inf
# set ub values for PtfmPitch and Genspeed
ub[0] = np.deg2rad(6)
ub[4] = 0.7913+0.0001
# initialize
UBx = np.empty((nx,1),dtype = 'O')
LBx = np.empty((nx,1),dtype = 'O')
# need this function to correctly bind anonymous functions defined in a loop in python
def BuildLambdaUB(ub,indexat,Xo_fun,W_fun,i):
return lambda t: ub - indexat(Xo_fun(W_fun(t)),i)
# build ub and lb functions
for i in range(nx):
UBx[i,0] = BuildLambdaUB(ub[i],indexat,Xo_fun,W_fun,i)
LBx[i,0] = BuildLambdaUB(lb[i],indexat,Xo_fun,W_fun,i)
# control bounds
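# the first control channel is the wind disturbance itself, so its deviation bounds are pinned to zero via W_fun(t) - W_fun(t)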
UBc = np.array([[lambda t: W_fun(t)-W_fun(t)],
[lambda t: max(uw[1,:])-GT_fun(W_fun(t))],
[lambda t: max(uw[2,:])-BP_fun(W_fun(t))]])
LBc = np.array([[lambda t: W_fun(t)-W_fun(t)],
[lambda t: min(uw[1,:])-GT_fun(W_fun(t))],
[lambda t: min(uw[2,:])-BP_fun(W_fun(t))]])
# initial state
X0_n = np.array( [[0.0493],
[0.1957],
[0.0000],
[0.0001],
[0.7913],
[0],
[0],
[0]])
UBs = X0_n - Xo_fun(W_fun(0))[None].T
LBs = X0_n - Xo_fun(W_fun(0))[None].T
# UB,LB
UB = [Simple_Bounds() for n in range(3)]
LB = [Simple_Bounds() for n in range(3)]
# states
UB[0].right = 2
UB[0].matrix = UBx
LB[0].right = 2
LB[0].matrix = LBx
# control bounds
UB[1].right = 1
UB[1].matrix = UBc
LB[1].right = 1
LB[1].matrix = LBc
# initial state
UB[2].right = 4
UB[2].matrix = UBs
LB[2].right = 4
LB[2].matrix = LBs
# lagrange terms
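# R1/R2 penalize generator-torque and blade-pitch effort; the remaining terms expand
# -(GT + du1)*(GS + dx4) about the operating point, so the objective also maximizes generator power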
R1 = 1e-0; R2 = 1e+8
lx = 0
L = [LQ_objective() for n in range(5)]
# uRu
L[lx].left = 1
L[lx].right = 1
L[lx].matrix = np.diag([0,R1,R2])
lx = lx+1
# uPX
L[lx].left = 1
L[lx].right = 2
Lmat = np.zeros((nu,nx)); Lmat[1,4] = -1
L[lx].matrix = Lmat
lx = lx+1
L[lx].left = 0;
L[lx].right = 1
L2mat = np.zeros((1,nu),dtype = 'O')
L2mat[0,1] = lambda t: GSn_fun(W_fun(t))
L[lx].matrix = L2mat
lx = lx+1
L[lx].left = 0
L[lx].right = 2
L3mat = np.zeros((1,nx),dtype = 'O')
L3mat[0,4] = lambda t: GTn_fun(W_fun(t))
L[lx].matrix = L3mat
lx = lx+1
L[lx].left = 0
L[lx].right = 0
L4mat = np.empty((1,1),dtype = 'O')
L4mat[0,0] = lambda t: GP_fun(W_fun(t))
L[lx].matrix = L4mat
#
scale = Scaling(right = 1, matrix = np.array([1,1e-16,1e-4]))
# setup
s = setup()
s.A = TVmat2cell(lambda t: A_op(W_fun(t)),time)
s.B = TVmat2cell(lambda t: B_op(W_fun(t)),time)
s.d = TVmat2cell(DXoDt_fun,time)
s.Lagrange = L
s.UB = UB
s.LB = LB
s.Scaling = scale
s.t0 = 0
s.tf = 600
#breakpoint()
[T,Ul,Xl,P,F,internal,opts] = DTQPy_solve(s,opts)
# calculate offset
Xo_off = np.squeeze(Xo_fun(W_fun(T))).T
Uo_off = np.squeeze(Uo_fun(W_fun(T))).T
# Add offset to estimated states
X = Xl + Xo_off
U = Ul + Uo_off
# plot
fig, ((ax1,ax2,ax3)) = plt.subplots(3,1,)
# wind
ax1.plot(T,U[:,0])
ax1.set_title('Wind Speed [m/s]')
ax1.set_xlim([0,600])
# torque
ax2.plot(T,U[:,1]/1e+07)
ax2.set_ylim([1.8,2])
ax2.set_title('Gen Torque [MWm]')
ax2.set_xlim([0,600])
# blade pitch
ax3.plot(T,U[:,2])
#ax3.set_ylim([0.2, 0.3])
ax3.set_title('Bld Pitch [rad]')
ax3.set_xlim([0,600])
fig.subplots_adjust(hspace = 0.65)
fig2, ((ax1,ax2)) = plt.subplots(2,1)
# PtfmPitch
ax1.plot(T,np.rad2deg(X[:,0]))
ax1.set_xlim([0,600])
ax1.set_title('Ptfm Pitch [deg]')
# GenSpeed
ax2.plot(T,X[:,4])
ax2.set_xlim([0,600])
ax2.set_title('Gen Speed [rad/s]')
fig2.subplots_adjust(hspace = 0.65)
plt.show()
if __name__ == '__main__':
ex_name = os.path.dirname(os.path.realpath(__file__))
Wind_file = ex_name + os.sep + '072720_183300.mat'
Wind_o = loadmat(Wind_file)
Linfile = ex_name + os.sep +'SS2py.mat'
LinearModels = loadmat(Linfile)
f_dtqp_fowt(LinearModels,Wind_o)
```
#### File: dtqpy/src/DTQPy_create.py
```python
import numpy as np
from scipy import sparse
from dtqpy.src.classes.DTQPy_CLASS_INTERNAL import *
from dtqpy.src.objective.DTQPy_createH import DTQPy_createH
from dtqpy.src.objective.DTQPy_createf import DTQPy_createf
from dtqpy.src.objective.DTQPy_createc import DTQPy_createc
from dtqpy.src.defects.DTQPy_DEFECTS import DTQPy_DEFECTS
from dtqpy.src.add_constraints.DTQPy_create_YZ import DTQPy_create_YZ
from dtqpy.src.add_constraints.DTQPy_create_bnds import DTQPy_create_bnds
def DTQPy_create(setup,opts):
# initialize some stuff
setup,internal = DTQPy_initialize(setup,opts.dt)
# objective function
H = DTQPy_createH(setup.Lquadratic,setup.Mquadratic,internal,opts)
f = DTQPy_createf(setup.Llinear,setup.Mlinear,internal,opts)
c = DTQPy_createc(setup.Lconstant,setup.Mconstant,internal,opts)
# constraints
# defect constraints
Aeq1,beq1 = DTQPy_DEFECTS(setup.A,setup.B,setup.G,setup.d,internal,opts)
# linear path and boundary equality constraints
Aeq2,beq2 = DTQPy_create_YZ(setup.Y,internal)
# combine linear equality constraints
Aeq = sparse.vstack([Aeq1,Aeq2])
beq = sparse.vstack([beq1,beq2])
# path and boundary inequality constraints
A,b = DTQPy_create_YZ(setup.Z,internal)
# create simple bounds
lb,ub = DTQPy_create_bnds(setup.LB,setup.UB,internal)
return H,f,c,A,b,Aeq,beq,lb,ub,setup,internal,opts
```
#### File: dtqpy/src/DTQPy_scalingLinear.py
```python
import numpy as np
import types
from scipy.sparse import csc_matrix
from scipy import sparse
def createScalingVector(y,ny,T,nt):
# create the scaling vector for the particular input
if type(y) is types.LambdaType:
# evaluate time-varying function
Y = y(T)
# reshape time-based matrix to column vector
Y = Y.reshape(-1,1)
elif np.size(y) == ny:
# expand scalar scaling
Y = np.repeat(y.reshape(-1,1),[nt])
Y = Y.reshape(-1,1)
elif np.shape(y) == (nt, ny):
# reshape time-based matrix to column vector
Y = y.reshape(-1,1)
else:
raise ValueError("Wrong Size")
return Y
def DTQPy_scalingLinear(H,f,c,A,b,Aeq,beq,lb,ub,internal,scaling):
# extract values
T = internal.t; nt = internal.nt; nu = internal.nu; ny = internal.ny; npl = internal.npl
# initialize matrices
s1mat = np.ones((nu*nt,1)); s2mat = np.ones((ny*nt,1)); s3mat = np.ones((npl,1))
s1con = np.zeros((nu*nt,1)); s2con = np.zeros((ny*nt,1)); s3con = np.zeros((npl,1))
for k in range(len(scaling)):
# extract
mat = scaling[k].matrix
sc = scaling[k].constant
right = scaling[k].right
if right == 1:
# controls
s1mat = createScalingVector(mat,nu,T,nt)
s1con = createScalingVector(sc,nu,T,nt)
elif right == 2:
# states
s2mat = createScalingVector(mat,ny,T,nt)
s2con = createScalingVector(sc,ny,T,nt)
elif right ==3:
# parameters
s3mat = createScalingVector(mat,npl,[],nt)
s3con = createScalingVector(sc,npl,[],nt)
else:
raise ValueError("")
# combine
sm = np.vstack([s1mat,s2mat,s3mat])
nR = len(sm)
r = np.arange(nR)
# scaling diagonal matrix
sM = csc_matrix((np.squeeze(sm),(r,r)),shape = (nR,nR))
# scaling constant vector
sC = sparse.vstack([s1con,s2con,s3con])
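# apply the affine change of variables X = diag(sm)*Xs + sC: the cost terms, linear constraints,
# and simple bounds below are all rewritten in the scaled variable Xs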
if any(f.nonzero()[0]):
c = c+ ((f.T).dot(sC)).todense()
if any(H.nonzero()[0]):
c = c + 0.5*((sC.T).dot(H.dot(sC))).todense()
if any(f.nonzero()[0]):
f = sM*f
if any(H.nonzero()[0]):
if any(f.nonzero()[0]):
f = f + sM.dot(H.dot(sC))
else:
f = sM.dot(H.dot(sC))
H = sM.dot(H.dot(sM))
if any(A.nonzero()[0]):
b = b - A.dot(sC)
A = A.dot(sM)
if any(Aeq.nonzero()[0]):
beq = beq - Aeq.dot(sC)
Aeq = Aeq.dot(sM)
if any(ub):
ub = (ub - np.squeeze(sC.todense()))/np.squeeze(sm)
if any(lb):
lb = (lb - np.squeeze(sC.todense()))/np.squeeze(sm)
return H,f,c,A,b,Aeq,beq,lb.T,ub.T,internal,sm,sC.todense()
```
#### File: src/objective/DTQPy_M.py
```python
import numpy as np
from numpy.matlib import repmat
# import DTQPy specific functions
from dtqpy.src.utilities.DTQPy_tmultiprod import DTQPy_tmultiprod
from dtqpy.src.DTQPy_getQPIndex import DTQPy_getQPIndex
def DTQPy_M(Mfull,internal,opts):
# extract variables
nt = internal.nt; IN = internal.IN; I_stored = internal.I_stored
# initialize storage arrays
Isav = np.array([]); Jsav = np.array([]); Vsav = np.array([])
# go through each Mayer term
for k in range(len(Mfull)):
# obtain current substructure
Mleft = Mfull[k].left
Mright = Mfull[k].right
Mmatrix = Mfull[k].matrix
# obtain matrix
Mt = DTQPy_tmultiprod(Mmatrix,[],np.array([0]))
if Mleft != 0:
R = IN[Mleft-1]
else:
R = np.array([0])
if Mright !=0:
#breakpoint()
C = IN[Mright-1]
else:
C = np.array([0])
# determine locations and matrix values at these points
for i in range(len(R)):
for j in range(len(C)):
# get current matrix value
Mv = Mt[:,i,j]
if Mv.any():
# hessian index sequence
r = DTQPy_getQPIndex(R[i],Mleft,0,nt,I_stored)
c = DTQPy_getQPIndex(C[j],Mright,0,nt,I_stored)
# assign
Isav = np.append(Isav,r)
Jsav = np.append(Jsav,c)
Vsav = np.append(Vsav,Mv)
return Isav,Jsav,Vsav
```
#### File: src/utilities/DTQPy_tmatrix.py
```python
import numpy as np
from numpy.matlib import repmat
import types
def DTQPy_tmatrix(A,p,*args):
# check if another time mesh is imputted
if len(args) !=0:
t = args[0]
else:
t = p.t
# convert lambda function to a np array of type object
if type(A) is types.LambdaType:
Ae = np.empty((1,1),dtype ='O')
Ae[0,0] = A
A = Ae
# if A is empty
if len(A)==0:
At = np.array([])
elif A.dtype.kind == 'f' or A.dtype.kind == 'i': # constant matrix
r,c = np.shape(A)
# add dummy dimension
Al = np.empty((1,r,c))
Al[0,:,:] = A
# replicate the matrix nt times
At = np.tile(Al,(len(t),1,1))
else:
# obtain shape
r,c = np.shape(A)
# initialize
At = np.zeros((len(t),r,c))
# go through each row and column in A
for i in range(r):
for j in range(c):
if len(A)==0: # A is empty
pass # do nothing
elif type(A[i,j]) is types.LambdaType: # A is time varying
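# a time-varying entry may be a function of t alone or of (t, p); dispatch on its argument count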
if A[i,j].__code__.co_argcount == 2:
At[:,i,j] = np.ravel(A[i,j](t,p))
elif A[i,j].__code__.co_argcount == 1:
At[:,i,j] = np.ravel(A[i,j](t))
else:
raise ValueError("not a properly defined function")
elif A[i,j] == 0:
pass
else:
At[:,i,j] = A[i,j] # A is time invariant
return At
```
#### File: dtqpy/tests/DTPQy_TEST_scaling.py
```python
import os
import sys
os.chdir('../')
import numpy as np
from dtqpy.src.classes.DTQPy_CLASS_OPTS import *
from dtqpy.src.classes.DTQPy_CLASS_SETUP import *
from dtqpy.src.DTQPy_solve import DTQPy_solve
import matplotlib.pyplot as plt
# Bryson Denham problems
def problem():
opts = options()
# options
opts.dt.nt = 1000
opts.solver.function = 'pyoptsparse'
s = setup()
s.t0 = 0; s.tf = 1
A = np.array([[0,1],[0,0]])
B = np.array([[0],[1]])
# Lagrange term
L = LQ_objective(left = 1, right = 1,matrix = 0.5*np.ones((1,1)))
# ub and lb
UB = [Simple_Bounds() for n in range(3)]
LB = [Simple_Bounds() for n in range(2)]
UB[0].right = 4; UB[0].matrix = np.array([[0],[1]])
LB[0].right = 4; LB[0].matrix = np.array([[0],[1]])
UB[1].right = 5; UB[1].matrix = np.array([[0],[-1]])
LB[1].right = 5; LB[1].matrix = np.array([[0],[-1]])
UB[2].right = 2; UB[2].matrix = np.array([[1/9],[np.inf]])
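# path constraint: x1 <= 1/9, the classic Bryson-Denham state bound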
s.A = A; s.B = B; s.Lagrange = L; s.UB = UB; s.LB = LB
return s,opts
test = [1]
[s,opts] = problem()
T1 = [];U1 = [];X1 = [];P1 = [];F1 = [];internal1 = [];opts1 = []
fig,ax = plt.subplots()
fig, a = plt.subplots()
for k in range(len(test)):
s.Scaling = []
if test[k] == 1:
scaling = Scaling(right = 1, matrix = 6)
s.Scaling = scaling
elif test[k] == 2:
scaling = Scaling(right = 2,matrix = np.array([1/9,1]))
s.Scaling = scaling
T,U,X,P,F,internal,opts = DTQPy_solve(s,opts)
T1.append(T); U1.append(U); X1.append(X); P1.append(P); F1.append(F);internal1.append(internal);opts1.append(opts)
# plot
#plt.close('all')
ax.plot(T, X[:,0], label="x1")
ax.plot(T, X[:,1], label="x2")
ax.set_xlabel('t')
ax.set_ylabel('x')
ax.set_title('States');
a.plot(T,U,label = "u")
a.set_xlabel('t')
a.set_ylabel('u')
a.set_title('Controls');
``` |
{
"source": "johnjasa/OpenMDAO",
"score": 3
} |
#### File: nonlinear/tests/test_newton.py
```python
import unittest
import numpy as np
import openmdao.api as om
from openmdao.core.tests.test_discrete import InternalDiscreteGroup
from openmdao.test_suite.components.double_sellar import DoubleSellar, DoubleSellarImplicit, \
SubSellar
from openmdao.test_suite.components.implicit_newton_linesearch import ImplCompTwoStates
from openmdao.test_suite.components.sellar import SellarDerivativesGrouped, \
SellarNoDerivatives, SellarDerivatives, SellarStateConnection, StateConnection, \
SellarDis1withDerivatives, SellarDis2withDerivatives
from openmdao.utils.assert_utils import assert_rel_error, assert_warning
class TestNewton(unittest.TestCase):
def test_specify_newton_linear_solver_in_system(self):
my_newton = om.NewtonSolver()
my_newton.linear_solver = om.DirectSolver()
prob = om.Problem(model=SellarDerivatives(nonlinear_solver=my_newton))
prob.setup()
self.assertIsInstance(prob.model.nonlinear_solver.linear_solver, om.DirectSolver)
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
def test_feature_newton_basic(self):
""" Feature test for slotting a Newton solver and using it to solve
Sellar.
"""
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDerivatives
prob = om.Problem(model=SellarDerivatives(nonlinear_solver=om.NewtonSolver()))
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
def test_sellar_grouped(self):
# Tests basic Newton solution on Sellar in a subgroup
prob = om.Problem(model=SellarDerivativesGrouped(nonlinear_solver=om.NewtonSolver()))
prob.setup()
prob.set_solver_print(level=0)
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(prob.model.nonlinear_solver._iter_count, 8)
def test_sellar(self):
# Just tests Newton on Sellar with FD derivs.
prob = om.Problem(model=SellarNoDerivatives(nonlinear_solver=om.NewtonSolver()))
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(prob.model.nonlinear_solver._iter_count, 8)
def test_line_search_deprecated(self):
top = om.Problem()
top.model.add_subsystem('px', om.IndepVarComp('x', 1.0))
top.model.add_subsystem('comp', ImplCompTwoStates())
top.model.connect('px.x', 'comp.x')
top.model.nonlinear_solver = om.NewtonSolver()
top.model.nonlinear_solver.options['maxiter'] = 10
top.model.linear_solver = om.ScipyKrylov()
msg = "The 'line_search' attribute provides backwards compatibility with OpenMDAO 1.x ; " \
"use 'linesearch' instead."
with assert_warning(DeprecationWarning, msg):
top.model.nonlinear_solver.line_search = om.ArmijoGoldsteinLS(bound_enforcement='vector')
with assert_warning(DeprecationWarning, msg):
ls = top.model.nonlinear_solver.line_search
ls.options['maxiter'] = 10
ls.options['alpha'] = 1.0
top.setup()
# Test lower bound: should go to the lower bound and stall
top['px.x'] = 2.0
top['comp.y'] = 0.0
top['comp.z'] = 1.6
top.run_model()
assert_rel_error(self, top['comp.z'], 1.5, 1e-8)
# Test upper bound: should go to the upper bound and stall
top['px.x'] = 0.5
top['comp.y'] = 0.0
top['comp.z'] = 2.4
top.run_model()
assert_rel_error(self, top['comp.z'], 2.5, 1e-8)
def test_sellar_derivs(self):
# Test top level Sellar (i.e., not grouped).
# Also, piggybacked testing that makes sure we only call apply_nonlinear
# on the head component behind the cycle break.
prob = om.Problem()
prob.model = SellarDerivatives(nonlinear_solver=om.NewtonSolver(),
linear_solver=om.LinearBlockGS())
prob.setup()
prob.set_solver_print(level=0)
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(prob.model.nonlinear_solver._iter_count, 8)
## Make sure we only call apply_linear on 'heads'
#nd1 = prob.model.d1.execution_count
#nd2 = prob.model.d2.execution_count
#if prob.model.d1._run_apply == True:
#self.assertEqual(nd1, 2*nd2)
#else:
#self.assertEqual(2*nd1, nd2)
def test_sellar_derivs_with_Lin_GS(self):
prob = om.Problem(model=SellarDerivatives(nonlinear_solver=om.NewtonSolver()))
prob.setup()
prob.set_solver_print(level=0)
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(prob.model.nonlinear_solver._iter_count, 8)
def test_sellar_state_connection(self):
# Sellar model closes loop with state connection instead of a cycle.
prob = om.Problem(model=SellarStateConnection(nonlinear_solver=om.NewtonSolver()))
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['state_eq.y2_command'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(prob.model.nonlinear_solver._iter_count, 8)
def test_sellar_state_connection_fd_system(self):
# Sellar model closes loop with state connection instead of a cycle.
# This test is just fd.
prob = om.Problem(model=SellarStateConnection(nonlinear_solver=om.NewtonSolver()))
prob.model.approx_totals(method='fd')
prob.setup()
prob.set_solver_print(level=0)
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['state_eq.y2_command'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(prob.model.nonlinear_solver._iter_count, 6)
def test_sellar_specify_linear_solver(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
proms = ['x', 'z', 'y1', 'state_eq.y2_actual', 'state_eq.y2_command', 'd1.y2', 'd2.y2']
sub = model.add_subsystem('sub', om.Group(), promotes=proms)
subgrp = sub.add_subsystem('state_eq_group', om.Group(),
promotes=['state_eq.y2_actual', 'state_eq.y2_command'])
subgrp.linear_solver = om.ScipyKrylov()
subgrp.add_subsystem('state_eq', StateConnection())
sub.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1'])
sub.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1'])
model.connect('state_eq.y2_command', 'd1.y2')
model.connect('d2.y2', 'state_eq.y2_actual')
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),
promotes=['x', 'z', 'y1', 'obj'])
model.connect('d2.y2', 'obj_cmp.y2')
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2'])
model.connect('d2.y2', 'con_cmp2.y2')
model.nonlinear_solver = om.NewtonSolver()
# Use bad settings for this one so that problem doesn't converge.
# That way, we test that we are really using Newton's Lin Solver
# instead.
model.linear_solver = om.ScipyKrylov()
model.linear_solver.options['maxiter'] = 1
# The good solver
model.nonlinear_solver.linear_solver = om.ScipyKrylov()
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['state_eq.y2_command'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(model.nonlinear_solver._iter_count, 8)
self.assertEqual(model.linear_solver._iter_count, 0)
self.assertGreater(model.nonlinear_solver.linear_solver._iter_count, 0)
def test_sellar_specify_linear_direct_solver(self):
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
proms = ['x', 'z', 'y1', 'state_eq.y2_actual', 'state_eq.y2_command', 'd1.y2', 'd2.y2']
sub = model.add_subsystem('sub', om.Group(), promotes=proms)
subgrp = sub.add_subsystem('state_eq_group', om.Group(),
promotes=['state_eq.y2_actual', 'state_eq.y2_command'])
subgrp.linear_solver = om.ScipyKrylov()
subgrp.add_subsystem('state_eq', StateConnection())
sub.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1'])
sub.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1'])
model.connect('state_eq.y2_command', 'd1.y2')
model.connect('d2.y2', 'state_eq.y2_actual')
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),
promotes=['x', 'z', 'y1', 'obj'])
model.connect('d2.y2', 'obj_cmp.y2')
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2'])
model.connect('d2.y2', 'con_cmp2.y2')
model.nonlinear_solver = om.NewtonSolver()
# Use bad settings for this one so that problem doesn't converge.
# That way, we test that we are really using Newton's Lin Solver
# instead.
sub.linear_solver = om.ScipyKrylov()
sub.linear_solver.options['maxiter'] = 1
# The good solver
model.nonlinear_solver.linear_solver = om.DirectSolver()
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['state_eq.y2_command'], 12.05848819, .00001)
# Make sure we aren't iterating like crazy
self.assertLess(model.nonlinear_solver._iter_count, 8)
self.assertEqual(model.linear_solver._iter_count, 0)
def test_solve_subsystems_basic(self):
prob = om.Problem(model=DoubleSellar())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver()
g1.nonlinear_solver.options['rtol'] = 1.0e-5
g1.linear_solver = om.DirectSolver(assemble_jac=True)
g1.options['assembled_jac_type'] = 'dense'
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver()
g2.nonlinear_solver.options['rtol'] = 1.0e-5
g2.linear_solver = om.DirectSolver(assemble_jac=True)
g2.options['assembled_jac_type'] = 'dense'
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
model.nonlinear_solver.options['solve_subsystems'] = True
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_basic_csc(self):
prob = om.Problem(model=DoubleSellar())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.options['assembled_jac_type'] = 'dense'
g1.linear_solver = om.DirectSolver(assemble_jac=True)
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver(assemble_jac=True)
g2.options['assembled_jac_type'] = 'dense'
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_basic_dense_jac(self):
prob = om.Problem(model=DoubleSellar())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_basic_dense_jac_scaling(self):
prob = om.Problem(model=DoubleSellar(units=None, scaling=True))
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_basic_dense_jac_units_scaling(self):
prob = om.Problem(model=DoubleSellar(units=True, scaling=True))
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.0533333333, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.0533333333, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_assembled_jac_top(self):
prob = om.Problem(model=DoubleSellar())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_assembled_jac_top_csc(self):
prob = om.Problem(model=DoubleSellar())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_assembled_jac_top_implicit(self):
prob = om.Problem(model=DoubleSellarImplicit())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_assembled_jac_top_implicit_scaling(self):
prob = om.Problem(model=DoubleSellarImplicit(scaling=True))
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_assembled_jac_top_implicit_scaling_units(self):
prob = om.Problem(model=DoubleSellarImplicit(units=True, scaling=True))
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)
model.linear_solver = om.ScipyKrylov(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.053333333, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.053333333, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_assembled_jac_subgroup(self):
prob = om.Problem(model=DoubleSellar())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g1.linear_solver = om.DirectSolver(assemble_jac=True)
model.options['assembled_jac_type'] = 'dense'
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver(rtol=1.0e-5)
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
def test_solve_subsystems_internals(self):
# Here we test that this feature is doing what it should do by counting the
# number of calls in various places.
class CountNewton(om.NewtonSolver):
""" This version of Newton also counts how many times it runs in total."""
def __init__(self, **kwargs):
super(CountNewton, self).__init__(**kwargs)
self.total_count = 0
def _single_iteration(self):
super(CountNewton, self)._single_iteration()
self.total_count += 1
class CountDS(om.DirectSolver):
""" This version of Newton also counts how many times it linearizes"""
def __init__(self, **kwargs):
super(CountDS, self).__init__(**kwargs)
self.lin_count = 0
def _linearize(self):
super(CountDS, self)._linearize()
self.lin_count += 1
prob = om.Problem(model=DoubleSellar())
model = prob.model
# each SubSellar group converges itself
g1 = model.g1
g1.nonlinear_solver = CountNewton()
g1.nonlinear_solver.options['rtol'] = 1.0e-5
g1.linear_solver = CountDS() # used for derivatives
g2 = model.g2
g2.nonlinear_solver = CountNewton()
g2.nonlinear_solver.options['rtol'] = 1.0e-5
g2.linear_solver = om.DirectSolver()
# Converge the outer loop with a top-level Newton solver.
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
# Enforce behavior: max_sub_solves = 0 means we run once during init
model.nonlinear_solver.options['maxiter'] = 5
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 0
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
# Verifying subsolvers ran
self.assertEqual(g1.nonlinear_solver.total_count, 2)
self.assertEqual(g2.nonlinear_solver.total_count, 2)
self.assertEqual(g1.linear_solver.lin_count, 2)
prob = om.Problem(model=DoubleSellar())
model = prob.model
# each SubSellar group converges itself
g1 = model.g1
g1.nonlinear_solver = CountNewton()
g1.nonlinear_solver.options['rtol'] = 1.0e-5
g1.linear_solver = CountDS() # used for derivatives
g2 = model.g2
g2.nonlinear_solver = CountNewton()
g2.nonlinear_solver.options['rtol'] = 1.0e-5
g2.linear_solver = om.DirectSolver()
# Converge the outer loop with a top-level Newton solver.
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
# Enforce Behavior: baseline
model.nonlinear_solver.options['maxiter'] = 5
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 5
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
# Verifying subsolvers ran
self.assertEqual(g1.nonlinear_solver.total_count, 5)
self.assertEqual(g2.nonlinear_solver.total_count, 5)
self.assertEqual(g1.linear_solver.lin_count, 5)
prob = om.Problem(model=DoubleSellar())
model = prob.model
# each SubSellar group converges itself
g1 = model.g1
g1.nonlinear_solver = CountNewton()
g1.nonlinear_solver.options['rtol'] = 1.0e-5
g1.linear_solver = CountDS() # used for derivatives
g2 = model.g2
g2.nonlinear_solver = CountNewton()
g2.nonlinear_solver.options['rtol'] = 1.0e-5
g2.linear_solver = om.DirectSolver()
# Converge the outer loop with a top-level Newton solver.
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
# Enforce behavior: max_sub_solves = 1 means we run during init and first iteration of iter_execute
model.nonlinear_solver.options['maxiter'] = 5
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 1
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
# Verifying subsolvers ran
self.assertEqual(g1.nonlinear_solver.total_count, 4)
self.assertEqual(g2.nonlinear_solver.total_count, 4)
self.assertEqual(g1.linear_solver.lin_count, 4)
def test_maxiter_one(self):
# Fix bug when maxiter was set to 1.
# This bug caused linearize to run before apply in this case.
class ImpComp(om.ImplicitComponent):
def setup(self):
self.add_input('a', val=1.)
self.add_output('x', val=0.)
self.applied = False
self.declare_partials(of='*', wrt='*')
def apply_nonlinear(self, inputs, outputs, residuals):
residuals['x'] = np.exp(outputs['x']) - \
inputs['a']**2 * outputs['x']**2
self.applied = True
def solve_nonlinear(self, inputs, outputs):
pass
def linearize(self, inputs, outputs, jacobian):
jacobian['x', 'x'] = np.exp(outputs['x']) - \
2 * inputs['a']**2 * outputs['x']
jacobian['x', 'a'] = -2 * inputs['a'] * outputs['x']**2
if not self.applied:
raise RuntimeError("Bug! Linearize called before Apply!")
prob = om.Problem()
root = prob.model
root.add_subsystem('p1', om.IndepVarComp('a', 1.0))
root.add_subsystem('comp', ImpComp())
root.connect('p1.a', 'comp.a')
root.nonlinear_solver = om.NewtonSolver()
root.nonlinear_solver.options['maxiter'] = 1
prob.set_solver_print(level=0)
prob.setup()
prob.run_model()
def test_err_on_maxiter_deprecated(self):
# Raise AnalysisError when it fails to converge
prob = om.Problem()
nlsolver = om.NewtonSolver()
prob.model = SellarDerivatives(nonlinear_solver=nlsolver,
linear_solver=om.LinearBlockGS())
nlsolver.options['err_on_maxiter'] = True
nlsolver.options['maxiter'] = 1
prob.setup()
prob.set_solver_print(level=0)
msg = "The 'err_on_maxiter' option provides backwards compatibility " + \
"with earlier version of OpenMDAO; use options['err_on_non_converge'] " + \
"instead."
#prob.final_setup()
with assert_warning(DeprecationWarning, msg):
prob.final_setup()
with self.assertRaises(om.AnalysisError) as context:
prob.run_model()
msg = "Solver 'NL: Newton' on system '' failed to converge in 1 iterations."
self.assertEqual(str(context.exception), msg)
def test_err_on_non_converge(self):
# Raise AnalysisError when it fails to converge
prob = om.Problem()
nlsolver = om.NewtonSolver()
prob.model = SellarDerivatives(nonlinear_solver=nlsolver,
linear_solver=om.LinearBlockGS())
nlsolver.options['err_on_non_converge'] = True
nlsolver.options['maxiter'] = 1
prob.setup()
prob.set_solver_print(level=0)
with self.assertRaises(om.AnalysisError) as context:
prob.run_driver()
msg = "Solver 'NL: Newton' on system '' failed to converge in 1 iterations."
self.assertEqual(str(context.exception), msg)
def test_err_message_inf_nan(self):
prob = om.Problem()
nlsolver = om.NewtonSolver()
prob.model = SellarDerivatives(nonlinear_solver=nlsolver,
linear_solver=om.LinearBlockGS())
nlsolver.options['err_on_non_converge'] = True
nlsolver.options['maxiter'] = 1
prob.setup()
prob.set_solver_print(level=0)
prob['x'] = np.nan
with self.assertRaises(om.AnalysisError) as context:
prob.run_model()
msg = "Solver 'NL: Newton' on system '': residuals contain 'inf' or 'NaN' after 0 iterations."
self.assertEqual(str(context.exception), msg)
def test_relevancy_for_newton(self):
class TestImplCompSimple(om.ImplicitComponent):
def setup(self):
self.add_input('a', val=1.)
self.add_output('x', val=0.)
self.declare_partials(of='*', wrt='*')
def apply_nonlinear(self, inputs, outputs, residuals):
residuals['x'] = np.exp(outputs['x']) - \
inputs['a']**2 * outputs['x']**2
def linearize(self, inputs, outputs, jacobian):
jacobian['x', 'x'] = np.exp(outputs['x']) - \
2 * inputs['a']**2 * outputs['x']
jacobian['x', 'a'] = -2 * inputs['a'] * outputs['x']**2
prob = om.Problem()
model = prob.model
model.add_subsystem('p1', om.IndepVarComp('x', 3.0))
model.add_subsystem('icomp', TestImplCompSimple())
model.add_subsystem('ecomp', om.ExecComp('y = x*p', p=1.0))
model.connect('p1.x', 'ecomp.x')
model.connect('icomp.x', 'ecomp.p')
model.add_design_var('p1.x', 3.0)
model.add_objective('ecomp.y')
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
prob.setup()
prob.run_model()
J = prob.compute_totals()
assert_rel_error(self, J['ecomp.y', 'p1.x'][0][0], -0.703467422498, 1e-6)
def test_linsearch_3_deprecation(self):
prob = om.Problem(model=SellarDerivatives(nonlinear_solver=om.NewtonSolver()))
prob.setup()
msg = 'Deprecation warning: In V 3.0, the default Newton solver setup will change ' + \
'to use the BoundsEnforceLS line search.'
with assert_warning(DeprecationWarning, msg):
prob.final_setup()
class TestNewtonFeatures(unittest.TestCase):
def test_feature_basic(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver()
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
def test_feature_maxiter(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.linear_solver = om.DirectSolver()
nlgbs = model.nonlinear_solver = om.NewtonSolver()
nlgbs.options['maxiter'] = 2
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.5878516779, .00001)
assert_rel_error(self, prob['y2'], 12.0607416105, .00001)
def test_feature_rtol(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.linear_solver = om.DirectSolver()
nlgbs = model.nonlinear_solver = om.NewtonSolver()
nlgbs.options['rtol'] = 1e-3
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.5878516779, .00001)
assert_rel_error(self, prob['y2'], 12.0607416105, .00001)
def test_feature_atol(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.linear_solver = om.DirectSolver()
nlgbs = model.nonlinear_solver = om.NewtonSolver()
nlgbs.options['atol'] = 1e-4
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.5882856302, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
def test_feature_linear_solver(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, \
SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.linear_solver = om.LinearBlockGS()
nlgbs = model.nonlinear_solver = om.NewtonSolver()
nlgbs.linear_solver = om.DirectSolver()
prob.setup()
prob.run_model()
assert_rel_error(self, prob['y1'], 25.58830273, .00001)
assert_rel_error(self, prob['y2'], 12.05848819, .00001)
def test_feature_max_sub_solves(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.double_sellar import SubSellar
prob = om.Problem()
model = prob.model
model.add_subsystem('g1', SubSellar())
model.add_subsystem('g2', SubSellar())
model.connect('g1.y2', 'g2.x')
model.connect('g2.y2', 'g1.x')
# Converge the outer loop with a top-level Newton solver.
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.DirectSolver()
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver()
g1.nonlinear_solver.options['rtol'] = 1.0e-5
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver()
g2.nonlinear_solver.options['rtol'] = 1.0e-5
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 0
prob.setup()
prob.run_model()
def test_feature_err_on_non_converge(self):
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
prob = om.Problem()
model = prob.model
model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])
model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
z=np.array([0.0, 0.0]), x=0.0),
promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
model.linear_solver = om.DirectSolver()
nlgbs = model.nonlinear_solver = om.NewtonSolver()
nlgbs.options['maxiter'] = 1
nlgbs.options['err_on_non_converge'] = True
prob.setup()
try:
prob.run_model()
except om.AnalysisError:
pass
def test_solve_subsystems_basic(self):
import openmdao.api as om
from openmdao.test_suite.components.double_sellar import DoubleSellar
prob = om.Problem(model=DoubleSellar())
model = prob.model
g1 = model.g1
g1.nonlinear_solver = om.NewtonSolver()
g1.nonlinear_solver.options['rtol'] = 1.0e-5
g1.linear_solver = om.DirectSolver()
g2 = model.g2
g2.nonlinear_solver = om.NewtonSolver()
g2.nonlinear_solver.options['rtol'] = 1.0e-5
g2.linear_solver = om.DirectSolver()
model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
model.nonlinear_solver.options['solve_subsystems'] = True
prob.setup()
prob.run_model()
assert_rel_error(self, prob['g1.y1'], 0.64, .00001)
assert_rel_error(self, prob['g1.y2'], 0.80, .00001)
assert_rel_error(self, prob['g2.y1'], 0.64, .00001)
assert_rel_error(self, prob['g2.y2'], 0.80, .00001)
if __name__ == "__main__":
unittest.main()
```
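Condensed for reference, a minimal sketch of the `solve_subsystems` pattern the tests above exercise, assembled only from the stock `DoubleSellar` test model and the option names used in the tests; the printed values are the ones the assertions check:
```python
import openmdao.api as om
from openmdao.test_suite.components.double_sellar import DoubleSellar

prob = om.Problem(model=DoubleSellar())
model = prob.model

# Each SubSellar group gets its own Newton/Direct pair so it can
# converge itself when the top-level Newton recurses into it.
for group in (model.g1, model.g2):
    group.nonlinear_solver = om.NewtonSolver()
    group.nonlinear_solver.options['rtol'] = 1.0e-5
    group.linear_solver = om.DirectSolver()

model.nonlinear_solver = om.NewtonSolver()
model.linear_solver = om.ScipyKrylov()
model.nonlinear_solver.options['solve_subsystems'] = True
model.nonlinear_solver.options['max_sub_solves'] = 1

prob.setup()
prob.run_model()
print(prob['g1.y1'], prob['g2.y2'])  # roughly 0.64 and 0.80, per the assertions above
```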
#### File: visualization/meta_model_viewer/meta_model_visualization.py
```python
import math
from itertools import product
from scipy.spatial import cKDTree
import numpy as np
import logging
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.plotting import figure
from bokeh.models import Slider, ColumnDataSource, HoverTool
from bokeh.models import ColorBar, BasicTicker, LinearColorMapper, Range1d
from bokeh.models.widgets import TextInput, Select
from bokeh.server.server import Server
from openmdao.components.meta_model_unstructured_comp import MetaModelUnStructuredComp
from openmdao.components.meta_model_structured_comp import MetaModelStructuredComp
from openmdao.core.problem import Problem
def stack_outputs(outputs_dict):
"""
Stack the values of a dictionary.
Parameters
----------
outputs_dict : dict
Dictionary of outputs
Returns
-------
array
np.stack of values
"""
return np.stack([np.asarray(v) for v in outputs_dict.values()], axis=-1)
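# Illustrative check (not part of the original module): two one-element
# outputs stack along the last axis into an array of shape (1, 2).
#   stack_outputs({'y1': np.array([1.0]), 'y2': np.array([2.0])})
#   -> array([[1., 2.]])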
class MetaModelVisualization(object):
"""
Top-level container for the Meta Model Visualization.
Attributes
----------
prob : Problem
Name of variable corresponding to Problem Component
meta_model : MetaModel
Name of empty Meta Model Component object reference
resolution : int
Number used to calculate width and height of contour plot
is_structured_meta_model : Bool
Boolean used to signal whether the meta model is structured or unstructured
slider_source : ColumnDataSource
Data source containing dictionary of sliders
contour_training_data_source : ColumnDataSource
Data source containing dictionary of training data points
bottom_plot_source : ColumnDataSource
Data source containing data for the bottom subplot
bottom_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the bottom subplot
right_plot_source : ColumnDataSource
Data source containing data for the right subplot
right_plot_scatter_source : ColumnDataSource
Data source containing scatter point data for the right subplot
contour_plot_source : ColumnDataSource
Data source containing data for the contour plot
input_names : list
List of input data titles as strings
output_names : list
List of output data titles as strings
training_inputs : dict
Dictionary of input training data
x_input_select : Select
Bokeh Select object containing a list of inputs for the x axis
y_input_select : Select
Bokeh Select object containing a list of inputs for the y axis
output_select : Select
Bokeh Select object containing a list of inputs for the outputs
x_input_slider : Slider
Bokeh Slider object containing a list of input values for the x axis
y_input_slider : Slider
Bokeh Slider object containing a list of input values for the y axis
slider_dict : dict
Dictionary of slider names and their respective slider objects
predict_inputs : dict
Dictionary containing training data points to predict at.
num_inputs : int
Number of inputs
num_outputs : int
Number of outputs
limit_range : array
Array containing the range of each input
scatter_distance : TextInput
Text input for user to enter custom value to calculate distance of training points around
slice line
right_alphas : array
Array of points containing alpha values for right plot
bottom_alphas : array
Array of points containing alpha values for bottom plot
dist_range : float
Value taken from scatter_distance used for calculating distance of training points around
slice line
x_index : int
Value of x axis column
y_index : int
Value of y axis column
output_variable : int
Value of output axis column
sliders_and_selects : layout
Layout containing the sliders and select elements
doc_layout : layout
Contains first row of plots
doc_layout2 : layout
Contains second row of plots
Z : array
A 2D array containing contour plot data
"""
def __init__(self, model, resolution=50, doc=None):
"""
Initialize parameters.
Parameters
----------
model : MetaModelComponent
Reference to meta model component
resolution : int
Value used to calculate the size of contour plot meshgrid
doc : Document
The bokeh document to build.
"""
self.prob = Problem()
self.resolution = resolution
logging.getLogger("bokeh").setLevel(logging.ERROR)
# If the surrogate model coming in is unstructured
if isinstance(model, MetaModelUnStructuredComp):
self.is_structured_meta_model = False
# Create list of input names, check if it has more than one input, then create list
# of outputs
self.input_names = [name[0] for name in model._surrogate_input_names]
if len(self.input_names) < 2:
raise ValueError('Must have more than one input value')
self.output_names = [name[0] for name in model._surrogate_output_names]
# Create reference for unstructured component
self.meta_model = MetaModelUnStructuredComp(
default_surrogate=model.options['default_surrogate'])
# If the surrogate model coming in is structured
elif isinstance(model, MetaModelStructuredComp):
self.is_structured_meta_model = True
self.input_names = [name for name in model._var_rel_names['input']]
if len(self.input_names) < 2:
raise ValueError('Must have more than one input value')
self.output_names = [name for name in model._var_rel_names['output']]
self.meta_model = MetaModelStructuredComp(
distributed=model.options['distributed'],
extrapolate=model.options['extrapolate'],
method=model.options['method'],
training_data_gradients=model.options['training_data_gradients'],
vec_size=1)
# Pair input list names with their respective data
self.training_inputs = {}
self._setup_empty_prob_comp(model)
# Setup dropdown menus for x/y inputs and the output value
self.x_input_select = Select(title="X Input:", value=[x for x in self.input_names][0],
options=[x for x in self.input_names])
self.x_input_select.on_change('value', self._x_input_update)
self.y_input_select = Select(title="Y Input:", value=[x for x in self.input_names][1],
options=[x for x in self.input_names])
self.y_input_select.on_change('value', self._y_input_update)
self.output_select = Select(title="Output:", value=[x for x in self.output_names][0],
options=[x for x in self.output_names])
self.output_select.on_change('value', self._output_value_update)
# Create sliders for each input
self.slider_dict = {}
self.predict_inputs = {}
for title, values in self.training_inputs.items():
slider_data = np.linspace(min(values), max(values), self.resolution)
self.predict_inputs[title] = slider_data
# Calculates the distance between slider ticks
slider_step = slider_data[1] - slider_data[0]
slider_object = Slider(start=min(values), end=max(values), value=min(values),
step=slider_step, title=str(title))
self.slider_dict[title] = slider_object
self._slider_attrs()
# Length of inputs and outputs
self.num_inputs = len(self.input_names)
self.num_outputs = len(self.output_names)
# Precalculate the problem bounds.
limits = np.array([[min(value), max(value)] for value in self.training_inputs.values()])
self.limit_range = limits[:, 1] - limits[:, 0]
# Positional indices
self.x_index = 0
self.y_index = 1
self.output_variable = self.output_names.index(self.output_select.value)
# Data sources are filled with initial values
# Slider Column Data Source
self.slider_source = ColumnDataSource(data=self.predict_inputs)
# Contour plot Column Data Source
self.contour_plot_source = ColumnDataSource(data=dict(
z=np.random.rand(self.resolution, self.resolution)))
self.contour_training_data_source = ColumnDataSource(
data=dict(x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
# Bottom plot Column Data Source
self.bottom_plot_source = ColumnDataSource(data=dict(
x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
self.bottom_plot_scatter_source = ColumnDataSource(data=dict(
bot_slice_x=np.repeat(0, self.resolution), bot_slice_y=np.repeat(0, self.resolution)))
# Right plot Column Data Source
self.right_plot_source = ColumnDataSource(data=dict(
x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
self.right_plot_scatter_source = ColumnDataSource(data=dict(
right_slice_x=np.repeat(0, self.resolution),
right_slice_y=np.repeat(0, self.resolution)))
# Text input to change the distance of reach when searching for nearest data points
self.scatter_distance = TextInput(value="0.1", title="Scatter Distance")
self.scatter_distance.on_change('value', self._scatter_input)
self.dist_range = float(self.scatter_distance.value)
# Grouping all of the sliders and dropdowns into one column
sliders = [value for value in self.slider_dict.values()]
sliders.extend(
[self.x_input_select, self.y_input_select, self.output_select, self.scatter_distance])
self.sliders_and_selects = row(
column(*sliders))
# Layout creation
self.doc_layout = row(self._contour_data(), self._right_plot(), self.sliders_and_selects)
self.doc_layout2 = row(self._bottom_plot())
if doc is None:
doc = curdoc()
doc.add_root(self.doc_layout)
doc.add_root(self.doc_layout2)
doc.title = 'Meta Model Visualization'
def _setup_empty_prob_comp(self, metamodel):
"""
Take data from surrogate ref and pass it into new surrogate model with empty Problem model.
Parameters
----------
metamodel : MetaModelComponent
Reference to meta model component
"""
# Check for structured or unstructured
if self.is_structured_meta_model:
# Loop through the input names
for idx, name in enumerate(self.input_names):
# Check for no training data
try:
# Append the input data/titles to a dictionary
self.training_inputs[name] = metamodel.params[idx]
# Also, append the data as an 'add_input' to the model reference
self.meta_model.add_input(name, 0.,
training_data=metamodel.params[idx])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
# Add the outputs to the model reference
for idx, name in enumerate(self.output_names):
self.meta_model.add_output(
name, 0.,
training_data=metamodel.training_outputs[name])
else:
for name in self.input_names:
try:
self.training_inputs[name] = {
title for title in metamodel.options['train:' + str(name)]}
self.meta_model.add_input(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
except TypeError:
msg = "No training data present for one or more parameters"
raise TypeError(msg)
for name in self.output_names:
self.meta_model.add_output(
name, 0.,
training_data=[
title for title in metamodel.options['train:' + str(name)]])
# Add the subsystem and setup
self.prob.model.add_subsystem('interp', self.meta_model)
self.prob.setup()
def _slider_attrs(self):
"""
Assign data to slider objects and callback functions.
Parameters
----------
None
"""
for name, slider_object in self.slider_dict.items():
# Checks if there is a callback previously assigned and then clears it
if len(slider_object._callbacks) == 1:
slider_object._callbacks.clear()
# Check if the name matches the 'x input' title
if name == self.x_input_select.value:
# Set the object and add an event handler
self.x_input_slider = slider_object
self.x_input_slider.on_change('value', self._scatter_plots_update)
# Check if the name matches the 'y input' title
elif name == self.y_input_select.value:
# Set the object and add an event handler
self.y_input_slider = slider_object
self.y_input_slider.on_change('value', self._scatter_plots_update)
else:
# If it is not an x or y input then just assign it the event handler
slider_object.on_change('value', self._update)
def _make_predictions(self, data):
"""
Run the data parameter through the surrogate model which is given in prob.
Parameters
----------
data : dict
Dictionary containing training points.
Returns
-------
array
np.stack of predicted points.
"""
# Create dictionary with an empty list
outputs = {name: [] for name in self.output_names}
# Parse dict into an array of shape [resolution**2, number of inputs]
inputs = np.empty([self.resolution**2, self.num_inputs])
for idx, values in enumerate(data.values()):
inputs[:, idx] = values.flatten()
# Check for structured or unstructured
if self.is_structured_meta_model:
# Assign each row of the data coming in to a tuple. Loop through the tuple, and append
# the name of the input and value.
for idx, tup in enumerate(inputs):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
# Append the predicted value(s)
for title in self.output_names:
outputs[title].append(
np.array(self.prob[self.meta_model.name + '.' + title]))
else:
for idx, tup in enumerate(inputs):
for name, val in zip(data.keys(), tup):
self.prob[self.meta_model.name + '.' + name] = val
self.prob.run_model()
for title in self.output_names:
outputs[title].append(
float(self.prob[self.meta_model.name + '.' + title]))
return stack_outputs(outputs)
def _contour_data_calcs(self):
"""
Parse input data into a dictionary to be predicted at.
Parameters
----------
None
Returns
-------
dict
Dictionary of training data to be predicted at.
"""
# Create initial data array of training points
resolution = self.resolution
x_data = np.zeros((resolution, resolution, self.num_inputs))
self._slider_attrs()
# Broadcast the inputs to every row of x_data array
x_data[:, :, :] = np.array(self.input_point_list)
# Find the x/y input titles and match their index positions
for idx, (title, values) in enumerate(self.slider_source.data.items()):
if title == self.x_input_select.value:
self.xlins_mesh = values
x_index_position = idx
if title == self.y_input_select.value:
self.ylins_mesh = values
y_index_position = idx
# Make meshgrid from the x/y inputs to be plotted
X, Y = np.meshgrid(self.xlins_mesh, self.ylins_mesh)
# Move the x/y inputs to their respective positions in x_data
x_data[:, :, x_index_position] = X
x_data[:, :, y_index_position] = Y
pred_dict = {}
for idx, title in enumerate(self.slider_source.data):
pred_dict.update({title: x_data[:, :, idx]})
return pred_dict
def _contour_data(self):
"""
Create a contour plot.
Parameters
----------
None
Returns
-------
Bokeh Image Plot
"""
resolution = self.resolution
# Output data array initialization
y_data = np.zeros((resolution, resolution, self.num_outputs))
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Pass the dict to make predictions and then reshape the output to
# (resolution, resolution, number of outputs)
y_data[:, :, :] = self._make_predictions(self._contour_data_calcs()).reshape(
(resolution, resolution, self.num_outputs))
# Use the output variable to pull the correct column of data from the predicted
# data (y_data)
self.Z = y_data[:, :, self.output_variable]
# Reshape it to be 2D
self.Z = self.Z.reshape(resolution, resolution)
# Update the data source with new data
self.contour_plot_source.data = dict(z=[self.Z])
# Min to max of training data
self.contour_x_range = xlins = self.xlins_mesh
self.contour_y_range = ylins = self.ylins_mesh
# Color bar formatting
color_mapper = LinearColorMapper(
palette="Viridis11", low=np.amin(self.Z), high=np.amax(self.Z))
color_bar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(), label_standoff=12,
location=(0, 0))
# Contour Plot
self.contour_plot = contour_plot = figure(
match_aspect=False,
tooltips=[(self.x_input_select.value, "$x"), (self.y_input_select.value, "$y"),
(self.output_select.value, "@z")], tools='')
contour_plot.x_range.range_padding = 0
contour_plot.y_range.range_padding = 0
contour_plot.plot_width = 600
contour_plot.plot_height = 500
contour_plot.xaxis.axis_label = self.x_input_select.value
contour_plot.yaxis.axis_label = self.y_input_select.value
contour_plot.min_border_left = 0
contour_plot.add_layout(color_bar, 'right')
contour_plot.x_range = Range1d(min(xlins), max(xlins))
contour_plot.y_range = Range1d(min(ylins), max(ylins))
contour_plot.image(image='z', source=self.contour_plot_source, x=min(xlins), y=min(ylins),
dh=(max(ylins) - min(ylins)), dw=(max(xlins) - min(xlins)),
palette="Viridis11")
# Adding training data points overlay to contour plot
if self.is_structured_meta_model:
data = self._structured_training_points()
else:
data = self._unstructured_training_points()
if len(data):
# Add training data points overlay to contour plot
data = np.array(data)
if self.is_structured_meta_model:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model.training_outputs[
self.output_select.value].flatten())
else:
self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
z=self.meta_model._training_output[
self.output_select.value])
training_data_renderer = self.contour_plot.circle(
x='x', y='y', source=self.contour_training_data_source,
size=5, color='white', alpha=0.50)
self.contour_plot.add_tools(HoverTool(renderers=[training_data_renderer], tooltips=[
(self.x_input_select.value + " (train)", '@x'),
(self.y_input_select.value + " (train)", '@y'),
(self.output_select.value + " (train)", '@z'), ]))
return self.contour_plot
def _right_plot(self):
"""
Create the right side subplot to view the projected slice.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the y input and match it with the data
y_idx = self.y_input_select.value
y_data = self.predict_inputs[y_idx]
# Find the position of the x_input slider
x_value = self.x_input_slider.value
# Rounds the x_data to match the predict_inputs value
subplot_value_index = np.where(
np.around(self.predict_inputs[self.x_input_select.value], 5) ==
np.around(x_value, 5))[0]
# Make slice in Z data at the point calculated before and add it to the data source
z_data = self.Z[:, subplot_value_index].flatten()
x = z_data
y = self.slider_source.data[y_idx]
# Update the data source with new data
self.right_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.right_plot_fig = right_plot_fig = figure(
plot_width=250, plot_height=500,
title="{} vs {}".format(y_idx, self.output_select.value), tools="pan")
right_plot_fig.xaxis.axis_label = self.output_select.value
right_plot_fig.yaxis.axis_label = y_idx
right_plot_fig.xaxis.major_label_orientation = math.pi / 9
right_plot_fig.line(x='x', y='y', source=self.right_plot_source)
right_plot_fig.x_range.range_padding = 0.1
right_plot_fig.y_range.range_padding = 0.02
# Determine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True, source='right')
else:
data = self._unstructured_training_points(compute_distance=True, source='right')
self.right_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None,
fill_color='#000000',
fill_alpha=self.right_alphas.tolist())
right_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(self.output_select.value + " (train)", '@x'),
(y_idx + " (train)", '@y'),
]))
# Set the right_plot data source to new values
self.right_plot_scatter_source.data = dict(
right_slice_x=np.repeat(x_value, self.resolution), right_slice_y=y_data)
self.contour_plot.line(
'right_slice_x', 'right_slice_y', source=self.right_plot_scatter_source,
color='black', line_width=2)
return self.right_plot_fig
def _bottom_plot(self):
"""
Create the bottom subplot to view the projected slice.
Parameters
----------
None
Returns
-------
Bokeh figure
"""
# List of the current positions of the sliders
self.input_point_list = [point.value for point in self.slider_dict.values()]
# Find the title of the x input and match it with the data
x_idx = self.x_input_select.value
x_data = self.predict_inputs[x_idx]
# Find the position of the y_input slider
y_value = self.y_input_slider.value
# Rounds the y_data to match the predict_inputs value
subplot_value_index = np.where(
np.around(self.predict_inputs[self.y_input_select.value], 5) ==
np.around(y_value, 5))[0]
# Make slice in Z data at the point calculated before and add it to the data source
z_data = self.Z[subplot_value_index, :].flatten()
x = self.slider_source.data[x_idx]
y = z_data
# Update the data source with new data
self.bottom_plot_source.data = dict(x=x, y=y)
# Create and format figure
self.bottom_plot_fig = bottom_plot_fig = figure(
plot_width=550, plot_height=250,
title="{} vs {}".format(x_idx, self.output_select.value), tools="")
bottom_plot_fig.xaxis.axis_label = x_idx
bottom_plot_fig.yaxis.axis_label = self.output_select.value
bottom_plot_fig.line(x='x', y='y', source=self.bottom_plot_source)
bottom_plot_fig.x_range.range_padding = 0.02
bottom_plot_fig.y_range.range_padding = 0.1
# Determine distance and alpha opacity of training points
if self.is_structured_meta_model:
data = self._structured_training_points(compute_distance=True)
else:
data = self._unstructured_training_points(compute_distance=True)
self.bottom_alphas = 1.0 - data[:, 2] / self.dist_range
# Training data scatter plot
scatter_renderer = bottom_plot_fig.scatter(x=data[:, 0], y=data[:, 3], line_color=None,
fill_color='#000000',
fill_alpha=self.bottom_alphas.tolist())
bottom_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
(x_idx + " (train)", '@x'),
(self.output_select.value + " (train)", '@y'),
]))
# Set the bottom_plot data source to new values
self.bottom_plot_scatter_source.data = dict(
bot_slice_x=x_data,
bot_slice_y=np.repeat(y_value, self.resolution))
self.contour_plot.line(
'bot_slice_x', 'bot_slice_y', source=self.bottom_plot_scatter_source, color='black',
line_width=2)
return self.bottom_plot_fig
def _unstructured_training_points(self, compute_distance=False, source='bottom'):
"""
Calculate the training points and return an array containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being called from.
Returns
-------
array
The array of training points and their alpha opacity with respect to the surrogate line
"""
# Input training data and output training data
x_training = self.meta_model._training_input
training_output = np.squeeze(stack_outputs(self.meta_model._training_output), axis=1)
# Index of input/output variables
x_index = self.x_input_select.options.index(self.x_input_select.value)
y_index = self.y_input_select.options.index(self.y_input_select.value)
output_variable = self.output_names.index(self.output_select.value)
# Vertically stack the x/y inputs and then transpose them
infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = np.divide(points, self.limit_range)
dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
scaled_x0 = np.divide(self.input_point_list, self.limit_range)
# Nearest points to x slice
if x_training.shape[1] < 3:
tree = cKDTree(points)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
dists, idxs = tree.query(
scaled_x0, k=len(x_training), distance_upper_bound=self.dist_range)
# kdtree query always returns requested k even if there are not enough valid points
idx_finite = np.where(np.isfinite(dists))
dists = dists[idx_finite]
idxs = idxs[idx_finite]
else:
dists, idxs = self._multidimension_input(scaled_x0, points, source=source)
# data contains:
# [x_value, y_value, ND-distance, func_value]
data = np.zeros((len(idxs), 4))
for dist_index, j in enumerate(idxs):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = dists[dist_index]
data[dist_index, 3] = training_output[j, output_variable]
return data
def _structured_training_points(self, compute_distance=False, source='bottom'):
"""
Calculate the training points and return an array containing the position and alpha.
Parameters
----------
compute_distance : bool
If true, compute the distance of training points from surrogate line.
source : str
Which subplot the method is being called from.
Returns
-------
array
The array of training points and their alpha opacity with respect to the surrogate line
"""
# Create tuple of the input parameters
input_dimensions = tuple(self.meta_model.params)
# Input training data and output training data
x_training = np.array([z for z in product(*input_dimensions)])
training_output = self.meta_model.training_outputs[self.output_select.value].flatten()
# Index of input/output variables
x_index = self.x_input_select.options.index(self.x_input_select.value)
y_index = self.y_input_select.options.index(self.y_input_select.value)
# Vertically stack the x/y inputs and then transpose them
infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
if not compute_distance:
return infos
points = x_training.copy()
# Normalize so each dimension spans [0, 1]
points = np.divide(points, self.limit_range)
self.dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
scaled_x0 = np.divide(self.input_point_list, self.limit_range)
# Query the nearest neighbors tree for the closest points to the scaled x0 array
# Nearest points to x slice
if x_training.shape[1] < 3:
x_tree, x_idx = self._two_dimension_input(scaled_x0, points, source=source)
else:
x_tree, x_idx = self._multidimension_input(scaled_x0, points, source=source)
# format for 'data'
# [x_value, y_value, ND-distance_(x or y), func_value]
n = len(x_tree)
data = np.zeros((n, 4))
for dist_index, j in enumerate(x_idx):
data[dist_index, 0:2] = infos[j, :]
data[dist_index, 2] = x_tree[dist_index]
data[dist_index, 3] = training_output[j]
return data
def _two_dimension_input(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : array
Array of normalized slider positions.
training_points : array
Array of input training data.
source : str
Which subplot the method is being called from.
Returns
-------
x_tree : array
One-dimensional array of distances to points within the dist range.
idxs : array
Indices of the closest points that are within the dist range.
"""
# Column of the input
if source == 'right':
col_idx = self.y_input_select.options.index(self.y_input_select.value)
else:
col_idx = self.x_input_select.options.index(self.x_input_select.value)
# Delete the axis of input from source to predicted 1D distance
x = np.delete(scaled_points, col_idx, axis=0)
x_training_points = np.delete(training_points, col_idx, axis=1).flatten()
# Tree of point distances
x_tree = np.abs(x - x_training_points)
# Only return points that are within our distance-viewing parameter.
idx = np.where(x_tree <= self.dist_range)
x_tree = x_tree[idx]
return x_tree, idx[0]
def _multidimension_input(self, scaled_points, training_points, source='bottom'):
"""
Calculate the distance of training points to the surrogate line.
Parameters
----------
scaled_points : array
Array of normalized slider positions.
training_points : array
Array of input training data.
source : str
Which subplot the method is being called from.
Returns
-------
dists : array
Distances of the closest points that are within the dist range.
idxs : array
Indices of the closest points that are within the dist range.
"""
# Column of the input
if source == 'right':
col_idx = self.y_input_select.options.index(self.y_input_select.value)
else:
col_idx = self.x_input_select.options.index(self.x_input_select.value)
# Delete the axis of input from source to predicted distance
x = np.delete(scaled_points, col_idx, axis=0)
x_training_points = np.delete(training_points, col_idx, axis=1)
# Tree of point distances
x_tree = cKDTree(x_training_points)
# Query the nearest neighbors tree for the closest points to the scaled array
dists, idx = x_tree.query(x, k=len(x_training_points),
distance_upper_bound=self.dist_range)
# kdtree query always returns requested k even if there are not enough valid points
idx_finite = np.where(np.isfinite(dists))
dists_finite = dists[idx_finite]
idx = idx[idx_finite]
return dists_finite, idx
# Event handler functions
def _update_all_plots(self):
self.doc_layout.children[0] = self._contour_data()
self.doc_layout.children[1] = self._right_plot()
self.doc_layout2.children[0] = self._bottom_plot()
def _update_subplots(self):
self.doc_layout.children[1] = self._right_plot()
self.doc_layout2.children[0] = self._bottom_plot()
def _update(self, attr, old, new):
self._update_all_plots()
def _scatter_plots_update(self, attr, old, new):
self._update_subplots()
def _scatter_input(self, attr, old, new):
# Text input update function of dist range value
self.dist_range = float(new)
self._update_all_plots()
def _x_input_update(self, attr, old, new):
# Checks that x and y inputs are not equal to each other
if new == self.y_input_select.value:
raise ValueError("Inputs should not equal each other")
else:
self.x_input_select.value = new
self._update_all_plots()
def _y_input_update(self, attr, old, new):
# Checks that x and y inputs are not equal to each other
if new == self.x_input_select.value:
raise ValueError("Inputs should not equal each other")
else:
self.y_input_select.value = new
self._update_all_plots()
def _output_value_update(self, attr, old, new):
self.output_variable = self.output_names.index(new)
self._update_all_plots()
def view_metamodel(meta_model_comp, resolution, port_number):
"""
Visualize a metamodel.
Parameters
----------
meta_model_comp : MetaModelStructuredComp or MetaModelUnStructuredComp
The metamodel component.
resolution : int
Number of points to control contour plot resolution.
port_number : int
Bokeh plot port number.
"""
from bokeh.application.application import Application
from bokeh.application.handlers import FunctionHandler
def make_doc(doc):
MetaModelVisualization(meta_model_comp, resolution, doc=doc)
# print('Opening Bokeh application on http://localhost:5006/')
server = Server({'/': Application(FunctionHandler(make_doc))}, port=int(port_number))
server.io_loop.add_callback(server.show, "/")
server.io_loop.start()
``` |
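A minimal sketch of how `view_metamodel` might be invoked, assuming a trained `MetaModelStructuredComp` with at least two inputs (the visualizer picks its x/y axes from the component's inputs); the variable names, training data, and port below are illustrative assumptions, not part of the source:
```python
import numpy as np
import openmdao.api as om

# Illustrative two-input structured metamodel.
xt = np.linspace(0.0, 1.0, 20)
yt = np.linspace(0.0, 1.0, 20)
mm = om.MetaModelStructuredComp(method="slinear")
mm.add_input("x", 0.0, training_data=xt)
mm.add_input("y", 0.0, training_data=yt)
mm.add_output("f", 0.0, training_data=np.outer(np.sin(xt), np.cos(yt)))

# view_metamodel(mm, resolution=50, port_number=5006)  # serves a Bokeh app
```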
{
"source": "johnjasa/ORBIT",
"score": 2
} |
#### File: ORBIT/core/components.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import simpy
from ORBIT.core._defaults import process_times as pt
from ORBIT.core.exceptions import (
ItemNotFound,
CargoMassExceeded,
DeckSpaceExceeded,
InsufficientCable,
)
# TODO: __str__ methods for Components
class Crane:
"""Base Crane Class"""
def __init__(self, crane_specs):
"""
Creates an instance of Crane.
Parameters
----------
crane_specs : dict
Dictionary containing crane system specifications.
"""
self.extract_crane_specs(crane_specs)
def extract_crane_specs(self, crane_specs):
"""
Extracts and defines crane specifications.
Parameters
----------
crane_specs : dict
Dictionary of crane specifications.
"""
# Physical Dimensions
self.boom_length = crane_specs.get("boom_length", None)
self.radius = crane_specs.get("radius", None)
# Operational Parameters
self.max_lift = crane_specs.get("max_lift", None)
self.max_hook_height = crane_specs.get("max_hook_height", None)
self.max_windspeed = crane_specs.get("max_windspeed", 99)
self._crane_rate = crane_specs.get("crane_rate", 100)
def crane_rate(self, **kwargs):
"""Returns `self._crane_rate`."""
return self._crane_rate
@staticmethod
def reequip(**kwargs):
"""
Calculates time taken to change crane equipment.
Parameters
----------
crane_reequip_time : int | float
Time required to change crane equipment (h).
Returns
-------
reequip_time : float
Time required to change crane equipment (h).
"""
_key = "crane_reequip_time"
duration = kwargs.get(_key, pt[_key])
return duration
class JackingSys:
"""Base Jacking System Class"""
def __init__(self, jacksys_specs):
"""
Creates an instance of JackingSys.
Parameters
----------
jacksys_specs : dict
Dictionary containing jacking system specifications.
"""
self.extract_jacksys_specs(jacksys_specs)
def extract_jacksys_specs(self, jacksys_specs):
"""
Extracts and defines jacking system specifications.
Parameters
----------
jacksys_specs : dict
Dictionary containing jacking system specifications.
"""
# Physical Dimensions
self.num_legs = jacksys_specs.get("num_legs", None)
self.leg_length = jacksys_specs.get("leg_length", None)
self.air_gap = jacksys_specs.get("air_gap", None)
self.leg_pen = jacksys_specs.get("leg_pen", None)
# Operational Parameters
self.max_depth = jacksys_specs.get("max_depth", None)
self.max_extension = jacksys_specs.get("max_extension", None)
self.speed_below_depth = jacksys_specs.get("speed_below_depth", None)
self.speed_above_depth = jacksys_specs.get("speed_above_depth", None)
def jacking_time(self, extension, depth):
"""
Calculates jacking time for a given depth.
Parameters
----------
extension : int | float
Height to jack-up to or jack-down from (m).
depth : int | float
Depth at jack-up location (m).
Returns
-------
extension_time : float
Time required to jack-up to given extension (h).
"""
if extension > self.max_extension:
raise Exception(
"{} extension is greater than {} maximum"
"".format(extension, self.max_extension)
)
elif depth > self.max_depth:
raise Exception(
"{} is beyond the operating depth {}"
"".format(depth, self.max_depth)
)
elif depth > extension:
raise Exception("Extension must be greater than depth")
else:
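            # Speeds are per-minute rates; the sum is converted from minutes to hours.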
return (
depth / self.speed_below_depth
+ (extension - depth) / self.speed_above_depth
) / 60
class VesselStorage(simpy.FilterStore):
"""Vessel Storage Class"""
required_keys = ["type", "mass", "deck_space"]
def __init__(
self, env, max_cargo, max_deck_space, max_deck_load, **kwargs
):
"""
Creates an instance of VesselStorage.
Parameters
----------
env : simpy.Environment
SimPy environment that simulation runs on.
max_cargo : int | float
Maximum mass the storage system can carry (t).
max_deck_space : int | float
Maximum deck space the storage system can use (m2).
max_deck_load : int | float
Maximum deck load that the storage system can apply (t/m2).
"""
capacity = kwargs.get("capacity", float("inf"))
super().__init__(env, capacity)
self.max_cargo_mass = max_cargo
self.max_deck_space = max_deck_space
self.max_deck_load = max_deck_load
@property
def current_cargo_mass(self):
"""Returns current cargo mass in tons."""
return sum([item.mass for item in self.items])
@property
def current_deck_space(self):
"""Returns current deck space used in m2."""
return sum([item.deck_space for item in self.items])
def put_item(self, item):
"""
Checks VesselStorage specific constraints and triggers self.put()
if successful.
Items put into the instance should be a dictionary with the following
attributes:
- name
- mass (t)
- deck_space (m2)
Parameters
----------
item : dict
Dictionary of item properties.
"""
# if any(x not in item.keys() for x in self.required_keys):
# raise ItemPropertyNotDefined(item, self.required_keys)
if self.current_deck_space + item.deck_space > self.max_deck_space:
raise DeckSpaceExceeded(
self.max_deck_space, self.current_deck_space, item
)
if self.current_cargo_mass + item.mass > self.max_cargo_mass:
raise CargoMassExceeded(
self.max_cargo_mass, self.current_cargo_mass, item
)
self.put(item)
def get_item(self, _type):
"""
        Checks `self.items` for an item satisfying `item.type = _type`. Returns
        the item if found, otherwise raises `ItemNotFound`.
Parameters
----------
_type : str
Type of item to retrieve.
"""
target = None
for i in self.items:
if i.type == _type:
target = i
break
if not target:
raise ItemNotFound(_type)
else:
res = self.get(lambda x: x == target)
return res.value
def any_remaining(self, _type):
"""
        Checks `self.items` for an item satisfying `item.type = _type`. Returns
        True/False depending on whether an item is found. Used to trigger vessel
release if empty without having to wait for next self.get_item()
iteration.
Parameters
----------
_type : str
Type of item to retrieve.
Returns
-------
resp : bool
Indicates if any items in self.items satisfy `_type`.
"""
target = None
for i in self.items:
if i.type == _type:
target = i
break
if target:
return True
else:
return False
class ScourProtectionStorage(simpy.Container):
"""Scour Protection Storage Class"""
def __init__(self, env, max_mass, **kwargs):
"""
Creates an instance of VesselStorage.
Parameters
----------
env : simpy.Environment
SimPy environment that simulation runs on.
max_mass : int | float
Maximum mass the storage system can carry (t).
"""
self.max_mass = max_mass
super().__init__(env, self.max_mass)
@property
def available_capacity(self):
"""Returns available cargo capacity."""
return self.max_mass - self.level
class CableCarousel(simpy.Container):
"""Cable Storage Class"""
def __init__(self, env, max_mass, **kwargs):
"""
Creates an instance of CableCarousel.
Parameters
----------
env : simpy.Environment
SimPy environment that simulation runs on.
max_mass : int | float
Maximum mass the storage system can carry (t).
"""
self.cable = None
self.max_mass = max_mass
super().__init__(env)
@property
def available_mass(self):
"""Returns available cargo mass capacity."""
return self.max_mass - self.current_mass
@property
def current_mass(self):
"""Returns current cargo mass"""
try:
mass = self.level * self.cable.linear_density
return mass
except AttributeError:
return 0
def available_length(self, cable):
"""Returns available length capacity based on input linear density."""
return self.available_mass / cable.linear_density
def reset(self):
"""Resets `self.cable` and empties `self.level`."""
if self.level != 0.0:
_ = self.get(self.level)
self.cable = None
def load_cable(self, cable, length=None):
"""
Loads input `cable` type onto `self.level`. If `length` isn't passed,
defaults to maximum amount of cable that can be loaded.
Parameters
----------
cable : Cable | SimpleCable
length : int | float
Raises
------
ValueError
"""
if self.cable and self.cable != cable:
raise AttributeError("Carousel already has a cable type.")
self.cable = cable
if length is None:
# Load maximum amount
length = self.available_length(self.cable)
self.put(length)
else:
# Load length of cable
proposed = length * cable.linear_density
if proposed > self.available_mass:
raise ValueError(
f"Length {length} of {cable} can't be loaded."
)
self.put(length)
def get_cable(self, length):
"""
Retrieves `length` of cable from `self.level`.
Parameters
----------
length : int | float
Length of cable to retrieve.
Raises
------
InsufficientCable
"""
if self.cable is None:
raise AttributeError("Carousel doesn't have any cable.")
if length > self.level:
raise InsufficientCable(self.level, length)
else:
return self.get(length).amount
```
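As a quick illustration of `JackingSys.jacking_time`, a sketch with made-up specifications (all numbers below are assumptions for the example, not library defaults):
```python
# Hypothetical jacking system, speeds in m/min.
jacksys = JackingSys({
    "speed_below_depth": 1.0,
    "speed_above_depth": 2.0,
    "max_depth": 60.0,
    "max_extension": 80.0,
})

# Jack up to a 50 m extension in 40 m of water:
# (40 / 1.0 + (50 - 40) / 2.0) / 60 = 0.75 h
print(jacksys.jacking_time(extension=50.0, depth=40.0))
```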
#### File: ORBIT/core/port.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import simpy
from ORBIT.core.exceptions import ItemNotFound
class Port(simpy.FilterStore):
"""Port Class"""
def __init__(self, env, **kwargs):
"""
Creates an instance of Port.
Parameters
----------
env : simpy.Environment
SimPy environment that simulation runs on.
"""
capacity = kwargs.get("capacity", float("inf"))
super().__init__(env, capacity)
def get_item(self, _type):
"""
        Checks self.items for an item satisfying `item.type = _type`, otherwise
        raises `ItemNotFound`.
Parameters
----------
_type : str
Type of item to match. Checks `item.type`.
Returns
-------
res.value : FilterStoreGet.value
Returned item.
Raises
------
ItemNotFound
"""
target = None
for i in self.items:
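            # Items without a 'type' attribute (e.g. plain dicts) are skipped.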
try:
if i.type == _type:
target = i
break
except AttributeError:
continue
if not target:
raise ItemNotFound(_type)
else:
res = self.get(lambda x: x == target)
return res.value
class WetStorage(simpy.Store):
"""Storage infrastructure for floating substructures."""
def __init__(self, env, capacity):
"""
Creates an instance of WetStorage.
Parameters
----------
capacity : int
Number of substructures or assemblies that can be stored.
"""
super().__init__(env, capacity)
```
#### File: install/mooring_install/mooring.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from marmot import process
from ORBIT.core import Cargo, Vessel
from ORBIT.core.logic import position_onsite, get_list_of_items_from_port
from ORBIT.core._defaults import process_times as pt
from ORBIT.phases.install import InstallPhase
from ORBIT.core.exceptions import ItemNotFound
class MooringSystemInstallation(InstallPhase):
"""Module to model the installation of mooring systems at sea."""
phase = "Mooring System Installation"
#:
expected_config = {
"mooring_install_vessel": "dict | str",
"site": {"depth": "m", "distance": "km"},
"plant": {"num_turbines": "int"},
"mooring_system": {
"num_lines": "int",
"line_mass": "t",
"anchor_mass": "t",
"anchor_type": "str (optional, default: 'Suction Pile')",
},
}
def __init__(self, config, weather=None, **kwargs):
"""
Creates an instance of `MooringSystemInstallation`.
Parameters
----------
config : dict
Simulation specific configuration.
weather : np.array
Weather data at site.
"""
super().__init__(weather, **kwargs)
config = self.initialize_library(config, **kwargs)
self.config = self.validate_config(config)
self.extract_defaults()
self.setup_simulation(**kwargs)
def setup_simulation(self, **kwargs):
"""
Sets up the required simulation infrastructure:
- initializes port
- initializes installation vessel
- initializes mooring systems at port.
"""
self.initialize_port()
self.initialize_installation_vessel()
self.initialize_components()
depth = self.config["site"]["depth"]
distance = self.config["site"]["distance"]
install_mooring_systems(
self.vessel,
self.port,
distance,
depth,
self.number_systems,
**kwargs,
)
def initialize_installation_vessel(self):
"""Initializes the mooring system installation vessel."""
vessel_specs = self.config.get("mooring_install_vessel", None)
name = vessel_specs.get("name", "Mooring System Installation Vessel")
vessel = Vessel(name, vessel_specs)
self.env.register(vessel)
vessel.initialize()
vessel.at_port = True
vessel.at_site = False
self.vessel = vessel
def initialize_components(self):
"""Initializes the Cargo components at port."""
system = MooringSystem(**self.config["mooring_system"])
self.number_systems = self.config["plant"]["num_turbines"]
for _ in range(self.number_systems):
self.port.put(system)
@property
def detailed_output(self):
"""Detailed outputs of the scour protection installation."""
outputs = {self.phase: {**self.agent_efficiencies}}
return outputs
@process
def install_mooring_systems(vessel, port, distance, depth, systems, **kwargs):
"""
Logic for the Mooring System Installation Vessel.
Parameters
----------
vessel : Vessel
Mooring System Installation Vessel
port : Port
distance : int | float
        Distance between port and site (km).
    depth : int | float
        Depth at site (m).
    systems : int
Total systems to install.
"""
n = 0
while n < systems:
if vessel.at_port:
try:
# Get mooring systems from port.
yield get_list_of_items_from_port(
vessel, port, ["MooringSystem"], **kwargs
)
except ItemNotFound:
# If no items are at port and vessel.storage.items is empty,
# the job is done
if not vessel.storage.items:
vessel.submit_debug_log(
message="Item not found. Shutting down."
)
break
# Transit to site
vessel.update_trip_data()
vessel.at_port = False
yield vessel.transit(distance)
vessel.at_site = True
if vessel.at_site:
if vessel.storage.items:
system = yield vessel.get_item_from_storage(
"MooringSystem", **kwargs
)
for _ in range(system.num_lines):
yield position_onsite(vessel, **kwargs)
yield perform_mooring_site_survey(vessel, **kwargs)
yield install_mooring_anchor(
vessel, depth, system.anchor_type, **kwargs
)
yield install_mooring_line(vessel, depth, **kwargs)
n += 1
else:
# Transit to port
vessel.at_site = False
yield vessel.transit(distance)
vessel.at_port = True
vessel.submit_debug_log(message="Mooring systems installation complete!")
@process
def perform_mooring_site_survey(vessel, **kwargs):
"""
Calculates time required to perform a mooring system survey.
Parameters
----------
vessel : Vessel
Vessel to perform action.
Yields
------
vessel.task representing time to "Perform Mooring Site Survey".
"""
key = "mooring_site_survey_time"
survey_time = kwargs.get(key, pt[key])
yield vessel.task(
"Perform Mooring Site Survey",
survey_time,
constraints=vessel.transit_limits,
**kwargs,
)
@process
def install_mooring_anchor(vessel, depth, _type, **kwargs):
"""
Calculates time required to install a mooring system anchor.
Parameters
----------
vessel : Vessel
Vessel to perform action.
depth : int | float
Depth at site (m).
_type : str
Anchor type. 'Suction Pile' or 'Drag Embedment'.
Yields
------
vessel.task representing time to install mooring anchor.
"""
if _type == "Suction Pile":
key = "suction_pile_install_time"
task = "Install Suction Pile Anchor"
fixed = kwargs.get(key, pt[key])
elif _type == "Drag Embedment":
key = "drag_embed_install_time"
task = "Install Drag Embedment Anchor"
fixed = kwargs.get(key, pt[key])
else:
raise ValueError(
f"Mooring System Anchor Type: {_type} not recognized."
)
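    # Fixed time per anchor type plus a depth-dependent term of 0.005 h per meter.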
install_time = fixed + 0.005 * depth
yield vessel.task(
task, install_time, constraints=vessel.transit_limits, **kwargs
)
@process
def install_mooring_line(vessel, depth, **kwargs):
"""
Calculates time required to install a mooring system line.
Parameters
----------
vessel : Vessel
Vessel to perform action.
depth : int | float
Depth at site (m).
Yields
------
vessel.task representing time to install mooring line.
"""
install_time = 0.005 * depth
yield vessel.task(
"Install Mooring Line",
install_time,
constraints=vessel.transit_limits,
**kwargs,
)
class MooringSystem(Cargo):
"""Mooring System Cargo"""
def __init__(
self,
num_lines=None,
line_mass=None,
anchor_mass=None,
anchor_type="Suction Pile",
**kwargs,
):
"""Creates an instance of MooringSystem"""
self.num_lines = num_lines
self.line_mass = line_mass
self.anchor_mass = anchor_mass
self.anchor_type = anchor_type
self.deck_space = 0
@property
def mass(self):
"""Returns total system mass in t."""
return self.num_lines * (self.line_mass + self.anchor_mass)
@staticmethod
def fasten(**kwargs):
"""Dummy method to work with `get_list_of_items_from_port`."""
key = "mooring_system_load_time"
time = kwargs.get(key, pt[key])
return "Load Mooring System", time
@staticmethod
def release(**kwargs):
"""Dummy method to work with `get_list_of_items_from_port`."""
return "", 0
def anchor_install_time(self, depth):
"""
Returns time to install anchor. Varies by depth.
Parameters
----------
depth : int | float
Depth at site (m).
"""
if self.anchor_type == "Suction Pile":
fixed = 11
elif self.anchor_type == "Drag Embedment":
fixed = 5
else:
raise ValueError(
f"Mooring System Anchor Type: {self.anchor_type} not recognized."
)
return fixed + 0.005 * depth
```
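A short sketch of the `MooringSystem` cargo arithmetic, with illustrative masses:
```python
# Hypothetical system: 4 lines of 100 t each on 50 t suction pile anchors.
system = MooringSystem(num_lines=4, line_mass=100, anchor_mass=50)
print(system.mass)                      # 4 * (100 + 50) = 600 t
print(system.anchor_install_time(200))  # 11 + 0.005 * 200 = 12.0 h
```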
#### File: api/wisdem/test_fixed_wisdem_api.py
```python
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import openmdao.api as om
from ORBIT.api.wisdem import OrbitWisdemFixed
def test_wisdem_monopile_api_default():
prob = om.Problem()
prob.model = OrbitWisdemFixed()
prob.setup()
prob.run_driver()
prob.model.list_inputs()
prob.model.list_outputs()
```
#### File: tests/core/test_port.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import pytest
from marmot import Environment
from ORBIT.core import Port, Cargo
from ORBIT.core.exceptions import ItemNotFound
class SampleItem(Cargo):
def __init__(self):
pass
def test_port_creation():
env = Environment()
port = Port(env)
item = SampleItem()
port.put(item)
port.put(item)
items = [item for item in port.items if item.type == "SampleItem"]
assert len(items) == 2
def test_get_item():
env = Environment()
port = Port(env)
item = SampleItem()
port.put(item)
port.put(item)
returned = port.get_item("SampleItem")
assert returned == item
assert len(port.items) == 1
port.put({"type": "Not type Cargo"})
with pytest.raises(ItemNotFound):
_ = port.get_item("WrongItem")
_ = port.get_item("SampleItem")
with pytest.raises(ItemNotFound):
_ = port.get_item("SampleItem")
```
#### File: phases/design/test_cable.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import copy
import itertools
import numpy as np
import pytest
from ORBIT.phases.design._cables import Cable, Plant
cables = {
"empty": {},
"passes": {
"conductor_size": 400,
"current_capacity": 610,
"rated_voltage": 33,
"ac_resistance": 0.06,
"inductance": 0.375,
"capacitance": 225,
"linear_density": 35,
"cost_per_km": 300000,
"name": "passes",
},
}
plant_config_calculate_all_ring = {
"site": {"depth": 20},
"plant": {
"layout": "ring",
"row_spacing": 7,
"turbine_spacing": 5,
"num_turbines": 40,
},
"turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}
plant_config_calculate_all_grid = {
"site": {"depth": 20},
"plant": {
"layout": "grid",
"row_spacing": 7,
"turbine_spacing": 5,
"num_turbines": 40,
},
"turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}
plant_config_distance_provided_ring = {
"site": {"depth": 20},
"plant": {
"layout": "ring",
"row_distance": 0.4,
"turbine_distance": 0.1,
"num_turbines": 40,
"substation_distance": 0.2,
},
"turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}
plant_config_distance_provided_grid = {
"site": {"depth": 20},
"plant": {
"layout": "grid",
"row_distance": 0.4,
"turbine_distance": 0.1,
"num_turbines": 40,
"substation_distance": 0.2,
},
"turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}
plant_config_custom = {
"site": {"depth": 20},
"plant": {
"layout": "custom",
"row_distance": 0.4,
"turbine_distance": 0.1,
"num_turbines": 40,
"substation_distance": 0.2,
},
"turbine": {"rotor_diameter": 154, "turbine_rating": 10},
}
def test_cable_creation():
cable = Cable(cables["passes"])
assert cable
for r in cable.required:
assert getattr(cable, r, None) == cables["passes"][r]
def test_cable_required_inputs():
with pytest.raises(ValueError):
Cable(cables["empty"])
def test_power_factor():
c = copy.deepcopy(cables["passes"])
results = []
for i in itertools.product(
range(100, 1001, 150), # conductor size
np.arange(0.01, 0.91, 0.1), # ac_resistance
np.arange(0, 1, 0.15), # inductance
range(100, 1001, 150), # capacitance
):
c["conductor_size"] = i[0]
c["ac_resistance"] = i[1]
c["inductance"] = i[2]
c["capacitance"] = i[3]
cable = Cable(c)
results.append(cable.power_factor)
if any((a < 0) | (a > 1) for a in results):
raise Exception("Invalid Power Factor.")
@pytest.mark.parametrize(
"config",
(
plant_config_calculate_all_ring,
plant_config_calculate_all_grid,
plant_config_distance_provided_ring,
plant_config_distance_provided_grid,
),
ids=["calculate_ring", "calculate_grid", "provided_ring", "provided_grid"],
)
def test_plant_creation(config):
plant = Plant(config)
assert plant.turbine_rating == config["turbine"]["turbine_rating"]
assert plant.site_depth == config["site"]["depth"] / 1000.0
assert plant.layout == config["plant"]["layout"]
assert plant.num_turbines == config["plant"]["num_turbines"]
if "turbine_spacing" in config["plant"]:
td = (
config["turbine"]["rotor_diameter"]
* config["plant"]["turbine_spacing"]
/ 1000.0
)
else:
td = config["plant"]["turbine_distance"]
assert plant.turbine_distance == td
if "row_spacing" in config["plant"]:
if config["plant"]["layout"] == "grid":
rd = (
config["turbine"]["rotor_diameter"]
* config["plant"]["row_spacing"]
/ 1000.0
)
if config["plant"]["layout"] == "ring":
rd = td
else:
rd = config["plant"]["row_distance"]
assert plant.row_distance == rd
if "substation_distance" in config["plant"]:
sd = config["plant"]["substation_distance"]
else:
sd = td
assert plant.substation_distance == sd
def test_custom_plant_creation():
plant = Plant(plant_config_custom)
for attr in ("row_distance", "turbine_distance", "substation_distance"):
assert getattr(plant, attr, None) is None
```
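The property that the sweep in `test_power_factor` checks can also be spot-checked on the single `passes` spec defined above (a sketch; assumes ORBIT's `Cable` is importable as in this file):
```python
cable = Cable(cables["passes"])
assert 0 <= cable.power_factor <= 1
```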
#### File: install/turbine_install/test_turbine_install.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pandas as pd
import pytest
from tests.data import test_weather
from ORBIT.library import extract_library_specs
from ORBIT.core._defaults import process_times as pt
from ORBIT.phases.install import TurbineInstallation
config_wtiv = extract_library_specs("config", "turbine_install_wtiv")
config_long_mobilize = extract_library_specs(
"config", "turbine_install_long_mobilize"
)
config_wtiv_feeder = extract_library_specs("config", "turbine_install_feeder")
config_wtiv_multi_feeder = deepcopy(config_wtiv_feeder)
config_wtiv_multi_feeder["num_feeders"] = 2
@pytest.mark.parametrize(
"config",
(config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
def test_simulation_setup(config):
sim = TurbineInstallation(config)
assert sim.config == config
assert sim.env
assert sim.port.crane.capacity == config["port"]["num_cranes"]
assert sim.num_turbines == config["plant"]["num_turbines"]
t = len([i for i in sim.port.items if i.type == "TowerSection"])
assert t == sim.num_turbines
n = len([i for i in sim.port.items if i.type == "Nacelle"])
assert n == sim.num_turbines
b = len([i for i in sim.port.items if i.type == "Blade"])
assert b == sim.num_turbines * 3
@pytest.mark.parametrize(
"config",
(config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
def test_vessel_creation(config):
sim = TurbineInstallation(config)
assert sim.wtiv
assert sim.wtiv.jacksys
assert sim.wtiv.crane
assert sim.wtiv.storage
if config.get("feeder", None) is not None:
assert len(sim.feeders) == config["num_feeders"]
for feeder in sim.feeders:
assert feeder.jacksys
assert feeder.storage
@pytest.mark.parametrize(
"config, expected", [(config_wtiv, 72), (config_long_mobilize, 14 * 24)]
)
def test_vessel_mobilize(config, expected):
sim = TurbineInstallation(config)
assert sim.wtiv
mobilize = [a for a in sim.env.actions if a["action"] == "Mobilize"][0]
assert mobilize["duration"] == expected
@pytest.mark.parametrize(
"config",
(config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
@pytest.mark.parametrize(
"weather", (None, test_weather), ids=["no_weather", "test_weather"]
)
def test_for_complete_logging(weather, config):
sim = TurbineInstallation(config, weather=weather)
sim.run()
df = pd.DataFrame(sim.env.actions)
df = df.assign(shift=(df["time"] - df["time"].shift(1)))
for vessel in df["agent"].unique():
_df = df[df["agent"] == vessel].copy()
_df = _df.assign(shift=(_df["time"] - _df["time"].shift(1)))
assert (_df["shift"] - _df["duration"]).abs().max() < 1e-9
assert ~df["cost"].isnull().any()
_ = sim.agent_efficiencies
_ = sim.detailed_output
@pytest.mark.parametrize(
"config",
(config_wtiv, config_wtiv_feeder, config_wtiv_multi_feeder),
ids=["wtiv_only", "single_feeder", "multi_feeder"],
)
def test_for_complete_installation(config):
sim = TurbineInstallation(config)
sim.run()
installed_nacelles = len(
[a for a in sim.env.actions if a["action"] == "Attach Nacelle"]
)
assert installed_nacelles == sim.num_turbines
def test_kwargs():
sim = TurbineInstallation(config_wtiv)
sim.run()
baseline = sim.total_phase_time
keywords = [
"tower_section_fasten_time",
"tower_section_release_time",
"tower_section_attach_time",
"nacelle_fasten_time",
"nacelle_release_time",
"nacelle_attach_time",
"blade_fasten_time",
"blade_release_time",
"blade_attach_time",
"site_position_time",
"crane_reequip_time",
]
failed = []
for kw in keywords:
default = pt[kw]
kwargs = {kw: default + 2}
new_sim = TurbineInstallation(config_wtiv, **kwargs)
new_sim.run()
new_time = new_sim.total_phase_time
if new_time > baseline:
pass
else:
failed.append(kw)
if failed:
raise Exception(f"'{failed}' not affecting results.")
else:
assert True
def test_multiple_tower_sections():
sim = TurbineInstallation(config_wtiv)
sim.run()
baseline = len(
[a for a in sim.env.actions if a["action"] == "Attach Tower Section"]
)
two_sections = deepcopy(config_wtiv)
two_sections["turbine"]["tower"]["sections"] = 2
sim2 = TurbineInstallation(two_sections)
sim2.run()
new = len(
[a for a in sim2.env.actions if a["action"] == "Attach Tower Section"]
)
assert new == 2 * baseline
df = pd.DataFrame(sim.env.actions)
for vessel in df["agent"].unique():
vl = df[df["agent"] == vessel].copy()
vl = vl.assign(shift=(vl["time"] - vl["time"].shift(1)))
assert (vl["shift"] - vl["duration"]).abs().max() < 1e-9
```
#### File: ORBIT/tests/test_project_manager.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from copy import deepcopy
import pandas as pd
import pytest
from ORBIT import ProjectManager
from tests.data import test_weather
from ORBIT.library import extract_library_specs
from ORBIT.manager import ProjectProgress
from ORBIT.core.exceptions import (
MissingInputs,
PhaseNotFound,
WeatherProfileError,
PhaseDependenciesInvalid,
)
weather_df = pd.DataFrame(test_weather).set_index("datetime")
config = extract_library_specs("config", "project_manager")
complete_project = extract_library_specs("config", "complete_project")
### Top Level
@pytest.mark.parametrize("weather", (None, weather_df))
def test_complete_run(weather):
project = ProjectManager(config, weather=weather)
project.run_project()
actions = pd.DataFrame(project.project_actions)
phases = ["MonopileInstallation", "TurbineInstallation"]
assert all(p in list(actions["phase"]) for p in phases)
### Module Integrations
def test_for_required_phase_structure():
"""
Automated integration test to verify that all classes listed in
ProjectManager.possible_phases are structured correctly.
"""
for p in ProjectManager._install_phases:
assert isinstance(p.expected_config, dict)
for p in ProjectManager._design_phases:
assert isinstance(p.expected_config, dict)
assert isinstance(p.output_config, dict)
# TODO: Expand these tests
### Config Management
def test_phase_specific_definitions():
"""
Tests that phase specific information makes it to phase_config.
"""
project = ProjectManager(config)
phase_config = project.create_config_for_phase("MonopileInstallation")
assert phase_config["wtiv"]["name"] == "Phase Specific WTIV"
assert phase_config["site"]["distance"] == 500
phase_config = project.create_config_for_phase("TurbineInstallation")
assert phase_config["wtiv"]["name"] == "Example WTIV"
assert phase_config["site"]["distance"] == 50
project.run_project()
def test_expected_config_merging():
"""
Tests for merging of expected configs
"""
config1 = {
"site": {"distance": "float", "depth": "float"},
"plant": {"num_turbines": "int"},
}
config2 = {
"site": {"distance": "float", "wave_height": "float"},
"monopile": {"diameter": "float"},
}
config = ProjectManager.merge_dicts(config1, config2)
assert config == {
"site": {
"distance": "float",
"depth": "float",
"wave_height": "float",
},
"plant": {"num_turbines": "int"},
"monopile": {"diameter": "float"},
}
def test_find_key_match():
class SpecificTurbineInstallation:
expected_config = {}
TestProjectManager = deepcopy(ProjectManager)
TestProjectManager._install_phases.append(SpecificTurbineInstallation)
phase_dict = TestProjectManager.phase_dict()
assert "SpecificTurbineInstallation" in phase_dict.keys()
tests = [
("TurbineInstallation", "TurbineInstallation"),
("TurbineInstallation_Test", "TurbineInstallation"),
("TurbineInstallation Test", "TurbineInstallation"),
("TurbineInstallation Test_1", "TurbineInstallation"),
("SpecificTurbineInstallation", "SpecificTurbineInstallation"),
("SpecificTurbineInstallation_Test", "SpecificTurbineInstallation"),
("SpecificTurbineInstallation Test", "SpecificTurbineInstallation"),
("SpecificTurbineInstallation Test_1", "SpecificTurbineInstallation"),
]
for test in tests:
i, expected = test
response = TestProjectManager.find_key_match(i)
assert response.__name__ == expected
fails = [
"DifferentTurbineInstallation",
"Other TurbineInstallation",
"Extra Different TurbineInstallation_1",
]
for f in fails:
assert TestProjectManager.find_key_match(f) is None
### Overlapping Install Phases
def test_install_phase_start_parsing():
config_mixed_starts = deepcopy(config)
config_mixed_starts["install_phases"] = {
"MonopileInstallation": 0,
"TurbineInstallation": "10/22/2009",
"ArrayCableInstallation": ("MonopileInstallation", 0.5),
}
project = ProjectManager(config_mixed_starts, weather=weather_df)
defined, depends = project._parse_install_phase_values(
config_mixed_starts["install_phases"]
)
assert len(defined) == 2
assert len(depends) == 1
assert defined["MonopileInstallation"] == 0
assert defined["TurbineInstallation"] == 1
def test_chained_dependencies():
config_chained = deepcopy(config)
config_chained["spi_vessel"] = "test_scour_protection_vessel"
config_chained["scour_protection"] = {"tons_per_substructure": 200}
config_chained["install_phases"] = {
"ScourProtectionInstallation": 0,
"MonopileInstallation": ("ScourProtectionInstallation", 0.1),
"TurbineInstallation": ("MonopileInstallation", 0.5),
}
project = ProjectManager(config_chained)
project.run_project()
df = pd.DataFrame(project.project_actions)
sp = list(df.loc[df["phase"] == "ScourProtectionInstallation"]["time"])
mp = list(df.loc[df["phase"] == "MonopileInstallation"]["time"])
tu = list(df.loc[df["phase"] == "TurbineInstallation"]["time"])
assert min(sp) == 0
assert min(mp) == (max(sp) - min(sp)) * 0.1
assert min(tu) == (max(mp) - min(mp)) * 0.5 + min(mp)
@pytest.mark.parametrize(
"m_start, t_start", [(0, 0), (0, 100), (100, 100), (100, 200)]
)
def test_index_starts(m_start, t_start):
"""
Tests functionality related to passing index starts into 'install_phases' sub-dict.
"""
_target_diff = t_start - m_start
config_with_index_starts = deepcopy(config)
config_with_index_starts["install_phases"] = {
"MonopileInstallation": m_start,
"TurbineInstallation": t_start,
}
project = ProjectManager(config_with_index_starts)
project.run_project()
df = pd.DataFrame(project.project_actions)
_m = df.loc[df["phase"] == "MonopileInstallation"].iloc[0]
_t = df.loc[df["phase"] == "TurbineInstallation"].iloc[0]
_diff = (_t["time"] - _t["duration"]) - (_m["time"] - _m["duration"])
assert _diff == _target_diff
@pytest.mark.parametrize(
"m_start, t_start, expected",
[
(0, 0, 0),
(0, 1000, 1000),
(0, "05/01/2010", 4585),
("03/01/2010", "03/01/2010", 0),
("03/01/2010", "05/01/2010", 1464),
],
)
def test_start_dates_with_weather(m_start, t_start, expected):
config_with_defined_starts = deepcopy(config)
config_with_defined_starts["install_phases"] = {
"MonopileInstallation": m_start,
"TurbineInstallation": t_start,
}
project = ProjectManager(config_with_defined_starts, weather=weather_df)
project.run_project()
df = pd.DataFrame(project.project_actions)
_m = df.loc[df["phase"] == "MonopileInstallation"].iloc[0]
_t = df.loc[df["phase"] == "TurbineInstallation"].iloc[0]
_diff = (_t["time"] - _t["duration"]) - (_m["time"] - _m["duration"])
assert _diff == expected
def test_duplicate_phase_definitions():
config_with_duplicates = deepcopy(config)
config_with_duplicates["MonopileInstallation_1"] = {
"plant": {"num_turbines": 5}
}
config_with_duplicates["MonopileInstallation_2"] = {
"plant": {"num_turbines": 5},
"site": {"distance": 100},
}
config_with_duplicates["install_phases"] = {
"MonopileInstallation_1": 0,
"MonopileInstallation_2": 800,
"TurbineInstallation": 1600,
}
project = ProjectManager(config_with_duplicates)
project.run_project()
df = (
pd.DataFrame(project.project_actions)
.groupby(["phase", "action"])
.count()["time"]
)
assert df.loc[("MonopileInstallation_1", "Drive Monopile")] == 5
assert df.loc[("MonopileInstallation_2", "Drive Monopile")] == 5
assert df.loc[("TurbineInstallation", "Attach Tower Section")] == 10
### Design Phase Interactions
def test_design_phases():
config_with_design = deepcopy(config)
# Add MonopileDesign
config_with_design["design_phases"] = ["MonopileDesign"]
# Add required parameters
config_with_design["site"]["mean_windspeed"] = 9
config_with_design["turbine"]["rotor_diameter"] = 200
config_with_design["turbine"]["rated_windspeed"] = 10
config_with_design["monopile_design"] = {}
# Remove monopile sub dictionary
_ = config_with_design.pop("monopile")
project = ProjectManager(config_with_design)
project.run_project()
assert isinstance(project.config["monopile"], dict)
project = ProjectManager(config_with_design)
project.run_project()
### Outputs
def test_resolve_project_capacity():
# Missing turbine rating
config1 = {"plant": {"capacity": 600, "num_turbines": 40}}
out1 = ProjectManager.resolve_project_capacity(config1)
assert out1["plant"]["capacity"] == config1["plant"]["capacity"]
assert out1["plant"]["num_turbines"] == config1["plant"]["num_turbines"]
assert out1["turbine"]["turbine_rating"] == 15
# Missing plant capacity
config2 = {
"plant": {"num_turbines": 40},
"turbine": {"turbine_rating": 15},
}
out2 = ProjectManager.resolve_project_capacity(config2)
assert out2["plant"]["capacity"] == 600
assert out2["plant"]["num_turbines"] == config2["plant"]["num_turbines"]
assert (
out2["turbine"]["turbine_rating"]
== config2["turbine"]["turbine_rating"]
)
# Missing number of turbines
config3 = {"plant": {"capacity": 600}, "turbine": {"turbine_rating": 15}}
out3 = ProjectManager.resolve_project_capacity(config3)
assert out3["plant"]["capacity"] == config3["plant"]["capacity"]
assert out3["plant"]["num_turbines"] == 40
assert (
out3["turbine"]["turbine_rating"]
== config3["turbine"]["turbine_rating"]
)
# Test for float precision
config4 = {
"plant": {"capacity": 600, "num_turbines": 40},
"turbine": {"turbine_rating": 15.0},
}
out4 = ProjectManager.resolve_project_capacity(config4)
assert out4["plant"]["capacity"] == config4["plant"]["capacity"]
assert out4["plant"]["num_turbines"] == config4["plant"]["num_turbines"]
assert (
out4["turbine"]["turbine_rating"]
== config4["turbine"]["turbine_rating"]
)
# Non matching calculated value
config5 = {
"plant": {"capacity": 700, "num_turbines": 40},
"turbine": {"turbine_rating": 15.0},
}
with pytest.raises(AttributeError):
_ = ProjectManager.resolve_project_capacity(config5)
# Test for not enough information
config6 = {"plant": {"capacity": 600}}
out6 = ProjectManager.resolve_project_capacity(config6)
assert out6["plant"]["capacity"] == config6["plant"]["capacity"]
with pytest.raises(KeyError):
_ = out6["turbine"]["turbine_rating"]
with pytest.raises(KeyError):
_ = out6["plant"]["num_turbines"]
### Exceptions
def test_incomplete_config():
incomplete_config = deepcopy(config)
_ = incomplete_config["site"].pop("depth")
with pytest.raises(MissingInputs):
project = ProjectManager(incomplete_config)
project.run_project()
def test_wrong_phases():
wrong_phases = deepcopy(config)
wrong_phases["install_phases"].append("IncorrectPhaseName")
with pytest.raises(PhaseNotFound):
project = ProjectManager(wrong_phases)
project.run_project()
def test_bad_dates():
bad_dates = deepcopy(config)
bad_dates["install_phases"] = {
"MonopileInstallation": "03/01/2015",
"TurbineInstallation": "05/01/2015",
}
with pytest.raises(WeatherProfileError):
project = ProjectManager(bad_dates, weather=weather_df)
project.run_project()
def test_no_defined_start():
missing_start = deepcopy(config)
missing_start["install_phases"] = {
"MonopileInstallation": ("TurbineInstallation", 0.1),
"TurbineInstallation": ("MonopileInstallation", 0.1),
}
with pytest.raises(ValueError):
project = ProjectManager(missing_start)
project.run_project()
def test_circular_dependencies():
circular_deps = deepcopy(config)
circular_deps["spi_vessel"] = "test_scour_protection_vessel"
circular_deps["scour_protection"] = {"tons_per_substructure": 200}
circular_deps["install_phases"] = {
"ScourProtectionInstallation": 0,
"MonopileInstallation": ("TurbineInstallation", 0.1),
"TurbineInstallation": ("MonopileInstallation", 0.1),
}
with pytest.raises(PhaseDependenciesInvalid):
project = ProjectManager(circular_deps)
project.run_project()
def test_dependent_phase_ordering():
wrong_order = deepcopy(config)
wrong_order["spi_vessel"] = "test_scour_protection_vessel"
wrong_order["scour_protection"] = {"tons_per_substructure": 200}
wrong_order["install_phases"] = {
"ScourProtectionInstallation": ("TurbineInstallation", 0.1),
"TurbineInstallation": ("MonopileInstallation", 0.1),
"MonopileInstallation": 0,
}
project = ProjectManager(wrong_order)
project.run_project()
assert len(project.phase_times) == 3
def test_ProjectProgress():
data = [
("Export System", 10),
("Offshore Substation", 20),
("Array String", 15),
("Array String", 25),
("Turbine", 5),
("Turbine", 10),
("Turbine", 15),
("Turbine", 20),
("Turbine", 25),
("Substructure", 6),
("Substructure", 9),
("Substructure", 14),
("Substructure", 22),
("Substructure", 26),
]
progress = ProjectProgress(data)
assert progress.parse_logs("Export System") == [10]
turbines = progress.parse_logs("Turbine")
assert len(turbines) == 5
chunks = list(progress.chunk_max(turbines, 2))
assert chunks[0] == 10
assert chunks[1] == 20
assert chunks[2] == 25
assert progress.complete_export_system == 20
times, _ = progress.complete_array_strings
assert times == [15, 26]
times, turbines = progress.energize_points
assert times == [20, 26]
assert sum(turbines) == 5
def test_ProjectProgress_with_incomplete_project():
project = ProjectManager(config)
project.run_project()
_ = project.progress.parse_logs("Substructure")
_ = project.progress.parse_logs("Turbine")
with pytest.raises(ValueError):
project.progress.complete_export_system
with pytest.raises(ValueError):
project.progress.complete_array_strings
def test_ProjectProgress_with_complete_project():
project = ProjectManager(complete_project)
project.run_project()
_ = project.progress.parse_logs("Substructure")
_ = project.progress.parse_logs("Turbine")
_ = project.progress.parse_logs("Array String")
_ = project.progress.parse_logs("Export System")
_ = project.progress.parse_logs("Offshore Substation")
_ = project.progress.complete_export_system
_ = project.progress.complete_array_strings
_ = project.progress.energize_points
new = deepcopy(complete_project)
new["plant"]["num_turbines"] = 61
# Uneven strings
project = ProjectManager(new)
project.run_project()
_ = project.progress.energize_points
def test_monthly_expenses():
project = ProjectManager(complete_project)
project.run_project()
_ = project.monthly_expenses
# Still report expenses for "incomplete" project
config = deepcopy(complete_project)
_ = config["install_phases"].pop("TurbineInstallation")
project = ProjectManager(config)
project.run_project()
_ = project.monthly_expenses
def test_monthly_revenue():
project = ProjectManager(complete_project)
project.run_project()
_ = project.monthly_revenue
# Can't generate revenue with "incomplete" project
config = deepcopy(complete_project)
_ = config["install_phases"].pop("TurbineInstallation")
project = ProjectManager(config)
project.run_project()
with pytest.raises(ValueError):
_ = project.monthly_revenue
def test_cash_flow():
project = ProjectManager(complete_project)
project.run_project()
_ = project.cash_flow
# Can't generate revenue with "incomplete" project but cash flow will still
# be reported
config = deepcopy(complete_project)
_ = config["install_phases"].pop("TurbineInstallation")
project = ProjectManager(config)
project.run_project()
cash_flow = project.cash_flow
assert all(v <= 0 for v in cash_flow.values())
def test_npv():
project = ProjectManager(complete_project)
project.run_project()
baseline = project.npv
config = deepcopy(complete_project)
config["ncf"] = 0.35
project = ProjectManager(config)
project.run_project()
assert project.npv != baseline
config = deepcopy(complete_project)
config["offtake_price"] = 70
project = ProjectManager(config)
project.run_project()
assert project.npv != baseline
config = deepcopy(complete_project)
config["project_lifetime"] = 30
project = ProjectManager(config)
project.run_project()
assert project.npv != baseline
config = deepcopy(complete_project)
config["discount_rate"] = 0.03
project = ProjectManager(config)
project.run_project()
assert project.npv != baseline
config = deepcopy(complete_project)
config["opex_rate"] = 120
project = ProjectManager(config)
project.run_project()
assert project.npv != baseline
``` |
{
"source": "johnjasa/WISDEM",
"score": 4
} |
#### File: docs/docstrings/getdocstrings.py
```python
__all__ = ("get_docstrings", "print_docstrings")
import ast
from os.path import basename, splitext
from itertools import groupby
NODE_TYPES = {ast.ClassDef: "Class", ast.FunctionDef: "Function/Method", ast.Module: "Module"}
def get_docstrings(source):
"""Parse Python source code and yield a tuple of ast node instance, name,
line number and docstring for each function/method, class and module.
The line number refers to the first line of the docstring. If there is
    no docstring, it gives the first line of the class, function or method
block, and docstring is None.
"""
tree = ast.parse(source)
for node in ast.walk(tree):
if isinstance(node, tuple(NODE_TYPES)):
docstring = ast.get_docstring(node)
lineno = getattr(node, "lineno", None)
if node.body and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Str):
# lineno attribute of docstring node is where string ends
lineno = node.body[0].lineno - len(node.body[0].value.s.splitlines()) + 1
yield (node, getattr(node, "name", None), lineno, docstring)
def print_docstrings(source, module="<string>", print_flag=True):
"""Parse Python source code from file or string and print docstrings.
For each class, method or function and the module, prints a heading with
the type, name and line number and then the docstring with normalized
indentation.
The module name is determined from the filename, or, if the source is passed
as a string, from the optional `module` argument.
The line number refers to the first line of the docstring, if present,
    or the first line of the class, function or method block, if there is none.
Output is ordered by type first, then name.
"""
if hasattr(source, "read"):
filename = getattr(source, "name", module)
module = splitext(basename(filename))[0]
source = source.read()
docstrings = sorted(get_docstrings(source), key=lambda x: (NODE_TYPES.get(type(x[0])), x[1]))
grouped = groupby(docstrings, key=lambda x: NODE_TYPES.get(type(x[0])))
if print_flag:
for type_, group in grouped:
for node, name, lineno, docstring in group:
name = name if name else module
heading = "%s '%s', line %s" % (type_, name, lineno or "?")
print(heading)
print("-" * len(heading))
print("")
print(docstring or "")
print("\n")
return grouped
if __name__ == "__main__":
import sys
with open(sys.argv[1]) as fp:
print_docstrings(fp)
```
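`print_docstrings` also accepts source passed as a string; a minimal sketch:
```python
# Inline source instead of a file; the module name is supplied explicitly.
src = 'def add(a, b):\n    """Return a + b."""\n    return a + b\n'
print_docstrings(src, module="example")
```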
#### File: examples/04_openmdao/betz_limit.py
```python
import openmdao.api as om
# --
# Specify the Actuator Disc theory in a derived OpenMDAO class
class ActuatorDisc(om.ExplicitComponent):
# Inputs and Outputs
def setup(self):
        # Inputs into the model
        self.add_input("a", 0.5, desc="Induced velocity factor")
self.add_input("Area", 10.0, units="m**2", desc="Rotor disc area")
self.add_input("rho", 1.225, units="kg/m**3", desc="Air density")
self.add_input("Vu", 10.0, units="m/s", desc="Freestream air velocity, upstream of rotor")
# Outputs
self.add_output("Vr", 0.0, units="m/s", desc="Air velocity at rotor exit plane")
self.add_output("Vd", 0.0, units="m/s", desc="Slipstream air velocity, downstream of rotor")
self.add_output("Ct", 0.0, desc="Thrust coefficient")
self.add_output("Cp", 0.0, desc="Power coefficient")
self.add_output("power", 0.0, units="W", desc="Power produced by the rotor")
self.add_output("thrust", 0.0, units="m/s")
# Declare which outputs are dependent on which inputs
self.declare_partials("Vr", ["a", "Vu"])
self.declare_partials("Vd", "a")
self.declare_partials("Ct", "a")
self.declare_partials("thrust", ["a", "Area", "rho", "Vu"])
self.declare_partials("Cp", "a")
self.declare_partials("power", ["a", "Area", "rho", "Vu"])
# --------
# Core theory
def compute(self, inputs, outputs):
a = inputs["a"]
Vu = inputs["Vu"]
rho = inputs["rho"]
Area = inputs["Area"]
qA = 0.5 * rho * Area * Vu ** 2
outputs["Vd"] = Vd = Vu * (1 - 2 * a)
outputs["Vr"] = 0.5 * (Vu + Vd)
outputs["Ct"] = Ct = 4 * a * (1 - a)
outputs["thrust"] = Ct * qA
outputs["Cp"] = Cp = Ct * (1 - a)
outputs["power"] = Cp * qA * Vu
# --------
# Derivatives of outputs wrt inputs
def compute_partials(self, inputs, J):
a = inputs["a"]
Vu = inputs["Vu"]
Area = inputs["Area"]
rho = inputs["rho"]
a_times_area = a * Area
one_minus_a = 1.0 - a
a_area_rho_vu = a_times_area * rho * Vu
J["Vr", "a"] = -Vu
J["Vr", "Vu"] = one_minus_a
J["Vd", "a"] = -2.0 * Vu
J["Ct", "a"] = 4.0 - 8.0 * a
J["thrust", "a"] = 0.5 * rho * Vu ** 2 * Area * J["Ct", "a"]
J["thrust", "Area"] = 2.0 * Vu ** 2 * a * rho * one_minus_a
J["thrust", "Vu"] = 4.0 * a_area_rho_vu * one_minus_a
J["Cp", "a"] = 4.0 * a * (2.0 * a - 2.0) + 4.0 * one_minus_a ** 2
J["power", "a"] = (
2.0 * Area * Vu ** 3 * a * rho * (2.0 * a - 2.0) + 2.0 * Area * Vu ** 3 * rho * one_minus_a ** 2
)
J["power", "Area"] = 2.0 * Vu ** 3 * a * rho * one_minus_a ** 2
J["power", "rho"] = 2.0 * a_times_area * Vu ** 3 * (one_minus_a) ** 2
J["power", "Vu"] = 6.0 * Area * Vu ** 2 * a * rho * one_minus_a ** 2
# -- end the class
# Optional: include underlying model in a group with Independent Variables
class Betz(om.Group):
"""
Group containing the actuator disc equations for deriving the Betz limit.
"""
def setup(self):
indeps = self.add_subsystem("indeps", om.IndepVarComp(), promotes=["*"])
indeps.add_output("a", 0.5)
indeps.add_output("Area", 10.0, units="m**2")
indeps.add_output("rho", 1.225, units="kg/m**3")
indeps.add_output("Vu", 10.0, units="m/s")
self.add_subsystem("a_disk", ActuatorDisc(), promotes=["a", "Area", "rho", "Vu"])
# --------
# Instantiate the model
prob = om.Problem()
prob.model = Betz()
# -----
# Specify the optimization 'driver'
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options["optimizer"] = "SLSQP"
# -----
# Assign objective and design variables
prob.model.add_design_var("a", lower=0.0, upper=1.0)
prob.model.add_design_var("Area", lower=0.0, upper=1.0)
prob.model.add_objective("a_disk.Cp", scaler=-1.0)
# -----
# Execute!
prob.setup()
prob.run_driver()
# --------
# Display the output
print("Coefficient of power Cp = ", prob["a_disk.Cp"])
print("Induction factor a =", prob["a"])
print("Rotor disc Area =", prob["Area"], "m^2")
prob.model.list_inputs(units=True)
prob.model.list_outputs(units=True)
# --------
```
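The driver's optimum can be checked against the analytic Betz limit: Cp = Ct * (1 - a) = 4a(1 - a)^2, and dCp/da = 4(1 - a)(1 - 3a) = 0 gives a = 1/3, so Cp = 16/27 ≈ 0.593.
```python
# Analytic optimum for comparison with the driver output above.
a_opt = 1.0 / 3.0
print(4.0 * a_opt * (1.0 - a_opt) ** 2)  # 16/27 ≈ 0.5926
```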
#### File: wisdem/commonse/akima.py
```python
import numpy as np
def abs_smooth_dv(x, x_deriv, delta_x):
"""
Compute the absolute value in a smooth differentiable manner.
The valley is rounded off using a quadratic function.
Parameters
----------
x : float
Quantity value
x_deriv : float
Derivative value
delta_x : float
Half width of the rounded section.
Returns
-------
float
Smooth absolute value of the quantity.
float
Smooth absolute value of the derivative.
"""
if x >= delta_x:
y_deriv = x_deriv
y = x
elif x <= -delta_x:
y_deriv = -x_deriv
y = -x
else:
y_deriv = 2.0 * x * x_deriv / (2.0 * delta_x)
y = x ** 2 / (2.0 * delta_x) + delta_x / 2.0
return y, y_deriv
def akima_interp_with_derivs(xpt, ypt, x, delta_x=0.1):
a = Akima(xpt, ypt, delta_x)
return a.interp(x)
class Akima(object):
def __init__(self, xpt, ypt, delta_x=0.1, eps=1e-30):
"""
Train the akima spline and save the derivatives.
Conversion of fortran function AKIMA_DV.
Parameters
----------
xpt : ndarray
Values at which the akima spline was trained.
ypt : ndarray
Training values for the akima spline.
"""
xpt = np.array(xpt)
ncp = np.size(xpt)
nbdirs = 2 * ncp
ypt = np.array(ypt)
self.flatFlag = ypt.ndim == 1
if self.flatFlag:
assert xpt.size == ypt.size
ypt = ypt.reshape((1, ypt.size))
if ypt.shape[0] == ncp:
ypt = ypt.T
vec_size = ypt.shape[0]
# Poly points and derivs
p1 = np.empty((vec_size, ncp - 1), dtype=ypt.dtype)
p2 = np.empty((vec_size, ncp - 1), dtype=ypt.dtype)
p3 = np.empty((vec_size, ncp - 1), dtype=ypt.dtype)
p0d = np.empty((vec_size, nbdirs, ncp - 1), dtype=ypt.dtype)
p1d = np.empty((vec_size, nbdirs, ncp - 1), dtype=ypt.dtype)
p2d = np.empty((vec_size, nbdirs, ncp - 1), dtype=ypt.dtype)
p3d = np.empty((vec_size, nbdirs, ncp - 1), dtype=ypt.dtype)
md = np.zeros((nbdirs, ncp + 3), dtype=ypt.dtype)
m = np.zeros((ncp + 3,), dtype=ypt.dtype)
td = np.zeros((nbdirs, ncp), dtype=ypt.dtype)
t = np.zeros((ncp,), dtype=ypt.dtype)
xptd = np.vstack([np.eye(ncp, dtype=ypt.dtype), np.zeros((ncp, ncp), dtype=ypt.dtype)])
yptd = np.vstack([np.zeros((ncp, ncp), dtype=ypt.dtype), np.eye(ncp, dtype=ypt.dtype)])
dx = xpt[1:] - xpt[:-1]
dx2 = dx ** 2
dxd = xptd[:, 1:] - xptd[:, :-1]
p0 = ypt[:, :-1]
for jj in range(vec_size):
ypt_jj = ypt[jj, :]
# Compute segment slopes
temp = (yptd[:, 1:] - yptd[:, :-1]) * (xpt[1:] - xpt[:-1]) - (ypt_jj[1:] - ypt_jj[:-1]) * (
xptd[:, 1:] - xptd[:, :-1]
)
md[:, 2 : ncp + 1] = np.divide(
temp, (xpt[1:] - xpt[:-1]) ** 2, out=np.zeros_like(temp), where=(xpt[1:] - xpt[:-1]) != 0.0
)
# m[2:ncp + 1] = (ypt_jj[1:] - ypt_jj[:-1]) / (xpt[1:] - xpt[:-1])
m[2 : ncp + 1] = np.divide(
ypt_jj[1:] - ypt_jj[:-1],
xpt[1:] - xpt[:-1],
out=np.zeros_like(ypt_jj[1:]),
where=(xpt[1:] - xpt[:-1]) != 0.0,
)
# Estimation for end points.
md[:, 1] = 2.0 * md[:, 2] - md[:, 3]
md[:, 0] = 2.0 * md[:, 1] - md[:, 2]
md[:, ncp + 1] = 2.0 * md[:, ncp] - md[:, ncp - 1]
md[:, ncp + 2] = 2.0 * md[:, ncp + 1] - md[:, ncp]
m[1] = 2.0 * m[2] - m[3]
m[0] = 2.0 * m[1] - m[2]
m[ncp + 1] = 2.0 * m[ncp] - m[ncp - 1]
m[ncp + 2] = 2.0 * m[ncp + 1] - m[ncp]
# Slope at points.
for i in range(2, ncp + 1):
m1d = md[:, i - 2]
m2d = md[:, i - 1]
m3d = md[:, i]
m4d = md[:, i + 1]
arg1d = m4d - m3d
m1 = m[i - 2]
m2 = m[i - 1]
m3 = m[i]
m4 = m[i + 1]
arg1 = m4 - m3
w1, w1d = abs_smooth_dv(arg1, arg1d, delta_x)
arg1d = m2d - m1d
arg1 = m2 - m1
w2, w2d = abs_smooth_dv(arg1, arg1d, delta_x)
if w1 < eps and w2 < eps:
# Special case to avoid divide by zero.
td[:, i - 2] = 0.5 * (m2d + m3d)
t[i - 2] = 0.5 * (m2 + m3)
else:
td[:, i - 2] = (
(w1d * m2 + w1 * m2d + w2d * m3 + w2 * m3d) * (w1 + w2) - (w1 * m2 + w2 * m3) * (w1d + w2d)
) / (w1 + w2) ** 2
t[i - 2] = (w1 * m2 + w2 * m3) / (w1 + w2)
# Polynomial Coefficients
t1 = t[:-1]
t2 = t[1:]
p1[jj, :] = t1
p2[jj, :] = np.divide(
(3.0 * m[2 : ncp + 1] - 2.0 * t1 - t2), dx, out=np.zeros_like(p2[jj, :]), where=dx != 0.0
) # (3.0 * m[2:ncp + 1] - 2.0 * t1 - t2) / dx
p3[jj, :] = np.divide(
(t1 + t2 - 2.0 * m[2 : ncp + 1]), dx2, out=np.zeros_like(p3[jj, :]), where=dx2 != 0.0
) # (t1 + t2 - 2.0 * m[2:ncp + 1]) / dx2
p0d[jj, ...] = yptd[:, :-1]
p1d[jj, ...] = td[:, :-1]
temp = (3.0 * md[:, 2 : ncp + 1] - 2.0 * td[:, :-1] - td[:, 1:]) * dx - (
3.0 * m[2 : ncp + 1] - 2.0 * t1 - t2
) * dxd
p2d[jj, ...] = np.divide(temp, dx2, out=np.zeros_like(p2d[jj, ...]), where=dx2 != 0.0)
temp = (td[:, :-1] + td[:, 1:] - 2.0 * md[:, 2 : ncp + 1]) * dx2 - (
t1 + t2 - 2.0 * m[2 : ncp + 1]
) * 2 * dx * dxd
p3d[jj, ...] = np.divide(temp, dx2 ** 2, out=np.zeros_like(p3d[jj, ...]), where=dx2 != 0.0)
self.xpt = xpt
self.p0 = p0
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.dp0_dxcp = p0d[:, :ncp, :].transpose((0, 2, 1))
self.dp0_dycp = p0d[:, ncp:, :].transpose((0, 2, 1))
self.dp1_dxcp = p1d[:, :ncp, :].transpose((0, 2, 1))
self.dp1_dycp = p1d[:, ncp:, :].transpose((0, 2, 1))
self.dp2_dxcp = p2d[:, :ncp, :].transpose((0, 2, 1))
self.dp2_dycp = p2d[:, ncp:, :].transpose((0, 2, 1))
self.dp3_dxcp = p3d[:, :ncp, :].transpose((0, 2, 1))
self.dp3_dycp = p3d[:, ncp:, :].transpose((0, 2, 1))
def __call__(self, x):
return self.interp(x)
def interp(self, x):
xcp = self.xpt
ncp = np.size(xcp)
n = np.size(x)
vec_size = self.p0.shape[0]
p0 = self.p0
p1 = self.p1
p2 = self.p2
p3 = self.p3
# All vectorized points uses same grid, so find these once.
j_idx = np.zeros(n, dtype=np.int_)
for i in range(n):
# Find location in array (use end segments if out of bounds)
if x[i] < xcp[0]:
j = 0
else:
# Linear search for now
for j in range(ncp - 2, -1, -1):
if x[i] >= xcp[j]:
break
j_idx[i] = j
dx = x - xcp[j_idx]
dx2 = dx * dx
dx3 = dx2 * dx
# Evaluate polynomial (and derivative)
y = p0[:, j_idx] + p1[:, j_idx] * dx + p2[:, j_idx] * dx2 + p3[:, j_idx] * dx3
dydx = p1[:, j_idx] + 2.0 * p2[:, j_idx] * dx + 3.0 * p3[:, j_idx] * dx2
dydxcp = (
self.dp0_dxcp[:, j_idx, :]
+ np.einsum("kij,i->kij", self.dp1_dxcp[:, j_idx, :], dx)
+ np.einsum("kij,i->kij", self.dp2_dxcp[:, j_idx, :], dx2)
+ np.einsum("kij,i->kij", self.dp3_dxcp[:, j_idx, :], dx3)
)
for jj in range(vec_size):
for i in range(n):
j = j_idx[i]
dydxcp[jj, i, j] -= dydx[jj, i]
dydycp = (
self.dp0_dycp[:, j_idx, :]
+ np.einsum("kij,i->kij", self.dp1_dycp[:, j_idx, :], dx)
+ np.einsum("kij,i->kij", self.dp2_dycp[:, j_idx, :], dx2)
+ np.einsum("kij,i->kij", self.dp3_dycp[:, j_idx, :], dx3)
)
if self.flatFlag:
y = np.squeeze(y)
dydx = np.squeeze(dydx)
dydxcp = np.squeeze(dydxcp)
dydycp = np.squeeze(dydycp)
return (y, dydx, dydxcp, dydycp)
```
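A minimal sketch of the `Akima` interface, fitting a sine at 10 control points and querying values plus derivatives:
```python
import numpy as np

xpt = np.linspace(0.0, np.pi, 10)
ypt = np.sin(xpt)
x = np.linspace(0.0, np.pi, 50)

# interp returns values and derivatives w.r.t. x and both control-point vectors.
y, dydx, dydxcp, dydycp = Akima(xpt, ypt).interp(x)
# Equivalently: akima_interp_with_derivs(xpt, ypt, x)
```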
#### File: wisdem/commonse/cross_sections.py
```python
from __future__ import print_function
import numpy as np
from wisdem.commonse.utilities import nodal2sectional
import openmdao.api as om
class CylindricalShellProperties(om.ExplicitComponent):
"""
    OpenMDAO wrapper for tube class to obtain cylindrical shell properties.
Parameters
----------
d : numpy array[nFull], [m]
tower diameter at corresponding locations
t : numpy array[nFull-1], [m]
shell thickness at corresponding locations
Returns
-------
Az : numpy array[nFull-1], [m**2]
cross-sectional area
Asx : numpy array[nFull-1], [m**2]
x shear area
Asy : numpy array[nFull-1], [m**2]
y shear area
Jz : numpy array[nFull-1], [m**4]
polar moment of inertia
Ixx : numpy array[nFull-1], [m**4]
area moment of inertia about x-axis
Iyy : numpy array[nFull-1], [m**4]
area moment of inertia about y-axis
"""
def initialize(self):
self.options.declare("nFull")
def setup(self):
nFull = self.options["nFull"]
self.add_input("d", np.zeros(nFull), units="m")
self.add_input("t", np.zeros(nFull - 1), units="m")
self.add_output("Az", np.zeros(nFull - 1), units="m**2")
self.add_output("Asx", np.zeros(nFull - 1), units="m**2")
self.add_output("Asy", np.zeros(nFull - 1), units="m**2")
self.add_output("Jz", np.zeros(nFull - 1), units="m**4")
self.add_output("Ixx", np.zeros(nFull - 1), units="m**4")
self.add_output("Iyy", np.zeros(nFull - 1), units="m**4")
# Derivatives
self.declare_partials("*", "*", method="fd", form="central", step=1e-6)
def compute(self, inputs, outputs):
d, _ = nodal2sectional(inputs["d"])
tube = Tube(d, inputs["t"])
outputs["Az"] = tube.Area
outputs["Asx"] = tube.Asx
outputs["Asy"] = tube.Asy
outputs["Jz"] = tube.J0
outputs["Ixx"] = tube.Jxx
outputs["Iyy"] = tube.Jyy
class Tube:
"""The Tube Class contains functions to calculate properties of tubular circular cross-sections
for structural analyses."""
def __init__(self, D, t, Lgth=np.NaN, Kbuck=1.0):
self.D = D
self.t = t
        self.L = Lgth * np.ones(np.size(D))  # this makes sure we expand Lgth if D, t are arrays
        self.Kbuck = Kbuck * np.ones(np.size(D))  # this makes sure we expand Kbuck if D, t are arrays
@property
def Area(self): # Cross sectional area of tube
return (self.D ** 2 - (self.D - 2 * self.t) ** 2) * np.pi / 4
@property
def derivArea(self):
return {"D": 2 * np.pi / 4 * (self.D ** 2 - (self.D - 2 * self.t)) * (2 * self.D - 1), "t": 0}
@property
def Amid(self): # mid-thickness inscribed area of tube (thin wall torsion calculation)
return (self.D - self.t) ** 2 * np.pi / 4
@property
def Jxx(self): # 2nd area moment of inertia w.r.t. x-x axis (Jxx=Jyy for tube)
return (self.D ** 4 - (self.D - 2 * self.t) ** 4) * np.pi / 64
@property
    def Jyy(self):  # 2nd area moment of inertia w.r.t. y-y axis (Jxx=Jyy for tube)
return self.Jxx
@property
def J0(self): # polar moment of inertia w.r.t. z-z axis (torsional)
return 2.0 * self.Jxx
@property
def Asy(self): # Shear Area for tubular cross-section
Ri = self.D / 2 - self.t
Ro = self.D / 2
return self.Area / (1.124235 + 0.055610 * (Ri / Ro) + 1.097134 * (Ri / Ro) ** 2 - 0.630057 * (Ri / Ro) ** 3)
@property
def Asx(self): # Shear Area for tubular cross-section
return self.Asy
@property
def BdgMxx(self): # Bending modulus for tubular cross-section
return self.Jxx / (self.D / 2)
@property
def BdgMyy(self): # Bending modulus for tubular cross-section =BdgMxx
return self.Jyy / (self.D / 2)
@property
def TorsConst(self): # Torsion shear constant for tubular cross-section
return self.J0 / (self.D / 2)
@property
def S(self): # Bending modulus for tubular cross-section
return self.BdgMxx
@property
def C(self): # Torsion shear constant for tubular cross-section
return self.TorsConst
@property
def Rgyr(self): # Radius of Gyration for circular tube
return np.sqrt(self.Jxx / self.Area)
@property
def Klr(self): # Klr buckling parameter
return self.Kbuck * self.L / self.Rgyr
class IBeam:
def __init__(self, L_flange, t_flange, H_web, t_web):
self.Lf = L_flange
self.tf = t_flange
self.Hw = H_web
self.tw = t_web
self.H = H_web + 2 * t_flange
    @property
    def AreaFlange(self):  # cross-sectional area of one flange
        return self.Lf * self.tf
    @property
    def AreaWeb(self):  # cross-sectional area of the web
        return self.Hw * self.tw
    @property
    def Area(self):  # total cross-sectional area of the I-beam
        return self.AreaWeb + 2 * self.AreaFlange
@property
def Iyy(self): # 2nd area moment of inertia w.r.t. y-y axis running parallel to flange through CG
return (self.Lf * self.H ** 3 - (self.Lf - self.tw) * self.Hw ** 3) / 12.0
@property
def Izz(self): # 2nd area moment of inertia w.r.t. z-z running through center of web
return (2 * self.tw * self.Lf ** 3 + self.Hw * self.tw ** 3) / 12.0
    @property
    def Jxx(self):  # torsion constant of the I-beam about the beam axis
        return 2 * self.Lf * self.tf ** 3 + self.H * self.tw ** 3
    @property
    def Asy(self):  # shear area of the I-beam along y (flanges)
        return 1.64 * self.Lf * self.tf
    @property
    def Asz(self):  # shear area of the I-beam along z (web)
        return self.tw * self.H
    @property
    def BdgMyy(self):  # bending (section) modulus of the I-beam about the y-y axis
        return 2 * self.Iyy / self.H
    @property
    def BdgMzz(self):  # bending (section) modulus of the I-beam about the z-z axis
        return 2 * self.Izz / self.Lf
    @property
    def TorsConst(self):  # torsion shear constant of the I-beam
        return self.Jxx / (1.28 * self.tf)
    @property
    def Syy(self):  # alias for the bending modulus about y-y
        return self.BdgMyy
    @property
    def Szz(self):  # alias for the bending modulus about z-z
        return self.BdgMzz
    @property
    def C(self):  # alias for the torsion shear constant
        return self.TorsConst
    @property
    def Rgyr(self):  # radius of gyration of the I-beam section
        return np.sqrt(self.Jxx / self.Area)
    @property
    def CG(self):  # height of the section centroid above the outer flange face
        return 0.5 * self.Hw + self.tf
```
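A quick usage sketch for the `Tube` helper above (the numbers are illustrative):

```python
import numpy as np
from wisdem.commonse.cross_sections import Tube

# A 4 m diameter tube with a 20 mm wall.
tube = Tube(D=np.array([4.0]), t=np.array([0.02]))
print(tube.Area)  # cross-sectional area, pi*t*(D - t), ~0.25 m**2 here
print(tube.Jxx)   # area moment of inertia about the x-x axis
print(tube.J0)    # polar (torsional) moment, 2*Jxx for a tube
```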
#### File: wisdem/commonse/csystem.py
```python
from __future__ import print_function
import numpy as np
class DirectionVector(object):
"""Handles rotation of direction vectors to appropriate coordinate systems.
All angles must be in degrees.
"""
def __init__(self, x, y, z, dx=None, dy=None, dz=None):
"""3-Dimensional vector that depends on direction only (not position).
Parameters
----------
x : float or ndarray
x-direction of vector(s)
y : float or ndarray
y-direction of vector(s)
z : float or ndarray
z-direction of vector(s)
"""
self.x = np.array(x)
self.y = np.array(y)
self.z = np.array(z)
if dx is None:
dx = {}
dx["dx"] = np.ones_like(self.x)
dx["dy"] = np.zeros_like(self.y)
dx["dz"] = np.zeros_like(self.z)
dy = {}
dy["dx"] = np.zeros_like(self.x)
dy["dy"] = np.ones_like(self.y)
dy["dz"] = np.zeros_like(self.z)
dz = {}
dz["dx"] = np.zeros_like(self.x)
dz["dy"] = np.zeros_like(self.y)
dz["dz"] = np.ones_like(self.z)
self.dx = dx
self.dy = dy
self.dz = dz
@classmethod
def fromArray(cls, array):
"""initialize with NumPy array
Parameters
----------
array : ndarray
construct DirectionVector using array of size 3
"""
return cls(array[0], array[1], array[2])
def toArray(self):
"""convert DirectionVector to NumPy array
Returns
-------
array : ndarray
NumPy array in order x, y, z containing DirectionVector data
"""
return np.c_[self.x, self.y, self.z]
def _rotateAboutZ(self, xstring, ystring, zstring, theta, thetaname, reverse=False):
"""
x X y = z. rotate c.s. about z, +theta
all angles in degrees
"""
thetaM = 1.0
if reverse:
thetaM = -1.0
x = getattr(self, xstring)
y = getattr(self, ystring)
z = getattr(self, zstring)
dx = getattr(self, "d" + xstring)
dy = getattr(self, "d" + ystring)
dz = getattr(self, "d" + zstring)
theta = np.radians(theta * thetaM)
c = np.cos(theta)
s = np.sin(theta)
xnew = x * c + y * s
ynew = -x * s + y * c
znew = z
angles = []
for key in dx.keys():
if not key in ["dx", "dy", "dz"]:
angles.append(key)
dxnew = {}
dxnew["dx"] = dx["dx"] * c + dy["dx"] * s
dxnew["dy"] = dx["dy"] * c + dy["dy"] * s
dxnew["dz"] = dx["dz"] * c + dy["dz"] * s
dxnew["d" + thetaname] = (-x * s + y * c) * np.radians(thetaM)
for dangle in angles:
dxnew[dangle] = dx[dangle] * c + dy[dangle] * s
dynew = {}
dynew["dx"] = -dx["dx"] * s + dy["dx"] * c
dynew["dy"] = -dx["dy"] * s + dy["dy"] * c
dynew["dz"] = -dx["dz"] * s + dy["dz"] * c
dynew["d" + thetaname] = (-x * c - y * s) * np.radians(thetaM)
for dangle in angles:
dynew[dangle] = -dx[dangle] * s + dy[dangle] * c
dznew = {}
dznew["dx"] = dz["dx"] * np.ones_like(theta) # multiply by ones just to get right size in case of float
dznew["dy"] = dz["dy"] * np.ones_like(theta)
dznew["dz"] = dz["dz"] * np.ones_like(theta)
dznew["d" + thetaname] = np.zeros_like(theta)
for dangle in angles:
dznew[dangle] = dz[dangle]
return xnew, ynew, znew, dxnew, dynew, dznew
def windToInertial(self, beta):
"""Rotates from wind-aligned to inertial
Parameters
----------
beta : float (deg)
wind angle
Returns
-------
vector : DirectionVector
a DirectionVector in the inertial coordinate system
"""
xw, yw, zw, dxw, dyw, dzw = self._rotateAboutZ("x", "y", "z", beta, "beta", reverse=True)
return DirectionVector(xw, yw, zw, dxw, dyw, dzw)
def inertialToWind(self, beta):
"""Rotates from inertial to wind-aligned
Parameters
----------
beta : float (deg)
wind angle
Returns
-------
vector : DirectionVector
a DirectionVector in the wind-aligned coordinate system
"""
xw, yw, zw, dxw, dyw, dzw = self._rotateAboutZ("x", "y", "z", beta, "beta")
return DirectionVector(xw, yw, zw, dxw, dyw, dzw)
def yawToWind(self, Psi):
"""Rotates from yaw-aligned to wind-aligned
Parameters
----------
Psi : float (deg)
yaw angle
Returns
-------
vector : DirectionVector
a DirectionVector in the wind-aligned coordinate system
"""
xw, yw, zw, dxw, dyw, dzw = self._rotateAboutZ("x", "y", "z", Psi, "yaw", reverse=True)
return DirectionVector(xw, yw, zw, dxw, dyw, dzw)
def windToYaw(self, Psi):
"""Rotates from wind-aligned to yaw-aligned
Parameters
----------
Psi : float (deg)
yaw angle
Returns
-------
vector : DirectionVector
a DirectionVector in the yaw-aligned coordinate system
"""
xy, yy, zy, dxy, dyy, dzy = self._rotateAboutZ("x", "y", "z", Psi, "yaw")
return DirectionVector(xy, yy, zy, dxy, dyy, dzy)
def hubToYaw(self, Theta, derivatives=False):
"""Rotates from hub-aligned to yaw-aligned
Parameters
----------
Theta : float (deg)
tilt angle
Returns
-------
vector : DirectionVector
a DirectionVector in the yaw-aligned coordinate system
"""
zy, xy, yy, dzy, dxy, dyy = self._rotateAboutZ("z", "x", "y", Theta, "tilt", reverse=True)
return DirectionVector(xy, yy, zy, dxy, dyy, dzy)
def yawToHub(self, Theta):
"""Rotates from yaw-aligned to hub-aligned
Parameters
----------
Theta : float (deg)
tilt angle
Returns
-------
vector : DirectionVector
a DirectionVector in the hub-aligned coordinate system
"""
zh, xh, yh, dzh, dxh, dyh = self._rotateAboutZ("z", "x", "y", Theta, "tilt")
return DirectionVector(xh, yh, zh, dxh, dyh, dzh)
def hubToAzimuth(self, Lambda):
"""Rotates from hub-aligned to azimuth-aligned
Parameters
----------
Lambda : float or ndarray (deg)
azimuth angle
Returns
-------
vector : DirectionVector
a DirectionVector in the azimuth-aligned coordinate system
"""
yz, zz, xz, dyz, dzz, dxz = self._rotateAboutZ("y", "z", "x", Lambda, "azimuth")
return DirectionVector(xz, yz, zz, dxz, dyz, dzz)
def azimuthToHub(self, Lambda):
"""Rotates from azimuth-aligned to hub-aligned
Parameters
----------
Lambda : float or ndarray (deg)
azimuth angle
Returns
-------
vector : DirectionVector
a DirectionVector in the hub-aligned coordinate system
"""
yh, zh, xh, dyh, dzh, dxh = self._rotateAboutZ("y", "z", "x", Lambda, "azimuth", reverse=True)
return DirectionVector(xh, yh, zh, dxh, dyh, dzh)
def azimuthToBlade(self, Phi):
"""Rotates from azimuth-aligned to blade-aligned
Parameters
----------
Phi : float (deg)
precone angle
Returns
-------
vector : DirectionVector
a DirectionVector in the blade-aligned coordinate system
"""
zb, xb, yb, dzb, dxb, dyb = self._rotateAboutZ("z", "x", "y", Phi, "precone", reverse=True)
return DirectionVector(xb, yb, zb, dxb, dyb, dzb)
def bladeToAzimuth(self, Phi):
"""Rotates from blade-aligned to azimuth-aligned
Parameters
----------
Phi : float (deg)
precone angle
Returns
-------
vector : DirectionVector
a DirectionVector in the azimuth-aligned coordinate system
"""
za, xa, ya, dza, dxa, dya = self._rotateAboutZ("z", "x", "y", Phi, "precone")
return DirectionVector(xa, ya, za, dxa, dya, dza)
def airfoilToBlade(self, theta):
"""Rotates from airfoil-aligned to blade-aligned
Parameters
----------
theta : float (deg)
twist angle
Returns
-------
vector : DirectionVector
a DirectionVector in the blade-aligned coordinate system
"""
xb, yb, zb, dxb, dyb, dzb = self._rotateAboutZ("x", "y", "z", theta, "theta")
return DirectionVector(xb, yb, zb, dxb, dyb, dzb)
def bladeToAirfoil(self, theta):
"""Rotates from blade-aligned to airfoil-aligned
Parameters
----------
theta : float (deg)
twist angle
Returns
-------
vector : DirectionVector
a DirectionVector in the airfoil-aligned coordinate system
"""
xa, ya, za, dxa, dya, dza = self._rotateAboutZ("x", "y", "z", theta, "theta", reverse=True)
return DirectionVector(xa, ya, za, dxa, dya, dza)
def airfoilToProfile(self):
"""Rotates from airfoil-aligned to profile
Returns
-------
vector : DirectionVector
a DirectionVector in the profile coordinate system
"""
return DirectionVector(self.y, self.x, self.z, self.dy, self.dx, self.dz)
def profileToAirfoil(self):
"""Rotates from profile to airfoil-aligned
Returns
-------
vector : DirectionVector
a DirectionVector in the airfoil-aligned coordinate system
"""
return DirectionVector(self.y, self.x, self.z, self.dy, self.dx, self.dz)
def cross(self, other):
"""cross product between two DirectionVectors
Parameters
----------
other : DirectionVector
other vector to cross with
Returns
-------
vector : DirectionVector
vector = self X other
"""
v1 = np.c_[self.x, self.y, self.z]
v2 = np.c_[other.x, other.y, other.z]
v = np.cross(v1, v2)
if len(v.shape) > 1:
return DirectionVector(v[:, 0], v[:, 1], v[:, 2])
else:
return DirectionVector(v[0], v[1], v[2])
def cross_deriv(self, other, namea="a", nameb="b"):
"""defined only for floats for now"""
# c = a X b
a = self
b = other
dx = {}
dx[namea] = np.r_[0.0, b.z, -b.y]
dx[nameb] = np.r_[0.0, -a.z, a.y]
dy = {}
dy[namea] = np.r_[-b.z, 0.0, b.x]
dy[nameb] = np.r_[a.z, 0.0, -a.x]
dz = {}
dz[namea] = np.r_[b.y, -b.x, 0.0]
dz[nameb] = np.r_[-a.y, a.x, 0.0]
return dx, dy, dz
def cross_deriv_array(self, other, namea="a", nameb="b"):
# c = a X b
a = self
b = other
dx = {}
dx["d" + namea + "x"] = np.zeros_like(b.x)
dx["d" + namea + "y"] = b.z
dx["d" + namea + "z"] = -b.y
dx["d" + nameb + "x"] = np.zeros_like(a.x)
dx["d" + nameb + "y"] = -a.z
dx["d" + nameb + "z"] = a.y
dy = {}
dy["d" + namea + "x"] = -b.z
dy["d" + namea + "y"] = np.zeros_like(b.y)
dy["d" + namea + "z"] = b.x
dy["d" + nameb + "x"] = a.z
dy["d" + nameb + "y"] = np.zeros_like(a.y)
dy["d" + nameb + "z"] = -a.x
dz = {}
dz["d" + namea + "x"] = b.y
dz["d" + namea + "y"] = -b.x
dz["d" + namea + "z"] = np.zeros_like(b.z)
dz["d" + nameb + "x"] = -a.y
dz["d" + nameb + "y"] = a.x
dz["d" + nameb + "z"] = np.zeros_like(a.z)
return dx, dy, dz
def __neg__(self):
"""negate direction vector"""
return DirectionVector(-self.x, -self.y, -self.z)
def __add__(self, other):
"""add two DirectionVector objects (v1 = v2 + v3)"""
if isinstance(other, DirectionVector):
return DirectionVector(self.x + other.x, self.y + other.y, self.z + other.z)
else:
return DirectionVector(self.x + other, self.y + other, self.z + other)
def __sub__(self, other):
"""subtract DirectionVector objects (v1 = v2 - v3)"""
if isinstance(other, DirectionVector):
return DirectionVector(self.x - other.x, self.y - other.y, self.z - other.z)
else:
return DirectionVector(self.x - other, self.y - other, self.z - other)
def __iadd__(self, other):
"""add DirectionVector object to self (v1 += v2)"""
if isinstance(other, DirectionVector):
self.x += other.x
self.y += other.y
self.z += other.z
else:
self.x += other
self.y += other
self.z += other
return self
def __isub__(self, other):
"""subract DirectionVector object from self (v1 -= v2)"""
if isinstance(other, DirectionVector):
self.x -= other.x
self.y -= other.y
self.z -= other.z
else:
self.x -= other
self.y -= other
self.z -= other
return self
def __mul__(self, other):
"""multiply vector times a scalar or element by element muiltiply times another vector (v1 = alpha * v2 or v1 = v2 * v3)"""
if isinstance(other, DirectionVector):
return DirectionVector(self.x * other.x, self.y * other.y, self.z * other.z)
else:
return DirectionVector(self.x * other, self.y * other, self.z * other)
def __truediv__(self, other):
"""divide vector by a scalar or element by element division with another vector (v1 = v2 / alpha or v1 = v2 / v3)"""
if isinstance(other, DirectionVector):
return DirectionVector(self.x / other.x, self.y / other.y, self.z / other.z)
else:
return DirectionVector(self.x / float(other), self.y / float(other), self.z / float(other))
def __imul__(self, other):
"""multiply self times a scalar or element by element muiltiply times another vector (v1 *= alpha or v1 *= v2)"""
if isinstance(other, DirectionVector):
self.x *= other.x
self.y *= other.y
self.z *= other.z
else:
self.x *= other
self.y *= other
self.z *= other
return self
def __str__(self):
"""print string representation"""
return "{0}, {1}, {2}".format(self.x, self.y, self.z)
```
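A short sketch chaining the rotations defined above; a round trip through the yaw- and hub-aligned frames should recover the original vector (angles in degrees, values illustrative):

```python
from wisdem.commonse.csystem import DirectionVector

# A thrust-like load along x in the wind-aligned frame.
F = DirectionVector(1000.0, 0.0, 0.0)

# Wind-aligned -> yaw-aligned -> hub-aligned, then back again.
F_hub = F.windToYaw(5.0).yawToHub(6.0)
F_back = F_hub.hubToYaw(6.0).yawToWind(5.0)
print(F_hub)   # components in the hub-aligned frame
print(F_back)  # ~ 1000.0, 0.0, 0.0 -- the round trip recovers the input
```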
#### File: wisdem/commonse/mpi_tools.py
```python
import os
import sys
try:
from mpi4py import MPI
except:
MPI = False
def under_mpirun():
"""Return True if we're being executed under mpirun."""
# this is a bit of a hack, but there appears to be
# no consistent set of environment vars between MPI
# implementations.
for name in os.environ.keys():
if (
name == "OMPI_COMM_WORLD_RANK"
or name == "MPIEXEC_HOSTNAME"
or name.startswith("MPIR_")
or name.startswith("MPICH_")
):
return True
return False
if under_mpirun():
from mpi4py import MPI
def debug(*msg): # pragma: no cover
newmsg = ["%d: " % MPI.COMM_WORLD.rank] + list(msg)
for m in newmsg:
sys.stdout.write("%s " % m)
sys.stdout.write("\n")
sys.stdout.flush()
else:
MPI = None
def map_comm_heirarchical(K, K2):
"""
    Hierarchical parallelization communicator mapping. Assumes K top-level processes with K2 subprocessors each.
    Requires comm_world_size >= K + K*K2. Noninclusive: Ki is not included in the K2i execution.
    (TODO: this is not the most efficient architecture; it could be achieved with K fewer processors, but this was easier to generalize)
"""
N = K + K * K2
comm_map_down = {}
comm_map_up = {}
color_map = [0] * K
for i in range(K):
comm_map_down[i] = [K + j + i * K2 for j in range(K2)]
color_map.extend([i + 1] * K2)
for j in comm_map_down[i]:
comm_map_up[j] = i
return comm_map_down, comm_map_up, color_map
def subprocessor_loop(comm_map_up):
"""
    Subprocessors loop, waiting to receive a function and its arguments to evaluate.
Output of the function is returned. Loops until a stop signal is received
Input data format:
data[0] = function to be evaluated
data[1] = [list of arguments]
    If the function to be evaluated does not fit this format, then a wrapper function
    should be created and passed that handles the setup, argument assignment, etc.
    for the actual function.
    Stop signal:
data[0] = False
"""
# comm = impl.world_comm()
rank = MPI.COMM_WORLD.Get_rank()
rank_target = comm_map_up[rank]
keep_running = True
    while keep_running:
data = MPI.COMM_WORLD.recv(source=(rank_target), tag=0)
if data[0] == False:
break
else:
func_execution = data[0]
args = data[1]
output = func_execution(args)
MPI.COMM_WORLD.send(output, dest=(rank_target), tag=1)
def subprocessor_stop(comm_map_down):
"""
Send stop signal to subprocessors
"""
# comm = MPI.COMM_WORLD
for rank in comm_map_down.keys():
subranks = comm_map_down[rank]
for subrank_i in subranks:
MPI.COMM_WORLD.send([False], dest=subrank_i, tag=0)
print("All MPI subranks closed.")
if __name__ == "__main__":
from mpi4py import MPI
(
_,
_,
_,
) = map_comm_heirarchical(2, 4)
```
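The mapping produced by `map_comm_heirarchical` can be inspected without launching MPI; a small sketch for two top-level ranks with two subprocessors each (six ranks total):

```python
from wisdem.commonse.mpi_tools import map_comm_heirarchical

comm_map_down, comm_map_up, color_map = map_comm_heirarchical(2, 2)
print(comm_map_down)  # {0: [2, 3], 1: [4, 5]}
print(comm_map_up)    # {2: 0, 3: 0, 4: 1, 5: 1}
print(color_map)      # [0, 0, 1, 1, 2, 2]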
#### File: wisdem/drivetrainse/generator_models.py
```python
import sys
import numpy as np
import pandas as pd
import openmdao.api as om
from wisdem.commonse import gravity
eps = 1e-3
# Convenience functions for computing McDonald's C and F parameters
def chsMshc(x):
return np.cosh(x) * np.sin(x) - np.sinh(x) * np.cos(x)
def chsPshc(x):
return np.cosh(x) * np.sin(x) + np.sinh(x) * np.cos(x)
def carterFactor(airGap, slotOpening, slotPitch):
"""Return Carter factor
(based on Langsdorff's empirical expression)
See page 3-13 Boldea Induction machines Chapter 3
"""
gma = (2 * slotOpening / airGap) ** 2 / (5 + 2 * slotOpening / airGap)
return slotPitch / (slotPitch - airGap * gma * 0.5)
# ---------------
def carterFactorMcDonald(airGap, h_m, slotOpening, slotPitch):
"""Return Carter factor using Carter's equation
(based on Schwartz-Christoffel's conformal mapping on simplified slot geometry)
This code is based on Eq. B.3-5 in Appendix B of McDonald's thesis.
It is used by PMSG_arms and PMSG_disc.
h_m : magnet height (m)
b_so : stator slot opening (m)
tau_s : Stator slot pitch (m)
"""
mu_r = 1.06 # relative permeability (probably for neodymium magnets, often given as 1.05 - GNS)
g_1 = airGap + h_m / mu_r # g
b_over_a = slotOpening / (2 * g_1)
gamma = 4 / np.pi * (b_over_a * np.arctan(b_over_a) - np.log(np.sqrt(1 + b_over_a ** 2)))
return slotPitch / (slotPitch - gamma * g_1)
# ---------------
def carterFactorEmpirical(airGap, slotOpening, slotPitch):
"""Return Carter factor using Langsdorff's empirical expression"""
sigma = (slotOpening / airGap) / (5 + slotOpening / airGap)
return slotPitch / (slotPitch - sigma * slotOpening)
# ---------------
def carterFactorSalientPole(airGap, slotWidth, slotPitch):
"""Return Carter factor for salient pole rotor
Where does this equation come from? It's different from other approximations above.
Original code:
tau_s = np.pi * dia / S # slot pitch
b_s = tau_s * b_s_tau_s # slot width
b_t = tau_s - b_s # tooth width
K_C1 = (tau_s + 10 * g_a) / (tau_s - b_s + 10 * g_a) # salient pole rotor
slotPitch - slotWidth == toothWidth
"""
return (slotPitch + 10 * airGap) / (slotPitch - slotWidth + 10 * airGap) # salient pole rotor
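# Illustrative sketch (not part of WISDEM): compare two of the Carter factor
# approximations above for a 1 mm air gap, a 4 mm slot opening and a 25 mm
# slot pitch; both come out slightly above 1, i.e. slotting mildly increases
# the effective air gap.
def _carter_factor_example():
    g, b_so, tau_s = 0.001, 0.004, 0.025
    return carterFactor(g, b_so, tau_s), carterFactorEmpirical(g, b_so, tau_s)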
# ---------------------------------
def array_seq(q1, b, c, Total_number):
Seq = np.array([1, 0, 0, 1, 0])
diff = Total_number * 5 / 6
G = np.prod(Seq.shape)
return Seq, diff, G
# ---------------------------------
def winding_factor(Sin, b, c, p, m):
S = int(Sin)
# Step 1 Writing q1 as a fraction
q1 = b / c
# Step 2: Writing a binary sequence of b-c zeros and b ones
Total_number = int(S / b)
L = array_seq(q1, b, c, Total_number)
    # Step 3: Repeat the binary sequence Q_s/b times
New_seq = np.tile(L[0], Total_number)
Actual_seq1 = pd.DataFrame(New_seq[:, None].T)
Winding_sequence = ["A", "C1", "B", "A1", "C", "B1"]
New_seq2 = np.tile(Winding_sequence, int(L[1]))
Actual_seq2 = pd.DataFrame(New_seq2[:, None].T)
Seq_f = pd.concat([Actual_seq1, Actual_seq2], ignore_index=True)
Seq_f.reset_index(drop=True)
Slots = S
R = S if S % 2 == 0 else S + 1
Windings_arrange = (pd.DataFrame(index=Seq_f.index, columns=Seq_f.columns[1:R])).fillna(0)
counter = 1
# Step #4 Arranging winding in Slots
for i in range(0, len(New_seq)):
if Seq_f.loc[0, i] == 1:
Windings_arrange.loc[0, counter] = Seq_f.loc[1, i]
counter = counter + 1
Windings_arrange.loc[1, 1] = "C1"
for k in range(1, R):
if Windings_arrange.loc[0, k] == "A":
Windings_arrange.loc[1, k + 1] = "A1"
elif Windings_arrange.loc[0, k] == "B":
Windings_arrange.loc[1, k + 1] = "B1"
elif Windings_arrange.loc[0, k] == "C":
Windings_arrange.loc[1, k + 1] = "C1"
elif Windings_arrange.loc[0, k] == "A1":
Windings_arrange.loc[1, k + 1] = "A"
elif Windings_arrange.loc[0, k] == "B1":
Windings_arrange.loc[1, k + 1] = "B"
elif Windings_arrange.loc[0, k] == "C1":
Windings_arrange.loc[1, k + 1] = "C"
Phase_A = np.zeros((1000, 1), dtype=float)
counter_A = 0
# Windings_arrange.to_excel('test.xlsx')
# Winding vector, W_A for Phase A
for l in range(1, R):
if Windings_arrange.loc[0, l] == "A" and Windings_arrange.loc[1, l] == "A":
Phase_A[counter_A, 0] = l
Phase_A[counter_A + 1, 0] = l
counter_A = counter_A + 2
elif Windings_arrange.loc[0, l] == "A1" and Windings_arrange.loc[1, l] == "A1":
Phase_A[counter_A, 0] = -1 * l
Phase_A[counter_A + 1, 0] = -1 * l
counter_A = counter_A + 2
elif Windings_arrange.loc[0, l] == "A" or Windings_arrange.loc[1, l] == "A":
Phase_A[counter_A, 0] = l
counter_A = counter_A + 1
elif Windings_arrange.loc[0, l] == "A1" or Windings_arrange.loc[1, l] == "A1":
Phase_A[counter_A, 0] = -1 * l
counter_A = counter_A + 1
W_A = (np.trim_zeros(Phase_A)).T
# Calculate winding factor
K_w = 0
for r in range(0, int(2 * (S) / 3)):
Gamma = 2 * np.pi * p * np.abs(W_A[0, r]) / S
K_w += np.sign(W_A[0, r]) * (np.exp(Gamma * 1j))
K_w = np.abs(K_w) / (2 * S / 3)
CPMR = np.lcm(S, int(2 * p))
N_cog_s = CPMR / S
N_cog_p = CPMR / p
N_cog_t = CPMR * 0.5 / p
A = np.lcm(S, int(2 * p))
b_p_tau_p = 2 * 1 * p / S - 0
b_t_tau_s = (2) * S * 0.5 / p - 2
return K_w
# ---------------------------------
def shell_constant(R, t, l, x, E, v):
Lambda = (3 * (1 - v ** 2) / (R ** 2 * t ** 2)) ** 0.25
D = E * t ** 3 / (12 * (1 - v ** 2))
C_14 = (np.sinh(Lambda * l)) ** 2 + (np.sin(Lambda * l)) ** 2
C_11 = (np.sinh(Lambda * l)) ** 2 - (np.sin(Lambda * l)) ** 2
F_2 = np.cosh(Lambda * x) * np.sin(Lambda * x) + np.sinh(Lambda * x) * np.cos(Lambda * x)
C_13 = np.cosh(Lambda * l) * np.sinh(Lambda * l) - np.cos(Lambda * l) * np.sin(Lambda * l)
F_1 = np.cosh(Lambda * x) * np.cos(Lambda * x)
F_4 = np.cosh(Lambda * x) * np.sin(Lambda * x) - np.sinh(Lambda * x) * np.cos(Lambda * x)
return D, Lambda, C_14, C_11, F_2, C_13, F_1, F_4
# ---------------------------------
def plate_constant(a, b, E, v, r_o, t):
D = E * t ** 3 / (12 * (1 - v ** 2))
C_2 = 0.25 * (1 - (b / a) ** 2 * (1 + 2 * np.log(a / b)))
C_3 = 0.25 * (b / a) * (((b / a) ** 2 + 1) * np.log(a / b) + (b / a) ** 2 - 1)
C_5 = 0.5 * (1 - (b / a) ** 2)
C_6 = 0.25 * (b / a) * ((b / a) ** 2 - 1 + 2 * np.log(a / b))
C_8 = 0.5 * (1 + v + (1 - v) * (b / a) ** 2)
C_9 = (b / a) * (0.5 * (1 + v) * np.log(a / b) + 0.25 * (1 - v) * (1 - (b / a) ** 2))
L_11 = (1 / 64) * (
1 + 4 * (r_o / a) ** 2 - 5 * (r_o / a) ** 4 - 4 * (r_o / a) ** 2 * (2 + (r_o / a) ** 2) * np.log(a / r_o)
)
L_17 = 0.25 * (1 - 0.25 * (1 - v) * ((1 - (r_o / a) ** 4) - (r_o / a) ** 2 * (1 + (1 + v) * np.log(a / r_o))))
return D, C_2, C_3, C_5, C_6, C_8, C_9, L_11, L_17
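# Illustrative sketch (not part of WISDEM): shell and plate constants for an
# annular steel disc, 2 m outer radius, 0.5 m inner radius, 50 mm thick,
# with a unit load radius of 1 m; E and v are typical steel values.
def _shell_plate_example():
    E, v = 200e9, 0.3
    shell = shell_constant(R=2.0, t=0.05, l=1.5, x=0.0, E=E, v=v)
    plate = plate_constant(a=2.0, b=0.5, E=E, v=v, r_o=1.0, t=0.05)
    return shell, plate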
# ---------------------------------
debug = False
# ---------------------------------
class GeneratorBase(om.ExplicitComponent):
"""
Base class for generators
Parameters
----------
B_r : float, [T]
Remnant flux density
E : float, [Pa]
youngs modulus
G : float, [Pa]
Shear modulus
P_Fe0e : float, [W/kg]
specific eddy losses @ 1.5T, 50Hz
P_Fe0h : float, [W/kg]
specific hysteresis losses W / kg @ 1.5 T @50 Hz
S_N : float
Slip
alpha_p : float
b_r_tau_r : float
Rotor Slot width / Slot pitch ratio
b_ro : float, [m]
Rotor slot opening width
b_s_tau_s : float
Stator Slot width/Slot pitch ratio
b_so : float, [m]
Stator slot opening width
cofi : float
power factor
freq : float, [Hz]
grid frequency
h_i : float, [m]
coil insulation thickness
h_sy0 : float
h_w : float, [m]
Slot wedge height
k_fes : float
Stator iron fill factor per Grauers
k_fillr : float
Rotor slot fill factor
k_fills : float
Stator Slot fill factor
k_s : float
magnetic saturation factor for iron
m : int
Number of phases
mu_0 : float, [m*kg/s**2/A**2]
permeability of free space
mu_r : float, [m*kg/s**2/A**2]
relative permeability (neodymium)
p : float
number of pole pairs (taken as int within code)
phi : numpy array[90], [rad]
tilt angle (during transportation)
q1 : int
Stator slots per pole per phase
q2 : int
Rotor slots per pole per phase
ratio_mw2pp : float
ratio of magnet width to pole pitch(bm / self.tau_p)
resist_Cu : float, [ohm/m]
Copper resistivity
sigma : float, [Pa]
assumed max shear stress
v : float
poisson ratio
y_tau_p : float
Stator coil span to pole pitch
y_tau_pr : float
Rotor coil span to pole pitch
I_0 : float, [A]
no-load excitation current
T_rated : float, [N*m]
Rated torque
d_r : float, [m]
arm depth d_r
h_m : float, [m]
magnet height
h_0 : float, [m]
Slot height
h_s : float, [m]
Yoke height h_s
len_s : float, [m]
Stator core length
machine_rating : float, [W]
Machine rating
shaft_rpm : numpy array[n_pc], [rpm]
rated speed of input shaft (lss for direct, hss for geared)
n_r : float
number of arms n
rad_ag : float, [m]
airgap radius
t_wr : float, [m]
arm depth thickness
n_s : float
number of stator arms n_s
b_st : float, [m]
arm width b_st
d_s : float, [m]
arm depth d_s
t_ws : float, [m]
arm depth thickness
D_shaft : float, [m]
Shaft diameter
rho_Copper : float, [kg*m**-3]
Copper density
rho_Fe : float, [kg*m**-3]
Magnetic Steel density
rho_Fes : float, [kg*m**-3]
Structural Steel density
rho_PM : float, [kg*m**-3]
Magnet density
Returns
-------
B_rymax : float, [T]
Peak Rotor yoke flux density
B_trmax : float, [T]
maximum tooth flux density in rotor
B_tsmax : float, [T]
maximum tooth flux density in stator
B_g : float, [T]
Peak air gap flux density B_g
B_g1 : float, [T]
air gap flux density fundamental
B_pm1 : float
Fundamental component of peak air gap flux density
N_s : float
Number of turns in the stator winding
b_s : float, [m]
slot width
b_t : float, [m]
tooth width
A_Curcalc : float, [mm**2]
Conductor cross-section mm^2
A_Cuscalc : float, [mm**2]
Stator Conductor cross-section mm^2
b_m : float
magnet width
mass_PM : float, [kg]
Magnet mass
Copper : float, [kg]
Copper Mass
Iron : float, [kg]
Electrical Steel Mass
Structural_mass : float, [kg]
Structural Mass
generator_mass : float, [kg]
Actual mass
f : float
Generator output frequency
I_s : float, [A]
Generator output phase current
R_s : float, [ohm]
Stator resistance
L_s : float
Stator synchronising inductance
J_s : float, [A*m**-2]
Stator winding current density
A_1 : float
Specific current loading
K_rad : float
Stack length ratio
Losses : numpy array[n_pc], [W]
Total loss
generator_efficiency : numpy array[n_pc]
Generator electromagnetic efficiency values (<1)
u_ar : float, [m]
Rotor radial deflection
u_as : float, [m]
Stator radial deflection
u_allow_r : float, [m]
Allowable radial rotor
u_allow_s : float, [m]
Allowable radial stator
y_ar : float, [m]
Rotor axial deflection
y_as : float, [m]
Stator axial deflection
y_allow_r : float, [m]
Allowable axial
y_allow_s : float, [m]
Allowable axial
z_ar : float, [m]
Rotor circumferential deflection
z_as : float, [m]
Stator circumferential deflection
z_allow_r : float, [m]
Allowable circum rotor
z_allow_s : float, [m]
Allowable circum stator
b_allow_r : float, [m]
Allowable arm dimensions
b_allow_s : float, [m]
Allowable arm
TC1 : float, [m**3]
Torque constraint
TC2r : float, [m**3]
Torque constraint-rotor
TC2s : float, [m**3]
Torque constraint-stator
R_out : float, [m]
Outer radius
S : float
Stator slots
Slot_aspect_ratio : float
Slot aspect ratio
Slot_aspect_ratio1 : float
Stator slot aspect ratio
Slot_aspect_ratio2 : float
Rotor slot aspect ratio
D_ratio : float
Stator diameter ratio
J_r : float
Rotor winding Current density
L_sm : float
mutual inductance
Q_r : float
Rotor slots
R_R : float
Rotor resistance
b_r : float
rotor slot width
b_tr : float
rotor tooth width
b_trmin : float
minimum tooth width
"""
def initialize(self):
self.options.declare("n_pc", default=20)
def setup(self):
n_pc = self.options["n_pc"]
# Constants and parameters
self.add_input("B_r", val=1.2, units="T")
self.add_input("E", val=0.0, units="Pa")
self.add_input("G", val=0.0, units="Pa")
self.add_input("P_Fe0e", val=1.0, units="W/kg")
self.add_input("P_Fe0h", val=4.0, units="W/kg")
self.add_input("S_N", val=-0.002)
self.add_input("alpha_p", val=0.5 * np.pi * 0.7)
self.add_input("b_r_tau_r", val=0.45)
self.add_input("b_ro", val=0.004, units="m")
self.add_input("b_s_tau_s", val=0.45)
self.add_input("b_so", val=0.004, units="m")
self.add_input("cofi", val=0.85)
self.add_input("freq", val=60, units="Hz")
self.add_input("h_i", val=0.001, units="m")
self.add_input("h_sy0", val=0.0)
self.add_input("h_w", val=0.005, units="m")
self.add_input("k_fes", val=0.9)
self.add_input("k_fillr", val=0.7)
self.add_input("k_fills", val=0.65)
self.add_input("k_s", val=0.2)
self.add_discrete_input("m", val=3)
self.add_input("mu_0", val=np.pi * 4e-7, units="m*kg/s**2/A**2")
self.add_input("mu_r", val=1.06, units="m*kg/s**2/A**2")
self.add_input("p", val=3.0)
self.add_input("phi", val=np.deg2rad(90), units="rad")
self.add_discrete_input("q1", val=6)
self.add_discrete_input("q2", val=4)
self.add_input("ratio_mw2pp", val=0.7)
self.add_input("resist_Cu", val=1.8e-8 * 1.4, units="ohm/m")
self.add_input("sigma", val=40e3, units="Pa")
self.add_input("v", val=0.3)
self.add_input("y_tau_p", val=1.0)
self.add_input("y_tau_pr", val=10.0 / 12)
# General inputs
# self.add_input('r_s', val=0.0, units='m', desc='airgap radius r_s')
self.add_input("I_0", val=0.0, units="A")
self.add_input("rated_torque", val=0.0, units="N*m")
self.add_input("d_r", val=0.0, units="m")
self.add_input("h_m", val=0.0, units="m")
self.add_input("h_0", val=0.0, units="m")
self.add_input("h_s", val=0.0, units="m")
self.add_input("len_s", val=0.0, units="m")
self.add_input("machine_rating", val=0.0, units="W")
self.add_input("shaft_rpm", val=np.zeros(n_pc), units="rpm")
self.add_input("n_r", val=0.0)
self.add_input("rad_ag", val=0.0, units="m")
self.add_input("t_wr", val=0.0, units="m")
# Structural design variables
self.add_input("n_s", val=0.0)
self.add_input("b_st", val=0.0, units="m")
self.add_input("d_s", val=0.0, units="m")
self.add_input("t_ws", val=0.0, units="m")
self.add_input("D_shaft", val=0.0, units="m")
# Material properties
self.add_input("rho_Copper", val=8900.0, units="kg*m**-3")
self.add_input("rho_Fe", val=7700.0, units="kg*m**-3")
self.add_input("rho_Fes", val=7850.0, units="kg*m**-3")
self.add_input("rho_PM", val=7450.0, units="kg*m**-3")
# Magnetic loading
self.add_output("B_rymax", val=0.0, units="T")
self.add_output("B_trmax", val=0.0, units="T")
self.add_output("B_tsmax", val=0.0, units="T")
self.add_output("B_g", val=0.0, units="T")
self.add_output("B_g1", val=0.0, units="T")
self.add_output("B_pm1", val=0.0)
# Stator design
self.add_output("N_s", val=0.0)
self.add_output("b_s", val=0.0, units="m")
self.add_output("b_t", val=0.0, units="m")
self.add_output("A_Curcalc", val=0.0, units="mm**2")
self.add_output("A_Cuscalc", val=0.0, units="mm**2")
# Rotor magnet dimension
self.add_output("b_m", val=0.0)
# Mass Outputs
self.add_output("mass_PM", val=0.0, units="kg")
self.add_output("Copper", val=0.0, units="kg")
self.add_output("Iron", val=0.0, units="kg")
self.add_output("Structural_mass", val=0.0, units="kg")
self.add_output("generator_mass", val=0.0, units="kg")
# Electrical performance
self.add_output("f", val=np.zeros(n_pc))
self.add_output("I_s", val=np.zeros(n_pc), units="A")
self.add_output("R_s", val=np.zeros(n_pc), units="ohm")
self.add_output("L_s", val=0.0)
self.add_output("J_s", val=np.zeros(n_pc), units="A*m**-2")
self.add_output("A_1", val=np.zeros(n_pc))
# Objective functions
self.add_output("K_rad", val=0.0)
self.add_output("Losses", val=np.zeros(n_pc), units="W")
self.add_output("eandm_efficiency", val=np.zeros(n_pc))
# Structural performance
self.add_output("u_ar", val=0.0, units="m")
self.add_output("u_as", val=0.0, units="m")
self.add_output("u_allow_r", val=0.0, units="m")
self.add_output("u_allow_s", val=0.0, units="m")
self.add_output("y_ar", val=0.0, units="m")
self.add_output("y_as", val=0.0, units="m")
self.add_output("y_allow_r", val=0.0, units="m")
self.add_output("y_allow_s", val=0.0, units="m")
self.add_output("z_ar", val=0.0, units="m")
self.add_output("z_as", val=0.0, units="m")
self.add_output("z_allow_r", val=0.0, units="m")
self.add_output("z_allow_s", val=0.0, units="m")
self.add_output("b_allow_r", val=0.0, units="m")
self.add_output("b_allow_s", val=0.0, units="m")
self.add_output("TC1", val=0.0, units="m**3")
self.add_output("TC2r", val=0.0, units="m**3")
self.add_output("TC2s", val=0.0, units="m**3")
# Other parameters
self.add_output("R_out", val=0.0, units="m")
self.add_output("S", val=0.0)
self.add_output("Slot_aspect_ratio", val=0.0)
self.add_output("Slot_aspect_ratio1", val=0.0)
self.add_output("Slot_aspect_ratio2", val=0.0)
self.add_output("D_ratio", val=0.0)
self.add_output("J_r", val=np.zeros(n_pc))
self.add_output("L_sm", val=0.0)
self.add_output("Q_r", val=0.0)
self.add_output("R_R", val=0.0)
self.add_output("b_r", val=0.0)
self.add_output("b_tr", val=0.0)
self.add_output("b_trmin", val=0.0)
# ----------------------------------------------------------------------------------------
class PMSG_Outer(GeneratorBase):
"""
    Estimates overall electromagnetic dimensions and efficiency of a PMSG outer-rotor generator.
Parameters
----------
P_mech : float, [W]
Shaft mechanical power
N_c : float
Number of turns per coil
b : float
Slot pole combination
c : float
Slot pole combination
E_p : float, [V]
Stator phase voltage
h_yr : float, [m]
rotor yoke height
h_ys : float, [m]
Yoke height
h_sr : float, [m]
Structural Mass
h_ss : float, [m]
Stator yoke height
t_r : float, [m]
Rotor disc thickness
t_s : float, [m]
Stator disc thickness
y_sh : float, [m]
Shaft deflection
theta_sh : float, [rad]
slope of shaft
D_nose : float, [m]
Nose outer diameter
y_bd : float, [m]
Deflection of the bedplate
theta_bd : float, [rad]
Slope at the bedplate
u_allow_pcent : float
Radial deflection as a percentage of air gap diameter
y_allow_pcent : float
Radial deflection as a percentage of air gap diameter
z_allow_deg : float, [deg]
Allowable torsional twist
B_tmax : float, [T]
Peak Teeth flux density
Returns
-------
B_smax : float, [T]
Peak Stator flux density
B_symax : float, [T]
Peak Stator flux density
tau_p : float, [m]
Pole pitch
q : float, [N/m**2]
Normal stress
len_ag : float, [m]
Air gap length
h_t : float, [m]
tooth height
tau_s : float, [m]
Slot pitch
J_actual : float, [A/m**2]
Current density
T_e : float, [N*m]
Electromagnetic torque
twist_r : float, [deg]
torsional twist
twist_s : float, [deg]
Stator torsional twist
Structural_mass_rotor : float, [kg]
Rotor mass (kg)
Structural_mass_stator : float, [kg]
Stator mass (kg)
Mass_tooth_stator : float, [kg]
Teeth and copper mass
Mass_yoke_rotor : float, [kg]
Rotor yoke mass
Mass_yoke_stator : float, [kg]
Stator yoke mass
rotor_mass : float, [kg]
Total rotor mass
stator_mass : float, [kg]
Total stator mass
"""
def initialize(self):
super(PMSG_Outer, self).initialize()
def setup(self):
super(PMSG_Outer, self).setup()
n_pc = self.options["n_pc"]
# PMSG_structrual inputs
self.add_input("P_mech", units="W")
self.add_input("N_c", 0.0)
self.add_input("b", 0.0)
self.add_input("c", 0.0)
self.add_input("E_p", 0.0, units="V")
self.add_input("h_yr", val=0.0, units="m")
self.add_input("h_ys", val=0.0, units="m")
self.add_input("h_sr", 0.0, units="m")
self.add_input("h_ss", 0.0, units="m")
self.add_input("t_r", 0.0, units="m")
self.add_input("t_s", 0.0, units="m")
self.add_input("y_sh", units="m")
self.add_input("theta_sh", 0.0, units="rad")
self.add_input("D_nose", 0.0, units="m")
self.add_input("y_bd", units="m")
self.add_input("theta_bd", 0.0, units="rad")
self.add_input("u_allow_pcent", 0.0)
self.add_input("y_allow_pcent", 0.0)
self.add_input("z_allow_deg", 0.0, units="deg")
# Magnetic loading
self.add_input("B_tmax", 0.0, units="T")
self.add_output("B_smax", val=0.0, units="T")
self.add_output("B_symax", val=0.0, units="T")
self.add_output("tau_p", 0.0, units="m")
self.add_output("q", 0.0, units="N/m**2")
self.add_output("len_ag", 0.0, units="m")
# Stator design
self.add_output("h_t", 0.0, units="m")
self.add_output("tau_s", 0.0, units="m")
# Electrical performance
self.add_output("J_actual", val=np.zeros(n_pc), units="A/m**2")
self.add_output("T_e", 0.0, units="N*m")
# Material properties
self.add_output("twist_r", 0.0, units="deg")
self.add_output("twist_s", 0.0, units="deg")
# Mass Outputs
self.add_output("Structural_mass_rotor", 0.0, units="kg")
self.add_output("Structural_mass_stator", 0.0, units="kg")
self.add_output("Mass_tooth_stator", 0.0, units="kg")
self.add_output("Mass_yoke_rotor", 0.0, units="kg")
self.add_output("Mass_yoke_stator", 0.0, units="kg")
self.add_output("rotor_mass", 0.0, units="kg")
self.add_output("stator_mass", 0.0, units="kg")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
rad_ag = float(inputs["rad_ag"])
len_s = float(inputs["len_s"])
p = float(inputs["p"])
b = float(inputs["b"])
c = float(inputs["c"])
h_m = float(inputs["h_m"])
h_ys = float(inputs["h_ys"])
h_yr = float(inputs["h_yr"])
h_s = float(inputs["h_s"])
h_ss = float(inputs["h_ss"])
h_0 = float(inputs["h_0"])
B_tmax = float(inputs["B_tmax"])
E_p = float(inputs["E_p"])
P_mech = float(inputs["P_mech"])
P_av_v = float(inputs["machine_rating"])
h_sr = float(inputs["h_sr"])
t_r = float(inputs["t_r"])
t_s = float(inputs["t_s"])
R_sh = 0.5 * float(inputs["D_shaft"])
R_no = 0.5 * float(inputs["D_nose"])
y_sh = float(inputs["y_sh"])
y_bd = float(inputs["y_bd"])
rho_Fes = float(inputs["rho_Fes"])
rho_Fe = float(inputs["rho_Fe"])
sigma = float(inputs["sigma"])
shaft_rpm = inputs["shaft_rpm"]
# Grab constant values
B_r = float(inputs["B_r"])
E = float(inputs["E"])
G = float(inputs["G"])
P_Fe0e = float(inputs["P_Fe0e"])
P_Fe0h = float(inputs["P_Fe0h"])
cofi = float(inputs["cofi"])
h_w = float(inputs["h_w"])
k_fes = float(inputs["k_fes"])
k_fills = float(inputs["k_fills"])
m = int(discrete_inputs["m"])
mu_0 = float(inputs["mu_0"])
mu_r = float(inputs["mu_r"])
p = float(inputs["p"])
phi = float(inputs["phi"])
ratio_mw2pp = float(inputs["ratio_mw2pp"])
resist_Cu = float(inputs["resist_Cu"])
v = float(inputs["v"])
"""
#Assign values to universal constants
B_r = 1.279 # Tesla remnant flux density
E = 2e11 # N/m^2 young's modulus
ratio = 0.8 # ratio of magnet width to pole pitch(bm/self.tau_p)
mu_0 = np.pi*4e-7 # permeability of free space
mu_r = 1.06 # relative permeability
cofi = 0.85 # power factor
#Assign values to design constants
h_0 = 0.005 # Slot opening height
h_w = 0.004 # Slot wedge height
m = 3 # no of phases
#b_s_tau_s = 0.45 # slot width to slot pitch ratio
k_fills = 0.65 # Slot fill factor
P_Fe0h = 4 # specific hysteresis losses W/kg @ 1.5 T
        P_Fe0e = 1 # specific eddy losses W/kg @ 1.5 T
k_fes = 0.8 # Iron fill factor
#Assign values to universal constants
phi = 90*2*np.pi/360 # tilt angle (rotor tilt -90 degrees during transportation)
v = 0.3 # Poisson's ratio
G = 79.3e9
"""
######################## Electromagnetic design ###################################
K_rad = len_s / (2 * rad_ag) # Aspect ratio
# Calculating air gap length
dia = 2 * rad_ag # air gap diameter
len_ag = 0.001 * dia # air gap length
r_s = rad_ag - len_ag # Stator outer radius
b_so = 2 * len_ag # Slot opening
tau_p = np.pi * dia / (2 * p) # pole pitch
# Calculating winding factor
Slot_pole = b / c
S = Slot_pole * 2 * p * m
testval = S / (m * np.gcd(int(S), int(p)))
if float(np.round(testval, 3)).is_integer():
k_w = winding_factor(int(S), b, c, int(p), m)
b_m = ratio_mw2pp * tau_p # magnet width
alpha_p = np.pi / 2 * ratio_mw2pp
tau_s = np.pi * (dia - 2 * len_ag) / S
            # Calculating Carter factor for stator and effective air gap length
gamma = (
4
/ np.pi
* (
b_so / 2 / (len_ag + h_m / mu_r) * np.arctan(b_so / 2 / (len_ag + h_m / mu_r))
- np.log(np.sqrt(1 + (b_so / 2 / (len_ag + h_m / mu_r)) ** 2))
)
)
k_C = tau_s / (tau_s - gamma * (len_ag + h_m / mu_r)) # carter coefficient
g_eff = k_C * (len_ag + h_m / mu_r)
# angular frequency in radians
om_m = 2 * np.pi * shaft_rpm / 60
om_e = p * om_m
            freq = om_e / 2 / np.pi # output frequency
# Calculating magnetic loading
B_pm1 = B_r * h_m / mu_r / (g_eff)
B_g = B_r * h_m / (mu_r * g_eff) * (4 / np.pi) * np.sin(alpha_p)
B_symax = B_pm1 * b_m / (2 * h_ys) * k_fes
B_rymax = B_pm1 * b_m * k_fes / (2 * h_yr)
b_t = B_pm1 * tau_s / B_tmax
N_c = 2 # Number of turns per coil
q = (B_g) ** 2 / 2 / mu_0
# Stator winding length ,cross-section and resistance
l_Cus = 2 * (len_s + np.pi / 4 * (tau_s + b_t)) # length of a turn
# Calculating no-load voltage induced in the stator
N_s = np.rint(E_p / (np.sqrt(2) * len_s * r_s * k_w * om_m * B_g))
# Z = P_av_v / (m*E_p)
# Calculating leakage inductance in stator
V_1 = E_p / 1.1
I_n = P_av_v / 3 / cofi / V_1
J_s = 6.0
A_Cuscalc = I_n / J_s
A_slot = 2 * N_c * A_Cuscalc * (10 ** -6) / k_fills
tau_s_new = np.pi * (dia - 2 * len_ag - 2 * h_w - 2 * h_0) / S
b_s2 = tau_s_new - b_t # Slot top width
b_s1 = np.sqrt(b_s2 ** 2 - 4 * np.pi * A_slot / S)
b_s = (b_s1 + b_s2) * 0.5
N_coil = 2 * S
P_s = mu_0 * (h_s / 3 / b_s + h_w * 2 / (b_s2 + b_so) + h_0 / b_so) # Slot permeance function
L_ssigmas = S / 3 * 4 * N_c ** 2 * len_s * P_s # slot leakage inductance
L_ssigmaew = (
N_coil * N_c ** 2 * mu_0 * tau_s * np.log((0.25 * np.pi * tau_s ** 2) / (0.5 * h_s * b_s))
) # end winding leakage inductance
L_aa = 2 * np.pi / 3 * (N_c ** 2 * mu_0 * len_s * r_s / g_eff)
L_m = L_aa
L_ssigma = L_ssigmas + L_ssigmaew
L_s = L_m + L_ssigma
G_leak = np.abs((1.1 * E_p) ** 4 - (1 / 9) * (P_av_v * om_e * L_s) ** 2)
# Calculating stator current and electrical loading
I_s = np.sqrt(2 * (np.abs((E_p * 1.1) ** 2 - G_leak ** 0.5)) / (om_e * L_s) ** 2)
A_1 = 6 * I_s * N_s / np.pi / dia
J_actual = I_s / (A_Cuscalc * 2 ** 0.5)
L_Cus = N_s * l_Cus
R_s = inputs["resist_Cu"] * (N_s) * l_Cus / (A_Cuscalc * (10 ** -6))
B_smax = np.sqrt(2) * I_s * mu_0 / g_eff
# Calculating Electromagnetically active mass
wedge_area = (b_s * 0.5 - b_so * 0.5) * (2 * h_0 + h_w)
V_Cus = m * L_Cus * (A_Cuscalc * (10 ** -6)) # copper volume
h_t = h_s + h_w + h_0
V_Fest = len_s * S * (b_t * (h_s + h_w + h_0) + wedge_area) # volume of iron in stator tooth
V_Fesy = (
len_s
* np.pi
* ((rad_ag - len_ag - h_s - h_w - h_0) ** 2 - (rad_ag - len_ag - h_s - h_w - h_0 - h_ys) ** 2)
) # volume of iron in stator yoke
V_Fery = len_s * np.pi * ((rad_ag + h_m + h_yr) ** 2 - (rad_ag + h_m) ** 2)
Copper = V_Cus[-1] * inputs["rho_Copper"]
M_Fest = V_Fest * rho_Fe # Mass of stator tooth
M_Fesy = V_Fesy * rho_Fe # Mass of stator yoke
M_Fery = V_Fery * rho_Fe # Mass of rotor yoke
Iron = M_Fest + M_Fesy + M_Fery
mass_PM = 2 * np.pi * (rad_ag + h_m) * len_s * h_m * ratio_mw2pp * inputs["rho_PM"]
# Calculating Losses
##1. Copper Losses
K_R = 1.0 # Skin effect correction co-efficient
P_Cu = m * (I_s / 2 ** 0.5) ** 2 * R_s * K_R
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyys = (
M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60))
) # Hysteresis losses in stator yoke
P_Ftys = (
M_Fesy * ((B_symax / 1.5) ** 2) * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator yoke
P_Fesynom = P_Hyys + P_Ftys
P_Hyd = (
M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60))
) # Hysteresis losses in stator teeth
P_Ftd = (
M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator teeth
P_Festnom = P_Hyd + P_Ftd
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyyr = (
M_Fery * (B_rymax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60))
) # Hysteresis losses in stator yoke
P_Ftyr = (
M_Fery * ((B_rymax / 1.5) ** 2) * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator yoke
P_Ferynom = P_Hyyr + P_Ftyr
# additional stray losses due to leakage flux
P_ad = 0.2 * (P_Hyys + P_Ftys + P_Hyd + P_Ftd + P_Hyyr + P_Ftyr)
pFtm = 300 # specific magnet loss
P_Ftm = pFtm * 2 * p * b_m * len_s
Losses = P_Cu + P_Festnom + P_Fesynom + P_ad + P_Ftm + P_Ferynom
gen_eff = (P_mech - Losses) / (P_mech)
I_snom = gen_eff * (P_mech / m / E_p / cofi) # rated current
I_qnom = gen_eff * P_mech / (m * E_p)
X_snom = om_e * (L_m + L_ssigma)
T_e = np.pi * rad_ag ** 2 * len_s * 2 * sigma
Stator = M_Fesy + M_Fest + Copper # modified mass_stru_steel
Rotor = M_Fery + mass_PM # modified (N_r*(R_1-self.R_sh)*a_r*self.rho_Fes))
Mass_tooth_stator = M_Fest + Copper
Mass_yoke_rotor = M_Fery
Mass_yoke_stator = M_Fesy
R_out = (dia + 2 * h_m + 2 * h_yr + 2 * inputs["h_sr"]) * 0.5
Losses = Losses
generator_efficiency = gen_eff
else:
# Bad design
for k in outputs.keys():
outputs[k] = 1e30
return
######################## Rotor inactive (structural) design ###################################
# Radial deformation of rotor
R = rad_ag + h_m
L_r = len_s + t_r + 0.125
constants_x_0 = shell_constant(R, t_r, L_r, 0, E, v)
constants_x_L = shell_constant(R, t_r, L_r, L_r, E, v)
f_d_denom1 = R / (E * ((R) ** 2 - (R_sh) ** 2)) * ((1 - v) * R ** 2 + (1 + v) * (R_sh) ** 2)
f_d_denom2 = (
t_r
/ (2 * constants_x_0[0] * (constants_x_0[1]) ** 3)
* (
constants_x_0[2] / (2 * constants_x_0[3]) * constants_x_0[4]
- constants_x_0[5] / constants_x_0[3] * constants_x_0[6]
- 0.5 * constants_x_0[7]
)
)
f = q * (R) ** 2 * t_r / (E * (h_yr + h_sr) * (f_d_denom1 + f_d_denom2))
u_d = (
f
/ (constants_x_L[0] * (constants_x_L[1]) ** 3)
* (
(
constants_x_L[2] / (2 * constants_x_L[3]) * constants_x_L[4]
- constants_x_L[5] / constants_x_L[3] * constants_x_L[6]
- 0.5 * constants_x_L[7]
)
)
+ y_sh
)
u_ar = (q * (R) ** 2) / (E * (h_yr + h_sr)) - u_d
u_ar = np.abs(u_ar + y_sh)
u_allow_r = 2 * rad_ag / 1000 * inputs["u_allow_pcent"] / 100
# axial deformation of rotor
W_back_iron = plate_constant(R + h_sr + h_yr, R_sh, E, v, 0.5 * h_yr + R, t_r)
W_ssteel = plate_constant(R + h_sr + h_yr, R_sh, E, v, h_yr + R + h_sr * 0.5, t_r)
W_mag = plate_constant(R + h_sr + h_yr, R_sh, E, v, h_yr + R - 0.5 * h_m, t_r)
W_ir = rho_Fe * gravity * np.sin(phi) * (L_r - t_r) * h_yr
y_ai1r = (
-W_ir
* (0.5 * h_yr + R) ** 4
/ (R_sh * W_back_iron[0])
* (W_back_iron[1] * W_back_iron[4] / W_back_iron[3] - W_back_iron[2])
)
W_sr = rho_Fes * gravity * np.sin(phi) * (L_r - t_r) * h_sr
y_ai2r = (
-W_sr
* (h_sr * 0.5 + h_yr + R) ** 4
/ (R_sh * W_ssteel[0])
* (W_ssteel[1] * W_ssteel[4] / W_ssteel[3] - W_ssteel[2])
)
W_m = np.sin(phi) * mass_PM / (2 * np.pi * (R - h_m * 0.5))
y_ai3r = -W_m * (R - h_m) ** 4 / (R_sh * W_mag[0]) * (W_mag[1] * W_mag[4] / W_mag[3] - W_mag[2])
w_disc_r = rho_Fes * gravity * np.sin(phi) * t_r
a_ii = R + h_sr + h_yr
r_oii = R_sh
M_rb = (
-w_disc_r
* a_ii ** 2
/ W_ssteel[5]
* (W_ssteel[6] * 0.5 / (a_ii * R_sh) * (a_ii ** 2 - r_oii ** 2) - W_ssteel[8])
)
Q_b = w_disc_r * 0.5 / R_sh * (a_ii ** 2 - r_oii ** 2)
y_aiir = (
M_rb * a_ii ** 2 / W_ssteel[0] * W_ssteel[1]
+ Q_b * a_ii ** 3 / W_ssteel[0] * W_ssteel[2]
- w_disc_r * a_ii ** 4 / W_ssteel[0] * W_ssteel[7]
)
I = np.pi * 0.25 * (R ** 4 - (R_sh) ** 4)
F_ecc = q * 2 * np.pi * K_rad * rad_ag ** 3
M_ar = F_ecc * L_r * 0.5
y_ar = (
np.abs(y_ai1r + y_ai2r + y_ai3r)
+ y_aiir
+ (R + h_yr + h_sr) * inputs["theta_sh"]
+ M_ar * L_r ** 2 * 0 / (2 * E * I)
)
y_allow_r = L_r / 100 * inputs["y_allow_pcent"]
# Torsional deformation of rotor
J_dr = 0.5 * np.pi * ((R + h_yr + h_sr) ** 4 - R_sh ** 4)
J_cylr = 0.5 * np.pi * ((R + h_yr + h_sr) ** 4 - R ** 4)
twist_r = 180 / np.pi * inputs["rated_torque"] / G * (t_r / J_dr + (L_r - t_r) / J_cylr)
Structural_mass_rotor = (
rho_Fes
* np.pi
* (((R + h_yr + h_sr) ** 2 - (R_sh) ** 2) * t_r + ((R + h_yr + h_sr) ** 2 - (R + h_yr) ** 2) * len_s)
)
TC1 = inputs["rated_torque"] / (2 * np.pi * sigma)
TC2r = (R + (h_yr + h_sr)) ** 2 * L_r
######################## Stator inactive (structural) design ###################################
# Radial deformation of Stator
L_stator = len_s + t_s + 0.1
R_stator = rad_ag - len_ag - h_t - h_ys - h_ss
constants_x_0 = shell_constant(R_stator, t_s, L_stator, 0, E, v)
constants_x_L = shell_constant(R_stator, t_s, L_stator, L_stator, E, v)
f_d_denom1 = (
R_stator / (E * ((R_stator) ** 2 - (R_no) ** 2)) * ((1 - v) * R_stator ** 2 + (1 + v) * (R_no) ** 2)
)
f_d_denom2 = (
t_s
/ (2 * constants_x_0[0] * (constants_x_0[1]) ** 3)
* (
constants_x_0[2] / (2 * constants_x_0[3]) * constants_x_0[4]
- constants_x_0[5] / constants_x_0[3] * constants_x_0[6]
- 0.5 * constants_x_0[7]
)
)
f = q * (R_stator) ** 2 * t_s / (E * (h_ys + h_ss) * (f_d_denom1 + f_d_denom2))
# TODO: Adds y_bd twice?
u_as = (
(q * (R_stator) ** 2) / (E * (h_ys + h_ss))
- f
* 0
/ (constants_x_L[0] * (constants_x_L[1]) ** 3)
* (
(
constants_x_L[2] / (2 * constants_x_L[3]) * constants_x_L[4]
- constants_x_L[5] / constants_x_L[3] * constants_x_L[6]
- 1 / 2 * constants_x_L[7]
)
)
+ y_bd
)
u_as = np.abs(u_as + y_bd)
u_allow_s = 2 * rad_ag / 1000 * inputs["u_allow_pcent"] / 100
# axial deformation of stator
W_back_iron = plate_constant(R_stator + h_ss + h_ys + h_t, R_no, E, v, 0.5 * h_ys + h_ss + R_stator, t_s)
W_ssteel = plate_constant(R_stator + h_ss + h_ys + h_t, R_no, E, v, R_stator + h_ss * 0.5, t_s)
W_active = plate_constant(R_stator + h_ss + h_ys + h_t, R_no, E, v, R_stator + h_ss + h_ys + h_t * 0.5, t_s)
W_is = rho_Fe * gravity * np.sin(phi) * (L_stator - t_s) * h_ys
y_ai1s = (
-W_is
* (0.5 * h_ys + R_stator) ** 4
/ (R_no * W_back_iron[0])
* (W_back_iron[1] * W_back_iron[4] / W_back_iron[3] - W_back_iron[2])
)
W_ss = rho_Fes * gravity * np.sin(phi) * (L_stator - t_s) * h_ss
y_ai2s = (
-W_ss
* (h_ss * 0.5 + h_ys + R_stator) ** 4
/ (R_no * W_ssteel[0])
* (W_ssteel[1] * W_ssteel[4] / W_ssteel[3] - W_ssteel[2])
)
W_cu = np.sin(phi) * Mass_tooth_stator / (2 * np.pi * (R_stator + h_ss + h_ys + h_t * 0.5))
y_ai3s = (
-W_cu
* (R_stator + h_ss + h_ys + h_t * 0.5) ** 4
/ (R_no * W_active[0])
* (W_active[1] * W_active[4] / W_active[3] - W_active[2])
)
w_disc_s = rho_Fes * gravity * np.sin(phi) * t_s
a_ii = R_stator + h_ss + h_ys + h_t
r_oii = R_no
M_rb = (
-w_disc_s
* a_ii ** 2
/ W_ssteel[5]
* (W_ssteel[6] * 0.5 / (a_ii * R_no) * (a_ii ** 2 - r_oii ** 2) - W_ssteel[8])
)
Q_b = w_disc_s * 0.5 / R_no * (a_ii ** 2 - r_oii ** 2)
y_aiis = (
M_rb * a_ii ** 2 / W_ssteel[0] * W_ssteel[1]
+ Q_b * a_ii ** 3 / W_ssteel[0] * W_ssteel[2]
- w_disc_s * a_ii ** 4 / W_ssteel[0] * W_ssteel[7]
)
I = np.pi * 0.25 * (R_stator ** 4 - (R_no) ** 4)
F_ecc = q * 2 * np.pi * K_rad * rad_ag ** 2
M_as = F_ecc * L_stator * 0.5
y_as = np.abs(
y_ai1s + y_ai2s + y_ai3s + y_aiis + (R_stator + h_ys + h_ss + h_t) * inputs["theta_bd"]
) + M_as * L_stator ** 2 * 0 / (2 * E * I)
y_allow_s = L_stator * inputs["y_allow_pcent"] / 100
# Torsional deformation of stator
J_ds = 0.5 * np.pi * ((R_stator + h_ys + h_ss + h_t) ** 4 - R_no ** 4)
J_cyls = 0.5 * np.pi * ((R_stator + h_ys + h_ss + h_t) ** 4 - R_stator ** 4)
twist_s = 180.0 / np.pi * inputs["rated_torque"] / G * (t_s / J_ds + (L_stator - t_s) / J_cyls)
Structural_mass_stator = rho_Fes * (
np.pi * ((R_stator + h_ys + h_ss + h_t) ** 2 - (R_no) ** 2) * t_s
+ np.pi * ((R_stator + h_ss) ** 2 - R_stator ** 2) * len_s
)
TC2s = (R_stator + h_ys + h_ss + h_t) ** 2 * L_stator
######################## Outputs ###################################
outputs["K_rad"] = K_rad
outputs["len_ag"] = len_ag
outputs["tau_p"] = tau_p
outputs["S"] = S
outputs["tau_s"] = tau_s
outputs["b_m"] = b_m
outputs["f"] = freq
outputs["B_pm1"] = B_pm1
outputs["B_g"] = B_g
outputs["B_symax"] = B_symax
outputs["B_rymax"] = B_rymax
outputs["b_t"] = b_t
outputs["q"] = q
outputs["N_s"] = N_s[-1]
outputs["A_Cuscalc"] = A_Cuscalc
outputs["b_s"] = b_s
outputs["L_s"] = L_s
outputs["J_s"] = J_s
outputs["Slot_aspect_ratio"] = h_s / b_s
outputs["I_s"] = I_s
outputs["A_1"] = A_1
outputs["J_actual"] = J_actual
outputs["R_s"] = R_s
outputs["B_smax"] = B_smax[-1]
outputs["h_t"] = h_t
outputs["Copper"] = Copper
outputs["Iron"] = Iron
outputs["mass_PM"] = mass_PM
outputs["T_e"] = T_e
outputs["Mass_tooth_stator"] = Mass_tooth_stator
outputs["Mass_yoke_rotor"] = Mass_yoke_rotor
outputs["Mass_yoke_stator"] = Mass_yoke_stator
outputs["R_out"] = R_out
outputs["Losses"] = Losses
outputs["eandm_efficiency"] = np.maximum(eps, gen_eff)
outputs["u_ar"] = u_ar
outputs["u_allow_r"] = u_allow_r
outputs["y_ar"] = y_ar
outputs["y_allow_r"] = y_allow_r
outputs["twist_r"] = twist_r
outputs["Structural_mass_rotor"] = Structural_mass_rotor
outputs["TC1"] = TC1
outputs["TC2r"] = TC2r
outputs["u_as"] = u_as
outputs["u_allow_s"] = u_allow_s
outputs["y_as"] = y_as
outputs["y_allow_s"] = y_allow_s
outputs["twist_s"] = twist_s
outputs["Structural_mass_stator"] = Structural_mass_stator
outputs["TC2s"] = TC2s
outputs["Structural_mass"] = outputs["Structural_mass_rotor"] + outputs["Structural_mass_stator"]
outputs["stator_mass"] = Stator + outputs["Structural_mass_stator"]
outputs["rotor_mass"] = Rotor + outputs["Structural_mass_rotor"]
outputs["generator_mass"] = Stator + Rotor + outputs["Structural_mass"]
# ----------------------------------------------------------------------------------------
class PMSG_Disc(GeneratorBase):
"""
    Estimates overall mass, dimensions, and efficiency of a PMSG disc-rotor generator.
Parameters
----------
tau_p : float, [m]
Pole pitch self.tau_p
t_d : float, [m]
disc thickness
h_yr : float, [m]
rotor yoke height
h_ys : float, [m]
Yoke height
Returns
-------
B_tmax : float, [T]
Peak Teeth flux density
B_smax : float, [T]
Peak Stator Yoke flux density B_ymax
B_symax : float, [T]
Peak Stator Yoke flux density B_ymax
E_p : float
Stator phase voltage
"""
def initialize(self):
super(PMSG_Disc, self).initialize()
def setup(self):
super(PMSG_Disc, self).setup()
self.add_input("tau_p", val=0.0, units="m")
self.add_input("t_d", val=0.0, units="m")
self.add_input("h_yr", val=0.0, units="m")
self.add_input("h_ys", val=0.0, units="m")
self.add_output("B_tmax", val=0.0, units="T")
self.add_output("B_smax", val=0.0, units="T")
self.add_output("B_symax", val=0.0, units="T")
self.add_output("E_p", val=np.zeros(self.options["n_pc"]))
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
rad_ag = inputs["rad_ag"]
len_s = inputs["len_s"]
h_s = inputs["h_s"]
tau_p = inputs["tau_p"]
h_m = inputs["h_m"]
h_ys = inputs["h_ys"]
h_yr = inputs["h_yr"]
machine_rating = inputs["machine_rating"]
shaft_rpm = inputs["shaft_rpm"]
Torque = inputs["rated_torque"]
b_st = inputs["b_st"]
d_s = inputs["d_s"]
t_ws = inputs["t_ws"]
n_s = inputs["n_s"]
t_d = inputs["t_d"]
R_sh = 0.5 * inputs["D_shaft"]
rho_Fe = inputs["rho_Fe"]
rho_Copper = inputs["rho_Copper"]
rho_Fes = inputs["rho_Fes"]
rho_PM = inputs["rho_PM"]
# Grab constant values
B_r = inputs["B_r"]
E = inputs["E"]
P_Fe0e = inputs["P_Fe0e"]
P_Fe0h = inputs["P_Fe0h"]
S_N = inputs["S_N"]
alpha_p = inputs["alpha_p"]
b_r_tau_r = inputs["b_r_tau_r"]
b_ro = inputs["b_ro"]
b_s_tau_s = inputs["b_s_tau_s"]
b_so = inputs["b_so"]
cofi = inputs["cofi"]
freq = inputs["freq"]
h_i = inputs["h_i"]
h_sy0 = inputs["h_sy0"]
h_w = inputs["h_w"]
k_fes = inputs["k_fes"]
k_fillr = inputs["k_fillr"]
k_fills = inputs["k_fills"]
k_s = inputs["k_s"]
m = discrete_inputs["m"]
mu_0 = inputs["mu_0"]
mu_r = inputs["mu_r"]
p = inputs["p"]
phi = inputs["phi"]
q1 = discrete_inputs["q1"]
ratio_mw2pp = inputs["ratio_mw2pp"]
resist_Cu = inputs["resist_Cu"]
sigma = inputs["sigma"]
v = inputs["v"]
y_tau_p = inputs["y_tau_p"]
y_tau_pr = inputs["y_tau_pr"]
"""
# Assign values to universal constants
B_r = 1.2 # remnant flux density (Tesla = kg / (s^2 A))
E = 2e11 # N / m^2 young's modulus
sigma = 40000.0 # shear stress assumed
ratio_mw2pp = 0.7 # ratio of magnet width to pole pitch(bm / self.tau_p)
mu_0 = np.pi * 4e-7 # permeability of free space in m * kg / (s**2 * A**2)
mu_r = 1.06 # relative permeability (probably for neodymium magnets, often given as 1.05 - GNS)
phi = np.deg2rad(90) # tilt angle (rotor tilt -90 degrees during transportation)
cofi = 0.85 # power factor
# Assign values to design constants
h_w = 0.005 # wedge height
y_tau_p = 1.0 # coil span to pole pitch
m = 3 # no of phases
q1 = 1 # no of slots per pole per phase
b_s_tau_s = 0.45 # slot width / slot pitch ratio
k_fills = 0.65 # Slot fill factor
P_Fe0h = 4.0 # specific hysteresis losses W / kg @ 1.5 T
        P_Fe0e = 1.0 # specific eddy losses W / kg @ 1.5 T
resist_Cu = 1.8e-8 * 1.4 # resistivity of copper
b_so = 0.004 # stator slot opening
k_fes = 0.9 # useful iron stack length
#T = Torque
v = 0.3 # poisson's ratio
"""
# back iron thickness for rotor and stator
t_s = h_ys
t = h_yr
# Aspect ratio
K_rad = len_s / (2 * rad_ag) # aspect ratio
        ###################################################### Electromagnetic design #############################################
dia_ag = 2 * rad_ag # air gap diameter
len_ag = 0.001 * dia_ag # air gap length
b_m = ratio_mw2pp * tau_p # magnet width
l_u = k_fes * len_s # useful iron stack length
l_e = len_s + 2 * 0.001 * rad_ag # equivalent core length
r_r = rad_ag - len_ag # rotor radius
p = np.round(np.pi * rad_ag / tau_p) # pole pairs Eq.(11)
f = p * shaft_rpm / 60.0 # rpm to frequency (Hz)
S = 2 * p * q1 * m # Stator slots Eq.(12)
N_conductors = S * 2
N_s = N_conductors / 2 / m # Stator turns per phase
tau_s = np.pi * dia_ag / S # slot pitch Eq.(13)
b_s = b_s_tau_s * tau_s # slot width
b_t = tau_s - b_s # tooth width Eq.(14)
Slot_aspect_ratio = h_s / b_s
alpha_p = np.pi / 2 * 0.7
# Calculating Carter factor for stator and effective air gap length
gamma = (
4
/ np.pi
* (
b_so / 2 / (len_ag + h_m / mu_r) * np.arctan(b_so / 2 / (len_ag + h_m / mu_r))
- np.log(np.sqrt(1 + (b_so / 2 / (len_ag + h_m / mu_r)) ** 2))
)
)
k_C = tau_s / (tau_s - gamma * (len_ag + h_m / mu_r)) # carter coefficient
g_eff = k_C * (len_ag + h_m / mu_r)
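        # k_C >= 1: slot openings raise the local gap reluctance, so the magnetically
        # effective gap (mechanical gap plus magnet recoil length h_m / mu_r) is
        # inflated before flux densities are computed, letting the bore be treated as smooth.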
# angular frequency in radians / sec
om_m = 2 * np.pi * (shaft_rpm / 60.0) # rpm to rad/s
om_e = p * om_m / 2
# Calculating magnetic loading
B_pm1 = B_r * h_m / mu_r / g_eff
B_g = B_r * h_m / mu_r / g_eff * (4.0 / np.pi) * np.sin(alpha_p)
B_symax = B_g * b_m * l_e / (2 * h_ys * l_u)
B_rymax = B_g * b_m * l_e / (2 * h_yr * len_s)
B_tmax = B_g * tau_s / b_t
k_wd = np.sin(np.pi / 6) / q1 / np.sin(np.pi / 6 / q1) # winding factor
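        # (for q1 = 1 slot per pole per phase this distribution factor is exactly 1)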
L_t = len_s + 2 * tau_p
# Stator winding length, cross-section and resistance
l_Cus = 2 * N_s * (2 * tau_p + L_t)
A_s = b_s * (h_s - h_w) * q1 * p # m^2
A_scalc = b_s * 1e3 * (h_s - h_w) * 1e3 * q1 * p # mm^2
A_Cus = A_s * k_fills / N_s
A_Cuscalc = A_scalc * k_fills / N_s
R_s = l_Cus * resist_Cu / A_Cus
# Calculating leakage inductance in stator
L_m = 2 * mu_0 * N_s ** 2 / p * m * k_wd ** 2 * tau_p * L_t / np.pi ** 2 / g_eff
L_ssigmas = (
2 * mu_0 * N_s ** 2 / p / q1 * len_s * ((h_s - h_w) / (3 * b_s) + h_w / b_so)
) # slot leakage inductance
L_ssigmaew = (
2 * mu_0 * N_s ** 2 / p / q1 * len_s * 0.34 * len_ag * (l_e - 0.64 * tau_p * y_tau_p) / len_s
) # end winding leakage inductance
L_ssigmag = (
2 * mu_0 * N_s ** 2 / p / q1 * len_s * (5 * (len_ag * k_C / b_so) / (5 + 4 * (len_ag * k_C / b_so)))
) # tooth tip leakage inductance
L_ssigma = L_ssigmas + L_ssigmaew + L_ssigmag
L_s = L_m + L_ssigma
# Calculating no-load voltage induced in the stator and stator current
E_p = np.sqrt(2) * N_s * L_t * rad_ag * k_wd * om_m * B_g
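        # Same no-load EMF expression as in PMSG_Arms below, where it is written as
        # 2 * N_s * L_t * rad_ag * k_wd * om_m * B_g / sqrt(2).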
Z = machine_rating / (m * E_p)
G = np.maximum(0.0, E_p ** 2 - (om_e * L_s * Z) ** 2)
# Calculating stator current and electrical loading
I_s = np.sqrt(Z ** 2 + (((E_p - G ** 0.5) / (om_e * L_s) ** 2) ** 2))
B_smax = np.sqrt(2) * I_s * mu_0 / g_eff
J_s = I_s / A_Cuscalc
A_1 = 6 * N_s * I_s / (np.pi * dia_ag)
I_snom = machine_rating / (m * E_p * cofi) # rated current
I_qnom = machine_rating / (m * E_p)
X_snom = om_e * (L_m + L_ssigma)
# Calculating electromagnetically active mass
V_Cus = m * l_Cus * A_Cus # copper volume
V_Fest = L_t * 2 * p * q1 * m * b_t * h_s # volume of iron in stator tooth
V_Fesy = L_t * np.pi * ((rad_ag + h_s + h_ys) ** 2 - (rad_ag + h_s) ** 2) # volume of iron in stator yoke
V_Fery = L_t * np.pi * ((r_r - h_m) ** 2 - (r_r - h_m - h_yr) ** 2) # volume of iron in rotor yoke
Copper = V_Cus * rho_Copper
M_Fest = V_Fest * rho_Fe # mass of stator tooth
M_Fesy = V_Fesy * rho_Fe # mass of stator yoke
M_Fery = V_Fery * rho_Fe # mass of rotor yoke
Iron = M_Fest + M_Fesy + M_Fery
# Calculating losses
# 1.Copper losses
        K_R = 1.2 # Skin effect correction coefficient
P_Cu = m * I_snom ** 2 * R_s * K_R
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyys = M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator yoke
P_Ftys = (
M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator yoke
P_Fesynom = P_Hyys + P_Ftys
P_Hyd = M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator teeth
P_Ftd = (
M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator teeth
P_Festnom = P_Hyd + P_Ftd
P_ad = 0.2 * (P_Hyys + P_Ftys + P_Hyd + P_Ftd) # additional stray losses due to leakage flux
pFtm = 300 # specific magnet loss
P_Ftm = pFtm * 2 * p * b_m * len_s # magnet losses
Losses = P_Cu + P_Festnom + P_Fesynom + P_ad + P_Ftm
gen_eff = machine_rating / (machine_rating + Losses)
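        # losses are referenced to electrical output, hence eff = P / (P + Losses)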
################################################## Structural Design ############################################################
## Structural deflection calculations
# rotor structure
R = rad_ag - len_ag - h_m - 0.5 * t # mean radius of the rotor rim
# l = L_t using L_t everywhere now
b = R_sh # Shaft radius (not used)
R_b = R - 0.5 * t # Inner radius of the rotor
R_a = R + 0.5 * h_yr # Outer radius of rotor yoke
a = R - 0.5 * t # same as R_b
a_1 = R_b # same as R_b, a
c = R / 500
u_allow_r = c / 20 # allowable radial deflection
y_allow = 2 * L_t / 100 # allowable axial deflection
R_1 = R - 0.5 * t # inner radius of rotor cylinder # same as R_b, a, a_1 (not used)
K = 4 * (np.sin(ratio_mw2pp * np.pi / 2)) / np.pi # (not used)
q3 = B_g ** 2 / (2 * mu_0) # normal component of Maxwell's stress
mass_PM = 2 * np.pi * (R + 0.5 * t) * L_t * h_m * ratio_mw2pp * rho_PM # magnet mass
mass_st_lam = rho_Fe * 2 * np.pi * R * L_t * h_yr # mass of rotor yoke steel
# Calculation of radial deflection of rotor
# cylindrical shell function and circular plate parameters for disc rotor based on Table 11.2 Roark's formulas
# lamb, C* and F* parameters are from Appendix A of McDonald
lamb = (3 * (1 - v ** 2) / R_a ** 2 / h_yr ** 2) ** 0.25 # m^-1
x1 = lamb * L_t # no units
# ----------------
C_2 = chsPshc(x1)
C_4 = chsMshc(x1)
C_13 = chsMshc(x1) # (not used)
C_a2 = chsPshc(x1 * 0.5)
F_2_x0 = chsPshc(lamb * 0)
F_2_ls2 = chsPshc(x1 / 2)
F_a4_x0 = chsMshc(lamb * (0))
Fa4arg = np.pi / 180 * lamb * (0.5 * len_s - a)
F_a4_ls2 = chsMshc(Fa4arg)
# print('pmsg_disc: F_a4_ls2, Fa4arg, lamb, len_s, a ', F_a4_ls2, Fa4arg, lamb, len_s, a)
# if np.isnan(F_a4_ls2):
# sys.stderr.write('*** pmsg_discSE error: F_a4_ls2 is nan\n')
# C_2 = np.cosh(x1) * np.sin(x1) + np.sinh(x1) * np.cos(x1)
C_3 = np.sinh(x1) * np.sin(x1)
# C_4 = np.cosh(x1) * np.sin(x1) - np.sinh(x1) * np.cos(x1)
C_11 = (np.sinh(x1)) ** 2 - (np.sin(x1)) ** 2
# C_13 = np.cosh(x1) * np.sinh(x1) - np.cos(x1) * np.sin(x1) # (not used)
C_14 = np.sinh(x1) ** 2 + np.sin(x1) ** 2 # (not used)
C_a1 = np.cosh(x1 * 0.5) * np.cos(x1 * 0.5)
# C_a2 = np.cosh(x1 * 0.5) * np.sin(x1 * 0.5) + np.sinh(x1 * 0.5) * np.cos(x1 * 0.5)
F_1_x0 = np.cosh(lamb * 0) * np.cos(lamb * 0)
F_1_ls2 = np.cosh(lamb * 0.5 * len_s) * np.cos(lamb * 0.5 * len_s)
# F_2_x0 = np.cosh(lamb * 0) * np.sin(lamb * 0) + np.sinh(lamb * 0) * np.cos(lamb * 0)
# F_2_ls2 = np.cosh(x1 / 2) * np.sin(x1 / 2) + np.sinh(x1 / 2) * np.cos(x1 / 2)
if len_s < 2 * a:
a = len_s / 2
else:
a = len_s * 0.5 - 1
# F_a4_x0 = np.cosh(lamb * (0)) * np.sin(lamb * (0)) \
# - np.sinh(lamb * (0)) * np.cos(lamb * (0))
# F_a4_ls2 = np.cosh(np.pi / 180 * lamb * (0.5 * len_s - a)) * np.sin(np.pi / 180 * lamb * (0.5 * len_s - a)) \
# - np.sinh(np.pi / 180 * lamb * (0.5 * len_s - a)) * np.cos(np.pi / 180 * lamb * (0.5 * len_s - a))
"""
Where did the np.pi/180 factor (conversion to radians) come from?
lamb is m^-1
0.5*len_s - a is m
"""
# ----------------
D_r = E * h_yr ** 3 / (12 * (1 - v ** 2))
D_ax = E * t_d ** 3 / (12 * (1 - v ** 2))
# Radial deflection analytical model from McDonald's thesis defined in parts
Part_1 = R_b * ((1 - v) * R_b ** 2 + (1 + v) * R_sh ** 2) / (R_b ** 2 - R_sh ** 2) / E
Part_2 = (C_2 * C_a2 - 2 * C_3 * C_a1) / 2 / C_11
Part_3 = (C_3 * C_a2 - C_4 * C_a1) / C_11
Part_4 = 0.25 / D_r / lamb ** 3
Part_5 = q3 * R_b ** 2 / (E * (R_a - R_b))
f_d = Part_5 / (Part_1 - t_d * (Part_4 * Part_2 * F_2_ls2 - Part_3 * 2 * Part_4 * F_1_ls2 - Part_4 * F_a4_ls2))
fr = f_d * t_d
u_ar = abs(
Part_5
+ fr
/ (2 * D_r * lamb ** 3)
* (
(-F_1_x0 / C_11) * (C_3 * C_a2 - C_4 * C_a1)
+ (F_2_x0 / 2 / C_11) * (C_2 * C_a2 - 2 * C_3 * C_a1)
- F_a4_x0 / 2
)
)
# Calculation of Axial deflection of rotor
W = (
0.5 * gravity * np.sin(phi) * ((L_t - t_d) * h_yr * rho_Fes)
) # uniform annular line load acting on rotor cylinder assumed as an annular plate
        w = rho_Fes * gravity * np.sin(phi) * t_d # disc treated as an annular plate under a uniformly distributed pressure
a_i = R_sh
# Flat circular plate constants according to Roark's table 11.2
C_2p = 0.25 * (1 - (((R_sh / R) ** 2) * (1 + (2 * np.log(R / R_sh)))))
C_3p = (R_sh / 4 / R) * ((1 + (R_sh / R) ** 2) * np.log(R / R_sh) + (R_sh / R) ** 2 - 1)
C_6 = (R_sh / 4 / R_a) * ((R_sh / R_a) ** 2 - 1 + 2 * np.log(R_a / R_sh))
C_5 = 0.5 * (1 - (R_sh / R) ** 2)
C_8 = 0.5 * (1 + v + (1 - v) * ((R_sh / R) ** 2))
C_9 = (R_sh / R) * (0.5 * (1 + v) * np.log(R / R_sh) + (1 - v) / 4 * (1 - (R_sh / R) ** 2))
# Flat circular plate loading constants
L_11 = (
1
+ 4 * (R_sh / a_1) ** 2
- 5 * (R_sh / a_1) ** 4
- 4 * ((R_sh / a_1) ** 2) * np.log(a_1 / R_sh) * (2 + (R_sh / a_1) ** 2)
) / 64
L_14 = (1 - (R_sh / R_b) ** 4 - 4 * (R_sh / R_b) ** 2 * np.log(R_b / R_sh)) / 16
y_ai = (
-W * (a_1 ** 3) * (C_2p * (C_6 * a_1 / R_sh - C_6) / C_5 - a_1 * C_3p / R_sh + C_3p) / D_ax
) # Axial deflection of plate due to deflection of an annular plate with a uniform annular line load
        # Axial deflection due to uniformly distributed pressure load
M_rb = -w * R ** 2 * (C_6 * (R ** 2 - R_sh ** 2) * 0.5 / R / R_sh - L_14) / C_5
Q_b = w * 0.5 * (R ** 2 - R_sh ** 2) / R_sh
y_aii = M_rb * R_a ** 2 * C_2p / D_ax + Q_b * R_a ** 3 * C_3p / D_ax - w * R_a ** 4 * L_11 / D_ax
y_ar = abs(y_ai + y_aii)
z_allow_r = np.deg2rad(0.05 * R) # allowable torsional deflection of rotor
# stator structure deflection calculation
R_out = R / 0.995 + h_s + h_ys
        a_s = (b_st * d_s) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws)) # cross-sectional area of stator arms
        A_st = L_t * t_s # cross-sectional area of stator cylinder
N_st = np.round(n_s)
theta_s = np.pi * 1 / N_st # half angle between spokes
I_st = L_t * t_s ** 3 / 12 # second moment of area of stator cylinder
I_arm_axi_s = (
(b_st * d_s ** 3) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws) ** 3)
) / 12 # second moment of area of stator arm
I_arm_tor_s = (
(d_s * b_st ** 3) - ((d_s - 2 * t_ws) * (b_st - 2 * t_ws) ** 3)
        ) / 12 # second moment of area of stator arm w.r.t. torsion
R_st = rad_ag + h_s + h_ys * 0.5
k_2 = np.sqrt(I_st / A_st) # radius of gyration
b_allow_s = 2 * np.pi * R_sh / N_st
m2 = (k_2 / R_st) ** 2
c1 = R_st / 500
R_1s = R_st - t_s * 0.5
d_se = dia_ag + 2 * (h_ys + h_s + h_w) # stator outer diameter
# Calculation of radial deflection of stator
Numers = R_st ** 3 * (
(0.25 * (np.sin(theta_s) - (theta_s * np.cos(theta_s))) / (np.sin(theta_s)) ** 2)
- (0.5 / np.sin(theta_s))
+ (0.5 / theta_s)
)
Povs = ((theta_s / (np.sin(theta_s)) ** 2) + 1 / np.tan(theta_s)) * (
(0.25 * R_st / A_st) + (0.25 * R_st ** 3 / I_st)
)
Qovs = R_st ** 3 / (2 * I_st * theta_s * (m2 + 1))
Lovs = (R_1s - R_sh) * 0.5 / a_s
Denoms = I_st * (Povs - Qovs + Lovs)
u_as = (q3 * R_st ** 2 / E / t_s) * (1 + Numers / Denoms)
# Calculation of axial deflection of stator
mass_st_lam_s = M_Fest + np.pi * L_t * rho_Fe * ((R_st + 0.5 * h_ys) ** 2 - (R_st - 0.5 * h_ys) ** 2)
W_is = (
0.5 * gravity * np.sin(phi) * (rho_Fes * L_t * d_s ** 2)
        ) # self-weight of the stator arm beam
W_iis = (
gravity * np.sin(phi) * (mass_st_lam_s + V_Cus * rho_Copper) / 2 / N_st
) # weight of stator cylinder and teeth
w_s = rho_Fes * gravity * np.sin(phi) * a_s * N_st # uniformly distributed load of the arms
l_is = R_st - R_sh # distance at which the weight of the stator cylinder acts
l_iis = l_is # distance at which the weight of the stator cylinder acts
        l_iiis = l_is # distance at which the distributed arm load acts
u_allow_s = c1 / 20
X_comp1 = (
W_is * l_is ** 3 / 12 / E / I_arm_axi_s
) # deflection component due to stator arm beam at which self-weight acts
X_comp2 = W_iis * l_iis ** 4 / 24 / E / I_arm_axi_s # deflection component due to 1/nth of stator cylinder
X_comp3 = w_s * l_iiis ** 4 / 24 / E / I_arm_axi_s # deflection component due to weight of arms
y_as = X_comp1 + X_comp2 + X_comp3 # axial deflection
# Stator circumferential deflection
z_allow_s = np.deg2rad(0.05 * R_st) # allowable torsional deflection
z_as = (
2 * np.pi * (R_st + 0.5 * t_s) * L_t / (2 * N_st) * sigma * (l_is + 0.5 * t_s) ** 3 / (3 * E * I_arm_tor_s)
)
mass_stru_steel = 2 * (N_st * (R_1s - R_sh) * a_s * rho_Fes)
TC1 = Torque * 1.0 / (2 * np.pi * sigma) # Torque / shear stress
TC2r = R ** 2 * L_t # Evaluating Torque constraint for rotor
TC2s = R_st ** 2 * L_t # Evaluating Torque constraint for stator
Structural_mass = mass_stru_steel + (np.pi * (R ** 2 - R_sh ** 2) * t_d * rho_Fes)
Mass = Structural_mass + Iron + Copper + mass_PM
outputs["B_tmax"] = B_tmax
outputs["B_rymax"] = B_rymax
outputs["B_symax"] = B_symax
outputs["B_smax"] = B_smax[-1]
outputs["B_pm1"] = B_pm1
outputs["B_g"] = B_g
outputs["N_s"] = N_s
outputs["b_s"] = b_s
outputs["b_t"] = b_t
outputs["A_Cuscalc"] = A_Cuscalc
outputs["b_m"] = b_m
outputs["E_p"] = E_p
outputs["f"] = f
outputs["I_s"] = I_s
outputs["R_s"] = R_s
outputs["L_s"] = L_s
outputs["A_1"] = A_1
outputs["J_s"] = J_s
outputs["Losses"] = Losses
outputs["K_rad"] = K_rad
outputs["eandm_efficiency"] = np.maximum(eps, gen_eff)
outputs["S"] = S
outputs["Slot_aspect_ratio"] = Slot_aspect_ratio
outputs["Copper"] = Copper
outputs["Iron"] = Iron
outputs["u_ar"] = u_ar
outputs["y_ar"] = y_ar
outputs["u_as"] = u_as
outputs["y_as"] = y_as
outputs["z_as"] = z_as
outputs["u_allow_r"] = u_allow_r
outputs["u_allow_s"] = u_allow_s
outputs["y_allow_r"] = outputs["y_allow_s"] = y_allow
outputs["z_allow_s"] = z_allow_s
outputs["z_allow_r"] = z_allow_r
outputs["b_allow_s"] = b_allow_s
outputs["TC1"] = TC1
outputs["TC2r"] = TC2r
outputs["TC2s"] = TC2s
outputs["R_out"] = R_out
outputs["Structural_mass"] = Structural_mass
outputs["generator_mass"] = Mass
outputs["mass_PM"] = mass_PM
# ----------------------------------------------------------------------------------------
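# A minimal standalone sketch (illustrative only; the function and argument
# names are hypothetical, and it relies on this module's numpy import np) of
# the Carter-factor correction used by the PMSG components in this file.
# Slot openings locally increase the air-gap reluctance; k_C >= 1 inflates
# the magnetically effective gap so the flux computations can treat the
# stator bore as smooth.
def _sketch_effective_airgap(tau_s=0.05, b_so=0.004, len_ag=0.006, h_m=0.009, mu_r=1.06):
    """Return (k_C, g_eff) for a slot pitch, slot opening, gap and magnet height."""
    ahm = len_ag + h_m / mu_r  # mechanical gap plus magnet recoil length
    ba = b_so / (2 * ahm)
    gamma = 4 / np.pi * (ba * np.arctan(ba) - np.log(np.sqrt(1 + ba ** 2)))
    k_C = tau_s / (tau_s - gamma * ahm)  # Carter coefficient, always >= 1
    return k_C, k_C * ahm
# For the default values above k_C is about 1.004, i.e. a small slot opening
# perturbs the effective gap by well under one percent.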
class PMSG_Arms(GeneratorBase):
"""
    Estimates overall mass, dimensions and efficiency of a PMSG arm-supported (spoked) rotor generator.
Parameters
----------
b_arm : float, [m]
arm width
tau_p : float, [m]
Pole pitch self.tau_p
h_yr : float, [m]
rotor yoke height
h_ys : float, [m]
Yoke height
Returns
-------
B_tmax : float, [T]
Peak Teeth flux density
B_smax : float, [T]
        Peak stator flux density (armature reaction)
B_symax : float, [T]
Peak Stator Yoke flux density B_ymax
E_p : float
Stator phase voltage
"""
def initialize(self):
super(PMSG_Arms, self).initialize()
def setup(self):
super(PMSG_Arms, self).setup()
self.add_input("b_arm", val=0.0, units="m")
self.add_input("tau_p", val=0.0, units="m")
self.add_input("h_yr", val=0.0, units="m")
self.add_input("h_ys", val=0.0, units="m")
self.add_output("B_tmax", val=0.0, units="T")
self.add_output("B_smax", val=0.0, units="T")
self.add_output("B_symax", val=0.0, units="T")
self.add_output("E_p", val=np.zeros(self.options["n_pc"]))
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
# r_s = inputs['r_s']
rad_ag = inputs["rad_ag"]
len_s = inputs["len_s"]
h_s = inputs["h_s"]
tau_p = inputs["tau_p"]
h_m = inputs["h_m"]
h_ys = inputs["h_ys"]
h_yr = inputs["h_yr"]
machine_rating = inputs["machine_rating"]
shaft_rpm = inputs["shaft_rpm"]
Torque = inputs["rated_torque"]
b_st = inputs["b_st"]
d_s = inputs["d_s"]
t_ws = inputs["t_ws"]
n_r = inputs["n_r"]
n_s = inputs["n_s"]
b_r = inputs["b_arm"]
d_r = inputs["d_r"]
t_wr = inputs["t_wr"]
R_sh = 0.5 * inputs["D_shaft"]
rho_Fe = inputs["rho_Fe"]
rho_Copper = inputs["rho_Copper"]
rho_Fes = inputs["rho_Fes"]
rho_PM = inputs["rho_PM"]
# Grab constant values
B_r = inputs["B_r"]
E = inputs["E"]
P_Fe0e = inputs["P_Fe0e"]
P_Fe0h = inputs["P_Fe0h"]
S_N = inputs["S_N"]
alpha_p = inputs["alpha_p"]
b_r_tau_r = inputs["b_r_tau_r"]
b_ro = inputs["b_ro"]
b_s_tau_s = inputs["b_s_tau_s"]
b_so = inputs["b_so"]
cofi = inputs["cofi"]
freq = inputs["freq"]
h_i = inputs["h_i"]
h_sy0 = inputs["h_sy0"]
h_w = inputs["h_w"]
k_fes = inputs["k_fes"]
k_fillr = inputs["k_fillr"]
k_fills = inputs["k_fills"]
k_s = inputs["k_s"]
m = discrete_inputs["m"]
mu_0 = inputs["mu_0"]
mu_r = inputs["mu_r"]
p = inputs["p"]
phi = inputs["phi"]
q1 = discrete_inputs["q1"]
ratio_mw2pp = inputs["ratio_mw2pp"]
resist_Cu = inputs["resist_Cu"]
sigma = inputs["sigma"]
v = inputs["v"]
y_tau_p = inputs["y_tau_p"]
y_tau_pr = inputs["y_tau_pr"]
"""
# Assign values to universal constants
B_r = 1.2 # Tesla remnant flux density
E = 2e11 # N / m^2 young's modulus
sigma = 40e3 # shear stress assumed (yield strength of ?? steel, in psi - GNS)
ratio_mw2pp = 0.7 # ratio of magnet width to pole pitch(bm / tau_p)
mu_0 = np.pi * 4e-7 # permeability of free space in m * kg / (s**2 * A**2)
mu_r = 1.06 # relative permeability (probably for neodymium magnets, often given as 1.05 - GNS)
phi = np.deg2rad(90) # tilt angle (rotor tilt -90 degrees during transportation)
cofi = 0.85 # power factor
# Assign values to design constants
h_w = 0.005 # Slot wedge height
h_i = 0.001 # coil insulation thickness
y_tau_p = 1 # Coil span to pole pitch
m = 3 # no of phases
q1 = 1 # no of slots per pole per phase
b_s_tau_s = 0.45 # slot width to slot pitch ratio
k_fills = 0.65 # Slot fill factor
P_Fe0h = 4 # specific hysteresis losses W / kg @ 1.5 T
P_Fe0e = 1 # specific eddy losses W / kg @ 1.5 T
        resist_Cu = 1.8e-8 * 1.4 # Copper resistivity
k_fes = 0.9 # Stator iron fill factor per Grauers
b_so = 0.004 # Slot opening
alpha_p = np.pi / 2 * 0.7
"""
# back iron thickness for rotor and stator
t_s = h_ys
t = h_yr
        ###################################################### Electromagnetic design #############################################
K_rad = len_s / (2 * rad_ag) # Aspect ratio
# T = Torque # rated torque
l_u = k_fes * len_s # useful iron stack length
We = tau_p
l_b = 2 * tau_p # end winding length
l_e = len_s + 2 * 0.001 * rad_ag # equivalent core length
b_m = 0.7 * tau_p # magnet width
# Calculating air gap length
dia_ag = 2 * rad_ag # air gap diameter
len_ag = 0.001 * dia_ag # air gap length
r_m = rad_ag + h_ys + h_s # magnet radius
r_r = rad_ag - len_ag # rotor radius
p = np.round(np.pi * dia_ag / (2 * tau_p)) # pole pairs
        f = shaft_rpm * p / 60.0 # output frequency, rpm to Hz
S = 2 * p * q1 * m # Stator slots
N_conductors = S * 2
N_s = N_conductors / (2 * m) # Stator turns per phase
tau_s = np.pi * dia_ag / S # Stator slot pitch
b_s = b_s_tau_s * tau_s # slot width
b_t = tau_s - b_s # tooth width
Slot_aspect_ratio = h_s / b_s
# Calculating Carter factor for stator and effective air gap length
ahm = len_ag + h_m / mu_r
ba = b_so / (2 * ahm)
gamma = 4 / np.pi * (ba * np.arctan(ba) - np.log(np.sqrt(1 + ba ** 2)))
k_C = tau_s / (tau_s - gamma * ahm) # carter coefficient
g_eff = k_C * ahm
# angular frequency in radians
om_m = 2 * np.pi * shaft_rpm / 60.0 # rpm to radians per second
om_e = p * om_m / 2 # electrical output frequency (Hz)
# Calculating magnetic loading
B_pm1 = B_r * h_m / mu_r / g_eff
B_g = B_r * h_m / mu_r / g_eff * (4 / np.pi) * np.sin(alpha_p)
B_symax = B_g * b_m * l_e / (2 * h_ys * l_u)
B_rymax = B_g * b_m * l_e / (2 * h_yr * len_s)
B_tmax = B_g * tau_s / b_t
# Calculating winding factor
k_wd = np.sin(np.pi / 6) / q1 / np.sin(np.pi / 6 / q1)
        L_t = len_s + 2 * tau_p # overall stator length incl. end windings - should be tau_s???
# l = L_t # length - now using L_t everywhere
# Stator winding length, cross-section and resistance
l_Cus = 2 * N_s * (2 * tau_p + L_t)
A_s = b_s * (h_s - h_w) * q1 * p
A_scalc = b_s * 1000 * (h_s - h_w) * 1000 * q1 * p
A_Cus = A_s * k_fills / N_s
A_Cuscalc = A_scalc * k_fills / N_s
R_s = l_Cus * resist_Cu / A_Cus
# Calculating leakage inductance in stator
L_m = 2 * mu_0 * N_s ** 2 / p * m * k_wd ** 2 * tau_p * L_t / np.pi ** 2 / g_eff
L_ssigmas = (
2 * mu_0 * N_s ** 2 / p / q1 * len_s * ((h_s - h_w) / (3 * b_s) + h_w / b_so)
) # slot leakage inductance
L_ssigmaew = (
2 * mu_0 * N_s ** 2 / p / q1 * len_s * 0.34 * len_ag * (l_e - 0.64 * tau_p * y_tau_p) / len_s
) # end winding leakage inductance
L_ssigmag = (
2 * mu_0 * N_s ** 2 / p / q1 * len_s * (5 * (len_ag * k_C / b_so) / (5 + 4 * (len_ag * k_C / b_so)))
) # tooth tip leakage inductance
L_ssigma = L_ssigmas + L_ssigmaew + L_ssigmag
L_s = L_m + L_ssigma
# Calculating no-load voltage induced in the stator
E_p = 2 * N_s * L_t * rad_ag * k_wd * om_m * B_g / np.sqrt(2)
Z = machine_rating / (m * E_p)
G = np.maximum(0.0, E_p ** 2 - (om_e * L_s * Z) ** 2)
# Calculating stator current and electrical loading
        is2 = Z ** 2 + (((E_p - G ** 0.5) / (om_e * L_s) ** 2) ** 2) # (not used; duplicated by I_s below)
I_s = np.sqrt(Z ** 2 + (((E_p - G ** 0.5) / (om_e * L_s) ** 2) ** 2))
J_s = I_s / A_Cuscalc
A_1 = 6 * N_s * I_s / (np.pi * dia_ag)
I_snom = machine_rating / (m * E_p * cofi) # rated current
I_qnom = machine_rating / (m * E_p)
X_snom = om_e * (L_m + L_ssigma)
B_smax = np.sqrt(2) * I_s * mu_0 / g_eff
# Calculating Electromagnetically active mass
V_Cus = m * l_Cus * A_Cus # copper volume
V_Fest = L_t * 2 * p * q1 * m * b_t * h_s # volume of iron in stator tooth
V_Fesy = L_t * np.pi * ((rad_ag + h_s + h_ys) ** 2 - (rad_ag + h_s) ** 2) # volume of iron in stator yoke
V_Fery = L_t * np.pi * ((r_r - h_m) ** 2 - (r_r - h_m - h_yr) ** 2)
Copper = V_Cus * rho_Copper
M_Fest = V_Fest * rho_Fe # Mass of stator tooth
M_Fesy = V_Fesy * rho_Fe # Mass of stator yoke
M_Fery = V_Fery * rho_Fe # Mass of rotor yoke
Iron = M_Fest + M_Fesy + M_Fery
# Calculating Losses
##1. Copper Losses
        K_R = 1.2 # Skin effect correction coefficient
P_Cu = m * I_snom ** 2 * R_s * K_R
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyys = M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator yoke
P_Ftys = (
M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator yoke
P_Fesynom = P_Hyys + P_Ftys
P_Hyd = M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator teeth
P_Ftd = (
M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator teeth
P_Festnom = P_Hyd + P_Ftd
# additional stray losses due to leakage flux
P_ad = 0.2 * (P_Hyys + P_Ftys + P_Hyd + P_Ftd)
pFtm = 300 # specific magnet loss
P_Ftm = pFtm * 2 * p * b_m * len_s
Losses = P_Cu + P_Festnom + P_Fesynom + P_ad + P_Ftm
gen_eff = machine_rating / (machine_rating + Losses)
#################################################### Structural Design ############################################################
## Deflection Calculations ##
# rotor structure calculations
a_r = (b_r * d_r) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)) # cross-sectional area of rotor arms
A_r = L_t * t # cross-sectional area of rotor cylinder
N_r = np.round(n_r) # rotor arms
theta_r = np.pi * 1 / N_r # half angle between spokes
I_r = L_t * t ** 3 / 12 # second moment of area of rotor cylinder
I_arm_axi_r = (
(b_r * d_r ** 3) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr) ** 3)
) / 12 # second moment of area of rotor arm
I_arm_tor_r = (
(d_r * b_r ** 3) - ((d_r - 2 * t_wr) * (b_r - 2 * t_wr) ** 3)
        ) / 12 # second moment of area of rotor arm w.r.t. torsion
R = rad_ag - len_ag - h_m - 0.5 * t # Rotor mean radius
c = R / 500
u_allow_r = c / 20 # allowable radial deflection
R_1 = R - t * 0.5 # inner radius of rotor cylinder
k_1 = np.sqrt(I_r / A_r) # radius of gyration
m1 = (k_1 / R) ** 2
l_ir = R # length of rotor arm beam at which rotor cylinder acts
l_iir = R_1
b_allow_r = 2 * np.pi * R_sh / N_r # allowable circumferential arm dimension for rotor
q3 = B_g ** 2 / 2 / mu_0 # normal component of Maxwell stress
mass_PM = 2 * np.pi * (R + 0.5 * t) * L_t * h_m * ratio_mw2pp * rho_PM # magnet mass
# Calculating radial deflection of the rotor
Numer = R ** 3 * (
(0.25 * (np.sin(theta_r) - (theta_r * np.cos(theta_r))) / (np.sin(theta_r)) ** 2)
- (0.5 / np.sin(theta_r))
+ (0.5 / theta_r)
)
Pov = ((theta_r / (np.sin(theta_r)) ** 2) + 1 / np.tan(theta_r)) * ((0.25 * R / A_r) + (0.25 * R ** 3 / I_r))
Qov = R ** 3 / (2 * I_r * theta_r * (m1 + 1))
Lov = (R_1 - R_sh) / a_r
Denom = I_r * (Pov - Qov + Lov) # radial deflection % rotor
u_ar = (q3 * R ** 2 / E / t) * (1 + Numer / Denom)
# Calculating axial deflection of the rotor under its own weight
w_r = rho_Fes * gravity * np.sin(phi) * a_r * N_r # uniformly distributed load of the weight of the rotor arm
mass_st_lam = rho_Fe * 2 * np.pi * R * L_t * h_yr # mass of rotor yoke steel
W = gravity * np.sin(phi) * (mass_st_lam / N_r + mass_PM / N_r) # weight of 1/nth of rotor cylinder
y_a1 = W * l_ir ** 3 / 12 / E / I_arm_axi_r # deflection from weight component of back iron
y_a2 = w_r * l_iir ** 4 / 24 / E / I_arm_axi_r # deflection from weight component of the arms
y_ar = y_a1 + y_a2 # axial deflection
y_allow = 2 * L_t / 100 # allowable axial deflection
# Calculating # circumferential deflection of the rotor
z_allow_r = np.deg2rad(0.05 * R) # allowable torsional deflection
z_ar = (
(2 * np.pi * (R - 0.5 * t) * L_t / N_r) * sigma * (l_ir - 0.5 * t) ** 3 / 3 / E / I_arm_tor_r
) # circumferential deflection
val_str_rotor = mass_PM + (mass_st_lam + (N_r * (R_1 - R_sh) * a_r * rho_Fes)) # rotor mass
# stator structure deflection calculation
        a_s = (b_st * d_s) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws)) # cross-sectional area of stator arms
A_st = L_t * t_s # cross-sectional area of stator cylinder
N_st = np.round(n_s) # stator arms
theta_s = np.pi * 1 / N_st # half angle between spokes
I_st = L_t * t_s ** 3 / 12 # second moment of area of stator cylinder
k_2 = np.sqrt(I_st / A_st) # radius of gyration
I_arm_axi_s = (
(b_st * d_s ** 3) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws) ** 3)
) / 12 # second moment of area of stator arm
I_arm_tor_s = (
(d_s * b_st ** 3) - ((d_s - 2 * t_ws) * (b_st - 2 * t_ws) ** 3)
        ) / 12 # second moment of area of stator arm w.r.t. torsion
R_st = rad_ag + h_s + h_ys * 0.5 # stator cylinder mean radius
R_1s = R_st - t_s * 0.5 # inner radius of stator cylinder, m
m2 = (k_2 / R_st) ** 2
d_se = dia_ag + 2 * (h_ys + h_s + h_w) # stator outer diameter
# allowable radial deflection of stator
c1 = R_st / 500
u_allow_s = c1 / 20
R_out = R / 0.995 + h_s + h_ys
l_is = R_st - R_sh # distance at which the weight of the stator cylinder acts
l_iis = l_is # distance at which the weight of the stator cylinder acts
        l_iiis = l_is # distance at which the distributed arm load acts
mass_st_lam_s = M_Fest + np.pi * L_t * rho_Fe * ((R_st + 0.5 * h_ys) ** 2 - (R_st - 0.5 * h_ys) ** 2)
W_is = (
0.5 * gravity * np.sin(phi) * (rho_Fes * L_t * d_s ** 2)
        ) # self-weight of the stator arm beam
W_iis = (
gravity * np.sin(phi) * (mass_st_lam_s + V_Cus * rho_Copper) / 2 / N_st
) # weight of stator cylinder and teeth
w_s = rho_Fes * gravity * np.sin(phi) * a_s * N_st # uniformly distributed load of the arms
mass_stru_steel = 2 * (N_st * (R_1s - R_sh) * a_s * rho_Fes) # Structural mass of stator arms
# Calculating radial deflection of the stator
Numers = R_st ** 3 * (
(0.25 * (np.sin(theta_s) - (theta_s * np.cos(theta_s))) / (np.sin(theta_s)) ** 2)
- (0.5 / np.sin(theta_s))
+ (0.5 / theta_s)
)
Povs = ((theta_s / (np.sin(theta_s)) ** 2) + 1 / np.tan(theta_s)) * (
(0.25 * R_st / A_st) + (0.25 * R_st ** 3 / I_st)
)
Qovs = R_st ** 3 / (2 * I_st * theta_s * (m2 + 1))
Lovs = (R_1s - R_sh) * 0.5 / a_s
Denoms = I_st * (Povs - Qovs + Lovs)
u_as = (q3 * R_st ** 2 / E / t_s) * (1 + Numers / Denoms)
# Calculating axial deflection of the stator
X_comp1 = (
W_is * l_is ** 3 / 12 / E / I_arm_axi_s
) # deflection component due to stator arm beam at which self-weight acts
X_comp2 = W_iis * l_iis ** 4 / 24 / E / I_arm_axi_s # deflection component due to 1 / nth of stator cylinder
X_comp3 = w_s * l_iiis ** 4 / 24 / E / I_arm_axi_s # deflection component due to weight of arms
y_as = X_comp1 + X_comp2 + X_comp3 # axial deflection
# Calculating circumferential deflection of the stator
z_as = 2 * np.pi * (R_st + 0.5 * t_s) * L_t / (2 * N_st) * sigma * (l_is + 0.5 * t_s) ** 3 / 3 / E / I_arm_tor_s
z_allow_s = np.deg2rad(0.05 * R_st) # allowable torsional deflection
b_allow_s = 2 * np.pi * R_sh / N_st # allowable circumferential arm dimension
val_str_stator = mass_stru_steel + mass_st_lam_s
val_str_mass = val_str_rotor + val_str_stator
TC1 = Torque / (2 * np.pi * sigma) # Desired shear stress
TC2r = R ** 2 * L_t # Evaluating Torque constraint for rotor
TC2s = R_st ** 2 * L_t # Evaluating Torque constraint for stator
Structural_mass = mass_stru_steel + (N_r * (R_1 - R_sh) * a_r * rho_Fes)
Stator = mass_st_lam_s + mass_stru_steel + Copper
Rotor = ((2 * np.pi * t * L_t * R * rho_Fe) + (N_r * (R_1 - R_sh) * a_r * rho_Fes)) + mass_PM
Mass = Stator + Rotor
outputs["B_tmax"] = B_tmax
outputs["B_rymax"] = B_rymax
outputs["B_symax"] = B_symax
outputs["B_smax"] = B_smax[-1]
outputs["B_pm1"] = B_pm1
outputs["B_g"] = B_g
outputs["N_s"] = N_s
outputs["b_s"] = b_s
outputs["b_t"] = b_t
outputs["A_Cuscalc"] = A_Cuscalc
outputs["b_m"] = b_m
outputs["E_p"] = E_p
outputs["f"] = f
outputs["I_s"] = I_s
outputs["R_s"] = R_s
outputs["L_s"] = L_s
outputs["A_1"] = A_1
outputs["J_s"] = J_s
outputs["Losses"] = Losses
outputs["K_rad"] = K_rad
outputs["eandm_efficiency"] = np.maximum(eps, gen_eff)
outputs["S"] = S
outputs["Slot_aspect_ratio"] = Slot_aspect_ratio
outputs["Copper"] = Copper
outputs["Iron"] = Iron
outputs["u_ar"] = u_ar
outputs["y_ar"] = y_ar
outputs["z_ar"] = z_ar
outputs["u_as"] = u_as
outputs["y_as"] = y_as
outputs["z_as"] = z_as
outputs["u_allow_r"] = u_allow_r
outputs["u_allow_s"] = u_allow_s
outputs["y_allow_r"] = outputs["y_allow_s"] = y_allow
outputs["z_allow_s"] = z_allow_s
outputs["z_allow_r"] = z_allow_r
outputs["b_allow_s"] = b_allow_s
outputs["b_allow_r"] = b_allow_r
outputs["TC1"] = TC1
outputs["TC2r"] = TC2r
outputs["TC2s"] = TC2s
outputs["R_out"] = R_out
outputs["Structural_mass"] = Structural_mass
outputs["generator_mass"] = Mass
outputs["mass_PM"] = mass_PM
# ----------------------------------------------------------------------------------------
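# A minimal standalone sketch (illustrative only; the function name is
# hypothetical, and it relies on this module's numpy import np) of the
# iron-loss model shared by every generator in this file: hysteresis loss
# scales linearly with electrical frequency, eddy loss with its square, and
# both are referenced to the specific losses at 1.5 T.
def _sketch_iron_loss(mass_kg, B_peak, om_e, P_Fe0h=4.0, P_Fe0e=1.0):
    """Iron loss in W for a lamination stack of the given mass and peak flux density."""
    f_norm = om_e / (2 * np.pi * 60)  # frequency ratio to the 60 Hz base used in these models
    return mass_kg * (B_peak / 1.5) ** 2 * (P_Fe0h * f_norm + P_Fe0e * f_norm ** 2)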
class DFIG(GeneratorBase):
"""
    Estimates overall mass, dimensions and efficiency of a DFIG generator.
Parameters
----------
S_Nmax : float
Max rated Slip
B_symax : float, [T]
Peak Stator Yoke flux density B_ymax
Returns
-------
N_r : float
Rotor turns
L_r : float
Rotor inductance
h_yr : float
rotor yoke height
h_ys : float
Stator Yoke height
tau_p : float
Pole pitch
Current_ratio : float
Rotor current ratio
E_p : float
Stator phase voltage
"""
def initialize(self):
super(DFIG, self).initialize()
def setup(self):
super(DFIG, self).setup()
n_pc = self.options["n_pc"]
self.add_input("S_Nmax", val=0.0)
self.add_input("B_symax", val=0.0, units="T")
self.add_output("N_r", val=0.0)
self.add_output("L_r", val=0.0)
self.add_output("h_yr", val=0.0)
self.add_output("h_ys", val=0.0)
self.add_output("tau_p", val=0.0)
self.add_output("Current_ratio", val=np.zeros(n_pc))
self.add_output("E_p", val=np.zeros(n_pc))
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
rad_ag = inputs["rad_ag"]
len_s = inputs["len_s"]
h_s = inputs["h_s"]
h_0 = inputs["h_0"]
I_0 = inputs["I_0"]
machine_rating = inputs["machine_rating"]
shaft_rpm = inputs["shaft_rpm"]
rho_Fe = inputs["rho_Fe"]
rho_Copper = inputs["rho_Copper"]
B_symax = inputs["B_symax"]
S_Nmax = inputs["S_Nmax"]
# Grab constant values
B_r = inputs["B_r"]
E = inputs["E"]
P_Fe0e = inputs["P_Fe0e"]
P_Fe0h = inputs["P_Fe0h"]
S_N = inputs["S_N"]
alpha_p = inputs["alpha_p"]
b_r_tau_r = inputs["b_r_tau_r"]
b_ro = inputs["b_ro"]
b_s_tau_s = inputs["b_s_tau_s"]
b_so = inputs["b_so"]
cofi = inputs["cofi"]
freq = inputs["freq"]
h_i = inputs["h_i"]
h_sy0 = inputs["h_sy0"]
h_w = inputs["h_w"]
k_fes = inputs["k_fes"]
k_fillr = inputs["k_fillr"]
k_s = inputs["k_s"]
m = discrete_inputs["m"]
mu_0 = inputs["mu_0"]
mu_r = inputs["mu_r"]
p = inputs["p"]
phi = inputs["phi"]
q1 = discrete_inputs["q1"]
q2 = q1 - 1 # Rotor slots per pole per phase
ratio_mw2pp = inputs["ratio_mw2pp"]
resist_Cu = inputs["resist_Cu"]
sigma = inputs["sigma"]
v = inputs["v"]
y_tau_p = inputs["y_tau_p"]
y_tau_pr = inputs["y_tau_pr"]
"""
#Assign values to universal constants
sigma = 21.5e3 # shear stress in psi (what material? Al, brass, Cu?) ~148e6 Pa
mu_0 = np.pi * 4e-7 # permeability of free space in m * kg / (s**2 * A**2)
cofi = 0.9 # power factor
h_w = 0.005 # wedge height
m = 3 # Number of phases
        resist_Cu = 1.8e-8 * 1.4 # copper resistivity
h_sy0 = 0
#Assign values to design constants
b_so = 0.004 # Stator slot opening width
b_ro = 0.004 # Rotor slot opening width
q1 = 5 # Stator slots per pole per phase
b_s_tau_s = 0.45 # Stator slot-width / slot-pitch ratio
b_r_tau_r = 0.45 # Rotor slot-width / slot-pitch ratio
y_tau_p = 12. / 15 # Stator coil span to pole pitch
y_tau_pr = 10. / 12 # Rotor coil span to pole pitch
p = 3 # pole pairs
freq = 60 # grid frequency in Hz
k_fillr = 0.55 # Rotor Slot fill factor
P_Fe0h = 4 # specific hysteresis losses W / kg @ 1.5 T
P_Fe0e = 1 # specific eddy losses W / kg @ 1.5 T
"""
K_rs = 1 / (-1 * S_Nmax) # Winding turns ratio between rotor and Stator
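        # e.g. S_Nmax = -0.3 gives K_rs = 1 / 0.3, i.e. roughly 3.3 rotor turns per stator turn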
I_SN = machine_rating / (np.sqrt(3) * 3000) # Rated current
I_SN_r = I_SN / K_rs # Stator rated current reduced to rotor
# Calculating winding factor for stator and rotor
k_y1 = np.sin(np.pi / 2 * y_tau_p) # winding chording factor
k_q1 = np.sin(np.pi / 6) / (q1 * np.sin(np.pi / (6 * q1))) # winding zone factor
k_y2 = np.sin(np.pi / 2 * y_tau_pr) # winding chording factor
k_q2 = np.sin(np.pi / 6) / (q2 * np.sin(np.pi / (6 * q2))) # winding zone factor
k_wd1 = k_y1 * k_q1 # Stator winding factor
k_wd2 = k_q2 * k_y2 # Rotor winding factor
ag_dia = 2 * rad_ag # air gap diameter
ag_len = (0.1 + 0.012 * machine_rating ** (1.0 / 3)) * 0.001 # air gap length in m
K_rad = len_s / ag_dia # Aspect ratio
rad_r = rad_ag - ag_len # rotor radius (was r_r)
tau_p = np.pi * ag_dia / (2 * p) # pole pitch
S = 2 * p * q1 * m # Stator slots
N_slots_pp = S / (m * p * 2) # Number of stator slots per pole per phase
        n = S / 2 * p / q1 # no of slots per pole per phase (note: operator precedence makes this S * p / (2 * q1))
tau_s = tau_p / (m * q1) # Stator slot pitch
b_s = b_s_tau_s * tau_s # Stator slot width
b_t = tau_s - b_s # Stator tooth width
Q_r = 2 * p * m * q2 # Rotor slots
tau_r = np.pi * (ag_dia - 2 * ag_len) / Q_r # Rotor slot pitch
b_r = b_r_tau_r * tau_r # Rotor slot width
b_tr = tau_r - b_r # Rotor tooth width
# Calculating equivalent slot openings
mu_rs = 0.005
mu_rr = 0.005
W_s = (b_s / mu_rs) * 1e-3 # Stator, in m
W_r = (b_r / mu_rr) * 1e-3 # Rotor, in m
Slot_aspect_ratio1 = h_s / b_s
Slot_aspect_ratio2 = h_0 / b_r
# Calculating Carter factor for stator,rotor and effective air gap length
gamma_s = (2 * W_s / ag_len) ** 2 / (5 + 2 * W_s / ag_len)
        K_Cs = tau_s / (tau_s - ag_len * gamma_s * 0.5) # page 3-13, Boldea, Induction Machines, Ch. 3
gamma_r = (2 * W_r / ag_len) ** 2 / (5 + 2 * W_r / ag_len)
        K_Cr = tau_r / (tau_r - ag_len * gamma_r * 0.5) # page 3-13, Boldea, Induction Machines, Ch. 3
K_C = K_Cs * K_Cr
g_eff = K_C * ag_len
om_m = 2 * np.pi * shaft_rpm / 60 # mechanical frequency
om_e = p * om_m # electrical frequency
f = shaft_rpm * p / 60 # generator output freq
K_s = 0.3 # saturation factor for Iron
n_c1 = 2 # number of conductors per coil
a1 = 2 # number of parallel paths
N_s = np.round(2 * p * N_slots_pp * n_c1 / a1) # Stator winding turns per phase
N_r = np.round(N_s * k_wd1 * K_rs / k_wd2) # Rotor winding turns per phase
n_c2 = N_r / (Q_r / m) # rotor turns per coil
# Calculating peak flux densities and back iron thickness
B_g1 = mu_0 * 3 * N_r * I_0 * np.sqrt(2) * k_y2 * k_q2 / (np.pi * p * g_eff * (1 + K_s))
B_g = B_g1 * K_C
h_ys = B_g * tau_p / (B_symax * np.pi)
B_rymax = B_symax
h_yr = h_ys
B_tsmax = B_g * tau_s / b_t
d_se = ag_dia + 2 * (h_ys + h_s + h_w) # stator outer diameter
D_ratio = d_se / ag_dia # Diameter ratio
# Stator slot fill factor
if ag_dia > 2:
k_fills = 0.65
else:
k_fills = 0.4
# Stator winding calculation
# End connection length for stator winding coils
l_fs = 2 * (0.015 + y_tau_p * tau_p / (2 * np.cos(np.deg2rad(40)))) + np.pi * h_s # added radians() 2019 09 11
l_Cus = 2 * N_s * (l_fs + len_s) / a1 # Length of Stator winding
# Conductor cross-section
A_s = b_s * (h_s - h_w)
A_scalc = b_s * 1000 * (h_s - h_w) * 1000
A_Cus = A_s * q1 * p * k_fills / N_s
A_Cuscalc = A_scalc * q1 * p * k_fills / N_s
# Stator winding resistance
R_s = l_Cus * resist_Cu / A_Cus
tau_r_min = np.pi * (ag_dia - 2 * (ag_len + h_0)) / Q_r
# Peak magnetic loading on the rotor tooth
b_trmin = tau_r_min - b_r_tau_r * tau_r_min
B_trmax = B_g * tau_r / b_trmin
# Calculating leakage inductance in stator
K_01 = 1 - 0.033 * (W_s ** 2 / ag_len / tau_s)
sigma_ds = 0.0042
L_ssigmas = (2 * mu_0 * len_s * n_c1 ** 2 * S / m / a1 ** 2) * (
(h_s - h_w) / (3 * b_s) + h_w / b_so
) # slot leakage inductance
L_ssigmaew = (
(2 * mu_0 * len_s * n_c1 ** 2 * S / m / a1 ** 2) * 0.34 * q1 * (l_fs - 0.64 * tau_p * y_tau_p) / len_s
) # end winding leakage inductance
L_ssigmag = (2 * mu_0 * len_s * n_c1 ** 2 * S / m / a1 ** 2) * (
0.9 * tau_s * q1 * k_wd1 * K_01 * sigma_ds / g_eff
) # tooth tip leakage inductance
L_ssigma = L_ssigmas + L_ssigmaew + L_ssigmag # stator leakage inductance
L_sm = 6 * mu_0 * len_s * tau_p * (k_wd1 * N_s) ** 2 / (np.pi ** 2 * (p) * g_eff * (1 + K_s))
        L_s = L_ssigmas + L_ssigmaew + L_ssigmag # stator leakage inductance (magnetizing L_sm kept separate)
# Calculating leakage inductance in rotor
K_02 = 1 - 0.033 * (W_r ** 2 / ag_len / tau_r)
sigma_dr = 0.0062
l_fr = (0.015 + y_tau_pr * tau_r / 2 / np.cos(np.deg2rad(40))) + np.pi * h_0 # Rotor end connection length
L_rsl = (mu_0 * len_s * (2 * n_c2) ** 2 * Q_r / m) * (
(h_0 - h_w) / (3 * b_r) + h_w / b_ro
) # slot leakage inductance
L_rel = (
(mu_0 * len_s * (2 * n_c2) ** 2 * Q_r / m) * 0.34 * q2 * (l_fr - 0.64 * tau_r * y_tau_pr) / len_s
) # end winding leakage inductance
L_rtl = (mu_0 * len_s * (2 * n_c2) ** 2 * Q_r / m) * (
0.9 * tau_s * q2 * k_wd2 * K_02 * sigma_dr / g_eff
) # tooth tip leakage inductance
L_r = (L_rsl + L_rtl + L_rel) / K_rs ** 2 # rotor leakage inductance
sigma1 = 1 - (L_sm ** 2 / L_s / L_r)
# Rotor Field winding
# conductor cross-section
diff = h_0 - h_w
A_Cur = k_fillr * p * q2 * b_r * diff / N_r
A_Curcalc = A_Cur * 1e6
L_cur = 2 * N_r * (l_fr + len_s) # rotor winding length
Resist_r = resist_Cu * L_cur / A_Cur # Rotor resistance
R_R = Resist_r / K_rs ** 2 # Equivalent rotor resistance reduced to stator
om_s = shaft_rpm * 2 * np.pi / 60 # synchronous speed in rad / s
P_e = machine_rating / (1 - S_Nmax) # Air gap power
# Calculating No-load voltage
E_p = om_s * N_s * k_wd1 * rad_ag * len_s * B_g1 * np.sqrt(2)
I_r = P_e / m / E_p # rotor active current
I_sm = E_p / (2 * np.pi * freq * (L_s + L_sm)) # stator reactive current
I_s = np.sqrt(I_r ** 2 + I_sm ** 2) # Stator current
I_srated = machine_rating / 3 / K_rs / E_p # Rated current
# Calculating winding current densities and specific current loading
J_s = I_s / A_Cuscalc
J_r = I_r / A_Curcalc
A_1 = 2 * m * N_s * I_s / (np.pi * 2 * rad_ag)
Current_ratio = I_0 / I_srated # Ratio of magnetization current to rated current
# Calculating masses of the electromagnetically active materials
V_Cuss = m * l_Cus * A_Cus
V_Cusr = m * L_cur * A_Cur
V_Fest = len_s * np.pi * ((rad_ag + h_s) ** 2 - rad_ag ** 2) - (2 * m * q1 * p * b_s * h_s * len_s)
V_Fesy = len_s * np.pi * ((rad_ag + h_s + h_ys) ** 2 - (rad_ag + h_s) ** 2)
V_Fert = len_s * np.pi * (rad_r ** 2 - (rad_r - h_0) ** 2) - 2 * m * q2 * p * b_r * h_0 * len_s
V_Fery = len_s * np.pi * ((rad_r - h_0) ** 2 - (rad_r - h_0 - h_yr) ** 2)
Copper = (V_Cuss + V_Cusr) * rho_Copper
M_Fest = V_Fest * rho_Fe
M_Fesy = V_Fesy * rho_Fe
M_Fert = V_Fert * rho_Fe
M_Fery = V_Fery * rho_Fe
Iron = M_Fest + M_Fesy + M_Fert + M_Fery
M_gen = (Copper) + (Iron)
# K_gen = Cu * C_Cu + (Iron) * C_Fe #%M_pm * K_pm
L_tot = len_s
Structural_mass = 0.0002 * M_gen ** 2 + 0.6457 * M_gen + 645.24
Mass = M_gen + Structural_mass
# Calculating Losses and efficiency
# 1. Copper losses
K_R = 1.2 # skin effect correction coefficient
P_Cuss = m * I_s ** 2 * R_s * K_R # Copper loss - stator
P_Cusr = m * I_r ** 2 * R_R # Copper loss - rotor
P_Cusnom = P_Cuss + P_Cusr # Copper loss - total
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyys = M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator yoke
P_Ftys = M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2) # Eddy losses in stator yoke
P_Hyd = M_Fest * (B_tsmax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator teeth
P_Ftd = M_Fest * (B_tsmax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2) # Eddy losses in stator teeth
P_Hyyr = (
M_Fery * (B_rymax / 1.5) ** 2 * (P_Fe0h * abs(S_Nmax) * om_e / (2 * np.pi * 60))
) # Hysteresis losses in rotor yoke
P_Ftyr = (
M_Fery * (B_rymax / 1.5) ** 2 * (P_Fe0e * (abs(S_Nmax) * om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in rotor yoke
P_Hydr = (
M_Fert * (B_trmax / 1.5) ** 2 * (P_Fe0h * abs(S_Nmax) * om_e / (2 * np.pi * 60))
) # Hysteresis losses in rotor teeth
P_Ftdr = (
M_Fert * (B_trmax / 1.5) ** 2 * (P_Fe0e * (abs(S_Nmax) * om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in rotor teeth
P_add = 0.5 * machine_rating / 100 # additional losses
P_Fesnom = P_Hyys + P_Ftys + P_Hyd + P_Ftd + P_Hyyr + P_Ftyr + P_Hydr + P_Ftdr # Total iron loss
delta_v = 1 # allowable brush voltage drop
p_b = 3 * delta_v * I_r # Brush loss
Losses = P_Cusnom + P_Fesnom + p_b + P_add
gen_eff = (P_e - Losses) / P_e
# Calculating stator winding current density
J_s = I_s / A_Cuscalc
# Calculating electromagnetic torque
T_e = p * (machine_rating * 1.01) / (2 * np.pi * freq * (1 - S_Nmax))
# Calculating for tangential stress constraints
TC1 = T_e / (2 * np.pi * sigma)
TC2r = rad_ag ** 2 * len_s
r_out = d_se * 0.5
outputs["R_out"] = r_out
outputs["B_g"] = B_g
outputs["B_g1"] = B_g1
outputs["B_rymax"] = B_rymax
outputs["B_tsmax"] = B_tsmax
outputs["B_trmax"] = B_trmax
outputs["N_s"] = N_s
outputs["S"] = S
outputs["h_ys"] = h_ys
outputs["b_s"] = b_s
outputs["b_t"] = b_t
outputs["D_ratio"] = D_ratio
outputs["A_Cuscalc"] = A_Cuscalc
outputs["Slot_aspect_ratio1"] = Slot_aspect_ratio1
outputs["h_yr"] = h_yr
outputs["tau_p"] = tau_p
outputs["Q_r"] = Q_r
outputs["N_r"] = N_r
outputs["b_r"] = b_r
outputs["b_trmin"] = b_trmin
outputs["b_tr"] = b_tr
outputs["A_Curcalc"] = A_Curcalc
outputs["Slot_aspect_ratio2"] = Slot_aspect_ratio2
outputs["E_p"] = E_p
outputs["f"] = f
outputs["I_s"] = I_s
outputs["A_1"] = A_1
outputs["J_s"] = J_s
outputs["J_r"] = J_r
outputs["R_s"] = R_s
outputs["R_R"] = R_R
outputs["L_r"] = L_r
outputs["L_s"] = L_s
outputs["L_sm"] = L_sm
outputs["generator_mass"] = Mass
outputs["K_rad"] = K_rad
outputs["Losses"] = Losses
outputs["eandm_efficiency"] = np.maximum(eps, gen_eff)
outputs["Copper"] = Copper
outputs["Iron"] = Iron
outputs["Structural_mass"] = Structural_mass
outputs["TC1"] = TC1
outputs["TC2r"] = TC2r
outputs["Current_ratio"] = Current_ratio
# ----------------------------------------------------------------------------------------
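# The SCIG component below selects its diameter-ratio design limits from the
# pole-pair count with an if/elif ladder; the same lookup written as a table
# (a sketch only; the dict merely restates the constants used there, and the
# function name is hypothetical):
def _sketch_d_ratio_limits(pole_pairs):
    """Return (lower, upper) limits on d_se / ag_dia for a given pole-pair count."""
    table = {1: (1.65, 1.69), 2: (1.46, 1.49), 3: (1.37, 1.40), 4: (1.27, 1.30)}
    return table.get(int(pole_pairs), (1.20, 1.24))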
class SCIG(GeneratorBase):
"""
    Estimates overall mass, dimensions and efficiency of a squirrel-cage induction generator.
Parameters
----------
B_symax : float, [T]
Peak Stator Yoke flux density B_ymax
Returns
-------
h_yr : float
rotor yoke height
h_ys : float
Stator Yoke height
tau_p : float
Pole pitch
D_ratio_UL : float
Dia ratio upper limit
D_ratio_LL : float
Dia ratio Lower limit
K_rad_UL : float
Aspect ratio upper limit
K_rad_LL : float
Aspect ratio Lower limit
rad_r : float
rotor radius
A_bar : float
Rotor Conductor cross-section mm^2
E_p : float
Stator phase voltage
"""
def initialize(self):
super(SCIG, self).initialize()
def setup(self):
super(SCIG, self).setup()
self.add_input("B_symax", val=0.0, units="T")
self.add_output("h_yr", val=0.0)
self.add_output("h_ys", val=0.0)
self.add_output("tau_p", val=0.0)
self.add_output("D_ratio_UL", val=0.0)
self.add_output("D_ratio_LL", val=0.0)
self.add_output("K_rad_UL", val=0.0)
self.add_output("K_rad_LL", val=0.0)
self.add_output("rad_r", val=0.0)
self.add_output("A_bar", val=0.0)
self.add_output("E_p", val=np.zeros(self.options["n_pc"]))
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
rad_ag = inputs["rad_ag"]
len_s = inputs["len_s"]
h_s = inputs["h_s"]
h_0 = inputs["h_0"]
machine_rating = inputs["machine_rating"]
shaft_rpm = inputs["shaft_rpm"]
I_0 = inputs["I_0"]
rho_Fe = inputs["rho_Fe"]
rho_Copper = inputs["rho_Copper"]
B_symax = inputs["B_symax"]
# Grab constant values
B_r = inputs["B_r"]
E = inputs["E"]
P_Fe0e = inputs["P_Fe0e"]
P_Fe0h = inputs["P_Fe0h"]
S_N = inputs["S_N"]
alpha_p = inputs["alpha_p"]
b_r_tau_r = inputs["b_r_tau_r"]
b_ro = inputs["b_ro"]
b_s_tau_s = inputs["b_s_tau_s"]
b_so = inputs["b_so"]
cofi = inputs["cofi"]
freq = inputs["freq"]
h_i = inputs["h_i"]
h_sy0 = inputs["h_sy0"]
h_w = inputs["h_w"]
k_fes = inputs["k_fes"]
k_fillr = inputs["k_fillr"]
k_s = inputs["k_s"]
m = discrete_inputs["m"]
mu_0 = inputs["mu_0"]
mu_r = inputs["mu_r"]
p = inputs["p"]
phi = inputs["phi"]
q1 = discrete_inputs["q1"]
q2 = discrete_inputs["q2"]
ratio_mw2pp = inputs["ratio_mw2pp"]
resist_Cu = inputs["resist_Cu"]
sigma = inputs["sigma"]
v = inputs["v"]
y_tau_p = inputs["y_tau_p"]
y_tau_pr = inputs["y_tau_pr"]
"""
# Assign values to universal constants
sigma = 21.5e3 # shear stress (psi) what material?
mu_0 = np.pi*4e-7 # permeability of free space in m * kg / (s**2 * A**2)
cofi = 0.9 # power factor
h_w = 0.005 # wedge height
m = 3 # Number of phases
resist_Cu = 1.8e-8 * 1.4 # Copper resistivity
#Assign values to design constants
b_so = 0.004 # Stator slot opening width
b_ro = 0.004 # Rotor slot opening width
q1 = 6 # Stator slots per pole per phase
q2 = 4 # Rotor slots per pole per phase
b_s_tau_s = 0.45 # Stator Slot width/Slot pitch ratio
b_r_tau_r = 0.45 # Rotor Slot width/Slot pitch ratio
y_tau_p = 12./15 # Coil span/pole pitch
p = 3 # number of pole pairs
freq = 60 # frequency in Hz
k_fillr = 0.7 # Rotor slot fill factor
P_Fe0h = 4 # specific hysteresis losses W / kg @ 1.5 T @50 Hz
P_Fe0e = 1 # specific eddy losses W / kg @ 1.5 T @50 Hz
S_N = -0.002 # Slip
"""
n_1 = shaft_rpm / (1 - S_N) # actual rotor speed (rpm)
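        # a negative slip S_N corresponds to generator operation (rotor above synchronous speed)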
# Calculating winding factor
k_y1 = np.sin(np.pi / 2 * y_tau_p) # winding chording factor
k_q1 = np.sin(np.pi / 6) / (q1 * np.sin(np.pi / (6 * q1))) # zone factor
k_wd = k_y1 * k_q1 # winding factor
# Calculating air gap length
ag_dia = 2 * rad_ag # air gap diameter
ag_len = (0.1 + 0.012 * machine_rating ** (1.0 / 3)) * 0.001 # air gap length in m
K_rad = len_s / ag_dia # Aspect ratio
K_rad_LL = 0.5 # lower limit on aspect ratio
K_rad_UL = 1.5 # upper limit on aspect ratio
rad_r = rad_ag - ag_len # rotor radius
tau_p = np.pi * ag_dia / (2 * p) # pole pitch
S = 2 * p * q1 * m # Stator slots
N_slots_pp = S / (m * p * 2) # Number of stator slots per pole per phase
tau_s = tau_p / (m * q1) # Stator slot pitch
b_s = b_s_tau_s * tau_s # Stator slot width
b_t = tau_s - b_s # Stator tooth width
Q_r = 2 * p * m * q2 # Rotor slots
tau_r = np.pi * (ag_dia - 2 * ag_len) / Q_r # Rotor slot pitch
b_r = b_r_tau_r * tau_r # Rotor slot width
b_tr = tau_r - b_r # Rotor tooth width
tau_r_min = np.pi * (ag_dia - 2 * (ag_len + h_0)) / Q_r
        b_trmin = tau_r_min - b_r_tau_r * tau_r_min # minimum rotor tooth width
# Calculating equivalent slot openings
mu_rs = 0.005
mu_rr = 0.005
W_s = (b_s / mu_rs) * 1e-3 # Stator, in m
W_r = (b_r / mu_rr) * 1e-3 # Rotor, in m
Slot_aspect_ratio1 = h_s / b_s # Stator slot aspect ratio
Slot_aspect_ratio2 = h_0 / b_r # Rotor slot aspect ratio
# Calculating Carter factor for stator,rotor and effective air gap length
"""
gamma_s = (2 * W_s / ag_len)**2 / (5 + 2 * W_s / ag_len)
K_Cs = tau_s / (tau_s - ag_len * gamma_s * 0.5) # page 3-13 Boldea Induction machines Chapter 3
gamma_r = (2 * W_r / ag_len)**2 / (5 + 2 * W_r / ag_len)
K_Cr = tau_r / (tau_r - ag_len * gamma_r * 0.5) # page 3-13 Boldea Induction machines Chapter 3
"""
K_Cs = carterFactor(ag_len, W_s, tau_s)
K_Cr = carterFactor(ag_len, W_r, tau_r)
K_C = K_Cs * K_Cr
g_eff = K_C * ag_len
om_m = 2 * np.pi * shaft_rpm / 60 # mechanical frequency
om_e = p * om_m # electrical frequency
f = shaft_rpm * p / 60 # generator output freq
K_s = 0.3 # saturation factor for Iron
n_c = 2 # number of conductors per coil
a1 = 2 # number of parallel paths
# Calculating stator winding turns
N_s = np.round(2 * p * N_slots_pp * n_c / a1)
# Calculating Peak flux densities
B_g1 = mu_0 * 3 * N_s * I_0 * np.sqrt(2) * k_y1 * k_q1 / (np.pi * p * g_eff * (1 + K_s))
B_g = B_g1 * K_C
B_rymax = B_symax
# calculating back iron thickness
h_ys = B_g * tau_p / (B_symax * np.pi)
h_yr = h_ys
d_se = ag_dia + 2 * (h_ys + h_s + h_w) # stator outer diameter
D_ratio = d_se / ag_dia # Diameter ratio
# limits for Diameter ratio depending on pole pair
if 2 * p == 2:
D_ratio_LL = 1.65
D_ratio_UL = 1.69
elif 2 * p == 4:
D_ratio_LL = 1.46
D_ratio_UL = 1.49
elif 2 * p == 6:
D_ratio_LL = 1.37
D_ratio_UL = 1.4
elif 2 * p == 8:
D_ratio_LL = 1.27
D_ratio_UL = 1.3
else:
D_ratio_LL = 1.2
D_ratio_UL = 1.24
# Stator slot fill factor
if ag_dia > 2:
k_fills = 0.65
else:
k_fills = 0.4
# Stator winding length and cross-section
l_fs = 2 * (0.015 + y_tau_p * tau_p / 2 / np.cos(np.deg2rad(40))) + np.pi * h_s # end connection
l_Cus = 2 * N_s * (l_fs + len_s) / a1 # shortpitch
A_s = b_s * (h_s - h_w) # Slot area
A_scalc = b_s * 1000 * (h_s - h_w) * 1000 # Conductor cross-section (mm^2)
A_Cus = A_s * q1 * p * k_fills / N_s # Conductor cross-section (m^2)
A_Cuscalc = A_scalc * q1 * p * k_fills / N_s
# Stator winding resistance
R_s = l_Cus * resist_Cu / A_Cus
# Calculating no-load voltage
om_s = shaft_rpm * 2 * np.pi / 60 # rated angular frequency
P_e = machine_rating / (1 - S_N) # Electrical power
E_p = om_s * N_s * k_wd * rad_ag * len_s * B_g1 * np.sqrt(2)
S_GN = (1.0 - S_N) * machine_rating # same as P_e?
T_e = p * S_GN / (2 * np.pi * freq * (1 - S_N))
I_srated = machine_rating / (3 * E_p * cofi)
# Rotor design
diff = h_0 - h_w
A_bar = b_r * diff # bar cross section
Beta_skin = np.sqrt(np.pi * mu_0 * freq / 2 / resist_Cu) # coefficient for skin effect correction
k_rm = Beta_skin * h_0 # coefficient for skin effect correction
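        # Beta_skin is roughly the reciprocal of the bar skin depth, so k_rm grows
        # with bar height h_0 and with the square root of frequency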
J_b = 6e06 # Bar current density
K_i = 0.864
I_b = 2 * m * N_s * k_wd * I_srated / Q_r # bar current
# Calculating bar resistance
R_rb = resist_Cu * k_rm * len_s / A_bar
I_er = I_b / (2 * np.sin(np.pi * p / Q_r)) # End ring current
J_er = 0.8 * J_b # End ring current density
A_er = I_er / J_er # End ring cross-section
b = h_0 # End ring dimension
a = A_er / b # End ring dimension
D_er = (rad_ag * 2 - 2 * ag_len) - 0.003 # End ring diameter
l_er = np.pi * (D_er - b) / Q_r # End ring segment length
if debug:
sys.stderr.write("l_er {:.4f} A_er {:.4f} D_er {:.4f}\n".format(l_er[0], A_er[0], D_er[0]))
# Calculating end ring resistance
R_re = resist_Cu * l_er / (2 * A_er * (np.sin(np.pi * p / Q_r)) ** 2)
# Calculating equivalent rotor resistance
if debug:
sys.stderr.write("R_rb {:.3e} R_re {:.3e} k_wd {:.4f} N_s {} Q_r {}\n".format(R_rb, R_re, k_wd, N_s, Q_r))
R_R = (R_rb + R_re) * 4 * m * (k_wd * N_s) ** 2 / Q_r
# Calculating Rotor and Stator teeth flux density
B_trmax = B_g * tau_r / b_trmin
B_tsmax = B_g * tau_s / b_t
# Calculating Equivalent core lengths
l_r = len_s + 4 * ag_len # for axial cooling
l_se = len_s + (2 / 3) * ag_len
K_fe = 0.95 # Iron factor
L_e = l_se * K_fe # radial cooling
# Calculating leakage inductance in stator
if debug:
sys.stderr.write("b_s {:.3e} b_so {:.3e}\n".format(b_s[0], b_so[0]))
L_ssigmas = (
2 * mu_0 * len_s * N_s ** 2 / p / q1 * ((h_s - h_w) / (3 * b_s) + h_w / b_so)
) # slot leakage inductance
L_ssigmaew = (
2 * mu_0 * len_s * N_s ** 2 / p / q1 * 0.34 * q1 * (l_fs - 0.64 * tau_p * y_tau_p) / len_s
) # end winding leakage inductance
L_ssigmag = (
2 * mu_0 * len_s * N_s ** 2 / p / q1 * (5 * (ag_len * K_C / b_so) / (5 + 4 * (ag_len * K_C / b_so)))
) # tooth tip leakage inductance
L_s = L_ssigmas + L_ssigmaew + L_ssigmag # stator leakage inductance
L_sm = 6 * mu_0 * len_s * tau_p * (k_wd * N_s) ** 2 / (np.pi ** 2 * p * g_eff * (1 + K_s))
# Calculating leakage inductance in rotor
lambda_ei = 2.3 * D_er / (4 * Q_r * len_s * (np.sin(np.pi * p / Q_r) ** 2)) * np.log(4.7 * ag_dia / (a + 2 * b))
lambda_b = h_0 / (3 * b_r) + h_w / b_ro
L_i = np.pi * ag_dia / Q_r
L_rsl = mu_0 * len_s * ((h_0 - h_w) / (3 * b_r) + h_w / b_ro) # slot leakage inductance
L_rel = mu_0 * (len_s * lambda_b + 2 * lambda_ei * L_i) # end winding leakage inductance
L_rtl = mu_0 * len_s * (0.9 * tau_r * 0.09 / g_eff) # tooth tip leakage inductance
L_rsigma = (L_rsl + L_rtl + L_rel) * 4 * m * (k_wd * N_s) ** 2 / Q_r # rotor leakage inductance
# Calculating rotor current
if debug:
sys.stderr.write(
"S_N {} P_e {:.1f} m {} R_R {:.4f} = {:.1f}\n".format(S_N, P_e, m, R_R, -S_N * P_e / m / R_R)
)
I_r = np.sqrt(-S_N * P_e / m / R_R)
I_sm = E_p / (2 * np.pi * freq * L_sm)
# Calculating stator currents and specific current loading
I_s = np.sqrt((I_r ** 2 + I_sm ** 2))
A_1 = 2 * m * N_s * I_s / (np.pi * 2 * rad_ag)
# Calculating masses of the electromagnetically active materials
V_Cuss = m * l_Cus * A_Cus # Volume of copper in stator
V_Cusr = Q_r * len_s * A_bar + np.pi * (D_er * A_er - A_er * b) # Volume of copper in rotor
V_Fest = (
len_s * np.pi * ((rad_ag + h_s) ** 2 - rad_ag ** 2) - 2 * m * q1 * p * b_s * h_s * len_s
) # Volume of iron in stator teeth
V_Fesy = len_s * np.pi * ((rad_ag + h_s + h_ys) ** 2 - (rad_ag + h_s) ** 2) # Volume of iron in stator yoke
rad_r = rad_ag - ag_len # rotor radius
V_Fert = (
np.pi * len_s * (rad_r ** 2 - (rad_r - h_0) ** 2) - 2 * m * q2 * p * b_r * h_0 * len_s
) # Volume of iron in rotor teeth
V_Fery = np.pi * len_s * ((rad_r - h_0) ** 2 - (rad_r - h_0 - h_yr) ** 2) # Volume of iron in rotor yoke
Copper = (V_Cuss + V_Cusr)[-1] * rho_Copper # Mass of Copper
M_Fest = V_Fest * rho_Fe # Mass of stator teeth
M_Fesy = V_Fesy * rho_Fe # Mass of stator yoke
M_Fert = V_Fert * rho_Fe # Mass of rotor tooth
M_Fery = V_Fery * rho_Fe # Mass of rotor yoke
Iron = M_Fest + M_Fesy + M_Fert + M_Fery
Active_mass = Copper + Iron
L_tot = len_s
Structural_mass = 0.0001 * Active_mass ** 2 + 0.8841 * Active_mass - 132.5
Mass = Active_mass + Structural_mass
# Calculating Losses and efficiency
# 1. Copper losses
K_R = 1.2 # skin effect correction coefficient
P_Cuss = m * I_s ** 2 * R_s * K_R # Copper loss - stator
P_Cusr = m * I_r ** 2 * R_R # Copper loss - rotor
P_Cusnom = P_Cuss + P_Cusr # Copper loss - total
# Iron Losses ( from Hysteresis and eddy currents)
P_Hyys = M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator yoke
P_Ftys = (
M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator yoke
P_Hyd = M_Fest * (B_tsmax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator tooth
P_Ftd = (
M_Fest * (B_tsmax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in stator tooth
P_Hyyr = (
M_Fery * (B_rymax / 1.5) ** 2 * (P_Fe0h * abs(S_N) * om_e / (2 * np.pi * 60))
) # Hysteresis losses in rotor yoke
P_Ftyr = (
M_Fery * (B_rymax / 1.5) ** 2 * (P_Fe0e * (abs(S_N) * om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in rotor yoke
P_Hydr = (
M_Fert * (B_trmax / 1.5) ** 2 * (P_Fe0h * abs(S_N) * om_e / (2 * np.pi * 60))
) # Hysteresis losses in rotor tooth
P_Ftdr = (
M_Fert * (B_trmax / 1.5) ** 2 * (P_Fe0e * (abs(S_N) * om_e / (2 * np.pi * 60)) ** 2)
) # Eddy losses in rotor tooth
# Calculating Additional losses
P_add = 0.5 * machine_rating / 100
P_Fesnom = P_Hyys + P_Ftys + P_Hyd + P_Ftd + P_Hyyr + P_Ftyr + P_Hydr + P_Ftdr
Losses = P_Cusnom + P_Fesnom + P_add
gen_eff = (P_e - Losses) / P_e
# Calculating current densities in the stator and rotor
J_s = I_s / A_Cuscalc
J_r = I_r / A_bar / 1e6
# Calculating Tangential stress constraints
TC1 = T_e / (2 * np.pi * sigma)
TC2r = rad_ag ** 2 * len_s
# Calculating mass moments of inertia and center of mass
r_out = d_se * 0.5
outputs["R_out"] = r_out
outputs["B_tsmax"] = B_tsmax
outputs["B_trmax"] = B_trmax
outputs["B_rymax"] = B_rymax
outputs["B_g"] = B_g
outputs["B_g1"] = B_g1
outputs["N_s"] = N_s
outputs["S"] = S
outputs["h_ys"] = h_ys
outputs["b_s"] = b_s
outputs["b_t"] = b_t
outputs["D_ratio"] = D_ratio
outputs["D_ratio_UL"] = D_ratio_UL
outputs["D_ratio_LL"] = D_ratio_LL
outputs["A_Cuscalc"] = A_Cuscalc
outputs["Slot_aspect_ratio1"] = Slot_aspect_ratio1
outputs["h_yr"] = h_yr
outputs["tau_p"] = tau_p
outputs["Q_r"] = Q_r
outputs["b_r"] = b_r
outputs["b_trmin"] = b_trmin
outputs["b_tr"] = b_tr
outputs["rad_r"] = rad_r
outputs["A_bar"] = A_bar
outputs["Slot_aspect_ratio2"] = Slot_aspect_ratio2
outputs["E_p"] = E_p
outputs["f"] = f
outputs["I_s"] = I_s
outputs["A_1"] = A_1
outputs["J_s"] = J_s
outputs["J_r"] = J_r
outputs["R_s"] = R_s
outputs["R_R"] = R_R[-1]
outputs["L_s"] = L_s
outputs["L_sm"] = L_sm
outputs["generator_mass"] = Mass
outputs["K_rad"] = K_rad
outputs["K_rad_UL"] = K_rad_UL
outputs["K_rad_LL"] = K_rad_LL
outputs["Losses"] = Losses
outputs["eandm_efficiency"] = np.maximum(eps, gen_eff)
outputs["Copper"] = Copper
outputs["Iron"] = Iron
outputs["Structural_mass"] = Structural_mass
outputs["TC1"] = TC1
outputs["TC2r"] = TC2r
# ----------------------------------------------------------------------------------------
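# Aside (illustrative sketch, not part of the WISDEM source): the rotor current
# above follows from the slip power balance, P_rotor = -S_N * P_e spread over m
# phases with rotor resistance R_R, hence I_r = sqrt(-S_N * P_e / (m * R_R)).
# The rated slip S_N must be negative (generating operation) or the radicand goes
# negative, which is what the debug print above helps diagnose.
def _example_rotor_current(S_N=-0.002, P_e=5.0e6, m=3, R_R=0.01):
    """Illustrative only: rotor phase current from the slip power balance."""
    return np.sqrt(-S_N * P_e / (m * R_R))  # ~577 A for these assumed values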
class EESG(GeneratorBase):
"""
Estimates overall mass, dimensions, and efficiency of an electrically excited synchronous generator.
Parameters
----------
I_f : float, [A]
Excitation current
N_f : float
Field winding turns
b_arm : float, [m]
arm width
h_yr : float, [m]
rotor yoke height
h_ys : float, [m]
Stator yoke height
tau_p : float, [m]
Pole pitch
Returns
-------
n_brushes : float
number of brushes
h_p : float, [m]
Pole height
b_p : float, [m]
Pole width
L_m : float, [H]
Stator synchronising inductance
R_r : float, [ohm]
Rotor resistance
B_tmax : float, [T]
Peak teeth flux density
B_gfm : float, [T]
No-load average air gap flux density
B_pc : float, [T]
Pole core flux density
B_symax : float, [T]
Peak stator yoke flux density
E_s : float, [V]
Stator phase voltage
J_f : float, [A*m**-2]
Rotor current density
Power_ratio : float
Ratio of excitation power to machine rating, in percent
Load_mmf_ratio : float
Ratio of rotor mmf to stator mmf at rated load
"""
def initialize(self):
super(EESG, self).initialize()
def setup(self):
super(EESG, self).setup()
self.add_input("I_f", val=0.0, units="A")
self.add_input("N_f", val=0.0)
self.add_input("b_arm", val=0.0, units="m")
self.add_input("h_yr", val=0.0, units="m")
self.add_input("h_ys", val=0.0, units="m")
self.add_input("tau_p", val=0.0, units="m")
self.add_output("n_brushes", val=0.0)
self.add_output("h_p", val=0.0, units="m")
self.add_output("b_p", val=0.0, units="m")
self.add_output("L_m", val=0.0, units="H")
self.add_output("R_r", val=0.0, units="ohm")
self.add_output("B_tmax", val=0.0, units="T")
self.add_output("B_gfm", val=0.0, units="T")
self.add_output("B_pc", val=0.0, units="T")
self.add_output("B_symax", val=0.0, units="T")
self.add_output("E_s", val=np.zeros(self.options["n_pc"]), units="V")
self.add_output("J_f", val=0.0, units="A*m**-2")
self.add_output("Power_ratio", val=0.0)
self.add_output("Load_mmf_ratio", val=0.0)
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
rad_ag = inputs["rad_ag"]
len_s = inputs["len_s"]
h_s = inputs["h_s"]
tau_p = inputs["tau_p"]
N_f = inputs["N_f"]
I_f = inputs["I_f"]
h_ys = inputs["h_ys"]
h_yr = inputs["h_yr"]
machine_rating = inputs["machine_rating"]
shaft_rpm = inputs["shaft_rpm"]
Torque = inputs["rated_torque"]
b_st = inputs["b_st"]
d_s = inputs["d_s"]
t_ws = inputs["t_ws"]
n_r = inputs["n_r"]
n_s = inputs["n_s"]
b_r = inputs["b_arm"]
d_r = inputs["d_r"]
t_wr = inputs["t_wr"]
R_sh = 0.5 * inputs["D_shaft"]
rho_Fe = inputs["rho_Fe"]
rho_Copper = inputs["rho_Copper"]
rho_Fes = inputs["rho_Fes"]
# Grab constant values
B_r = inputs["B_r"]
E = inputs["E"]
P_Fe0e = inputs["P_Fe0e"]
P_Fe0h = inputs["P_Fe0h"]
S_N = inputs["S_N"]
alpha_p = inputs["alpha_p"]
b_r_tau_r = inputs["b_r_tau_r"]
b_ro = inputs["b_ro"]
b_s_tau_s = inputs["b_s_tau_s"]
b_so = inputs["b_so"]
cofi = inputs["cofi"]
freq = inputs["freq"]
h_i = inputs["h_i"]
h_sy0 = inputs["h_sy0"]
h_w = inputs["h_w"]
k_fes = inputs["k_fes"]
k_fillr = inputs["k_fillr"]
k_fills = inputs["k_fills"]
k_s = inputs["k_s"]
m = discrete_inputs["m"]
mu_0 = inputs["mu_0"]
mu_r = inputs["mu_r"]
p = inputs["p"]
phi = inputs["phi"]
q1 = discrete_inputs["q1"]
q2 = discrete_inputs["q2"]
ratio_mw2pp = inputs["ratio_mw2pp"]
resist_Cu = inputs["resist_Cu"]
sigma = inputs["sigma"]
v = inputs["v"]
y_tau_p = inputs["y_tau_p"]
y_tau_pr = inputs["y_tau_pr"]
"""
# Assign values to universal constants
E = 2e11 # N / m^2 Young's modulus
sigma = 48.373e3 # shear stress of steel in psi (~333 MPa)
mu_0 = np.pi * 4e-7 # permeability of free space in m * kg / (s**2 * A**2)
phi = np.deg2rad(90)
# Assign values to design constants
h_w = 0.005
b_so = 0.004 # Stator slot opening
m = 3 # number of phases
q1 = 2 # no of stator slots per pole per phase
b_s_tau_s = 0.45 # ratio of slot width to slot pitch
P_Fe0h = 4 # specific hysteresis losses W / kg @ 1.5 T @50 Hz
P_Fe0e = 1 # specific eddy losses W / kg @ 1.5 T @50 Hz
resist_Cu = 1.8e-8 * 1.4 # resistivity of copper # ohm-meter (Why the 1.4 factor?)
k_fes = 0.9 # iron fill factor (not used)
y_tau_p = 1 # coil span / pole pitch fullpitch
k_fillr = 0.7 # rotor slot fill factor
k_s = 0.2 # magnetic saturation factor for iron
cofi = 0.85 # power factor
"""
T = Torque
# back iron thickness for rotor and stator
t_s = h_ys
t = h_yr
# Aspect ratio
K_rad = len_s / (2 * rad_ag)
###################################################### Electromagnetic design#############################################
alpha_p = np.pi / 2 * 0.7 # (not used)
dia = 2 * rad_ag # air gap diameter
# air gap length and minimum values
g = 0.001 * dia
if g < 0.005:
g = 0.005
r_r = rad_ag - g # rotor radius
d_se = dia + 2 * h_s + 2 * h_ys # stator outer diameter (not used)
p = np.round(np.pi * dia / (2 * tau_p)) # number of pole pairs
S = 2 * p * q1 * m # number of slots of stator phase winding
N_conductors = S * 2
N_s = N_conductors / 2 / m # Stator turns per phase
alpha = 180 / S / p # electrical angle (not used)
tau_s = np.pi * dia / S # slot pitch
h_ps = 0.1 * tau_p # height of pole shoe
b_pc = 0.4 * tau_p # width of pole core
h_pc = 0.6 * tau_p # height of pole core
h_p = 0.7 * tau_p # pole height
b_p = h_p
b_s = tau_s * b_s_tau_s # slot width
Slot_aspect_ratio = h_s / b_s
b_t = tau_s - b_s # tooth width
# Calculating Carter factor and effective air gap
g_a = g
K_C1 = (tau_s + 10 * g_a) / (tau_s - b_s + 10 * g_a) # salient pole rotor
g_1 = K_C1 * g
# calculating angular frequency
om_m = 2 * np.pi * shaft_rpm / 60
om_e = 60
f = shaft_rpm * p / 60
# Slot fill factor according to air gap radius
if 2 * rad_ag > 2:
K_fills = 0.65
else:
K_fills = 0.4
# Calculating Stator winding factor
k_y1 = np.sin(y_tau_p * np.pi / 2) # chording factor
k_q1 = np.sin(np.pi / 6) / q1 / np.sin(np.pi / 6 / q1) # winding zone factor
k_wd = k_y1 * k_q1
# Calculating stator winding conductor length, cross-section and resistance
shortpitch = 0
l_Cus = 2 * N_s * (2 * (tau_p - shortpitch / m / q1) + len_s) # length of winding
A_s = b_s * (h_s - h_w)
A_scalc = b_s * 1000 * (h_s - h_w) * 1000 # cross section in mm^2
A_Cus = A_s * q1 * p * K_fills / N_s
A_Cuscalc = A_scalc * q1 * p * K_fills / N_s
R_s = l_Cus * resist_Cu / A_Cus
# field winding design, conductor length, cross-section and resistance
N_f = np.round(N_f) # rounding the field winding turns to the nearest integer
I_srated = machine_rating / (np.sqrt(3) * 5000 * cofi)
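# The 5000 here appears to be an assumed 5 kV line-to-line voltage:
# I = P / (sqrt(3) * V_LL * cofi) for a three-phase machine.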
l_pole = len_s - 0.050 + 0.120 # 50mm smaller than stator and 120mm longer to accommodate end stack
K_fe = 0.95
l_pfe = l_pole * K_fe
l_Cur = 4 * p * N_f * (l_pfe + b_pc + np.pi / 4 * (np.pi * (r_r - h_pc - h_ps) / p - b_pc))
A_Cur = k_fillr * h_pc * 0.5 / N_f * (np.pi * (r_r - h_pc - h_ps) / p - b_pc)
A_Curcalc = k_fillr * h_pc * 1000 * 0.5 / N_f * (np.pi * (r_r - h_pc - h_ps) / p - b_pc) * 1000
Slot_Area = A_Cur * 2 * N_f / k_fillr # (not used)
R_r = resist_Cu * l_Cur / A_Cur # ohms
# field winding current density
J_f = I_f / A_Curcalc
# calculating air flux density
B_gfm = mu_0 * N_f * I_f / (g_1 * (1 + k_s)) # No-load air gap flux density
B_g = B_gfm * 4 * np.sin(0.5 * b_p * np.pi / tau_p) / np.pi # fundamental component
B_symax = tau_p * B_g / np.pi / h_ys # stator yoke flux density
L_fg = (
2
* mu_0
* p
* len_s
* 4
* N_f ** 2
* ((h_ps / (tau_p - b_p)) + (h_pc / (3 * np.pi * (r_r - h_pc - h_ps) / p - b_pc)))
) # (not used)
# calculating no-load voltage and stator current
E_s = 2 * N_s * len_s * rad_ag * k_wd * om_m * B_g / np.sqrt(2) # no-load voltage
# I_s = (E_s - (E_s**2 - 4 * R_s * machine_rating / m)**0.5) / (2 * R_s)
erm = np.maximum(0.0, E_s ** 2 - 4 * R_s * machine_rating / m)
I_s = (E_s - erm ** 0.5) / (2 * R_s)
# Calculating stator winding current density and specific current loading
A_1 = 6 * N_s * I_s / (np.pi * dia)
J_s = I_s / A_Cuscalc
# Calculating magnetic loading in other parts of the machine
delta_m = 0 # Initialising load angle
# peak flux density in pole core, rotor yoke and stator teeth
B_pc = (1 / b_pc) * (
(2 * tau_p / np.pi) * B_g * np.cos(delta_m)
+ (2 * mu_0 * I_f * N_f * ((2 * h_ps / (tau_p - b_p)) + (h_pc / (tau_p - b_pc))))
)
B_rymax = 0.5 * b_pc * B_pc / h_yr
B_tmax = (B_gfm + B_g) * tau_s * 0.5 / b_t
# Calculating leakage inductances in the stator
L_ssigmas = (
2 * mu_0 * len_s * N_s ** 2 / p / q1 * ((h_s - h_w) / (3 * b_s) + h_w / b_so)
) # slot leakage inductance
L_ssigmaew = mu_0 * 1.2 * N_s ** 2 / p * 1.2 * (2 / 3 * tau_p + 0.01) # end winding leakage inductance
L_ssigmag = (
2 * mu_0 * len_s * N_s ** 2 / p / q1 * (5 * (g / b_so) / (5 + 4 * (g / b_so)))
) # tooth tip leakage inductance
L_ssigma = L_ssigmas + L_ssigmaew + L_ssigmag # stator leakage inductance
# Calculating effective air gap
"""
What is the source of this function that combines 1st and 13th powers? Very suspicious...
Inputs appear to be in the range of 0.45 to 2.2, so outputs are 180 to 178000
Equations given without reference in:
<NAME>, <NAME>. “Design optimization of a synchronous generator for a direct-drive wind turbine,”
(paper presented at the European Wind Energy Conference, Copenhagen, Denmark, July 2–6, 2001)
def airGapFn(B, fact):
val = 400 * B + 7 * B**13
ans = val * fact
sys.stderr.write('aGF: B {} val {} ans {}\n'.format(B, val, ans))
return val
At_t = h_s * airGapFn(B_tmax, h_s)
At_sy = tau_p / 2 * airGapFn(B_symax, tau_p/2)
At_pc = (h_pc + h_ps) * airGapFn(B_pc, h_pc + h_ps)
At_ry = tau_p / 2 * airGapFn(B_rymax, tau_p/2)
"""
At_g = g_1 * B_gfm / mu_0
At_t = h_s * (400 * B_tmax + 7 * B_tmax ** 13)
At_sy = tau_p * 0.5 * (400 * B_symax + 7 * B_symax ** 13)
At_pc = (h_pc + h_ps) * (400 * B_pc + 7 * B_pc ** 13)
At_ry = tau_p * 0.5 * (400 * B_rymax + 7 * B_rymax ** 13)
g_eff = (At_g + At_t + At_sy + At_pc + At_ry) * g_1 / At_g
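# g_eff scales the Carter-corrected gap g_1 by the ratio of total magnetic-circuit
# MMF (gap + teeth + stator yoke + pole core + rotor yoke) to the gap MMF alone,
# so iron saturation lengthens the effective air gap seen by L_m below.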
L_m = 6 * k_wd ** 2 * N_s ** 2 * mu_0 * rad_ag * len_s / np.pi / g_eff / p ** 2
B_r1 = (mu_0 * I_f * N_f * 4 * np.sin(0.5 * (b_p / tau_p) * np.pi)) / g_eff / np.pi # (not used)
# Calculating direct axis and quadrature axes inductances
L_dm = (b_p / tau_p + (1 / np.pi) * np.sin(np.pi * b_p / tau_p)) * L_m
L_qm = (
b_p / tau_p - (1 / np.pi) * np.sin(np.pi * b_p / tau_p) + 2 / (3 * np.pi) * np.cos(b_p * np.pi / 2 * tau_p)
) * L_m
# Calculating actual load angle
delta_m = np.arctan(om_e * L_qm * I_s / E_s)
L_d = L_dm + L_ssigma # (not used)
L_q = L_qm + L_ssigma # (not used)
I_sd = I_s * np.sin(delta_m)
I_sq = I_s * np.cos(delta_m)
# induced voltage
E_p = om_e * L_dm * I_sd + np.sqrt(E_s ** 2 - (om_e * L_qm * I_sq) ** 2) # (not used)
# M_sf = mu_0 * 8*rad_ag * len_s * k_wd * N_s * N_f * np.sin(0.5 * b_p / tau_p * np.pi) / (p * g_eff * np.pi)
# I_f1 = np.sqrt(2) * (E_p) / (om_e * M_sf)
# I_f2 = (E_p / E_s) * B_g * g_eff * np.pi / (4 * N_f * mu_0 * np.sin(np.pi * b_p / 2/tau_p))
# phi_max_stator = k_wd * N_s * np.pi * rad_ag * len_s * 2*mu_0 * N_f * I_f * 4*np.sin(0.5 * b_p / tau_p / np.pi) / (p * np.pi * g_eff * np.pi)
# M_sf = mu_0 * 8*rad_ag * len_s * k_wd * N_s * N_f * np.sin(0.5 * b_p / tau_p / np.pi) / (p * g_eff * np.pi)
L_tot = len_s + 2 * tau_p
# Excitation power
V_fn = 500
Power_excitation = V_fn * 2 * I_f # total rated power in excitation winding
Power_ratio = Power_excitation * 100 / machine_rating
# Calculating Electromagnetically Active mass
L_tot = len_s + 2 * tau_p # (not used)
V_Cuss = m * l_Cus * A_Cus # volume of copper in stator
V_Cusr = l_Cur * A_Cur # volume of copper in rotor
V_Fest = (
len_s * np.pi * ((rad_ag + h_s) ** 2 - rad_ag ** 2) - 2 * m * q1 * p * b_s * h_s * len_s
) # volume of iron in stator tooth
V_Fesy = len_s * np.pi * ((rad_ag + h_s + h_ys) ** 2 - (rad_ag + h_s) ** 2) # volume of iron in stator yoke
V_Fert = l_pfe * 2 * p * (h_pc * b_pc + b_p * h_ps) # volume of iron in rotor pole
V_Fery = (
l_pfe * np.pi * ((r_r - h_ps - h_pc) ** 2 - (r_r - h_ps - h_pc - h_yr) ** 2)
) # volume of iron in rotor yoke
Copper = (V_Cuss + V_Cusr) * rho_Copper
M_Fest = V_Fest * rho_Fe
M_Fesy = V_Fesy * rho_Fe
M_Fert = V_Fert * rho_Fe
M_Fery = V_Fery * rho_Fe
Iron = M_Fest + M_Fesy + M_Fert + M_Fery
I_snom = machine_rating / (3 * E_s * cofi)
## Optional## Calculating mmf ratio
F_1no_load = 3 * 2 ** 0.5 * N_s * k_wd * I_s / (np.pi * p) # (not used)
Nf_If_no_load = N_f * I_f
F_1_rated = (3 * 2 ** 0.5 * N_s * k_wd * I_srated) / (np.pi * p)
Nf_If_rated = 2 * Nf_If_no_load
Load_mmf_ratio = Nf_If_rated / F_1_rated
## Calculating losses
# 1. Copper losses
K_R = 1.2 # skin effect correction coefficient
P_Cuss = m * I_snom ** 2 * R_s * K_R
P_Cusr = I_f ** 2 * R_r
P_Cusnom_total = P_Cuss + P_Cusr # Watts
# 2. Iron losses ( Hysteresis and Eddy currents)
P_Hyys = M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator yoke
P_Ftys = M_Fesy * (B_symax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2) # Eddy losses in stator yoke
P_Fesynom = P_Hyys + P_Ftys
P_Hyd = M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0h * om_e / (2 * np.pi * 60)) # Hysteresis losses in stator teeth
P_Ftd = M_Fest * (B_tmax / 1.5) ** 2 * (P_Fe0e * (om_e / (2 * np.pi * 60)) ** 2) # Eddy losses in stator teeth
P_Festnom = P_Hyd + P_Ftd
# brushes
delta_v = 1
n_brushes = I_f * 2 / 120
if n_brushes < 0.5:
n_brushes = 1
else:
n_brushes = np.round(n_brushes)
# 3. brush losses
p_b = 2 * delta_v * I_f
Losses = P_Cusnom_total + P_Festnom + P_Fesynom + p_b
gen_eff = machine_rating / (Losses + machine_rating)
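# Efficiency here is electrical output over (output + losses); the losses are
# added to the rating to approximate the mechanical input power.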
################################################## Structural Design ########################################################
## Structural deflection calculations
# rotor structure
q3 = B_g ** 2 / 2 / mu_0 # normal component of Maxwell's stress
# l = l_s # l - stator core length - now using l_s everywhere
l_b = 2 * tau_p # end winding length # (not used)
l_e = len_s + 2 * 0.001 * rad_ag # equivalent core length # (not used)
a_r = (b_r * d_r) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)) # cross-sectional area of rotor arms
A_r = len_s * t # cross-sectional area of rotor cylinder
N_r = np.round(n_r)
theta_r = np.pi / N_r # half angle between spokes
I_r = len_s * t ** 3 / 12 # second moment of area of rotor cylinder
I_arm_axi_r = (
(b_r * d_r ** 3) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr) ** 3)
) / 12 # second moment of area of rotor arm
I_arm_tor_r = (
(d_r * b_r ** 3) - ((d_r - 2 * t_wr) * (b_r - 2 * t_wr) ** 3)
) / 12 # second moment of area of rotor arm w.r.t. torsion
R = r_r - h_ps - h_pc - 0.5 * h_yr
R_1 = R - h_yr * 0.5 # inner radius of rotor cylinder
k_1 = np.sqrt(I_r / A_r) # radius of gyration
m1 = (k_1 / R) ** 2
c = R / 500 # (not used)
u_allow_r = R / 10000 # allowable radial deflection
b_allow_r = 2 * np.pi * R_sh / N_r # allowable circumferential arm dimension
# Calculating radial deflection of rotor structure according to McDonald's method
Numer = R ** 3 * (
(0.25 * (np.sin(theta_r) - (theta_r * np.cos(theta_r))) / (np.sin(theta_r)) ** 2)
- (0.5 / np.sin(theta_r))
+ (0.5 / theta_r)
)
Pov = ((theta_r / (np.sin(theta_r)) ** 2) + 1 / np.tan(theta_r)) * ((0.25 * R / A_r) + (0.25 * R ** 3 / I_r))
Qov = R ** 3 / (2 * I_r * theta_r * (m1 + 1))
Lov = (R_1 - R_sh) / a_r
Denom = I_r * (Pov - Qov + Lov) # radial deflection % rotor
u_ar = (q3 * R ** 2 / E / h_yr) * (1 + Numer / Denom)
# Calculating axial deflection of rotor structure
w_r = rho_Fes * gravity * np.sin(phi) * a_r * N_r
mass_st_lam = rho_Fe * 2 * np.pi * (R + 0.5 * h_yr) * len_s * h_yr # mass of rotor yoke steel
W = gravity * np.sin(phi) * (mass_st_lam + (V_Cusr * rho_Copper) + M_Fert) / N_r # weight of rotor cylinder
l_ir = R # length of rotor arm beam at which rotor cylinder acts
l_iir = R_1
y_ar = (W * l_ir ** 3 / 12 / E / I_arm_axi_r) + (w_r * l_iir ** 4 / 24 / E / I_arm_axi_r) # axial deflection
# Calculating torsional deflection of rotor structure
z_allow_r = np.deg2rad(0.05 * R) # allowable torsional deflection
z_ar = (
(2 * np.pi * (R - 0.5 * h_yr) * len_s / N_r) * sigma * (l_ir - 0.5 * h_yr) ** 3 / (3 * E * I_arm_tor_r)
) # circumferential deflection
# STATOR structure
A_st = len_s * t_s
a_s = (b_st * d_s) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws))
N_st = np.round(n_s)
theta_s = np.pi / N_st
I_st = len_s * t_s ** 3 / 12
I_arm_axi_s = (
(b_st * d_s ** 3) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws) ** 3)
) / 12 # second moment of area of stator arm
I_arm_tor_s = (
(d_s * b_st ** 3) - ((d_s - 2 * t_ws) * (b_st - 2 * t_ws) ** 3)
) / 12 # second moment of area of stator arm w.r.t. torsion
R_st = rad_ag + h_s + h_ys * 0.5
R_1s = R_st - h_ys * 0.5
k_2 = np.sqrt(I_st / A_st)
m2 = (k_2 / R_st) ** 2
# allowable deflections
b_allow_s = 2 * np.pi * R_sh / N_st
u_allow_s = R_st / 10000
y_allow = 2 * len_s / 100 # allowable axial deflection
z_allow_s = np.deg2rad(0.05 * R_st) # allowable torsional deflection
# Calculating radial deflection according to McDonald's
Numers = R_st ** 3 * (
(0.25 * (np.sin(theta_s) - (theta_s * np.cos(theta_s))) / (np.sin(theta_s)) ** 2)
- (0.5 / np.sin(theta_s))
+ (0.5 / theta_s)
)
Povs = ((theta_s / (np.sin(theta_s)) ** 2) + 1 / np.tan(theta_s)) * (
(0.25 * R_st / A_st) + (0.25 * R_st ** 3 / I_st)
)
Qovs = R_st ** 3 / (2 * I_st * theta_s * (m2 + 1))
Lovs = (R_1s - R_sh) * 0.5 / a_s
Denoms = I_st * (Povs - Qovs + Lovs)
R_out = R / 0.995 + h_s + h_ys
u_as = (q3 * R_st ** 2 / E / t_s) * (1 + Numers / Denoms)
# Calculating axial deflection according to McDonald
l_is = R_st - R_sh
l_iis = l_is
l_iiis = l_is # length of stator arm beam at which self-weight acts
mass_st_lam_s = M_Fest + np.pi * len_s * rho_Fe * ((R_st + 0.5 * h_ys) ** 2 - (R_st - 0.5 * h_ys) ** 2)
W_is = gravity * np.sin(phi) * (rho_Fes * len_s * d_s ** 2 * 0.5) # weight of rotor cylinder
W_iis = gravity * np.sin(phi) * (V_Cuss * rho_Copper + mass_st_lam_s) / 2 / N_st
w_s = rho_Fes * gravity * np.sin(phi) * a_s * N_st
X_comp1 = W_is * l_is ** 3 / (12 * E * I_arm_axi_s)
X_comp2 = W_iis * l_iis ** 4 / (24 * E * I_arm_axi_s)
X_comp3 = w_s * l_iiis ** 4 / (24 * E * I_arm_axi_s)
y_as = X_comp1 + X_comp2 + X_comp3 # axial deflection
# Calculating torsional deflection
z_as = (
2
* np.pi
* (R_st + 0.5 * t_s)
* len_s
/ (2 * N_st)
* sigma
* (l_is + 0.5 * t_s) ** 3
/ (3 * E * I_arm_tor_s)
)
# tangential stress constraints
TC1 = T / (2 * np.pi * sigma)
TC2r = R ** 2 * len_s
TC2s = R_st ** 2 * len_s
# Calculating inactive mass and total mass
mass_stru_steel = 2 * N_st * (R_1s - R_sh) * a_s * rho_Fes
Structural_mass = mass_stru_steel + (N_r * (R_1 - R_sh) * a_r * rho_Fes)
Mass = Copper + Iron + Structural_mass
outputs["B_symax"] = B_symax
outputs["B_tmax"] = B_tmax
outputs["B_rymax"] = B_rymax
outputs["B_gfm"] = B_gfm
outputs["B_g"] = B_g
outputs["B_pc"] = B_pc
outputs["N_s"] = N_s
outputs["b_s"] = b_s
outputs["b_t"] = b_t
outputs["A_Cuscalc"] = A_Cuscalc
outputs["A_Curcalc"] = A_Curcalc
outputs["b_p"] = b_p
outputs["h_p"] = h_p
outputs["E_s"] = E_s
outputs["f"] = f
outputs["I_s"] = I_s
outputs["R_s"] = R_s
outputs["L_m"] = L_m
outputs["A_1"] = A_1
outputs["J_s"] = J_s
outputs["R_r"] = R_r
outputs["Losses"] = Losses
outputs["Load_mmf_ratio"] = Load_mmf_ratio
outputs["Power_ratio"] = Power_ratio
outputs["n_brushes"] = n_brushes
outputs["J_f"] = J_f
outputs["K_rad"] = K_rad
outputs["eandm_efficiency"] = np.maximum(eps, gen_eff)
outputs["S"] = S
outputs["Slot_aspect_ratio"] = Slot_aspect_ratio
outputs["Copper"] = Copper
outputs["Iron"] = Iron
outputs["u_ar"] = u_ar
outputs["y_ar"] = y_ar
outputs["z_ar"] = z_ar
outputs["u_as"] = u_as
outputs["y_as"] = y_as
outputs["z_as"] = z_as
outputs["u_allow_r"] = u_allow_r
outputs["u_allow_s"] = u_allow_s
outputs["y_allow_r"] = outputs["y_allow_s"] = y_allow
outputs["z_allow_s"] = z_allow_s
outputs["z_allow_r"] = z_allow_r
outputs["b_allow_s"] = b_allow_s
outputs["b_allow_r"] = b_allow_r
outputs["TC1"] = TC1
outputs["TC2r"] = TC2r
outputs["TC2s"] = TC2s
outputs["R_out"] = R_out
outputs["Structural_mass"] = Structural_mass
outputs["generator_mass"] = Mass
# ----------------------------------------------------------------------------------------
```
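The `400 * B + 7 * B**13` magnetization fit flagged as suspicious in the EESG code above is easy to probe in isolation. A minimal sketch (the function name and sample flux densities are assumptions for illustration, not WISDEM API):
```python
import numpy as np

def mmf_per_meter(B):
    """MMF drop per meter of iron path, in A/m, from the EESG fit 400*B + 7*B**13."""
    return 400.0 * B + 7.0 * B ** 13

for B in (0.5, 1.0, 1.4, 1.5, 2.0):
    print(f"B = {B:.1f} T -> {mmf_per_meter(B):8.0f} A/m")
# The linear and 13th-power terms cross near B ~ 1.4 T; beyond that the fit
# rises steeply, mimicking hard iron saturation.
```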
#### File: wisdem/drivetrainse/generator.py
```python
import numpy as np
import openmdao.api as om
import wisdem.drivetrainse.generator_models as gm
# ----------------------------------------------------------------------------------------------
class Constraints(om.ExplicitComponent):
"""
Computes allowable-minus-actual margins for the structural deflection, dimension-ratio, and flux density constraints of a generator design.
Parameters
----------
u_allow_s : float, [m]
u_as : float, [m]
z_allow_s : float, [m]
z_as : float, [m]
y_allow_s : float, [m]
y_as : float, [m]
b_allow_s : float, [m]
b_st : float, [m]
u_allow_r : float, [m]
u_ar : float, [m]
y_allow_r : float, [m]
y_ar : float, [m]
z_allow_r : float, [m]
z_ar : float, [m]
b_allow_r : float, [m]
b_arm : float, [m]
TC1 : float, [m**3]
TC2r : float, [m**3]
TC2s : float, [m**3]
B_g : float, [T]
B_smax : float, [T]
K_rad : float
K_rad_LL : float
K_rad_UL : float
D_ratio : float
D_ratio_LL : float
D_ratio_UL : float
Returns
-------
con_uas : float, [m]
con_zas : float, [m]
con_yas : float, [m]
con_bst : float, [m]
con_uar : float, [m]
con_yar : float, [m]
con_zar : float, [m]
con_br : float, [m]
TCr : float, [m**3]
TCs : float, [m**3]
con_TC2r : float, [m**3]
con_TC2s : float, [m**3]
con_Bsmax : float, [T]
K_rad_L : float
K_rad_U : float
D_ratio_L : float
D_ratio_U : float
"""
def setup(self):
self.add_input("u_allow_s", val=0.0, units="m")
self.add_input("u_as", val=0.0, units="m")
self.add_input("z_allow_s", val=0.0, units="m")
self.add_input("z_as", val=0.0, units="m")
self.add_input("y_allow_s", val=0.0, units="m")
self.add_input("y_as", val=0.0, units="m")
self.add_input("b_allow_s", val=0.0, units="m")
self.add_input("b_st", val=0.0, units="m")
self.add_input("u_allow_r", val=0.0, units="m")
self.add_input("u_ar", val=0.0, units="m")
self.add_input("y_allow_r", val=0.0, units="m")
self.add_input("y_ar", val=0.0, units="m")
self.add_input("z_allow_r", val=0.0, units="m")
self.add_input("z_ar", val=0.0, units="m")
self.add_input("b_allow_r", val=0.0, units="m")
self.add_input("b_arm", val=0.0, units="m")
self.add_input("TC1", val=0.0, units="m**3")
self.add_input("TC2r", val=0.0, units="m**3")
self.add_input("TC2s", val=0.0, units="m**3")
self.add_input("B_g", val=0.0, units="T")
self.add_input("B_smax", val=0.0, units="T")
self.add_input("K_rad", val=0.0)
self.add_input("K_rad_LL", val=0.0)
self.add_input("K_rad_UL", val=0.0)
self.add_input("D_ratio", val=0.0)
self.add_input("D_ratio_LL", val=0.0)
self.add_input("D_ratio_UL", val=0.0)
self.add_output("con_uas", val=0.0, units="m")
self.add_output("con_zas", val=0.0, units="m")
self.add_output("con_yas", val=0.0, units="m")
self.add_output("con_bst", val=0.0, units="m")
self.add_output("con_uar", val=0.0, units="m")
self.add_output("con_yar", val=0.0, units="m")
self.add_output("con_zar", val=0.0, units="m")
self.add_output("con_br", val=0.0, units="m")
self.add_output("TCr", val=0.0, units="m**3")
self.add_output("TCs", val=0.0, units="m**3")
self.add_output("con_TC2r", val=0.0, units="m**3")
self.add_output("con_TC2s", val=0.0, units="m**3")
self.add_output("con_Bsmax", val=0.0, units="T")
self.add_output("K_rad_L", val=0.0)
self.add_output("K_rad_U", val=0.0)
self.add_output("D_ratio_L", val=0.0)
self.add_output("D_ratio_U", val=0.0)
def compute(self, inputs, outputs):
outputs["con_uas"] = inputs["u_allow_s"] - inputs["u_as"]
outputs["con_zas"] = inputs["z_allow_s"] - inputs["z_as"]
outputs["con_yas"] = inputs["y_allow_s"] - inputs["y_as"]
outputs["con_bst"] = inputs["b_allow_s"] - inputs["b_st"] # b_st={'units':'m'}
outputs["con_uar"] = inputs["u_allow_r"] - inputs["u_ar"]
outputs["con_yar"] = inputs["y_allow_r"] - inputs["y_ar"]
outputs["con_TC2r"] = inputs["TC2s"] - inputs["TC1"]
outputs["con_TC2s"] = inputs["TC2s"] - inputs["TC1"]
outputs["con_Bsmax"] = inputs["B_g"] - inputs["B_smax"]
outputs["con_zar"] = inputs["z_allow_r"] - inputs["z_ar"]
outputs["con_br"] = inputs["b_allow_r"] - inputs["b_arm"] # b_r={'units':'m'}
outputs["TCr"] = inputs["TC2r"] - inputs["TC1"]
outputs["TCs"] = inputs["TC2s"] - inputs["TC1"]
outputs["K_rad_L"] = inputs["K_rad"] - inputs["K_rad_LL"]
outputs["K_rad_U"] = inputs["K_rad"] - inputs["K_rad_UL"]
outputs["D_ratio_L"] = inputs["D_ratio"] - inputs["D_ratio_LL"]
outputs["D_ratio_U"] = inputs["D_ratio"] - inputs["D_ratio_UL"]
# ----------------------------------------------------------------------------------------------
class MofI(om.ExplicitComponent):
"""
Compute moments of inertia.
Parameters
----------
R_out : float, [m]
Outer radius
stator_mass : float, [kg]
Total stator mass
rotor_mass : float, [kg]
Total rotor mass
generator_mass : float, [kg]
Actual mass
len_s : float, [m]
Stator core length
Returns
-------
generator_I : numpy array[3], [kg*m**2]
Moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass
rotor_I : numpy array[3], [kg*m**2]
Moments of Inertia for the rotor about its center of mass
stator_I : numpy array[3], [kg*m**2]
Moments of Inertia for the stator about its center of mass
"""
def setup(self):
self.add_input("R_out", val=0.0, units="m")
self.add_input("stator_mass", val=0.0, units="kg")
self.add_input("rotor_mass", val=0.0, units="kg")
self.add_input("generator_mass", val=0.0, units="kg")
self.add_input("len_s", val=0.0, units="m")
self.add_output("generator_I", val=np.zeros(3), units="kg*m**2")
self.add_output("rotor_I", val=np.zeros(3), units="kg*m**2")
self.add_output("stator_I", val=np.zeros(3), units="kg*m**2")
def compute(self, inputs, outputs):
R_out = inputs["R_out"]
Mass = inputs["generator_mass"]
m_stator = inputs["stator_mass"]
m_rotor = inputs["rotor_mass"]
len_s = inputs["len_s"]
I = np.zeros(3)
I[0] = 0.50 * Mass * R_out ** 2
I[1] = I[2] = 0.5 * I[0] + Mass * len_s ** 2 / 12.0
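# Uniform solid-cylinder approximation: I_axial = M R^2 / 2 about the shaft axis,
# and the transverse terms add the slender-rod contribution M L^2 / 12.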
outputs["generator_I"] = I
coeff = m_stator / Mass if m_stator > 0.0 else 0.5
outputs["stator_I"] = coeff * I
coeff = m_rotor / Mass if m_rotor > 0.0 else 0.5
outputs["rotor_I"] = coeff * I
# ----------------------------------------------------------------------------------------------
class Cost(om.ExplicitComponent):
"""
Provides a material cost estimate for a generator. Manufacturing costs are excluded.
Parameters
----------
C_Cu : float, [USD/kg]
Specific cost of copper
C_Fe : float, [USD/kg]
Specific cost of magnetic steel/iron
C_Fes : float, [USD/kg]
Specific cost of structural steel
C_PM : float, [USD/kg]
Specific cost of Magnet
Copper : float, [kg]
Copper mass
Iron : float, [kg]
Iron mass
mass_PM : float, [kg]
Magnet mass
Structural_mass : float, [kg]
Structural mass
Returns
-------
generator_cost : float, [USD]
Total cost
"""
def setup(self):
# Specific cost of material by type
self.add_input("C_Cu", val=0.0, units="USD/kg")
self.add_input("C_Fe", val=0.0, units="USD/kg")
self.add_input("C_Fes", val=0.0, units="USD/kg")
self.add_input("C_PM", val=0.0, units="USD/kg")
# Mass of each material type
self.add_input("Copper", val=0.0, units="kg")
self.add_input("Iron", val=0.0, units="kg")
self.add_input("mass_PM", val=0.0, units="kg")
self.add_input("Structural_mass", val=0.0, units="kg")
# Outputs
self.add_output("generator_cost", val=0.0, units="USD")
# self.declare_partials('*', '*', method='fd', form='central', step=1e-6)
def compute(self, inputs, outputs):
Copper = inputs["Copper"]
Iron = inputs["Iron"]
mass_PM = inputs["mass_PM"]
Structural_mass = inputs["Structural_mass"]
C_Cu = inputs["C_Cu"]
C_Fes = inputs["C_Fes"]
C_Fe = inputs["C_Fe"]
C_PM = inputs["C_PM"]
# Industrial electricity rate $/kWh https://www.eia.gov/electricity/monthly/epm_table_grapher.php?t=epmt_5_6_a
k_e = 0.064
# Material cost ($/kg) and electricity usage cost (kWh/kg)*($/kWh) for the materials with waste fraction
K_copper = Copper * (1.26 * C_Cu + 96.2 * k_e)
K_iron = Iron * (1.21 * C_Fe + 26.9 * k_e)
K_pm = mass_PM * (1.0 * C_PM + 79.0 * k_e)
K_steel = Structural_mass * (1.21 * C_Fes + 15.9 * k_e)
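# Illustrative check with assumed numbers (not WISDEM defaults): 5,000 kg of copper
# at 9 USD/kg gives K_copper = 5000 * (1.26*9 + 96.2*0.064) ~ 87,500 USD; the
# electricity term contributes ~6.2 USD/kg on top of the marked-up material price.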
# Account for capital cost and labor share from BLS MFP by NAICS
outputs["generator_cost"] = (K_copper + K_pm) / 0.619 + (K_iron + K_steel) / 0.684
# ----------------------------------------------------------------------------------------------
class PowerElectronicsEff(om.ExplicitComponent):
"""
Compute representative efficiency of power electronics
Parameters
----------
machine_rating : float, [W]
Machine rating
shaft_rpm : numpy array[n_pc], [rpm]
rated speed of input shaft (lss for direct, hss for geared)
eandm_efficiency : numpy array[n_pc]
Generator electromagnetic efficiency values (<1)
Returns
-------
converter_efficiency : numpy array[n_pc]
Converter efficiency values (<1)
transformer_efficiency : numpy array[n_pc]
Transformer efficiency values (<1)
generator_efficiency : numpy array[n_pc]
Full generator and power electronics efficiency values (<1)
"""
def initialize(self):
self.options.declare("n_pc", default=20)
def setup(self):
n_pc = self.options["n_pc"]
self.add_input("machine_rating", val=0.0, units="W")
self.add_input("shaft_rpm", val=np.zeros(n_pc), units="rpm")
self.add_input("eandm_efficiency", val=np.zeros(n_pc))
self.add_output("converter_efficiency", val=np.zeros(n_pc))
self.add_output("transformer_efficiency", val=np.zeros(n_pc))
self.add_output("generator_efficiency", val=np.zeros(n_pc))
def compute(self, inputs, outputs):
# Unpack inputs
rating = inputs["machine_rating"]
rpmData = inputs["shaft_rpm"]
rpmRatio = rpmData / rpmData[-1]
# This converter efficiency is from the APEEM Group in 2020
# See <NAME>, <NAME>, <NAME>, <NAME>
# Converter constants
v_dc, v_dc0, c0, c1, c2, c3 = 6600, 6200, -2.1e-10, 1.2e-5, 1.46e-3, -2e-4
p_ac0, p_dc0 = 0.99 * rating, rating
p_s0 = 1e-3 * p_dc0
# calculated parameters
a = p_dc0 * (1.0 + c1 * (v_dc - v_dc0))
b = p_s0 * (1.0 + c2 * (v_dc - v_dc0))
c = c0 * (1.0 + c3 * (v_dc - v_dc0))
# Converter efficiency
p_dc = rpmRatio * p_dc0
p_ac = (p_ac0 / (a - b) - c * (a - b)) * (p_dc - b) + c * ((p_dc - b) ** 2)
conv_eff = p_ac / p_dc
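# At rated power (p_dc == p_dc0) this fit returns roughly p_ac0 / p_dc0 = 0.99;
# the b (standby) and c (quadratic) terms mostly shape the part-load roll-off.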
# Transformer loss model is P_loss = P_0 + a^2 * P_k
# a is output power/rated
p0, pk, rT = 16.0, 111.0, 5.0 / 3.0
a = rpmRatio * (1 / rT)
# This gives loss in kW, so need to convert to efficiency
trans_eff = 1.0 - (p0 + a * a * pk) / (1e-3 * rating)
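# Sanity check with the constants above: at rated output, a = 1/rT = 0.6, so the
# loss is 16 + 0.36*111 ~ 56 kW; a 5 MW machine then sees
# trans_eff ~ 1 - 56/5000 ~ 0.989.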
# Store outputs
outputs["converter_efficiency"] = conv_eff
outputs["transformer_efficiency"] = trans_eff
outputs["generator_efficiency"] = conv_eff * trans_eff * inputs["eandm_efficiency"]
# ----------------------------------------------------------------------------------------------
class Generator(om.Group):
def initialize(self):
genTypes = ["scig", "dfig", "eesg", "pmsg_arms", "pmsg_disc", "pmsg_outer"]
self.options.declare("design", values=genTypes + [m.upper() for m in genTypes])
self.options.declare("n_pc", default=20)
def setup(self):
genType = self.options["design"]
n_pc = self.options["n_pc"]
# ivc = om.IndepVarComp()
# sivc = om.IndepVarComp()
self.set_input_defaults("B_r", val=1.2, units="T")
self.set_input_defaults("P_Fe0e", val=1.0, units="W/kg")
self.set_input_defaults("P_Fe0h", val=4.0, units="W/kg")
self.set_input_defaults("S_N", val=-0.002)
self.set_input_defaults("alpha_p", val=0.5 * np.pi * 0.7)
self.set_input_defaults("b_r_tau_r", val=0.45)
self.set_input_defaults("b_ro", val=0.004, units="m")
self.set_input_defaults("b_s_tau_s", val=0.45)
self.set_input_defaults("b_so", val=0.004, units="m")
self.set_input_defaults("cofi", val=0.85)
self.set_input_defaults("freq", val=60, units="Hz")
self.set_input_defaults("h_i", val=0.001, units="m")
self.set_input_defaults("h_sy0", val=0.0)
self.set_input_defaults("h_w", val=0.005, units="m")
self.set_input_defaults("k_fes", val=0.9)
self.set_input_defaults("k_fillr", val=0.7)
self.set_input_defaults("k_fills", val=0.65)
self.set_input_defaults("k_s", val=0.2)
self.set_input_defaults("m", val=3)
self.set_input_defaults("mu_0", val=np.pi * 4e-7, units="m*kg/s**2/A**2")
self.set_input_defaults("mu_r", val=1.06, units="m*kg/s**2/A**2")
self.set_input_defaults("p", val=3.0)
self.set_input_defaults("phi", val=np.deg2rad(90), units="rad")
self.set_input_defaults("q1", val=6)
self.set_input_defaults("q2", val=4)
self.set_input_defaults("ratio_mw2pp", val=0.7)
self.set_input_defaults("resist_Cu", val=1.8e-8 * 1.4, units="ohm/m")
self.set_input_defaults("sigma", val=40e3, units="Pa")
self.set_input_defaults("y_tau_p", val=1.0)
self.set_input_defaults("y_tau_pr", val=10.0 / 12)
# self.set_input_defaults('I_0', val=0.0, units='A')
# self.set_input_defaults('d_r', val=0.0, units='m')
# self.set_input_defaults('h_m', val=0.0, units='m')
# self.set_input_defaults('h_0', val=0.0, units ='m')
# self.set_input_defaults('h_s', val=0.0, units='m')
# self.set_input_defaults('len_s', val=0.0, units='m')
# self.set_input_defaults('n_r', val=0.0)
# self.set_input_defaults('rad_ag', val=0.0, units='m')
# self.set_input_defaults('t_wr', val=0.0, units='m')
# self.set_input_defaults('n_s', val=0.0)
# self.set_input_defaults('b_st', val=0.0, units='m')
# self.set_input_defaults('d_s', val=0.0, units='m')
# self.set_input_defaults('t_ws', val=0.0, units='m')
# self.set_input_defaults('rho_Copper', val=0.0, units='kg*m**-3')
# self.set_input_defaults('rho_Fe', val=0.0, units='kg*m**-3')
# self.set_input_defaults('rho_Fes', val=0.0, units='kg*m**-3')
# self.set_input_defaults('rho_PM', val=0.0, units='kg*m**-3')
# self.set_input_defaults('C_Cu', val=0.0, units='USD/kg')
# self.set_input_defaults('C_Fe', val=0.0, units='USD/kg')
# self.set_input_defaults('C_Fes', val=0.0, units='USD/kg')
# self.set_input_defaults('C_PM', val=0.0, units='USD/kg')
# if genType.lower() in ['pmsg_outer']:
# self.set_input_defaults('r_g',0.0, units ='m')
# self.set_input_defaults('N_c',0.0)
# self.set_input_defaults('b',0.0)
# self.set_input_defaults('c',0.0)
# self.set_input_defaults('E_p',0.0, units ='V')
# self.set_input_defaults('h_yr', val=0.0, units ='m')
# self.set_input_defaults('h_ys', val=0.0, units ='m')
# self.set_input_defaults('h_sr',0.0,units='m',desc='Structural Mass')
# self.set_input_defaults('h_ss',0.0, units ='m')
# self.set_input_defaults('t_r',0.0, units ='m')
# self.set_input_defaults('t_s',0.0, units ='m')
# self.set_input_defaults('u_allow_pcent',0.0)
# self.set_input_defaults('y_allow_pcent',0.0)
# self.set_input_defaults('z_allow_deg',0.0,units='deg')
# self.set_input_defaults('B_tmax',0.0, units='T')
# self.set_input_defaults('P_mech', 0.0, units='W')
# self.set_input_defaults('y_sh', units ='m')
# self.set_input_defaults('theta_sh', 0.0, units='rad')
# self.set_input_defaults('D_nose',0.0, units ='m')
# self.set_input_defaults('y_bd', units ='m')
# self.set_input_defaults('theta_bd', 0.0, units='rad')
# if genType.lower() in ['eesg','pmsg_arms','pmsg_disc']:
# self.set_input_defaults('tau_p', val=0.0, units='m')
# self.set_input_defaults('h_ys', val=0.0, units='m')
# self.set_input_defaults('h_yr', val=0.0, units='m')
# self.set_input_defaults('b_arm', val=0.0, units='m')
# elif genType.lower() in ['scig','dfig']:
# self.set_input_defaults('B_symax', val=0.0, units='T')
# self.set_input_defaults('S_Nmax', val=-0.2)
# if topLevelFlag:
# self.add_subsystem('ivc', ivc, promotes=['*'])
# self.set_input_defaults('machine_rating', 0.0, units='W')
# self.set_input_defaults('shaft_rpm', np.linspace(1.0, 10.0, n_pc), units='rpm')
# self.set_input_defaults('rated_torque', 0.0, units='N*m')
# self.set_input_defaults('D_shaft', val=0.0, units='m')
self.set_input_defaults("E", val=210e9, units="Pa")
self.set_input_defaults("G", val=81e9, units="Pa")
# self.add_subsystem('sivc', sivc, promotes=['*'])
# Easy Poisson ratio assuming isotropic
self.add_subsystem(
"poisson", om.ExecComp("v = 0.5*E/G - 1.0", E={"units": "Pa"}, G={"units": "Pa"}), promotes=["*"]
)
# Add generator design component and cost
if genType.lower() == "scig":
mygen = gm.SCIG
elif genType.lower() == "dfig":
mygen = gm.DFIG
elif genType.lower() == "eesg":
mygen = gm.EESG
elif genType.lower() == "pmsg_arms":
mygen = gm.PMSG_Arms
elif genType.lower() == "pmsg_disc":
mygen = gm.PMSG_Disc
elif genType.lower() == "pmsg_outer":
mygen = gm.PMSG_Outer
self.add_subsystem("generator", mygen(n_pc=n_pc), promotes=["*"])
self.add_subsystem("mofi", MofI(), promotes=["*"])
self.add_subsystem("gen_cost", Cost(), promotes=["*"])
self.add_subsystem("constr", Constraints(), promotes=["*"])
self.add_subsystem("eff", PowerElectronicsEff(n_pc=n_pc), promotes=["*"])
```
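The `Generator` group above can be instantiated directly. A minimal setup sketch, with placeholder rating, speed, and torque values (a real study must populate every geometry and material input before calling `run_model`):
```python
import numpy as np
import openmdao.api as om
from wisdem.drivetrainse.generator import Generator

prob = om.Problem()
prob.model = Generator(design="eesg", n_pc=20)
prob.setup()

# A few of the promoted inputs; the values are placeholders, not a vetted design.
prob.set_val("machine_rating", 5.0e6, units="W")
prob.set_val("shaft_rpm", np.linspace(2.0, 12.1, 20), units="rpm")
prob.set_val("rated_torque", 4.1e6, units="N*m")

# With all remaining geometry/material inputs set:
# prob.run_model()
# print(prob.get_val("generator_efficiency"))
```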
#### File: wisdem/floatingse/floating_frame.py
```python
import numpy as np
import openmdao.api as om
import wisdem.commonse.utilities as util
import wisdem.pyframe3dd.pyframe3dd as pyframe3dd
import wisdem.commonse.utilization_dnvgl as util_dnvgl
import wisdem.commonse.utilization_constraints as util_con
from wisdem.commonse import NFREQ, gravity
from wisdem.floatingse.member import NULL, MEMMAX, Member
NNODES_MAX = 1000
NELEM_MAX = 1000
RIGID = 1e30
EPS = 1e-6
class PlatformFrame(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
for k in range(n_member):
self.add_input(f"member{k}:nodes_xyz", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_input(f"member{k}:nodes_r", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_D", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_t", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
self.add_input(f"member{k}:section_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:section_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:section_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:idx_cb", 0)
self.add_input(f"member{k}:buoyancy_force", 0.0, units="N")
self.add_input(f"member{k}:displacement", 0.0, units="m**3")
self.add_input(f"member{k}:center_of_buoyancy", np.zeros(3), units="m")
self.add_input(f"member{k}:center_of_mass", np.zeros(3), units="m")
self.add_input(f"member{k}:ballast_mass", 0.0, units="kg")
self.add_input(f"member{k}:total_mass", 0.0, units="kg")
self.add_input(f"member{k}:total_cost", 0.0, units="USD")
self.add_input(f"member{k}:I_total", np.zeros(6), units="kg*m**2")
self.add_input(f"member{k}:Awater", 0.0, units="m**2")
self.add_input(f"member{k}:Iwater", 0.0, units="m**4")
self.add_input(f"member{k}:added_mass", np.zeros(6), units="kg")
self.add_input(f"member{k}:waterline_centroid", np.zeros(2), units="m")
self.add_input(f"member{k}:variable_ballast_capacity", val=0.0, units="m**3")
self.add_input(f"member{k}:Px", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:Py", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:Pz", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:qdyn", np.zeros(MEMMAX), units="Pa")
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("transition_piece_mass", 0.0, units="kg")
self.add_input("transition_piece_cost", 0.0, units="USD")
self.add_output("transition_piece_I", np.zeros(6), units="kg*m**2")
self.add_output("platform_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_output("platform_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_output("platform_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_output("platform_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("platform_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("platform_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("platform_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("platform_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_output("platform_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_discrete_output("platform_elem_memid", [-1] * NELEM_MAX)
self.add_output("platform_displacement", 0.0, units="m**3")
self.add_output("platform_center_of_buoyancy", np.zeros(3), units="m")
self.add_output("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_output("platform_centroid", np.zeros(3), units="m")
self.add_output("platform_ballast_mass", 0.0, units="kg")
self.add_output("platform_hull_mass", 0.0, units="kg")
self.add_output("platform_mass", 0.0, units="kg")
self.add_output("platform_I_hull", np.zeros(6), units="kg*m**2")
self.add_output("platform_cost", 0.0, units="USD")
self.add_output("platform_Awater", 0.0, units="m**2")
self.add_output("platform_Iwater", 0.0, units="m**4")
self.add_output("platform_added_mass", np.zeros(6), units="kg")
self.add_output("platform_variable_capacity", np.zeros(n_member), units="m**3")
self.node_mem2glob = {}
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Seems like we have to run this each time as numbering can change during optimization
self.node_mem2glob = {}
self.set_connectivity(inputs, outputs)
self.set_node_props(inputs, outputs)
self.set_element_props(inputs, outputs, discrete_inputs, discrete_outputs)
def set_connectivity(self, inputs, outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Initialize running lists across all members
nodes_temp = np.empty((0, 3))
elem_n1 = np.array([], dtype=np.int_)
elem_n2 = np.array([], dtype=np.int_)
# Loop over members and grab all nodes and internal connections
for k in range(n_member):
inode_xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(inode_xyz[:, 0] == NULL)[0][0]
inode_xyz = inode_xyz[:inodes, :]
inode_range = np.arange(inodes - 1)
n = nodes_temp.shape[0]
for ii in range(inodes):
self.node_mem2glob[(k, ii)] = n + ii
elem_n1 = np.append(elem_n1, n + inode_range)
elem_n2 = np.append(elem_n2, n + inode_range + 1)
nodes_temp = np.append(nodes_temp, inode_xyz, axis=0)
# Reveal connectivity by using mapping to unique node positions
nodes, idx, inv = np.unique(nodes_temp.round(8), axis=0, return_index=True, return_inverse=True)
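# Rounding to 8 decimals lets coincident joints contributed by different members
# collapse into a single node; `inv` maps every original (duplicated) row of
# nodes_temp to its index in the de-duplicated `nodes` array.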
nnode = nodes.shape[0]
outputs["platform_nodes"] = NULL * np.ones((NNODES_MAX, 3))
outputs["platform_nodes"][:nnode, :] = nodes
outputs["platform_centroid"] = nodes.mean(axis=0)
# Use mapping to set references to node joints
nelem = elem_n1.size
outputs["platform_elem_n1"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["platform_elem_n2"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["platform_elem_n1"][:nelem] = inv[elem_n1]
outputs["platform_elem_n2"][:nelem] = inv[elem_n2]
# Update the member-to-global node index map with the de-duplicated numbering
for k in self.node_mem2glob.keys():
self.node_mem2glob[k] = inv[self.node_mem2glob[k]]
def set_node_props(self, inputs, outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Number of valid nodes
node_platform = outputs["platform_nodes"]
nnode = np.where(node_platform[:, 0] == NULL)[0][0]
node_platform = node_platform[:nnode, :]
# Find greatest radius of all members at node intersections
Rnode = np.zeros(nnode)
for k in range(n_member):
irnode = inputs[f"member{k}:nodes_r"]
n = np.where(irnode == NULL)[0][0]
for ii in range(n):
iglob = self.node_mem2glob[(k, ii)]
Rnode[iglob] = np.array([Rnode[iglob], irnode[ii]]).max()
# Find forces on nodes
Fnode = np.zeros((nnode, 3))
for k in range(n_member):
icb = int(inputs[f"member{k}:idx_cb"])
iglob = self.node_mem2glob[(k, icb)]
Fnode[iglob, 2] += inputs[f"member{k}:buoyancy_force"]
# Get transition piece inertial properties
itrans_platform = util.closest_node(node_platform, inputs["transition_node"])
m_trans = float(inputs["transition_piece_mass"])
r_trans = Rnode[itrans_platform]
I_trans = m_trans * r_trans ** 2.0 * np.r_[0.5, 0.5, 1.0, np.zeros(3)]
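# Thin-ring approximation at radius r_trans: Izz = m r^2 about the tower axis and
# Ixx = Iyy = m r^2 / 2, with zero products of inertia.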
outputs["transition_piece_I"] = I_trans
# Store outputs
outputs["platform_Rnode"] = NULL * np.ones(NNODES_MAX)
outputs["platform_Rnode"][:nnode] = Rnode
outputs["platform_Fnode"] = NULL * np.ones((NNODES_MAX, 3))
outputs["platform_Fnode"][:nnode, :] = Fnode
def set_element_props(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Initialize running lists across all members
elem_D = np.array([])
elem_t = np.array([])
elem_A = np.array([])
elem_Asx = np.array([])
elem_Asy = np.array([])
elem_Ixx = np.array([])
elem_Iyy = np.array([])
elem_Izz = np.array([])
elem_rho = np.array([])
elem_E = np.array([])
elem_G = np.array([])
elem_sigy = np.array([])
elem_Px1 = np.array([])
elem_Px2 = np.array([])
elem_Py1 = np.array([])
elem_Py2 = np.array([])
elem_Pz1 = np.array([])
elem_Pz2 = np.array([])
elem_qdyn = np.array([])
elem_memid = np.array([], dtype=np.int_)
mass = 0.0
m_ball = 0.0
cost = 0.0
volume = 0.0
Awater = 0.0
Iwater = 0.0
m_added = np.zeros(6)
cg_plat = np.zeros(3)
cb_plat = np.zeros(3)
centroid = outputs["platform_centroid"][:2]
variable_capacity = np.zeros(n_member)
# Append all member data
for k in range(n_member):
n = np.where(inputs[f"member{k}:section_A"] == NULL)[0][0]
elem_D = np.append(elem_D, inputs[f"member{k}:section_D"][:n])
elem_t = np.append(elem_t, inputs[f"member{k}:section_t"][:n])
elem_A = np.append(elem_A, inputs[f"member{k}:section_A"][:n])
elem_Asx = np.append(elem_Asx, inputs[f"member{k}:section_Asx"][:n])
elem_Asy = np.append(elem_Asy, inputs[f"member{k}:section_Asy"][:n])
elem_Ixx = np.append(elem_Ixx, inputs[f"member{k}:section_Ixx"][:n])
elem_Iyy = np.append(elem_Iyy, inputs[f"member{k}:section_Iyy"][:n])
elem_Izz = np.append(elem_Izz, inputs[f"member{k}:section_Izz"][:n])
elem_rho = np.append(elem_rho, inputs[f"member{k}:section_rho"][:n])
elem_E = np.append(elem_E, inputs[f"member{k}:section_E"][:n])
elem_G = np.append(elem_G, inputs[f"member{k}:section_G"][:n])
elem_sigy = np.append(elem_sigy, inputs[f"member{k}:section_sigma_y"][:n])
elem_qdyn = np.append(elem_qdyn, inputs[f"member{k}:qdyn"][:n])
elem_memid = np.append(elem_memid, k * np.ones(n, dtype=np.int_))
# The loads should come in with length n+1
elem_Px1 = np.append(elem_Px1, inputs[f"member{k}:Px"][:n])
elem_Px2 = np.append(elem_Px2, inputs[f"member{k}:Px"][1 : (n + 1)])
elem_Py1 = np.append(elem_Py1, inputs[f"member{k}:Py"][:n])
elem_Py2 = np.append(elem_Py2, inputs[f"member{k}:Py"][1 : (n + 1)])
elem_Pz1 = np.append(elem_Pz1, inputs[f"member{k}:Pz"][:n])
elem_Pz2 = np.append(elem_Pz2, inputs[f"member{k}:Pz"][1 : (n + 1)])
# Mass, volume, cost tallies
imass = inputs[f"member{k}:total_mass"]
ivol = inputs[f"member{k}:displacement"]
mass += imass
volume += ivol
cost += inputs[f"member{k}:total_cost"]
m_ball += inputs[f"member{k}:ballast_mass"]
Awater_k = inputs[f"member{k}:Awater"]
Awater += Awater_k
Rwater2 = np.sum((inputs[f"member{k}:waterline_centroid"] - centroid) ** 2)
Iwater += inputs[f"member{k}:Iwater"] + Awater_k * Rwater2
m_added += inputs[f"member{k}:added_mass"]
variable_capacity[k] = inputs[f"member{k}:variable_ballast_capacity"]
# Center of mass / buoyancy tallies
cg_plat += imass * inputs[f"member{k}:center_of_mass"]
cb_plat += ivol * inputs[f"member{k}:center_of_buoyancy"]
# Add transition piece
m_trans = inputs["transition_piece_mass"]
cg_trans = inputs["transition_node"]
I_trans = util.assembleI(outputs["transition_piece_I"])
mass += m_trans
cost += inputs["transition_piece_cost"]
cg_plat += m_trans * cg_trans
# Finalize outputs
cg_plat /= mass
cb_plat /= volume
# With CG known, loop back through to compute platform I
unit_z = np.array([0.0, 0.0, 1.0])
I_hull = np.zeros((3, 3))
for k in range(n_member):
xyz_k = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz_k[:, 0] == NULL)[0][0]
xyz_k = xyz_k[:inodes, :]
imass = inputs[f"member{k}:total_mass"]
cg_k = inputs[f"member{k}:center_of_mass"]
R = cg_plat - cg_k
# Figure out angle to make member parallel to global c.s.
vec_k = xyz_k[-1, :] - xyz_k[0, :]
T = util.rotate_align_vectors(vec_k, unit_z)
# Rotate member inertia tensor
I_k = util.assembleI(inputs[f"member{k}:I_total"])
I_k_rot = T @ I_k @ T.T
# Now do parallel axis theorem
I_hull += np.array(I_k_rot) + imass * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Add in transition piece
R = cg_plat - cg_trans
I_hull += I_trans + m_trans * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Store outputs
nelem = elem_A.size
outputs["platform_elem_D"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_t"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_A"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Asx"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Asy"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Ixx"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Iyy"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Izz"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_rho"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_E"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_G"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_sigma_y"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Px1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Px2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Py1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Py2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Pz1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Pz2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_qdyn"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_D"][:nelem] = elem_D
outputs["platform_elem_t"][:nelem] = elem_t
outputs["platform_elem_A"][:nelem] = elem_A
outputs["platform_elem_Asx"][:nelem] = elem_Asx
outputs["platform_elem_Asy"][:nelem] = elem_Asy
outputs["platform_elem_Ixx"][:nelem] = elem_Ixx
outputs["platform_elem_Iyy"][:nelem] = elem_Iyy
outputs["platform_elem_Izz"][:nelem] = elem_Izz
outputs["platform_elem_rho"][:nelem] = elem_rho
outputs["platform_elem_E"][:nelem] = elem_E
outputs["platform_elem_G"][:nelem] = elem_G
outputs["platform_elem_sigma_y"][:nelem] = elem_sigy
outputs["platform_elem_Px1"][:nelem] = elem_Px1
outputs["platform_elem_Px2"][:nelem] = elem_Px2
outputs["platform_elem_Py1"][:nelem] = elem_Py1
outputs["platform_elem_Py2"][:nelem] = elem_Py2
outputs["platform_elem_Pz1"][:nelem] = elem_Pz1
outputs["platform_elem_Pz2"][:nelem] = elem_Pz2
outputs["platform_elem_qdyn"][:nelem] = elem_qdyn
discrete_outputs["platform_elem_memid"] = elem_memid
outputs["platform_mass"] = mass
outputs["platform_ballast_mass"] = m_ball
outputs["platform_hull_mass"] = mass - m_ball
outputs["platform_cost"] = cost
outputs["platform_displacement"] = volume
outputs["platform_hull_center_of_mass"] = cg_plat
outputs["platform_center_of_buoyancy"] = cb_plat
outputs["platform_I_hull"] = util.unassembleI(I_hull)
outputs["platform_Awater"] = Awater
outputs["platform_Iwater"] = Iwater
outputs["platform_added_mass"] = m_added
outputs["platform_variable_capacity"] = variable_capacity
class TowerPreMember(om.ExplicitComponent):
def setup(self):
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("tower_height", 0.0, units="m")
self.add_output("tower_top_node", np.zeros(3), units="m")
def compute(self, inputs, outputs):
transition_node = inputs["transition_node"]
tower_top_node = 0 # start from scalar zero so the += below builds a new array instead of mutating the transition_node input
tower_top_node += transition_node
tower_top_node[2] += float(inputs["tower_height"])
outputs["tower_top_node"] = tower_top_node
class PlatformTowerFrame(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
n_attach = opt["mooring"]["n_attach"]
self.add_input("platform_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_input("platform_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_input("platform_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_input("platform_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("platform_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("platform_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("platform_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("platform_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_input("platform_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_input("platform_mass", 0.0, units="kg")
self.add_input("platform_I_hull", np.zeros(6), units="kg*m**2")
self.add_input("platform_displacement", 0.0, units="m**3")
self.add_input("tower_nodes", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_output("tower_Fnode", copy_shape="tower_nodes", units="N")
self.add_input("tower_Rnode", NULL * np.ones(MEMMAX), units="m")
self.add_output("tower_elem_n1", copy_shape="tower_elem_A")
self.add_output("tower_elem_n2", copy_shape="tower_elem_A")
self.add_output("tower_elem_L", copy_shape="tower_elem_A", units="m")
self.add_input("tower_elem_D", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_t", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
self.add_input("tower_elem_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_Px", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Px1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Px2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Py", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Py1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Py2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Pz", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Pz1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Pz2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_qdyn", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_center_of_mass", np.zeros(3), units="m")
self.add_input("tower_mass", 0.0, units="kg")
self.add_input("rho_water", 0.0, units="kg/m**3")
self.add_input("tower_top_node", np.zeros(3), units="m")
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("rna_mass", 0.0, units="kg")
self.add_input("rna_cg", np.zeros(3), units="m")
self.add_input("mooring_neutral_load", np.zeros((n_attach, 3)), units="N")
self.add_input("platform_variable_capacity", np.zeros(n_member), units="m**3")
for k in range(n_member):
self.add_input(f"member{k}:nodes_xyz", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_input(f"member{k}:variable_ballast_Vpts", val=np.zeros(10), units="m**3")
self.add_input(f"member{k}:variable_ballast_spts", val=np.zeros(10))
self.add_output("system_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_output("system_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_output("system_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_output("system_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("system_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("system_elem_L", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("system_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("system_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("system_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("system_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("system_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("system_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("system_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("system_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("system_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_output("system_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("system_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("system_structural_center_of_mass", np.zeros(3), units="m")
self.add_output("system_structural_mass", 0.0, units="kg")
self.add_output("system_center_of_mass", np.zeros(3), units="m")
self.add_output("system_mass", 0.0, units="kg")
self.add_output("variable_ballast_mass", 0.0, units="kg")
self.add_output("variable_center_of_mass", val=np.zeros(3), units="m")
self.add_output("constr_variable_margin", val=0.0)
self.add_output("member_variable_volume", val=np.zeros(n_member), units="m**3")
self.add_output("member_variable_height", val=np.zeros(n_member))
self.add_output("platform_total_center_of_mass", np.zeros(3), units="m")
self.add_output("platform_I_total", np.zeros(6), units="kg*m**2")
def compute(self, inputs, outputs):
# Combine nodes
node_platform = inputs["platform_nodes"]
node_tower = inputs["tower_nodes"]
nnode_platform = np.where(node_platform[:, 0] == NULL)[0][0]
nnode_tower = np.where(node_tower[:, 0] == NULL)[0][0]
nnode_system = nnode_platform + np.maximum(1, nnode_tower) - 1
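        # The tower shares its base node with the platform transition node, so the shared
        # node is counted only once (hence the -1); max(1, ...) guards against an empty tower.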
nelem_platform = np.where(inputs["platform_elem_A"] == NULL)[0][0]
nelem_tower = np.where(inputs["tower_elem_A"] == NULL)[0][0]
nelem_system = nelem_platform + nelem_tower
# Combine elements indices and have tower base node point to platform transition node
outputs["tower_Fnode"] = np.zeros(node_tower.shape)
outputs["tower_elem_n1"] = NULL * np.ones(MEMMAX, dtype=np.int_)
outputs["tower_elem_n2"] = NULL * np.ones(MEMMAX, dtype=np.int_)
outputs["tower_elem_L"] = NULL * np.ones(MEMMAX)
tower_n1 = np.arange(nelem_tower, dtype=np.int_)
tower_n2 = np.arange(nelem_tower, dtype=np.int_) + 1
outputs["tower_elem_n1"][:nelem_tower] = idx1 = tower_n1.copy()
outputs["tower_elem_n2"][:nelem_tower] = idx2 = tower_n2.copy()
itrans_platform = util.closest_node(node_platform[:nnode_platform, :], inputs["transition_node"])
tower_n1 += nnode_platform - 1
tower_n2 += nnode_platform - 1
tower_n1[0] = itrans_platform
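        # The tower's first node coincides with the platform transition node, so tower element
        # indices are shifted by (nnode_platform - 1) into the combined numbering and the first
        # tower element is re-anchored to the platform transition node index.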
outputs["tower_elem_L"][:nelem_tower] = np.sqrt(
np.sum((node_tower[idx2, :] - node_tower[idx1, :]) ** 2, axis=1)
)
# Store all outputs
outputs["system_nodes"] = NULL * np.ones((NNODES_MAX, 3))
outputs["system_Fnode"] = NULL * np.ones((NNODES_MAX, 3))
outputs["system_Rnode"] = NULL * np.ones(NNODES_MAX)
outputs["system_elem_n1"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["system_elem_n2"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["system_elem_L"] = NULL * np.ones(NELEM_MAX)
outputs["system_nodes"][:nnode_system, :] = sysnode = np.vstack(
(node_platform[:nnode_platform, :], node_tower[1:nnode_tower, :])
)
outputs["system_Fnode"][:nnode_system, :] = np.vstack(
(inputs["platform_Fnode"][:nnode_platform, :], outputs["tower_Fnode"][1:nnode_tower, :])
)
outputs["system_Rnode"][:nnode_system] = np.r_[
inputs["platform_Rnode"][:nnode_platform], inputs["tower_Rnode"][1:nnode_tower]
]
outputs["system_elem_n1"][:nelem_system] = idx1 = np.r_[
inputs["platform_elem_n1"][:nelem_platform],
tower_n1,
]
outputs["system_elem_n2"][:nelem_system] = idx2 = np.r_[
inputs["platform_elem_n2"][:nelem_platform],
tower_n2,
]
outputs["system_elem_L"][:nelem_system] = np.sqrt(
np.sum((sysnode[np.int_(idx2), :] - sysnode[np.int_(idx1), :]) ** 2, axis=1)
)
for var in [
"elem_D",
"elem_t",
"elem_A",
"elem_Asx",
"elem_Asy",
"elem_Ixx",
"elem_Iyy",
"elem_Izz",
"elem_rho",
"elem_E",
"elem_G",
"elem_sigma_y",
"elem_qdyn",
]:
outputs["system_" + var] = NULL * np.ones(NELEM_MAX)
outputs["system_" + var][:nelem_system] = np.r_[
inputs["platform_" + var][:nelem_platform], inputs["tower_" + var][:nelem_tower]
]
        # Have to split the tower member loads between element beginning and end points
for var in ["elem_Px1", "elem_Py1", "elem_Pz1", "elem_Px2", "elem_Py2", "elem_Pz2"]:
outputs["system_" + var] = NULL * np.ones(NELEM_MAX)
outputs["tower_" + var] = NULL * np.ones(MEMMAX)
tower_P = inputs["tower_" + var[:-1]]
outputs["tower_" + var][:nelem_tower] = (
tower_P[:nelem_tower] if var[-1] == "1" else tower_P[1 : (nelem_tower + 1)]
)
outputs["system_" + var][:nelem_system] = np.r_[
inputs["platform_" + var][:nelem_platform], outputs["tower_" + var][:nelem_tower]
]
# Mass summaries
m_platform = inputs["platform_mass"]
cg_platform = inputs["platform_hull_center_of_mass"]
I_platform = util.assembleI(inputs["platform_I_hull"])
m_tower = inputs["tower_mass"]
m_rna = inputs["rna_mass"]
m_sys = m_platform + m_tower + m_rna
outputs["system_structural_mass"] = m_sys
outputs["system_structural_center_of_mass"] = (
m_platform * cg_platform
+ m_tower * inputs["tower_center_of_mass"]
+ m_rna * (inputs["rna_cg"] + inputs["tower_top_node"])
) / m_sys
# Balance out variable ballast
mooringFz = inputs["mooring_neutral_load"][:, 2].sum()
capacity = inputs["platform_variable_capacity"]
        capacity_sum = capacity.sum() + EPS  # Avoid divide-by-zero
rho_water = inputs["rho_water"]
m_variable = inputs["platform_displacement"] * rho_water - m_sys + mooringFz / gravity
V_variable = m_variable / rho_water
outputs["variable_ballast_mass"] = m_variable
outputs["constr_variable_margin"] = V_variable / capacity_sum
V_variable_member = V_variable * capacity / capacity_sum
outputs["member_variable_volume"] = V_variable_member
m_variable_member = V_variable_member * rho_water
# Now find the CG of the variable mass assigned to each member
n_member = capacity.size
outputs["member_variable_height"] = np.zeros(n_member)
cg_variable_member = np.zeros((n_member, 3))
for k in range(n_member):
if V_variable_member[k] == 0.0:
continue
xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz[:, 0] == NULL)[0][0]
xyz = xyz[:inodes, :]
dxyz = xyz[-1, :] - xyz[0, :]
spts = inputs[f"member{k}:variable_ballast_spts"]
Vpts = inputs[f"member{k}:variable_ballast_Vpts"]
s_cg = np.interp(0.5 * V_variable_member[k], Vpts, spts)
cg_variable_member[k, :] = xyz[0, :] + s_cg * dxyz
s_end = np.interp(V_variable_member[k], Vpts, spts)
outputs["member_variable_height"][k] = s_end - spts[0]
cg_variable = np.dot(V_variable_member, cg_variable_member) / V_variable
outputs["variable_center_of_mass"] = cg_variable
# Now find total system mass
outputs["system_mass"] = m_sys + m_variable
outputs["system_center_of_mass"] = (
m_sys * outputs["system_structural_center_of_mass"] + m_variable * cg_variable
) / (m_sys + m_variable)
# Compute the total cg for the platform and the variable ballast together using a weighted sum approach
cg_plat_total = (m_variable * cg_variable + m_platform * cg_platform) / (m_variable + m_platform)
outputs["platform_total_center_of_mass"] = cg_plat_total
# Now loop again to compute variable I
unit_z = np.array([0.0, 0.0, 1.0])
I_variable = np.zeros((3, 3))
for k in range(n_member):
if V_variable_member[k] == 0.0:
continue
xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz[:, 0] == NULL)[0][0]
xyz = xyz[:inodes, :]
vec_k = xyz[-1, :] - xyz[0, :]
ds = outputs["member_variable_height"][k]
# Compute I aligned with member
h_k = ds * np.sqrt(np.sum(vec_k ** 2))
if h_k == 0.0:
continue
r_k = np.sqrt(V_variable_member[k] / h_k / np.pi)
I_k = (
m_variable_member[k] * np.r_[(3 * r_k ** 2 + h_k ** 2) / 12.0 * np.ones(2), 0.5 * r_k ** 2, np.ones(3)]
)
# Rotate I to global c.s.
T = util.rotate_align_vectors(vec_k, unit_z)
I_k_rot = T @ util.assembleI(I_k) @ T.T
# Now do parallel axis theorem
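            # I about the common variable-ballast CG: I_k (rotated) + m * (|R|^2 * eye(3) - outer(R, R))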
R = cg_variable - cg_variable_member[k, :]
I_variable += np.array(I_k_rot) + m_variable_member[k] * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Find platform I with variable contribution
I_total = np.zeros((3, 3))
# Compute the full moment of inertia for the platform and variable ballast
R = cg_plat_total - cg_platform
I_total += I_platform + m_platform * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
R = cg_plat_total - cg_variable
I_total += I_variable + m_variable * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
outputs["platform_I_total"] = util.unassembleI(I_total)
class FrameAnalysis(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_attach = opt["mooring"]["n_attach"]
self.add_input("platform_mass", 0.0, units="kg")
self.add_input("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_input("platform_added_mass", np.zeros(6), units="kg")
self.add_input("platform_center_of_buoyancy", np.zeros(3), units="m")
self.add_input("platform_displacement", 0.0, units="m**3")
self.add_input("tower_nodes", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_input("tower_Fnode", NULL * np.ones((MEMMAX, 3)), units="N")
self.add_input("tower_Rnode", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_n1", NULL * np.ones(MEMMAX))
self.add_input("tower_elem_n2", NULL * np.ones(MEMMAX))
self.add_input("tower_elem_D", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_t", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_L", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
self.add_input("tower_elem_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_Px1", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Px2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Py1", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Py2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Pz1", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Pz2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("system_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_input("system_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_input("system_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_input("system_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("system_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("system_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("system_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("system_elem_L", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("system_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("system_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("system_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("system_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("system_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("system_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("system_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_input("system_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("system_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("system_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("system_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("system_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("system_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("system_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("system_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("transition_piece_mass", 0.0, units="kg")
self.add_input("transition_piece_I", np.zeros(6), units="kg*m**2")
self.add_input("rna_F", np.zeros(3), units="N")
self.add_input("rna_M", np.zeros(3), units="N*m")
self.add_input("mooring_neutral_load", np.zeros((n_attach, 3)), units="N")
self.add_input("mooring_fairlead_joints", np.zeros((n_attach, 3)), units="m")
self.add_input("mooring_stiffness", np.zeros((6, 6)), units="N/m")
self.add_input("variable_ballast_mass", 0.0, units="kg")
self.add_input("variable_center_of_mass", val=np.zeros(3), units="m")
NFREQ2 = int(NFREQ / 2)
self.add_output("tower_freqs", val=np.zeros(NFREQ), units="Hz")
self.add_output("tower_fore_aft_modes", val=np.zeros((NFREQ2, 5)))
self.add_output("tower_side_side_modes", val=np.zeros((NFREQ2, 5)))
self.add_output("tower_torsion_modes", val=np.zeros((NFREQ2, 5)))
self.add_output("tower_fore_aft_freqs", val=np.zeros(NFREQ2))
self.add_output("tower_side_side_freqs", val=np.zeros(NFREQ2))
self.add_output("tower_torsion_freqs", val=np.zeros(NFREQ2))
self.add_output("system_base_F", np.zeros(3), units="N")
self.add_output("system_base_M", np.zeros(3), units="N*m")
self.add_output("system_Fz", NULL * np.ones(NELEM_MAX), units="N")
self.add_output("system_Vx", NULL * np.ones(NELEM_MAX), units="N")
self.add_output("system_Vy", NULL * np.ones(NELEM_MAX), units="N")
self.add_output("system_Mxx", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_output("system_Myy", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_output("system_Mzz", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_output("tower_base_F", np.zeros(3), units="N")
self.add_output("tower_base_M", np.zeros(3), units="N*m")
self.add_output("tower_Fz", NULL * np.ones(NELEM_MAX), units="N")
self.add_output("tower_Vx", NULL * np.ones(NELEM_MAX), units="N")
self.add_output("tower_Vy", NULL * np.ones(NELEM_MAX), units="N")
self.add_output("tower_Mxx", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_output("tower_Myy", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_output("tower_Mzz", NULL * np.ones(NELEM_MAX), units="N*m")
def compute(self, inputs, outputs):
# Unpack variables
opt = self.options["options"]
n_attach = opt["mooring"]["n_attach"]
cb = inputs["platform_center_of_buoyancy"]
V_total = inputs["platform_displacement"]
I_trans = inputs["transition_piece_I"]
m_variable = float(inputs["variable_ballast_mass"])
cg_variable = inputs["variable_center_of_mass"]
fairlead_joints = inputs["mooring_fairlead_joints"]
mooringF = inputs["mooring_neutral_load"]
mooringK = np.abs(np.diag(inputs["mooring_stiffness"]))
# Create frame3dd instance: nodes, elements, reactions, and options
for frame in ["tower", "system"]:
nodes = inputs[f"{frame}_nodes"]
nnode = np.where(nodes[:, 0] == NULL)[0][0]
nodes = nodes[:nnode, :]
rnode = np.zeros(nnode) # inputs[f"{frame}_Rnode"][:nnode]
Fnode = inputs[f"{frame}_Fnode"][:nnode, :]
Mnode = np.zeros((nnode, 3))
ihub = np.argmax(nodes[:, 2]) - 1
itrans = util.closest_node(nodes, inputs["transition_node"])
N1 = np.int_(inputs[f"{frame}_elem_n1"])
nelem = np.where(N1 == NULL)[0][0]
N1 = N1[:nelem]
N2 = np.int_(inputs[f"{frame}_elem_n2"][:nelem])
A = inputs[f"{frame}_elem_A"][:nelem]
Asx = inputs[f"{frame}_elem_Asx"][:nelem]
Asy = inputs[f"{frame}_elem_Asy"][:nelem]
Ixx = inputs[f"{frame}_elem_Ixx"][:nelem]
Iyy = inputs[f"{frame}_elem_Iyy"][:nelem]
Izz = inputs[f"{frame}_elem_Izz"][:nelem]
rho = inputs[f"{frame}_elem_rho"][:nelem]
E = inputs[f"{frame}_elem_E"][:nelem]
G = inputs[f"{frame}_elem_G"][:nelem]
roll = np.zeros(nelem)
L = inputs[f"{frame}_elem_L"][:nelem] # np.sqrt(np.sum((nodes[N2, :] - nodes[N1, :]) ** 2, axis=1))
inodes = np.arange(nnode) + 1
node_obj = pyframe3dd.NodeData(inodes, nodes[:, 0], nodes[:, 1], nodes[:, 2], rnode)
ielem = np.arange(nelem) + 1
elem_obj = pyframe3dd.ElementData(ielem, N1 + 1, N2 + 1, A, Asx, Asy, Izz, Ixx, Iyy, E, G, roll, rho)
# Use Mooring stiffness (TODO Hydro_K too)
if frame == "tower":
rid = np.array([itrans]) # np.array([np.argmin(nodes[:, 2])])
else:
ind = []
for k in range(n_attach):
ind.append(util.closest_node(nodes, fairlead_joints[k, :]))
rid = np.array([ind]) # np.array([np.argmin(nodes[:, 2])])
Rx = Ry = Rz = Rxx = Ryy = Rzz = RIGID * np.ones(rid.size)
# Rx, Ry, Rz = [mooringK[0]], [mooringK[1]], [mooringK[2]]
            # Only this solution works, and there isn't much difference from the fully rigid case
# Rx, Ry, Rz = [RIGID], [RIGID], [mooringK[2]]
# Rxx, Ryy, Rzz = [RIGID], [RIGID], [RIGID]
react_obj = pyframe3dd.ReactionData(rid + 1, Rx, Ry, Rz, Rxx, Ryy, Rzz, rigid=RIGID)
frame3dd_opt = opt["WISDEM"]["FloatingSE"]["frame3dd"]
opt_obj = pyframe3dd.Options(frame3dd_opt["shear"], frame3dd_opt["geom"], -1.0)
myframe = pyframe3dd.Frame(node_obj, react_obj, elem_obj, opt_obj)
# Added mass
m_trans = float(inputs["transition_piece_mass"])
if frame == "tower":
m_trans += float(inputs["platform_mass"]) + inputs["platform_added_mass"][0] + m_variable
cg_trans = inputs["transition_node"] - inputs["platform_hull_center_of_mass"]
I_trans[:3] += inputs["platform_added_mass"][3:]
else:
m_trans += m_variable
cg_trans = np.zeros(3)
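            # For the tower-only model, the entire platform (hull mass, added mass, and variable
            # ballast) is lumped as a point mass at the transition piece; for the full system model
            # only the variable ballast is lumped here, since the member elements carry their own mass.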
add_gravity = True
mID = np.array([itrans], dtype=np.int_).flatten()
m_add = np.array([m_trans]).flatten()
I_add = np.c_[I_trans]
cg_add = np.c_[cg_trans]
myframe.changeExtraNodeMass(
mID + 1,
m_add,
I_add[0, :],
I_add[1, :],
I_add[2, :],
I_add[3, :],
I_add[4, :],
I_add[5, :],
cg_add[0, :],
cg_add[1, :],
cg_add[2, :],
add_gravity,
)
# Dynamics
if frame == "tower" and frame3dd_opt["modal"]:
Mmethod = 1
lump = 0
shift = 0.0
myframe.enableDynamics(2 * NFREQ, Mmethod, lump, frame3dd_opt["tol"], shift)
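                # Request 2*NFREQ modes so that enough remain in each of the fore-aft, side-side,
                # and torsion families once the mode shapes are sorted below.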
# Initialize loading with gravity, mooring line forces, and buoyancy (already in nodal forces)
gx = gy = 0.0
gz = -gravity
load_obj = pyframe3dd.StaticLoadCase(gx, gy, gz)
if frame == "system":
for k in range(n_attach):
ind = util.closest_node(nodes, fairlead_joints[k, :])
Fnode[ind, :] += mooringF[k, :]
else:
# Combine all buoyancy forces into one
ind = util.closest_node(nodes, cb)
Fnode[ind, -1] += V_total * 1025 * gravity
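                # All buoyancy is lumped at the node closest to the center of buoyancy; note the
                # hard-coded seawater density of 1025 kg/m**3 here rather than the rho_water input.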
Fnode[ihub, :] += inputs["rna_F"]
Mnode[ihub, :] += inputs["rna_M"]
nF = np.where(np.abs(Fnode).sum(axis=1) > 0.0)[0]
load_obj.changePointLoads(
nF + 1, Fnode[nF, 0], Fnode[nF, 1], Fnode[nF, 2], Mnode[nF, 0], Mnode[nF, 1], Mnode[nF, 2]
)
# trapezoidally distributed loads
xx1 = xy1 = xz1 = np.zeros(ielem.size)
            xx2 = xy2 = xz2 = 0.99 * L  # multiply by slightly less than unity because of floating-point precision
wx1 = inputs[f"{frame}_elem_Px1"][:nelem]
wx2 = inputs[f"{frame}_elem_Px2"][:nelem]
wy1 = inputs[f"{frame}_elem_Py1"][:nelem]
wy2 = inputs[f"{frame}_elem_Py2"][:nelem]
wz1 = inputs[f"{frame}_elem_Pz1"][:nelem]
wz2 = inputs[f"{frame}_elem_Pz2"][:nelem]
load_obj.changeTrapezoidalLoads(ielem, xx1, xx2, wx1, wx2, xy1, xy2, wy1, wy2, xz1, xz2, wz1, wz2)
# Add the load case and run
myframe.addLoadCase(load_obj)
# myframe.write(f"{frame}.3dd")
# myframe.draw()
displacements, forces, reactions, internalForces, mass, modal = myframe.run()
            # Natural frequencies
if frame == "tower" and frame3dd_opt["modal"]:
outputs[f"{frame}_freqs"] = modal.freq[:NFREQ]
# Get all mode shapes in batch
NFREQ2 = int(NFREQ / 2)
freq_x, freq_y, freq_z, mshapes_x, mshapes_y, mshapes_z = util.get_xyz_mode_shapes(
nodes[:, 2], modal.freq, modal.xdsp, modal.ydsp, modal.zdsp, modal.xmpf, modal.ympf, modal.zmpf
)
outputs[f"{frame}_fore_aft_freqs"] = freq_x[:NFREQ2]
outputs[f"{frame}_side_side_freqs"] = freq_y[:NFREQ2]
outputs[f"{frame}_torsion_freqs"] = freq_z[:NFREQ2]
outputs[f"{frame}_fore_aft_modes"] = mshapes_x[:NFREQ2, :]
outputs[f"{frame}_side_side_modes"] = mshapes_y[:NFREQ2, :]
outputs[f"{frame}_torsion_modes"] = mshapes_z[:NFREQ2, :]
# Determine reaction forces
outputs[f"{frame}_base_F"] = -np.array([reactions.Fx.sum(), reactions.Fy.sum(), reactions.Fz.sum()])
outputs[f"{frame}_base_M"] = -np.array([reactions.Mxx.sum(), reactions.Myy.sum(), reactions.Mzz.sum()])
# Forces and moments along the structure
ic = 0 # case number
outputs[f"{frame}_Fz"] = NULL * np.ones(NELEM_MAX)
outputs[f"{frame}_Vx"] = NULL * np.ones(NELEM_MAX)
outputs[f"{frame}_Vy"] = NULL * np.ones(NELEM_MAX)
outputs[f"{frame}_Mxx"] = NULL * np.ones(NELEM_MAX)
outputs[f"{frame}_Myy"] = NULL * np.ones(NELEM_MAX)
outputs[f"{frame}_Mzz"] = NULL * np.ones(NELEM_MAX)
outputs[f"{frame}_Fz"][:nelem] = forces.Nx[ic, 1::2]
outputs[f"{frame}_Vx"][:nelem] = -forces.Vz[ic, 1::2]
outputs[f"{frame}_Vy"][:nelem] = forces.Vy[ic, 1::2]
outputs[f"{frame}_Mxx"][:nelem] = -forces.Mzz[ic, 1::2]
outputs[f"{frame}_Myy"][:nelem] = forces.Myy[ic, 1::2]
outputs[f"{frame}_Mzz"][:nelem] = forces.Txx[ic, 1::2]
class FloatingPost(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
self.add_input("tower_elem_L", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_D", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_t", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_qdyn", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("system_elem_L", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("system_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("system_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("system_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("system_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("system_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("system_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("system_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("system_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("system_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("system_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("system_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("system_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
# Processed Frame3DD/OpenFAST outputs
self.add_input("system_Fz", NULL * np.ones(NELEM_MAX), units="N")
self.add_input("system_Vx", NULL * np.ones(NELEM_MAX), units="N")
self.add_input("system_Vy", NULL * np.ones(NELEM_MAX), units="N")
self.add_input("system_Mxx", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_input("system_Myy", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_input("system_Mzz", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_input("tower_Fz", NULL * np.ones(NELEM_MAX), units="N")
self.add_input("tower_Vx", NULL * np.ones(NELEM_MAX), units="N")
self.add_input("tower_Vy", NULL * np.ones(NELEM_MAX), units="N")
self.add_input("tower_Mxx", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_input("tower_Myy", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_input("tower_Mzz", NULL * np.ones(NELEM_MAX), units="N*m")
self.add_output("constr_tower_stress", NULL * np.ones(NELEM_MAX))
self.add_output("constr_tower_shell_buckling", NULL * np.ones(NELEM_MAX))
self.add_output("constr_tower_global_buckling", NULL * np.ones(NELEM_MAX))
self.add_output("constr_system_stress", NULL * np.ones(NELEM_MAX))
self.add_output("constr_system_shell_buckling", NULL * np.ones(NELEM_MAX))
self.add_output("constr_system_global_buckling", NULL * np.ones(NELEM_MAX))
def compute(self, inputs, outputs):
# Loop over tower and system analysis
for frame in ["tower", "system"]:
# Unpack some variables
d = inputs[f"{frame}_elem_D"]
nelem = np.where(d == NULL)[0][0]
d = d[:nelem]
t = inputs[f"{frame}_elem_t"][:nelem]
h = inputs[f"{frame}_elem_L"][:nelem]
Az = inputs[f"{frame}_elem_A"][:nelem]
Asx = inputs[f"{frame}_elem_Asx"][:nelem]
Jz = inputs[f"{frame}_elem_Izz"][:nelem]
Iyy = inputs[f"{frame}_elem_Iyy"][:nelem]
sigy = inputs[f"{frame}_elem_sigma_y"][:nelem]
E = inputs[f"{frame}_elem_E"][:nelem]
G = inputs[f"{frame}_elem_G"][:nelem]
qdyn = inputs[f"{frame}_elem_qdyn"][:nelem]
r = 0.5 * d
gamma_f = self.options["options"]["gamma_f"]
gamma_m = self.options["options"]["gamma_m"]
gamma_n = self.options["options"]["gamma_n"]
gamma_b = self.options["options"]["gamma_b"]
            # Get loads from Frame3dd/OpenFAST
Fz = inputs[f"{frame}_Fz"][:nelem]
Vx = inputs[f"{frame}_Vx"][:nelem]
Vy = inputs[f"{frame}_Vy"][:nelem]
Mxx = inputs[f"{frame}_Mxx"][:nelem]
Myy = inputs[f"{frame}_Myy"][:nelem]
Mzz = inputs[f"{frame}_Mzz"][:nelem]
M = np.sqrt(Mxx ** 2 + Myy ** 2)
V = np.sqrt(Vx ** 2 + Vy ** 2)
# Initialize outputs
outputs[f"constr_{frame}_stress"] = NULL * np.ones(NELEM_MAX)
outputs[f"constr_{frame}_shell_buckling"] = NULL * np.ones(NELEM_MAX)
outputs[f"constr_{frame}_global_buckling"] = NULL * np.ones(NELEM_MAX)
# See http://svn.code.sourceforge.net/p/frame3dd/code/trunk/doc/Frame3DD-manual.html#structuralmodeling
# print(Fz.shape, Az.shape, M.shape, r.shape, Iyy.shape)
axial_stress = Fz / Az + M * r / Iyy
shear_stress = np.abs(Mzz) / Jz * r + V / Asx
hoop_stress = util_con.hoopStress(d, t, qdyn)
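            # The utilization below is roughly gamma * sigma_vonMises / sigma_y, with the thin-shell
            # von Mises stress sigma_vm = sqrt(sa**2 + sh**2 - sa*sh + 3*tau**2).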
outputs[f"constr_{frame}_stress"][:nelem] = util_con.vonMisesStressUtilization(
axial_stress, hoop_stress, shear_stress, gamma_f * gamma_m * gamma_n, sigy
)
            # Use the DNV-GL RP-C202 buckling method
check = util_dnvgl.CylinderBuckling(h, d, t, E=E, G=G, sigma_y=sigy, gamma=gamma_f * gamma_b)
results = check.run_buckling_checks(Fz, M, axial_stress, hoop_stress, shear_stress)
outputs[f"constr_{frame}_shell_buckling"][:nelem] = results["Shell"]
outputs[f"constr_{frame}_global_buckling"][:nelem] = results["Global"]
class FloatingFrame(om.Group):
def initialize(self):
self.options.declare("modeling_options")
def setup(self):
opt = self.options["modeling_options"]
self.add_subsystem("plat", PlatformFrame(options=opt), promotes=["*"])
self.add_subsystem("pre", TowerPreMember(), promotes=["*"])
prom = [
"E_mat",
"G_mat",
"sigma_y_mat",
"sigma_ult_mat",
"wohler_exp_mat",
"wohler_A_mat",
"rho_mat",
"rho_water",
"unit_cost_mat",
"material_names",
"painting_cost_rate",
"labor_cost_rate",
]
prom += [
("nodes_xyz", "tower_nodes"),
("nodes_r", "tower_Rnode"),
("total_mass", "tower_mass"),
("total_cost", "tower_cost"),
("center_of_mass", "tower_center_of_mass"),
("joint1", "transition_node"),
("joint2", "tower_top_node"),
("Px", "tower_elem_Px"),
("Py", "tower_elem_Py"),
("Pz", "tower_elem_Pz"),
("qdyn", "tower_elem_qdyn"),
]
for var in ["D", "t", "A", "Asx", "Asy", "rho", "Ixx", "Iyy", "Izz", "E", "G", "sigma_y"]:
prom += [("section_" + var, "tower_elem_" + var)]
prom += [
"Uref",
"zref",
"z0",
"shearExp",
"cd_usr",
"cm",
"beta_wind",
"rho_air",
"mu_air",
"beta_wave",
"mu_water",
"Uc",
"Hsig_wave",
"Tsig_wave",
"water_depth",
]
self.add_subsystem(
"tower",
Member(column_options=opt["floating"]["tower"], idx=0, n_mat=opt["materials"]["n_mat"]),
promotes=prom,
)
self.add_subsystem("mux", PlatformTowerFrame(options=opt), promotes=["*"])
self.add_subsystem("frame", FrameAnalysis(options=opt), promotes=["*"])
self.add_subsystem("post", FloatingPost(options=opt["WISDEM"]["FloatingSE"]), promotes=["*"])
```
#### File: wisdem/floatingse/floating.py
```python
import openmdao.api as om
from wisdem.floatingse.member import Member
from wisdem.floatingse.mooring import Mooring
from wisdem.floatingse.constraints import FloatingConstraints
from wisdem.floatingse.floating_frame import FloatingFrame
class FloatingSE(om.Group):
def initialize(self):
self.options.declare("modeling_options")
def setup(self):
opt = self.options["modeling_options"]
# self.set_input_defaults("mooring_type", "chain")
# self.set_input_defaults("anchor_type", "SUCTIONPILE")
# self.set_input_defaults("loading", "hydrostatic")
# self.set_input_defaults("wave_period_range_low", 2.0, units="s")
# self.set_input_defaults("wave_period_range_high", 20.0, units="s")
# self.set_input_defaults("cd_usr", -1.0)
# self.set_input_defaults("zref", 100.0)
# self.set_input_defaults("number_of_offset_columns", 0)
# self.set_input_defaults("material_names", ["steel"])
n_member = opt["floating"]["members"]["n_members"]
mem_prom = [
"E_mat",
"G_mat",
"sigma_y_mat",
"sigma_ult_mat",
"wohler_exp_mat",
"wohler_A_mat",
"rho_mat",
"rho_water",
"unit_cost_mat",
"material_names",
"painting_cost_rate",
"labor_cost_rate",
]
mem_prom += [
"Uref",
"zref",
"z0",
"shearExp",
"cd_usr",
"cm",
"beta_wind",
"rho_air",
"mu_air",
"beta_wave",
"mu_water",
"Uc",
"Hsig_wave",
"Tsig_wave",
"water_depth",
]
for k in range(n_member):
self.add_subsystem(
f"member{k}",
Member(column_options=opt["floating"]["members"], idx=k, n_mat=opt["materials"]["n_mat"]),
promotes=mem_prom,
)
# Next run MapMooring
self.add_subsystem(
"mm", Mooring(options=opt["mooring"], gamma=opt["WISDEM"]["FloatingSE"]["gamma_f"]), promotes=["*"]
)
# Add in the connecting truss
self.add_subsystem("load", FloatingFrame(modeling_options=opt), promotes=["*"])
# Evaluate system constraints
self.add_subsystem("cons", FloatingConstraints(modeling_options=opt), promotes=["*"])
# Connect all input variables from all models
mem_vars = [
"nodes_xyz",
"nodes_r",
"section_D",
"section_t",
"section_A",
"section_Asx",
"section_Asy",
"section_Ixx",
"section_Iyy",
"section_Izz",
"section_rho",
"section_E",
"section_G",
"section_sigma_y",
"idx_cb",
"variable_ballast_capacity",
"variable_ballast_Vpts",
"variable_ballast_spts",
"constr_ballast_capacity",
"buoyancy_force",
"displacement",
"center_of_buoyancy",
"center_of_mass",
"ballast_mass",
"total_mass",
"total_cost",
"I_total",
"Awater",
"Iwater",
"added_mass",
"waterline_centroid",
"Px",
"Py",
"Pz",
"qdyn",
]
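        # PlatformFrame declares its per-member inputs with colon names (e.g. "member0:nodes_xyz");
        # the loop below wires each Member subsystem's outputs to those colon-named inputs.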
for k in range(n_member):
for var in mem_vars:
self.connect(f"member{k}." + var, f"member{k}:" + var)
```
#### File: wisdem/floatingse/visualize.py
```python
import numpy as np
from mayavi import mlab
def sectional2nodal(x):
return np.r_[x[0], np.convolve(x, [0.5, 0.5], "valid"), x[-1]]
def nodal2sectional(x):
return 0.5 * (x[:-1] + x[1:])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
class Visualize(object):
def __init__(self, prob):
prob.run_model()
self.prob = prob
self.fig = None
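        # Typical usage (illustrative sketch; assumes "prob" is a configured FloatingSE problem):
        #   viz = Visualize(prob)
        #   viz.draw_semi("semi.png")  # or viz.draw_spar("spar.png")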
def draw_spar(self, fname="spar.png"):
self.init_figure()
self.draw_ocean()
self.draw_mooring(self.prob["mooring_plot_matrix"])
zcut = 1.0 + self.prob["main_freeboard"]
self.draw_pontoons(self.prob["plot_matrix"], 0.5 * self.prob["fairlead_support_outer_diameter"], zcut)
self.draw_column(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"],
self.prob["main.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["main.wall_thickness"])
self.draw_ballast(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"] - t_full,
self.prob["main.permanent_ballast_height"],
self.prob["variable_ballast_height"],
)
self.draw_column(
[0.0, 0.0],
self.prob["hub_height"],
self.prob["tow.tower_section_height"],
0.5 * self.prob["tow.tower_outer_diameter"],
None,
(0.9,) * 3,
)
if self.prob["main.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
self.prob["main.buoyancy_tank_location"],
0.5 * self.prob["main.buoyancy_tank_diameter"],
self.prob["main.buoyancy_tank_height"],
)
self.set_figure(fname)
def draw_semi(self, fname="semi.png"):
self.init_figure()
self.draw_ocean()
self.draw_mooring(self.prob["mooring_plot_matrix"])
pontoonMat = self.prob["plot_matrix"]
zcut = 1.0 + np.maximum(self.prob["main_freeboard"], self.prob["offset_freeboard"])
self.draw_pontoons(pontoonMat, 0.5 * self.prob["pontoon_outer_diameter"], zcut)
self.draw_column(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"],
self.prob["main.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["main.wall_thickness"])
self.draw_ballast(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
0.5 * self.prob["main.outer_diameter"] - t_full,
self.prob["main.permanent_ballast_height"],
self.prob["variable_ballast_height"],
)
if self.prob["main.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[0.0, 0.0],
self.prob["main_freeboard"],
self.prob["main.section_height"],
self.prob["main.buoyancy_tank_location"],
0.5 * self.prob["main.buoyancy_tank_diameter"],
self.prob["main.buoyancy_tank_height"],
)
R_semi = self.prob["radius_to_offset_column"]
ncolumn = int(self.prob["number_of_offset_columns"])
angles = np.linspace(0, 2 * np.pi, ncolumn + 1)
x = R_semi * np.cos(angles)
y = R_semi * np.sin(angles)
for k in range(ncolumn):
self.draw_column(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
0.5 * self.prob["off.outer_diameter"],
self.prob["off.stiffener_spacing"],
)
t_full = sectional2nodal(self.prob["off.wall_thickness"])
self.draw_ballast(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
0.5 * self.prob["off.outer_diameter"] - t_full,
self.prob["off.permanent_ballast_height"],
0.0,
)
if self.prob["off.buoyancy_tank_mass"] > 0.0:
self.draw_buoyancy_tank(
[x[k], y[k]],
self.prob["offset_freeboard"],
self.prob["off.section_height"],
self.prob["off.buoyancy_tank_location"],
0.5 * self.prob["off.buoyancy_tank_diameter"],
self.prob["off.buoyancy_tank_height"],
)
self.draw_column(
[0.0, 0.0],
self.prob["hub_height"],
self.prob["tow.tower_section_height"],
0.5 * self.prob["tow.tower_outer_diameter"],
None,
(0.9,) * 3,
)
self.set_figure(fname)
def init_figure(self):
mysky = np.array([135, 206, 250]) / 255.0
mysky = tuple(mysky.tolist())
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# fig = mlab.figure(bgcolor=(1,)*3, size=(1600,1100))
# fig = mlab.figure(bgcolor=mysky, size=(1600,1100))
self.fig = mlab.figure(bgcolor=(0,) * 3, size=(1600, 1100))
def draw_ocean(self):
if self.fig is None:
self.init_figure()
npts = 100
# mybrown = np.array([244, 170, 66]) / 255.0
# mybrown = tuple(mybrown.tolist())
mywater = np.array([95, 158, 160]) / 255.0 # (0.0, 0.0, 0.8) [143, 188, 143]
mywater = tuple(mywater.tolist())
alpha = 0.3
# Waterplane box
x = y = 100 * np.linspace(-1, 1, npts)
X, Y = np.meshgrid(x, y)
Z = np.sin(100 * X * Y) # np.zeros(X.shape)
# ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
mlab.mesh(X, Y, Z, opacity=alpha, color=mywater, figure=self.fig)
# Sea floor
Z = -self.prob["water_depth"] * np.ones(X.shape)
# ax.plot_surface(10*X, 10*Y, Z, alpha=1.0, color=mybrown)
# mlab.mesh(10*X,10*Y,Z, opacity=1.0, color=mybrown, figure=self.fig)
# Sides
# x = 500 * np.linspace(-1, 1, npts)
# z = self.prob['water_depth'] * np.linspace(-1, 0, npts)
# X,Z = np.meshgrid(x,z)
# Y = x.max()*np.ones(Z.shape)
##ax.plot_surface(X, Y, Z, alpha=alpha, color=mywater)
# mlab.mesh(X,Y,Z, opacity=alpha, color=mywater, figure=self.fig)
# mlab.mesh(X,-Y,Z, opacity=alpha, color=mywater, figure=self.fig)
# mlab.mesh(Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)
##mlab.mesh(-Y,X,Z, opacity=alpha, color=mywater, figure=self.fig)
def draw_mooring(self, mooring):
mybrown = np.array([244, 170, 66]) / 255.0
mybrown = tuple(mybrown.tolist())
npts = 100
# Sea floor
print(self.prob["anchor_radius"])
r = np.linspace(0, self.prob["anchor_radius"], npts)
th = np.linspace(0, 2 * np.pi, npts)
R, TH = np.meshgrid(r, th)
X = R * np.cos(TH)
Y = R * np.sin(TH)
Z = -self.prob["water_depth"] * np.ones(X.shape)
# ax.plot_surface(X, Y, Z, alpha=1.0, color=mybrown)
mlab.mesh(X, Y, Z, opacity=1.0, color=mybrown, figure=self.fig)
cmoor = (0, 0.8, 0)
nlines = int(self.prob["number_of_mooring_connections"] * self.prob["mooring_lines_per_connection"])
for k in range(nlines):
# ax.plot(mooring[k,:,0], mooring[k,:,1], mooring[k,:,2], 'k', lw=2)
mlab.plot3d(
mooring[k, :, 0],
mooring[k, :, 1],
mooring[k, :, 2],
color=cmoor,
tube_radius=0.5 * self.prob["mooring_diameter"],
figure=self.fig,
)
def draw_pontoons(self, truss, R, freeboard):
nE = truss.shape[0]
c = (0.5, 0, 0)
for k in range(nE):
if np.any(truss[k, 2, :] > freeboard):
continue
mlab.plot3d(truss[k, 0, :], truss[k, 1, :], truss[k, 2, :], color=c, tube_radius=R, figure=self.fig)
def draw_column(self, centerline, freeboard, h_section, r_nodes, spacingVec=None, ckIn=None):
npts = 20
nsection = h_section.size
z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
th = np.linspace(0, 2 * np.pi, npts)
for k in range(nsection):
rk = np.linspace(r_nodes[k], r_nodes[k + 1], npts)
z = np.linspace(z_nodes[k], z_nodes[k + 1], npts)
R, TH = np.meshgrid(rk, th)
Z, _ = np.meshgrid(z, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
# Draw parameters
if ckIn is None:
ck = (0.6,) * 3 if np.mod(k, 2) == 0 else (0.4,) * 3
else:
ck = ckIn
# ax.plot_surface(X, Y, Z, alpha=0.5, color=ck)
mlab.mesh(X, Y, Z, opacity=0.7, color=ck, figure=self.fig)
if spacingVec is None:
continue
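            # Draw the ring stiffeners as thin hoops every spacingVec[k] meters along this section.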
z = z_nodes[k] + spacingVec[k]
while z < z_nodes[k + 1]:
rk = np.interp(z, z_nodes[k:], r_nodes[k:])
# ax.plot(rk*np.cos(th), rk*np.sin(th), z*np.ones(th.shape), 'r', lw=0.25)
mlab.plot3d(
rk * np.cos(th) + centerline[0],
rk * np.sin(th) + centerline[1],
z * np.ones(th.shape),
color=(0.5, 0, 0),
figure=self.fig,
)
z += spacingVec[k]
"""
# Web
r = np.linspace(rk - self.prob['stiffener_web_height'][k], rk, npts)
R, TH = np.meshgrid(r, th)
Z, _ = np.meshgrid(z, th)
X = R*np.cos(TH)
Y = R*np.sin(TH)
ax.plot_surface(X, Y, Z, alpha=0.7, color='r')
# Flange
r = r[0]
h = np.linspace(0, self.prob['stiffener_flange_width'][k], npts)
zflange = z + h - 0.5*self.prob['stiffener_flange_width'][k]
R, TH = np.meshgrid(r, th)
Z, _ = np.meshgrid(zflange, th)
X = R*np.cos(TH)
Y = R*np.sin(TH)
ax.plot_surface(X, Y, Z, alpha=0.7, color='r')
"""
def draw_ballast(self, centerline, freeboard, h_section, r_nodes, h_perm, h_water):
npts = 40
th = np.linspace(0, 2 * np.pi, npts)
z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
# Permanent ballast
z_perm = z_nodes[0] + np.linspace(0, h_perm, npts)
r_perm = np.interp(z_perm, z_nodes, r_nodes)
R, TH = np.meshgrid(r_perm, th)
Z, _ = np.meshgrid(z_perm, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
ck = np.array([122, 85, 33]) / 255.0
ck = tuple(ck.tolist())
mlab.mesh(X, Y, Z, color=ck, figure=self.fig)
# Water ballast
z_water = z_perm[-1] + np.linspace(0, h_water, npts)
r_water = np.interp(z_water, z_nodes, r_nodes)
R, TH = np.meshgrid(r_water, th)
Z, _ = np.meshgrid(z_water, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
ck = (0.0, 0.1, 0.8) # Dark blue
mlab.mesh(X, Y, Z, color=ck, figure=self.fig)
def draw_buoyancy_tank(self, centerline, freeboard, h_section, loc, r_box, h_box):
npts = 20
z_nodes = np.flipud(freeboard - np.r_[0.0, np.cumsum(np.flipud(h_section))])
z_lower = loc * (z_nodes[-1] - z_nodes[0]) + z_nodes[0]
# Lower and Upper surfaces
r = np.linspace(0, r_box, npts)
th = np.linspace(0, 2 * np.pi, npts)
R, TH = np.meshgrid(r, th)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
Z = z_lower * np.ones(X.shape)
ck = (0.9,) * 3
mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
Z += h_box
mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
# Cylinder part
z = z_lower + np.linspace(0, h_box, npts)
Z, TH = np.meshgrid(z, th)
R = r_box * np.ones(Z.shape)
X = R * np.cos(TH) + centerline[0]
Y = R * np.sin(TH) + centerline[1]
mlab.mesh(X, Y, Z, opacity=0.5, color=ck, figure=self.fig)
def set_figure(self, fname=None):
# ax.set_aspect('equal')
# set_axes_equal(ax)
# ax.autoscale_view(tight=True)
# ax.set_xlim([-125, 125])
# ax.set_ylim([-125, 125])
# ax.set_zlim([-220, 30])
# plt.axis('off')
# plt.show()
# mlab.move([-517.16728532, -87.0711504, 5.60826224], [1.35691603e+01, -2.84217094e-14, -1.06547500e+02])
# mlab.view(-170.68320804213343, 78.220729198686854, 549.40101471336777, [1.35691603e+01, 0.0, -1.06547500e+02])
        if fname is not None:
fpart = fname.split(".")
if len(fpart) == 1 or not fpart[-1].lower() in ["jpg", "png", "bmp"]:
fname += ".png"
mlab.savefig(fname, figure=self.fig)
mlab.show()
```
#### File: wisdem/glue_code/gc_LoadInputs.py
```python
import numpy as np
import wisdem.inputs as sch
class WindTurbineOntologyPython(object):
# Pure python class to load the input yaml file and break into few sub-dictionaries, namely:
# - modeling_options: dictionary with all the inputs that will be passed as options to the openmdao components, such as the length of the arrays
# - blade: dictionary representing the entry blade in the yaml file
# - tower: dictionary representing the entry tower in the yaml file
# - nacelle: dictionary representing the entry nacelle in the yaml file
# - materials: dictionary representing the entry materials in the yaml file
# - airfoils: dictionary representing the entry airfoils in the yaml file
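    # A minimal usage sketch (file names are illustrative only):
    #   wt = WindTurbineOntologyPython("turbine.yaml", "modeling.yaml", "analysis.yaml")
    #   wt_init, modeling_options, analysis_options = wt.get_input_data()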
def __init__(self, fname_input_wt, fname_input_modeling, fname_input_analysis):
self.modeling_options = sch.load_modeling_yaml(fname_input_modeling)
self.analysis_options = sch.load_analysis_yaml(fname_input_analysis)
if fname_input_wt is None:
self.wt_init = None
else:
self.wt_init = sch.load_geometry_yaml(fname_input_wt)
self.set_run_flags()
self.set_openmdao_vectors()
self.set_opt_flags()
def get_input_data(self):
return self.wt_init, self.modeling_options, self.analysis_options
def set_run_flags(self):
# Create components flag struct
self.modeling_options["flags"] = {}
# Backwards compatibility
modules = ["RotorSE", "DriveSE", "GeneratorSE", "TowerSE", "FloatingSE", "Loading", "BOS"]
for m in modules:
if m in self.modeling_options:
self.modeling_options["WISDEM"][m].update(self.modeling_options[m])
for k in ["blade", "hub", "nacelle", "tower", "monopile", "floating_platform", "mooring", "RNA"]:
self.modeling_options["flags"][k] = k in self.wt_init["components"]
for k in ["assembly", "components", "airfoils", "materials", "control", "environment", "bos", "costs"]:
self.modeling_options["flags"][k] = k in self.wt_init
# Generator flag
self.modeling_options["flags"]["generator"] = False
if self.modeling_options["flags"]["nacelle"] and "generator" in self.wt_init["components"]["nacelle"]:
self.modeling_options["flags"]["generator"] = True
if not "GeneratorSE" in self.modeling_options["WISDEM"]:
self.modeling_options["WISDEM"]["GeneratorSE"] = {}
self.modeling_options["WISDEM"]["GeneratorSE"]["type"] = self.wt_init["components"]["nacelle"]["generator"][
"generator_type"
].lower()
# Offshore flags
self.modeling_options["flags"]["floating"] = self.modeling_options["flags"]["floating_platform"]
self.modeling_options["flags"]["offshore"] = (
self.modeling_options["flags"]["floating"] or self.modeling_options["flags"]["monopile"]
)
        # Consistency logic for which input blocks are required together
flags = self.modeling_options["flags"]
# Even if the block is in the inputs, the user can turn off via modeling options
if flags["bos"]:
flags["bos"] = self.modeling_options["WISDEM"]["BOS"]["flag"]
if flags["blade"]:
flags["blade"] = self.modeling_options["WISDEM"]["RotorSE"]["flag"]
if flags["tower"]:
flags["tower"] = self.modeling_options["WISDEM"]["TowerSE"]["flag"]
if flags["hub"]:
flags["hub"] = self.modeling_options["WISDEM"]["DriveSE"]["flag"]
if flags["nacelle"]:
flags["nacelle"] = self.modeling_options["WISDEM"]["DriveSE"]["flag"]
if flags["generator"]:
flags["generator"] = self.modeling_options["WISDEM"]["DriveSE"]["flag"]
flags["hub"] = flags["nacelle"] = flags["hub"] or flags["nacelle"] # Hub and nacelle have to go together
# Blades and airfoils
if flags["blade"] and not flags["airfoils"]:
raise ValueError("Blades/rotor analysis is requested but no airfoils are found")
if flags["airfoils"] and not flags["blade"]:
print("WARNING: Airfoils provided but no blades/rotor found or RotorSE deactivated")
# Blades, tower, monopile and environment
if flags["blade"] and not flags["environment"]:
raise ValueError("Blades/rotor analysis is requested but no environment input found")
if flags["tower"] and not flags["environment"]:
raise ValueError("Tower analysis is requested but no environment input found")
if flags["monopile"] and not flags["environment"]:
raise ValueError("Monopile analysis is requested but no environment input found")
if flags["floating_platform"] and not flags["environment"]:
raise ValueError("Floating analysis is requested but no environment input found")
if flags["environment"] and not (
flags["blade"] or flags["tower"] or flags["monopile"] or flags["floating_platform"]
):
print("WARNING: Environment provided but no related component found found")
# Floating/monopile
if flags["floating_platform"] and flags["monopile"]:
raise ValueError("Cannot have both floating and monopile components")
# Water depth check
if "water_depth" in self.wt_init["environment"]:
if self.wt_init["environment"]["water_depth"] <= 0.0 and flags["offshore"]:
raise ValueError("Water depth must be > 0 to do monopile or floating analysis")
def set_openmdao_vectors(self):
        # Determine all the parameters used to initialize the openmdao arrays, e.g. the number of airfoils, the number of angles of attack, and the number of blade spanwise stations
# ==modeling_options = {}
# Materials
self.modeling_options["materials"] = {}
self.modeling_options["materials"]["n_mat"] = len(self.wt_init["materials"])
# Airfoils
if self.modeling_options["flags"]["airfoils"]:
self.modeling_options["WISDEM"]["RotorSE"]["n_af"] = len(self.wt_init["airfoils"])
self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"] = self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"]
if self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"] / 4.0 == int(
self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"] / 4.0
):
                # One fourth of the angles of attack from -pi to -pi/6, half between -pi/6 and pi/6, and one fourth from pi/6 to pi
self.modeling_options["WISDEM"]["RotorSE"]["aoa"] = np.unique(
np.hstack(
[
np.linspace(
-np.pi, -np.pi / 6.0, int(self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"] / 4.0 + 1)
),
np.linspace(
-np.pi / 6.0,
np.pi / 6.0,
int(self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"] / 2.0),
),
np.linspace(
np.pi / 6.0, np.pi, int(self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"] / 4.0 + 1)
),
]
)
)
else:
self.modeling_options["WISDEM"]["RotorSE"]["aoa"] = np.linspace(
-np.pi, np.pi, self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"]
)
                print(
                    "WARNING: For a grid of angles of attack that is more refined between +/- 30 deg, please choose an n_aoa in the analysis options input file that is a multiple of 4. The current value of "
                    + str(self.modeling_options["WISDEM"]["RotorSE"]["n_aoa"])
                    + " is not a multiple of 4, so an equally spaced grid is adopted."
                )
Re_all = []
self.modeling_options["WISDEM"]["RotorSE"]["AFTabMod"] = 1
for i in range(self.modeling_options["WISDEM"]["RotorSE"]["n_af"]):
for j in range(len(self.wt_init["airfoils"][i]["polars"])):
Re_all.append(self.wt_init["airfoils"][i]["polars"][j]["re"])
if len(self.wt_init["airfoils"][i]["polars"]) > 1:
self.modeling_options["WISDEM"]["RotorSE"]["AFTabMod"] = 2
self.modeling_options["WISDEM"]["RotorSE"]["n_Re"] = len(np.unique(Re_all))
self.modeling_options["WISDEM"]["RotorSE"]["n_tab"] = 1
self.modeling_options["WISDEM"]["RotorSE"]["n_xy"] = self.modeling_options["WISDEM"]["RotorSE"]["n_xy"]
self.modeling_options["WISDEM"]["RotorSE"]["af_used"] = self.wt_init["components"]["blade"][
"outer_shape_bem"
]["airfoil_position"]["labels"]
# Blade
if self.modeling_options["flags"]["blade"]:
self.modeling_options["WISDEM"]["RotorSE"]["nd_span"] = np.linspace(
0.0, 1.0, self.modeling_options["WISDEM"]["RotorSE"]["n_span"]
) # Equally spaced non-dimensional spanwise grid
self.modeling_options["WISDEM"]["RotorSE"]["n_af_span"] = len(
self.wt_init["components"]["blade"]["outer_shape_bem"]["airfoil_position"]["labels"]
            )  # This is the number of airfoils defined along the blade span; it is often different from n_af, which is the number of airfoils defined in the airfoil database
self.modeling_options["WISDEM"]["RotorSE"]["n_webs"] = len(
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"]
)
self.modeling_options["WISDEM"]["RotorSE"]["n_layers"] = len(
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"]
)
self.modeling_options["WISDEM"]["RotorSE"]["lofted_output"] = False
self.modeling_options["WISDEM"]["RotorSE"]["n_freq"] = 10 # Number of blade nat frequencies computed
self.modeling_options["WISDEM"]["RotorSE"]["layer_name"] = self.modeling_options["WISDEM"]["RotorSE"][
"n_layers"
] * [""]
self.modeling_options["WISDEM"]["RotorSE"]["layer_mat"] = self.modeling_options["WISDEM"]["RotorSE"][
"n_layers"
] * [""]
for i in range(self.modeling_options["WISDEM"]["RotorSE"]["n_layers"]):
self.modeling_options["WISDEM"]["RotorSE"]["layer_name"][i] = self.wt_init["components"]["blade"][
"internal_structure_2d_fem"
]["layers"][i]["name"]
self.modeling_options["WISDEM"]["RotorSE"]["layer_mat"][i] = self.wt_init["components"]["blade"][
"internal_structure_2d_fem"
]["layers"][i]["material"]
self.modeling_options["WISDEM"]["RotorSE"]["web_name"] = self.modeling_options["WISDEM"]["RotorSE"][
"n_webs"
] * [""]
for i in range(self.modeling_options["WISDEM"]["RotorSE"]["n_webs"]):
self.modeling_options["WISDEM"]["RotorSE"]["web_name"][i] = self.wt_init["components"]["blade"][
"internal_structure_2d_fem"
]["webs"][i]["name"]
# Distributed aerodynamic control devices along blade
self.modeling_options["WISDEM"]["RotorSE"]["n_te_flaps"] = 0
if "aerodynamic_control" in self.wt_init["components"]["blade"]:
if "te_flaps" in self.wt_init["components"]["blade"]["aerodynamic_control"]:
self.modeling_options["WISDEM"]["RotorSE"]["n_te_flaps"] = len(
self.wt_init["components"]["blade"]["aerodynamic_control"]["te_flaps"]
)
self.modeling_options["WISDEM"]["RotorSE"]["n_tab"] = 3
else:
raise RuntimeError(
"A distributed aerodynamic control device is provided in the yaml input file, but not supported by wisdem."
)
# Drivetrain
if self.modeling_options["flags"]["nacelle"]:
self.modeling_options["WISDEM"]["DriveSE"]["direct"] = self.wt_init["assembly"]["drivetrain"].lower() in [
"direct",
"direct_drive",
"pm_direct_drive",
]
# Tower
if self.modeling_options["flags"]["tower"]:
self.modeling_options["WISDEM"]["TowerSE"]["n_height_tower"] = len(
self.wt_init["components"]["tower"]["outer_shape_bem"]["outer_diameter"]["grid"]
)
self.modeling_options["WISDEM"]["TowerSE"]["n_layers_tower"] = len(
self.wt_init["components"]["tower"]["internal_structure_2d_fem"]["layers"]
)
self.modeling_options["WISDEM"]["TowerSE"]["n_height"] = self.modeling_options["WISDEM"]["TowerSE"][
"n_height_tower"
]
self.modeling_options["WISDEM"]["TowerSE"]["n_height_monopile"] = 0
self.modeling_options["WISDEM"]["TowerSE"]["n_layers_monopile"] = 0
# Monopile
if self.modeling_options["flags"]["monopile"]:
self.modeling_options["WISDEM"]["TowerSE"]["n_height_monopile"] = len(
self.wt_init["components"]["monopile"]["outer_shape_bem"]["outer_diameter"]["grid"]
)
self.modeling_options["WISDEM"]["TowerSE"]["n_layers_monopile"] = len(
self.wt_init["components"]["monopile"]["internal_structure_2d_fem"]["layers"]
)
self.modeling_options["WISDEM"]["TowerSE"]["n_height"] += (
self.modeling_options["WISDEM"]["TowerSE"]["n_height_monopile"] - 1
)
# Floating platform
self.modeling_options["floating"] = {}
if self.modeling_options["flags"]["floating_platform"]:
n_joints = len(self.wt_init["components"]["floating_platform"]["joints"])
self.modeling_options["floating"]["joints"] = {}
self.modeling_options["floating"]["joints"]["n_joints"] = n_joints
self.modeling_options["floating"]["joints"]["name"] = [""] * n_joints
self.modeling_options["floating"]["joints"]["transition"] = [False] * n_joints
self.modeling_options["floating"]["joints"]["cylindrical"] = [False] * n_joints
for i in range(n_joints):
self.modeling_options["floating"]["joints"]["name"][i] = self.wt_init["components"][
"floating_platform"
]["joints"][i]["name"]
self.modeling_options["floating"]["joints"]["transition"][i] = self.wt_init["components"][
"floating_platform"
]["joints"][i]["transition"]
self.modeling_options["floating"]["joints"]["cylindrical"][i] = self.wt_init["components"][
"floating_platform"
]["joints"][i]["cylindrical"]
# Create name->index dictionary for joint names, will add on axial joints later
name2idx = dict(zip(self.modeling_options["floating"]["joints"]["name"], range(n_joints)))
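            # e.g., for joints named ["main", "col1", "col2"] (illustrative names)
            # this starts as {"main": 0, "col1": 1, "col2": 2}; axial joint names
            # are appended below with len(name2idx) as their index at insertion time.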
# Check that there is at most one transition joint
if self.modeling_options["floating"]["joints"]["transition"].count(True) > 1:
raise ValueError("Can only support one tower on the floating platform for now")
try:
itrans = self.modeling_options["floating"]["joints"]["transition"].index(True)
self.modeling_options["floating"]["transition_joint"] = itrans
            except ValueError:
self.modeling_options["floating"]["transition_joint"] = None
n_members = len(self.wt_init["components"]["floating_platform"]["members"])
self.modeling_options["floating"]["members"] = {}
self.modeling_options["floating"]["members"]["n_members"] = n_members
self.modeling_options["floating"]["members"]["name"] = [""] * n_members
self.modeling_options["floating"]["members"]["joint1"] = [""] * n_members
self.modeling_options["floating"]["members"]["joint2"] = [""] * n_members
self.modeling_options["floating"]["members"]["outer_shape"] = [""] * n_members
self.modeling_options["floating"]["members"]["n_height"] = np.zeros(n_members, dtype=int)
self.modeling_options["floating"]["members"]["n_layers"] = np.zeros(n_members, dtype=int)
self.modeling_options["floating"]["members"]["n_ballasts"] = np.zeros(n_members, dtype=int)
self.modeling_options["floating"]["members"]["n_bulkheads"] = np.zeros(n_members, dtype=int)
self.modeling_options["floating"]["members"]["n_axial_joints"] = np.zeros(n_members, dtype=int)
for i in range(n_members):
self.modeling_options["floating"]["members"]["name"][i] = self.wt_init["components"][
"floating_platform"
]["members"][i]["name"]
self.modeling_options["floating"]["members"]["joint1"][i] = self.wt_init["components"][
"floating_platform"
]["members"][i]["joint1"]
self.modeling_options["floating"]["members"]["joint2"][i] = self.wt_init["components"][
"floating_platform"
]["members"][i]["joint2"]
self.modeling_options["floating"]["members"]["outer_shape"][i] = self.wt_init["components"][
"floating_platform"
]["members"][i]["outer_shape"]["shape"]
grid = self.wt_init["components"]["floating_platform"]["members"][i]["outer_shape"]["outer_diameter"][
"grid"
][:]
n_layers = len(
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["layers"]
)
self.modeling_options["floating"]["members"]["n_layers"][i] = n_layers
if "ballasts" in self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]:
n_ballasts = len(
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["ballasts"]
)
else:
n_ballasts = 0
self.modeling_options["floating"]["members"]["n_ballasts"][i] = n_ballasts
# Add in bulkheads and enforce at least endcaps for submerged environment
# Don't add to master grid as they are handled differently in FloatingSE
if "bulkhead" in self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]:
bulkgrid = self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"][
"bulkhead"
]["thickness"]["grid"]
                    if 0.0 not in bulkgrid:
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
]["grid"].append(0.0)
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
]["values"].append(0.02)
                    if 1.0 not in bulkgrid:
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
]["grid"].append(1.0)
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
]["values"].append(0.02)
# grid += bulkgrid # Handled differently in the floating code
else:
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"] = {}
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"material"
] = self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["layers"][
0
][
"material"
]
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
] = {}
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
]["grid"] = [0.0, 1.0]
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
]["values"] = [0.02, 0.02]
n_bulk = len(
self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"]["bulkhead"][
"thickness"
]["grid"]
)
self.modeling_options["floating"]["members"]["n_bulkheads"][i] = n_bulk
self.modeling_options["floating"]["members"][
"layer_mat_member_" + self.modeling_options["floating"]["members"]["name"][i]
] = [""] * n_layers
for j in range(n_layers):
self.modeling_options["floating"]["members"][
"layer_mat_member_" + self.modeling_options["floating"]["members"]["name"][i]
][j] = self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"][
"layers"
][
j
][
"material"
]
grid += self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"][
"layers"
][j]["thickness"]["grid"]
self.modeling_options["floating"]["members"][
"ballast_flag_member_" + self.modeling_options["floating"]["members"]["name"][i]
] = [False] * n_ballasts
self.modeling_options["floating"]["members"][
"ballast_mat_member_" + self.modeling_options["floating"]["members"]["name"][i]
] = [""] * n_ballasts
for k in range(n_ballasts):
self.modeling_options["floating"]["members"][
"ballast_flag_member_" + self.modeling_options["floating"]["members"]["name"][i]
][k] = self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"][
"ballasts"
][
k
][
"variable_flag"
]
                    if not self.modeling_options["floating"]["members"][
                        "ballast_flag_member_" + self.modeling_options["floating"]["members"]["name"][i]
                    ][k]:
self.modeling_options["floating"]["members"][
"ballast_mat_member_" + self.modeling_options["floating"]["members"]["name"][i]
][k] = self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"][
"ballasts"
][
k
][
"material"
]
grid += self.wt_init["components"]["floating_platform"]["members"][i]["internal_structure"][
"ballasts"
][k]["grid"]
if "axial_joints" in self.wt_init["components"]["floating_platform"]["members"][i]:
n_axial_joints = len(self.wt_init["components"]["floating_platform"]["members"][i]["axial_joints"])
self.modeling_options["floating"]["members"]["n_axial_joints"][i] = n_axial_joints
self.modeling_options["floating"]["members"][
"axial_joint_name_member_" + self.modeling_options["floating"]["members"]["name"][i]
] = [""] * n_axial_joints
for m in range(n_axial_joints):
self.modeling_options["floating"]["members"][
"axial_joint_name_member_" + self.modeling_options["floating"]["members"]["name"][i]
][m] = self.wt_init["components"]["floating_platform"]["members"][i]["axial_joints"][m]["name"]
grid.append(
self.wt_init["components"]["floating_platform"]["members"][i]["axial_joints"][m]["grid"]
)
name2idx[
self.wt_init["components"]["floating_platform"]["members"][i]["axial_joints"][m]["name"]
] = len(name2idx)
else:
self.modeling_options["floating"]["members"]["n_axial_joints"][i] = 0
final_grid = np.unique(grid)
self.modeling_options["floating"]["members"][
"grid_member_" + self.modeling_options["floating"]["members"]["name"][i]
] = final_grid
self.modeling_options["floating"]["members"]["n_height"][i] = len(final_grid)
# Store joint info
self.modeling_options["floating"]["joints"]["name2idx"] = name2idx
# Floating tower params
self.modeling_options["floating"]["tower"] = {}
self.modeling_options["floating"]["tower"]["n_ballasts"] = [0]
self.modeling_options["floating"]["tower"]["n_bulkheads"] = [0]
self.modeling_options["floating"]["tower"]["n_axial_joints"] = [0]
if self.modeling_options["flags"]["tower"]:
self.modeling_options["floating"]["tower"]["n_height"] = [
self.modeling_options["WISDEM"]["TowerSE"]["n_height_tower"]
]
self.modeling_options["floating"]["tower"]["n_layers"] = [
self.modeling_options["WISDEM"]["TowerSE"]["n_layers_tower"]
]
else:
self.modeling_options["floating"]["tower"]["n_height"] = [0]
self.modeling_options["floating"]["tower"]["n_layers"] = [0]
# Mooring
self.modeling_options["mooring"] = {}
if self.modeling_options["flags"]["mooring"]:
n_nodes = len(self.wt_init["components"]["mooring"]["nodes"])
n_lines = len(self.wt_init["components"]["mooring"]["lines"])
n_line_types = len(self.wt_init["components"]["mooring"]["line_types"])
n_anchor_types = len(self.wt_init["components"]["mooring"]["anchor_types"])
self.modeling_options["mooring"]["symmetric"] = self.modeling_options["WISDEM"]["FloatingSE"][
"symmetric_moorings"
]
self.modeling_options["mooring"]["n_nodes"] = n_nodes
self.modeling_options["mooring"]["n_lines"] = n_lines
self.modeling_options["mooring"]["n_anchors"] = n_lines
self.modeling_options["mooring"]["n_line_types"] = n_line_types
self.modeling_options["mooring"]["n_anchor_types"] = n_anchor_types
self.modeling_options["mooring"]["node_type"] = [""] * n_nodes
self.modeling_options["mooring"]["node_names"] = [""] * n_nodes
self.modeling_options["mooring"]["anchor_type"] = [""] * n_nodes
self.modeling_options["mooring"]["fairlead_type"] = [""] * n_nodes
for i in range(n_nodes):
self.modeling_options["mooring"]["node_type"][i] = self.wt_init["components"]["mooring"]["nodes"][i][
"node_type"
]
self.modeling_options["mooring"]["node_names"][i] = self.wt_init["components"]["mooring"]["nodes"][i][
"name"
]
self.modeling_options["mooring"]["anchor_type"][i] = self.wt_init["components"]["mooring"]["nodes"][i][
"anchor_type"
]
self.modeling_options["mooring"]["fairlead_type"][i] = self.wt_init["components"]["mooring"]["nodes"][
i
]["fairlead_type"]
self.modeling_options["mooring"]["node1"] = [""] * n_lines
self.modeling_options["mooring"]["node2"] = [""] * n_lines
self.modeling_options["mooring"]["line_type"] = [""] * n_lines
self.modeling_options["mooring"]["line_material"] = [""] * n_lines
self.modeling_options["mooring"]["line_anchor"] = [""] * n_lines
fairlead_nodes = []
for i in range(n_lines):
self.modeling_options["mooring"]["node1"][i] = self.wt_init["components"]["mooring"]["lines"][i][
"node1"
]
self.modeling_options["mooring"]["node2"][i] = self.wt_init["components"]["mooring"]["lines"][i][
"node2"
]
self.modeling_options["mooring"]["line_type"][i] = self.wt_init["components"]["mooring"]["lines"][i][
"line_type"
]
# For the vessel attachments, find the list of fairlead nodes on the structure
node1id = self.modeling_options["mooring"]["node_names"].index(
self.modeling_options["mooring"]["node1"][i]
)
node2id = self.modeling_options["mooring"]["node_names"].index(
self.modeling_options["mooring"]["node2"][i]
)
if self.modeling_options["mooring"]["node_type"][node1id] == "vessel":
fairlead_nodes.append(self.wt_init["components"]["mooring"]["nodes"][node1id]["joint"])
if self.modeling_options["mooring"]["node_type"][node2id] == "vessel":
fairlead_nodes.append(self.wt_init["components"]["mooring"]["nodes"][node2id]["joint"])
# Store the anchor type names to start
if "fix" in self.modeling_options["mooring"]["node_type"][node1id]:
self.modeling_options["mooring"]["line_anchor"][i] = self.modeling_options["mooring"][
"anchor_type"
][node1id]
if "fix" in self.modeling_options["mooring"]["node_type"][node2id]:
self.modeling_options["mooring"]["line_anchor"][i] = self.modeling_options["mooring"][
"anchor_type"
][node2id]
self.modeling_options["mooring"]["line_type_name"] = [""] * n_line_types
self.modeling_options["mooring"]["line_type_type"] = [""] * n_line_types
for i in range(n_line_types):
self.modeling_options["mooring"]["line_type_name"][i] = self.wt_init["components"]["mooring"][
"line_types"
][i]["name"]
self.modeling_options["mooring"]["line_type_type"][i] = self.wt_init["components"]["mooring"][
"line_types"
][i]["type"].lower()
for j in range(n_lines):
if (
self.modeling_options["mooring"]["line_type"][j]
== self.modeling_options["mooring"]["line_type_name"][i]
):
self.modeling_options["mooring"]["line_material"][j] = self.modeling_options["mooring"][
"line_type_type"
][i]
self.modeling_options["mooring"]["anchor_type_name"] = [""] * n_anchor_types
self.modeling_options["mooring"]["anchor_type_type"] = [""] * n_anchor_types
for i in range(n_anchor_types):
self.modeling_options["mooring"]["anchor_type_name"][i] = self.wt_init["components"]["mooring"][
"anchor_types"
][i]["name"]
self.modeling_options["mooring"]["anchor_type_type"][i] = self.wt_init["components"]["mooring"][
"anchor_types"
][i]["type"].lower()
for j in range(n_lines):
if (
self.modeling_options["mooring"]["line_anchor"][j]
== self.modeling_options["mooring"]["anchor_type_name"][i]
):
self.modeling_options["mooring"]["line_anchor"][j] = self.modeling_options["mooring"][
"anchor_type_type"
][i]
self.modeling_options["mooring"]["n_attach"] = len(set(fairlead_nodes))
# Assembly
self.modeling_options["assembly"] = {}
self.modeling_options["assembly"]["number_of_blades"] = int(self.wt_init["assembly"]["number_of_blades"])
def set_opt_flags(self):
# Recursively look for flags to set global optimization flag
def recursive_flag(d):
opt_flag = False
for k, v in d.items():
if isinstance(v, dict):
opt_flag = opt_flag or recursive_flag(v)
elif k == "flag":
opt_flag = opt_flag or v
return opt_flag
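        # Worked example of recursive_flag (illustrative values only):
        #   recursive_flag({"a": {"flag": False}, "b": {"c": {"flag": True}}})
        # returns True, since at least one nested "flag" entry is True.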
# The user can provide `opt_flag` in analysis_options.yaml,
# but if it's not provided, we check the individual opt flags
# from analysis_options.yaml and set a global `opt_flag`
if "opt_flag" in self.analysis_options["driver"]:
self.analysis_options["opt_flag"] = self.analysis_options["driver"]["opt_flag"]
else:
self.analysis_options["opt_flag"] = recursive_flag(self.analysis_options["design_variables"])
        # If not an optimization DV, then the number of points should be the same as the discretization
blade_opt_options = self.analysis_options["design_variables"]["blade"]
if not blade_opt_options["aero_shape"]["twist"]["flag"]:
blade_opt_options["aero_shape"]["twist"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"]["n_span"]
elif blade_opt_options["aero_shape"]["twist"]["n_opt"] < 4:
raise ValueError("Cannot optimize twist with less than 4 control points along blade span")
if not blade_opt_options["aero_shape"]["chord"]["flag"]:
blade_opt_options["aero_shape"]["chord"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"]["n_span"]
elif blade_opt_options["aero_shape"]["chord"]["n_opt"] < 4:
raise ValueError("Cannot optimize chord with less than 4 control points along blade span")
if not blade_opt_options["aero_shape"]["t/c"]["flag"]:
blade_opt_options["aero_shape"]["t/c"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"]["n_span"]
elif blade_opt_options["aero_shape"]["t/c"]["n_opt"] < 4:
raise ValueError("Cannot optimize t/c with less than 4 control points along blade span")
if not blade_opt_options["aero_shape"]["L/D"]["flag"]:
blade_opt_options["aero_shape"]["L/D"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"]["n_span"]
elif blade_opt_options["aero_shape"]["L/D"]["n_opt"] < 4:
raise ValueError("Cannot optimize L/D with less than 4 control points along blade span")
if not blade_opt_options["structure"]["spar_cap_ss"]["flag"]:
blade_opt_options["structure"]["spar_cap_ss"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"][
"n_span"
]
elif blade_opt_options["structure"]["spar_cap_ss"]["n_opt"] < 4:
raise ValueError("Cannot optimize spar cap suction side with less than 4 control points along blade span")
if not blade_opt_options["structure"]["spar_cap_ps"]["flag"]:
blade_opt_options["structure"]["spar_cap_ps"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"][
"n_span"
]
elif blade_opt_options["structure"]["spar_cap_ps"]["n_opt"] < 4:
raise ValueError("Cannot optimize spar cap pressure side with less than 4 control points along blade span")
if not blade_opt_options["structure"]["te_ss"]["flag"]:
blade_opt_options["structure"]["te_ss"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"]["n_span"]
elif blade_opt_options["structure"]["te_ss"]["n_opt"] < 4:
raise ValueError(
"Cannot optimize trailing edge suction side with less than 4 control points along blade span"
)
if not blade_opt_options["structure"]["te_ps"]["flag"]:
blade_opt_options["structure"]["te_ps"]["n_opt"] = self.modeling_options["WISDEM"]["RotorSE"]["n_span"]
elif blade_opt_options["structure"]["te_ps"]["n_opt"] < 4:
raise ValueError(
"Cannot optimize trailing edge pressure side with less than 4 control points along blade span"
)
# Handle linked joints and members in floating platform
if self.modeling_options["flags"]["floating"]:
float_opt_options = self.analysis_options["design_variables"]["floating"]
# First the joints
dv_info = []
for c in ["z", "r"]:
for idv in float_opt_options["joints"][c + "_coordinate"]:
inames = idv["names"]
idx = [self.modeling_options["floating"]["joints"]["name"].index(m) for m in inames]
idict = {}
idict["indices"] = idx
idict["dimension"] = 0 if c == "r" else 2
dv_info.append(idict)
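            # Each dv_info entry pairs joint indices with the coordinate being
            # designed: dimension 0 is the radial r-coordinate (valid only for
            # cylindrical joints, checked below), dimension 2 is the vertical z.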
# Check for r-coordinate dv and cylindrical consistency
for idict in dv_info:
if idict["dimension"] != 0:
continue
for k in idict["indices"]:
m = self.modeling_options["floating"]["joints"]["name"][k]
if not self.modeling_options["floating"]["joints"]["cylindrical"][k]:
raise ValueError(f"Cannot optimize r-coordinate of, {m}, becase it is not a cylindrical joint")
# Store DV information for needed linking and IVC assignment
self.modeling_options["floating"]["joints"]["design_variable_data"] = dv_info
# Now the members
memgrps = [[m] for m in self.modeling_options["floating"]["members"]["name"]]
for idv in float_opt_options["members"]["groups"]:
inames = idv["names"]
idx = [self.modeling_options["floating"]["members"]["name"].index(m) for m in inames]
for k in range(1, len(idx)):
try:
memgrps[idx[k]].remove(inames[k])
memgrps[idx[0]].append(inames[k])
except ValueError:
raise ValueError("Cannot put member," + inames[k] + ", as part of multiple groups")
# Remove entries for members that are now linked with others
while [] in memgrps:
memgrps.remove([])
self.modeling_options["floating"]["members"]["linked_members"] = memgrps
            # Make a name-to-group-index lookup
name2grp = {}
for k, kgrp in enumerate(memgrps):
for kname in kgrp:
name2grp[kname] = k
self.modeling_options["floating"]["members"]["name2idx"] = name2grp
def write_ontology(self, wt_opt, fname_output):
# Update blade
if self.modeling_options["flags"]["blade"]:
# Update blade outer shape
self.wt_init["components"]["blade"]["outer_shape_bem"]["airfoil_position"]["grid"] = wt_opt[
"blade.opt_var.af_position"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["chord"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["chord"]["values"] = wt_opt[
"blade.pa.chord_param"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["twist"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["twist"]["values"] = wt_opt["rotorse.theta"].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["pitch_axis"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["pitch_axis"]["values"] = wt_opt[
"blade.outer_shape_bem.pitch_axis"
].tolist()
if self.modeling_options["WISDEM"]["RotorSE"]["inn_af"]:
self.wt_init["components"]["blade"]["outer_shape_bem"]["t/c"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["t/c"]["values"] = wt_opt[
"blade.interp_airfoils.r_thick_interp"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["L/D"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["L/D"]["values"] = wt_opt[
"rotorse.rp.powercurve.L_D"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["c_d"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["c_d"]["values"] = wt_opt[
"rotorse.rp.powercurve.cd_regII"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["stall_margin"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
stall_margin = np.deg2rad(
wt_opt["rotorse.stall_check.stall_angle_along_span"] - wt_opt["rotorse.stall_check.aoa_along_span"]
)
self.wt_init["components"]["blade"]["outer_shape_bem"]["stall_margin"]["values"] = stall_margin.tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["reference_axis"]["x"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["reference_axis"]["y"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["reference_axis"]["z"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["reference_axis"]["x"]["values"] = wt_opt[
"blade.outer_shape_bem.ref_axis"
][:, 0].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["reference_axis"]["y"]["values"] = wt_opt[
"blade.outer_shape_bem.ref_axis"
][:, 1].tolist()
self.wt_init["components"]["blade"]["outer_shape_bem"]["reference_axis"]["z"]["values"] = wt_opt[
"blade.outer_shape_bem.ref_axis"
][:, 2].tolist()
# Update blade structure
# Reference axis from blade outer shape
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["reference_axis"] = self.wt_init[
"components"
]["blade"]["outer_shape_bem"]["reference_axis"]
# Webs positions
for i in range(self.modeling_options["WISDEM"]["RotorSE"]["n_webs"]):
if "rotation" in self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"]:
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["rotation"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["rotation"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.web_rotation"][i, :].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["offset_y_pa"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["offset_y_pa"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.web_offset_y_pa"][i, :].tolist()
if "start_nd_arc" not in self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]:
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["start_nd_arc"] = {}
if "end_nd_arc" not in self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]:
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["end_nd_arc"] = {}
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["start_nd_arc"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["start_nd_arc"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.web_start_nd"][i, :].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["end_nd_arc"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["webs"][i]["end_nd_arc"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.web_end_nd"][i, :].tolist()
# Structural layers
for i in range(self.modeling_options["WISDEM"]["RotorSE"]["n_layers"]):
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["thickness"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["thickness"][
"values"
] = wt_opt["blade.ps.layer_thickness_param"][i, :].tolist()
if wt_opt["blade.internal_structure_2d_fem.definition_layer"][i] < 7:
if (
"start_nd_arc"
not in self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]
):
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i][
"start_nd_arc"
] = {}
if (
"end_nd_arc"
not in self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]
):
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["end_nd_arc"] = {}
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["start_nd_arc"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["start_nd_arc"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.layer_start_nd"][i, :].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["end_nd_arc"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["end_nd_arc"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.layer_end_nd"][i, :].tolist()
                if 1 < wt_opt["blade.internal_structure_2d_fem.definition_layer"][i] < 6:
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["width"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["width"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.layer_width"][i, :].tolist()
                if wt_opt["blade.internal_structure_2d_fem.definition_layer"][i] in (2, 3):
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["rotation"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["rotation"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.layer_rotation"][i, :].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["offset_y_pa"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["offset_y_pa"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.layer_offset_y_pa"][i, :].tolist()
                if wt_opt["blade.internal_structure_2d_fem.definition_layer"][i] in (4, 5):
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["midpoint_nd_arc"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["midpoint_nd_arc"][
"values"
] = wt_opt["blade.internal_structure_2d_fem.layer_midpoint_nd"][i, :].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["fiber_orientation"] = {}
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["fiber_orientation"][
"grid"
] = wt_opt["blade.internal_structure_2d_fem.s"].tolist()
self.wt_init["components"]["blade"]["internal_structure_2d_fem"]["layers"][i]["fiber_orientation"][
"values"
] = np.zeros(len(wt_opt["blade.internal_structure_2d_fem.s"])).tolist()
# Elastic properties of the blade
self.wt_init["components"]["blade"]["elastic_properties_mb"] = {}
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"] = {}
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["reference_axis"] = self.wt_init[
"components"
]["blade"]["internal_structure_2d_fem"]["reference_axis"]
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["twist"] = self.wt_init[
"components"
]["blade"]["outer_shape_bem"]["twist"]
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["stiff_matrix"] = {}
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["stiff_matrix"]["grid"] = wt_opt[
"blade.outer_shape_bem.s"
].tolist()
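            # The 21 values per grid station are consistent with the upper
            # triangle of a 6x6 stiffness matrix stored in row-major order:
            # index 11 -> (3,3) EA, 15 -> (4,4) EIxx, 18 -> (5,5) EIyy,
            # 20 -> (6,6) GJ (1-based rows/columns).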
K = []
for i in range(self.modeling_options["WISDEM"]["RotorSE"]["n_span"]):
Ki = np.zeros(21)
Ki[11] = wt_opt["rotorse.EA"][i]
Ki[15] = wt_opt["rotorse.EIxx"][i]
Ki[18] = wt_opt["rotorse.EIyy"][i]
Ki[20] = wt_opt["rotorse.GJ"][i]
K.append(Ki.tolist())
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["stiff_matrix"]["values"] = K
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["inertia_matrix"] = {}
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["inertia_matrix"][
"grid"
] = wt_opt["blade.outer_shape_bem.s"].tolist()
I = []
for i in range(self.modeling_options["WISDEM"]["RotorSE"]["n_span"]):
Ii = np.zeros(21)
Ii[0] = wt_opt["rotorse.rhoA"][i]
Ii[5] = -wt_opt["rotorse.rhoA"][i] * wt_opt["rotorse.re.y_cg"][i]
Ii[6] = wt_opt["rotorse.rhoA"][i]
Ii[10] = wt_opt["rotorse.rhoA"][i] * wt_opt["rotorse.re.x_cg"][i]
Ii[11] = wt_opt["rotorse.rhoA"][i]
Ii[12] = wt_opt["rotorse.rhoA"][i] * wt_opt["rotorse.re.y_cg"][i]
Ii[13] = -wt_opt["rotorse.rhoA"][i] * wt_opt["rotorse.re.x_cg"][i]
Ii[15] = wt_opt["rotorse.re.precomp.edge_iner"][i]
Ii[16] = wt_opt["rotorse.re.precomp.edge_iner"][i]
# Ii[18] = wt_opt['re.precomp.edge_iner'][i]
Ii[20] = wt_opt["rotorse.rhoJ"][i]
I.append(Ii.tolist())
self.wt_init["components"]["blade"]["elastic_properties_mb"]["six_x_six"]["inertia_matrix"]["values"] = I
# Update hub
if self.modeling_options["flags"]["hub"]:
# Update hub
self.wt_init["components"]["hub"]["cone_angle"] = float(wt_opt["hub.cone"])
self.wt_init["components"]["hub"]["flange_t2shell_t"] = float(wt_opt["hub.flange_t2shell_t"])
self.wt_init["components"]["hub"]["flange_OD2hub_D"] = float(wt_opt["hub.flange_OD2hub_D"])
self.wt_init["components"]["hub"]["flange_ID2OD"] = float(wt_opt["hub.flange_ID2flange_OD"])
self.wt_init["components"]["hub"]["hub_blade_spacing_margin"] = float(wt_opt["hub.hub_in2out_circ"])
self.wt_init["components"]["hub"]["hub_stress_concentration"] = float(
wt_opt["hub.hub_stress_concentration"]
)
self.wt_init["components"]["hub"]["n_front_brackets"] = int(wt_opt["hub.n_front_brackets"])
self.wt_init["components"]["hub"]["n_rear_brackets"] = int(wt_opt["hub.n_rear_brackets"])
self.wt_init["components"]["hub"]["clearance_hub_spinner"] = float(wt_opt["hub.clearance_hub_spinner"])
self.wt_init["components"]["hub"]["spin_hole_incr"] = float(wt_opt["hub.spin_hole_incr"])
self.wt_init["components"]["hub"]["pitch_system_scaling_factor"] = float(
wt_opt["hub.pitch_system_scaling_factor"]
)
self.wt_init["components"]["hub"]["spinner_gust_ws"] = float(wt_opt["hub.spinner_gust_ws"])
# Update nacelle
if self.modeling_options["flags"]["nacelle"]:
# Common direct and geared
self.wt_init["components"]["nacelle"]["drivetrain"]["uptilt"] = float(wt_opt["nacelle.uptilt"])
self.wt_init["components"]["nacelle"]["drivetrain"]["distance_tt_hub"] = float(
wt_opt["nacelle.distance_tt_hub"]
)
self.wt_init["components"]["nacelle"]["drivetrain"]["overhang"] = float(wt_opt["nacelle.overhang"])
self.wt_init["components"]["nacelle"]["drivetrain"]["distance_hub_mb"] = float(
wt_opt["nacelle.distance_hub2mb"]
)
self.wt_init["components"]["nacelle"]["drivetrain"]["distance_mb_mb"] = float(
wt_opt["nacelle.distance_mb2mb"]
)
self.wt_init["components"]["nacelle"]["drivetrain"]["generator_length"] = float(
wt_opt["nacelle.L_generator"]
)
s_lss = np.linspace(0.0, 1.0, len(wt_opt["nacelle.lss_diameter"])).tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["lss_diameter"] = wt_opt[
"nacelle.lss_diameter"
].tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["lss_wall_thickness"] = wt_opt[
"nacelle.lss_wall_thickness"
].tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["gear_ratio"] = float(wt_opt["nacelle.gear_ratio"])
self.wt_init["components"]["nacelle"]["drivetrain"]["gearbox_efficiency"] = float(
wt_opt["nacelle.gearbox_efficiency"]
)
self.wt_init["components"]["nacelle"]["drivetrain"]["mb1Type"] = wt_opt["nacelle.mb1Type"]
self.wt_init["components"]["nacelle"]["drivetrain"]["mb2Type"] = wt_opt["nacelle.mb2Type"]
self.wt_init["components"]["nacelle"]["drivetrain"]["uptower"] = wt_opt["nacelle.uptower"]
self.wt_init["components"]["nacelle"]["drivetrain"]["lss_material"] = wt_opt["nacelle.lss_material"]
self.wt_init["components"]["nacelle"]["drivetrain"]["bedplate_material"] = wt_opt[
"nacelle.bedplate_material"
]
if self.modeling_options["WISDEM"]["DriveSE"]["direct"]:
# Direct only
s_nose = np.linspace(0.0, 1.0, len(wt_opt["nacelle.nose_diameter"])).tolist()
s_bed = np.linspace(0.0, 1.0, len(wt_opt["nacelle.bedplate_wall_thickness"])).tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["nose_diameter"] = wt_opt[
"nacelle.nose_diameter"
].tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["nose_wall_thickness"] = wt_opt[
"nacelle.nose_wall_thickness"
].tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["bedplate_wall_thickness"]["grid"] = s_bed
self.wt_init["components"]["nacelle"]["drivetrain"]["bedplate_wall_thickness"]["values"] = wt_opt[
"nacelle.bedplate_wall_thickness"
].tolist()
else:
# Geared only
s_hss = np.linspace(0.0, 1.0, len(wt_opt["nacelle.hss_diameter"])).tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["hss_length"] = float(wt_opt["nacelle.hss_length"])
self.wt_init["components"]["nacelle"]["drivetrain"]["hss_diameter"] = wt_opt[
"nacelle.hss_diameter"
].tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["hss_wall_thickness"] = wt_opt[
"nacelle.hss_wall_thickness"
].tolist()
self.wt_init["components"]["nacelle"]["drivetrain"]["bedplate_flange_width"] = float(
wt_opt["nacelle.bedplate_flange_width"]
)
self.wt_init["components"]["nacelle"]["drivetrain"]["bedplate_flange_thickness"] = float(
wt_opt["nacelle.bedplate_flange_thickness"]
)
self.wt_init["components"]["nacelle"]["drivetrain"]["bedplate_web_thickness"] = float(
wt_opt["nacelle.bedplate_web_thickness"]
)
self.wt_init["components"]["nacelle"]["drivetrain"]["gear_configuration"] = wt_opt[
"nacelle.gear_configuration"
]
self.wt_init["components"]["nacelle"]["drivetrain"]["planet_numbers"] = wt_opt["nacelle.planet_numbers"]
self.wt_init["components"]["nacelle"]["drivetrain"]["hss_material"] = wt_opt["nacelle.hss_material"]
# Update generator
if self.modeling_options["flags"]["generator"]:
self.wt_init["components"]["nacelle"]["generator"]["B_r"] = float(wt_opt["generator.B_r"])
self.wt_init["components"]["nacelle"]["generator"]["P_Fe0e"] = float(wt_opt["generator.P_Fe0e"])
self.wt_init["components"]["nacelle"]["generator"]["P_Fe0h"] = float(wt_opt["generator.P_Fe0h"])
self.wt_init["components"]["nacelle"]["generator"]["S_N"] = float(wt_opt["generator.S_N"])
self.wt_init["components"]["nacelle"]["generator"]["alpha_p"] = float(wt_opt["generator.alpha_p"])
self.wt_init["components"]["nacelle"]["generator"]["b_r_tau_r"] = float(wt_opt["generator.b_r_tau_r"])
self.wt_init["components"]["nacelle"]["generator"]["b_ro"] = float(wt_opt["generator.b_ro"])
self.wt_init["components"]["nacelle"]["generator"]["b_s_tau_s"] = float(wt_opt["generator.b_s_tau_s"])
self.wt_init["components"]["nacelle"]["generator"]["b_so"] = float(wt_opt["generator.b_so"])
self.wt_init["components"]["nacelle"]["generator"]["cofi"] = float(wt_opt["generator.cofi"])
self.wt_init["components"]["nacelle"]["generator"]["freq"] = float(wt_opt["generator.freq"])
self.wt_init["components"]["nacelle"]["generator"]["h_i"] = float(wt_opt["generator.h_i"])
self.wt_init["components"]["nacelle"]["generator"]["h_sy0"] = float(wt_opt["generator.h_sy0"])
self.wt_init["components"]["nacelle"]["generator"]["h_w"] = float(wt_opt["generator.h_w"])
self.wt_init["components"]["nacelle"]["generator"]["k_fes"] = float(wt_opt["generator.k_fes"])
self.wt_init["components"]["nacelle"]["generator"]["k_fillr"] = float(wt_opt["generator.k_fillr"])
self.wt_init["components"]["nacelle"]["generator"]["k_fills"] = float(wt_opt["generator.k_fills"])
self.wt_init["components"]["nacelle"]["generator"]["k_s"] = float(wt_opt["generator.k_s"])
self.wt_init["components"]["nacelle"]["generator"]["m"] = float(wt_opt["generator.m"])
self.wt_init["components"]["nacelle"]["generator"]["mu_0"] = float(wt_opt["generator.mu_0"])
self.wt_init["components"]["nacelle"]["generator"]["mu_r"] = float(wt_opt["generator.mu_r"])
self.wt_init["components"]["nacelle"]["generator"]["p"] = float(wt_opt["generator.p"])
self.wt_init["components"]["nacelle"]["generator"]["phi"] = float(wt_opt["generator.phi"])
self.wt_init["components"]["nacelle"]["generator"]["q1"] = float(wt_opt["generator.q1"])
self.wt_init["components"]["nacelle"]["generator"]["q2"] = float(wt_opt["generator.q2"])
self.wt_init["components"]["nacelle"]["generator"]["ratio_mw2pp"] = float(wt_opt["generator.ratio_mw2pp"])
self.wt_init["components"]["nacelle"]["generator"]["resist_Cu"] = float(wt_opt["generator.resist_Cu"])
self.wt_init["components"]["nacelle"]["generator"]["sigma"] = float(wt_opt["generator.sigma"])
self.wt_init["components"]["nacelle"]["generator"]["y_tau_p"] = float(wt_opt["generator.y_tau_p"])
self.wt_init["components"]["nacelle"]["generator"]["y_tau_pr"] = float(wt_opt["generator.y_tau_pr"])
self.wt_init["components"]["nacelle"]["generator"]["I_0"] = float(wt_opt["generator.I_0"])
self.wt_init["components"]["nacelle"]["generator"]["d_r"] = float(wt_opt["generator.d_r"])
self.wt_init["components"]["nacelle"]["generator"]["h_m"] = float(wt_opt["generator.h_m"])
self.wt_init["components"]["nacelle"]["generator"]["h_0"] = float(wt_opt["generator.h_0"])
self.wt_init["components"]["nacelle"]["generator"]["h_s"] = float(wt_opt["generator.h_s"])
self.wt_init["components"]["nacelle"]["generator"]["len_s"] = float(wt_opt["generator.len_s"])
self.wt_init["components"]["nacelle"]["generator"]["n_r"] = float(wt_opt["generator.n_r"])
self.wt_init["components"]["nacelle"]["generator"]["rad_ag"] = float(wt_opt["generator.rad_ag"])
self.wt_init["components"]["nacelle"]["generator"]["t_wr"] = float(wt_opt["generator.t_wr"])
self.wt_init["components"]["nacelle"]["generator"]["n_s"] = float(wt_opt["generator.n_s"])
self.wt_init["components"]["nacelle"]["generator"]["b_st"] = float(wt_opt["generator.b_st"])
self.wt_init["components"]["nacelle"]["generator"]["d_s"] = float(wt_opt["generator.d_s"])
self.wt_init["components"]["nacelle"]["generator"]["t_ws"] = float(wt_opt["generator.t_ws"])
self.wt_init["components"]["nacelle"]["generator"]["rho_Copper"] = float(wt_opt["generator.rho_Copper"])
self.wt_init["components"]["nacelle"]["generator"]["rho_Fe"] = float(wt_opt["generator.rho_Fe"])
self.wt_init["components"]["nacelle"]["generator"]["rho_Fes"] = float(wt_opt["generator.rho_Fes"])
self.wt_init["components"]["nacelle"]["generator"]["rho_PM"] = float(wt_opt["generator.rho_PM"])
self.wt_init["components"]["nacelle"]["generator"]["C_Cu"] = float(wt_opt["generator.C_Cu"])
self.wt_init["components"]["nacelle"]["generator"]["C_Fe"] = float(wt_opt["generator.C_Fe"])
self.wt_init["components"]["nacelle"]["generator"]["C_Fes"] = float(wt_opt["generator.C_Fes"])
self.wt_init["components"]["nacelle"]["generator"]["C_PM"] = float(wt_opt["generator.C_PM"])
if self.modeling_options["WISDEM"]["GeneratorSE"]["type"] in ["pmsg_outer"]:
self.wt_init["components"]["nacelle"]["generator"]["N_c"] = float(wt_opt["generator.N_c"])
self.wt_init["components"]["nacelle"]["generator"]["b"] = float(wt_opt["generator.b"])
self.wt_init["components"]["nacelle"]["generator"]["c"] = float(wt_opt["generator.c"])
self.wt_init["components"]["nacelle"]["generator"]["E_p"] = float(wt_opt["generator.E_p"])
self.wt_init["components"]["nacelle"]["generator"]["h_yr"] = float(wt_opt["generator.h_yr"])
self.wt_init["components"]["nacelle"]["generator"]["h_ys"] = float(wt_opt["generator.h_ys"])
self.wt_init["components"]["nacelle"]["generator"]["h_sr"] = float(wt_opt["generator.h_sr"])
self.wt_init["components"]["nacelle"]["generator"]["h_ss"] = float(wt_opt["generator.h_ss"])
self.wt_init["components"]["nacelle"]["generator"]["t_r"] = float(wt_opt["generator.t_r"])
self.wt_init["components"]["nacelle"]["generator"]["t_s"] = float(wt_opt["generator.t_s"])
self.wt_init["components"]["nacelle"]["generator"]["u_allow_pcent"] = float(
wt_opt["generator.u_allow_pcent"]
)
self.wt_init["components"]["nacelle"]["generator"]["y_allow_pcent"] = float(
wt_opt["generator.y_allow_pcent"]
)
self.wt_init["components"]["nacelle"]["generator"]["z_allow_deg"] = float(
wt_opt["generator.z_allow_deg"]
)
self.wt_init["components"]["nacelle"]["generator"]["B_tmax"] = float(wt_opt["generator.B_tmax"])
if self.modeling_options["WISDEM"]["GeneratorSE"]["type"] in ["eesg", "pmsg_arms", "pmsg_disc"]:
self.wt_init["components"]["nacelle"]["generator"]["tau_p"] = float(wt_opt["generator.tau_p"])
self.wt_init["components"]["nacelle"]["generator"]["h_ys"] = float(wt_opt["generator.h_ys"])
self.wt_init["components"]["nacelle"]["generator"]["h_yr"] = float(wt_opt["generator.h_yr"])
self.wt_init["components"]["nacelle"]["generator"]["b_arm"] = float(wt_opt["generator.b_arm"])
elif self.modeling_options["WISDEM"]["GeneratorSE"]["type"] in ["scig", "dfig"]:
self.wt_init["components"]["nacelle"]["generator"]["B_symax"] = float(wt_opt["generator.B_symax"])
self.wt_init["components"]["nacelle"]["generator"]["S_Nmax"] = float(wt_opt["generator.S_Nmax"])
# Update tower
if self.modeling_options["flags"]["tower"]:
self.wt_init["components"]["tower"]["outer_shape_bem"]["outer_diameter"]["grid"] = wt_opt[
"tower_grid.s"
].tolist()
self.wt_init["components"]["tower"]["outer_shape_bem"]["outer_diameter"]["values"] = wt_opt[
"tower.diameter"
].tolist()
self.wt_init["components"]["tower"]["outer_shape_bem"]["reference_axis"]["x"]["grid"] = wt_opt[
"tower_grid.s"
].tolist()
self.wt_init["components"]["tower"]["outer_shape_bem"]["reference_axis"]["y"]["grid"] = wt_opt[
"tower_grid.s"
].tolist()
self.wt_init["components"]["tower"]["outer_shape_bem"]["reference_axis"]["z"]["grid"] = wt_opt[
"tower_grid.s"
].tolist()
self.wt_init["components"]["tower"]["outer_shape_bem"]["reference_axis"]["x"]["values"] = wt_opt[
"tower.ref_axis"
][:, 0].tolist()
self.wt_init["components"]["tower"]["outer_shape_bem"]["reference_axis"]["y"]["values"] = wt_opt[
"tower.ref_axis"
][:, 1].tolist()
self.wt_init["components"]["tower"]["outer_shape_bem"]["reference_axis"]["z"]["values"] = wt_opt[
"tower.ref_axis"
][:, 2].tolist()
self.wt_init["components"]["tower"]["internal_structure_2d_fem"]["outfitting_factor"] = float(
wt_opt["tower.outfitting_factor"]
)
for i in range(self.modeling_options["WISDEM"]["TowerSE"]["n_layers_tower"]):
self.wt_init["components"]["tower"]["internal_structure_2d_fem"]["layers"][i]["thickness"][
"grid"
] = wt_opt["tower_grid.s"].tolist()
self.wt_init["components"]["tower"]["internal_structure_2d_fem"]["layers"][i]["thickness"][
"values"
] = wt_opt["tower.layer_thickness"][i, :].tolist()
# Update monopile
if self.modeling_options["flags"]["monopile"]:
self.wt_init["components"]["monopile"]["outer_shape_bem"]["outer_diameter"]["grid"] = wt_opt[
"monopile.s"
].tolist()
self.wt_init["components"]["monopile"]["outer_shape_bem"]["outer_diameter"]["values"] = wt_opt[
"monopile.diameter"
].tolist()
self.wt_init["components"]["monopile"]["outer_shape_bem"]["reference_axis"]["x"]["grid"] = wt_opt[
"monopile.s"
].tolist()
self.wt_init["components"]["monopile"]["outer_shape_bem"]["reference_axis"]["y"]["grid"] = wt_opt[
"monopile.s"
].tolist()
self.wt_init["components"]["monopile"]["outer_shape_bem"]["reference_axis"]["z"]["grid"] = wt_opt[
"monopile.s"
].tolist()
self.wt_init["components"]["monopile"]["outer_shape_bem"]["reference_axis"]["x"]["values"] = wt_opt[
"monopile.ref_axis"
][:, 0].tolist()
self.wt_init["components"]["monopile"]["outer_shape_bem"]["reference_axis"]["y"]["values"] = wt_opt[
"monopile.ref_axis"
][:, 1].tolist()
self.wt_init["components"]["monopile"]["outer_shape_bem"]["reference_axis"]["z"]["values"] = wt_opt[
"monopile.ref_axis"
][:, 2].tolist()
self.wt_init["components"]["monopile"]["internal_structure_2d_fem"]["outfitting_factor"] = float(
wt_opt["monopile.outfitting_factor"]
)
for i in range(self.modeling_options["WISDEM"]["TowerSE"]["n_layers_monopile"]):
self.wt_init["components"]["monopile"]["internal_structure_2d_fem"]["layers"][i]["thickness"][
"grid"
] = wt_opt["monopile.s"].tolist()
self.wt_init["components"]["monopile"]["internal_structure_2d_fem"]["layers"][i]["thickness"][
"values"
] = wt_opt["monopile.layer_thickness"][i, :].tolist()
# Update floating platform and mooring
if self.modeling_options["flags"]["floating"]:
yaml_out = self.wt_init["components"]["floating_platform"]
n_joints = self.modeling_options["floating"]["joints"]["n_joints"]
for i in range(n_joints):
yaml_out["joints"][i]["location"] = wt_opt["floating.location"][i, :].tolist()
n_members = self.modeling_options["floating"]["members"]["n_members"]
for i in range(n_members):
name_member = self.modeling_options["floating"]["members"]["name"][i]
idx = self.modeling_options["floating"]["members"]["name2idx"][name_member]
yaml_out["members"][i]["outer_shape"]["outer_diameter"]["grid"] = wt_opt[
f"floating.memgrp{idx}.s"
].tolist()
yaml_out["members"][i]["outer_shape"]["outer_diameter"]["values"] = wt_opt[
f"floating.memgrp{idx}.outer_diameter"
].tolist()
istruct = yaml_out["members"][i]["internal_structure"]
n_layers = self.modeling_options["floating"]["members"]["n_layers"][i]
for j in range(n_layers):
istruct["layers"][j]["thickness"]["grid"] = wt_opt[f"floating.memgrp{idx}.s"].tolist()
istruct["layers"][j]["thickness"]["values"] = wt_opt[f"floating.memgrp{idx}.layer_thickness"][
j, :
].tolist()
if "ring_stiffeners" in istruct:
istruct["ring_stiffeners"]["web_height"] = float(
wt_opt[f"floating.memgrp{idx}.ring_stiffener_web_height"]
)
istruct["ring_stiffeners"]["web_thickness"] = float(
wt_opt[f"floating.memgrp{idx}.ring_stiffener_web_thickness"]
)
istruct["ring_stiffeners"]["flange_thickness"] = float(
wt_opt[f"floating.memgrp{idx}.ring_stiffener_flange_thickness"]
)
istruct["ring_stiffeners"]["flange_width"] = float(
wt_opt[f"floating.memgrp{idx}.ring_stiffener_flange_width"]
)
istruct["ring_stiffeners"]["spacing"] = float(
wt_opt[f"floating.memgrp{idx}.ring_stiffener_spacing"]
)
if "longitudinal_stiffeners" in istruct:
istruct["longitudinal_stiffeners"]["web_height"] = float(
wt_opt[f"floating.memgrp{idx}.axial_stiffener_web_height"]
)
istruct["longitudinal_stiffeners"]["web_thickness"] = float(
wt_opt[f"floating.memgrp{idx}.axial_stiffener_web_thickness"]
)
istruct["longitudinal_stiffeners"]["flange_thickness"] = float(
wt_opt[f"floating.memgrp{idx}.axial_stiffener_flange_thickness"]
)
istruct["longitudinal_stiffeners"]["flange_width"] = float(
wt_opt[f"floating.memgrp{idx}.axial_stiffener_flange_width"]
)
istruct["longitudinal_stiffeners"]["spacing"] = float(
wt_opt[f"floating.memgrp{idx}.axial_stiffener_spacing"]
)
n_ballasts = self.modeling_options["floating"]["members"]["n_ballasts"][i]
for j in range(n_ballasts):
if self.modeling_options["floating"]["members"]["ballast_flag_member_" + name_member][j] == False:
istruct["ballasts"][j]["volume"] = float(wt_opt[f"floating.memgrp{idx}.ballast_volume"][j])
if self.modeling_options["floating"]["members"]["n_axial_joints"][i] > 0:
for j in range(self.modeling_options["floating"]["members"]["n_axial_joints"][i]):
yaml_out["members"][i]["axial_joints"][j]["grid"] = float(
wt_opt[f"floating.memgrp{idx}.grid_axial_joints"][j]
)
if self.modeling_options["flags"]["mooring"]:
n_lines = self.modeling_options["mooring"]["n_lines"]
n_line_types = self.modeling_options["mooring"]["n_line_types"]
line_names = [self.wt_init["components"]["mooring"]["line_types"][i]["name"] for i in range(n_line_types)]
line_id = [self.wt_init["components"]["mooring"]["lines"][i]["line_type"] for i in range(n_lines)]
for i in range(n_lines):
self.wt_init["components"]["mooring"]["lines"][i]["unstretched_length"] = float(
wt_opt["mooring.unstretched_length"][i]
)
for jj, jname in enumerate(line_id):
for ii, iname in enumerate(line_names):
if jname == iname:
self.wt_init["components"]["mooring"]["line_types"][ii]["diameter"] = float(
wt_opt["mooring.line_diameter"][jj]
)
# Update rotor nacelle assembly
if self.modeling_options["flags"]["RNA"]:
self.wt_init["components"]["RNA"] = {}
self.wt_init["components"]["RNA"]["elastic_properties_mb"] = {}
self.wt_init["components"]["RNA"]["elastic_properties_mb"]["mass"] = float(wt_opt["drivese.rna_mass"])
self.wt_init["components"]["RNA"]["elastic_properties_mb"]["inertia"] = wt_opt["drivese.rna_I_TT"].tolist()
self.wt_init["components"]["RNA"]["elastic_properties_mb"]["center_mass"] = wt_opt[
"drivese.rna_cm"
].tolist()
# Update rotor diameter and hub height
if self.modeling_options["flags"]["blade"]:
self.wt_init["assembly"]["rotor_diameter"] = float(wt_opt["blade.high_level_blade_props.rotor_diameter"])
self.wt_init["assembly"]["hub_height"] = float(wt_opt["high_level_tower_props.hub_height"])
# Update controller
if self.modeling_options["flags"]["control"]:
self.wt_init["control"]["torque"]["tsr"] = float(wt_opt["control.rated_TSR"])
# Write yamls with updated values
sch.write_geometry_yaml(self.wt_init, fname_output)
def write_options(self, fname_output):
sch.write_modeling_yaml(self.modeling_options, fname_output)
sch.write_analysis_yaml(self.analysis_options, fname_output)
```
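A minimal usage sketch for the loader/writer methods above. This is an assumption-heavy illustration: the enclosing class (its definition is not part of this excerpt) is taken to be WISDEM's `WindTurbineOntologyPython` from `wisdem.glue_code.gc_LoadInputs`, and the yaml file names are placeholders.
```python
# Hedged sketch: class name, constructor signature, and file names are
# assumptions; only set_opt_flags/write_ontology/write_options appear above.
from wisdem.glue_code.gc_LoadInputs import WindTurbineOntologyPython

wt_init = WindTurbineOntologyPython(
    "turbine_geometry.yaml", "modeling_options.yaml", "analysis_options.yaml"
)
wt_init.set_opt_flags()  # resolve the global opt_flag and per-DV point counts
# ... build and run the OpenMDAO problem to obtain wt_opt ...
# wt_init.write_ontology(wt_opt, "turbine_geometry_out.yaml")
```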
#### File: landbosse/landbosse_omdao/landbosse.py
```python
import warnings
from math import ceil
import numpy as np
import openmdao.api as om
from wisdem.landbosse.model.Manager import Manager
from wisdem.landbosse.model.DefaultMasterInputDict import DefaultMasterInputDict
from wisdem.landbosse.landbosse_omdao.OpenMDAODataframeCache import OpenMDAODataframeCache
from wisdem.landbosse.landbosse_omdao.WeatherWindowCSVReader import read_weather_window
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import pandas as pd
use_default_component_data = -1.0
class LandBOSSE(om.Group):
def setup(self):
# Add a tower section height variable. The default value of 30 m is for transportable tower sections.
self.set_input_defaults("tower_section_length_m", 30.0, units="m")
self.set_input_defaults("blade_drag_coefficient", use_default_component_data) # Unitless
self.set_input_defaults("blade_lever_arm", use_default_component_data, units="m")
self.set_input_defaults("blade_install_cycle_time", use_default_component_data, units="h")
self.set_input_defaults("blade_offload_hook_height", use_default_component_data, units="m")
self.set_input_defaults("blade_offload_cycle_time", use_default_component_data, units="h")
self.set_input_defaults("blade_drag_multiplier", use_default_component_data) # Unitless
self.set_input_defaults("turbine_spacing_rotor_diameters", 4)
self.set_input_defaults("row_spacing_rotor_diameters", 10)
self.set_input_defaults("commissioning_pct", 0.01)
self.set_input_defaults("decommissioning_pct", 0.15)
self.set_input_defaults("trench_len_to_substation_km", 50.0, units="km")
self.set_input_defaults("interconnect_voltage_kV", 130.0, units="kV")
self.set_input_defaults("foundation_height", 0.0, units="m")
self.set_input_defaults("blade_mass", 8000.0, units="kg")
self.set_input_defaults("hub_mass", 15.4e3, units="kg")
self.set_input_defaults("nacelle_mass", 50e3, units="kg")
self.set_input_defaults("tower_mass", 240e3, units="kg")
self.set_input_defaults("turbine_rating_MW", 1500.0, units="kW")
self.add_subsystem("landbosse", LandBOSSE_API(), promotes=["*"])
class LandBOSSE_API(om.ExplicitComponent):
def setup(self):
# Clear the cache
OpenMDAODataframeCache._cache = {}
self.setup_inputs()
self.setup_outputs()
self.setup_discrete_outputs()
self.setup_discrete_inputs_that_are_not_dataframes()
self.setup_discrete_inputs_that_are_dataframes()
def setup_inputs(self):
"""
This method sets up the inputs.
"""
self.add_input("blade_drag_coefficient", use_default_component_data) # Unitless
self.add_input("blade_lever_arm", use_default_component_data, units="m")
self.add_input("blade_install_cycle_time", use_default_component_data, units="h")
self.add_input("blade_offload_hook_height", use_default_component_data, units="m")
self.add_input("blade_offload_cycle_time", use_default_component_data, units="h")
self.add_input("blade_drag_multiplier", use_default_component_data) # Unitless
# Even though LandBOSSE doesn't use foundation height, TowerSE does,
# and foundation height can be used with hub height to calculate
# tower height.
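        # A rough sketch of that relationship (illustrative only, not the
        # actual TowerSE computation):
        #   tower_height ~= hub_height - foundation_height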
self.add_input("foundation_height", 0.0, units="m")
self.add_input("tower_section_length_m", 30.0, units="m")
self.add_input("nacelle_mass", 0.0, units="kg")
self.add_input("tower_mass", 0.0, units="kg")
# A discrete input below, number_of_blades, gives the number of blades
# on the rotor.
#
# The total mass of the rotor nacelle assembly (RNA) is the following
# sum:
#
        # (blade_mass * number_of_blades) + nacelle_mass + hub_mass
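        #
        # Worked example with the defaults set in the LandBOSSE group above
        # (blade_mass=8000 kg, nacelle_mass=50e3 kg, hub_mass=15.4e3 kg,
        # number_of_blades=3):
        #   3 * 8000 + 50e3 + 15.4e3 = 89,400 kg (about 89.4 t)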
self.add_input("blade_mass", use_default_component_data, units="kg", desc="The mass of one rotor blade.")
self.add_input("hub_mass", use_default_component_data, units="kg", desc="Mass of the rotor hub")
self.add_input(
"crane_breakdown_fraction",
val=0.0,
desc="0 means the crane is never broken down. 1 means it is broken down every turbine.",
)
self.add_input("construct_duration", val=9, desc="Total project construction time (months)")
self.add_input("hub_height_meters", val=80, units="m", desc="Hub height m")
self.add_input("rotor_diameter_m", val=77, units="m", desc="Rotor diameter m")
self.add_input("wind_shear_exponent", val=0.2, desc="Wind shear exponent")
self.add_input("turbine_rating_MW", val=1.5, units="MW", desc="Turbine rating MW")
self.add_input("fuel_cost_usd_per_gal", val=1.5, desc="Fuel cost USD/gal")
self.add_input(
"breakpoint_between_base_and_topping_percent", val=0.8, desc="Breakpoint between base and topping (percent)"
)
# Could not place units in turbine_spacing_rotor_diameters
self.add_input("turbine_spacing_rotor_diameters", desc="Turbine spacing (times rotor diameter)", val=4)
self.add_input("depth", units="m", desc="Foundation depth m", val=2.36)
self.add_input("rated_thrust_N", units="N", desc="Rated Thrust (N)", val=5.89e5)
# Can't set units
self.add_input("bearing_pressure_n_m2", desc="Bearing Pressure (n/m2)", val=191521)
self.add_input("gust_velocity_m_per_s", units="m/s", desc="50-year Gust Velocity (m/s)", val=59.5)
self.add_input("road_length_adder_m", units="m", desc="Road length adder (m)", val=5000)
# Can't set units
self.add_input("fraction_new_roads", desc="Percent of roads that will be constructed (0.0 - 1.0)", val=0.33)
self.add_input("road_quality", desc="Road Quality (0-1)", val=0.6)
self.add_input("line_frequency_hz", units="Hz", desc="Line Frequency (Hz)", val=60)
# Can't set units
self.add_input("row_spacing_rotor_diameters", desc="Row spacing (times rotor diameter)", val=10)
self.add_input(
"trench_len_to_substation_km", units="km", desc="Combined Homerun Trench Length to Substation (km)", val=50
)
self.add_input("distance_to_interconnect_mi", units="mi", desc="Distance to interconnect (miles)", val=5)
self.add_input("interconnect_voltage_kV", units="kV", desc="Interconnect Voltage (kV)", val=130)
self.add_input(
"critical_speed_non_erection_wind_delays_m_per_s",
units="m/s",
desc="Non-Erection Wind Delay Critical Speed (m/s)",
val=15,
)
self.add_input(
"critical_height_non_erection_wind_delays_m",
units="m",
desc="Non-Erection Wind Delay Critical Height (m)",
val=10,
)
self.add_discrete_input("road_distributed_winnd", val=False)
self.add_input("road_width_ft", units="ft", desc="Road width (ft)", val=20)
self.add_input("road_thickness", desc="Road thickness (in)", val=8)
self.add_input("crane_width", units="m", desc="Crane width (m)", val=12.2)
self.add_input("overtime_multiplier", desc="Overtime multiplier", val=1.4)
self.add_input("markup_contingency", desc="Markup contingency", val=0.03)
self.add_input("markup_warranty_management", desc="Markup warranty management", val=0.0002)
self.add_input("markup_sales_and_use_tax", desc="Markup sales and use tax", val=0)
self.add_input("markup_overhead", desc="Markup overhead", val=0.05)
self.add_input("markup_profit_margin", desc="Markup profit margin", val=0.05)
self.add_input("Mass tonne", val=(1.0,), desc="", units="t")
self.add_input(
"development_labor_cost_usd", val=1e6, desc="The cost of labor in the development phase", units="USD"
)
# Disabled due to Pandas conflict right now.
self.add_input("labor_cost_multiplier", val=1.0, desc="Labor cost multiplier")
self.add_input("commissioning_pct", 0.01)
self.add_input("decommissioning_pct", 0.15)
def setup_discrete_inputs_that_are_not_dataframes(self):
"""
This method sets up the discrete inputs that aren't dataframes.
"""
self.add_discrete_input("num_turbines", val=100, desc="Number of turbines in project")
# Since 3 blades are so common on rotors, that is a reasonable default
# value that will not need to be checked during component list
# assembly.
self.add_discrete_input("number_of_blades", val=3, desc="Number of blades on the rotor")
self.add_discrete_input(
"user_defined_home_run_trench", val=0, desc="Flag for user-defined home run trench length (0 = no; 1 = yes)"
)
self.add_discrete_input(
"allow_same_flag",
val=False,
desc="Allow same crane for base and topping (True or False)",
)
self.add_discrete_input(
"hour_day",
desc="Dictionary of normal and long hours for construction in a day in the form of {'long': 24, 'normal': 10}",
val={"long": 24, "normal": 10},
)
self.add_discrete_input(
"time_construct",
desc="One of the keys in the hour_day dictionary to specify how many hours per day construction happens.",
val="normal",
)
self.add_discrete_input(
"user_defined_distance_to_grid_connection",
desc="Flag for user-defined home run trench length (True or False)",
val=False,
)
# Could not place units in rate_of_deliveries
self.add_discrete_input("rate_of_deliveries", val=10, desc="Rate of deliveries (turbines per week)")
self.add_discrete_input("new_switchyard", desc="New Switchyard (True or False)", val=True)
self.add_discrete_input("num_hwy_permits", desc="Number of highway permits", val=10)
self.add_discrete_input("num_access_roads", desc="Number of access roads", val=2)
def setup_discrete_inputs_that_are_dataframes(self):
"""
This sets up the default inputs that are dataframes. They are separate
because they hold the project data and the way we need to hold their
data is different. They have defaults loaded at the top of the file
which can be overridden outside by setting the properties listed
below.
"""
# Read in default sheets for project data
default_project_data = OpenMDAODataframeCache.read_all_sheets_from_xlsx("ge15_public")
self.add_discrete_input(
"site_facility_building_area_df",
val=default_project_data["site_facility_building_area"],
desc="site_facility_building_area DataFrame",
)
self.add_discrete_input(
"components",
val=default_project_data["components"],
desc="Dataframe of components for tower, blade, nacelle",
)
self.add_discrete_input(
"crane_specs", val=default_project_data["crane_specs"], desc="Dataframe of specifications of cranes"
)
self.add_discrete_input(
"weather_window",
val=read_weather_window(default_project_data["weather_window"]),
desc="Dataframe of wind toolkit data",
)
self.add_discrete_input("crew", val=default_project_data["crew"], desc="Dataframe of crew configurations")
self.add_discrete_input(
"crew_price",
val=default_project_data["crew_price"],
desc="Dataframe of costs per hour for each type of worker.",
)
self.add_discrete_input(
"equip", val=default_project_data["equip"], desc="Collections of equipment to perform erection operations."
)
self.add_discrete_input(
"equip_price", val=default_project_data["equip_price"], desc="Prices for various type of equipment."
)
self.add_discrete_input("rsmeans", val=default_project_data["rsmeans"], desc="RSMeans price data")
self.add_discrete_input(
"cable_specs", val=default_project_data["cable_specs"], desc="cable specs for collection system"
)
self.add_discrete_input(
"material_price",
val=default_project_data["material_price"],
desc="Prices of materials for foundations and roads",
)
self.add_discrete_input("project_data", val=default_project_data, desc="Dictionary of all dataframes of data")
def setup_outputs(self):
"""
This method sets up the continuous outputs. This is where total costs
and installation times go.
        To see how cost totals are calculated, see the compute_total_bos_costs method below.
"""
self.add_output(
"bos_capex", 0.0, units="USD", desc="Total BOS CAPEX not including commissioning or decommissioning."
)
self.add_output(
"bos_capex_kW",
0.0,
units="USD/kW",
desc="Total BOS CAPEX per kW not including commissioning or decommissioning.",
)
self.add_output(
"total_capex", 0.0, units="USD", desc="Total BOS CAPEX including commissioning and decommissioning."
)
self.add_output(
"total_capex_kW",
0.0,
units="USD/kW",
desc="Total BOS CAPEX per kW including commissioning and decommissioning.",
)
self.add_output("installation_capex", 0.0, units="USD", desc="Total foundation and erection installation cost.")
self.add_output(
"installation_capex_kW", 0.0, units="USD", desc="Total foundation and erection installation cost per kW."
)
self.add_output("installation_time_months", 0.0, desc="Total balance of system installation time (months).")
def setup_discrete_outputs(self):
"""
This method sets up discrete outputs.
"""
self.add_discrete_output(
"landbosse_costs_by_module_type_operation", desc="The costs by module, type and operation", val=None
)
self.add_discrete_output(
"landbosse_details_by_module",
desc="The details from the run of LandBOSSE. This includes some costs, but mostly other things",
val=None,
)
self.add_discrete_output("erection_crane_choice", desc="The crane choices for erection.", val=None)
self.add_discrete_output(
"erection_component_name_topvbase",
desc="List of components and whether they are a topping or base operation",
val=None,
)
self.add_discrete_output(
"erection_components", desc="List of components with their values modified from the defaults.", val=None
)
def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
"""
This runs the ErectionCost module using the inputs and outputs into and
out of this module.
Note: inputs, discrete_inputs are not dictionaries. They do support
[] notation. inputs is of class 'openmdao.vectors.default_vector.DefaultVector'
discrete_inputs is of class openmdao.core.component._DictValues. Other than
[] brackets, they do not behave like dictionaries. See the following
documentation for details.
http://openmdao.org/twodocs/versions/latest/_srcdocs/packages/vectors/default_vector.html
https://mdolab.github.io/OpenAeroStruct/_modules/openmdao/core/component.html
Parameters
----------
inputs : openmdao.vectors.default_vector.DefaultVector
A dictionary-like object with NumPy arrays that hold float
inputs. Note that since these are NumPy arrays, they
need indexing to pull out simple float64 values.
outputs : openmdao.vectors.default_vector.DefaultVector
A dictionary-like object to store outputs.
discrete_inputs : openmdao.core.component._DictValues
A dictionary-like with the non-numeric inputs (like
pandas.DataFrame)
discrete_outputs : openmdao.core.component._DictValues
A dictionary-like for non-numeric outputs (like
pandas.DataFrame)
"""
# Put the inputs together and run all the modules
master_output_dict = dict()
master_input_dict = self.prepare_master_input_dictionary(inputs, discrete_inputs)
manager = Manager(master_input_dict, master_output_dict)
result = manager.execute_landbosse("WISDEM")
# Check if everything executed correctly
if result != 0:
raise Exception("LandBOSSE didn't execute correctly")
# Gather the cost and detail outputs
costs_by_module_type_operation = self.gather_costs_from_master_output_dict(master_output_dict)
discrete_outputs["landbosse_costs_by_module_type_operation"] = costs_by_module_type_operation
details = self.gather_details_from_master_output_dict(master_output_dict)
discrete_outputs["landbosse_details_by_module"] = details
# This is where we have access to the modified components, so put those
# in the outputs of the component
discrete_outputs["erection_components"] = master_input_dict["components"]
# Now get specific outputs. These have been refactored to methods that work
# with each module so as to keep this method as compact as possible.
self.gather_specific_erection_outputs(master_output_dict, outputs, discrete_outputs)
# Compute the total BOS costs
self.compute_total_bos_costs(costs_by_module_type_operation, master_output_dict, inputs, outputs)
def prepare_master_input_dictionary(self, inputs, discrete_inputs):
"""
This prepares a master input dictionary by applying all the necessary
modifications to the inputs.
Parameters
----------
inputs : openmdao.vectors.default_vector.DefaultVector
A dictionary-like object with NumPy arrays that hold float
inputs. Note that since these are NumPy arrays, they
need indexing to pull out simple float64 values.
discrete_inputs : openmdao.core.component._DictValues
A dictionary-like with the non-numeric inputs (like
pandas.DataFrame)
Returns
-------
dict
The prepared master input to go to the Manager.
"""
inputs_dict = {key: inputs[key][0] for key in inputs.keys()}
discrete_inputs_dict = {key: value for key, value in discrete_inputs.items()}
incomplete_input_dict = {**inputs_dict, **discrete_inputs_dict}
# Modify the default component data if needed and copy it into the
# appropriate values of the input dictionary.
modified_components = self.modify_component_lists(inputs, discrete_inputs)
incomplete_input_dict["project_data"]["components"] = modified_components
incomplete_input_dict["components"] = modified_components
# FoundationCost needs to have all the component data split into separate
# NumPy arrays.
incomplete_input_dict["component_data"] = modified_components
for component in incomplete_input_dict["component_data"].keys():
incomplete_input_dict[component] = np.array(incomplete_input_dict["component_data"][component])
# These are aliases because parts of the code call the same thing by
        # different names.
incomplete_input_dict["crew_cost"] = discrete_inputs["crew_price"]
incomplete_input_dict["cable_specs_pd"] = discrete_inputs["cable_specs"]
# read in RSMeans per diem:
crew_cost = discrete_inputs["crew_price"]
crew_cost = crew_cost.set_index("Labor type ID", drop=False)
incomplete_input_dict["rsmeans_per_diem"] = crew_cost.loc["RSMeans", "Per diem USD per day"]
# Calculate project size in megawatts
incomplete_input_dict["project_size_megawatts"] = float(
discrete_inputs["num_turbines"] * inputs["turbine_rating_MW"]
)
# Needed to avoid distributed wind keys
incomplete_input_dict["road_distributed_wind"] = False
defaults = DefaultMasterInputDict()
master_input_dict = defaults.populate_input_dict(incomplete_input_dict)
return master_input_dict
def gather_costs_from_master_output_dict(self, master_output_dict):
"""
        This method extracts all the cost_by_module_type_operation lists for
output in an Excel file.
It finds values for the keys ending in '_module_type_operation'. It
then concatenates them together so they can be easily written to
a .csv or .xlsx
On every row, it includes the:
Rotor diameter m
Turbine rating MW
Number of turbines
This enables easy mapping of new columns if need be. The columns have
spaces in the names so that they can be easily written to a user-friendly
output.
Parameters
----------
        master_output_dict : dict
            The master output dictionary from the model run, holding the cost
            lists under keys that end in '_module_type_operation'.
Returns
-------
list
List of dicts to write to the .csv.
"""
line_items = []
# Gather the lists of costs
cost_lists = [value for key, value in master_output_dict.items() if key.endswith("_module_type_operation")]
# Flatten the list of lists that is the result of the gathering
for cost_list in cost_lists:
line_items.extend(cost_list)
# Filter out the keys needed and rename them to meaningful values
final_costs = []
for line_item in line_items:
item = {
"Module": line_item["module"],
"Type of cost": line_item["type_of_cost"],
"Cost / kW": line_item["usd_per_kw_per_project"],
"Cost / project": line_item["cost_per_project"],
"Cost / turbine": line_item["cost_per_turbine"],
"Number of turbines": line_item["num_turbines"],
"Rotor diameter (m)": line_item["rotor_diameter_m"],
"Turbine rating (MW)": line_item["turbine_rating_MW"],
"Project ID with serial": line_item["project_id_with_serial"],
}
final_costs.append(item)
return final_costs
def gather_details_from_master_output_dict(self, master_output_dict):
"""
This extracts the detail lists from all the modules to output
the detailed non-cost data from the model run.
Parameters
----------
master_output_dict : dict
The master output dict with the finished module output in it.
Returns
-------
list
List of dicts with detailed data.
"""
line_items = []
# Gather the lists of costs
details_lists = [value for key, value in master_output_dict.items() if key.endswith("_csv")]
# Flatten the list of lists
for details_list in details_lists:
line_items.extend(details_list)
return line_items
def gather_specific_erection_outputs(self, master_output_dict, outputs, discrete_outputs):
"""
This method gathers specific outputs from the ErectionCost module and places
them on the outputs.
The method does not return anything. Rather, it places the outputs directly
        on the continuous or discrete outputs.
Parameters
----------
master_output_dict: dict
The master output dictionary out of LandBOSSE
outputs : openmdao.vectors.default_vector.DefaultVector
A dictionary-like object to store outputs.
discrete_outputs : openmdao.core.component._DictValues
A dictionary-like for non-numeric outputs (like
pandas.DataFrame)
"""
discrete_outputs["erection_crane_choice"] = master_output_dict["crane_choice"]
discrete_outputs["erection_component_name_topvbase"] = master_output_dict["component_name_topvbase"]
def compute_total_bos_costs(self, costs_by_module_type_operation, master_output_dict, inputs, outputs):
"""
This computes the total BOS costs from the master output dictionary
and places them on the necessary outputs.
Parameters
----------
costs_by_module_type_operation: List[Dict[str, Any]]
The lists of costs by module, type and operation.
master_output_dict: Dict[str, Any]
            The master output dictionary from the run. Used to obtain the
            construction time.
        inputs : openmdao.vectors.default_vector.DefaultVector
            The inputs; used for the commissioning and decommissioning percentages.
        outputs : openmdao.vectors.default_vector.DefaultVector
            The outputs in which to place the results of the computations.
"""
bos_per_kw = 0.0
bos_per_project = 0.0
installation_per_project = 0.0
installation_per_kW = 0.0
for row in costs_by_module_type_operation:
bos_per_kw += row["Cost / kW"]
bos_per_project += row["Cost / project"]
if row["Module"] in ["ErectionCost", "FoundationCost"]:
installation_per_project += row["Cost / project"]
installation_per_kW += row["Cost / kW"]
commissioning_pct = inputs["commissioning_pct"]
decommissioning_pct = inputs["decommissioning_pct"]
commissioning_per_project = bos_per_project * commissioning_pct
        decommissioning_per_project = bos_per_project * decommissioning_pct
        commissioning_per_kW = bos_per_kw * commissioning_pct
        decommissioning_per_kW = bos_per_kw * decommissioning_pct
        outputs["total_capex_kW"] = np.round(bos_per_kw + commissioning_per_kW + decommissioning_per_kW, 0)
        outputs["total_capex"] = np.round(bos_per_project + commissioning_per_project + decommissioning_per_project, 0)
outputs["bos_capex"] = round(bos_per_project, 0)
outputs["bos_capex_kW"] = round(bos_per_kw, 0)
outputs["installation_capex"] = round(installation_per_project, 0)
outputs["installation_capex_kW"] = round(installation_per_kW, 0)
actual_construction_months = master_output_dict["actual_construction_months"]
outputs["installation_time_months"] = round(actual_construction_months, 0)
def modify_component_lists(self, inputs, discrete_inputs):
"""
This method modifies the previously loaded default component lists with
data about blades, tower sections, if they have been provided as input
to the component.
        It only modifies the project component data if the defaults for the
        corresponding inputs have been overridden.
        The default blade data is assumed to be the first component whose name
        begins with the word "Blade".
        This takes the tower mass from WISDEM. Ideally, it should also take an
        input distinguishing a transportable tower (4.3 m), a large diameter
        steel tower (LDST, 6.2 m), or an unconstrained keystone tower, or at
        least warn about the boundaries assumed here.
Parameters
----------
inputs : openmdao.vectors.default_vector.DefaultVector
A dictionary-like object with NumPy arrays that hold float
inputs. Note that since these are NumPy arrays, they
need indexing to pull out simple float64 values.
discrete_inputs : openmdao.core.component._DictValues
A dictionary-like with the non-numeric inputs (like
pandas.DataFrame)
Returns
-------
pd.DataFrame
The dataframe with the modified components.
"""
input_components = discrete_inputs["components"]
# This list is a sequence of pd.Series instances that have the
# specifications of each component.
output_components_list = []
# Need to convert kg to tonnes
kg_per_tonne = 1000
# Get the hub height
hub_height_meters = inputs["hub_height_meters"][0]
# Make the nacelle. This does not include the hub or blades.
nacelle_mass_kg = inputs["nacelle_mass"][0]
nacelle = input_components[input_components["Component"].str.startswith("Nacelle")].iloc[0].copy()
if inputs["nacelle_mass"] != use_default_component_data:
nacelle["Mass tonne"] = nacelle_mass_kg / kg_per_tonne
nacelle["Component"] = "Nacelle"
nacelle["Lift height m"] = hub_height_meters
output_components_list.append(nacelle)
# Make the hub
hub_mass_kg = inputs["hub_mass"][0]
hub = input_components[input_components["Component"].str.startswith("Hub")].iloc[0].copy()
hub["Lift height m"] = hub_height_meters
if hub_mass_kg != use_default_component_data:
hub["Mass tonne"] = hub_mass_kg / kg_per_tonne
output_components_list.append(hub)
# Make blades
blade = input_components[input_components["Component"].str.startswith("Blade")].iloc[0].copy()
# There is always a hub height, so use that as the lift height
blade["Lift height m"] = hub_height_meters
if inputs["blade_drag_coefficient"][0] != use_default_component_data:
blade["Coeff drag"] = inputs["blade_drag_coefficient"][0]
if inputs["blade_lever_arm"][0] != use_default_component_data:
blade["Lever arm m"] = inputs["blade_lever_arm"][0]
if inputs["blade_install_cycle_time"][0] != use_default_component_data:
blade["Cycle time installation hrs"] = inputs["blade_install_cycle_time"][0]
if inputs["blade_offload_hook_height"][0] != use_default_component_data:
blade["Offload hook height m"] = hub_height_meters
if inputs["blade_offload_cycle_time"][0] != use_default_component_data:
blade["Offload cycle time hrs"] = inputs["blade_offload_cycle_time"]
if inputs["blade_drag_multiplier"][0] != use_default_component_data:
blade["Multiplier drag rotor"] = inputs["blade_drag_multiplier"]
if inputs["blade_mass"][0] != use_default_component_data:
blade["Mass tonne"] = inputs["blade_mass"][0] / kg_per_tonne
        # Assume that number_of_blades always has a reasonable value. Its
        # default of 3, set when the discrete input is declared, is always
        # reasonable unless overridden by another input.
number_of_blades = discrete_inputs["number_of_blades"]
for i in range(number_of_blades):
component = f"Blade {i + 1}"
blade_i = blade.copy()
blade_i["Component"] = component
output_components_list.append(blade_i)
# Make tower sections
tower_mass_tonnes = inputs["tower_mass"][0] / kg_per_tonne
tower_height_m = hub_height_meters - inputs["foundation_height"][0]
default_tower_section = input_components[input_components["Component"].str.startswith("Tower")].iloc[0]
tower_sections = self.make_tower_sections(tower_mass_tonnes, tower_height_m, default_tower_section)
output_components_list.extend(tower_sections)
# Make the output component dataframe and return it.
output_components = pd.DataFrame(output_components_list)
return output_components
@staticmethod
def make_tower_sections(tower_mass_tonnes, tower_height_m, default_tower_section):
"""
This makes tower sections for a transportable tower.
Approximations:
- Weight is distributed uniformly among the sections
- The number of sections is either the maximum allowed by mass or
the maximum allowed by height, to maintain transportability.
For each tower section, calculate:
- lift height
- lever arm
- surface area
The rest of values should remain at their defaults.
Note: Tower sections are constrained in maximum diameter to 4.5 m.
However, their surface area is calculated with a 1.3 m radius
to agree more closely with empirical data. Also, tower sections
are approximated as cylinders.
Parameters
----------
tower_mass_tonnes: float
The total tower mass in tonnes
tower_height_m: float
The total height of the tower in meters.
default_tower_section: pd.Series
There are a number of values that are kept constant in creating
the tower sections. This series holds the values.
Returns
-------
List[pd.Series]
A list of series to be appended onto an output component list.
It is not a dataframe, because it is faster to append to a list
and make a dataframe once.
"""
tower_radius = 1.3
number_of_sections = max(ceil(tower_height_m / 30), ceil(tower_mass_tonnes / 80))
tower_section_height_m = tower_height_m / number_of_sections
tower_section_mass = tower_mass_tonnes / number_of_sections
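        # Note: the expression below is pi * h * r**2 rather than the cylindrical
        # side area 2 * pi * r * h; together with the 1.3 m calibration radius it
        # is presumably part of the empirical fit described in the docstring above.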
tower_section_surface_area_m2 = np.pi * tower_section_height_m * (tower_radius ** 2)
sections = []
for i in range(number_of_sections):
lift_height_m = (i * tower_section_height_m) + tower_section_height_m
lever_arm = (i * tower_section_height_m) + (0.5 * tower_section_height_m)
name = f"Tower {i + 1}"
section = default_tower_section.copy()
section["Component"] = name
section["Mass tonne"] = tower_section_mass
section["Lift height m"] = lift_height_m
section["Surface area sq m"] = tower_section_surface_area_m2
section["Section height m"] = tower_section_height_m
section["Lever arm m"] = lever_arm
sections.append(section)
return sections
```
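The tower-sectioning rule in `make_tower_sections` above is plain arithmetic, so it can be illustrated without building the full OpenMDAO component. The sketch below restates the same math (section count capped by a 30 m height limit and an 80 t mass limit, surface area computed with the 1.3 m calibration radius); the function name and the example mass/height values are illustrative, not part of WISDEM.

```python
from math import ceil

import numpy as np


def sketch_tower_sections(tower_mass_tonnes, tower_height_m):
    """Restates the per-section math of make_tower_sections above."""
    tower_radius = 1.3  # calibration radius used for surface area, per the docstring
    # The number of sections is the larger of what the height and mass limits require
    n = max(ceil(tower_height_m / 30), ceil(tower_mass_tonnes / 80))
    h = tower_height_m / n
    m = tower_mass_tonnes / n
    area = np.pi * h * tower_radius ** 2  # same expression the component uses
    for i in range(n):
        lift = (i + 1) * h  # lift height: top of section i
        lever = (i + 0.5) * h  # lever arm: mid-height of section i
        print(f"Tower {i + 1}: {m:.1f} t, lift {lift:.1f} m, lever {lever:.1f} m, area {area:.1f} m^2")


# Example: a 300 t, 100 m tower yields 4 sections (both limits give a ceiling of 4)
sketch_tower_sections(300.0, 100.0)
```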
#### File: landbosse/model/DevelopmentCost.py
```python
import math
import traceback
import pandas as pd
from wisdem.landbosse.model.CostModule import CostModule
class DevelopmentCost(CostModule):
"""
    DevelopmentCost.py
    Created by <NAME> on June 30, 2019
    A simple DevelopmentCost module for now. This module reads in a user input from the detailed input Excel
    file and writes it out in the detailed output Excel file.
"""
def __init__(self, input_dict, output_dict, project_name):
self.input_dict = input_dict
self.output_dict = output_dict
self.project_name = project_name
def calculate_costs(self):
"""
Sets the total cost for development.
If there is a key of 'development_labor_cost_usd' in the input
dictionary, then that is used as the development cost and a
dataframe is created that holds that labor cost.
If the key is not present in the dictionary, the development
cost is retrieved from the project data.
Returns
        -------
total_development_cost : pd.DataFrame
data frame with total development cost by type of cost (e.g., Labor)
"""
if "development_labor_cost_usd" in self.input_dict:
total_development_cost = pd.DataFrame(
[
{"Type of cost": "Equipment rental", "Cost USD": 0, "Phase of construction": "Development"},
{
"Type of cost": "Labor",
"Cost USD": self.input_dict["development_labor_cost_usd"],
"Phase of construction": "Development",
},
{"Type of cost": "Materials", "Cost USD": 0, "Phase of construction": "Development"},
{"Type of cost": "Mobilization", "Cost USD": 0, "Phase of construction": "Development"},
{"Type of cost": "Other", "Cost USD": 0, "Phase of construction": "Development"},
]
)
else:
total_development_cost = self.input_dict["development_df"]
self.output_dict["total_development_cost"] = total_development_cost
return total_development_cost
def outputs_for_detailed_tab(self):
"""
Creates a list of dictionaries which can be used on their own or
used to make a dataframe.
Must be called after self.run_module()
Returns
-------
list(dict)
A list of dicts, with each dict representing a row of the data.
"""
result = []
module = type(self).__name__
for _, row in self.output_dict["total_development_cost"].iterrows():
dashed_row = "{} - {} - {}".format(
row["Type of cost"], row["Phase of construction"], math.ceil(row["Cost USD"])
)
result.append(
{
"unit": "",
"type": "dataframe",
"variable_df_key_col_name": "Type of Cost - Phase of Construction - Cost in USD",
"value": dashed_row,
"last_number": row[2],
}
)
for _dict in result:
_dict["project_id_with_serial"] = self.project_name
_dict["module"] = module
self.output_dict["development_cost_csv"] = result
return result
def run_module(self):
"""
Runs the DevelopmentCost module and populates the IO dictionaries with calculated values.
"""
try:
self.calculate_costs()
self.outputs_for_detailed_tab()
self.output_dict["development_module_type_operation"] = self.outputs_for_costs_by_module_type_operation(
input_df=self.output_dict["total_development_cost"], project_id=self.project_name, total_or_turbine=True
)
return 0, 0 # module ran successfully
except Exception as error:
traceback.print_exc()
print(f"Fail {self.project_name} DevelopmentCost")
return 1, error # module did not run successfully
```
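A minimal sketch of driving this module directly, outside the LandBOSSE `Manager`. Only the labor-cost branch of `calculate_costs` is exercised, so the two dictionaries need nothing beyond what the code above reads; the project name and the cost value are placeholders.

```python
from wisdem.landbosse.model.DevelopmentCost import DevelopmentCost

input_dict = {"development_labor_cost_usd": 1.0e6}  # presence of this key selects the labor branch
output_dict = {}
module = DevelopmentCost(input_dict, output_dict, project_name="demo_project")

total = module.calculate_costs()  # also stored in output_dict["total_development_cost"]
print(total[["Type of cost", "Cost USD"]])
print("Total USD:", total["Cost USD"].sum())  # 1,000,000, all of it labeled Labor
```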
#### File: WISDEM/wisdem/main.py
```python
import numpy as np
import os
import sys
from wisdem.glue_code.runWISDEM import run_wisdem
from wisdem.inputs import load_yaml
from wisdem.inputs.gui import run as guirun
np.warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def read_master_file(fyaml):
if os.path.exists(fyaml):
print("...Reading master input file,", fyaml)
else:
raise FileNotFoundError("The master input file, " + fyaml + ", cannot be found.")
input_yaml = load_yaml(fyaml)
check_list = ["geometry_file", "modeling_file", "analysis_file"]
for f in check_list:
if not os.path.exists(input_yaml[f]):
raise FileNotFoundError("The " + f + " entry, " + input_yaml[f] + ", cannot be found.")
return input_yaml
def wisdem_cmd():
usg_msg = "WISDEM command line launcher\n Arguments: \n wisdem : Starts GUI\n wisdem input.yaml : Runs master yaml file that specifies geometry, modeling, and analysis files\n wisdem geom.yaml modeling.yaml analysis.yaml : Runs specific geometry, modeling, and analysis files\n"
# Look for help message
help_flag = False
for k in range(len(sys.argv)):
if sys.argv[k] in ["-h", "--help"]:
help_flag = True
if help_flag:
print(usg_msg)
elif len(sys.argv) == 1:
# Launch GUI
guirun()
elif len(sys.argv) == 2:
        # Grab and validate the master input file (read_master_file checks that
        # it and the geometry/modeling/analysis files it references exist)
        yaml_dict = read_master_file(sys.argv[1])
# Run WISDEM (also saves output)
wt_opt, modeling_options, opt_options = run_wisdem(
yaml_dict["geometry_file"], yaml_dict["modeling_file"], yaml_dict["analysis_file"]
)
elif len(sys.argv) == 4:
check_list = ["geometry", "modeling", "analysis"]
for k, f in enumerate(sys.argv[1:]):
if not os.path.exists(f):
raise FileNotFoundError("The " + check_list[k] + " file, " + f + ", cannot be found.")
# Run WISDEM (also saves output)
wt_opt, modeling_options, opt_options = run_wisdem(sys.argv[1], sys.argv[2], sys.argv[3])
else:
# As if asked for help
print("Unrecognized set of inputs. Usage:")
print(usg_msg)
sys.exit(0)
if __name__ == "__main__":
wisdem_cmd()
```
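The command-line launcher above is a thin wrapper around `run_wisdem`, so the same run can be scripted directly. A sketch, assuming the three yaml files exist on disk (the file names here are placeholders):

```python
from wisdem.glue_code.runWISDEM import run_wisdem

# Programmatic equivalent of `wisdem geom.yaml modeling.yaml analysis.yaml`
wt_opt, modeling_options, opt_options = run_wisdem("geom.yaml", "modeling.yaml", "analysis.yaml")

# run_wisdem saves output files as a side effect; the returned problem object
# can also be queried for any model output via wt_opt["<promoted.name>"].
```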
#### File: wisdem/moorpy/body.py
```python
import numpy as np
from wisdem.moorpy.helpers import (
getH,
printVec,
rotatePosition,
rotationMatrix,
transformPosition,
translateForce3to6DOF,
)
class Body:
"""A class for any object in the mooring system that will have its own reference frame"""
def __init__(self, mooringSys, num, type, r6, m=0, v=0, rCG=np.zeros(3), AWP=0, rM=np.zeros(3), f6Ext=np.zeros(6)):
"""Initialize Body attributes
Parameters
----------
mooringSys : system object
The system object that contains the body object
num : int
            identifier number
type : int
the body type: 0 free to move, 1 fixed, -1 coupled externally
r6 : array
6DOF position and orientation vector [m, rad]
m : float, optional
mass, centered at CG [kg]. The default is 0.
v : float, optional
volume, centered at reference point [m^3]. The default is 0.
rCG : array, optional
center of gravity position in body reference frame [m]. The default is np.zeros(3).
AWP : float, optional
waterplane area - used for hydrostatic heave stiffness if nonzero [m^2]. The default is 0.
rM : float or array, optional
            coordinates or height of the metacenter relative to the body reference frame [m]. The default is np.zeros(3).
f6Ext : array, optional
applied external forces and moments vector in global orientation (not including weight/buoyancy) [N]. The default is np.zeros(6).
attachedP: list, int
list of ID numbers of any Points attached to the Body
rPointRel: list, float
list of coordinates of each attached Point relative to the Body reference frame [m]
Returns
-------
None.
"""
self.sys = mooringSys # store a reference to the overall mooring system (instance of System class)
self.number = num
        self.type = type  # 0 free to move, 1 fixed, -1 coupled externally
self.r6 = np.array(r6, dtype=np.float_) # 6DOF position and orientation vector [m, rad]
self.m = m # mass, centered at CG [kg]
self.v = v # volume, assumed centered at reference point [m^3]
self.rCG = np.array(rCG, dtype=np.float_) # center of gravity position in body reference frame [m]
self.AWP = AWP # waterplane area - used for hydrostatic heave stiffness if nonzero [m^2]
if np.isscalar(rM):
self.rM = np.array(
[0, 0, rM], dtype=np.float_
) # coordinates of body metacenter relative to body reference frame [m]
else:
self.rM = np.array(rM, dtype=np.float_)
self.f6Ext = np.array(
f6Ext, dtype=np.float_
) # for adding external forces and moments in global orientation (not including weight/buoyancy)
self.attachedP = [] # ID numbers of any Points attached to the Body
self.rPointRel = [] # coordinates of each attached Point relative to the Body reference frame
self.attachedR = [] # ID numbers of any Rods attached to the Body (not yet implemented)
self.sharedLineTheta = []
self.fairR = 0.0
self.R = np.eye(3) # body orientation rotation matrix
# print("Created Body "+str(self.number))
def attachPoint(self, pointID, rAttach):
"""Adds a Point to the Body, at the specified relative position on the body.
Parameters
----------
pointID : int
The identifier ID number of a point
rAttach : array
The position of the point relative to the body's frame [m]
Returns
-------
None.
"""
self.attachedP.append(pointID)
self.rPointRel.append(np.array(rAttach))
# print("attached Point "+str(pointID)+" to Body "+str(self.number))
def setPosition(self, r6):
"""Sets the position of the Body, along with that of any dependent objects.
Parameters
----------
r6 : array
6DOF position and orientation vector of the body [m, rad]
Raises
------
ValueError
If the length of the input r6 array is not of length 6
Returns
-------
None.
"""
if len(r6) == 6:
self.r6 = np.array(r6, dtype=np.float_) # update the position of the Body itself
else:
raise ValueError(
f"Body setPosition method requires an argument of size 6, but size {len(r6):d} was provided"
)
self.R = rotationMatrix(self.r6[3], self.r6[4], self.r6[5]) # update body rotation matrix
# update the position of any attached Points
for PointID, rPointRel in zip(self.attachedP, self.rPointRel):
rPoint = np.matmul(self.R, rPointRel) + self.r6[:3] # rPoint = transformPosition(rPointRel, r6)
self.sys.pointList[PointID - 1].setPosition(rPoint)
if self.sys.display > 3:
printVec(rPoint)
breakpoint()
def getForces(self, lines_only=False):
"""Sums the forces and moments on the Body, including its own plus those from any attached objects.
Parameters
----------
lines_only : boolean, optional
An option for calculating forces from just the mooring lines or not. The default is False.
Returns
-------
f6 : array
The 6DOF forces and moments applied to the body in its current position [N, Nm]
"""
f6 = np.zeros(6)
        # TODO: could save time below by storing the body's rotation matrix when its position is set rather than
# recalculating it in each of the following function calls.
if lines_only == False:
# add weight, which may result in moments as well as a force
rCG_rotated = rotatePosition(
self.rCG, self.r6[3:]
) # relative position of CG about body ref point in unrotated reference frame
f6 += translateForce3to6DOF(
rCG_rotated, np.array([0, 0, -self.m * self.sys.g])
) # add to net forces/moments
# add buoyancy force and moments if applicable (this can include hydrostatic restoring moments
# if rM is considered the metacenter location rather than the center of buoyancy)
rM_rotated = rotatePosition(
self.rM, self.r6[3:]
) # relative position of metacenter about body ref point in unrotated reference frame
f6 += translateForce3to6DOF(
rM_rotated, np.array([0, 0, self.sys.rho * self.sys.g * self.v])
) # add to net forces/moments
# add hydrostatic heave stiffness (if AWP is nonzero)
f6[2] -= self.sys.rho * self.sys.g * self.AWP * self.r6[2]
# add any externally applied forces/moments (in global orientation)
f6 += self.f6Ext
# add forces from any attached Points (and their attached lines)
for PointID, rPointRel in zip(self.attachedP, self.rPointRel):
fPoint = self.sys.pointList[PointID - 1].getForces(lines_only=lines_only) # get net force on attached Point
rPoint_rotated = rotatePosition(
rPointRel, self.r6[3:]
) # relative position of Point about body ref point in unrotated reference frame
f6 += translateForce3to6DOF(
rPoint_rotated, fPoint
) # add net force and moment resulting from its position to the Body
# All forces and moments on the body should now be summed, and are in global/unrotated orientations.
# For application to the body DOFs, convert the moments to be about the body's local/rotated x/y/z axes <<< do we want this in all cases?
rotMat = rotationMatrix(*self.r6[3:]) # get rotation matrix for body
moment_about_body_ref = np.matmul(
rotMat.T, f6[3:]
) # transform moments so that they are about the body's local/rotated axes
f6[3:] = moment_about_body_ref # use these moments
return f6
def getStiffness(self, X=[], tol=0.0001, dx=0.1):
"""Gets the stiffness matrix of a Body due only to mooring lines with all other objects free to equilibriate.
The rotational indicies of the stiffness matrix correspond to the local/rotated axes of the body rather than
the global x/y/z directions.
Parameters
----------
        X : array, optional
            The 6DOF position vector of the Body at which the stiffness matrix is to be calculated.
            If empty (the default), the Body's current position self.r6 is used.
        dx : float, optional
            The change in displacement to be used for calculating the change in force. The default is 0.1.
Returns
-------
K : matrix
            The stiffness matrix of the body at the given position.
"""
# print("Getting Body "+str(self.number)+" stiffness matrix...")
if len(X) == 6:
X1 = np.array(X)
elif len(X) == 0:
X1 = self.r6
else:
raise ValueError("Body.getStiffness expects the optional X parameter to be size 6")
# set this Body's type to fixed so mooring system equilibrium response to its displacements can be found
type0 = self.type # store original type to restore later
self.type = 1 # set type to 1 (not free) so that it won't be adjusted when finding equilibrium
# ensure this Body is positioned at the desired linearization point
self.setPosition(X1) # set position to linearization point
self.sys.solveEquilibrium3(tol=tol) # find equilibrium of mooring system given this Body in current position
f6 = self.getForces(lines_only=True) # get the net 6DOF forces/moments from any attached lines
# Build a stiffness matrix by perturbing each DOF in turn
K = np.zeros([6, 6])
for i in range(len(K)):
X2 = X1 + np.insert(np.zeros(5), i, dx) # calculate perturbed Body position by adding dx to DOF in question
self.setPosition(X2) # perturb this Body's position
self.sys.solveEquilibrium3(tol=tol) # find equilibrium of mooring system given this Body's new position
f6_2 = self.getForces(lines_only=True) # get the net 6DOF forces/moments from any attached lines
K[i, :] = -(f6_2 - f6) / dx # get stiffness in this DOF via finite difference and add to matrix column
# ----------------- restore the system back to previous positions ------------------
self.setPosition(X1) # set position to linearization point
self.sys.solveEquilibrium3(tol=tol) # find equilibrium of mooring system given this Body in current position
self.type = type0 # restore the Body's type to its original value
return K
def getStiffnessA(self, lines_only=False):
"""Gets the analytical stiffness matrix of the Body with other objects fixed.
Returns
-------
K : matrix
6x6 analytic stiffness matrix.
"""
# print("Getting Body "+str(self.number)+" stiffness matrix...")
K = np.zeros([6, 6])
for PointID, rPointRel in zip(self.attachedP, self.rPointRel):
r = rotatePosition(
rPointRel, self.r6[3:]
) # relative position of Point about body ref point in unrotated reference frame
f3 = self.sys.pointList[
PointID - 1
].getForces() # total force on point (for additional rotational stiffness term due to change in moment arm)
K3 = self.sys.pointList[PointID - 1].getStiffnessA() # local 3D stiffness matrix of the point
# following are from functions translateMatrix3to6
H = getH(r)
K[:3, :3] += K3
K[:3, 3:] += np.matmul(
K3, H
) # only add up one off-diagonal sub-matrix for now, then we'll mirror at the end
K[3:, 3:] += np.matmul(np.matmul(H, K3), H.T) + np.matmul(getH(f3), H.T)
K[3:, :3] = K[:3, 3:].T # copy over other off-diagonal sub-matrix
if lines_only == False:
# rotational stiffness effect of weight
rCG_rotated = rotatePosition(
self.rCG, self.r6[3:]
) # relative position of CG about body ref point in unrotated reference frame
Kw = -np.matmul(getH([0, 0, -self.m * self.sys.g]), getH(rCG_rotated))
# rotational stiffness effect of buoyancy at metacenter
rM_rotated = rotatePosition(
self.rM, self.r6[3:]
) # relative position of metacenter about body ref point in unrotated reference frame
Kb = -np.matmul(getH([0, 0, self.sys.rho * self.sys.g * self.v]), getH(rM_rotated))
# hydrostatic heave stiffness (if AWP is nonzero)
Kwp = self.sys.rho * self.sys.g * self.AWP
K[3:, 3:] += Kw + Kb
K[2, 2] += Kwp
return K
def draw(self, ax):
"""Draws the reference axis of the body
Parameters
----------
ax : axes
matplotlib.pyplot axes to be used for drawing and plotting.
Returns
-------
linebit : list
a list to hold plotted lines of the body's frame axes.
"""
linebit = [] # make empty list to hold plotted lines, however many there are
rx = transformPosition(np.array([5, 0, 0]), self.r6)
ry = transformPosition(np.array([0, 5, 0]), self.r6)
rz = transformPosition(np.array([0, 0, 5]), self.r6)
linebit.append(ax.plot([self.r6[0], rx[0]], [self.r6[1], rx[1]], [self.r6[2], rx[2]], color="r"))
linebit.append(ax.plot([self.r6[0], ry[0]], [self.r6[1], ry[1]], [self.r6[2], ry[2]], color="g"))
linebit.append(ax.plot([self.r6[0], rz[0]], [self.r6[1], rz[1]], [self.r6[2], rz[2]], color="b"))
self.linebit = linebit
return linebit
def redraw(self):
"""Redraws the reference axis of the body
Returns
-------
linebit : list
a list to hold redrawn lines of the body's frame axes.
"""
linebit = self.linebit
rx = transformPosition(np.array([5, 0, 0]), self.r6)
ry = transformPosition(np.array([0, 5, 0]), self.r6)
rz = transformPosition(np.array([0, 0, 5]), self.r6)
linebit[0][0].set_data([self.r6[0], rx[0]], [self.r6[1], rx[1]])
linebit[0][0].set_3d_properties([self.r6[2], rx[2]])
linebit[1][0].set_data([self.r6[0], ry[0]], [self.r6[1], ry[1]])
linebit[1][0].set_3d_properties([self.r6[2], ry[2]])
linebit[2][0].set_data([self.r6[0], rz[0]], [self.r6[1], rz[1]])
linebit[2][0].set_3d_properties([self.r6[2], rz[2]])
return linebit
#
```
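A hedged sketch of exercising `Body` on its own. `Body.getForces` only reads `g`, `rho`, `pointList`, and `display` from the owning system, so a minimal stand-in object is enough here; in normal use the body would instead be created through a `moorpy.System`. All numbers are arbitrary.

```python
import numpy as np

from wisdem.moorpy.body import Body


class _StubSystem:
    """Minimal stand-in exposing only the attributes Body reads."""

    g = 9.81  # gravitational acceleration [m/s^2]
    rho = 1025.0  # water density [kg/m^3]
    pointList = []  # no attached points in this sketch
    display = 0


# A free body: 100 t mass, 120 m^3 displaced volume, metacenter 2 m above the reference point
body = Body(_StubSystem(), num=1, type=0, r6=np.zeros(6), m=1.0e5, v=120.0, rM=np.array([0.0, 0.0, 2.0]))
body.setPosition(np.array([0.0, 0.0, -1.0, 0.0, 0.0, 0.0]))  # heave down 1 m

f6 = body.getForces()
print(f6[2])  # net vertical force: buoyancy (rho*g*v) minus weight (m*g); the AWP term is zero here
```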
#### File: wisdem/moorpy/MoorProps.py
```python
import numpy as np
import wisdem.moorpy as mp
def getLineProps(dmm, type="chain", stud="studless", source="Orcaflex-altered", name=""):
"""getLineProps version 3.2: Restructuring v3.1 to 'Orcaflex-original' and 'Orcaflex-altered'
Motivation: The existing public, and NREL-internal references for mooring line component property
data are either proprietary, or unreliable and incomplete. The best way to derive new equations
as a function of diameter is to use data from mooring line manufacturer's catalogs. Once developed,
these new equations will serve as an updated version to compare against existing expressions.
The goal is to have NREL's own library of mooring line property equations, but more research is needed.
The original Orcaflex equations are the best we have right now and have been altered to include
a quadratic chain MBL equation rather than a cubic, to avoid negative MBLs.
    Also, different cost models are put into the altered version to avoid the Equimar data. Many sources were
    researched for cost data; the coefficients used are close to the old NREL internal data, but still an approximation.
For more info, see the Mooring Component Properties Word doc.
- This function requires at least one input: the line diameter in millimeters.
- The rest of the inputs are optional: describe the desired type of line (chain, polyester, wire, etc.),
the type of chain (studless or studlink), the source of data (Orcaflex-original or altered), or a name identifier
- The function will output a MoorPy linetype object
"""
if source == "Orcaflex-original":
d = dmm / 1000 # orcaflex uses meters https://www.orcina.com/webhelp/OrcaFlex/
if type == "chain":
c = 1.96e4 # grade 2=1.37e4; grade 3=1.96e4; ORQ=2.11e4; R4=2.74e4
MBL = c * d ** 2 * (44 - 80 * d) * 1000 # [N] The same for both studless and studlink
if stud == "studless":
massden = 19.9 * d ** 2 * 1000 # [kg/m]
EA = 0.854e8 * d ** 2 * 1000 # [N]
d_vol = 1.8 * d # [m]
elif stud == "studlink" or stud == "stud":
massden = 21.9 * d ** 2 * 1000 # [kg/m]
EA = 1.010e8 * d ** 2 * 1000 # [N]
d_vol = 1.89 * d ** 2 # [m]
else:
raise ValueError("getLineProps error: Choose either studless or stud chain type ")
elif type == "nylon":
massden = 0.6476 * d ** 2 * 1000 # [kg/m]
EA = 1.18e5 * d ** 2 * 1000 # [N]
MBL = 139357 * d ** 2 * 1000 # [N] for wet nylon line, 163950d^2 for dry nylon line
d_vol = 0.85 * d # [m]
elif type == "polyester":
massden = 0.7978 * d ** 2 * 1000 # [kg/m]
EA = 1.09e6 * d ** 2 * 1000 # [N]
MBL = 170466 * d ** 2 * 1000 # [N]
d_vol = 0.86 * d # [m]
elif type == "polypropylene":
massden = 0.4526 * d ** 2 * 1000 # [kg/m]
EA = 1.06e6 * d ** 2 * 1000 # [N]
MBL = 105990 * d ** 2 * 1000 # [N]
d_vol = 0.80 * d # [m]
elif type == "wire-fiber" or type == "fiber":
massden = 3.6109 * d ** 2 * 1000 # [kg/m]
EA = 3.67e7 * d ** 2 * 1000 # [N]
MBL = 584175 * d ** 2 * 1000 # [N]
d_vol = 0.82 * d # [m]
elif type == "wire-wire" or type == "wire" or type == "IWRC":
massden = 3.9897 * d ** 2 * 1000 # [kg/m]
EA = 4.04e7 * d ** 2 * 1000 # [N]
MBL = 633358 * d ** 2 * 1000 # [N]
d_vol = 0.80 * d # [m]
else:
raise ValueError("getLineProps error: Linetype not valid. Choose from given rope types or chain ")
# cost
# Derived from Equimar graph: https://tethys.pnnl.gov/sites/default/files/publications/EquiMar_D7.3.2.pdf
if type == "chain":
cost = (0.21 * (MBL / 9.81 / 1000)) * 1.29 # [$/m]
elif type == "nylon" or type == "polyester" or type == "polypropylene":
cost = (0.235 * (MBL / 9.81 / 1000)) * 1.29 # [$/m]
elif type == "wire" or type == "wire-wire" or type == "IWRC" or type == "fiber" or type == "wire-fiber":
cost = (0.18 * (MBL / 9.81 / 1000) + 90) * 1.29 # [$/m]
else:
raise ValueError("getLineProps error: Linetype not valid. Choose from given rope types or chain ")
elif source == "Orcaflex-altered":
d = dmm / 1000 # orcaflex uses meters https://www.orcina.com/webhelp/OrcaFlex/
if type == "chain":
c = 2.74e4 # grade 2=1.37e4; grade 3=1.96e4; ORQ=2.11e4; R4=2.74e4
MBL = (
(371360 * d ** 2 + 51382.72 * d) * (c / 2.11e4) * 1000
) # this is a fit quadratic term to the cubic MBL equation. No negatives
if stud == "studless":
massden = 19.9 * d ** 2 * 1000 # [kg/m]
EA = 0.854e8 * d ** 2 * 1000 # [N]
d_vol = 1.8 * d # [m]
elif stud == "studlink" or stud == "stud":
massden = 21.9 * d ** 2 * 1000 # [kg/m]
EA = 1.010e8 * d ** 2 * 1000 # [N]
d_vol = 1.89 * d ** 2 # [m]
else:
raise ValueError("getLineProps error: Choose either studless or stud chain type ")
# cost = 2.5*massden # a ballpark for R4 chain
# cost = (0.58*MBL/1000/9.81) - 87.6 # [$/m] from old NREL-internal
# cost = 3.0*massden # rough value similar to old NREL-internal
cost = 2.585 * massden # [($/kg)*(kg/m)=($/m)]
# cost = 0.0
elif type == "nylon":
massden = 0.6476 * d ** 2 * 1000 # [kg/m]
EA = 1.18e5 * d ** 2 * 1000 # [N]
MBL = 139357 * d ** 2 * 1000 # [N] for wet nylon line, 163950d^2 for dry nylon line
d_vol = 0.85 * d # [m]
cost = (0.42059603 * MBL / 1000 / 9.81) + 109.5 # [$/m] from old NREL-internal
elif type == "polyester":
massden = 0.7978 * d ** 2 * 1000 # [kg/m]
EA = 1.09e6 * d ** 2 * 1000 # [N]
MBL = 170466 * d ** 2 * 1000 # [N]
d_vol = 0.86 * d # [m]
# cost = (0.42059603*MBL/1000/9.81) + 109.5 # [$/m] from old NREL-internal
# cost = 1.1e-4*MBL # rough value similar to old NREL-internal
cost = 0.162 * (MBL / 9.81 / 1000) # [$/m]
elif type == "polypropylene":
massden = 0.4526 * d ** 2 * 1000 # [kg/m]
EA = 1.06e6 * d ** 2 * 1000 # [N]
MBL = 105990 * d ** 2 * 1000 # [N]
d_vol = 0.80 * d # [m]
cost = (0.42059603 * MBL / 1000 / 9.81) + 109.5 # [$/m] from old NREL-internal
elif type == "wire-fiber" or type == "fiber":
massden = 3.6109 * d ** 2 * 1000 # [kg/m]
EA = 3.67e7 * d ** 2 * 1000 # [N]
MBL = 584175 * d ** 2 * 1000 # [N]
d_vol = 0.82 * d # [m]
cost = 0.53676471 * MBL / 1000 / 9.81 # [$/m] from old NREL-internal
elif type == "wire-wire" or type == "wire" or type == "IWRC":
massden = 3.9897 * d ** 2 * 1000 # [kg/m]
EA = 4.04e7 * d ** 2 * 1000 # [N]
MBL = 633358 * d ** 2 * 1000 # [N]
d_vol = 0.80 * d # [m]
# cost = MBL * 900./15.0e6
# cost = (0.33*MBL/1000/9.81) + 139.5 # [$/m] from old NREL-internal
cost = 5.6e-5 * MBL # rough value similar to old NREL-internal
else:
raise ValueError("getLineProps error: Linetype not valid. Choose from given rope types or chain ")
elif source == "NREL":
"""
getLineProps v3.1 used to have old NREL-internal equations here as a placeholder, but they were not trustworthy.
        - The chain equations used data from Vicinay which matched OrcaFlex data. The wire rope equations matched OrcaFlex well;
        the synthetic rope equations did not.
The idea is to have NREL's own library of mooring line property equations, but more research needs to be done.
The 'OrcaFlex-altered' source version is a start and can change name to 'NREL' in the future, but it is
still 'OrcaFlex-altered' because most of the equations are from OrcaFlex, which is the best we have right now.
Future equations need to be optimization proof = no negative numbers anywhere (need to write an interpolation function)
Can add new line types as well, such as Aramid or HMPE
"""
pass
# Set up a main identifier for the linetype. Useful for things like "chain_bot" or "chain_top"
if name == "":
typestring = f"{type}{dmm:.0f}"
else:
typestring = name
notes = f"made with getLineProps - source: {source}"
return mp.LineType(typestring, d_vol, massden, EA, MBL=MBL, cost=cost, notes=notes, input_type=type, input_d=dmm)
def getAnchorProps(fx, fz, type="drag-embedment", display=0):
""" Calculates anchor required capacity and cost based on specified loadings and anchor type"""
# for now this is based on API RP-2SK guidance for static analysis of permanent mooring systems
# fx and fz are horizontal and vertical load components assumed to come from a dynamic (or equivalent) analysis.
# mooring line tenLimit specified in yaml and inversed for a SF in constraints
# take the line forces on the anchor and give 20% consideration for dynamic loads on lines
# coefficients in front of fx and fz in each anchorType are the SF for that anchor for quasi-static (pages 30-31 of RP-2SK)
# scale QS loads by 20% to approximate dynamic loads
fx = 1.2 * fx
fz = 1.2 * fz
# note: capacity is measured here in kg force
euros2dollars = 1.18 # the number of dollars there currently are in a euro (3-31-21)
if type == "drag-embedment":
capacity_x = 1.5 * fx / 9.81
fzCost = 0
# if fz > 0:
# fzCost = 1e3*fz
# if display > 0: print('WARNING: Nonzero vertical load specified for a drag embedment anchor.')
anchorMatCost = 0.188 * capacity_x + fzCost # material cost
anchorInstCost = 163548 * euros2dollars # installation cost
anchorDecomCost = 228967 * euros2dollars # decommissioning cost
elif type == "suction":
capacity_x = 1.6 * fx / 9.81
capacity_z = 2.0 * fz / 9.81
capacity = np.linalg.norm([capacity_x, capacity_z]) # overall capacity, assuming in any direction for now
anchorMatCost = 1.08 * capacity # material cost
anchorInstCost = 179331 * euros2dollars # installation cost
anchorDecomCost = 125532 * euros2dollars # decommissioning cost
elif type == "plate":
capacity_x = 2.0 * fx / 9.81
capacity_z = 2.0 * fz / 9.81
capacity = np.linalg.norm([capacity_x, capacity_z]) # overall capacity, assuming in any direction for now
raise ValueError("plate anchors not yet supported")
elif type == "micropile": # note: no API guidance on this one
capacity_x = 2.0 * fx / 9.81
capacity_z = 2.0 * fz / 9.81
capacity = np.linalg.norm([capacity_x, capacity_z]) # overall capacity, assuming in any direction for now
anchorMatCost = (
200000 * 1.2 / 500000
) * capacity # [(Euros*($/Euros)/kg)*kg] linear interpolation of material cost
anchorInstCost = 0 # installation cost
anchorDecomCost = 0 # decommissioning cost
elif type == "SEPLA": # cross between suction and plate
capacity_x = 2.0 * fx / 9.81
capacity_z = 2.0 * fz / 9.81
capacity = np.linalg.norm([capacity_x, capacity_z]) # overall capacity, assuming in any direction for now
anchorMatCost = 0.45 * capacity # material cost
anchorInstCost = 0 # installation cost
anchorDecomCost = 0 # decommissioning cost
else:
raise ValueError(f"getAnchorProps received an unsupported anchor type ({type})")
# mooring line sizing: Tension limit for QS: 50% MBS. Or FOS = 2
return anchorMatCost, anchorInstCost, anchorDecomCost # [USD]
```
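A short usage sketch for the two functions above; the diameter and load values are arbitrary, and the attribute names on the returned object are those set by `mp.LineType`.

```python
from wisdem.moorpy.MoorProps import getAnchorProps, getLineProps

# 120 mm studless chain, using the altered Orcaflex fits
chain = getLineProps(120, type="chain", stud="studless", source="Orcaflex-altered")
print(chain.name)  # "chain120" (auto-generated because no name was passed)

# Suction anchor sized for a 2 MN horizontal / 0.5 MN vertical quasi-static line load
mat, inst, decom = getAnchorProps(2.0e6, 0.5e6, type="suction")
print(mat, inst, decom)  # material, installation, and decommissioning costs [USD]
```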
#### File: wisdem/nrelcsm/nrel_csm_cost_2015.py
```python
import openmdao.api as om
import numpy as np
###### Rotor
# -------------------------------------------------------------------------------
class BladeCost2015(om.ExplicitComponent):
"""
Compute blade cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $14.6 USD/kg.
Cost includes materials and manufacturing costs.
Cost can be overridden with use of `blade_cost_external`
Parameters
----------
blade_mass : float, [kg]
component mass
blade_mass_cost_coeff : float, [USD/kg]
blade mass-cost coeff
blade_cost_external : float, [USD]
Blade cost computed by RotorSE
Returns
-------
blade_cost : float, [USD]
Blade cost
"""
def setup(self):
self.add_input("blade_mass", 0.0, units="kg")
self.add_input("blade_mass_cost_coeff", 14.6, units="USD/kg")
self.add_input("blade_cost_external", 0.0, units="USD")
self.add_output("blade_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
blade_mass = inputs["blade_mass"]
blade_mass_cost_coeff = inputs["blade_mass_cost_coeff"]
# calculate component cost
if inputs["blade_cost_external"] < 1.0:
outputs["blade_cost"] = blade_mass_cost_coeff * blade_mass
else:
outputs["blade_cost"] = inputs["blade_cost_external"]
# -----------------------------------------------------------------------------------------------
class HubCost2015(om.ExplicitComponent):
"""
Compute hub cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $3.9 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
hub_mass : float, [kg]
component mass
hub_mass_cost_coeff : float, [USD/kg]
hub mass-cost coeff
Returns
-------
hub_cost : float, [USD]
Hub cost
"""
def setup(self):
self.add_input("hub_mass", 0.0, units="kg")
self.add_input("hub_mass_cost_coeff", 3.9, units="USD/kg")
self.add_output("hub_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
hub_mass_cost_coeff = inputs["hub_mass_cost_coeff"]
hub_mass = inputs["hub_mass"]
# calculate component cost
outputs["hub_cost"] = hub_mass_cost_coeff * hub_mass
# -------------------------------------------------------------------------------
class PitchSystemCost2015(om.ExplicitComponent):
"""
Compute pitch system cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was NOT updated in 2015 and remains the same as original CSM, $22.1 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
pitch_system_mass : float, [kg]
component mass
pitch_system_mass_cost_coeff : float, [USD/kg]
pitch system mass-cost coeff
Returns
-------
pitch_system_cost : float, [USD]
Pitch system cost
"""
def setup(self):
self.add_input("pitch_system_mass", 0.0, units="kg")
self.add_input("pitch_system_mass_cost_coeff", 22.1, units="USD/kg")
self.add_output("pitch_system_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
pitch_system_mass = inputs["pitch_system_mass"]
pitch_system_mass_cost_coeff = inputs["pitch_system_mass_cost_coeff"]
# calculate system costs
outputs["pitch_system_cost"] = pitch_system_mass_cost_coeff * pitch_system_mass
# -------------------------------------------------------------------------------
class SpinnerCost2015(om.ExplicitComponent):
"""
Compute spinner cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $11.1 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
spinner_mass : float, [kg]
component mass
spinner_mass_cost_coeff : float, [USD/kg]
spinner/nose cone mass-cost coeff
Returns
-------
spinner_cost : float, [USD]
Spinner cost
"""
def setup(self):
self.add_input("spinner_mass", 0.0, units="kg")
self.add_input("spinner_mass_cost_coeff", 11.1, units="USD/kg")
self.add_output("spinner_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
spinner_mass_cost_coeff = inputs["spinner_mass_cost_coeff"]
spinner_mass = inputs["spinner_mass"]
# calculate system costs
outputs["spinner_cost"] = spinner_mass_cost_coeff * spinner_mass
# -------------------------------------------------------------------------------
class HubSystemCostAdder2015(om.ExplicitComponent):
"""
Aggregates the hub, pitch system, and spinner costs into a single component
that is transported to the project site and could therefore incur additional
costs. Cost multipliers are of the form,
:math:`c_{hubsys} = (1+kt_{hubsys}+kp_{hubsys}) (1+ko_{hubsys}+ka_{hubsys})
(c_{hub} + c_{pitch} + c_{spinner})`
Where conceptually, :math:`kt` is a transportation multiplier,
:math:`kp` is a profit multiplier,
:math:`ko` is an overhead cost multiplier, and
:math:`ka` is an assembly cost multiplier
By default, :math:`kt=kp=ko=ka=0`.
Parameters
----------
hub_cost : float, [USD]
Hub component cost
hub_mass : float, [kg]
Hub component mass
pitch_system_cost : float, [USD]
Pitch system cost
pitch_system_mass : float, [kg]
Pitch system mass
spinner_cost : float, [USD]
Spinner component cost
spinner_mass : float, [kg]
Spinner component mass
hub_assemblyCostMultiplier : float
Rotor assembly cost multiplier
hub_overheadCostMultiplier : float
Rotor overhead cost multiplier
hub_profitMultiplier : float
Rotor profit multiplier
hub_transportMultiplier : float
Rotor transport multiplier
Returns
-------
hub_system_mass_tcc : float, [kg]
Mass of the hub system, including hub, spinner, and pitch system for the blades
hub_system_cost : float, [USD]
        Overall wind sub-assembly capital costs including transportation costs
"""
def setup(self):
self.add_input("hub_cost", 0.0, units="USD")
self.add_input("hub_mass", 0.0, units="kg")
self.add_input("pitch_system_cost", 0.0, units="USD")
self.add_input("pitch_system_mass", 0.0, units="kg")
self.add_input("spinner_cost", 0.0, units="USD")
self.add_input("spinner_mass", 0.0, units="kg")
self.add_input("hub_assemblyCostMultiplier", 0.0)
self.add_input("hub_overheadCostMultiplier", 0.0)
self.add_input("hub_profitMultiplier", 0.0)
self.add_input("hub_transportMultiplier", 0.0)
self.add_output("hub_system_mass_tcc", 0.0, units="kg")
self.add_output("hub_system_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
hub_cost = inputs["hub_cost"]
pitch_system_cost = inputs["pitch_system_cost"]
spinner_cost = inputs["spinner_cost"]
hub_mass = inputs["hub_mass"]
pitch_system_mass = inputs["pitch_system_mass"]
spinner_mass = inputs["spinner_mass"]
hub_assemblyCostMultiplier = inputs["hub_assemblyCostMultiplier"]
hub_overheadCostMultiplier = inputs["hub_overheadCostMultiplier"]
hub_profitMultiplier = inputs["hub_profitMultiplier"]
hub_transportMultiplier = inputs["hub_transportMultiplier"]
# Updated calculations below to account for assembly, transport, overhead and profit
outputs["hub_system_mass_tcc"] = hub_mass + pitch_system_mass + spinner_mass
partsCost = hub_cost + pitch_system_cost + spinner_cost
outputs["hub_system_cost"] = (1 + hub_transportMultiplier + hub_profitMultiplier) * (
(1 + hub_overheadCostMultiplier + hub_assemblyCostMultiplier) * partsCost
)
# -------------------------------------------------------------------------------
class RotorCostAdder2015(om.ExplicitComponent):
"""
Sum of individual component costs to get overall rotor cost.
No additional transport and assembly multipliers are included because it is
assumed that each component is transported separately.
Parameters
----------
blade_cost : float, [USD]
Individual blade cost
blade_mass : float, [kg]
Individual blade mass
hub_system_cost : float, [USD]
Cost for hub system
hub_system_mass_tcc : float, [kg]
Mass for hub system
blade_number : int
Number of rotor blades
Returns
-------
rotor_cost : float, [USD]
Rotor cost
rotor_mass_tcc : float, [kg]
Rotor mass, including blades, pitch system, hub, and spinner
"""
def setup(self):
self.add_input("blade_cost", 0.0, units="USD")
self.add_input("blade_mass", 0.0, units="kg")
self.add_input("hub_system_cost", 0.0, units="USD")
self.add_input("hub_system_mass_tcc", 0.0, units="kg")
self.add_discrete_input("blade_number", 3)
self.add_output("rotor_cost", 0.0, units="USD")
self.add_output("rotor_mass_tcc", 0.0, units="kg")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
blade_cost = inputs["blade_cost"]
blade_mass = inputs["blade_mass"]
blade_number = discrete_inputs["blade_number"]
hub_system_cost = inputs["hub_system_cost"]
hub_system_mass = inputs["hub_system_mass_tcc"]
outputs["rotor_cost"] = blade_cost * blade_number + hub_system_cost
outputs["rotor_mass_tcc"] = blade_mass * blade_number + hub_system_mass
# -------------------------------------------------------------------------------
###### Nacelle
# -------------------------------------------------
class LowSpeedShaftCost2015(om.ExplicitComponent):
"""
Compute low speed shaft cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $11.9 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
lss_mass : float, [kg]
component mass
lss_mass_cost_coeff : float, [USD/kg]
low speed shaft mass-cost coeff
Returns
-------
lss_cost : float, [USD]
Low speed shaft cost
"""
def setup(self):
self.add_input("lss_mass", 0.0, units="kg") # mass input
self.add_input("lss_mass_cost_coeff", 11.9, units="USD/kg")
self.add_output("lss_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
lss_mass_cost_coeff = inputs["lss_mass_cost_coeff"]
lss_mass = inputs["lss_mass"]
outputs["lss_cost"] = lss_mass_cost_coeff * lss_mass
# -------------------------------------------------------------------------------
class BearingCost2015(om.ExplicitComponent):
"""
Compute (single) main bearing cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $4.5 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
main_bearing_mass : float, [kg]
component mass
bearing_mass_cost_coeff : float, [USD/kg]
main bearing mass-cost coeff
Returns
-------
main_bearing_cost : float, [USD]
Main bearing cost
"""
def setup(self):
self.add_input("main_bearing_mass", 0.0, units="kg") # mass input
self.add_input("bearing_mass_cost_coeff", 4.5, units="USD/kg")
self.add_output("main_bearing_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
main_bearing_mass = inputs["main_bearing_mass"]
bearing_mass_cost_coeff = inputs["bearing_mass_cost_coeff"]
outputs["main_bearing_cost"] = bearing_mass_cost_coeff * main_bearing_mass
# -------------------------------------------------------------------------------
class GearboxCost2015(om.ExplicitComponent):
"""
Compute gearbox cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $12.9 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
gearbox_mass : float, [kg]
component mass
gearbox_mass_cost_coeff : float, [USD/kg]
gearbox mass-cost coeff
Returns
-------
gearbox_cost : float, [USD]
Gearbox cost
"""
def setup(self):
self.add_input("gearbox_mass", 0.0, units="kg")
self.add_input("gearbox_mass_cost_coeff", 12.9, units="USD/kg")
self.add_output("gearbox_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
gearbox_mass = inputs["gearbox_mass"]
gearbox_mass_cost_coeff = inputs["gearbox_mass_cost_coeff"]
outputs["gearbox_cost"] = gearbox_mass_cost_coeff * gearbox_mass
# -------------------------------------------------------------------------------
class BrakeCost2020(om.ExplicitComponent):
"""
Compute brake cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2020 to be $3.6254 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
brake_mass : float, [kg]
component mass
brake_mass_cost_coeff : float, [USD/kg]
brake mass-cost coeff
Returns
-------
brake_cost : float, [USD]
Brake cost
"""
def setup(self):
self.add_input("brake_mass", 0.0, units="kg")
self.add_input("brake_mass_cost_coeff", 3.6254, units="USD/kg")
self.add_output("brake_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
brake_mass = inputs["brake_mass"]
brake_mass_cost_coeff = inputs["brake_mass_cost_coeff"]
outputs["brake_cost"] = brake_mass_cost_coeff * brake_mass
# -------------------------------------------------------------------------------
class HighSpeedShaftCost2015(om.ExplicitComponent):
"""
Compute high speed shaft cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $6.8 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
hss_mass : float, [kg]
component mass
hss_mass_cost_coeff : float, [USD/kg]
high speed shaft mass-cost coeff
Returns
-------
hss_cost : float, [USD]
High speed shaft cost
"""
def setup(self):
self.add_input("hss_mass", 0.0, units="kg")
self.add_input("hss_mass_cost_coeff", 6.8, units="USD/kg")
self.add_output("hss_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
hss_mass = inputs["hss_mass"]
hss_mass_cost_coeff = inputs["hss_mass_cost_coeff"]
outputs["hss_cost"] = hss_mass_cost_coeff * hss_mass
# -------------------------------------------------------------------------------
class GeneratorCost2015(om.ExplicitComponent):
"""
Compute generator cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $12.4 USD/kg.
Cost includes materials and manufacturing costs.
Cost can be overridden with use of `generator_cost_external`
Parameters
----------
generator_mass : float, [kg]
component mass
generator_mass_cost_coeff : float, [USD/kg]
generator mass cost coeff
generator_cost_external : float, [USD]
Generator cost computed by GeneratorSE
Returns
-------
generator_cost : float, [USD]
Generator cost
"""
def setup(self):
self.add_input("generator_mass", 0.0, units="kg")
self.add_input("generator_mass_cost_coeff", 12.4, units="USD/kg")
self.add_input("generator_cost_external", 0.0, units="USD")
self.add_output("generator_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
generator_mass = inputs["generator_mass"]
generator_mass_cost_coeff = inputs["generator_mass_cost_coeff"]
if inputs["generator_cost_external"] < 1.0:
outputs["generator_cost"] = generator_mass_cost_coeff * generator_mass
else:
outputs["generator_cost"] = inputs["generator_cost_external"]
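# Note: values of generator_cost_external below 1 USD are treated as "not
# provided", so the mass-based estimate is used unless GeneratorSE (or the
# user) supplies a meaningful external cost.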
# -------------------------------------------------------------------------------
class BedplateCost2015(om.ExplicitComponent):
"""
Compute bedplate cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $2.9 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
bedplate_mass : float, [kg]
component mass
bedplate_mass_cost_coeff : float, [USD/kg]
bedplate mass-cost coeff
Returns
-------
bedplate_cost : float, [USD]
Bedplate cost
"""
def setup(self):
self.add_input("bedplate_mass", 0.0, units="kg")
self.add_input("bedplate_mass_cost_coeff", 2.9, units="USD/kg")
self.add_output("bedplate_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
bedplate_mass = inputs["bedplate_mass"]
bedplate_mass_cost_coeff = inputs["bedplate_mass_cost_coeff"]
outputs["bedplate_cost"] = bedplate_mass_cost_coeff * bedplate_mass
# ---------------------------------------------------------------------------------
class YawSystemCost2015(om.ExplicitComponent):
"""
Compute yaw system cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was NOT updated in 2015 and remains the same as original CSM, $8.3 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
yaw_mass : float, [kg]
component mass
yaw_mass_cost_coeff : float, [USD/kg]
yaw system mass cost coeff
Returns
-------
yaw_system_cost : float, [USD]
Yaw system cost
"""
def setup(self):
self.add_input("yaw_mass", 0.0, units="kg")
self.add_input("yaw_mass_cost_coeff", 8.3, units="USD/kg")
self.add_output("yaw_system_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
yaw_mass = inputs["yaw_mass"]
yaw_mass_cost_coeff = inputs["yaw_mass_cost_coeff"]
outputs["yaw_system_cost"] = yaw_mass_cost_coeff * yaw_mass
# ---------------------------------------------------------------------------------
class ConverterCost2015(om.ExplicitComponent):
"""
Compute converter cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $18.8 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
converter_mass : float, [kg]
component mass
converter_mass_cost_coeff : float, [USD/kg]
variable speed electronics mass cost coeff
Returns
-------
converter_cost : float, [USD]
Converter cost
"""
def setup(self):
self.add_input("converter_mass", 0.0, units="kg")
self.add_input("converter_mass_cost_coeff", 18.8, units="USD/kg")
self.add_output("converter_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
converter_mass = inputs["converter_mass"]
converter_mass_cost_coeff = inputs["converter_mass_cost_coeff"]
outputs["converter_cost"] = converter_mass_cost_coeff * converter_mass
# ---------------------------------------------------------------------------------
class HydraulicCoolingCost2015(om.ExplicitComponent):
"""
Compute hydraulic cooling cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was NOT updated in 2015 and remains the same as original CSM, $124.0 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
hvac_mass : float, [kg]
component mass
hvac_mass_cost_coeff : float, [USD/kg]
hydraulic and cooling system mass cost coeff
Returns
-------
hvac_cost : float, [USD]
HVAC cost
"""
def setup(self):
self.add_input("hvac_mass", 0.0, units="kg")
self.add_input("hvac_mass_cost_coeff", 124.0, units="USD/kg")
self.add_output("hvac_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
hvac_mass = inputs["hvac_mass"]
hvac_mass_cost_coeff = inputs["hvac_mass_cost_coeff"]
# calculate cost
outputs["hvac_cost"] = hvac_mass_cost_coeff * hvac_mass
# ---------------------------------------------------------------------------------
class NacelleCoverCost2015(om.ExplicitComponent):
"""
Compute nacelle cover cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was NOT updated in 2015 and remains the same as original CSM, $5.7 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
cover_mass : float, [kg]
component mass
cover_mass_cost_coeff : float, [USD/kg]
nacelle cover mass cost coeff
Returns
-------
cover_cost : float, [USD]
Cover cost
"""
def setup(self):
self.add_input("cover_mass", 0.0, units="kg")
self.add_input("cover_mass_cost_coeff", 5.7, units="USD/kg")
self.add_output("cover_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
cover_mass = inputs["cover_mass"]
cover_mass_cost_coeff = inputs["cover_mass_cost_coeff"]
outputs["cover_cost"] = cover_mass_cost_coeff * cover_mass
# ---------------------------------------------------------------------------------
class ElecConnecCost2015(om.ExplicitComponent):
"""
Compute electrical connection cost in the form of :math:`cost = k*rating`.
Value of :math:`k` was NOT updated in 2015 and remains the same as original CSM, $41.85 USD/kW.
Cost includes materials and manufacturing costs.
Parameters
----------
machine_rating : float, [kW]
machine rating
elec_connec_machine_rating_cost_coeff : float, [USD/kW]
electrical connections cost coefficient per kW
Returns
-------
elec_cost : float, [USD]
Electrical connection costs
"""
def setup(self):
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("elec_connec_machine_rating_cost_coeff", 41.85, units="USD/kW")
self.add_output("elec_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
machine_rating = inputs["machine_rating"]
elec_connec_machine_rating_cost_coeff = inputs["elec_connec_machine_rating_cost_coeff"]
outputs["elec_cost"] = elec_connec_machine_rating_cost_coeff * machine_rating
# ---------------------------------------------------------------------------------
class ControlsCost2015(om.ExplicitComponent):
"""
Compute controls cost in the form of :math:`cost = k*rating`.
Value of :math:`k` was NOT updated in 2015 and remains the same as original CSM, $21.15 USD/kW.
Cost includes materials and manufacturing costs.
Parameters
----------
machine_rating : float, [kW]
machine rating
controls_machine_rating_cost_coeff : float, [USD/kW]
controls cost coefficient per kW
Returns
-------
controls_cost : float, [USD]
Controls cost
"""
def setup(self):
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("controls_machine_rating_cost_coeff", 21.15, units="USD/kW")
self.add_output("controls_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
machine_rating = inputs["machine_rating"]
coeff = inputs["controls_machine_rating_cost_coeff"]
outputs["controls_cost"] = machine_rating * coeff
# ---------------------------------------------------------------------------------
class PlatformsMainframeCost2015(om.ExplicitComponent):
"""
Compute platforms and mainframe cost in the form of :math:`cost = k*mass`, plus a fixed crane cost when an onboard crane is present.
Value of :math:`k` was NOT updated in 2015 and remains the same as original CSM, $17.1 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
platforms_mass : float, [kg]
component mass
platforms_mass_cost_coeff : float, [USD/kg]
nacelle platforms mass cost coeff
crane : boolean
flag for presence of onboard crane
crane_cost : float, [USD]
crane cost if present
Returns
-------
platforms_cost : float, [USD]
Platforms cost
"""
def setup(self):
self.add_input("platforms_mass", 0.0, units="kg")
self.add_input("platforms_mass_cost_coeff", 17.1, units="USD/kg")
self.add_discrete_input("crane", False)
self.add_input("crane_cost", 12000.0, units="USD")
self.add_output("platforms_cost", 0.0, units="USD")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
platforms_mass = inputs["platforms_mass"]
platforms_mass_cost_coeff = inputs["platforms_mass_cost_coeff"]
crane = discrete_inputs["crane"]
crane_cost = inputs["crane_cost"]
# bedplate_cost = inputs['bedplate_cost']
# base_hardware_cost_coeff = inputs['base_hardware_cost_coeff']
# nacelle platform cost
# crane cost
if crane:
craneCost = crane_cost
craneMass = 3e3
NacellePlatformsCost = platforms_mass_cost_coeff * (platforms_mass - craneMass)
else:
craneCost = 0.0
NacellePlatformsCost = platforms_mass_cost_coeff * platforms_mass
# base hardware cost
# BaseHardwareCost = bedplate_cost * base_hardware_cost_coeff
# aggregate all three mainframe costs
outputs["platforms_cost"] = NacellePlatformsCost + craneCost # + BaseHardwareCost
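# Worked example (illustrative numbers): platforms_mass = 8000 kg with an
# onboard crane gives 17.1 * (8000 - 3000) + 12000 = 97.5e3 USD; the assumed
# 3000 kg crane mass is excluded from the per-kg platforms cost because the
# crane is priced separately as a fixed cost.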
# -------------------------------------------------------------------------------
class TransformerCost2015(om.ExplicitComponent):
"""
Compute transformer cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $18.8 USD/kg.
Cost includes materials and manufacturing costs.
Parameters
----------
transformer_mass : float, [kg]
component mass
transformer_mass_cost_coeff : float, [USD/kg]
transformer mass cost coeff
Returns
-------
transformer_cost : float, [USD]
Transformer cost
"""
def setup(self):
self.add_input("transformer_mass", 0.0, units="kg")
self.add_input("transformer_mass_cost_coeff", 18.8, units="USD/kg") # mass-cost coeff with default from ppt
self.add_output("transformer_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
transformer_mass = inputs["transformer_mass"]
transformer_mass_cost_coeff = inputs["transformer_mass_cost_coeff"]
outputs["transformer_cost"] = transformer_mass_cost_coeff * transformer_mass
# -------------------------------------------------------------------------------
class NacelleSystemCostAdder2015(om.ExplicitComponent):
"""
Aggregates the nacelle system costs into a single component
that is transported to the project site and could therefore incur additional
costs. Cost multipliers are of the form,
:math:`c_{nacellesys} = (1+kt_{nacelle}+kp_{nacelle}) (1+ko_{nacelle}+ka_{nacelle}) c_{nacelle}`
Where conceptually, :math:`kt` is a transportation multiplier,
:math:`kp` is a profit multiplier,
:math:`ko` is an overhead cost multiplier, and
:math:`ka` is an assembly cost multiplier
By default, :math:`kt=kp=ko=ka=0`.
Parameters
----------
lss_cost : float, [USD]
Component cost
lss_mass : float, [kg]
Component mass
main_bearing_cost : float, [USD]
Component cost
main_bearing_mass : float, [kg]
Component mass
gearbox_cost : float, [USD]
Component cost
gearbox_mass : float, [kg]
Component mass
brake_cost : float, [USD]
Component cost
brake_mass : float, [kg]
Component mass
hss_cost : float, [USD]
Component cost
hss_mass : float, [kg]
Component mass
generator_cost : float, [USD]
Component cost
generator_mass : float, [kg]
Component mass
bedplate_cost : float, [USD]
Component cost
bedplate_mass : float, [kg]
Component mass
yaw_system_cost : float, [USD]
Component cost
yaw_mass : float, [kg]
Component mass
converter_cost : float, [USD]
Component cost
converter_mass : float, [kg]
Component mass
hvac_cost : float, [USD]
Component cost
hvac_mass : float, [kg]
Component mass
cover_cost : float, [USD]
Component cost
cover_mass : float, [kg]
Component mass
elec_cost : float, [USD]
Component cost
controls_cost : float, [USD]
Component cost
platforms_mass : float, [kg]
Component mass
platforms_cost : float, [USD]
Component cost
transformer_cost : float, [USD]
Component cost
transformer_mass : float, [kg]
Component mass
main_bearing_number : int
number of bearings
nacelle_assemblyCostMultiplier : float
nacelle assembly cost multiplier
nacelle_overheadCostMultiplier : float
nacelle overhead cost multiplier
nacelle_profitMultiplier : float
nacelle profit multiplier
nacelle_transportMultiplier : float
nacelle transport multiplier
Returns
-------
nacelle_cost : float, [USD]
component cost
nacelle_mass_tcc : float, [kg]
Nacelle mass, with all nacelle components, without the rotor
"""
def setup(self):
self.add_input("lss_cost", 0.0, units="USD")
self.add_input("lss_mass", 0.0, units="kg")
self.add_input("main_bearing_cost", 0.0, units="USD")
self.add_input("main_bearing_mass", 0.0, units="kg")
self.add_input("gearbox_cost", 0.0, units="USD")
self.add_input("gearbox_mass", 0.0, units="kg")
self.add_input("hss_cost", 0.0, units="USD")
self.add_input("hss_mass", 0.0, units="kg")
self.add_input("brake_cost", 0.0, units="USD")
self.add_input("brake_mass", 0.0, units="kg")
self.add_input("generator_cost", 0.0, units="USD")
self.add_input("generator_mass", 0.0, units="kg")
self.add_input("bedplate_cost", 0.0, units="USD")
self.add_input("bedplate_mass", 0.0, units="kg")
self.add_input("yaw_system_cost", 0.0, units="USD")
self.add_input("yaw_mass", 0.0, units="kg")
self.add_input("converter_cost", 0.0, units="USD")
self.add_input("converter_mass", 0.0, units="kg")
self.add_input("hvac_cost", 0.0, units="USD")
self.add_input("hvac_mass", 0.0, units="kg")
self.add_input("cover_cost", 0.0, units="USD")
self.add_input("cover_mass", 0.0, units="kg")
self.add_input("elec_cost", 0.0, units="USD")
self.add_input("controls_cost", 0.0, units="USD")
self.add_input("platforms_mass", 0.0, units="kg")
self.add_input("platforms_cost", 0.0, units="USD")
self.add_input("transformer_cost", 0.0, units="USD")
self.add_input("transformer_mass", 0.0, units="kg")
self.add_discrete_input("main_bearing_number", 2)
# multipliers
self.add_input("nacelle_assemblyCostMultiplier", 0.0)
self.add_input("nacelle_overheadCostMultiplier", 0.0)
self.add_input("nacelle_profitMultiplier", 0.0)
self.add_input("nacelle_transportMultiplier", 0.0)
self.add_output("nacelle_cost", 0.0, units="USD")
self.add_output("nacelle_mass_tcc", 0.0, units="kg")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
lss_cost = inputs["lss_cost"]
main_bearing_cost = inputs["main_bearing_cost"]
gearbox_cost = inputs["gearbox_cost"]
hss_cost = inputs["hss_cost"]
brake_cost = inputs["brake_cost"]
generator_cost = inputs["generator_cost"]
bedplate_cost = inputs["bedplate_cost"]
yaw_system_cost = inputs["yaw_system_cost"]
converter_cost = inputs["converter_cost"]
hvac_cost = inputs["hvac_cost"]
cover_cost = inputs["cover_cost"]
elec_cost = inputs["elec_cost"]
controls_cost = inputs["controls_cost"]
platforms_cost = inputs["platforms_cost"]
transformer_cost = inputs["transformer_cost"]
lss_mass = inputs["lss_mass"]
main_bearing_mass = inputs["main_bearing_mass"]
gearbox_mass = inputs["gearbox_mass"]
hss_mass = inputs["hss_mass"]
brake_mass = inputs["brake_mass"]
generator_mass = inputs["generator_mass"]
bedplate_mass = inputs["bedplate_mass"]
yaw_mass = inputs["yaw_mass"]
converter_mass = inputs["converter_mass"]
hvac_mass = inputs["hvac_mass"]
cover_mass = inputs["cover_mass"]
platforms_mass = inputs["platforms_mass"]
transformer_mass = inputs["transformer_mass"]
main_bearing_number = discrete_inputs["main_bearing_number"]
nacelle_assemblyCostMultiplier = inputs["nacelle_assemblyCostMultiplier"]
nacelle_overheadCostMultiplier = inputs["nacelle_overheadCostMultiplier"]
nacelle_profitMultiplier = inputs["nacelle_profitMultiplier"]
nacelle_transportMultiplier = inputs["nacelle_transportMultiplier"]
# apply multipliers for assembly, transport, overhead, and profits
outputs["nacelle_mass_tcc"] = (
lss_mass
+ main_bearing_number * main_bearing_mass
+ gearbox_mass
+ hss_mass
+ brake_mass
+ generator_mass
+ bedplate_mass
+ yaw_mass
+ converter_mass
+ hvac_mass
+ cover_mass
+ platforms_mass
+ transformer_mass
)
partsCost = (
lss_cost
+ main_bearing_number * main_bearing_cost
+ gearbox_cost
+ hss_cost
+ brake_cost
+ generator_cost
+ bedplate_cost
+ yaw_system_cost
+ converter_cost
+ hvac_cost
+ cover_cost
+ elec_cost
+ controls_cost
+ platforms_cost
+ transformer_cost
)
outputs["nacelle_cost"] = (1 + nacelle_transportMultiplier + nacelle_profitMultiplier) * (
(1 + nacelle_overheadCostMultiplier + nacelle_assemblyCostMultiplier) * partsCost
)
###### Tower
# -------------------------------------------------------------------------------
class TowerCost2015(om.ExplicitComponent):
"""
Compute tower cost in the form of :math:`cost = k*mass`.
Value of :math:`k` was updated in 2015 to be $2.9 USD/kg.
Cost includes materials and manufacturing costs.
Cost can be overridden with use of `tower_cost_external`
Parameters
----------
tower_mass : float, [kg]
tower mass
tower_mass_cost_coeff : float, [USD/kg]
tower mass-cost coeff
tower_cost_external : float, [USD]
Tower cost computed by TowerSE
Returns
-------
tower_parts_cost : float, [USD]
Tower parts cost
"""
def setup(self):
self.add_input("tower_mass", 0.0, units="kg")
self.add_input("tower_mass_cost_coeff", 2.9, units="USD/kg")
self.add_input("tower_cost_external", 0.0, units="USD")
self.add_output("tower_parts_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
tower_mass = inputs["tower_mass"]
tower_mass_cost_coeff = inputs["tower_mass_cost_coeff"]
# calculate component cost
if inputs["tower_cost_external"] < 1.0:
outputs["tower_parts_cost"] = tower_mass_cost_coeff * tower_mass
else:
outputs["tower_parts_cost"] = inputs["tower_cost_external"]
if outputs["tower_parts_cost"] == 0.0:
print(
"Warning: turbine_costsse_2015.py : TowerCost2015.compute : No tower mass provided. Assuming $0 for tower cost, LCOE will be artificially low."
)
# -------------------------------------------------------------------------------
class TowerCostAdder2015(om.ExplicitComponent):
"""
The tower is not aggregated with any other component, but for consistency
there are allowances for additional costs incurred from transportation and
assembly complexity,
:math:`c_{towersys} = (1+kt_{tower}+kp_{tower}) (1+ko_{tower}+ka_{tower}) c_{tower}`
Where conceptually, :math:`kt` is a transportation multiplier,
:math:`kp` is a profit multiplier,
:math:`ko` is an overhead cost multiplier, and
:math:`ka` is an assembly cost multiplier
By default, :math:`kt=kp=ko=ka=0`.
Parameters
----------
tower_parts_cost : float, [USD]
component cost
tower_assemblyCostMultiplier : float
tower assembly cost multiplier
tower_overheadCostMultiplier : float
tower overhead cost multiplier
tower_profitMultiplier : float
tower profit cost multiplier
tower_transportMultiplier : float
tower transport cost multiplier
Returns
-------
tower_cost : float, [USD]
tower cost
"""
def setup(self):
self.add_input("tower_parts_cost", 0.0, units="USD")
self.add_input("tower_assemblyCostMultiplier", 0.0)
self.add_input("tower_overheadCostMultiplier", 0.0)
self.add_input("tower_profitMultiplier", 0.0)
self.add_input("tower_transportMultiplier", 0.0)
self.add_output("tower_cost", 0.0, units="USD")
def compute(self, inputs, outputs):
tower_parts_cost = inputs["tower_parts_cost"]
tower_assemblyCostMultiplier = inputs["tower_assemblyCostMultiplier"]
tower_overheadCostMultiplier = inputs["tower_overheadCostMultiplier"]
tower_profitMultiplier = inputs["tower_profitMultiplier"]
tower_transportMultiplier = inputs["tower_transportMultiplier"]
partsCost = tower_parts_cost
outputs["tower_cost"] = (1 + tower_transportMultiplier + tower_profitMultiplier) * (
(1 + tower_overheadCostMultiplier + tower_assemblyCostMultiplier) * partsCost
)
# -------------------------------------------------------------------------------
class TurbineCostAdder2015(om.ExplicitComponent):
"""
Aggregates the turbine system costs into a single value with allowances for
additional costs incurred from transportation and assembly complexity. Costs
are reported per kW. Cost multipliers are of the form,
:math:`c_{turbine} = (1+kt_{turbine}+kp_{turbine}) (1+ko_{turbine}+ka_{turbine})
(c_{rotor} + c_{nacelle} + c_{tower})`
Where conceptually, :math:`kt` is a transportation multiplier,
:math:`kp` is a profit multiplier,
:math:`ko` is an overhead cost multiplier, and
:math:`ka` is an assembly cost multiplier
By default, :math:`kt=kp=ko=ka=0`.
Parameters
----------
rotor_cost : float, [USD]
Rotor cost
rotor_mass_tcc : float, [kg]
Rotor mass
nacelle_cost : float, [USD]
Nacelle cost
nacelle_mass_tcc : float, [kg]
Nacelle mass
tower_cost : float, [USD]
Tower cost
tower_mass : float, [kg]
Tower mass
machine_rating : float
Machine rating
turbine_assemblyCostMultiplier : float
Turbine multiplier for assembly cost in manufacturing
turbine_overheadCostMultiplier : float
Turbine multiplier for overhead
turbine_profitMultiplier : float
Turbine multiplier for profit markup
turbine_transportMultiplier : float
Turbine multiplier for transport costs
Returns
-------
turbine_mass_tcc : float, [kg]
Turbine total mass, without foundation
turbine_cost : float, [USD]
Overall wind turbine capital costs including transportation costs
turbine_cost_kW : float, [USD/kW]
Overall wind turbine capital costs per kW, including transportation costs
"""
def setup(self):
self.add_input("rotor_cost", 0.0, units="USD")
self.add_input("rotor_mass_tcc", 0.0, units="kg")
self.add_input("nacelle_cost", 0.0, units="USD")
self.add_input("nacelle_mass_tcc", 0.0, units="kg")
self.add_input("tower_cost", 0.0, units="USD")
self.add_input("tower_mass", 0.0, units="kg")
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("turbine_assemblyCostMultiplier", 0.0)
self.add_input("turbine_overheadCostMultiplier", 0.0)
self.add_input("turbine_profitMultiplier", 0.0)
self.add_input("turbine_transportMultiplier", 0.0)
self.add_output("turbine_mass_tcc", 0.0, units="kg")
self.add_output("turbine_cost", 0.0, units="USD")
self.add_output("turbine_cost_kW", 0.0, units="USD/kW")
def compute(self, inputs, outputs):
rotor_cost = inputs["rotor_cost"]
nacelle_cost = inputs["nacelle_cost"]
tower_cost = inputs["tower_cost"]
rotor_mass_tcc = inputs["rotor_mass_tcc"]
nacelle_mass_tcc = inputs["nacelle_mass_tcc"]
tower_mass = inputs["tower_mass"]
turbine_assemblyCostMultiplier = inputs["turbine_assemblyCostMultiplier"]
turbine_overheadCostMultiplier = inputs["turbine_overheadCostMultiplier"]
turbine_profitMultiplier = inputs["turbine_profitMultiplier"]
turbine_transportMultiplier = inputs["turbine_transportMultiplier"]
partsCost = rotor_cost + nacelle_cost + tower_cost
outputs["turbine_mass_tcc"] = rotor_mass_tcc + nacelle_mass_tcc + tower_mass
outputs["turbine_cost"] = (1 + turbine_transportMultiplier + turbine_profitMultiplier) * (
(1 + turbine_overheadCostMultiplier + turbine_assemblyCostMultiplier) * partsCost
)
outputs["turbine_cost_kW"] = outputs["turbine_cost"] / inputs["machine_rating"]
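# Worked example (illustrative numbers): rotor, nacelle, and tower costs of
# 1.5e6, 2.0e6, and 0.7e6 USD give partsCost = 4.2e6 USD. With the default
# multipliers of zero and machine_rating = 3000 kW, turbine_cost = 4.2e6 USD
# and turbine_cost_kW = 1400 USD/kW.
# -------------------------------------------------------------------------------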
class Outputs2Screen(om.ExplicitComponent):
"""
Print cost outputs to the terminal
Parameters
----------
blade_cost : float, [USD]
blade cost
blade_mass : float, [kg]
Blade mass
hub_cost : float, [USD]
hub cost
hub_mass : float, [kg]
Hub mass
pitch_system_cost : float, [USD]
pitch_system cost
pitch_system_mass : float, [kg]
Pitch system mass
spinner_cost : float, [USD]
spinner cost
spinner_mass : float, [kg]
Spinner mass
lss_cost : float, [USD]
lss cost
lss_mass : float, [kg]
LSS mass
main_bearing_cost : float, [USD]
main_bearing cost
main_bearing_mass : float, [kg]
Main bearing mass
gearbox_cost : float, [USD]
gearbox cost
gearbox_mass : float, [kg]
Gearbox mass
hss_cost : float, [USD]
hss cost
hss_mass : float, [kg]
HSS mass
brake_cost : float, [USD]
brake cost
brake_mass : float, [kg]
brake mass
generator_cost : float, [USD]
generator cost
generator_mass : float, [kg]
Generator mass
bedplate_cost : float, [USD]
bedplate cost
bedplate_mass : float, [kg]
Bedplate mass
yaw_system_cost : float, [USD]
yaw_system cost
yaw_mass : float, [kg]
Yaw system mass
hvac_cost : float, [USD]
hvac cost
hvac_mass : float, [kg]
HVAC mass
cover_cost : float, [USD]
cover cost
cover_mass : float, [kg]
Cover mass
elec_cost : float, [USD]
elec cost
controls_cost : float, [USD]
controls cost
platforms_cost : float, [USD]
platforms cost
transformer_cost : float, [USD]
transformer cost
transformer_mass : float, [kg]
Transformer mass
converter_cost : float, [USD]
converter cost
converter_mass : float, [kg]
Converter mass
rotor_cost : float, [USD]
rotor cost
rotor_mass_tcc : float
Rotor mass
nacelle_cost : float, [USD]
nacelle cost
nacelle_mass_tcc : float
Nacelle mass
tower_cost : float, [USD]
tower cost
tower_mass : float, [kg]
Tower mass
turbine_cost : float, [USD]
Overall turbine costs
turbine_cost_kW : float
Overall wind turbine capital costs including transportation costs per kW
turbine_mass_tcc : float
Turbine mass
"""
def initialize(self):
self.options.declare("verbosity", default=False)
def setup(self):
self.add_input("blade_cost", 0.0, units="USD")
self.add_input("blade_mass", 0.0, units="kg")
self.add_input("hub_cost", 0.0, units="USD")
self.add_input("hub_mass", 0.0, units="kg")
self.add_input("pitch_system_cost", 0.0, units="USD")
self.add_input("pitch_system_mass", 0.0, units="kg")
self.add_input("spinner_cost", 0.0, units="USD")
self.add_input("spinner_mass", 0.0, units="kg")
self.add_input("lss_cost", 0.0, units="USD")
self.add_input("lss_mass", 0.0, units="kg")
self.add_input("main_bearing_cost", 0.0, units="USD")
self.add_input("main_bearing_mass", 0.0, units="kg")
self.add_input("gearbox_cost", 0.0, units="USD")
self.add_input("gearbox_mass", 0.0, units="kg")
self.add_input("hss_cost", 0.0, units="USD")
self.add_input("hss_mass", 0.0, units="kg")
self.add_input("brake_cost", 0.0, units="USD")
self.add_input("brake_mass", 0.0, units="kg")
self.add_input("generator_cost", 0.0, units="USD")
self.add_input("generator_mass", 0.0, units="kg")
self.add_input("bedplate_cost", 0.0, units="USD")
self.add_input("bedplate_mass", 0.0, units="kg")
self.add_input("yaw_system_cost", 0.0, units="USD")
self.add_input("yaw_mass", 0.0, units="kg")
self.add_input("hvac_cost", 0.0, units="USD")
self.add_input("hvac_mass", 0.0, units="kg")
self.add_input("cover_cost", 0.0, units="USD")
self.add_input("cover_mass", 0.0, units="kg")
self.add_input("elec_cost", 0.0, units="USD")
self.add_input("controls_cost", 0.0, units="USD")
self.add_input("platforms_cost", 0.0, units="USD")
self.add_input("transformer_cost", 0.0, units="USD")
self.add_input("transformer_mass", 0.0, units="kg")
self.add_input("converter_cost", 0.0, units="USD")
self.add_input("converter_mass", 0.0, units="kg")
self.add_input("rotor_cost", 0.0, units="USD")
self.add_input("rotor_mass_tcc", 0.0, units="kg")
self.add_input("nacelle_cost", 0.0, units="USD")
self.add_input("nacelle_mass_tcc", 0.0, units="kg")
self.add_input("tower_cost", 0.0, units="USD")
self.add_input("tower_mass", 0.0, units="kg")
self.add_input("turbine_cost", 0.0, units="USD")
self.add_input("turbine_cost_kW", 0.0, units="USD/kW")
self.add_input("turbine_mass_tcc", 0.0, units="kg")
def compute(self, inputs, outputs):
if self.options["verbosity"]:
print("################################################")
print("Computation of costs of the main turbine components from TurbineCostSE")
print(
"Blade cost %.3f k USD mass %.3f kg"
% (inputs["blade_cost"] * 1.0e-003, inputs["blade_mass"])
)
print(
"Pitch system cost %.3f k USD mass %.3f kg"
% (inputs["pitch_system_cost"] * 1.0e-003, inputs["pitch_system_mass"])
)
print(
"Hub cost %.3f k USD mass %.3f kg"
% (inputs["hub_cost"] * 1.0e-003, inputs["hub_mass"])
)
print(
"Spinner cost %.3f k USD mass %.3f kg"
% (inputs["spinner_cost"] * 1.0e-003, inputs["spinner_mass"])
)
print("------------------------------------------------")
print(
"Rotor cost %.3f k USD mass %.3f kg"
% (inputs["rotor_cost"] * 1.0e-003, inputs["rotor_mass_tcc"])
)
print("")
print(
"LSS cost %.3f k USD mass %.3f kg"
% (inputs["lss_cost"] * 1.0e-003, inputs["lss_mass"])
)
print(
"Main bearing cost %.3f k USD mass %.3f kg"
% (inputs["main_bearing_cost"] * 1.0e-003, inputs["main_bearing_mass"])
)
print(
"Gearbox cost %.3f k USD mass %.3f kg"
% (inputs["gearbox_cost"] * 1.0e-003, inputs["gearbox_mass"])
)
print(
"HSS cost %.3f k USD mass %.3f kg"
% (inputs["hss_cost"] * 1.0e-003, inputs["hss_mass"])
)
print(
"Brake cost %.3f k USD mass %.3f kg"
% (inputs["brake_cost"] * 1.0e-003, inputs["brake_mass"])
)
print(
"Generator cost %.3f k USD mass %.3f kg"
% (inputs["generator_cost"] * 1.0e-003, inputs["generator_mass"])
)
print(
"Bedplate cost %.3f k USD mass %.3f kg"
% (inputs["bedplate_cost"] * 1.0e-003, inputs["bedplate_mass"])
)
print(
"Yaw system cost %.3f k USD mass %.3f kg"
% (inputs["yaw_system_cost"] * 1.0e-003, inputs["yaw_mass"])
)
print(
"HVAC cost %.3f k USD mass %.3f kg"
% (inputs["hvac_cost"] * 1.0e-003, inputs["hvac_mass"])
)
print(
"Nacelle cover cost %.3f k USD mass %.3f kg"
% (inputs["cover_cost"] * 1.0e-003, inputs["cover_mass"])
)
print("Electr connection cost %.3f k USD" % (inputs["elec_cost"] * 1.0e-003))
print("Controls cost %.3f k USD" % (inputs["controls_cost"] * 1.0e-003))
print("Other main frame cost %.3f k USD" % (inputs["platforms_cost"] * 1.0e-003))
print(
"Transformer cost %.3f k USD mass %.3f kg"
% (inputs["transformer_cost"] * 1.0e-003, inputs["transformer_mass"])
)
print(
"Converter cost %.3f k USD mass %.3f kg"
% (inputs["converter_cost"] * 1.0e-003, inputs["converter_mass"])
)
print("------------------------------------------------")
print(
"Nacelle cost %.3f k USD mass %.3f kg"
% (inputs["nacelle_cost"] * 1.0e-003, inputs["nacelle_mass_tcc"])
)
print("")
print(
"Tower cost %.3f k USD mass %.3f kg"
% (inputs["tower_cost"] * 1.0e-003, inputs["tower_mass"])
)
print("------------------------------------------------")
print("------------------------------------------------")
print(
"Turbine cost %.3f k USD mass %.3f kg"
% (inputs["turbine_cost"] * 1.0e-003, inputs["turbine_mass_tcc"])
)
print("Turbine cost per kW %.3f k USD/kW" % inputs["turbine_cost_kW"])
print("################################################")
# -------------------------------------------------------------------------------
class Turbine_CostsSE_2015(om.Group):
"""
Assembles the complete 2015 turbine capital cost model: the component cost models, the hub, rotor, nacelle, tower, and turbine-level cost adders, and the optional terminal output.
"""
def initialize(self):
self.options.declare("verbosity", default=False)
def setup(self):
self.verbosity = self.options["verbosity"]
self.set_input_defaults("blade_mass_cost_coeff", units="USD/kg", val=14.6)
self.set_input_defaults("hub_mass_cost_coeff", units="USD/kg", val=3.9)
self.set_input_defaults("pitch_system_mass_cost_coeff", units="USD/kg", val=22.1)
self.set_input_defaults("spinner_mass_cost_coeff", units="USD/kg", val=11.1)
self.set_input_defaults("lss_mass_cost_coeff", units="USD/kg", val=11.9)
self.set_input_defaults("bearing_mass_cost_coeff", units="USD/kg", val=4.5)
self.set_input_defaults("gearbox_mass_cost_coeff", units="USD/kg", val=12.9)
self.set_input_defaults("hss_mass_cost_coeff", units="USD/kg", val=6.8)
self.set_input_defaults("brake_mass_cost_coeff", units="USD/kg", val=3.6254)
self.set_input_defaults("generator_mass_cost_coeff", units="USD/kg", val=12.4)
self.set_input_defaults("bedplate_mass_cost_coeff", units="USD/kg", val=2.9)
self.set_input_defaults("yaw_mass_cost_coeff", units="USD/kg", val=8.3)
self.set_input_defaults("converter_mass_cost_coeff", units="USD/kg", val=18.8)
self.set_input_defaults("transformer_mass_cost_coeff", units="USD/kg", val=18.8)
self.set_input_defaults("hvac_mass_cost_coeff", units="USD/kg", val=124.0)
self.set_input_defaults("cover_mass_cost_coeff", units="USD/kg", val=5.7)
self.set_input_defaults("elec_connec_machine_rating_cost_coeff", units="USD/kW", val=41.85)
self.set_input_defaults("platforms_mass_cost_coeff", units="USD/kg", val=17.1)
self.set_input_defaults("tower_mass_cost_coeff", units="USD/kg", val=2.9)
self.set_input_defaults("controls_machine_rating_cost_coeff", units="USD/kW", val=21.15)
self.set_input_defaults("crane_cost", units="USD", val=12e3)
self.set_input_defaults("hub_assemblyCostMultiplier", val=0.0)
self.set_input_defaults("hub_overheadCostMultiplier", val=0.0)
self.set_input_defaults("nacelle_assemblyCostMultiplier", val=0.0)
self.set_input_defaults("nacelle_overheadCostMultiplier", val=0.0)
self.set_input_defaults("tower_assemblyCostMultiplier", val=0.0)
self.set_input_defaults("tower_overheadCostMultiplier", val=0.0)
self.set_input_defaults("turbine_assemblyCostMultiplier", val=0.0)
self.set_input_defaults("turbine_overheadCostMultiplier", val=0.0)
self.set_input_defaults("hub_profitMultiplier", val=0.0)
self.set_input_defaults("nacelle_profitMultiplier", val=0.0)
self.set_input_defaults("tower_profitMultiplier", val=0.0)
self.set_input_defaults("turbine_profitMultiplier", val=0.0)
self.set_input_defaults("hub_transportMultiplier", val=0.0)
self.set_input_defaults("nacelle_transportMultiplier", val=0.0)
self.set_input_defaults("tower_transportMultiplier", val=0.0)
self.set_input_defaults("turbine_transportMultiplier", val=0.0)
self.add_subsystem("blade_c", BladeCost2015(), promotes=["*"])
self.add_subsystem("hub_c", HubCost2015(), promotes=["*"])
self.add_subsystem("pitch_c", PitchSystemCost2015(), promotes=["*"])
self.add_subsystem("spinner_c", SpinnerCost2015(), promotes=["*"])
self.add_subsystem("hub_adder", HubSystemCostAdder2015(), promotes=["*"])
self.add_subsystem("rotor_adder", RotorCostAdder2015(), promotes=["*"])
self.add_subsystem("lss_c", LowSpeedShaftCost2015(), promotes=["*"])
self.add_subsystem("bearing_c", BearingCost2015(), promotes=["*"])
self.add_subsystem("gearbox_c", GearboxCost2015(), promotes=["*"])
self.add_subsystem("hss_c", HighSpeedShaftCost2015(), promotes=["*"])
self.add_subsystem("brake_c", BrakeCost2020(), promotes=["*"])
self.add_subsystem("generator_c", GeneratorCost2015(), promotes=["*"])
self.add_subsystem("bedplate_c", BedplateCost2015(), promotes=["*"])
self.add_subsystem("yaw_c", YawSystemCost2015(), promotes=["*"])
self.add_subsystem("hvac_c", HydraulicCoolingCost2015(), promotes=["*"])
self.add_subsystem("controls_c", ControlsCost2015(), promotes=["*"])
self.add_subsystem("converter_c", ConverterCost2015(), promotes=["*"])
self.add_subsystem("elec_c", ElecConnecCost2015(), promotes=["*"])
self.add_subsystem("cover_c", NacelleCoverCost2015(), promotes=["*"])
self.add_subsystem("platforms_c", PlatformsMainframeCost2015(), promotes=["*"])
self.add_subsystem("transformer_c", TransformerCost2015(), promotes=["*"])
self.add_subsystem("nacelle_adder", NacelleSystemCostAdder2015(), promotes=["*"])
self.add_subsystem("tower_c", TowerCost2015(), promotes=["*"])
self.add_subsystem("tower_adder", TowerCostAdder2015(), promotes=["*"])
self.add_subsystem("turbine_c", TurbineCostAdder2015(), promotes=["*"])
self.add_subsystem("outputs", Outputs2Screen(verbosity=self.verbosity), promotes=["*"])
```
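A minimal usage sketch for the group above, not part of the source files: the import path is inferred from the module name in the `TowerCost2015` warning string, and all masses and the rating are illustrative placeholders rather than validated turbine data.

```python
import openmdao.api as om

# Import path inferred from the warning message in TowerCost2015 (assumption)
from wisdem.turbine_costsse.turbine_costsse_2015 import Turbine_CostsSE_2015

prob = om.Problem(model=Turbine_CostsSE_2015(verbosity=True))
prob.setup()

# Illustrative component masses (kg) and rating (kW) -- placeholders only
prob["blade_mass"] = 17e3
prob["hub_mass"] = 30e3
prob["pitch_system_mass"] = 10e3
prob["spinner_mass"] = 1.5e3
prob["lss_mass"] = 20e3
prob["main_bearing_mass"] = 3e3
prob["gearbox_mass"] = 30e3
prob["hss_mass"] = 1e3
prob["brake_mass"] = 1e3
prob["generator_mass"] = 15e3
prob["bedplate_mass"] = 40e3
prob["yaw_mass"] = 10e3
prob["converter_mass"] = 4e3
prob["hvac_mass"] = 0.5e3
prob["cover_mass"] = 5e3
prob["platforms_mass"] = 8e3
prob["transformer_mass"] = 10e3
prob["tower_mass"] = 300e3
prob["machine_rating"] = 5000.0

prob.run_model()
print(prob["turbine_cost"], prob["turbine_cost_kW"])
```

The discrete inputs (`blade_number`, `main_bearing_number`, `crane`) keep their declared defaults of 3, 2, and False unless overridden.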
#### File: phases/design/semi_submersible_design.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from wisdem.orbit.phases.design import DesignPhase
class SemiSubmersibleDesign(DesignPhase):
"""Semi-Submersible Substructure Design"""
expected_config = {
"site": {"depth": "m"},
"plant": {"num_turbines": "int"},
"turbine": {"turbine_rating": "MW"},
"semisubmersible_design": {
"stiffened_column_CR": "$/t (optional, default: 3120)",
"truss_CR": "$/t (optional, default: 6250)",
"heave_plate_CR": "$/t (optional, default: 6250)",
"secondary_steel_CR": "$/t (optional, default: 7250)",
"towing_speed": "km/h (optional, default: 6)",
},
}
output_config = {
"substructure": {
"mass": "t",
"unit_cost": "USD",
"towing_speed": "km/h",
}
}
def __init__(self, config, **kwargs):
"""
Creates an instance of `SemiSubmersibleDesign`.
Parameters
----------
config : dict
"""
config = self.initialize_library(config, **kwargs)
self.config = self.validate_config(config)
self._design = self.config.get("semisubmersible_design", {})
self._outputs = {}
def run(self):
"""Main run function."""
substructure = {
"mass": self.substructure_mass,
"unit_cost": self.substructure_cost,
"towing_speed": self._design.get("towing_speed", 6),
}
self._outputs["substructure"] = substructure
@property
def stiffened_column_mass(self):
"""
Calculates the mass of the stiffened column for a single
semi-submersible in tonnes. From original OffshoreBOS model.
"""
rating = self.config["turbine"]["turbine_rating"]
mass = -0.9581 * rating ** 2 + 40.89 * rating + 802.09
return mass
@property
def stiffened_column_cost(self):
"""
Calculates the cost of the stiffened column for a single
semi-submersible. From original OffshoreBOS model.
"""
cr = self._design.get("stiffened_column_CR", 3120)
return self.stiffened_column_mass * cr
@property
def truss_mass(self):
"""
Calculates the truss mass for a single semi-submersible in tonnes. From
original OffshoreBOS model.
"""
rating = self.config["turbine"]["turbine_rating"]
mass = 2.7894 * rating ** 2 + 15.591 * rating + 266.03
return mass
@property
def truss_cost(self):
"""
Calculates the cost of the truss for a single semi-submersible. From
original OffshoreBOS model.
"""
cr = self._design.get("truss_CR", 6250)
return self.truss_mass * cr
@property
def heave_plate_mass(self):
"""
Calculates the heave plate mass for a single semi-submersible in tonnes.
From original OffshoreBOS model.
"""
rating = self.config["turbine"]["turbine_rating"]
mass = -0.4397 * rating ** 2 + 21.545 * rating + 177.42
return mass
@property
def heave_plate_cost(self):
"""
Calculates the heave plate cost for a single semi-submersible. From
original OffshoreBOS model.
"""
cr = self._design.get("heave_plate_CR", 6250)
return self.heave_plate_mass * cr
@property
def secondary_steel_mass(self):
"""
Calculates the mass of the required secondary steel for a single
semi-submersible in tonnes. From original OffshoreBOS model.
"""
rating = self.config["turbine"]["turbine_rating"]
mass = -0.153 * rating ** 2 + 6.54 * rating + 128.34
return mass
@property
def secondary_steel_cost(self):
"""
Calculates the cost of the required secondary steel for a single
semi-submersible. From original OffshoreBOS model.
"""
cr = self._design.get("secondary_steel_CR", 7250)
return self.secondary_steel_mass * cr
@property
def substructure_mass(self):
"""Returns single substructure mass."""
return self.stiffened_column_mass + self.truss_mass + self.heave_plate_mass + self.secondary_steel_mass
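# Worked example: for a 6 MW turbine the curve fits above give roughly
# 1012.9 t (stiffened column) + 460.0 t (truss) + 290.9 t (heave plate)
# + 162.1 t (secondary steel) ≈ 1925.9 t per substructure.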
@property
def substructure_cost(self):
"""Returns single substructure cost."""
return self.stiffened_column_cost + self.truss_cost + self.heave_plate_cost + self.secondary_steel_cost
@property
def total_substructure_mass(self):
"""Returns mass of all substructures."""
num = self.config["plant"]["num_turbines"]
return num * self.substructure_mass
@property
def design_result(self):
"""Returns the result of `self.run()`"""
if not self._outputs:
raise Exception("Has `SemiSubmersibleDesign` been run yet?")
return self._outputs
@property
def total_cost(self):
"""Returns total phase cost in $USD."""
num = self.config["plant"]["num_turbines"]
return num * self.substructure_cost
@property
def detailed_output(self):
"""Returns detailed phase information."""
_outputs = {
"stiffened_column_mass": self.stiffened_column_mass,
"stiffened_column_cost": self.stiffened_column_cost,
"truss_mass": self.truss_mass,
"truss_cost": self.truss_cost,
"heave_plate_mass": self.heave_plate_mass,
"heave_plate_cost": self.heave_plate_cost,
"secondary_steel_mass": self.secondary_steel_mass,
"secondary_steel_cost": self.secondary_steel_cost,
}
return _outputs
```
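A minimal usage sketch for the design phase above, under two assumptions: `SemiSubmersibleDesign` is importable from `wisdem.orbit.phases.design` as in the file's own import, and the inherited `initialize_library`/`validate_config` methods from `DesignPhase` accept this config without extra library setup.

```python
from wisdem.orbit.phases.design import SemiSubmersibleDesign

config = {
    "site": {"depth": 200},            # m
    "plant": {"num_turbines": 50},
    "turbine": {"turbine_rating": 6},  # MW
    # "semisubmersible_design" is optional; the defaults noted in
    # expected_config (e.g. stiffened_column_CR = 3120 $/t) apply.
}

phase = SemiSubmersibleDesign(config)
phase.run()

print(phase.design_result)    # {"substructure": {"mass": ..., "unit_cost": ..., "towing_speed": 6}}
print(phase.total_cost)       # num_turbines * substructure_cost, in USD
print(phase.detailed_output)  # per-component masses and costs
```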
#### File: test/test_ccblade/test_polar.py
```python
import unittest
from math import pi
import numpy as np
from wisdem.ccblade.Polar import Polar, blend
class TestBlend(unittest.TestCase):
def setUp(self):
alpha = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl = [
-0.071,
0.044,
0.144,
0.241,
0.338,
0.435,
0.535,
0.632,
0.728,
0.813,
0.883,
0.946,
1.001,
1.054,
1.056,
1.095,
1.138,
1.114,
1.073,
1.008,
0.95,
0.902,
0.795,
0.797,
0.8,
]
cd = [
0.0122,
0.0106,
0.0114,
0.0134,
0.0136,
0.014,
0.0147,
0.0156,
0.0162,
0.0173,
0.0191,
0.0215,
0.0248,
0.0339,
0.0544,
0.0452,
0.0445,
0.067,
0.0748,
0.1028,
0.1473,
0.2819,
0.2819,
0.2819,
0.3,
]
cm = [
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1068,
-0.0981,
-0.0894,
-0.0807,
]
Re = 1
self.polar1 = Polar(Re, alpha, cl, cd, cm)
alpha = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.189,
16.17,
17.14,
18.06,
19.06,
20.07,
21.08,
22.09,
23.1,
25,
]
cl = [
-0.0852,
0.0528,
0.1728,
0.2892,
0.4056,
0.522,
0.642,
0.7584,
0.8736,
0.9756,
1.0596,
1.1352,
1.2012,
1.2648,
1.2672,
1.314,
1.3656,
1.3368,
1.2876,
1.2096,
1.14,
1.0824,
0.954,
0.9564,
1,
1.2,
1.4,
1.6,
]
cd = [
0.01464,
0.01272,
0.01368,
0.01608,
0.01632,
0.0168,
0.01764,
0.01872,
0.01944,
0.02076,
0.02292,
0.0258,
0.02976,
0.04068,
0.06528,
0.05424,
0.0534,
0.0804,
0.08976,
0.12336,
0.17676,
0.33828,
0.33828,
0.33828,
0.35,
0.4,
0.45,
0.5,
]
cm = [
-0.0037,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1068,
-0.0981,
-0.0894,
-0.0807,
-0.072,
-0.0633,
]
self.polar2 = Polar(Re, alpha, cl, cd, cm)
def test_blend1(self):
polar3 = blend(self.polar1, self.polar2, 0.5)
alpha_blend = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl_blend = [
-0.078,
0.048,
0.158,
0.265,
0.372,
0.479,
0.589,
0.695,
0.801,
0.894,
0.971,
1.041,
1.101,
1.159,
1.162,
1.205,
1.252,
1.225,
1.181,
1.109,
1.045,
0.992,
0.875,
0.877,
1.200,
]
cd_blend = [
0.0134,
0.0117,
0.0125,
0.0147,
0.0150,
0.0154,
0.0162,
0.0172,
0.0178,
0.0190,
0.0210,
0.0237,
0.0273,
0.0373,
0.0598,
0.0497,
0.0490,
0.0737,
0.0822,
0.1131,
0.1620,
0.3101,
0.3101,
0.3101,
0.4000,
]
cm_blend = [
-0.00405,
-0.00475,
-0.00165,
-0.0099,
-0.0249,
-0.0314,
-0.03755,
-0.043,
-0.0481,
-0.04555,
-0.03625,
-0.0301,
-0.02825,
-0.0303,
-0.03415,
-0.0362,
-0.0378,
-0.03955,
-0.06905,
-0.11125,
-0.11985,
-0.11115,
-0.10245,
-0.09375,
-0.072,
]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend2(self):
polar3 = blend(self.polar1, self.polar2, 0.7)
alpha_blend = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl_blend = [
-0.081,
0.050,
0.164,
0.275,
0.385,
0.496,
0.610,
0.720,
0.830,
0.927,
1.007,
1.078,
1.141,
1.202,
1.204,
1.248,
1.297,
1.270,
1.224,
1.149,
1.083,
1.028,
0.906,
0.909,
1.360,
]
cd_blend = [
0.0139,
0.0121,
0.0130,
0.0153,
0.0155,
0.0160,
0.0168,
0.0178,
0.0185,
0.0197,
0.0218,
0.0245,
0.0283,
0.0386,
0.0620,
0.0515,
0.0507,
0.0764,
0.0852,
0.1172,
0.1679,
0.3214,
0.3214,
0.3214,
0.4400,
]
cm_blend = [
-0.00391,
-0.00461,
-0.00303,
-0.00522,
-0.02358,
-0.03012,
-0.03637,
-0.042,
-0.04706,
-0.04761,
-0.03791,
-0.0309,
-0.02819,
-0.02954,
-0.03337,
-0.03616,
-0.0372,
-0.03945,
-0.057347,
-0.10607,
-0.12159,
-0.11289,
-0.10419,
-0.09549,
-0.06852,
]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend3(self):
polar3 = blend(self.polar1, self.polar2, 0.2)
alpha_blend = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl_blend = [
-0.074,
0.046,
0.150,
0.251,
0.352,
0.452,
0.556,
0.657,
0.757,
0.846,
0.918,
0.984,
1.041,
1.096,
1.098,
1.139,
1.184,
1.159,
1.116,
1.048,
0.988,
0.938,
0.827,
0.829,
0.960,
]
cd_blend = [
0.0127,
0.0110,
0.0119,
0.0139,
0.0141,
0.0146,
0.0153,
0.0162,
0.0168,
0.0180,
0.0199,
0.0224,
0.0258,
0.0353,
0.0566,
0.0470,
0.0463,
0.0697,
0.0778,
0.1069,
0.1532,
0.2932,
0.2932,
0.2932,
0.3400,
]
cm_blend = [
-0.00426,
-0.00496,
0.00042,
-0.01692,
-0.02688,
-0.03332,
-0.03932,
-0.0445,
-0.04966,
-0.04246,
-0.03376,
-0.0289,
-0.02834,
-0.03144,
-0.03532,
-0.03626,
-0.0387,
-0.0397,
-0.0866,
-0.11902,
-0.11724,
-0.10854,
-0.09984,
-0.09114,
-0.07722,
]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
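# As the three cases above exercise, blend(p1, p2, w) linearly interpolates
# between the two polars: each blended coefficient equals (1 - w)*p1 + w*p2
# at a shared angle of attack (e.g., at alpha = -3.04 deg with w = 0.5,
# cl = 0.5*(-0.071) + 0.5*(-0.0852) ≈ -0.078).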
class Test3DStall(unittest.TestCase):
def setUp(self):
alpha = [
-9.000,
-8.000,
-7.000,
-6.000,
-5.000,
-4.000,
-3.000,
-2.000,
-1.000,
0.000,
1.000,
2.000,
3.000,
4.000,
5.000,
6.000,
7.000,
8.000,
9.000,
10.000,
11.000,
12.000,
13.000,
14.000,
15.000,
16.000,
17.000,
18.000,
19.000,
20.000,
30.000,
40.000,
50.000,
]
cl = [
-0.802,
-0.721,
-0.611,
-0.506,
-0.408,
-0.313,
-0.220,
-0.133,
-0.060,
0.036,
0.227,
0.342,
0.436,
0.556,
0.692,
0.715,
0.761,
0.830,
0.893,
0.954,
1.013,
1.042,
1.061,
1.083,
1.078,
0.882,
0.811,
0.793,
0.793,
0.798,
0.772,
0.757,
0.700,
]
cd = [
0.027,
0.025,
0.024,
0.023,
0.022,
0.022,
0.023,
0.025,
0.027,
0.028,
0.024,
0.019,
0.017,
0.015,
0.017,
0.019,
0.021,
0.024,
0.027,
0.031,
0.037,
0.046,
0.058,
0.074,
0.088,
0.101,
0.114,
0.128,
0.142,
0.155,
0.321,
0.525,
0.742,
]
cm = [
-0.0037,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1068,
-0.0981,
-0.0894,
-0.0807,
-0.072,
-0.0633,
-0.054,
-0.045,
-0.036,
-0.22,
-0.13,
]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_stall1(self):
R = 2.4
r = 0.25 * R
chord = 0.18
Omega = 200 * pi / 30
Uinf = 10.0
tsr = Omega * R / Uinf
newpolar = self.polar.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [-0.84628298, -0.75228154, -0.64170322, -0.53398298, -0.43026406,
-0.32825998, -0.22739914, -0.12996799, -0.04053948, 0.06203622,
0.21891545, 0.33235184, 0.4337843 , 0.55007878, 0.67551912,
0.73636683, 0.81036171, 0.89750377, 0.98121612, 1.06378525,
1.14521114, 1.20948854, 1.26804979, 1.32832588, 1.328 ,
1.132 , 1.061 , 1.043 , 1.043 , 1.048 ,
0.9595 , 0.8195 , 0.7]
cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
0.128, 0.142, 0.155, 0.321, 0.525, 0.742]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3, rtol=1e-3)
def test_stall2(self):
R = 2.4
r = 0.75 * R
chord = 0.28
Omega = 200 * pi / 30
Uinf = 14.0
tsr = Omega * R / Uinf
newpolar = self.polar.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [-0.81312305, -0.72885733, -0.61871207, -0.5130288 , -0.41359231,
-0.31683302, -0.22185852, -0.13223842, -0.05511188, 0.04253981,
0.22496931, 0.33957657, 0.43544346, 0.5545127 , 0.68786031,
0.72036695, 0.77339873, 0.84695567, 0.91515823, 0.98157599,
1.04620895, 1.08406997, 1.113007 , 1.14462124, 1.15214072,
0.98921218, 0.93783339, 0.9337517 , 0.94573318, 0.96217664,
0.9595 , 0.8195 , 0.7]
cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
0.128, 0.142, 0.155, 0.321, 0.525, 0.742]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
def test_stall3(self):
R = 5.0
r = 0.5 * R
chord = 0.5
Omega = 100 * pi / 30
Uinf = 10.0
tsr = Omega * R / Uinf
newpolar = self.polar.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [-0.82374342, -0.73635957, -0.62607561, -0.51973994, -0.41893189,
-0.32049281, -0.22363306, -0.13151125, -0.05044467, 0.04878406,
0.2230304 , 0.33726265, 0.43491207, 0.55309262, 0.68390771,
0.72549134, 0.78523713, 0.86314507, 0.93631506, 1.00790573,
1.07791708, 1.12423867, 1.16266366, 1.20345763, 1.22293081,
1.09157913, 1.05893482, 1.043 , 1.043 , 1.048 ,
0.9595 , 0.8195 , 0.7]
cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
0.128, 0.142, 0.155, 0.321, 0.525, 0.742 ]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
def test_stall4_cm(self):
R = 5.0
r = 0.5 * R
chord = 0.5
Omega = 100 * pi / 30
Uinf = 10.0
tsr = Omega * R / Uinf
newpolar = self.polar2.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [-0.82374342, -0.73635957, -0.62607561, -0.51973994, -0.41893189,
-0.32049281, -0.22363306, -0.13151125, -0.05044467, 0.04878406,
0.2230304 , 0.33726265, 0.43491207, 0.55309262, 0.68390771,
0.72549134, 0.78523713, 0.86314507, 0.93631506, 1.00790573,
1.07791708, 1.12423867, 1.16266366, 1.20345763, 1.22293081,
1.09157913, 1.05893482, 1.043 , 1.043 , 1.048 ,
0.9595 , 0.8195 , 0.7]
cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
0.128, 0.142, 0.155, 0.321, 0.525, 0.742]
cm_zeros = np.zeros(len(cd_3d))
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cm, cm_zeros, atol=1e-3)
class TestExtrap(unittest.TestCase):
def setUp(self):
alpha = [
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
]
cl = [
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
]
cd = [
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
]
cm = [
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_extrap1(self):
cdmax = 1.29
newpolar = self.polar.extrapolate(cdmax=cdmax)
alpha_extrap = [
-180,
-170,
-160,
-150,
-140,
-130,
-120,
-110,
-100,
-90,
-80,
-70,
-60,
-50,
-40,
-30,
-20,
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
30,
40,
50,
60,
70,
80,
90,
100,
110,
120,
130,
140,
150,
160,
170,
180,
]
cl_extrap = [
0.0000,
0.2299,
0.4597,
0.4907,
0.5053,
0.4805,
0.4102,
0.2985,
0.1565,
0.0000,
-0.1565,
-0.2985,
-0.4102,
-0.4805,
-0.5053,
-0.4907,
-0.4637,
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
0.7010,
0.7219,
0.6864,
0.5860,
0.4264,
0.2235,
0.0000,
-0.1565,
-0.2985,
-0.4102,
-0.4805,
-0.5053,
-0.4907,
-0.4597,
-0.2299,
0.0000,
]
cd_extrap = [
0.1770,
0.2132,
0.3173,
0.4758,
0.6686,
0.8708,
1.0560,
1.1996,
1.2818,
1.2900,
1.2818,
1.1996,
1.0560,
0.8708,
0.6686,
0.4758,
0.3158,
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
0.4758,
0.6686,
0.8708,
1.0560,
1.1996,
1.2818,
1.2900,
1.2818,
1.1996,
1.0560,
0.8708,
0.6686,
0.4758,
0.3173,
0.2132,
0.1770,
]
cm_extrap = [
0.0000,
0.4000,
0.2431,
0.2568,
0.2865,
0.3185,
0.3458,
0.3632,
0.3672,
0.3559,
0.3443,
0.3182,
0.2808,
0.2362,
0.1886,
0.1414,
0.0942,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1710,
-0.2202,
-0.2637,
-0.3002,
-0.3284,
-0.3471,
-0.3559,
-0.3672,
-0.3632,
-0.3458,
-0.3185,
-0.2865,
-0.2568,
-0.2431,
-0.5000,
0.0000,
]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
def test_extrap2(self):
cdmax = 1.0
newpolar = self.polar.extrapolate(cdmax=cdmax)
alpha_extrap = [
-180,
-170,
-160,
-150,
-140,
-130,
-120,
-110,
-100,
-90,
-80,
-70,
-60,
-50,
-40,
-30,
-20,
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
30,
40,
50,
60,
70,
80,
90,
100,
110,
120,
130,
140,
150,
160,
170,
180,
]
cl_extrap = [
0.0000,
0.2299,
0.4597,
0.4411,
0.4287,
0.3943,
0.3297,
0.2364,
0.1225,
0.0000,
-0.1225,
-0.2364,
-0.3297,
-0.3943,
-0.4287,
-0.4411,
-0.4637,
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
0.6302,
0.6124,
0.5633,
0.4710,
0.3378,
0.1750,
0.0000,
-0.1225,
-0.2364,
-0.3297,
-0.3943,
-0.4287,
-0.4411,
-0.4597,
-0.2299,
0.0000,
]
cd_extrap = [
0.2135,
0.2404,
0.3176,
0.4349,
0.5767,
0.7241,
0.8568,
0.9560,
1.0069,
1.0000,
1.0069,
0.9560,
0.8568,
0.7241,
0.5767,
0.4349,
0.3158,
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
0.4349,
0.5767,
0.7241,
0.8568,
0.9560,
1.0069,
1.0000,
1.0069,
0.9560,
0.8568,
0.7241,
0.5767,
0.4349,
0.3176,
0.2404,
0.2135,
]
cm_extrap = [
0.0000,
0.4000,
0.2432,
0.2354,
0.2500,
0.2695,
0.2864,
0.2961,
0.2956,
0.2834,
0.2776,
0.2603,
0.2337,
0.2013,
0.1663,
0.1310,
0.0942,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1577,
-0.1930,
-0.2239,
-0.2494,
-0.2683,
-0.2798,
-0.2834,
-0.2956,
-0.2961,
-0.2864,
-0.2695,
-0.2500,
-0.2354,
-0.2432,
-0.5000,
0.0000,
]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
def test_extrap3(self):
cdmax = 1.5
newpolar = self.polar.extrapolate(cdmax)
alpha_extrap = [
-180,
-170,
-160,
-150,
-140,
-130,
-120,
-110,
-100,
-90,
-80,
-70,
-60,
-50,
-40,
-30,
-20,
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
30,
40,
50,
60,
70,
80,
90,
100,
110,
120,
130,
140,
150,
160,
170,
180,
]
cl_extrap = [
0.0000,
0.2299,
0.4597,
0.5266,
0.5608,
0.5429,
0.4685,
0.3434,
0.1810,
0.0000,
-0.1810,
-0.3434,
-0.4685,
-0.5429,
-0.5608,
-0.5266,
-0.4637,
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
0.7523,
0.8012,
0.7756,
0.6693,
0.4906,
0.2586,
0.0000,
-0.1810,
-0.3434,
-0.4685,
-0.5429,
-0.5608,
-0.5266,
-0.4597,
-0.2299,
0.0000,
]
cd_extrap = [
0.1506,
0.1936,
0.3170,
0.5054,
0.7351,
0.9771,
1.2003,
1.3760,
1.4809,
1.5000,
1.4809,
1.3760,
1.2003,
0.9771,
0.7351,
0.5054,
0.3158,
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
0.5054,
0.7351,
0.9771,
1.2003,
1.3760,
1.4809,
1.5000,
1.4809,
1.3760,
1.2003,
0.9771,
0.7351,
0.5054,
0.3170,
0.1936,
0.1506,
]
cm_extrap = [
0.0000,
0.4000,
0.2431,
0.2723,
0.3130,
0.3540,
0.3888,
0.4118,
0.4190,
0.4084,
0.3926,
0.3602,
0.3148,
0.2614,
0.2049,
0.1488,
0.0942,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1807,
-0.2399,
-0.2925,
-0.3370,
-0.3719,
-0.3959,
-0.4084,
-0.4190,
-0.4118,
-0.3888,
-0.3540,
-0.3130,
-0.2723,
-0.2431,
-0.5000,
0.0000,
]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
class TestMisc(unittest.TestCase):
def setUp(self):
alpha = [
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
]
cl = [
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
]
cd = [
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
]
cm = [
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_unsteady(self):
alpha0, alpha1, alpha2, cnSlope, cn1, cn2, cd0, cm0 = self.polar.unsteadyParams()
np.testing.assert_allclose(alpha0, -0.32307692307692304)
np.testing.assert_allclose(alpha1, 9.260783831245934)
np.testing.assert_allclose(alpha2, -6.779334979177289)
np.testing.assert_allclose(cnSlope, 6.4380618436681765)
np.testing.assert_allclose(cn1, 0.9201540372961516)
np.testing.assert_allclose(cn2, -0.6377683435797556)
np.testing.assert_allclose(cd0, 0.012142307692307694)
np.testing.assert_allclose(cm0, -0.03336923076923077)
def test_fully_separated(self):
cl_fs, f_st = self.polar.cl_fully_separated()
cl_fs_ref = np.array(
[
-0.63,
-0.42017185,
-0.35815607,
-0.23440711,
-0.11213462,
0.02669872,
0.15,
0.2815297,
0.41432191,
0.51685242,
0.60852946,
0.6464375,
0.68202361,
0.7299095,
0.76769179,
0.8037866,
0.82370687,
0.81723832,
0.78926905,
0.69419819,
0.65999953,
]
)
f_st_ref = np.array(
[
0.00000000e00,
2.34199688e-01,
7.26644559e-01,
7.32580663e-01,
8.34063987e-01,
8.34063987e-01,
1.00000000e00,
8.92315821e-01,
8.77625013e-01,
6.71133852e-01,
4.28392660e-01,
3.20122429e-01,
2.90558283e-01,
2.55881726e-01,
2.18728235e-01,
1.78134763e-01,
1.33254382e-01,
8.56818538e-02,
3.81986876e-02,
3.19820908e-03,
2.39632149e-07,
]
)
np.testing.assert_allclose(cl_fs, cl_fs_ref)
np.testing.assert_allclose(f_st, f_st_ref)
def test_cl_max(self):
cl_max, alpha_cl_max = self.polar.cl_max()
np.testing.assert_allclose(cl_max, 1.02)
np.testing.assert_allclose(alpha_cl_max, 15.3)
def test_linear_region(self):
alpha_linear_region, cl_linear_region, slope, alpha0 = self.polar.linear_region()
np.testing.assert_allclose(alpha_linear_region, np.array([-3.36474623, 5.96375]))
np.testing.assert_allclose(cl_linear_region, np.array([-0.39279835, 0.811875]))
np.testing.assert_allclose(slope, 0.1291390728476821)
np.testing.assert_allclose(alpha0, -0.32307692307692304)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestBlend))
suite.addTest(unittest.makeSuite(Test3DStall))
suite.addTest(unittest.makeSuite(TestExtrap))
suite.addTest(unittest.makeSuite(TestMisc))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
```
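A minimal sketch of the `Polar` workflow these tests exercise: build a polar, apply the 3-D rotational-augmentation correction at one blade station, then Viterna-extrapolate to the full +/-180 degree range. The import path and the toy coefficient curves are assumptions for illustration, not taken from the test file; adjust them to match the `Polar` class actually under test.
```python
import numpy as np
from wisdem.ccblade.Polar import Polar  # assumed location of the Polar class

alpha = np.linspace(-10.0, 20.0, 16)    # angles of attack [deg]
cl = 0.1 * alpha + 0.05                 # toy linear lift curve
cd = 0.01 + 0.001 * alpha ** 2          # toy drag bucket
cm = np.zeros_like(alpha)

polar = Polar(1e6, alpha, cl, cd, cm)   # args: Re, alpha, cl, cd, cm

# 3-D stall-delay correction at r/R = 0.5, chord/r = 0.2, tip-speed ratio 5,
# then extrapolation of the corrected polar to +/-180 degrees.
polar3d = polar.correction3D(0.5, 0.2, 5.0)
polar360 = polar3d.extrapolate(cdmax=1.3)
print(polar360.alpha.min(), polar360.alpha.max())  # roughly -180 ... 180
```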
#### File: test/test_commonse/test_environment.py
```python
import unittest
import numpy as np
import openmdao.api as om
import numpy.testing as npt
import wisdem.commonse.environment as env
from wisdem.commonse import gravity as g
from openmdao.utils.assert_utils import assert_check_partials
npts = 100
myones = np.ones((npts,))
class TestPowerWind(unittest.TestCase):
def setUp(self):
self.params = {}
self.unknowns = {}
self.resid = None
self.params["shearExp"] = 2.0
self.params["Uref"] = 5.0
self.params["zref"] = 3.0
self.params["z0"] = 0.0
self.params["z"] = 9.0 * myones
self.wind = env.PowerWind(nPoints=npts)
def testRegular(self):
self.wind.compute(self.params, self.unknowns)
expect = 45.0 * myones
npt.assert_equal(self.unknowns["U"], expect)
def testIndex(self):
self.params["z"][1:] = -1.0
self.wind.compute(self.params, self.unknowns)
expect = 45.0 * myones
expect[1:] = 0.0
npt.assert_equal(self.unknowns["U"], expect)
def testZ0(self):
self.params["z0"] = 10.0
self.params["z"] += 10.0
self.params["zref"] += 10.0
self.wind.compute(self.params, self.unknowns)
expect = 45.0 * myones
npt.assert_equal(self.unknowns["U"], expect)
class TestLinearWaves(unittest.TestCase):
def setUp(self):
self.params = {}
self.unknowns = {}
self.resid = None
self.params["rho_water"] = 1e3
self.params["Hsig_wave"] = 2.0
self.params["Uc"] = 5.0
self.params["z_floor"] = -30.0
self.params["z_surface"] = 0.0
self.params["z"] = -2.0 * myones
self.wave = env.LinearWaves(nPoints=npts)
def testRegular(self):
D = np.abs(self.params["z_floor"])
k = 2.5
omega = np.sqrt(g * k * np.tanh(k * D))
self.params["Tsig_wave"] = 2.0 * np.pi / omega
self.wave.compute(self.params, self.unknowns)
a = 1.0 # 0.5*Hsig_wave
z = -2.0
rho = 1e3
U_exp = 5 + omega * a * np.cosh(k * (z + D)) / np.sinh(k * D)
W_exp = -omega * a * np.sinh(k * (z + D)) / np.sinh(k * D)
V_exp = np.sqrt(U_exp ** 2 + W_exp ** 2)
A_exp = omega * omega * a * np.cosh(k * (z + D)) / np.sinh(k * D)
p_exp = -rho * g * (z - a * np.cosh(k * (z + D)) / np.cosh(k * D))
npt.assert_almost_equal(self.unknowns["U"], U_exp)
npt.assert_almost_equal(self.unknowns["W"], W_exp)
npt.assert_almost_equal(self.unknowns["V"], V_exp)
npt.assert_almost_equal(self.unknowns["A"], A_exp)
npt.assert_almost_equal(self.unknowns["p"], p_exp)
# Positive depth input
self.params["z_floor"] = 30.0
self.wave.compute(self.params, self.unknowns)
npt.assert_almost_equal(self.unknowns["U"], U_exp)
npt.assert_almost_equal(self.unknowns["W"], W_exp)
npt.assert_almost_equal(self.unknowns["V"], V_exp)
npt.assert_almost_equal(self.unknowns["A"], A_exp)
npt.assert_almost_equal(self.unknowns["p"], p_exp)
def testPositiveZ(self):
self.params["Tsig_wave"] = 2.0
self.params["z"] = 2.0 * myones
self.wave.compute(self.params, self.unknowns)
npt.assert_equal(self.unknowns["U"], 0.0)
npt.assert_equal(self.unknowns["W"], 0.0)
npt.assert_equal(self.unknowns["V"], 0.0)
npt.assert_equal(self.unknowns["A"], 0.0)
npt.assert_equal(self.unknowns["p"], 0.0)
def testQuiet(self):
self.params["Hsig_wave"] = 0.0
self.params["Tsig_wave"] = 2.0
self.wave.compute(self.params, self.unknowns)
p_exp = 2e3 * g
npt.assert_equal(self.unknowns["U"], 5.0)
npt.assert_equal(self.unknowns["W"], 0.0)
npt.assert_equal(self.unknowns["V"], 5.0)
npt.assert_equal(self.unknowns["A"], 0.0)
npt.assert_equal(self.unknowns["p"], p_exp)
class TestPowerWindGradients(unittest.TestCase):
def test(self):
z = np.linspace(0.0, 100.0, 20)
nPoints = len(z)
prob = om.Problem()
root = prob.model = om.Group()
root.add_subsystem("p", env.PowerWind(nPoints=nPoints))
prob.setup()
prob["p.Uref"] = 10.0
prob["p.zref"] = 100.0
prob["p.z0"] = 0.001 # Fails when z0 = 0, What to do here?
prob["p.shearExp"] = 0.2
prob.run_model()
check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
assert_check_partials(check)
class TestLogWindGradients(unittest.TestCase):
def test(self):
nPoints = 20
z = np.linspace(0.1, 100.0, nPoints)
prob = om.Problem()
root = prob.model = om.Group()
root.add_subsystem("p", env.LogWind(nPoints=nPoints))
prob.setup()
prob["p.Uref"] = 10.0
prob["p.zref"] = 100.0
prob["p.z0"] = 0.1 # Fails when z0 = 0
prob.run_model()
check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
assert_check_partials(check)
### These partials are wrong; do not test
# class TestLinearWaveGradients(unittest.TestCase):
#
# def test(self):
#
# z_floor = 0.1
# z_surface = 20.
# z = np.linspace(z_floor, z_surface, 20)
# nPoints = len(z)
#
# prob = om.Problem()
# root = prob.model = om.Group()
# root.add_subsystem('p', env.LinearWaves(nPoints=nPoints))
#
# prob.setup()
#
# prob['p.Uc'] = 7.0
# prob['p.z_floor'] = z_floor
# prob['p.z_surface'] = z_surface
# prob['p.Hsig_wave'] = 10.0
# prob['p.Tsig_wave'] = 2.0
#
# prob.run_model()
#
# check = prob.check_partials(out_stream=None, compact_print=True, method='fd')
#
# assert_check_partials(check)
### The partials are currently not correct, so skip this test
# class TestSoilGradients(unittest.TestCase):
#
# def test(self):
#
# d0 = 10.0
# depth = 30.0
# G = 140e6
# nu = 0.4
#
# prob = om.Problem()
# root = prob.model = om.Group()
# root.add_subsystem('p', env.TowerSoil())
#
# prob.setup()
#
# prob['p.G'] = G
# prob['p.nu'] = nu
# prob['p.d0'] = d0
# prob['p.depth'] = depth
#
# prob.run_model()
#
# check = prob.check_partials(out_stream=None, compact_print=True, method='fd')
#
# assert_check_partials(check)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestPowerWind))
suite.addTest(unittest.makeSuite(TestLinearWaves))
suite.addTest(unittest.makeSuite(TestPowerWindGradients))
suite.addTest(unittest.makeSuite(TestLogWindGradients))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
```
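For reference, the `expect = 45.0` value in `TestPowerWind.testRegular` above follows directly from the power-law shear profile; a two-line check with the same constants:
```python
# U(z) = Uref * ((z - z0) / (zref - z0)) ** shearExp, with the test's constants
Uref, zref, z0, shearExp, z = 5.0, 3.0, 0.0, 2.0, 9.0
U = Uref * ((z - z0) / (zref - z0)) ** shearExp
print(U)  # 45.0, matching the assertion in testRegular
```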
#### File: test/test_commonse/test_frustum.py
```python
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.commonse.frustum as f
from wisdem.commonse import eps
myones = np.ones((100,))
rb = 4.0
rt = 2.0
t = 0.1
h = 3.0
class TestFrustum(unittest.TestCase):
def testFrustumVol(self):
V = np.pi / 3 * h * (rb ** 2 + rt ** 2 + rb * rt)
# Test volume- scalar and vector inputs
self.assertEqual(f.frustumVol(rb, rt, h, False), V)
self.assertEqual(f.frustumVol(2 * rb, 2 * rt, h, True), V)
npt.assert_equal(f.frustumVol(rb * myones, rt * myones, h * myones, False), V * myones)
npt.assert_equal(f.frustumVol(2 * rb * myones, 2 * rt * myones, h * myones, True), V * myones)
def testFrustumCG_solid(self):
cg_solid = h / 4 * (rb ** 2 + 3 * rt ** 2 + 2 * rb * rt) / (rb ** 2 + rt ** 2 + rb * rt)
# Test cg of solid- scalar and vector inputs
self.assertEqual(f.frustumCG(rb, rt, h, False), cg_solid)
self.assertEqual(f.frustumCG(2 * rb, 2 * rt, h, True), cg_solid)
npt.assert_equal(f.frustumCG(rb * myones, rt * myones, h * myones, False), cg_solid * myones)
npt.assert_equal(f.frustumCG(2 * rb * myones, 2 * rt * myones, h * myones, True), cg_solid * myones)
def testFrustum_shell(self):
# In limit of thickness approaching radius, should recover regular formulas
self.assertEqual(f.frustumShellVol(rb, rb, rb, h, False), f.frustumVol(rb, rb, h, False))
self.assertEqual(f.frustumShellVol(2 * rt, 2 * rt, rt, h, True), f.frustumVol(rt, rt, h, False))
self.assertEqual(f.frustumShellCG(rb, rb, rb, h, False), f.frustumCG(rb, rb, h, False))
self.assertEqual(f.frustumShellCG(2 * rt, 2 * rt, rt, h, True), f.frustumCG(rt, rt, h, False))
self.assertEqual(f.frustumShellIzz(rb, rb, rb, h, False), f.frustumIzz(rb, rb, h, False))
self.assertEqual(f.frustumShellIzz(2 * rt, 2 * rt, rt, h, True), f.frustumIzz(rt, rt, h, False))
self.assertAlmostEqual(f.frustumShellIxx(rb, rb, rb - eps, h, False), f.frustumIxx(rb, rb, h, False))
self.assertAlmostEqual(f.frustumShellIxx(2 * rt, 2 * rt, rt - eps, h, True), f.frustumIxx(rt, rt, h, False))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestFrustum))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
```
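The expected values in `TestFrustum` come from the closed-form frustum volume and centroid expressions written out in the tests themselves; a quick standalone check with the same constants (no WISDEM import needed):
```python
import numpy as np

rb, rt, h = 4.0, 2.0, 3.0                                 # base/top radius, height
V = np.pi / 3 * h * (rb**2 + rt**2 + rb * rt)             # solid frustum volume
cg = h / 4 * (rb**2 + 3 * rt**2 + 2 * rb * rt) / (rb**2 + rt**2 + rb * rt)
print(V, cg)  # ~87.965 and ~1.179
```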
#### File: test/test_examples/test_examples.py
```python
import os
import unittest
import importlib.util
from time import time
from pathlib import Path
thisdir = os.path.dirname(os.path.realpath(__file__))
# Find examples directory- outside the module path
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(thisdir)))
examples_dir = os.path.join(root_dir, "examples")
all_examples = Path(examples_dir).rglob("*.py") if os.path.exists(examples_dir) else []
# 02_ref turbines are regression tested in test_gluecode, no need to duplicate runtime
all_scripts = [
"01_nrel_csm/costs",
"01_nrel_csm/mass",
"01_nrel_csm/mass_and_cost",
"01_nrel_csm/parametric",
"02_reference_turbines/iea10mw_driver",
"03_blade/blade_driver",
"04_openmdao/betz_limit",
"04_openmdao/sellar",
"05_tower_monopile/monopile_direct",
"05_tower_monopile/monopile_driver",
"05_tower_monopile/tower_direct",
"05_tower_monopile/tower_driver",
"06_drivetrain/drivetrain_direct",
"06_drivetrain/drivetrain_geared",
"07_generator/dfig",
"07_generator/eesg",
"07_generator/pmsg_arms",
"07_generator/pmsg_disc",
"07_generator/pmsg_outer",
"07_generator/scig",
"08_plant_finance/example",
"09_floating/mooring_opt",
"09_floating/semi_only_driver",
"09_floating/spar_only_driver",
"09_floating/spar_opt",
"09_floating/tlp_example",
"09_floating/nrel5mw-spar_oc3_driver",
"09_floating/nrel5mw-semi_oc4_driver",
"09_floating/iea15mw_driver",
"10_ccblade/example",
"10_ccblade/gradients",
"10_ccblade/precurve",
"11_airfoilprep/example",
"12_pyframe3dd/exB",
"13_design_of_experiments/doe_driver",
"14_overridden_values/driver",
"15_step_size_study/driver",
"16_inverse_design/inverse_spar_design",
]
def execute_script(fscript):
# Go to location due to relative path use for airfoil files
print("\n\n")
print("NOW RUNNING:", fscript)
print()
fullpath = os.path.join(examples_dir, fscript + ".py")
basepath = os.path.join(examples_dir, fscript.split("/")[0])
os.chdir(basepath)
# Get script/module name
froot = fscript.split("/")[-1]
# Use dynamic import capabilities
# https://www.blog.pythonlibrary.org/2016/05/27/python-201-an-intro-to-importlib/
print(froot, os.path.realpath(fullpath))
spec = importlib.util.spec_from_file_location(froot, os.path.realpath(fullpath))
mod = importlib.util.module_from_spec(spec)
s = time()
spec.loader.exec_module(mod)
print(time() - s, "seconds to run")
class TestExamples(unittest.TestCase):
def test_all_scripts(self):
for ks, s in enumerate(all_scripts):
with self.subTest(f"Running: {s}", i=ks):
try:
execute_script(s)
self.assertTrue(True)
                except Exception:
self.assertEqual(s, "Success")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestExamples))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
```
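`execute_script` above relies on the standard dynamic-import recipe; reduced to its core, the pattern is just:
```python
import importlib.util

def run_script(path, name="example"):
    # Load an arbitrary .py file as a module and execute its top-level code.
    spec = importlib.util.spec_from_file_location(name, path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
```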
#### File: test/test_optimization_drivers/test_dakota_driver.py
```python
import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
```
#### File: test/test_rotorse/test_rotor_cost.py
```python
import os
import unittest
import openmdao.api as om
from wisdem.glue_code.gc_LoadInputs import WindTurbineOntologyPython
from wisdem.glue_code.gc_PoseOptimization import PoseOptimization
from wisdem.rotorse.rotor_cost import StandaloneRotorCost, initialize_omdao_prob
wisdem_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
example_dir = os.path.join(wisdem_dir, "examples", "02_reference_turbines")  # path to the examples/02_reference_turbines directory
fname_opt_options = os.path.join(example_dir, "analysis_options.yaml")
accuracy = 0
class TestRC(unittest.TestCase):
def testRotorCostIEA3p4(self):
fname_modeling_options = os.path.join(example_dir, "modeling_options.yaml")
fname_wt_input = os.path.join(example_dir, "IEA-3p4-130-RWT.yaml")
wt_initial = WindTurbineOntologyPython(fname_wt_input, fname_modeling_options, fname_opt_options)
wt_init, modeling_options, opt_options = wt_initial.get_input_data()
modeling_options["WISDEM"]["RotorSE"]["flag"] = False
        wt_opt = om.Problem(model=StandaloneRotorCost(modeling_options=modeling_options, opt_options=opt_options))
wt_opt.setup(derivatives=False)
myopt = PoseOptimization(wt_init, modeling_options, opt_options)
wt_opt = myopt.set_initial(wt_opt, wt_init)
wt_opt = initialize_omdao_prob(wt_opt, modeling_options, wt_init)
wt_opt.run_model()
self.assertAlmostEqual(wt_opt["rc.total_labor_hours"][0], 942.2580090792371, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_non_gating_ct"][0], 125.47743556393156, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_metallic_parts_cost"][0], 4667.410277624349, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_consumable_cost_w_waste"][0], 7200.344422290056, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_blade_mat_cost_w_waste"][0], 69743.70423837316, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_labor"][0], 31792.709454484186, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_utility"][0], 638.5561660415175, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.blade_variable_cost"][0], 102174.96985889885, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_equipment"][0], 3334.488593496729, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_tooling"][0], 9970.668209900952, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_building"][0], 544.3309391994371, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_maintenance_cost"][0], 3582.2994780221684, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_labor_overhead"][0], 9537.812836345256, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.cost_capital"][0], 7223.287108846724, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.blade_fixed_cost"][0], 34192.88716581127, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_blade_cost"][0], 136367.85702471013, places=accuracy)
def testRotorCostIEA10(self):
fname_modeling_options = os.path.join(example_dir, "modeling_options_iea10.yaml")
fname_wt_input = os.path.join(example_dir, "IEA-10-198-RWT.yaml")
wt_initial = WindTurbineOntologyPython(fname_wt_input, fname_modeling_options, fname_opt_options)
wt_init, modeling_options, opt_options = wt_initial.get_input_data()
modeling_options["WISDEM"]["RotorSE"]["flag"] = False
modeling_options["WISDEM"]["DriveSE"]["direct"] = False
        wt_opt = om.Problem(model=StandaloneRotorCost(modeling_options=modeling_options, opt_options=opt_options))
wt_opt.setup(derivatives=False)
myopt = PoseOptimization(wt_init, modeling_options, opt_options)
wt_opt = myopt.set_initial(wt_opt, wt_init)
wt_opt = initialize_omdao_prob(wt_opt, modeling_options, wt_init)
wt_opt.run_model()
self.assertAlmostEqual(wt_opt["rc.total_labor_hours"][0], 2104.669926420341, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_non_gating_ct"][0], 224.7722030790102, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_metallic_parts_cost"][0], 7621.331066101973, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_consumable_cost_w_waste"][0], 12913.891638670682, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_blade_mat_cost_w_waste"][0], 195975.7612745464, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_labor"][0], 71148.57041818449, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_utility"][0], 2582.596384597327, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.blade_variable_cost"][0], 269706.9280773282, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_equipment"][0], 7975.896398389559, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_tooling"][0], 34012.403410785664, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_building"][0], 1960.741347506985, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_maintenance_cost"][0], 10985.232722089915, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_labor_overhead"][0], 21344.571125455346, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.cost_capital"][0], 21348.39972589469, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.blade_fixed_cost"][0], 97627.24473012217, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_blade_cost"][0], 367334.17280745035, places=accuracy)
def testRotorCostIEA15(self):
fname_modeling_options = os.path.join(example_dir, "modeling_options.yaml")
fname_wt_input = os.path.join(example_dir, "IEA-15-240-RWT.yaml")
wt_initial = WindTurbineOntologyPython(fname_wt_input, fname_modeling_options, fname_opt_options)
wt_init, modeling_options, opt_options = wt_initial.get_input_data()
modeling_options["WISDEM"]["RotorSE"]["flag"] = False
modeling_options["WISDEM"]["DriveSE"]["direct"] = False
        wt_opt = om.Problem(model=StandaloneRotorCost(modeling_options=modeling_options, opt_options=opt_options))
wt_opt.setup(derivatives=False)
myopt = PoseOptimization(wt_init, modeling_options, opt_options)
wt_opt = myopt.set_initial(wt_opt, wt_init)
wt_opt = initialize_omdao_prob(wt_opt, modeling_options, wt_init)
wt_opt.run_model()
self.assertAlmostEqual(wt_opt["rc.total_labor_hours"][0], 3144.546512161421, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_non_gating_ct"][0], 275.33521169175623, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_metallic_parts_cost"][0], 8885.377394584766, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_consumable_cost_w_waste"][0], 15040.127644116868, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_blade_mat_cost_w_waste"][0], 360963.0848169348, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_labor"][0], 106238.75799444572, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_utility"][0], 3682.6931230180785, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.blade_variable_cost"][0], 470884.5359343986, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_equipment"][0], 25446.017337652025, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_tooling"][0], 32255.212178863818, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_cost_building"][0], 2370.92487015942, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_maintenance_cost"][0], 18184.350727870325, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_labor_overhead"][0], 31871.627398333716, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.cost_capital"][0], 35744.711245853185, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.blade_fixed_cost"][0], 145872.8437587325, places=accuracy)
self.assertAlmostEqual(wt_opt["rc.total_blade_cost"][0], 616757.3796931311, places=accuracy)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestRC))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
``` |
{
"source": "johnjchang/carnival",
"score": 2
} |
#### File: deep_rl/utils/misc.py
```python
import numpy as np
import pickle
import os
import datetime
import torch
import time
from .torch_utils import *
try:
# python >= 3.5
from pathlib import Path
except:
# python == 2.7
from pathlib2 import Path
def run_steps(agent):
random_seed()
config = agent.config
agent_name = agent.__class__.__name__
t0 = time.time()
while True:
if config.save_interval and not agent.total_steps % config.save_interval:
agent.save('data/model-%s-%s-%s.bin' % (agent_name, config.task_name, config.tag))
if config.log_interval and not agent.total_steps % config.log_interval and len(agent.episode_rewards):
rewards = agent.episode_rewards
agent.episode_rewards = []
config.logger.info('total steps %d, returns %.2f/%.2f/%.2f/%.2f (mean/median/min/max), %.2f steps/s' % (
agent.total_steps, np.mean(rewards), np.median(rewards), np.min(rewards), np.max(rewards),
config.log_interval / (time.time() - t0)))
t0 = time.time()
if config.eval_interval and not agent.total_steps % config.eval_interval:
agent.eval_episodes()
if config.max_steps and agent.total_steps >= config.max_steps:
agent.close()
break
agent.step()
def get_time_str():
return datetime.datetime.now().strftime("%y%m%d-%H%M%S")
def get_default_log_dir(name):
return './log/%s-%s' % (name, get_time_str())
def mkdir(path):
Path(path).mkdir(parents=True, exist_ok=True)
def close_obj(obj):
if hasattr(obj, 'close'):
obj.close()
class Batcher:
def __init__(self, batch_size, data):
self.batch_size = batch_size
self.data = data
self.num_entries = len(data[0])
self.reset()
def reset(self):
self.batch_start = 0
self.batch_end = self.batch_start + self.batch_size
def end(self):
return self.batch_start >= self.num_entries
def next_batch(self):
batch = []
for d in self.data:
batch.append(d[self.batch_start: self.batch_end])
self.batch_start = self.batch_end
self.batch_end = min(self.batch_start + self.batch_size, self.num_entries)
return batch
def shuffle(self):
indices = np.arange(self.num_entries)
np.random.shuffle(indices)
self.data = [d[indices] for d in self.data]
``` |
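A short illustration of the `Batcher` helper defined above, assuming the module is importable as `deep_rl.utils.misc` (the path implied by the file header): it walks parallel arrays in fixed-size minibatches and can reshuffle them in place.
```python
import numpy as np
from deep_rl.utils.misc import Batcher  # import path assumed from the file header

states = np.arange(10).reshape(10, 1)
returns = np.arange(10, 20)

batcher = Batcher(4, [states, returns])
batcher.shuffle()                        # permutes both arrays identically
while not batcher.end():
    s, r = batcher.next_batch()          # batches of 4, 4, then 2 entries
    print(s.ravel(), r)
```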
{
"source": "johnjdailey/authlib",
"score": 2
} |
#### File: jose/rfc7517/models.py
```python
from authlib.common.encoding import json_dumps
class Key(dict):
kty = '_'
ALLOWED_PARAMS = [
'use', 'key_ops', 'alg', 'kid',
'x5u', 'x5c', 'x5t', 'x5t#S256'
]
PRIVATE_KEY_OPS = [
'sign', 'decrypt', 'unwrapKey',
]
PUBLIC_KEY_OPS = [
'verify', 'encrypt', 'wrapKey',
]
REQUIRED_JSON_FIELDS = []
RAW_KEY_CLS = bytes
def __init__(self, payload):
super(Key, self).__init__(payload)
self.key_type = 'secret'
self.raw_key = None
def get_op_key(self, key_op):
self.check_key_op(key_op)
if key_op in self.PUBLIC_KEY_OPS and self.key_type == 'private':
return self.raw_key.public_key()
return self.raw_key
def check_key_op(self, key_op):
allowed_key_ops = self.get('key_ops')
if allowed_key_ops is not None and key_op not in allowed_key_ops:
raise ValueError('Unsupported key_op "{}"'.format(key_op))
if key_op in self.PRIVATE_KEY_OPS and self.key_type == 'public':
raise ValueError('Invalid key_op "{}" for public key'.format(key_op))
def as_dict(self):
obj = dict(self)
obj['kty'] = self.kty
return obj
def as_json(self):
obj = self.as_dict()
return json_dumps(obj)
def as_pem(self):
raise RuntimeError('Not supported')
@classmethod
def check_required_fields(cls, data):
for k in cls.REQUIRED_JSON_FIELDS:
if k not in data:
raise ValueError('Missing required field: "{}"'.format(k))
@classmethod
def generate_key(cls, crv_or_size, options=None, is_private=False):
raise NotImplementedError()
@classmethod
def import_key(cls, raw, options=None):
raise NotImplementedError()
class KeySet(object):
def __init__(self, keys):
self.keys = keys
def as_dict(self):
return {'keys': [k.as_dict() for k in self.keys]}
def find_by_kid(self, kid):
for k in self.keys:
if k.get('kid') == kid:
return k
raise ValueError('Invalid JSON Web Key Set')
```
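Since `Key` subclasses `dict`, a `KeySet` lookup by `kid` is a plain linear probe over JWK dicts. A minimal sketch using the base classes directly (a real application would use a concrete subclass such as `OctKey` below); it assumes both names are re-exported by the `rfc7517` package:
```python
from authlib.jose.rfc7517 import Key, KeySet  # export path assumed

key = Key({'kid': 'key-1', 'use': 'sig'})
jwks = KeySet([key])
assert jwks.find_by_kid('key-1') is key
print(jwks.as_dict())  # {'keys': [{'kid': 'key-1', 'use': 'sig', 'kty': '_'}]}
```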
#### File: jose/rfc7518/oct_key.py
```python
from authlib.common.encoding import (
to_bytes, to_unicode,
urlsafe_b64encode, urlsafe_b64decode,
)
from authlib.common.security import generate_token
from authlib.jose.rfc7517 import Key
class OctKey(Key):
kty = 'oct'
REQUIRED_JSON_FIELDS = ['k']
def get_op_key(self, key_op):
self.check_key_op(key_op)
return self.raw_key
@classmethod
def import_key(cls, raw, options=None):
if isinstance(raw, dict):
cls.check_required_fields(raw)
payload = raw
raw_key = urlsafe_b64decode(to_bytes(payload['k']))
else:
raw_key = to_bytes(raw)
k = to_unicode(urlsafe_b64encode(raw_key))
payload = {'k': k}
if options is not None:
payload.update(options)
obj = cls(payload)
obj.raw_key = raw_key
obj.key_type = 'secret'
return obj
@classmethod
def generate_key(cls, bit_size=256, options=None, is_private=True):
if not is_private:
raise ValueError('oct key can not be generated as public')
if bit_size % 8 != 0:
raise ValueError('Invalid bit size for oct key')
        return cls.import_key(generate_token(bit_size // 8), options)  # integer division: generate_token takes an int length
```
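A round-trip sketch for `OctKey`: generate a random 256-bit symmetric key, serialize it as a JWK dict, and re-import it. The export path is assumed.
```python
from authlib.jose.rfc7518 import OctKey  # export path assumed

key = OctKey.generate_key(256, options={'kid': 'demo'})
jwk = key.as_dict()            # {'k': '<base64url>', 'kid': 'demo', 'kty': 'oct'}
key2 = OctKey.import_key(jwk)
assert key2.raw_key == key.raw_key
```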
#### File: jose/rfc8037/okp_key.py
```python
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PublicKey, Ed25519PrivateKey
)
from cryptography.hazmat.primitives.asymmetric.ed448 import (
Ed448PublicKey, Ed448PrivateKey
)
from cryptography.hazmat.primitives.asymmetric.x25519 import (
X25519PublicKey, X25519PrivateKey
)
from cryptography.hazmat.primitives.asymmetric.x448 import (
X448PublicKey, X448PrivateKey
)
from cryptography.hazmat.primitives.serialization import (
Encoding, PublicFormat, PrivateFormat, NoEncryption
)
from authlib.common.encoding import (
to_unicode, to_bytes,
urlsafe_b64decode, urlsafe_b64encode,
)
from authlib.jose.rfc7517 import Key
from ..rfc7518 import import_key
PUBLIC_KEYS_MAP = {
'Ed25519': Ed25519PublicKey,
'Ed448': Ed448PublicKey,
'X25519': X25519PublicKey,
'X448': X448PublicKey,
}
PRIVATE_KEYS_MAP = {
'Ed25519': Ed25519PrivateKey,
'Ed448': Ed448PrivateKey,
'X25519': X25519PrivateKey,
'X448': X448PrivateKey,
}
PUBLIC_KEY_TUPLE = tuple(PUBLIC_KEYS_MAP.values())
PRIVATE_KEY_TUPLE = tuple(PRIVATE_KEYS_MAP.values())
class OKPKey(Key):
kty = 'OKP'
REQUIRED_JSON_FIELDS = ['crv', 'x']
RAW_KEY_CLS = (
Ed25519PublicKey, Ed25519PrivateKey,
Ed448PublicKey, Ed448PrivateKey,
X25519PublicKey, X25519PrivateKey,
X448PublicKey, X448PrivateKey,
)
@staticmethod
def get_key_curve(key):
if isinstance(key, (Ed25519PublicKey, Ed25519PrivateKey)):
return 'Ed25519'
elif isinstance(key, (Ed448PublicKey, Ed448PrivateKey)):
return 'Ed448'
elif isinstance(key, (X25519PublicKey, X25519PrivateKey)):
return 'X25519'
elif isinstance(key, (X448PublicKey, X448PrivateKey)):
return 'X448'
@staticmethod
def loads_private_key(obj):
crv_key = PRIVATE_KEYS_MAP[obj['crv']]
d_bytes = urlsafe_b64decode(to_bytes(obj['d']))
return crv_key.from_private_bytes(d_bytes)
@staticmethod
def loads_public_key(obj):
crv_key = PUBLIC_KEYS_MAP[obj['crv']]
x_bytes = urlsafe_b64decode(to_bytes(obj['x']))
return crv_key.from_public_bytes(x_bytes)
@staticmethod
def dumps_private_key(raw_key):
obj = OKPKey.dumps_public_key(raw_key.public_key())
d_bytes = raw_key.private_bytes(
Encoding.Raw,
PrivateFormat.Raw,
NoEncryption()
)
obj['d'] = to_unicode(urlsafe_b64encode(d_bytes))
return obj
@staticmethod
def dumps_public_key(raw_key):
x_bytes = raw_key.public_bytes(Encoding.Raw, PublicFormat.Raw)
return {
'crv': OKPKey.get_key_curve(raw_key),
'x': to_unicode(urlsafe_b64encode(x_bytes)),
}
@classmethod
def import_key(cls, raw, options=None):
return import_key(
cls, raw,
PUBLIC_KEY_TUPLE, PRIVATE_KEY_TUPLE,
b'ssh-ed25519', options
)
@classmethod
def generate_key(cls, crv, options=None, is_private=False):
        raise NotImplementedError()  # OKP key generation is not implemented here; raising matches the base Key contract instead of silently returning None
``` |
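Importing an Ed25519 signing key as an OKP JWK, using the `cryptography` primitives that `okp_key.py` already depends on; the export path is assumed:
```python
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from authlib.jose.rfc8037 import OKPKey  # export path assumed

raw = Ed25519PrivateKey.generate()
key = OKPKey.import_key(raw)
print(key.key_type)                               # 'private'
print(OKPKey.dumps_public_key(raw.public_key()))  # {'crv': 'Ed25519', 'x': '...'}
```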
{
"source": "johnjdailey/aws-crt-python",
"score": 2
} |
#### File: aws-crt-python/awscrt/auth.py
```python
from __future__ import absolute_import
import _awscrt
from awscrt import isinstance_str, NativeResource
import awscrt.exceptions
from awscrt.http import HttpRequest
from awscrt.io import ClientBootstrap
from concurrent.futures import Future
import datetime
from enum import IntEnum
import time
try:
_utc = datetime.timezone.utc
except AttributeError:
# Python 2 lacks the datetime.timestamp() method.
# We can do the timestamp math ourselves, but only if datetime.tzinfo is set.
# Python 2 also lacks any predefined tzinfo classes (ex: datetime.timezone.utc),
# so we must define our own.
class _UTC(datetime.tzinfo):
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return _UTC.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _UTC.ZERO
_utc = _UTC()
class AwsCredentials(NativeResource):
"""
AwsCredentials are the public/private data needed to sign an authenticated AWS request.
AwsCredentials are immutable.
"""
__slots__ = ()
def __init__(self, access_key_id, secret_access_key, session_token=None):
assert isinstance_str(access_key_id)
assert isinstance_str(secret_access_key)
assert isinstance_str(session_token) or session_token is None
super(AwsCredentials, self).__init__()
self._binding = _awscrt.credentials_new(access_key_id, secret_access_key, session_token)
@property
def access_key_id(self):
return _awscrt.credentials_access_key_id(self._binding)
@property
def secret_access_key(self):
return _awscrt.credentials_secret_access_key(self._binding)
@property
def session_token(self):
return _awscrt.credentials_session_token(self._binding)
def __deepcopy__(self, memo):
# AwsCredentials is immutable, so just return self.
return self
class AwsCredentialsProviderBase(NativeResource):
"""
Base class for providers that source the AwsCredentials needed to sign an authenticated AWS request.
"""
__slots__ = ()
def __init__(self, binding=None):
super(AwsCredentialsProviderBase, self).__init__()
if binding is None:
# TODO: create binding type that lets native code call into python subclass
raise NotImplementedError("Custom subclasses of AwsCredentialsProviderBase are not yet supported")
self._binding = binding
def get_credentials(self):
"""
Asynchronously fetch AwsCredentials.
Returns a Future which will contain AwsCredentials (or an exception)
when the call completes. The call may complete on a different thread.
"""
raise NotImplementedError()
class AwsCredentialsProvider(AwsCredentialsProviderBase):
"""
Credentials providers source the AwsCredentials needed to sign an authenticated AWS request.
This class provides new() functions for several built-in provider types.
"""
__slots__ = ()
@classmethod
def new_default_chain(cls, client_bootstrap):
"""
Create the default provider chain used by most AWS SDKs.
Generally:
(1) Environment
(2) Profile
(3) (conditional, off by default) ECS
(4) (conditional, on by default) EC2 Instance Metadata
"""
assert isinstance(client_bootstrap, ClientBootstrap)
binding = _awscrt.credentials_provider_new_chain_default(client_bootstrap)
return cls(binding)
@classmethod
def new_static(cls, access_key_id, secret_access_key, session_token=None):
"""
Create a simple provider that just returns a fixed set of credentials
"""
assert isinstance_str(access_key_id)
assert isinstance_str(secret_access_key)
assert isinstance_str(session_token) or session_token is None
binding = _awscrt.credentials_provider_new_static(access_key_id, secret_access_key, session_token)
return cls(binding)
def get_credentials(self):
future = Future()
def _on_complete(error_code, access_key_id, secret_access_key, session_token):
try:
if error_code:
future.set_exception(awscrt.exceptions.from_code(error_code))
else:
credentials = AwsCredentials(access_key_id, secret_access_key, session_token)
future.set_result(credentials)
except Exception as e:
future.set_exception(e)
try:
_awscrt.credentials_provider_get_credentials(self._binding, _on_complete)
except Exception as e:
            future.set_exception(e)
return future
class AwsSigningAlgorithm(IntEnum):
"""
Which signing algorithm to use.
SigV4Header: Use Signature Version 4 to sign headers.
SigV4QueryParam: Use Signature Version 4 to sign query parameters.
"""
SigV4Header = 0
SigV4QueryParam = 1
class AwsBodySigningConfigType(IntEnum):
"""Body Signing config
BodySigningOff: No attempts will be made to sign the payload, and no
x-amz-content-sha256 header will be added to the request.
BodySigningOn: The body will be signed and x-amz-content-sha256 will contain
the value of the signature
UnsignedPayload: The body will not be signed, but x-amz-content-sha256 will contain
the value UNSIGNED-PAYLOAD. This value is currently only used for Amazon S3.
"""
BodySigningOff = 0
BodySigningOn = 1
UnsignedPayload = 2
class AwsSigningConfig(NativeResource):
"""
Configuration for use in AWS-related signing.
AwsSigningConfig is immutable.
It is good practice to use a new config for each signature, or the date might get too old.
Naive dates (lacking timezone info) are assumed to be in local time.
"""
    __slots__ = ('_priv_should_sign_cb',)
_attributes = ('algorithm', 'credentials_provider', 'region', 'service', 'date', 'should_sign_param',
'use_double_uri_encode', 'should_normalize_uri_path', 'body_signing_type')
def __init__(self,
algorithm, # type: AwsSigningAlgorithm
credentials_provider, # type: AwsCredentialsProviderBase
region, # type: str
service, # type: str
date=None, # type: Optional[datetime.datetime]
should_sign_param=None, # type: Optional[Callable[[str], bool]]
use_double_uri_encode=False, # type: bool
should_normalize_uri_path=True, # type: bool
body_signing_type=AwsBodySigningConfigType.BodySigningOn # type: AwsBodySigningConfigType
):
# type: (...) -> None
assert isinstance(algorithm, AwsSigningAlgorithm)
assert isinstance(credentials_provider, AwsCredentialsProviderBase)
assert isinstance_str(region)
assert isinstance_str(service)
assert isinstance(date, datetime.datetime) or date is None
assert callable(should_sign_param) or should_sign_param is None
assert isinstance(body_signing_type, AwsBodySigningConfigType)
super(AwsSigningConfig, self).__init__()
if date is None:
date = datetime.datetime.now(_utc)
try:
timestamp = date.timestamp()
except AttributeError:
# Python 2 doesn't have datetime.timestamp() function.
# If it did we could just call it from binding code instead of calculating it here.
if date.tzinfo is None:
timestamp = time.mktime(date.timetuple())
else:
epoch = datetime.datetime(1970, 1, 1, tzinfo=_utc)
timestamp = (date - epoch).total_seconds()
self._priv_should_sign_cb = should_sign_param
if should_sign_param is not None:
def should_sign_param_wrapper(name):
return should_sign_param(name=name)
else:
should_sign_param_wrapper = None
self._binding = _awscrt.signing_config_new(
algorithm,
credentials_provider,
region,
service,
date,
timestamp,
should_sign_param_wrapper,
use_double_uri_encode,
should_normalize_uri_path,
body_signing_type)
def replace(self, **kwargs):
"""
Return an AwsSigningConfig with the same attributes, except for those
attributes given new values by whichever keyword arguments are specified.
"""
args = {x: kwargs.get(x, getattr(self, x)) for x in AwsSigningConfig._attributes}
return AwsSigningConfig(**args)
@property
def algorithm(self):
"""Which AwsSigningAlgorithm to invoke"""
return AwsSigningAlgorithm(_awscrt.signing_config_get_algorithm(self._binding))
@property
def credentials_provider(self):
"""AwsCredentialsProvider to fetch signing credentials with"""
return _awscrt.signing_config_get_credentials_provider(self._binding)
@property
def region(self):
"""The region to sign against"""
return _awscrt.signing_config_get_region(self._binding)
@property
def service(self):
"""Name of service to sign a request for"""
return _awscrt.signing_config_get_service(self._binding)
@property
def date(self):
"""
datetime.datetime to use during the signing process.
If None is provided to constructor then datetime.datetime.now(datetime.timezone.utc) is used.
"""
return _awscrt.signing_config_get_date(self._binding)
@property
def should_sign_param(self):
"""
Optional function to control which parameters (header or query) are a part of the canonical request.
Function signature is: (name) -> bool
Skipping auth-required params will result in an unusable signature.
Headers injected by the signing process are not skippable.
This function does not override the internal check function (x-amzn-trace-id, user-agent), but rather
supplements it. In particular, a header will get signed if and only if it returns true to both
the internal check (skips x-amzn-trace-id, user-agent) and this function (if defined).
"""
return self._priv_should_sign_cb
@property
def use_double_uri_encode(self):
"""
We assume the uri will be encoded once in preparation for transmission. Certain services
do not decode before checking signature, requiring us to actually double-encode the uri in the canonical request
in order to pass a signature check.
"""
return _awscrt.signing_config_get_use_double_uri_encode(self._binding)
@property
def should_normalize_uri_path(self):
"""Controls whether or not the uri paths should be normalized when building the canonical request"""
return _awscrt.signing_config_get_should_normalize_uri_path(self._binding)
@property
def body_signing_type(self):
"""
BodySigningOff: No attempts will be made to sign the payload, and no
x-amz-content-sha256 header will be added to the request.
BodySigningOn: The body will be signed and x-amz-content-sha256 will contain
the value of the signature
UnsignedPayload: The body will not be signed, but x-amz-content-sha256 will contain
the value UNSIGNED-PAYLOAD. This value is currently only used for Amazon S3.
"""
return AwsBodySigningConfigType(_awscrt.signing_config_get_body_signing_type(self._binding))
def aws_sign_request(http_request, signing_config):
"""
Perform AWS HTTP request signing.
The HttpRequest is transformed asynchronously, according to the AwsSigningConfig.
Returns a Future whose result will be the signed HttpRequest.
When signing:
(1) It is good practice to use a new config for each signature, or the date might get too old.
(2) Do not add the following headers to requests before signing, they may be added by the signer:
x-amz-content-sha256,
X-Amz-Date,
Authorization
(3) Do not add the following query params to requests before signing, they may be added by the signer:
X-Amz-Signature,
X-Amz-Date,
X-Amz-Credential,
X-Amz-Algorithm,
X-Amz-SignedHeaders
"""
assert isinstance(http_request, HttpRequest)
assert isinstance(signing_config, AwsSigningConfig)
future = Future()
def _on_complete(error_code):
try:
if error_code:
future.set_exception(awscrt.exceptions.from_code(error_code))
else:
future.set_result(http_request)
except Exception as e:
future.set_exception(e)
_awscrt.sign_request_aws(http_request, signing_config, _on_complete)
return future
```
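An end-to-end SigV4 signing sketch with the pieces defined above; the credentials, region, and service are placeholders for illustration, not real values:
```python
from awscrt.auth import (
    AwsCredentialsProvider, AwsSigningAlgorithm, AwsSigningConfig, aws_sign_request,
)
from awscrt.http import HttpHeaders, HttpRequest

provider = AwsCredentialsProvider.new_static('AKID_EXAMPLE', 'SECRET_EXAMPLE')
config = AwsSigningConfig(
    algorithm=AwsSigningAlgorithm.SigV4Header,
    credentials_provider=provider,
    region='us-east-1',
    service='service',
)
request = HttpRequest('GET', '/', HttpHeaders([('host', 'example.com')]))
signed = aws_sign_request(request, config).result()  # same request object, now signed
print(dict(signed.headers))  # includes X-Amz-Date and Authorization
```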
#### File: aws-crt-python/awscrt/http.py
```python
from __future__ import absolute_import
import _awscrt
from concurrent.futures import Future
from awscrt import NativeResource, isinstance_str
import awscrt.exceptions
from awscrt.io import ClientBootstrap, EventLoopGroup, DefaultHostResolver, InputStream, TlsConnectionOptions, SocketOptions
from enum import IntEnum
class HttpConnectionBase(NativeResource):
"""
Base for HTTP connection classes.
Attributes:
shutdown_future (concurrent.futures.Future): Completes when the connection has finished shutting down.
Future will contain a result of None, or an exception indicating why shutdown occurred.
Note that the connection may have been garbage-collected before this future completes.
"""
    __slots__ = ('shutdown_future',)
def __init__(self):
super(HttpConnectionBase, self).__init__()
self.shutdown_future = Future()
def close(self):
"""
Close the connection.
        Returns the connection's `shutdown_future`, which completes when shutdown has finished.
"""
_awscrt.http_connection_close(self._binding)
return self.shutdown_future
def is_open(self):
"""
Returns True if the connection is open and usable, False otherwise.
"""
return _awscrt.http_connection_is_open(self._binding)
class HttpClientConnection(HttpConnectionBase):
"""
An HTTP client connection. All operations are async.
Use HttpClientConnection.new() to establish a new connection.
"""
__slots__ = ('_host_name', '_port')
@classmethod
def new(cls,
host_name,
port,
bootstrap,
socket_options=None,
tls_connection_options=None,
proxy_options=None):
"""
        Initiates a new connection to host_name and port using socket_options and tls_connection_options if supplied.
        If tls_connection_options is None, then the connection will be attempted over plain-text.
        Returns a future whose result is a new instance of HttpClientConnection, once the connection has completed
        and is ready for use.
"""
assert isinstance(bootstrap, ClientBootstrap) or bootstrap is None
assert isinstance_str(host_name)
assert isinstance(port, int)
assert isinstance(tls_connection_options, TlsConnectionOptions) or tls_connection_options is None
assert isinstance(socket_options, SocketOptions) or socket_options is None
assert isinstance(proxy_options, HttpProxyOptions) or proxy_options is None
future = Future()
try:
if not socket_options:
socket_options = SocketOptions()
if not bootstrap:
event_loop_group = EventLoopGroup(1)
host_resolver = DefaultHostResolver(event_loop_group)
bootstrap = ClientBootstrap(event_loop_group, host_resolver)
connection = cls()
connection._host_name = host_name
connection._port = port
def on_connection_setup(binding, error_code):
if error_code == 0:
connection._binding = binding
future.set_result(connection)
else:
future.set_exception(awscrt.exceptions.from_code(error_code))
# on_shutdown MUST NOT reference the connection itself, just the shutdown_future within it.
# Otherwise we create a circular reference that prevents the connection from getting GC'd.
shutdown_future = connection.shutdown_future
def on_shutdown(error_code):
if error_code:
shutdown_future.set_exception(awscrt.exceptions.from_code(error_code))
else:
shutdown_future.set_result(None)
_awscrt.http_client_connection_new(
bootstrap,
on_connection_setup,
on_shutdown,
host_name,
port,
socket_options,
tls_connection_options,
proxy_options)
except Exception as e:
future.set_exception(e)
return future
@property
def host_name(self):
return self._host_name
@property
def port(self):
return self._port
def request(self, request, on_response=None, on_body=None):
return HttpClientStream(self, request, on_response, on_body)
class HttpStreamBase(NativeResource):
__slots__ = ('_connection', '_completion_future', '_on_body_cb')
def __init__(self, connection, on_body=None):
super(HttpStreamBase, self).__init__()
self._connection = connection
self._completion_future = Future()
self._on_body_cb = on_body
@property
def connection(self):
return self._connection
@property
def completion_future(self):
return self._completion_future
def _on_body(self, chunk):
if self._on_body_cb:
self._on_body_cb(http_stream=self, chunk=chunk)
class HttpClientStream(HttpStreamBase):
    __slots__ = ('_response_status_code', '_on_response_cb')  # _on_body_cb is already a slot on the base class
def __init__(self, connection, request, on_response=None, on_body=None):
assert isinstance(connection, HttpClientConnection)
assert isinstance(request, HttpRequest)
assert callable(on_response) or on_response is None
assert callable(on_body) or on_body is None
super(HttpClientStream, self).__init__(connection, on_body)
self._on_response_cb = on_response
self._response_status_code = None
_awscrt.http_client_stream_new(self, connection, request)
@property
def response_status_code(self):
return self._response_status_code
def _on_response(self, status_code, name_value_pairs):
self._response_status_code = status_code
if self._on_response_cb:
self._on_response_cb(http_stream=self, status_code=status_code, headers=name_value_pairs)
def _on_complete(self, error_code):
if error_code == 0:
self._completion_future.set_result(self._response_status_code)
else:
self._completion_future.set_exception(awscrt.exceptions.from_code(error_code))
class HttpMessageBase(NativeResource):
"""
Base for HttpRequest and HttpResponse classes.
"""
    __slots__ = ('_headers',)
def __init__(self, binding, headers, body_stream=None):
assert isinstance(headers, HttpHeaders)
super(HttpMessageBase, self).__init__()
self._binding = binding
self._headers = headers
if body_stream:
self.body_stream = body_stream
@property
def headers(self):
return self._headers
@property
def body_stream(self):
return _awscrt.http_message_get_body_stream(self._binding)
@body_stream.setter
def body_stream(self, stream):
stream = InputStream.wrap(stream)
return _awscrt.http_message_set_body_stream(self._binding, stream)
class HttpRequest(HttpMessageBase):
"""
Definition for an outgoing HTTP request.
The request may be transformed (ex: signing the request) before its data is eventually sent.
"""
__slots__ = ()
def __init__(self, method='GET', path='/', headers=None, body_stream=None):
assert isinstance(headers, HttpHeaders) or headers is None
if headers is None:
headers = HttpHeaders()
binding = _awscrt.http_message_new_request(headers)
super(HttpRequest, self).__init__(binding, headers, body_stream)
self.method = method
self.path = path
@classmethod
def _from_bindings(cls, request_binding, headers_binding):
"""Construct HttpRequest and its HttpHeaders from pre-existing native objects"""
# avoid class's default constructor
# just invoke parent class's __init__()
request = cls.__new__(cls)
headers = HttpHeaders._from_binding(headers_binding)
super(cls, request).__init__(request_binding, headers)
return request
@property
def method(self):
return _awscrt.http_message_get_request_method(self._binding)
@method.setter
def method(self, method):
_awscrt.http_message_set_request_method(self._binding, method)
@property
def path(self):
return _awscrt.http_message_get_request_path(self._binding)
@path.setter
def path(self, path):
return _awscrt.http_message_set_request_path(self._binding, path)
class HttpHeaders(NativeResource):
"""
Collection of HTTP headers.
A given header name may have multiple values.
Header names are always treated in a case-insensitive manner.
HttpHeaders can be iterated over as (name,value) pairs.
"""
__slots__ = ()
def __init__(self, name_value_pairs=None):
"""
Construct from a collection of (name,value) pairs.
"""
super(HttpHeaders, self).__init__()
self._binding = _awscrt.http_headers_new()
if name_value_pairs:
self.add_pairs(name_value_pairs)
@classmethod
def _from_binding(cls, binding):
"""Construct from a pre-existing native object"""
headers = cls.__new__(cls) # avoid class's default constructor
super(cls, headers).__init__() # just invoke parent class's __init__()
headers._binding = binding
return headers
def add(self, name, value):
"""
Add a name-value pair.
"""
assert isinstance_str(name)
assert isinstance_str(value)
_awscrt.http_headers_add(self._binding, name, value)
def add_pairs(self, name_value_pairs):
"""
Add list of (name,value) pairs.
"""
_awscrt.http_headers_add_pairs(self._binding, name_value_pairs)
def set(self, name, value):
"""
Set a name-value pair, any existing values for the name are removed.
"""
assert isinstance_str(name)
assert isinstance_str(value)
_awscrt.http_headers_set(self._binding, name, value)
def get_values(self, name):
"""
Return an iterator over the values for this name.
"""
assert isinstance_str(name)
name = name.lower()
for i in range(_awscrt.http_headers_count(self._binding)):
name_i, value_i = _awscrt.http_headers_get_index(self._binding, i)
if name_i.lower() == name:
yield value_i
def get(self, name, default=None):
"""
Get the first value for this name, ignoring any additional values.
Returns `default` if no values exist.
"""
assert isinstance_str(name)
return _awscrt.http_headers_get(self._binding, name, default)
def remove(self, name):
"""
Remove all values for this name.
Raises a KeyError if name not found.
"""
assert isinstance_str(name)
_awscrt.http_headers_remove(self._binding, name)
def remove_value(self, name, value):
"""
Remove a specific value for this name.
Raises a ValueError if value not found.
"""
assert isinstance_str(name)
assert isinstance_str(value)
_awscrt.http_headers_remove_value(self._binding, name, value)
def clear(self):
"""
Clear all headers
"""
_awscrt.http_headers_clear(self._binding)
def __iter__(self):
"""
Iterate over all (name,value) pairs.
"""
for i in range(_awscrt.http_headers_count(self._binding)):
yield _awscrt.http_headers_get_index(self._binding, i)
def __str__(self):
return self.__class__.__name__ + "(" + str([pair for pair in self]) + ")"
class HttpProxyAuthenticationType(IntEnum):
"""
Which proxy authentication type to use.
Nothing: no authentication
Basic: username and password
"""
Nothing = 0
Basic = 1
class HttpProxyOptions(object):
"""
Proxy options for HTTP clients.
host_name: Name of the proxy server to connect through.
port: Port number of the proxy server to connect through.
tls_connection_options: Optional TlsConnectionOptions for the Local <-> Proxy connection.
Must be distinct from the TlsConnectionOptions provided to the HTTP connection.
auth_type: Type of proxy authentication to use. Default is HttpProxyAuthenticationType.Nothing.
auth_username: Username to use when auth_type is HttpProxyAuthenticationType.Basic.
auth_password: Password to use when auth_type is HttpProxyAuthenticationType.Basic.
"""
def __init__(self,
host_name,
port,
tls_connection_options=None,
auth_type=HttpProxyAuthenticationType.Nothing,
auth_username=None,
auth_password=None):
self.host_name = host_name
self.port = port
self.tls_connection_options = tls_connection_options
self.auth_type = auth_type
self.auth_username = auth_username
self.auth_password = auth_password
```
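Taken together, these classes form a small asynchronous HTTP client. The sketch below shows one way they compose; it assumes the `awscrt.io` bootstrap setup used elsewhere in this repo, `example.com`/port 80 are placeholders, and the keyword names for `HttpClientConnection.new` are inferred from the native call above rather than confirmed from its Python signature.
```python
# Usage sketch (assumptions noted above; not part of the library source).
import awscrt.io
from awscrt.http import HttpClientConnection, HttpHeaders, HttpRequest

elg = awscrt.io.EventLoopGroup()
resolver = awscrt.io.DefaultHostResolver(elg)
bootstrap = awscrt.io.ClientBootstrap(elg, resolver)

# new() returns a Future that resolves to the connection.
connection = HttpClientConnection.new(
    bootstrap=bootstrap,
    host_name='example.com',
    port=80,
    socket_options=awscrt.io.SocketOptions(),
).result(10)

request = HttpRequest('GET', '/', headers=HttpHeaders([('Host', 'example.com')]))
stream = connection.request(request)
status = stream.completion_future.result(10)  # resolves to the response status code
```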
#### File: johnjdailey/aws-crt-python/setup.py
```python
from __future__ import print_function
import distutils.ccompiler
import glob
import os
import os.path
import platform
import setuptools
import setuptools.command.build_ext
import subprocess
import sys
def is_64bit():
return sys.maxsize > 2**32
def is_32bit():
return not is_64bit()
def is_arm():
return platform.machine().startswith('arm')
def determine_cross_compile_args():
host_arch = platform.machine()
if (host_arch == 'AMD64' or host_arch == 'x86_64') and is_32bit() and sys.platform != 'win32':
return ['-DCMAKE_C_FLAGS=-m32']
return []
def determine_generator_args():
if sys.platform == 'win32':
try:
# See which compiler python picks
compiler = distutils.ccompiler.new_compiler()
compiler.initialize()
# Look at compiler path to divine the Visual Studio version.
# This technique may not work with customized VS install paths.
# An alternative would be to utilize private python calls:
# (distutils._msvccompiler._find_vc2017() and _find_vc2015()).
if '\\Microsoft Visual Studio\\2019' in compiler.cc:
vs_version = 16
vs_year = 2019
elif '\\Microsoft Visual Studio\\2017' in compiler.cc:
vs_version = 15
vs_year = 2017
elif '\\Microsoft Visual Studio 14.0' in compiler.cc:
vs_version = 14
vs_year = 2015
assert(vs_version and vs_year)
except BaseException:
raise RuntimeError('No supported version of MSVC compiler could be found!')
print('Using Visual Studio', vs_version, vs_year)
vs_version_gen_str = "Visual Studio {} {}".format(vs_version, vs_year)
if vs_year <= 2017:
# For VS2017 and earlier, architecture goes at end of generator string
if is_64bit():
vs_version_gen_str += " Win64"
return ['-G', vs_version_gen_str]
# For VS2019 (and presumably later), architecture is passed via -A flag
arch_str = "x64" if is_64bit() else "Win32"
return ['-G', vs_version_gen_str, '-A', arch_str]
return []
class AwsLib(object):
def __init__(self, name, extra_cmake_args=[]):
self.name = name
self.extra_cmake_args = extra_cmake_args
# The extension depends on these libs.
# They're built along with the extension, in the order listed.
AWS_LIBS = []
if sys.platform != 'darwin' and sys.platform != 'win32':
AWS_LIBS.append(AwsLib('s2n'))
AWS_LIBS.append(AwsLib('aws-c-common'))
AWS_LIBS.append(AwsLib('aws-c-io'))
AWS_LIBS.append(AwsLib('aws-c-cal'))
AWS_LIBS.append(AwsLib('aws-c-compression'))
AWS_LIBS.append(AwsLib('aws-c-http'))
AWS_LIBS.append(AwsLib('aws-c-auth'))
AWS_LIBS.append(AwsLib('aws-c-mqtt'))
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
DEP_BUILD_DIR = os.path.join(PROJECT_DIR, 'build', 'deps')
DEP_INSTALL_PATH = os.environ.get('AWS_C_INSTALL', os.path.join(DEP_BUILD_DIR, 'install'))
class awscrt_build_ext(setuptools.command.build_ext.build_ext):
def _build_dependency(self, aws_lib):
prev_cwd = os.getcwd() # restore cwd at end of function
lib_source_dir = os.path.join(PROJECT_DIR, 'aws-common-runtime', aws_lib.name)
build_type = 'Debug' if self.debug else 'RelWithDebInfo'
# Skip library if it wasn't pulled
if not os.path.exists(os.path.join(lib_source_dir, 'CMakeLists.txt')):
print("--- Skipping dependency: '{}' source not found ---".format(aws_lib.name))
return
print("--- Building dependency: {} ({}) ---".format(aws_lib.name, build_type))
lib_build_dir = os.path.join(DEP_BUILD_DIR, aws_lib.name)
if not os.path.exists(lib_build_dir):
os.makedirs(lib_build_dir)
os.chdir(lib_build_dir)
# cmake configure
cmake_args = ['cmake']
cmake_args.extend(determine_generator_args())
cmake_args.extend(determine_cross_compile_args())
cmake_args.extend([
'-DCMAKE_PREFIX_PATH={}'.format(DEP_INSTALL_PATH),
'-DCMAKE_INSTALL_PREFIX={}'.format(DEP_INSTALL_PATH),
'-DBUILD_SHARED_LIBS=OFF',
'-DCMAKE_BUILD_TYPE={}'.format(build_type),
'-DBUILD_TESTING=OFF',
'-DS2N_NO_PQ_ASM=ON',
])
if self.include_dirs:
cmake_args.append('-DCMAKE_INCLUDE_PATH={}'.format(';'.join(self.include_dirs)))
if self.library_dirs:
cmake_args.append('-DCMAKE_LIBRARY_PATH={}'.format(';'.join(self.library_dirs)))
cmake_args.extend(aws_lib.extra_cmake_args)
cmake_args.append(lib_source_dir)
subprocess.check_call(cmake_args)
# cmake build/install
build_cmd = [
'cmake',
'--build', './',
'--config', build_type,
'--target', 'install',
]
subprocess.check_call(build_cmd)
os.chdir(prev_cwd)
def run(self):
# build dependencies
for lib in AWS_LIBS:
self._build_dependency(lib)
# update paths so awscrt_ext can access dependencies
self.include_dirs.append(os.path.join(DEP_INSTALL_PATH, 'include'))
# some platforms (ex: fedora) use /lib64 instead of just /lib
lib_dir = 'lib'
if is_64bit() and os.path.exists(os.path.join(DEP_INSTALL_PATH, 'lib64')):
lib_dir = 'lib64'
if is_32bit() and os.path.exists(os.path.join(DEP_INSTALL_PATH, 'lib32')):
lib_dir = 'lib32'
self.library_dirs.append(os.path.join(DEP_INSTALL_PATH, lib_dir))
# continue with normal build_ext.run()
setuptools.command.build_ext.build_ext.run(self) # python2 breaks if we use super().run()
def awscrt_ext():
# fetch the CFLAGS/LDFLAGS from env
extra_compile_args = os.environ.get('CFLAGS', '').split()
extra_link_args = os.environ.get('LDFLAGS', '').split()
extra_objects = []
libraries = [x.name for x in AWS_LIBS]
# libraries must be passed to the linker with upstream dependencies listed last.
libraries.reverse()
if sys.platform == 'win32':
# The Windows APIs used under the hood. Since we're statically linking, we have to list the entire dependency chain.
libraries += ['Secur32', 'Crypt32', 'Advapi32', 'BCrypt', 'Kernel32', 'Ws2_32', 'Shlwapi']
# Ensure that debug info is in the obj files, and that it is linked into the .pyd so that
# stack traces and dumps are useful
extra_compile_args += ['/Z7']
extra_link_args += ['/DEBUG']
elif sys.platform == 'darwin':
extra_link_args += ['-framework', 'Security']
# HACK: Don't understand why, but if AWS_LIBS are linked normally on macos, we get this error:
# ImportError: dlopen(_awscrt.cpython-37m-darwin.so, 2): Symbol not found: _aws_byte_cursor_eq_ignore_case
# Workaround is to pass them as 'extra_objects' instead of 'libraries'.
extra_objects = [os.path.join(DEP_INSTALL_PATH, 'lib', 'lib{}.a'.format(x.name)) for x in AWS_LIBS]
libraries = []
else: # unix
# linker will prefer shared libraries over static if it can find both.
# force linker to choose the static one by using "-l:libcrypto.a" syntax instead of just "-lcrypto".
libraries += [':libcrypto.a', 'rt']
if distutils.ccompiler.get_default_compiler() != 'msvc':
extra_compile_args += ['-Wextra', '-Werror', '-Wno-strict-aliasing', '-std=gnu99']
return setuptools.Extension(
'_awscrt',
language='c',
libraries=libraries,
sources=glob.glob('source/*.c'),
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
extra_objects=extra_objects
)
setuptools.setup(
name="awscrt",
version="0.5.6",
author="Amazon Web Services, Inc",
author_email="<EMAIL>",
description="A common runtime for AWS Python projects",
url="https://github.com/awslabs/aws-crt-python",
packages=['awscrt'],
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
install_requires=[
'enum34;python_version<"3.4"',
'futures;python_version<"3.2"',
],
ext_modules=[awscrt_ext()],
cmdclass={'build_ext': awscrt_build_ext},
test_suite='test',
tests_require=[
'boto3'
],
)
```
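For reference, here is what `determine_generator_args()` resolves to on a 64-bit Windows host, following the branch logic above:
```python
# determine_generator_args() results by detected toolchain (64-bit host):
#   VS2019 -> ['-G', 'Visual Studio 16 2019', '-A', 'x64']
#   VS2017 -> ['-G', 'Visual Studio 15 2017 Win64']
#   VS2015 -> ['-G', 'Visual Studio 14 2015 Win64']
# On non-Windows platforms the function returns [].
```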
#### File: aws-crt-python/test/test_auth.py
```python
from __future__ import absolute_import
import awscrt.auth
import awscrt.io
import datetime
import os
from test import NativeResourceTest, TIMEOUT
EXAMPLE_ACCESS_KEY_ID = 'example_access_key_id'
EXAMPLE_SECRET_ACCESS_KEY = 'example_secret_access_key'
EXAMPLE_SESSION_TOKEN = 'example_session_token'
class ScopedEnvironmentVariable(object):
"""
Set environment variable for lifetime of this object.
"""
def __init__(self, key, value):
self.key = key
self.prev_value = os.environ.get(key)
os.environ[key] = value
def __del__(self):
if self.prev_value is None:
del os.environ[self.key]
else:
os.environ[self.key] = self.prev_value
class TestCredentials(NativeResourceTest):
def test_create(self):
credentials = awscrt.auth.AwsCredentials(
EXAMPLE_ACCESS_KEY_ID,
EXAMPLE_SECRET_ACCESS_KEY,
EXAMPLE_SESSION_TOKEN)
self.assertEqual(EXAMPLE_ACCESS_KEY_ID, credentials.access_key_id)
self.assertEqual(EXAMPLE_SECRET_ACCESS_KEY, credentials.secret_access_key)
self.assertEqual(EXAMPLE_SESSION_TOKEN, credentials.session_token)
def test_create_no_session_token(self):
credentials = awscrt.auth.AwsCredentials(EXAMPLE_ACCESS_KEY_ID, EXAMPLE_SECRET_ACCESS_KEY)
self.assertEqual(EXAMPLE_ACCESS_KEY_ID, credentials.access_key_id)
self.assertEqual(EXAMPLE_SECRET_ACCESS_KEY, credentials.secret_access_key)
self.assertIsNone(credentials.session_token)
class TestProvider(NativeResourceTest):
def test_static_provider(self):
provider = awscrt.auth.AwsCredentialsProvider.new_static(
EXAMPLE_ACCESS_KEY_ID,
EXAMPLE_SECRET_ACCESS_KEY,
EXAMPLE_SESSION_TOKEN)
future = provider.get_credentials()
credentials = future.result(TIMEOUT)
self.assertEqual(EXAMPLE_ACCESS_KEY_ID, credentials.access_key_id)
self.assertEqual(EXAMPLE_SECRET_ACCESS_KEY, credentials.secret_access_key)
self.assertEqual(EXAMPLE_SESSION_TOKEN, credentials.session_token)
# TODO: test currently broken because None session_token comes back as an empty string due to inconsistent use of
# aws_byte_cursor by value/pointer in aws-c-auth APIs.
#
# def test_static_provider_no_session_token(self):
# provider = AwsCredentialsProvider.new_static(
# self.example_access_key_id,
# self.example_secret_access_key)
# future = provider.get_credentials()
# credentials = future.result(TIMEOUT)
# self.assertEqual(self.example_access_key_id, credentials.access_key_id)
# self.assertEqual(self.example_secret_access_key, credentials.secret_access_key)
# self.assertIsNone(credentials.session_token)
def test_default_provider(self):
# Use environment variable to force specific credentials file
scoped_env = ScopedEnvironmentVariable('AWS_SHARED_CREDENTIALS_FILE', 'test/resources/credentials_test')
event_loop_group = awscrt.io.EventLoopGroup()
host_resolver = awscrt.io.DefaultHostResolver(event_loop_group)
bootstrap = awscrt.io.ClientBootstrap(event_loop_group, host_resolver)
provider = awscrt.auth.AwsCredentialsProvider.new_default_chain(bootstrap)
future = provider.get_credentials()
credentials = future.result(TIMEOUT)
self.assertEqual('credentials_test_access_key_id', credentials.access_key_id)
self.assertEqual('credentials_test_secret_access_key', credentials.secret_access_key)
self.assertIsNone(credentials.session_token)
del scoped_env
class TestSigningConfig(NativeResourceTest):
def test_create(self):
algorithm = awscrt.auth.AwsSigningAlgorithm.SigV4QueryParam
credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static(
EXAMPLE_ACCESS_KEY_ID, EXAMPLE_SECRET_ACCESS_KEY)
region = 'us-west-2'
service = 'aws-suborbital-ion-cannon'
date = datetime.datetime(year=2000, month=1, day=1)
def should_sign_param(name):
return not name.lower().startswith('x-do-not-sign')
use_double_uri_encode = True
should_normalize_uri_path = False
body_signing_type = awscrt.auth.AwsBodySigningConfigType.BodySigningOff
cfg = awscrt.auth.AwsSigningConfig(algorithm=algorithm,
credentials_provider=credentials_provider,
region=region,
service=service,
date=date,
should_sign_param=should_sign_param,
use_double_uri_encode=use_double_uri_encode,
should_normalize_uri_path=should_normalize_uri_path,
body_signing_type=body_signing_type)
self.assertIs(algorithm, cfg.algorithm) # assert IS enum, not just EQUAL
self.assertIs(credentials_provider, cfg.credentials_provider)
self.assertEqual(region, cfg.region)
self.assertEqual(service, cfg.service)
self.assertEqual(date, cfg.date)
self.assertIs(should_sign_param, cfg.should_sign_param)
self.assertEqual(use_double_uri_encode, cfg.use_double_uri_encode)
self.assertEqual(should_normalize_uri_path, cfg.should_normalize_uri_path)
self.assertIs(body_signing_type, cfg.body_signing_type)
def test_replace(self):
credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static(
EXAMPLE_ACCESS_KEY_ID, EXAMPLE_SECRET_ACCESS_KEY)
# nondefault values, to be sure they're carried over correctly
orig_cfg = awscrt.auth.AwsSigningConfig(algorithm=awscrt.auth.AwsSigningAlgorithm.SigV4QueryParam,
credentials_provider=credentials_provider,
region='us-west-1',
service='aws-suborbital-ion-cannon',
date=datetime.datetime(year=2000, month=1, day=1),
should_sign_param=lambda x: False,
use_double_uri_encode=True,
should_normalize_uri_path=False,
body_signing_type=awscrt.auth.AwsBodySigningConfigType.BodySigningOff)
# Call replace on single attribute, then assert that ONLY the one attribute differs
def _replace_attr(name, value):
new_cfg = orig_cfg.replace(**{name: value})
self.assertIsNot(orig_cfg, new_cfg) # must return new object
self.assertEqual(value, getattr(new_cfg, name)) # must replace specified value
# check that only the one attribute differs
for attr in awscrt.auth.AwsSigningConfig._attributes:
if attr == name:
self.assertNotEqual(getattr(orig_cfg, attr), getattr(new_cfg, attr),
"replaced value should not match original")
else:
self.assertEqual(getattr(orig_cfg, attr), getattr(new_cfg, attr),
"value should match original")
_replace_attr('algorithm', awscrt.auth.AwsSigningAlgorithm.SigV4Header)
_replace_attr('credentials_provider',
awscrt.auth.AwsCredentialsProvider.new_static(EXAMPLE_ACCESS_KEY_ID, EXAMPLE_SECRET_ACCESS_KEY))
_replace_attr('region', 'us-west-2')
_replace_attr('service', 'aws-nothing-but-bees')
_replace_attr('date', datetime.datetime(year=2001, month=1, day=1))
_replace_attr('should_sign_param', lambda x: True)
_replace_attr('use_double_uri_encode', False)
_replace_attr('should_normalize_uri_path', True)
_replace_attr('body_signing_type', awscrt.auth.AwsBodySigningConfigType.BodySigningOn)
# check that we can replace multiple values at once
new_cfg = orig_cfg.replace(region='us-west-3', service='aws-slow-blinking')
self.assertEqual('us-west-3', new_cfg.region)
self.assertEqual('aws-slow-blinking', new_cfg.service)
self.assertEqual(orig_cfg.should_sign_param, new_cfg.should_sign_param)
# Test values copied from aws-c-auth/tests/aws-sig-v4-test-suite/get-vanilla
SIGV4TEST_ACCESS_KEY_ID = 'AKIDEXAMPLE'
SIGV4TEST_SECRET_ACCESS_KEY = '<KEY>'
SIGV4TEST_SESSION_TOKEN = None
SIGV4TEST_SERVICE = 'service'
SIGV4TEST_REGION = 'us-east-1'
SIGV4TEST_METHOD = 'GET'
SIGV4TEST_PATH = '/'
SIGV4TEST_DATE = datetime.datetime(year=2015, month=8, day=30, hour=12, minute=36, second=0, tzinfo=awscrt.auth._utc)
SIGV4TEST_UNSIGNED_HEADERS = [
('Host', 'example.amazonaws.com'),
]
SIGV4TEST_SIGNED_HEADERS = [
('Host',
'example.amazonaws.com'),
('X-Amz-Date',
'20150830T123600Z'),
('Authorization',
'AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date, Signature=5fa00fa31553b73ebf1942676e86291e8372ff2a2260956d9b8aae1d763fbf31')]
class TestSigner(NativeResourceTest):
def test_signing_sigv4_headers(self):
credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static(
SIGV4TEST_ACCESS_KEY_ID, SIGV4TEST_SECRET_ACCESS_KEY, SIGV4TEST_SESSION_TOKEN)
signing_config = awscrt.auth.AwsSigningConfig(
algorithm=awscrt.auth.AwsSigningAlgorithm.SigV4Header,
credentials_provider=credentials_provider,
region=SIGV4TEST_REGION,
service=SIGV4TEST_SERVICE,
date=SIGV4TEST_DATE,
body_signing_type=awscrt.auth.AwsBodySigningConfigType.BodySigningOff)
http_request = awscrt.http.HttpRequest(
method=SIGV4TEST_METHOD,
path=SIGV4TEST_PATH,
headers=awscrt.http.HttpHeaders(SIGV4TEST_UNSIGNED_HEADERS))
signing_future = awscrt.auth.aws_sign_request(http_request, signing_config)
signing_result = signing_future.result(TIMEOUT)
self.assertIs(http_request, signing_result) # should be same object
self.assertEqual(SIGV4TEST_METHOD, http_request.method)
self.assertEqual(SIGV4TEST_PATH, http_request.path)
# existing headers should remain
for prev_header in SIGV4TEST_UNSIGNED_HEADERS:
self.assertIn(prev_header, http_request.headers)
# signed headers must be present
for signed_header in SIGV4TEST_SIGNED_HEADERS:
self.assertIn(signed_header, http_request.headers)
``` |
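`ScopedEnvironmentVariable` relies on `__del__` for cleanup, which fires at a nondeterministic time under some interpreters. A context-manager variant (a hypothetical alternative, not part of this test suite) makes the restore explicit:
```python
import os
from contextlib import contextmanager

@contextmanager
def scoped_environment_variable(key, value):
    """Set an environment variable for the duration of a with-block."""
    prev_value = os.environ.get(key)
    os.environ[key] = value
    try:
        yield
    finally:
        if prev_value is None:
            del os.environ[key]
        else:
            os.environ[key] = prev_value

# with scoped_environment_variable('AWS_SHARED_CREDENTIALS_FILE',
#                                  'test/resources/credentials_test'):
#     ...
```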
{
"source": "johnjdailey/DS",
"score": 3
} |
#### File: app/api/recommends.py
```python
import logging
from typing import List, Optional
from fastapi import APIRouter
from pydantic import BaseModel, Field
from ..recommend import get_recommendations
log = logging.getLogger(__name__)
router = APIRouter()
class RecommendRequest(BaseModel):
"""Input schema - the user's choices for effects and issues."""
effects: List[str] = Field(
..., # required field, no default
title='Preferred effects',
description='List of strings containing preferred dominant effects.',
example=['euphoric', 'energetic']
)
helps: List[str] = Field(
..., # required field, no default
title='Effective for these issues',
description=('List of strings containing issues '
'which strains are reported to help.'),
example=['add/adhd', 'anorexia']
)
text: Optional[str] = Field('', example='I prefer sativa heavy hybrids.')
count: Optional[int] = Field(4, gt=0, le=25, example=4)
def format(self, model='nn'):
"""Combine input data into format ready for preprocessing."""
if 'nn' == model:
return ' '.join(self.effects + self.helps + [self.text])
class RecommendItem(BaseModel):
"""Output schema - strain information."""
strain: str = Field(..., title='Strain Name')
strain_type: str = Field(..., title='Strain Type')
description: str = Field(..., title='Strain Description')
effects: List[str] = Field(..., example=['euphoric', 'energetic'])
helps: List[str] = Field(..., example=['add/adhd', 'anorexia'])
class RecommendResponse(BaseModel):
"""Output schema - List of recommended strains."""
strains: List[RecommendItem] = Field(...)
@router.post('/recommends', response_model=RecommendResponse)
async def recommends(item: RecommendRequest):
"""
Route for user recommendations.
### Request:
user selected labels, user text, and desired number of recommendations
- `effects`: list of strings of effects
- `helps`: list of strings of medical conditions it can help
- `text`: optional string that describes the user's preferences
- `count`: optional integer, number of recommendations to return
### Response:
a list of information about each recommended strains
- `strain`: name of strain
- `strain_type`: indica or sativa
- `description`: user descriptions of strains
- `effects`: specific to the particular strain
- `helps`: specific to the particular strain
"""
return {
'strains': get_recommendations(user_input=item.format('nn'),
num=item.count),
}
```
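`RecommendRequest.format` flattens the labels and free text into the single string the 'nn' pipeline consumes. A quick sketch (the absolute module path `app.api.recommends` is assumed from the test suite's imports):
```python
from app.api.recommends import RecommendRequest  # assumed module path

req = RecommendRequest(
    effects=['euphoric', 'energetic'],
    helps=['add/adhd'],
    text='I prefer sativa heavy hybrids.',
)
assert req.format('nn') == 'euphoric energetic add/adhd I prefer sativa heavy hybrids.'
```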
#### File: app/tests/test_recommends.py
```python
from fastapi.testclient import TestClient
from app.main import app
client = TestClient(app)
def test_valid_input():
"""Return 200 Success on request to route."""
request_data = {
'effects': ['happy', 'focused', 'giggly', 'hungry', 'sleepy'],
'helps': ['migranes', 'nausea'],
'text': 'I prefer sativa heavy hybrids.',
'count': 6
}
response = client.post('/recommends', json=request_data)
json_body = response.json()
assert response.status_code == 200
assert 'strains' in json_body.keys()
assert len(json_body['strains']) == 6
for strain in json_body['strains']:
assert 'strain' in strain.keys()
assert 'strain_type' in strain.keys()
assert 'description' in strain.keys()
assert 'effects' in strain.keys()
assert 'helps' in strain.keys()
def test_no_input():
"""Return 422 Unprocessable Entity on empty POST request."""
response = client.post('/recommends')
assert response.status_code == 422
assert isinstance(response.json(), dict)
def test_request_get():
"""Return 405 Method Not Allowed on get request."""
response = client.get('/recommends')
assert response.status_code == 405
assert isinstance(response.json(), dict)
```
#### File: leafly/spiders/leafly_spider.py
```python
import scrapy
class LeaflySpider(scrapy.Spider):
name = "leafly"
def start_requests(self):
urls = [
f'https://www.leafly.com/strains?sort=name&page={i}'
for i in range(1, 116)
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
for strain_link in response.css('div.strain-playlist-grid a::attr(href)'):
strain_link = response.urljoin(strain_link.get())
yield scrapy.Request(strain_link, self.parse_strain)
def parse_strain(self, response):
strain = response.css('#strain-card-data > header > div.flex.flex-row.justify-between.items-center.border-t.border-deep-green-40.py-xs > div > h1').css('::text').get()
aka = response.css('#strain-card-data > header > div.flex.flex-row.justify-between.items-center.border-t.border-deep-green-40.py-xs > div > h2').css('::text').get()
if aka and aka.startswith('aka '):
aka = aka[4:]
type_ = response.css('#strain-card-data > header > div.flex.flex-row.justify-between.items-center.pb-xs > h2 > a').css('::text').get()
rating = response.css('#strain-card-data > header > div.flex.flex-row.justify-between.items-center.pb-xs > div > p > span').css('::text').get()
try:
rating = float(rating.strip())
except:
rating = None
num_reviews = response.css('#strain-card-data > header > div.flex.flex-row.justify-between.items-center.pb-xs > div > p > a').css('::text').get()
try:
num_reviews = int(num_reviews.split()[0])
except:
num_reviews = None
description = response.css('#strain-description > div > p')
try:
description = ''.join(description.css('::text').getall())
except:
description = response.css('#strain-description > div > p').css('::text').get()
num_effects_reports = response.css('#strain-effects-section > div.flex.items-center.font-mono.text-xs').css('::text').get()
try:
num_effects_reports = int(num_effects_reports.split()[0])
except:
num_effects_reports = None
if num_effects_reports is not None:
effects = response.css('#strain-effects-section > div.react-tabs.mt-lg > div > div')
else:
effects = None
if effects and len(effects) == 3:
feelings = self.parse_effects(effects[0])
helps = self.parse_effects(effects[1])
negative = self.parse_effects(effects[2])
else:
feelings = helps = negative = [(None, None)] * 5
yield {
'strain': strain,
'aka': aka,
'type': type_,
'rating': rating,
'num_reviews': num_reviews,
'num_effects_reports': num_effects_reports,
'feeling_1': feelings[0][0],
'feeling_1_pct': feelings[0][1],
'feeling_2': feelings[1][0],
'feeling_2_pct': feelings[1][1],
'feeling_3': feelings[2][0],
'feeling_3_pct': feelings[2][1],
'feeling_4': feelings[3][0],
'feeling_4_pct': feelings[3][1],
'feeling_5': feelings[4][0],
'feeling_5_pct': feelings[4][1],
'helps_1': helps[0][0],
'helps_1_pct': helps[0][1],
'helps_2': helps[1][0],
'helps_2_pct': helps[1][1],
'helps_3': helps[2][0],
'helps_3_pct': helps[2][1],
'helps_4': helps[3][0],
'helps_4_pct': helps[3][1],
'helps_5': helps[4][0],
'helps_5_pct': helps[4][1],
'negative_1': negative[0][0],
'negative_1_pct': negative[0][1],
'negative_2': negative[1][0],
'negative_2_pct': negative[1][1],
'negative_3': negative[2][0],
'negative_3_pct': negative[2][1],
'negative_4': negative[3][0],
'negative_4_pct': negative[3][1],
'negative_5': negative[4][0],
'negative_5_pct': negative[4][1],
'description': description,
'url': response.url,
}
def parse_effects(self, sl: scrapy.selector.unified.SelectorList):
texts = sl.css('::text').getall()
effects = []
for i in range(0, len(texts), 4):
effects.append((texts[i], float(texts[i+2]) / 100))
for i in range(5 - len(effects)):
effects.append((None, None))
return effects
``` |
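`parse_effects` assumes each effect spans four consecutive text nodes (name, separator, percentage, trailing text) and pads the result to five pairs. A standalone sketch of that stride-4 parse with illustrative node text (not captured from the live site):
```python
texts = ['happy', ' ', '52', '% of people report feeling happy',
         'relaxed', ' ', '45', '% of people report feeling relaxed']
effects = []
for i in range(0, len(texts), 4):
    effects.append((texts[i], float(texts[i + 2]) / 100))
for _ in range(5 - len(effects)):
    effects.append((None, None))
# effects == [('happy', 0.52), ('relaxed', 0.45),
#             (None, None), (None, None), (None, None)]
```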
{
"source": "johnjdailey/FinMesh",
"score": 4
} |
#### File: FinMesh/auxiliary/__init__.py
```python
from nltk.corpus import words
def pick_bad_apples(word, ignore_list):
## Returns True if the word contains any ignored character and should be skipped
for i in ignore_list:
if i in word:
return True
return False
def real_word_frequency(file):
## Returns a dict of real words mapped to use frequency, sorted ascending by count
with open(file,'r') as f:
word_list = words.words()
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
file_words = []
# Quick sieve used by pick_bad_apples to quickly throw out obviously unreal words
ignore = ['-','_','/','=',':',';','<','>','#','$','@','*','\\']
lines = f.readlines()
word_freq = {}
final_checked = {}
# Iterate lines from file
for line in lines:
words = line.lower().strip().split(' ')
# Iterate words from line
for word in words:
if not pick_bad_apples(word, ignore):
file_words.append(word)
# Iterate words that pass bad apple check
for w in file_words:
if not w == '':
length = len(w)-1
# Checks that both the first and last characters are letters
if w[0] in alphabet and w[length] in alphabet:
w = w.replace('.', '').strip('\"')
if w in word_freq.keys():
word_freq[w] += 1
else:
word_freq[w] = 1
# Runs a final check against an actual dictionary
for key in word_freq.keys():
if key in word_list:
val = word_freq.get(key)
final_checked[key] = val
# Sort the words by frequency
final_checked_sorted = {k: v for k, v in sorted(final_checked.items(), key=lambda item: item[1])}
return final_checked_sorted
```
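`real_word_frequency` needs the NLTK `words` corpus downloaded once beforehand, and its result is sorted ascending, so the most frequent words come last. A usage sketch with a placeholder file path:
```python
import nltk
nltk.download('words')  # one-time corpus download

freq = real_word_frequency('filing.txt')  # placeholder path
for word, count in list(freq.items())[-10:]:
    print(word, count)  # ten most frequent dictionary words in the file
```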
#### File: FinMesh/iex/forex.py
```python
from ._common import *
# Latest Rates
FOREX_LATEST_URL = prepend_iex_url('fx') + 'latest?'
def forex_latest_rate(symbols, vprint=False):
## Returns the latest FOREX rate for the requested currency pair symbol.
url = FOREX_LATEST_URL + f'symbols={symbols}'
return get_iex_json_request(url, vprint=vprint)
# Currency Conversion
FOREX_CONVERSION_URL = prepend_iex_url('fx') + 'convert?'
def forex_conversion(symbols, amount, vprint=False):
## Converts one currency to another using up-to-date currency information.
url = FOREX_CONVERSION_URL + f'symbols={symbols}&amount={amount}'
return get_iex_json_request(url, vprint=vprint)
# Historical Data
FOREX_HISTORICAL_URL = prepend_iex_url('fx') + 'historical?'
def forex_historical(symbols, vprint=False, **queries):
## Returns historical FOREX rates for the requested currency pair symbol.
url = FOREX_HISTORICAL_URL + f'symbols={symbols}'
for key, value in queries.items():
url += (f"&{key}={value}")
url += '&'
return get_iex_json_request(url, vprint=vprint)
```
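Each helper just assembles an IEX query string and hands it to `get_iex_json_request`, which is expected to supply the API token. A usage sketch; the `last` kwarg here is illustrative of how extra kwargs become query parameters, not a confirmed IEX parameter name:
```python
latest = forex_latest_rate('USDCAD')
converted = forex_conversion('USDCAD', 100)
history = forex_historical('USDCAD', last=5)  # appends &last=5 to the URL
```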
#### File: FinMesh/usgov/__init__.py
```python
import os
import requests
import xmltodict
import csv
import json
# # # # # # # # # #
# FRED DATA BELOW #
# # # # # # # # # #
FRED_BASE_URL = 'https://api.stlouisfed.org/fred/'
GEOFRED_BASE_URL = 'https://api.stlouisfed.org/geofred/'
def append_fred_token(url):
token = os.getenv('FRED_TOKEN')
return f'{url}&api_key={token}'
FRED_SERIES_OBS_URL = FRED_BASE_URL + 'series/observations?'
def fred_series(series, file_type=None, realtime_start=None, realtime_end=None, limit=None, offset=None, sort_order=None, observation_start=None, observation_end=None, units=None, frequency=None, aggregation_method=None, output_type=None, vintage_dates=None):
## Returns time series historical data for the requested FRED data.
url = FRED_SERIES_OBS_URL + f'series_id={series}'
if file_type: url += f'&file_type={file_type}'
if realtime_start: url += f'&realtime_start={realtime_start}'
if realtime_end: url += f'&realtime_end={realtime_end}'
if limit: url += f'&limit={limit}'
if offset: url += f'&offset={offset}'
if sort_order: url += f'&sort_order={sort_order}'
if observation_start: url += f'&observation_start={observation_start}'
if observation_end: url += f'&observation_end={observation_end}'
if units: url += f'&units={units}'
if frequency: url += f'&frequency={frequency}'
if aggregation_method: url += f'&aggregation_method={aggregation_method}'
if output_type: url += f'&output_type={output_type}'
if vintage_dates: url += f'&vintage_dates={vintage_dates}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
GEOFRED_SERIES_META_URL = GEOFRED_BASE_URL + 'series/group?'
def geofred_series_meta(series_id, file_type=None):
## Returns meta data for the requested FRED data.
url = GEOFRED_SERIES_META_URL + f'series_id={series_id}'
if file_type: url += f'&file_type={file_type}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
GEOFRED_REGIONAL_SERIES_URL = GEOFRED_BASE_URL + 'series/data?'
def geofred_regional_series(series_id, file_type=None, date=None, start_date=None):
## Returns the historical, geographically organized time series data for the requested FRED data.
url = GEOFRED_REGIONAL_SERIES_URL + f'series_id={series_id}'
if file_type: url += f'&file_type={file_type}'
if date: url += f'&date={date}'
if start_date: url += f'&start_date={start_date}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
# # # # # # # # # # # # # # # #
# GOVERNMENT YIELD CURVE DATA #
# # # # # # # # # # # # # # # #
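# NOTE: the feed URL below is filtered to April 2019, so get_yield() returns
# the latest curve within that month rather than today's rates.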
GOV_YIELD_URL = 'https://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData?$filter=month(NEW_DATE)%20eq%204%20and%20year(NEW_DATE)%20eq%202019'
def get_yield():
## Returns government treasury bond yields. Organized in Python dictionary format by bond length.
# Formatting of XML to Python Dict
curve = requests.get(GOV_YIELD_URL)
parse_curve = xmltodict.parse(curve.content)
# This is based around retrieving the n last dates or average of n days.
feed = parse_curve['feed']
entry = feed['entry']
last_entry = len(entry)-1
content = entry[last_entry]['content']['m:properties']
# Dict that contains the whole yield curve so there is no need to bring in each rate.
yield_curve_values = {
'date' : entry[last_entry]['content']['m:properties']['d:NEW_DATE']['#text'],
'1month' : float(content['d:BC_1MONTH']['#text']),
'2month' : float(content['d:BC_2MONTH']['#text']),
'3month' : float(content['d:BC_3MONTH']['#text']),
'6month' : float(content['d:BC_6MONTH']['#text']),
'1year' : float(content['d:BC_1YEAR']['#text']),
'2year' : float(content['d:BC_2YEAR']['#text']),
'3year' : float(content['d:BC_3YEAR']['#text']),
'5year' : float(content['d:BC_5YEAR']['#text']),
'10year' : float(content['d:BC_10YEAR']['#text']),
'20year' : float(content['d:BC_20YEAR']['#text']),
'30year' : float(content['d:BC_30YEAR']['#text']),
}
return yield_curve_values
``` |
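Because `get_yield` returns the whole curve as floats, derived measures fall out directly; for example the 2s10s spread, often watched as a recession signal:
```python
curve = get_yield()
spread_2s10s = curve['10year'] - curve['2year']
print(curve['date'], '2s10s spread:', spread_2s10s)
```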
{
"source": "johnjdailey/JS-Realtime-Dashboard",
"score": 3
} |
#### File: blueprints/btc/btc.py
```python
from flask import Blueprint, jsonify
from numpy import minimum
from datetime import datetime
import requests
import psycopg2
import warnings
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
warnings.filterwarnings("ignore")
btcData = Blueprint('btc', __name__)
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
@btcData.route("/btc/profit/deals", methods=["GET"])
def get_btc_deals():
res = {}
cur = conn.cursor()
cur.execute(
'''SELECT COUNT(*)/3 FROM Recommendations''')
rows = cur.fetchall()
conn.commit()
res["count"] = int(rows[0][0])
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/recommendation/sell", methods=["GET"])
def post_btc_recommendation_sell():
ACTUAL_URL = "http://localhost:9000/btc/price"
actual_res = requests.get(url=ACTUAL_URL)
actual_data = actual_res.json()[-1:]
current_time_str = actual_data[0]["datetime"].split(":")
price = actual_data[0]["price"]
current_time = datetime.now()
current_time = current_time.replace(hour=int(current_time_str[0]), minute=int(
current_time_str[1]), second=int(current_time_str[2]), microsecond=0)
cur = conn.cursor()
cur.execute(
f"INSERT INTO Recommendations (Created_at,recommendation,price) VALUES ('{str(current_time)}', 'SELL', {price})")
conn.commit()
res = {}
res["status"] = "Success"
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/recommendation/buy", methods=["GET"])
def post_btc_recommendation_buy():
ACTUAL_URL = "http://localhost:9000/btc/price"
actual_res = requests.get(url=ACTUAL_URL)
actual_data = actual_res.json()[-1:]
current_time_str = actual_data[0]["datetime"].split(":")
price = actual_data[0]["price"]
current_time = datetime.now()
current_time = current_time.replace(hour=int(current_time_str[0]), minute=int(
current_time_str[1]), second=int(current_time_str[2]), microsecond=0)
cur = conn.cursor()
cur.execute(
f"INSERT INTO Recommendations (Created_at,recommendation,price) VALUES ('{str(current_time)}', 'BUY', {price})")
conn.commit()
res = {}
res["status"] = "Success"
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/profit/details/lastOperation", methods=["GET"])
def get_btc_profit_details_last_operation():
res = {}
cur = conn.cursor()
cur.execute(
'''SELECT created_at, recommendation, price FROM Recommendations ORDER BY created_at DESC LIMIT 1''')
rows = cur.fetchall()
conn.commit()
if rows[0][1] == "BUY":
res["price"] = rows[0][2]
else:
res["price"] = 0.0
res["operation"] = rows[0][1]
res["datetime"] = str(rows[0][0]).split(" ")[1]
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/profit/details", methods=["GET"])
def get_btc_profit_details():
res = []
cur = conn.cursor()
cur.execute(
'''SELECT created_at, recommendation, price FROM Recommendations ORDER BY created_at DESC LIMIT 14''')
rows = cur.fetchall()
conn.commit()
rows.reverse()
if len(rows) > 2:
if rows[0][1] == "SELL":
rows = rows[1:]
previous_buy = 0
for i in range(len(rows)):
current_res = {}
if str(rows[i][1]) == "BUY":
previous_buy = rows[i][2]
current_res["profit/loss"] = 0.0
else:
current_res["profit/loss"] = rows[i][2] - previous_buy
current_res["datetime"] = str(rows[i][0])
current_res["recommendation"] = str(rows[i][1])
current_res["price"] = str(rows[i][2])
if str(rows[i][1]) != "BUY":
res.append(current_res)
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/profit", methods=["GET"])
def get_btc_profit():
res = {}
cur = conn.cursor()
cur.execute(
'''SELECT created_at, recommendation, price FROM Recommendations''')
rows = cur.fetchall()
conn.commit()
# if the 1st operation is sell remove it
if rows[0][1] == "SELL":
rows = rows[1:]
# if the last operation is buy remove it
if rows[len(rows) - 1][1] == "BUY":
rows = rows[:-1]
df = pd.DataFrame(rows, columns=['time', 'recommendation', 'price'])
df["pct"] = 0
df["runing_amount"] = 0
# Investment
amount = 10000
for i in range(len(df)):
if df["recommendation"].iloc[i] == "SELL":
# to keep track of profit/loss percentage
df["pct"].iloc[i] = (df["price"].iloc[i] -
df["price"].iloc[i-1])/df["price"].iloc[i-1]
# to keep track of the running amount after selling
if i <= 1:
df["runing_amount"].iloc[i] = (1+df["pct"].iloc[i])*amount
else:
df["runing_amount"].iloc[i] = (
1+df["pct"].iloc[i])*df["runing_amount"].iloc[i-2]
# running amount as a percentage of the initial investment
rtn_pct = (df["runing_amount"].iloc[-1]) / amount * 100
# dollar profit/loss
profit_loss = df["runing_amount"].iloc[-1] - amount
res["rtn_pct"] = rtn_pct
res["profit_loss"] = profit_loss
res["runing_amount"] = df["runing_amount"].iloc[-1]
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/btc/price", methods=["GET"])
def get_btc_price():
res = []
cur = conn.cursor()
cur.execute('''SELECT avg(price) , to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' new_time
FROM BT_Price
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '28 minute')
GROUP BY new_time
ORDER BY new_time''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[1]).split("+")[0].split(" ")[1][:8]
row_item["price"] = row[0]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
#### PREDICTIONS ###
@btcData.route("/predict/ARIMA", methods=["GET"])
def predict_btc_price_arima():
res = []
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[0]).split("+")[0].split(" ")[1][:8]
row_item["price"] = row[1]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/predict/VARMAX", methods=["GET"])
def predict_btc_price_varmax():
res = []
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_VARMAX
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[0]).split("+")[0].split(" ")[1][:8]
row_item["price"] = row[1]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/predict/SES", methods=["GET"])
def predict_btc_price_ses():
res = []
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_SES
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[0]).split("+")[0].split(" ")[1][:8]
row_item["price"] = row[1]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/predict/RF", methods=["GET"])
def predict_btc_price_rf():
res = []
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_RF
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[0]).split("+")[0].split(" ")[1][:8]
row_item["price"] = row[1]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/predict/overall", methods=["GET"])
def predict_btc_price_overall():
res = []
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_Overall
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[0]).split("+")[0].split(" ")[1][:8]
row_item["price"] = row[1]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
#### SCORES ###
@btcData.route("/score/ARIMA", methods=["GET"])
def get_model_score_arima():
res = {}
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
pred_rows = cur.fetchall()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' new_time, avg(price)
FROM BT_Price
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY new_time
ORDER BY new_time''')
actual_rows = cur.fetchall()
conn.commit()
actual_df = pd.DataFrame(actual_rows, columns=['time', 'price'])
preds_df = pd.DataFrame(pred_rows, columns=['time', 'preds'])
actual_values = [x for x in actual_df["price"]]
preds_values = [x for x in preds_df["preds"]]
preds_values = preds_values[:len(actual_values) - 1]
actual_values = actual_values[:len(preds_values)]
res["RMSE"] = mean_squared_error(
actual_values, preds_values, squared=False)
res["MAE"] = mean_absolute_error(actual_values, preds_values)
res["MSE"] = mean_squared_error(actual_values, preds_values)
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/score/VARMAX", methods=["GET"])
def get_model_score_varmax():
res = {}
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_VARMAX
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
pred_rows = cur.fetchall()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' new_time, avg(price)
FROM BT_Price
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY new_time
ORDER BY new_time''')
actual_rows = cur.fetchall()
conn.commit()
actual_df = pd.DataFrame(actual_rows, columns=['time', 'price'])
preds_df = pd.DataFrame(pred_rows, columns=['time', 'preds'])
actual_values = [x for x in actual_df["price"]]
preds_values = [x for x in preds_df["preds"]]
preds_values = preds_values[:len(actual_values) - 1]
actual_values = actual_values[:len(preds_values)]
res["RMSE"] = mean_squared_error(
actual_values, preds_values, squared=False)
res["MAE"] = mean_absolute_error(actual_values, preds_values)
res["MSE"] = mean_squared_error(actual_values, preds_values)
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/score/SES", methods=["GET"])
def get_model_score_ses():
res = {}
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_SES
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
pred_rows = cur.fetchall()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' new_time, avg(price)
FROM BT_Price
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY new_time
ORDER BY new_time''')
actual_rows = cur.fetchall()
conn.commit()
actual_df = pd.DataFrame(actual_rows, columns=['time', 'price'])
preds_df = pd.DataFrame(pred_rows, columns=['time', 'preds'])
actual_values = [x for x in actual_df["price"]]
preds_values = [x for x in preds_df["preds"]]
preds_values = preds_values[:len(actual_values) - 1]
actual_values = actual_values[:len(preds_values)]
res["RMSE"] = mean_squared_error(
actual_values, preds_values, squared=False)
res["MAE"] = mean_absolute_error(actual_values, preds_values)
res["MSE"] = mean_squared_error(actual_values, preds_values)
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/score/RF", methods=["GET"])
def get_model_score_RF():
res = {}
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_RF
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY time_
ORDER BY time_''')
pred_rows = cur.fetchall()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' new_time, avg(price)
FROM BT_Price
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '30 minute')
GROUP BY new_time
ORDER BY new_time''')
actual_rows = cur.fetchall()
conn.commit()
actual_df = pd.DataFrame(actual_rows, columns=['time', 'price'])
preds_df = pd.DataFrame(pred_rows, columns=['time', 'preds'])
actual_values = [x for x in actual_df["price"]]
preds_values = [x for x in preds_df["preds"]]
min_val = minimum(len(actual_values), len(preds_values))
preds_values = preds_values[:min_val-1]
actual_values = actual_values[:min_val-1]
res["RMSE"] = mean_squared_error(
actual_values, preds_values, squared=False)
res["MAE"] = mean_absolute_error(actual_values, preds_values)
res["MSE"] = mean_squared_error(actual_values, preds_values)
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@btcData.route("/score/overall", methods=["GET"])
def get_model_score_overall():
cur = conn.cursor()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BT_Price
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '1 hour')
GROUP BY time_
ORDER BY time_''')
actual_rows = cur.fetchall()
cur.execute('''SELECT to_timestamp(floor((extract('epoch' from Created_at) / 30 )) * 30) AT TIME ZONE 'UTC' time_ , avg(price) price_
FROM BTC_Price_Prediction_Overall
WHERE created_at <= date_trunc('hour', now() + interval '1 hour') and created_at >= (now() - interval '1 hour')
GROUP BY time_
ORDER BY time_''')
preds_rows = cur.fetchall()
conn.commit()
actual_df = pd.DataFrame(actual_rows, columns=['time', 'price'])
preds_df = pd.DataFrame(preds_rows, columns=['time', 'preds'])
actual_values = [x for x in actual_df["price"]]
preds_values = [x for x in preds_df["preds"]]
min_val = minimum(len(actual_values), len(preds_values))
preds_values = preds_values[:min_val-1]
actual_values = actual_values[:min_val-1]
r2 = r2_score(actual_values, preds_values)
res = {}
res["R2"] = r2
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
```
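The `/btc/profit` route compounds a fixed starting amount through each BUY/SELL pair. The same arithmetic, isolated from the database with made-up prices:
```python
amount = 10000.0
trades = [('BUY', 100.0), ('SELL', 110.0), ('BUY', 105.0), ('SELL', 100.0)]
running = amount
last_buy = None
for action, price in trades:
    if action == 'BUY':
        last_buy = price
    else:  # SELL: compound the running amount by this trade's return
        running *= 1 + (price - last_buy) / last_buy
print(running)                 # 10000 * 1.10 * (100/105) ≈ 10476.19
print(running - amount)        # dollar profit/loss
print(running / amount * 100)  # percent of the initial investment
```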
#### File: blueprints/tweets/tweets.py
```python
from flask import Blueprint, jsonify
import psycopg2
import warnings
from datetime import datetime
warnings.filterwarnings("ignore")
tweetsData = Blueprint('tweets', __name__)
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
@tweetsData.route("/tweets/pos", methods=["GET"])
def get_positive_tweets():
res = []
cur = conn.cursor()
cur.execute('''SELECT sum(Positive) ,
to_timestamp(floor((extract('epoch' from Created_at AT TIME ZONE 'UTC') / 30 )) * 30) new_time
FROM tweets
WHERE Created_at <= date_trunc('hour', now() + interval '1 hour') AT TIME ZONE 'UTC' and Created_at > (now() - interval '30 minute') AT TIME ZONE 'UTC'
GROUP BY new_time
ORDER BY new_time''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[1]).split("+")[0].split(" ")[1][:8]
row_item["count"] = row[0]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@tweetsData.route("/tweets/neg", methods=["GET"])
def get_negative_tweets():
res = []
cur = conn.cursor()
cur.execute('''SELECT sum(Negative) ,
to_timestamp(floor((extract('epoch' from Created_at AT TIME ZONE 'UTC') / 30 )) * 30) new_time
FROM tweets
WHERE Created_at <= date_trunc('hour', now() + interval '1 hour') AT TIME ZONE 'UTC' and Created_at > (now() - interval '30 minute') AT TIME ZONE 'UTC'
GROUP BY new_time
ORDER BY new_time''')
rows = cur.fetchall()
for row in rows:
row_item = {}
row_item["datetime"] = str(row[1]).split("+")[0].split(" ")[1][:8]
row_item["count"] = row[0]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@tweetsData.route("/tweets/count", methods=["GET"])
def get_count_tweets():
res = {}
cur = conn.cursor()
cur.execute(
'''SELECT sum(Positive) pos_sum, sum(Negative) neg_sum FROM tweets''')
rows = cur.fetchall()
res["Total_pos"] = rows[0][0]
res["Total_neg"] = rows[0][1]
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
@tweetsData.route(" ", methods=["GET"])
def get_daily_tweets():
res = []
cur = conn.cursor()
cur.execute('''SELECT sum(Negative), sum(Positive) ,
extract(hour from created_at AT TIME ZONE 'UTC') new_time
FROM tweets
WHERE created_at <= date_trunc('day', now() + interval '1 day') AT TIME ZONE 'UTC' and created_at >= date_trunc('day', now()) AT TIME ZONE 'UTC'
GROUP BY new_time
ORDER BY new_time''')
rows = cur.fetchall()
for row in rows:
current_time = datetime.now()
newdatetime = current_time.replace(hour=int(row[2]), minute=0)
row_item = {}
row_item["datetime"] = str(newdatetime).split(" ")[1].split(".")[0][:5]
row_item["neg"] = row[0]
row_item["pos"] = row[1]
res.append(row_item)
conn.commit()
result = jsonify(res)
result.headers.add('Access-Control-Allow-Origin', '*')
return result
```
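Every query in this blueprint buckets rows with the same PostgreSQL idiom, `to_timestamp(floor(extract('epoch' from ts) / 30) * 30)`. Its Python equivalent, for reference:
```python
from datetime import datetime, timezone

def bucket_30s(ts: datetime) -> datetime:
    """Floor a timestamp to its 30-second bucket, like the SQL above."""
    return datetime.fromtimestamp(ts.timestamp() // 30 * 30, tz=timezone.utc)

print(bucket_30s(datetime(2021, 5, 1, 12, 0, 44, tzinfo=timezone.utc)))
# 2021-05-01 12:00:30+00:00
```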
#### File: server/models/bitcoin_price_prediction_VARMAX.py
```python
import pandas as pd
from datetime import datetime, timedelta
from statsmodels.tsa.statespace.varmax import VARMAX
import time
import SQL_STATEMENTS
def date_formating(current_time):
# Round the timestamp up to the next 30-second boundary.
if current_time.second < 30:
return current_time.replace(second=30, microsecond=0)
# Rolling into the next minute with timedelta also handles the hour, day,
# and month boundaries that the original branch chain mishandled.
return current_time.replace(second=0, microsecond=0) + timedelta(minutes=1)
def predict_VARMAX():
conn = SQL_STATEMENTS.setup()
while(1):
cur = conn.cursor()
cur.execute(SQL_STATEMENTS.BTC_PRICE_1_HOUR_WINDOW)
btc_rows = cur.fetchall()
conn.commit()
cur = conn.cursor()
cur.execute(SQL_STATEMENTS.TWEETS_1_HOUR_WINDOW)
tweets_rows = cur.fetchall()
conn.commit()
btc_df = pd.DataFrame(btc_rows, columns=['time', 'price'])
tweets_df = pd.DataFrame(tweets_rows, columns=['time', 'count'])
btc_df.time = btc_df["time"].astype(str)
tweets_df.time = tweets_df["time"].astype(str)
tweets_df.time = tweets_df["time"].map(lambda x: x.split("+")[0])
merge_df = pd.merge(btc_df, tweets_df, on='time')
merge_df = merge_df.drop(["time"], axis=1)
history = [list(x) for x in merge_df.values]
# (BTC Price, Count of tweets)
try:
for i in range(0, 2):
current_time = datetime.now() + timedelta(seconds=(i * 30))
model = VARMAX(history, order=(1, 1))
model_fit = model.fit(disp=False)
output = model_fit.forecast()
yhat = output[0]
newdatetime = date_formating(current_time)
if yhat[0] < 0:
yhat[0] = merge_df["price"].mean()
history.append(yhat)
cur = conn.cursor()
cur.execute(
f"INSERT INTO BTC_Price_Prediction_VARMAX (created_at,price) VALUES ('{str(newdatetime)}', {yhat[0]})")
conn.commit()
except Exception:
# Model fitting can fail on short or degenerate windows; skip this cycle.
continue
time.sleep(15)
```
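`date_formating` aligns each prediction timestamp to the next 30-second slot, matching the bucketing the API queries use. For example:
```python
from datetime import datetime

print(date_formating(datetime(2021, 5, 1, 12, 0, 10)))   # 2021-05-01 12:00:30
print(date_formating(datetime(2021, 5, 1, 23, 59, 45)))  # 2021-05-02 00:00:00
```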
#### File: server/models/bitcoin_price_web.py
```python
from selenium import webdriver
from datetime import datetime
import psycopg2
import time
def setup():
# Create database connection
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
# create selenium web driver
opts = webdriver.ChromeOptions()
opts.add_experimental_option("detach", True)
driver = webdriver.Chrome(executable_path=r"./chromedriver", options=opts)
return conn, driver
def scrape_data():
conn, driver = setup()
driver.get(
"https://coinmarketcap.com/currencies/bitcoin/")
previous_price = 0
while (1):
time.sleep(3)
# get currently displayed price
price = driver.find_elements_by_xpath(
"//div[@class='priceValue___11gHJ ']")
price = price[0].text[1:].replace(',', '')
# in case the website close the session
if price == previous_price:
driver.refresh()
time.sleep(1)
price = driver.find_elements_by_xpath(
"//div[@class='priceValue___11gHJ ']")
price = price[0].text[1:].replace(',', '')
previous_price = price
current_time = datetime.now()
cur = conn.cursor()
cur.execute(
f"INSERT INTO BT_Price (Created_at,Price) VALUES ('{str(current_time)}', {price})")
conn.commit()
time.sleep(20)
if __name__ == "__main__":
scrape_data()
``` |
{
"source": "johnjdailey/jupyter-archive",
"score": 3
} |
#### File: jupyter-archive/jupyter_archive/handlers.py
```python
import os
import asyncio
import zipfile
import tarfile
import pathlib
from tornado import gen, web, iostream, ioloop
from notebook.base.handlers import IPythonHandler
from notebook.utils import url2path
# The delay in ms at which we send the chunk of data
# to the client.
ARCHIVE_DOWNLOAD_FLUSH_DELAY = 100
SUPPORTED_FORMAT = [
"zip",
"tgz",
"tar.gz",
"tbz",
"tbz2",
"tar.bz",
"tar.bz2",
"txz",
"tar.xz"
]
class ArchiveStream():
def __init__(self, handler):
self.handler = handler
self.position = 0
def write(self, data):
self.position += len(data)
self.handler.write(data)
del data
def tell(self):
return self.position
def flush(self):
# Note: Flushing is done elsewhere, in the main thread
# because `write()` is called in a background thread.
# self.handler.flush()
pass
def make_writer(handler, archive_format="zip"):
fileobj = ArchiveStream(handler)
if archive_format == "zip":
        archive_file = zipfile.ZipFile(fileobj, mode='w', compression=zipfile.ZIP_DEFLATED)
        archive_file.add = archive_file.write  # alias so zip and tar writers share the same add() interface
elif archive_format in ["tgz", "tar.gz"]:
archive_file = tarfile.open(fileobj=fileobj, mode='w|gz')
elif archive_format in ["tbz", "tbz2", "tar.bz", "tar.bz2"]:
archive_file = tarfile.open(fileobj=fileobj, mode='w|bz2')
elif archive_format in ["txz", "tar.xz"]:
archive_file = tarfile.open(fileobj=fileobj, mode='w|xz')
else:
raise ValueError("'{}' is not a valid archive format.".format(archive_format))
return archive_file
def make_reader(archive_path):
archive_format = ''.join(archive_path.suffixes)[1:]
if archive_format == "zip":
archive_file = zipfile.ZipFile(archive_path, mode='r')
elif archive_format in ["tgz", "tar.gz"]:
archive_file = tarfile.open(archive_path, mode='r|gz')
elif archive_format in ["tbz", "tbz2", "tar.bz", "tar.bz2"]:
archive_file = tarfile.open(archive_path, mode='r|bz2')
elif archive_format in ["txz", "tar.xz"]:
archive_file = tarfile.open(archive_path, mode='r|xz')
else:
raise ValueError("'{}' is not a valid archive format.".format(archive_format))
return archive_file
class DownloadArchiveHandler(IPythonHandler):
@web.authenticated
@gen.coroutine
def get(self, archive_path, include_body=False):
# /directories/ requests must originate from the same site
self.check_xsrf_cookie()
cm = self.contents_manager
if cm.is_hidden(archive_path) and not cm.allow_hidden:
self.log.info("Refusing to serve hidden file, via 404 Error")
raise web.HTTPError(404)
archive_token = self.get_argument('archiveToken')
archive_format = self.get_argument('archiveFormat', 'zip')
if archive_format not in SUPPORTED_FORMAT:
self.log.error("Unsupported format {}.".format(archive_format))
raise web.HTTPError(404)
archive_path = os.path.join(cm.root_dir, url2path(archive_path))
archive_path = pathlib.Path(archive_path)
archive_name = archive_path.name
archive_filename = archive_path.with_suffix(".{}".format(archive_format)).name
self.log.info('Prepare {} for archiving and downloading.'.format(archive_filename))
self.set_header('content-type', 'application/octet-stream')
self.set_header('cache-control', 'no-cache')
self.set_header('content-disposition',
'attachment; filename={}'.format(archive_filename))
self.canceled = False
self.flush_cb = ioloop.PeriodicCallback(self.flush, ARCHIVE_DOWNLOAD_FLUSH_DELAY)
self.flush_cb.start()
args = (archive_path, archive_format, archive_token)
yield ioloop.IOLoop.current().run_in_executor(None, self.archive_and_download, *args)
if self.canceled:
self.log.info('Download canceled.')
else:
self.flush()
self.log.info('Finished downloading {}.'.format(archive_filename))
self.set_cookie("archiveToken", archive_token)
self.flush_cb.stop()
self.finish()
def archive_and_download(self, archive_path, archive_format, archive_token):
with make_writer(self, archive_format) as archive:
prefix = len(str(archive_path.parent)) + len(os.path.sep)
for root, _, files in os.walk(archive_path):
for file_ in files:
file_name = os.path.join(root, file_)
if not self.canceled:
self.log.debug("{}\n".format(file_name))
archive.add(file_name, os.path.join(root[prefix:], file_))
else:
break
def on_connection_close(self):
super().on_connection_close()
self.canceled = True
self.flush_cb.stop()
class ExtractArchiveHandler(IPythonHandler):
@web.authenticated
@gen.coroutine
def get(self, archive_path, include_body=False):
# /extract-archive/ requests must originate from the same site
self.check_xsrf_cookie()
cm = self.contents_manager
if cm.is_hidden(archive_path) and not cm.allow_hidden:
self.log.info("Refusing to serve hidden file, via 404 Error")
raise web.HTTPError(404)
archive_path = os.path.join(cm.root_dir, url2path(archive_path))
archive_path = pathlib.Path(archive_path)
yield ioloop.IOLoop.current().run_in_executor(None, self.extract_archive, archive_path)
self.finish()
def extract_archive(self, archive_path):
archive_destination = archive_path.parent
self.log.info('Begin extraction of {} to {}.'.format(archive_path, archive_destination))
archive_reader = make_reader(archive_path)
with archive_reader as archive:
archive.extractall(archive_destination)
self.log.info('Finished extracting {} to {}.'.format(archive_path, archive_destination))
``` |
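A standalone sketch of the archive round trip implemented by `make_writer`/`make_reader`, runnable outside the notebook server (the file names are hypothetical):
```python
# Standalone round trip using the same modes and suffix-based dispatch as above.
import pathlib
import tarfile
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    tmp = pathlib.Path(tmp)
    (tmp / "notes.txt").write_text("hello")
    archive_path = tmp / "notes.tar.gz"
    with tarfile.open(archive_path, mode="w|gz") as archive:  # same stream mode as make_writer
        archive.add(tmp / "notes.txt", arcname="notes.txt")
    fmt = "".join(archive_path.suffixes)[1:]  # -> "tar.gz", as in make_reader
    with tarfile.open(archive_path, mode="r|gz") as archive:
        archive.extractall(tmp / "out")
    print(fmt, (tmp / "out" / "notes.txt").read_text())
```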
{
"source": "johnjdailey/lambdata-johnjdailey",
"score": 4
} |
#### File: lambdata-johnjdailey/buyholdsell/assignment_inherit.py
```python
from pandas import DataFrame
# State abbreviation -> name, e.g. FL -> Florida.
class MyFrame(DataFrame):
def add_state_names(self): # was originally my_df, doesn't matter
"""
Adds a column of state names to accompany and corresponding column of state abbreviations.
"""
names_map = {"CA": "Cali", "CO": "Colo", "CT": "Conn"}
self["name"] = self["abbrev"].map(names_map)
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.map.html
if __name__ == '__main__':
#df = DataFrame({"abbrev":["CA","CO","CT","DC","TX"]})
my_frame = MyFrame({"abbrev": ["CA", "CO", "CT", "CT", "DC", "TX"]})
print(my_frame.columns)
print(my_frame.head())
my_frame.add_state_names()
print(my_frame.head())
``` |
{
"source": "johnjdailey/NeMo",
"score": 2
} |
#### File: nlp/dialogue_state_tracking/dialogue_state_tracking_sgd.py
```python
import argparse
import math
import os
import nemo.collections.nlp as nemo_nlp
import nemo.collections.nlp.data.datasets.sgd_dataset.data_processor as data_processor
from nemo.collections.nlp.callbacks.sgd_callback import eval_epochs_done_callback, eval_iter_callback
from nemo.collections.nlp.data.datasets.sgd_dataset.schema_processor import SchemaPreprocessor
from nemo.collections.nlp.nm.trainables import SGDDecoderNM, SGDEncoderNM
from nemo.core import Backend, CheckpointCallback, EvaluatorCallback, NeuralModuleFactory, SimpleLossLoggerCallback
from nemo.utils import logging
from nemo.utils.lr_policies import get_lr_policy
# Parsing arguments
parser = argparse.ArgumentParser(description='Schema_guided_dst')
# BERT based utterance encoder related arguments
parser.add_argument(
"--max_seq_length",
default=80,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.",
)
parser.add_argument("--dropout", default=0.1, type=float, help="Dropout rate for BERT representations.")
parser.add_argument(
"--pretrained_model_name",
default="bert-base-cased",
type=str,
help="Name of the pre-trained model",
choices=nemo_nlp.nm.trainables.get_pretrained_lm_models_list(),
)
parser.add_argument("--bert_checkpoint", default=None, type=str, help="Path to model checkpoint")
parser.add_argument("--bert_config", default=None, type=str, help="Path to bert config file in json format")
parser.add_argument(
"--tokenizer_model",
default=None,
type=str,
help="Path to pretrained tokenizer model, only used if --tokenizer is sentencepiece",
)
parser.add_argument(
"--tokenizer",
default="nemobert",
type=str,
choices=["nemobert", "sentencepiece"],
help="tokenizer to use, only relevant when using custom pretrained checkpoint.",
)
parser.add_argument("--vocab_file", default=None, help="Path to the vocab file.")
parser.add_argument(
"--do_lower_case",
action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models. "
+ "Only applicable when tokenizer is build with vocab file",
)
# Hyperparameters and optimization related flags.
parser.add_argument(
"--checkpoint_dir",
default=None,
type=str,
help="The folder containing the checkpoints for the model to continue training",
)
parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.")
parser.add_argument("--num_epochs", default=80, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--optimizer_kind", default="adam_w", type=str)
parser.add_argument("--learning_rate", default=1e-4, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--lr_policy", default="PolynomialDecayAnnealing", type=str)
parser.add_argument("--weight_decay", default=0.01, type=float)
parser.add_argument(
"--lr_warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.",
)
parser.add_argument("--grad_norm_clip", type=float, default=1, help="Gradient clipping")
parser.add_argument("--local_rank", default=None, type=int)
parser.add_argument("--amp_opt_level", default="O0", type=str, choices=["O0", "O1", "O2"])
parser.add_argument("--num_gpus", default=1, type=int)
# Input and output paths and other flags.
parser.add_argument(
"--task_name",
default="sgd_single_domain",
type=str,
choices=data_processor.FILE_RANGES.keys(),
help="The name of the task to train.",
)
parser.add_argument(
"--data_dir",
type=str,
required=True,
help="Directory for the downloaded SGD data, which contains the dialogue files"
" and schema files of all datasets (eg train, dev)",
)
parser.add_argument(
"--work_dir",
type=str,
default="output/SGD",
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--schema_embedding_dir",
type=str,
default='schema_embedding_dir',
help="Directory where .npy file for embedding of entities (slots, values, intents) in the dataset_split's schema are stored.",
)
parser.add_argument(
"--no_overwrite_schema_emb_files",
action="store_false",
help="Whether to generate a new file saving the dialogue examples.",
dest="overwrite_schema_emb_files",
)
parser.add_argument(
"--joint_acc_across_turn",
action="store_true",
help="Whether to compute joint accuracy across turn instead of across service. Should be set to True when conducting multiwoz style evaluation.",
)
parser.add_argument(
"--no_fuzzy_match",
action="store_true",
help="Whether to use fuzzy string matching when comparing non-categorical slot values. Fuzz match should not be used when conducting multiwoz style evaluation.",
)
parser.add_argument(
"--dialogues_example_dir",
type=str,
default="dialogues_example_dir",
help="Directory where preprocessed SGD dialogues are stored.",
)
parser.add_argument(
"--no_overwrite_dial_files",
action="store_false",
help="Whether to generate a new file saving the dialogue examples.",
dest="overwrite_dial_files",
)
parser.add_argument("--no_shuffle", action="store_true", help="Whether to shuffle training data")
parser.add_argument("--no_time_to_log_dir", action="store_true", help="whether to add time to work_dir or not")
parser.add_argument(
"--eval_dataset",
type=str,
default="dev_test",
choices=["dev", "test", "dev_test"],
help="Dataset splits for evaluation.",
)
parser.add_argument(
"--save_epoch_freq",
default=1,
type=int,
help="Frequency of saving checkpoint '-1' - step checkpoint won't be saved",
)
parser.add_argument(
"--save_step_freq",
default=-1,
type=int,
help="Frequency of saving checkpoint '-1' - step checkpoint won't be saved",
)
parser.add_argument(
"--loss_log_freq", default=-1, type=int, help="Frequency of logging loss values, '-1' - at the end of the epoch",
)
parser.add_argument(
"--loss_reduction",
default='mean',
type=str,
help="specifies the reduction to apply to the final loss, choose 'mean' or 'sum'",
)
parser.add_argument(
"--eval_epoch_freq", default=1, type=int, help="Frequency of evaluation",
)
parser.add_argument(
"--num_workers",
default=2,
type=int,
help="Number of workers for data loading, -1 means set it automatically to the number of CPU cores",
)
parser.add_argument(
"--enable_pin_memory", action="store_true", help="Enables the pin_memory feature of Pytroch's DataLoader",
)
parser.add_argument(
"--state_tracker",
type=str,
default='baseline',
choices=['baseline', 'ret_sys_act'],
help="Specifies the state tracker mode",
)
parser.add_argument(
"--schema_emb_init",
type=str,
default='baseline',
choices=['baseline', 'random', 'last_layer_average'],
help="Specifies how schema embeddings are generated. Baseline uses ['CLS'] token",
)
parser.add_argument(
"--train_schema_emb", action="store_true", help="Specifies whether schema embeddings are trainables.",
)
parser.add_argument(
"--add_attention_head",
action="store_true",
help="Whether to use attention when computing projections. When False, uses linear projection.",
)
parser.add_argument(
"--debug_mode", action="store_true", help="Enables debug mode with more info on data preprocessing and evaluation",
)
parser.add_argument(
"--checkpoints_to_keep", default=1, type=int, help="The number of last checkpoints to keep",
)
args = parser.parse_args()
logging.info(args)
if args.debug_mode:
logging.setLevel("DEBUG")
if args.task_name == "multiwoz":
schema_config = {
"MAX_NUM_CAT_SLOT": 9,
"MAX_NUM_NONCAT_SLOT": 4,
"MAX_NUM_VALUE_PER_CAT_SLOT": 47,
"MAX_NUM_INTENT": 1,
}
else:
schema_config = {
"MAX_NUM_CAT_SLOT": 6,
"MAX_NUM_NONCAT_SLOT": 12,
"MAX_NUM_VALUE_PER_CAT_SLOT": 12,
"MAX_NUM_INTENT": 4,
}
if not os.path.exists(args.data_dir):
raise ValueError(f'Data not found at {args.data_dir}')
nf = NeuralModuleFactory(
backend=Backend.PyTorch,
local_rank=args.local_rank,
optimization_level=args.amp_opt_level,
log_dir=args.work_dir,
create_tb_writer=True,
checkpoint_dir=args.checkpoint_dir,
files_to_copy=[__file__],
add_time_to_log_dir=not args.no_time_to_log_dir,
)
pretrained_bert_model = nemo_nlp.nm.trainables.get_pretrained_lm_model(
pretrained_model_name=args.pretrained_model_name,
config=args.bert_config,
vocab=args.vocab_file,
checkpoint=args.bert_checkpoint,
)
schema_config["EMBEDDING_DIMENSION"] = pretrained_bert_model.hidden_size
schema_config["MAX_SEQ_LENGTH"] = args.max_seq_length
tokenizer = nemo_nlp.data.tokenizers.get_tokenizer(
tokenizer_name=args.tokenizer,
pretrained_model_name=args.pretrained_model_name,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
do_lower_case=args.do_lower_case,
)
hidden_size = pretrained_bert_model.hidden_size
# Run SGD preprocessor to generate and store schema embeddings
schema_preprocessor = SchemaPreprocessor(
data_dir=args.data_dir,
schema_embedding_dir=args.schema_embedding_dir,
schema_config=schema_config,
tokenizer=tokenizer,
bert_model=pretrained_bert_model,
overwrite_schema_emb_files=args.overwrite_schema_emb_files,
bert_ckpt_dir=args.checkpoint_dir,
nf=nf,
mode=args.schema_emb_init,
is_trainable=args.train_schema_emb,
)
dialogues_processor = data_processor.SGDDataProcessor(
task_name=args.task_name,
data_dir=args.data_dir,
dialogues_example_dir=args.dialogues_example_dir,
tokenizer=tokenizer,
schema_emb_processor=schema_preprocessor,
overwrite_dial_files=args.overwrite_dial_files,
)
# define model pipeline
sgd_encoder = SGDEncoderNM(hidden_size=hidden_size, dropout=args.dropout)
sgd_decoder = SGDDecoderNM(
embedding_dim=hidden_size, schema_emb_processor=schema_preprocessor, add_attention_head=args.add_attention_head
)
dst_loss = nemo_nlp.nm.losses.SGDDialogueStateLossNM(reduction=args.loss_reduction)
def create_pipeline(dataset_split='train'):
datalayer = nemo_nlp.nm.data_layers.SGDDataLayer(
dataset_split=dataset_split,
dialogues_processor=dialogues_processor,
batch_size=args.train_batch_size,
shuffle=not args.no_shuffle if dataset_split == 'train' else False,
num_workers=args.num_workers,
pin_memory=args.enable_pin_memory,
)
data = datalayer()
# Encode the utterances using BERT.
token_embeddings = pretrained_bert_model(
input_ids=data.utterance_ids, attention_mask=data.utterance_mask, token_type_ids=data.utterance_segment,
)
encoded_utterance, token_embeddings = sgd_encoder(hidden_states=token_embeddings)
(
logit_intent_status,
logit_req_slot_status,
logit_cat_slot_status,
logit_cat_slot_value,
logit_noncat_slot_status,
logit_noncat_slot_start,
logit_noncat_slot_end,
) = sgd_decoder(
encoded_utterance=encoded_utterance,
token_embeddings=token_embeddings,
utterance_mask=data.utterance_mask,
cat_slot_values_mask=data.cat_slot_values_mask,
intent_status_mask=data.intent_status_mask,
service_ids=data.service_id,
)
if dataset_split == 'train':
loss = dst_loss(
logit_intent_status=logit_intent_status,
intent_status_labels=data.intent_status_labels,
logit_req_slot_status=logit_req_slot_status,
requested_slot_status=data.requested_slot_status,
req_slot_mask=data.req_slot_mask,
logit_cat_slot_status=logit_cat_slot_status,
categorical_slot_status=data.categorical_slot_status,
cat_slot_status_mask=data.cat_slot_status_mask,
logit_cat_slot_value=logit_cat_slot_value,
categorical_slot_values=data.categorical_slot_values,
logit_noncat_slot_status=logit_noncat_slot_status,
noncategorical_slot_status=data.noncategorical_slot_status,
noncat_slot_status_mask=data.noncat_slot_status_mask,
logit_noncat_slot_start=logit_noncat_slot_start,
logit_noncat_slot_end=logit_noncat_slot_end,
noncategorical_slot_value_start=data.noncategorical_slot_value_start,
noncategorical_slot_value_end=data.noncategorical_slot_value_end,
)
tensors = [loss]
else:
tensors = [
data.example_id_num,
data.service_id,
data.is_real_example,
data.start_char_idx,
data.end_char_idx,
logit_intent_status,
logit_req_slot_status,
logit_cat_slot_status,
logit_cat_slot_value,
logit_noncat_slot_status,
logit_noncat_slot_start,
logit_noncat_slot_end,
data.intent_status_labels,
data.requested_slot_status,
data.categorical_slot_status,
data.categorical_slot_values,
data.noncategorical_slot_status,
]
steps_per_epoch = math.ceil(len(datalayer) / (args.train_batch_size * args.num_gpus))
return steps_per_epoch, tensors
steps_per_epoch, train_tensors = create_pipeline()
logging.info(f'Steps per epoch: {steps_per_epoch}')
# Create trainer and execute training action
train_callback = SimpleLossLoggerCallback(
tensors=train_tensors,
print_func=lambda x: logging.info("Loss: {:.8f}".format(x[0].item())),
get_tb_values=lambda x: [["loss", x[0]]],
tb_writer=nf.tb_writer,
step_freq=args.loss_log_freq if args.loss_log_freq > 0 else steps_per_epoch,
)
def get_eval_callback(eval_dataset):
_, eval_tensors = create_pipeline(dataset_split=eval_dataset)
eval_callback = EvaluatorCallback(
eval_tensors=eval_tensors,
user_iter_callback=lambda x, y: eval_iter_callback(x, y, schema_preprocessor, eval_dataset),
user_epochs_done_callback=lambda x: eval_epochs_done_callback(
x,
args.task_name,
eval_dataset,
args.data_dir,
nf.work_dir,
args.state_tracker,
args.debug_mode,
schema_preprocessor,
args.joint_acc_across_turn,
args.no_fuzzy_match,
),
tb_writer=nf.tb_writer,
eval_step=args.eval_epoch_freq * steps_per_epoch,
)
return eval_callback
if args.eval_dataset == 'dev_test':
eval_callbacks = [get_eval_callback('dev'), get_eval_callback('test')]
else:
eval_callbacks = [get_eval_callback(args.eval_dataset)]
ckpt_callback = CheckpointCallback(
    folder=nf.checkpoint_dir, epoch_freq=args.save_epoch_freq, step_freq=args.save_step_freq, checkpoints_to_keep=args.checkpoints_to_keep
)
lr_policy_fn = get_lr_policy(
args.lr_policy, total_steps=args.num_epochs * steps_per_epoch, warmup_ratio=args.lr_warmup_proportion
)
nf.train(
tensors_to_optimize=train_tensors,
callbacks=[train_callback, ckpt_callback] + eval_callbacks,
lr_policy=lr_policy_fn,
optimizer=args.optimizer_kind,
optimization_params={
"num_epochs": args.num_epochs,
"lr": args.learning_rate,
"eps": 1e-6,
"weight_decay": args.weight_decay,
"grad_norm_clip": args.grad_norm_clip,
},
)
``` |
{
"source": "JohnJDyer/raft",
"score": 2
} |
#### File: raft/raft/raft.py
```python
import enum
import signal
import random
import asyncio
import heapq
import uuid
import functools
import traceback
import socket
import os
import raft.raft_pb2 as raft_pb2
from raft.util import RaftDebugger, ignore_stale
# TODO: implement signal handlers.
# TODO: multiplex commands + logs
# TODO: create isready method to block until leader known leader OR queue up messages.
# TODO: Snapshotting. Maybe use loop.sock_sendfile()
class ACT(enum.Enum):
NO_OP = enum.auto()
TIMEOUT = enum.auto()
NEW_TERM = enum.auto()
NEW_LEADER = enum.auto()
MAJORITY = enum.auto()
START = enum.auto()
ERROR = enum.auto()
class STATE(enum.Enum):
FOLLOWER = enum.auto()
CANDIDATE = enum.auto()
LEADER = enum.auto()
OFF = enum.auto()
ERROR = enum.auto()
class Raft(RaftDebugger):
TRANSITIONS = {
STATE.OFF: {
ACT.START: STATE.FOLLOWER,
},
STATE.FOLLOWER: {
ACT.TIMEOUT: STATE.CANDIDATE,
ACT.NEW_LEADER: STATE.FOLLOWER,
ACT.NEW_TERM: STATE.FOLLOWER,
},
STATE.CANDIDATE: {
ACT.TIMEOUT: STATE.CANDIDATE,
ACT.MAJORITY: STATE.LEADER,
ACT.NEW_LEADER: STATE.FOLLOWER,
ACT.NEW_TERM: STATE.FOLLOWER,
},
STATE.LEADER: {
ACT.NEW_TERM: STATE.FOLLOWER,
# ACT.TIMEOUT: ACT.LEADER,
},
}
def __init__(
self,
id,
server_ids,
listen_address='0.0.0.0',
send_address='<broadcast>',
port=27182,
debug=1,
execute_callback=None,
):
self.id = uuid.uuid4().int % (1 << 62) if id is None else id
self.port = port
self.listen_address = listen_address
self.send_address = send_address
self.term = 0
self.servers = {}
self.quorum = len(server_ids) // 2 + 1
self.state = STATE.OFF
entry = raft_pb2.LogEntries()
entry.term = self.term
entry.command = "Start Up".encode('utf-8')
self.log = [entry]
self.execute_idx = 0 # monotonic
self.commit_idx = 0 # monotonic
self.execute_callback = execute_callback
# Do we need this:
self.msg_idx = {s: 1 for s in server_ids}
self.log_idx = {s: 1 for s in server_ids}
self.new_term()
self.election_timeout_ = 100
self.service_task = None
self.heartbeat_event = asyncio.Event()
self.command_queue = asyncio.PriorityQueue() # TODO We could limit features to make FIFO work.
self.transport = None
self.DISPATCH_TABLE = {
STATE.FOLLOWER: {
raft_pb2.RaftUDPMessage.HEART: [self.handle_heartbeat],
raft_pb2.RaftUDPMessage.POLL: [self.handle_poll],
raft_pb2.RaftUDPMessage.APPEND: [self.run_append],
},
STATE.CANDIDATE: {
raft_pb2.RaftUDPMessage.VOTE: [self.handle_vote],
},
STATE.LEADER: {
raft_pb2.RaftUDPMessage.COMMAND: [self.handle_status, self.handle_command],
raft_pb2.RaftUDPMessage.STATUS: [self.handle_status],
},
}
self.SERVICES = {
STATE.FOLLOWER : self.service_failure_detector,
STATE.CANDIDATE: self.service_election,
STATE.LEADER : self.service_heartbeat}
RaftDebugger.__init__(self, debug=debug)
async def listen(self):
class UDPBroadcast(asyncio.DatagramProtocol):
def datagram_received(__, data, addr):
self.udp_callback(data, addr)
self.transport, _protocol = await asyncio.get_event_loop().create_datagram_endpoint(
protocol_factory=lambda: UDPBroadcast(),
allow_broadcast=True,
local_addr=(self.listen_address, self.port))
def __repr__(self):
return f"Raft({self.state}, commited {self.commit_idx + 1} of {len(self.log)} log entries)"
def __del__(self):
# TODO graceful shutdown server via state transition.
if self.transport:
self.transport.close()
async def start(self):
await self.listen()
self.transition(ACT.START)
return self
def new_term(self, term=0):
self.term = max(self.term + 1, term)
self.vote_cast = False
self.leader = None
self.supporters = set()
def udp_broadcast(self, message_type, log_idx=float("inf"), leader=None, command=None):
leader = self.leader if leader is None else leader
msg = raft_pb2.RaftUDPMessage()
msg.type = message_type
msg.term = self.term
msg.sender = self.id
msg.commit_idx = self.commit_idx
if leader is not None:
msg.leader = leader
msg.log_idx = min(log_idx, len(self.log))
if command is not None:
msg.command = command
else:
msg.log.extend(self.log[msg.log_idx:])
self.transport.sendto(msg.SerializeToString(), (self.send_address, self.port))
# ================= DATABASE ==================
def commit(self, commit_idx):
if commit_idx > self.commit_idx:
self.commit_idx = min(commit_idx, len(self.log) - 1)
self.execute()
# TODO This feels heavy; better way?
asyncio.create_task(self.signal_commands())
def execute(self):
while self.execute_idx < self.commit_idx:
self.execute_idx += 1
if self.execute_callback:
self.execute_callback(self)
# TODO This feels heavy; better way?
async def signal_commands(self):
while not self.command_queue.empty() and self.command_queue._queue[0][0] <= self.commit_idx:
_, _, event = await self.command_queue.get()
event.set()
# ================= TRANSPORT PROTOCOLS ==================
def udp_callback(self, data, addr):
message = raft_pb2.RaftUDPMessage()
message.ParseFromString(data)
self.handle_message(message=message)
def handle_message(self, message):
if self.term < message.term:
self.new_term(message.term)
self.transition(ACT.NEW_TERM)
for f in self.DISPATCH_TABLE.get(self.state, {}).get(message.type, []):
action = f(message=message)
if action: self.transition(action)
# ================= SERVICES ==================
async def service_failure_detector(self):
while True:
try:
await asyncio.wait_for(self.heartbeat_event.wait(), timeout=1.1)
self.heartbeat_event.clear()
except asyncio.TimeoutError:
self.transition(ACT.TIMEOUT)
async def service_heartbeat(self):
while True:
self.udp_broadcast(raft_pb2.RaftUDPMessage.HEART)
await asyncio.sleep(.5)
async def service_election(self):
while True:
self.new_term()
self.supporters = {self.id}
self.udp_broadcast(raft_pb2.RaftUDPMessage.POLL, leader=self.id)
await asyncio.sleep(random.uniform(150, 300) / 1000)
# ================= STATE MACHINE ==================
def transition(self, action):
s = Raft.TRANSITIONS.get(self.state, {}).get(action, STATE.ERROR)
if self.state != s:
if self.service_task is not None:
self.service_task.cancel()
self.state = s
service = self.SERVICES[s]
self.service_task = asyncio.create_task(service())
# ================= SEND MESSAGE ==================
def send_logs(self):
msg_idx = min(self.msg_idx.values())
if msg_idx < len(self.log):
self.udp_broadcast(raft_pb2.RaftUDPMessage.APPEND, log_idx=msg_idx - 1)
# TODO: shield from cancellation?
# TODO: retry sending message?
# TODO: timeout?
async def send_command(self, command, idx=-1):
"""
Replicate command (bytes) in every server's logs at position idx. If command depends on raft.log then do:
(1) n=len(raft.log)
(2) message = F(raft.log)
(3) send_command(message, idx=n)
This guarantees the log/state machine has not changed since message calculation. Returns true when message is
COMMITED to the local log and false if message cannot be applied at position idx. Use idx=-1 for simple
append.
:param command: bytes
:param idx: int, target log index; -1 for any index
:return: bool, True if message successfully COMMITED.
"""
self.udp_broadcast(raft_pb2.RaftUDPMessage.COMMAND, command=command, log_idx=idx)
event = asyncio.Event()
self.command_queue.put_nowait((idx, command, event))
await event.wait()
return self.log[idx].command == command # TODO: assumes commands are unique.
def send_command_nowait(self, command, idx=-1):
return asyncio.create_task(self.send_command(command, idx=idx))
# ================ OTHER STUFF ====================
@ignore_stale
def handle_heartbeat(self, message):
if self.leader is None:
self.leader = message.leader
return ACT.NEW_LEADER
self.heartbeat_event.set()
if not len(message.log):
return
if not self.merge_logs(message):
self.udp_broadcast(raft_pb2.RaftUDPMessage.STATUS, log_idx=len(self.log) - 1)
@ignore_stale
def handle_command(self, message):
entry = raft_pb2.LogEntries()
entry.term = self.term
entry.command = message.command
if message.log_idx == -1 or message.log_idx == len(self.log):
self.log.append(entry)
self.msg_idx[self.id] = len(self.log)
self.log_idx[self.id] = len(self.log)
self.send_logs()
@ignore_stale
def handle_poll(self, message):
if self.vote_cast or len(self.log) > message.log_idx:
return
self.vote_cast = True
print("Vote Cast")
self.udp_broadcast(raft_pb2.RaftUDPMessage.VOTE, leader=message.leader)
@ignore_stale
def handle_vote(self, message):
if message.leader == self.id:
self.supporters.add(message.sender)
if len(self.supporters) >= self.quorum:
self.leader = self.id
return ACT.MAJORITY
def handle_status(self, message):
for idx, log_entry in enumerate(message.log, message.log_idx):
if idx < len(self.log):
if self.log[idx].term == log_entry.term:
self.msg_idx[message.sender] = idx + 1
self.log_idx[message.sender] = idx + 1
if self.log[idx].term == self.term and idx > self.commit_idx:
best_idx = heapq.nlargest(self.quorum, self.log_idx.values())[-1] - 1
self.commit(best_idx)
else:
self.msg_idx[message.sender] = idx
break
else:
break
if self.msg_idx.get(message.sender, 0) < len(self.log) or message.commit_idx < self.commit_idx:
self.send_logs()
@ignore_stale
def run_append(self, message):
assert len(message.log) >= 2
self.merge_logs(message)
self.udp_broadcast(raft_pb2.RaftUDPMessage.STATUS, log_idx=len(self.log) - 1)
self.commit(message.commit_idx)
return
# ======= HELPERS =========
def merge_logs(self, message):
if message.log_idx >= len(self.log):
return False
if message.log[0].term == self.log[message.log_idx].term:
idx = max((i for i, (a, b) in enumerate(zip(message.log, self.log[message.log_idx:])) if a.term == b.term)) + 1
self.log = self.log[:message.log_idx + idx]
self.log.extend(message.log[idx:])
return message.log_idx + idx == len(self.log)
else:
self.log = self.log[:message.log_idx]
return False
``` |
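A hypothetical way to launch a node built on the class above; the peer ids are placeholders, and with a quorum of 2 out of 3 a single process will keep electing without winning until peers join the same broadcast network.
```python
# Hypothetical single-node launch; ids are placeholders.
import asyncio

async def main():
    node = Raft(id=1, server_ids=[1, 2, 3])
    await node.start()
    while True:              # keep the event loop alive for the background services
        await asyncio.sleep(1)

if __name__ == '__main__':
    asyncio.run(main())
```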
{
"source": "johnjichaowei/etf-holdings-publisher",
"score": 2
} |
#### File: bin/deploy/update_lambda_code.py
```python
import boto3
def update_lambda_code(func_name, source_code_bucket_name, source_code_key):
print("Start to update function code for lambda {} with {} - {} in S3".format(
func_name, source_code_bucket_name, source_code_key
))
client = boto3.client('lambda')
client.update_function_code(
FunctionName=func_name,
S3Bucket=source_code_bucket_name,
S3Key=source_code_key
)
print("Updating lambda function code finished")
```
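A hypothetical invocation of the helper above; the function name, bucket, and key are placeholders, and valid AWS credentials are assumed in the environment.
```python
# Placeholder names; assumes AWS credentials are configured in the environment.
if __name__ == '__main__':
    update_lambda_code(
        func_name='etf-holdings-publisher',
        source_code_bucket_name='my-deploy-artifacts',
        source_code_key='etf-holdings-publisher/source.zip',
    )
```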
#### File: etf-holdings-publisher/src/holding.py
```python
class Holding(object):
def __init__(self, name, symbol, sector, market_val_percent, market_value, number_of_shares):
self.name = name
self.symbol = symbol
self.sector = sector
self.market_val_percent = market_val_percent
self.market_value = market_value
self.number_of_shares = number_of_shares
def __eq__(self, other):
return (
self.__class__ == other.__class__ and
self.name == other.name and self.symbol == other.symbol and self.sector == other.sector and
self.market_val_percent == other.market_val_percent and
self.market_value == other.market_value and
self.number_of_shares == other.number_of_shares
)
```
#### File: etf-holdings-publisher/src/holding_schema.py
```python
from marshmallow import Schema, fields, validates, ValidationError, EXCLUDE
# {"holding":"Commonwealth Bank of Australia","marketValPercent":8.25647,"marketValue":1.04168334081E9,
# "symbol":"CBA","countryCode":"AU","sectorName":"Diversified Banks","numberofshares":1.4389879E7,
# "currencySymbol":"$"}
class HoldingSchema(Schema):
class Meta:
unknown = EXCLUDE
ordered = True
name = fields.Str(required=True, data_key='holding')
symbol = fields.Str(required=True, data_key='symbol')
sector = fields.Str(data_key='sectorName')
market_val_percent = fields.Decimal(data_key='marketValPercent', as_string=True)
market_value = fields.Decimal(data_key='marketValue', as_string=True)
number_of_shares = fields.Decimal(data_key='numberofshares', as_string=True)
@validates('name')
def validate_name(self, value):
self.validate_non_empty_string(value, 'Holding name')
@validates('symbol')
def validate_symbol(self, value):
self.validate_non_empty_string(value, 'Holding symbol')
def validate_non_empty_string(self, value, field_name):
if value == '':
raise ValidationError("{} can't be empty.".format(field_name))
if value.strip() == '':
raise ValidationError("{} can't be only white spaces.".format(field_name))
```
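A minimal usage sketch of the schema above, deserializing the sample payload quoted in the comment; marshmallow 3 semantics (`loads` returning a dict, unknown keys excluded) are assumed.
```python
# Deserialize the sample payload from the comment above (marshmallow 3 assumed).
from decimal import Decimal

raw = (
    '{"holding":"Commonwealth Bank of Australia","marketValPercent":8.25647,'
    '"marketValue":1.04168334081E9,"symbol":"CBA","countryCode":"AU",'
    '"sectorName":"Diversified Banks","numberofshares":1.4389879E7,'
    '"currencySymbol":"$"}'
)
data = HoldingSchema().loads(raw)  # countryCode and currencySymbol are excluded as unknown
assert data['market_value'] == Decimal('1041683340.81')
print(data)
```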
#### File: etf-holdings-publisher/tests/test_holding_schema.py
```python
from src.holding_schema import *
from decimal import Decimal
def test_data_key_for_holding_name():
schema = HoldingSchema(only=['name'])
result = {'name': 'Commonwealth Bank of Australia'}
assert schema.loads('{"holding":"Commonwealth Bank of Australia"}') == result
def test_holding_name_is_required():
schema = HoldingSchema(only=['name'])
assert schema.validate({}) == {'holding': ['Missing data for required field.']}
def test_holding_name_can_not_be_empty():
schema = HoldingSchema(only=['name'])
assert schema.validate({'holding': ''}) == {'holding': ["Holding name can't be empty."]}
def test_holding_name_can_not_be_only_white_spaces():
schema = HoldingSchema(only=['name'])
assert schema.validate({'holding': ' '}) == {'holding': ["Holding name can't be only white spaces."]}
def test_data_key_for_holding_symbol():
schema = HoldingSchema(only=['symbol'])
result = {'symbol': 'CBA'}
assert schema.loads('{"symbol":"CBA"}') == result
def test_holding_symbol_is_required():
schema = HoldingSchema(only=['symbol'])
assert schema.validate({}) == {'symbol': ['Missing data for required field.']}
def test_holding_symbol_can_not_be_empty():
schema = HoldingSchema(only=['symbol'])
assert schema.validate({'symbol': ''}) == {'symbol': ["Holding symbol can't be empty."]}
def test_holding_symbol_can_not_be_only_white_spaces():
schema = HoldingSchema(only=['symbol'])
assert schema.validate({'symbol': ' '}) == {'symbol': ["Holding symbol can't be only white spaces."]}
def test_data_key_for_holding_sector():
schema = HoldingSchema(only=['sector'])
result = {'sector': 'Diversified Banks'}
assert schema.loads('{"sectorName":"Diversified Banks"}') == result
def test_data_key_for_market_val_percent():
schema = HoldingSchema(only=['market_val_percent'])
result = {'market_val_percent': Decimal('8.25647')}
assert schema.loads('{"marketValPercent":8.25647}') == result
def test_market_val_percent_must_be_a_valid_number():
schema = HoldingSchema(only=['market_val_percent'])
assert schema.validate({'marketValPercent': 'bla'}) == {'marketValPercent': ['Not a valid number.']}
def test_data_key_for_market_value():
schema = HoldingSchema(only=['market_value'])
result = {'market_value': Decimal('1041683340.81')}
assert schema.loads('{"marketValue":1.04168334081E9}') == result
def test_market_value_must_be_a_valid_number():
schema = HoldingSchema(only=['market_value'])
assert schema.validate({'marketValue': 'bla'}) == {'marketValue': ['Not a valid number.']}
def test_data_key_for_number_of_shares():
schema = HoldingSchema(only=['number_of_shares'])
result = {'number_of_shares': Decimal('14389879')}
assert schema.loads('{"numberofshares":1.4389879E7}') == result
def test_number_of_shares_must_be_a_valid_number():
schema = HoldingSchema(only=['number_of_shares'])
assert schema.validate({'numberofshares': 'bla'}) == {'numberofshares': ['Not a valid number.']}
def test_schema_excludes_unknown_field():
assert HoldingSchema().unknown == EXCLUDE
def test_dumps_can_serialize_the_data():
holding = {
"name": "Commonwealth Bank of Australia",
"symbol": "CBA",
"sector": "Diversified Banks",
"market_val_percent": Decimal('8.25647'),
"market_value": Decimal('1041683340.81'),
"number_of_shares": Decimal('14389879')
}
expected_json_str = '{"holding": "Commonwealth Bank of Australia", "symbol": "CBA", "sectorName": "Diversified Banks", "marketValPercent": "8.25647", "marketValue": "1041683340.81", "numberofshares": "14389879"}'
assert HoldingSchema().dumps(holding) == expected_json_str
``` |
{
"source": "johnjichaowei/market-values-api",
"score": 3
} |
#### File: market_values_api/handlers/market_values_handler.py
```python
from aiohttp import web
import logging
import simplejson
import market_values_api.services as services
class MarketValuesHandler(object):
async def get(self, request):
logging.info(f"Handling get market values request: {request.query_string}")
companies = self._parse_companies_param(request.query)
service = services.MarketValuesService(request.app['client_session'])
market_values = await service.get(companies)
logging.info(f"Finishing get market values request")
return self._json_response(market_values)
def _parse_companies_param(self, query):
if 'companies' not in query:
raise web.HTTPBadRequest(reason='The companies param is required')
param = query['companies']
return param.split(',')
def _json_response(self, data):
response = web.Response()
response.headers.add('Content-Type', 'application/json')
response.text = simplejson.dumps(data)
return response
```
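A hedged sketch of how the handler above might be wired into an aiohttp application; the route path and startup shape are assumptions, not the project's actual entry point.
```python
# Hypothetical app wiring; the route path is an assumption.
import aiohttp
from aiohttp import web

async def make_app():
    app = web.Application()
    app['client_session'] = aiohttp.ClientSession()  # shared session the handler expects
    app.router.add_get('/market-values', MarketValuesHandler().get)
    return app

if __name__ == '__main__':
    web.run_app(make_app())
```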
#### File: market_values_api/parsers/parse_market_value.py
```python
import re
from decimal import Decimal
from market_values_api.exceptions import ParseMarketValueError
class ParseMarketValue(object):
def __init__(self, raw_text):
self.raw_text = raw_text
def call(self):
return self._parse_market_value()
def _parse_market_value(self):
p = re.compile(
r'<td.+?data-test="MARKET_CAP-value".*?><span.*?>\s*(?P<value>\d*\.?\d*)(?P<unit>[BbMmKk]?)\s*</span></td>'
)
match = p.search(self.raw_text)
        if match is not None:
return self._convert_value(match.group('value'), match.group('unit'))
raise ParseMarketValueError('Failed to parse market value')
def _convert_value(self, value, unit):
multiplier = {
'B': 1000000000,
'M': 1000000,
'K': 1000,
'': 1
}[unit.upper()]
return Decimal(value) * multiplier
```
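A quick check of the regex above on a hand-written snippet (not real page markup):
```python
# Hand-written snippet, not real page markup; exercises the value/unit groups.
snippet = '<td class="x" data-test="MARKET_CAP-value"><span>150.3B</span></td>'
print(ParseMarketValue(snippet).call())  # Decimal('150.3') * 10**9
```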
#### File: tests/services/test_market_values_service.py
```python
import asynctest
import pytest
from decimal import Decimal
from market_values_api.services import MarketValuesService
class TestMarketValuesService(object):
@pytest.fixture
def company_list(self):
return ['CBA', 'ANZ', 'REA']
@pytest.fixture
def market_values(self):
return {'CBA': Decimal('123'), 'ANZ': Decimal('321'), 'REA': Decimal('789')}
@pytest.fixture
def repository_class(self):
with asynctest.patch(
'market_values_api.repositories.MarketValueRepository',
autospec=True, scope=asynctest.LIMITED
) as repository_class:
yield repository_class
@pytest.fixture
def repository(self, repository_class, market_values):
def _side_effect(company):
if market_values.get(company) == None:
raise Exception('Unexpected error')
return (company, market_values[company])
repository = repository_class.return_value
repository.get.side_effect = _side_effect
return repository
@pytest.mark.usefixtures("repository")
async def test_get_instantiate_one_market_value_repository(self, session, company_list, repository_class):
await MarketValuesService(session).get(company_list)
repository_class.assert_called_once_with(session)
async def test_get_calls_market_value_repository_for_each_company(self, session, company_list, repository):
await MarketValuesService(session).get(company_list)
assert repository.get.call_count == 3
@pytest.mark.usefixtures("repository")
async def test_get_returns_market_values_as_a_dict(self, session, company_list, market_values):
result = await MarketValuesService(session).get(company_list)
assert result == market_values
@pytest.mark.usefixtures("repository")
async def test_get_reraises_exception_occurred_in_async_task(self, session, company_list):
with pytest.raises(Exception) as err:
await MarketValuesService(session).get(company_list + ['CompanyNotExist'])
assert 'Unexpected error' in str(err)
``` |
{
"source": "johnjihong/fastapi-reactjs-template",
"score": 2
} |
#### File: fastapi-reactjs-template/app/server.py
```python
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from datetime import datetime
app = FastAPI()
app.add_middleware(CORSMiddleware, allow_origins=['*'])
@app.get('/api/foo')
def example():
return {'text': f'Hello World and utc now is {datetime.utcnow()}'}
``` |
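The endpoint can be exercised in-process with Starlette's test client; a minimal sketch (the test client pulls in an HTTP client dependency such as requests or httpx, depending on the Starlette version):
```python
# In-process smoke test of the endpoint above.
from starlette.testclient import TestClient

client = TestClient(app)
response = client.get('/api/foo')
assert response.status_code == 200
print(response.json()['text'])  # "Hello World and utc now is ..."
```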
{
"source": "johnjihong/Gadget",
"score": 3
} |
#### File: Gadget/Gadget/tracker.py
```python
from time import time
from .log import logger
from .utils import display
from typing import Callable
from threading import Thread
from ipywidgets.widgets import HTML
class Track:
def __init__(self):
self.elapsed = 0.0
self.start_at = None
self.result = None
def run(self, *args, **kwargs):
raise NotImplementedError
def start(self, *args, **kwargs):
if self.start_at is not None:
raise RuntimeError('Already started.')
self.start_at = time()
thread = Thread(target=self.run, args=args, kwargs=kwargs)
thread.start()
def stop(self):
if self.start_at is None:
            raise RuntimeError('Not started.')
self.elapsed = time() - self.start_at
self.start_at = None
def reset(self):
self.elapsed = 0.0
@property
def running(self):
return self.start_at is not None
class Spinner(HTML, Track):
"""Run a task in a separate thread and display spinner during the execution.
Notebook example:
from time import sleep
def long_running_task():
for i in range(10):
sleep(1)
foo = Spinner(long_running_task)
foo.start()
foo
"""
def __init__(self, task: Callable, *args, **kwargs):
super().__init__(*args, **kwargs)
self.task = task
self.value = '<i class="fa fa-spinner fa-spin" style="font-size: 80px; color: red;"></i>'
    def run(self):
        display(self, True)
        try:
            self.result = self.task()
        except Exception as err:
            logger.exception(err)
        finally:
            self.stop()
    def start(self):
        # No argument needed: the task was captured in __init__, matching the docstring example.
        super().start()
    def stop(self):
        super().stop()
        display(self, False)
``` |
{
"source": "johnjihong/Robot",
"score": 2
} |
#### File: Robot/Robot/core.py
```python
from __future__ import absolute_import
from Robot.log import logger
from typing import Callable
from seleniumrequests import PhantomJS
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, WebDriverException
class is_page_ready:
def __call__(self, driver):
is_dom_ready = driver.execute_script("return document.readyState === 'complete';")
        try:
            has_ajax_completed = driver.execute_script('return jQuery.active === 0;')
        except WebDriverException:
            # jQuery may not be present on the page; fall back to DOM readiness only.
            logger.exception('jQuery readiness check failed')
            return is_dom_ready
else:
return is_dom_ready and has_ajax_completed
class Factory:
def __init__(self, username: str, password: str, timeout: int=30, executable_path: str=None):
self.username = username
self.password = password
self.timeout = timeout
self.executable_path = executable_path if executable_path else 'phantomjs'
def login(self, url):
self.driver = PhantomJS(executable_path=self.executable_path)
self.driver.get(url)
self.driver.find_element_by_id('userID').send_keys(self.username)
self.driver.find_element_by_id('password').send_keys(self.password)
self.driver.find_element_by_xpath("//input[@value='Log in']").click()
WebDriverWait(self.driver, self.timeout).until(
EC.text_to_be_present_in_element((By.XPATH, "//a[@alt='logout']"), 'Log Out'),
'%s failed to login within %d seconds timeout' % (self.username, self.timeout)
)
def start(self, method: Callable):
method()
        WebDriverWait(self.driver, self.timeout).until(
            is_page_ready(),
            'Operation failed to be completed before the %d seconds timeout' % self.timeout
        )
``` |
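A hypothetical driver of the Factory above; the URL, credentials, and element id are placeholders, and a PhantomJS binary is assumed on PATH.
```python
# Hypothetical usage; URL, credentials, and element id are placeholders.
if __name__ == '__main__':
    robot = Factory(username='user', password='secret', timeout=30)
    robot.login('https://example.com/login')
    # Wrap any post-login interaction in start() so it waits for DOM + AJAX to settle.
    robot.start(lambda: robot.driver.find_element_by_id('reports').click())
```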
{
"source": "JohnJim0816/rl-tutorials",
"score": 3
} |
#### File: rl-tutorials/A2C/agent.py
```python
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
class ActorCritic(nn.Module):
    ''' A2C network model, containing an Actor and a Critic
    '''
def __init__(self, input_dim, output_dim, hidden_dim):
super(ActorCritic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
self.actor = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, output_dim),
nn.Softmax(dim=1),
)
def forward(self, x):
value = self.critic(x)
probs = self.actor(x)
dist = Categorical(probs)
return dist, value
class A2C:
    ''' A2C algorithm
'''
def __init__(self,state_dim,action_dim,cfg) -> None:
self.gamma = cfg.gamma
self.device = cfg.device
self.model = ActorCritic(state_dim, action_dim, cfg.hidden_size).to(self.device)
self.optimizer = optim.Adam(self.model.parameters())
def compute_returns(self,next_value, rewards, masks):
R = next_value
returns = []
for step in reversed(range(len(rewards))):
R = rewards[step] + self.gamma * R * masks[step]
returns.insert(0, R)
return returns
``` |
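A small worked example of `compute_returns`: with gamma = 0.9, rewards [1, 1, 1], a terminal mask on the last step, and a bootstrap value of 0.5, the discounted returns below can be checked by hand (the numbers are illustrative).
```python
# Standalone re-implementation of compute_returns for a hand-checkable example.
gamma = 0.9
rewards = [1.0, 1.0, 1.0]
masks = [1.0, 1.0, 0.0]  # 0 marks a terminal step, cutting off bootstrapping
R = 0.5                  # next_value estimate from the critic
returns = []
for step in reversed(range(len(rewards))):
    R = rewards[step] + gamma * R * masks[step]
    returns.insert(0, R)
print(returns)  # approximately [2.71, 1.9, 1.0]
```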
{
"source": "johnjim0816/rl-tutorials",
"score": 3
} |
#### File: codes/DQN/dqn_cnn2.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import random
import math
import numpy as np
class CNN(nn.Module):
def __init__(self, n_frames, n_actions):
super(CNN,self).__init__()
self.n_frames = n_frames
self.n_actions = n_actions
# Layers
self.conv1 = nn.Conv2d(
in_channels=n_frames,
out_channels=16,
kernel_size=8,
stride=4,
padding=2
)
self.conv2 = nn.Conv2d(
in_channels=16,
out_channels=32,
kernel_size=4,
stride=2,
padding=1
)
self.fc1 = nn.Linear(
in_features=3200,
out_features=256,
)
self.fc2 = nn.Linear(
in_features=256,
out_features=n_actions,
)
# Activation Functions
self.relu = nn.ReLU()
def flatten(self, x):
batch_size = x.size()[0]
x = x.view(batch_size, -1)
return x
def forward(self, x):
# Forward pass
x = self.relu(self.conv1(x)) # In: (80, 80, 4) Out: (20, 20, 16)
x = self.relu(self.conv2(x)) # In: (20, 20, 16) Out: (10, 10, 32)
x = self.flatten(x) # In: (10, 10, 32) Out: (3200,)
x = self.relu(self.fc1(x)) # In: (3200,) Out: (256,)
x = self.fc2(x) # In: (256,) Out: (4,)
return x
class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity  # capacity of the replay buffer
        self.buffer = []  # internal storage
        self.position = 0
    def push(self, state, action, reward, next_state, done):
        ''' The buffer acts as a queue: once capacity is exceeded, the oldest transitions are overwritten
        '''
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity
    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)  # randomly sample a mini-batch of transitions
        state, action, reward, next_state, done = zip(*batch)  # unzip into states, actions, etc.
        return state, action, reward, next_state, done
    def __len__(self):
        ''' Return the number of transitions currently stored
        '''
        return len(self.buffer)
class DQN:
    def __init__(self, n_states, n_actions, cfg):
        self.n_actions = n_actions  # total number of actions
        self.device = cfg.device  # device: cpu, gpu, etc.
        self.gamma = cfg.gamma  # reward discount factor
        # parameters for the epsilon-greedy policy
        self.frame_idx = 0  # frame counter used for epsilon decay
        self.epsilon = lambda frame_idx: cfg.epsilon_end + \
            (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.batch_size = cfg.batch_size
        self.policy_net = CNN(n_states, n_actions).to(self.device)
        self.target_net = CNN(n_states, n_actions).to(self.device)
        for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):  # copy parameters to the target network
            target_param.data.copy_(param.data)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)  # optimizer
        self.memory = ReplayBuffer(cfg.memory_capacity)  # experience replay
    def choose_action(self, state):
        ''' Choose an action with an epsilon-greedy policy
        '''
        self.frame_idx += 1
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():
                state = torch.tensor([state], device=self.device, dtype=torch.float32)
                q_values = self.policy_net(state)
                action = q_values.max(1)[1].item()  # pick the action with the largest Q value
        else:
            action = random.randrange(self.n_actions)
        return action
    def update(self):
        if len(self.memory) < self.batch_size:  # do not update the policy until the memory holds at least one batch
            return
        # sample a batch of transitions from the replay memory
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
            self.batch_size)
        # convert to tensors
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)  # compute Q(s_t, a) for the current states
        next_q_values = self.target_net(next_state_batch).max(1)[0].detach()  # compute the max Q value over the next states
        # compute the expected Q values; for terminal states done_batch=1, so expected_q_value equals the reward
        expected_q_values = reward_batch + self.gamma * next_q_values * (1-done_batch)
        loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))  # compute the mean squared error loss
        # optimize and update the model
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters():  # clip gradients to prevent explosion
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()
def save(self, path):
torch.save(self.target_net.state_dict(), path+'dqn_checkpoint.pth')
def load(self, path):
self.target_net.load_state_dict(torch.load(path+'dqn_checkpoint.pth'))
for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
param.data.copy_(target_param.data)
```
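The class above syncs the target network only once, in `__init__`; a training loop usually repeats that copy periodically. The smoke-test sketch below shows the full cycle around this DQN class; `DummyEnv` and the `cfg` values are invented for illustration.
```python
# Smoke-test loop around the DQN class above; DummyEnv and cfg values are invented.
import random
import types

class DummyEnv:
    """Toy environment emitting 4 stacked 80x80 'frames' of zeros."""
    def reset(self):
        return [[[0.0] * 80] * 80] * 4
    def step(self, action):
        return self.reset(), 1.0, random.random() < 0.1, {}

cfg = types.SimpleNamespace(
    device='cpu', gamma=0.99, epsilon_start=0.9, epsilon_end=0.05,
    epsilon_decay=500, batch_size=4, memory_capacity=100, lr=1e-3,
)
env = DummyEnv()
agent = DQN(n_states=4, n_actions=2, cfg=cfg)  # n_states doubles as the frame count
for i_ep in range(3):
    state, done = env.reset(), False
    while not done:
        action = agent.choose_action(state)
        next_state, reward, done, _ = env.step(action)
        agent.memory.push(state, action, reward, next_state, done)
        state = next_state
        agent.update()
    # Periodically re-sync the target network (the class only does this in __init__).
    agent.target_net.load_state_dict(agent.policy_net.state_dict())
```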
#### File: codes/PPO/ppo2.py
```python
import os
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.distributions.categorical import Categorical
class PPOMemory:
def __init__(self, batch_size):
self.states = []
self.probs = []
self.vals = []
self.actions = []
self.rewards = []
self.dones = []
self.batch_size = batch_size
def sample(self):
batch_step = np.arange(0, len(self.states), self.batch_size)
indices = np.arange(len(self.states), dtype=np.int64)
np.random.shuffle(indices)
batches = [indices[i:i+self.batch_size] for i in batch_step]
return np.array(self.states),np.array(self.actions),np.array(self.probs),\
np.array(self.vals),np.array(self.rewards),np.array(self.dones),batches
def push(self, state, action, probs, vals, reward, done):
self.states.append(state)
self.actions.append(action)
self.probs.append(probs)
self.vals.append(vals)
self.rewards.append(reward)
self.dones.append(done)
def clear(self):
self.states = []
self.probs = []
self.actions = []
self.rewards = []
self.dones = []
self.vals = []
class Actor(nn.Module):
def __init__(self,n_states, n_actions,
hidden_dim):
super(Actor, self).__init__()
self.actor = nn.Sequential(
nn.Linear(n_states, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, n_actions),
nn.Softmax(dim=-1)
)
def forward(self, state):
dist = self.actor(state)
dist = Categorical(dist)
return dist
class Critic(nn.Module):
def __init__(self, n_states,hidden_dim):
super(Critic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(n_states, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1)
)
def forward(self, state):
value = self.critic(state)
return value
class PPO:
def __init__(self, n_states, n_actions,cfg):
self.gamma = cfg.gamma
self.continuous = cfg.continuous
self.policy_clip = cfg.policy_clip
self.n_epochs = cfg.n_epochs
self.gae_lambda = cfg.gae_lambda
self.device = cfg.device
self.actor = Actor(n_states, n_actions,cfg.hidden_dim).to(self.device)
self.critic = Critic(n_states,cfg.hidden_dim).to(self.device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=cfg.actor_lr)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=cfg.critic_lr)
self.memory = PPOMemory(cfg.batch_size)
self.loss = 0
def choose_action(self, state):
        state = np.array([state])  # converting to a numpy array first makes the tensor conversion more efficient
state = torch.tensor(state, dtype=torch.float).to(self.device)
dist = self.actor(state)
value = self.critic(state)
action = dist.sample()
probs = torch.squeeze(dist.log_prob(action)).item()
if self.continuous:
action = torch.tanh(action)
else:
action = torch.squeeze(action).item()
value = torch.squeeze(value).item()
return action, probs, value
def update(self):
for _ in range(self.n_epochs):
state_arr, action_arr, old_prob_arr, vals_arr,reward_arr, dones_arr, batches = self.memory.sample()
values = vals_arr[:]
### compute advantage ###
advantage = np.zeros(len(reward_arr), dtype=np.float32)
for t in range(len(reward_arr)-1):
discount = 1
a_t = 0
for k in range(t, len(reward_arr)-1):
a_t += discount*(reward_arr[k] + self.gamma*values[k+1]*\
(1-int(dones_arr[k])) - values[k])
discount *= self.gamma*self.gae_lambda
advantage[t] = a_t
advantage = torch.tensor(advantage).to(self.device)
### SGD ###
values = torch.tensor(values).to(self.device)
for batch in batches:
states = torch.tensor(state_arr[batch], dtype=torch.float).to(self.device)
old_probs = torch.tensor(old_prob_arr[batch]).to(self.device)
actions = torch.tensor(action_arr[batch]).to(self.device)
dist = self.actor(states)
critic_value = self.critic(states)
critic_value = torch.squeeze(critic_value)
new_probs = dist.log_prob(actions)
prob_ratio = new_probs.exp() / old_probs.exp()
weighted_probs = advantage[batch] * prob_ratio
weighted_clipped_probs = torch.clamp(prob_ratio, 1-self.policy_clip,
1+self.policy_clip)*advantage[batch]
actor_loss = -torch.min(weighted_probs, weighted_clipped_probs).mean()
returns = advantage[batch] + values[batch]
critic_loss = (returns-critic_value)**2
critic_loss = critic_loss.mean()
total_loss = actor_loss + 0.5*critic_loss
self.loss = total_loss
self.actor_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
total_loss.backward()
self.actor_optimizer.step()
self.critic_optimizer.step()
self.memory.clear()
def save(self,path):
actor_checkpoint = os.path.join(path, 'ppo_actor.pt')
critic_checkpoint= os.path.join(path, 'ppo_critic.pt')
torch.save(self.actor.state_dict(), actor_checkpoint)
torch.save(self.critic.state_dict(), critic_checkpoint)
def load(self,path):
actor_checkpoint = os.path.join(path, 'ppo_actor.pt')
critic_checkpoint= os.path.join(path, 'ppo_critic.pt')
self.actor.load_state_dict(torch.load(actor_checkpoint))
self.critic.load_state_dict(torch.load(critic_checkpoint))
```
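One note on the advantage computation in `update` above: the nested loop re-derives the GAE sum from scratch at every timestep, which is O(T²). The same numbers fall out of a backward recursion in O(T); a standalone sketch (mirroring the loop's exact semantics, including leaving the last advantage at zero and not masking the running sum at terminal steps):
```python
import numpy as np

def compute_advantage(rewards, values, dones, gamma, lam):
    """O(T) backward recursion equivalent to the O(T^2) nested loop above."""
    T = len(rewards)
    adv = np.zeros(T, dtype=np.float32)
    last = 0.0
    for t in reversed(range(T - 1)):
        delta = rewards[t] + gamma * values[t + 1] * (1 - int(dones[t])) - values[t]
        last = delta + gamma * lam * last
        adv[t] = last
    return adv

# Hand-checkable example: adv[1] = 1 + 0.9*0.5 - 0.5 = 0.95
# and adv[0] = 0.95 + 0.9*0.95*0.95 ≈ 1.762.
print(compute_advantage([1, 1, 1], [0.5, 0.5, 0.5], [0, 0, 1], gamma=0.9, lam=0.95))
```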
#### File: codes/QLearning/qlearning.py
```python
import numpy as np
import math
import torch
from collections import defaultdict
class QLearning(object):
def __init__(self,n_states,
n_actions,cfg):
self.n_actions = n_actions
        self.lr = cfg.lr  # learning rate
self.gamma = cfg.gamma
self.epsilon = 0
self.sample_count = 0
self.epsilon_start = cfg.epsilon_start
self.epsilon_end = cfg.epsilon_end
self.epsilon_decay = cfg.epsilon_decay
        self.Q_table = defaultdict(lambda: np.zeros(n_actions))  # nested dict mapping state -> action -> state-action value (Q value), i.e. the Q table
def choose_action(self, state):
self.sample_count += 1
        self.epsilon = self.epsilon_end + (self.epsilon_start - self.epsilon_end) * \
            math.exp(-1. * self.sample_count / self.epsilon_decay)  # epsilon decays over time; exponential decay is used here
        # epsilon-greedy policy
        if np.random.uniform(0, 1) > self.epsilon:
            action = np.argmax(self.Q_table[str(state)])  # choose the action with the largest Q(s,a)
        else:
            action = np.random.choice(self.n_actions)  # choose a random action
return action
def predict(self,state):
action = np.argmax(self.Q_table[str(state)])
return action
def update(self, state, action, reward, next_state, done):
Q_predict = self.Q_table[str(state)][action]
        if done:  # terminal state
Q_target = reward
else:
Q_target = reward + self.gamma * np.max(self.Q_table[str(next_state)])
self.Q_table[str(state)][action] += self.lr * (Q_target - Q_predict)
def save(self,path):
import dill
torch.save(
obj=self.Q_table,
f=path+"Qleaning_model.pkl",
pickle_module=dill
)
print("保存模型成功!")
def load(self, path):
import dill
self.Q_table =torch.load(f=path+'Qleaning_model.pkl',pickle_module=dill)
print("加载模型成功!")
```
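A toy training loop for the agent above on an invented three-state chain (move right twice to earn a reward) shows the tabular update converging:
```python
# Toy chain environment and training loop; everything here is invented for illustration.
import types

class ChainEnv:
    """States 0-1-2; action 1 moves right, action 0 resets; reward 1 on reaching state 2."""
    def reset(self):
        self.s = 0
        return self.s
    def step(self, action):
        self.s = self.s + 1 if action == 1 else 0
        done = self.s == 2
        return self.s, (1.0 if done else 0.0), done

cfg = types.SimpleNamespace(lr=0.1, gamma=0.9, epsilon_start=0.9,
                            epsilon_end=0.01, epsilon_decay=200)
env = ChainEnv()
agent = QLearning(n_states=3, n_actions=2, cfg=cfg)
for _ in range(500):
    state, done = env.reset(), False
    for _step in range(20):  # cap episode length for the toy example
        action = agent.choose_action(state)
        next_state, reward, done = env.step(action)
        agent.update(state, action, reward, next_state, done)
        state = next_state
        if done:
            break
print(agent.predict(0), agent.predict(1))  # both should settle on action 1 (move right)
```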
#### File: codes/RainbowDQN/rainbow_dqn.py
```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import random
class ReplayBuffer:
    def __init__(self, capacity):
        self.capacity = capacity  # capacity of the replay buffer
        self.buffer = []  # internal storage
        self.position = 0
    def push(self, state, action, reward, next_state, done):
        ''' The buffer acts as a queue: once capacity is exceeded, the oldest transitions are overwritten
        '''
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity
    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)  # randomly sample a mini-batch of transitions
        state, action, reward, next_state, done = zip(*batch)  # unzip into states, actions, etc.
        return state, action, reward, next_state, done
    def __len__(self):
        ''' Return the number of transitions currently stored
        '''
        return len(self.buffer)
class NoisyLinear(nn.Module):
def __init__(self, input_dim, output_dim, device, std_init=0.4):
super(NoisyLinear, self).__init__()
self.device = device
self.input_dim = input_dim
self.output_dim = output_dim
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.FloatTensor(output_dim, input_dim))
self.weight_sigma = nn.Parameter(torch.FloatTensor(output_dim, input_dim))
self.register_buffer('weight_epsilon', torch.FloatTensor(output_dim, input_dim))
self.bias_mu = nn.Parameter(torch.FloatTensor(output_dim))
self.bias_sigma = nn.Parameter(torch.FloatTensor(output_dim))
self.register_buffer('bias_epsilon', torch.FloatTensor(output_dim))
self.reset_parameters()
self.reset_noise()
def forward(self, x):
if self.device:
weight_epsilon = self.weight_epsilon.cuda()
bias_epsilon = self.bias_epsilon.cuda()
else:
weight_epsilon = self.weight_epsilon
bias_epsilon = self.bias_epsilon
if self.training:
weight = self.weight_mu + self.weight_sigma.mul(Variable(weight_epsilon))
bias = self.bias_mu + self.bias_sigma.mul(Variable(bias_epsilon))
else:
weight = self.weight_mu
bias = self.bias_mu
return F.linear(x, weight, bias)
def reset_parameters(self):
mu_range = 1 / math.sqrt(self.weight_mu.size(1))
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.weight_sigma.size(1)))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.bias_sigma.size(0)))
def reset_noise(self):
epsilon_in = self._scale_noise(self.input_dim)
epsilon_out = self._scale_noise(self.output_dim)
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(self._scale_noise(self.output_dim))
def _scale_noise(self, size):
x = torch.randn(size)
x = x.sign().mul(x.abs().sqrt())
return x
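# NoisyLinear implements the factorised-Gaussian scheme from the NoisyNet paper
# (Fortunato et al., 2018): _scale_noise applies f(x) = sign(x)*sqrt(|x|), and
# reset_noise combines the per-input and per-output samples via the outer
# product of epsilon_out and epsilon_in, so only input_dim + output_dim
# independent samples are drawn per layer instead of input_dim * output_dim.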
class RainbowModel(nn.Module):
def __init__(self, n_states, n_actions, n_atoms, Vmin, Vmax):
super(RainbowModel, self).__init__()
self.n_states = n_states
self.n_actions = n_actions
self.n_atoms = n_atoms
self.Vmin = Vmin
self.Vmax = Vmax
self.linear1 = nn.Linear(n_states, 32)
self.linear2 = nn.Linear(32, 64)
self.noisy_value1 = NoisyLinear(64, 64, device=device)
self.noisy_value2 = NoisyLinear(64, self.n_atoms, device=device)
self.noisy_advantage1 = NoisyLinear(64, 64, device=device)
self.noisy_advantage2 = NoisyLinear(64, self.n_atoms * self.n_actions, device=device)
def forward(self, x):
batch_size = x.size(0)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
value = F.relu(self.noisy_value1(x))
value = self.noisy_value2(value)
advantage = F.relu(self.noisy_advantage1(x))
advantage = self.noisy_advantage2(advantage)
value = value.view(batch_size, 1, self.n_atoms)
advantage = advantage.view(batch_size, self.n_actions, self.n_atoms)
x = value + advantage - advantage.mean(1, keepdim=True)
        x = F.softmax(x.view(-1, self.n_atoms), dim=1).view(-1, self.n_actions, self.n_atoms)
return x
def reset_noise(self):
self.noisy_value1.reset_noise()
self.noisy_value2.reset_noise()
self.noisy_advantage1.reset_noise()
self.noisy_advantage2.reset_noise()
def act(self, state):
state = Variable(torch.FloatTensor(state).unsqueeze(0), volatile=True)
dist = self.forward(state).data.cpu()
dist = dist * torch.linspace(self.Vmin, self.Vmax, self.n_atoms)
action = dist.sum(2).max(1)[1].numpy()[0]
return action
class RainbowDQN(nn.Module):
def __init__(self, n_states, n_actions, n_atoms, Vmin, Vmax,cfg):
super(RainbowDQN, self).__init__()
self.n_states = n_states
self.n_actions = n_actions
self.n_atoms = cfg.n_atoms
self.Vmin = cfg.Vmin
self.Vmax = cfg.Vmax
self.policy_model = RainbowModel(n_states, n_actions, n_atoms, Vmin, Vmax)
self.target_model = RainbowModel(n_states, n_actions, n_atoms, Vmin, Vmax)
self.batch_size = cfg.batch_size
        self.memory = ReplayBuffer(cfg.memory_capacity)  # experience replay
self.optimizer = optim.Adam(self.policy_model.parameters(), 0.001)
def choose_action(self,state):
state = Variable(torch.FloatTensor(state).unsqueeze(0), volatile=True)
dist = self.policy_model(state).data.cpu()
dist = dist * torch.linspace(self.Vmin, self.Vmax, self.n_atoms)
action = dist.sum(2).max(1)[1].numpy()[0]
return action
def projection_distribution(self,next_state, rewards, dones):
delta_z = float(self.Vmax - self.Vmin) / (self.n_atoms - 1)
support = torch.linspace(self.Vmin, self.Vmax, self.n_atoms)
next_dist = self.target_model(next_state).data.cpu() * support
next_action = next_dist.sum(2).max(1)[1]
next_action = next_action.unsqueeze(1).unsqueeze(1).expand(next_dist.size(0), 1, next_dist.size(2))
next_dist = next_dist.gather(1, next_action).squeeze(1)
rewards = rewards.unsqueeze(1).expand_as(next_dist)
dones = dones.unsqueeze(1).expand_as(next_dist)
support = support.unsqueeze(0).expand_as(next_dist)
Tz = rewards + (1 - dones) * 0.99 * support
Tz = Tz.clamp(min=self.Vmin, max=self.Vmax)
b = (Tz - self.Vmin) / delta_z
l = b.floor().long()
u = b.ceil().long()
offset = torch.linspace(0, (self.batch_size - 1) * self.n_atoms, self.batch_size).long()\
.unsqueeze(1).expand(self.batch_size, self.n_atoms)
proj_dist = torch.zeros(next_dist.size())
proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))
proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))
return proj_dist
def update(self):
        if len(self.memory) < self.batch_size:  # skip the update until the buffer holds at least one batch
return
state, action, reward, next_state, done = self.memory.sample(self.batch_size)
state = Variable(torch.FloatTensor(np.float32(state)))
next_state = Variable(torch.FloatTensor(np.float32(next_state)), volatile=True)
action = Variable(torch.LongTensor(action))
reward = torch.FloatTensor(reward)
done = torch.FloatTensor(np.float32(done))
proj_dist = self.projection_distribution(next_state, reward, done)
dist = self.policy_model(state)
action = action.unsqueeze(1).unsqueeze(1).expand(self.batch_size, 1, self.n_atoms)
dist = dist.gather(1, action).squeeze(1)
dist.data.clamp_(0.01, 0.99)
loss = -(Variable(proj_dist) * dist.log()).sum(1)
loss = loss.mean()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.policy_model.reset_noise()
self.target_model.reset_noise()
```
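The distributional projection in `projection_distribution` is the subtlest step of the algorithm: the shifted support `r + (1 - done) * gamma * z` has to be re-binned onto the fixed atoms. A stripped-down, single-transition sketch of that re-binning (all concrete values are illustrative; the integer-index guard shown here is the standard fix for a corner case the method above does not handle):
```python
import torch
Vmin, Vmax, n_atoms, gamma = -10.0, 10.0, 51, 0.9
support = torch.linspace(Vmin, Vmax, n_atoms)
delta_z = (Vmax - Vmin) / (n_atoms - 1)
dist = torch.softmax(torch.randn(n_atoms), dim=0)  # next-state value distribution
reward, done = 0.5, 0.0
Tz = (reward + (1 - done) * gamma * support).clamp(Vmin, Vmax)  # shifted atoms
b = (Tz - Vmin) / delta_z  # fractional atom index of each shifted atom
l, u = b.floor().long(), b.ceil().long()
l[(u > 0) & (l == u)] -= 1  # guard: an exactly-integer b would otherwise drop mass
u[(l < (n_atoms - 1)) & (l == u)] += 1
proj = torch.zeros(n_atoms)
proj.index_add_(0, l, dist * (u.float() - b))  # share of mass to the lower atom
proj.index_add_(0, u, dist * (b - l.float()))  # share of mass to the upper atom
print(float(proj.sum()))  # ~1.0: probability mass is preserved
```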
#### File: codes/TD3/task1_train.py
```python
import sys,os
curr_path = os.path.dirname(os.path.abspath(__file__))  # absolute path of this file
parent_path = os.path.dirname(curr_path)  # parent directory
sys.path.append(parent_path)  # add the parent directory to sys.path
import torch
import gym
import numpy as np
import datetime
from TD3.agent import TD3
from common.plot import plot_rewards
from common.utils import save_results,make_dir
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current timestamp
class TD3Config:
def __init__(self) -> None:
        self.algo = 'TD3'  # algorithm name
        self.env_name = 'Pendulum-v1'  # environment name
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect GPU
        self.train_eps = 600  # number of training episodes
        self.start_timestep = 25e3  # Time steps initial random policy is used
        self.epsilon_start = 50  # Episodes initial random policy is used
        self.eval_freq = 10  # How often (episodes) we evaluate
        self.max_timestep = 100000  # Max time steps to run environment
        self.expl_noise = 0.1  # Std of Gaussian exploration noise
        self.batch_size = 256  # Batch size for both actor and critic
        self.gamma = 0.9  # discount factor
        self.lr = 0.0005  # learning rate
        self.policy_noise = 0.2  # Noise added to target policy during critic update
        self.noise_clip = 0.3  # Range to clip target policy noise
        self.policy_freq = 2  # Frequency of delayed policy updates
class PlotConfig(TD3Config):
def __init__(self) -> None:
super().__init__()
        self.result_path = curr_path+"/outputs/" + self.env_name + \
            '/'+curr_time+'/results/'  # path for saving results
        self.model_path = curr_path+"/outputs/" + self.env_name + \
            '/'+curr_time+'/models/'  # path for saving models
        self.save = True  # whether to save figures
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval(env,agent, seed, eval_episodes=10):
eval_env = gym.make(env)
eval_env.seed(seed + 100)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
# eval_env.render()
action = agent.choose_action(np.array(state))
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
def train(cfg,env,agent):
    print('Start training!')
    print(f'Env: {cfg.env_name}, Algorithm: {cfg.algo}, Device: {cfg.device}')
    rewards = []  # record the reward of every episode
    ma_rewards = []  # record the moving-average reward of every episode
for i_ep in range(int(cfg.train_eps)):
ep_reward = 0
ep_timesteps = 0
state, done = env.reset(), False
while not done:
ep_timesteps += 1
# Select action randomly or according to policy
if i_ep < cfg.epsilon_start:
action = env.action_space.sample()
else:
action = (
agent.choose_action(np.array(state))
+ np.random.normal(0, max_action * cfg.expl_noise, size=n_actions)
).clip(-max_action, max_action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if ep_timesteps < env._max_episode_steps else 0
# Store data in replay buffer
agent.memory.push(state, action, next_state, reward, done_bool)
state = next_state
ep_reward += reward
# Train agent after collecting sufficient data
if i_ep+1 >= cfg.epsilon_start:
agent.update()
if (i_ep+1)%10 == 0:
            print('Episode: {}/{}, Reward: {:.2f}'.format(i_ep+1, cfg.train_eps, ep_reward))
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
    print('Training finished!')
return rewards, ma_rewards
if __name__ == "__main__":
cfg = TD3Config()
plot_cfg = PlotConfig()
env = gym.make(cfg.env_name)
    env.seed(1)  # random seed
torch.manual_seed(1)
np.random.seed(1)
n_states = env.observation_space.shape[0]
n_actions = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
agent = TD3(n_states,n_actions,max_action,cfg)
rewards,ma_rewards = train(cfg,env,agent)
make_dir(plot_cfg.result_path,plot_cfg.model_path)
agent.save(path=plot_cfg.model_path)
save_results(rewards,ma_rewards,tag='train',path=plot_cfg.result_path)
plot_rewards(rewards,ma_rewards,plot_cfg,tag="train")
``` |
{
"source": "JohnJim0816/rl-tutorials",
"score": 2
} |
#### File: rl-tutorials/common/utils.py
```python
import os
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.font_manager import FontProperties  # font utilities
def chinese_font():
    ''' Set a Chinese font; adjust the path for your own machine, otherwise the default font is used
    '''
    try:
        font = FontProperties(
            fname='/System/Library/Fonts/STHeiti Light.ttc', size=15)  # system font path (macOS here)
    except:
        font = None
    return font
def plot_rewards_cn(rewards, ma_rewards, plot_cfg, tag='train'):
    ''' Plot the learning curve with Chinese labels
    '''
sns.set()
plt.figure()
plt.title(u"{}环境下{}算法的学习曲线".format(plot_cfg.env_name,
plot_cfg.algo_name), fontproperties=chinese_font())
plt.xlabel(u'回合数', fontproperties=chinese_font())
plt.plot(rewards)
plt.plot(ma_rewards)
plt.legend((u'奖励', u'滑动平均奖励',), loc="best", prop=chinese_font())
if plot_cfg.save:
plt.savefig(plot_cfg.result_path+f"{tag}_rewards_curve_cn")
# plt.show()
def plot_rewards(rewards, ma_rewards, plot_cfg, tag='train'):
sns.set()
    plt.figure()  # create a fresh figure so several plots can coexist
    plt.title("learning curve on {} of {} for {}".format(
        plot_cfg.device, plot_cfg.algo_name, plot_cfg.env_name))
    plt.xlabel('episodes')
plt.plot(rewards, label='rewards')
plt.plot(ma_rewards, label='ma rewards')
plt.legend()
if plot_cfg.save:
plt.savefig(plot_cfg.result_path+"{}_rewards_curve".format(tag))
plt.show()
def plot_losses(losses, algo="DQN", save=True, path='./'):
sns.set()
plt.figure()
plt.title("loss curve of {}".format(algo))
    plt.xlabel('episodes')
    plt.plot(losses, label='losses')
plt.legend()
if save:
plt.savefig(path+"losses_curve")
plt.show()
def save_results(rewards, ma_rewards, tag='train', path='./results'):
    ''' Save rewards to .npy files
    '''
    np.save(path+'{}_rewards.npy'.format(tag), rewards)
    np.save(path+'{}_ma_rewards.npy'.format(tag), ma_rewards)
    print('Results saved!')
def make_dir(*paths):
    ''' Create the given directories (including parents)
    '''
for path in paths:
Path(path).mkdir(parents=True, exist_ok=True)
def del_empty_dir(*paths):
    ''' Delete all empty sub-directories under the given paths
    '''
for path in paths:
dirs = os.listdir(path)
for dir in dirs:
if not os.listdir(os.path.join(path, dir)):
os.removedirs(os.path.join(path, dir))
```
#### File: rl-tutorials/DQN/dqn.py
```python
'''off-policy
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
import math
import numpy as np
class MLP(nn.Module):
def __init__(self, state_dim,action_dim,hidden_dim=128):
""" 初始化q网络,为全连接网络
state_dim: 输入的特征数即环境的状态维度
action_dim: 输出的动作维度
"""
super(MLP, self).__init__()
self.fc1 = nn.Linear(state_dim, hidden_dim) # 输入层
self.fc2 = nn.Linear(hidden_dim,hidden_dim) # 隐藏层
self.fc3 = nn.Linear(hidden_dim, action_dim) # 输出层
def forward(self, x):
# 各层对应的激活函数
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
class ReplayBuffer:
def __init__(self, capacity):
        self.capacity = capacity  # replay-buffer capacity
        self.buffer = []  # storage
        self.position = 0
    def push(self, state, action, reward, next_state, done):
        ''' The buffer acts as a ring queue: once full, the oldest transition is overwritten
        '''
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = (self.position + 1) % self.capacity
    def sample(self, batch_size):
        batch = random.sample(self.buffer, batch_size)  # sample a random mini-batch of transitions
        state, action, reward, next_state, done = zip(*batch)  # unzip into states, actions, etc.
        return state, action, reward, next_state, done
    def __len__(self):
        ''' Number of transitions currently stored
        '''
return len(self.buffer)
class DQN:
def __init__(self, state_dim, action_dim, cfg):
        self.action_dim = action_dim  # number of actions
        self.device = cfg.device  # device (cpu or gpu)
        self.gamma = cfg.gamma  # reward discount factor
        # epsilon-greedy parameters
        self.frame_idx = 0  # step counter for epsilon decay
        self.epsilon = lambda frame_idx: cfg.epsilon_end + \
            (cfg.epsilon_start - cfg.epsilon_end) * \
            math.exp(-1. * frame_idx / cfg.epsilon_decay)
        self.batch_size = cfg.batch_size
        self.policy_net = MLP(state_dim, action_dim,hidden_dim=cfg.hidden_dim).to(self.device)
        self.target_net = MLP(state_dim, action_dim,hidden_dim=cfg.hidden_dim).to(self.device)
        for target_param, param in zip(self.target_net.parameters(),self.policy_net.parameters()):  # copy parameters into the target network
            target_param.data.copy_(param.data)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)  # optimizer
        self.memory = ReplayBuffer(cfg.memory_capacity)  # experience replay
def choose_action(self, state):
        ''' Epsilon-greedy action selection
        '''
        self.frame_idx += 1
        if random.random() > self.epsilon(self.frame_idx):
            with torch.no_grad():
                state = torch.tensor([state], device=self.device, dtype=torch.float32)
                q_values = self.policy_net(state)
                action = q_values.max(1)[1].item()  # greedy action: argmax over Q-values
else:
action = random.randrange(self.action_dim)
return action
def update(self):
        if len(self.memory) < self.batch_size:  # skip the update until the buffer holds at least one batch
            return
        # sample a mini-batch of transitions from replay memory
        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(
            self.batch_size)
        # convert to tensors
        state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
        action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
        reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
        next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
        done_batch = torch.tensor(np.float32(done_batch), device=self.device)
        q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)  # Q(s_t, a) for the actions actually taken
        next_q_values = self.target_net(next_state_batch).max(1)[0].detach()  # max_a' Q_target(s_{t+1}, a')
        # expected Q-value; for terminal transitions done=1, so it reduces to the reward
        expected_q_values = reward_batch + self.gamma * next_q_values * (1-done_batch)
        loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))  # mean squared error loss
        # optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters():  # clip gradients to avoid explosion
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
def save(self, path):
torch.save(self.target_net.state_dict(), path+'dqn_checkpoint.pth')
def load(self, path):
self.target_net.load_state_dict(torch.load(path+'dqn_checkpoint.pth'))
for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
param.data.copy_(target_param.data)
```
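Note that `update()` above only trains `policy_net`; nothing in this file ever re-synchronises `target_net`, so the surrounding training loop is expected to do that periodically. A minimal sketch of the usual hard copy (the interval name and value are illustrative assumptions):
```python
def sync_target(agent: "DQN") -> None:
    """Hard-copy the policy network's weights into the target network."""
    agent.target_net.load_state_dict(agent.policy_net.state_dict())
# inside a hypothetical training loop:
#     if (i_ep + 1) % target_update == 0:  # e.g. every 4 episodes
#         sync_target(agent)
```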
#### File: rl-tutorials/PPO/task0.py
```python
import sys,os
curr_path = os.path.dirname(os.path.abspath(__file__))  # absolute path of this file
parent_path = os.path.dirname(curr_path)  # parent directory
sys.path.append(parent_path)  # add the parent directory to sys.path
import gym
import torch
import numpy as np
import datetime
from common.utils import plot_rewards
from common.utils import save_results,make_dir
from PPO.ppo2 import PPO
curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current timestamp
class Config:
def __init__(self) -> None:
        ############################### Environment hyperparameters ###############################
        self.algo_name = "PPO"  # algorithm name
        self.env_name = 'CartPole-v0'  # environment name
        self.continuous = False  # whether the action space is continuous
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect GPU
        self.seed = 10  # random seed; 0 disables seeding
        self.train_eps = 200  # number of training episodes
        self.test_eps = 20  # number of test episodes
        ################################################################################
        ############################### Algorithm hyperparameters ###############################
        self.batch_size = 5  # mini-batch size for SGD
        self.gamma = 0.95  # discount factor
        self.n_epochs = 4
        self.actor_lr = 0.0003  # actor learning rate
        self.critic_lr = 0.0003  # critic learning rate
        self.gae_lambda = 0.95
        self.policy_clip = 0.2
        self.hidden_dim = 256
        self.update_fre = 20  # policy update frequency (environment steps)
        ################################################################################
        ############################### Result-saving parameters ###############################
        self.result_path = curr_path+"/outputs/" + self.env_name + \
            '/'+curr_time+'/results/'  # path for saving results
        self.model_path = curr_path+"/outputs/" + self.env_name + \
            '/'+curr_time+'/models/'  # path for saving models
        self.save = True  # whether to save figures
        ################################################################################
def env_agent_config(cfg):
    ''' Create the environment and the agent
    '''
    env = gym.make(cfg.env_name)  # create the environment
    state_dim = env.observation_space.shape[0]  # state dimension
    if cfg.continuous:
        action_dim = env.action_space.shape[0]  # action dimension (continuous)
    else:
        action_dim = env.action_space.n  # action dimension (discrete)
    agent = PPO(state_dim, action_dim, cfg)  # create the agent
    if cfg.seed !=0:  # set random seeds
        torch.manual_seed(cfg.seed)
        env.seed(cfg.seed)
        np.random.seed(cfg.seed)
return env, agent
def train(cfg,env,agent):
    print('Start training!')
    print(f'Env: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = []  # record the reward of every episode
    ma_rewards = []  # record the moving-average reward of every episode
steps = 0
for i_ep in range(cfg.train_eps):
state = env.reset()
done = False
ep_reward = 0
while not done:
action, prob, val = agent.choose_action(state)
state_, reward, done, _ = env.step(action)
steps += 1
ep_reward += reward
agent.memory.push(state, action, prob, val, reward, done)
if steps % cfg.update_fre == 0:
agent.update()
state = state_
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
if (i_ep+1)%10 == 0:
print(f"回合:{i_ep+1}/{cfg.train_eps},奖励:{ep_reward:.2f}")
print('完成训练!')
return rewards,ma_rewards
def test(cfg,env,agent):
    print('Start testing!')
    print(f'Env: {cfg.env_name}, Algorithm: {cfg.algo_name}, Device: {cfg.device}')
    rewards = []  # record the reward of every episode
    ma_rewards = []  # record the moving-average reward of every episode
for i_ep in range(cfg.test_eps):
state = env.reset()
done = False
ep_reward = 0
while not done:
action, prob, val = agent.choose_action(state)
state_, reward, done, _ = env.step(action)
ep_reward += reward
state = state_
rewards.append(ep_reward)
if ma_rewards:
ma_rewards.append(
0.9*ma_rewards[-1]+0.1*ep_reward)
else:
ma_rewards.append(ep_reward)
        print('Episode: {}/{}, Reward: {}'.format(i_ep+1, cfg.test_eps, ep_reward))
    print('Testing finished!')
return rewards,ma_rewards
if __name__ == "__main__":
cfg = Config()
    # training
env,agent = env_agent_config(cfg)
rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)  # create folders for results and models
agent.save(path=cfg.model_path)
save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)
plot_rewards(rewards, ma_rewards, cfg, tag="train")
    # testing
env,agent = env_agent_config(cfg)
agent.load(path=cfg.model_path)
rewards,ma_rewards = test(cfg,env,agent)
save_results(rewards,ma_rewards,tag='test',path=cfg.result_path)
plot_rewards(rewards,ma_rewards,cfg,tag="test")
```
#### File: rl-tutorials/Sarsa/agent.py
```python
import numpy as np
from collections import defaultdict
import torch
class Sarsa(object):
def __init__(self,
action_dim,sarsa_cfg,):
self.action_dim = action_dim # number of actions
self.lr = sarsa_cfg.lr # learning rate
self.gamma = sarsa_cfg.gamma
self.epsilon = sarsa_cfg.epsilon
self.Q = defaultdict(lambda: np.zeros(action_dim))
        # self.Q = np.zeros((state_dim, action_dim))  # Q-table
def choose_action(self, state):
best_action = np.argmax(self.Q[state])
# action = best_action
action_probs = np.ones(self.action_dim, dtype=float) * self.epsilon / self.action_dim
action_probs[best_action] += (1.0 - self.epsilon)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
return action
def update(self, state, action, reward, next_state, next_action,done):
Q_predict = self.Q[state][action]
if done:
Q_target = reward # terminal state
else:
Q_target = reward + self.gamma * self.Q[next_state][next_action]
self.Q[state][action] += self.lr * (Q_target - Q_predict)
def save(self,path):
        ''' Save the Q-table to file
        '''
import dill
torch.save(
obj=self.Q,
f=path+"sarsa_model.pkl",
pickle_module=dill
)
def load(self, path):
        ''' Load the Q-table from file
        '''
import dill
self.Q =torch.load(f=path+'sarsa_model.pkl',pickle_module=dill)
``` |
{
"source": "johnjmolina/MLKyoto",
"score": 3
} |
#### File: utils/pdesolver/etdrk.py
```python
import numpy as np
from . import phi
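# phi.phin(n, z) evaluates the exponential-integrator phi-functions,
# phi_0(z) = exp(z) and phi_{n+1}(z) = (phi_n(z) - 1/n!) / z, presumably at
# dps decimal digits of precision to avoid cancellation for small |z|.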
class _etdrk:
def __init__(self, *, G, dt):
self.G = G
self.dt = dt
class etdrk1(_etdrk):
def __init__(self, *, L, G, dt, dps=100):
super().__init__(G=G, dt=dt)
hL = dt*L.reshape(-1)
self.phi = np.array([phi.phin(n, hL, dps=dps).reshape(L.shape) for n in range(2)])
def step(self, u):
u[...] = self.phi[0,...] * u[...] + self.dt * self.phi[1,...] * self.G(u)
return u
class etdrk2(_etdrk):
def __init__(self, *, L, G, dt, dtype, dps=100):
super().__init__(G=G, dt=dt)
hL = dt*L.reshape(-1)
hLh = hL / 2
phi1 = phi.phin(1, hL, dps=dps).reshape(L.shape)
phi2 = phi.phin(2, hL, dps=dps).reshape(L.shape)
self.phi = np.array([phi.phin(n, hL, dps=dps).reshape(L.shape) for n in range(1)])
self.phih = np.array([phi.phin(n, hLh, dps=dps).reshape(L.shape) for n in range(2)])
self.aux = np.zeros((3,) + L.shape, dtype=dtype)
self.hc1 = self.dt*(phi1[...]-2.0*phi2[...])
self.hc2 = 2.0*self.dt*phi2[...]
def step(self, u):
Ui, G1, G2 = self.aux[0,...], self.aux[1,...], self.aux[2,...]
# G1
G1[...] = self.G(u)
# U2, G2
Ui[...] = self.phih[0,...]*u[...] + self.dt*(0.5*self.phih[1,...]*G1[...])
G2[...] = self.G(Ui)
# u_{n+1}
u[...] = self.phi[0,...]*u[...] + self.hc1[...]*G1[...] + self.hc2[...]*G2[...]
return u
class etdrk3(_etdrk):
""" Heun's method : worst case order 2.75"""
def __init__(self, *, L, G, dt, dtype, dps=100):
super().__init__(G=G, dt=dt)
hL = dt*L.reshape(-1)
hL13 = hL/3.0
hL23 = 2.0*hL13
phi1 = phi.phin(1, hL, dps=dps).reshape(L.shape)
phi1_13 = phi.phin(1, hL13, dps=dps).reshape(L.shape)
phi1_23 = phi.phin(1, hL23, dps=dps).reshape(L.shape)
phi2 = phi.phin(2, hL, dps=dps).reshape(L.shape)
phi2_23 = phi.phin(2, hL23, dps=dps).reshape(L.shape)
self.phi = np.array([phi.phin(n, hL, dps=dps).reshape(L.shape) for n in range(1)])
self.phi13 = np.array([phi.phin(n, hL13,dps=dps).reshape(L.shape) for n in range(1)])
self.phi23 = np.array([phi.phin(n, hL23,dps=dps).reshape(L.shape) for n in range(1)])
self.hc1 = self.dt*(phi1 - 1.5*phi2)
self.hc3 = self.dt*1.5*phi2
self.hc1_2 = self.dt/3.0*phi1_13
self.hc1_3 = self.dt/3.0*(2.0*phi1_23 - 4.0*phi2_23)
self.hc2_3 = self.dt/3.0*(4.0*phi2_23)
self.aux = np.zeros((3,) + L.shape, dtype=dtype)
def step(self, u):
Ui = self.aux[0,...]
G1 = self.aux[1,...]
G2 = self.aux[2,...]
G3 = self.aux[2,...] # yes G3 and G2 are aliased
#G1
G1[...] = self.G(u)
#U2(G1), G2
Ui[...] = self.phi13[0,...]*u[...] + self.hc1_2[...]*G1[...]
G2[...] = self.G(Ui)
#U3(G1, G2), G3
Ui[...] = self.phi23[0,...]*u[...] + self.hc1_3[...]*G1[...] + self.hc2_3[...]*G2[...]
G3[...] = self.G(Ui)
#u_{n+1}(G1, G3)
u[...] = self.phi[0,...]*u[...] + self.hc1[...]*G1[...] + self.hc3[...]*G3[...]
return u
class etdrk45(_etdrk):
""" Hochbruck and Ostermann's fourth order ETDRK method"""
def __init__(self, *, L, G, dt, dtype, dps=100):
super().__init__(G=G, dt=dt)
# temporary data
hL = dt*L.reshape(-1)
hLh = hL/2
phi3 = phi.phin(3, hL, dps=dps).reshape(L.shape)
phih3 = phi.phin(3, hLh,dps=dps).reshape(L.shape)
# persistent data
self.phi = np.array([phi.phin(n, hL, dps=dps).reshape(L.shape) for n in range(3)])
self.phih = np.array([phi.phin(n, hLh, dps=dps).reshape(L.shape) for n in range(3)])
self.a52 = phi.phi_a52(phi2_dt = self.phi[2], phi3_dt = phi3, phi2_hdt = self.phih[2], phi3_hdt = phih3)
self.aux = np.zeros((6,)+L.shape, dtype=dtype)
self.hc1 = self.dt*(self.phi[1,:] - 3*self.phi[2,:] + 4*phi3[:])
self.hc4 = self.dt*(4*phi3[:] - self.phi[2,:])
self.hc5 = self.dt*(4.0*self.phi[2,:] - 8.0*phi3[:])
def step(self, u):
Ui = self.aux[0,...]
G1 = self.aux[1,...]
G2 = self.aux[2,...]
G3 = self.aux[3,...]
G4 = self.aux[4,...]
G5 = self.aux[5,...]
uh = self.aux[5,...] # yes, uh and G5 are aliased
uh[...] = self.phih[0,...]*u[...]
#G1
G1[...] = self.G(u)
#U2(G1),G2
Ui[...] = uh[...] + self.dt*(0.5*self.phih[1,...]*G1[...])
G2[...] = self.G(Ui)
#U3(G1, G2),G3
Ui[...] = uh[...] + self.dt*((0.5*self.phih[1,...]-self.phih[2,...])*G1[...] + self.phih[2,...]*G2[...])
G3[...] = self.G(Ui)
#U4(G1, G2, G3),G4
Ui[...] = self.phi[0,...]*u[...] + self.dt*((self.phi[1,...] - 2*self.phi[2,...])*G1[...] + self.phi[2,...]*(G2[...] + G3[...]))
G4[...] = self.G(Ui)
#U5(G1, G2, G3, G4),G5
Ui[...] = uh[...] + self.dt*((0.5*self.phih[1,...] - 0.25*self.phih[2,...] - self.a52[...])*G1[...] + self.a52[...]*(G2[...] + G3[...]) + (0.25*self.phih[2,...] - self.a52[...])*G4[...])
G5[...] = self.G(Ui)
# u_{n+1}(G1, G4, G5)
u[...] = self.phi[0,...]*u[...] + self.hc1[...]*G1[...] + self.hc4[...]*G4[...] + self.hc5[...]*G5[...]
return u
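# Usage sketch (illustrative, not part of the module): for a semilinear system
# du/dt = L*u + G(u) with diagonal linear part L (e.g. a spectral
# discretisation), build a stepper once and iterate; step() updates u in place
# and also returns it:
#     stepper = etdrk45(L=L_hat, G=nonlinear_term, dt=1e-3, dtype=np.complex128)
#     for _ in range(n_steps):
#         u_hat = stepper.step(u_hat)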
``` |
{
"source": "johnjohndoe/buildnotify",
"score": 2
} |
#### File: buildnotify/buildnotifylib/app_notification.py
```python
from typing import Optional
from PyQt5.QtWidgets import QWidget
from buildnotifylib.core.projects import OverallIntegrationStatus
from buildnotifylib.config import Config
from buildnotifylib.notifications import Notification
from buildnotifylib.project_status_notification import ProjectStatusNotification
class AppNotification(object):
def __init__(self, config: Config, widget: QWidget):
self.config = config
self.notification = Notification(widget)
self.integration_status: Optional[OverallIntegrationStatus] = None
def update_projects(self, new_integration_status: OverallIntegrationStatus):
if self.integration_status is not None:
ProjectStatusNotification(self.config, self.integration_status, new_integration_status,
self.notification).show_notifications()
self.integration_status = new_integration_status
```
#### File: buildnotifylib/core/background_event.py
```python
from typing import Callable, Any
from PyQt5.QtCore import QThread, pyqtSignal, QObject
class BackgroundEvent(QThread):
completed = pyqtSignal('PyQt_PyObject')
def __init__(self, task: Callable[[], Any], parent: QObject = None):
QThread.__init__(self, parent)
self.task = task
def run(self):
data = self.task()
self.completed.emit(data)
```
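A typical wiring of this helper from the GUI side: the blocking callable runs on the worker thread and the result arrives back through the `completed` signal. A hypothetical sketch (names are illustrative, and a running Qt application is assumed since cross-thread signal delivery needs an event loop):
```python
def fetch_status():
    return "ok"  # stands in for a slow, blocking call
def on_done(result):
    print(result)
event = BackgroundEvent(fetch_status)  # run() executes fetch_status off the UI thread
event.completed.connect(on_done)  # the return value is delivered via the signal
event.start()
```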
#### File: buildnotifylib/core/filtered_continuous_integration_server.py
```python
from typing import List
from buildnotifylib.core.project import Project
from buildnotifylib.core.continous_integration_server import ContinuousIntegrationServer
class FilteredContinuousIntegrationServer(object):
def __init__(self, server: ContinuousIntegrationServer, filter_projects: List[str]):
self.server = server
self.filter_projects = filter_projects
self.unavailable = server.unavailable
self.url = server.url
def get_projects(self) -> List[Project]:
return [project for project in self.server.get_projects() if project.name not in self.filter_projects]
```
#### File: buildnotify/buildnotifylib/preferences.py
```python
from typing import Optional, List, Tuple
from PyQt5.QtCore import QStringListModel
from PyQt5.QtWidgets import QDialog, QWidget
from buildnotifylib.config import Config, Preferences
from buildnotifylib.generated.preferences_ui import Ui_Preferences
from buildnotifylib.server_configuration_dialog import ServerConfigurationDialog
class PreferencesDialog(QDialog):
def __init__(self, conf: Config, parent: QWidget = None):
QDialog.__init__(self, parent)
self.conf = conf
self.ui = Ui_Preferences()
self.ui.setupUi(self)
self.checkboxes = dict(successfulBuild=self.ui.successfulBuildsCheckbox,
brokenBuild=self.ui.brokenBuildsCheckbox, fixedBuild=self.ui.fixedBuildsCheckbox,
stillFailingBuild=self.ui.stillFailingBuildsCheckbox,
connectivityIssues=self.ui.connectivityIssuesCheckbox,
lastBuildTimeForProject=self.ui.showLastBuildTimeCheckbox)
self.set_values_from_config()
# Connect up the buttons.
self.ui.addButton.clicked.connect(self.add_server)
self.ui.removeButton.clicked.connect(self.remove_element)
self.ui.buttonBox.accepted.connect(self.accept)
self.ui.configureProjectButton.clicked.connect(self.configure_projects)
def set_values_from_config(self):
self.ui.cctrayPathList.setModel(QStringListModel(self.conf.get_urls()))
self.ui.cctrayPathList.clicked.connect(lambda _: self.item_selection_changed(True))
self.ui.cctrayPathList.doubleClicked.connect(self.configure_projects)
self.ui.removeButton.clicked.connect(lambda _: self.item_selection_changed(False))
for key, checkbox in self.checkboxes.items():
checkbox.setChecked(self.conf.get_value(str(key)))
self.ui.pollingIntervalSpinBox.setValue(self.conf.get_interval_in_seconds())
self.ui.scriptCheckbox.setChecked(self.conf.get_custom_script_enabled())
self.ui.scriptLineEdit.setText(self.conf.get_custom_script())
self.ui.sortBuildByLastBuildTime.setChecked(self.conf.get_sort_by_last_build_time())
self.ui.sortBuildByName.setChecked(self.conf.get_sort_by_name())
self.ui.showLastBuildLabelCheckbox.setChecked(self.conf.get_show_last_build_label())
def item_selection_changed(self, status):
self.ui.configureProjectButton.setEnabled(status)
def add_server(self):
server_config = ServerConfigurationDialog(None, self.conf, self).open()
if server_config is not None:
self.conf.save_server_config(server_config)
urls = self.ui.cctrayPathList.model().stringList()
urls.append(server_config.url)
self.ui.cctrayPathList.setModel(QStringListModel(urls))
def remove_element(self):
index = self.ui.cctrayPathList.selectionModel().currentIndex()
urls = self.ui.cctrayPathList.model().stringList()
urls.pop(index.row())
self.ui.cctrayPathList.setModel(QStringListModel(urls))
def configure_projects(self):
url = self.ui.cctrayPathList.selectionModel().currentIndex().data()
if not url:
return
server_config = ServerConfigurationDialog(url, self.conf, self).open()
if server_config is not None:
self.conf.save_server_config(server_config)
def get_urls(self) -> List[str]:
return [str(url) for url in self.ui.cctrayPathList.model().stringList()]
def get_interval_in_seconds(self) -> int:
return self.ui.pollingIntervalSpinBox.value()
def get_selections(self) -> List[Tuple[str, bool]]:
return [(key, checkbox.isChecked()) for (key, checkbox) in list(self.checkboxes.items())]
def open(self) -> Optional[Preferences]: # type: ignore
if self.exec_() == QDialog.Accepted:
return Preferences(
urls=self.get_urls(),
interval=self.get_interval_in_seconds(),
custom_script_text=self.ui.scriptLineEdit.text(),
custom_script_checked=self.ui.scriptCheckbox.isChecked(),
sort_by_build_time=self.ui.sortBuildByLastBuildTime.isChecked(),
sort_by_name=self.ui.sortBuildByName.isChecked(),
selections=self.get_selections(),
show_last_build_label=self.ui.showLastBuildLabelCheckbox.isChecked()
)
return None
```
#### File: buildnotify/buildnotifylib/version.py
```python
from os import getenv
VERSION = "2.1.0"
def version(key='BUILD_LABEL') -> str:
build = getenv(key)
label = '' if build is None else '.dev' + build
return VERSION + label
if __name__ == '__main__':
    print(version())
``` |
{
"source": "johnjohndoe/c3nav",
"score": 2
} |
#### File: c3nav/api/models.py
```python
import string
from django.conf import settings
from django.db import models
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.translation import ugettext_lazy as _
class Token(models.Model):
"""
Token for log in via API
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
secret = models.CharField(max_length=64, verbose_name=_('secret'))
session_auth_hash = models.CharField(_('session auth hash'), max_length=128)
class Meta:
verbose_name = _('login tokens')
verbose_name_plural = _('login tokens')
default_related_name = 'login_tokens'
def save(self, *args, **kwargs):
if not self.secret:
self.secret = get_random_string(64, string.ascii_letters + string.digits)
if not self.session_auth_hash:
# noinspection PyUnresolvedReferences
self.session_auth_hash = self.user.get_session_auth_hash()
super().save(*args, **kwargs)
def get_token(self):
return '%d:%s' % (self.pk, self.secret)
def verify(self):
# noinspection PyUnresolvedReferences
return constant_time_compare(
self.session_auth_hash,
self.user.get_session_auth_hash()
)
@classmethod
def get_by_token(cls, token: str):
try:
pk, secret = token.split(':', 1)
except ValueError:
raise cls.DoesNotExist
if not pk.isdigit() or not secret:
raise cls.DoesNotExist
obj = cls.objects.select_related('user').get(pk=pk, secret=secret)
if not obj.verify():
obj.delete()
raise cls.DoesNotExist
return obj
```
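Since `get_by_token` bundles parsing, lookup, and session-hash verification, a consumer only needs one call plus the `DoesNotExist` case. A hypothetical helper built on it (the function name and calling convention are assumptions, not repository code):
```python
from c3nav.api.models import Token

def user_for_token(raw_token):
    """Return the user behind a 'pk:secret' string, or None if it is invalid."""
    try:
        return Token.get_by_token(raw_token).user
    except Token.DoesNotExist:
        return None
```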
#### File: c3nav/control/admin.py
```python
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from c3nav.control.models import UserPermissions
class UserPermissionsInline(admin.StackedInline):
model = UserPermissions
can_delete = False
class UserAdmin(BaseUserAdmin):
fieldsets = (
(None, {'fields': ('username', 'password', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
readonly_fields = ('last_login', 'date_joined')
inlines = (UserPermissionsInline, )
def get_view_on_site_url(self, obj=None):
return None if obj is None else reverse('control.users.detail', args=[obj.pk])
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
```
#### File: c3nav/editor/signals.py
```python
from c3nav.editor.models import ChangeSet
def set_changeset_author_on_login(sender, user, request, **kwargs):
try:
changeset = request.changeset
except AttributeError:
changeset = ChangeSet.get_for_request(request, as_logged_out=True)
if changeset.pk is not None:
changeset.author = user
changeset.save()
```
#### File: c3nav/editor/utils.py
```python
class DefaultEditUtils:
def __init__(self, request):
self.request = request
@classmethod
def from_obj(cls, obj, request):
return cls(request)
@property
def can_access_child_base_mapdata(self):
return self.request.user_permissions.can_access_base_mapdata
@property
def can_create(self):
return self.can_access_child_base_mapdata
@property
def _geometry_url(self):
return None
@property
def geometry_url(self):
return self._geometry_url if self.can_access_child_base_mapdata else None
class LevelChildEditUtils(DefaultEditUtils):
def __init__(self, level, request):
super().__init__(request)
self.level = level
@classmethod
def from_obj(cls, obj, request):
return cls(obj.level, request)
@property
def _geometry_url(self):
return '/api/editor/geometries/?level=' + str(self.level.primary_level_pk)
class SpaceChildEditUtils(DefaultEditUtils):
def __init__(self, space, request):
super().__init__(request)
self.space = space
@classmethod
def from_obj(cls, obj, request):
return cls(obj.space, request)
@property
def can_access_child_base_mapdata(self):
return (self.request.user_permissions.can_access_base_mapdata or
self.space.base_mapdata_accessible or
self.space.pk in self.request.user_space_accesses)
@property
def _geometry_url(self):
return '/api/editor/geometries/?space='+str(self.space.pk)
```
#### File: editor/views/edit.py
```python
import mimetypes
import os
import typing
from contextlib import suppress
from django.conf import settings
from django.contrib import messages
from django.core.cache import cache
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist, PermissionDenied
from django.db import IntegrityError, models
from django.db.models import Q
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import etag
from c3nav.editor.forms import GraphEdgeSettingsForm, GraphEditorActionForm
from c3nav.editor.utils import DefaultEditUtils, LevelChildEditUtils, SpaceChildEditUtils
from c3nav.editor.views.base import (APIHybridError, APIHybridFormTemplateResponse, APIHybridLoginRequiredResponse,
APIHybridMessageRedirectResponse, APIHybridTemplateContextResponse, etag_func,
sidebar_view)
from c3nav.mapdata.models.access import AccessPermission
from c3nav.mapdata.utils.user import can_access_editor
def child_model(request, model: typing.Union[str, models.Model], kwargs=None, parent=None):
model = request.changeset.wrap_model(model)
related_name = model._meta.default_related_name
if parent is not None:
qs = getattr(parent, related_name)
if hasattr(model, 'q_for_request'):
qs = qs.filter(model.q_for_request(request))
count = qs.count()
else:
count = None
return {
'title': model._meta.verbose_name_plural,
'url': reverse('editor.'+related_name+'.list', kwargs=kwargs),
'count': count,
}
@sidebar_view(api_hybrid=True)
@etag(etag_func)
def main_index(request):
Level = request.changeset.wrap_model('Level')
return APIHybridTemplateContextResponse('editor/index.html', {
'levels': Level.objects.filter(Level.q_for_request(request), on_top_of__isnull=True),
'can_create_level': (request.user_permissions.can_access_base_mapdata and
request.changeset.can_edit(request)),
'child_models': [
child_model(request, 'LocationGroupCategory'),
child_model(request, 'LocationGroup'),
child_model(request, 'DynamicLocation'),
child_model(request, 'WayType'),
child_model(request, 'AccessRestriction'),
child_model(request, 'AccessRestrictionGroup'),
child_model(request, 'LabelSettings'),
child_model(request, 'Source'),
],
}, fields=('can_create_level', 'child_models'))
@sidebar_view(api_hybrid=True)
@etag(etag_func)
def level_detail(request, pk):
Level = request.changeset.wrap_model('Level')
qs = Level.objects.filter(Level.q_for_request(request))
level = get_object_or_404(qs.select_related('on_top_of').prefetch_related('levels_on_top'), pk=pk)
if request.user_permissions.can_access_base_mapdata:
submodels = ('Building', 'Space', 'Door')
else:
submodels = ('Space', )
return APIHybridTemplateContextResponse('editor/level.html', {
'levels': Level.objects.filter(Level.q_for_request(request), on_top_of__isnull=True),
'level': level,
'level_url': 'editor.levels.detail',
'level_as_pk': True,
'can_edit_graph': request.user_permissions.can_access_base_mapdata,
'can_create_level': (request.user_permissions.can_access_base_mapdata and
request.changeset.can_edit(request)),
'child_models': [child_model(request, model_name, kwargs={'level': pk}, parent=level)
for model_name in submodels],
'levels_on_top': level.levels_on_top.filter(Level.q_for_request(request)).all(),
'geometry_url': ('/api/editor/geometries/?level='+str(level.primary_level_pk)
if request.user_permissions.can_access_base_mapdata else None),
}, fields=('level', 'can_edit_graph', 'can_create_level', 'child_models', 'levels_on_top'))
@sidebar_view(api_hybrid=True)
@etag(etag_func)
def space_detail(request, level, pk):
Level = request.changeset.wrap_model('Level')
Space = request.changeset.wrap_model('Space')
qs = Space.objects.filter(Space.q_for_request(request))
space = get_object_or_404(qs.select_related('level'), level__pk=level, pk=pk)
edit_utils = SpaceChildEditUtils(space, request)
if edit_utils.can_access_child_base_mapdata:
submodels = ('POI', 'Area', 'Obstacle', 'LineObstacle', 'Stair', 'Ramp', 'Column',
'Hole', 'AltitudeMarker', 'LeaveDescription', 'CrossDescription',
'WifiMeasurement')
else:
submodels = ('POI', 'Area', 'AltitudeMarker', 'LeaveDescription', 'CrossDescription')
return APIHybridTemplateContextResponse('editor/space.html', {
'levels': Level.objects.filter(Level.q_for_request(request), on_top_of__isnull=True),
'level': space.level,
'level_url': 'editor.spaces.list',
'space': space,
'can_edit_graph': request.user_permissions.can_access_base_mapdata,
'child_models': [child_model(request, model_name, kwargs={'space': pk}, parent=space)
for model_name in submodels],
'geometry_url': edit_utils.geometry_url,
}, fields=('level', 'space', 'can_edit_graph', 'child_models'))
def get_changeset_exceeded(request):
return request.user_permissions.max_changeset_changes <= request.changeset.changed_objects_count
@sidebar_view(api_hybrid=True)
@etag(etag_func)
def edit(request, pk=None, model=None, level=None, space=None, on_top_of=None, explicit_edit=False):
changeset_exceeded = get_changeset_exceeded(request)
model_changes = {}
if changeset_exceeded:
model_changes = request.changeset.get_changed_objects_by_model(model)
model = request.changeset.wrap_model(model)
related_name = model._meta.default_related_name
Level = request.changeset.wrap_model('Level')
Space = request.changeset.wrap_model('Space')
can_edit_changeset = request.changeset.can_edit(request)
obj = None
edit_utils = DefaultEditUtils(request)
if pk is not None:
# Edit existing map item
kwargs = {'pk': pk}
qs = model.objects.all()
if hasattr(model, 'q_for_request'):
qs = qs.filter(model.q_for_request(request))
utils_cls = DefaultEditUtils
if level is not None:
# parent object is a level
kwargs.update({'level__pk': level})
qs = qs.select_related('level')
utils_cls = LevelChildEditUtils
elif space is not None:
# parent object is a space
kwargs.update({'space__pk': space})
qs = qs.select_related('space')
utils_cls = SpaceChildEditUtils
obj = get_object_or_404(qs, **kwargs)
edit_utils = utils_cls.from_obj(obj, request)
elif level is not None:
level = get_object_or_404(Level.objects.filter(Level.q_for_request(request)), pk=level)
edit_utils = LevelChildEditUtils(level, request)
elif space is not None:
space = get_object_or_404(Space.objects.filter(Space.q_for_request(request)), pk=space)
edit_utils = SpaceChildEditUtils(space, request)
elif on_top_of is not None:
on_top_of = get_object_or_404(Level.objects.filter(Level.q_for_request(request), on_top_of__isnull=True),
pk=on_top_of)
new = obj is None
if new and not edit_utils.can_create:
raise PermissionDenied
geometry_url = edit_utils.geometry_url
if model.__name__ == 'Space' and not new:
geometry_url = SpaceChildEditUtils(obj, request).geometry_url
# noinspection PyProtectedMember
ctx = {
'path': request.path,
'pk': pk,
'model_name': model.__name__.lower(),
'model_title': model._meta.verbose_name,
'can_edit': can_edit_changeset,
'new': new,
'title': obj.title if obj else None,
'geometry_url': geometry_url,
}
with suppress(FieldDoesNotExist):
ctx.update({
'geomtype': model._meta.get_field('geometry').geomtype,
})
space_id = None
if model == Level:
ctx.update({
'level': obj,
'back_url': reverse('editor.index') if new else reverse('editor.levels.detail', kwargs={'pk': pk}),
'nozoom': True,
})
if not new:
ctx.update({
'on_top_of': obj.on_top_of,
})
elif on_top_of:
ctx.update({
'on_top_of': on_top_of,
'back_url': reverse('editor.levels.detail', kwargs={'pk': on_top_of.pk}),
})
elif model == Space and not new:
level = obj.level
ctx.update({
'level': obj.level,
'back_url': reverse('editor.spaces.detail', kwargs={'level': obj.level.pk, 'pk': pk}),
'nozoom': True,
})
elif model == Space and new:
ctx.update({
'level': level,
'back_url': reverse('editor.spaces.list', kwargs={'level': level.pk}),
'nozoom': True,
})
elif hasattr(model, 'level') and 'Dynamic' not in model.__name__:
if not new:
level = obj.level
ctx.update({
'level': level,
'back_url': reverse('editor.'+related_name+'.list', kwargs={'level': level.pk}),
})
elif hasattr(model, 'space'):
if not new:
space = obj.space
space_id = space.pk
ctx.update({
'level': space.level,
'back_url': reverse('editor.'+related_name+'.list', kwargs={'space': space.pk}),
})
else:
kwargs = {}
if level is not None:
kwargs.update({'level': level})
elif space is not None:
kwargs.update({'space': space})
kwargs.update(get_visible_spaces_kwargs(model, request))
ctx.update({
'back_url': reverse('.'.join(request.resolver_match.url_name.split('.')[:-1]+['list']), kwargs=kwargs),
})
nosave = False
if changeset_exceeded:
if new:
return APIHybridMessageRedirectResponse(
level='error', message=_('You can not create new objects because your changeset is full.'),
redirect_to=ctx['back_url'], status_code=409,
)
elif obj.pk not in model_changes:
messages.warning(request, _('You can not edit this object because your changeset is full.'))
nosave = True
ctx.update({
'nosave': nosave
})
if new:
ctx.update({
'nozoom': True
})
if new and model.__name__ == 'WifiMeasurement' and not request.user.is_authenticated:
return APIHybridLoginRequiredResponse(next=request.path_info, login_url='editor.login', level='info',
message=_('You need to log in to create Wifi Measurements.'))
error = None
delete = getattr(request, 'is_delete', None)
if request.method == 'POST' or (not new and delete):
if nosave:
return APIHybridMessageRedirectResponse(
level='error', message=_('You can not edit this object because your changeset is full.'),
redirect_to=request.path, status_code=409,
)
if not can_edit_changeset:
return APIHybridMessageRedirectResponse(
level='error', message=_('You can not edit changes on this changeset.'),
redirect_to=request.path, status_code=403,
)
if not new and ((request.POST.get('delete') == '1' and delete is not False) or delete):
# Delete this mapitem!
try:
if not request.changeset.get_changed_object(obj).can_delete():
raise PermissionError
except (ObjectDoesNotExist, PermissionError):
return APIHybridMessageRedirectResponse(
level='error',
message=_('You can not delete this object because other objects still depend on it.'),
redirect_to=request.path, status_code=409,
)
if request.POST.get('delete_confirm') == '1' or delete:
with request.changeset.lock_to_edit(request) as changeset:
if changeset.can_edit(request):
obj.delete()
else:
return APIHybridMessageRedirectResponse(
level='error',
message=_('You can not edit changes on this changeset.'),
redirect_to=request.path, status_code=403,
)
if model == Level:
if obj.on_top_of_id is not None:
redirect_to = reverse('editor.levels.detail', kwargs={'pk': obj.on_top_of_id})
else:
redirect_to = reverse('editor.index')
elif model == Space:
redirect_to = reverse('editor.spaces.list', kwargs={'level': obj.level.pk})
else:
redirect_to = ctx['back_url']
return APIHybridMessageRedirectResponse(
level='success',
message=_('Object was successfully deleted.'),
redirect_to=redirect_to
)
ctx['obj_title'] = obj.title
return APIHybridTemplateContextResponse('editor/delete.html', ctx, fields=())
json_body = getattr(request, 'json_body', None)
data = json_body if json_body is not None else request.POST
form = model.EditorForm(instance=model() if new else obj, data=data, is_json=json_body is not None,
request=request, space_id=space_id,
geometry_editable=edit_utils.can_access_child_base_mapdata)
if form.is_valid():
# Update/create objects
obj = form.save(commit=False)
if level is not None:
obj.level = level
if space is not None:
obj.space = space
if on_top_of is not None:
obj.on_top_of = on_top_of
with request.changeset.lock_to_edit(request) as changeset:
if changeset.can_edit(request):
try:
obj.save()
except IntegrityError:
error = APIHybridError(status_code=400, message=_('Duplicate entry.'))
else:
if form.redirect_slugs is not None:
for slug in form.add_redirect_slugs:
obj.redirects.create(slug=slug)
for slug in form.remove_redirect_slugs:
obj.redirects.filter(slug=slug).delete()
form.save_m2m()
return APIHybridMessageRedirectResponse(
level='success',
message=_('Object was successfully saved.'),
redirect_to=ctx['back_url']
)
else:
error = APIHybridError(status_code=403, message=_('You can not edit changes on this changeset.'))
else:
form = model.EditorForm(instance=obj, request=request, space_id=space_id,
geometry_editable=edit_utils.can_access_child_base_mapdata)
ctx.update({
'form': form,
})
return APIHybridFormTemplateResponse('editor/edit.html', ctx, form=form, error=error)
def get_visible_spaces(request):
cache_key = 'editor:visible_spaces:%s:%s' % (
request.changeset.raw_cache_key_by_changes,
AccessPermission.cache_key_for_request(request, with_update=False)
)
visible_spaces = cache.get(cache_key, None)
if visible_spaces is None:
Space = request.changeset.wrap_model('Space')
visible_spaces = tuple(Space.qs_for_request(request).values_list('pk', flat=True))
cache.set(cache_key, visible_spaces, 900)
return visible_spaces
def get_visible_spaces_kwargs(model, request):
kwargs = {}
if hasattr(model, 'target_space'):
visible_spaces = get_visible_spaces(request)
kwargs['target_space_id__in'] = visible_spaces
if hasattr(model, 'origin_space'):
kwargs['origin_space_id__in'] = visible_spaces
return kwargs
@sidebar_view(api_hybrid=True)
@etag(etag_func)
def list_objects(request, model=None, level=None, space=None, explicit_edit=False):
resolver_match = getattr(request, 'sub_resolver_match', request.resolver_match)
if not resolver_match.url_name.endswith('.list'):
raise ValueError('url_name does not end with .list')
model = request.changeset.wrap_model(model)
Level = request.changeset.wrap_model('Level')
Space = request.changeset.wrap_model('Space')
can_edit = request.changeset.can_edit(request)
ctx = {
'path': request.path,
'model_name': model.__name__.lower(),
'model_title': model._meta.verbose_name,
'model_title_plural': model._meta.verbose_name_plural,
'explicit_edit': explicit_edit,
}
queryset = model.objects.all().order_by('id')
if hasattr(model, 'q_for_request'):
queryset = queryset.filter(model.q_for_request(request))
reverse_kwargs = {}
add_cols = []
if level is not None:
reverse_kwargs['level'] = level
level = get_object_or_404(Level.objects.filter(Level.q_for_request(request)), pk=level)
queryset = queryset.filter(level=level).defer('geometry')
edit_utils = LevelChildEditUtils(level, request)
ctx.update({
'back_url': reverse('editor.levels.detail', kwargs={'pk': level.pk}),
'back_title': _('back to level'),
'levels': Level.objects.filter(Level.q_for_request(request), on_top_of__isnull=True),
'level': level,
'level_url': resolver_match.url_name,
})
elif space is not None:
reverse_kwargs['space'] = space
sub_qs = Space.objects.filter(Space.q_for_request(request)).select_related('level').defer('geometry')
space = get_object_or_404(sub_qs, pk=space)
queryset = queryset.filter(space=space).filter(**get_visible_spaces_kwargs(model, request))
edit_utils = SpaceChildEditUtils(space, request)
with suppress(FieldDoesNotExist):
model._meta.get_field('geometry')
queryset = queryset.defer('geometry')
with suppress(FieldDoesNotExist):
model._meta.get_field('origin_space')
queryset = queryset.select_related('origin_space')
with suppress(FieldDoesNotExist):
model._meta.get_field('target_space')
queryset = queryset.select_related('target_space')
ctx.update({
'levels': Level.objects.filter(Level.q_for_request(request), on_top_of__isnull=True),
'level': space.level,
'level_url': 'editor.spaces.list',
'space': space,
'back_url': reverse('editor.spaces.detail', kwargs={'level': space.level.pk, 'pk': space.pk}),
'back_title': _('back to space'),
})
else:
edit_utils = DefaultEditUtils(request)
with suppress(FieldDoesNotExist):
model._meta.get_field('category')
queryset = queryset.select_related('category')
with suppress(FieldDoesNotExist):
model._meta.get_field('priority')
add_cols.append('priority')
queryset = queryset.order_by('-priority')
ctx.update({
'back_url': reverse('editor.index'),
'back_title': _('back to overview'),
})
edit_url_name = resolver_match.url_name[:-4]+('detail' if explicit_edit else 'edit')
for obj in queryset:
reverse_kwargs['pk'] = obj.pk
obj.edit_url = reverse(edit_url_name, kwargs=reverse_kwargs)
obj.add_cols = tuple(getattr(obj, col) for col in add_cols)
reverse_kwargs.pop('pk', None)
if model.__name__ == 'LocationGroup':
LocationGroupCategory = request.changeset.wrap_model('LocationGroupCategory')
grouped_objects = tuple(
{
'title': category.title_plural,
'objects': tuple(obj for obj in queryset if obj.category_id == category.pk)
}
for category in LocationGroupCategory.objects.order_by('-priority')
)
else:
grouped_objects = (
{
'objects': queryset,
},
)
ctx.update({
'can_create': edit_utils.can_create and can_edit,
'geometry_url': edit_utils.geometry_url,
'add_cols': add_cols,
'create_url': reverse(resolver_match.url_name[:-4] + 'create', kwargs=reverse_kwargs),
'grouped_objects': grouped_objects,
})
return APIHybridTemplateContextResponse('editor/list.html', ctx,
fields=('can_create', 'create_url', 'objects'))
def connect_nodes(request, active_node, clicked_node, edge_settings_form):
if not request.user_permissions.can_access_base_mapdata:
raise PermissionDenied
changeset_exceeded = get_changeset_exceeded(request)
graphedge_changes = {}
if changeset_exceeded:
graphedge_changes = request.changeset.get_changed_objects_by_model('GraphEdge')
new_connections = []
new_connections.append((active_node, clicked_node, False))
if not edge_settings_form.cleaned_data['oneway']:
new_connections.append((clicked_node, active_node, True))
instance = edge_settings_form.instance
for from_node, to_node, is_reverse in new_connections:
existing = from_node.edges_from_here.filter(to_node=to_node).first()
if changeset_exceeded and (not existing or existing.pk not in graphedge_changes):
messages.error(request, _('Could not edit edge because your changeset is full.'))
return
if existing is None:
instance.pk = None
instance.from_node = from_node
instance.to_node = to_node
instance.save()
messages.success(request, _('Reverse edge created.') if is_reverse else _('Edge created.'))
elif existing.waytype == instance.waytype and existing.access_restriction == instance.access_restriction:
existing.delete()
messages.success(request, _('Reverse edge deleted.') if is_reverse else _('Edge deleted.'))
else:
existing.waytype = instance.waytype
existing.access_restriction = instance.access_restriction
existing.save()
messages.success(request, _('Reverse edge overwritten.') if is_reverse else _('Edge overwritten.'))
@sidebar_view
@etag(etag_func)
def graph_edit(request, level=None, space=None):
if not request.user_permissions.can_access_base_mapdata:
raise PermissionDenied
Level = request.changeset.wrap_model('Level')
Space = request.changeset.wrap_model('Space')
GraphNode = request.changeset.wrap_model('GraphNode')
GraphEdge = request.changeset.wrap_model('GraphEdge')
can_edit = request.changeset.can_edit(request)
ctx = {
'path': request.path,
'can_edit': can_edit,
'levels': Level.objects.filter(Level.q_for_request(request), on_top_of__isnull=True),
'level_url': 'editor.levels.graph',
}
create_nodes = False
if level is not None:
level = get_object_or_404(Level.objects.filter(Level.q_for_request(request)), pk=level)
ctx.update({
'back_url': reverse('editor.levels.detail', kwargs={'pk': level.pk}),
'back_title': _('back to level'),
'level': level,
'geometry_url': '/api/editor/geometries/?level='+str(level.primary_level_pk),
})
elif space is not None:
queryset = Space.objects.filter(Space.q_for_request(request)).select_related('level').defer('geometry')
space = get_object_or_404(queryset, pk=space)
level = space.level
ctx.update({
'space': space,
'level': space.level,
'back_url': reverse('editor.spaces.detail', kwargs={'level': level.pk, 'pk': space.pk}),
'back_title': _('back to space'),
'parent_url': reverse('editor.levels.graph', kwargs={'level': level.pk}),
'parent_title': _('to level graph'),
'geometry_url': '/api/editor/geometries/?space='+str(space.pk),
})
create_nodes = True
if request.method == 'POST':
changeset_exceeded = get_changeset_exceeded(request)
graphnode_changes = {}
if changeset_exceeded:
graphnode_changes = request.changeset.get_changed_objects_by_model('GraphNode')
if request.POST.get('delete') == '1':
# Delete this graphnode!
node = get_object_or_404(GraphNode, pk=request.POST.get('pk'))
if changeset_exceeded and node.pk not in graphnode_changes:
messages.error(request, _('You can not delete this graph node because your changeset is full.'))
return redirect(request.path)
if request.POST.get('delete_confirm') == '1':
with request.changeset.lock_to_edit(request) as changeset:
if changeset.can_edit(request):
node.edges_from_here.all().delete()
node.edges_to_here.all().delete()
node.delete()
else:
messages.error(request, _('You can not edit changes on this changeset.'))
return redirect(request.path)
messages.success(request, _('Graph Node was successfully deleted.'))
return redirect(request.path)
return render(request, 'editor/delete.html', {
'model_title': GraphNode._meta.verbose_name,
'pk': node.pk,
'obj_title': node.title
})
permissions = AccessPermission.get_for_request(request) | set([None])
edge_settings_form = GraphEdgeSettingsForm(instance=GraphEdge(), request=request, data=request.POST)
graph_action_form = GraphEditorActionForm(request=request, allow_clicked_position=create_nodes,
data=request.POST)
if edge_settings_form.is_valid() and graph_action_form.is_valid():
goto_space = graph_action_form.cleaned_data['goto_space']
if goto_space is not None:
return redirect(reverse('editor.spaces.graph', kwargs={'space': goto_space.pk}))
set_active_node = False
active_node = graph_action_form.cleaned_data['active_node']
clicked_node = graph_action_form.cleaned_data['clicked_node']
clicked_position = graph_action_form.cleaned_data.get('clicked_position')
if clicked_node is not None and clicked_position is None:
if active_node is None:
active_node = clicked_node
set_active_node = True
elif active_node == clicked_node:
active_node = None
set_active_node = True
else:
with request.changeset.lock_to_edit(request) as changeset:
if changeset.can_edit(request):
connect_nodes(request, active_node, clicked_node, edge_settings_form)
active_node = clicked_node if edge_settings_form.cleaned_data['activate_next'] else None
set_active_node = True
else:
messages.error(request, _('You can not edit changes on this changeset.'))
elif (clicked_node is None and clicked_position is not None and
active_node is None and space.geometry.contains(clicked_position)):
if changeset_exceeded:
messages.error(request, _('You can not add graph nodes because your changeset is full.'))
return redirect(request.path)
with request.changeset.lock_to_edit(request) as changeset:
if changeset.can_edit(request):
node = GraphNode(space=space, geometry=clicked_position)
node.save()
messages.success(request, _('New graph node created.'))
active_node = None
set_active_node = True
else:
messages.error(request, _('You can not edit changes on this changeset.'))
if set_active_node:
connections = {}
if active_node:
for self_node, other_node in (('from_node', 'to_node'), ('to_node', 'from_node')):
conn_qs = GraphEdge.objects.filter(Q(**{self_node+'__pk': active_node.pk}))
conn_qs = conn_qs.select_related(other_node+'__space', other_node+'__space__level',
'waytype', 'access_restriction')
for edge in conn_qs:
edge.other_node = getattr(edge, other_node)
if (edge.other_node.space.access_restriction_id not in permissions
or edge.other_node.space.level.access_restriction_id not in permissions):
continue
connections.setdefault(edge.other_node.space_id, []).append(edge)
connections = sorted(
connections.values(),
key=lambda c: (c[0].other_node.space.level == level,
c[0].other_node.space == space,
c[0].other_node.space.level.base_altitude)
)
ctx.update({
'set_active_node': set_active_node,
'active_node': active_node,
'active_node_connections': connections,
})
else:
edge_settings_form = GraphEdgeSettingsForm(request=request)
graph_action_form = GraphEditorActionForm(request=request, allow_clicked_position=create_nodes)
ctx.update({
'edge_settings_form': edge_settings_form,
'graph_action_form': graph_action_form,
'create_nodes': create_nodes,
})
return render(request, 'editor/graph.html', ctx)
def sourceimage(request, filename):
if not request.user.is_superuser:
raise PermissionDenied
if not can_access_editor(request):
        raise PermissionDenied
try:
return HttpResponse(open(os.path.join(settings.SOURCES_ROOT, filename), 'rb'),
content_type=mimetypes.guess_type(filename)[0])
except FileNotFoundError:
raise Http404
```
#### File: management/commands/cleangeometries.py
```python
from django.core.management.base import BaseCommand
from django.db import transaction
from c3nav.mapdata.models.geometry.base import GeometryMixin
from c3nav.mapdata.utils.models import get_submodels
class Command(BaseCommand):
help = 'clean-up/fix all geometries in the database'
def handle(self, *args, **options):
with transaction.atomic():
for model in get_submodels(GeometryMixin):
for instance in model.objects.all():
old_geom = instance.geometry.wrapped_geojson
if instance.geometry.is_empty:
print('Deleted %s' % instance)
instance.delete()
continue
instance.save()
instance.refresh_from_db()
if instance.geometry.wrapped_geojson != old_geom:
print('Fixed %s' % instance)
```
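The command above can also be invoked programmatically; a minimal sketch using Django's standard `call_command` entry point (the command name comes from the file path above):
```python
# Sketch: run the clean-up pass from Python instead of the shell.
from django.core.management import call_command

call_command("cleangeometries")  # same effect as `python manage.py cleangeometries`
```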
#### File: management/commands/importsvg.py
```python
import argparse
import logging
import re
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext_lazy as _
from shapely.affinity import scale, translate
from shapely.geometry import Polygon
from c3nav.mapdata.models import Area, MapUpdate, Obstacle, Space
from c3nav.mapdata.utils.cache.changes import changed_geometries
class Command(BaseCommand):
help = 'render the map'
@staticmethod
def space_value(value):
try:
space = Space.objects.get(pk=value)
except Space.DoesNotExist:
raise argparse.ArgumentTypeError(
_('unknown space')
)
return space
def add_arguments(self, parser):
parser.add_argument('svgfile', type=argparse.FileType('r'), help=_('svg file to import'))
parser.add_argument('name', type=str, help=_('name of the import'))
parser.add_argument('--type', type=str, required=True, choices=('areas', 'obstacles'),
help=_('type of objects to create'))
parser.add_argument('--space', type=self.space_value, required=True,
help=_('space to add the objects to'))
parser.add_argument('--minx', type=float, required=True,
                            help=_('minimum x coordinate, everything left of it will be cropped'))
        parser.add_argument('--miny', type=float, required=True,
                            help=_('minimum y coordinate, everything below it will be cropped'))
        parser.add_argument('--maxx', type=float, required=True,
                            help=_('maximum x coordinate, everything right of it will be cropped'))
        parser.add_argument('--maxy', type=float, required=True,
                            help=_('maximum y coordinate, everything above it will be cropped'))
@staticmethod
def parse_svg_data(data):
        first = True
last_point = (0, 0)
last_end_point = None
done_subpaths = []
current_subpath = []
while data:
data = data.lstrip().replace(',', ' ')
command = data[0]
if first and command not in 'Mm':
raise ValueError('path data has to start with moveto command.')
data = data[1:].lstrip()
first = False
numbers = []
while True:
match = re.match(r'^-?[0-9]+(\.[0-9]+)?(e-?[0-9]+)?', data)
if match is None:
break
numbers.append(float(match.group(0)))
data = data[len(match.group(0)):].lstrip()
relative = command.islower()
if command in 'Mm':
if not len(numbers) or len(numbers) % 2:
raise ValueError('Invalid number of arguments for moveto command!')
numbers = iter(numbers)
first = True
for x, y in zip(numbers, numbers):
if relative:
x, y = last_point[0] + x, last_point[1] + y
if first:
first = False
if current_subpath:
done_subpaths.append(current_subpath)
last_end_point = current_subpath[-1]
current_subpath = []
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Ll':
if not len(numbers) or len(numbers) % 2:
raise ValueError('Invalid number of arguments for lineto command!')
numbers = iter(numbers)
for x, y in zip(numbers, numbers):
if relative:
x, y = last_point[0] + x, last_point[1] + y
if not current_subpath:
current_subpath.append(last_end_point)
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Hh':
if not len(numbers):
raise ValueError('Invalid number of arguments for horizontal lineto command!')
y = last_point[1]
for x in numbers:
if relative:
x = last_point[0] + x
if not current_subpath:
current_subpath.append(last_end_point)
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Vv':
if not len(numbers):
raise ValueError('Invalid number of arguments for vertical lineto command!')
x = last_point[0]
for y in numbers:
if relative:
y = last_point[1] + y
if not current_subpath:
current_subpath.append(last_end_point)
current_subpath.append((x, y))
last_point = (x, y)
elif command in 'Zz':
if numbers:
raise ValueError('Invalid number of arguments for closepath command!')
current_subpath.append(current_subpath[0])
done_subpaths.append(current_subpath)
last_end_point = current_subpath[-1]
current_subpath = []
else:
raise ValueError('unknown svg command: ' + command)
if current_subpath:
done_subpaths.append(current_subpath)
return done_subpaths
def handle(self, *args, **options):
minx = options['minx']
miny = options['miny']
maxx = options['maxx']
maxy = options['maxy']
if minx >= maxx:
raise CommandError(_('minx has to be lower than maxx'))
if miny >= maxy:
raise CommandError(_('miny has to be lower than maxy'))
width = maxx-minx
height = maxy-miny
model = {'areas': Area, 'obstacles': Obstacle}[options['type']]
namespaces = {'svg': 'http://www.w3.org/2000/svg'}
svg = ElementTree.fromstring(options['svgfile'].read())
svg_width = float(svg.attrib['width'])
svg_height = float(svg.attrib['height'])
svg_viewbox = svg.attrib.get('viewBox')
if svg_viewbox:
offset_x, offset_y, svg_width, svg_height = [float(i) for i in svg_viewbox.split(' ')]
else:
offset_x, offset_y = 0, 0
for element in svg.findall('.//svg:clipPath/..', namespaces):
for clippath in element.findall('./svg:clipPath', namespaces):
element.remove(clippath)
for element in svg.findall('.//svg:symbol/..', namespaces):
for clippath in element.findall('./svg:symbol', namespaces):
element.remove(clippath)
if svg.findall('.//*[@transform]'):
raise CommandError(_('svg contains transform attributes. Use inkscape apply transforms.'))
if model.objects.filter(space=options['space'], import_tag=options['name']).exists():
raise CommandError(_('objects with this import tag already exist in this space.'))
with MapUpdate.lock():
changed_geometries.reset()
for path in svg.findall('.//svg:path', namespaces):
for polygon in self.parse_svg_data(path.attrib['d']):
if len(polygon) < 3:
continue
polygon = Polygon(polygon)
polygon = translate(polygon, xoff=-offset_x, yoff=-offset_y)
polygon = scale(polygon, xfact=1, yfact=-1, origin=(0, svg_height/2))
polygon = scale(polygon, xfact=width / svg_width, yfact=height / svg_height, origin=(0, 0))
polygon = translate(polygon, xoff=minx, yoff=miny)
obj = model(geometry=polygon, space=options['space'], import_tag=options['name'])
obj.save()
MapUpdate.objects.create(type='importsvg')
logger = logging.getLogger('c3nav')
logger.info('Imported, map update created.')
logger.info('Next step: go into the shell and edit them using '
'%s.objects.filter(space_id=%r, import_tag=%r)' %
(model.__name__, options['space'].pk, options['name']))
```
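A small usage sketch for the `parse_svg_data` static method above; the path string is an invented example, not from the original repo:
```python
# Hypothetical example: parse a simple closed path (a triangle).
subpaths = Command.parse_svg_data("M 0 0 L 10 0 L 10 10 Z")
# -> [[(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 0.0)]]
# Each subpath is a list of (x, y) tuples; `Z` closes it back to the start point.
```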
#### File: management/commands/processupdates.py
```python
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import DatabaseError
from django.utils.translation import ugettext_lazy as _
from c3nav.mapdata.tasks import process_map_updates
class Command(BaseCommand):
help = 'process unprocessed map updates'
def handle(self, *args, **options):
logger = logging.getLogger('c3nav')
try:
process_map_updates()
except DatabaseError:
logger.error(_('There is already map update processing in progress.'))
if not settings.HAS_REAL_CACHE:
print(_('You have no external cache configured, so don\'t forget to restart your c3nav instance!'))
```
#### File: c3nav/mapdata/middleware.py
```python
import re
from functools import wraps
from c3nav.mapdata.utils.user import get_user_data_lazy
class NoLanguageMiddleware:
"""
    Middleware that allows unsetting the Language HTTP header using the @no_language decorator.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
if not getattr(response, 'no_language', False):
return response
if not getattr(response, 'keep_content_language', False):
del response['Content-Language']
if not response.has_header('Vary'):
return response
vary = tuple(s for s in re.split(r'\s*,\s*', response['Vary']) if s.lower() != 'accept-language')
if vary:
response['Vary'] = ', '.join(vary)
else:
del response['Vary']
return response
def no_language(keep_content_language=False):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
response.no_language = True
response.keep_content_language = keep_content_language
return response
return wrapper
return decorator
class UserDataMiddleware:
"""
Enables getting user_data using request.user_data.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
request.user_data = get_user_data_lazy(request)
return self.get_response(request)
```
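A minimal sketch (assumed, not from the repo) of a view opting out of language headers via the decorator above:
```python
# Hypothetical view using the @no_language decorator defined above.
from django.http import HttpResponse

@no_language()
def healthcheck(request):
    # The middleware will drop Content-Language from this response and
    # strip "accept-language" from its Vary header.
    return HttpResponse("ok")
```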
#### File: mapdata/migrations/0002_fix_broken_spaces.py
```python
from django.db import migrations
from shapely.geometry import Polygon
def fix_broken_spaces(apps, schema_editor):
Space = apps.get_model('mapdata', 'Space')
for space in Space.objects.all():
geometry = space.geometry
space.geometry = Polygon(shell=geometry.exterior)
space.save()
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0001_squashed_2018'),
]
operations = [
migrations.RunPython(fix_broken_spaces, migrations.RunPython.noop)
]
```
#### File: mapdata/migrations/0007_assign_hole_space.py
```python
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def assign_hole_to_space(apps, schema_editor):
Hole = apps.get_model('mapdata', 'Hole')
Space = apps.get_model('mapdata', 'Space')
for hole in Hole.objects.filter():
space = max(hole.section.spaces.filter(level='normal'),
key=lambda s: s.geometry.intersection(hole.geometry).area)
hole.space = space
hole.save()
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0006_remove_section_name'),
]
operations = [
migrations.AddField(
model_name='hole',
name='space',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='holes', to='mapdata.Space', verbose_name='space'),
preserve_default=False,
),
migrations.RunPython(assign_hole_to_space),
]
```
#### File: mapdata/migrations/0010_on_top_of.py
```python
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
def move_upper_spaces_to_section_on_top(apps, schema_editor):
Section = apps.get_model('mapdata', 'Section')
Space = apps.get_model('mapdata', 'Space')
Space.objects.filter(level='lower').delete()
for section in Section.objects.all():
if Space.objects.filter(level='upper', section=section).exists():
section_on_top_of = section.sections_on_top.create(altitude=section.altitude+Decimal('0.01'), public=section.public,
can_search=False, can_describe=False)
Space.objects.filter(level='upper', section=section).update(section=section_on_top_of, outside=True)
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0009_column'),
]
operations = [
migrations.AddField(
model_name='section',
name='on_top_of',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='sections_on_top', to='mapdata.Section', verbose_name='on top of'),
),
migrations.RunPython(move_upper_spaces_to_section_on_top),
migrations.RemoveField(
model_name='space',
name='level',
),
]
```
#### File: mapdata/migrations/0015_auto_20170706_1334.py
```python
from __future__ import unicode_literals
from django.db import migrations
def set_blank_to_null(apps, schema_editor):
for model_name in ('Area', 'Point', 'Level', 'Space', 'LocationGroup'):
model = apps.get_model('mapdata', model_name)
model.objects.filter(color='').update(color=None)
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0014_mapupdate'),
]
operations = [
migrations.RunPython(set_blank_to_null),
]
```
#### File: mapdata/models/update.py
```python
import logging
import os
import pickle
import time
from contextlib import contextmanager, suppress
from sqlite3 import DatabaseError
from django.conf import settings
from django.core.cache import cache
from django.db import models, transaction
from django.utils.http import int_to_base36
from django.utils.timezone import make_naive
from django.utils.translation import ugettext_lazy as _
from c3nav.mapdata.tasks import process_map_updates
class MapUpdate(models.Model):
"""
A map update. created whenever mapdata is changed.
"""
TYPES = (
('changeset', _('changeset applied')),
('direct_edit', _('direct edit')),
('control_panel', _('via control panel')),
('management', 'manage.py clearmapcache'),
)
datetime = models.DateTimeField(auto_now_add=True, db_index=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.PROTECT)
type = models.CharField(max_length=32, choices=TYPES)
processed = models.BooleanField(default=False)
geometries_changed = models.BooleanField()
class Meta:
verbose_name = _('Map update')
verbose_name_plural = _('Map updates')
default_related_name = 'mapupdates'
ordering = ('datetime', )
get_latest_by = 'datetime'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.was_processed = self.processed
@classmethod
def last_update(cls, force=False):
if not force:
last_update = cache.get('mapdata:last_update', None)
if last_update is not None:
return last_update
try:
with cls.lock():
last_update = cls.objects.latest().to_tuple
cache.set('mapdata:last_update', last_update, None)
except cls.DoesNotExist:
last_update = (0, 0)
cache.set('mapdata:last_update', last_update, None)
return last_update
@classmethod
def last_processed_update(cls, force=False):
if not force:
last_processed_update = cache.get('mapdata:last_processed_update', None)
if last_processed_update is not None:
return last_processed_update
try:
with cls.lock():
last_processed_update = cls.objects.filter(processed=True).latest().to_tuple
cache.set('mapdata:last_processed_update', last_processed_update, None)
except cls.DoesNotExist:
last_processed_update = (0, 0)
cache.set('mapdata:last_processed_update', last_processed_update, None)
return last_processed_update
@property
def to_tuple(self):
return self.pk, int(make_naive(self.datetime).timestamp())
@property
def cache_key(self):
return self.build_cache_key(self.pk, int(make_naive(self.datetime).timestamp()))
@classmethod
def current_cache_key(cls, request=None):
return cls.build_cache_key(*cls.last_update())
@classmethod
def current_processed_cache_key(cls, request=None):
return cls.build_cache_key(*cls.last_processed_update())
@staticmethod
def build_cache_key(pk, timestamp):
return int_to_base36(pk)+'_'+int_to_base36(timestamp)
@classmethod
@contextmanager
def lock(cls):
with transaction.atomic():
try:
earliest = cls.objects.earliest()
except cls.DoesNotExist:
yield
else:
yield cls.objects.select_for_update().get(pk=earliest.pk)
def _changed_geometries_filename(self):
return os.path.join(settings.CACHE_ROOT, 'changed_geometries', 'update_%d.pickle' % self.pk)
class ProcessUpdatesAlreadyRunning(Exception):
pass
@classmethod
@contextmanager
def get_updates_to_process(cls):
queryset = cls.objects.filter(processed=False)
with transaction.atomic():
if settings.HAS_REDIS:
import redis
                lock_acquired = None
                lock = redis.Redis().lock('mapupdate:process_updates:lock')
                try:
                    lock_acquired = lock.acquire(blocking=False, blocking_timeout=1800)
                    if not lock_acquired:
                        raise cls.ProcessUpdatesAlreadyRunning
                    cache.set('mapdata:last_process_updates_start', int(time.time()), None)
                    yield tuple(queryset)
                finally:
                    if lock_acquired:
                        lock.release()
else:
try:
yield tuple(queryset.select_for_update(nowait=True))
except DatabaseError:
raise cls.ProcessUpdatesAlreadyRunning
@classmethod
def process_updates(cls):
logger = logging.getLogger('c3nav')
with cls.get_updates_to_process() as new_updates:
if not new_updates:
return ()
if any(update.geometries_changed for update in new_updates):
from c3nav.mapdata.utils.cache.changes import changed_geometries
changed_geometries.reset()
logger.info('Recalculating altitude areas...')
from c3nav.mapdata.models import AltitudeArea
AltitudeArea.recalculate()
logger.info('%.3f m² of altitude areas affected.' % changed_geometries.area)
last_processed_update = cls.last_processed_update(force=True)
for new_update in new_updates:
logger.info('Applying changed geometries from MapUpdate #%(id)s (%(type)s)...' %
{'id': new_update.pk, 'type': new_update.type})
try:
new_changes = pickle.load(open(new_update._changed_geometries_filename(), 'rb'))
except FileNotFoundError:
logger.warning('changed_geometries pickle file not found.')
else:
logger.info('%.3f m² affected by this update.' % new_changes.area)
changed_geometries.combine(new_changes)
logger.info('%.3f m² of geometries affected in total.' % changed_geometries.area)
changed_geometries.save(last_processed_update, new_updates[-1].to_tuple)
logger.info('Rebuilding level render data...')
from c3nav.mapdata.render.renderdata import LevelRenderData
LevelRenderData.rebuild()
else:
logger.info('No geometries affected.')
logger.info('Rebuilding router...')
from c3nav.routing.router import Router
Router.rebuild(new_updates[-1].to_tuple)
logger.info('Rebuilding locator...')
from c3nav.routing.locator import Locator
Locator.rebuild(new_updates[-1].to_tuple)
for new_update in reversed(new_updates):
new_update.processed = True
new_update.save()
transaction.on_commit(
lambda: cache.set('mapdata:last_processed_update', new_updates[-1].to_tuple, None)
)
return new_updates
def save(self, **kwargs):
new = self.pk is None
if not new and (self.was_processed or not self.processed):
raise TypeError
from c3nav.mapdata.utils.cache.changes import changed_geometries
if self.geometries_changed is None:
self.geometries_changed = not changed_geometries.is_empty
super().save(**kwargs)
with suppress(FileExistsError):
os.mkdir(os.path.dirname(self._changed_geometries_filename()))
if self.geometries_changed:
pickle.dump(changed_geometries, open(self._changed_geometries_filename(), 'wb'))
if new:
transaction.on_commit(
lambda: cache.set('mapdata:last_update', self.to_tuple, None)
)
if settings.HAS_CELERY and settings.AUTO_PROCESS_UPDATES:
transaction.on_commit(
lambda: process_map_updates.delay()
)
```
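A usage sketch mirroring how the `importsvg` command above records an update under the table lock; `'direct_edit'` is one of the `TYPES` choices defined on the model:
```python
# Sketch: record a map update while holding the lock so that
# concurrent update processing is serialized.
with MapUpdate.lock():
    MapUpdate.objects.create(type='direct_edit', geometries_changed=False)
```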
#### File: render/geometry/altitudearea.py
```python
from collections import deque
from itertools import chain
import numpy as np
from c3nav.mapdata.models import AltitudeArea
from c3nav.mapdata.render.geometry.hybrid import HybridGeometry
class AltitudeAreaGeometries:
def __init__(self, altitudearea=None, colors=None, obstacles=None):
if altitudearea is not None:
self.geometry = altitudearea.geometry
self.altitude = int(altitudearea.altitude * 1000)
self.altitude2 = None if altitudearea.altitude2 is None else int(altitudearea.altitude2 * 1000)
self.point1 = altitudearea.point1
self.point2 = altitudearea.point2
else:
self.geometry = None
self.altitude = None
self.altitude2 = None
self.point1 = None
self.point2 = None
self.base = None
self.bottom = None
self.colors = colors
self.obstacles = obstacles
def get_altitudes(self, points):
# noinspection PyCallByClass,PyTypeChecker
return AltitudeArea.get_altitudes(self, points/1000).astype(np.int32)
def create_hybrid_geometries(self, face_centers, vertices_offset, faces_offset):
self.geometry = HybridGeometry.create(self.geometry, face_centers)
vertices = deque()
faces = deque()
for color, areas in self.colors.items():
for height in tuple(areas.keys()):
faces_offset, vertices_offset = self._call_create_full(areas, height, faces, vertices,
faces_offset, vertices_offset)
for height_obstacles in self.obstacles.values():
for color_obstacles in height_obstacles.values():
for i in range(len(color_obstacles)):
faces_offset, vertices_offset = self._call_create_full(color_obstacles, i, faces, vertices,
faces_offset, vertices_offset)
if not vertices:
return np.empty((0, 2), dtype=np.int32), np.empty((0, 3), dtype=np.uint32)
return np.vstack(vertices), np.vstack(faces)
def _call_create_full(self, mapping, key, faces, vertices, faces_offset, vertices_offset):
geom = mapping[key]
new_geom, new_vertices, new_faces = HybridGeometry.create_full(geom, vertices_offset, faces_offset)
mapping[key] = new_geom
vertices_offset += new_vertices.shape[0]
faces_offset += new_faces.shape[0]
vertices.append(new_vertices)
faces.append(new_faces)
return faces_offset, vertices_offset
def remove_faces(self, faces):
self.geometry.remove_faces(faces)
for areas in self.colors.values():
for area in areas.values():
area.remove_faces(faces)
def create_polyhedrons(self, create_polyhedron, altitudes, min_altitude, crops):
if self.altitude2 is None:
altitudes = self.altitude
self.base = HybridGeometry(self.geometry.geom, self.geometry.faces)
self.bottom = HybridGeometry(self.geometry.geom, self.geometry.faces)
self.geometry.build_polyhedron(create_polyhedron,
lower=altitudes - int(0.7 * 1000),
upper=altitudes,
crops=crops)
self.base.build_polyhedron(create_polyhedron,
lower=min_altitude - int(0.7 * 1000),
upper=altitudes - int(0.7 * 1000),
crops=crops,
top=False, bottom=False)
self.bottom.build_polyhedron(create_polyhedron,
lower=0, upper=1,
crops=crops,
top=False)
for geometry in chain(*(areas.values() for areas in self.colors.values())):
geometry.build_polyhedron(create_polyhedron,
lower=altitudes,
upper=altitudes + int(0.001 * 1000),
crops=crops)
# todo: treat altitude properly
for height, height_geometries in self.obstacles.items():
for color, color_geometries in height_geometries.items():
for geometry in color_geometries:
geometry.build_polyhedron(create_polyhedron,
lower=altitudes,
upper=altitudes + height,
crops=crops)
```
#### File: utils/cache/accessrestrictions.py
```python
import operator
import struct
from functools import reduce
import numpy as np
from c3nav.mapdata.utils.cache.indexed import LevelGeometryIndexed
class AccessRestrictionAffected(LevelGeometryIndexed):
# metadata format:
# 64 times:
# 4 bytes (uint32): access restriction id (or 0x00 if empty)
# each uint64 cell contains a bitmask of restrictions.
# e.g.: 2^n bit set → restriction with index 2^n does apply
dtype = np.uint64
variant_id = 2
variant_name = 'restrictions'
def __init__(self, restrictions=None, **kwargs):
super().__init__(**kwargs)
self.restrictions = [] if restrictions is None else restrictions
self.restrictions_lookup = {restriction: i for i, restriction in enumerate(self.restrictions)}
@classmethod
def _read_metadata(cls, f, kwargs):
restrictions = list(struct.unpack('<'+'I'*64, f.read(4*64)))
while restrictions and restrictions[-1] == 0:
restrictions.pop()
kwargs['restrictions'] = restrictions
def _write_metadata(self, f):
f.write(struct.pack('<'+'I'*64, *self.restrictions, *((0, )*(64-len(self.restrictions)))))
@classmethod
def build(cls, access_restriction_affected):
result = cls()
for restriction, area in access_restriction_affected.items():
result[area.buffer(1)].add(restriction)
return result
@classmethod
def open(cls, filename):
try:
instance = super().open(filename)
except FileNotFoundError:
instance = cls(restrictions=[], filename=filename)
return instance
def _get_restriction_index(self, restriction, create=False):
i = self.restrictions_lookup.get(restriction)
if create and i is None:
i = len(self.restrictions)
self.restrictions_lookup[restriction] = i
self.restrictions.append(restriction)
return i
def __getitem__(self, selector):
return AccessRestrictionAffectedCells(self, selector)
def __setitem__(self, selector, value):
raise TypeError('__setitem__ not supported for AccessRestriction matrix')
class AccessRestrictionAffectedCells:
def __init__(self, parent: AccessRestrictionAffected, selector):
self.parent = parent
self.selector = selector
self.values = self._get_values()
def _get_values(self):
return LevelGeometryIndexed.__getitem__(self.parent, self.selector)
def _set(self, values):
self.values = values
LevelGeometryIndexed.__setitem__(self.parent, self.selector, values)
def __contains__(self, restriction):
i = self.parent._get_restriction_index(restriction)
return (self.values & (2**i)).any()
def add(self, restriction):
from shapely.geometry.base import BaseGeometry
if not isinstance(self.selector, BaseGeometry):
raise TypeError('Can only add restrictions with Geometry based selectors')
# expand array
bounds = self.parent._get_geometry_bounds(self.selector)
self.parent.fit_bounds(*bounds)
self.values = self._get_values()
i = self.parent._get_restriction_index(restriction, create=True)
self._set(self.values | (2**i))
def discard(self, restriction):
from shapely.geometry.base import BaseGeometry
if not isinstance(self.selector, BaseGeometry):
raise TypeError('Can only discard restrictions with Geometry based selectors')
i = self.parent._get_restriction_index(restriction)
self._set(self.values & ((2**64-1) ^ (2**i)))
def __iter__(self):
        combined = reduce(operator.or_, self.values.tolist(), 0)
        yield from (restriction for i, restriction in enumerate(self.parent.restrictions) if (combined & 2**i))
```
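An assumed usage sketch for the grid above; the filename, geometry, and restriction id are placeholders, and the selection/resizing mechanics come from the `LevelGeometryIndexed` base class:
```python
# Hypothetical usage of the access-restriction bitmask grid above.
from shapely.geometry import box

affected = AccessRestrictionAffected.open('/tmp/restrictions.cache')  # placeholder path
cells = affected[box(0, 0, 10, 10)]  # select a rectangular area
cells.add(42)                        # restriction id 42 now applies within the area
print(42 in cells, list(cells))
```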
#### File: utils/cache/local.py
```python
from collections import OrderedDict
from django.core.cache import cache
from c3nav.mapdata.models import MapUpdate
class NoneFromCache:
pass
class LocalCacheProxy:
# django cache, buffered using a LRU cache
# only usable for stuff that never changes, obviously
def __init__(self, maxsize=128):
self._maxsize = maxsize
self._mapupdate = None
self._items = OrderedDict()
def get(self, key, default=None):
if self._mapupdate is None:
self._check_mapupdate()
try:
# first check out cache
result = self._items[key]
except KeyError:
# not in our cache
result = cache.get(key, default=NoneFromCache)
if result is not NoneFromCache:
self._items[key] = result
self._prune()
else:
result = default
else:
self._items.move_to_end(key, last=True)
return result
def _prune(self):
# remove old items
while len(self._items) > self._maxsize:
self._items.pop(next(iter(self._items.keys())))
def _check_mapupdate(self):
mapupdate = MapUpdate.current_cache_key()
if self._mapupdate != mapupdate:
self._items = OrderedDict()
self._mapupdate = mapupdate
def set(self, key, value, expire):
self._check_mapupdate()
cache.set(key, value, expire)
self._items[key] = value
self._prune()
```
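A short usage sketch for the proxy above; the key and TTL are placeholders:
```python
# Hypothetical usage: an in-process LRU buffer in front of the Django cache.
proxy = LocalCacheProxy(maxsize=64)
proxy.set('mapdata:example-key', {'answer': 42}, expire=300)
value = proxy.get('mapdata:example-key', default=None)
# Repeated gets are served from the local OrderedDict until the
# next MapUpdate invalidates the buffer.
```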
#### File: mapdata/utils/color.py
```python
from functools import lru_cache
@lru_cache()
def color_to_rgb(color, alpha=None):
if color.startswith('#'):
return (*(int(color[i:i + 2], 16) / 255 for i in range(1, 6, 2)), 1 if alpha is None else alpha)
if color.startswith('rgba('):
color = tuple(float(i.strip()) for i in color.strip()[5:-1].split(','))
return (*(i/255 for i in color[:3]), color[3] if alpha is None else alpha)
raise ValueError('invalid color string!')
@lru_cache()
def rgb_to_color(rgb):
# noinspection PyStringFormat
return 'rgba(%d, %d, %d, %.1f)' % (*(i*255 for i in rgb[:3]), rgb[3])
```
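Two worked examples for the converters above:
```python
color_to_rgb('#ff8800')                 # -> (1.0, 0.5333..., 0.0, 1)
color_to_rgb('rgba(255, 136, 0, 0.5)')  # -> (1.0, 0.5333..., 0.0, 0.5)
rgb_to_color((1.0, 0.5, 0.0, 0.8))      # -> 'rgba(255, 127, 0, 0.8)'
```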
#### File: mapdata/utils/mpl.py
```python
from abc import ABC, abstractmethod
import numpy as np
from matplotlib.path import Path
from shapely.geometry import GeometryCollection, MultiPolygon, Polygon
from c3nav.mapdata.utils.geometry import assert_multipolygon
class MplPathProxy(ABC):
@abstractmethod
def intersects_path(self, path):
pass
@abstractmethod
def contains_point(self, point):
pass
class MplMultipolygonPath(MplPathProxy):
__slots__ = ('polygons')
def __init__(self, polygon):
self.polygons = tuple(MplPolygonPath(polygon) for polygon in assert_multipolygon(polygon))
@property
def exteriors(self):
return tuple(polygon.exterior for polygon in self.polygons)
def intersects_path(self, path, filled=False):
for polygon in self.polygons:
if polygon.intersects_path(path, filled=filled):
return True
return False
def contains_point(self, point):
for polygon in self.polygons:
if polygon.contains_point(point):
return True
return False
def contains_points(self, points):
        result = np.full((len(points),), fill_value=False, dtype=bool)
for polygon in self.polygons:
ix = np.argwhere(np.logical_not(result)).flatten()
result[ix] = polygon.contains_points(points[ix])
return result
class MplPolygonPath(MplPathProxy):
__slots__ = ('exterior', 'interiors')
def __init__(self, polygon):
self.exterior = linearring_to_mpl_path(polygon.exterior)
self.interiors = [linearring_to_mpl_path(interior) for interior in polygon.interiors]
@property
def exteriors(self):
return (self.exterior, )
def intersects_path(self, path, filled=False):
if filled:
if not self.exterior.intersects_path(path, filled=True):
return False
for interior in self.interiors:
if interior.contains_path(path):
return False
return True
else:
if self.exterior.intersects_path(path, filled=False):
return True
for interior in self.interiors:
if interior.intersects_path(path, filled=False):
return True
return False
def contains_points(self, points):
result = self.exterior.contains_points(points)
for interior in self.interiors:
if not result.any():
break
ix = np.argwhere(result).flatten()
result[ix] = np.logical_not(interior.contains_points(points[ix]))
return result
def contains_point(self, point):
if not self.exterior.contains_point(point):
return False
for interior in self.interiors:
if interior.contains_point(point):
return False
return True
def shapely_to_mpl(geometry):
"""
convert a shapely Polygon or Multipolygon to a matplotlib Path
:param polygon: shapely Polygon or Multipolygon
:return: MplPathProxy
"""
if isinstance(geometry, Polygon):
return MplPolygonPath(geometry)
elif isinstance(geometry, MultiPolygon) or geometry.is_empty or isinstance(geometry, GeometryCollection):
return MplMultipolygonPath(geometry)
raise TypeError
def linearring_to_mpl_path(linearring):
return Path(np.array(linearring),
(Path.MOVETO, *([Path.LINETO] * (len(linearring.coords)-2)), Path.CLOSEPOLY), readonly=True)
```
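A small hit-testing sketch for the conversion helper above:
```python
# Convert a shapely polygon to a matplotlib path proxy and test a point.
from shapely.geometry import Polygon

square = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
path = shapely_to_mpl(square)       # MplPolygonPath for a plain Polygon
print(path.contains_point((2, 2)))  # True: the point lies inside the square
```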
#### File: routing/utils/draw.py
```python
from django.conf import settings
def _ellipse_bbox(x, y, height):
x *= settings.RENDER_SCALE
y *= settings.RENDER_SCALE
y = height-y
return ((x - 2, y - 2), (x + 2, y + 2))
def _line_coords(from_point, to_point, height):
return (from_point.x * settings.RENDER_SCALE, height - (from_point.y * settings.RENDER_SCALE),
to_point.x * settings.RENDER_SCALE, height - (to_point.y * settings.RENDER_SCALE))
```
#### File: site/templatetags/route_render.py
```python
from django import template
register = template.Library()
@register.filter
def negate(value):
return -value
@register.filter
def subtract(value, arg):
return value - arg
```
#### File: c3nav/tools/fakemobileclient.py
```python
import http.server
import json
import socketserver
import subprocess
import sys
import time
PORT = int(sys.argv[1]) if sys.argv[1:] else 8042
def get_from_lines(lines, keyword):
return next(iter(l for l in lines if l.startswith(keyword))).split(keyword)[1].strip()
class FakeMobileClientHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Serve a GET request."""
if self.path != '/scan':
self.send_error(404, explain='Look at /scan')
return
while True:
p = subprocess.Popen(['iwlist', 'scan'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.communicate()[0].decode().split('Cell')[1:]
if not output:
print('scan failed, try again…')
time.sleep(0.2)
continue
stations = []
for data in output:
lines = [l.strip() for l in data[5:].split('\n')]
stations.append({
'bssid': get_from_lines(lines, 'Address:'),
'ssid': get_from_lines(lines, 'ESSID:')[1:-1],
'level': int(get_from_lines(lines, 'Quality=').split('=')[-1][:-4]),
'frequency': int(float(get_from_lines(lines, 'Frequency:').split(' ')[0]) * 1000)
})
if not stations:
continue
break
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(json.dumps({'data': stations}).encode())
return True
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
with ThreadedTCPServer(('127.0.0.1', PORT), FakeMobileClientHandler) as server:
    print('fakemobileclient on 127.0.0.1:%d' % PORT)
server.serve_forever()
``` |
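With the server above running, a client can poll it like this (sketch; the port matches the default above):
```python
# Hypothetical client for the /scan endpoint served above.
import json
import urllib.request

with urllib.request.urlopen('http://127.0.0.1:8042/scan') as response:
    stations = json.load(response)['data']
for station in stations:
    print(station['bssid'], station['ssid'], station['level'])
```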
{
"source": "JohnJohnsonOkah/myshop",
"score": 2
} |
#### File: myshop/orders/views.py
```python
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
import weasyprint
from cart.cart import Cart
from .forms import OrderCreateForm
from .models import Order, OrderItem
from .tasks import order_created
def order_create(request):
"""
Create a new Order with
- checkout details (OrderCreateForm()) &
- items in the cart (Cart())
"""
cart = Cart(request)
if request.method == "POST":
form = OrderCreateForm(request.POST)
if form.is_valid():
order = form.save(commit=False)
if cart.coupon:
order.coupon = cart.coupon
order.discount = cart.coupon.discount
order.save()
for item in cart:
OrderItem.objects.create(
order=order,
product=item["product"],
price=item["price"],
quantity=item["quantity"],
)
# clear the cart
cart.clear()
# launch asynchronous task
order_created.apply_async((order.id,), countdown=3)
# set the order in the session
request.session["order_id"] = order.id
# redirect for payment
return redirect(reverse("payment:process"))
else:
form = OrderCreateForm()
return render(request, "orders/order/create.html", {"cart": cart, "form": form})
@staff_member_required
def admin_order_detail(request, order_id):
"""
This view displays the information about an order on admin site
"""
order = get_object_or_404(Order, id=order_id)
return render(request, "admin/orders/order/detail.html", {"order": order})
@staff_member_required
def admin_order_pdf(request, order_id):
"""
This view generate PDF invoices for existing Orders
"""
order = get_object_or_404(Order, id=order_id)
html = render_to_string("orders/order/pdf.html", {"order": order})
response = HttpResponse(content_type="application/pdf")
response["Content-Disposition"] = f"filename=order_{order.id}.pdf"
weasyprint.HTML(string=html).write_pdf(
response, stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + "/css/pdf.css")]
)
return response
```
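A hypothetical `urls.py` wiring for the views above; the route paths and names are assumptions, not taken from the repo:
```python
# Hypothetical URL configuration for the order views above.
from django.urls import path

from . import views

app_name = 'orders'
urlpatterns = [
    path('create/', views.order_create, name='order_create'),
    path('admin/order/<int:order_id>/', views.admin_order_detail, name='admin_order_detail'),
    path('admin/order/<int:order_id>/pdf/', views.admin_order_pdf, name='admin_order_pdf'),
]
```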
#### File: myshop/shop/models.py
```python
from django.db import models
from django.urls import reverse
from cloudinary.models import CloudinaryField
class Category(models.Model):
""" The category model each product is classified into. """
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, unique=True)
class Meta:
ordering = ("name",)
verbose_name = "category"
verbose_name_plural = "categories"
def __str__(self):
return self.name
def get_absolute_url(self):
"""Retrieve url for list of product in a given category"""
return reverse("shop:product_list_by_category", args=[self.slug])
class Product(models.Model):
""" The product model of an item in the shop. """
category = models.ForeignKey(
Category, related_name="products", on_delete=models.CASCADE
)
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, db_index=True)
image = models.ImageField(upload_to="products/%Y/%m/%d", blank=True)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
available = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("name",)
index_together = (("id", "slug"),)
def __str__(self):
return self.name
def get_absolute_url(self):
"""Retrieves url for a given product"""
return reverse("shop:product_detail", args=[self.id, self.slug])
``` |
{
"source": "JohnJohnstone/opsmop",
"score": 2
} |
#### File: opsmop/core/scope.py
```python
class Scope(object):
""" Scope is used to prepare variable stacks during the executor phase to implement variable scoping rules """
__slots__ = [ '_parent', '_level', '_resource', '_variables', '_role', '_root', '_ancestors']
def __init__(self, variables=None, level=0, parent=None, resource=None):
if variables is None:
variables = dict()
self._parent = parent
self._level = level
self._resource = resource
self._variables = variables
self._role = None
self._root = None
from opsmop.core.policy import Policy
from opsmop.core.role import Role
if issubclass(type(resource), Role):
self._role = resource
elif self._parent and self._parent._role:
self._role = self._parent._role
if issubclass(type(resource), Policy):
self._root = resource
self._ancestors = [ ]
elif self._parent:
self._root = self._parent._root
self._ancestors = self._parent._ancestors[:]
self._ancestors.append(self._parent)
# load the resource variables into the scope
        # the set_variables method on the object wins out over keyword args
self.update_variables(resource.variables)
self.update_variables(resource.extra_variables)
def resource(self):
return self._resource
def parent(self):
return self._parent
def role(self):
return self._role
@classmethod
def for_top_level(cls, resource):
return cls(variables=resource.variables, level=0, parent=None, resource=resource)
def top_level_scope(self):
if self.parent() is None:
return self
else:
return self.parent().top_level_scope()
def top_level_resource(self):
top_scope = self.top_level_scope()
return top_scope._resource
def deeper_scope_for(self, resource):
return Scope(variables=self._variables.copy(), level=self._level+1, parent=self, resource=resource)
def ancestors(self):
return self._ancestors
def root_scope(self):
return self._root
def variables(self):
        scopes = self.ancestors()[:]  # copy so we do not mutate the ancestor list
scopes.append(self)
vstack = [ s._variables for s in scopes]
results = dict()
for variables in vstack:
results.update(variables)
return results
def update_parent_variables(self, variables):
"""
Resources setting/registering variables should always update the scope one level up.
"""
self.parent().update_variables(variables)
def update_variables(self, variables):
"""
Variables on a Resource should update just that resource.
"""
self._variables.update(variables)
def update_global_variables(self, variables):
root = self.root_scope()
root.update_variables(variables)
def __str__(self):
return "<Scope resource=%s, level=%s, parent=%s, variables=%s>" % (self._resource, self._level, self.parent(), self._variables)
```
#### File: opsmop/facts/filetests.py
```python
import hashlib
import os
import stat
from opsmop.core.context import Context
from opsmop.facts.facts import Facts
class FileTestFacts(Facts):
"""
As this evolves, facts can be dynamically injected into this base class based on platform, allowing a subclass
for things like LinuxFacts. When this happens, we can have a "facts/" package.
"""
def exists(self, fname):
return os.path.exists(fname)
def executable(self, fname):
return os.path.isfile(fname) and os.access(fname, os.X_OK)
def is_file(self, fname):
from pathlib import Path
if not self.exists(fname):
return None
return Path(fname).is_file()
def is_directory(self, fname):
from pathlib import Path
if not self.exists(fname):
return None
return Path(fname).is_dir()
def mode(self, fname):
from pathlib import Path
if not self.exists(fname):
return None
lstat = Path(fname).lstat()
return stat.S_IMODE(lstat.st_mode)
def owner(self, fname):
from pathlib import Path
if not self.exists(fname):
return None
return Path(fname).owner()
def group(self, fname):
from pathlib import Path
if not self.exists(fname):
return None
return Path(fname).group()
def checksum(self, fname, blocksize=65535):
m = hashlib.sha256()
with open(fname, "rb") as f:
block = f.read(blocksize)
while len(block) > 0:
                m.update(block)
                block = f.read(blocksize)
return m.hexdigest()
def string_checksum(self, msg):
m = hashlib.sha256()
m.update(msg.encode())
return m.hexdigest()
def same_contents(self, dest, src, remote=False):
if not self.exists(dest):
return False
c1 = self.checksum(dest)
c2 = None
if not remote:
c2 = self.checksum(src)
else:
# FIXME: this is slightly duplicated with provider code
if not src.startswith('/'):
src = os.path.join(Context().relative_root(), src)
c2 = Context().get_checksum(src)
return (c1 == c2)
FileTests = FileTestFacts()
``` |
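A quick usage sketch of the `FileTests` singleton defined above:
```python
print(FileTests.exists('/etc/hosts'))      # True on most Unix systems
print(FileTests.mode('/etc/hosts'))        # permission bits as an int, or None if missing
print(FileTests.string_checksum('hello'))  # sha256 hex digest of the string
```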
{
"source": "johnjones4/Doomsday-Machine-2",
"score": 2
} |
#### File: Doomsday-Machine-2/doomsdaymachine/webserver.py
```python
from flask import Flask, jsonify, send_from_directory
from doomsdaymachine.util import load_config
from doomsdaymachine.backup_log import BackupLog
APP = Flask(__name__)
config = load_config()
ALLOWED_STATIC_FILES = [
"index.html",
"script.js",
"style.css",
"reset.css",
]
@APP.route("/")
def home_file():
return send_from_directory("../static", "index.html")
@APP.route("/<file>")
def static_file(file):
if file in ALLOWED_STATIC_FILES:
return send_from_directory("../static", file)
@APP.route("/api/status")
def status():
backup_log = BackupLog(config)
active_job = backup_log.get_active_job()
jobs = list(map(lambda job: dict(
name=job["name"],
type=job["type"],
id=job["id"],
        start_time=active_job["start_time"] if active_job and active_job["job"] == job["id"] else None,
last_execution_time=backup_log.get_last_execution_time(job["id"])
), config["jobs"]))
return jsonify(dict(jobs=jobs))
``` |
{
"source": "johnjones4/e-dashboard",
"score": 3
} |
#### File: e-dashboard/lib/dashboard.py
```python
from PIL import Image, ImageDraw, ImageFont
from datetime import datetime
class Dashboard:
def __init__(self, width, height, rows, cols, gutter, show_status, font_size, font_path):
self.show_status = show_status
self.font_size = font_size
self.font_path = font_path
self.width = width
self.height = height
self.widgets_height = height if not self.show_status else (height - (font_size + gutter))
self.rows = rows
self.cols = cols
self.row_size = (self.widgets_height / self.rows)
self.col_size = (self.width / self.cols)
self.gutter = gutter
self.widgets = []
def add_widget(self, widget):
(row, col, row_span, col_span) = widget.get_position()
if row < 0 or row >= self.rows:
raise Exception("Invalid row: %d" % (row))
if col < 0 or col >= self.cols:
raise Exception("Invalid col: %d" % (col))
if row_span < 0 or row + row_span > self.rows:
raise Exception("Invalid row span: %d" % (row_span))
if col_span < 0 or col + col_span > self.cols:
raise Exception("Invalid col span: %d" % (col_span))
self.widgets.append((widget, row, col, row_span, col_span))
def draw_status(self, image):
font = ImageFont.truetype(self.font_path, self.font_size, 0)
drawable = ImageDraw.Draw(image)
x = self.gutter / 2.0
y = self.widgets_height
        drawable.text((x, y), "Last Updated At %s" % (datetime.now().strftime("%m/%d/%Y, %H:%M:%S")), font=font, fill=0)
def generate(self):
mode = "L"
color = 255
generated_image = Image.new(mode, (self.width, self.height), color=color)
for (widget, row, col, row_span, col_span) in self.widgets:
x = int(self.col_size * col + (self.gutter / 2.0))
y = int(self.row_size * row + (self.gutter / 2.0))
width = int(self.col_size * col_span - self.gutter)
height = int(self.row_size * row_span - self.gutter)
image = widget.generate(mode, width, height, color)
generated_image.paste(image, (x, y))
if self.show_status:
self.draw_status(generated_image)
return generated_image
``` |
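A minimal sketch of driving the class above; the widget class, font path, and panel dimensions are assumptions:
```python
# Hypothetical widget implementing the interface Dashboard expects.
from PIL import Image

class BlankWidget:
    def get_position(self):
        return (0, 0, 1, 1)  # row, col, row_span, col_span
    def generate(self, mode, width, height, color):
        return Image.new(mode, (width, height), color=color)

dash = Dashboard(width=800, height=480, rows=2, cols=2, gutter=10,
                 show_status=False, font_size=18,
                 font_path='/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf')
dash.add_widget(BlankWidget())
dash.generate().save('dashboard.png')
```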
{
"source": "johnjones4/model-rocket-telemetry",
"score": 2
} |
#### File: air/tests/test_utils.py
```python
import io
import random
import time
from queue import Queue
from threading import Thread
from whitevest.lib.atomic_buffer import AtomicBuffer
from whitevest.lib.atomic_value import AtomicValue
from whitevest.lib.configuration import Configuration
from whitevest.lib.const import TELEMETRY_TUPLE_LENGTH
from whitevest.lib.utils import (
digest_next_sensor_reading,
take_gps_reading,
transmit_latest_readings,
write_queue_log,
write_sensor_log,
)
class MockSerial:
def __init__(self, line):
self.line = line
def readline(self):
return self.line
def test_write_queue_log():
outfile = io.StringIO("")
data_queue = Queue()
configuration = Configuration(None, dict(output_directory="./data"))
while data_queue.qsize() < 10:
data_queue.put((random.random(), random.random(), random.random()))
write_queue_log(outfile, data_queue)
contents = outfile.getvalue()
assert contents
assert len(contents.split("\n")) == 11
def test_take_gps_reading():
line = f"$GPGGA,134658.00,5106.9792,N,11402.3003,W,2,09,1.0,1048.47,M,-16.27,M,08,AAAA*60"
sio = MockSerial(line)
val = AtomicValue()
take_gps_reading(sio, val)
assert val.get_value() == (51.11632, -114.03833833333333, 2, 9)
class MockRFM9X:
def send(self, value):
self.sent = value
def test_digest_next_sensor_reading():
start_time = time.time()
altimeter_value = [random.random() for _ in range(2)]
gps_value = [random.random() for _ in range(4)]
magnetometer_accelerometer_value = [random.random() for _ in range(6)]
data_queue = Queue()
current_reading = AtomicBuffer(1)
now = digest_next_sensor_reading(
start_time,
data_queue,
current_reading,
gps_value,
altimeter_value,
magnetometer_accelerometer_value,
)
logged = data_queue.get()
expected_tuple = (
now - start_time,
*altimeter_value,
*magnetometer_accelerometer_value,
*gps_value,
)
assert logged
assert logged == expected_tuple
assert current_reading.read()[0] == expected_tuple
assert len(logged) == TELEMETRY_TUPLE_LENGTH
def test_write_sensor_log():
start_time = time.time()
outfile = io.StringIO("")
data_queue = Queue()
while data_queue.qsize() < 100:
data_queue.put((random.random(), random.random(), random.random()))
continue_running = AtomicValue(True)
thread = Thread(
target=write_sensor_log,
args=(start_time, outfile, data_queue, continue_running, continue_running),
)
thread.start()
time.sleep(5)
continue_running.update(False)
thread.join()
contents = outfile.getvalue()
assert contents
assert len(contents.split("\n")) > 0
def test_transmit_latest_readings():
last_check = 1
readings_sent = 0
start_time = time.time()
rfm9x = MockRFM9X()
camera_is_running = AtomicValue(0.0)
current_reading = AtomicBuffer(2)
current_reading.put([random.random() for _ in range(TELEMETRY_TUPLE_LENGTH)])
current_reading.put([random.random() for _ in range(TELEMETRY_TUPLE_LENGTH)])
readings_sent_1, last_check_1 = transmit_latest_readings(
camera_is_running, rfm9x, last_check, readings_sent, start_time, current_reading
)
assert readings_sent_1 > readings_sent
assert last_check < last_check_1
assert last_check_1 <= time.time()
assert len(rfm9x.sent) == (TELEMETRY_TUPLE_LENGTH * 8 * 2) + 8
```
#### File: whitevest/bin/air.py
```python
import logging
import os
import time
from queue import Queue
from threading import Thread
from whitevest.lib.atomic_buffer import AtomicBuffer
from whitevest.lib.atomic_value import AtomicValue
from whitevest.lib.configuration import Configuration
from whitevest.lib.hardware import init_reset_button
from whitevest.lib.utils import create_gps_thread
from whitevest.threads.air import (
camera_thread,
sensor_log_writing_loop,
sensor_reading_loop,
transmitter_thread,
)
def main():
"""Inboard data capture and transmission script"""
# Load up the system configuration
configuration = Configuration(
os.getenv("AIR_CONFIG_FILE", None), Configuration.default_air_configuration
)
# Queue to manage data synchronization between sensor reading, transmission, and data logging
data_queue = Queue(1000)
# Timestamp to use for log files and log saving cutoff
start_time = time.time()
# Thread safe place to store altitude reading
current_readings = AtomicBuffer(50)
# Holds the most recent GPS data
gps_value = AtomicValue((0.0, 0.0, 0.0, 0.0))
# pcnt counter to runtime limit
pcnt_to_limit = AtomicValue(0.0)
# Thread safe place to store continue value
continue_running = AtomicValue(True)
# Thread safe place to store continue value
continue_logging = AtomicValue(True)
# Setup listener for reset button
init_reset_button(configuration, continue_running)
gps_thread = create_gps_thread(configuration, gps_value, continue_running)
gps_thread.start()
write_thread = Thread(
target=sensor_log_writing_loop,
args=(
configuration,
start_time,
data_queue,
continue_running,
continue_logging,
),
daemon=True,
)
write_thread.start()
camera_thread_handle = Thread(
target=camera_thread,
args=(configuration, start_time, continue_running, continue_logging),
daemon=True,
)
camera_thread_handle.start()
transmitter_thread_handle = Thread(
target=transmitter_thread,
args=(
configuration,
start_time,
current_readings,
pcnt_to_limit,
continue_running,
),
daemon=True,
)
transmitter_thread_handle.start()
sensor_reading_thread = Thread(
target=sensor_reading_loop,
args=(
configuration,
start_time,
data_queue,
current_readings,
gps_value,
continue_running,
),
daemon=True,
)
sensor_reading_thread.start()
runtime_limit = configuration.get("runtime_limit")
while continue_running.get_value() and time.time() - start_time <= runtime_limit:
pcnt_to_limit.update((time.time() - start_time) / runtime_limit)
time.sleep(1)
logging.info("Stopping write activities")
continue_logging.update(False)
write_thread.join()
camera_thread_handle.join()
pcnt_to_limit.update(1)
logging.info("Write activities ended")
gps_thread.join()
transmitter_thread_handle.join()
sensor_reading_thread.join()
if __name__ == "__main__":
main()
```
#### File: whitevest/lib/atomic_value.py
```python
from threading import Lock
class AtomicValue:
"""Thread-safe class for holding a value"""
def __init__(self, value=None):
"""Initialize the class"""
self.value = value
self.lock = Lock()
def try_update(self, value):
"""Try to update the latest value without blocking"""
# pylint: disable=bad-option-value,consider-using-with
if self.lock.acquire(False):
self.value = value
self.lock.release()
return True
return False
def update(self, value):
"""Block until we can write a value"""
with self.lock:
self.value = value
def get_value(self):
"""Block until the latest value is available"""
with self.lock:
return self.value
```
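A short usage sketch for the thread-safe holder above:
```python
# Share one value between threads without tearing.
position = AtomicValue((0.0, 0.0, 0.0, 0.0))
position.update((51.116, -114.038, 2, 9))  # blocks until the lock is free
position.try_update((0.0, 0.0, 0.0, 0.0))  # non-blocking; False if the lock is held
latest = position.get_value()
```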
#### File: whitevest/lib/configuration.py
```python
from typing import Dict
import yaml
from whitevest.lib.const import TESTING_MODE
if not TESTING_MODE:
import board
from adafruit_blinka.microcontroller.bcm283x.pin import Pin
else:
class Pin: # pylint: disable=too-few-public-methods
"""Dummy class for when GPIO modules are not available"""
class Configuration:
"""Class to manage system configuration"""
def __init__(self, config_file: str, default_configuration):
"""Create a new configuration based on the supplied yml file path or a default config"""
if config_file:
try:
with open(config_file, "r", encoding="utf8") as config_file_handle:
                    self.config = yaml.full_load(config_file_handle)
                    return
            except:  # pylint: disable=bare-except
                pass
self.config = default_configuration
def get(self, key: str, default=None):
"""Get a configuration value"""
return self.config.get(key, default)
def get_device_configuration(self, device: str, key: str, default=None):
"""Get a device configuration value"""
if device in self.config.get("devices", {}):
return self.config["devices"][device].get(key, default)
return None
def get_pin_assignments(self, device: str) -> Dict[str, Pin]:
"""Get a set of pin assignments for a device"""
if device in self.config.get("devices", {}):
config = self.config["devices"][device]
assignments = {}
for name in config:
assignments[name] = getattr(board, config[name])
return assignments
return None
    default_air_configuration = dict(
        runtime_limit=600,
        output_directory="./data",
        devices=dict(
            rfm9x=dict(
                sck="SCK",
                mosi="MOSI",
                miso="MISO",
                cs="D5",
                reset="CE1",
            ),
            bmp3xx=dict(
                scl="SCL",
                sda="SDA",
            ),
            lsm303=dict(
                scl="SCL",
                sda="SDA",
            ),
            reset=dict(pin="16"),
            gps=dict(serial_device="/dev/ttyAMA0"),
        ),
    )
``` |
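A usage sketch matching how `bin/air.py` above builds its configuration; the file path is a placeholder:
```python
config = Configuration('air_config.yml', Configuration.default_air_configuration)
runtime_limit = config.get('runtime_limit', 600)
gps_serial = config.get_device_configuration('gps', 'serial_device')  # '/dev/ttyAMA0' by default
```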
{
"source": "johnjones4/mqtt2kasa",
"score": 2
} |
#### File: mqtt2kasa/mqtt2kasa/log.py
```python
import logging
from logging.handlers import SysLogHandler
from os import path
def getLogger():
return logging.getLogger("mqtt2kasa")
def initLogger(testing=False):
logger = getLogger()
logger.setLevel(logging.INFO)
log_to_console()
if testing:
set_log_level_debug()
def log_to_console():
consoleHandler = logging.StreamHandler()
    fmt = "%(asctime)s %(module)12s:%(lineno)-d %(levelname)-8s %(message)s"
    formatter = logging.Formatter(fmt)
consoleHandler.setFormatter(formatter)
getLogger().addHandler(consoleHandler)
def set_log_level_debug():
getLogger().setLevel(logging.DEBUG)
``` |
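Typical call order for the helpers above:
```python
# Initialize once at startup, then fetch the shared logger anywhere.
initLogger(testing=True)   # testing=True also enables DEBUG level
getLogger().info('connected to broker')
```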
{
"source": "johnjones4/Weather-Ship-Tango-Delta",
"score": 3
} |
#### File: eink/weatherstation/render.py
```python
from PIL import Image, ImageDraw, ImageFont
from datetime import datetime
import urllib.request
import json
DIRECTION_UP = 0
DIRECTION_DOWN = 1
DIRECTION_NONE = 2
FONT_PATH = "./Questrial-Regular.ttf"
API_PATH = "http://weather.apps.local.johnjonesfour.com/api/weather/average?range=10"
def fetch_weather():
try:
with urllib.request.urlopen(API_PATH) as response:
return json.loads(response.read())
except:
return None
def get_direction(old_value, new_value):
if old_value < new_value:
return DIRECTION_UP
elif old_value > new_value:
return DIRECTION_DOWN
else:
return DIRECTION_NONE
def do_conversion(key, value):
if key == "avg_wind_speed" or key == "min_wind_speed" or key == "max_wind_speed":
val = value * 2.237
return round(val, 1) if val < 10 else int(round(val))
elif key == "temperature":
return int(round(value * 1.8 + 32))
elif key == "relative_humidity":
return int(round(value))
elif key == "pressure":
return int(round(value / 3386 * 100))
else:
return int(round(value))
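# Worked examples for the conversions above (hypothetical inputs):
#   do_conversion("temperature", 20.0)       -> 68   (Celsius to Fahrenheit)
#   do_conversion("avg_wind_speed", 3.0)     -> 6.7  (m/s to mph, one decimal under 10)
#   do_conversion("relative_humidity", 55.4) -> 55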
def convert_weather(weather):
outcome = dict()
for key in weather:
outcome[key] = dict(
value= do_conversion(key, weather[key]["current_value"]),
direction= get_direction(weather[key]["previous_value"], weather[key]["current_value"])
)
return outcome
def draw_datapoint(draw, direction, value, label, x, y, width, height):
value_height = int(height * 0.55)
direction_height = int(value_height * 0.4)
top_padding = int(height * 0.15)
label_padding = int(height * 0.05)
label_height = int(height * 0.11)
direction_text = "↑" if direction == DIRECTION_UP else "↓" if direction == DIRECTION_DOWN else None
value_fnt = ImageFont.truetype(FONT_PATH, value_height)
value_width, value_height = draw.textsize(value, font=value_fnt)
value_x = x + int((width - value_width) / 2)
value_y = y + top_padding
draw.text((value_x, value_y), value, font=value_fnt, fill=0)
if direction_text:
direction_fnt = ImageFont.truetype(FONT_PATH, direction_height)
direction_width, direction_height = draw.textsize(direction_text, font=direction_fnt)
direction_x = value_x - direction_width
        direction_y = value_y + int((value_height - direction_height) / 2)
draw.text((direction_x, direction_y), direction_text, font=direction_fnt, fill=0)
label_fnt = ImageFont.truetype(FONT_PATH, label_height)
label_width, _ = draw.textsize(label, font=label_fnt)
draw.text((x + int((width - label_width) / 2), y + top_padding + value_height + label_padding), label, font=label_fnt, fill=0)
def draw_weather(weather, width, height):
subpoints = [
(weather["max_wind_speed"]["direction"], str(weather["max_wind_speed"]["value"]), "Wind Gusts (MPH)"),
(weather["relative_humidity"]["direction"], f"{weather['relative_humidity']['value']}%", "Humidity"),
(weather["pressure"]["direction"], str(weather["pressure"]["value"]), "Pressure (inHg)"),
]
image = Image.new("1", (width, height), 255)
draw = ImageDraw.Draw(image)
# draw.rectangle((0, 0, width, height), fill=1)
subpoint_width = int(width / 3.0)
subpoint_x = width - subpoint_width
draw.line((subpoint_x, 0, subpoint_x, height), fill=0)
subpoint_spacing = height / len(subpoints)
for i, (direction, value, label) in enumerate(subpoints):
y = int(i * subpoint_spacing)
if i != len(subpoints) - 1:
draw.line((subpoint_x, y + subpoint_spacing, subpoint_x + subpoint_width, y + subpoint_spacing), fill=0)
draw_datapoint(draw, direction, value, label, subpoint_x, y, subpoint_width, subpoint_spacing)
draw_datapoint(draw, weather["temperature"]["direction"], f"{weather['temperature']['value']}°", "Temperature (F)", 0, 0, subpoint_x, height)
date_padding = int(height * 0.01)
date_height = int(height * 0.04)
date_fnt = ImageFont.truetype(FONT_PATH, date_height)
now = datetime.now()
date_str = now.strftime("%A, %d. %B %Y %I:%M%p")
date_x = date_padding
date_y = height - date_height - date_padding
draw.text((date_x, date_y), date_str, font=date_fnt, fill=0)
return image
def render(width, height):
raw_weather = fetch_weather()
if raw_weather:
weather = convert_weather(raw_weather)
return draw_weather(weather, width, height)
else:
return Image.open("./fallback.png")
``` |
{
"source": "johnjoo1/scrape-linkedin",
"score": 3
} |
#### File: scrape-linkedin/pylinkedin/cli.py
```python
import click
from .scraper import LinkedinItem
from pprint import pprint
def read_text(file_path):
with open(file_path, 'r') as f:
return f.read()
@click.command()
@click.option('--url', '-u', type=str, help='Url of the profile you want to scrape')
@click.option('--attribute', '-a', type=click.Choice(LinkedinItem.attributes_key),
default=None, help='Display only a specific attribute, display everything by default')
@click.option('--file_path', '-f', type=click.Path(exists=True), default=None, help='Raw path to html of the profile you want to scrape')
def scrape(url, attribute, file_path):
"""Simple command line to scrape a profile"""
if file_path is not None:
linkedin_profile = LinkedinItem(html_string=read_text(file_path))
else:
linkedin_profile = LinkedinItem(url=url)
if attribute is not None:
        pprint(getattr(linkedin_profile, attribute))
else:
pprint(linkedin_profile.to_dict())
```
#### File: scrape-linkedin/pylinkedin/utils.py
```python
import random
import json
import time
import requests
from lxml import html
from .exceptions import ServerIpBlacklisted, BadStatusCode, ProfileNotFound
#########################################################
# Helpers
#########################################################
# Requests with rotating proxies and user-agent
def to_requests_format(ip, port):
""" Returns the proxy format for requests package """
return {'http': 'http://{}:{}'.format(ip, port),
'https': 'http://{}:{}'.format(ip, port)}
class CustomRequest(object):
"""
This is a Class based on top of the requests package providing
the ability to easily to request rotating user-agent and/or proxies
Arguments
---------
list_user_agent: a list of user-agent by default the list provided in the
beginning of the program
list_proxies : a list of dictionnary of proxies :
[{'https': 'https://172.16.31.10:8080'},{...},], default []
Returns
-------
A class with a get method that is just a customization of requests.get()
"""
def __init__(self, list_user_agent=None, rotate_ua=False, list_proxies=None):
if not list_user_agent:
self.list_user_agent = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
else:
self.list_user_agent = list_user_agent
self.list_proxies = [] if not list_proxies else list_proxies
self.rotate_ua = rotate_ua
# GoogleBot by default
self.headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
self.proxy = {}
def set_random_ua(self):
self.headers['User-Agent'] = random.choice(self.list_user_agent)
def set_random_proxy(self):
self.proxy = random.choice(self.list_proxies)
def _get(self, url, *args, **kwargs):
""" Get helpers to handle exceptions """
try:
r = requests.get(url=url, *args, **kwargs)
if r.status_code == 999:
msg_error = "Linkedin blacklists ips for unauthentified http requests, Aws, Digital Ocean"
raise ServerIpBlacklisted(msg_error)
elif r.status_code == 404:
raise ProfileNotFound("The following url :{} can not be publicely found on Linkedin (404 error)".format(url))
elif r.status_code != 200:
raise BadStatusCode("The status code of the get requests is: {}".format(r.status_code))
return r
except requests.exceptions.Timeout:
raise Exception("Request timeout")
except requests.exceptions.RequestException as e:
print(e)
def get(self, url, *args, **kwargs):
if self.rotate_ua is True:
self.set_random_ua()
if self.list_proxies:
self.set_random_proxy()
return self._get(url=url, headers=self.headers, proxies=self.proxy, *args, **kwargs)
else:
return self._get(url=url, headers=self.headers, *args, **kwargs)
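# A minimal usage sketch (hypothetical URL): rotate the user-agent on every
# request, with no proxies configured.
#
#   crawler = CustomRequest(rotate_ua=True)
#   response = crawler.get('https://www.linkedin.com/in/some-profile')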
# Read and write to a pretty json
def random_delay(static=0.5, variable=1):
time.sleep(static + variable * random.random())
def write_to_json(json_file_path, data):
with open(json_file_path, "a") as f:
f.write("{}\n".format(json.dumps(data)))
def read_from_json(json_file_path):
data = []
with open(json_file_path, "r") as f:
for line in f:
data.append(json.loads(line))
return data
# Others
def create_search_url(list_keyword):
return '%20'.join(list_keyword)
``` |
{
"source": "JohnJorgensen19/salesforce",
"score": 2
} |
#### File: salesforce/backend/client.py
```python
from salesforce import DJANGO_18_PLUS
from django.core.exceptions import ImproperlyConfigured
if DJANGO_18_PLUS:
from django.db.backends.base.client import BaseDatabaseClient
else:
from django.db.backends import BaseDatabaseClient
def complain(*args, **kwargs):
raise ImproperlyConfigured("DatabaseClient: Not yet implemented for the Salesforce backend.")
class DatabaseClient(BaseDatabaseClient):
runshell = complain
```
#### File: salesforce/backend/test_helpers.py
```python
from django.conf import settings
from salesforce import router
import uuid
from unittest import skip, skipUnless, expectedFailure
# random string for tests that accidentally run concurrent
uid = '-' + str(uuid.uuid4())[:7]
sf_alias = getattr(settings, 'SALESFORCE_DB_ALIAS', 'salesforce')
default_is_sf = router.is_sf_database(sf_alias)
current_user = settings.DATABASES[sf_alias]['USER']
def expectedFailureIf(condition):
"""Conditional 'expectedFailure' decorator for TestCase"""
if condition:
return expectedFailure
else:
return lambda func: func
``` |
{
"source": "johnjosephmorgan/espnet",
"score": 2
} |
#### File: espnet/test/test_transformer_decode.py
```python
import numpy
import pytest
import torch
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
@pytest.mark.parametrize("normalize_before", [True, False])
def test_decoder_cache(normalize_before):
adim = 4
odim = 5
decoder = Decoder(
odim=odim,
attention_dim=adim,
linear_units=3,
num_blocks=2,
normalize_before=normalize_before,
dropout_rate=0.0)
dlayer = decoder.decoders[0]
memory = torch.randn(2, 5, adim)
x = torch.randn(2, 5, adim) * 100
mask = subsequent_mask(x.shape[1]).unsqueeze(0)
prev_mask = mask[:, :-1, :-1]
decoder.eval()
with torch.no_grad():
# layer-level test
y = dlayer(x, mask, memory, None)[0]
cache = dlayer(x[:, :-1], prev_mask, memory, None)[0]
y_fast = dlayer(x, mask, memory, None, cache=cache)[0]
numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=1e-5)
# decoder-level test
x = torch.randint(0, odim, x.shape[:2])
y, _ = decoder.forward_one_step(x, mask, memory)
y_, cache = decoder.forward_one_step(x[:, :-1], prev_mask, memory, cache=decoder.init_state())
y_fast, _ = decoder.forward_one_step(x, mask, memory, cache=cache)
numpy.testing.assert_allclose(y.numpy(), y_fast.numpy(), rtol=1e-5)
if __name__ == "__main__":
# benchmark with synth dataset
from time import time
import matplotlib.pyplot as plt
adim = 4
odim = 5
decoder = Decoder(
odim=odim,
attention_dim=adim,
linear_units=3,
num_blocks=2,
dropout_rate=0.0)
dlayer = decoder.decoders[0]
xlen = 100
xs = torch.randint(0, odim, (1, xlen))
memory = torch.randn(2, 500, adim)
mask = subsequent_mask(xlen).unsqueeze(0)
result = {"cached": [], "baseline": []}
n_avg = 10
decoder.eval()
for key, value in result.items():
cache = decoder.init_state()
print(key)
for i in range(xlen):
x = xs[:, :i + 1]
m = mask[:, :i + 1, :i + 1]
start = time()
for _ in range(n_avg):
with torch.no_grad():
if key == "baseline":
cache = None
y, new_cache = decoder.forward_one_step(x, m, memory, cache=cache)
if key == "cached":
cache = new_cache
dur = (time() - start) / n_avg
value.append(dur)
plt.plot(range(xlen), value, label=key)
plt.xlabel("hypothesis length")
plt.ylabel("average time [sec]")
plt.grid()
plt.legend()
plt.savefig("benchmark.png")
``` |
{
"source": "johnjosephmorgan/kaldi",
"score": 3
} |
#### File: s5/local/prepare_data.py
```python
import sys
import os
import argparse
import subprocess
import itertools
from collections import defaultdict
from pathlib import Path
import numpy as np
import pandas as pd
class Segment:
"""
Field 5 (SAD segment label) can have one of the following values:
S : speech segment
NS : non-speech segment
NT : "button-off" segment (not transmitted according to PTT log)
RX : "button-off" segment (not transmitted according to RMS scan)
Filenames:
transceiver files: {iiiii}_{jjjjj}_{lng}_{c}.{type}
where:
{iiiii} is a 5-digit source audio identifier
{jjjjj} is a 5-digit transmission session identifier
{lng} is one of:
alv: Levantine Arabic
eng: American English
fas: Farsi (Persian)
pus: Pashto
urd: Urdu
{c} is one of: A B C D E F G H
"""
def __init__(self, fields):
self.partition = fields[0]
self.reco_id = fields[1]
self.start_time = float(fields[2])
self.end_time = float(fields[3])
self.dur = self.end_time - self.start_time
self.sad_label = fields[4]
self.sad_provenance = fields[5]
# self.speaker_id = fields[6]
# self.sid_provenance = fields[7]
# self.language_id = fields[8]
self.language_id = fields[6]
# self.lid_provenance = fields[9]
self.lid_provenance = fields[7]
# self.transcript = fields[10]
# self.transcript_provenance = fields[11]
rec_info = self.reco_id.split('_')
if len(rec_info) == 3:
src_audio_id = rec_info[0]
lng = rec_info[1]
src = rec_info[2]
self.spk_id = src_audio_id
elif len(rec_info) == 4:
src_audio_id = rec_info[0]
transmission_session_id = rec_info[1]
lng = rec_info[2]
channel = rec_info[3]
self.spk_id = src_audio_id
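        # Example (hypothetical ids), following the filename scheme documented
        # above: a reco_id of "00123_00456_alv_A" yields src_audio_id "00123",
        # transmission session "00456", language "alv", channel "A", and
        # spk_id "00123".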
def groupby(iterable, keyfunc):
"""Wrapper around ``itertools.groupby`` which sorts data first."""
iterable = sorted(iterable, key=keyfunc)
for key, group in itertools.groupby(iterable, keyfunc):
yield key, group
def read_annotations(file_path):
segments = []
with open(file_path, 'r') as f:
for line in f.readlines():
fields = line.strip().split()
segments.append(Segment(fields))
return segments
def find_audios(wav_path, file_list):
# Get all .flac file names from audio directory
wav_path = Path(wav_path)
wavs_glob = wav_path.rglob('*.flac')
wavs = [ w for w in wavs_glob ]
keys = [ Path(wf).stem for wf in wavs ]
data = {'key': keys, 'file_path': wavs}
df_wav = pd.DataFrame(data)
# Filter list to keep only those in annotations (for the specific data split)
file_names_str = "|".join(file_list)
df_wav = df_wav.loc[df_wav['key'].str.contains(file_names_str)].sort_values('key')
return df_wav
def write_wav(df_wav, output_path, bin_wav=True):
with open(output_path + '/wav.scp', 'w') as f:
for key,file_path in zip(df_wav['key'], df_wav['file_path']):
key = key.split('.')[0]
if bin_wav:
f.write('%s sox %s -t wav - remix 1 | \n' % (key, file_path))
else:
f.write('%s %s\n' % (key, file_path))
def write_output(segments, out_path, min_length):
reco_and_spk_to_segs = defaultdict(list,
{uid : list(g) for uid, g in groupby(segments, lambda x: (x.reco_id,x.spk_id))})
# write 5 places after the decimal point
rttm_str = "SPEAKER {0} 1 {1:7.5f} {2:7.5f} <NA> <NA> {3} <NA> <NA>\n"
with open(out_path+'/rttm.annotation','w') as rttm_writer:
for uid in sorted(reco_and_spk_to_segs):
segs = sorted(reco_and_spk_to_segs[uid], key=lambda x: x.start_time)
reco_id, spk_id = uid
for seg in segs:
# skip the non-speech segments
if seg.sad_label == 'NS':
continue
elif seg.sad_label == 'NT':
continue
elif seg.sad_label == 'RX':
continue
elif seg.dur >= min_length:
rttm_writer.write(rttm_str.format(reco_id, seg.start_time, seg.dur, spk_id))
else:
print('Bad segment', seg)
def make_sad_data(annotations, wav_path, output_path, min_length):
if not os.path.exists(output_path):
os.makedirs(output_path)
print ('read annotations to get segments')
segments = read_annotations(annotations)
reco_to_segs = defaultdict(list,
{reco_id : list(g) for reco_id, g in groupby(segments, lambda x: x.reco_id)})
file_list = list(reco_to_segs.keys())
print('read audios')
df_wav = find_audios(wav_path, file_list)
print('make wav.scp')
write_wav(df_wav, output_path)
print('write annotation rttm')
write_output(segments, output_path, min_length)
if __name__ == "__main__":
parser=argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
fromfile_prefix_chars='@',
description='Prepare rats_sad corpus for Speech Activity Detection')
    parser.add_argument('annotations', help="Path to annotations file")
parser.add_argument('wav_path', help="Path to source rats_sad corpus audio files directory")
parser.add_argument('output_path', help="Path to data directory")
parser.add_argument('--min-length', default=0.0001, type=float, help="minimum length of segments to create")
args=parser.parse_args()
make_sad_data(**vars(args))
``` |
{
"source": "johnj/salt",
"score": 2
} |
#### File: salt/states/win_lgpo.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import salt libs
import salt.utils.data
import salt.utils.dictdiffer
import salt.utils.json
# Import 3rd party libs
from salt.ext import six
log = logging.getLogger(__name__)
__virtualname__ = 'lgpo'
__func_alias__ = {'set_': 'set'}
def __virtual__():
'''
load this state if the win_lgpo module exists
'''
return __virtualname__ if 'lgpo.set' in __salt__ else False
def _compare_policies(new_policy, current_policy):
'''
Helper function that returns ``True`` if the policies are the same,
otherwise ``False``
'''
# Compared dicts, lists, and strings
if isinstance(new_policy, six.string_types):
return new_policy == current_policy
elif isinstance(new_policy, list):
if isinstance(current_policy, list):
return salt.utils.data.compare_lists(new_policy,
current_policy) == {}
else:
return False
elif isinstance(new_policy, dict):
if isinstance(current_policy, dict):
return salt.utils.data.compare_dicts(new_policy,
current_policy) == {}
else:
return False
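# Example: _compare_policies('Enabled', 'Enabled') -> True, and two lists
# with the same members compare equal, since salt.utils.data.compare_lists
# reports membership differences rather than ordering.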
def set_(name,
setting=None,
policy_class=None,
computer_policy=None,
user_policy=None,
cumulative_rights_assignments=True,
adml_language='en-US'):
'''
Ensure the specified policy is set
name
the name of a single policy to configure
setting
the configuration setting for the single named policy
if this argument is used the computer_policy/user_policy arguments will be ignored
policy_class
the policy class of the single named policy to configure
this can "machine", "user", or "both"
computer_policy
a dict of policyname: value pairs of a set of computer policies to configure
if this argument is used, the name/setting/policy_class arguments will be ignored
user_policy
a dict of policyname: value pairs of a set of user policies to configure
if this argument is used, the name/setting/policy_class arguments will be ignored
cumulative_rights_assignments
determine if any user right assignment policies specified will be cumulative
or explicit
adml_language
the adml language to use for AMDX policy data/display conversions
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
policy_classes = ['machine', 'computer', 'user', 'both']
if not setting and not computer_policy and not user_policy:
msg = 'At least one of the parameters setting, computer_policy, or user_policy'
msg = msg + ' must be specified.'
ret['result'] = False
ret['comment'] = msg
return ret
if setting and not policy_class:
msg = 'A single policy setting was specified but the policy_class was not specified.'
ret['result'] = False
ret['comment'] = msg
return ret
if setting and (computer_policy or user_policy):
msg = 'The setting and computer_policy/user_policy parameters are mutually exclusive. Please'
msg = msg + ' specify either a policy name and setting or a computer_policy and/or user_policy'
msg = msg + ' dict'
ret['result'] = False
ret['comment'] = msg
return ret
if policy_class and policy_class.lower() not in policy_classes:
        msg = 'The policy_class parameter must be one of the following: {0}'
        ret['result'] = False
        ret['comment'] = msg.format(', '.join(policy_classes))
return ret
if not setting:
if computer_policy and user_policy:
policy_class = 'both'
elif computer_policy:
policy_class = 'machine'
elif user_policy:
policy_class = 'user'
if computer_policy and not isinstance(computer_policy, dict):
msg = 'The computer_policy must be specified as a dict.'
ret['result'] = False
ret['comment'] = msg
return ret
if user_policy and not isinstance(user_policy, dict):
msg = 'The user_policy must be specified as a dict.'
ret['result'] = False
ret['comment'] = msg
return ret
else:
user_policy = {}
computer_policy = {}
if policy_class.lower() == 'both':
user_policy[name] = setting
computer_policy[name] = setting
elif policy_class.lower() == 'user':
user_policy[name] = setting
elif policy_class.lower() == 'machine' or policy_class.lower() == 'computer':
computer_policy[name] = setting
pol_data = {}
pol_data['user'] = {'output_section': 'User Configuration',
'requested_policy': user_policy,
'policy_lookup': {}}
pol_data['machine'] = {'output_section': 'Computer Configuration',
'requested_policy': computer_policy,
'policy_lookup': {}}
for p_class, p_data in six.iteritems(pol_data):
if p_data['requested_policy']:
for policy_name, policy_setting in six.iteritems(p_data['requested_policy']):
lookup = __salt__['lgpo.get_policy_info'](policy_name,
p_class,
adml_language=adml_language)
if lookup['policy_found']:
pol_data[p_class]['policy_lookup'][policy_name] = lookup
else:
ret['comment'] = ' '.join([ret['comment'], lookup['message']])
ret['result'] = False
if not ret['result']:
return ret
current_policy = __salt__['lgpo.get'](policy_class=policy_class,
adml_language=adml_language,
hierarchical_return=False)
log.debug('current policy == %s', current_policy)
# compare policies
policy_changes = []
for policy_section, policy_data in six.iteritems(pol_data):
pol_id = None
if policy_data and policy_data['output_section'] in current_policy:
for policy_name, policy_setting in six.iteritems(policy_data['requested_policy']):
currently_set = False
# Check Case sensitive first (faster)
if policy_name in current_policy[policy_data['output_section']]:
currently_set = True
pol_id = policy_name
# Check case insensitive
elif policy_name.lower() in (k.lower() for k in current_policy[policy_data['output_section']]):
for p_name in current_policy[policy_data['output_section']]:
if policy_name.lower() == p_name.lower():
currently_set = True
pol_id = p_name
break
# Check aliases
else:
for alias in policy_data['policy_lookup'][policy_name]['policy_aliases']:
log.debug('checking alias %s', alias)
if alias in current_policy[policy_data['output_section']]:
currently_set = True
pol_id = alias
break
if currently_set:
# compare
log.debug('need to compare %s from '
'current/requested policy', policy_name)
changes = False
requested_policy_json = salt.utils.json.dumps(policy_data['requested_policy'][policy_name], sort_keys=True).lower()
current_policy_json = salt.utils.json.dumps(current_policy[policy_data['output_section']][pol_id], sort_keys=True).lower()
requested_policy_check = salt.utils.json.loads(requested_policy_json)
current_policy_check = salt.utils.json.loads(current_policy_json)
# Are the requested and current policies identical
policies_are_equal = _compare_policies(
requested_policy_check, current_policy_check)
if not policies_are_equal:
additional_policy_comments = []
if policy_data['policy_lookup'][policy_name]['rights_assignment'] and cumulative_rights_assignments:
for user in policy_data['requested_policy'][policy_name]:
if user not in current_policy[policy_data['output_section']][pol_id]:
changes = True
else:
additional_policy_comments.append('"{0}" is already granted the right'.format(user))
else:
changes = True
if changes:
log.debug('%s current policy != requested policy',
policy_name)
log.debug(
'we compared %s to %s',
requested_policy_json, current_policy_json
)
policy_changes.append(policy_name)
else:
if additional_policy_comments:
ret['comment'] = '"{0}" is already set ({1})\n'.format(policy_name, ', '.join(additional_policy_comments))
else:
ret['comment'] = '"{0}" is already set\n'.format(policy_name) + ret['comment']
else:
log.debug('%s current setting matches '
'the requested setting', policy_name)
ret['comment'] = '"{0}" is already set\n'.format(policy_name) + ret['comment']
else:
policy_changes.append(policy_name)
log.debug('policy %s is not set, we will configure it',
policy_name)
if __opts__['test']:
if policy_changes:
ret['result'] = None
ret['comment'] = 'The following policies are set to change:\n{0}'.format(
'\n'.join(policy_changes))
else:
ret['comment'] = 'All specified policies are properly configured'
else:
if policy_changes:
_ret = __salt__['lgpo.set'](computer_policy=computer_policy,
user_policy=user_policy,
cumulative_rights_assignments=cumulative_rights_assignments,
adml_language=adml_language)
if _ret:
ret['result'] = _ret
ret['changes'] = salt.utils.dictdiffer.deep_diff(
current_policy,
__salt__['lgpo.get'](policy_class=policy_class,
adml_language=adml_language,
hierarchical_return=False))
if ret['changes']:
ret['comment'] = 'The following policies changed:\n{0}' \
''.format('\n'.join(policy_changes))
else:
ret['comment'] = 'The following policies are in the correct state:\n{0}' \
''.format('\n'.join(policy_changes))
else:
ret['result'] = False
ret['comment'] = 'Errors occurred while attempting to configure policies: {0}'.format(_ret)
return ret
```
#### File: cloud/clouds/test_linode.py
```python
from __future__ import absolute_import, print_function, unicode_literals
# Create the cloud instance name to be used throughout the tests
from tests.integration.cloud.helpers.cloud_test_base import CloudTest, TIMEOUT
class LinodeTest(CloudTest):
'''
Integration tests for the Linode cloud provider in Salt-Cloud
'''
PROVIDER = 'linode'
REQUIRED_PROVIDER_CONFIG_ITEMS = ('apikey', 'password')
def test_instance(self):
'''
Test creating an instance on Linode
'''
# check if instance with salt installed returned
ret_str = self.run_cloud('-p linode-test {0}'.format(self.instance_name), timeout=TIMEOUT)
self.assertInstanceExists(ret_str)
self.assertDestroyInstance()
```
#### File: integration/modules/test_supervisord.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import os
import time
import subprocess
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.paths import TMP
# Import salt libs
import salt.utils.path
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
@skipIf(six.PY3, 'supervisor does not work under python 3')
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
@skipIf(salt.utils.path.which('supervisorctl') is None, 'supervisord not installed')
class SupervisordModuleTest(ModuleCase):
'''
Validates the supervisorctl functions.
'''
def setUp(self):
super(SupervisordModuleTest, self).setUp()
self.venv_test_dir = os.path.join(TMP, 'supervisortests')
self.venv_dir = os.path.join(self.venv_test_dir, 'venv')
self.supervisor_sock = os.path.join(self.venv_dir, 'supervisor.sock')
if not os.path.exists(self.venv_dir):
os.makedirs(self.venv_test_dir)
self.run_function('virtualenv.create', [self.venv_dir])
self.run_function(
'pip.install', [], pkgs='supervisor', bin_env=self.venv_dir)
self.supervisord = os.path.join(self.venv_dir, 'bin', 'supervisord')
if not os.path.exists(self.supervisord):
self.skipTest('Failed to install supervisor in test virtualenv')
self.supervisor_conf = os.path.join(self.venv_dir, 'supervisor.conf')
def start_supervisord(self, autostart=True):
self.run_state(
'file.managed', name=self.supervisor_conf,
source='salt://supervisor.conf', template='jinja',
context={
'supervisor_sock': self.supervisor_sock,
'virtual_env': self.venv_dir,
'autostart': autostart
}
)
if not os.path.exists(self.supervisor_conf):
self.skipTest('failed to create supervisor config file')
self.supervisor_proc = subprocess.Popen(
[self.supervisord, '-c', self.supervisor_conf]
)
if self.supervisor_proc.poll() is not None:
self.skipTest('failed to start supervisord')
timeout = 10
while not os.path.exists(self.supervisor_sock):
if timeout == 0:
self.skipTest(
'supervisor socket not found - failed to start supervisord'
)
break
else:
time.sleep(1)
timeout -= 1
def tearDown(self):
        if hasattr(self, 'supervisor_proc') and \
                self.supervisor_proc.poll() is None:
self.run_function(
'supervisord.custom', ['shutdown'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.supervisor_proc.wait()
del self.venv_dir
del self.venv_test_dir
del self.supervisor_sock
del self.supervisord
del self.supervisor_conf
def test_start_all(self):
'''
Start all services when they are not running.
'''
self.start_supervisord(autostart=False)
ret = self.run_function(
'supervisord.start', [], conf_file=self.supervisor_conf,
bin_env=self.venv_dir)
self.assertIn('sleep_service: started', ret)
self.assertIn('sleep_service2: started', ret)
def test_start_all_already_running(self):
'''
Start all services when they are running.
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.start', [], conf_file=self.supervisor_conf,
bin_env=self.venv_dir
)
self.assertEqual(ret, '')
def test_start_one(self):
'''
Start a specific service that is not running.
'''
self.start_supervisord(autostart=False)
ret = self.run_function(
'supervisord.start', ['sleep_service'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.assertEqual(ret, 'sleep_service: started')
def test_start_one_already_running(self):
'''
Try to start a specific service that is running.
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.start', ['sleep_service'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.assertEqual(ret, 'sleep_service: ERROR (already started)')
def test_restart_all(self):
'''
Restart all services when they are running.
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.restart', [], conf_file=self.supervisor_conf,
bin_env=self.venv_dir)
self.assertIn('sleep_service: stopped', ret)
self.assertIn('sleep_service2: stopped', ret)
self.assertIn('sleep_service: started', ret)
self.assertIn('sleep_service2: started', ret)
def test_restart_all_not_running(self):
'''
Restart all services when they are not running.
'''
self.start_supervisord(autostart=False)
ret = self.run_function(
'supervisord.restart', [], conf_file=self.supervisor_conf,
bin_env=self.venv_dir)
# These 2 services might return in different orders so test separately
self.assertIn('sleep_service: started', ret)
self.assertIn('sleep_service2: started', ret)
def test_restart_one(self):
'''
Restart a specific service that is running.
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.restart', ['sleep_service'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.assertEqual(ret, 'sleep_service: stopped\nsleep_service: started')
def test_restart_one_not_running(self):
'''
Restart a specific service that is not running.
'''
self.start_supervisord(autostart=False)
ret = self.run_function(
'supervisord.restart', ['sleep_service'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.assertIn('sleep_service: ERROR (not running)', ret)
self.assertIn('sleep_service: started', ret)
def test_stop_all(self):
'''
Stop all services when they are running.
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.stop', [], conf_file=self.supervisor_conf,
bin_env=self.venv_dir)
self.assertIn('sleep_service: stopped', ret)
self.assertIn('sleep_service2: stopped', ret)
def test_stop_all_not_running(self):
'''
Stop all services when they are not running.
'''
self.start_supervisord(autostart=False)
ret = self.run_function(
'supervisord.stop', [], conf_file=self.supervisor_conf,
bin_env=self.venv_dir)
self.assertEqual(ret, '')
def test_stop_one(self):
'''
Stop a specific service that is running.
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.stop', ['sleep_service'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.assertEqual(ret, 'sleep_service: stopped')
def test_stop_one_not_running(self):
'''
Stop a specific service that is not running.
'''
self.start_supervisord(autostart=False)
ret = self.run_function(
'supervisord.stop', ['sleep_service'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.assertEqual(ret, 'sleep_service: ERROR (not running)')
def test_status_all(self):
'''
Status for all services
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.status', [], conf_file=self.supervisor_conf,
bin_env=self.venv_dir)
self.assertEqual(sorted(ret), ['sleep_service', 'sleep_service2'])
def test_status_one(self):
'''
Status for a specific service.
'''
self.start_supervisord(autostart=True)
ret = self.run_function(
'supervisord.status', ['sleep_service'],
conf_file=self.supervisor_conf, bin_env=self.venv_dir)
self.assertTrue(ret)
```
#### File: unit/modules/test_win_lgpo.py
```python
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.unit import TestCase
# Import Salt Libs
import salt.modules.win_lgpo as win_lgpo
class WinSystemTestCase(TestCase):
'''
Test cases for salt.modules.win_lgpo
'''
encoded_null = chr(0).encode('utf-16-le')
def test__encode_string(self):
'''
``_encode_string`` should return a null terminated ``utf-16-le`` encoded
string when a string value is passed
'''
encoded_value = b''.join(['Salt is awesome'.encode('utf-16-le'),
self.encoded_null])
value = win_lgpo._encode_string('Salt is awesome')
self.assertEqual(value, encoded_value)
def test__encode_string_empty_string(self):
'''
``_encode_string`` should return an encoded null when an empty string
value is passed
'''
value = win_lgpo._encode_string('')
self.assertEqual(value, self.encoded_null)
def test__encode_string_error(self):
'''
``_encode_string`` should raise an error if a non-string value is passed
'''
self.assertRaises(TypeError, win_lgpo._encode_string, [1])
test_list = ['item1', 'item2']
self.assertRaises(TypeError, win_lgpo._encode_string, [test_list])
test_dict = {'key1': 'value1', 'key2': 'value2'}
self.assertRaises(TypeError, win_lgpo._encode_string, [test_dict])
def test__encode_string_none(self):
'''
``_encode_string`` should return an encoded null when ``None`` is passed
'''
value = win_lgpo._encode_string(None)
self.assertEqual(value, self.encoded_null)
```
#### File: unit/states/test_boto_cloudtrail.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import logging
import random
import string
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.config
import salt.loader
from salt.utils.versions import LooseVersion
# pylint: disable=import-error,no-name-in-module,unused-import
from tests.unit.modules.test_boto_cloudtrail import BotoCloudTrailTestCaseMixin
# Import 3rd-party libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
try:
import boto
import boto3
from botocore.exceptions import ClientError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error,no-name-in-module,unused-import
# Import Salt Libs
import salt.states.boto_cloudtrail as boto_cloudtrail
# the boto_cloudtrail module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto3_version = '1.2.1'
log = logging.getLogger(__name__)
def _has_required_boto():
'''
Returns True/False boolean depending on if Boto is installed and correct
version.
'''
if not HAS_BOTO:
return False
elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):
return False
else:
return True
if _has_required_boto():
region = 'us-east-1'
access_key = '<KEY>'
secret_key = '<KEY>'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
not_found_error = ClientError({
'Error': {
'Code': 'TrailNotFoundException',
'Message': "Test-defined error"
}
}, 'msg')
error_content = {
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
}
trail_ret = dict(Name='testtrail',
IncludeGlobalServiceEvents=True,
KmsKeyId=None,
LogFileValidationEnabled=False,
S3BucketName='auditinfo',
TrailARN='arn:aws:cloudtrail:us-east-1:214351231622:trail/testtrail')
status_ret = dict(IsLogging=False,
LatestCloudWatchLogsDeliveryError=None,
LatestCloudWatchLogsDeliveryTime=None,
LatestDeliveryError=None,
LatestDeliveryTime=None,
LatestDigestDeliveryError=None,
LatestDigestDeliveryTime=None,
LatestNotificationError=None,
LatestNotificationTime=None,
StartLoggingTime=None,
StopLoggingTime=None)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoCloudTrailStateTestCaseBase(TestCase, LoaderModuleMockMixin):
conn = None
def setup_loader_modules(self):
ctx = {}
utils = salt.loader.utils(
self.opts,
whitelist=['boto', 'boto3', 'args', 'systemd', 'path', 'platform', 'reg'],
context=ctx)
serializers = salt.loader.serializers(self.opts)
self.funcs = funcs = salt.loader.minion_mods(self.opts, context=ctx, utils=utils, whitelist=['boto_cloudtrail'])
self.salt_states = salt.loader.states(opts=self.opts, functions=funcs, utils=utils, whitelist=['boto_cloudtrail'],
serializers=serializers)
return {
boto_cloudtrail: {
'__opts__': self.opts,
'__salt__': funcs,
'__utils__': utils,
'__states__': self.salt_states,
'__serializers__': serializers,
}
}
@classmethod
def setUpClass(cls):
cls.opts = salt.config.DEFAULT_MINION_OPTS.copy()
cls.opts['grains'] = salt.loader.grains(cls.opts)
@classmethod
def tearDownClass(cls):
del cls.opts
# Set up MagicMock to replace the boto3 session
def setUp(self):
self.addCleanup(delattr, self, 'funcs')
self.addCleanup(delattr, self, 'salt_states')
# Set up MagicMock to replace the boto3 session
# connections keep getting cached from prior tests, can't find the
# correct context object to clear it. So randomize the cache key, to prevent any
# cache hits
conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))
self.patcher = patch('boto3.session.Session')
self.addCleanup(self.patcher.stop)
self.addCleanup(delattr, self, 'patcher')
mock_session = self.patcher.start()
session_instance = mock_session.return_value
self.conn = MagicMock()
self.addCleanup(delattr, self, 'conn')
session_instance.client.return_value = self.conn
class BotoCloudTrailTestCase(BotoCloudTrailStateTestCaseBase, BotoCloudTrailTestCaseMixin):
'''
TestCase for salt.modules.boto_cloudtrail state.module
'''
def test_present_when_trail_does_not_exist(self):
'''
Tests present on a trail that does not exist.
'''
self.conn.get_trail_status.side_effect = [not_found_error, status_ret]
self.conn.create_trail.return_value = trail_ret
self.conn.describe_trails.return_value = {'trailList': [trail_ret]}
with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
result = self.salt_states['boto_cloudtrail.present'](
'trail present',
Name=trail_ret['Name'],
S3BucketName=trail_ret['S3BucketName'])
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['trail']['Name'],
trail_ret['Name'])
def test_present_when_trail_exists(self):
self.conn.get_trail_status.return_value = status_ret
self.conn.create_trail.return_value = trail_ret
self.conn.describe_trails.return_value = {'trailList': [trail_ret]}
with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
result = self.salt_states['boto_cloudtrail.present'](
'trail present',
Name=trail_ret['Name'],
S3BucketName=trail_ret['S3BucketName'],
LoggingEnabled=False)
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_present_with_failure(self):
self.conn.get_trail_status.side_effect = [not_found_error, status_ret]
self.conn.create_trail.side_effect = ClientError(error_content, 'create_trail')
with patch.dict(self.funcs, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
result = self.salt_states['boto_cloudtrail.present'](
'trail present',
Name=trail_ret['Name'],
S3BucketName=trail_ret['S3BucketName'],
LoggingEnabled=False)
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
def test_absent_when_trail_does_not_exist(self):
'''
Tests absent on a trail that does not exist.
'''
self.conn.get_trail_status.side_effect = not_found_error
result = self.salt_states['boto_cloudtrail.absent']('test', 'mytrail')
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_absent_when_trail_exists(self):
self.conn.get_trail_status.return_value = status_ret
result = self.salt_states['boto_cloudtrail.absent']('test', trail_ret['Name'])
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['trail'], None)
def test_absent_with_failure(self):
self.conn.get_trail_status.return_value = status_ret
self.conn.delete_trail.side_effect = ClientError(error_content, 'delete_trail')
result = self.salt_states['boto_cloudtrail.absent']('test', trail_ret['Name'])
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
``` |
{
"source": "john-judge/FitTest",
"score": 3
} |
#### File: FitTest/source/dynamic_fit_unit.py
```python
from source.field import Field
class Position:
def __init__(self, position_name, default_value, bounds):
self.name = position_name
self.x = Field(position_name + "_x", default_value=default_value[0], bounds=bounds[0])
self.y = Field(position_name + "_y", default_value=default_value[1], bounds=bounds[1])
class DynamicFitUnit:
def __init__(self, saddle_default, handlebars_default, saddle_bounds, handlebars_bounds, step_size):
self.saddle = Position("Saddle", saddle_default, saddle_bounds)
self.handlebars = Position("Handlebars", handlebars_default, handlebars_bounds)
self.step_size = step_size
def get_fields(self):
return [self.saddle.x, self.saddle.y, self.handlebars.x, self.handlebars.y]
# calculate stack
def get_stack(self):
return self.handlebars.y
# calculate reach
def get_reach(self):
return self.handlebars.x
``` |
{
"source": "john-judge/qiskit-terra",
"score": 3
} |
#### File: passes/optimization/optimize_1q_decomposition.py
```python
import copy
import logging
import warnings
import numpy as np
from qiskit.circuit.library.standard_gates import U3Gate
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.quantum_info.synthesis import one_qubit_decompose
from qiskit.converters import circuit_to_dag
logger = logging.getLogger(__name__)
class Optimize1qGatesDecomposition(TransformationPass):
"""Optimize chains of single-qubit gates by combining them into a single gate."""
def __init__(self, basis=None):
"""Optimize1qGatesDecomposition initializer.
Args:
basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. For the effects
of this pass, the basis is the set intersection between the `basis` parameter
and the Euler basis.
"""
super().__init__()
self._target_basis = basis
self._decomposers = None
if basis:
self._decomposers = []
basis_set = set(basis)
euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES
for euler_basis_name, gates in euler_basis_gates.items():
if set(gates).issubset(basis_set):
basis_copy = copy.copy(self._decomposers)
for base in basis_copy:
# check if gates are a superset of another basis
# and if so, remove that basis
if set(euler_basis_gates[base.basis]).issubset(set(gates)):
self._decomposers.remove(base)
# check if the gates are a subset of another basis
elif set(gates).issubset(set(euler_basis_gates[base.basis])):
break
# if not a subset, add it to the list
else:
self._decomposers.append(
one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)
)
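    # Example (hypothetical basis): with basis=['u3', 'cx'] only the U3 Euler
    # decomposer survives, since its single gate 'u3' is a subset of the
    # target basis; richer bases may keep several decomposers after the
    # superset/subset pruning above.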
def run(self, dag):
"""Run the Optimize1qGatesDecomposition pass on `dag`.
Args:
dag (DAGCircuit): the DAG to be optimized.
Returns:
DAGCircuit: the optimized DAG.
"""
if not self._decomposers:
logger.info("Skipping pass because no basis is set")
return dag
runs = dag.collect_1q_runs()
for run in runs:
# SPECIAL CASE: Don't bother to optimize single U3 gates which are in the basis set.
# The U3 decomposer is only going to emit a sequence of length 1 anyhow.
if "u3" in self._target_basis and len(run) == 1 and isinstance(run[0].op, U3Gate):
# Toss U3 gates equivalent to the identity; there we get off easy.
if np.allclose(run[0].op.to_matrix(), np.eye(2), 1e-15, 0):
dag.remove_op_node(run[0])
continue
# We might rewrite into lower `u`s if they're available.
if "u2" not in self._target_basis and "u1" not in self._target_basis:
continue
new_circs = []
operator = run[0].op.to_matrix()
for gate in run[1:]:
operator = gate.op.to_matrix().dot(operator)
for decomposer in self._decomposers:
new_circs.append(decomposer._decompose(operator))
if new_circs:
new_circ = min(new_circs, key=len)
# do we even have calibrations?
has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0
# is this run all in the target set and also uncalibrated?
rewriteable_and_in_basis_p = all(
g.name in self._target_basis
and (not has_cals_p or not dag.has_calibration_for(g))
for g in run
)
# does this run have uncalibrated gates?
uncalibrated_p = not has_cals_p or any(not dag.has_calibration_for(g) for g in run)
# does this run have gates not in the image of ._decomposers _and_ uncalibrated?
uncalibrated_and_not_basis_p = any(
g.name not in self._target_basis
and (not has_cals_p or not dag.has_calibration_for(g))
for g in run
)
if rewriteable_and_in_basis_p and len(run) < len(new_circ):
# NOTE: This is short-circuited on calibrated gates, which we're timid about
# reducing.
warnings.warn(
f"Resynthesized {run} and got {new_circ}, "
f"but the original was native and the new value is longer. This "
f"indicates an efficiency bug in synthesis. Please report it by "
f"opening an issue here: "
f"https://github.com/Qiskit/qiskit-terra/issues/new/choose",
stacklevel=2,
)
# if we're outside of the basis set, we're obligated to logically decompose.
# if we're outside of the set of gates for which we have physical definitions,
# then we _try_ to decompose, using the results if we see improvement.
# NOTE: Here we use circuit length as a weak proxy for "improvement"; in reality,
# we care about something more like fidelity at runtime, which would mean,
# e.g., a preference for `RZGate`s over `RXGate`s. In fact, users sometimes
# express a preference for a "canonical form" of a circuit, which may come in
# the form of some parameter values, also not visible at the level of circuit
# length. Since we don't have a framework for the caller to programmatically
# express what they want here, we include some special casing for particular
# gates which we've promised to normalize --- but this is fragile and should
# ultimately be done away with.
if (
uncalibrated_and_not_basis_p
or (uncalibrated_p and len(run) > len(new_circ))
or isinstance(run[0].op, U3Gate)
):
new_dag = circuit_to_dag(new_circ)
dag.substitute_node_with_dag(run[0], new_dag)
# Delete the other nodes in the run
for current_node in run[1:]:
dag.remove_op_node(current_node)
return dag
``` |
{
"source": "john-judge/ZDA_Explorer",
"score": 3
} |
#### File: ZDA_Explorer/lib/snr.py
```python
import numpy as np
from yellowbrick.cluster import KElbowVisualizer
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
class AnalyzerSNR:
def __init__(self, data):
self.data = data
self.snr = None
self.k_clusters = None
self.snr_cutoff = None
self.clustered = None
self.cluster_indices_by_snr = None
def get_snr(self, plot=False):
""" Given a single trial, compute the SNR image for this trial """
self.snr = np.mean(self.data, axis=2) / np.std(self.data, axis=2)
if plot:
plt.imshow(self.snr, cmap='jet', interpolation='nearest')
plt.show()
return self.snr
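        # Example (hypothetical shape): data of shape (80, 80, 200), i.e. an
        # 80x80 image over 200 frames, yields an (80, 80) SNR image.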
def cluster_on_snr(self, k_clusters=3, snr_cutoff=0.7, plot=False):
""" Perform 1-D clustering on SNR after masking out the pixels
whose snr is below snr_cutoff (a percentile in range [0,1]) """
self.k_clusters = k_clusters
self.snr_cutoff = np.percentile(self.snr, snr_cutoff * 100)
if self.snr is None:
raise ValueError("No SNR data found.")
        mask = (self.snr >= self.snr_cutoff).astype(float)
if plot:
# masked image: reasonability check
plt.imshow(self.snr * mask, cmap='jet', interpolation='nearest')
plt.show()
# +1 for the masked 0's
        km = KMeans(n_clusters=k_clusters + 1).fit(self.snr.reshape(-1, 1))
        self.clustered = np.array(km.labels_).reshape(self.snr.shape) + 1
        self.clustered = self.clustered.astype(int)
if plot:
plt.imshow(self.clustered * mask, cmap='viridis', interpolation='nearest')
plt.show()
return self.clustered
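        # A minimal usage sketch (hypothetical data array):
        #   analyzer = AnalyzerSNR(data)
        #   analyzer.get_snr()
        #   analyzer.cluster_on_snr(k_clusters=3, snr_cutoff=0.7)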
def get_average_snr_by_cluster(self):
""" Returns a list of average SNR values by cluster, where
the float at index i is the average SNR for cluster i+1 """
if self.k_clusters is None:
raise ValueError("must call method cluster_on_snr() before getting average SNRs for clusters")
return [np.average(self.snr[np.where(self.clustered==i)[0]])
for i in range(1, self.k_clusters+2)]
def get_kth_cluster(self, k, plot=False):
"""
Returns iterable of indexes of pixels in the kth cluster
(k=0,...,k_clusters)
"""
if self.k_clusters is None:
raise ValueError("must call method cluster_on_snr() before getting kth cluster")
if k > self.k_clusters:
raise ValueError("k is greater than number of clusters")
# sort clusters by SNR (which can differ from cluster label)
if self.cluster_indices_by_snr is None:
# SNR by cluster
avg_snr_by_cluster = self.get_average_snr_by_cluster()
self.cluster_indices_by_snr = np.argsort(np.array(avg_snr_by_cluster)) + 1
k_selection = self.cluster_indices_by_snr[-1-k]
        mask = (self.snr >= self.snr_cutoff).astype(float)
# Select the pixels in this SNR cluster, above SNR cutoff
arg_selection = np.stack(np.where(self.clustered * mask == k_selection))
if plot:
for i in range(arg_selection.shape[1]):
x_max = arg_selection[0][i]
y_max = arg_selection[1][i]
mask[x_max, y_max] *= 3 # highlight
plt.imshow(self.clustered * mask, cmap='jet', interpolation='nearest')
plt.show()
return arg_selection
def get_silhouette_score(self, plot_elbow=True):
""" Return silhouette score and plot Elbow plot for this K-means clustering """
raise NotImplementedError
print("Silhouette score:", silhouette_score(features, label))
# Instantiate a scikit-learn K-Means model
model = KMeans(random_state=0)
# Instantiate the KElbowVisualizer with the number of clusters and the metric
visualizer = KElbowVisualizer(model, k=(2,6), metric='silhouette', timings=False)
# Fit the data and visualize
visualizer.fit(features)
visualizer.poof()
``` |
{
"source": "johnjung/weather",
"score": 4
} |
#### File: weather/speculative_weather_report/classes.py
```python
import astral
import csv
import datetime
import math
import os
import random
import re
import statistics
def load_historical_data_headers():
"""Load the headers for historical weather data."""
with open(os.path.dirname(os.path.realpath(__file__)) + '/../data/1711054.csv') as f:
reader = csv.reader(f)
return next(reader, None)
def load_historical_data():
"""Load historical weather data."""
with open(os.path.dirname(os.path.realpath(__file__)) + '/../data/1711054.csv') as f:
reader = csv.reader(f)
next(reader, None)
historical_data = []
for row in reader:
historical_data.append(row)
return historical_data
class Forecast:
"""Contains the display elements of a speculative weather forecast.
Notes:
This display creates a fictional future weather forecast from historical
weather data. It does this using three different dates and two different
places. To start, the current date and location let us calculate the day
of week, sunrise time, sunset time, and other events like high tides or
eclipses.
An alternate location allows the display to show historical weather data
from a place with different climate. For example, on May 1st of this
year in Chicago, Illinois, the forecast might display weather data from
May 1st, 2010 in Austin, Texas. Using historical data gives us realistic
looking data without having to do any weather modeling.
Finally, the forecast displays a year in the future where the day of
week (e.g. Tuesday) matches the current day of the week. The forecast
also includes a news feed and advertisements, to explore the context
around the weather.
This object is designed to be instantiated once for each weather
forecast display.
"""
def __init__(self, dt):
"""constructor.
Creates a new Forecast object, and instantiates Weather objects to
display the current weather along with daily and hourly forecasts, as
well objects to display the news and advertisements.
Args:
dt (datetime.datetime): the current datetime, i.e.
datetime.datetime.now()
"""
self.dt = dt
self.astral = astral.Astral()
self.astral_city = 'Chicago'
self.current_weather = CurrentWeather(dt)
self.daily = []
d = self.current_weather.dt.replace(hour=0, minute=0, second=0)
for i in range(1, 7):
self.daily.append(
DailyWeather(
d + datetime.timedelta(days=i)
)
)
self.hourly = []
d = self.current_weather.dt.replace(minute=0, second=0)
for i in range(1, 25):
self.hourly.append(
HourlyWeather(d + datetime.timedelta(hours=i))
)
self.news = News()
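        # A minimal usage sketch: build a forecast for "now" and pass the
        # resulting dict (see asdict below) to a template.
        #
        #   forecast = Forecast(datetime.datetime.now())
        #   context = forecast.asdict()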
def moon_phase(self):
"""Get the current moon phase.
Returns:
str: One of four possible moon phases.
"""
return (
'New Moon',
'First Quarter',
'Full Moon',
'Last Quarter',
'New Moon'
)[int(float(self.astral.moon_phase(date=self.dt) / 7.0))]
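    # Example: an astral moon phase of 14 maps to index int(14 / 7.0) == 2,
    # i.e. 'Full Moon'.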
def next_sunrise(self):
"""sunrise.
Returns:
datetime.datetime: the time of the next sunrise.
"""
        sun = self.astral[self.astral_city].sun(date=self.dt)
        if sun['sunrise'] > self.dt:
            return sun['sunrise']
        else:
            sun = self.astral[self.astral_city].sun(
                date=self.dt + datetime.timedelta(days=1)
            )
            return sun['sunrise']
def next_sunset(self):
"""sunset.
Returns:
datetime.datetime: the time of the next sunset.
"""
        sun = self.astral[self.astral_city].sun(date=self.dt)
        if sun['sunset'] > self.dt:
            return sun['sunset']
        else:
            sun = self.astral[self.astral_city].sun(
                date=self.dt + datetime.timedelta(days=1)
            )
            return sun['sunset']
def next_high_tide(self):
"""high tide.
Returns:
datetime.datetime: the time of the next high tide.
"""
raise NotImplementedError
def next_low_tide(self):
"""low tide.
Returns:
datetime.datetime: the time of the next low tide.
"""
raise NotImplementedError
def next_partial_solar_eclipse(self):
"""partial solar eclipse.
Returns:
datetime.datetime: the time of the next partial solar eclipse.
"""
raise NotImplementedError
def next_total_solar_eclipse(self):
"""total solar eclipse.
Returns:
datetime.datetime: the time of the next total solar eclipse.
"""
raise NotImplementedError
def next_transit_of_mercury(self):
"""transit of mercury.
Returns:
datetime.datetime: the time of the next transit of Mercury.
"""
raise NotImplementedError
def next_transit_of_venus(self):
"""transit of Venus.
Returns:
datetime.datetime: the time of the next transit of Venus.
"""
raise NotImplementedError
def asdict(self):
"""get the forecast as a dict, for display in Jinja templates.
Returns:
dict: a dictionary containing the current weather, daily and hourly
weather forecasts and astronomical events, news and advertisements.
"""
return {
'current_weather': self.current_weather.asdict(),
'daily': [d.asdict() for d in self.daily],
'hourly': [h.asdict() for h in self.hourly],
'news': self.news.asdict()
}
class Weather:
historical_headers = load_historical_data_headers()
historical_data = load_historical_data()
def __init__(self, dt):
"""Constructor
Args:
dt (datetime.datetime)
"""
self.dt = dt
def as_of(self):
"""Get the most recent reading time from historical data.
Returns:
            str: the time of the most recent weather data reading, e.g.
            '2:30PM'.
"""
return datetime.datetime.strptime(
self._get_historical('DATE'),
'%Y-%m-%dT%H:%M:%S'
).strftime('%-I:%M%p')
def carbon_count(self, temperature_increase):
"""Get an estimated carbon count for a given temperature increase.
Args:
temperature_increase (int): in Fahrenheit, starting from 0.
Notes:
This is a very rough estimate based on the following document:
http://dels.nas.edu/resources/static-assets/
materials-based-on-reports/booklets/warming_world_final.pdf
Returns:
int: the carbon count.
"""
carbon_counts = (410, 480, 550, 630, 700, 800, 900, 1000, 1200, 1400)
return carbon_counts[temperature_increase]
def dew_point(self):
"""Get the dew point.
Returns:
int: the dew point temperature in Fahrenheit.
"""
return int(self._get_historical('HourlyDewPointTemperature'))
def heat_index(self):
"""Calculate the heat index: see
https://en.wikipedia.org/wiki/Heat_index.
Returns:
int: a heat index temperature in Fahrenheit.
"""
t = self.temperature()
if t < 80:
return None
r = self.relative_humidity()
if r < 40:
return None
return int(
sum(
[-42.379,
2.04901523 * t,
10.14333127 * r,
-0.22475541 * t * r,
-6.83783e-03 * math.pow(t, 2),
-5.481717e-02 * math.pow(r, 2),
1.22874e-03 * math.pow(t, 2) * r,
8.5282e-04 * t * math.pow(r, 2),
-1.99e-06 * math.pow(t, 2) * math.pow(r, 2)]
)
)
def human_readable_datetime(self):
"""Get a human readable datetime string for this object.
Raises:
NotImplementedError: Implement this method in subclasses.
"""
raise NotImplementedError
def relative_humidity(self):
"""Get the relative humidity.
Returns:
int: the relative humidity from 0 to 100.
"""
return int(self._get_historical('HourlyRelativeHumidity'))
def sky_conditions(self):
"""Get sky conditions.
Notes:
These are recorded in the data in a string like:
FEW:02 70 SCT:04 200 BKN:07 250
            Although this data field is often blank, it frequently contains
            one or more data chunks in the following format:
            [A-Z]{3}:[0-9]{2} [0-9]{2,3}
            The three letter sequence indicates cloud cover according to the
            dict below. The two digit sequence immediately following indicates
            the coverage of a layer in oktas (i.e. eighths) of sky covered. The
            final two or three digit sequence describes the height of the cloud
            layer, in hundreds of feet: e.g., 50 = 5000 feet. It is also possible for
this string to include data that indicates that it was not possible
to observe the sky because of obscuring phenomena like smoke or fog.
The last three-character chunk provides the best summary of current
sky conditions.
Returns:
str: current sky conditions, e.g. 'clear sky'
"""
conditions = {
'CLR': 'clear sky',
'FEW': 'few clouds',
'SCT': 'scattered clouds',
'BKN': 'broken clouds',
'OVC': 'overcast'
}
matches = re.search(
'([A-Z]{3}).*$',
self._get_historical('HourlySkyConditions')
)
try:
return conditions[matches.group(1)]
except AttributeError:
return self._get_historical('HourlySkyConditions')
def temperature(self):
"""Get the dry bulb temperature ("the temperature")
Returns:
int: the temperature in Fahrenheit.
"""
return int(self._get_historical('HourlyDryBulbTemperature'))
def temperature_min(self):
"""Get the minimum daily temperature.
Returns:
int: the temperature in Fahrenheit.
"""
return self._temperature_summary('min')
def temperature_mean(self):
"""Get the mean daily temperature.
Returns:
int: the temperature in Fahrenheit.
"""
return self._temperature_summary('mean')
def temperature_max(self):
"""Get the maximum daily temperature.
Returns:
int: the temperature in Fahrenheit.
"""
return self._temperature_summary('max')
def visibility(self):
"""Get the visibility.
Returns:
int: visibility in miles.
"""
return int(float(self._get_historical('HourlyVisibility')))
def weather_type(self):
"""Get the type of weather.
Returns:
str: a description of the current weather, e.g. 'fog'
"""
weather_strings = {
'FG': 'fog',
'TS': 'thunder',
'PL': 'sleet',
'GR': 'hail',
'GL': 'ice sheeting',
'DU': 'dust',
'HZ': 'haze',
            'BLSN': 'drifting snow',
'FC': 'funnel cloud',
'WIND': 'high winds',
'BLPY': 'blowing spray',
'BR': 'mist',
'DZ': 'drizzle',
'FZDZ': 'freezing drizzle',
'RA': 'rain',
'FZRA': 'freezing rain',
'SN': 'snow',
'UP': 'precipitation',
'MIFG': 'ground fog',
'FZFG': 'freezing fog'
}
types = set()
present_weather = self._get_historical('HourlyPresentWeatherType')
for p in present_weather.split('|'):
m = re.search('[A-Z]+', p)
if m:
types.add(weather_strings[m.group(0)])
return ', '.join(list(types))
def wind_direction_and_speed(self):
"""Get the wind direction and speed.
Returns:
str: e.g., '13mph SW'
"""
d = int(self._get_historical('HourlyWindDirection'))
if d == 0:
return 'still'
directions = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S',
'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', 'N')
i = int(round(float(d) / 22.5))
direction = directions[i % 16]
s = self._get_historical('HourlyWindSpeed')
return '{}mph {}'.format(s, direction)
def _get_closest_past_index(self, dt=None):
"""Find the closest past index represented in historical data for a
given date/time string.
Args:
dt (datetime.datetime): A datetime to look up other than self.dt.
Returns:
int: an index (record number) in the historical data.
"""
if not dt:
dt = self.dt
dt_string = dt.replace(year=2010).strftime('%Y-%m-%dT%H:%M:%S')
f = self.historical_headers.index('DATE')
i = len(self.historical_data) - 1
while i >= 0:
if self.historical_data[i][f] < dt_string:
break
i -= 1
return i
def _get_historical(self, field):
"""Get a single historical data point. If the current data point is
blank, the function searches backwards for the last available data.
Args:
field (str): the field name.
Returns:
str: the data.
"""
i = self._get_closest_past_index()
f = self.historical_headers.index(field)
while i > 0:
if self.historical_data[i][f]:
return self.historical_data[i][f]
i -= 1
return ''
def _get_historical_daily_range(self, field):
"""Get a range of historical data points.
Args:
field (str): the field name.
Returns:
str: the data.
"""
start_of_day = self.dt.replace(hour=0, minute=0, second=0)
i1 = self._get_closest_past_index(start_of_day)
i2 = self._get_closest_past_index(
start_of_day + datetime.timedelta(days=1)
)
f = self.historical_headers.index(field)
return [self.historical_data[i][f] for i in range(i1, i2 + 1)]
def _temperature_summary(self, summary_type):
"""Get a temperature summary for the day.
Args:
summary_type (str): one of 'min', 'max', 'mean'
Returns:
int: the temperature summary in Fahrenheit.
"""
temperatures = map(
int,
filter(
bool,
self._get_historical_daily_range('HourlyDryBulbTemperature')
)
)
if summary_type == 'min':
return min(temperatures)
elif summary_type == 'max':
return max(temperatures)
elif summary_type == 'mean':
return int(statistics.mean(temperatures))
else:
raise ValueError
def future_year_with_same_weekday(self, min_future_year):
"""Get a future year with the same weekday (e.g. "Tuesday") as self.dt.
Args:
min_future_year (int): year to start checking for matching weekdays.
Returns:
int: the future year.
"""
year = min_future_year
while True:
if self.dt.replace(year=year).weekday() == self.dt.weekday():
return year
year += 1
def asdict(self):
"""Get this object as a dict, for rendering in Jinja templates.
Returns:
dict: template data.
"""
return {
'as_of': self.as_of(),
'human_readable_datetime': self.human_readable_datetime(),
'simulation_year': self.future_year_with_same_weekday(2060),
'carbon_count': self.carbon_count(0),
'dew_point': self.dew_point(),
'heat_index': self.heat_index(),
'relative_humidity': self.relative_humidity(),
'sky_conditions': self.sky_conditions(),
'temperature': self.temperature(),
'temperature_min': self.temperature_min(),
'temperature_mean': self.temperature_mean(),
'temperature_max': self.temperature_max(),
'visibility': self.visibility(),
'weather_type': self.weather_type(),
'wind_direction_and_speed': self.wind_direction_and_speed()
}
class CurrentWeather(Weather):
def human_readable_datetime(self):
"""Get a human readable date and time for the current weather, e.g.
'Tuesday, May 1.'
Returns:
str: the current time.
"""
return self.dt.strftime('%A, %B %-d')
class DailyWeather(Weather):
def human_readable_datetime(self):
"""Get a human readable date and time for each cell in a daily weather
forecast, e.g. 'Tuesday'
Returns:
str: the weekday.
"""
return self.dt.strftime('%a')
def asdict(self):
"""Get information for an daily weather forecast cell.
Returns:
dict: forecast data.
"""
return {
'as_of': self.as_of(),
'dt': self.dt,
'human_readable_datetime': self.human_readable_datetime(),
'temperature_min': self.temperature_min(),
'temperature_mean': self.temperature_mean(),
'temperature_max': self.temperature_max(),
}
class HourlyWeather(Weather):
def human_readable_datetime(self):
"""Get a human readable date and time for each cell in an hourly
forecast, e.g. '2PM'
Returns:
str: the time.
"""
return self.dt.strftime('%-I%p')
def asdict(self):
"""Get information for an hourly weather forecast cell.
Returns:
dict: forecast data.
"""
return {
'as_of': self.as_of(),
'dt': self.dt,
'human_readable_datetime': self.human_readable_datetime(),
'temperature': self.temperature(),
}
class Sunrise(Weather):
def human_readable_datetime(self):
raise NotImplementedError
class Sunset(Weather):
def human_readable_datetime(self):
raise NotImplementedError
class News:
def get_advertisement(self):
ads = (("Skirts, blouses and accessories. Up to 45% off. Shop Now.", "Noracora"),
("Your future's looking up with our new student loan. Competitive interest rates. Multiple repayment options. No origination fee. Get started now.", "<NAME>"),
("Shop non-traditional jewelry designs.", "Brilliant Earth"),
("Khakis for all seasons. All season tech.", "Dockers"))
return random.choice(ads)[0]
def get_news(self):
# short headline, source note.
news = (("Troubling Trend Since 2020s for Great Lakes. Superior, Huron and Erie have seen the greatest declines.",
"weather.com 2019/04/28 (modified date)"),
("Freeze in May? Here's Who Is Likely to See One.",
"weather.com 2019/04/28 (verbatim)"),
("Incoming Severe Threat This Week",
"weather.com 2019/04/28 (verbatim)"),
("Winter Storm Central: Blizzard Conditions Likely; Travel Nearly Impossible",
"weather.com 2019/04/28 (verbatim)"),
("Allergy: Tips for An Allergy-Free Spring Road Trip",
"weather.com 2019/04/28 (verbatim)"),
("Allergy: Worst Plants for Spring Allergies",
"weather.com 2019/04/28 (verbatim)"),
("Tornado Safety and Preparedness: Safest Places to Wait Out A Tornado",
"weather.com 2019/04/28 (verbatim)"),
("Allergy: Spring Allergy Capitals: Which City Ranks the Worst",
"weather.com 2019/04/28 (verbatim)"))
# short headline, story body, source URL.
# stories are lightly modified, mostly changing the names of places.
news = [("""Two Killed as Tornado Strikes Stillwater, Oklahoma;
Apparent Tornado Leaves Swath of Damage in Norman.""",
"""A tornado tore through Stillwater, Oklahoma last night,
touching down at approximately 10:20pm on the leading edge
of a squall line of severe thunderstorms. The Western Value
Inn at 51 and 177 was destroyed. Image from the scene
showed emergency crews sifting through rubble after part of
the motel's second story collapsed into a pile of debris
strewn about the first floor and parking lot. Two deaths
have been confirmed by county emergency management. Another
apparent tornado produced damage in the Norman area after
midnight.""",
"https://weather.com/news/news/2019-05-26-oklahoma-tornadoes-el-reno-sapulpa"),
("""Powerful 8.0 Magnitude Earthquake Strikes north-central
Bolivia.""",
"""An 8.0 magnitude earthquake shook north-central Bolivia
                 yesterday morning, according to the U.S. Geological Survey.
There were no immediate reports of deaths or major damage.
                 The quake, at a moderate depth of 71 miles, struck at 2:41
                 a.m., 50 miles southeast of Sorata. The mayor of Sorata
                 told local radio
station RPP that the quake was felt very strongly there,
but it was not possible to move around the town because of
the darkness. A number of old houses collapsed, and the
electricity was cut, according to the National Emergency
Operations Center.""",
"https://weather.com/news/news/2019-05-26-earthquake-north-central-peru-may"),
("",
"""Our primary journalistic mission is to report on breaking
weather news and the environment. This story does not
necessarily represent the position of our parent company.""",
""),
("""Has Government Turned Us Into a Nation of Makers and
Takers?""",
"""In a recent article produced by the Tax Policy Center, tax
analyst <NAME> reports that 43% of Americans
won't pay federal income taxes this year. Roberts, a former
deputy assistant director for the Congressional Budget
Office, also states that "many commentators" have twisted
such statistics to suggest "that nearly half of all
households paid no tax at all when, in fact, nearly
everyone pays something." Roberts is correct that the
federal income tax is just one of many taxes, and hence, it
is misleading to ignore other taxes when discussing makers
and takers. However, he ignores another crucial aspect of
this issue, which is that the person who pays $1,000 in
taxes and receives $10,000 in government benefits is a
taker on the net. Even though this person pays
"something", as Roberts notes, he receives far more from
the government than he pays in taxes.""",
"https://www.justfactsdaily.com/has-government-turned-us-into-a-nation-of-makers-and-takers/"),
("",
"""The Intergovernmental Panel on Climate Change (IPCC) is
"the leading international body for the assessment of
climate change," and its "work serves as the key basis
for climate policy decisions made by governments throughout
the world. The IPCC states: "To determine whether current
warming is unusual, it is essential to place it in the
context of longer-term climate variability." The first
IPCC report stated that "some of the global warming since
1850 could be a recovery from the Little Ice Age rather
than a direct result of human activities. So it is
important to recognize the natural variations of climate
are appreciable and will modulate any future changes
induced by man." The second IPCC report stated that "data
prior to 1400 are too sparse to allow the reliable estimate
of global mean temperature" and show a graph of
proxy-derived temperatures for Earth's Northern Hemisphere
from 1400 onward." The third IPCC report stated that the
latest proxy studies indicate "the conventional terms of
'Little Ice Age' and 'Medieval Warm Period' appear to have
limited utility in describing...global mean temperature
change in the past centuries.""",
"https://www.justfacts.com/globalwarming.asp")]
random.shuffle(news)
output = []
for n in news:
output.append(re.sub(r'\s+', ' ', n[1]).strip())
return output
def asdict(self):
return {}
```
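As a quick illustration of how the pieces above fit together, a hypothetical driver might look like the sketch below; it assumes the historical NOAA CSV consumed by `load_historical_data()` is present on disk, and the datetime and printed values are illustrative only.
```python
import datetime
# Hypothetical driver; the date and the example outputs are illustrative.
f = Forecast(datetime.datetime(2019, 5, 1, 14, 30))
print(f.moon_phase())                                    # e.g. 'First Quarter'
print(f.current_weather.temperature())                   # e.g. 72
print(f.current_weather.future_year_with_same_weekday(2060))  # e.g. 2069
```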
#### File: johnjung/weather/web.py
```python
import datetime
from speculative_weather_report import CurrentWeather, DailyWeather, \
Forecast, HourlyWeather, News, \
Sunrise, Sunset, Weather
from flask import Flask, render_template
app = Flask(__name__)
app.debug = True
@app.route('/', methods=['GET'])
def index():
f = Forecast(datetime.datetime.now())
return render_template('weather.html', **f.asdict())
``` |
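One way to serve this page locally is to give web.py a standard entry point, as sketched below; this is an assumption, since the repository may rely on `flask run` with `FLASK_APP=web.py` instead.
```python
# Hypothetical addition at the bottom of web.py.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000)
```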
{
"source": "johnjz30/Phy-Net",
"score": 2
} |
#### File: Phy-Net/input/lat_inputs.py
```python
import os
import numpy as np
import tensorflow as tf
import systems.fluid_createTFRecords as fluid_createTFRecords
import systems.em_createTFRecords as em_createTFRecords
from glob import glob as glb
from tqdm import *
FLAGS = tf.app.flags.FLAGS
# Constants describing the input pipeline.
tf.app.flags.DEFINE_integer('min_queue_examples', 400,
""" min examples to queue up""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 2,
""" number of process threads for que runner """)
tf.app.flags.DEFINE_string('data_dir', '/data',
""" base dir for all data""")
tf.app.flags.DEFINE_string('tf_data_dir', '../data',
""" base dir for saving tf records data""")
tf.app.flags.DEFINE_integer('tf_seq_length', 30,
""" seq length of tf saved records """)
def lat_distortions(lat, distortions):
if len(lat.get_shape()) == 5:
lat = tf.cond(distortions[0]>0.50, lambda: tf.reverse(lat, axis=[2]), lambda: lat)
elif len(lat.get_shape()) == 6:
lat = tf.cond(distortions[0]>0.50, lambda: tf.reverse(lat, axis=[2]), lambda: lat)
lat = tf.cond(0.75<distortions[0], lambda: tf.reverse(lat, axis=[3]), lambda: lat)
lat = tf.cond(distortions[0]<0.25, lambda: tf.reverse(lat, axis=[3]), lambda: lat)
return lat
def read_data_fluid(filename_queue, seq_length, shape, num_frames):
# make reader
reader = tf.TFRecordReader()
key, serialized_example = reader.read(filename_queue)
# make feature dict
feature_dict = {}
for i in xrange(FLAGS.tf_seq_length):
feature_dict['flow/frame_' + str(i)] = tf.FixedLenFeature([np.prod(np.array(shape))*num_frames],tf.float32)
feature_dict['boundary'] = tf.FixedLenFeature([np.prod(np.array(shape))],tf.float32)
features = tf.parse_single_example(
serialized_example,
features=feature_dict)
# read seq from record
seq_of_flow = []
seq_of_boundary = []
for sq in xrange(FLAGS.tf_seq_length - seq_length):
flow = []
for i in xrange(seq_length):
flow.append(features['flow/frame_' + str(i+sq)])
boundary = features['boundary']
# reshape it
flow = tf.stack(flow)
flow = tf.reshape(flow, [seq_length] + shape + [num_frames])
flow = tf.to_float(flow)
boundary = tf.reshape(boundary, [1] + shape + [1])
boundary = tf.to_float(boundary)
seq_of_flow.append(flow)
seq_of_boundary.append(boundary)
seq_of_flow = tf.stack(seq_of_flow)
seq_of_boundary = tf.stack(seq_of_boundary)
return seq_of_flow, seq_of_boundary
def read_data_em(filename_queue, seq_length, shape, num_frames):
# make reader
reader = tf.TFRecordReader()
key, serialized_example = reader.read(filename_queue)
# make feature dict
feature_dict = {}
  # records hold FLAGS.tf_seq_length frames, so declare them all (mirrors read_data_fluid)
  for i in xrange(FLAGS.tf_seq_length):
    feature_dict['em/frame_' + str(i)] = tf.FixedLenFeature([np.prod(np.array(shape))*num_frames],tf.float32)
feature_dict['boundary'] = tf.FixedLenFeature([np.prod(np.array(shape))],tf.float32)
features = tf.parse_single_example(
serialized_example,
features=feature_dict)
# read seq from record
seq_of_em = []
seq_of_boundary = []
for sq in xrange(FLAGS.tf_seq_length - seq_length):
em = []
for i in xrange(seq_length):
      em.append(features['em/frame_' + str(i+sq)])
boundary = features['boundary']
# reshape it
em = tf.stack(em)
em = tf.reshape(em, [seq_length] + shape + [num_frames])
em = tf.to_float(em)
boundary = tf.reshape(boundary, [1] + shape + [1])
boundary = tf.to_float(boundary)
seq_of_em.append(em)
seq_of_boundary.append(boundary)
seq_of_em = tf.stack(seq_of_em)
seq_of_boundary = tf.stack(seq_of_boundary)
return seq_of_em, seq_of_boundary
def _generate_fluid_batch(seq_of_flow, seq_of_boundary, batch_size):
num_preprocess_threads = FLAGS.num_preprocess_threads
flows, boundarys = tf.train.shuffle_batch(
[seq_of_flow, seq_of_boundary],
batch_size=batch_size,
num_threads=num_preprocess_threads,
enqueue_many=True,
capacity=FLAGS.min_queue_examples + 3 * batch_size,
min_after_dequeue=FLAGS.min_queue_examples)
return flows, boundarys
def _generate_em_batch(seq_of_em, seq_of_boundary, batch_size):
num_preprocess_threads = FLAGS.num_preprocess_threads
ems, boundarys = tf.train.shuffle_batch(
[seq_of_em, seq_of_boundary],
batch_size=batch_size,
num_threads=num_preprocess_threads,
enqueue_many=True,
capacity=FLAGS.min_queue_examples + 3 * batch_size,
min_after_dequeue=FLAGS.min_queue_examples)
return ems, boundarys
def fluid_inputs(batch_size, seq_length, shape, num_frames, train=True):
# number of train simulations
run_num = 1
# make dir name based on shape of simulation
dir_name = 'fluid_flow_' + str(shape[0]) + 'x' + str(shape[1])
if len(shape) > 2:
dir_name = dir_name + 'x' + str(shape[2])
dir_name = dir_name + '_'
print("begining to generate tf records")
fluid_createTFRecords.generate_tfrecords(FLAGS.tf_seq_length, run_num, shape, num_frames, dir_name)
# get tfrecord files
tfrecord_filename = glb(FLAGS.tf_data_dir + '/tfrecords/' + str(dir_name) + '/*_seq_length_' + str(FLAGS.tf_seq_length) + '.tfrecords')
  # make filename queue
filename_queue = tf.train.string_input_producer(tfrecord_filename)
# read tfrecords
seq_of_flow, seq_of_boundary = read_data_fluid(filename_queue, seq_length, shape, num_frames)
# flip flow as a distortion
distortions = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)
seq_of_flow = lat_distortions(seq_of_flow, distortions)
seq_of_boundary = lat_distortions(seq_of_boundary, distortions)
# construct batch of flows
flows, boundarys = _generate_fluid_batch(seq_of_flow, seq_of_boundary, batch_size)
return flows, boundarys
def em_inputs(batch_size, seq_length, shape, num_frames, train=True):
# number of train simulations
run_num = 50
# make dir name based on shape of simulation
dir_name = 'em_' + str(shape[0]) + 'x' + str(shape[1])
if len(shape) > 2:
dir_name = dir_name + 'x' + str(shape[2])
dir_name = dir_name + '_'
print("begining to generate tf records")
em_createTFRecords.generate_tfrecords(FLAGS.tf_seq_length, run_num, shape, num_frames, dir_name)
# get tfrecord files
tfrecord_filename = glb(FLAGS.tf_data_dir + '/tfrecords/' + str(dir_name) + '/*_seq_length_' + str(FLAGS.tf_seq_length) + '.tfrecords')
  # make filename queue
filename_queue = tf.train.string_input_producer(tfrecord_filename)
# read tfrecords
seq_of_em, seq_of_boundary = read_data_em(filename_queue, seq_length, shape, num_frames)
# flip flow as a distortion
distortions = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)
seq_of_em = lat_distortions(seq_of_em, distortions)
seq_of_boundary = lat_distortions(seq_of_boundary, distortions)
# construct batch of em
ems, boundarys = _generate_em_batch(seq_of_em, seq_of_boundary, batch_size)
return ems, boundarys
```
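Because these readers sit on TF1 input queues, a consumer presumably has to start the queue runners inside a session before the batch tensors yield data. A minimal sketch, where the batch size, sequence length, grid shape and lattice frame count are illustrative values:
```python
import tensorflow as tf
import input.lat_inputs as lat_inputs
# Hypothetical consumer of the pipeline above; argument values are examples.
flows, boundarys = lat_inputs.fluid_inputs(4, 5, [512, 512], 9, train=True)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    flow_batch, boundary_batch = sess.run([flows, boundarys])
    coord.request_stop()
    coord.join(threads)
```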
#### File: Phy-Net/model/lat_net.py
```python
import tensorflow as tf
import numpy as np
from nn import *
import input.lat_inputs as lat_inputs
import systems.fluid_createTFRecords as fluid
import systems.em_createTFRecords as em
FLAGS = tf.app.flags.FLAGS
# Constants describing the training process.
################# system params
tf.app.flags.DEFINE_string('system', 'fluid_flow',
""" system to compress """)
tf.app.flags.DEFINE_integer('lattice_size', 9,
""" size of lattice """)
tf.app.flags.DEFINE_integer('boundary_size', 4,
""" size of boundary """)
tf.app.flags.DEFINE_string('dimensions', '512x512',
""" dimension of simulation with x between value """)
################# running params
tf.app.flags.DEFINE_string('base_dir', '../checkpoints',
"""dir to store trained net """)
tf.app.flags.DEFINE_bool('restore', True,
""" restore model if there is one """)
################# model params
## resnet params
tf.app.flags.DEFINE_integer('nr_residual', 2,
""" number of residual blocks before down sizing """)
tf.app.flags.DEFINE_integer('nr_downsamples', 3,
""" numper of downsamples """)
tf.app.flags.DEFINE_string('nonlinearity', "leaky_relu",
""" what nonlinearity to use, leaky_relu, relu, elu, concat_elu """)
tf.app.flags.DEFINE_float('keep_p', 1.0,
""" keep probability for res blocks """)
tf.app.flags.DEFINE_bool('gated', False,
""" gated res blocks """)
tf.app.flags.DEFINE_integer('filter_size', 16,
""" filter size for first res block. the rest of the filters are 2x every downsample """)
## compression params
tf.app.flags.DEFINE_bool('lstm', False,
""" lstm or non recurrent""")
tf.app.flags.DEFINE_integer('nr_residual_compression', 3,
""" number of residual compression layers """)
tf.app.flags.DEFINE_integer('filter_size_compression', 128,
""" filter size for compression piece """)
tf.app.flags.DEFINE_integer('nr_discriminators', 1,
""" number of discriminators to train """)
tf.app.flags.DEFINE_integer('z_size', 50,
""" size of z vector """)
tf.app.flags.DEFINE_integer('nr_residual_discriminator', 1,
""" number of residual blocks before down sizing """)
tf.app.flags.DEFINE_integer('nr_downsamples_discriminator', 3,
""" numper of downsamples """)
tf.app.flags.DEFINE_float('keep_p_discriminator', 1.0,
""" keep probability for res blocks """)
tf.app.flags.DEFINE_integer('filter_size_discriminator', 32,
""" filter size for first res block of discriminator """)
tf.app.flags.DEFINE_integer('lstm_size_discriminator', 512,
""" size of lstm cell in discriminator """)
## gan params (currently not in use)
tf.app.flags.DEFINE_bool('gan', False,
""" use gan training """)
################# optimize params
tf.app.flags.DEFINE_string('optimizer', "adam",
""" what optimizer to use (currently adam is the only option)""")
tf.app.flags.DEFINE_float('reconstruction_lr', 0.0004,
""" learning rete for reconstruction """)
tf.app.flags.DEFINE_float('gan_lr', 2e-5,
""" learning rate for training gan """)
tf.app.flags.DEFINE_float('lambda_divergence', 0.2,
""" weight of divergence or gradient differnce error """)
################# train params
tf.app.flags.DEFINE_integer('max_steps', 1000000,
""" max steps to train """)
tf.app.flags.DEFINE_integer('unroll_length', 5,
""" unroll length """)
tf.app.flags.DEFINE_integer('init_unroll_length', 0,
""" inital unroll length before training """)
tf.app.flags.DEFINE_bool('unroll_from_true', False,
""" use the true data when unrolling the network (probably just used for unroll_length 1 when doing curriculum learning""")
tf.app.flags.DEFINE_integer('batch_size', 4,
""" batch size """)
tf.app.flags.DEFINE_integer('nr_gpus', 1,
""" number of gpus for training (each gpu with have batch size FLAGS.batch_size""")
################# test params
tf.app.flags.DEFINE_bool('train', True,
""" train or test """)
tf.app.flags.DEFINE_string('test_dimensions', '512x512',
""" test video dimentions """)
tf.app.flags.DEFINE_integer('video_length', 200,
""" video dimentions """)
tf.app.flags.DEFINE_integer('test_length', 50,
""" sequence length for testing (making error plots) """)
tf.app.flags.DEFINE_integer('test_nr_runs', 10,
""" number of simulations to test on (making error plots) """)
tf.app.flags.DEFINE_integer('test_nr_per_simulation', 1,
""" number of test runs per simulations (making error plots) """)
tf.app.flags.DEFINE_string('extract_type', 'line',
""" if extracting in decoder of timing tests """)
tf.app.flags.DEFINE_integer('extract_pos', 5,
""" where to extract in decoder for timing tests """)
####### inputs #######
def inputs(empty=False, name="inputs", shape=None, batch_size=1, single_step=False):
"""makes input vector
Args:
empty: will just return an empty state to fill with a feed dict
name: name for variables
shape: shape of input. if None then will use the shape of FLAGS.dimensions
Return:
state: state of simulation
boundary: boundary of simulation
"""
if shape is None:
shape = FLAGS.dimensions.split('x')
shape = map(int, shape)
frame_num = FLAGS.lattice_size
boundary_num = FLAGS.boundary_size
if empty:
if not single_step:
state = tf.placeholder(tf.float32, [batch_size, FLAGS.unroll_length] + shape + [frame_num], name=name)
boundary = tf.placeholder(tf.float32, [batch_size, 1] + shape + [boundary_num], name=name)
elif single_step:
state = tf.placeholder(tf.float32, [batch_size] + shape + [frame_num], name=name)
boundary = tf.placeholder(tf.float32, [batch_size] + shape + [boundary_num], name=name)
elif FLAGS.system == "fluid_flow":
state, boundary = lat_inputs.fluid_inputs(FLAGS.batch_size, FLAGS.init_unroll_length + FLAGS.unroll_length, shape, frame_num, FLAGS.train)
elif FLAGS.system == "em":
state, boundary = lat_inputs.em_inputs(FLAGS.batch_size, FLAGS.init_unroll_length + FLAGS.unroll_length, shape, frame_num, FLAGS.train)
  if FLAGS.gan:
    total_unroll_length = FLAGS.init_unroll_length + FLAGS.unroll_length
    z = tf.placeholder("float", [None, total_unroll_length, FLAGS.z_size])
    return state, boundary, z
else:
return state, boundary
####### feed_dict #######
def feed_dict(seq_length, shape, lattice_size, run_num, start_index):
"""makes feed dict for testing
Args:
seq_length: length of seq out
shape: shape of simulation
lattice_size: int of lattice dims (9 for 2D fluid simulations)
run_num: int index of simulation
    start_index: int index of the state at which to start the simulation
Return:
state: state of simulation
boundary: boundary of simulation
"""
if FLAGS.system == "fluid_flow":
dir_name = "fluid_flow_"
if len(shape) == 2:
dir_name = dir_name + str(shape[0]) + 'x' + str(shape[1]) + '_test'
else:
dir_name = dir_name + str(shape[0]) + 'x' + str(shape[1]) + 'x' + str(shape[2]) + '_test'
state, boundary = fluid.generate_feed_dict(seq_length, shape, lattice_size, dir_name, run_num, start_index)
elif FLAGS.system == "em":
dir_name = "em_"
dir_name = dir_name + str(shape[0]) + 'x' + str(shape[1]) + '_test'
state, boundary = em.generate_feed_dict(seq_length, shape, lattice_size, dir_name, run_num, start_index)
return state, boundary
####### encoding #######
def encoding(inputs, name='', boundary=False):
"""Builds encoding mapping of LatNet.
Args:
inputs: input to encoder
name: name for variables
boundary: bool for whether encoding the boundary or the state of the simulation
Return:
x_i: encoded state
"""
x_i = inputs
nonlinearity = set_nonlinearity(FLAGS.nonlinearity)
if FLAGS.system == "fluid_flow":
padding = (len(x_i.get_shape())-3)*["mobius"] + ["zeros"]
elif FLAGS.system == "em":
padding = ["mobius", "mobius"]
print("encoding input shape " + str(x_i.get_shape()))
for i in xrange(FLAGS.nr_downsamples):
filter_size = FLAGS.filter_size*(pow(2,i))
x_i = res_block(x_i, filter_size=filter_size, nonlinearity=nonlinearity, keep_p=FLAGS.keep_p, stride=2, gated=FLAGS.gated, padding=padding, name=name + "resnet_down_sampled_" + str(i) + "_nr_residual_0", begin_nonlinearity=False)
print("filter size for layer " + str(i) + " of encoding is " + str(filter_size) + " with shape " + str(x_i.get_shape()))
for j in xrange(FLAGS.nr_residual - 1):
x_i = res_block(x_i, filter_size=filter_size, nonlinearity=nonlinearity, keep_p=FLAGS.keep_p, stride=1, gated=FLAGS.gated, padding=padding, name=name + "resnet_down_sampled_" + str(i) + "_nr_residual_" + str(j+1))
if boundary:
x_i = res_block(x_i, filter_size=FLAGS.filter_size_compression*2, nonlinearity=nonlinearity, keep_p=FLAGS.keep_p, stride=1, gated=FLAGS.gated, padding=padding, name=name + "resnet_last_before_compression")
else:
x_i = res_block(x_i, filter_size=FLAGS.filter_size_compression, nonlinearity=nonlinearity, keep_p=FLAGS.keep_p, stride=1, gated=FLAGS.gated, padding=padding, name=name + "resnet_last_before_compression")
return x_i
####### encoding template #######
encode_state_template = tf.make_template('encode_state_template', encoding)
encode_boundary_template = tf.make_template('encode_boundary_template', encoding)
#################################
####### compression #############
def compression(inputs):
"""Builds compressed mapping of LatNet.
Args:
inputs: input to compression network
Return:
x_i
"""
x_i = inputs
nonlinearity = set_nonlinearity(FLAGS.nonlinearity)
if FLAGS.system == "fluid_flow":
padding = (len(x_i.get_shape())-3)*["mobius"] + ["zeros"]
elif FLAGS.system == "em":
padding = ["mobius", "mobius"]
for i in xrange(FLAGS.nr_residual_compression):
x_i = res_block(x_i, filter_size=FLAGS.filter_size_compression, nonlinearity=nonlinearity, keep_p=FLAGS.keep_p, stride=1, gated=FLAGS.gated, padding=padding, name="resnet_compression_" + str(i))
return x_i
####### compression template ######
compress_template = tf.make_template('compress_template', compression)
#################################
####### decoding #######
def decoding(inputs, extract_type=None, extract_pos=64):
"""Builds decoding mapping of LatNet.
Args:
inputs: input to decoder
extract_type: string that specifies to extract plane, line or point
from compresses state. If None then no extraction
extract_pos: int pos to extract
Return:
x_i: decompressed state
"""
x_i = inputs
nonlinearity = set_nonlinearity(FLAGS.nonlinearity)
if FLAGS.system == "fluid_flow":
padding = (len(x_i.get_shape())-3)*["mobius"] + ["zeros"]
elif FLAGS.system == "em":
padding = ["mobius", "mobius"]
if (extract_type is not None) and (extract_type != 'False'):
width = (FLAGS.nr_downsamples-1)*FLAGS.nr_residual*2
### hard setting extract_pos for now ###
extract_pos = width + 1
########################################
x_i = trim_tensor(x_i, extract_pos, width, extract_type)
for i in xrange(FLAGS.nr_downsamples-1):
filter_size = FLAGS.filter_size*pow(2,FLAGS.nr_downsamples-i-2)
print("decoding filter size for layer " + str(i) + " of encoding is " + str(filter_size))
x_i = transpose_conv_layer(x_i, 4, 2, filter_size, padding, "up_conv_" + str(i))
for j in xrange(FLAGS.nr_residual):
x_i = res_block(x_i, filter_size=filter_size, nonlinearity=nonlinearity, keep_p=FLAGS.keep_p, stride=1, gated=FLAGS.gated, padding=padding, name="resnet_up_sampled_" + str(i) + "_nr_residual_" + str(j+1))
if (extract_type is not None) and (extract_type != 'False'):
width = width-2
x_i = trim_tensor(x_i, width+2, width, extract_type)
x_i = transpose_conv_layer(x_i, 4, 2, FLAGS.lattice_size, padding, "up_conv_" + str(FLAGS.nr_downsamples))
return tf.nn.tanh(x_i)
####### decoding template #######
decoding_template = tf.make_template('decoding_template', decoding)
#################################
####### unroll #######
def unroll(state, boundary, z=None):
"""unrolls LatNet.
Args:
state: seq of states to train on
boundary: seq of boundary states to train on
Return:
x_out: predicted seq of states
"""
total_unroll_length = FLAGS.init_unroll_length + FLAGS.unroll_length
if FLAGS.lstm:
print("lstm not implemented yet")
exit()
else:
# store all out
x_out = []
# encode
y_1 = encode_state_template(state[:,0])
small_boundary = encode_boundary_template(boundary[:,0], name='boundry_', boundary=True)
# apply boundary
[small_boundary_mul, small_boundary_add] = tf.split(small_boundary, 2, len(small_boundary.get_shape())-1)
y_1 = (small_boundary_mul * y_1) + small_boundary_add
# add z if gan training
if FLAGS.gan:
y_1 = add_z(y_1, z)
# unroll all
for i in xrange(FLAGS.unroll_length):
# decode and add to list
x_2 = decoding_template(y_1)
x_out.append(x_2)
# compression
if FLAGS.unroll_length > 1:
# compression mapping
y_1 = compress_template(y_1)
# apply boundary
y_1 = (small_boundary_mul * y_1) + small_boundary_add
# add z if gan training
if FLAGS.gan:
y_1 = add_z(y_1, z)
x_out = tf.stack(x_out)
perm = np.concatenate([np.array([1,0]), np.arange(2,len(x_2.get_shape())+1,1)], 0)
x_out = tf.transpose(x_out, perm=perm)
return x_out
####### unroll template #######
unroll_template = tf.make_template('unroll_template', unroll)
###############################
####### continual unroll #######
def continual_unroll(state, boundary, z=None, extract_type=None, extract_pos=None):
"""unrolls LatNet one step to generate continual simulations
Args:
state: seq of states to train on
boundary: seq of boundary states to train on
extract_type: string that specifies to extract plane, line or point
from compresses state. If None then no extraction
extract_pos: int pos to extract
Return:
y_1: compressed state
small_boundary_mul: compressed state
small_boundary_add: compressed state
x_2: decompresed state
y_2: compressed state after one compression mapping
"""
if FLAGS.lstm:
print("lstm not implemented yet")
exit()
else:
# store all out
y_1 = encode_state_template(state)
small_boundary = encode_boundary_template(boundary, name='boundry_', boundary=True)
# apply boundary
[small_boundary_mul, small_boundary_add] = tf.split(small_boundary, 2, len(small_boundary.get_shape())-1)
y_1_boundary = (small_boundary_mul * y_1) + small_boundary_add
# add z if gan training
if FLAGS.gan:
y_1_boundary = add_z(y_1_boundary, z)
# unroll one step
x_2 = decoding_template(y_1_boundary, extract_type=extract_type, extract_pos=extract_pos)
y_2 = compress_template(y_1_boundary)
return y_1, small_boundary_mul, small_boundary_add, x_2, y_2
####### continual unroll template #######
continual_unroll_template = tf.make_template('unroll_template', continual_unroll) # same variable scope as unroll_template
#########################################
```
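For context, a training script would presumably assemble these templates roughly as follows. The L2 objective is an illustrative stand-in (the repository's real training loss is not shown in this file), and the sketch assumes init_unroll_length is 0 so the predicted and true sequences line up:
```python
import tensorflow as tf
import model.lat_net as lat_net
# Hypothetical graph construction reusing the templates defined above.
state, boundary = lat_net.inputs()                # queue-fed training tensors
x_out = lat_net.unroll_template(state, boundary)  # predicted rollout of states
loss = tf.nn.l2_loss(state - x_out)               # illustrative stand-in loss
```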
#### File: Phy-Net/model/optimizer.py
```python
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999):
''' Adam optimizer '''
updates = []
if type(cost_or_grads) is not list:
grads = tf.gradients(cost_or_grads, params)
else:
grads = cost_or_grads
  t = tf.Variable(1., name='adam_t')  # pass name by keyword; positionally it would land in `trainable`
  for p, g in zip(params, grads):
    mg = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adam_mg')
    if mom1>0:
      v = tf.Variable(tf.zeros(p.get_shape()), name=p.op.name + '_adam_v')
v_t = mom1*v + (1. - mom1)*g
v_hat = v_t / (1. - tf.pow(mom1,t))
updates.append(v.assign(v_t))
else:
v_hat = g
mg_t = mom2*mg + (1. - mom2)*tf.square(g)
mg_hat = mg_t / (1. - tf.pow(mom2,t))
g_t = v_hat / tf.sqrt(mg_hat + 1e-8)
p_t = p - lr * g_t
updates.append(mg.assign(mg_t))
updates.append(p.assign(p_t))
updates.append(t.assign_add(1))
return tf.group(*updates)
```
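A toy check of the hand-rolled update op above; the quadratic objective is purely illustrative.
```python
import tensorflow as tf
from model.optimizer import adam_updates
# Minimize (w - 3)^2 with the custom Adam op.
w = tf.Variable(0.0)
cost = tf.square(w - 3.0)
train_op = adam_updates([w], cost, lr=0.1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_op)
    print(sess.run(w))  # converges toward 3.0
```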
#### File: Phy-Net/systems/em_createTFRecords.py
```python
import re
import numpy as np
import tensorflow as tf
from glob import glob as glb
import h5py
from tqdm import *
from model.lattice import *
FLAGS = tf.app.flags.FLAGS
# helper function
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
def load_em(filename, shape, frame_num):
# load file
stream_em = h5py.File(filename, 'r')
# process lattice state
em_state = np.array(stream_em['State'][:])
if len(shape) == 2:
shape = [1] + shape
em_state = 10.0*em_state.reshape(shape + [frame_num])
stream_em.close()
return em_state
def load_boundary(filename, shape, frame_num):
stream_boundary = h5py.File(filename, 'r')
boundary_cond = np.array(stream_boundary['Epsilon'][:])
boundary_cond = boundary_cond.reshape([1]+shape+[1])
stream_boundary.close()
return boundary_cond
def make_feature_from_seq(seq_frames, seq_length, shape, frame_num):
feature = {}
for i in xrange(seq_length):
frame = seq_frames[i]
frame = np.float32(frame)
frame = frame.reshape([np.prod(np.array(shape))*frame_num])
frame = frame.astype(np.float)
feature['em/frame_' + str(i)] = _float_feature(frame)
return feature
def generate_feed_dict(seq_length, shape, frame_num, dir_name, run_number, start_index):
# generate boundry
  boundary_cond = load_boundary(FLAGS.data_dir + '/' + dir_name + '/sample_' + str(run_number) + '/em_0000.h5', shape, frame_num) # doesn't matter which boundary is loaded
# generate em state
em_state = np.zeros([seq_length] + shape + [frame_num])
for i in xrange(seq_length):
em_state[i] = load_em(FLAGS.data_dir + '/' + dir_name + '/sample_' + str(run_number) + '/em_' + str(start_index + i).zfill(4) + '.h5', shape, frame_num)
return em_state, boundary_cond
def generate_tfrecords(seq_length, num_runs, shape, frame_num, dir_name):
if not tf.gfile.Exists(FLAGS.tf_data_dir + '/tfrecords/' + dir_name):
tf.gfile.MakeDirs(FLAGS.tf_data_dir + '/tfrecords/' + dir_name)
for run in tqdm(xrange(num_runs)):
filename = FLAGS.tf_data_dir + '/tfrecords/' + dir_name + '/run_' + str(run) + '_seq_length_' + str(seq_length) + '.tfrecords'
tfrecord_filename = glb(FLAGS.tf_data_dir + '/tfrecords/' + dir_name + '/*')
if filename not in tfrecord_filename:
writer = tf.python_io.TFRecordWriter(filename)
h5_filenames = glb(FLAGS.data_dir + '/' + dir_name + '/sample_' + str(run) + '/*.h5')
num_samples = len(h5_filenames)
# first calc boundary (from first sample)
boundary_cond = load_boundary(FLAGS.data_dir + '/' + dir_name + '/sample_' + str(run) + '/em_0000.h5', shape, frame_num)
boundary_cond = np.float32(boundary_cond)
boundary_flat = boundary_cond.reshape([np.prod(np.array(shape))])
boundary_raw = boundary_flat.astype(np.float)
# save tf records
ind_dat = 0
while ind_dat < (num_samples - seq_length - 1):
seq_frames = np.zeros([seq_length] + shape + [frame_num])
for i in xrange(seq_length):
em_state = load_em(FLAGS.data_dir + '/' + dir_name + '/sample_' + str(run) + '/em_' + str(i+ind_dat).zfill(4) + '.h5', shape, frame_num)
em_state = np.float32(em_state)
seq_frames[i] = em_state
overlap = min(4, seq_length)
ind_dat += seq_length - overlap # overlap between frames
# make feature map
feature = make_feature_from_seq(seq_frames, seq_length, shape, frame_num)
feature['boundary'] = _float_feature(boundary_raw)
# create example and write it
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
writer.close()
``` |
{
"source": "johnk314/Python-Projects",
"score": 3
} |
#### File: Python-Projects/tool_belt/connect.py
```python
import geopandas
import os
import pandas
import psycopg2
import pyodbc
import requests
import sqlalchemy
class GISODBC:
""""""
def __init__(self):
# TODO: psycopg2 vs sqlalchemy: https://hackersandslackers.com/psycopg2-postgres-python/
# TODO: sqlalchemy ORM tutorials: https://hackersandslackers.com/series/mastering-sqlalchemy/
# TODO: Connect with sqlalchemy
        self.engine = sqlalchemy.create_engine(r'postgresql://postgres:<PASSWORD>@localhost:5433/GIS')
# TODO: Connect with psycopg2.
# host = 'localhost'
# port = 5433
# database = 'GIS'
# user = 'postgres'
# password = '<PASSWORD>'
# self.con = psycopg2.connect(host=host, port=port, database=database, user=user, password=password)
# Helper function for accessing pd.read_sql_query AND pd.read_sql_table().
# Read query results into a DataFrame.
# pd.read_sql(sql=None, con=self.con, index_col=None, coerce_float=None, params=None, parse_dates=None, columns=None, chunksize=None)
# Returns a DataFrame corresponding to the result set of the query string.
# pd.read_sql_query(sql=None, con=self.con, index_col=None, coerce_float=None, params=None, parse_dates=None, chunksize=None)
# Given a table name and connection, returns a DataFrame.
# pd.read_sql_table(table_name=None, con=self.con, schema=None, index_col=None, coerce_float=None,parse_dates=None, columns=None, chunksize=None)
# pd.to_sql()
def show_schemas(self):
q = r'SELECT schema_name FROM information_schema.schemata;'
return pandas.read_sql(sql=q, con=self.engine)
def show_tables(self, schema_name):
q = r"SELECT * FROM information_schema.tables WHERE table_schema = '{}' AND table_type = 'BASE TABLE';".format(schema_name)
return pandas.read_sql(sql=q, con=self.engine)
def show_views(self, schema_name):
q = r"SELECT * FROM information_schema.tables WHERE table_schema = '{}' AND table_type = 'VIEW';".format(schema_name)
return pandas.read_sql(sql=q, con=self.engine)
def get_unemployment(self):
q = r'SELECT * FROM "FRED_Unemployment";'
return pandas.read_sql(sql=q, con=self.engine)
def get_geo(self):
# https://geopandas.org/reference/geopandas.read_postgis.html#geopandas.read_postgis
geopandas.read_postgis(sql=r'', con=self.engine, geom_col='geom', crs=None, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None)
def write_dataframe(self, dataframe, schema_name, table_name, if_exists='fail'):
"""
:param dataframe: Write the incoming dataframe as a table.
:param schema_name: The schema to write the table to.
:param table_name: The name of the table to write.
        :param if_exists: What to do if the table exists in the specified database. Options include: 'fail', 'replace', 'append'.
        :return: None
        """
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html
dataframe.to_sql(name=table_name, con=self.engine, if_exists=if_exists, schema=schema_name)
def write_geodataframe(self, geodataframe, table_name, schema_name):
"""
:param geodataframe: Input geodataframe to be written.
:param table_name: Name of the table to be created and/or written to.
:param schema_name: Schema of the table to be created and/or written to.
:return: None
"""
# https://geopandas.org/io.html
# https://geopandas.org/reference.html#geopandas.GeoDataFrame.to_postgis
geodataframe.to_postgis(name=table_name, con=self.engine, if_exists='fail', schema=schema_name, chunksize=5000)
class Files:
def __init__(self):
# TODO: List recursively into a dictionary. use pprint.pprint to show the levels.
for f in os.listdir('data'):
print(f)
if __name__ == '__main__':
# Put test-harness here.
pass
``` |
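A short usage sketch for the wrapper above, assuming the local PostgreSQL instance hard-coded in the constructor is reachable; the table name written at the end is made up.
```python
# Hypothetical session against the connection defined in __init__.
gis = GISODBC()
print(gis.show_schemas())            # list available schemas
print(gis.show_tables('public'))     # base tables in the public schema
df = gis.get_unemployment()          # FRED_Unemployment as a DataFrame
gis.write_dataframe(df, 'public', 'unemployment_copy', if_exists='replace')
```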
{
"source": "johnkabage/book-a-meal-API",
"score": 3
} |
#### File: Api/v1/models.py
```python
from werkzeug.security import generate_password_hash
from datetime import datetime
users = []
meal_orders = []
class MealOrder:
meal_id = 1
def __init__(self, city, area, meal, quantity, price, receiver):
self.city = city
self.area = area
self.meal = meal
self.quantity = quantity
self.price = price
self.receiver = receiver
self.date = str(datetime.now())
self.id = MealOrder.meal_id
MealOrder.meal_id += 1
def get_meal_by_id(self, id):
for meal in meal_orders:
if meal.id == id:
return meal
def get_meal_by_user(self,user):
users_meal = [meal for meal in meal_orders if meal.user == user]
return users_meal
class User:
    user_id = 1
    def __init__(self, fname, lname, username, email, password, confirm_password):
        self.fname = fname
        self.lname = lname
        self.username = username
        self.email = email
        if password:
            self.password = generate_password_hash(password)
        if confirm_password:
            self.confirm_password = generate_password_hash(confirm_password)
        self.id = User.user_id
        User.user_id += 1
    def get_user_by_username(self, username):
        for user in users:
            if user.username == username:
                return user
    def get_user_by_id(self, id):
        for user in users:
            if user.id == id:
                return user
    def serialize(self):
        return dict(
            id = self.id,
            username = self.username,
            email = self.email
        )
``` |
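A brief sketch of exercising these in-memory models; all values are illustrative.
```python
# Hypothetical usage of the in-memory models above.
order = MealOrder('Nairobi', 'CBD', 'Ugali', 2, 250, 'John')
meal_orders.append(order)
print(order.get_meal_by_id(order.id).meal)   # 'Ugali'
user = User('John', 'Karima', 'jkarima', '<EMAIL>', 'secret', 'secret')
users.append(user)
print(user.serialize()['username'])          # 'jkarima'
```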
{
"source": "johnkabler/PyRyx",
"score": 2
} |
#### File: johnkabler/PyRyx/pyryx.py
```python
from requests_oauthlib import OAuth1Session
from requests_oauthlib import OAuth1
import requests
import pandas as pd
import csv
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
import time
class PyRyxApi:
def __init__(self, client_key, client_secret, gallery_url):
self.client_key = client_key
self.client_secret = client_secret
self.gallery_url = gallery_url
def generateAyxEndpoint(self, operation, method):
req_url = self.gallery_url + '{operation}/{method}/'.format(operation=operation,
method=method)
return req_url
def generateAyxRequest(self, endpoint):
queryoauth = OAuth1(self.client_key, self.client_secret, signature_type='query')
return requests.get(endpoint, auth=queryoauth)
def getSubscriptionWorkflows(self):
endpoint = self.generateAyxEndpoint('workflows', 'subscription')
return self.generateAyxRequest(endpoint).json()
def getWorkflowQuestions(self, app_id):
endpoint = self.generateAyxEndpoint('workflows', app_id) + 'questions/'
return self.generateAyxRequest(endpoint).json()
def getWorkflowJobs(self, app_id):
endpoint = self.generateAyxEndpoint('workflows', app_id) + 'jobs/'
return self.generateAyxRequest(endpoint).json()
def checkJobState(self, job_id):
endpoint = self.generateAyxEndpoint('jobs', job_id)
return self.generateAyxRequest(endpoint).json()
def getJobOutput(self, job_id, output_id):
endpoint = self.generateAyxEndpoint('jobs', job_id)
endpoint = endpoint + 'output/{output_id}/'.format(output_id=output_id)
queryoauth = OAuth1(self.client_key, self.client_secret, signature_type='query')
payload = {'format': 'Csv'}
DATA = StringIO(requests.get(endpoint, auth=queryoauth, params=payload).text)
return pd.read_csv(DATA)
def fetchJobOutput(self, job_id):
job_info = self.checkJobState(job_id)
result_list = []
output_id_list = [output['id'] for output in job_info['outputs']]
for output_id in output_id_list:
output_df = self.getJobOutput(job_id, output_id)
result_list.append(output_df)
        if len(result_list) > 1:
            df = pd.concat(result_list)
        else:
            df = result_list[0]
return df[:-1]
def executeWorkflow(self, app_id, question_payload):
endpoint = self.generateAyxEndpoint('workflows', app_id) + 'jobs/'
queryoauth = OAuth1(self.client_key, self.client_secret, signature_type='query')
return requests.post(endpoint, auth=queryoauth, json=question_payload)
def executeAndFetchResults(self, app_id,
question_payload):
response = self.executeWorkflow(app_id, question_payload)
job_id = response.json()['id']
job_state = 'Queued'
while(job_state == 'Queued'):
job = self.checkJobState(job_id)
j_state = job['status']
print(j_state)
if (j_state == 'Completed'):
job_state = j_state
time.sleep(1)
return self.fetchJobOutput(job_id)
``` |
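A usage sketch for the Gallery client above; the keys, URL, app id and question payload are placeholders rather than real credentials or a confirmed payload schema.
```python
# Hypothetical invocation; every identifier below is a placeholder.
api = PyRyxApi(client_key='KEY', client_secret='SECRET',
               gallery_url='https://mygallery.example.com/api/v1/')
workflows = api.getSubscriptionWorkflows()
app_id = workflows[0]['id']
payload = {'questions': [{'name': 'Input', 'value': '42'}]}
results = api.executeAndFetchResults(app_id, payload)
print(results.head())
```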
{
"source": "johnkalinowski/llk",
"score": 3
} |
#### File: llk/typeclasses/characters.py
```python
import random
import evennia
from enum import Enum
from evennia.utils import create
from evennia.contrib import gendersub
from world.languages import Language
from world.occupations import Occupation
from world.occupations import OccupationTable
import typeclasses.auras as auras
from world.birthaugur import BirthAugur
from world.birthaugur import BirthAugurTable
from world.races import Race
from world.weapons import WeaponType
# PLACED IN THIS FILE UNTIL A BETTER SPOT FOUND
def roll_dice(num_dice, die_type):
"""
3d6, for example, would be num_dice = 3, die_type = 6
"""
result = 0
for i in range(num_dice):
result += random.randint(1, die_type)
return result
def calculate_ability_modifier(ability_score):
modifier = 0
if ability_score <= 3:
modifier = -3
elif ability_score <= 5:
modifier = -2
elif ability_score <= 8:
modifier = -1
elif ability_score <= 12:
modifier = 0
elif ability_score <= 15:
modifier = 1
elif ability_score <= 17:
modifier = 2
elif ability_score > 17:
modifier = 3
return modifier
class Alignment(Enum):
Lawful = 0
Neutral = 1
Chaotic = 2
def __str__(self):
if self == Alignment.Lawful:
return "Lawful"
elif self == Alignment.Neutral:
return "Neutral"
elif self == Alignment.Chaotic:
return "Chaotic"
else:
return "unknown"
# PLACED IN THIS FILE UNTIL A BETTER SPOT FOUND
class Character(gendersub.GenderCharacter):
"""
The Character defaults to reimplementing some of base Object's hook methods
with the following functionality:
at_basetype_setup - always assigns the DefaultCmdSet to this object type
(important!)sets locks so character cannot be picked up
and its commands only be called by itself, not anyone else.
(to change things, use at_object_creation() instead).
at_after_move(source_location) - Launches the "look" command after every
move.
at_post_unpuppet(account) - when Account disconnects from the Character,
we store the current location in the pre_logout_location
Attribute and move it to a None-location so the
"unpuppeted" character object does not need to stay on
grid. Echoes "Account has disconnected" to the room.
at_pre_puppet - Just before Account re-connects, retrieves the character's
pre_logout_location Attribute and move it back on the grid.
at_post_puppet - Echoes "AccountName has entered the game" to the room.
"""
def get_modified_hp(self):
stam = self.get_modified_stamina()
modifier = calculate_ability_modifier(stam)
for aura in self.db.auras:
if auras.AuraEffect.HP in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.HP]
return (self.db.hp + modifier)
def get_modified_strength(self):
modifier = 0
for aura in self.db.auras:
if auras.AuraEffect.Strength in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.Strength]
return (self.db.strength + modifier)
def get_modified_agility(self):
modifier = 0
for aura in self.db.auras:
if auras.AuraEffect.Agility in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.Agility]
return (self.db.agility + modifier)
def get_modified_stamina(self):
modifier = 0
for aura in self.db.auras:
if auras.AuraEffect.Stamina in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.Stamina]
return (self.db.stamina + modifier)
def get_modified_personality(self):
modifier = 0
for aura in self.db.auras:
if auras.AuraEffect.Personality in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.Personality]
return (self.db.personality + modifier)
def get_modified_intelligence(self):
modifier = 0
for aura in self.db.auras:
if auras.AuraEffect.Intelligence in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.Intelligence]
return (self.db.intelligence + modifier)
def get_modified_luck(self):
modifier = 0
for aura in self.db.auras:
if auras.AuraEffect.Luck in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.Luck]
return (self.db.luck + modifier)
def get_modified_speed(self):
modifier = 0
for aura in self.db.auras:
if auras.AuraEffect.Speed in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.Speed]
return (self.db.speed + modifier)
def get_modified_ac(self):
agi = self.get_modified_agility()
modifier = calculate_ability_modifier(agi)
for aura in self.db.auras:
if auras.AuraEffect.AC in aura.db.effect_modifiers.keys():
modifier += aura.db.effect_modifiers[auras.AuraEffect.AC]
return (self.db.ac + modifier)
def at_object_creation(self):
super().at_object_creation()
self.db.level = 0
self.db.ac = 0
self.db.xp = 0
self.db.alignment = Alignment.Neutral # TODO: alignment selection
# these are base stats before any modifiers or active effects
self.db.strength = roll_dice(3, 6)
self.db.agility = roll_dice(3, 6)
self.db.stamina = roll_dice(3, 6)
self.db.personality = roll_dice(3, 6)
self.db.intelligence = roll_dice(3, 6)
self.db.luck = roll_dice(3, 6)
self.db.hp = roll_dice(1, 4)
self.db.speed = 30
self.db.birth_augur = BirthAugur(roll_dice(1, 30))
luck_modifier = calculate_ability_modifier(self.db.luck)
self.db.auras = []
augur_data = BirthAugurTable[str(self.db.birth_augur)]
if "effects" in augur_data and luck_modifier != 0:
aura_effects = {}
for m in augur_data["effects"]:
if "modifier_multiplier" in augur_data:
aura_effects[m] = \
luck_modifier * augur_data["modifier_multiplier"]
else:
aura_effects[m] = luck_modifier
aura = create.create_object(
auras.PersistentEffectAura,
key=augur_data["name"],
location=self,
attributes=[
("desc", augur_data["desc"]),
("effect_modifiers", aura_effects)
]
)
aura.build_modifier_description()
self.db.auras.append(aura)
self.db.known_languages = []
self.db.known_languages.append(Language.Common)
self.db.weapon_proficiencies = []
self.db.weapon_proficiencies.append(WeaponType.Basic)
self.db.gold = 0
self.db.silver = 0
self.db.copper = roll_dice(5, 12)
self.db.occupation = Occupation(roll_dice(1, 100))
occupation = str(self.db.occupation)
if "weapon_proficiencies" in OccupationTable[occupation]:
self.db.weapon_proficiencies.append(
OccupationTable[occupation]["weapon_proficiencies"]
)
for item in OccupationTable[occupation]["items"]:
item_clone = dict(item)
item_clone["location"] = self
evennia.prototypes.spawner.spawn(item_clone)
if "race" in OccupationTable[occupation]:
self.db.race = OccupationTable[occupation]["race"]
else:
self.db.race = Race.Human
if self.db.race == Race.Dwarf or self.db.race == Race.Halfling:
self.db.speed = 20
if self.db.race == Race.Human:
self.db.age = random.randint(18, 45)
elif self.db.race == Race.Dwarf:
self.db.age = random.randint(37, 75)
elif self.db.race == Race.Elf:
self.db.age = random.randint(35, 100)
elif self.db.race == Race.Halfling:
self.db.age = random.randint(20, 55)
if "money" in OccupationTable[occupation]:
money_data = OccupationTable[occupation]["money"]
if "gold" in money_data:
self.db.gold += money_data["gold"]
if "silver" in money_data:
self.db.silver += money_data["silver"]
if "copper" in money_data:
self.db.copper += money_data["copper"]
if self.db.intelligence >= 8:
if self.db.race == Race.Dwarf:
self.db.known_languages.append(Language.Dwarf)
elif self.db.race == Race.Elf:
self.db.known_languages.append(Language.Elf)
elif self.db.race == Race.Halfling:
self.db.known_languages.append(Language.Halfling)
self.db.current_hp = self.get_modified_hp()
``` |
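As a quick illustration of the dice helpers at the top of characters.py: 3d6 yields ability scores from 3 to 18, and the modifier table maps them onto -3 through +3.
```python
# Ability score generation and its derived modifier.
score = roll_dice(3, 6)   # sum of three six-sided dice, 3-18
assert calculate_ability_modifier(3) == -3
assert calculate_ability_modifier(10) == 0
assert calculate_ability_modifier(16) == 2
assert calculate_ability_modifier(18) == 3
```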
{
"source": "johnkangw/dash-labs",
"score": 2
} |
#### File: johnkangw/dash-labs/setup.py
```python
from setuptools import setup, find_packages
import os
from pathlib import Path
here = Path(os.path.dirname(os.path.abspath(__file__)))
main_ns = {}
with (here / "dash_labs" / "version.py").open() as f:
exec(f.read(), main_ns) # pylint: disable=exec-used
def requirements_txt():
with open(here / "requirements.txt", "rt") as f:
return [line.strip() for line in f.read().split("\n")]
def readme():
with open(here / "README.md", "rt") as f:
return f.read()
setup(
name="dash-labs",
version=main_ns["__version__"],
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="https://github.com/plotly/dash-labs",
project_urls={"Github": "https://github.com/plotly/dash-labs"},
description="Experimental enhancements for potential inclusion in Dash",
long_description=readme(),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Visualization",
"Framework :: Dash",
],
license="MIT",
license_files=["LICENSE.txt"],
python_requires=">=3.6.*",
packages=find_packages(exclude=["tests", "tests.*"]),
)
``` |
{
"source": "johnkarasev/cs446_project",
"score": 3
} |
#### File: johnkarasev/cs446_project/main.py
```python
import numpy as np
import json
import preprocess
from word2vec import Word2Vec
def main():
contexts = np.fromfile("./data/npcontexts.dat", dtype=int)
neighbors = np.fromfile("./data/npneighbors.dat", dtype=int)
skipgram = Word2Vec(contexts, neighbors, 35000, 10, 0.001, 64, "sg.ckpt", batch_size=500)
skipgram.train(2)
if __name__ == "__main__":
main()
``` |
{
"source": "JohnKarima/blog",
"score": 2
} |
#### File: app/main/views.py
```python
from flask import render_template, request, redirect, url_for, abort, flash
from flask_login import login_required, current_user
from . import main
from ..request import get_quotes
from ..models import User, Blog, Comment
from .forms import UpdateProfile, BlogForm, CommentForm
from .. import db, photos
import markdown2
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'TheVoid.com'
message = 'Blog Gang Rise UP!!!'
quotes = get_quotes()
blogs = Blog.get_all_blogs()
return render_template('index.html', message = message, title = title, quotes=quotes, blogs = blogs)
@main.route('/user/<uname>')
def profile(uname):
    user = User.query.filter_by(username = uname).first()
    # Guard against a missing user before dereferencing user.id below
    if user is None:
        abort(404)
    blogs = Blog.query.filter_by(user_id = user.id)
    title = 'Profile'
return render_template("profile/profile.html", user = user, blogs = blogs, title = title)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
title = 'Edit profile'
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form, title = title)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/new_blog', methods = ['GET','POST'])
def new_blog():
form = BlogForm()
title = 'New Blog Post'
if form.validate_on_submit():
blog = form.blog_post.data
new_blog = Blog(blog = blog, user = current_user)
new_blog.save_blog()
return redirect(url_for('main.index'))
return render_template('/new_blog.html',blog_form = form, title = title)
@main.route("/blog/<int:id>",methods = ["GET","POST"])
def blog_page(id):
blog = Blog.query.filter_by(id = id).first()
title = 'Blog Post'
form = CommentForm()
if form.validate_on_submit():
comment = form.comment.data
new_comment = Comment(comment = comment, blog = blog)
new_comment.save_comments()
return redirect(url_for('main.blog_page', id = blog.id))
comments = Comment.query.filter_by(blog_id = blog.id)
return render_template("blog.html", title = title, blog = blog, comment_form = form, comments = comments)
# @main.route("/delete/<id>")
# def delete(id):
# blog = Blog.query.filter_by(id = id).first()
# user_id = blog.user_id
# db.session.delete(blog)
# db.session.commit()
# return redirect(url_for('blog.html',id = user_id))
``` |
{
"source": "JohnKarima/news-hub",
"score": 3
} |
#### File: app/main/views.py
```python
from flask import render_template
from . import main
from ..request import get_source, get_article
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
sources_english = get_source('en')
sources_strange = get_source('fr')
print(sources_english)
print(sources_strange)
title = 'Home - Welcome to NewsHub'
return render_template('index.html', title = title, en = sources_english, fr = sources_strange)
@main.route('/articles/<id>')
def article(id):
'''
View articles page function that returns the articles details page and its data
'''
title = 'Sources'
article = get_article(id)
print(article)
return render_template('articles.html', title=title, article = article)
```
#### File: news-hub/tests/test_article.py
```python
import unittest
from app.models import Article
class ArticleTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Article class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_article = Article('NewsDaily', 'NewsDailyTrue','<NAME>', 'Hummus...thoughts?','Literally talking about hummus sir','www.newsdaily.net','www.newsdaily.net/picOfHummus6', '2020/2/3', 'lorem gang et all')
def test_instance(self):
self.assertTrue(isinstance(self.new_article,Article))
``` |
{
"source": "JohnKarima/password-locker",
"score": 3
} |
#### File: JohnKarima/password-locker/run.py
```python
from password import User, Credentials
import pyfiglet, pyperclip
ascii_banner = pyfiglet.figlet_format("Pass Lock!")
print(ascii_banner)
def create_users(username, password):
'''
function to create a new user
'''
new_user = User(username,password)
return new_user
def save_users(user):
'''
Function to save users
'''
user.save_users()
def delete_users(user):
'''
Function to delete a user
'''
user.delete_users()
def user_exists(username, password):
'''
function that checks if a user exists from the user list
'''
return User.user_exists(username, password)
def find_by_username_user(username):
'''
Function that finds a user by username
'''
return User.find_by_username_user(username)
def display_users():
'''
Function that returns all the saved users
'''
return User.display_users()
def create_credentials(site, username, password):
'''
function to create a new credential
'''
new_credentials = Credentials(site, username,password)
return new_credentials
def save_credentials(credentials):
'''
Function to save credentials
'''
credentials.save_credentials()
def delete_credentials(credentials):
'''
Function to delete a credential
'''
credentials.delete_credentials()
def find_by_site(site):
'''
Function that finds a credential by site and returns the credentials
'''
return Credentials.find_by_site(site)
def credential_exists(site):
'''
function that checks if a credential exists from the credentials list
'''
return Credentials.credential_exists(site)
def display_credentials():
'''
Function that returns all the saved credentials
'''
return Credentials.display_credentials()
def main():
print("Hello Welcome to Password_Locker.")
print("Let's get you started by creating your new account")
print("="*50)
print("Please input a Password_Locker UserName of your choice...")
u_name_login = input()
print("Please input the Password_Locker Password you'll be using...")
password_login = input()
save_users(create_users(u_name_login,password_login))
    print('\n')
print(f"New User Username: {u_name_login}, with Password: {password_login}, saved!")
print("Now let's login to Password_Locker")
print("="*50)
print("Enter your Username and your Password to log in:")
username = input("Username: ")
password = input("Password: ")
if user_exists(username, password):
login = user_exists(username,password)
print('\n')
print(f"Hello {username}. Welcome To Password_Locker!")
else:
while user_exists(username,password) == False:
print('\n')
print("Wrong Password_Locker login Credentials!")
print("Please try again")
print('\n')
username = input("Username: ")
password = input("Password: ")
login = user_exists(username,password)
while login == True:
print('\n')
print("Use these short codes to : \ncc -Create a new credential, dc -Display credentials, \nfc -Find a credential by site, d -Delete a credential by site, \nex -Exit Password_Locker")
short_code = input().lower()
if short_code == 'cc':
print("New Credential")
print("="*50)
print ("Site name ....")
s_name = input()
print ("User name ....")
u_name = input()
print("Password ...")
password = input()
save_credentials(create_credentials(s_name, u_name,password))
print('\n')
print(f"New Credential for -{s_name} account:\n Username: {u_name}\n Password: {password} created")
print("="*50)
elif short_code == 'dc':
if display_credentials():
print("Here is a list of all your credentials:")
print('\n')
for credentials in display_credentials():
print(f" Site: {credentials.site} Username: {credentials.username} Password: {credentials.password} .....")
print("-"*50)
else:
print('\n')
print("You don't seem to have any credentials saved yet")
print("="*50)
print('\n')
elif short_code == 'fc':
print("Enter the site name you want to search for")
search_site = input()
if credential_exists(search_site):
search_credentials = find_by_site(search_site)
print('\n')
print(f"Found. Site: {search_credentials.site} Username: {search_credentials.username} Password: {search_credentials.password}")
print('=' *50)
else:
print("That credential does not exist")
print("="*50)
elif short_code == "d":
print("Enter the account site name of the credentials you want to delete")
search_name = input().lower()
if find_by_site(search_name):
search_credential = find_by_site(search_name)
print("="*50)
search_credential.delete_credentials()
print('\n')
print(f"Your stored credentials for : {search_credential.site} have successfully been deleted!!!")
print('\n')
else:
print("That Credential you want to delete does not exist in your store yet")
elif short_code == "ex":
ascii_banner = pyfiglet.figlet_format("Bye Bye!")
print(ascii_banner)
print("See you again .......")
break
else:
print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main()
``` |
{
"source": "Johnkariungi/worldtravel",
"score": 3
} |
#### File: Johnkariungi/worldtravel/utils.py
```python
from wit import Wit
access_token = "<KEY>"
client = Wit(access_token = access_token)
#message_text = "I like cool places"
def wit_response(message_text):
resp = client.message(message_text)
entity = None
value = None
print(resp)
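    # A Wit.ai /message response is assumed to look roughly like this
    # (hypothetical): {'entities': {'intent': [{'value': 'cool places', ...}]}}
    # so the loop below keeps the last entity name and its first value.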
for k, v in resp['entities'].items():
entity = k
value = v[0]['value']
print(value)
#try:
#entity = list(resp['entities'])[0]
#value = resp['entities']['entity'][0]['value']
#except Exception as e:
return (entity, value)
#print(resp)
print(wit_response('I like cool places'))
``` |
{
"source": "johnkarlen/cookiecutter-data-science",
"score": 2
} |
#### File: src/deploy/model_deploy.py
```python
from mmmm import new_model
import os
import sys
import vertica_python
def run_from_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
if run_from_ipython():
from IPython import get_ipython
ipython = get_ipython()
ipython.magic("matplotlib inline")
project_dir = os.getcwd()
else:
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
src_dir = os.path.join(project_dir, "src")
sys.path.append(src_dir)
# What is the name of the model
model_name = ''
# What is the domain - options are D (digital), I (investments),
# O (operations), SM (sales and marketing), RP (risk and product)
domain = ''
# What is the current model version? Model version should take the form of
# {major}.{minor} where both {major} and {minor} are integers.
version = ''
# Who should be contacted in case of failure
owner = ''
# If the primary owner cannot be contacted who should be
# contacted in case of failure
secondary_owner = ''
# Where is the model object being stored?
# Typically this is a location in S3 or hdfs
location = ''
# What date is the model being deployed?
# Dates should be formatted as YYYY-mm-dd
deploy_date = ''
# In order to monitor accuracy we need a baseline measurement
# First, how are you measuring your model? AUC? RMSE? Put the name
# of the metric below
metric = ''
# What is the value of the metric above? Typically this value would
# derived by measuring performance on a held out testing set or through
# cross validation
value = ''
# The memo column below is optional. It can be used to provide a brief
# description of the deployment
memo = ''
# What is the name of the training pd.Dataframe in Python?
# Do not enter as a string
training_data = ''
# What columns should we consider categorical? For these columns model
# monitoring will count the number in each bin. Include model output
# if appropriate. DO NOT include the true label
categorical_cols = ['', '', ...]
# What columns should we consider numerical? For these columns model
# monitoring will compute deciles and monitor the counts in these deciles over
# time. Include model output if appropriate. DO NOT include the true label
# If you would like to specify your own bins please reach out to Xiaomin
# for instruction.
continuous_cols = ['', '', ...]
# What columns are model output? DO NOT include the true label
output_cols = ['', '', ...]
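# Hypothetical filled-in example (column names are illustrative only):
# categorical_cols = ['state', 'channel', 'predicted_class']
# continuous_cols = ['age', 'income', 'predicted_probability']
# output_cols = ['predicted_class', 'predicted_probability']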
# Establish a connection to vertica
db_conn = vertica_python.connect(
host='vertica.private.massmutual.com',
port=int("5433"),
user='model_monitoring_batch',
password='[<PASSWORD>]',
database='advana'
)
# Execute this function, given user does not offer self-defined bins
new_model(
model_name,
version,
domain,
owner=owner,
secondary_owner=secondary_owner,
location=location,
deploy_date=deploy_date,
metric=metric,
value=value,
memo=memo,
categorical_cols=categorical_cols,
continuous_cols=continuous_cols,
output_cols=output_cols,
db_conn=db_conn)
db_conn.close()
``` |
{
"source": "JohnKatsa/FS_Monitoring",
"score": 3
} |
#### File: JohnKatsa/FS_Monitoring/statsCalculator.py
```python
class statsCalculator:
# Average of file sizes that were opened, written or read. (Before write operation)
avgFileSizes_open = 0
open_counter = 0
avgFileSizes_read = 0
read_counter = 0
avgFileSizes_write = 0
write_counter = 0
def avgFileSizes_update(self, filesMap, fileName, type):
if filesMap.get(fileName):
if type == "open":
self.avgFileSizes_open += filesMap.get(fileName)
self.open_counter += 1
elif type == "read":
self.avgFileSizes_read += filesMap.get(fileName)
self.read_counter += 1
elif type == "write":
self.avgFileSizes_write += filesMap.get(fileName)
self.write_counter += 1
def avgFileSizes(self):
return {"open" : 0 if self.open_counter == 0 else self.avgFileSizes_open/self.open_counter,\
"read" : 0 if self.read_counter == 0 else self.avgFileSizes_read/self.read_counter,\
"write" : 0 if self.write_counter == 0 else self.avgFileSizes_write/self.write_counter}
###################################################################################
x = statsCalculator()
print(x.avgFileSizes())
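# Hypothetical demo: feed a small in-memory {filename: size} map, record a
# few operations, then print the per-operation averages again.
sample_files = {"a.txt": 100, "b.log": 300}
x.avgFileSizes_update(sample_files, "a.txt", "open")
x.avgFileSizes_update(sample_files, "b.log", "read")
print(x.avgFileSizes())  # {'open': 100.0, 'read': 300.0, 'write': 0}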
``` |
{
"source": "Johnkayode/django-rest-framework-email-accounts",
"score": 3
} |
#### File: Johnkayode/django-rest-framework-email-accounts/models.py
```python
from __future__ import unicode_literals
from django.contrib.auth.models import (BaseUserManager, AbstractBaseUser)
from django.db import models
from django.utils.translation import ugettext as _
import uuid
class AppUserManager(BaseUserManager):
"""AppUser manager class."""
def create_user(self, email, first_name, last_name, password=None):
"""Create and save a User with the given email, date of birth and password."""
if not email:
raise ValueError(_('Users must have an email address'))
user = self.model(
email=self.normalize_email(email),
first_name=first_name,
last_name=last_name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, first_name, last_name, password):
"""Create and save a superuser with the given email."""
user = self.create_user(email, password=password, first_name=first_name, last_name=last_name)
user.is_admin = True
user.save(using=self._db)
return user
class AppUser(AbstractBaseUser):
"""AppUser model class (for customizing user model)."""
# Fields
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
email = models.EmailField(verbose_name=_('E-mail address'), max_length=255, unique=True)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
is_active = models.BooleanField(default=True)
is_verified = models.BooleanField(default=False)
is_admin = models.BooleanField(default=False)
timestamp_subscription = models.DateTimeField(auto_now_add=True)
timestamp_modified = models.DateTimeField(auto_now=True)
objects = AppUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
# Methods
def get_full_name(self):
"""Return the user full name."""
# The user is identified by their email address
return self.first_name+' '+self.last_name
def get_short_name(self):
"""Return the user first name."""
# The user is identified by their email address
return self.first_name
def __unicode__(self):
"""Unicode representation of the class."""
return self.email
def __str__(self): # __unicode__ on Python 2
"""String representation of the class."""
return self.email
def has_perm(self, perm, obj=None):
"""Do the user have a specific permission? Checking it."""
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"""Do the user have permissions to view the app `Accounts`? Checking it."""
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"""Checking if the user is a member of staff."""
# Simplest possible answer: All admins are staff
return self.is_admin
``` |
{
"source": "Johnkayode/Voting",
"score": 2
} |
#### File: Voting/Choice_Coin_Voting/__init__.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import environ, path
from dotenv import load_dotenv
basedir = path.dirname(path.abspath(__file__))
load_dotenv(path.join(basedir, ".env"), verbose=True)
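# The .env file is expected to provide these keys (hypothetical values):
#   FLASK_DEBUG=1
#   SECRET_KEY=change-me
#   SQLALCHEMY_DATABASE_URI=sqlite:///voting.db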
db = SQLAlchemy()
def create_app():
app = Flask(__name__)
app.config['FLASK_DEBUG'] = environ.get("FLASK_DEBUG")
app.config['SECRET_KEY'] = environ.get("SECRET_KEY")
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get("SQLALCHEMY_DATABASE_URI")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
with app.app_context():
from . import models, routes
return app
```
#### File: Johnkayode/Voting/run.py
```python
from Choice_Coin_Voting import create_app, db
app = create_app()
@app.before_first_request
def create_db_tables():
db.create_all()
if __name__=="__main__":
app.run(debug=True)
``` |
{
"source": "JohnKazantzis/GreenWallet",
"score": 2
} |
#### File: GreenWallet/backend/server.py
```python
import sqlite3
from flask import Flask, request
import flask
from flask_classful import FlaskView, route
from flask_cors import CORS
import requests
import json
from sqlalchemy import create_engine
from sqlalchemy import Column, String, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import hashlib
from JWTTokenUtils import *
from wallet import *
app = Flask(__name__)
CORS(app)
# Heroku test
@app.route('/')
def index():
return "<h1>Welcome to our server !!</h1>"
db_string = 'postgres://ihqfpqjelgtxop:<EMAIL>:5432/d9isbfunrbk25g'
db = create_engine(db_string)
base = declarative_base()
class Contract(base):
__tablename__ = 'Contracts'
Name = Column(String)
Address = Column(String, primary_key=True)
FunctionName = Column(String)
userId = Column(Integer)
class User(base):
__tablename__ = 'Users'
Id = Column(Integer, primary_key=True, autoincrement=True)
FirstName = Column(String)
LastName = Column(String)
Username = Column(String)
Password = Column(String)
Mnemonic = Column(String)
class UserUtils:
@staticmethod
def passwordHashing(password):
# Creating the user's password hash
return hashlib.sha224(password).hexdigest()
@staticmethod
@app.route('/checkToken/', methods=['GET'])
def checkToken():
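        # Expected call shape (hypothetical): GET /checkToken/?token=<jwt>
        # Responds with '1' for a valid token and '0' otherwise.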
data = request.args.to_dict()
print('####################')
print(data)
token = data['token']
#print(token = request.args.get("token", None))
result = JWTTokenUtils.checkToken(token)
if result is None:
return '0'
return '1'
@staticmethod
@app.route('/login/', methods=['GET'])
def login():
# Getting calls's parameters
data = request.args.to_dict()
# Creating session
Session = sessionmaker(db)
session = Session()
# Querying the database to validate the user's credentials
users = session.query(User).filter(User.Username == data['username']).filter(User.Password == UserUtils.passwordHashing(data['password'].encode('utf-8')))
for user in users:
print('User found: {}, {}'.format(user.Username, user.Password))
# Creating token
token = JWTTokenUtils.createToken(user.Id)
return {'token': token.decode('utf-8'), 'mnemonic': user.Mnemonic}
session.commit()
return 'User not found!'
@staticmethod
@app.route('/createUser/', methods=['POST'])
def createUser():
data = request.values.to_dict()
for key in data.keys():
formData = json.loads(key)
print(formData)
# Creating Mnemonic
seed = walletUtils.createSeed()
print(seed)
# Creating session
Session = sessionmaker(db)
session = Session()
genesys = User(Username=formData['username'], Password=UserUtils.passwordHashing(formData['password'].encode('utf-8')), Mnemonic=seed)
session.add(genesys)
session.commit()
# The details of the new user
userDetails = {'username': formData['username'], 'mnemonic': seed}
return userDetails
@staticmethod
@app.route('/deleteUser/', methods=['DELETE'])
def deleteUser():
data = request.args.to_dict()
print(data)
session.query(User).filter(User.Username == data['username']).filter(User.Password == UserUtils.passwordHashing(data['password'].encode('utf-8'))).delete()
session.commit()
return 'OK'
class ContractUtils:
# @staticmethod
# def getLocalContracts():
# f = open('../client/src/contracts/scooterTransactions.json')
# data = json.load(f)
# scooterTransactionsAddr = data['networks']['5777']['address']
# print(scooterTransactionsAddr)
# return scooterTransactionsAddr
@staticmethod
@app.route('/getContracts/', methods=['GET'])
def getContracts():
#
# Returning all the Contract entries in the db
#
Session = sessionmaker(db)
session = Session()
# Retrieving and checking the token
data = request.args.to_dict()
result = JWTTokenUtils.checkToken(data['token'])
print('Get Contracts token: ' + str(result))
if result is None:
            # Flask views must not return a bare int; mirror checkToken's '0'
            return '0', 201
else:
contracts = session.query(Contract).filter(Contract.userId==result).order_by(Contract.Address)
data = {}
for contract in contracts:
data[contract.Address] = {'Name': contract.Name, 'FunctionName': contract.FunctionName, 'Address': contract.Address}
session.commit()
print(data)
responseToReact = flask.Response(json.dumps(data))
responseToReact.headers['Access-Control-Allow-Origin'] = '*'
return responseToReact
@staticmethod
@app.route('/deleteContract/<address>/', methods=['DELETE'])
def deleteContract(address):
print(address)
Session = sessionmaker(db)
session = Session()
# genesys = Contract(Address=address)
# session.delete(genesys)
session.query(Contract).filter(Contract.Address == address).delete()
session.commit()
        return 'Contract Deleted'
@staticmethod
@app.route('/updateAddContract/', methods=['POST'])
def updateContract():
data = request.values.to_dict()
for key in data.keys():
formData = json.loads(key)
# Retrieving and checking the token
result = JWTTokenUtils.checkToken(formData['token'])
Session = sessionmaker(db)
session = Session()
# Checking if the Contract (address) already exists
contracts = session.query(Contract)
for contract in contracts:
if formData['address'] == contract.Address:
# If any field is different, update data!
if (formData['name'] != contract.Name) or (formData['functionName'] != contract.FunctionName):
contract.Name = formData['name']
contract.FunctionName = formData['functionName']
contract.userId = result
session.commit()
return 'Contract Updated'
return 'Contract Already exists'
genesys = Contract(Name=formData['name'], Address=formData['address'], FunctionName=formData['functionName'], userId=result)
session.add(genesys)
session.commit()
return 'OK'
class APICalls:
@staticmethod
@app.route('/getExchangeRates/', methods=['GET'])
def getExchangeRates():
#
# Making an API call to the CoinLayer API
# to get the current exchange rates
#
data = request.args.to_dict()
coin = data['target']
COINLAYER_URL = 'http://api.coinlayer.com/live'
COINLAYER_API_KEY = '<KEY>'
PARAMS = { 'access_key': COINLAYER_API_KEY, 'target': coin }
HEADERS = {'content-type':'application/json'}
response = requests.get(url=COINLAYER_URL, params=PARAMS, headers=HEADERS)
data = json.loads(response.text)
dataToSent = {'ETH': data['rates']['ETH'],
'BTC': data['rates']['BTC'],
'XRP': data['rates']['XRP']
}
responseToReact = flask.Response(json.dumps(dataToSent))
responseToReact.headers['Access-Control-Allow-Origin'] = '*'
return responseToReact
# Getting the address of the contract
# scooterTransactionsAddr = ContractUtils.getLocalContracts()
Session = sessionmaker(db)
session = Session()
base.metadata.create_all(db)
session.commit()
# users = session.query(User)
# for x in users:
# print(x.LastName)
# # Connecting to the database
# db, base = ContractUtils.dbInit()
# # Creating Tables
# ContractUtils.create_tables(db)
token = JWTTokenUtils.createToken(1)
JWTTokenUtils.checkToken(token)
if __name__ == '__main__':
app.run()
``` |
{
"source": "johnkbyrne/one-thousand-miles",
"score": 3
} |
#### File: one-thousand-miles/one_k_km/actual_vs_goal_cumulative.py
```python
import pandas as pd
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
def actual_goal_cumulative(source, x_axis):
cds_bar = source
fig = figure(
title='Actual KMs versus goal KMs',
plot_height=500, plot_width=800,
x_axis_label='Week Number', y_axis_label='Cumulative KMs',
x_minor_ticks=2, y_range=(0, 50),
x_range=x_axis,
toolbar_location=None,
)
fig.vbar(
x='day_of_week', bottom=0, top='weekly_actual_cumulative',
color='#084594', width=0.75,
legend='KMs Run', source=cds_bar,
)
fig.line(
x='day_of_week', y='weekly_goal_cumulative',
color='#9ecae1', line_width=4,
legend='Goal KMs', source=cds_bar,
)
# Put the legend in the upper left corner
fig.legend.location = 'top_left'
tooltips = [
        ('Cumulative actual KMs', '@weekly_actual_cumulative'),
        ('Cumulative goal KMs', '@weekly_goal_cumulative'),
]
# Add the HoverTool to the figure
fig.add_tools(HoverTool(tooltips=tooltips))
return fig
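# Hypothetical usage sketch: wrap a DataFrame with the expected columns in a
# ColumnDataSource and render the figure, e.g.:
#   from bokeh.io import show
#   source = ColumnDataSource(df)  # df needs day_of_week,
#                                  # weekly_actual_cumulative and
#                                  # weekly_goal_cumulative columns
#   show(actual_goal_cumulative(source, x_axis=list(df['day_of_week'])))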
```
#### File: one-thousand-miles/one_k_km/static_summary.py
```python
import pandas as pd
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
def summary_cumulative(source, X_AXIS):
source = source
X_AXIS = [str(x) for x in X_AXIS]
km_fig = figure(
plot_height=300, plot_width=800,
title='Cumulative KMs Run vs Goal KMs',
x_axis_label='week_number',
y_axis_label='Kms',
x_range=X_AXIS,
y_range=(0, 1010),
toolbar_location=None)
km_fig.line(
x='week_number', y='cumulative_weekly_kms',
color='#9ecae1', line_width=3,
legend='Cumulative weekly kms',source=source
)
km_fig.vbar(
x='week_number', bottom=0, top='cumulative_weekly_kms',
color='#084594', width=0.75,
legend='KMs Run', source=source
)
km_fig.line(
x='week_number', y='cumulative_weekly_goal_kms',
color='#9ecae1', line_width=3,
legend='Cumulative goal kms',source=source
)
km_fig.legend.location = 'top_left'
tooltips = [
('Cumulative actual Kilometers','@cumulative_weekly_kms'),
('Cumulative goal kilometers', '@cumulative_weekly_goal_kms'),
('Week number', '@week')
]
# Add the HoverTool to the figure
km_fig.add_tools(HoverTool(tooltips=tooltips))
return km_fig
def actual_weekly_vs_goal(source, X_AXIS):
cds_bar = source
X_AXIS = [str(x) for x in X_AXIS]
# Create a figure with a datetime type x-axis
fig = figure(
title='Actual KMs Run versus Goal KMs',
plot_height=300, plot_width=800,
x_axis_label='Week number', y_axis_label='KMs',
x_minor_ticks=2,
y_range=(0, 50),
x_range=X_AXIS,
toolbar_location=None
)
# The daily words will be represented as vertical bars (columns)
fig.vbar(
x='week_number', bottom=0, top='kms',
color='#084594', width=0.75,
legend='KMS Run', source=cds_bar
)
# The cumulative sum will be a trend line
fig.line(
x='week_number', y='weekly_goal',
color='#9ecae1', line_width=1,
legend='Goal KMs', source=cds_bar
)
# Put the legend in the upper left corner
fig.legend.location = 'top_left'
tooltips = [
('Kilometers','@kms'),
('Week number', '@week_number'),
]
# Add the HoverTool to the figure
fig.add_tools(HoverTool(tooltips=tooltips))
return fig
``` |
{
"source": "johnkchan/jay-bot",
"score": 3
} |
#### File: jay-bot/cogs/APICommands.py
```python
import discord
from discord.ext import commands
from googletrans import Translator
from datetime import datetime
import asyncio
import random
import praw
import requests
import os
class APICommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="advice")
async def advice(self, ctx):
URL = "https://api.adviceslip.com/advice"
try:
r = requests.get(url=URL)
except Exception as e:
print(e)
return
advice = r.json()
if advice:
return await ctx.send(advice["slip"]["advice"])
@commands.command(name="gif", description="It's so fluffy!")
async def gif(self, ctx, *, search_term: str):
URL = "http://api.giphy.com/v1/gifs/random?"
PARAMS = {"api_key": os.environ["GIPHY_API_KEY"],
"tag": search_term}
try:
r = requests.get(url=URL, params=PARAMS)
except Exception as e:
print(e)
return
data = r.json()
if data["data"]:
return await ctx.send(data["data"]["images"]["original"]["url"])
@commands.command(name="weather", description="Jay Bot tells you the weather forecast", help="Shows latest weather forecast", aliases=["weatherc"])
async def weather(self, ctx, *, location: str = "New York City"):
command = ctx.message.content
# Should return temperature in Celsius if user specifies weatherc command
units = "metric" if ".weatherc" in command else "imperial"
scale = "C" if ".weatherc" in command else "F"
speed = "mps" if ".weatherc" in command else "mph"
URL = "http://api.openweathermap.org/data/2.5/weather?"
PARAMS = {"appid": os.getenv('OPENWEATHER_API_KEY'),
"q": location,
"units": units}
try:
r = requests.get(url=URL, params=PARAMS)
except Exception as e:
print(e)
return
data = r.json()
# Convert Latitude & Longitude to Float
latitude = float(data['coord']['lat'])
longitude = float(data['coord']['lon'])
embed = discord.Embed(
title=data["name"],
description=f"[{latitude}{'N' if latitude > 0 else 'S'},{longitude}{'W' if latitude < 0 else 'E'}](https://www.google.com/maps/search/{latitude},{longitude}/)",
colour=discord.Colour.blue()
)
embed.add_field(
name="Weather", value=data["weather"][0]["description"].title(), inline=False)
embed.add_field(
name="Temp", value=f"{int(data['main']['temp'])}°{scale}", inline=True)
embed.add_field(
name="Feels Like", value=f"{int(data['main']['feels_like'])}°{scale}", inline=True)
embed.add_field(
name="\uFEFF", value="\uFEFF", inline=True)
embed.add_field(
name="Humidity", value=f"{int(data['main']['humidity'])}%", inline=True)
embed.add_field(
name="Wind Speed", value=f"{data['wind']['speed']}{speed}", inline=True)
embed.add_field(
name="\uFEFF", value="\uFEFF", inline=True)
# Convert Unix time to Readable Date Format
sunrise = datetime.fromtimestamp(data["sys"]["sunrise"])
sunset = datetime.fromtimestamp(data["sys"]["sunset"])
embed.add_field(
name="Sunrise", value=sunrise.strftime('%I:%M %p'), inline=True)
embed.add_field(
name="Sunset", value=sunset.strftime('%I:%M %p'), inline=True)
embed.add_field(
name="\uFEFF", value="\uFEFF", inline=True)
embed.set_thumbnail(
url=f"http://openweathermap.org/img/wn/{data['weather'][0]['icon']}@2x.png")
return await ctx.send(embed=embed)
@commands.command(name="movie", description="Jay Bot tells you movie details", help="Shows movie details")
async def movie(self, ctx, *, movie_title: str):
URL = "http://www.omdbapi.com/?"
PARAMS = {"apikey": os.environ["OMDB_API_KEY"],
"t": movie_title}
try:
r = requests.get(url=URL, params=PARAMS)
except Exception as e:
print(e)
return
movie = r.json()
if movie["Response"] != "True":
return await ctx.send("Movie title not found")
embed = discord.Embed(
title=movie["Title"],
description=movie["Plot"],
colour=discord.Colour.blue(),
url=f"https://www.imdb.com/title/{movie['imdbID']}"
)
embed.add_field(
name="Released", value=movie["Released"], inline=True)
embed.add_field(
name="Runtime", value=movie["Runtime"], inline=True)
embed.add_field(
name="Rated", value=movie["Rated"], inline=True)
embed.add_field(
name="Genre", value=movie["Genre"], inline=False)
embed.add_field(
name="Director", value=movie["Director"], inline=False)
embed.add_field(name="Actors", value=movie["Actors"], inline=False)
if movie["Awards"] != "N/A":
embed.add_field(
name="Awards", value=movie["Awards"], inline=False)
embed.add_field(
name="Metascore", value=movie["Metascore"], inline=True)
embed.add_field(
name="IMDb Rating", value=movie["imdbRating"], inline=True)
embed.add_field(
name="IMDb Votes", value=movie["imdbVotes"], inline=True)
embed.set_thumbnail(url=movie["Poster"])
return await ctx.send(embed=embed)
@commands.command(name="urbandictionary", description="Jay Bot tells you the definition", aliases=["urban", "urbandict"], help="Shows urban dictionary results")
async def urbandict(self, ctx, *, search_term: str):
URL = "http://api.urbandictionary.com/v0/define?"
PARAMS = {"term": search_term}
try:
r = requests.get(url=URL, params=PARAMS)
except Exception as e:
            print(e)
            return
data = r.json()
if not data["list"]:
return await ctx.send("Definition not found")
top_result = data["list"][0]
embed = discord.Embed(
title=top_result["word"].title(),
colour=discord.Colour.blue(),
url=top_result["permalink"]
)
embed.set_thumbnail(
url="https://img.pngio.com/urban-dictionary-definition-for-your-fave-urban-dictionary-png-670_315.png")
embed.add_field(
name="Author", value=top_result["author"], inline=False)
embed.add_field(
name="Definition", value=top_result["definition"], inline=False)
embed.add_field(
name="Example", value=top_result["example"], inline=False)
return await ctx.send(embed=embed)
@commands.command(name="funfact", description="Jay Bot tells you a fun fact", help="Shows random fun fact")
async def funfact(self, ctx):
URL = "https://uselessfacts.jsph.pl/random.json?language=en"
r = requests.get(url=URL)
data = r.json()
return await ctx.send(data["text"])
@commands.command(name="joke", description="Jay Bot tells you a joke", help="Shows random joke")
async def joke(self, ctx):
URL = "https://official-joke-api.appspot.com/random_joke"
r = requests.get(url=URL)
data = r.json()
return await ctx.send(f"{data['setup']}\n> {data['punchline']}")
@commands.command(name="translate", help="Shows translation")
async def translate(self, ctx, *, text: str):
LANGUAGES = {
'af': 'afrikaans',
'sq': 'albanian',
'am': 'amharic',
'ar': 'arabic',
'hy': 'armenian',
'az': 'azerbaijani',
'eu': 'basque',
'be': 'belarusian',
'bn': 'bengali',
'bs': 'bosnian',
'bg': 'bulgarian',
'ca': 'catalan',
'ceb': 'cebuano',
'ny': 'chichewa',
'zh-cn': 'chinese (simplified)',
'zh-tw': 'chinese (traditional)',
'co': 'corsican',
'hr': 'croatian',
'cs': 'czech',
'da': 'danish',
'nl': 'dutch',
'en': 'english',
'eo': 'esperanto',
'et': 'estonian',
'tl': 'filipino',
'fi': 'finnish',
'fr': 'french',
'fy': 'frisian',
'gl': 'galician',
'ka': 'georgian',
'de': 'german',
'el': 'greek',
'gu': 'gujarati',
'ht': 'haitian creole',
'ha': 'hausa',
'haw': 'hawaiian',
'iw': 'hebrew',
'hi': 'hindi',
'hmn': 'hmong',
'hu': 'hungarian',
'is': 'icelandic',
'ig': 'igbo',
'id': 'indonesian',
'ga': 'irish',
'it': 'italian',
'ja': 'japanese',
'jw': 'javanese',
'kn': 'kannada',
'kk': 'kazakh',
'km': 'khmer',
'ko': 'korean',
'ku': 'kurdish (kurmanji)',
'ky': 'kyrgyz',
'lo': 'lao',
'la': 'latin',
'lv': 'latvian',
'lt': 'lithuanian',
'lb': 'luxembourgish',
'mk': 'macedonian',
'mg': 'malagasy',
'ms': 'malay',
'ml': 'malayalam',
'mt': 'maltese',
'mi': 'maori',
'mr': 'marathi',
'mn': 'mongolian',
'my': 'myanmar (burmese)',
'ne': 'nepali',
'no': 'norwegian',
'ps': 'pashto',
'fa': 'persian',
'pl': 'polish',
'pt': 'portuguese',
'pa': 'punjabi',
'ro': 'romanian',
'ru': 'russian',
'sm': 'samoan',
'gd': 'scots gaelic',
'sr': 'serbian',
'st': 'sesotho',
'sn': 'shona',
'sd': 'sindhi',
'si': 'sinhala',
'sk': 'slovak',
'sl': 'slovenian',
'so': 'somali',
'es': 'spanish',
'su': 'sundanese',
'sw': 'swahili',
'sv': 'swedish',
'tg': 'tajik',
'ta': 'tamil',
'te': 'telugu',
'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'ur': 'urdu',
'uz': 'uzbek',
'vi': 'vietnamese',
'cy': 'welsh',
'xh': 'xhosa',
'yi': 'yiddish',
'yo': 'yoruba',
'zu': 'zulu',
'fil': 'Filipino',
'he': 'Hebrew'
}
translator = Translator()
detection = translator.detect(text)
language = detection.lang.lower()
confidence = detection.confidence
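        # Auto-direction: English input is translated to Simplified Chinese;
        # any other detected language is translated to English.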
output_language = "zh-cn" if language == "en" else "en"
output = translator.translate(text, dest=output_language)
translation = output.text
pronunciation = translator.translate(
text, dest=language).pronunciation
if output_language == "zh-cn":
pronunciation = translator.translate(
translation, dest="zh-cn").pronunciation
if translation:
embed = discord.Embed(
colour=discord.Colour.blue()
)
embed.add_field(
name="Translation", value=translation, inline=False)
if pronunciation:
embed.add_field(
name="Pronounciation", value=pronunciation, inline=False)
embed.add_field(
name="Detected", value=f"{LANGUAGES[language].title()}", inline=True)
embed.add_field(
name="Confidence", value=f"{confidence}", inline=True)
embed.set_thumbnail(
url="https://cdn0.iconfinder.com/data/icons/tuts/256/google_translate.png")
return await ctx.send(embed=embed)
@commands.command(name="yelp")
async def yelp(self, ctx, category, *, location="New York City"):
URL = "https://api.yelp.com/v3/businesses/search?"
HEADERS = {"Authorization": f"bearer {os.getenv('YELP_API_KEY')}"}
PARAMS = {"terms": "restaurant",
"categories": category.lower(),
"location": location,
"limit": 5,
"price": "1, 2"}
try:
r = requests.get(url=URL, params=PARAMS, headers=HEADERS)
except Exception as e:
print(e)
return
top_results = r.json()
await ctx.send(f"Top {PARAMS['limit']} results for '{category.title()}' in {location.title()}")
for business in top_results["businesses"]:
embed = discord.Embed(
title=business["name"],
description=", ".join([i["title"]
for i in business["categories"]]),
colour=discord.Colour.blue(),
url=business["url"]
)
embed.set_thumbnail(url=business["image_url"])
address = business["location"]["address1"]
city = business["location"]["city"]
zipcode = business["location"]["zip_code"]
full_address = f"{address}, {city} {zipcode}"
address_search_string = "+".join(full_address.split(" "))
embed.add_field(
name="Address", value=f"[{full_address}](https://www.google.com/maps/search/{address_search_string})", inline=False)
embed.add_field(name="Price", value=business["price"], inline=True)
embed.add_field(
name="Rating", value=business["rating"], inline=True)
embed.add_field(name="Review Count",
value=business["review_count"], inline=True)
embed.set_footer(
text=f"For more info, run .yelpsearch {business['id']}")
await ctx.send(embed=embed)
@commands.command(name="yelpsearch")
async def yelpsearch(self, ctx, business_id):
URL = f"https://api.yelp.com/v3/businesses/{business_id}"
HEADERS = {"Authorization": f"bearer {os.getenv('YELP_API_KEY')}"}
try:
r = requests.get(url=URL, headers=HEADERS)
except Exception as e:
            print(e)
            return
business = r.json()
if r.status_code == 404:
return await ctx.send(business["error"]["description"])
embed = discord.Embed(
title=business["name"],
description=", ".join([i["title"]
for i in business["categories"]]),
colour=discord.Colour.blue(),
url=business["url"]
)
embed.set_thumbnail(url=business["image_url"])
# Display Location Details of Business
address = business["location"]["address1"]
city = business["location"]["city"]
zipcode = business["location"]["zip_code"]
full_address = f"{address}, {city} {zipcode}"
address_search_string = "+".join(full_address.split(" "))
embed.add_field(
name="Address", value=f"[{full_address}](https://www.google.com/maps/search/{address_search_string})", inline=False)
# Display Price a& Rating of Business
embed.add_field(name="Price", value=business["price"], inline=True)
embed.add_field(
name="Rating", value=business["rating"], inline=True)
embed.add_field(name="Reviews",
value=business["review_count"], inline=True)
# Display Transaction Types offered by Business
embed.add_field(name="Reservation?",
value="✅" if "restaurant_reservation" in business["transactions"] else "❌", inline=True)
embed.add_field(name="Delivery?",
value="✅" if "delivery" in business["transactions"] else "❌", inline=True)
embed.add_field(name="Pickup?",
value="✅" if "pickup" in business["transactions"] else "❌", inline=True)
operation_hours = {0: ["Monday", "Closed"],
1: ["Tuesday", "Closed"],
2: ["Wednesday", "Closed"],
3: ["Thursday", "Closed"],
4: ["Friday", "Closed"],
5: ["Saturday", "Closed"],
6: ["Sunday", "Closed"]}
# Update operationHours dictionary with startTime and endTime
for weekday in business["hours"][0]["open"]:
# Convert 24 Hour Format into 12 Hour Format
opening_hour = datetime.strptime(
weekday['start'], "%H%M").strftime("%I:%M %p")
closing_hour = datetime.strptime(
weekday['end'], "%H%M").strftime("%I:%M %p")
operation_hours[weekday["day"]
][1] = f"{opening_hour} - {closing_hour}"
embed.add_field(name="Hours", value="\n".join(
[f"{value[0]}: {value[1]}" for key, value in operation_hours.items()]), inline=False)
embed.add_field(name="Is Open Now?",
value="✅" if business["hours"][0]["is_open_now"] else "❌", inline=False)
# Use second available photo to avoid duplicating thumnbnail image
if business["photos"]:
            # Fall back to the first photo when only one is available
            try:
                embed.set_image(url=business["photos"][1])
            except IndexError:
                embed.set_image(url=business["photos"][0])
return await ctx.send(embed=embed)
@commands.command(name="reddit", aliases=["ah", "dh", "ph", "dank", "comic"])
async def reddit(self, ctx, subreddit: str = "random"):
reddit = praw.Reddit(client_id=os.environ["REDDIT_CLIENT_ID"],
client_secret=os.environ["REDDIT_CLIENT_SECRET"],
user_agent="Jay Bot")
subreddit_dict = {
"ah": "accountinghumor",
"dh": "designershumor",
"ph": "programmerhumor",
"dank": "memes",
"comic": "webcomics"
}
command = ctx.message.content[1:]
try:
# Check if subreddit is in dictionary
selection = subreddit_dict[command]
except KeyError:
selection = subreddit
try:
# Return random submission from subreddit
submission = reddit.subreddit(selection).random()
max_embed_desc_len = 2048
embed = discord.Embed(
title=submission.title,
description=submission.selftext if len(
submission.selftext) < max_embed_desc_len else submission.selftext[:max_embed_desc_len - 3] + "...",
url=submission.shortlink
)
embed.add_field(
name="Author", value=f"[{submission.author.name}](https://www.reddit.com/user/{submission.author.name})", inline=True)
embed.add_field(
name="👍", value=submission.score, inline=True)
# Check if valid png or jpg file before setting image for embed
if submission.url[-3:] in ["png", "jpg"]:
embed.set_image(url=submission.url)
embed.set_footer(
text=f"/r/{submission.subreddit.display_name}")
await ctx.send(embed=embed)
except Exception as e:
print(e)
if "403" in str(e):
return await ctx.send("Subreddit is private")
if "404" in str(e):
return await ctx.send("Subreddit not found")
return await ctx.send("Subreddit not supported")
@commands.command(name="dictionary", aliases=["dict"])
async def dictionary(self, ctx, word: str):
if not word:
return await ctx.send("Please provide word to be defined")
URL = f"https://owlbot.info/api/v4/dictionary/{word}"
HEADERS = {"Authorization": f"Token {os.getenv('OWLBOT_API_KEY')}"}
try:
r = requests.get(url=URL, headers=HEADERS)
except Exception as e:
            print(e)
            return
if r.status_code == 404:
return await ctx.send("No definition found.")
data = r.json()
# Take Top 3 Definitions
length = 3 if len(data['definitions']) > 3 else len(
data['definitions'])
for i in range(length):
embed = discord.Embed(
title=data["word"].title(),
description=data["definitions"][i]['type'].title()
)
embed.add_field(
name="Definition", value=data["definitions"][i]["definition"], inline=False)
if data["definitions"][i]["example"]:
embed.add_field(
name="Example", value=data["definitions"][i]["example"], inline=False)
if data["definitions"][i]["image_url"]:
embed.set_thumbnail(url=data["definitions"][i]["image_url"])
await ctx.send(embed=embed)
@commands.command(name="news")
async def news(self, ctx, article_count=5):
URL = "https://newsapi.org/v2/top-headlines?"
PARAMS = {"apiKey": os.getenv("NEWS_API_KEY"),
"country": "us", }
try:
r = requests.get(url=URL, params=PARAMS)
except Exception as e:
print(e)
return
data = r.json()
length = article_count if data["totalResults"] >= article_count else data["totalResults"]
for i in range(length):
article = data["articles"][i]
embed = discord.Embed(
title=article["title"],
description=article['description'],
colour=discord.Colour.blue(),
url=article["url"]
)
embed.set_thumbnail(url=article["urlToImage"])
await ctx.send(embed=embed)
@commands.command(name="mal")
async def my_anime_list(self, ctx, username: str):
URL = f"https://api.jikan.moe/v3/user/{username}/animelist/all"
try:
r = requests.get(url=URL)
except Exception:
return
data = r.json()
embed = discord.Embed(
title=f"{username}'s Anime List",
description="All Anime",
url=f"https://myanimelist.net/animelist/{username}",
)
        # MyAnimeList watching_status codes (assumed mapping; the original
        # left 2-6 unfilled, and 5 appears unused by MAL):
        status_dict = {
            1: "Watching",
            2: "Completed",
            3: "On Hold",
            4: "Dropped",
            5: "Plan to Watch",
            6: "Plan to Watch"
        }
for idx, anime in enumerate(data["anime"]):
            # discord.py rejects empty embed field names, so reuse the
            # zero-width-space trick from the weather command for later rows
            embed.add_field(
                name="Title" if idx == 0 else "\uFEFF", value=f"[{anime['title']}]({anime['url']})", inline=True)
            embed.add_field(
                name="Status" if idx == 0 else "\uFEFF", value=status_dict[anime['watching_status']], inline=True)
            embed.add_field(
                name="Progress" if idx == 0 else "\uFEFF", value=f"Eps {anime['watched_episodes']}/{anime['total_episodes']}", inline=True)
embed.set_thumbnail(
url="https://image.myanimelist.net/ui/OK6W_koKDTOqqqLDbIoPAiC8a86sHufn_jOI-JGtoCQ")
embed.set_footer(text=f"MyAnimeList")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(APICommands(bot))
```
#### File: jay-bot/cogs/ComicCommands.py
```python
import discord
from discord.ext import commands
from imgurpython import ImgurClient
import requests
import random
import os
class ComicCommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="cyanide", aliases=["ch"])
async def cyanide(self, ctx):
client_id = os.getenv("IMGUR_CLIENT_ID")
client_secret = os.getenv("IMGUR_CLIENT_SECRET")
client = ImgurClient(client_id, client_secret)
galleries = ["Q28iX", "F3MUq", "Hs78vjZ",
"4irAcqH", "XyPBv", "ytSSEEo"]
images = client.get_album_images(random.choice(galleries))
random_idx = random.randrange(0, len(images))
random_comic = images[random_idx].link
embed = discord.Embed(
title="Cyanide & Happiness",
url="http://explosm.net/"
)
embed.set_image(url=random_comic)
await ctx.send(embed=embed)
@commands.command(name="loading", aliases=["la"])
async def loading(self, ctx):
client_id = os.getenv("IMGUR_CLIENT_ID")
client_secret = os.getenv("IMGUR_CLIENT_SECRET")
client = ImgurClient(client_id, client_secret)
galleries = ["eqog8N3", "V4983", "nk7dK", "J5hdR"]
images = client.get_album_images(random.choice(galleries))
random_idx = random.randrange(0, len(images))
random_comic = images[random_idx].link
embed = discord.Embed(
title="Loading Artist",
url="https://loadingartist.com/"
)
embed.set_image(url=random_comic)
await ctx.send(embed=embed)
@commands.command(name="lovenstein", aliases=["mrl"])
async def lovenstein(self, ctx):
client_id = os.getenv("IMGUR_CLIENT_ID")
client_secret = os.getenv("IMGUR_CLIENT_SECRET")
client = ImgurClient(client_id, client_secret)
galleries = ["6h7o9", "MhDJD", "Spqb6Oj", "Fm9cQ"]
images = client.get_album_images(random.choice(galleries))
random_idx = random.randrange(0, len(images))
random_comic = images[random_idx].link
embed = discord.Embed(
title="MrLovenstein",
url="https://www.mrlovenstein.com/"
)
embed.set_image(url=random_comic)
await ctx.send(embed=embed)
@commands.command(name="owlturd", aliases=["ot"])
async def owlturd(self, ctx):
client_id = os.getenv("IMGUR_CLIENT_ID")
client_secret = os.getenv("IMGUR_CLIENT_SECRET")
client = ImgurClient(client_id, client_secret)
galleries = ["KQELY", "MJBPd"]
images = client.get_album_images(random.choice(galleries))
random_idx = random.randrange(0, len(images))
random_comic = images[random_idx].link
embed = discord.Embed(
title="Owl Turd",
url="https://www.gocomics.com/shen-comix"
)
embed.set_image(url=random_comic)
await ctx.send(embed=embed)
@commands.command(name="xkcd")
async def xkcd(self, ctx):
random_comic_num = random.randrange(1, 2327)
URL = f"https://xkcd.com/{random_comic_num}/info.0.json"
try:
r = requests.get(url=URL)
except Exception as e:
print(e)
return
comic = r.json()
embed = discord.Embed(
title=comic["title"],
description=comic["alt"],
url=f"https://xkcd.com/{comic['num']}/"
)
embed.set_image(url=comic["img"])
embed.set_footer(text=f"xkcd comic #{comic['num']}")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(ComicCommands(bot))
``` |
{
"source": "JohnKearney2020/Hero_RPG",
"score": 4
} |
#### File: JohnKearney2020/Hero_RPG/original_rpg.py
```python
def main():
hero_health = 10
hero_power = 5
goblin_health = 6
goblin_power = 2
while goblin_health > 0 and hero_health > 0:
print("You have {} health and {} power.".format(hero_health, hero_power))
print("The goblin has {} health and {} power.".format(goblin_health, goblin_power))
print()
print("What do you want to do?")
print("1. fight goblin")
print("2. do nothing")
print("3. flee")
print("> ", end=' ')
raw_input = input()
if raw_input == "1":
# Hero attacks goblin
goblin_health -= hero_power
print("You do {} damage to the goblin.".format(hero_power))
if goblin_health <= 0:
print("The goblin is dead.")
elif raw_input == "2":
pass
elif raw_input == "3":
print("Goodbye.")
break
else:
print("Invalid input {}".format(raw_input))
if goblin_health > 0:
# Goblin attacks hero
hero_health -= goblin_power
print("The goblin does {} damage to you.".format(goblin_power))
if hero_health <= 0:
print("You are dead.")
main()
``` |
{
"source": "johnkellehernz/pfwra",
"score": 2
} |
#### File: pfwra/common/models.py
```python
from django.db import models
from django.template.defaultfilters import slugify
from wagtail.core.models import Collection, Page
from wagtail.core.fields import StreamField
from wagtail.snippets.models import register_snippet
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
from wagtail.admin.edit_handlers import (
FieldPanel,
FieldRowPanel,
MultiFieldPanel,
PageChooserPanel,
StreamFieldPanel,
)
from wagtailcache.cache import WagtailCacheMixin
from modelcluster.models import ClusterableModel
from modelcluster.fields import ParentalKey
from .blocks import BaseStreamBlock
@register_snippet
class Counter(models.Model):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
count = models.IntegerField(null=True, blank=True)
text = models.CharField(
null=True,
blank=True,
max_length=255,
help_text='Text to display on the counter'
)
panels = [
ImageChooserPanel('image'),
FieldPanel('count'),
FieldPanel('text'),
]
def __str__(self):
return '%d %s' % (self.count, self.text)
@register_snippet
class Suburb(models.Model):
name = models.CharField("Suburb name", unique=True, max_length=254)
slug = models.SlugField(unique=True, blank=True)
class Meta:
ordering = ['name']
verbose_name = 'suburb'
verbose_name_plural = 'suburbs'
panels = [
FieldPanel('name'),
FieldPanel('slug'),
]
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
return super(Suburb, self).save(*args, **kwargs)
@register_snippet
class People(models.Model):
first_name = models.CharField("First name", max_length=254)
last_name = models.CharField("Last name", max_length=254, null=True, blank=True)
job_title = models.CharField("Job title", max_length=254, null=True, blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
panels = [
MultiFieldPanel([
FieldRowPanel([
FieldPanel('first_name', classname="col6"),
FieldPanel('last_name', classname="col6"),
])
], "Name"),
FieldPanel('job_title'),
ImageChooserPanel('image')
]
search_fields = [
index.SearchField('first_name'),
index.SearchField('last_name'),
]
def __str__(self):
return '{} {}'.format(self.first_name, self.last_name)
class Meta:
verbose_name = 'Person'
verbose_name_plural = 'People'
class StandardPage(WagtailCacheMixin, Page):
subtitle = models.CharField("Title in Te reo Māori", max_length=254, blank=True, null=True)
introduction = models.TextField(
help_text='Text to describe the page',
blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
body = StreamField(
BaseStreamBlock(), verbose_name="Page body", blank=True
)
content_panels = Page.content_panels + [
FieldPanel('subtitle', classname="full"),
FieldPanel('introduction', classname="full"),
StreamFieldPanel('body'),
ImageChooserPanel('image'),
]
search_fields = Page.search_fields + [ # Inherit search_fields from Page
index.SearchField('introduction'),
index.SearchField('body'),
]
```
#### File: pfwra/news/models.py
```python
from __future__ import unicode_literals
from django.contrib import messages
from django.db import models
from django.shortcuts import redirect, render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey
from taggit.models import Tag, TaggedItemBase
from wagtail.contrib.routable_page.models import RoutablePageMixin, route
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, StreamFieldPanel, MultiFieldPanel
from wagtail.core.fields import StreamField
from wagtail.core.models import Page, Orderable
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtailcache.cache import WagtailCacheMixin
from common.blocks import BaseStreamBlock
from common.models import Suburb
class BlogPageTag(TaggedItemBase):
"""
This model allows us to create a many-to-many relationship between
the BlogPage object and tags. There's a longer guide on using it at
http://docs.wagtail.io/en/latest/reference/pages/model_recipes.html#tagging
"""
content_object = ParentalKey('BlogPage', related_name='tagged_items', on_delete=models.CASCADE)
class BlogPage(WagtailCacheMixin, Page):
author = models.ForeignKey(
'common.People',
null=True,
blank=True,
related_name='+',
on_delete=models.SET_NULL
)
introduction = models.TextField(
help_text='Text to describe the page',
blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
body = StreamField(
BaseStreamBlock(), verbose_name="Page body", blank=True
)
tags = ClusterTaggableManager(through=BlogPageTag, blank=True)
suburb = models.ForeignKey(
'common.Suburb',
null=True,
blank=True,
related_name='page_set',
on_delete=models.SET_NULL
)
date_published = models.DateField(
"Date article published", blank=True, null=True
)
content_panels = Page.content_panels + [
FieldPanel('introduction', classname="full"),
ImageChooserPanel('image'),
StreamFieldPanel('body'),
FieldPanel('date_published'),
SnippetChooserPanel('author'),
MultiFieldPanel([
FieldPanel('tags'),
SnippetChooserPanel('suburb'),
], heading="Suburbs and tags"),
]
search_fields = Page.search_fields + [
index.SearchField('introduction'),
index.SearchField('body'),
index.RelatedFields('tags', [
index.SearchField('name', partial_match=True, boost=10),
]),
index.RelatedFields('suburb', [
index.SearchField('name'),
]),
]
@property
def get_tags(self):
tags = self.tags.all()
for tag in tags:
tag.url = '/' + '/'.join(s.strip('/') for s in [
self.get_parent().url,
'tags',
tag.slug
])
return tags
# Specifies parent to BlogPage as being BlogIndexPages
parent_page_types = ['BlogIndexPage']
# Specifies what content types can exist as children of BlogPage.
# Empty list means that no child content types are allowed.
subpage_types = []
class BlogIndexPage(WagtailCacheMixin, RoutablePageMixin, Page):
"""
Index page for blogs.
We need to alter the page model's context to return the child page objects,
the BlogPage objects, so that it works as an index page
RoutablePageMixin is used to allow for a custom sub-URL for the tag views
    defined below.
"""
subtitle = models.CharField("Title in Te reo Māori", max_length=254, blank=True, null=True)
introduction = models.TextField(
help_text='Text to describe the page',
blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
content_panels = Page.content_panels + [
FieldPanel('subtitle', classname="full"),
FieldPanel('introduction', classname="full"),
ImageChooserPanel('image'),
]
search_fields = Page.search_fields + [
index.SearchField('introduction'),
]
    # Specifies that only BlogPage objects can live under this index page
subpage_types = ['BlogPage']
parent_page_types = ['home.HomePage']
# # Defines a method to access the children of the page (e.g. BlogPage
# # objects). On the demo site we use this on the HomePage
# def children(self):
# return self.get_children().specific().live()
# Overrides the context to list all child items, that are live, by the
# date that they were published
# http://docs.wagtail.io/en/latest/getting_started/tutorial.html#overriding-context
def get_context(self, request):
context = super(BlogIndexPage, self).get_context(request)
context['posts'] = self.paginate(request, self.get_posts())
return context
def paginate(self, request, posts, *args):
page = request.GET.get('page')
paginator = Paginator(posts, 6)
try:
pages = paginator.page(page)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
return pages
# This defines a Custom view that utilizes Tags. This view will return all
# related BlogPages for a given Tag or redirect back to the BlogIndexPage.
# More information on RoutablePages is at
# http://docs.wagtail.io/en/latest/reference/contrib/routablepage.html
@route(r'^tags/$', name='tag_archive')
@route(r'^tags/([\w-]+)/$', name='tag_archive')
def tag_archive(self, request, tag=None):
try:
tag = Tag.objects.get(slug=tag)
except Tag.DoesNotExist:
return redirect(self.url)
posts = self.get_posts(tag=tag)
context = {
'page': self,
'tag': tag,
'posts': self.paginate(request, posts)
}
return render(request, 'news/blog_index_page.html', context)
# This defines a Custom view that utilizes Tags. This view will return all
# related BlogPages for a given Tag or redirect back to the BlogIndexPage.
# More information on RoutablePages is at
# http://docs.wagtail.io/en/latest/reference/contrib/routablepage.html
@route(r'^suburbs/$', name='suburb_archive')
@route(r'^suburbs/([\w-]+)/$', name='suburb_archive')
def suburb_archive(self, request, tag=None):
try:
suburb = Suburb.objects.get(slug=tag)
        except Suburb.DoesNotExist:
            if tag:
                msg = 'There are no blog posts with the "{}" suburb'.format(tag)
                messages.add_message(request, messages.INFO, msg)
return redirect(self.url)
posts = self.get_posts(suburb=suburb)
context = {
'page': self,
'suburb': suburb,
'posts': self.paginate(request, posts)
}
return render(request, 'news/blog_index_page.html', context)
def serve_preview(self, request, mode_name):
# Needed for previews to work
return self.serve(request)
# Returns the child BlogPage objects for this BlogPageIndex.
# If a tag is used then it will filter the posts by tag.
# Same with suburb if no tag present
def get_posts(self, tag=None, suburb=None):
posts = BlogPage.objects.live().descendant_of(self).order_by('-date_published')
if tag:
posts = posts.filter(tags=tag)
elif suburb:
posts = posts.filter(suburb=suburb)
return posts
# Returns the list of Tags for all child posts of this BlogPage.
def get_child_tags(self):
tags = []
for post in self.get_posts():
# Not tags.append() because we don't want a list of lists
tags += post.get_tags
tags = sorted(set(tags))
return tags
def get_child_suburbs(self):
return Suburb.objects.exclude(page_set__isnull=True)
``` |
{
"source": "JohnKendrick/PyMieScatt",
"score": 2
} |
#### File: PyMieScatt/PyMieScatt/CoreShell.py
```python
import numpy as np
from scipy.special import jv, yv
from PyMieScatt.Mie import MieQ, MiePiTau
import warnings
def MieQCoreShell(mCore,mShell,wavelength,dCore,dShell,nMedium=1.0,asDict=False, asCrossSection=False):
# http://pymiescatt.readthedocs.io/en/latest/forwardCS.html#MieQCoreShell
if(nMedium != 1.0):
warnings.warn("Note: the use of nMedium was incorporated naievely and the result should be carefully scrutinized.")
xCore = np.pi*dCore/wavelength
xShell = np.pi*dShell/wavelength
if xCore==xShell:
return MieQ(mCore,wavelength,dShell,nMedium=nMedium, asDict=asDict, asCrossSection=asCrossSection)
elif xCore==0:
return MieQ(mShell,wavelength,dShell,nMedium=nMedium, asDict=asDict, asCrossSection=asCrossSection)
elif mCore==mShell:
return MieQ(mCore,wavelength,dShell,nMedium=nMedium, asDict=asDict, asCrossSection=asCrossSection)
elif xCore>0:
nMedium = nMedium.real
    # wavelength /= nMedium # Either the wavelength or xCore & xShell could be redefined; this commented line is left for reference.
mCore /= nMedium
mShell /= nMedium
    xCore = np.pi*dCore * nMedium/wavelength # Not ideal to redefine xCore and xShell, but it seems needed to keep the MieQ conditions above in place
xShell = np.pi*dShell * nMedium/wavelength
nmax = np.round(2+xShell+4*(xShell**(1/3)))
n = np.arange(1,nmax+1)
n1 = 2*n+1
n2 = n*(n+2)/(n+1)
n3 = n1/(n*(n+1))
xShell2 = xShell**2
an, bn = CoreShell_ab(mCore,mShell,xCore,xShell)
qext = (2/xShell2)*np.sum(n1*(an.real+bn.real))
qsca = (2/xShell2)*np.sum(n1*(an.real**2+an.imag**2+bn.real**2+bn.imag**2))
qabs = qext-qsca
g1 = [an.real[1:int(nmax)],an.imag[1:int(nmax)],bn.real[1:int(nmax)],bn.imag[1:int(nmax)]]
g1 = [np.append(x, 0.0) for x in g1]
g = (4/(qsca*xShell2))*np.sum((n2*(an.real*g1[0]+an.imag*g1[1]+bn.real*g1[2]+bn.imag*g1[3]))+(n3*(an.real*bn.real+an.imag*bn.imag)))
qpr = qext-qsca*g
qback = (1/xShell2)*(np.abs(np.sum(n1*((-1)**n)*(an-bn)))**2)
qratio = qback/qsca
if asCrossSection:
css = np.pi*(dShell/2)**2
cext = css*qext
csca = css*qsca
cabs = css*qabs
cpr = css*qpr
cback = css*qback
cratio = qratio
if asDict:
return dict(Cext=cext,Csca=csca,Cabs=cabs,g=g,Cpr=cpr,Cback=cback,Cratio=cratio)
else:
return cext, csca, cabs, g, cpr, cback, cratio
else:
if asDict:
return dict(Qext=qext,Qsca=qsca,Qabs=qabs,g=g,Qpr=qpr,Qback=qback,Qratio=qratio)
else:
return qext, qsca, qabs, g, qpr, qback, qratio
def CoreShell_ab(mCore,mShell,xCore,xShell):
# http://pymiescatt.readthedocs.io/en/latest/forwardCS.html#CoreShell_ab
m = mShell/mCore
u = mCore*xCore
v = mShell*xCore
w = mShell*xShell
mx = max(np.abs(mCore*xShell),np.abs(mShell*xShell))
nmax = np.round(2+xShell+4*(xShell**(1/3)))
nmx = np.round(max(nmax,mx)+16)
n = np.arange(1,nmax+1)
nu = n+0.5
sv = np.sqrt(0.5*np.pi*v)
sw = np.sqrt(0.5*np.pi*w)
sy = np.sqrt(0.5*np.pi*xShell)
pv = sv*jv(nu,v)
pw = sw*jv(nu,w)
py = sy*jv(nu,xShell)
chv = -sv*yv(nu,v)
chw = -sw*yv(nu,w)
chy = -sy*yv(nu,xShell)
p1y = np.append([np.sin(xShell)], [py[0:int(nmax)-1]])
ch1y = np.append([np.cos(xShell)], [chy[0:int(nmax)-1]])
gsy = py-(0+1.0j)*chy
gs1y = p1y-(0+1.0j)*ch1y
# B&H Equation 4.89
Dnu = np.zeros((int(nmx)),dtype=complex)
Dnv = np.zeros((int(nmx)),dtype=complex)
Dnw = np.zeros((int(nmx)),dtype=complex)
for i in range(int(nmx)-1,1,-1):
Dnu[i-1] = i/u-1/(Dnu[i]+i/u)
Dnv[i-1] = i/v-1/(Dnv[i]+i/v)
Dnw[i-1] = i/w-1/(Dnw[i]+i/w)
Du = Dnu[1:int(nmax)+1]
Dv = Dnv[1:int(nmax)+1]
Dw = Dnw[1:int(nmax)+1]
uu = m*Du-Dv
vv = Du/m-Dv
fv = pv/chv
dns = ((uu*fv/pw)/(uu*(pw-chw*fv)+(pw/pv)/chv))+Dw
gns = ((vv*fv/pw)/(vv*(pw-chw*fv)+(pw/pv)/chv))+Dw
a1 = dns/mShell+n/xShell
b1 = mShell*gns+n/xShell
an = (py*a1-p1y)/(gsy*a1-gs1y)
bn = (py*b1-p1y)/(gsy*b1-gs1y)
return an, bn
def CoreShellScatteringFunction(mCore,mShell,wavelength,dCore,dShell,nMedium=1.0, minAngle=0, maxAngle=180, angularResolution=0.5, normed=False):
# http://pymiescatt.readthedocs.io/en/latest/forwardCS.html#CoreShellScatteringFunction
if(nMedium != 1.0):
warnings.warn("Note: the use of nMedium was incorporated naievely and the result should be carefully scrutinized.")
mCore /= nMedium
mShell /= nMedium
wavelength /= nMedium
xCore = np.pi*dCore/wavelength
xShell = np.pi*dShell/wavelength
theta = np.linspace(minAngle,maxAngle,int((maxAngle-minAngle)/angularResolution))*np.pi/180
thetaSteps = len(theta)
SL = np.zeros(thetaSteps)
SR = np.zeros(thetaSteps)
SU = np.zeros(thetaSteps)
for j in range(thetaSteps):
u = np.cos(theta[j])
S1,S2 = CoreShellS1S2(mCore,mShell,xCore,xShell,u)
SL[j] = (np.sum((np.conjugate(S1)*S1))).real
SR[j] = (np.sum((np.conjugate(S2)*S2))).real
SU[j] = (SR[j]+SL[j])/2
if normed:
SL /= np.max(SL)
SR /= np.max(SR)
SU /= np.max(SU)
return theta,SL,SR,SU
def CoreShellS1S2(mCore,mShell,xCore,xShell,mu):
# http://pymiescatt.readthedocs.io/en/latest/forwardCS.html#CoreShellS1S2
nmax = np.round(2+xShell+4*(xShell**(1/3)))
an,bn = CoreShell_ab(mCore,mShell,xCore,xShell)
pin,taun = MiePiTau(mu,nmax)
n = np.arange(1,int(nmax)+1)
n2 = (2*n+1)/(n*(n+1))
pin *= n2
taun *= n2
S1=np.sum(an*np.conjugate(pin))+np.sum(bn*np.conjugate(taun))
S2=np.sum(an*np.conjugate(taun))+np.sum(bn*np.conjugate(pin))
return S1,S2
def CoreShellMatrixElements(mCore,mShell,xCore,xShell,mu):
# http://pymiescatt.readthedocs.io/en/latest/forwardCS.html#CoreShellMatrixElements
S1,S2 = CoreShellS1S2(mCore,mShell,xCore,xShell,mu)
S11 = 0.5*(np.abs(S2)**2+np.abs(S1)**2)
S12 = 0.5*(np.abs(S2)**2-np.abs(S1)**2)
S33 = 0.5*(np.conjugate(S2)*S1+S2*np.conjugate(S1))
S34 = 0.5j*(S1*np.conjugate(S2)-S2*np.conjugate(S1))
return S11, S12, S33, S34
``` |
{
"source": "jo-hnkennedy/kerr",
"score": 3
} |
#### File: jo-hnkennedy/kerr/k.py
```python
import pandas
import sqlite3
import re
import sys
from urllib2 import urlopen
import xml.etree.ElementTree as et
#converts minutes string ("32:30") to double (32.5)
def getMinutes(minuteString):
    if (minuteString is None or re.search('Did Not Play', minuteString) or re.search('Suspended', minuteString)): #check None first so re.search is never handed None
return 0
else:
split = re.split('[:]', minuteString)
num = int(split[0]) + 0.0
denom = int(split[1]) + 0.0
return num + (denom/60)
#returns the odds for games between team1 and team2
#assumes team1 is home team
def getOdds(team1, team2):
spread = 0
feed = urlopen("http://xml.pinnaclesports.com/pinnacleFeed.aspx?sportType=Basketball&sportSubType=NBA")
tree = et.fromstring(feed.read())
events = tree.find('events')
#looping through games
for event in events:
participants = event.find('participants')
#looping through game participants, looking for name matches
for participant in participants:
            #if the home team is found, assume we found the game and get the odds
if (re.match(team1, participant.find('participant_name').text)):
#getting home team spread
periods = event.find('periods')
spread = periods[0].find('spread').find('spread_home').text
return spread
db = sqlite3.connect("knba")
k = db.cursor()
#FIRST TEAM IS HOME TEAM
teams = (sys.argv[1], sys.argv[2])
print "Team1: ", teams[0]
spread = getOdds(teams[0], teams[1])
print "Calculating scores for", teams[0], "and", teams[1]
print ""
pointsArray = []
for team in teams:
print team
if (team == teams[0]):
otherTeam = teams[1]
else:
otherTeam = teams[0]
totalPoints = 0
#getting roster
k.execute("SELECT name FROM rosters WHERE team LIKE ?;", (team,))
players = k.fetchall()
#calculating average points for each player
for player in players:
#getting points where player's opponent was other team
k.execute("SELECT points, minutes FROM games_box_simple WHERE playerName LIKE ? AND (homeTeam LIKE ? OR visitorTeam LIKE ?);", (player[0], otherTeam, otherTeam));
points_tuple = k.fetchall()
points = []
minutes = []
#converting tuple to list
for tuple in points_tuple:
if (tuple[0] is None):
points.append(0)
else:
points.append(tuple[0])
minutes.append(getMinutes(tuple[1]))
#getting points per minute against that team since 2012-2013
pointSum = 0
minuteSum = 0
for point in points:
pointSum = pointSum + point
for minute in minutes:
minuteSum = minuteSum + minute
if (minuteSum == 0):
pointsPerMinute = 0
else:
pointsPerMinute = (pointSum + 0.0) / minuteSum
#averaging minutes of most recent 15 games
k.execute("SELECT minutes FROM games_box_simple WHERE playerName LIKE ? ORDER BY ROWID DESC", player);
recentMinutes = 0.0
i = 0
while (i < 15):
minutes_tuple = k.fetchone()
lastGameMinutes = getMinutes(minutes_tuple[0])
recentMinutes = recentMinutes + lastGameMinutes
i = i + 1
recentMinutes = recentMinutes / 15.0
totalPoints = (pointsPerMinute * recentMinutes) + totalPoints
print player[0], " scores ", (pointsPerMinute * recentMinutes), " points"
print ""
pointsArray.append(totalPoints)
print ''
print teams[0], " - ", pointsArray[0]
print teams[1], " - ", pointsArray[1]
print "spread: ", spread
print ""
spread = float(spread)
if (pointsArray[0] - spread > pointsArray[1]):
print "Take the under"
elif (pointsArray[0] - spread < pointsArray[1]):
print "Take the over"
else:
print "This shouldn't happen"
``` |
{
"source": "johnkennyuk/supervisor",
"score": 2
} |
#### File: supervisor/tests/conftest.py
```python
from functools import partial
from inspect import unwrap
from pathlib import Path
import re
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
from uuid import uuid4
from aiohttp import web
from awesomeversion import AwesomeVersion
import pytest
from supervisor.api import RestAPI
from supervisor.bootstrap import initialize_coresys
from supervisor.const import REQUEST_FROM
from supervisor.coresys import CoreSys
from supervisor.dbus.network import NetworkManager
from supervisor.docker import DockerAPI
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
from supervisor.utils.gdbus import DBus
from tests.common import exists_fixture, load_fixture, load_json_fixture
# pylint: disable=redefined-outer-name, protected-access
async def mock_async_return_true() -> bool:
"""Mock methods to return True."""
return True
@pytest.fixture
def docker() -> DockerAPI:
"""Mock DockerAPI."""
images = [MagicMock(tags=["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"])]
with patch("docker.DockerClient", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.images", return_value=MagicMock()
), patch("supervisor.docker.DockerAPI.containers", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.api", return_value=MagicMock()
), patch(
"supervisor.docker.DockerAPI.images.list", return_value=images
), patch(
"supervisor.docker.DockerAPI.info",
return_value=MagicMock(),
), patch(
"supervisor.docker.DockerConfig",
return_value=MagicMock(),
):
docker_obj = DockerAPI()
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = "1.0.0"
docker_obj.config.registries = {}
yield docker_obj
@pytest.fixture
def dbus() -> DBus:
"""Mock DBUS."""
dbus_commands = []
async def mock_get_properties(dbus_obj, interface):
latest = dbus_obj.object_path.split("/")[-1]
fixture = interface.replace(".", "_")
if latest.isnumeric():
fixture = f"{fixture}_{latest}"
return load_json_fixture(f"{fixture}.json")
async def mock_wait_signal(_, __):
pass
async def mock_send(_, command, silent=False):
if silent:
return ""
fixture = command[6].replace("/", "_")[1:]
if command[1] == "introspect":
filetype = "xml"
if not exists_fixture(f"{fixture}.{filetype}"):
fixture = re.sub(r"_[0-9]+$", "", fixture)
# special case
if exists_fixture(f"{fixture}_~.{filetype}"):
fixture = f"{fixture}_~"
else:
fixture = f"{fixture}-{command[10].split('.')[-1]}"
filetype = "fixture"
dbus_commands.append(fixture)
return load_fixture(f"{fixture}.{filetype}")
with patch("supervisor.utils.gdbus.DBus._send", new=mock_send), patch(
"supervisor.utils.gdbus.DBus.wait_signal", new=mock_wait_signal
), patch(
"supervisor.dbus.interface.DBusInterface.is_connected",
return_value=True,
), patch(
"supervisor.utils.gdbus.DBus.get_properties", new=mock_get_properties
):
yield dbus_commands
@pytest.fixture
async def network_manager(dbus) -> NetworkManager:
"""Mock NetworkManager."""
nm_obj = NetworkManager()
nm_obj.dbus = dbus
# Init
await nm_obj.connect()
await nm_obj.update()
yield nm_obj
@pytest.fixture
async def coresys(loop, docker, network_manager, aiohttp_client) -> CoreSys:
"""Create a CoreSys Mock."""
with patch("supervisor.bootstrap.initialize_system_data"), patch(
"supervisor.bootstrap.setup_diagnostics"
):
coresys_obj = await initialize_coresys()
# Mock save json
coresys_obj._ingress.save_data = MagicMock()
coresys_obj._auth.save_data = MagicMock()
coresys_obj._updater.save_data = MagicMock()
coresys_obj._config.save_data = MagicMock()
coresys_obj._jobs.save_data = MagicMock()
coresys_obj._resolution.save_data = MagicMock()
# Mock test client
coresys_obj.arch._default_arch = "amd64"
coresys_obj._machine = "qemux86-64"
coresys_obj._machine_id = uuid4()
# Mock host communication
coresys_obj._dbus._network = network_manager
# Mock docker
coresys_obj._docker = docker
# Set internet state
coresys_obj.supervisor._connectivity = True
coresys_obj.host.network._connectivity = True
# WebSocket
coresys_obj.homeassistant.api.check_api_state = mock_async_return_true
coresys_obj.homeassistant._websocket._client = AsyncMock(
ha_version=AwesomeVersion("2021.2.4")
)
# Remove rate limiting decorator from fetch_data
coresys_obj.updater.fetch_data = partial(
unwrap(coresys_obj.updater.fetch_data), coresys_obj.updater
)
yield coresys_obj
await coresys_obj.websession.close()
@pytest.fixture
def sys_machine():
"""Mock sys_machine."""
with patch("supervisor.coresys.CoreSys.machine", new_callable=PropertyMock) as mock:
yield mock
@pytest.fixture
def sys_supervisor():
"""Mock sys_supervisor."""
with patch(
"supervisor.coresys.CoreSys.supervisor", new_callable=PropertyMock
) as mock:
mock.return_value = MagicMock()
yield MagicMock
@pytest.fixture
async def api_client(aiohttp_client, coresys: CoreSys):
"""Fixture for RestAPI client."""
@web.middleware
async def _security_middleware(request: web.Request, handler: web.RequestHandler):
"""Make request are from Core."""
request[REQUEST_FROM] = coresys.homeassistant
return await handler(request)
api = RestAPI(coresys)
api.webapp = web.Application(middlewares=[_security_middleware])
api.start = AsyncMock()
await api.load()
yield await aiohttp_client(api.webapp)
@pytest.fixture
def store_manager(coresys: CoreSys):
"""Fixture for the store manager."""
sm_obj = coresys.store
with patch("supervisor.store.data.StoreData.update", return_value=MagicMock()):
yield sm_obj
@pytest.fixture
def run_dir(tmp_path):
"""Fixture to inject hassio env."""
with patch("supervisor.core.RUN_SUPERVISOR_STATE") as mock_run:
tmp_state = Path(tmp_path, "supervisor")
mock_run.write_text = tmp_state.write_text
yield tmp_state
@pytest.fixture
def store_addon(coresys: CoreSys, tmp_path):
"""Store add-on fixture."""
addon_obj = AddonStore(coresys, "test_store_addon")
coresys.addons.store[addon_obj.slug] = addon_obj
coresys.store.data.addons[addon_obj.slug] = load_json_fixture("add-on.json")
yield addon_obj
@pytest.fixture
def repository(coresys: CoreSys):
"""Repository fixture."""
repository_obj = Repository(
coresys, "https://github.com/awesome-developer/awesome-repo"
)
coresys.store.repositories[repository_obj.slug] = repository_obj
yield repository_obj
``` |
{
"source": "johnkerl/sack",
"score": 3
} |
#### File: johnkerl/sack/cl2_gm.py
```python
import cl2_tm
def get_elements_str(params_string):
[n, sqsign] = cl2_tm.params_from_string(params_string)
two_n = 1 << n
elts = []
for bits in range(0, two_n):
for sign in [1, -1]:
#for sign in [1, -1]:
#for bits in range(0, two_n):
elts.append(cl2_tm.cl2_t(sign, bits, n, sqsign))
return elts
```
#### File: johnkerl/sack/f2polymod_tm.py
```python
import sys
import re
import copy
import f2poly_tm
# xxx this is in mid-port from C++.
## ================================================================
#def f2polymod_from_string(string):
# return f2polymod_t(int(string, 16))
#
## ================================================================
#class f2polymod_t:
#
# #def __init__(self, resbits, modbits):
# # self.modbits = modbits
# # self.resbits = f2poly_tm.imod(resbits, modbits)
#
# # Both arguments should be of type f2poly_t.
# def __init__(self, residue, modulus):
# self.modulus = modulus
# self.residue = residue % modulus
#
# def __add__(a,b):
# c = f2polymod_t(a.residue + b.residue, a.modulus)
# return c
# def __sub__(a,b):
# c = f2polymod_t(a.residue - b.residue, a.modulus)
# return c
# def __mul__(a,b):
# c = f2polymod_t(a.residue * b.residue, a.modulus)
# return c
#
# # xxx fix me
# def recip(a):
# pass
#
##int f2polymod_t::recip(f2polymod_t & rinv)
## f2poly_t g, a, b;
## g = this->residue.ext_gcd(this->modulus, a, b);
##
## if (g.find_degree() != 0): # Error check
## //std::cerr << "f2polymod recip: zero or zero divisor.";
## return 0
## else:
## rinv = f2polymod_t(a, this->modulus)
## return 1
#
# def __div__(a,b):
# return a * b.recip()
#
# # xxx fix me
# def __pow__(a, e):
# ap = a.residue
# one = f2poly_t(1)
# rv = one
#
# #xxx types
# if (e == 0):
# if (a.residue.bits == 0):
# print >> sys.stderr, "f2polymod_t.exp: 0^0 undefined."
# sys.exit(1)
# return one
# elif (e < 0):
# if (a.residue.bits == 0):
# print >> sys.stderr, "f2polymod_t.exp: division by zero."
# sys.exit(1)
#
# #xxx
# f2polymod_t inv = one/ *this
# xp = inv.residue
# e = -e
#
# while (e != 0):
# if e & 1:
# rv.residue = (rv.residue * xp) % this->modulus
# e >>= 1
# xp = (xp * xp) % this->modulus
# return rv
#
#
# def __eq__(a,b):
# return a.bits == b.bits
# def __ne__(a,b):
# return not (a == b)
# def __neg__(a):
# return a
#
# def scan(self, string):
# self.bits = int(string, 16)
#
# def __str__(self):
# # xxx temp
# return self.residue.__str__()
# #return "%x" % self.bits
# def __repr__(self):
# return self.__str__()
#
#
##std::ostream & operator<<(std::ostream & os, const f2polymod_t & a)
## a.residue.dprint(os, a.modulus.find_degree() - 1)
##
##int f2polymod_t::from_string(char * string, f2poly_t m)
## f2poly_t r;
## std::istringstream iss(string, std::ios_base::in);
## iss >> r;
## if (iss.fail()) {
## return 0;
## }
## else {
## *this = f2polymod_t(r, m);
## return 1;
## }
##
##void f2polymod_t::check_moduli(f2polymod_t & that) const
## if (this->modulus != that.modulus) {
## std::cerr
## << "f2polymod_t: mixed moduli "
## << this->modulus
## << ", "
## << that.modulus
## << ".";
## std::cerr << std::endl;
## exit(1);
## }
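# ----------------------------------------------------------------
# A minimal working sketch of the class commented out above (not the
# finished port).  It assumes f2poly_tm.f2poly_t supports %, +, -, and *
# as the commented C++ fragments do; inversion is left out pending ext_gcd.
#
# class f2polymod_t:
#     def __init__(self, residue, modulus):
#         self.modulus = modulus
#         self.residue = residue % modulus
#     def __add__(a, b):
#         return f2polymod_t(a.residue + b.residue, a.modulus)
#     def __sub__(a, b):
#         return f2polymod_t(a.residue - b.residue, a.modulus)
#     def __mul__(a, b):
#         return f2polymod_t(a.residue * b.residue, a.modulus)
#     def __eq__(a, b):
#         return a.residue == b.residue and a.modulus == b.modulus
#     def __str__(self):
#         return self.residue.__str__()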
```
#### File: johnkerl/sack/modmul_tm.py
```python
import re
import sys
import copy
import sackint
class modmul_t:
def __init__(self, resarray, modarray):
self.check_lengths(len(resarray), len(modarray), "residues", "moduli")
self.check_moduli(modarray)
self.residues = copy.copy(resarray)
self.moduli = copy.copy(modarray)
for i in range(0, len(modarray)):
self.residues[i] %= self.moduli[i]
def __eq__(a,b):
if (len(a.residues) != len(b.residues)):
return 0
n = len(a.residues)
for i in range(0, n):
if (a.residues[i] != b.residues[i]):
return 0
return 1
def __ne__(a,b):
return not (a == b)
def __mul__(a,b):
a.check_lengths(len(a.moduli), len(b.moduli), "moduli", "moduli")
a.check_moduli_pair(a.moduli, b.moduli)
c = modmul_t(a.residues, a.moduli)
for i in range(0, len(a.moduli)):
c.residues[i] = (a.residues[i] * b.residues[i]) % c.moduli[i]
return c
def inv(a):
c = modmul_t(a.residues, a.moduli)
for i in range(0, len(a.moduli)):
c.residues[i] = sackint.intmodrecip(a.residues[i], a.moduli[i])
return c
def __add__(a,b):
a.check_lengths(len(a.moduli), len(b.moduli), "moduli", "moduli")
a.check_moduli_pair(a.moduli, b.moduli)
c = modmul_t(a.residues, a.moduli)
for i in range(0, len(a.moduli)):
c.residues[i] = (a.residues[i] + b.residues[i]) % c.moduli[i]
return c
def __sub__(a,b):
a.check_lengths(len(a.moduli), len(b.moduli), "moduli", "moduli")
a.check_moduli_pair(a.moduli, b.moduli)
c = modmul_t(a.residues, a.moduli)
for i in range(0, len(a.moduli)):
c.residues[i] = (a.residues[i] - b.residues[i]) % c.moduli[i]
return c
def neg(a):
c = modmul_t(a.residues, a.moduli)
for i in range(0, len(a.moduli)):
c.residues[i] = (-a.residues[i]) % a.moduli[i]
return c
def scan(self, res_string, mod_string):
res_strings = re.split(',', res_string)
mod_strings = re.split(',', mod_string)
self.check_lengths(len(res_strings), len(mod_strings), res_strings,
mod_strings)
n = len(res_strings)
resarray = list(range(0, n))
modarray = list(range(0, n))
for i in range(0, n):
resarray[i] = int(res_strings[i])
modarray[i] = int(mod_strings[i])
self.__init__(resarray, modarray)
def __str__(self):
string = str(self.residues[0])
for i in range(1, len(self.residues)):
string += "," + str(self.residues[i])
return string
def __repr__(self):
return self.__str__()
def check_length(self, length, desc):
if (length < 1):
print((desc, "length", str(length), "< 1"))
raise RuntimeError
def check_lengths(self, len1, len2, desc1, desc2):
self.check_length(len1, desc1)
self.check_length(len2, desc2)
if (len1 != len2):
print((desc1, "length", str(len1), "!=", desc2, "length", len2))
raise RuntimeError
def check_moduli(self, mods):
for i in range(0, len(mods)):
if (mods[i] < 1):
print(("Modulus", mods[i], "< 1 in", mods))
raise RuntimeError
def check_moduli_pair(self, mods1, mods2):
if (mods1 != mods2):
print(("Mismatched moduli", mods1, ", ", mods2))
raise RuntimeError
def params_from_string(params_string):
if (len(params_string) == 0):
print("Modadd requires non-empty parameter string")
sys.exit(1)
mod_strings = re.split(',', params_string)
n = len(mod_strings)
mod_array = list(range(0, n))
for i in range(0, n):
mod_array[i] = int(mod_strings[i])
return mod_array
def from_string(value_string, params_string):
obj = modmul_t([1], [1])
obj.scan(value_string, params_string)
return obj
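# ----------------------------------------------------------------
# Example (componentwise arithmetic on Z/3 x Z/5 residue vectors):
#   x = from_string("2,3", "3,5")
#   y = from_string("2,4", "3,5")
#   print(x * y)  # prints 1,2 since 2*2 = 1 (mod 3) and 3*4 = 2 (mod 5)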
# ================================================================
import unittest
if __name__ == '__main__':
class test_cases(unittest.TestCase):
def test___init__(self):
pass # to be implemented
def test___eq__(self):
pass # to be implemented
def test___ne__(self):
pass # to be implemented
def test___mul__(self):
pass # to be implemented
def test_inv(self):
pass # to be implemented
def test___add__(self):
pass # to be implemented
def test___sub__(self):
pass # to be implemented
def test_neg(self):
pass # to be implemented
def test_scan(self):
pass # to be implemented
def test___str__(self):
pass # to be implemented
def test___repr__(self):
pass # to be implemented
def test_check_length(self):
pass # to be implemented
def test_check_lengths(self):
pass # to be implemented
def test_check_moduli(self):
pass # to be implemented
def test_check_moduli_pair(self):
pass # to be implemented
def test_params_from_string(self):
pass # to be implemented
def test_from_string(self):
pass # to be implemented
# ----------------------------------------------------------------
unittest.main()
```
#### File: johnkerl/sack/quatu_tm.py
```python
import re
quatu_mul_table = [
# 1 -1 i -i j -j k -k
[ 0, 1, 2, 3, 4, 5, 6, 7 ], # 1
[ 1, 0, 3, 2, 5, 4, 7, 6 ], # -1
[ 2, 3, 1, 0, 6, 7, 5, 4 ], # i
[ 3, 2, 0, 1, 7, 6, 4, 5 ], # -i
[ 4, 5, 7, 6, 1, 0, 2, 3 ], # j
[ 5, 4, 6, 7, 0, 1, 3, 2 ], # -j
[ 6, 7, 4, 5, 3, 2, 1, 0 ], # k
[ 7, 6, 5, 4, 2, 3, 0, 1 ], # -k
]
quatu_inv_table = [ 0, 1, 3, 2, 5, 4, 7, 6 ]
# 1 -1 i -i j -j k -k
class quatu_t:
#code = 0
def __init__(self, argcode):
self.code = argcode & 7
def __mul__(a,b):
        c = quatu_t(quatu_mul_table[a.code][b.code])
return c
def __eq__(a,b):
return (a.code == b.code)
def __ne__(a,b):
return not (a == b)
def __lt__(a,b):
return (a.code < b.code)
def __le__(a,b):
return (a.code <= b.code)
def __gt__(a,b):
return (a.code > b.code)
def __ge__(a,b):
return (a.code >= b.code)
def inv(a):
        c = quatu_t(quatu_inv_table[a.code])
return c
def scan(self, string):
if (string == "1"):
self.__init__(0)
elif (string == "-1"):
self.__init__(1)
elif (string == "i"):
self.__init__(2)
elif (string == "-i"):
self.__init__(3)
elif (string == "j"):
self.__init__(4)
elif (string == "-j"):
self.__init__(5)
elif (string == "k"):
self.__init__(6)
elif (string == "-k"):
self.__init__(7)
else:
raise IOError
def __str__(self):
if (self.code == 0):
return " 1"
elif (self.code == 1):
return "-1"
elif (self.code == 2):
return " i"
elif (self.code == 3):
return "-i"
elif (self.code == 4):
return " j"
elif (self.code == 5):
return "-j"
elif (self.code == 6):
return " k"
elif (self.code == 7):
return "-k"
else:
raise IOError
def __repr__(self):
return self.__str__()
def params_from_string(params_string):
# xxx check empty
return 0
def from_string(value_string, params_string):
not_used = params_from_string(params_string)
obj = quatu_t(0)
obj.scan(value_string)
return obj
#x = quatu_t(3)
#y = quatu_t(2)
#print x
#print y
#z = x * y
#print z
#z.scan("i")
#print z
#print
#for i in range(0, 8):
# for j in range(0, 8):
# x = quatu_t(i)
# y = quatu_t(j)
# z = x * y
# print x, y, z
# print
# ================================================================
import unittest
if __name__ == '__main__':
class test_cases(unittest.TestCase):
def test___init__(self):
pass # to be implemented
def test___mul__(self):
pass # to be implemented
def test___eq__(self):
pass # to be implemented
def test___ne__(self):
pass # to be implemented
def test___lt__(self):
pass # to be implemented
def test___le__(self):
pass # to be implemented
def test___gt__(self):
pass # to be implemented
def test___ge__(self):
pass # to be implemented
def test_inv(self):
pass # to be implemented
def test_scan(self):
pass # to be implemented
def test___str__(self):
pass # to be implemented
def test___repr__(self):
pass # to be implemented
def test_params_from_string(self):
pass # to be implemented
def test_from_string(self):
pass # to be implemented
# ----------------------------------------------------------------
unittest.main()
```
#### File: johnkerl/sack/sackint.py
```python
def gcd(a, b):
r = 0
if (a == 0):
return b
if (b == 0):
return a
while (1):
r = a % b
if (r == 0):
break
a = b
b = r
if (b < 0):
b = -b
return b
# ----------------------------------------------------------------
# Blankinship's algorithm
def extgcd(a, b):
# Initialize
mprime = 1
n = 1
m = 0
nprime = 0
c = a
d = b
while (1):
# Divide
        q = c // d # floor division: correct under both Python 2 and Python 3
r = c % d
# Note: now c = qd + r and 0 <= r < d
# Remainder zero?
if (r == 0):
break
# Recycle
c = d
d = r
t = mprime
mprime = m
qm = q * m
m = t - qm
t = nprime
nprime = n
qn = q * n
n = t - qn
return [d, m, n]
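# E.g. extgcd(240, 46) returns [2, -9, 47]: gcd(240, 46) = 2 and
# (-9)*240 + 47*46 = 2.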
# ----------------------------------------------------------------
# This function should be invoked with only one argument.
# The optional argument is a way to have a local static in Python.
# See Lutz & Ascher, 2nd. ed., p 241.
def eulerphi(n, cached_n_and_phi=[2,1]):
if (n == cached_n_and_phi[0]):
# Cache hit
return cached_n_and_phi[1]
    phi = 0
    for i in range(1, n):
        if (gcd(n, i) == 1):
            phi += 1
    # Update the cache before returning; the original returned early here,
    # which made the cache-update lines below unreachable.
    cached_n_and_phi[0] = n
    cached_n_and_phi[1] = phi
    return phi
# ----------------------------------------------------------------
# Binary exponentiation
def intexp(x, e):
xp = x
rv = 1
if (e < 0):
print(("intexp: negative exponent", e, "disallowed."))
raise RuntimeError
while (e != 0):
if (e & 1):
rv = (rv * xp)
e = e >> 1
xp = (xp * xp)
return rv
# ----------------------------------------------------------------
# Binary exponentiation
def intmodexp(x, e, m):
xp = x
rv = 1
if (e < 0):
e = -e
x = intmodrecip(x, m)
while (e != 0):
if (e & 1):
rv = (rv * xp) % m
e = e >> 1
xp = (xp * xp) % m
return rv
# ----------------------------------------------------------------
def intmodrecip(x, m):
if (gcd(x, m) != 1):
print(("intmodrecip: impossible inverse", x, "mod", m))
raise RuntimeError
phi = eulerphi(m)
return intmodexp(x, phi-1, m)
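# E.g. intmodrecip(3, 7) == 5, since 3*5 = 15 = 1 (mod 7).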
# ----------------------------------------------------------------
def factorial(n):
if (n < 0):
print("factorial: negative input disallowed.")
raise RuntimeError
if (n < 2):
return 1
rv = 1
for k in range(2, n+1):
rv *= k
return rv
# ----------------------------------------------------------------
# How to compute P(n) = number of partitions of n. Examples for n = 1 to 5:
#
# 1 2 3 4 5
# 1 1 2 1 3 1 4 1
# 1 1 1 2 2 3 2
# 2 1 1 3 1 1
# 1 1 1 1 2 2 1
# 2 1 1 1
# 1 1 1 1 1
#
# This is a first-rest algorithm. Loop over possible choices k for the first
# number. The rest must sum to n-k. Furthermore, the rest must be descending
# and so each must be less than or equal to k. Thus we naturally have an
# auxiliary function P(n, m) counting partitions of n with each element less
# than or equal to m.
def num_ptnsm(n, m):
if (n < 0): return 0
if (n <= 1): return 1
if (m == 1): return 1
sum = 0
for k in range(1, m+1):
if (n-k >= 0):
sum += num_ptnsm(n-k, k)
return sum
# ----------------------------------------------------------------
def num_ptns(n):
return num_ptnsm(n, n)
# ----------------------------------------------------------------
def ptnsm(n, m):
rv = []
if (n < 0): return 0
if (n == 0): return [[]]
if (n == 1): return [[1]]
if (m == 1): return [[1] * n]
sum = 0
for k in range(1, m+1):
if (n-k >= 0):
tails = ptnsm(n-k, k)
for tail in tails:
rv.append([k] + tail)
return rv
# ----------------------------------------------------------------
def ptns(n):
return ptnsm(n, n)
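# E.g. num_ptns(5) == 7, and
# ptns(4) == [[1, 1, 1, 1], [2, 1, 1], [2, 2], [3, 1], [4]].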
#for n in range(1, 21):
# a = onum_ptns(n)
# b = num_ptns(n)
# print "%2d %2d %2d" % (n, a, b)
#for n in range(1, 5+1):
# for m in range(1, n+1):
# p = num_ptnsm(n, m)
# print n, m, p
# print
#for n in range(1, 7+1):
# for m in range(1, n+1):
# X = ptnsm(n, m)
# print n, m, len(X), X
# print
```
#### File: johnkerl/sack/v4_tm.py
```python
import re
# Type module for the Klein-4 group ("Viergruppe" in German, hence the
# traditional "V4").
# e a b c
# a e c b
# b c e a
# c b a e
# v4_table = [
# [ 0,1,2,3 ],
# [ 1,0,3,2 ],
# [ 2,3,0,1 ],
# [ 3,2,1,0 ]]
class v4_t:
#code = 0
def __init__(self, argcode):
self.code = argcode & 3
def __eq__(a,b):
return (a.code == b.code)
def __ne__(a,b):
return not (a == b)
def __mul__(a,b):
#c = v4_t(v4_table[a.code][b.code]);
c = v4_t(a.code ^ b.code)
return c
def inv(a):
c = v4_t(a.code)
return c
def scan(self, string):
if (string == "e"):
self.__init__(0)
elif (string == "a"):
self.__init__(1)
elif (string == "b"):
self.__init__(2)
elif (string == "c"):
self.__init__(3)
else:
raise IOError
def __str__(self):
if (self.code == 0):
return "e"
elif (self.code == 1):
return "a"
elif (self.code == 2):
return "b"
elif (self.code == 3):
return "c"
else:
raise IOError
def __repr__(self):
return self.__str__()
def params_from_string(params_string):
return 0
def from_string(value_string, params_string):
not_used = params_from_string(params_string)
obj = v4_t(0)
obj.scan(value_string)
return obj
#x = v4_t(3)
#y = v4_t(2)
#print x
#print y
#z = x * y
#print z
#z.scan("a")
#print z
#print
#for i in range(0, 4):
# for j in range(0, 4):
# x = v4_t(i)
# y = v4_t(j)
# z = x * y
# print x, y, z
# print
# ================================================================
import unittest
if __name__ == '__main__':
class test_cases(unittest.TestCase):
def test___init__(self):
pass # to be implemented
def test___eq__(self):
pass # to be implemented
def test___ne__(self):
pass # to be implemented
def test___mul__(self):
pass # to be implemented
def test_inv(self):
pass # to be implemented
def test_scan(self):
pass # to be implemented
def test___str__(self):
pass # to be implemented
def test___repr__(self):
pass # to be implemented
def test_params_from_string(self):
pass # to be implemented
def test_from_string(self):
pass # to be implemented
# ----------------------------------------------------------------
unittest.main()
``` |
{
"source": "johnkerl/scripts-math",
"score": 3
} |
#### File: scripts-math/pythonlib/arg_eq_match_m.py
```python
import sys
import re
# ----------------------------------------------------------------
# If there is a match, returns [True, {value}]. Else, returns [False, 0].
# E.g. if called with name_eq_value_pair "N=10", name = "N", and value_scanner
# = int, returns [True, 10].
# xxx fix cmt to match reality
def arg_eq_match(name_eq_value_pair, name, value_scanner, value_list):
name_eq = name + '='
len_of_name_eq = len(name_eq)
regexp = '^' + name_eq
if re.match(regexp, name_eq_value_pair):
value_string = name_eq_value_pair[len_of_name_eq:]
try:
value = value_scanner(value_string)
except:
print >> sys.stderr, 'Couldn\'t parse RHS of \'%s\' as %s.' \
% (name_eq_value_pair, value_scanner.__name__)
return False
value_list[0] = value
return True
else:
return False
```
#### File: bin/ARp/ARp_m.py
```python
import sys, random, copy
# AR(p) process:
# X[i] = c + sum_{j=1}^p a[j] X[i-j] + eps[i]
# where eps[i] ~ N(0, sigma_eps^2)
# ... perhaps eps[i] with non-normal distributions also could be used to create
# something called AR(p)?
class ARp_t:
# c_and_as: [c a[1] a[2] ... a[p]]
# X_i_minus: [X[i] X[i-1] X[i-2] ... X[i-p]]
def __init__(self, c_and_as, sigma_eps):
self.p = len(c_and_as) - 1
self.i = 0
if self.p < 1:
print >> sys.stderr, "Need at least c and one a."
print >> sys.stderr, "Pass a1=0 to get no a's."
sys.exit(1)
self.c_and_as = c_and_as
self.sigma_eps = sigma_eps
self.X_i_minus = [0] * (self.p+1)
self.js_1_to_p = range(1, self.p+1)
self.js_pm1_to_0 = range(self.p-1, -1, -1)
# E.g. with p=2:
# X[i-2] = X[i-1]
# X[i-1] = X[i]
# X[i] = c + a[1]*X[i-1] + a[2]*X[i-2]
def update(self):
for j in self.js_pm1_to_0:
self.X_i_minus[j+1] = self.X_i_minus[j]
Xi = self.c_and_as[0] # c
for j in self.js_1_to_p:
Xi += self.c_and_as[j] * self.X_i_minus[j]
Xi += random.gauss(0, self.sigma_eps)
self.X_i_minus[0] = Xi
self.i += 1
def get(self):
return self.X_i_minus[0]
def get_and_update(self):
Xi = self.X_i_minus[0]
self.update()
return Xi
def get_all(self):
#return copy.copy(self.X_i_minus)
return self.X_i_minus
def __str__(self):
string = 'i=%d p=%d (c=%.4f' % (self.i, self.p, self.c_and_as[0])
for j in self.js_1_to_p:
string += ' a[%d]=%.4f' % (j, self.c_and_as[j])
string += ')'
for j in range(0, self.p+1):
string += ' X[%d]=%.4f' % (self.i-j, self.X_i_minus[j])
return string
def __repr__(self):
return __str__(self)
# ----------------------------------------------------------------
def usage():
print >> sys.stderr, "Usage: %s {n}" % (sys.argv[0])
print >> sys.stderr, "Or: %s {n} {nA}" % (sys.argv[0])
print >> sys.stderr, "Or: %s {n} {nA} {c} {a1 a2 ...}" % (sys.argv[0])
sys.exit(1)
# ----------------------------------------------------------------
# N.B.:
# * c=0 a1=1 a2=1 sigma=0: fibo
# * c=1 a1=1 sigma=0: counter
# * c=mu a1=0 sigma=1: IID N(mu, sigma)
n = 20
nA = 10
c_and_as = [0, 0.999]
sigma_eps = 1
argc = len(sys.argv)
if argc == 1:
pass
elif argc == 2:
if sys.argv[1] == '-h' or sys.argv[1] == '--help':
usage()
n = int(sys.argv[1])
elif argc == 3:
n = int(sys.argv[1])
nA = int(sys.argv[2])
elif argc >= 5:
n = int(sys.argv[1])
nA = int(sys.argv[2])
c_and_as = map(float, sys.argv[3:])
else:
usage()
ARps = [0] * nA
for j in range(0, nA):
ARps[j] = ARp_t(c_and_as, sigma_eps)
for i in range(0, n):
for j in range(0, nA):
print ARps[j].get_and_update(),
print
```
#### File: bin/cumdie/try1.py
```python
import sys, random
# ----------------------------------------------------------------
def main():
#print_trajectories()
create_histogram_data()
# ----------------------------------------------------------------
def print_trajectories():
sum_to_reach = 300
num_experiments = 20
if len(sys.argv) == 2:
num_experiments = int(sys.argv[1])
sums = [0] * num_experiments
while True:
# Print
for i in range(0, num_experiments):
print sums[i],
print
# Check for done
all_done = True
for i in range(0, num_experiments):
if sums[i] < sum_to_reach:
all_done = False
break
if all_done:
break
# Update
for i in range(0, num_experiments):
if sums[i] < sum_to_reach:
U = random.randint(1, 6)
sums[i] += U
# ----------------------------------------------------------------
def create_histogram_data():
sum_to_reach = 300
num_rolls_to_reach = 80
num_experiments = 100000
if len(sys.argv) == 2:
num_experiments = int(sys.argv[1])
for i in range(0, num_experiments):
#print stopping_time(sum_to_reach)
print sum_after(num_rolls_to_reach)
# ----------------------------------------------------------------
def stopping_time(sum_to_reach):
sum = 0
num_rolls = 0
while sum < sum_to_reach:
U = random.randint(1, 6)
num_rolls += 1
sum += U
return num_rolls
# ----------------------------------------------------------------
def sum_after(num_rolls_to_reach):
sum = 0
num_rolls = 0
while num_rolls < num_rolls_to_reach:
U = random.randint(1, 6)
num_rolls += 1
sum += U
return sum
# ----------------------------------------------------------------
main()
```
#### File: pythonlib/bin/fake-stars.py
```python
from __future__ import division
import sys, Image
import random
# ----------------------------------------------------------------
# Tweakable parameters
density = 0.01
radius_CDF = [[0.9, 1], [0.98, 2], [1.0, 3]]
spread = 40
# ----------------------------------------------------------------
def main():
# My laptop monitor size:
#width = 1366
#height = 768
# My laptop monitor size times 120%. Then I can shrink the image
# inside GIMP to get a little softer look.
width = 1640
height = 922
argc = len(sys.argv)
if argc == 2:
file_name = sys.argv[1]
elif argc == 4:
width = int(sys.argv[1])
height = int(sys.argv[2])
file_name = sys.argv[3]
else:
usage()
im = Image.new('RGB', (width, height), 0)
num_stars = int(density * width * height)
for k in xrange(0, num_stars):
i = random.randint(0, width-1)
j = random.randint(0, height-1)
putrandstar(im, width, height, i, j)
im.save(file_name)
# ----------------------------------------------------------------
def usage():
    print >> sys.stderr, \
        "Usage: %s {width in pixels} {height in pixels} {output file name}" \
        % sys.argv[0]
sys.exit(1)
# ----------------------------------------------------------------
# 1. Keep r/g/b close to one another, & vary brightness more.
# 2. Take random radius & bleed over into adjacent pixels
def putrandstar(im, width, height, i, j):
r = random.randint(0, 255)
g = random.randint(max(r-spread,0), min(r+spread,255))
b = random.randint(max(r-spread,0), min(r+spread,255))
radius = random_selection_from_CDF(radius_CDF)
# How to handle various radii: treat the star as a circular disk, with
# brightness uniform across the disk. So, for each pixel in a 1x1, 2x2, or
# 3x3 grid, count the fraction of the pixel that is covered by a circle.
# (Place star centers at pixel centers for convenience.)
# Eyeball estimates:
# o 1x1: middle about 3/4 covered.
# o 2x2: center 100% covered. up/down/left/right about 45% covered.
# corners about 15% covered.
# o 3x3: center 100% covered. up/down/left/right about 90% covered.
# corners about 60% covered.
if radius == 1:
im.putpixel((i,j), scale_rgb(r,g,b,0.75))
elif radius == 2:
clipped_putpixel(im, width, height, i, j, scale_rgb(r,g,b,1.00))
clipped_putpixel(im, width, height, i+1, j, scale_rgb(r,g,b,0.45))
clipped_putpixel(im, width, height, i-1, j, scale_rgb(r,g,b,0.45))
clipped_putpixel(im, width, height, i, j+1, scale_rgb(r,g,b,0.45))
clipped_putpixel(im, width, height, i, j-1, scale_rgb(r,g,b,0.45))
clipped_putpixel(im, width, height, i+1, j+1, scale_rgb(r,g,b,0.15))
clipped_putpixel(im, width, height, i+1, j-1, scale_rgb(r,g,b,0.15))
clipped_putpixel(im, width, height, i-1, j+1, scale_rgb(r,g,b,0.15))
clipped_putpixel(im, width, height, i-1, j-1, scale_rgb(r,g,b,0.15))
elif radius == 3:
clipped_putpixel(im, width, height, i, j, scale_rgb(r,g,b,1.00))
clipped_putpixel(im, width, height, i+1, j, scale_rgb(r,g,b,0.90))
clipped_putpixel(im, width, height, i-1, j, scale_rgb(r,g,b,0.90))
clipped_putpixel(im, width, height, i, j+1, scale_rgb(r,g,b,0.90))
clipped_putpixel(im, width, height, i, j-1, scale_rgb(r,g,b,0.90))
clipped_putpixel(im, width, height, i+1, j+1, scale_rgb(r,g,b,0.40))
clipped_putpixel(im, width, height, i+1, j-1, scale_rgb(r,g,b,0.40))
clipped_putpixel(im, width, height, i-1, j+1, scale_rgb(r,g,b,0.40))
clipped_putpixel(im, width, height, i-1, j-1, scale_rgb(r,g,b,0.40))
# ----------------------------------------------------------------
def random_selection_from_CDF(CDF):
U = random.uniform(0.0, 1.0)
for [C, value] in CDF:
if U < C:
return value
print >> sys.stderr, 'random_selection_from_CDF b0rk'
sys.exit(1)
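# E.g. with radius_CDF as defined above: U < 0.9 gives radius 1,
# 0.9 <= U < 0.98 gives radius 2, and larger U gives radius 3.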
# ----------------------------------------------------------------
def scale_rgb(r,g,b,frac):
return (int(frac*r), int(frac*g), int(frac*b))
# ----------------------------------------------------------------
def clipped_putpixel(im, width, height, i, j, rgb):
if i < 0:
return
if i >= width:
return
if j < 0:
return
if j >= height:
return
im.putpixel((i,j), rgb)
# ================================================================
main()
```
#### File: bin/sde/sdesolv1.py
```python
from __future__ import division # 1/2 = 0.5, not 0.
import math, random
# ----------------------------------------------------------------
# Euler-Maruyama integration of the SDE
#   dX_t = e(X_t, t) dt + f(X_t, t) dB_t.
# Writes the motion into the Xt array, which must be predimensioned as
# Xt[nt][dim]; Xt[0] must already hold the initial condition.  e and f
# take the current state vector and the time, and return lists of
# per-component drift and diffusion coefficients.
def sdesolve1(Xt, e, f, params, dt):
    nt = len(Xt)
    dim = len(Xt[0])
    sqrtdt = math.sqrt(dt)
    t = 0.0
    for j in xrange(1, nt):
        for i in xrange(0, dim):
            dB = random.gauss(0.0, sqrtdt)
            Xt[j][i] = Xt[j-1][i] + e(Xt[j-1], t)[i] * dt + f(Xt[j-1], t)[i] * dB
        t += dt
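# ----------------------------------------------------------------
# A minimal usage sketch (hypothetical parameters, not part of the original
# file): a 1-D Ornstein-Uhlenbeck process dX = -theta*X dt + sigma dB.
#   nt = 1000; dt = 0.01
#   Xt = [[0.0] for _ in xrange(nt)]
#   Xt[0] = [1.0]
#   sdesolve1(Xt, lambda X, t: [-0.5 * X[0]], lambda X, t: [0.2], None, dt)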
```
#### File: pythonlib/bin/test_numderiv.py
```python
from __future__ import division # 1/2 = 0.5, not 0.
from kerlutil import *
from math import *
#from cmath import *
def c(x):
return 1.0
def i(x):
return x
def s(x):
return x**2
def f(x):
return sin(x)
def g(z):
return exp(1j*z)/z
# ----------------------------------------------------------------
def rtest():
#x = frange(2., 4., 20)
x = frange(0, 2*pi, 200)
y = map(f, x)
yp = numderiv(f, x)
n = len(x)
for i in range(0, n):
printf_row([x[i], y[i], yp[i]])
#rtest()
# ----------------------------------------------------------------
from cmath import *
def ctest():
#x = frange(2., 4., 20)
z = ztrange(1., 0, 2*pi, 200)
w = map(g, z)
wp = numderiv(g, z)
n = len(z)
for i in range(0, n):
printf_row(clist_to_rlist([z[i], w[i], wp[i]]))
ctest()
```
#### File: scripts-math/pythonlib/float2hex_m.py
```python
from __future__ import division # 1/2 = 0.5, not 0.
import struct
def float2hex(x):
p = struct.pack ("!f", x)
i = struct.unpack("!I", p)
s = "%08x" % (int(i[0]))
return s
```
#### File: scripts-math/pythonlib/prb_m.py
```python
from __future__ import division # 1/2 = 0.5, not 0.
import sackmat_m
import random
import copy
import sys
import pmtc_tm
# ----------------------------------------------------------------
# Scales the vector so its row sum is 1.
def row_normalize_vector(v):
sum = sackmat_m.vec_contract(v)
if (sum == 0.0):
return
n = len(v)
for i in range(0, n):
v[i] /= sum
# ----------------------------------------------------------------
# Scales each row of the matrix so the row sums are 1.
def row_normalize_matrix(A):
[nr, nc] = A.dims()
for i in range(0, nr):
row_normalize_vector(A[i])
# ----------------------------------------------------------------
# Use the metric induced by the max norm.
def are_close(A, B, tol = 1e-4):
# Assume same dims
n = A.square_dim()
for i in range(0, n):
for j in range(0, n):
d = abs(A[i][j] - B[i][j])
if (d > tol):
return [0, d]
return [1, tol]
# ----------------------------------------------------------------
# Iterates the chain, looking for stability.
def is_stable(A, verbose=0):
Ap = copy.copy(A)
#maxits = 20
maxits = 1000
k = 0
while (k < maxits):
Ao = Ap
Ap = Ap * A
[yn, err] = are_close(Ao, Ap)
if (verbose):
print "-- k = %d err = %.4e" % (k, err)
Ap.printf()
print ""
if (yn == 1):
return [1, Ap]
k += 1
return [0, Ap]
# ----------------------------------------------------------------
# Let q = 1-p.
# p q 0 0 0 0
# 0 p q 0 0 0
# 0 0 p q 0 0
# 0 0 0 p q 0
# 0 0 0 0 p q
# q 0 0 0 0 p
def circpmat1(n, p):
A = sackmat_m.make_zero_matrix(n, n)
for i in range(0, n):
A[i][i] = p
A[i][(i+1)%n] = 1-p
return A
# ----------------------------------------------------------------
# Let q = 1-p.
# 0 q 0 0 0 p
# p 0 q 0 0 0
# 0 p 0 q 0 0
# 0 0 p 0 q 0
# 0 0 0 p 0 q
# q 0 0 0 p 0
def circpmat2(n, p):
A = sackmat_m.make_zero_matrix(n, n)
for i in range(0, n):
A[i][(i-1)%n] = p
A[i][(i+1)%n] = 1-p
return A
# ----------------------------------------------------------------
# Assign matrix elements using uniform distribution on [0, 1), then
# normalize rows.
def randprbmat(n):
A = sackmat_m.make_zero_matrix(n, n)
for i in range(0, n):
for j in range(0, n):
A[i][j] = random.random()
row_normalize_matrix(A)
return A
# ----------------------------------------------------------------
def pqmat(p, q):
return sackmat_m.sackmat([[p, 1-p],[1-q, q]])
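# E.g. pqmat(0.9, 0.8) is the two-state chain [[0.9, 0.1], [0.2, 0.8]]; its
# stationary distribution is (2/3, 1/3), so is_stable converges to a matrix
# whose rows are both (2/3, 1/3).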
# ================================================================
#A = randprbmat(6,6)
#A = pmtc_tm.from_cycles([[1,2],[3,4,5]],5).to_permutation_matrix()
#A = circpmat1(6, .5)
#A = circpmat2(6, .1)
p = 0.9
q = 0.8
if (len(sys.argv) == 3):
p=float(sys.argv[1])
q=float(sys.argv[2])
A=pqmat(p, q)
print "A:"
A.printf()
print ""
#sys.exit(0)
[yn, As] = is_stable(A, 1)
if (yn == 1):
print "stable"
As.printf()
else:
print "unstable"
As.printf()
```
#### File: scripts-math/pythonlib/randc_m.py
```python
from __future__ import division # 1/2 = 0.5, not 0.
import random
import math
from cplxreal_m import *
# ----------------------------------------------------------------
def randc_unit():
phz = random.uniform(0.0, 2.0*math.pi)
return complex(math.cos(phz), math.sin(phz))
# ----------------------------------------------------------------
def randc_mean_sq_1():
s = 0.70710678118654746 # 1.0/math.sqrt(2.0)
re = random.gauss(0.0, s)
im = random.gauss(0.0, s)
return complex(re, im)
# ----------------------------------------------------------------
def randc_normal(mu, sigma_squared):
s = 0.70710678118654746 # 1.0/math.sqrt(2.0)
    scale = math.sqrt(sigma_squared) * s # per-component std dev is sqrt(sigma^2 / 2)
re = random.gauss(real(mu), scale)
im = random.gauss(imag(mu), scale)
return complex(re, im)
```
#### File: scripts-math/pythonlib/stats_m.py
```python
from __future__ import division # 1/2 = 0.5, not 0.
import math
import copy
import sackmat_m
import normal_m
import sys
# ----------------------------------------------------------------
def make_histogram(farray, lo, hi, num_bins):
bins = [0] * num_bins
# Ensure they are floats.
lo *= 1.0; hi *= 1.0
# This is loop-invariant; hoist it out.
mul = num_bins / (hi - lo)
for element in farray:
if ((element >= lo) and (element < hi)):
idx = int((element-lo) * mul)
bins[idx] += 1
return bins
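# E.g. make_histogram([0.1, 0.5, 0.9], 0.0, 1.0, 2) == [1, 2].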
# ----------------------------------------------------------------
def make_histogram2(xy_pairs, lo_x, hi_x, num_x_bins, lo_y, hi_y, num_y_bins):
bins = sackmat_m.make_zero_matrix(num_x_bins, num_y_bins)
# Ensure they are floats.
lo_x *= 1.0; hi_x *= 1.0; lo_y *= 1.0; hi_y *= 1.0
# These are loop-invariant; hoist them out.
mul_x = num_x_bins / (hi_x - lo_x)
mul_y = num_y_bins / (hi_y - lo_y)
for [x, y] in xy_pairs:
if ((x >= lo_x) and (x < hi_x) and (y >= lo_y) and (y < hi_y)):
x_idx = int((x-lo_x) * mul_x)
y_idx = int((y-lo_y) * mul_y)
bins[x_idx][y_idx] += 1
return bins
# ----------------------------------------------------------------
def make_histogram_labels(lo, hi, num_bins, center=0):
list = []
delta = (hi-lo)/(1.0*num_bins)
for i in range(0, num_bins):
if (center):
label = lo + (i + 0.5) *delta
else:
label = lo + i*delta
list.append(label)
return list
# ----------------------------------------------------------------
def find_min(farray):
lo = farray[0]
for element in farray:
if (element < lo):
lo = element
return lo
# ----------------------------------------------------------------
def find_max(farray):
hi = farray[0]
for element in farray:
if (element > hi):
hi = element
return hi
# ----------------------------------------------------------------
def find_bounds(farray):
lo = farray[0]
hi = farray[0]
for element in farray:
if (element < lo):
lo = element
if (element > hi):
hi = element
return [lo, hi]
# ----------------------------------------------------------------
# Scalar mean
def find_mean(farray):
sum = 0.0
for element in farray:
sum += element
return sum / len(farray)
# ----------------------------------------------------------------
# Scalar standard deviation
def find_stddev(farray, mean):
sum = 0.0
for element in farray:
sum += (element - mean) ** 2
return math.sqrt(sum / (len(farray) - 1))
# ----------------------------------------------------------------
# Standard error, i.e. standard deviation of the sample mean.
def find_stderr(farray, mean):
s = find_stddev(farray, mean)
n = len(farray)
return s / math.sqrt(n)
# ----------------------------------------------------------------
# Population variance
def find_popvaraux(xs, mean):
sum = 0.0
for x in xs:
sum += (x - mean) ** 2
return sum / len(xs)
def find_popvar(xs):
return find_popvaraux(xs, find_mean(xs))
# Sample variance
def find_sampvaraux(xs, mean):
sum = 0.0
for x in xs:
sum += (x - mean) ** 2
return sum / (len(xs) - 1)
def find_sampvar(xs):
return find_sampvaraux(xs, find_mean(xs))
def find_var_of_sample_mean(xs):
return find_sampvar(xs) / len(xs)
# U = 1 - <X^4> / (3 <X^2>^2)
# where X is taken to have zero mean
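# For zero-mean Gaussian data <X^4> = 3 <X^2>^2, so U tends to 0 on large
# Gaussian samples; nonzero U signals departure from Gaussian statistics.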
def find_fourth_order_cumulant(farray):
n = len(farray)
sumx = 0.0
sumx2 = 0.0
sumx4 = 0.0
for x in farray:
sumx += x
meanx = sumx / n
for y in farray:
x = y - meanx
x2 = x*x
x4 = x2*x2
sumx2 += x2
sumx4 += x4
meanx2 = sumx2 / n
meanx4 = sumx4 / n
if meanx2 == 0.0:
return 0.0
else:
        return 1.0 - meanx4 / (3 * meanx2 * meanx2)
def find_fourth_order_cumulant_temp(farray):
n = len(farray)
sumx = 0.0
sumx2 = 0.0
sumx3 = 0.0
sumx4 = 0.0
for x in farray:
sumx += x
sumx2 += x*x
sumx3 += x*x*x
sumx4 += x*x*x*x
xbar = sumx / n
xbar2 = xbar * xbar
xbar3 = xbar * xbar2
xbar4 = xbar * xbar3
numer = \
sumx4 \
- 4 * xbar * sumx3 \
+ 6 * xbar2 * sumx2 \
- 4 * xbar3 * sumx \
+ n * xbar4
denom = \
sumx2 * sumx2 \
+ 4 * xbar2 * sumx * sumx \
+ n * n * xbar4 \
+ 2 * n * xbar2 * sumx2 \
- 4 * xbar * sumx * sumx2 \
- 4 * n * xbar3 * sumx
if denom == 0.0:
return 0.0
else:
return 1.0 - (n * numer) / (3 * denom)
# ----------------------------------------------------------------
# Sample covariance of two lists.
def find_sample_covariance(xs, ys):
N = len(xs)
mean_x = find_mean(xs)
mean_y = find_mean(ys)
sum = 0.0
for k in range(0, N):
sum += (xs[k] - mean_x) * (ys[k] - mean_y)
return sum / (N-1.0)
# ----------------------------------------------------------------
# Sample correlation of two lists:
# Corr(X,Y) = Cov(X,Y) / sigma_X sigma_Y.
def find_sample_correlation(xs, ys):
N = len(xs)
cov = find_sample_covariance(xs, ys)
mean_x = find_mean(xs)
mean_y = find_mean(ys)
stddev_x = find_stddev(xs, mean_x)
stddev_y = find_stddev(ys, mean_y)
return cov / (stddev_x * stddev_y)
# ----------------------------------------------------------------
def find_sample_tauint(xs, numk=0):
N = len(xs)
if numk == 0:
numk = N
tauint = [0.0] * numk
autocorr = find_sample_autocorr(xs, numk)
sum = 1.0
tauint[0] = sum
for t in range(1, numk):
sum += 2 * autocorr[t]
tauint[t] = sum
return tauint
# ----------------------------------------------------------------
# Computing running sum of $\hat{\tau}_int(t)$.
# Stop after a flat spot has been found, detected via a turning point.
# Be clever about re-using previous sums.
#
# See my dissertation for details. Or, Berg's Markov Chain Monte Carlo
# Simulations and Their Statistical Analysis.
def find_sample_tauint_flat_spot(xs):
N = len(xs)
minflat = 4
# Within-window sums sumi, sumi2, sumj, and sumj2 contain terms which can
# be re-used across k. The cross-sums sumij are different for each k and
# must be recomputed.
#
# Here, compute the full-window sums.
sumi = 0.0; sumi2 = 0.0
for k in range(0, N):
x = xs[k]
sumi += x
sumi2 += x*x
sumj = sumi; sumj2 = sumi2
tauint = 1.0
nflat = 0
# Go up only to t=N-2. The autocorr estimator for t=N-1 doesn't work (only
# one sample), and if we haven't found a flat spot of tauint by then, we
# aren't going to.
for t in range(1, N-1):
winsz = N - t
winszm1 = winsz - 1
denom = winszm1
if winszm1 == 0:
denom = 1
i = t; j = N-t-1
# Update the within-window sums.
xi = xs[ i ]; xj = xs[ j ]
xim = xs[i-1]; xjp = xs[j+1]
sumi -= xim; sumi2 -= xim*xim
sumj -= xjp; sumj2 -= xjp*xjp
# Compute the cross-sum.
sumij = 0.0
for k in range(0, winsz):
sumij += xs[k] * xs[t+k]
# Compute the autocorrelation term for this t.
meani = sumi / winsz; meanj = sumj / winsz
stdi = math.sqrt((sumi2 - (sumi**2 / winsz)) / denom)
stdj = math.sqrt((sumj2 - (sumj**2 / winsz)) / denom)
autocorr_t = 0.0
if (stdi != 0.0) and (stdj != 0.0):
autocorr_t = (sumij / winsz - (meani*meanj)) / (stdi*stdj)
tauint += 2.0 * autocorr_t
#print '%d %11.7f %11.7f' % (t, autocorr_t, tauint)
if autocorr_t < 0.0:
return tauint
return tauint
# ----------------------------------------------------------------
# E[X_i X_j] - mu^2
# -----------------.
# sigma^2
# More efficient autocorrelation:
#
# The cross-window sums must be computed for each k, because they differ for
# each k. But the within-window sums may be accumulated, as long as we start
# with k at the end and work our way backward to the beginning.
# Example with N = 8:
# [ 0 ] k = 7
# [ o ]
# [ o ]
# [ 7 ]
# [ 0 1 ] k = 6
# [ o o ]
# [ o o ]
# [ 6 7 ]
# ...
# [ 0 1 2 3 4 5 6 ] k = 1
# [ o o o o o o o ]
# [ o o o o o o o ]
# [ 1 2 3 4 5 6 7 ]
# [ 0 1 2 3 4 5 6 7 ] k = 0
# [ o o o o o o o o ]
# [ o o o o o o o o ]
# [ 0 1 2 3 4 5 6 7 ]
# ----------------------------------------------------------------
# Pass numk=0 or numk=N to compute all possible autocorrelations. Pass numk =
# something smaller to compute only the first numk autocorrelations.
def find_sample_autocorr(xs, numk=0):
N = len(xs)
if numk == 0:
numk = N
autocorr = [0.0] * numk
if (numk <= 1) or (numk > N):
print >> sys.stderr, \
"find_sample_autocorr: numk must be > 1 and <= N=%d; got %d." \
% (N, numk)
sys.exit(1)
# Sums over first and second windows
sumi = 0.0; sumi2 = 0.0
sumj = 0.0; sumj2 = 0.0
# k = N-1 down to numk: accumulate the window sums only; no autocorrelation
# is reported for these lags. (At k = N-1 the sample size is 1, so the
# standard deviations would divide by zero anyway.)
i = 0; j = N-1
for k in range(N-1, numk-1, -1):
xi = xs[i]; xj = xs[j]
sumi += xi; sumi2 += xi**2
sumj += xj; sumj2 += xj**2
i += 1; j -= 1
# k = numk-1 down to 1.
for k in range(numk-1, 0, -1):
xi = xs[i]; xj = xs[j]
sumi += xi; sumi2 += xi**2
sumj += xj; sumj2 += xj**2
winsz = N-k; winszm1 = winsz - 1
if k == N-1: # Leave autocorr[N-1] = 0.0 to avoid division by zero.
i += 1; j -= 1
continue
sumij = 0.0
for ell in range(0, winsz):
sumij += xs[ell] * xs[ell+k]
meani = sumi / winsz; meanj = sumj / winsz
stdi = math.sqrt((sumi2 - (sumi**2 / winsz)) / winszm1)
stdj = math.sqrt((sumj2 - (sumj**2 / winsz)) / winszm1)
if (stdi == 0.0) or (stdj == 0.0):
autocorr[k] = 0.0
else:
autocorr[k] = (sumij / winsz - (meani*meanj)) / (stdi*stdj)
i += 1; j -= 1
# k = 0: sumij, sumi2, and sumj2 are all the same.
k = 0
winsz = N; winszm1 = N-1
xi = xs[N-1]
sumi += xi
sumi2 += xi**2
meani = sumi / N
stdi = math.sqrt((sumi2 - (sumi**2 / winsz)) / winszm1)
autocorr[0] = (sumi2 / N - (meani*meani)) / (stdi*stdi)
return autocorr
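# A usage sketch with illustrative data: for white noise the lag-0
# autocorrelation is 1 and the higher lags should be near 0.
def demo_sample_autocorr():
    import random
    xs = [random.gauss(0.0, 1.0) for _ in range(200)]
    acf = find_sample_autocorr(xs, 10)  # first 10 lags only
    for k in range(0, 10):
        print('%d %11.7f' % (k, acf[k]))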
## ----------------------------------------------------------------
#def find_sample_full_autocorr(xs):
# N = len(xs)
# autocorr = [0.0] * N
#
# # Sums over first and second windows
# sumi = 0.0; sumi2 = 0.0
# sumj = 0.0; sumj2 = 0.0
#
# # k = N-1: accumulate sums, but set autocorr to zero. Sample sizes
# # are 1; standard deviations would get a division by zero.
# i = 0; j = N-1
# k = N-1
# xi = xs[i]; xj = xs[j]
# sumi += xi; sumi2 += xi**2
# sumj += xj; sumj2 += xj**2
# autocorr[k] = 0.0
# i += 1; j -= 1
#
# # k = N-2 down to 1.
# for k in range(N-2, 0, -1):
# xi = xs[i]; xj = xs[j]
# sumi += xi; sumi2 += xi**2
# sumj += xj; sumj2 += xj**2
# winsz = N-k; winszm1 = winsz - 1
#
# sumij = 0.0
# for ell in range(0, winsz):
# sumij += xs[ell] * xs[ell+k]
#
# meani = sumi / winsz; meanj = sumj / winsz
# stdi = math.sqrt((sumi2 - (sumi**2 / winsz)) / winszm1)
# stdj = math.sqrt((sumj2 - (sumj**2 / winsz)) / winszm1)
#
# autocorr[k] = (sumij / winsz - (meani*meanj)) / (stdi*stdj)
# i += 1; j -= 1
#
# # k = 0: sumij, sumi2, and sumj2 are all the same.
# k = 0
# winsz = N; winszm1 = N-1
# xi = xs[N-1]
# sumi += xi
# sumi2 += xi**2
# meani = sumi / N
# stdi = math.sqrt((sumi2 - (sumi**2 / winsz)) / winszm1)
#
# autocorr[0] = (sumi2 / N - (meani*meani)) / (stdi*stdi)
#
# return autocorr
#
## ----------------------------------------------------------------
#def find_sample_short_autocorr(xs, numk):
# N = len(xs)
# autocorr = [0.0] * numk
#
# if (numk <= 1) or (numk >= N):
# print >> sys.stderr, \
# "find_sample_short_autocorr: numk must be > 1 and < N=%d; got %d." \
# % (N, numk)
# sys.exit(1)
#
# # Sums over first and second windows
# sumi = 0.0; sumi2 = 0.0
# sumj = 0.0; sumj2 = 0.0
#
# # k = N-1: accumulate sums, but set autocorr to zero. Sample sizes
# # are 1; standard deviations would get a division by zero.
# i = 0; j = N-1
# for k in range(N-1, numk-1, -1):
# xi = xs[i]; xj = xs[j]
# sumi += xi; sumi2 += xi**2
# sumj += xj; sumj2 += xj**2
# i += 1; j -= 1
#
# # k = N-2 down to 1.
# for k in range(numk-1, 0, -1):
# xi = xs[i]; xj = xs[j]
# sumi += xi; sumi2 += xi**2
# sumj += xj; sumj2 += xj**2
# winsz = N-k; winszm1 = winsz - 1
#
# sumij = 0.0
# for ell in range(0, winsz):
# sumij += xs[ell] * xs[ell+k]
#
# meani = sumi / winsz; meanj = sumj / winsz
# stdi = math.sqrt((sumi2 - (sumi**2 / winsz)) / winszm1)
# stdj = math.sqrt((sumj2 - (sumj**2 / winsz)) / winszm1)
#
# autocorr[k] = (sumij / winsz - (meani*meanj)) / (stdi*stdj)
# i += 1; j -= 1
#
# # k = 0: sumij, sumi2, and sumj2 are all the same.
# k = 0
# winsz = N; winszm1 = N-1
# xi = xs[N-1]
# sumi += xi
# sumi2 += xi**2
# meani = sumi / N
# stdi = math.sqrt((sumi2 - (sumi**2 / winsz)) / winszm1)
#
# autocorr[0] = (sumi2 / N - (meani*meani)) / (stdi*stdi)
#
# return autocorr
# ----------------------------------------------------------------
# E[X_i X_j] - mu^2
# -----------------.
# sigma^2
# Example:
#
# N = 8, k = 2
# 0 1 2 3 4 5
# o o o o o o
# o o o o o o
# 2 3 4 5 6 7
def find_sample_autocorr_old1(xs):
N = len(xs)
autocorr = [0.0] * N
for k in range(0, N-1): # k = j-i
window1 = xs[0:N-k]
window2 = xs[k:N]
mean1 = find_mean(window1)
mean2 = find_mean(window2)
std1 = find_stddev(window1, mean1)
std2 = find_stddev(window2, mean2)
cross_sum = 0.0
for i in range(0, N-k):
cross_sum += xs[i] * xs[i+k]
cross_mean = cross_sum / (N-k)
autocorr[k] = (cross_mean - mean1*mean2) / (std1 * std2)
return autocorr
# ----------------------------------------------------------------
# Vector mean: varray is a list of N vectors, each having n elements. This
# could be done more elegantly using calls to sackmat_m's vecadd but it seems
# tighter performancewise this way.
def find_vector_mean(varray):
N = len(varray)
n = len(varray[0])
sum = [0.0] * n
for vector in varray:
for j in range(0, n):
sum[j] += vector[j]
for j in range(0, n):
sum[j] *= (1.0/N)
return sum
# ----------------------------------------------------------------
# Vector standard deviation: varray is a list of N vectors, each having n
# elements. This could be done more elegantly using calls to sackmat routines
# but it seems tighter performancewise this way.
def find_vector_stddev(varray, vmean):
N = len(varray)
n = len(varray[0])
sum = [0.0] * n
for vector in varray:
for j in range(0, n):
sum[j] += (vector[j] - vmean[j])**2
denom = 1.0 / (N-1)
for j in range(0, n):
sum[j] = math.sqrt(sum[j] * denom)
return sum
# ----------------------------------------------------------------
# find_sample_covariance_matrix
# ----------------------------------------------------------------
#
# There are nx vectors x, each having dim elements.
# The sample covariance matrix Q is dim x dim with elements
# q[i][j] = (1/(nx-1)) sum_{k=1}^nx (x[k][i] - mu[i])(x[k][j] - mu[j])
# where k indexes the set of x's and i,j index elements within the k'th vector.
# I.e. the ij'th entry of Q is the (scalar) sample covariance of xi and xj.
# In particular, the diagonal entries are the variances of the xi.
# Example:
# nx = 3 and dim = 2.
#
# x0 = [ 1 ] x1 = [ 2 ] x2 = [ 3 ]
# [-4 ] [-5 ] [-6 ]
#
# mu = [ 2 ]
# [-5 ]
# q[0][0] = (x[0][0] - mu[0]) * (x[0][0] - mu[0])
# + (x[1][0] - mu[0]) * (x[1][0] - mu[0])
# + (x[2][0] - mu[0]) * (x[2][0] - mu[0])
#
# = ( 1 - 2) * ( 1 - 2)
# + ( 2 - 2) * ( 2 - 2)
# + ( 3 - 2) * ( 3 - 2)
#
# = 1 + 0 + 1 = 2
# q[0][1] = (x[0][0] - mu[0]) * (x[0][1] - mu[1])
# + (x[1][0] - mu[0]) * (x[1][1] - mu[1])
# + (x[2][0] - mu[0]) * (x[2][1] - mu[1])
#
# = ( 1 - 2) * (-4 + 5)
# + ( 2 - 2) * (-5 + 5)
# + ( 3 - 2) * (-6 + 5)
#
# =-1 + 0 - 1 = -2
# q[1][0] = (x[0][1] - mu[1]) * (x[0][0] - mu[0])
# + (x[1][1] - mu[1]) * (x[1][0] - mu[0])
# + (x[2][1] - mu[1]) * (x[2][0] - mu[0])
#
# = (-4 + 5) * ( 1 - 2)
# + (-5 + 5) * ( 2 - 2)
# + (-6 + 5) * ( 3 - 2)
#
# =-1 + 0 - 1 = -2
# q[1][1] = (x[0][1] - mu[1]) * (x[0][1] - mu[1])
# + (x[1][1] - mu[1]) * (x[1][1] - mu[1])
# + (x[2][1] - mu[1]) * (x[2][1] - mu[1])
#
# = (-4 + 5) * (-4 + 5)
# + (-5 + 5) * (-5 + 5)
# + (-6 + 5) * (-6 + 5)
#
# = 1 + 0 + 1 = 2
# Q = [ 2 -2 ] * (1/2)
# [-2 2 ]
#
# = [ 1 -1 ]
# [-1 1 ]
def find_sample_covariance_matrix(xs):
nx = len(xs)
dim = len(xs[0])
mean = find_vector_mean(xs)
Q = sackmat_m.make_zero_matrix(dim, dim)
for i in range(0, dim):
for j in range(0, dim):
sum = 0.0
for k in range(0, nx):
sum += (xs[k][i] - mean[i]) * (xs[k][j] - mean[j])
Q[i][j] = sum / (nx-1.0)
return Q
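# A usage sketch reproducing the worked example above: these three
# 2-vectors should give the sample covariance matrix [[1, -1], [-1, 1]].
def demo_sample_covariance_matrix():
    xs = [[1.0, -4.0], [2.0, -5.0], [3.0, -6.0]]
    print(find_sample_covariance_matrix(xs))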
# ----------------------------------------------------------------
# Corr(X,Y) = Cov(X,Y) / sigma_X sigma_Y
# Corr(Xi,Xj) = Cov(Xi,Xj) / sigma_Xi sigma_Xj
# The ijth entry of the correlation matrix is the correlation of Xi and Xj.
def find_sample_correlation_matrix(xs):
nx = len(xs)
dim = len(xs[0])
cov = find_sample_covariance_matrix(xs)
vmean = find_vector_mean(xs)
vstddev = find_vector_stddev(xs, vmean)
for i in range(0, dim):
for j in range(0, dim):
cov[i][j] /= vstddev[i] * vstddev[j]
return cov
# ----------------------------------------------------------------
# Univariate linear regression
# ----------------------------------------------------------------
# There are N (xi, yi) pairs.
#
# E = sum (yi - m xi - b)^2
#
# DE/Dm = sum 2 (yi - m xi - b) (-xi) = 0
# DE/Db = sum 2 (yi - m xi - b) (-1) = 0
#
# sum (yi - m xi - b) (xi) = 0
# sum (yi - m xi - b) = 0
#
# sum (xi yi - m xi^2 - b xi) = 0
# sum (yi - m xi - b) = 0
#
# m sum(xi^2) + b sum(xi) = sum(xi yi)
# m sum(xi) + b N = sum(yi)
#
# [ sum(xi^2) sum(xi) ] [ m ] = [ sum(xi yi) ]
# [ sum(xi) N ] [ b ] = [ sum(yi) ]
#
# [ m ] = [ sum(xi^2) sum(xi) ]^-1 [ sum(xi yi) ]
# [ b ] [ sum(xi) N ] [ sum(yi) ]
#
# = [ N -sum(xi) ] [ sum(xi yi) ] * 1/D
# [ -sum(xi) sum(xi^2)] [ sum(yi) ]
#
# where
#
# D = N sum(xi^2) - sum(xi)^2.
#
# So
#
# N sum(xi yi) - sum(xi) sum(yi)
# m = --------------------------------
# D
#
# -sum(xi)sum(xi yi) + sum(xi^2) sum(yi)
# b = ----------------------------------------
# D
def linear_regression(xs, ys):
sumxi = 0.0
sumyi = 0.0
sumxiyi = 0.0
sumxi2 = 0.0
N = len(xs)
for i in range(0, N):
x = xs[i]
y = ys[i]
sumxi += x
sumyi += y
sumxiyi += x*y
sumxi2 += x*x
D = N * sumxi2 - sumxi**2
m = (N * sumxiyi - sumxi * sumyi) / D
b = (-sumxi * sumxiyi + sumxi2 * sumyi) / D
# Young 1962, pp. 122-124. Compute sample variance of linear
# approximations, then variances of m and b.
var_z = 0.0
for i in range(0, N):
var_z += (m * xs[i] + b - ys[i])**2
var_z /= N
var_m = (N * var_z) / D
var_b = (var_z * sumxi2) / D
return [m, b, math.sqrt(var_m), math.sqrt(var_b)]
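# A usage sketch with illustrative data: points lying exactly on
# y = 2x + 1 should recover m = 2 and b = 1 with zero standard errors.
def demo_linear_regression():
    xs = [0.0, 1.0, 2.0, 3.0, 4.0]
    ys = [1.0, 3.0, 5.0, 7.0, 9.0]
    m, b, sigma_m, sigma_b = linear_regression(xs, ys)
    print('m=%g b=%g' % (m, b))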
# ----------------------------------------------------------------
def get_corr_coeff(xs, ys):
sumxi = 0.0
sumyi = 0.0
sumxiyi = 0.0
sumxi2 = 0.0
sumyi2 = 0.0
N = len(xs)
for i in range(0, N):
x = xs[i]
y = ys[i]
sumxi += x
sumyi += y
sumxiyi += x*y
sumxi2 += x*x
sumyi2 += y*y
# Young 1962, p. 130.
a = N*sumxiyi - sumxi*sumyi
b = N*sumxi2 - sumxi**2
c = N*sumyi2 - sumyi**2
corr_coeff = a / math.sqrt(b*c)
return corr_coeff
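# A usage sketch: the same exact-line data gives a correlation
# coefficient of exactly 1.
def demo_corr_coeff():
    xs = [0.0, 1.0, 2.0, 3.0, 4.0]
    ys = [1.0, 3.0, 5.0, 7.0, 9.0]
    print(get_corr_coeff(xs, ys))  # expect 1.0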
# ----------------------------------------------------------------
def zscore(x):
return normal_m.invnorm(x)
# ----------------------------------------------------------------
def get_normal_quantiles_zs(xarray):
# Assume inputs are sorted?
n = len(xarray)
zarray = []
for i in range(0, n):
z = zscore((1.0*(i+0.5))/n)
zarray.append(z)
return zarray
# ----------------------------------------------------------------
def print_normal_quantiles_zx(xarray):
xs = copy.copy(xarray)
xs.sort()
zs = get_normal_quantiles_zs(xs)
n = len(xs)
for i in range(0, n):
print zs[i], xs[i]
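# A usage sketch: print (z, x) pairs suitable for a normal Q-Q plot.
# Relies on normal_m.invnorm via zscore, as used above.
def demo_normal_quantiles():
    print_normal_quantiles_zx([2.1, 0.3, -0.7, 1.1, -1.5, 0.2])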
# ----------------------------------------------------------------
# Suppose x_1, ..., x_N are IID samples of a random variable X.
# The kernel density estimator of the PDF of X, with bandwidth parameter
# h, is the function
#
# KDA(x) = (1/Nh) sum K((x-x_i)/h)
#
# where the kernel K may be taken to be the PDF of the standard normal.
def kernel_density_estimator(x, xarray, h):
N = len(xarray)
sum = 0.0
for xi in xarray:
sum += normal_m.normalpdf((x-xi)/h)
return sum/N/h
def plot_kde(xarray, h, xlo, xhi, nx):
dx = (xhi - xlo) / (1.0*nx)
for i in range(0, nx+1):
x = xlo + i*dx
y = kernel_density_estimator(x, xarray, h)
print x, y
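# A usage sketch: tabulate the kernel density estimate of a small bimodal
# sample; the bandwidth and plotting window are illustrative choices.
def demo_kde():
    xarray = [4.8, 5.0, 5.2, 9.9, 10.1]
    plot_kde(xarray, 0.5, 3.0, 12.0, 90)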
``` |
{
"source": "johnkerl/spit",
"score": 2
} |
#### File: johnkerl/spit/spit-client.py
```python
import sys, os, getopt, socket, errno
OURDIR = os.path.dirname(sys.argv[0])
if OURDIR == '':
OURDIR = '.'
execfile(OURDIR + '/spit-classes.py')
DEFAULT_WORKER_ID = "0"
# ================================================================
def usage(ostream):
a = sys.argv[0]
string = \
"""Usage: %s [options] ask
Or: %s [options] show
Or: %s [options] output {DKVP text}
Or: %s [options] stats {DKVP text}
Or: %s [options] mark-done {task_id}
Or: %s [options] mark-failed {task_id}
Options:
-s {server host name} Defaults to %s
-p {server port number} Defaults to %d
-w {worker ID} Defaults to %s
""" % (a,a,a,a,a,a,DEFAULT_SPIT_SERVER_HOST_NAME,DEFAULT_SPIT_SERVER_PORT_NUMBER,DEFAULT_WORKER_ID)
ostream.write(string)
# ----------------------------------------------------------------
def main():
server_host_name = DEFAULT_SPIT_SERVER_HOST_NAME
server_port_number = DEFAULT_SPIT_SERVER_PORT_NUMBER
worker_id = DEFAULT_WORKER_ID
try:
optargs, non_option_args = getopt.getopt(sys.argv[1:], "s:p:w:h", ['help'])
except getopt.GetoptError, err:
print >> sys.stderr, str(err)
usage(sys.stderr)
sys.exit(1)
for opt, arg in optargs:
if opt == '-s':
server_host_name = arg
elif opt == '-p':
server_port_number = int(arg)
elif opt == '-w':
worker_id = arg
elif opt == '-h':
usage(sys.stdout)
sys.exit(0)
elif opt == '--help':
usage(sys.stdout)
sys.exit(0)
else:
print >> sys.stderr, "Unhandled option \"%s\"." % opt
sys.exit(1)
non_option_arg_count = len(non_option_args)
if non_option_arg_count < 1:
usage(sys.stderr)
sys.exit(1)
verb = non_option_args[0]
non_option_args = non_option_args[1:]
non_option_arg_count = len(non_option_args)
client = SpitClient(server_host_name, server_port_number, worker_id)
if verb == 'ask':
if non_option_arg_count != 0:
usage(sys.stderr)
sys.exit(1)
task_id = client.send_wreq()
cprint(task_id)
elif verb == 'show':
if non_option_arg_count != 0:
usage(sys.stderr)
sys.exit(1)
output = client.send_show()
cprint(output)
elif verb == 'output':
client.send_output(",".join(non_option_args))
elif verb == 'stats':
client.send_stats(",".join(non_option_args))
elif verb == 'mark-done':
if non_option_arg_count != 1:
usage(sys.stderr)
sys.exit(1)
task_id = non_option_args[0]
client.send_mark_done(task_id)
elif verb == 'mark-failed':
if non_option_arg_count != 1:
usage(sys.stderr)
sys.exit(1)
task_id = non_option_args[0]
client.send_mark_failed(task_id)
else:
usage(sys.stderr)
sys.exit(1)
# ================================================================
if __name__ == "__main__":
main()
``` |
{
"source": "JohnKHancock/my_portfolio",
"score": 3
} |
#### File: Data Wrangling OpenStreetMap/Code Files/Part_One_Audit.py
```python
import xml.etree.cElementTree as ET
from collections import defaultdict
import pprint as pp
import re
#==============================================================================
# Data Wrangling Final Project
#
#==============================================================================
filename = 'detroit_michigan.osm'
#==============================================================================
#
# PART ONE
#1. Measuring Validity: Does the Data Conform to a Schema:
#==============================================================================
tags = {}
elems = {
"Parents": 0,
"Stand_Alone": 0
}
def count_tags(filename):
for _, elem in ET.iterparse(filename):
if elem.tag in tags:
tags[elem.tag] += 1
else:
tags[elem.tag] = 1
return tags
def count_elements_with_children(filename):
for _, elem in ET.iterparse(filename):
checkList = list(elem) # elem.getchildren() is deprecated in ElementTree
if len(checkList) > 0:
elems["Parents"] += 1
else:
elems["Stand_Alone"] += 1
return elems
#Uncomment the lines below to get the results of the tags count
#count_tags(filename)
#count_elements_with_children(filename)
#pp.pprint(tags)
#pp.pprint(elems)
#==============================================================================
#2. Measuring Data Accuracy: Perform an Audit of the Data
#==============================================================================
#Code taken from lesson 3 of DataWrangling with MongoDB
street_type_re = re.compile(r'\S+\.?$', re.IGNORECASE)
street_types = defaultdict(int)
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
street_types[street_type] += 1
def print_sorted_dict(d):
keys = d.keys()
keys = sorted(keys, key=lambda s: s.lower())
for k in keys:
v = d[k]
pp.pprint( "%s: %d" % (k, v) )
def is_street_name(elem):
return (elem.tag == "tag") and (elem.attrib['k'] == "addr:street")
def audit():
for event, elem in ET.iterparse(filename):
if is_street_name(elem):
audit_street_type(street_types, elem.attrib['v'])
print_sorted_dict(street_types)
#Uncomment the lines below to get the results of the audit of the street names
#audit()
#==============================================================================
#3. Measuring Data Accuracy: Perform an Audit of the Data – Non US Entries
#==============================================================================
country_codes = {}
def audit_country():
for event, elem in ET.iterparse(filename):
if elem.tag == "tag":
if elem.attrib['k'] == "addr:country":
if elem.attrib['v'] in country_codes:
country_codes[elem.attrib['v']] += 1
else:
country_codes[elem.attrib['v']] = 1
return country_codes
#Uncomment the lines below to get the results of the audit of countries
#audit_country()
#pp.pprint(country_codes)
#==============================================================================
# 4. Measuring Data Accuracy: Perform an Audit of the Data – Erroneous Postal Codes
#==============================================================================
postcodes = {}
def audit_postcodes():
for event, elem in ET.iterparse(filename):
if elem.tag == "tag":
if elem.attrib['k'] == "addr:postcode":
if elem.attrib['v'] in postcodes:
postcodes[elem.attrib['v']] += 1
else:
postcodes[elem.attrib['v']] = 1
pp.pprint(postcodes)
#Uncomment the lines below to get the results of the audit of the postal codes
#audit_postcodes()
#pp.pprint(postcodes)
#==============================================================================
#5. Measuring Data Accuracy: Perform an Audit of the Data – Erroneous City Values
#==============================================================================
city = {}
def audit_city():
for event, elem in ET.iterparse(filename):
if elem.tag == "tag":
if elem.attrib['k'] == "addr:city":
if elem.attrib['v'] in city:
city[elem.attrib['v']] += 1
else:
city[elem.attrib['v']] = 1
pp.pprint(city)
#Uncomment the lines below to get the results of the audit of the city data
#audit_city()
#pp.pprint(city)
``` |
{
"source": "john-khgoh/KeywordSubstitutionCipherSolver",
"score": 4
} |
#### File: john-khgoh/KeywordSubstitutionCipherSolver/Solver.py
```python
from datetime import datetime
from itertools import permutations
from collections import OrderedDict
def removeAlphaDup(wordlist): #Remove duplicate letters from each word in the wordlist, e.g. hello becomes helo
newwordlist = []
for word in wordlist:
newwordlist.append(''.join(OrderedDict.fromkeys(word))) #After removing duplicate using OrderDict, append to newwordlist
return newwordlist
#print("test")
def switchKey(word): #Add keyword to front of alpha, without duplicates
#alpha = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
#key = ['<KEY>']
#alpha='abcdefghijklmnopqrstuvwxyz'
removelist = ['-','\'',' ']
key='<KEY>'
newkey=word+key
newkey=newkey.translate(None,''.join(removelist)) #Removes character from removelist
newkey=''.join(OrderedDict.fromkeys(newkey))
return newkey
#print((permutations(key))
def wordFinder(wordlist,phrase): #Find words from the wordlist in the phrase
#list = ['this','is','a','test','not']
sublist = []
for word in wordlist:
if(phrase.find(word) > -1): #Check if a word from the wordlist occurs in the phrase; find returns -1 when absent, so > -1 also catches a match at index 0
sublist.append(word)
#print(word)
return sublist
#print(sublist)
#a = phrase.find('thee')
#print phrase
def decrypt(phrase):
wordlist = open('English3000.txt','r').read().split('\n') #Read from the word list
outputname = "Output\Output" + str(datetime.now().strftime('-%Y%m%d-%H%M')) + ".txt" #Name format of output file
f = open(outputname,'a') #Write to file in append mode
newwordlist = removeAlphaDup(wordlist)
alpha='abcdefghijklmnopqrstuvwxyz'
for word in newwordlist: #Iterate through newwordlist
sublist = []
newphrase = ''
newkey = switchKey(word)
for char in phrase:
pos = newkey.find(char)
newphrase=newphrase+alpha[pos]
sublist = wordFinder(wordlist,newphrase)
if sublist: #If sublist is not empty, write to file
f.write(str(sublist)+'\n')
f.close()
#print(newphrase)
#print newkey
#print(newwordlist)
#wordFinder(wordlist,phrase)
#switchKey(phrase)
phrase = '<KEY>' #This is the string you want to decipher
decrypt(phrase)
``` |
{
"source": "JohnKhor/tankobon",
"score": 3
} |
#### File: tankobon/parser/01_shonenjump.py
```python
import bs4
import json
import os
import requests
from utils import (get_content, get_soup, create_dir, write_image, save_json)
BASE_URL = 'https://www.shonenjump.com'
RENSAI_URL = BASE_URL + '/j/rensai/'
ARCHIVES_URL = RENSAI_URL + 'archives.html'
LIST_URL = RENSAI_URL + 'list/'
def shonenjump():
# create image directory
IMG_DIR = 'img'
create_dir(IMG_DIR)
rensai_soup = get_soup(get_content(RENSAI_URL))
archives_soup = get_soup(get_content(ARCHIVES_URL))
# store series information: name, abbreviated name and whether it is still ongoing
all_series = []
# create icon directory
ICONS_DIR = os.path.join(IMG_DIR, 'icons')
create_dir(ICONS_DIR)
for soup in [rensai_soup, archives_soup]:
# ongoing series?
ongoing = True if soup is rensai_soup else False
section = soup.find('section', class_='serialSeries')
for li in section.find_all('li'):
# series name in japanese
name_jp = li.div.text if li.div else li.p.text
name_jp = name_jp[1:name_jp.find('』')]
link_tag = li.a
# abbreviated name
abbr = link_tag['href'].rsplit('/', 1)[1][:-5]
# download icon
img_src = link_tag.img['src']
img_url = BASE_URL + img_src
file_path = os.path.join(ICONS_DIR, abbr + '.' + img_src.rsplit('.', 1)[1])
print(f'Downloading {file_path}...')
write_image(img_url, file_path)
# add series
series = { 'name': name_jp, 'abbr': abbr, 'ongoing': ongoing }
all_series.append(series)
# save series information
save_json("data.json", all_series)
for series in all_series:
# create directory for this series
series_dir = os.path.join(IMG_DIR, series['abbr'])
create_dir(series_dir)
current_list_url = LIST_URL + series['abbr'] + '.html'
while current_list_url:
list_soup = get_soup(get_content(current_list_url))
ul = list_soup.find('ul', class_='comicsList')
# ignore series that haven't released any volumes yet
if ul.li is None:
break
for dl in ul.select('li dl'):
# skip current volume if it isn't released yet
if '発売予定' in str(dl.p):
continue
# download cover
img_src = dl.img['src']
img_url = BASE_URL + img_src
file_path = os.path.join(series_dir, img_src.rsplit('/', 1)[1])
print(f'Downloading {file_path}...')
write_image(img_url, file_path)
# get url for next list of covers
next_list_url_tag = list_soup.find('span', class_='current_page').next_sibling.next_sibling
if next_list_url_tag is None:
break
else:
current_list_url = BASE_URL + next_list_url_tag['href']
if __name__ == '__main__':
shonenjump()
``` |
{
"source": "JohnKHW/atos_api",
"score": 3
} |
#### File: AI/nets/net_interface.py
```python
import os
from importlib import import_module
class NetModule(object):
def __init__(self, module_name, net_name, **kwargs):
self.module_name = module_name
self.net_name = net_name
self.m = import_module('nets.' + self.module_name)
def create_model(self, **kwargs):
"""
When using a model pretrained on ImageNet, pretrained_model_num_classes is 1000.
:param kwargs: keyword arguments forwarded to the network constructor
:return: the instantiated model
"""
_model = getattr(self.m, self.net_name)
model = _model(**kwargs)
return model
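# A minimal usage sketch; 'vgg' and 'vgg16' are hypothetical names of a
# module and factory function that would have to exist under nets/ for
# this to run:
#
#   net_module = NetModule('vgg', 'vgg16')
#   model = net_module.create_model(num_classes=10)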
``` |
{
"source": "johnkim76/photo-album-demo",
"score": 3
} |
#### File: src/db_info/my_sql_info.py
```python
from .db_info import DbInfo
import mysql.connector
class MySqlInfo(DbInfo):
db_type_label = 'MySql'
def __init__(self, host, port, database, user, password):
super().__init__()
self.host = host
self.port = port or 3306
self.database = database
self.user = user
self.password = password
self.connector = mysql.connector
def query_version(self):
return self.query("""SELECT VERSION()""", 0, 0)
def query_uptime(self):
return self.query("""SHOW STATUS LIKE 'Uptime'""", 0, 1) + " seconds"
def query_connection_count(self):
return self.query("""SHOW STATUS LIKE 'Connections'""", 0, 1)
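# A minimal usage sketch with hypothetical connection values. DbInfo.query
# (inherited, defined elsewhere) is assumed to execute the SQL and return
# the value at the given row/column indices, matching its use above:
#
#   info = MySqlInfo('localhost', 3306, 'photo_album', 'app_user', 'secret')
#   print(info.query_version())
#   print(info.query_uptime())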
``` |
{
"source": "John-Kimani/Password-Manager",
"score": 3
} |
#### File: John-Kimani/Password-Manager/password_test.py
```python
import unittest #unittest module
from password import Credential #imports Credential class for testing
from password import User #imports User class for testing
class TestUser(unittest.TestCase):
'''
Test class that helps define test cases for the User class behaviours
Args:
Testcase class that helps create test cases for User
'''
# test to check if user object is instantiated properly
def setUp(self):
'''
Set up method to run before each test case.
'''
self.new_profile = User('Vernice','<NAME>')
def test__init(self):
'''
Test case to test if User object is instantiated correctly
'''
self.assertEqual(self.new_profile.userName, 'Vernice')
self.assertEqual(self.new_profile.password, '<NAME>')
#end of class user test
#start of class credential test
class TestCredential(unittest.TestCase):
'''
Test class that helps define test cases for the credentials class behaviours
Args:
Testcase class that helps create test cases for Credential
'''
#test to check if credential object is instantiated properly
def setUp(self):
'''
Set up method to run before each test case.
'''
self.new_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98') #sample login details for a new pintrest account
def test__init(self):
'''
Test case to test if credential object is instantiated correctly
'''
self.assertEqual(self.new_account.accountName, 'Pintrest')
self.assertEqual(self.new_account.accountUsername,'kimperria')
self.assertEqual(self.new_account.accountPassword,'Aura-Dev98')
# save account
def test_save_account(self):
'''
Test case to check if the account object is saved into the credentials list
'''
self.new_account.save_account()
self.assertEqual(len(Credential.credentials_list),1)
def tearDown(self):
'''
Cleans up the credentials list after each test case
'''
Credential.credentials_list = []
# save multiple accounts
def test_save_multiple_accounts(self):
'''
Test case to check if users can save multiple accounts
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
self.assertEqual(len(Credential.credentials_list),2)
#delete account
def test_delete_account(self):
'''
Test case to check if user can delete an account
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
self.new_account.delete_account() #deletes account object
self.assertEqual(len(Credential.credentials_list),1)
# search account by username
def test_find_account_by_username(self):
'''
Test case to check if we can find an account by username and display information
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
found_account = Credential.find_by_accountUsername('kimperria')
self.assertEqual(found_account.accountUsername,test_account.accountUsername)
# check if account exist
def test_account_exist(self):
'''
Test case to check that account_exist returns a boolean for an existing account
'''
self.new_account.save_account()
test_account = Credential('Pintrest', 'kimperria', 'Aura-Dev98')
test_account.save_account()
account_exists = Credential.account_exist('kimperria')
self.assertTrue(account_exists)
# display available accounts
def test_display_all_accounts(self):
'''
Test case to check that display_accounts returns the list of all saved accounts
'''
self.assertEqual(Credential.display_accounts(),Credential.credentials_list)
# run the tests when this file is executed directly
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnkim-det/determined",
"score": 2
} |
#### File: determined/core/_context.py
```python
import logging
from typing import Any, Optional
import appdirs
import determined as det
from determined import core, tensorboard
from determined.common import constants, storage
from determined.common.api import certs
from determined.common.experimental.session import Session, get_max_retries_config
logger = logging.getLogger("determined.core")
class Context:
"""
``core.Context`` is a simple composition of several component APIs, with the following public
members:
- ``.checkpoint``, a :class:`~CheckpointContext`
- ``.distributed``, a :class:`~DistributedContext`
- ``.preempt``, a :class:`~PreemptContext`
- ``.searcher``, a :class:`~SearcherContext`
- ``.train``, a :class:`~TrainContext`
``core.Context`` is a tool for integrating arbitrary distributed tasks into a Determined
cluster.
You should always use :meth:`core.init() <determined.core.init>` instead of creating a
core.Context manually.
"""
def __init__(
self,
checkpoint: core.CheckpointContext,
distributed: Optional[core.DistributedContext] = None,
preempt: Optional[core.PreemptContext] = None,
train: Optional[core.TrainContext] = None,
searcher: Optional[core.SearcherContext] = None,
) -> None:
self.checkpoint = checkpoint
self.distributed = distributed or core.DummyDistributedContext()
self.preempt = preempt or core.DummyPreemptContext(self.distributed)
self.train = train or core.DummyTrainContext()
self.searcher = searcher or core.DummySearcherContext(self.distributed)
def __enter__(self) -> "Context":
self.preempt.start()
return self
def __exit__(self, typ: type, value: Exception, tb: Any) -> None:
self.preempt.close()
self.distributed.close()
# Detect some specific exceptions that are part of the user-facing API.
if isinstance(value, det.InvalidHP):
self.train.report_early_exit(core.EarlyExitReason.INVALID_HP)
logger.info("InvalidHP detected during Trial init, converting InvalidHP to exit(0)")
exit(0)
def _dummy_init(
*,
distributed: Optional[core.DistributedContext] = None,
# TODO(DET-6153): allow a Union[StorageManager, str] here.
storage_manager: Optional[storage.StorageManager] = None,
preempt_mode: core.PreemptMode = core.PreemptMode.WorkersAskChief,
) -> Context:
"""
Build a core.Context suitable for running off-cluster. This is normally called by init()
when it is detected that there is no ClusterInfo available, but can be invoked directly for
e.g. local test mode.
"""
distributed = distributed or core.DummyDistributedContext()
preempt = core.DummyPreemptContext(distributed, preempt_mode)
if storage_manager is None:
base_path = appdirs.user_data_dir("determined")
logger.info("no storage_manager provided; storing checkpoints in {base_path}")
storage_manager = storage.SharedFSStorageManager(base_path)
checkpoint = core.DummyCheckpointContext(distributed, storage_manager)
train = core.DummyTrainContext()
searcher = core.DummySearcherContext(distributed)
return Context(
distributed=distributed,
checkpoint=checkpoint,
preempt=preempt,
train=train,
searcher=searcher,
)
# The '*' is because we expect to add parameters to this method. To keep a backwards-compatible
# API, we either need to always append to the parameters (preserving order of positional parameters)
# or force users to always use kwargs. We haven't decided what the right positional arguments are
# yet, so the '*' lets us delay that decision until we are ready.
def init(
*,
distributed: Optional[core.DistributedContext] = None,
# TODO: figure out a better way to deal with checkpointing in the local training case.
storage_manager: Optional[storage.StorageManager] = None,
preempt_mode: core.PreemptMode = core.PreemptMode.WorkersAskChief,
) -> Context:
"""
``core.init()`` builds a :class:`core.Context <determined.core.Context>` for use with the Core
API.
Always use ``with core.init() as context`` instead of instantiating a ``core.Context`` directly.
Certain components of the Core API may be configured by passing arguments to ``core.init()``.
The only arg that is required is a ``DistributedContext``, and even that is only required
for multi-slot tasks.
All of your training must occur within the scope of the ``with core.init() as core_context``, as
there are resources necessary for training which start in the ``core.Context``'s ``__enter__``
method and must be cleaned up in its ``__exit__()`` method.
Arguments:
distributed (``core.DistributedContext``, optional): Passing a ``DistributedContext`` is
required for multi-slot training, but unnecessary for single-slot training. Defaults to
``None``.
preempt_mode (``core.PreemptMode``, optional): Configure the calling pattern for the
``core_context.preempt.should_preempt()`` method. See
:class:`~determined.core.PreemptMode` for more detail. Defaults to ``WorkersAskChief``.
storage_manager: Internal use only.
"""
info = det.get_cluster_info()
if info is None:
return _dummy_init(distributed=distributed, storage_manager=storage_manager)
# We are on the cluster.
cert = certs.default_load(info.master_url)
session = Session(info.master_url, None, None, cert, max_retries=get_max_retries_config())
if distributed is None:
if len(info.container_addrs) > 1 or len(info.slot_ids) > 1:
raise ValueError("you must provide a valid DistributedContext for a multi-slot task")
distributed = distributed or core.DummyDistributedContext()
preempt = core.PreemptContext(session, info.allocation_id, distributed, preempt_mode)
# At present, we only support tensorboards in Trial tasks.
tbd_mgr = None
tbd_writer = None
train = None
searcher = None
if info.task_type == "TRIAL":
# Prepare the tensorboard hooks.
tbd_mgr = tensorboard.build(
info.cluster_id,
str(info.trial.experiment_id),
str(info.trial.trial_id),
info.trial._config["checkpoint_storage"],
container_path=constants.SHARED_FS_CONTAINER_PATH,
)
tbd_writer = tensorboard.get_metric_writer()
train = core.TrainContext(
session,
info.trial.trial_id,
info.trial._trial_run_id,
info.trial.experiment_id,
tbd_mgr,
tbd_writer,
)
units = core._parse_searcher_units(info.trial._config)
searcher = core.SearcherContext(
session,
distributed,
info.trial.trial_id,
info.trial._trial_run_id,
info.allocation_id,
units,
)
if storage_manager is None:
storage_manager = storage.build(
info.trial._config["checkpoint_storage"],
container_path=constants.SHARED_FS_CONTAINER_PATH,
)
checkpoint = core.CheckpointContext(
distributed, storage_manager, session, info.task_id, info.allocation_id, tbd_mgr
)
else:
# TODO: support checkpointing for non-trial tasks.
if storage_manager is None:
base_path = appdirs.user_data_dir("determined")
logger.info("no storage_manager provided; storing checkpoints in {base_path}")
storage_manager = storage.SharedFSStorageManager(base_path)
checkpoint = core.DummyCheckpointContext(distributed, storage_manager)
return Context(
distributed=distributed,
checkpoint=checkpoint,
preempt=preempt,
train=train,
searcher=searcher,
)
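# A minimal usage sketch following the docstring above; the metric values
# are illustrative, and the calls are the ones defined in this package:
#
#   with init() as core_context:
#       core_context.train.report_training_metrics(
#           steps_completed=100, metrics={"loss": 0.25}
#       )
#       if core_context.preempt.should_preempt():
#           pass  # save state and exit early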
```
#### File: determined/core/_train.py
```python
import enum
import logging
from typing import Any, Dict, List, Optional, Set
import determined as det
from determined import tensorboard
from determined.common.api import errors
from determined.common.experimental.session import Session
logger = logging.getLogger("determined.core")
class EarlyExitReason(enum.Enum):
INVALID_HP = "EXITED_REASON_INVALID_HP"
# This is generally unnecessary; just exit early.
USER_REQUESTED_STOP = "EXITED_REASON_USER_REQUESTED_STOP"
class TrainContext:
"""
``TrainContext`` gives access to report training and validation metrics to the Determined master
during trial tasks.
"""
def __init__(
self,
session: Session,
trial_id: int,
run_id: int,
exp_id: int,
tbd_mgr: Optional[tensorboard.TensorboardManager],
tbd_writer: Optional[tensorboard.BatchMetricWriter],
) -> None:
self._session = session
self._trial_id = trial_id
self._run_id = run_id
self._exp_id = exp_id
self._tbd_mgr = tbd_mgr
self._tbd_writer = tbd_writer
def set_status(self, status: str) -> None:
"""
Report a short user-facing string that the WebUI can render to indicate what a trial is
working on.
"""
body = {"state": status}
logger.debug(f"set_status({status})")
self._session.post(f"/api/v1/trials/{self._trial_id}/runner/metadata", json=body)
def _get_last_validation(self) -> Optional[int]:
# This is needed by the workload sequencer, but it is not generally stable, because it is
# easy to call this before reporting any metrics. If your last checkpoint was older than
# your last validation, then the value you get from this function might be higher before
# you report metrics than after (since metrics get archived on first report of new metrics,
# not on trial restart). However, this bug does not happen to affect the workload sequencer
# because of the workload sequencer's very specific use of this function.
r = self._session.get(f"/api/v1/trials/{self._trial_id}")
val = r.json()["trial"].get("latestValidation") or {}
steps_completed = val.get("totalBatches")
logger.debug(f"_get_last_validation() -> {steps_completed}")
return steps_completed
def report_training_metrics(
self,
steps_completed: int,
metrics: Dict[str, Any],
batch_metrics: Optional[List[Dict[str, Any]]] = None,
) -> None:
"""
Report training metrics to the master.
You can include a list of ``batch_metrics``. Batch metrics are not shown in the WebUI
but may be accessed from the master using the CLI for post-processing.
"""
body = {
"trial_run_id": self._run_id,
"steps_completed": steps_completed,
"metrics": metrics,
}
if batch_metrics is not None:
body["batch_metrics"] = batch_metrics
logger.info(
f"report_training_metrics(steps_completed={steps_completed}, metrics={metrics})"
)
self._session.post(
f"/api/v1/trials/{self._trial_id}/training_metrics",
data=det.util.json_encode(body),
)
# Also sync tensorboard.
if self._tbd_writer and self._tbd_mgr:
self._tbd_writer.on_train_step_end(steps_completed, metrics, batch_metrics)
self._tbd_mgr.sync()
def _get_serializable_metrics(self, metrics: Dict[str, Any]) -> Set[str]:
serializable_metrics = set()
non_serializable_metrics = set()
# In the case of trial implementation bugs, validation metric functions may return None.
# Immediately fail any trial that encounters a None metric.
for metric_name, metric_value in metrics.items():
if metric_value is None:
raise RuntimeError(
"Validation metric '{}' returned "
"an invalid scalar value: {}".format(metric_name, metric_value)
)
if isinstance(metric_value, (bytes, bytearray)):
non_serializable_metrics.add(metric_name)
else:
serializable_metrics.add(metric_name)
if len(non_serializable_metrics):
logger.warning(
"Removed non serializable metrics: %s", ", ".join(non_serializable_metrics)
)
return serializable_metrics
def report_validation_metrics(
self,
steps_completed: int,
metrics: Dict[str, Any],
) -> None:
"""
Report validation metrics to the master.
Note that for hyperparameter search, this is independent of the need to report the searcher
metric using ``SearcherOperation.report_completed()`` in the Searcher API.
"""
serializable_metrics = self._get_serializable_metrics(metrics)
reportable_metrics = {k: metrics[k] for k in serializable_metrics}
body = {
"trial_run_id": self._run_id,
"steps_completed": steps_completed,
"metrics": reportable_metrics,
}
logger.info(
f"report_validation_metrics(steps_completed={steps_completed}, metrics={metrics})"
)
self._session.post(
f"/api/v1/trials/{self._trial_id}/validation_metrics",
data=det.util.json_encode(body),
)
# Also sync tensorboard (all metrics, not just json-serializable ones).
if self._tbd_writer and self._tbd_mgr:
self._tbd_writer.on_validation_step_end(steps_completed, metrics)
self._tbd_mgr.sync()
def report_early_exit(self, reason: EarlyExitReason) -> None:
"""
Report an early exit reason to the Determined master.
Currently, the only meaningful value to report is ``EarlyExitReason.INVALID_HP``, which is
reported automatically when ``core.Context.__exit__()`` detects an exception of type
``det.InvalidHP``.
"""
body = {"reason": EarlyExitReason(reason).value}
logger.info(f"report_early_exit({reason})")
r = self._session.post(
f"/api/v1/trials/{self._trial_id}/early_exit",
data=det.util.json_encode(body),
)
if r.status_code == 400:
logger.warn("early exit has already been reported for this trial, ignoring new value")
def get_experiment_best_validation(self) -> Optional[float]:
"""
Get the best reported validation metric reported so far, across the whole experiment.
The returned value is the highest or lowest reported validation metric value, using the
``searcher.metric`` field of the experiment config as the key and
``searcher.smaller_is_better`` for the comparison.
"""
logger.debug("get_experiment_best_validation()")
try:
r = self._session.get(
f"/api/v1/experiments/{self._exp_id}/searcher/best_searcher_validation_metric"
)
except errors.NotFoundException:
# 404 means 'no validations yet'.
return None
return float(r.json()["metric"])
class DummyTrainContext(TrainContext):
def __init__(self) -> None:
pass
def set_status(self, status: str) -> None:
logger.info(f"status: {status}")
def _get_last_validation(self) -> Optional[int]:
return None
def report_training_metrics(
self,
steps_completed: int,
metrics: Dict[str, Any],
batch_metrics: Optional[List[Dict[str, Any]]] = None,
) -> None:
logger.info(
f"report_training_metrics(steps_completed={steps_completed}, metrics={metrics})"
)
logger.debug(
f"report_training_metrics(steps_completed={steps_completed},"
f" batch_metrics={batch_metrics})"
)
def report_validation_metrics(self, steps_completed: int, metrics: Dict[str, Any]) -> None:
serializable_metrics = self._get_serializable_metrics(metrics)
metrics = {k: metrics[k] for k in serializable_metrics}
logger.info(
f"report_validation_metrics(steps_completed={steps_completed} metrics={metrics})"
)
def report_early_exit(self, reason: EarlyExitReason) -> None:
logger.info(f"report_early_exit({reason})")
def get_experiment_best_validation(self) -> Optional[float]:
return None
```
#### File: determined/tensorboard/gcs.py
```python
import logging
import os
from pathlib import Path
from typing import Any, Optional
from determined.common import util
from determined.common.storage.s3 import normalize_prefix
from determined.tensorboard import base
class GCSTensorboardManager(base.TensorboardManager):
"""
Store and load tf event logs from gcs.
Authentication is currently only supported via the "Application
Default Credentials" method in GCP [1]. Typical configuration:
ensure your VM runs in a service account that has sufficient
permissions to read/write/delete from the GCS bucket where
checkpoints will be stored (this only works when running in GCE).
"""
def __init__(self, bucket: str, prefix: Optional[str], *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
import google.cloud.storage
self.client = google.cloud.storage.Client()
self.bucket = self.client.bucket(bucket)
self.prefix = normalize_prefix(prefix)
def get_storage_prefix(self, storage_id: Path) -> str:
return os.path.join(self.prefix, storage_id)
@util.preserve_random_state
def sync(self) -> None:
for path in self.to_sync():
blob_name = self.sync_path.joinpath(path.relative_to(self.base_path))
to_path = self.get_storage_prefix(blob_name)
blob = self.bucket.blob(to_path)
logging.debug(f"Uploading {path} to GCS: {to_path}")
blob.upload_from_filename(str(path))
def delete(self) -> None:
prefix_path = self.get_storage_prefix(self.sync_path)
self.bucket.delete_blobs(blobs=list(self.bucket.list_blobs(prefix=prefix_path)))
``` |
{
"source": "johnkimu/Dynamics365FO-AppChecker",
"score": 3
} |
#### File: Python/extractor/tests.py
```python
import unittest
import ast
from xml.etree import ElementTree
import xmlnodegenerator
# The most up to date description of the AST is
# https://docs.python.org/3.6/library/ast.html
def createTree(source: str):
tree = ast.parse(source)
generator = xmlnodegenerator.XmlNodeGenerator(source)
root = generator.visitModule(tree)
return root
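# For quick orientation, the XML produced by createTree can be dumped
# directly; the element names (Module, Statements, ...) are the ones the
# assertions below rely on:
#
#   root = createTree("pass")
#   print(ElementTree.tostring(root))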
# Statements
class ClassDefinitionTest(unittest.TestCase):
def testClassWithExtension(self):
root = createTree('''class MyClass(object):
def foo(self, arg: str):
pass''')
classDefinitionNode = root.find("Statements/Class[@Name='MyClass']")
assert classDefinitionNode != None
extension = classDefinitionNode.find("Bases/Name[@Id='object']")
assert extension != None
def testClassNoExtension(self):
root = createTree('''class MyClass:
def foo():
pass''')
classDefinitionNode = root.find("Statements/Class[@Name='MyClass']")
assert classDefinitionNode != None
extension = root.find("Statements/Class/Bases/*")
assert extension is None
def testClassWithAttributedExtension(self):
root = createTree('''class FuncCallVisitor(ast.NodeVisitor):
def foo():
pass''')
classDefinitionNode = root.find("Statements/Class[@Name='FuncCallVisitor']")
assert classDefinitionNode != None
extension = classDefinitionNode.find("Bases/Attribute[@Id='NodeVisitor']/Name[@Id='ast']")
assert extension != None
def testClassWithComments(self):
root = createTree('''class Bar:
'This is a comment'
def foo():
pass''')
classDefinitionNode = root.find("Statements/Class[@Name='Bar']")
assert classDefinitionNode != None
classDefinitionNode = root.find("Statements/Class[@Comment='This is a comment']")
assert classDefinitionNode != None
class FunctionDefTest(unittest.TestCase):
def testFunctionDefNoArgs(self):
root = createTree('''def foo(): pass''')
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
# There are no parameters:
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']/Arguments")
assert functionDefinitionNode != None
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']/Arguments/*")
assert functionDefinitionNode is None
def testFunctionDefWithComment(self):
root = createTree('''
def foo():
'Interesting function'
pass''')
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Comment='Interesting function']")
assert functionDefinitionNode != None
def testFunctionDefSimpleArgs(self):
root = createTree('''def foo(self, banana): pass''')
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
# There are no parameters:
argumentsNode = functionDefinitionNode.find("Arguments")
assert argumentsNode != None
argumentNode = argumentsNode.find("Argument[@Name='self']")
assert argumentNode != None
argumentNode = argumentsNode.find("Argument[@Name='banana']")
assert argumentNode != None
def testFunctionDefNoArgsWithReturn(self):
root = createTree('''def foo() ->None: pass''')
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
# There are no parameters:
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']/Arguments")
assert functionDefinitionNode != None
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']/Arguments/*")
assert functionDefinitionNode is None
# The return annotation is 'None'
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']/Returns/NameConstant[@Name='None']")
assert functionDefinitionNode != None
def testFunctionDefDefaultArgs(self):
root = createTree('''def foo(self, banana=1, orange=""): pass''')
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
# There are no parameters:
argumentsNode = functionDefinitionNode.find("Arguments")
assert argumentsNode != None
argumentNode = argumentsNode.find("Argument[@Name='self']")
assert argumentNode != None
argumentNode = argumentsNode.find("Argument[@Name='banana']/NumberLiteral[@Value='1']")
assert argumentNode != None
argumentNode = argumentsNode.find("Argument[@Name='orange']/StringLiteral[@Value='']")
assert argumentNode != None
def testFunctionDefStarredArgs(self):
root = createTree('''def foo(self, *arg1, **arg2): pass''')
# In this syntax, *arg1 collects any extra positional arguments into a
# tuple, while **arg2 collects the extra keyword arguments into a
# dictionary.
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
argumentsNode = functionDefinitionNode.find("Arguments")
assert argumentsNode != None
argumentNode = argumentsNode.find("Argument[@Name='self']")
assert argumentNode != None
argumentNode = argumentsNode.find("Vararg/Argument[@Name='arg1']")
assert argumentNode != None
argumentNode = argumentsNode.find("KeywordArg/Argument[@Name='arg2']")
assert argumentNode != None
def testFunctionDefAnnotatedArgs(self):
root = createTree('''def foo(self: int, banana: str) -> str: pass''')
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
# There are no parameters:
argumentsNode = functionDefinitionNode.find("Arguments")
assert argumentsNode != None
argumentNode = argumentsNode.find("Argument[@Name='self']")
assert argumentNode != None
parameterAnnotationNode = argumentNode.find("Annotation")
assert parameterAnnotationNode != None
argumentNode = argumentsNode.find("Argument[@Name='banana']")
assert argumentNode != None
parameterAnnotationNode = argumentNode.find("Annotation")
assert parameterAnnotationNode != None
functionDefinitionNode = root.find("Statements/FunctionDefinition[@Name='foo']/Returns/Name[@Id='str']")
assert functionDefinitionNode != None
def testAsyncFunctionDefWithNoneReturn(self):
root = createTree('''async def foo() -> None: pass''')
functionDefinitionNode = root.find("Statements/AsyncFunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
# There are no parameters:
argumentsNode = functionDefinitionNode.find("Arguments")
assert argumentsNode != None
argumentsNode = functionDefinitionNode.find("Arguments/*")
assert argumentsNode is None
# The return annotation is 'None'
functionDefinitionNode = root.find("Statements/AsyncFunctionDefinition[@Name='foo']/Returns/NameConstant[@Name='None']")
assert functionDefinitionNode != None
def testAsyncFunctionDefWithPredefReturn(self):
root = createTree('''async def foo() -> str: pass''')
functionDefinitionNode = root.find("Statements/AsyncFunctionDefinition[@Name='foo']")
assert functionDefinitionNode != None
# There are no parameters:
argumentsNode = functionDefinitionNode.find("Arguments")
assert argumentsNode != None
functionDefinitionNode = root.find("Statements/AsyncFunctionDefinition[@Name='foo']/Returns[@Name='str']")
assert functionDefinitionNode != None
class ImportTest(unittest.TestCase):
def runTest(self):
source = "import ast"
root = createTree(source)
# Check that the tree contains the a Module element.
assert root.tag == 'Module'
importNode = root.find("Statements/Import")
assert importNode != None
# assert s == source
class ImportFromTest(unittest.TestCase):
def testSimpleImportFrom(self):
root = createTree("from MyModule import Banana")
importFromNode = root.find("Statements/ImportFrom[@Module='MyModule']")
assert importFromNode != None
namesNode = importFromNode.find("Names/Name[@Id='Banana']")
assert namesNode != None
def testMultipleImportFrom(self):
root = createTree("from MyModule import Banana as b, Orange as s")
importFromNode = root.find("Statements/ImportFrom[@Module='MyModule']")
assert importFromNode != None
namesNode = importFromNode.find("Names/Name[@Id='Banana']")
assert namesNode != None
namesNode = importFromNode.find("Names/Name[@AsName='b']")
assert namesNode != None
namesNode = importFromNode.find("Names/Name[@Id='Orange']")
assert namesNode != None
namesNode = importFromNode.find("Names/Name[@AsName='s']")
assert namesNode != None
def testImportFromAll(self):
root = createTree("from MyModule import *")
importFromNode = root.find("Statements/ImportFrom[@Module='MyModule']")
assert importFromNode != None
namesNode = importFromNode.find("Names/Name[@Id='*']")
assert namesNode != None
def testImportWithParentDirSpec(self):
root = createTree("from .. import Banana")
assert root != None
node = root.find("Statements/ImportFrom/Names/Name[@Id='Banana']")
assert node != None
class WhileTest(unittest.TestCase):
def testSimpleWhile(self):
root = createTree("while 1: pass")
whileStatement = root.find("Statements/While")
assert whileStatement
testNode = whileStatement.find("NumberLiteral")
assert testNode != None
bodyNode = root.find("Statements/While/Statements[1]/Pass")
assert bodyNode != None
orelseNode = root.find("Statements/While/Statements[2]/*")
assert orelseNode is None
def testWhileWithOrElse(self):
root = createTree('''while 1:
pass
else: return''')
whileStatement = root.find("Statements/While")
assert whileStatement
testNode = whileStatement.find("NumberLiteral")
assert testNode != None
bodyNode = root.find("Statements/While/Statements[1]/Pass")
assert bodyNode != None
orelseNode = root.find("Statements/While/Statements[2]/Return")
assert orelseNode != None
class ForTest(unittest.TestCase):
def testSimpleFor(self):
root = createTree("for i in foo(10, banana=4): pass")
forStatement = root.find("Statements/For")
assert forStatement
# TODO check the name (i in this case)
# TODO Check the expression
bodyNode = forStatement.find("Statements[1]/Pass")
assert bodyNode != None
orelseNode = forStatement.find("Statements[2]/*")
assert orelseNode is None
def testAsyncFor(self):
root = createTree('''async def f():
async for i in foo(10, banana=4):
pass''')
forStatement = root.find("Statements/AsyncFunctionDefinition/Statements/AsyncFor")
assert forStatement
bodyNode = forStatement.find("Statements/Pass")
assert bodyNode != None
orelseNode = forStatement.find("Statements[2]/*")
assert orelseNode is None
class TryExceptionTest(unittest.TestCase):
def testTryExceptionSingle(self):
root = createTree('''try:
return 1
except IOError:
return 2''')
node = root.find("Statements/TryExcept/Statements/Return/NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Statements/Return/NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Name[@Id='IOError']")
assert node != None
def testTryExceptionSingleWithAlias(self):
root = createTree('''try:
return 1
except IOError as name:
return 2''')
node = root.find("Statements/TryExcept/Handler[@Alias='name']")
assert node != None
def testTryExceptionMultiple(self):
root = createTree('''try:
return 1
except (IOError, ValueError):
return 2''')
node = root.find("Statements/TryExcept/Statements/Return/NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Statements/Return/NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Tuple/Name[@Id='IOError']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Tuple/Name[@Id='ValueError']")
assert node != None
def testTryExceptionWithCatchAll(self):
root = createTree('''try:
return 1
except:
return 2''')
node = root.find("Statements/TryExcept/Statements/Return/NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Statements/Return/NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Name")
assert node is None
def testTryExceptionWithElse(self):
root = createTree('''try:
return 1
except:
return 2
else:
return 3''')
node = root.find("Statements/TryExcept/Statements[1]/Return/NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Name")
assert node is None
node = root.find("Statements/TryExcept/Handler/Statements/Return/NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/TryExcept/Statements[2]/Return/NumberLiteral[@Value='3']")
assert node != None
def testTryExceptionElseAndFinally(self):
root = createTree('''try:
return 1
except:
return 2
else:
return 3
finally:
return 4''')
node = root.find("Statements/TryExcept/Statements[1]/Return/NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/TryExcept/Handler/Name")
assert node is None
node = root.find("Statements/TryExcept/Handler/Statements/Return/NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/TryExcept/Statements[2]/Return/NumberLiteral[@Value='3']")
assert node != None
node = root.find("Statements/TryExcept/Statements[3]/Return/NumberLiteral[@Value='4']")
assert node != None
class RaiseTest(unittest.TestCase):
def testRaiseWithoutArguments(self):
root = createTree("raise")
node = root.find("Statements/Raise")
assert node != None
def testRaiseWithSingleArgument(self):
root = createTree("raise IOError()")
node = root.find("Statements/Raise/Call")
assert node != None
def testRaiseWithTwoArguments(self):
root = createTree("raise IOError from original")
node = root.find("Statements/Raise")
assert node != None
class ReturnTests(unittest.TestCase):
def testReturnNoValue(self):
root = createTree("return")
        # Check that the tree contains a Return element.
assert root.findall("Statements/Return")
# ... without any subtree for the expression
assert not root.findall("Statements/Return/*")
def testReturnWithValue(self):
root = createTree("return 1")
        # Check that the tree contains a Return element.
returnNode = root.find("Statements/Return")
assert returnNode != None
# ... with a subtree for the expression
returnNodeWithSubtree = root.findall("Statements/Return/NumberLiteral[@Value='1']")
assert returnNodeWithSubtree != None
class PassTest(unittest.TestCase):
def runTest(self):
root = createTree("pass")
passNode = root.find("Statements/Pass")
assert passNode != None
class BreakTest(unittest.TestCase):
def runTest(self):
root = createTree("break")
node = root.find("Statements/Break")
assert node != None
class ContinueTest(unittest.TestCase):
def runTest(self):
root = createTree("continue")
        # Check that the tree contains a Continue element.
node = root.find("Statements/Continue")
assert node != None
class WithTest(unittest.TestCase):
def testWithSimple(self):
root = createTree('''with open("x.txt") as f: pass''')
node = root.find("Statements/With")
assert node != None
node = root.find("Statements/With/Item/Name[@Id='f']")
assert node != None
node = root.find("Statements/With/Item/Call/Name[@Id='open']")
assert node != None
node = root.find("Statements/With/Statements/Pass")
assert node != None
def testWithAsync(self):
root = createTree('''async def foo():
async with open("x.txt") as f:
pass''')
withNode = root.find("Statements/AsyncFunctionDefinition/Statements/AsyncWith")
assert withNode != None
node = withNode.find("Item/Name[@Id='f']")
assert node != None
node = withNode.find("Item/Call/Name[@Id='open']")
assert node != None
node = withNode.find("Statements/Pass")
assert node != None
class IfTest(unittest.TestCase):
def runTest(self):
root = createTree('''if 1:
return 10
else:
return 20''')
assert root.find("Statements/If")
ifPart = root.find("Statements/If/Statements/Return/NumberLiteral[@Value='10']")
elsePart = root.find("Statements/If/Statements/Return/NumberLiteral[@Value='20']")
        # ... and check that both branches were parsed
assert ifPart != None, "If part was not found"
assert elsePart != None, "Else part not found"
class AssertTest(unittest.TestCase):
def testAssertSimple(self):
root = createTree("assert a is None")
node = root.find("Statements/Assert/Is")
assert node != None
def testAssertWithExplanation(self):
root = createTree('''assert a is None, "a should not be empty"''')
node = root.find("Statements/Assert/Is")
assert node != None
node = root.find("Statements/Assert/StringLiteral")
assert node != None
class AssignTest(unittest.TestCase):
def testAssignSimple(self):
root = createTree("a = 3")
node = root.find("Statements/Assign/NumberLiteral[@Value='3']")
assert node != None
def testAssignMultiple(self):
root = createTree("[a, b] = 3, 4")
node = root.find("Statements/Assign/List/Name[@Id='a']")
assert node != None
node = root.find("Statements/Assign/List/Name[@Id='b']")
assert node != None
node = root.find("Statements/Assign/Tuple/NumberLiteral[@Value='3']")
assert node != None
node = root.find("Statements/Assign/Tuple/NumberLiteral[@Value='4']")
assert node != None
def testAugmentedAssignPlus(self):
root = createTree("a += 3")
node = root.find("Statements/AugmentedAssign[@op='Plus']/NumberLiteral[@Value='3']")
assert node != None
def testAugmentedAssignMinus(self):
root = createTree("a -= 3")
node = root.find("Statements/AugmentedAssign[@op='Minus']/NumberLiteral[@Value='3']")
assert node != None
def testAugmentedAssignMultiply(self):
root = createTree("a *= 3")
node = root.find("Statements/AugmentedAssign[@op='Multiply']/NumberLiteral[@Value='3']")
assert node != None
def testAugmentedAssignDivide(self):
root = createTree("a /= 3")
node = root.find("Statements/AugmentedAssign[@op='Divide']/NumberLiteral[@Value='3']")
assert node != None
def testAugmentedAssignMod(self):
root = createTree("a %= 3")
node = root.find("Statements/AugmentedAssign[@op='Mod']/NumberLiteral[@Value='3']")
assert node != None
    def testAugmentedAssignPower(self):
root = createTree("a **= 3")
node = root.find("Statements/AugmentedAssign[@op='Power']/NumberLiteral[@Value='3']")
assert node != None
    def testAugmentedAssignLeftShift(self):
root = createTree("a <<= 3")
node = root.find("Statements/AugmentedAssign[@op='LeftShift']/NumberLiteral[@Value='3']")
assert node != None
    def testAugmentedAssignRightShift(self):
root = createTree("a >>= 3")
node = root.find("Statements/AugmentedAssign[@op='RightShift']/NumberLiteral[@Value='3']")
assert node != None
    def testAugmentedAssignAnd(self):
root = createTree("a &= 3")
node = root.find("Statements/AugmentedAssign[@op='And']/NumberLiteral[@Value='3']")
assert node != None
    def testAugmentedAssignXor(self):
root = createTree("a ^= 3")
node = root.find("Statements/AugmentedAssign[@op='Xor']/NumberLiteral[@Value='3']")
assert node != None
def testAugmentedAssignFloorDiv(self):
root = createTree("a //= 3")
node = root.find("Statements/AugmentedAssign[@op='FloorDiv']/NumberLiteral[@Value='3']")
assert node != None
def testAnnotatedAssignment(self): # TODO
root = createTree("v: int = 44")
assert root != None
node = root.find("Statements/AnnotatedAssign")
assert node != None
node = root.find("Statements/AnnotatedAssign[@Simple='1']")
assert node != None
node = root.find("Statements/AnnotatedAssign/*[1][@Id='v']")
assert node != None
node = root.find("Statements/AnnotatedAssign/*[2][@Id='int']")
assert node != None
node = root.find("Statements/AnnotatedAssign/NumberLiteral[@Value='44']")
assert node != None
class DelTest(unittest.TestCase):
def testDel(self):
root = createTree("del a")
node = root.find("Statements/Del/Name[@Id='a']")
assert node != None
class ScopeTest(unittest.TestCase):
def testGlobal(self):
root = createTree("global a,b")
assert root != None, "Unable to parse the global statement"
node = root.find("Statements/Global")
assert node != None, "Did not find global statement"
# Check the Identifier children
node = root.find("Statements/Global/Identifier[@Name='a']")
assert node != None, "Did not find first global identifier"
node = root.find("Statements/Global/Identifier[@Name='b']")
assert node != None, "Did not find second global identifier"
def testNonlocal(self):
root = createTree("nonlocal a,b")
assert root != None, "Unable to parse the nonlocal statement"
node = root.find("Statements/Nonlocal")
assert node != None, "Did not find nonlocal statement"
# Check the Identifier children
node = root.find("Statements/Nonlocal/Identifier[@Name='a']")
assert node != None, "Did not find first nonlocal identifier"
node = root.find("Statements/Nonlocal/Identifier[@Name='b']")
assert node != None, "Did not find second nonlocal identifier"
# Expression tests
class LiteralTest(unittest.TestCase):
def testJoinedStr(self):
root = createTree('''f"{1}{2}"''')
assert root != None, "No joined string found"
node = root.find("Statements/ExpressionStatement/JoinedStr")
assert node != None, "Tree does not contain joined string"
node = root.find("Statements/ExpressionStatement/JoinedStr/FormattedValue/NumberLiteral[@Value='1']")
assert node != None, "First number literal not found"
node = root.find("Statements/ExpressionStatement/JoinedStr/FormattedValue/NumberLiteral[@Value='2']")
assert node != None, "Second number literal not found"
class AddTest(unittest.TestCase):
def runTest(self):
root = createTree('''1 + 2''')
node = root.find("Statements/ExpressionStatement/Add")
assert node != None, "No add node found"
class SubtractTest(unittest.TestCase):
def runTest(self):
root = createTree('''1 - 2''')
node = root.find("Statements/ExpressionStatement/Subtract")
assert node != None, "No subtract node found"
class MultiplyTest(unittest.TestCase):
def runTest(self):
root = createTree('''1 * 2''')
node = root.find("Statements/ExpressionStatement/Multiply")
assert node != None, "No multiply node found"
class DivideTest(unittest.TestCase):
def runTest(self):
root = createTree('''1 / 2''')
node = root.find("Statements/ExpressionStatement/Divide")
assert node != None, "No divide node found"
class FloorDivideTest(unittest.TestCase):
def runTest(self):
root = createTree('''1 // 2''')
node = root.find("Statements/ExpressionStatement/FloorDivide")
assert node != None, "No floor divide node found"
class PowerTest(unittest.TestCase):
def runTest(self):
root = createTree('''2 ** 3''')
node = root.find("Statements/ExpressionStatement/Power")
assert node != None, "No power node found"
class BitAndTest(unittest.TestCase):
def runTest(self):
root = createTree('''2 & 3''')
node = root.find("Statements/ExpressionStatement/BitwiseAnd")
assert node != None, "No bitwise and node found"
class BitOrTest(unittest.TestCase):
def runTest(self):
root = createTree('''2 | 3''')
node = root.find("Statements/ExpressionStatement/BitwiseOr")
assert node != None, "No bitwise or node found"
class BitXorTest(unittest.TestCase):
def runTest(self):
root = createTree('''2 ^ 3''')
node = root.find("Statements/ExpressionStatement/BitwiseXor")
assert node != None, "No bitwise xor node found"
class BitLeftShiftTest(unittest.TestCase):
def runTest(self):
root = createTree('''2 << 3''')
node = root.find("Statements/ExpressionStatement/LeftShift")
assert node != None, "No left shift node found"
class BitRightShiftTest(unittest.TestCase):
def runTest(self):
root = createTree('''2 >> 3''')
node = root.find("Statements/ExpressionStatement/RightShift")
assert node != None, "No right shift node found"
class MonadicOperatorTests(unittest.TestCase):
def testMonadicMinus(self):
root = createTree('''-1''')
node = root.find("Statements/ExpressionStatement/MonadicMinus")
assert node != None, "No monadic minus node found"
def testMonadicPlus(self):
root = createTree('''+1''')
node = root.find("Statements/ExpressionStatement/MonadicPlus")
assert node != None, "No monadic plus node found"
def testMonadicInvert(self):
root = createTree('''~1''')
node = root.find("Statements/ExpressionStatement/Invert")
assert node != None, "No monadic invert (~) node found"
def testMonadicNot(self):
root = createTree('''not True''')
node = root.find("Statements/ExpressionStatement/Not")
assert node != None, "No monadic not node found"
class LogicalOperatorTests(unittest.TestCase):
def testAndOperator(self):
root = createTree('''1 and 2''')
node = root.find("Statements/ExpressionStatement/And")
assert node != None
def testOrOperator(self):
root = createTree('''1 or 2''')
node = root.find("Statements/ExpressionStatement/Or")
assert node != None
class RelationalOperatorTests(unittest.TestCase):
def testRelationalEquals(self):
root = createTree('''1 == 2''')
node = root.find("Statements/ExpressionStatement/Equals/NumberLiteral[@Value='1']")
assert node != None, "Left node of == node not found"
node = root.find("Statements/ExpressionStatement/Equals/NumberLiteral[@Value='2']")
assert node != None, "Right node of == node not found"
def testRelationalNotEquals(self):
root = createTree('''1 != 2''')
node = root.find("Statements/ExpressionStatement/NotEquals/NumberLiteral[@Value='1']")
assert node != None, "Left node of != node not found"
node = root.find("Statements/ExpressionStatement/NotEquals/NumberLiteral[@Value='2']")
assert node != None, "Right node of != node not found"
def testRelationalGreaterThan(self):
root = createTree('''1 > 2''')
node = root.find("Statements/ExpressionStatement/GreaterThan/NumberLiteral[@Value='1']")
assert node != None, "Left node of > node not found"
node = root.find("Statements/ExpressionStatement/GreaterThan/NumberLiteral[@Value='2']")
assert node != None, "Right node of > node not found"
def testRelationalGreaterOrEqual(self):
root = createTree('''1 >= 2''')
node = root.find("Statements/ExpressionStatement/GreaterThanOrEqual/NumberLiteral[@Value='1']")
assert node != None, "Left node of >= node not found"
node = root.find("Statements/ExpressionStatement/GreaterThanOrEqual/NumberLiteral[@Value='2']")
assert node != None, "Right node of >= node not found"
def testRelationalLessThan(self):
root = createTree('''1 < 2''')
node = root.find("Statements/ExpressionStatement/LessThan/NumberLiteral[@Value='1']")
assert node != None, "Left node of < node not found"
node = root.find("Statements/ExpressionStatement/LessThan/NumberLiteral[@Value='2']")
assert node != None, "Right node of < node not found"
def testRelationalLessThanOrEqual(self):
root = createTree('''1 <= 2''')
node = root.find("Statements/ExpressionStatement/LessThanOrEqual/NumberLiteral[@Value='1']")
assert node != None, "Left node of <= node not found"
node = root.find("Statements/ExpressionStatement/LessThanOrEqual/NumberLiteral[@Value='2']")
assert node != None, "Right node of <= node not found"
def testRelationalBetween(self):
root = createTree('''1 < x < 2''')
node = root.find("Statements/ExpressionStatement/LessThan/NumberLiteral[@Value='1']")
assert node != None, "Left node of < node not found"
node = root.find("Statements/ExpressionStatement/LessThan/LessThan/Name[@Id='x']")
assert node != None, "Right node of < node not found"
node = root.find("Statements/ExpressionStatement/LessThan/LessThan/NumberLiteral[@Value='2']")
assert node != None, "Right node of < node not found"
class TernaryIfExpressionTests(unittest.TestCase):
def testIfExpression(self):
root = createTree("a if condition else b")
node = root.find("Statements/ExpressionStatement/Conditional")
assert node != None
assert len(node) == 3
node = root.find("Statements/ExpressionStatement/Conditional/Name[@Id='a']")
assert node != None
node = root.find("Statements/ExpressionStatement/Conditional/Name[@Id='condition']")
assert node != None
node = root.find("Statements/ExpressionStatement/Conditional/Name[@Id='b']")
assert node != None
class CallTest(unittest.TestCase):
def testCallDefaultAndPositionalArg(self):
root = createTree("foo(1,3,5, sep='.', end='<', flush=True)")
printNode = root.find("Statements/ExpressionStatement/Call/Name[@Id='foo']")
assert printNode != None
positionalParameters = root.findall('Statements/ExpressionStatement/Call/PositionalArgument')
assert len(positionalParameters) == 3
keywordParameters = root.findall('Statements/ExpressionStatement/Call/KeywordArgument')
assert len(keywordParameters) == 3
def testCallWithAttribute(self):
root = createTree("compile(ast.PyCF_ONLY_AST)")
attr = root.find("Statements/ExpressionStatement/Call/PositionalArgument/Attribute[@Id='PyCF_ONLY_AST']")
assert attr != None
name = attr.find("Name[@Id='ast']")
assert name != None
def testCallWithParameterLists(self):
root = createTree("Foo(*args, **kwargs)")
call = root.find("Statements/ExpressionStatement/Call");
assert call != None
positional = call.find("PositionalArgument/Starred/Name[@Id='args']")
assert positional != None
keyword = call.find("KeywordArgument/Name[@Id='kwargs']")
assert keyword != None
class SliceTest(unittest.TestCase):
def testSliceNone(self):
root = createTree("a[:]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/*")
assert node is None
node = slice.find("Upper/*")
assert node is None
node = slice.find("Step/*")
assert node is None
def testSliceOnlyLower(self):
root = createTree("a[1:]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/NumberLiteral[@Value='1']")
assert node != None
node = slice.find("Upper/*")
assert node is None
node = slice.find("Step/*")
assert node is None
def testSliceOnlyUpper(self):
root = createTree("a[:10]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/*")
assert node is None
node = slice.find("Upper/NumberLiteral[@Value='10']")
assert node != None
node = slice.find("Step/*")
assert node is None
def testSliceLowerAndUpper(self):
root = createTree("a[1:10]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/NumberLiteral[@Value='1']")
assert node != None
node = slice.find("Upper/NumberLiteral[@Value='10']")
assert node != None
node = slice.find("Step/*")
assert node is None
def testSliceNoneWithStep(self):
root = createTree("a[::1]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/*")
assert node is None
node = slice.find("Upper/*")
assert node is None
node = slice.find("Step/NumberLiteral[@Value='1']")
assert node != None
def testSliceOnlyLowerWithStep(self):
root = createTree("a[1::1]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/NumberLiteral[@Value='1']")
assert node != None
node = slice.find("Upper/*")
assert node is None
node = slice.find("Step/NumberLiteral[@Value='1']")
assert node != None
def testSliceOnlyUpperWithStep(self):
root = createTree("a[:10:1]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/*")
assert node is None
node = slice.find("Upper/NumberLiteral[@Value='10']")
assert node != None
node = slice.find("Step/NumberLiteral[@Value='1']")
assert node != None
def testSliceLowerAndUpperWithStep(self):
root = createTree("a[1:10:1]")
assert root != None
slice = root.find("Statements/ExpressionStatement/Subscript/Slice")
assert slice != None
node = slice.find("Lower/NumberLiteral[@Value='1']")
assert node != None
node = slice.find("Upper/NumberLiteral[@Value='10']")
assert node != None
node = slice.find("Step/NumberLiteral[@Value='1']")
assert node != None
def testSliceExtended(self):
root = createTree("a[0, 1:2, ...]")
assert root != None
node = root.find("Statements/ExpressionStatement/Subscript")
assert node != None
name = node.find("Name[@Id='a']")
assert name != None
ext = node.find("ExtSlice")
assert ext != None
firstIndex = ext.find("Index/NumberLiteral[@Value='0']")
assert firstIndex != None
firstSlice = ext.find("Slice/Lower/NumberLiteral[@Value='1']")
assert firstSlice != None
firstSlice = ext.find("Slice/Upper/NumberLiteral[@Value='2']")
assert firstSlice != None
ellipsis = ext.find("Index/Ellipsis")
assert ellipsis != None
class StarTest(unittest.TestCase):
def testStar(self):
root = createTree("a, *b = (1, 2, 3)")
assert root != None
assign = root.find("Statements/Assign")
assert assign != None
a = assign.find("Tuple/Name[@Id='a']")
assert a != None
b = assign.find("Tuple/Starred/Name[@Id='b']")
assert b != None
class ComprehensionTests(unittest.TestCase):
def _checkComprehension(self, comprehension):
name = comprehension.find("Name[@Id='x']")
assert name != None
function = comprehension.find("Call/Name[@Id='range']")
assert function != None
assert comprehension.find("Equals/NumberLiteral[@Value='0']") != None
def testListComprehension(self):
root = createTree("[x for x in range(10) if x % 2 == 0]")
listComprehension = root.find("Statements/ExpressionStatement/ListComprehension")
assert listComprehension != None
name = listComprehension.find("Name[@Id='x']")
assert name != None
comprehension = listComprehension.find("Comprehension")
assert comprehension != None
self._checkComprehension(comprehension)
def testSetComprehension(self):
root = createTree("{x for x in range(10) if x % 2 == 0}")
setComprehension = root.find("Statements/ExpressionStatement/SetComprehension")
assert setComprehension != None
name = setComprehension.find("Name[@Id='x']")
assert name != None
comprehension = setComprehension.find("Comprehension")
assert comprehension != None
self._checkComprehension(comprehension)
def testDictComprehension(self):
root = createTree("{x : chr(65+x) for x in range(10) if x % 2 == 0}")
dictComprehension = root.find("Statements/ExpressionStatement/DictComprehension")
assert dictComprehension != None
name = dictComprehension.find("Name[@Id='x']")
assert name != None
comprehension = dictComprehension.find("Comprehension")
assert comprehension != None
self._checkComprehension(comprehension)
def testGenerator(self):
root = createTree("(x for x in range(10) if x % 2 == 0)")
generator = root.find("Statements/ExpressionStatement/Generator")
assert generator != None
name = generator.find("Name[@Id='x']")
assert name != None
comprehension = generator.find("Comprehension")
assert comprehension != None
self._checkComprehension(comprehension)
class YieldTests(unittest.TestCase):
def testYield(self):
root = createTree("yield 1")
assert root != None
node = root.find("Statements/ExpressionStatement/Yield/NumberLiteral[@Value='1']")
assert node != None
def testYieldFrom(self):
root = createTree("yield from generator()")
assert root != None
node = root.find("Statements/ExpressionStatement/YieldFrom/Call/Name[@Id='generator']")
assert node != None
def testAwait(self):
root = createTree('''async def ping_local():
return await ping_server('192.168.1.1')''')
assert root != None
node = root.find("Statements/AsyncFunctionDefinition/Statements/Return/Await/Call/Name[@Id='ping_server']")
assert node != None
class NameConstantTests(unittest.TestCase):
def testNoneConstant(self):
root = createTree("None")
node = root.find("Statements/ExpressionStatement/NameConstant[@Name='None']")
assert root != None
def testTrueConstant(self):
root = createTree("True")
node = root.find("Statements/ExpressionStatement/NameConstant[@Name='True']")
assert root != None
def testFalseConstant(self):
root = createTree("False")
node = root.find("Statements/ExpressionStatement/NameConstant[@Name='False']")
assert root != None
class ListTests(unittest.TestCase):
def testEmptyList(self):
root = createTree("[]")
node = root.find("Statements/ExpressionStatement/List")
assert node != None
node = root.find("Statements/ExpressionStatement/List/*")
assert node is None
def testNonEmptyList(self):
root = createTree("[1, 2, 3]")
node = root.find("Statements/ExpressionStatement/List/NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/ExpressionStatement/List/NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/ExpressionStatement/List/NumberLiteral[@Value='3']")
assert node != None
class SetTests(unittest.TestCase):
# Note that {} designates an empty dict, not an empty set.
def testNonEmptySet(self):
root = createTree("{1, 2, 3}")
node = root.find("Statements/ExpressionStatement/Set/NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/ExpressionStatement/Set/NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/ExpressionStatement/Set/NumberLiteral[@Value='3']")
assert node != None
class DictTests(unittest.TestCase):
def testEmptyDict(self):
root = createTree("{}")
node = root.find("Statements/ExpressionStatement/Dict")
assert node != None
node = root.find("Statements/ExpressionStatement/Dict/*")
assert node is None
def testNonEmptyDict(self):
root = createTree("{'one': 1, 'two': 2, 'three': 3}")
node = root.find("Statements/ExpressionStatement/Dict/DictElement/StringLiteral[@Value='one']../NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/ExpressionStatement/Dict/DictElement/StringLiteral[@Value='two']../NumberLiteral[@Value='2']")
assert node != None
node = root.find("Statements/ExpressionStatement/Dict/DictElement/StringLiteral[@Value='three']../NumberLiteral[@Value='3']")
assert node != None
class TupleTests(unittest.TestCase):
def testEmptyTuple(self):
root = createTree("()")
node = root.find("Statements/ExpressionStatement/Tuple")
assert node != None
node = root.find("Statements/ExpressionStatement/Tuple/*")
assert node is None
def testNonEmptyTuple(self):
root = createTree("(1, 'one')")
node = root.find("Statements/ExpressionStatement/Tuple/StringLiteral[@Value='one']../NumberLiteral[@Value='1']")
assert node != None
node = root.find("Statements/ExpressionStatement/Tuple/NumberLiteral[@Value='1']")
assert node != None
class LambdaTests(unittest.TestCase):
def testLambdaWithArgs(self):
root = createTree("lambda x: x + 2")
assert root != None
lambdaNode = root.find("Statements/ExpressionStatement/Lambda")
assert lambdaNode != None
arg = lambdaNode.find("Arguments/Argument[@Name='x']")
assert arg != None
body = lambdaNode.find("Add")
assert body != None
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnkirtley/LeetCode",
"score": 4
} |
#### File: LeetCode/python/remove_dupes.py
```python
def remove_duplicates(arr):
    # Note: despite the name, this checks whether arr contains duplicates
    # (a set drops repeats, so a length mismatch means duplicates exist)
    unique = set(arr)
    if len(unique) != len(arr):
        print('True')
        return True
    else:
        print('False')
        return False
test_arr = [1, 1, 1, 1, 2, 3, 4, 5, 5, 5]
remove_duplicates(test_arr)
```
#### File: LeetCode/python/rotate_list.py
```python
def rotate_list(arr):
    # Rotate a square matrix 90 degrees clockwise: reversing the rows and
    # zipping the columns transposes the reversed matrix
    return list(zip(*arr[::-1]))
arr = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
print(rotate_list(arr))
```
#### File: LeetCode/python/valid_palindrome.py
```python
def valid_palindrome(s):  # renamed parameter to avoid shadowing the built-in str
    # Two-pointer scan that skips non-alphanumeric characters and compares
    # the rest case-insensitively
    left, right = 0, len(s) - 1
    while left < right:
        if not s[left].isalnum():
            left += 1
        elif not s[right].isalnum():
            right -= 1
        else:
            if s[left].lower() != s[right].lower():
                print('False')
                return False
            else:
                left += 1
                right -= 1
    print('True')
    return True
test = "A man, a plan, a canal: Panama"
valid_palindrome(test)
``` |
{
"source": "JohnKirwan/StimulPy",
"score": 3
} |
#### File: StimulPy/src/stimulus_create.py
```python
def stimulus_create(type, wl, va, ratio):
# STIMULUS_CREATE generates a 2D stimulus vector for phototaxis experiments of desired type and resolution
# Inputs:
# type - type of stimulus, e.g. 'bar'/'dog'/'square'/'log' (see below for full list)
# wl - "width" or "wavelength" of pattern in degrees (definition varies with stimulus type)
# va - visual angles over which to create the stimulus (in degrees)
# ratio - ratio between black and white areas; usually ratio of amplitudes, but definition depends on stimulus type
# Outputs:
    # outint - stimulus intensity over all angles in va (arbitrary units, normalize later)
# Pattern type options:
# 'bar' # a single black bar of width wl/2 on a white background
# 'singlegauss' # single dark gaussian (with half-width wl/2) on white background
# 'square' # square wave pattern: black stripe of width wl/2 flanked by two white stripes of half the width
# '2square' # square wavelet pattern (Haar wavelet): black stripe and white stripe, each of width wl/2, on grey background
# 'dog' # difference of Gaussians, central Gaussian with half-width wl/2
# 'dog2' # difference of Gaussians with different half-width ratio
# 'log' # Laplacian of Gaussian, v1
# 'mylog' # Laplacian of Gaussian, v2
    # 'cos' # piece-wise sine with wavelength wl (as used for Onychophorans); ratio ignored, always 0.5/1/0.5
    # 'sin' # continuous sine-wave pattern with wavelength wl
# 'multiple' # 7 progressively smaller cosines
    # example usage: outint = stimulus_create('dog', 10, np.linspace(-180, 180, 3601), 1)
# Written by <NAME> and <NAME>, Lund University, 2017
# import matplotlib.pyplot as plt #for sanity checking
import numpy as np
    ## set defaults
    # (note: 'type' shadows the built-in name; kept to preserve the original API)
    if ratio is None:
        ratio = 1
    if va is None:
        va = np.linspace(-180, 180, 3601)
    if wl is None:
        wl = 10
    if type is None:
        type = 'bar'
    outint = np.zeros(np.size(va)) # preallocate vector of pattern intensity
    # this vector is populated with the selected pattern's intensities below
## for all piece-wise defined patterns (e.g. square, triangle, sin), define the regions of black and white "stripes"
    sel_whiteleft = np.logical_and(np.greater_equal(va, -(ratio+1)*wl/2),
                                   np.less(va, -wl/2))
    sel_black = np.logical_and(np.greater_equal(va, -wl/2),
                               np.less(va, wl/2))
    sel_whiteright = np.logical_and(np.greater_equal(va, wl/2),
                                    np.less(va, (ratio+1)*wl/2))
    amps = [1, 1/ratio] # relative amplitudes of black (amps[0]) and white (amps[1]) areas
## main switch to create patterns
    ## single bar patterns
    if type.lower() == 'bar': # single black bar (of width wl/2) on white background
        sel_black = np.logical_and(np.greater_equal(va, -wl/4), np.less(va, wl/4))
        outint[sel_black] = -1
elif type.lower() == 'singlegauss': # single dark gaussian (with half-width wl/2) on white background
sigma = wl/2 / (2*np.sqrt(2*np.log(2))); # sigma of Gaussian
outint = - np.exp(-va**2/(2*sigma**2)); # was -.va^2
## balanced patterns (integral = 0)
elif type.lower() == 'square': # square wave pattern: black stripe of width wl/2 flanked by two white stripes of half the width
sel_whiteleft = np.logical_and(np.greater_equal(va,-(ratio+1)*wl/4),np.less(va, -wl/4));
sel_black = np.logical_and(np.greater_equal(va,-wl/4),np.less(va, wl/4));
sel_whiteright = np.logical_and(np.greater_equal(va, wl/4),np.less(va, (ratio+1)*wl/4));
        outint[sel_black] = -amps[0]
        outint[sel_whiteleft] = amps[1]
        outint[sel_whiteright] = amps[1]
elif type.lower() == '2square': # square wavelet pattern (Haar wavelet): black stripe and white stripe, each of width wl/2, on grey background
sel_white = np.logical_and(np.greater_equal(va,-wl/2),np.less(va, 0));
sel_black = np.logical_and(np.greater_equal(va,0),np.less(va, wl/2)); # logical of indices of va between 0 and wl/2
        outint[sel_black] = -amps[0] # apply negative amplitude where sel_black is True
        outint[sel_white] = amps[1]
elif type.lower() == 'dog': # difference of Gaussians pattern
fwhm1 = wl/2; # half-width of primary (black) Gaussian; also equals half the distance between white peaks
fwhm2 = wl/2 * (ratio+1); # half-width of secondary (white) Gaussian
sigma1 = fwhm1 / (2*np.sqrt(2*np.log(2))); # sigma of primary Gaussian
sigma2 = fwhm2 / (2*np.sqrt(2*np.log(2))); # sigma of secondary Gaussian
amp1 = -1;
amp2 = -amp1 / (fwhm2/fwhm1);
outint = amp1 * np.exp(-va**2/(2*sigma1**2)) + amp2 * np.exp(-va**2/(2*sigma2**2));
elif type.lower() == 'dog2': # difference of Gaussians pattern with different half-width ratio
fwhm1 = wl; # half-width of primary (black) Gaussian; also equals half the distance between white peaks
fwhm2 = wl * 1.2; # half-width of secondary (white) Gaussian
sigma1 = fwhm1 / (2*np.sqrt(2*np.log(2))); # sigma of primary Gaussian
sigma2 = fwhm2 / (2*np.sqrt(2*np.log(2))); # sigma of secondary Gaussian
amp1 = -1*6;
amp2 = -amp1 / (fwhm2/fwhm1);
outint = amp1 * np.exp(-va**2/(2*sigma1**2)) + amp2 * np.exp(-va**2/(2*sigma2**2));
    elif type.lower() == 'log': # Laplacian of Gaussian
        from scipy import ndimage
        sigma = wl/2 / (2*np.sqrt(2*np.log(2))) # sigma
        acc = np.median(np.diff(va)) # degrees/pixel
        # MATLAB's fspecial('log', ...) returns the LoG kernel itself; the
        # closest scipy equivalent is to filter a unit impulse (filtering the
        # zero-initialized outint would only return zeros)
        impulse = np.zeros(np.size(va))
        impulse[np.size(va)//2] = 1.
        outint = ndimage.gaussian_laplace(impulse, sigma/acc) # LoG kernel centered on va
elif type.lower() == 'mylog': # Laplacian of Gaussian
sigma = wl/2 / (2*np.sqrt(2*np.log(2)));
outint = -(1-va**2/sigma**2)*np.exp(-va**2/(2*sigma**2)); #was .*exp()
    elif type.lower() == 'cos': # piece-wise sine with wavelength wl (as used for Onychophorans)
        sel_whiteleft = np.logical_and(np.greater_equal(va, -3/4*wl), np.less(va, -1/4*wl))
        sel_black = np.logical_and(np.greater_equal(va, -1/4*wl), np.less(va, 1/4*wl))
        sel_whiteright = np.logical_and(np.greater_equal(va, 1/4*wl), np.less(va, 3/4*wl))
        amps = [1, 1/2] # relative amplitudes of black and white areas
        # np.count_nonzero replaces MATLAB's nnz()
        outint[sel_black] = -amps[0] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_black))))
        outint[sel_whiteleft] = amps[1] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_whiteleft))))
        outint[sel_whiteright] = amps[1] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_whiteright))))
    elif type.lower() == 'sin': # continuous sine-wave pattern with wavelength wl
outint = np.cos(np.deg2rad(va*360/wl));
elif type.lower() == 'multiple': # piece-wise cosine with 7 pieces (was intended to more closely simulate a continuous sine-wave)
        sel_white2left = np.logical_and(np.greater_equal(va, -(3*ratio+1)*wl/2), np.less(va, -(2*ratio+1)*wl/2))
        sel_black2left = np.logical_and(np.greater_equal(va, -(2*ratio+1)*wl/2), np.less(va, -(ratio+1)*wl/2))
        sel_black2right = np.logical_and(np.greater_equal(va, (1*ratio+1)*wl/2), np.less(va, (2*ratio+1)*wl/2))
        sel_white2right = np.logical_and(np.greater_equal(va, (2*ratio+1)*wl/2), np.less(va, (3*ratio+1)*wl/2))
        amps = [1, 4/3/ratio, 2/3/ratio, 2/6/ratio]
        outint[sel_black] = -amps[0] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_black))))
        outint[sel_whiteleft] = amps[1] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_whiteleft))))
        outint[sel_whiteright] = amps[1] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_whiteright))))
        outint[sel_black2left] = -amps[2] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_black2left))))
        outint[sel_black2right] = -amps[2] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_black2right))))
        outint[sel_white2left] = amps[3] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_white2left))))
        outint[sel_white2right] = amps[3] * np.cos(np.deg2rad(np.linspace(-90, 90, np.count_nonzero(sel_white2right))))
return(outint)
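# --- Illustrative usage (a minimal sketch, not part of the original module;
# the argument values below are assumptions chosen only for demonstration) ---
if __name__ == '__main__':
    import numpy as np
    va = np.linspace(-180, 180, 3601)
    outint = stimulus_create('dog', 10, va, 1) # difference-of-Gaussians stimulus
    outint = outint / np.max(np.abs(outint)) # normalize to unit peak amplitude
    print(outint.min(), outint.max())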
``` |
{
"source": "johnkitaoka/NEAT-jetpackjoyride",
"score": 3
} |
#### File: johnkitaoka/NEAT-jetpackjoyride/background.py
```python
import pygame
import os
WIN_WIDTH = 800
WIN_HEIGHT = 600
GEN = 0
SCORE = 0
BG_IMG = pygame.transform.scale(pygame.image.load(os.path.join("imgs", "jjbg.png")), (800,600))
class Background:
"""
Represnts the moving floor of the game
"""
global SCORE
VEL = 8
WIDTH = BG_IMG.get_width()
IMG = BG_IMG
def __init__(self, y):
"""
Initialize the object
:param y: int
:return: None
"""
self.y = y
self.x1 = 0
self.x2 = self.WIDTH
def move(self):
"""
move floor so it looks like its scrolling
:return: None
"""
inc = SCORE//3
self.x1 -= (self.VEL + inc)
self.x2 -= (self.VEL + inc)
if self.x1 + self.WIDTH < 0:
self.x1 = self.x2 + self.WIDTH
if self.x2 + self.WIDTH < 0:
self.x2 = self.x1 + self.WIDTH
def draw(self, win):
"""
Draw the floor. This is two images that move together.
:param win: the pygame surface/window
:return: None
"""
win.blit(self.IMG, (self.x1, self.y))
win.blit(self.IMG, (self.x2, self.y))
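# --- Minimal usage sketch (an illustration only; assumes pygame has been
# initialized and imgs/jjbg.png exists on disk) ---
# win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
# bg = Background(0)
# bg.move()    # scrolls both copies left; speed grows with the module-level SCORE
# bg.draw(win) # blits the two copies so the seam is never visible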
``` |
{
"source": "johnkit/hexrd",
"score": 2
} |
#### File: hexrd/cli/fit_grains.py
```python
import logging
import os
import sys
import numpy as np
from hexrd import config
from hexrd import constants as cnst
from hexrd import instrument
from hexrd.fitgrains import fit_grains
from hexrd.transforms import xfcapi
descr = 'Extracts G vectors, grain position and strain'
example = """
examples:
hexrd fit-grains configuration.yml
"""
def configure_parser(sub_parsers):
p = sub_parsers.add_parser('fit-grains', description=descr, help=descr)
p.add_argument(
'yml', type=str,
help='YAML configuration file'
)
p.add_argument(
'-g', '--grains', type=str, default=None,
help="comma-separated list of IDs to refine, defaults to all"
)
p.add_argument(
'-q', '--quiet', action='store_true',
help="don't report progress in terminal"
)
p.add_argument(
'-c', '--clean', action='store_true',
help='overwrites existing analysis, uses initial orientations'
)
p.add_argument(
'-f', '--force', action='store_true',
help='overwrites existing analysis'
)
p.add_argument(
'-p', '--profile', action='store_true',
help='runs the analysis with cProfile enabled',
)
p.set_defaults(func=execute)
def write_results(fit_results, cfg, grains_filename='grains.out'):
instr = cfg.instrument.hedm
# make output directories
if not os.path.exists(cfg.analysis_dir):
os.mkdir(cfg.analysis_dir)
for det_key in instr.detectors:
os.mkdir(os.path.join(cfg.analysis_dir, det_key))
else:
# make sure panel dirs exist under analysis dir
for det_key in instr.detectors:
if not os.path.exists(os.path.join(cfg.analysis_dir, det_key)):
os.mkdir(os.path.join(cfg.analysis_dir, det_key))
gw = instrument.GrainDataWriter(
os.path.join(cfg.analysis_dir, grains_filename)
)
for fit_result in fit_results:
gw.dump_grain(*fit_result)
gw.close()
def execute(args, parser):
# load the configuration settings
cfgs = config.open(args.yml)
# configure logging to the console:
log_level = logging.DEBUG if args.debug else logging.INFO
if args.quiet:
log_level = logging.ERROR
logger = logging.getLogger('hexrd')
logger.setLevel(log_level)
ch = logging.StreamHandler()
ch.setLevel(logging.CRITICAL if args.quiet else log_level)
cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
ch.setFormatter(cf)
logger.addHandler(ch)
# if find-orientations has not already been run, do so:
quats_f = os.path.join(
cfgs[0].working_dir,
'accepted_orientations_%s.dat' % cfgs[0].analysis_id
)
if os.path.exists(quats_f):
try:
qbar = np.loadtxt(quats_f).T
        except IOError:
            raise RuntimeError(
                "error loading indexing results '%s'" % quats_f)
else:
logger.info("Missing %s, running find-orientations", quats_f)
logger.removeHandler(ch)
from hexrd.findorientations import find_orientations
results = find_orientations(cfgs[0])
qbar = results['qbar']
logger.addHandler(ch)
logger.info('=== begin fit-grains ===')
clobber = args.force or args.clean
for cfg in cfgs:
# prepare the analysis directory
if os.path.exists(cfg.analysis_dir) and not clobber:
logger.error(
'Analysis "%s" at %s already exists.'
' Change yml file or specify "force"',
cfg.analysis_name, cfg.analysis_dir
)
sys.exit()
# make output directories
instr = cfg.instrument.hedm
if not os.path.exists(cfg.analysis_dir):
os.makedirs(cfg.analysis_dir)
for det_key in instr.detectors:
os.mkdir(os.path.join(cfg.analysis_dir, det_key))
else:
# make sure panel dirs exist under analysis dir
for det_key in instr.detectors:
if not os.path.exists(os.path.join(cfg.analysis_dir, det_key)):
os.mkdir(os.path.join(cfg.analysis_dir, det_key))
logger.info('*** begin analysis "%s" ***', cfg.analysis_name)
# configure logging to file for this particular analysis
logfile = os.path.join(
cfg.working_dir,
cfg.analysis_name,
'fit-grains.log'
)
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(log_level)
ff = logging.Formatter(
'%(asctime)s - %(name)s - %(message)s',
'%m-%d %H:%M:%S'
)
fh.setFormatter(ff)
logger.info("logging to %s", logfile)
logger.addHandler(fh)
if args.profile:
import cProfile as profile
import pstats
from io import StringIO
pr = profile.Profile()
pr.enable()
grains_filename = os.path.join(
cfg.analysis_dir, 'grains.out'
)
# some conditions for arg handling
existing_analysis = os.path.exists(grains_filename)
new_with_estimate = not existing_analysis \
and cfg.fit_grains.estimate is not None
new_without_estimate = not existing_analysis \
and cfg.fit_grains.estimate is None
force_with_estimate = args.force \
and cfg.fit_grains.estimate is not None
force_without_estimate = args.force and cfg.fit_grains.estimate is None
# handle args
if args.clean or force_without_estimate or new_without_estimate:
# need accepted orientations from indexing in this case
if args.clean:
logger.info(
"'clean' specified; ignoring estimate and using default"
)
elif force_without_estimate:
logger.info(
"'force' option specified, but no initial estimate; "
+ "using default"
)
try:
gw = instrument.GrainDataWriter(grains_filename)
for i_g, q in enumerate(qbar.T):
phi = 2*np.arccos(q[0])
n = xfcapi.unitRowVector(q[1:])
grain_params = np.hstack(
[phi*n, cnst.zeros_3, cnst.identity_6x1]
)
gw.dump_grain(int(i_g), 1., 0., grain_params)
gw.close()
            except IOError:
                raise RuntimeError(
                    "indexing results 'accepted_orientations_%s.dat' not found!"
                    % cfg.analysis_id)
elif force_with_estimate or new_with_estimate:
grains_filename = cfg.fit_grains.estimate
        elif existing_analysis and not clobber:
            raise RuntimeError(
                "fit results '%s' exist, " % grains_filename
                + "but --clean or --force options not specified")
grains_table = np.loadtxt(grains_filename, ndmin=2)
# process the data
gid_list = None
if args.grains is not None:
gid_list = [int(i) for i in args.grains.split(',')]
pass
cfg.fit_grains.qbar = qbar
fit_results = fit_grains(
cfg,
grains_table,
show_progress=not args.quiet,
ids_to_refine=gid_list,
)
if args.profile:
pr.disable()
            s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats(50)
logger.info('%s', s.getvalue())
# stop logging for this particular analysis
fh.flush()
fh.close()
logger.removeHandler(fh)
logger.info('*** end analysis "%s" ***', cfg.analysis_name)
write_results(fit_results, cfg, grains_filename)
logger.info('=== end fit-grains ===')
# stop logging to the console
ch.flush()
ch.close()
logger.removeHandler(ch)
```
#### File: hexrd/cli/gui.py
```python
import sys
help = "Launches the hexrd graphical user interface"
# FIXME: must be updated to work with the new GUI
def configure_parser(sub_parsers):
p = sub_parsers.add_parser('gui', description=help, help=help)
p.add_argument(
'-q', '--quiet', action='store_true',
help="don't report progress in terminal"
)
p.add_argument(
'--qt', action='store_true',
help='use the Qt user interface'
)
p.add_argument(
'config', nargs='?', default=None,
help='yaml configuration file'
)
p.set_defaults(func=execute)
def execute(args, parser):
import logging
logger = logging.getLogger('hexrd')
logger.setLevel(logging.DEBUG)
if args.qt:
from hexrd.qt import execute
execute(args)
else:
from hexrd.wx import mainapp
mainapp.execute(*sys.argv[2:])
```
#### File: hexrd/config/fitgrains.py
```python
import logging
import os
from .config import Config
logger = logging.getLogger('hexrd.config')
class ToleranceConfig(Config):
@property
def eta(self):
temp = self._cfg.get('fit_grains:tolerance:eta')
if isinstance(temp, (int, float)):
temp = [temp, temp]
return temp
@property
def omega(self):
temp = self._cfg.get('fit_grains:tolerance:omega')
if isinstance(temp, (int, float)):
temp = [temp, temp]
return temp
@property
def tth(self):
temp = self._cfg.get('fit_grains:tolerance:tth')
if isinstance(temp, (int, float)):
temp = [temp, temp]
return temp
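    # NOTE: a scalar tolerance in the YAML (e.g. "tth: 0.25") is normalized to
    # a two-element list [0.25, 0.25] by each property above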
class FitGrainsConfig(Config):
@property
def do_fit(self):
return self._cfg.get('fit_grains:do_fit', True)
@property
def estimate(self):
key = 'fit_grains:estimate'
temp = self._cfg.get(key, None)
if temp is None:
return temp
if not os.path.isabs(temp):
temp = os.path.join(self._cfg.working_dir, temp)
if os.path.isfile(temp):
return temp
logger.warning('"%s": "%s" does not exist', key, temp)
@property
def npdiv(self):
return self._cfg.get('fit_grains:npdiv', 2)
@property
def threshold(self):
return self._cfg.get('fit_grains:threshold')
@property
def tolerance(self):
return ToleranceConfig(self._cfg)
@property
def refit(self):
key = 'fit_grains:refit'
temp = self._cfg.get(key, None)
if temp is None:
return temp
else:
if not isinstance(temp, (int, float, list)):
raise RuntimeError(
'"%s" must be None, a scalar, or a list, got "%s"'
% (key, temp)
)
if isinstance(temp, (int, float)):
temp = [temp, temp]
return temp
"""
TODO: evaluate the need for this
"""
@property
def skip_on_estimate(self):
key = 'fit_grains:skip_on_estimate'
temp = self._cfg.get(key, False)
if temp in (True, False):
return temp
raise RuntimeError(
'"%s" must be true or false, got "%s"' % (key, temp)
)
@property
def fit_only(self):
key = 'fit_grains:fit_only'
temp = self._cfg.get(key, False)
if temp in (True, False):
return temp
raise RuntimeError(
'"%s" must be true or false, got "%s"' % (key, temp)
)
@property
def tth_max(self):
key = 'fit_grains:tth_max'
temp = self._cfg.get(key, True)
if isinstance(temp, bool):
return temp
if isinstance(temp, (int, float)):
if temp > 0:
return temp
raise RuntimeError(
'"%s" must be > 0, true, or false, got "%s"' % (key, temp)
)
```
#### File: hexrd/fitting/grains.py
```python
import numpy as np
from scipy import optimize
from hexrd import matrixutil as mutil
from hexrd.transforms import xfcapi
from hexrd.xrdutil import extract_detector_transformation
return_value_flag = None
epsf = np.finfo(float).eps # ~2.2e-16
sqrt_epsf = np.sqrt(epsf) # ~1.5e-8
bVec_ref = xfcapi.bVec_ref
eta_ref = xfcapi.eta_ref
vInv_ref = np.r_[1., 1., 1., 0., 0., 0.]
# for grain parameters
gFlag_ref = np.ones(12, dtype=bool)
gScl_ref = np.ones(12, dtype=bool)
def fitGrain(gFull, instrument, reflections_dict,
bMat, wavelength,
gFlag=gFlag_ref, gScl=gScl_ref,
omePeriod=None,
factor=0.1, xtol=sqrt_epsf, ftol=sqrt_epsf):
"""
Perform least-squares optimization of grain parameters.
    Parameters
    ----------
    gFull : array_like, (12,)
        the full grain parameter vector: exponential map (3), translation
        (3), and inverse stretch in Mandel-Voigt notation (6).
    instrument : instrument.HEDMInstrument
        the (multi-panel) diffraction instrument.
    reflections_dict : dict
        the measured reflection data, keyed by detector key.
    bMat : array_like, (3, 3)
        the COB matrix from reciprocal lattice to the crystal frame.
    wavelength : float
        the X-ray wavelength.
    gFlag : array_like of bool, (12,), optional
        flags selecting which grain parameters to refine.
        The default is gFlag_ref.
    gScl : array_like, (12,), optional
        scale factors for the refined parameters. The default is gScl_ref.
    omePeriod : array_like, (2,), optional
        the period for mapping omega angles. The default is None.
    factor : float, optional
        the leastsq step-bound factor. The default is 0.1.
    xtol : float, optional
        relative error tolerance on the solution. The default is sqrt_epsf.
    ftol : float, optional
        relative error tolerance on the residual. The default is sqrt_epsf.
    Raises
    ------
    RuntimeError
        if omePeriod is specified (not currently supported here).
    Returns
    -------
    retval : numpy.ndarray, (12,)
        the optimized full grain parameter vector.
"""
    # FIXME: will currently fail if omePeriod is specified
if omePeriod is not None:
# xyo_det[:, 2] = xfcapi.mapAngle(xyo_det[:, 2], omePeriod)
raise RuntimeError
gFit = gFull[gFlag]
fitArgs = (gFull, gFlag, instrument, reflections_dict,
bMat, wavelength, omePeriod)
    results = optimize.leastsq(objFuncFitGrain, gFit, args=fitArgs,
                               diag=1./gScl[gFlag].flatten(),
                               factor=factor, xtol=xtol, ftol=ftol)
gFit_opt = results[0]
retval = gFull
retval[gFlag] = gFit_opt
return retval
def objFuncFitGrain(gFit, gFull, gFlag,
instrument,
reflections_dict,
bMat, wavelength,
omePeriod,
simOnly=False,
return_value_flag=return_value_flag):
"""
Calculate residual between measured and simulated ff-HEDM G-vectors.
gFull[0] = expMap_c[0]
gFull[1] = expMap_c[1]
gFull[2] = expMap_c[2]
gFull[3] = tVec_c[0]
gFull[4] = tVec_c[1]
gFull[5] = tVec_c[2]
gFull[6] = vInv_MV[0]
gFull[7] = vInv_MV[1]
gFull[8] = vInv_MV[2]
gFull[9] = vInv_MV[3]
gFull[10] = vInv_MV[4]
gFull[11] = vInv_MV[5]
OLD CALL
objFuncFitGrain(gFit, gFull, gFlag,
detectorParams,
xyo_det, hkls_idx, bMat, wavelength,
bVec, eVec,
dFunc, dParams,
omePeriod,
simOnly=False, return_value_flag=return_value_flag)
    Parameters
    ----------
    gFit : array_like
        the subset of grain parameters being refined, i.e. gFull[gFlag].
    gFull : array_like, (12,)
        the full grain parameter vector (layout above).
    gFlag : array_like of bool, (12,)
        flags marking which entries of gFull are taken from gFit.
    instrument : instrument.HEDMInstrument
        the (multi-panel) diffraction instrument.
    reflections_dict : dict
        the measured reflection data, keyed by detector key (see below).
    bMat : array_like, (3, 3)
        the COB matrix from reciprocal lattice to the crystal frame.
    wavelength : float
        the X-ray wavelength.
    omePeriod : array_like, (2,)
        the period for mapping omega angles, or None.
    simOnly : bool, optional
        if True, return the simulated (x, y, ome) values instead of the
        residual. The default is False.
    return_value_flag : int, optional
        selects the output form: None for the residual vector, 1 for the
        scalar sum of absolute residuals, 2 for the DOF-normalized
        chi-squared. The default is return_value_flag.
    Raises
    ------
    RuntimeError
        if any simulated detector coordinates are NaN (infeasible
        parameters).
    Returns
    -------
    retval : numpy.ndarray, dict, or float
        the residual (or simulated values), depending on simOnly and
        return_value_flag.
"""
bVec = instrument.beam_vector
eVec = instrument.eta_vector
# fill out parameters
gFull[gFlag] = gFit
# map parameters to functional arrays
rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
tVec_c = gFull[3:6].reshape(3, 1)
vInv_s = gFull[6:]
vMat_s = mutil.vecMVToSymm(vInv_s) # NOTE: Inverse of V from F = V * R
# loop over instrument panels
# CAVEAT: keeping track of key ordering in the "detectors" attribute of
# instrument here because I am not sure if instatiating them using
# dict.fromkeys() preserves the same order if using iteration...
# <JVB 2017-10-31>
calc_omes_dict = dict.fromkeys(instrument.detectors, [])
calc_xy_dict = dict.fromkeys(instrument.detectors)
meas_xyo_all = []
det_keys_ordered = []
for det_key, panel in instrument.detectors.items():
det_keys_ordered.append(det_key)
rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation(
instrument.detector_parameters[det_key])
results = reflections_dict[det_key]
if len(results) == 0:
continue
"""
extract data from results list fields:
refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy
or array from spots tables:
0:5 ID PID H K L
5:7 sum(int) max(int)
7:10 pred tth pred eta pred ome
10:13 meas tth meas eta meas ome
13:15 pred X pred Y
15:17 meas X meas Y
"""
if isinstance(results, list):
# WARNING: hkls and derived vectors below must be columnwise;
# strictly necessary??? change affected APIs instead?
# <JVB 2017-03-26>
hkls = np.atleast_2d(
np.vstack([x[2] for x in results])
).T
meas_xyo = np.atleast_2d(
np.vstack([np.r_[x[7], x[6][-1]] for x in results])
)
elif isinstance(results, np.ndarray):
hkls = np.atleast_2d(results[:, 2:5]).T
meas_xyo = np.atleast_2d(results[:, [15, 16, 12]])
# FIXME: distortion handling must change to class-based
if panel.distortion is not None:
meas_omes = meas_xyo[:, 2]
xy_unwarped = panel.distortion[0](
meas_xyo[:, :2], panel.distortion[1])
meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T
pass
# append to meas_omes
meas_xyo_all.append(meas_xyo)
# G-vectors:
# 1. calculate full g-vector components in CRYSTAL frame from B
# 2. rotate into SAMPLE frame and apply stretch
# 3. rotate back into CRYSTAL frame and normalize to unit magnitude
# IDEA: make a function for this sequence of operations with option for
# choosing ouput frame (i.e. CRYSTAL vs SAMPLE vs LAB)
gVec_c = np.dot(bMat, hkls)
gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s))
# !!!: check that this operates on UNWARPED xy
match_omes, calc_omes = matchOmegas(
meas_xyo, hkls, chi, rMat_c, bMat, wavelength,
vInv=vInv_s, beamVec=bVec, etaVec=eVec,
omePeriod=omePeriod)
# append to omes dict
calc_omes_dict[det_key] = calc_omes
# TODO: try Numba implementations
rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
rMat_d, rMat_s, rMat_c,
tVec_d, tVec_s, tVec_c,
beamVec=bVec)
# append to xy dict
calc_xy_dict[det_key] = calc_xy
pass
# stack results to concatenated arrays
calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered])
tmp = []
for k in det_keys_ordered:
if calc_xy_dict[k] is not None:
tmp.append(calc_xy_dict[k])
calc_xy_all = np.vstack(tmp)
meas_xyo_all = np.vstack(meas_xyo_all)
npts = len(meas_xyo_all)
    if np.any(np.isnan(calc_xy_all)):
        raise RuntimeError(
            "infeasible pFull: may want to scale "
            + "back finite difference step size")
# return values
if simOnly:
# return simulated values
if return_value_flag in [None, 1]:
retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)])
else:
rd = dict.fromkeys(det_keys_ordered)
for det_key in det_keys_ordered:
rd[det_key] = {'calc_xy': calc_xy_dict[det_key],
'calc_omes': calc_omes_dict[det_key]}
retval = rd
else:
# return residual vector
# IDEA: try angles instead of xys?
diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2]
diff_ome = xfcapi.angularDifference(calc_omes_all, meas_xyo_all[:, 2])
retval = np.hstack([diff_vecs_xy,
diff_ome.reshape(npts, 1)
]).flatten()
if return_value_flag == 1:
# return scalar sum of squared residuals
retval = sum(abs(retval))
elif return_value_flag == 2:
# return DOF-normalized chisq
# TODO: check this calculation
denom = 3*npts - len(gFit) - 1.
if denom != 0:
nu_fac = 1. / denom
else:
nu_fac = 1.
retval = nu_fac * sum(retval**2)
return retval
def matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength,
vInv=vInv_ref, beamVec=bVec_ref, etaVec=eta_ref,
omePeriod=None):
"""
For a given list of (x, y, ome) points, outputs the index into the results
from oscillAnglesOfHKLs, including the calculated omega values.
"""
# get omegas for rMat_s calculation
if omePeriod is not None:
meas_omes = xfcapi.mapAngle(xyo_det[:, 2], omePeriod)
else:
meas_omes = xyo_det[:, 2]
oangs0, oangs1 = xfcapi.oscillAnglesOfHKLs(
hkls_idx.T, chi, rMat_c, bMat, wavelength,
vInv=vInv,
beamVec=beamVec,
etaVec=etaVec)
    if np.any(np.isnan(oangs0)):
        # NaN solutions mean the requested reflections are infeasible for
        # these parameters; build an informative error below
nanIdx = np.where(np.isnan(oangs0[:, 0]))[0]
errorString = "Infeasible parameters for hkls:\n"
for i in range(len(nanIdx)):
errorString += "%d %d %d\n" % tuple(hkls_idx[:, nanIdx[i]])
raise RuntimeError(errorString)
else:
# CAPI version gives vstacked angles... must be (2, nhkls)
calc_omes = np.vstack([oangs0[:, 2], oangs1[:, 2]])
if omePeriod is not None:
calc_omes = np.vstack([xfcapi.mapAngle(oangs0[:, 2], omePeriod),
xfcapi.mapAngle(oangs1[:, 2], omePeriod)])
# do angular difference
diff_omes = xfcapi.angularDifference(np.tile(meas_omes, (2, 1)), calc_omes)
match_omes = np.argsort(diff_omes, axis=0) == 0
calc_omes = calc_omes.T.flatten()[match_omes.T.flatten()]
return match_omes, calc_omes
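# Note: oscillAnglesOfHKLs returns two candidate omega solutions per hkl;
# matchOmegas keeps, for each reflection, whichever solution is angularly
# closer to the measured omega.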
```
#### File: hexrd/fitting/peakfunctions.py
```python
import numpy as np
#import code
import copy
gauss_width_fact=2.*np.sqrt(2.*np.log(2.)) # FWHM = 2*sqrt(2*ln 2)*sigma for a Gaussian
lorentz_width_fact=2. # FWHM = 2*gamma for a Lorentzian
#### 1-D Gaussian Functions
def _unit_gaussian(p,x):#Split the unit gaussian so this can be called for 2d and 3d functions
"""
Required Arguments:
p -- (m) [x0,FWHM]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
x0=p[0]
FWHM=p[1]
sigma=FWHM/gauss_width_fact
f=np.exp(-(x-x0)**2/(2.*sigma**2.))
return f
def _gaussian1d_no_bg(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
A=p[0]
f=A*_unit_gaussian(p[[1,2]],x)
return f
def gaussian1d(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM,c0,c1]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
bg0=p[3]
bg1=p[4]
f=_gaussian1d_no_bg(p[:3],x)+bg0+bg1*x
return f
def _gaussian1d_no_bg_deriv(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM]
x -- (n) ndarray of coordinate positions
Outputs:
d_mat -- (3 x n) ndarray of derivative values at positions x
"""
x0=p[1]
FWHM=p[2]
sigma=FWHM/gauss_width_fact
dydx0=_gaussian1d_no_bg(p,x)*((x-x0)/(sigma**2.))
dydA=_unit_gaussian(p[[1,2]],x)
dydFWHM=_gaussian1d_no_bg(p,x)*((x-x0)**2./(sigma**3.))/gauss_width_fact
d_mat=np.zeros((len(p),len(x)))
d_mat[0,:]=dydA
d_mat[1,:]=dydx0
d_mat[2,:]=dydFWHM
return d_mat
def gaussian1d_deriv(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM,c0,c1]
x -- (n) ndarray of coordinate positions
Outputs:
d_mat -- (5 x n) ndarray of derivative values at positions x
"""
d_mat=np.zeros((len(p),len(x)))
d_mat[0:3,:]=_gaussian1d_no_bg_deriv(p[0:3],x)
d_mat[3,:]=1.
d_mat[4,:]=x
return d_mat
#### 1-D Lorentzian Functions
def _unit_lorentzian(p,x):#Split the unit function so this can be called for 2d and 3d functions
"""
Required Arguments:
p -- (m) [x0,FWHM]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
x0=p[0]
FWHM=p[1]
gamma=FWHM/lorentz_width_fact
f= gamma**2 / ((x-x0)**2 + gamma**2)
return f
def _lorentzian1d_no_bg(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
A=p[0]
f= A*_unit_lorentzian(p[[1,2]],x)
return f
def lorentzian1d(p,x):
"""
Required Arguments:
    p -- (m) [A,x0,FWHM,c0,c1]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
bg0=p[3]
bg1=p[4]
f=_lorentzian1d_no_bg(p[:3],x)+bg0+bg1*x
return f
def _lorentzian1d_no_bg_deriv(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM]
x -- (n) ndarray of coordinate positions
Outputs:
d_mat -- (3 x n) ndarray of derivative values at positions x
"""
x0=p[1]
FWHM=p[2]
gamma=FWHM/lorentz_width_fact
dydx0=_lorentzian1d_no_bg(p,x)*((2.*(x-x0))/((x-x0)**2 + gamma**2))
dydA=_unit_lorentzian(p[[1,2]],x)
dydFWHM=_lorentzian1d_no_bg(p,x)*((2.*(x-x0)**2.)/(gamma*((x-x0)**2 + gamma**2)))/lorentz_width_fact
d_mat=np.zeros((len(p),len(x)))
d_mat[0,:]=dydA
d_mat[1,:]=dydx0
d_mat[2,:]=dydFWHM
return d_mat
def lorentzian1d_deriv(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM,c0,c1]
x -- (n) ndarray of coordinate positions
Outputs:
d_mat -- (5 x n) ndarray of derivative values at positions x
"""
d_mat=np.zeros((len(p),len(x)))
d_mat[0:3,:]=_lorentzian1d_no_bg_deriv(p[0:3],x)
d_mat[3,:]=1.
d_mat[4,:]=x
return d_mat
#### 1-D Pseudo Voigt Functions
def _unit_pvoigt1d(p,x):#Split the unit function so this can be called for 2d and 3d functions
"""
Required Arguments:
p -- (m) [x0,FWHM,n]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
n=p[2]
f=(n*_unit_gaussian(p[:2],x)+(1.-n)*_unit_lorentzian(p[:2],x))
return f
def _pvoigt1d_no_bg(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM,n]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
A=p[0]
f=A*_unit_pvoigt1d(p[[1,2,3]],x)
return f
def pvoigt1d(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM,n,c0,c1]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
bg0=p[4]
bg1=p[5]
f=_pvoigt1d_no_bg(p[:4],x)+bg0+bg1*x
return f
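# Illustrative check (a sketch; these parameter values are assumptions chosen
# only for demonstration): a pseudo-Voigt with A=1, x0=0, FWHM=2, mixing
# fraction n=0.5, and constant background c0=0.1 peaks at A + c0 = 1.1:
#   x = np.linspace(-10., 10., 201)
#   f = pvoigt1d(np.array([1., 0., 2., 0.5, 0.1, 0.]), x) # [A,x0,FWHM,n,c0,c1]
#   f.max() # -> 1.1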
#### 1-D Split Pseudo Voigt Functions
def _split_pvoigt1d_no_bg(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM-,FWHM+,n-,n+]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
A=p[0]
x0=p[1]
f=np.zeros(x.shape[0])
    #Define halves using >= and <; the choice of which side gets equality is arbitrary
xr=x>=x0
xl=x<x0
#+
r=np.where(xr)[0]
f[r]=A*_unit_pvoigt1d(p[[1,3,5]],x[r])
#-
l=np.where(xl)[0]
f[l]=A*_unit_pvoigt1d(p[[1,2,4]],x[l])
return f
def split_pvoigt1d(p,x):
"""
Required Arguments:
p -- (m) [A,x0,FWHM-,FWHM+,n-,n+,c0,c1]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
bg0=p[6]
bg1=p[7]
f=_split_pvoigt1d_no_bg(p[:6],x)+bg0+bg1*x
return f
#### Tanh Step Down
def tanh_stepdown_nobg(p,x):
"""
Required Arguments:
p -- (m) [A,x0,w]
x -- (n) ndarray of coordinate positions
Outputs:
f -- (n) ndarray of function values at positions x
"""
A=p[0]
x0=p[1]
w=p[2]
f=A*(0.5*(1.-np.tanh((x-x0)/w)))
return f
#### 2-D Rotation Coordinate Transform
def _2d_coord_transform(theta,x0,y0,x,y):
xprime=np.cos(theta)*x+np.sin(theta)*y
yprime=-np.sin(theta)*x+np.cos(theta)*y
x0prime=np.cos(theta)*x0+np.sin(theta)*y0
y0prime=-np.sin(theta)*x0+np.cos(theta)*y0
return x0prime, y0prime, xprime, yprime
#### 2-D Gaussian Function
def _gaussian2d_no_bg(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx,FWHMy]
x -- (n x o) ndarray of coordinate positions for dimension 1
    y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
    f -- (n x o) ndarray of function values at positions (x,y)
"""
A=p[0]
f=A*_unit_gaussian(p[[1,3]],x)*_unit_gaussian(p[[2,4]],y)
return f
def _gaussian2d_rot_no_bg(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx,FWHMy,theta]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
theta=p[5]
x0prime, y0prime, xprime, yprime=_2d_coord_transform(theta,p[1],p[2],x,y)
newp=copy.copy(p)#this copy was needed so original parameters set isn't changed
newp[1]=x0prime
newp[2]=y0prime
f=_gaussian2d_no_bg(newp[:5],xprime,yprime)
return f
def gaussian2d_rot(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx,FWHMy,theta,c0,c1x,c1y]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
bg0=p[6]
bg1x=p[7]
bg1y=p[8]
f=_gaussian2d_rot_no_bg(p[:6],x,y)+(bg0+bg1x*x+bg1y*y)
return f
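# Illustrative call (hypothetical values, not from the library docs): evaluate
# a rotated 2-D Gaussian plus a planar background on a coordinate grid.
#   xx, yy = np.meshgrid(np.arange(100.), np.arange(80.))
#   p = np.array([1., 50., 40., 10., 5., 0.3, 0., 0., 0.])  # A,x0,y0,FWHMx,FWHMy,theta,c0,c1x,c1y
#   img = gaussian2d_rot(p, xx, yy)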
def gaussian2d(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx,FWHMy,c0,c1x,c1y]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
bg0=p[5]
bg1x=p[6]
bg1y=p[7]
f=_gaussian2d_no_bg(p[:5],x,y)+(bg0+bg1x*x+bg1y*y)
return f
#### 2-D Split Pseudo-Voigt Function
def _split_pvoigt2d_no_bg(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx-,FWHMx+,FWHMy-,FWHMy+,nx-,nx+,ny-,ny+]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
A=p[0]
x0=p[1]
y0=p[2]
f=np.zeros([x.shape[0],x.shape[1]])
    #Define quadrants using >= and <; the choice of which side gets equality is arbitrary
xr=x>=x0
xl=x<x0
yr=y>=y0
yl=y<y0
#++
q1=np.where(xr & yr)
f[q1]=A*_unit_pvoigt1d(p[[1,4,8]],x[q1])*_unit_pvoigt1d(p[[2,6,10]],y[q1])
#+-
q2=np.where(xr & yl)
f[q2]=A*_unit_pvoigt1d(p[[1,4,8]],x[q2])*_unit_pvoigt1d(p[[2,5,9]],y[q2])
#-+
q3=np.where(xl & yr)
f[q3]=A*_unit_pvoigt1d(p[[1,3,7]],x[q3])*_unit_pvoigt1d(p[[2,6,10]],y[q3])
#--
q4=np.where(xl & yl)
f[q4]=A*_unit_pvoigt1d(p[[1,3,7]],x[q4])*_unit_pvoigt1d(p[[2,5,9]],y[q4])
return f
def _split_pvoigt2d_rot_no_bg(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx-,FWHMx+,FWHMy-,FWHMy+,nx-,nx+,ny-,ny+,theta]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
theta=p[11]
x0prime, y0prime, xprime, yprime=_2d_coord_transform(theta,p[1],p[2],x,y)
newp=copy.copy(p)#this copy was needed so original parameters set isn't changed
newp[1]=x0prime
newp[2]=y0prime
f=_split_pvoigt2d_no_bg(newp[:11],xprime,yprime)
return f
def split_pvoigt2d_rot(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx-,FWHMx+,FWHMy-,FWHMy+,nx-,nx+,ny-,ny+,theta,c0,c1x,c1y]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
bg0=p[12]
bg1x=p[13]
bg1y=p[14]
f=_split_pvoigt2d_rot_no_bg(p[:12],x,y)+(bg0+bg1x*x+bg1y*y)
return f
#### 3-D Gaussian Function
def _gaussian3d_no_bg(p,x,y,z):
"""
Required Arguments:
p -- (m) [A,x0,y0,z0,FWHMx,FWHMy,FWHMz]
x -- (n x o x q) ndarray of coordinate positions for dimension 1
y -- (n x o x q) ndarray of coordinate positions for dimension 2
    z -- (n x o x q) ndarray of coordinate positions for dimension 3
Outputs:
    f -- (n x o x q) ndarray of function values at positions (x,y,z)
"""
A=p[0]
f=A*_unit_gaussian(p[[1,4]],x)*_unit_gaussian(p[[2,5]],y)*_unit_gaussian(p[[3,6]],z)
return f
def gaussian3d(p,x,y,z):
"""
Required Arguments:
p -- (m) [A,x0,y0,z0,FWHMx,FWHMy,FWHMz,c0,c1x,c1y,c1z]
x -- (n x o x q) ndarray of coordinate positions for dimension 1
y -- (n x o x q) ndarray of coordinate positions for dimension 2
    z -- (n x o x q) ndarray of coordinate positions for dimension 3
Outputs:
f -- (n x o x q) ndarray of function values at positions (x,y,z)
"""
bg0=p[7]
bg1x=p[8]
bg1y=p[9]
bg1z=p[10]
    f=_gaussian3d_no_bg(p[:7],x,y,z)+(bg0+bg1x*x+bg1y*y+bg1z*z)  # all 7 peak params and all 3 axes
return f
def _mpeak_1d_no_bg(p,x,pktype,num_pks):
"""
Required Arguments:
    p -- (m x u) list of peak parameters for number of peaks (m is the number of
    parameters per peak: "gaussian" and "lorentzian" - 3, "pvoigt" - 4,
    "split_pvoigt" - 6)
    x -- (n) ndarray of coordinate positions for dimension 1
    pktype -- string, type of analytic function that will be used to fit the data;
    current options are "gaussian", "lorentzian", "pvoigt" (pseudo-Voigt), and
    "split_pvoigt" (split pseudo-Voigt)
    num_pks -- integer 'u' indicating the number of peaks; must match the length of p
Outputs:
f -- (n) ndarray of function values at positions (x)
"""
f=np.zeros(len(x))
if pktype == 'gaussian' or pktype == 'lorentzian':
p_fit=np.reshape(p[:3*num_pks],[num_pks,3])
elif pktype == 'pvoigt':
p_fit=np.reshape(p[:4*num_pks],[num_pks,4])
elif pktype == 'split_pvoigt':
p_fit=np.reshape(p[:6*num_pks],[num_pks,6])
for ii in np.arange(num_pks):
if pktype == 'gaussian':
f=f+_gaussian1d_no_bg(p_fit[ii],x)
elif pktype == 'lorentzian':
f=f+_lorentzian1d_no_bg(p_fit[ii],x)
elif pktype == 'pvoigt':
f=f+_pvoigt1d_no_bg(p_fit[ii],x)
elif pktype == 'split_pvoigt':
f=f+_split_pvoigt1d_no_bg(p_fit[ii],x)
return f
def mpeak_1d(p,x,pktype,num_pks,bgtype=None):
"""
Required Arguments:
    p -- (m x u) list of peak parameters for number of peaks (m is the number of
    parameters per peak: "gaussian" and "lorentzian" - 3, "pvoigt" - 4,
    "split_pvoigt" - 6)
    x -- (n) ndarray of coordinate positions for dimension 1
    pktype -- string, type of analytic function that will be used to fit the data;
    current options are "gaussian", "lorentzian", "pvoigt" (pseudo-Voigt), and
    "split_pvoigt" (split pseudo-Voigt)
    num_pks -- integer 'u' indicating the number of peaks; must match the length of p
    bgtype -- string, background function; available options are "constant",
    "linear", and "quadratic"
Outputs:
f -- (n) ndarray of function values at positions (x)
"""
f=_mpeak_1d_no_bg(p,x,pktype,num_pks)
if bgtype=='linear':
f=f+p[-2]+p[-1]*x #c0=p[-2], c1=p[-1]
elif bgtype=='constant':
f=f+p[-1] #c0=p[-1]
elif bgtype=='quadratic':
f=f+p[-3]+p[-2]*x+p[-1]*x**2 #c0=p[-3], c1=p[-2], c2=p[-1],
return f
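# Illustrative parameter packing (hypothetical values, not from the library
# docs): two Gaussian peaks followed by linear-background coefficients [c0, c1].
#   x = np.linspace(0., 10., 500)
#   p = np.array([2., 3., 0.5,   # peak 1: A, x0, FWHM
#                 1., 7., 0.8,   # peak 2: A, x0, FWHM
#                 0.1, 0.01])    # background: c0, c1
#   y = mpeak_1d(p, x, 'gaussian', 2, bgtype='linear')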
```
#### File: hexrd/hexrd/imageutil.py
```python
import numpy as np
from scipy import signal, ndimage
from hexrd import convolution
def fast_snip1d(y, w=4, numiter=2):
"""
"""
bkg = np.zeros_like(y)
zfull = np.log(np.log(np.sqrt(y + 1.) + 1.) + 1.)
for k, z in enumerate(zfull):
b = z
for i in range(numiter):
for p in range(w, 0, -1):
kernel = np.zeros(p*2 + 1)
kernel[0] = 0.5
kernel[-1] = 0.5
                b = np.minimum(b, signal.fftconvolve(z, kernel, mode='same'))
z = b
bkg[k, :] = (np.exp(np.exp(b) - 1.) - 1.)**2 - 1.
return bkg
def snip1d(y, w=4, numiter=2, threshold=0):
"""
Return SNIP-estimated baseline-background for given spectrum y.
    !!!: values at or below threshold are masked and become NaN in the convolution
"""
mask = y <= threshold
zfull = np.log(np.log(np.sqrt(y + 1) + 1) + 1)
bkg = np.zeros_like(zfull)
for k, z in enumerate(zfull):
if np.all(mask[k]):
bkg[k, :] = np.nan
else:
b = z
for i in range(numiter):
for p in range(w, 0, -1):
kernel = np.zeros(p*2 + 1)
kernel[0] = kernel[-1] = 1./2.
b = np.minimum(
b,
convolution.convolve(
z, kernel, boundary='extend', mask=mask[k]
)
)
z = b
bkg[k, :] = (np.exp(np.exp(b) - 1) - 1)**2 - 1
nan_idx = np.isnan(bkg)
bkg[nan_idx] = threshold
return bkg
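# Illustrative call (hypothetical data): snip1d iterates over rows, so a single
# spectrum should be passed as a 2-D array via np.atleast_2d.
#   spectrum = np.random.poisson(100., 1024).astype(float)
#   bkg = snip1d(np.atleast_2d(spectrum), w=8, numiter=3)[0]
#   net = spectrum - bkg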
def snip1d_quad(y, w=4, numiter=2):
"""Return SNIP-estimated baseline-background for given spectrum y.
Adds a quadratic kernel convolution in parallel with the linear kernel."""
convolve1d = ndimage.convolve1d
kernels = []
for p in range(w, 1, -2):
N = p * 2 + 1
# linear kernel
kern1 = np.zeros(N)
kern1[0] = kern1[-1] = 1./2.
# quadratic kernel
kern2 = np.zeros(N)
kern2[0] = kern2[-1] = -1./6.
        kern2[p//2] = kern2[3*p//2] = 4./6.  # integer division so these are valid indices in Python 3
kernels.append([kern1, kern2])
z = b = np.log(np.log(y + 1) + 1)
for i in range(numiter):
        for (kern1, kern2) in kernels:  # kernels already holds [kern1, kern2] pairs
c = np.maximum(convolve1d(z, kern1, mode='nearest'),
convolve1d(z, kern2, mode='nearest'))
b = np.minimum(b, c)
z = b
return np.exp(np.exp(b) - 1) - 1
def snip2d(y, w=4, numiter=2, order=1):
"""
Return estimate of 2D-array background by "clipping" peak-like structures.
2D adaptation of the peak-clipping component of the SNIP algorithm.
Parameters
----------
y : 2-D input array
w : integer (default 4)
kernel size (maximum kernel extent actually = 2 * w * order + 1)
numiter : integer (default 2)
number of iterations
order : integer (default 1)
maximum order of filter kernel, either 1 (linear) or 2 (quadratic)
Returns
-------
out : 2-D array with SNIP-estimated background of y
References!!!
-----
[1] <NAME> et al, "SNIP, A statistics-sensitive background treatment
for the quantitative analysis of PIXE spectra in geoscience
applications," Nucl. Instr. and Meth. B 34, 396 (1988).
[2] <NAME> et al., "Background elimination methods for multidimensional
coincidence gamma-ray spectra," Nucl. Instr. and Meth. A 401, 113
(1997).
"""
maximum, minimum = np.fmax, np.fmin
# create list of kernels
kernels = []
for p in range(w, 0, -1): # decrement window starting from w
N = 2 * p * order + 1 # size of filter kernels
p1 = order * p
# linear filter kernel
kern1 = np.zeros((N, N)) # initialize a kernel with all zeros
xx, yy = np.indices(kern1.shape) # x-y indices of kernel points
ij = np.round(
np.hypot(xx - p1, yy - p1)
) == p1 # select circular shape
kern1[ij] = 1 / ij.sum() # normalize so sum of kernel elements is 1
kernels.append([kern1])
if order >= 2: # add quadratic filter kernel
p2 = p1 // 2
kern2 = np.zeros_like(kern1)
radii, norms = (p2, 2 * p2), (4/3, -1/3)
for radius, norm in zip(radii, norms):
ij = np.round(np.hypot(xx - p1, yy - p1)) == radius
kern2[ij] = norm / ij.sum()
kernels[-1].append(kern2)
# convolve kernels with input array
z = b = np.log(np.log(y + 1) + 1) # perform convolutions in logspace
for i in range(numiter):
for kk in kernels:
if order > 1:
c = maximum(ndimage.convolve(z, kk[0], mode='nearest'),
ndimage.convolve(z, kk[1], mode='nearest'))
else:
c = ndimage.convolve(z, kk[0], mode='nearest')
b = minimum(b, c)
z = b
return np.exp(np.exp(b) - 1) - 1
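# Illustrative call (hypothetical image): clip peak-like structures to
# estimate, then subtract, the smooth background of a 2-D image.
#   img = np.random.poisson(50., (256, 256)).astype(float)
#   bkg = snip2d(img, w=4, numiter=2, order=1)
#   net = img - bkg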
```
#### File: hexrd/tests/test_find_orientations.py
```python
import os
import logging
import sys
from pathlib import Path
import numpy as np
import pytest
import coloredlogs
from hexrd.findorientations import find_orientations, generate_eta_ome_maps
from hexrd import config
from hexrd.crystallography import PlaneData
import find_orientations_testing as test_utils
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = coloredlogs.ColoredFormatter(
'%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
@pytest.fixture
def example_repo_path():
if 'HEXRD_EXAMPLE_REPO_PATH' not in os.environ:
        pytest.fail('Environment variable HEXRD_EXAMPLE_REPO_PATH not set!')
repo_path = os.environ['HEXRD_EXAMPLE_REPO_PATH']
return Path(repo_path)
@pytest.fixture
def example_repo_include_path(example_repo_path):
return example_repo_path / 'NIST_ruby' / 'multiruby_dexelas' / 'include'
@pytest.fixture
def example_repo_results_path(example_repo_path):
return example_repo_path / 'NIST_ruby' / 'multiruby_dexelas' / 'results'
@pytest.fixture
def example_repo_config_path(example_repo_include_path):
return example_repo_include_path / 'mruby_config_composite.yml'
@pytest.fixture
def test_config(example_repo_config_path, example_repo_include_path):
conf = config.open(example_repo_config_path)[0]
conf.working_dir = str(example_repo_include_path)
return conf
@pytest.fixture
def reference_eta_ome_maps(example_repo_results_path):
filename = 'results_mruby_composite_hexrd06_py27_ruby_eta-ome_maps.npz'
return example_repo_results_path / filename
@pytest.fixture
def example_repo_config_with_eta_ome_maps(test_config, reference_eta_ome_maps):
# Set eta omega maps file
cfg = test_config._cfg.copy()
results_path = str(Path('../results') / reference_eta_ome_maps.name)
cfg['find_orientations']['orientation_maps']['file'] = results_path
patch_config = config.root.RootConfig(cfg)
return patch_config
@pytest.fixture
def reference_orientations_path(example_repo_results_path):
filename = \
'accepted_orientations_results_mruby_composite_hexrd06_py27_ruby.dat'
return example_repo_results_path / filename
@pytest.fixture
def reference_orientations(reference_orientations_path):
return np.loadtxt(reference_orientations_path, ndmin=2)
def plane_data(plane_data):
args = np.array(plane_data.getParams())[:4]
hkls = plane_data.hkls
return PlaneData(hkls, *args)
def to_eomap(eta_ome_maps):
return test_utils.EOMap(
np.array(eta_ome_maps.dataStore),
eta_ome_maps.etas,
eta_ome_maps.etaEdges,
eta_ome_maps.omegas,
eta_ome_maps.omeEdges,
eta_ome_maps.iHKLList,
plane_data(eta_ome_maps.planeData)
)
def test_generate_eta_ome_maps(example_repo_include_path,
test_config,
reference_eta_ome_maps):
os.chdir(example_repo_include_path)
eta_ome_maps = generate_eta_ome_maps(test_config, save=False)
eta_ome_maps = to_eomap(eta_ome_maps)
expected = test_utils.load(reference_eta_ome_maps)
comparison = test_utils.Comparison(expected, eta_ome_maps)
assert comparison.compare()
def test_find_orientations(example_repo_include_path,
example_repo_config_with_eta_ome_maps,
reference_orientations):
os.chdir(example_repo_include_path)
results = find_orientations(
example_repo_config_with_eta_ome_maps
)
orientations = results['qbar']
try:
test_utils.compare_quaternion_lists(orientations.T,
reference_orientations)
except RuntimeError as err:
pytest.fail(str(err))
``` |
{
"source": "johnkittelman/pypow",
"score": 3
} |
#### File: pypow/thermodynamics/Cylinder_Geometry.py
```python
import numpy as np
from matplotlib import pyplot
class Cylinder_Geometry:
""" The geometry of the cylnder is a function of the given variables
The angle theta has a resolution of 1/10th of a degree
volumes are in cubic centimeters and lengths are in mm by convention"""
def __init__(self,compression_ratio = 12.75 ,bore=9.5,stroke=6.34,tdc_volume=0.0,swept_volume=449.9, connecting_rod_length=15.0,connecting_rod_ratio=3.388,crank_angle=np.arange(0,720,0.1)):
"""This init function converts all the base geometeries into arrays using numpy.array(), this is done
#for speed"""
if compression_ratio <0 or bore < 0 or stroke < 0 or tdc_volume < 0 or swept_volume < 0 or connecting_rod_length < 0:
            raise ValueError('Inputs must be positive')
self.compression_ratio=compression_ratio
self.bore=np.array(bore)
self.stroke=np.array(stroke)
self.tdc_volume=np.array(tdc_volume)
self.swept_volume=np.array(swept_volume)
self.crank_radius=np.array(stroke/2.0)
self.connecting_rod_length=np.array(connecting_rod_length)
self.crank_angle=np.array(crank_angle)
self.connectrod_crankrad= np.array(connecting_rod_length/(stroke*0.5))
self.connecting_rod_ratio=np.array(connecting_rod_ratio)
def cylinder_volume_func(self):
        '''This calculates the cylinder volume using the function outlined in Heywood's
        book on page 43. The default calculation is to 1/10th of a degree; the resolution
        can be changed by changing the crank_angle numpy array.'''
theta_c = np.deg2rad(self.crank_angle)
a=self.crank_radius
l=self.connecting_rod_length
b=self.bore
v_c=self.tdc_volume
        # Use this instance's own geometry rather than a fresh default object.
        if v_c == 0:
            v_c = self.tdc_volume_calc()
        cylinder_volume = []
for i,theta in enumerate(theta_c):
s=(a * np.cos(theta)) + (np.sqrt(l ** 2 - (a ** 2) * (np.sin(theta)) ** 2))
cylinder_volume.append((v_c+((np.pi*(b**2))/4.0)*(l+a-s)))
return cylinder_volume
def areaCylinder(self):
# surface area of cylinder at instant
        piston_pos = self.piston_position(self.crank_angle)
piston_area = np.pi * (self.bore ** 2) * 0.25
wall_area = np.pi * self.bore * self.stroke * (1 - piston_pos)
return wall_area + (piston_area * 2)
def tdc_volume_calc(self):
        ''' This calculates the TDC volume using the compression ratio and the swept
        volume; the equation for this can be found in Heywood's book on page 44.'''
tdc_volume=self.swept_volume/(self.compression_ratio-1)
return tdc_volume
    def compression_ratio_calc(self):
        ''' The compression ratio can be found given the swept volume and the TDC volume.
        Named _calc so the method is not shadowed by the compression_ratio attribute
        set in __init__.'''
        compression_ratio = (self.tdc_volume + self.swept_volume) / self.tdc_volume
        return compression_ratio
def piston_velocity(self,n):
'''this function is used to define both the average velocity of the piston
aswell as the actual velocity of the piston'''
theta_c = np.deg2rad(self.crank_angle)
ave_pist_velocity=2*self.stroke*n
        actual_pist_velocity = ave_pist_velocity*(np.pi*0.5*np.sin(theta_c))*(1+(np.cos(theta_c)/np.sqrt(self.connectrod_crankrad**2-(np.sin(theta_c))**2)))  # sin term squared per the standard slider-crank relation
return ave_pist_velocity, actual_pist_velocity
def piston_position(self, crank_angle):
""" Relative position of the piston, =1 at TDC and =0 at BDC, regarding
to the crank angle in degres. """
# Angle in radians
radangle = np.radians(crank_angle)
# Ratio of the crank radius on the connecting rod length
ratio = 1/self.connecting_rod_ratio
piston_pos=1-0.5*((1-np.cos(radangle)) + ratio*(1-np.sqrt(1-pow(ratio*np.sin(radangle),2))))
return piston_pos
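# Illustrative check (hypothetical angles): the relative position is 1 at TDC
# (0 degrees) and 0 at BDC (180 degrees) for the default geometry.
#   geo = Cylinder_Geometry()
#   geo.piston_position(np.array([0., 180.]))  # -> [1., 0.]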
if __name__=="__main__":
c=Cylinder_Geometry()
tdcv= c.cylinder_volume_func()
print (tdcv)
#print (len(tdcv))
#print (np.array2string(tdcv))
``` |
{
"source": "johnklee/algprac",
"score": 4
} |
#### File: game_theory/easy/misere-nim-1.py
```python
r'''
https://www.hackerrank.com/challenges/misere-nim-1/problem
'''
import math
import os
import random
import re
import sys
def winer(rst):
if rst:
return 'First'
else:
return 'Second'
# Complete the misereNim function below.
def misereNim(s):
# In a single pile, if more than one stones exist then first player will
# always win by leaving the last stone for second player to pick up
if len(s) == 1:
return winer(s[0] > 1)
totalStones = 0
xorValue = 0
for i in range(0, len(s)):
totalStones += s[i]
xorValue ^= s[i]
r'''
If sum of all stones equals the total piles, all piles have a single (1)
stone. For even number of piles, first player will always win.
'''
if totalStones == len(s):
return winer(totalStones % 2 == 0)
r'''
For all other cases, the xor value determines winner. If xor value = 0,
then second player will always win as all piles (stones) can be paired.
'''
print('xorValue={}'.format(xorValue))
return winer(xorValue > 0)
print("{}".format(misereNim([2, 1, 3])))
```
#### File: graph/medium/jeanies_route.py
```python
r'''
https://www.hackerrank.com/challenges/jeanies-route/problem
'''
import os
import sys
#
# Complete the jeanisRoute function below.
#
def jeanisRoute(n, clist, roads):
r'''
    @param n(int):
        Number of cities
@param clist(list):
List of city need to visit
@param roads(list):
List of roads to connect each city
'''
qdict = {} # Quick path: with key as tuple(src, dest);
# value as cost traveling from source city to
# destination city
cdict = {} # Connecting dict
for s, d, w in roads:
qdict[(s, d)] = w
qdict[(d, s)] = w
if s in cdict:
cdict[s].append(d)
else:
cdict[s] = [d]
if d in cdict:
cdict[d].append(s)
else:
cdict[d] = [s]
# Fill-up qdict
for i in range(1, n+1):
if i not in clist:
continue
for j in range(i+1, n+1):
if (i, j) not in qdict:
gcs = sys.maxsize
gnc = None
for nc in cdict[i]:
if (nc, j) in qdict:
cs = qdict[(nc, j)]
#break
else:
cs = lookUp(i, nc, j, cdict, qdict)
#print('\tGot {}->{}={} (from={})'.format(nc, j, cs, i))
if cs < gcs:
gcs = cs
gnc = nc
gcs += qdict[(i, gnc)]
qdict[(i, j)] = gcs
qdict[(j, i)] = gcs
#print('Got {}->{}={}!'.format(i, j, gcs))
# Look up best search path
min_d = sys.maxsize
for nc in clist:
tmp_clist = clist[:]
tmp_clist.remove(nc)
d = visit_cts(nc, tmp_clist, qdict)
if d < min_d:
min_d = d
print("Got minimum distance={} starting from {}!".format(min_d, nc))
return min_d
def lookUp(frm, src, dst, cdict, qdict):
gcs = sys.maxsize
gnc = None
for nc in cdict[src]:
if nc == frm:
continue
if nc == dst:
return qdict[(src, nc)]
elif (nc, dst) in qdict:
return qdict[(src, nc)] + qdict[(nc, dst)]
else:
cs = lookUp(src, nc, dst, cdict, qdict)
if cs < gcs:
gcs = cs
gnc = nc
    if gcs != sys.maxsize:
        # Add the edge to the best neighbor (gnc), not to the loop variable
        # left over from the last iteration, and add it only once.
        gcs += qdict[(src, gnc)]
        qdict[(src, dst)] = gcs
        qdict[(dst, src)] = gcs
        return gcs
    else:
        return gcs
def visit_cts(sc, clist, qdict):
r'''
Visit all cities in <clist> with starting city as <sc>
'''
if len(clist) == 1:
return qdict[(sc, clist[0])]
else:
min_d = sys.maxsize
min_nc = None
for nc in clist:
tmp_clist = clist[:]
tmp_clist.remove(nc)
d = visit_cts(nc, tmp_clist, qdict)
if d < min_d:
min_d = d
min_nc = nc
return qdict[(sc, min_nc)] + min_d
n = 5
clist = [1, 3, 4]
roads = [
(1, 2, 1),
(2, 3, 2),
(2, 4, 2),
(3, 5, 3)
]
''' testing data '''
def read_tc(tn):
if __file__.startswith('./'):
script_name = __file__[2:]
else:
script_name = __file__
tc_fname = "{}.t{}".format(script_name.split('.')[0], tn)
with open(tc_fname) as fh:
n, k = fh.readline().split()
n = int(n)
k = int(k)
clist = list(map(int, fh.readline().strip().split()))
roads = []
for i in range(n-1):
roads.append(list(map(int, fh.readline().rstrip().split())))
return (n, clist, roads)
min_d = jeanisRoute(n, clist, roads)
from pprint import pprint
pprint(min_d)
```
#### File: graph/medium/journey-to-the-moon.py
```python
r'''
https://www.hackerrank.com/challenges/journey-to-the-moon/problem
'''
import math
import os
import random
import re
import sys
class Node:
def __init__(self, v):
self.v = v
self.neighbors = set()
self.visit = False
def addN(self, n):
if n not in self.neighbors:
self.neighbors.add(n)
n.addN(self)
def __hash__(self):
return hash(self.v)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.v == other.v
def n(self):
for n in self.neighbors:
yield n
def dfs(self):
from collections import deque
root = self
root.visit = True
nlist = deque()
nlist.append(root)
vlist = []
while len(nlist) > 0:
node = nlist.popleft()
vlist.append(node.v)
for n in node.n():
if not n.visit:
nlist.append(n)
n.visit = True
return vlist
# Complete the journeyToMoon function below.
def journeyToMoon(n, astronaut):
ndict = {}
cty_list = []
# Create graph
for a, b in astronaut:
if a not in ndict:
ndict[a] = Node(a)
if b not in ndict:
ndict[b] = Node(b)
ndict[a].addN(ndict[b])
# Search disjoin set
for node in ndict.values():
if not node.visit:
cty_list.append(node.dfs())
print('Group-{}: {}'.format(node.v, cty_list[-1]))
# Other distinct countury
for i in range(n):
if i not in ndict:
cty_list.append(set([i]))
print('Total {} unique countries...{}'.format(len(cty_list), cty_list))
# Calculate unique pairs
if len(cty_list) == 1:
return 0
elif len(cty_list) == 2:
return len(cty_list[0]) * len(cty_list[1])
else:
cty_len_list = map(len, cty_list)
psum = cty_len_list[0] * cty_len_list[1]
nsum = cty_len_list[0] + cty_len_list[1]
for i in range(2, len(cty_len_list)):
psum += nsum * cty_len_list[i]
nsum += cty_len_list[i]
return psum
#print("{}".format(journeyToMoon(5, [(0, 1), (2, 3), (0, 4)])))
#print("{}".format(journeyToMoon(4, [(0, 2)])))
import unittest
class FAT(unittest.TestCase):
def setUp(self):
pass
def test_01(self):
tdatas = [
(5, [(0, 1), (2, 3), (0, 4)], 6),
(4, [(0, 2)], 5)
]
for n, astronaut, a in tdatas:
r = journeyToMoon(n, astronaut)
self.assertEqual(a, r, 'Expect={}; Real={}'.format(a, r))
def test_02(self):
tid = [1]
tdatas = []
for id in tid:
with open('journey-to-the-moon.t{}'.format(id), 'r') as fh:
na, pn = fh.readline().strip().split(' ')
astronaut = []
for i in range(int(pn)):
astronaut.append(map(int, fh.readline().split(' ')))
with open('journey-to-the-moon.a{}'.format(id), 'r') as fh2:
tdatas.append((int(na), astronaut, int(fh2.readline())))
for n, astronaut, a in tdatas:
r = journeyToMoon(n, astronaut)
self.assertEqual(a, r, 'Expect={}; Real={}\n{}'.format(a, r, astronaut))
```
#### File: greedy/easy/luck_balance.py
```python
import math
import os
import random
import re
import sys
r'''
https://www.hackerrank.com/challenges/luck-balance/problem
'''
# Complete the luckBalance function below.
def luckBalance(n, k, contests):
luck_sum = 0
important_game_luck_list = []
for l, t in contests:
if t == 0:
luck_sum += l
else:
important_game_luck_list.append(l)
# Sorting in descending order
important_game_luck_list = sorted(important_game_luck_list, reverse=True)
# Add up all luck in losing game
for i in range(min(k, len(important_game_luck_list))):
luck_sum += important_game_luck_list[i]
# Minus back all luck in winning game
if k < len(important_game_luck_list):
for i in important_game_luck_list[k:]:
luck_sum -= i
return luck_sum
import unittest
class FAT(unittest.TestCase):
def setUp(self):
pass
def test_01(self):
self.assertEqual(luckBalance(3, 2, [(5, 1), (1, 1), (4, 0)]), 10)
self.assertEqual(luckBalance(3, 1, [(5, 1), (1, 1), (4, 0)]), 8)
def test_02(self):
real = luckBalance(6, 3, [(5, 1), (2, 1), (1, 1), (8, 1), (10, 0), (5, 0)])
self.assertEqual(real, 29, 'Expect:29; Real:{}'.format(real))
```
#### File: greedy/easy/marcs-cakewalk.py
```python
r'''
https://www.hackerrank.com/challenges/marcs-cakewalk/problem
'''
import math
import os
import random
import re
import sys
# Complete the marcsCakewalk function below.
def marcsCakewalk(calorie):
sorted_calorie = sorted(calorie, reverse=True)
sum_miles = 0
for i in range(len(sorted_calorie)):
sum_miles += math.pow(2, i) * sorted_calorie[i]
return int(sum_miles)
#print("{}".format(marcsCakewalk([1, 3, 2])))
#print("{}".format(marcsCakewalk([5, 10, 7])))
import unittest
class FAT(unittest.TestCase):
def setUp(self):
pass
def test_01(self):
tdatas = [
([1, 3, 2], 11),
([5, 10, 7], 44)
]
for calorie, a in tdatas:
r = marcsCakewalk(calorie)
self.assertEqual(a, r, 'Expect={}; Real={}'.format(a, r))
```
#### File: greedy/easy/maximum-perimeter-triangle.py
```python
r'''
https://www.hackerrank.com/challenges/maximum-perimeter-triangle/problem
'''
import math
import os
import random
import re
import sys
# Complete the maximumPerimeterTriangle function below.
def maximumPerimeterTriangle(sticks):
sideSet = set()
sideDict = {}
mp = 0
mp_sides = []
# Collect data and check Equilateral
for s in sticks:
sideSet.add(s)
if s in sideDict:
if sideDict[s] + 1 >= 3 and s * 3 > mp:
mp = s * 3
mp_sides = [s, s, s]
else:
sideDict[s] = sideDict[s] + 1
else:
sideDict[s] = 1
sideSet = sorted(list(sideSet))
# Check Isosceles
for s, c in sideDict.items():
if c >= 2:
for i in range(sideSet.index(s), -1, -1):
if sideSet[i] == s:
continue
else:
c_mp = (s * 2 + sideSet[i])
if c_mp > mp:
mp = c_mp
mp_sides = [ sideSet[i], s, s]
break
# Check Scalene
sides = recvMP(sideSet, [], len(sideSet) - 1, mp)
if sides:
sides = sorted(sides)
mp = sum(sides)
mp_sides = sides
#mp_sides = map(str, mp_sides)
if mp > 0:
return mp_sides
else:
return [-1]
def recvMP(sideSet, sides, i, mp):
if i < 0:
return None
else:
if len(sides) == 2:
for j in range(i, -1, -1):
if abs(sides[1] - sides[0]) >= sideSet[j]:
return None
else:
if sum(sides) + sideSet[j] > mp:
sides.append(sideSet[j])
return sides
else:
return None
return None
else:
for j in range(i, 0, -1):
nsides = []
nsides.extend(sides)
nsides.append(sideSet[j])
usides = recvMP(sideSet, nsides, j - 1, mp)
if usides:
return usides
return None
print("{}".format(maximumPerimeterTriangle([1, 1, 1, 3, 3])))
print("{}".format(maximumPerimeterTriangle([1, 2, 3])))
print("{}".format(maximumPerimeterTriangle([1, 1, 1, 2, 3, 5])))
print("{}".format(maximumPerimeterTriangle([1, 1, 1, 3, 4, 5])))
```
#### File: implementation/medium/climbing-the-leaderboard.py
```python
r'''
https://www.hackerrank.com/challenges/climbing-the-leaderboard/problem
'''
import math
import os
import random
import re
import sys
# Complete the climbingLeaderboard function below.
def climbingLeaderboard_v1(scores, alice):
results = []
pv = scores[0]
ranks = [pv]
for i in range(1, len(scores)):
cv = scores[i]
if cv != pv:
ranks.append(cv)
pv = cv
pi = len(ranks)
for s in alice:
if s < ranks[-1]:
results.append(len(ranks) + 1)
elif s >= ranks[0]:
results.append(1)
else:
for i in range(1, len(ranks)):
rs = ranks[i]
if rs == s:
results.append(i+1)
pi = i
break
elif rs > s:
continue
else: # rs < s
results.append(i+1)
break
return tuple(results)
def climbingLeaderboard(scores, alice):
print("scores={}".format(scores))
print("alice={}".format(alice))
results = []
# Look for start
pi = 1
ii = None
pv = scores[0]
    for i in range(1, len(scores)):
cv = scores[i]
if cv > alice[0]:
if pv != cv:
pv = cv
pi += 1
continue
elif cv <= alice[0]:
if pv != cv:
pv = cv
pi += 1
ii = i
break
if ii is None:
ii = len(scores) - 1
if alice[0] < scores[-1]:
pi += 1
results.append(pi)
print('{} @ rank {}...(i={},{})'.format(alice[0], pi, ii, scores[ii]))
ai = 1
while pi > 1 and ai < len(alice):
        for i in range(ii, -1, -1):
cv = scores[i]
if cv < alice[ai]:
if pv != cv:
pv = cv
pi -= 1
continue
elif cv >= alice[ai]:
if pv != cv:
pv = cv
pi -= 1
print('{} @ rank {}...(i={},{})'.format(alice[ai], pi, i, cv))
ii = i
ai += 1
results.append(pi)
break
print("i={}; pi={}".format(i, pi))
if i == 0 and alice[ai] >= scores[0]:
results.append(1)
break
return results
print("{}".format(climbingLeaderboard([100, 50, 40, 10, 5], [1, 20, 90, 90, 110])))
import unittest
class FAT(unittest.TestCase):
def setUp(self):
pass
def test_01(self):
tdatas = [
([100, 100, 50, 40, 40, 20, 10], [5, 25, 50, 120], (6, 4, 2, 1)),
([100, 50, 40, 10, 5], [1, 20, 90, 110], (6, 4, 2, 1)),
([100, 50, 40, 10, 5], [1, 20, 90, 90, 110], (6, 4, 2, 2, 1))
]
for scores, alice, a in tdatas:
r = climbingLeaderboard(scores, alice)
self.assertEqual(a, r, "Expr={}; Real={}".format(a, r))
```
#### File: implementation/medium/magic-square-forming.py
```python
import math
import os
import random
import re
import sys
from pprint import pprint
magic_squares = [[[2, 7, 6], [9, 5, 1], [4, 3, 8]],
[[2, 9, 4], [7, 5, 3], [6, 1, 8]],
[[4, 3, 8], [9, 5, 1], [2, 7, 6]],
[[4, 9, 2], [3, 5, 7], [8, 1, 6]],
[[6, 1, 8], [7, 5, 3], [2, 9, 4]],
[[6, 7, 2], [1, 5, 9], [8, 3, 4]],
[[8, 1, 6], [3, 5, 7], [4, 9, 2]],
[[8, 3, 4], [1, 5, 9], [6, 7, 2]]]
# Complete the formingMagicSquare function below.
def formingMagicSquare(s):
mc = -1
for ms in magic_squares:
cs = cost(ms, s)
if mc < 0 or mc > cs:
mc = cs
return mc
def cost(ms, os):
c = 0
for i in range(3):
for j in range(3):
c += abs(ms[i][j] - os[i][j])
return c
def rsum(squares, ri):
return sum(squares[ri])
def csum(squares, ci):
return sum(map(lambda e: e[ci], squares))
def isMagic(squares):
if squares[0][0] + squares[1][1] + squares[2][2] !=15:
return False
if squares[0][2] + squares[1][1] + squares[2][0] !=15:
return False
for i in range(3):
if rsum(squares, i) != 15 or csum(squares, i) != 15:
return False
return True
def l2s(alist):
squares = []
row = []
for i in range(9):
row.append(alist[i])
if (i + 1) % 3 == 0:
squares.append(row)
row = []
return squares
def coltMS():
r'''
Generate magic square collection
'''
magic_squares = []
all_squares_as_list = []
perm([], all_squares_as_list)
for alist in all_squares_as_list:
square = l2s(alist)
#pprint(square)
if isMagic(square):
magic_squares.append(square)
return magic_squares
def perm(clist, tlist):
if len(clist) == 9:
tlist.append(clist)
else:
for i in range(1, 10):
if i not in clist:
nlist = []
nlist.extend(clist)
nlist.append(i)
perm(nlist, tlist)
#magic_squares = coltMS()
#print('Total {} magic squares being collected:'.format(len(magic_squares)))
#for msquare in magic_squares:
# pprint(msquare)
data = [[5,3,4],[1,5,8],[6,4,2]]
print("{}".format(formingMagicSquare(data)))
```
#### File: hackerrank/others/all_subset_of_a_set.py
```python
import sys
r'''
https://www.youtube.com/watch?v=bGC2fNALbNU
Reference
* http://localhost/jforum/posts/list/2119.page
* https://www.ptt.cc/bbs/Eng-Class/M.1334215115.A.75F.html (數學的英文唸法)
'''
def find_all_subset(a_set):
results = [[], a_set]
for i in range(1, len(a_set)):
results.extend(n_pick_m(a_set, i))
return results
def n_pick_m(a_set, m):
r'''
n pick m will have n! / (m)!*(n-m)!
@param a_Set(list):
Target set with size N
@param m(int):
The number of M to compose the sub set
@return:
A list to contain all possible subset
@see http://localhost/jforum/posts/list/2119.page
'''
all_sub_set_list = []
#n_pick_m_recv(a_set, 0, m, [], all_sub_set_list)
n_pick_m_iter(a_set, m, all_sub_set_list)
return all_sub_set_list
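# Illustrative check (doctest-style, hypothetical input): picking 2 of 3
# elements yields 3!/(2!*(3-2)!) = 3 subsets.
#   n_pick_m(['a', 'b', 'c'], 2)
#   -> [['a', 'b'], ['a', 'c'], ['b', 'c']]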
class NMIter:
def __init__(self, id_list, n):
self.id_list = id_list
self.n = n
self.m = len(id_list)
self.is_done = False
self.jp = self.m - 2
def __iter__(self):
return self
def __next__(self):
if self.is_done:
raise StopIteration()
else:
m = self.m
n = self.n
tv = self.id_list[:]
if self.id_list[-1] + 1 < self.n:
self.id_list[-1] += 1
else:
self.is_done = True
cjp = self.jp
while cjp >= 0:
if self.id_list[cjp] + 1 + (self.m - 1 - cjp) < n:
self.jp = cjp
self.is_done = False
self.id_list[cjp] += 1
npv = self.id_list[cjp] + 1
for k in range(cjp+1, self.m):
self.id_list[k] = npv
npv += 1
break
cjp -= 1
return tv
def n_pick_m_iter(a_set, m, ct):
r'''
Iterative version of N pick M
@param a_set(list):
Set with size as N
@param m(int):
The number of M to compose the sub set
@param ct(list):
Collection to hold all subset
'''
if m == 1:
for e in a_set:
ct.append([e])
elif m == len(a_set):
ct.append(a_set)
elif m < len(a_set):
id_list = []
for i in range(m):
id_list.append(i)
jp = m - 2
nm_iter = NMIter(id_list, len(a_set))
for iv in nm_iter:
ct.append([a_set[i] for i in iv])
else:
#raise Exception('M is greater than N!')
return
def n_pick_m_recv(a_set, dep, rm, ss, ct):
r'''
Recursive version of N pick M
@param a_set(list):
Set with size as N
@param dep(int):
Depth of process
@param rm(int):
Remain element from target M
@param ss(list):
Current sub set
@param ct(list):
Collection to hold all subset
'''
if rm == 0:
ct.append(ss)
return
elif dep == len(a_set):
return
else:
# Skip current depth
n_ss = ss[:]
n_pick_m_recv(a_set, dep+1, rm, n_ss, ct)
# Pickup element at current depth
n_ss = ss[:]
n_ss.append(a_set[dep])
n_pick_m_recv(a_set, dep+1, rm-1, n_ss, ct)
def main():
a_set = sys.argv[1:]
print("Target set={}".format(a_set))
results = find_all_subset(a_set)
print("Target set={} has {:,d} subset:".format(a_set, len(results)))
for ss in sorted(results, key=lambda e:len(e)):
print("{{{}}}".format(','.join(ss)))
print("")
if __name__ == '__main__':
main()
```
#### File: search/easy/sherlock_and_array.py
```python
import sys
r'''
https://www.hackerrank.com/challenges/sherlock-and-array/problem
'''
def solve(a):
# Complete this function
return rsolve(a)
def rsolve(a):
if len(a) == 1:
return 'YES'
elif len(a) == 2:
return 'NO'
elif len(a) == 3:
return 'YES' if a[0] == a[2] else 'NO'
else:
        return _rsolve(a, len(a)//2, 0, len(a) - 1)
def _rsolve(a, p, s, e):
lsum = sum(a[:p])
rsum = sum(a[p+1:])
if lsum == rsum:
return 'YES'
elif lsum > rsum:
# Move left
e = p - 1
if e < s:
return 'NO'
        m = (e + s) // 2
return _rsolve(a, m, s, e)
else:
# Move right
s = p + 1
if s > e:
return 'NO'
        m = (s + e) // 2
return _rsolve(a, m, s, e)
print(solve([1, 2, 3]))
print(solve([1, 2, 3, 3]))
```
#### File: strings/easy/two-strings.py
```python
r'''
https://www.hackerrank.com/challenges/two-strings/problem
'''
import math
import os
import random
import re
import sys
# Complete the twoStrings function below.
def twoStrings_v1(s1, s2):
if len(s1) == 0 or len(s2) == 0:
return 'NO'
cset= set()
for c in s1:
cset.add(c)
for c in s2:
cset.add(c)
    return 'YES' if len(set(s1)) + len(set(s2)) != len(cset) else 'NO'  # compare unique character counts, not raw lengths
def twoStrings(s1, s2):
if len(s1) >= len(s2):
ls = s1
ss = s2
else:
ls = s2
ss = s1
cset = set()
for c in ls:
cset.add(c)
for c in ss:
if c in cset:
return 'YES'
return 'NO'
import unittest
class FAT(unittest.TestCase):
def setUp(self):
pass
def test_01(self):
tdatas = [
('hello', 'world', 'YES'),
('hi', 'world', 'NO')
]
for s1, s2, a in tdatas:
r = twoStrings(s1, s2)
self.assertEqual(a, r, 'Exp={}; Real={}'.format(a, r))
```
#### File: strings/medium/richie-rich.py
```python
r'''
https://www.hackerrank.com/challenges/richie-rich/problem
'''
import math
import os
import random
import re
import sys
# Complete the highestValuePalindrome function below.
def highestValuePalindrome(s, n, k):
    # Greedy sketch: first force a palindrome (tracking changed positions),
    # then spend leftover changes upgrading digit pairs (and any middle digit) to '9'.
    s = list(s)
    changed = [False] * n
    for i in range(n // 2):
        j = n - 1 - i
        if s[i] != s[j]:
            s[i] = s[j] = max(s[i], s[j])
            changed[i] = True
            k -= 1
    if k < 0:
        return '-1'
    for i in range(n // 2):
        if s[i] != '9':
            cost = 1 if changed[i] else 2
            if k >= cost:
                s[i] = s[n - 1 - i] = '9'
                k -= cost
    if n % 2 == 1 and k >= 1:
        s[n // 2] = '9'
    return ''.join(s)
```
#### File: algprac/others/knapsack.py
```python
r'''
https://www.youtube.com/watch?v=xOlhR_2QCXY&index=68&list=WL&t=0s
'''
kk_sol_mem = {} # key as tuple(item index, rest weight); value as solution
def kk_sol(wlist, vlist, mw):
r'''
Solution to Knapsack problem
@param wlist(list):
List to hold weight of items
@param vlist(list):
List to hold value of items
@param mw(int):
Maximum weight the knapsack can stand
@return
List of item to hold with maximum value sum
'''
global kk_sol_mem
kk_sol_mem.clear()
return recv_kk_sol(0, mw, wlist, vlist, [])
def calc_values(slist, vlist):
r'''
Calculate the value sum of selsected item(s)
@param slist(list):
List of selection for each item
@param vlist(list):
List to hold value of items
@return
        The summed value of selected item(s)
'''
vsum = 0
for i in range(len(slist)):
if slist[i] > 0 :
vsum += vlist[i]
return vsum
def recv_kk_sol(rn, rw, wlist, vlist, slist):
r'''
@param rn(int):
Current item index
@param rw(int):
Rest weight the knapsack can stand
@param slist(list):
Selection item list
'''
isol = kk_sol_mem.get((rn, rw), None)
lb_sol = None
if isol is not None:
return isol
elif rn >= len(wlist) or rw == 0:
return slist
elif wlist[rn] > rw:
n_slist = slist[:]
n_slist.append(0)
lb_sol = recv_kk_sol(rn+1, rw, wlist, vlist, n_slist)
else:
n_slist = slist[:]
n_slist.append(1)
t1_sol = recv_kk_sol(rn+1, rw - wlist[rn], wlist, vlist, n_slist)
n_slist = slist[:]
n_slist.append(0)
t2_sol = recv_kk_sol(rn+1, rw, wlist, vlist, n_slist)
if calc_values(t1_sol, vlist) >= calc_values(t2_sol, vlist):
lb_sol = t1_sol
else:
lb_sol = t2_sol
kk_sol_mem[(rn, rw)] = lb_sol
return lb_sol
import unittest
class FAT(unittest.TestCase):
def test_d1(self):
wlist = [1, 2, 4, 2, 5]
vlist = [5, 3, 5, 3, 2]
mw = 10
slist = kk_sol(wlist, vlist, mw)
elist = [1, 1, 1, 1, 0]
print("Selection list={}".format(''.join(map(str, slist))))
self.assertEqual(''.join(map(str, elist)), ''.join(map(str, slist)), "Something wrong")
def test_d2(self):
wlist = [1, 2, 4, 2, 5, 3, 2]
vlist = [5, 3, 5, 3, 2, 10, 5]
mw = 10
slist = kk_sol(wlist, vlist, mw)
elist = [1, 1, 0, 1, 0, 1, 1]
print("Selection list={} with maximum value={:,d}".format(''.join(map(str, slist)), calc_values(slist, vlist)))
self.assertEqual(''.join(map(str, elist)), ''.join(map(str, slist)), "Something wrong")
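# Illustrative run (hypothetical data): items weigh [2, 3, 4] with values
# [3, 4, 5]; under max weight 5 the best pick is the first two items.
#   kk_sol([2, 3, 4], [3, 4, 5], 5)
#   -> [1, 1] (the selection list stops once the knapsack is exactly full)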
``` |
{
"source": "johnklee/ff_crawler",
"score": 2
} |
#### File: Issue2/purifier/text_extractor.py
```python
import sys
import importlib
import os
import inspect
from importlib import util as importlib_util
from .logb import getLogger
# from .pdf2text import simple_fact as pdf_sfact
from .html2text import simple_fact as html_sfact
from .plain2text import simple_fact as pln_sfact
################################
# Constants
################################
MODU_PATH = os.path.dirname(__file__) if os.path.dirname(__file__) else './'
''' Path of current module '''
################################
# Class Definition
################################
class TEAgent:
ERR_MSG_MTYPE_NOT_SUPPORT = 'Content type={mtype} is not supported yet!'
''' Error message for unsupported MIME'''
DEFAULT_RST = {'title': '', 'text': '', 'te_suc': False}
def __init__(self, ext_title=False, disable_policy=False, policy_path=None):
r'''
Constructor
:param ext_title: True to extract title; False otherwise
:param disable_policy: True to disable loading policy
'''
self.logger = getLogger(os.path.basename(__file__))
self.handlers = {
'text/html': html_sfact(ext_title=ext_title),
# 'application/pdf': pdf_sfact(ext_title=ext_title),
'text/plain': pln_sfact(ext_title=ext_title)
} # key as Media type; value as corresponding handler
if not disable_policy:
if policy_path is None:
policy_path = os.path.join(os.path.abspath(MODU_PATH), 'policy')
self.load_policy(policy_path)
def load_policy(self, policy_path, namespace=None, target_policy_names=None):
r'''
Loading policy stored in a given folder
:param policy_path: Path of folder to store policy file
:param namespace: Namespace used to control the import path
:param target_policy_names: If given, only the policy module name exist in here will be loaded.
:return:
Number of policy file being loaded
'''
if os.path.isdir(policy_path):
pc = 0
for pf in list(filter(lambda f: f.endswith('.py') and f.startswith('policy'), os.listdir(policy_path))):
if target_policy_names and pf.split('.')[0] not in target_policy_names:
self.logger.warning('Ignore {}!'.format(pf))
continue
self.logger.debug('Loading {}...'.format(pf))
try:
module_name = 'purifier.policy{}.{}'.format('' if namespace is None else ".{}".format(namespace), pf.split('.')[0])
spec = importlib_util.spec_from_file_location(module_name, os.path.join(policy_path, pf))
module = importlib_util.module_from_spec(spec)
spec.loader.exec_module(module)
for po, pn in list(filter(lambda t: callable(t[0]) and not inspect.isclass(t[0]), list(map(lambda n: (getattr(module, n), n), dir(module))))):
if hasattr(po, 'url_ptn'):
self.logger.debug('\tRegister {}'.format(po.url_ptn))
po.module_name = module_name
po.policy_name = pn
self.handlers[po.mime].regr(po.url_ptn, po)
pc += 1
except:
self.logger.exception('Fail to load policy from {}!'.format(pf))
return pc
else:
            self.logger.warning('Policy folder={} does not exist!'.format(policy_path))
return -1
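    # Illustrative policy shape (a hypothetical module in the policy folder;
    # all names here are assumptions): any callable exposing url_ptn and mime
    # attributes gets registered with the matching MIME handler above.
    #   def policy_example(url, content, do_ext_link):
    #       ...
    #   policy_example.url_ptn = r'https://example\.com/.*'
    #   policy_example.mime = 'text/html'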
def parse(self, mtype, url, content, do_ext_link=False):
r'''
Parse the given content to do text extraction
:param mtype: Content type in string. e.g.: 'text/html'.
:param url: The source URL
:param content: The corresponding content.
:param do_ext_link: True to extract URL link from content (default:False)
:return
tuple(is_success, extraction result, reason)
'''
try:
mtype = mtype.split(';')[0].strip()
handler = self.handlers.get(mtype, None)
if handler:
try:
extract_rst = handler(url, content, do_ext_link)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
return (False, TEAgent.DEFAULT_RST, {'reason': handler.reason(), 'err': "{}: {}".format(exc_type, exc_value)})
if isinstance(extract_rst, dict) and 'title' not in extract_rst:
extract_rst['title'] = ''
if (isinstance(extract_rst, dict) and extract_rst.get('te_suc', True)) or (isinstance(extract_rst, str) and extract_rst):
return (True, extract_rst, {'reason': handler.reason()})
else:
return (False, extract_rst, {'reason': handler.reason(), 'err': 'Empty TE' if not handler.err_msg else handler.err_msg})
else:
self.logger.info("Use default agent...")
return (False, TEAgent.DEFAULT_RST, {'reason': '?', 'err': TEAgent.ERR_MSG_MTYPE_NOT_SUPPORT.format(mtype=mtype, url=url)})
except:
self.logger.exception('Fail to parse content from URL={}!'.format(url))
exc_type, exc_value, exc_traceback = sys.exc_info()
return (False, TEAgent.DEFAULT_RST, {'reason': '?', 'err': "{}: {}".format(exc_type, exc_value)})
```
#### File: research/Issue2/utils.py
```python
import requests as reqlib
import os
import re
import random
import time
import pickle
import abc
import hashlib
import threading
from urllib.parse import urlparse
from purifier import TEAgent
from purifier.logb import getLogger
from enum import IntEnum
from typing import Tuple, List, Dict, Optional
class ScraperTimeout(Exception):
def __init__(self, ex):
self.ex = ex
def __str__(self):
return f"Timeout: {self.ex}"
class ScraperNot200(Exception):
def __init__(self, sc):
self.sc = sc
def __str__(self):
return f"Unexpected Status Code={self.sc}!"
class UnsupportedMIME(Exception):
def __init__(self, mime):
self.mime = mime
def __str__(self):
return f"Unsupported MIME={self.mime}!"
class Scraper(metaclass=abc.ABCMeta):
@abc.abstractmethod
def get(self, url):
pass
class ReqScraper(object):
def __init__(self,
page_cache_path="page_caches",
headers={'User-Agent': 'Mozilla/5.0'},
skip_cache=False,
supported_mime_set={"text/html"}):
self.page_cache_path = page_cache_path
if not os.path.isdir(self.page_cache_path):
os.makedirs(self.page_cache_path)
self.headers = headers
self.logger = getLogger(os.path.basename(self.__class__.__name__))
self.skip_cache = skip_cache
self.supported_mime_set = supported_mime_set
def _get_cache_path(self, url):
test_url_host = urlparse(url).netloc
url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
cache_file_name = f"{test_url_host}_{url_md5}.txt"
cache_file_path = os.path.join(self.page_cache_path, cache_file_name)
return cache_file_path
def _del_from_cache(self, url):
cache_file_path = self._get_cache_path(url)
if os.path.isfile(cache_file_path):
self.logger.warning("Removing cache file={cache_file_path}...")
os.remove(cache_file_path)
def _get_from_cache(self, url):
cache_file_path = self._get_cache_path(url)
if os.path.isfile(cache_file_path):
self.logger.debug(f"Return content of {url} from cache...")
with open(cache_file_path, 'r', encoding='utf8') as fo:
return fo.read()
return None
def _save2cache(self, url, html_content):
cache_file_path = self._get_cache_path(url)
with open(cache_file_path, 'w', encoding='utf8') as fw:
fw.write(html_content)
def get(self, url):
if not self.skip_cache:
cache_text = self._get_from_cache(url)
if cache_text is not None:
return cache_text
self.logger.debug(f"Crawling {url}...")
try:
resp = reqlib.get(url, headers=self.headers, timeout=(5, 10))
if resp.ok:
mime = resp.headers['content-type'].split(';')[0].strip()
self.logger.debug(f"URL={url} with MIME={mime}...")
if mime.lower() not in self.supported_mime_set:
raise UnsupportedMIME(mime)
self._save2cache(url, resp.text)
return resp.text
else:
raise ScraperNot200(resp.status_code)
except Exception as e:
raise ScraperTimeout(e)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class ThreadState(IntEnum):
STOPPED = 0
RUNNING = 1
STOPPING = 2
class CrawlAgent(object):
def __init__(self, name, throttling_range=(1, 2)):
self.rs = ReqScraper(page_cache_path=f"{name}_cache")
self.et = TEAgent(
policy_path="policy",
disable_policy=True,
ext_title=True
)
self.logger = getLogger(os.path.basename(self.__class__.__name__))
self.throttling_range = throttling_range
def obsolete_cache(self, url):
self.rs._del_from_cache(url)
def handle(self, url:str, skip_throttling:bool=False) -> Tuple[str, str, List[str]]:
try:
            if not skip_throttling:  # throttle unless the caller explicitly skips it
wait_in_sec = random.uniform(*self.throttling_range)
self.logger.debug(f"throttling wait {wait_in_sec}s...")
time.sleep(wait_in_sec)
url_content_html = self.rs.get(url)
is_succ, rst, handler = self.et.parse(
"text/html",
url,
url_content_html,
do_ext_link=True
)
            # Both branches returned the same tuple, so a single return suffices.
            return (rst['title'], rst['text'], rst['all_links'])
except ScraperNot200 as e:
self.logger.warning(f"Fail to handle URL={url}: {str(e)}")
return None, None, None
except UnsupportedMIME as e:
self.logger.warning(f"Fail to handle URL={url}: {str(e)}")
return None, None, None
except ScraperTimeout as e:
time.sleep(2)
self.logger.warning(f"Fail to handle URL={url}: {str(e)}")
return None, None, None
class ExplorerWorker(threading.Thread):
def __init__(
self,
name:str,
url_ptn:str,
src_url:str,
test_run:int=-1,
page_saved_dir:Optional[str]=None):
super(ExplorerWorker, self ).__init__(name = name)
self.name = name
self.url_ptn = url_ptn
self.src_url = src_url
self.test_run = test_run
self.ca = CrawlAgent(name)
self.pc_dict = self._get_pc_dict()
''' Processed result cache: Key as URL; value as bool (True means this URL is crawled successfully)'''
self.state = ThreadState.STOPPED
''' Thread state: 0-> stopped; 1-> running; 2-> stopping'''
self.logger = getLogger(os.path.basename(self.__class__.__name__))
''' Logger object '''
self.page_saved_dir = page_saved_dir if page_saved_dir is not None else f"{self.name}_pages_output"
''' Path or directory to save dump page'''
self.stop_signal = f"STOP_{self.name}"
''' Stop signal file '''
if not os.path.isdir(self.page_saved_dir):
os.makedirs(self.page_saved_dir)
def _get_output_page_path(self, url):
url_host = urlparse(url).netloc
url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
page_file_name = f"{url_host}_{url_md5}.txt"
page_file_path = os.path.join(self.page_saved_dir, page_file_name)
return page_file_path
def _get_pc_serialized_file(self) -> str:
return f"{self.name}_pc_dict.pkl"
def _get_pc_dict(self) -> Dict[str, bool]:
pkl_file = self._get_pc_serialized_file()
if os.path.isfile(pkl_file):
with open(pkl_file, 'rb') as fo:
return pickle.load(fo)
else:
return {}
def _serialized(self):
pkl_file = self._get_pc_serialized_file()
with open(pkl_file, 'wb') as fo:
pickle.dump(self.pc_dict, fo)
def run(self):
self.state = ThreadState.RUNNING
url_queue = [self.src_url]
pc = sc = fc = oc = 0
while self.state == ThreadState.RUNNING and url_queue:
if os.path.isfile(self.stop_signal):
os.remove(self.stop_signal)
self.logger.warning("Receive STOP signal!")
break
url = url_queue.pop(0)
pc += 1
if url not in self.pc_dict:
# New URL
self.logger.debug(f"Handling URL={url}...")
title, content, collected_urls = self.ca.handle(url)
if content is None:
self.pc_dict[url] = False
fc += 1
else:
if url != self.src_url:
self.pc_dict[url] = True
sc += 1
self.logger.info(bcolors.BOLD + f"Completed URL={url} ({len(url_queue):,d}/{pc:,d})" + bcolors.ENDC)
next_level_urls = list(filter(lambda u: re.match(self.url_ptn, u) is not None and "#" not in u, collected_urls))
if next_level_urls:
self.logger.debug(f"\tCollected {len(next_level_urls)} next level URL(s)")
url_queue.extend(list(set(next_level_urls) - set(url_queue)))
if content and "?" not in url:
page_output_path = self._get_output_page_path(url)
with open(page_output_path, 'w', encoding='utf8') as fw:
fw.write(f"{url}\n\n")
fw.write(f"{title}\n\n")
fw.write(f"{content}")
self.logger.debug(f"\tSaved page to {page_output_path}!")
else:
# Old URL
if not self.pc_dict[url]:
self.logger.info(f"Skip broken URL={url} in the past...")
continue
title, content, collected_urls = self.ca.handle(url, skip_throttling=True)
if collected_urls:
next_level_urls = list(filter(lambda u: re.match(self.url_ptn, u) is not None, collected_urls))
url_queue.extend(list(set(next_level_urls) - set(url_queue)))
oc += 1
self.logger.info(f"URL={url} is already handled...({len(url_queue):,d}/{pc:,d})")
continue
if self.test_run > 0:
if (sc + fc) > self.test_run:
self.logger.info(f"Exceed test_run={self.test_run} and therefore stop running...")
break
if pc % 1000 == 0:
self.logger.info(bcolors.OKBLUE + bcolors.BOLD + f"{pc} URL completed: sc={sc:,d}; fc={fc:,d}; oc={oc:,d}\n" + bcolors.ENDC)
self._serialized()
self.ca.obsolete_cache(self.src_url)
url_queue.append(self.src_url)
self.logger.warning(f"Serialized explorer result (name={self.name})...")
self._serialized()
self.logger.warning(f"Explorer is stopped! (name={self.name})...")
self.state = ThreadState.STOPPED
def stop(self):
self.logger.warning(f"Stopping explorer worker (name={self.name})...")
if self.state == ThreadState.RUNNING:
self.state = ThreadState.STOPPING
while self.state != ThreadState.STOPPED:
time.sleep(1)
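# A minimal usage sketch (illustrative names/URLs, not part of the repo):
#   worker = ExplorerWorker(name='demo',
#                           url_ptn=r'https://example\.com/.*',
#                           src_url='https://example.com/',
#                           test_run=10)
#   worker.start()   # crawl until test_run pages are handled
#   worker.join()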
``` |
{
"source": "johnklee/fpu",
"score": 3
} |
#### File: fpu/examples/example1.py
```python
import sys
import re
sys.path.insert(0, '../') # noqa
from fp import * # noqa
print('===== fpu package example =====\n')
# Imperative programming
class EmailValidation:
emailPtn = re.compile("^[a-z0-9._%+-]+@[a-z0-9.-]+\\.[a-z]{2,4}$")
def __init__(self):
pass
def logError(self, msg):
print('Error message logged: {}'.format(msg))
def sendVerificationMail(self, mailAddr):
print('Mail sent to {}'.format(mailAddr))
@staticmethod
def validate(mailAddr):
if mailAddr is None:
return Result.failure("Email must not be null")
elif len(mailAddr) == 0:
return Result.failure("Email must not be empty")
elif EmailValidation.emailPtn.match(mailAddr):
return Result.success()
else:
return Result.failure("Email {} is invalid".format(mailAddr))
def execute(self, mailAddr):
rst = self.validate(mailAddr)
if isinstance(rst, Success):
self.sendVerificationMail(mailAddr)
else:
self.logError(rst.message)
emailPtn = re.compile("^[a-z0-9._%+-]+@[a-z0-9.-]+\\.[a-z]{2,4}$")
# FP style implementation
class EmailValidationFP:
def __init__(self):
self.succEft = self.sendVerificationMail
self.failEft = self.logError
def logError(self, msg):
print('Error message logged: {}'.format(msg))
def sendVerificationMail(self, mailAddr):
print('Mail sent to {}'.format(mailAddr))
def validate(self, mailAddr):
return Case.match(Case.default(Result.success(mailAddr)),
Case.mcase(Supplier(lambda s: s is None, mailAddr), Result.failure('Email is None')),
Case.mcase(Supplier(lambda s: len(s) == 0, mailAddr), Result.failure('Email is empty')),
Case.mcase(Supplier(lambda s: not emailPtn.match(s), mailAddr), Result.failure('Email {} is invalid'.format(mailAddr))))
def execute(self, mailAddr):
self.validate(mailAddr).bind(self.sendVerificationMail, self.logError)
emailVal1 = EmailValidation()
emailVal2 = EmailValidationFP()
emailVal2.execute("<EMAIL>")
emailVal2.execute(None)
emailVal2.execute("")
emailVal2.execute("<EMAIL>")
```
#### File: tests/unit/test_flist.py
```python
import unittest
import sys
import os
import re
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fpu'))) # noqa
from fp import * # noqa
from flist import * # noqa
#####################
# Testing Class
#####################
class GFTestCase(unittest.TestCase):
r'''
Test Case(s) of global function(s) from flist.py
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_api_reduce(self):
r'''
Testing API List.reduce
'''
alist = fl(2, 4, 6, 8, 10)
self.assertEqual(30, alist.reduce(lambda a, b: a + b))
self.assertEqual(31, alist.reduce(lambda a, b: a + b, 1))
def test_api_foldLeft(self):
r'''
Testing API List.foldLeft
'''
alist = fl(1, 2, 3, 4, 5)
self.assertEqual('012345', alist.foldLeft(0, lambda a, e: "{}{}".format(a, e)))
self.assertEqual(15, alist.foldLeft(0, lambda a, e: a + e))
self.assertEqual(120.0, alist.foldLeft(1.0, lambda a, e: a * e))
def test_gapi_fl(self):
r'''
Testing global API:fl to create object of List
'''
alist = fl(1, 2, 3)
self.assertEqual('[1, 2, 3, NIL]', str(alist))
self.assertEqual(3, alist.size())
self.assertEqual(3, alist.length())
self.assertEqual(1, alist.head())
self.assertEqual('[2, 3, NIL]', str(alist.tail()))
def test_gapi_concat(self):
r'''
Testing global API:concat to concat two list
'''
list1 = fl(1, 2, 3)
list2 = fl(4, 5, 6)
list3 = concat(list1, list2)
self.assertEqual('[1, 2, 3, 4, 5, 6, NIL]', str(list3))
def test_api_exists(self):
r'''
Testing List.exists
'''
alist = fl(1, 2, 3)
self.assertTrue(alist.exists(lambda e: e == 1))
self.assertFalse(alist.exists(lambda e: e == 5))
def test_api_drop(self):
r'''
Testing List.drop
'''
alist = fl(1, 2, 3, 4, 5)
nlist = alist.drop(3)
self.assertEqual('[4, 5, NIL]', str(nlist))
def test_api_dropWhile(self):
r'''
Testing List.dropWhile
'''
alist = fl(range(10)).reverse() # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, NIL]
nlist = alist.dropWhile(lambda e: e < 5)
self.assertEqual('[5, 6, 7, 8, 9, NIL]', str(nlist))
def test_api_init(self):
r'''
Testing List.init
'''
alist = fl(range(10)).reverse() # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, NIL]
nlist = alist.init()
self.assertEqual('[0, 1, 2, 3, 4, 5, 6, 7, 8, NIL]', str(nlist))
def test_api_filter(self):
r'''
Testing List.filter
'''
alist = fl(range(10)).reverse() # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, NIL]
nlist = alist.filter(lambda e: e % 2 == 0)
self.assertEqual('[0, 2, 4, 6, 8, NIL]', str(nlist))
def test_api_map(self):
r'''
Testing List.map
'''
alist = fl(1, 2, 3)
nlist = alist.map(lambda e: e * 2)
self.assertEqual('[2, 4, 6, NIL]', str(nlist))
def test_api_flatMap(self):
r'''
Testing List.flatMap
'''
alist = fl(1, 2, 3)
nlist = alist.flatMap(lambda e: fl(e, e * 2))
self.assertEqual('[1, 2, 2, 4, 3, 6, NIL]', str(nlist))
def test_uapi_fsum(self):
r'''
Testing utility function fsum
'''
alist = fl(range(10))
self.assertEqual(45, fsum(alist))
self.assertEqual(0, fsum(fl()))
def test_uapi_fproduct(self):
r'''
Testing utility function fproduct
'''
alist = fl(1, 2, 3)
self.assertEqual(6.0, fproduct(alist))
self.assertEqual(1.0, fproduct(fl()))
def test_uapi_flatten(self):
r'''
Testing utility function flatten
'''
alist = fl()
alist = alist.cons(fl(4, 5, 6))
alist = alist.cons(fl(1, 2, 3))
self.assertEqual('[[1, 2, 3, NIL], [4, 5, 6, NIL], NIL]', str(alist))
nlist = flatten(alist)
self.assertEqual('[1, 2, 3, 4, 5, 6, NIL]', str(nlist))
def test_feat_forin(self):
alist = fl(1, 2, 3)
i = 1
for e in alist:
self.assertEqual(i, e)
i += 1
``` |
{
"source": "johnklee/learn_dp_from_bad_smell_design",
"score": 3
} |
#### File: learn_dp_from_bad_smell_design/smarthome_migration/lamp_device.py
```python
import device_api
from log_utils import get_logger
from termcolor import colored, cprint
class LampControllerV1(device_api.DeviceInterface):
def __init__(self, name):
self.name = name
self.log = get_logger(self)
self.on_state = False
def is_on(self):
return self.on_state
def turn_on(self):
if not self.on_state:
self.log.info('\tTurn on lamp(%s)', self.name)
self.on_state = True
def turn_off(self):
if self.on_state:
self.log.info('\tTurn off lamp(%s)', self.name)
self.on_state = False
class LEDLightController:
def __init__(self, name):
self.name = name
self.log = get_logger(self)
self.on_state = False
def is_on(self):
return self.on_state
def on(self, color='green'):
if not self.on_state:
self.log.info('\tTurn on LED light(%s)', self.name)
cprint('LED Light blinking...', color, attrs = ['blink'])
self.on_state = True
def off(self):
if self.on_state:
self.log.info('\tTurn off LED light(%s)', self.name)
self.on_state = False
class LEDLightControllerV2:
def __init__(self, name):
self.name = name
self.log = get_logger(self)
self._state = device_api.PowerState.OFF
def is_on(self):
return self._state == device_api.PowerState.ON
def on(self, color='green'):
if not self.is_on():
self.log.info('\tTurn on LED light(%s)', self.name)
cprint('LED Light blinking...', color, attrs = ['blink'])
self._state = device_api.PowerState.ON
def off(self):
if self.is_on():
self.log.info('\tTurn off LED light(%s)', self.name)
self._state = device_api.PowerState.OFF
```
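`LampControllerV1` honors the `device_api.DeviceInterface` contract (`turn_on`/`turn_off`), while both LED controllers expose `on`/`off` instead — which is the interface mismatch this migration example sets up. Below is a minimal Adapter sketch that would let LED controllers be driven through the lamp-style interface; the adapter class is illustrative and not part of the repo:
```python
class LEDLightAdapter:
    """Adapts an LED controller's on()/off() to turn_on()/turn_off()."""

    def __init__(self, led_controller):
        self._led = led_controller

    def is_on(self):
        return self._led.is_on()

    def turn_on(self):
        # Delegate to the LED controller's native method name.
        self._led.on()

    def turn_off(self):
        self._led.off()


# Client code written against the lamp interface now drives LEDs too:
# device = LEDLightAdapter(LEDLightControllerV2('porch'))
# device.turn_on()
```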
#### File: learn_dp_from_bad_smell_design/smart-pizza-store_factory-part2/taiwan_pizza_recipes.py
```python
import pizza_recipes
class TaiwanCheezePizza(pizza_recipes.Pizza):
def prepare(self):
print('Add Cheeze and bacon')
self.ingredients.append('blue cheeze')
self.ingredients.append('bacon')
def bake(self):
print('Bake in light fire for 1.5 hours')
def cut(self):
print('Cut in 8 pieces')
self.num_piece = 8
def box(self):
print('Use paper box')
self.box_material = 'paper'
class TaiwanGreekPizza(pizza_recipes.Pizza):
def prepare(self):
        print('Add special spices and beef')
self.ingredients.append('special spices')
self.ingredients.append('Taiwan beef')
def bake(self):
        print('Bake in mild fire for 1 hour')
def cut(self):
print('Cut in 8 pieces')
self.num_piece = 8
def box(self):
print('Use paper box')
self.box_material = 'paper'
class TaiwanPepperoniPizza(pizza_recipes.Pizza):
def prepare(self):
print('Add pepperoni and cuttlefish')
self.ingredients.append('pepperoni')
self.ingredients.append('cuttlefish')
def bake(self):
print('Bake in strong fire for 0.8 hours')
def cut(self):
print('Cut in 8 pieces')
self.num_piece = 8
def box(self):
print('Use paper box')
self.box_material = 'paper'
class TaiwanVeggiePizza(pizza_recipes.Pizza):
def prepare(self):
print('Add pickle and crab')
self.ingredients.append('pickle')
self.ingredients.append('crab')
def bake(self):
print('Bake in light fire for 4 hours')
def cut(self):
print('Cut in 8 pieces')
self.num_piece = 8
def box(self):
print('Use paper box')
self.box_material = 'paper'
```
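These recipes subclass `pizza_recipes.Pizza`, which is not included in this excerpt. Judging from the attributes the subclasses touch (`ingredients`, `num_piece`, `box_material`), the part-2 base class plausibly looks like the sketch below; treat it as a reading aid, not the repo's actual file:
```python
import abc


class Pizza(abc.ABC):
    """Plausible shape of the part-2 base class the recipes extend."""

    def __init__(self):
        self.ingredients = []      # filled in by prepare()
        self.num_piece = -1        # set by cut()
        self.box_material = None   # set by box()

    @abc.abstractmethod
    def prepare(self): ...

    @abc.abstractmethod
    def bake(self): ...

    @abc.abstractmethod
    def cut(self): ...

    @abc.abstractmethod
    def box(self): ...
```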
#### File: learn_dp_from_bad_smell_design/smart-pizza-store_factory-part2/test_taiwan_pizza_store.py
```python
import unittest
import taiwan_pizza_factory
import taiwan_pizza_recipes
import pizza_stores
import pytest
from parameterized import parameterized, parameterized_class
class TestPizzaStore(unittest.TestCase):
def setUp(self):
super().setUp()
self.factory = taiwan_pizza_factory.TaiwanFactory()
self.store = pizza_stores.PizzaStore(self.factory)
@parameterized.expand([
('cheeze', taiwan_pizza_recipes.TaiwanCheezePizza),
('greek', taiwan_pizza_recipes.TaiwanGreekPizza),
('pepper', taiwan_pizza_recipes.TaiwanPepperoniPizza),
('veg', taiwan_pizza_recipes.TaiwanVeggiePizza),
])
def test_create_pizza(self, pizza_name, pizza_clz):
pizza_object = self.store.order_pizza(pizza_name)
self.assertTrue(isinstance(pizza_object, pizza_clz))
def test_unknown_pizza(self):
with pytest.raises(taiwan_pizza_factory.UnknownPizzaNameError) as e_info:
self.store.order_pizza('unknown_pizza')
```
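Neither `taiwan_pizza_factory.TaiwanFactory` nor `UnknownPizzaNameError` appears in this excerpt. A sketch consistent with what the tests expect (the four name keys, an error for unknown names) could look like this; `create_pizza` is an assumption about the method `PizzaStore.order_pizza` delegates to:
```python
import taiwan_pizza_recipes


class UnknownPizzaNameError(Exception):
    """Raised when a store orders a pizza the factory does not know."""


class TaiwanFactory:
    _RECIPES = {
        'cheeze': taiwan_pizza_recipes.TaiwanCheezePizza,
        'greek': taiwan_pizza_recipes.TaiwanGreekPizza,
        'pepper': taiwan_pizza_recipes.TaiwanPepperoniPizza,
        'veg': taiwan_pizza_recipes.TaiwanVeggiePizza,
    }

    def create_pizza(self, name):
        try:
            return self._RECIPES[name]()
        except KeyError:
            raise UnknownPizzaNameError(name)
```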
#### File: learn_dp_from_bad_smell_design/smart-pizza-store_factory-part3/pizza_recipes.py
```python
import abc
import pizza_ingredients_factory
class Pizza(abc.ABC):
def __init__(self, ifactory: pizza_ingredients_factory.PizzaIngredientFactory):
self.ifactory = ifactory
self.topping = None
self.dough = None
self.sauce = None
self.cheese = None
self.clams = None
self.num_piece = -1
self.box_material = None
@abc.abstractmethod
def prepare(self):
raise NotImplementedError
@abc.abstractmethod
def bake(self):
raise NotImplementedError
@abc.abstractmethod
def cut(self):
raise NotImplementedError
@abc.abstractmethod
def box(self):
raise NotImplementedError
class CheezePizza(Pizza):
def prepare(self):
self.cheese = self.ifactory.create_cheese()
self.dough = self.ifactory.create_dough()
self.sauce = self.ifactory.create_sauce()
def bake(self):
print('Bake in light fire for 2 hours')
def cut(self):
print('Cut in 6 pieces')
self.num_piece = 6
def box(self):
print('Use paper box')
self.box_material = 'paper'
class GreekPizza(Pizza):
def prepare(self):
        print('Add special spices and beef')
self.cheese = self.ifactory.create_cheese()
self.dough = self.ifactory.create_dough()
self.sauce = self.ifactory.create_sauce()
self.topping = self.ifactory.create_topping()
def bake(self):
        print('Bake in mild fire for 1 hour')
def cut(self):
print('Cut in 8 pieces')
self.num_piece = 8
def box(self):
print('Use plate')
self.box_material = 'plate'
class PepperoniPizza(Pizza):
def prepare(self):
print('Add pepperoni and seafood')
self.cheese = self.ifactory.create_cheese()
self.dough = self.ifactory.create_dough()
self.topping = self.ifactory.create_pepperoni_topping()
self.clams = self.ifactory.create_clams()
def bake(self):
        print('Bake in strong fire for 1 hour')
def cut(self):
print('Cut in 4 pieces')
self.num_piece = 4
def box(self):
print('Use plastic box')
self.box_material = 'plastic'
class VeggiePizza(Pizza):
def prepare(self):
print('Add veggie and bread')
self.cheese = self.ifactory.create_cheese()
self.dough = self.ifactory.create_dough()
self.topping = self.ifactory.create_veggie_topping()
def bake(self):
print('Bake in light fire for 3 hours')
def cut(self):
print('Cut in 8 pieces')
self.num_piece = 8
def box(self):
print('Use paper box')
self.box_material = 'paper'
```
#### File: learn_dp_from_bad_smell_design/smart-pizza-store_factory-part3/taiwan_pizza_ingredients_factory.py
```python
import pizza_recipes
import pizza_ingredients
import pizza_ingredients_factory
class TaiwanIngredientsFactory(pizza_ingredients_factory.PizzaIngredientFactory):
def create_topping(self) -> pizza_ingredients.Topping:
return pizza_ingredients.SpinachTopping()
def create_dough(self) -> pizza_ingredients.Dough:
return pizza_ingredients.ThinCrustDough()
def create_sauce(self) -> pizza_ingredients.Sauce:
return pizza_ingredients.MarinaraSauce()
def create_cheese(self) -> pizza_ingredients.Cheese:
return pizza_ingredients.ReggianCheese()
def create_clams(self) -> pizza_ingredients.Clams:
return pizza_ingredients.FreshClams()
def create_oil(self) -> pizza_ingredients.Oil:
return pizza_ingredients.OliveOil()
```
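In this part-3 design the regional character of a pizza comes from the ingredient factory injected into a generic recipe, rather than from one subclass per region as in part 2. A usage sketch, assuming the `pizza_ingredients` classes referenced above exist as named:
```python
import pizza_recipes
import taiwan_pizza_ingredients_factory

# Inject the regional ingredient factory into a generic recipe.
factory = taiwan_pizza_ingredients_factory.TaiwanIngredientsFactory()
pizza = pizza_recipes.CheezePizza(factory)

pizza.prepare()   # pulls ReggianCheese, ThinCrustDough, MarinaraSauce
pizza.bake()      # prints: Bake in light fire for 2 hours
pizza.cut()
pizza.box()
```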
#### File: learn_dp_from_bad_smell_design/tea-or-coffee_template/starbuzz_coffee.py
```python
from typing import List
_STEP_BOIL_WATER = 'Boil some water'
_STEP_GRIND_COFFEE_BEAN = 'Grind coffee bean'
_STEP_MAKE_DRINK = 'Brew coffee in boiling water'
_STEP_PULL_DRINK = 'Pour drink in cup'
_STEP_ADD_CONDIMENT = 'Add sugar and milk'
class StarBuzzCoffee:
def make(self) -> List[str]:
action_list = []
action_list.append(_STEP_GRIND_COFFEE_BEAN)
action_list.append(_STEP_BOIL_WATER)
action_list.append(_STEP_MAKE_DRINK)
action_list.append(_STEP_PULL_DRINK)
action_list.append(_STEP_ADD_CONDIMENT)
return action_list
```
#### File: learn_dp_from_bad_smell_design/tea-or-coffee_template/starbuzz_tea.py
```python
from typing import List
_STEP_BOIL_WATER = 'Boil some water'
_STEP_MAKE_DRINK = 'Steep tea in boiling water'
_STEP_PULL_DRINK = 'Pour drink in cup'
_STEP_ADD_CONDIMENT = 'Add lemon'
class StarBuzzTea:
def make(self) -> List[str]:
action_list = []
action_list.append(_STEP_BOIL_WATER)
action_list.append(_STEP_MAKE_DRINK)
action_list.append(_STEP_PULL_DRINK)
action_list.append(_STEP_ADD_CONDIMENT)
return action_list
``` |
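The coffee and tea classes duplicate the `make()` skeleton step for step, differing only in an optional preparation step, the brewing line and the condiments — exactly the smell the Template Method pattern removes. A minimal refactor sketch (class and method names are illustrative, not from the repo):
```python
import abc
from typing import List


class CaffeineBeverage(abc.ABC):
    """Template Method: the skeleton is fixed, the varying steps defer."""

    def make(self) -> List[str]:
        actions = self.pre_boil_steps()        # hook: coffee grinds beans
        actions.append('Boil some water')
        actions.append(self.brew())
        actions.append('Pour drink in cup')
        actions.append(self.add_condiments())
        return actions

    def pre_boil_steps(self) -> List[str]:
        return []                              # default: nothing to do

    @abc.abstractmethod
    def brew(self) -> str: ...

    @abc.abstractmethod
    def add_condiments(self) -> str: ...


class Tea(CaffeineBeverage):
    def brew(self) -> str:
        return 'Steep tea in boiling water'

    def add_condiments(self) -> str:
        return 'Add lemon'


class Coffee(CaffeineBeverage):
    def pre_boil_steps(self) -> List[str]:
        return ['Grind coffee bean']

    def brew(self) -> str:
        return 'Brew coffee in boiling water'

    def add_condiments(self) -> str:
        return 'Add sugar and milk'
```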
{
"source": "johnklee/oo_dp_lesson",
"score": 3
} |
#### File: project/tests/test_sum_benchmark.py
```python
from my_sum import sum
def test_sum_benchmark(benchmark):
hundred_one_list = [1] * 100
result = benchmark(sum, hundred_one_list)
assert result == 100
``` |
{
"source": "john-klingner/sense-project",
"score": 3
} |
#### File: john-klingner/sense-project/broadcastnet_rpi_test_code.py
```python
from secrets import secrets # pylint: disable=no-name-in-module
import time
import requests
from adafruit_ble.advertising.standard import ManufacturerDataField
from adafruit_ble.advertising import Advertisement
import adafruit_ble
import sensor_measurement
aio_auth_header = {"X-AIO-KEY": secrets["aio_key"]}
aio_base_url = "https://io.adafruit.com/api/v2/" + secrets["aio_username"]
def byte_string(s):
return "".join("%02x" % b for b in s)
ble = adafruit_ble.BLERadio()
bridge_address = sensor_measurement.device_address
print("This is BroadcastNet bridge:", bridge_address)
print("scanning")
print()
sequence_numbers = {}
# By providing Advertisement as well we include everything, not just specific advertisements.
print(sensor_measurement.SensorMeasurement.match_prefixes)
while True:
for measurement in ble.start_scan(
Advertisement, interval=0.5
):
print(byte_string(bytes(measurement)))
reversed_address = [measurement.address.address_bytes[i] for i in range(5, -1, -1)]
sensor_address = "{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}".format(*reversed_address)
# Skip if we are getting the same broadcast more than once.
group_key = "bridge-{}-sensor-{}".format(bridge_address, sensor_address)
data = [group_key]
for attribute in dir(measurement.__class__):
attribute_instance = getattr(measurement.__class__, attribute)
if issubclass(attribute_instance.__class__, ManufacturerDataField):
values = getattr(measurement, attribute)
if values is not None:
                    data.extend([values, attribute, attribute_instance])
start_time = time.monotonic()
print(data)
# Only update the previous sequence if we logged successfully.
duration = time.monotonic() - start_time
print("Done logging measurement to IO. Took {} seconds".format(duration))
print()
```
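BLE advertises device addresses in little-endian byte order, which is why the bridge walks `address_bytes` from index 5 down to 0 before formatting. A standalone illustration of the two formatting steps, using a made-up address:
```python
address_bytes = bytes([0xEF, 0xBE, 0xAD, 0xDE, 0x32, 0xC8])  # as received

# byte_string() renders the raw little-endian payload:
print("".join("%02x" % b for b in address_bytes))        # efbeadde32c8

# Reversing the six bytes recovers the conventional address order:
reversed_address = [address_bytes[i] for i in range(5, -1, -1)]
print("{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}".format(*reversed_address))
# c832deadbeef
```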
#### File: john-klingner/sense-project/broadcastnet_sense_code.py
```python
import time
import sensor_measurement
from sensors import Sensors
import name
def ToSensorMeasurement(sensors):
measurement = sensor_measurement.SensorMeasurement()
measurement.temperature = sensors.getTemperature()
measurement.pressure = sensors.getBaroPressure()
    measurement.relative_humidity = sensors.getHumidityShort()
measurement.color = sensors.getColors()
measurement.boardId = name.kBoardId
return measurement
print("This is BroadcastNet sensor:", sensor_measurement.device_address)
sensors = Sensors()
while True:
measurement = ToSensorMeasurement(sensors)
print(measurement)
sensor_measurement.broadcast(measurement)
time.sleep(4)
``` |
{
"source": "johnkmj/openai_gym",
"score": 3
} |
#### File: rl/agent/q_table.py
```python
import numpy as np
from rl.agent.base_agent import Agent
class QTable(Agent):
'''
The simplest Q learner - a table,
with epsilon-greedy method and
Bellman equation for value.
'''
def __init__(self, env_spec,
resolution=10,
gamma=0.95, learning_rate=0.1,
init_e=1.0, final_e=0.1, exploration_anneal_episodes=1000,
**kwargs): # absorb generic param without breaking
super(QTable, self).__init__(env_spec)
self.resolution = resolution
self.gamma = gamma
self.learning_rate = learning_rate
self.init_e = init_e
self.final_e = final_e
self.e = self.init_e
self.exploration_anneal_episodes = exploration_anneal_episodes
self.build_table()
def build_table(self):
'''
init the 2D qtable by
bijecting the state space into pixelated, flattened vector
multiplied with
list of possible discrete actions
'''
self.pixelate_state_space(self.resolution)
flat_state_size = self.resolution ** self.env_spec['state_dim']
self.qtable = np.random.uniform(
low=-1, high=1,
size=(flat_state_size, self.env_spec['action_dim']))
return self.qtable
def pixelate_state_space(self, resolution=10):
'''chunk up the state space hypercube to specified resolution'''
state_bounds = self.env_spec['state_bounds']
self.state_pixels = [np.linspace(*sb, num=resolution+1)
for sb in state_bounds]
return self.state_pixels
def flatten_state(self, state):
'''
collapse a hyperdim state by binning into state_pixels
then flattening the pixel_state into 1-dim bijection
'''
val_space_pairs = list(zip(state, self.state_pixels))
pixel_state = [np.digitize(*val_space)
for val_space in val_space_pairs] # binning
flat_state = int("".join([str(ps) for ps in pixel_state]))
return flat_state
def select_action(self, state):
'''epsilon-greedy method'''
if self.e > np.random.rand():
action = np.random.choice(self.env_spec['actions'])
else:
flat_state = self.flatten_state(state)
action = np.argmax(self.qtable[flat_state, :])
return action
def update_e(self):
'''strategy to update epsilon'''
self.e = max(self.e -
(self.init_e - self.final_e) /
float(self.exploration_anneal_episodes),
self.final_e)
return self.e
def update(self, sys_vars):
self.update_e()
def to_train(self, sys_vars):
return True
def train(self, sys_vars):
'''
run the basic bellman equation update
'''
last_exp = self.memory.pop()
state = last_exp['states'][0]
flat_state = self.flatten_state(state)
next_state = last_exp['next_states'][0]
next_flat_state = self.flatten_state(next_state)
action = np.argmax(last_exp['actions'][0]) # from one-hot
reward = last_exp['rewards'][0]
Q_state_action = self.qtable[flat_state, action]
Q_next_state = self.qtable[next_flat_state, :]
Q_next_state_max = np.amax(Q_next_state)
loss = (reward + self.gamma * Q_next_state_max - Q_state_action)
sys_vars['loss'].append(loss)
self.qtable[flat_state, action] = Q_state_action + \
self.learning_rate * loss
return self.qtable
```
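The table lookup hinges on `flatten_state`: each state dimension is binned into pixels with `np.digitize` and the bin numbers are glued together as decimal digits. A worked example of that mapping follows; note that for `resolution >= 10` the digit-concatenation trick can make distinct pixel states collide, which is a limitation of the original scheme rather than of this sketch.
```python
import numpy as np

resolution = 10
state_bounds = [(-2.4, 2.4), (-3.0, 3.0)]      # e.g. two CartPole dims
state_pixels = [np.linspace(lo, hi, num=resolution + 1)
                for lo, hi in state_bounds]

state = [0.1, -1.7]
pixel_state = [np.digitize(v, edges)           # bin index per dimension
               for v, edges in zip(state, state_pixels)]
flat_state = int("".join(str(p) for p in pixel_state))

print(pixel_state, flat_state)                 # [6, 3] -> 63
```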
#### File: openai_gym/rl/experiment.py
```python
import copy
import gym
import json
import matplotlib
import multiprocessing as mp
import warnings
import numpy as np
import platform
import pandas as pd
import os
import sys
import traceback
from keras import backend as K
from os import path, environ
from rl.util import *
from rl.agent import *
from rl.memory import *
from rl.policy import *
from rl.preprocessor import *
# TODO fix mp breaking on Mac,
# except when running -b with the agg backend
# (no GUI rendered, but graphs still save)
# use the non-interactive agg backend on CI and on MacOS
if environ.get('CI') or platform.system() == 'Darwin':
matplotlib.rcParams['backend'] = 'agg'
else:
matplotlib.rcParams['backend'] = 'TkAgg'
np.seterr(all='raise')
warnings.filterwarnings("ignore", module="matplotlib")
GREF = globals()
PARALLEL_PROCESS_NUM = mp.cpu_count()
ASSET_PATH = path.join(path.dirname(__file__), 'asset')
SESS_SPECS = json.loads(open(
path.join(ASSET_PATH, 'sess_specs.json')).read())
PROBLEMS = json.loads(open(
path.join(ASSET_PATH, 'problems.json')).read())
# the keys and their defaults need to be implemented by a sys_var
# the constants (capitalized) are problem configs,
# set in asset/problems.json
REQUIRED_SYS_KEYS = {
'RENDER': None,
'GYM_ENV_NAME': None,
'SOLVED_MEAN_REWARD': None,
'MAX_EPISODES': None,
'REWARD_MEAN_LEN': None,
'epi': 0,
't': 0,
'done': False,
'loss': [],
'total_rewards_history': [],
'explore_history': [],
'mean_rewards_history': [],
'mean_rewards': 0,
'total_rewards': 0,
'solved': False,
}
class Grapher(object):
'''
Grapher object that belongs to a Session
to draw graphs from its data
'''
def __init__(self, session):
import matplotlib.pyplot as plt
plt.rcParams['toolbar'] = 'None' # mute matplotlib toolbar
self.plt = plt
self.session = session
self.graph_filename = self.session.graph_filename
self.subgraphs = {}
self.figure = self.plt.figure(facecolor='white', figsize=(8, 9))
self.figure.suptitle(wrap_text(self.session.session_id))
self.init_figure()
def init_figure(self):
if environ.get('CI'):
return
# graph 1
ax1 = self.figure.add_subplot(
311,
frame_on=False,
title="\n\ntotal rewards per episode",
ylabel='total rewards')
p1, = ax1.plot([], [])
self.subgraphs['total rewards'] = (ax1, p1)
ax1e = ax1.twinx()
ax1e.set_ylabel('exploration rate').set_color('r')
ax1e.set_frame_on(False)
p1e, = ax1e.plot([], [], 'r')
self.subgraphs['e'] = (ax1e, p1e)
# graph 2
ax2 = self.figure.add_subplot(
312,
frame_on=False,
title='mean rewards over last 100 episodes',
ylabel='mean rewards')
p2, = ax2.plot([], [], 'g')
self.subgraphs['mean rewards'] = (ax2, p2)
# graph 3
ax3 = self.figure.add_subplot(
313,
frame_on=False,
title='loss over time, episode',
ylabel='loss')
p3, = ax3.plot([], [])
self.subgraphs['loss'] = (ax3, p3)
self.plt.tight_layout() # auto-fix spacing
self.plt.ion() # for live plot
def plot(self):
'''do live plotting'''
sys_vars = self.session.sys_vars
if environ.get('CI'):
return
ax1, p1 = self.subgraphs['total rewards']
p1.set_ydata(
sys_vars['total_rewards_history'])
p1.set_xdata(np.arange(len(p1.get_ydata())))
ax1.relim()
ax1.autoscale_view(tight=True, scalex=True, scaley=True)
ax1e, p1e = self.subgraphs['e']
p1e.set_ydata(
sys_vars['explore_history'])
p1e.set_xdata(np.arange(len(p1e.get_ydata())))
ax1e.relim()
ax1e.autoscale_view(tight=True, scalex=True, scaley=True)
ax2, p2 = self.subgraphs['mean rewards']
p2.set_ydata(
sys_vars['mean_rewards_history'])
p2.set_xdata(np.arange(len(p2.get_ydata())))
ax2.relim()
ax2.autoscale_view(tight=True, scalex=True, scaley=True)
ax3, p3 = self.subgraphs['loss']
p3.set_ydata(sys_vars['loss'])
p3.set_xdata(np.arange(len(p3.get_ydata())))
ax3.relim()
ax3.autoscale_view(tight=True, scalex=True, scaley=True)
self.plt.draw()
self.plt.pause(0.01)
self.save()
def save(self):
'''save graph to filename'''
self.figure.savefig(self.graph_filename)
class Session(object):
'''
The base unit of an Experiment
An Experiment for a config on repeat for k time
will run k Sessions, each with identical sess_spec
for a problem, Agent, Memory, Policy, param.
Handles its own data, plots and saves its own graphs
Serialized by the parent experiment_id with its session_id
'''
def __init__(self, experiment, session_num=0, num_of_sessions=1):
self.experiment = experiment
self.session_num = session_num
self.num_of_sessions = num_of_sessions
self.session_id = self.experiment.experiment_id + \
'_s' + str(self.session_num)
log_delimiter('Init Session #{} of {}:\n{}'.format(
self.session_num, self.num_of_sessions, self.session_id))
self.sess_spec = experiment.sess_spec
self.problem = self.sess_spec['problem']
self.Agent = get_module(GREF, self.sess_spec['Agent'])
self.Memory = get_module(GREF, self.sess_spec['Memory'])
self.Policy = get_module(GREF, self.sess_spec['Policy'])
self.PreProcessor = get_module(GREF, self.sess_spec['PreProcessor'])
self.param = self.sess_spec['param']
# init all things, so a session can only be ran once
self.sys_vars = self.init_sys_vars()
self.env = gym.make(self.sys_vars['GYM_ENV_NAME'])
self.preprocessor = self.PreProcessor(**self.param)
self.env_spec = self.set_env_spec()
self.agent = self.Agent(self.env_spec, **self.param)
self.memory = self.Memory(**self.param)
self.policy = self.Policy(**self.param)
self.agent.compile(self.memory, self.policy, self.preprocessor)
# data file and graph
self.base_filename = './data/{}/{}'.format(
self.experiment.prefix_id, self.session_id)
self.graph_filename = self.base_filename + '.png'
# for plotting
self.grapher = Grapher(self)
def init_sys_vars(self):
'''
init the sys vars for a problem by reading from
asset/problems.json, then reset the other sys vars
on reset will add vars (lower cases, see REQUIRED_SYS_KEYS)
'''
sys_vars = PROBLEMS[self.problem]
if not args.render:
sys_vars['RENDER'] = False
if environ.get('CI'):
sys_vars['RENDER'] = False
sys_vars['MAX_EPISODES'] = 4
self.sys_vars = sys_vars
self.reset_sys_vars()
return self.sys_vars
def reset_sys_vars(self):
'''reset and check RL system vars (lower case)
before each new session'''
for k in REQUIRED_SYS_KEYS:
if k.islower():
self.sys_vars[k] = copy.copy(REQUIRED_SYS_KEYS.get(k))
self.check_sys_vars()
return self.sys_vars
def check_sys_vars(self):
'''ensure the requried RL system vars are specified'''
sys_keys = self.sys_vars.keys()
assert all(k in sys_keys for k in REQUIRED_SYS_KEYS)
def set_env_spec(self):
'''Helper: return the env specs: dims, actions, reward range'''
env = self.env
state_dim = env.observation_space.shape[0]
if (len(env.observation_space.shape) > 1):
state_dim = env.observation_space.shape
env_spec = {
'state_dim': state_dim,
'state_bounds': np.transpose(
[env.observation_space.low, env.observation_space.high]),
'action_dim': env.action_space.n,
'actions': list(range(env.action_space.n)),
'reward_range': env.reward_range,
'timestep_limit': env.spec.tags.get(
'wrapper_config.TimeLimit.max_episode_steps')
}
self.env_spec = self.preprocessor.preprocess_env_spec(
env_spec) # preprocess
return self.env_spec
def debug_agent_info(self):
logger.debug(
"Agent info: {}".format(
format_obj_dict(
self.agent,
['learning_rate', 'n_epoch'])))
logger.debug(
"Memory info: size: {}".format(self.agent.memory.size()))
logger.debug(
"Policy info: {}".format(
format_obj_dict(self.agent.policy, ['e', 'tau'])))
logger.debug(
"PreProcessor info: {}".format(
format_obj_dict(self.agent.preprocessor, [])))
def check_end(self):
'''check if session ends (if is last episode)
do ending steps'''
sys_vars = self.sys_vars
logger.debug(
"RL Sys info: {}".format(
format_obj_dict(
sys_vars, ['epi', 't', 'total_rewards', 'mean_rewards'])))
logger.debug('{:->30}'.format(''))
if (sys_vars['solved'] or
(sys_vars['epi'] == sys_vars['MAX_EPISODES'] - 1)):
logger.info(
'Problem solved? {}\nAt episode: {}\nParams: {}'.format(
sys_vars['solved'], sys_vars['epi'],
to_json(self.param)))
self.env.close()
def update_history(self):
'''
update the data per episode end
'''
sys_vars = self.sys_vars
sys_vars['total_rewards_history'].append(sys_vars['total_rewards'])
sys_vars['explore_history'].append(
getattr(self.policy, 'e', 0) or getattr(self.policy, 'tau', 0))
avg_len = sys_vars['REWARD_MEAN_LEN']
        # calculate mean_rewards over the last REWARD_MEAN_LEN episodes;
        # cast away from np types to keep the value JSON-serializable
mean_rewards = float(
np.mean(sys_vars['total_rewards_history'][-avg_len:]))
solved = (mean_rewards >= sys_vars['SOLVED_MEAN_REWARD'])
sys_vars['mean_rewards'] = mean_rewards
sys_vars['mean_rewards_history'].append(mean_rewards)
sys_vars['solved'] = solved
self.grapher.plot()
self.check_end()
return sys_vars
def run_episode(self):
        '''run one episode, return sys_vars'''
sys_vars, env, agent = self.sys_vars, self.env, self.agent
sys_vars['total_rewards'] = 0
state = env.reset()
processed_state = agent.preprocessor.reset_state(state)
agent.memory.reset_state(processed_state)
self.debug_agent_info()
for t in range(agent.env_spec['timestep_limit']):
sys_vars['t'] = t # update sys_vars t
if sys_vars.get('RENDER'):
env.render()
processed_state = agent.preprocessor.preprocess_state()
action = agent.select_action(processed_state)
next_state, reward, done, _info = env.step(action)
processed_exp = agent.preprocessor.preprocess_memory(
action, reward, next_state, done)
if processed_exp is not None:
agent.memory.add_exp(*processed_exp)
sys_vars['done'] = done
agent.update(sys_vars)
if agent.to_train(sys_vars):
agent.train(sys_vars)
sys_vars['total_rewards'] += reward
if done:
break
self.update_history()
return sys_vars
def clear_session(self):
if K._BACKEND == 'tensorflow':
K.clear_session() # manual gc to fix TF issue 3388
def run(self):
'''run a session of agent'''
log_delimiter('Run Session #{} of {}\n{}'.format(
self.session_num, self.num_of_sessions, self.session_id))
sys_vars = self.sys_vars
sys_vars['time_start'] = timestamp()
for epi in range(sys_vars['MAX_EPISODES']):
sys_vars['epi'] = epi # update sys_vars epi
try:
self.run_episode()
except Exception:
logger.error('Error in experiment, terminating '
'further session from {}'.format(self.session_id))
traceback.print_exc(file=sys.stdout)
break
if sys_vars['solved']:
break
self.clear_session()
sys_vars['time_end'] = timestamp()
sys_vars['time_taken'] = timestamp_elapse(
sys_vars['time_start'], sys_vars['time_end'])
progress = 'Progress: Experiment #{} Session #{} of {} done'.format(
self.experiment.experiment_num,
self.session_num, self.num_of_sessions)
log_delimiter('End Session:\n{}\n{}'.format(
self.session_id, progress))
return sys_vars
class Experiment(object):
'''
An Experiment for a config on repeat for k time
will run k Sessions, each with identical sess_spec
for a problem, Agent, Memory, Policy, param.
Will spawn as many Sessions for repetition
Handles all the data from sessions
to provide an experiment-level summary for a sess_spec
Its experiment_id is serialized by
problem, Agent, Memory, Policy and timestamp
Data Requirements:
JSON, single file, quick and useful summary,
replottable data, rerunnable specs
Keys:
all below X array of hyper param selection:
- sess_spec (so we can plug in directly again to rerun)
- summary
- time_start
- time_end
- time_taken
- metrics
- sys_vars_array
'''
def __init__(self, sess_spec, times=1,
experiment_num=0, num_of_experiments=1,
run_timestamp=timestamp(),
prefix_id_override=None):
self.sess_spec = sess_spec
self.data = None
self.times = times
self.sess_spec.pop('param_range', None) # single exp, del range
self.experiment_num = experiment_num
self.num_of_experiments = num_of_experiments
self.run_timestamp = run_timestamp
self.prefix_id = prefix_id_override or '{}_{}_{}_{}_{}_{}'.format(
sess_spec['problem'],
sess_spec['Agent'].split('.').pop(),
sess_spec['Memory'].split('.').pop(),
sess_spec['Policy'].split('.').pop(),
sess_spec['PreProcessor'].split('.').pop(),
self.run_timestamp
)
self.experiment_id = self.prefix_id + '_e' + str(self.experiment_num)
self.base_dir = './data/{}'.format(self.prefix_id)
os.makedirs(self.base_dir, exist_ok=True)
self.base_filename = './data/{}/{}'.format(
self.prefix_id, self.experiment_id)
self.data_filename = self.base_filename + '.json'
log_delimiter('Init Experiment #{} of {}:\n{}'.format(
self.experiment_num, self.num_of_experiments,
self.experiment_id), '=')
def analyze(self):
        '''
        helper: analyze the given data from an experiment
        and return its metrics
        '''
sys_vars_array = self.data['sys_vars_array']
solved_sys_vars_array = list(filter(
lambda sv: sv['solved'], sys_vars_array))
mean_rewards_array = np.array(list(map(
lambda sv: sv['mean_rewards'], sys_vars_array)))
max_total_rewards_array = np.array(list(map(
lambda sv: np.max(sv['total_rewards_history']), sys_vars_array)))
epi_array = np.array(list(map(lambda sv: sv['epi'], sys_vars_array)))
mean_rewards_per_epi_array = np.divide(mean_rewards_array, epi_array)
t_array = np.array(list(map(lambda sv: sv['t'], sys_vars_array)))
time_taken_array = np.array(list(map(
lambda sv: timestamp_elapse_to_seconds(sv['time_taken']),
sys_vars_array)))
solved_epi_array = np.array(list(map(
lambda sv: sv['epi'], solved_sys_vars_array)))
solved_t_array = np.array(list(map(
lambda sv: sv['t'], solved_sys_vars_array)))
solved_time_taken_array = np.array(list(map(
lambda sv: timestamp_elapse_to_seconds(sv['time_taken']),
solved_sys_vars_array)))
metrics = {
# percentage solved
'num_of_sessions': len(sys_vars_array),
'solved_num_of_sessions': len(solved_sys_vars_array),
'solved_ratio_of_sessions': float(len(
solved_sys_vars_array)) / len(sys_vars_array),
'mean_rewards_stats': basic_stats(mean_rewards_array),
'mean_rewards_per_epi_stats': basic_stats(
mean_rewards_per_epi_array),
'max_total_rewards_stats': basic_stats(max_total_rewards_array),
'epi_stats': basic_stats(epi_array),
't_stats': basic_stats(t_array),
'time_taken_stats': basic_stats(time_taken_array),
'solved_epi_stats': basic_stats(solved_epi_array),
'solved_t_stats': basic_stats(solved_t_array),
'solved_time_taken_stats': basic_stats(solved_time_taken_array),
}
self.data['summary'].update({'metrics': metrics})
return self.data
def save(self):
'''save the entire experiment data grid from inside run()'''
with open(self.data_filename, 'w') as f:
f.write(to_json(self.data))
logger.info(
'Session complete, data saved to {}'.format(self.data_filename))
def to_stop(self):
        '''check if the experiment should be continued'''
metrics = self.data['summary']['metrics']
failed = metrics['solved_ratio_of_sessions'] < 1.
if failed:
logger.info(
'Failed experiment, terminating sessions for {}'.format(
self.experiment_id))
return failed
def run(self):
'''
helper: run a experiment for Session
a number of times times given a sess_spec from gym_specs
'''
configure_gpu()
time_start = timestamp()
sys_vars_array = []
for s in range(self.times):
sess = Session(experiment=self,
session_num=s, num_of_sessions=self.times)
sys_vars = sess.run()
sys_vars_array.append(copy.copy(sys_vars))
            time_end = timestamp()
            time_taken = timestamp_elapse(time_start, time_end)
            self.data = {  # experiment data
                'experiment_id': self.experiment_id,
                'sess_spec': self.sess_spec,
                'summary': {
                    'time_start': time_start,
                    'time_end': time_end,
                    'time_taken': time_taken,
                    'metrics': None,
                },
                'sys_vars_array': sys_vars_array,
            }
            self.analyze()
            # progressive update: write and re-check after every session
            self.save()
            if self.to_stop():
                break
progress = 'Progress: Experiment #{} of {} done'.format(
self.experiment_num, self.num_of_experiments)
log_delimiter(
'End Experiment:\n{}\n{}'.format(
self.experiment_id, progress), '=')
return self.data
def configure_gpu():
'''detect GPU options and configure'''
if K._BACKEND != 'tensorflow':
# skip directly if is not tensorflow
return
real_parallel_process_num = 1 if mp.current_process(
).name == 'MainProcess' else PARALLEL_PROCESS_NUM
tf = K.tf
gpu_options = tf.GPUOptions(
allow_growth=True,
per_process_gpu_memory_fraction=1./float(real_parallel_process_num))
config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True)
sess = tf.Session(config=config)
K.set_session(sess)
return sess
def plot(experiment_or_prefix_id):
'''plot from a saved data by init sessions for each sys_vars'''
prefix_id = prefix_id_from_experiment_id(experiment_or_prefix_id)
experiment_data_array = load_data_array_from_prefix_id(prefix_id)
for data in experiment_data_array:
sess_spec = data['sess_spec']
experiment = Experiment(sess_spec, times=1,
prefix_id_override=prefix_id)
# save with the right serialized filename
experiment.experiment_id = data['experiment_id']
num_of_sessions = len(data['sys_vars_array'])
for s in range(num_of_sessions):
sess = Session(experiment=experiment,
session_num=s, num_of_sessions=num_of_sessions)
sys_vars = data['sys_vars_array'][s]
sess.sys_vars = sys_vars
sess.grapher.plot()
sess.clear_session()
def analyze_param_space(experiment_data_array_or_prefix_id):
'''
get all the data from all experiments.run()
or read from all data files matching the prefix of experiment_id
e.g. usage without running:
prefix_id = 'DevCartPole-v0_DQN_LinearMemoryWithForgetting_BoltzmannPolicy_2017-01-15_142810'
analyze_param_space(prefix_id)
'''
if isinstance(experiment_data_array_or_prefix_id, str):
experiment_data_array = load_data_array_from_prefix_id(
experiment_data_array_or_prefix_id)
else:
experiment_data_array = experiment_data_array_or_prefix_id
flat_metrics_array = []
for data in experiment_data_array:
flat_metrics = flatten_dict(data['summary']['metrics'])
flat_metrics.update({'experiment_id': data['experiment_id']})
flat_metrics_array.append(flat_metrics)
metrics_df = pd.DataFrame.from_dict(flat_metrics_array)
    metrics_df = metrics_df.sort_values(
        ['mean_rewards_per_epi_stats_mean',
         'mean_rewards_stats_mean', 'solved_ratio_of_sessions'],
        ascending=False
    )  # sort_values returns a new DataFrame, so keep the sorted copy
experiment_id = experiment_data_array[0]['experiment_id']
prefix_id = prefix_id_from_experiment_id(experiment_id)
param_space_data_filename = './data/{0}/param_space_data_{0}.csv'.format(
prefix_id)
metrics_df.to_csv(param_space_data_filename, index=False)
logger.info(
'Param space data saved to {}'.format(param_space_data_filename))
return metrics_df
def run(sess_name_id_spec, times=1,
param_selection=False, line_search=False,
plot_only=False):
'''
primary method:
specify:
- sess_name(str) or sess_spec(Dict): run new experiment,
- experiment_id(str): rerun experiment from data
- experiment_id(str) with plot_only=True: plot graphs from data
This runs all experiments, specified by the obtained sess_spec
for a specified number of sessions per experiment
Multiple experiments are ran if param_selection=True
'''
# run plots on data only
if plot_only:
plot(sess_name_id_spec)
return
# set sess_spec based on input
if isinstance(sess_name_id_spec, str):
if len(sess_name_id_spec.split('_')) >= 4:
data = load_data_from_experiment_id(sess_name_id_spec)
sess_spec = data['sess_spec']
else:
sess_spec = SESS_SPECS.get(sess_name_id_spec)
else:
sess_spec = sess_name_id_spec
# compose grid and run param selection
if param_selection:
if line_search:
param_grid = param_line_search(sess_spec)
else:
param_grid = param_product(sess_spec)
sess_spec_grid = generate_sess_spec_grid(sess_spec, param_grid)
num_of_experiments = len(sess_spec_grid)
run_timestamp = timestamp()
experiment_array = []
for e in range(num_of_experiments):
sess_spec = sess_spec_grid[e]
experiment = Experiment(
sess_spec, times=times, experiment_num=e,
num_of_experiments=num_of_experiments,
run_timestamp=run_timestamp)
experiment_array.append(experiment)
p = mp.Pool(PARALLEL_PROCESS_NUM)
experiment_data_array = list(p.map(mp_run_helper, experiment_array))
p.close()
p.join()
else:
experiment = Experiment(sess_spec, times=times)
experiment_data = experiment.run()
experiment_data_array = [experiment_data]
return analyze_param_space(experiment_data_array)
```
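For orientation, the module's entry point is `run()`: it resolves a spec name from `asset/sess_specs.json`, runs one `Experiment` (or a multiprocessing pool of them when `param_selection=True`), and returns the metrics DataFrame from `analyze_param_space`. A typical invocation, assuming the bundled 'dqn' spec that the tests below also use:
```python
from rl.experiment import run

# One experiment, two repeated sessions of the bundled 'dqn' spec:
metrics_df = run('dqn', times=2)

# Grid search over param_range (one experiment per combination), or the
# cheaper line search along each parameter axis:
# metrics_df = run('dqn', times=2, param_selection=True)
# metrics_df = run('dqn', times=2, param_selection=True, line_search=True)

print(metrics_df[['experiment_id', 'solved_ratio_of_sessions']])
```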
#### File: johnkmj/openai_gym/setup.py
```python
import pip
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
from setuptools.command.install import install
reqs = open('./requirements.txt', 'r').read().split('\n')
class OverrideInstall(install):
"""
Emulate sequential install of pip install -r requirements.txt
To fix numpy bug in scipy, scikit in py2
"""
def run(self):
for req in reqs:
if req:
pip.main(["install", "-U", req])
# explicitly config
test_args = [
'--cov-report=term',
'--cov-report=html',
'--cov=rl',
'test'
]
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = test_args
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# the setup
setup(
name='openai_gym',
version='0.0.1',
description='Working out at the OpenAI Gym',
long_description=read('README.md'),
keywords='openai gym',
url='https://github.com/kengz/openai_gym',
author='keng',
author_email='<EMAIL>',
license='MIT',
packages=[],
zip_safe=False,
include_package_data=True,
install_requires=[],
dependency_links=[],
extras_require={
'dev': [],
'docs': [],
'testing': []
},
classifiers=[],
tests_require=['pytest', 'pytest-cov'],
test_suite='test',
cmdclass={'test': PyTest, 'install': OverrideInstall}
)
```
#### File: openai_gym/test/test_rl.py
```python
import unittest
import pandas as pd
from os import environ
environ['CI'] = environ.get('CI') or 'true'
from rl.experiment import run
class DQNTest(unittest.TestCase):
def test_run_gym_tour(self):
metrics_df = run('dummy')
assert isinstance(metrics_df, pd.DataFrame)
def test_run_q_table(self):
metrics_df = run('q_table')
assert isinstance(metrics_df, pd.DataFrame)
def test_run_dqn(self):
metrics_df = run('dqn')
assert isinstance(metrics_df, pd.DataFrame)
def test_run_double_dqn(self):
metrics_df = run('double_dqn')
assert isinstance(metrics_df, pd.DataFrame)
def test_run_mountain_double_dqn(self):
metrics_df = run('mountain_double_dqn')
assert isinstance(metrics_df, pd.DataFrame)
# def test_run_all(self):
# for x in game_specs.keys():
# metrics_df = run(x)
``` |
{
"source": "johnknott/proxmox-hetzner-autoconfigure",
"score": 3
} |
#### File: configurators/network/network.py
```python
from typing import NamedTuple
from proxmox_hetzner_autoconfigure.util import util
from proxmox_hetzner_autoconfigure.configurators import configurator as cfg
from proxmox_hetzner_autoconfigure.configurators.network.topologies import (
routed_subnet,
routed_separate_ips,
)
class Data(NamedTuple):
"""Data structure that gets emitted from gather_input and passed into transform_to_commands"""
generated_script: str
class Config(cfg.Configurator):
"""Implementation of the Network Configurator"""
def __init__(self):
super().__init__()
self.description = "Configure Network"
def gather_input(self) -> Data:
"""Gathers input from the user and returns a NetworkData"""
topologies = [routed_subnet, routed_separate_ips]
topology_configs = list(map(lambda t: t.Config(), topologies))
code, choice = util.dialog.radiolist(
"Please choose a network topology",
choices=map(lambda c: (c.short_description, c.description, 1), topology_configs),
)
if code != "ok":
return None
chosen_config = next(filter(lambda x: x.short_description == choice, topology_configs))
script = chosen_config.generate_script(chosen_config.gather_input())
return Data(generated_script=script)
def generate_script(self, data: Data) -> str:
"""transforms a Data into a shell script segment"""
return data.generated_script if data else ""
```
#### File: configurators/tls/tls.py
```python
from typing import NamedTuple
from proxmox_hetzner_autoconfigure.util import util
from proxmox_hetzner_autoconfigure.configurators import configurator as cfg
class Data(NamedTuple):
"""Tuple that gets emitted from gather_input and passed into transform_to_commands"""
email: str
domain: str
class Config(cfg.Configurator):
"""Implementation of an example Configurator"""
def __init__(self):
super().__init__()
self.description = "Configure TLS (Lets Encrypt)"
def gather_input(self) -> Data:
"""Gathers input from the user and returns a Data"""
email = domain = ""
email = util.input_regex(
"Please enter your email address", util.EMAIL_REGEX, "Invalid Email",
)
if email is None:
return None
domain = util.input_regex(
"Please enter your domain name (FQDN)." "Must be resolvable from the Internet.",
util.DOMAIN_REGEX,
"Invalid Domain",
)
if domain is None:
return None
util.shared_globals["fqdn"] = domain
return Data(email, domain)
def generate_script(self, data: Data) -> str:
"""transforms a Data into a shell script segment"""
return util.render_template(__file__, "template", data)
```
#### File: proxmox_hetzner_autoconfigure/util/util.py
```python
import os
import re
import platform
from ipaddress import IPv4Network, IPv4Address
from dialog import Dialog
from jinja2 import Environment, FileSystemLoader
IP_REGEX = r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b"
CIDR_REGEX = r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d{1,2}\b"
EMAIL_REGEX = r"[^@]+@[^@]+\.[^@]+"
DOMAIN_REGEX = r".*\.\w+"
NOT_EMPTY = r"^(?!\s*$).+"
ANSI_RED = r"\Zb\Z1"
ANSI_GREEN = r"\Zb\Z3"
ANSI_WHITE = r"\Zb\Z2"
ANSI_RESET = r"\Zn"
dialog = Dialog(dialog="dialog", autowidgetsize=True)
shared_globals = {}
def is_proxmox_machine():
"""Is this a Linux machine with Proxmox installed?"""
return platform.system() == "Linux" and os.popen("which pveversion").read().strip()
def main_ip():
"""Returns the detected main IP of this machine"""
return os.popen("hostname -i | awk '{print $1}'").read().strip()
def gateway_ip():
"""Returns the detected gateway IP of this machine"""
return os.popen("ip route | grep default | awk '{print $3}'").read().strip()
def render_template(file, template_name, binds):
"""Renders a jinja2 template and returns it as a string"""
dir_path = os.path.dirname(os.path.realpath(file))
env = Environment(loader=FileSystemLoader(dir_path))
template = env.get_template(f"{template_name}.jinja2")
if hasattr(binds, "_asdict"):
binds = binds._asdict()
return template.render(binds)
def wrap_as_heredoc(content, filename):
"""Wraps a string in a heredoc and adds code to write it to a file"""
return render_template(__file__, "heredoc", {"content": content, "filename": filename})
def build_script(configurators):
"""
Loops over configurators, calling gather_input on them and then renders the script sections
in a template.
"""
def gather_input(configurator):
cfg = configurator.Config()
dialog.set_background_title(cfg.description)
return {"config": cfg, "input": cfg.gather_input()}
def generate_script(input_data):
cfg = input_data["config"]
return {"name": cfg.description, "content": cfg.generate_script(input_data["input"])}
sections = map(generate_script, map(gather_input, configurators))
return render_template(__file__, "install", {"sections": sections, "shared_globals": shared_globals})
def input_regex(message, regex, regex_fail, **kwargs):
"""Helper method to ask the user for input and only proceed if the input matches a regex"""
text_value = error = ""
kwargs = kwargs if kwargs else {}
kwargs["colors"] = True
while not re.match(regex, text_value):
message_with_error = f"{message}\n{ANSI_RED}{error}{ANSI_RESET}" if error else message
code, text_value = dialog.inputbox(message_with_error, **kwargs)
error = regex_fail
if code != "ok":
return None
return text_value
def input_network(message, **kwargs):
"""Helper method to ask the user for input and only proceed if the input matches a regex"""
net_addr = error = ""
kwargs = kwargs if kwargs else {}
kwargs["colors"] = True
while True:
try:
message_with_error = f"{message}\n{ANSI_RED}{error}{ANSI_RESET}" if error else message
code, net_addr = dialog.inputbox(message_with_error, **kwargs)
if code != "ok":
return None
if not re.match(CIDR_REGEX, net_addr):
raise Exception("Please enter in the format x.x.x.x/x")
return str(IPv4Network(net_addr))
except Exception as err:
error = str(err)
def input_ip(message, **kwargs):
"""Helper method to ask the user for input and only proceed if the input matches a regex"""
ip_addr = error = ""
kwargs = kwargs if kwargs else {}
kwargs["colors"] = True
while True:
try:
message_with_error = f"{message}\n{ANSI_RED}{error}{ANSI_RESET}" if error else message
code, ip_addr = dialog.inputbox(message_with_error, **kwargs)
if code != "ok":
return None
return str(IPv4Address(ip_addr))
except Exception as err:
error = str(err)
```
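`render_template` resolves templates relative to the calling module's file and accepts either a plain dict or a NamedTuple (converted through `_asdict`). A small sketch of the calling convention; the `greeting.jinja2` template here is hypothetical:
```python
from typing import NamedTuple

from proxmox_hetzner_autoconfigure.util import util


class Data(NamedTuple):
    email: str
    domain: str


# Looks for ./greeting.jinja2 next to this module; the NamedTuple binds are
# unpacked via _asdict, so the template can use {{ email }} and {{ domain }}.
script = util.render_template(__file__, "greeting",
                              Data("admin@example.com", "pve.example.com"))
print(script)
```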
#### File: tests/configurators/test_snippets.py
```python
from unittest.mock import MagicMock
from proxmox_hetzner_autoconfigure import util
from proxmox_hetzner_autoconfigure.configurators import snippets
def test_description():
config = snippets.Config()
assert config.description == "Configure Snippets"
def test_gather_input_correct():
dialog = MagicMock()
dialog.checklist.side_effect = [["ok", ["test_desc"]]]
config = snippets.Config()
snippets.util.dialog = dialog
assert config.gather_input() == snippets.Data(snippets=[])
```
#### File: tests/configurators/test_storage_box.py
```python
from unittest.mock import MagicMock
from proxmox_hetzner_autoconfigure import util
from proxmox_hetzner_autoconfigure.configurators import storage_box
def test_description():
config = storage_box.Config()
assert config.description == "Hetzner Storage Box"
def test_gather_input_correct():
dialog = MagicMock()
dialog.inputbox.side_effect = [
["ok", "u123456"],
["ok", "<PASSWORD>"],
["ok", "u123456.your-storagebox.de"],
]
config = storage_box.Config()
storage_box.util.dialog = dialog
assert config.gather_input() == storage_box.Data(
username="u123456", password="<PASSWORD>", server="u123456.your-storagebox.de",
)
def test_gather_input_cancelled():
dialog = MagicMock()
dialog.inputbox.side_effect = [
["ok", "u123456"],
["cancel", ""],
]
config = storage_box.Config()
storage_box.util.dialog = dialog
assert config.gather_input() is None
def test_generate_script():
config = storage_box.Config()
data = storage_box.Data(
username="u123456", password="<PASSWORD>", server="u123456.your-storagebox.de",
)
script = config.generate_script(data)
assert (
"//u123456.your-storagebox.de/backup /mnt/storage cifs _netdev,username=u123456,password=$STORAGE_BOX_PASSWORD,uid=101001,gid=101001 0 0"
in script
)
``` |
{
"source": "johnkour/Two_span_truss",
"score": 4
} |
#### File: johnkour/Two_span_truss/solver.py
```python
import numpy as np
from numpy import random as rnd
import pandas as pd
import sys
# ====================Define custom classes===================================
# 3.1: Create custom input error class:
class InputError(Exception):
"""Base class for input exceptions in this module."""
pass
# 3.1.1: Create number of training examples error subclass.
class ExamplesNumber(InputError):
"""Exception raised for errors in the input of the number of training
examples.
Attributes:
        message -- explanation of the error
        payload -- additional detail about the error
"""
def __init__(self, message, payload=None):
self.message = message
self.payload = payload
def __str__(self):
return str(self.message)
# 3.1.2: Create width error subclass.
class WidthInput(InputError):
"""Exception raised for errors in the input of the width.
Attributes:
        message -- explanation of the error
        payload -- additional detail about the error
"""
def __init__(self, message, payload=None):
self.message = message
self.payload = payload
def __str__(self):
return str(self.message)
# 3.1.3: Create thickness error subclass.
class ThickInput(InputError):
"""Exception raised for errors in the input of the thickness.
Attributes:
        message -- explanation of the error
        payload -- additional detail about the error
"""
def __init__(self, message, payload=None):
self.message = message
self.payload = payload
def __str__(self):
return str(self.message)
# ====================Define custom functions=================================
# 2: Handmade functions.
# 2.1: Calculate the members' length.
def mem_length(L, h):
'''
Summary
----------
This function is used to calculate the length of the truss' members.
    It also works if the length and the height of the truss change in every
    training example. Note that with minor changes to the indexing it will
    produce results for a truss with more than 11 sections.
Parameters
----------
L : (m x 1) ARRAY
The length of the truss (m).
h : (m x 1) ARRAY
        The height of the truss (m).
Returns
-------
S : (m x 11) ARRAY
        The length of the truss' members (m).
'''
    m, n = np.shape(L)
    temp2 = L / 3
    temp1 = np.hypot(h, temp2 / 2)
temp3 = np.zeros((m, 11), dtype = int)
temp3[:, ::2] = 1
temp4 = np.ones((m, 11), dtype = int)
temp4 -= temp3
S = temp1 * temp3 + temp2 * temp4
return S
# 2.2: Calculate the members' internal forces.
def axial_f(P, S, L, h):
'''
Summary
----------
This function is used to calculate the axial force of the truss' members.
    We utilize the symmetry of the problem to write fewer lines of smarter
    code, using an auxiliary array temp to compute our final product F. The
    nodal loads (P[i,j]) differ from node to node and from analysis to
    analysis. Here, as before, the function runs many analyses simultaneously.
Parameters
----------
P : (m x 3) ARRAY
The forces that the truss bears (kN).
S : (m x 11) ARRAY
        The length of the truss' members (m).
L : (m x 1) ARRAY
The length of the truss (m).
h : (m x 1) ARRAY
        The height of the truss (m).
Returns
-------
F : (m x 11) ARRAY
The axial forces of the truss' members (kN).
'''
m, n = np.shape(S)
SP = np.sum(P, axis = 1, keepdims = True)
# print(SP)
# print(S[:, 0])
temp = np.zeros((m, 6))
temp[:, 0] = -SP.flatten() / 2
# print(temp[:, 0])
temp[:, 0] /= (h.flatten() / S[:, 0])
# print(temp[:, 0])
temp[:, 1] = np.abs(temp[:, 0])
temp[:, 1] *= S[:, 1] / 2
temp[:, 1] /= S[:, 0]
# print(temp[:, 1])
temp[:, 2] = np.abs(temp[:, 0]) * (h.flatten() / S[:, 0])
temp[:, 2] -= P[:, 0]
temp[:, 2] /= (h.flatten() / S[:, 2])
# print(temp[:, 2])
temp[:, 3] = np.abs(temp[:, 0]) + np.abs(temp[:, 2])
temp[:, 3] *= -(S[:, 1] / S[:, 2]) / 2
# print(temp[:, 3])
temp[:, 4] = -temp[:, 2]
# print(temp[:, 4])
temp[:, 5] = (np.abs(temp[:, 2]) + np.abs(temp[:, 4]))
temp[:, 5] *= (S[:, 1] / S[:, 2]) / 2
temp[:, 5] += np.abs(temp[:, 1])
# print(temp[:, 5])
# print(temp)
F = np.zeros((m, n), dtype = float)
F[:, :6] = temp
temp = temp[:, ::-1]
temp = np.delete(temp, 0, 1)
F[:, 6:] = temp
return F
# 2.3: Calculate the members' resistance.
def mem_res(S, b, t, fy, E, k):
'''
Summary
----------
This function is used to define the resistance of each member of the truss
to tension and compression. Once again we chose to run all of our analyses
at the same time. For the purposes of this assignment fy and E are the
same for every analysis. Note that it would make no sense to change fy for
different members in the same analysis and that E is pretty much standard
regardless of member or analysis.
Parameters
----------
S : (m x 11) ARRAY
        The length of the truss' members (m).
b : (m x 11) ARRAY
The width of the truss' members (mm).
t : (m x 11) ARRAY
The thickness of the truss' members (mm).
fy : INTEGER
The resistance of the steel (MPa).
E : INTEGER
The elasticity modulous of the steel (GPa).
k : FLOAT
The hardening parameter of the steel (-).
Returns
-------
T_for_cr : (m x 11) ARRAY
The tensile resistance of the truss' members (kN).
C_for_cr : (m x 11) ARRAY
The resistance of the truss' members to compression (kN).
'''
m, n = np.shape(b)
# b = b.reshape((1, len(b)))
# t = t.reshape((1, len(t)))
b = b / 10 ** 3
t = t / 10 ** 3
A = 4 * b * t
Vol = A * S
fy *= 10**3
E *= 10**6
T_for = fy * A
C_for = - k * T_for
Buck_for = np.pi**2 * E * ((b + t)**4 / 12 - (b - t)**4 / 12)
Buck_for /= -S**2
T_for_cr = T_for
C_for_cr = np.maximum(C_for, Buck_for)
return T_for_cr, C_for_cr
# 2.4: Evaluate whether the member fails or not.
def evaluation(F, T_for_cr, C_for_cr):
'''
Summary
----------
This function is used to estimate whether a member (which means the whole
truss) fails or not. The output of this function are the true values of Y
that will be used later to train machine learning algorithms to simulate
the structural analysis programmed here.
Parameters
----------
F : (m x 11) ARRAY
The axial forces of the truss' members (kN).
T_for_cr : (m x 11) ARRAY
The tensile resistance of the truss' members (kN).
C_for_cr : (m x 11) ARRAY
The resistance of the truss' members to compression (kN).
Returns
-------
Failure : (m x 1) ARRAY WITH ELEMENT VALUES 0 OR 1
1 if the truss fails, 0 otherwise.
'''
m, n = np.shape(F)
Failure = (C_for_cr > F) | (T_for_cr < F)
temp = Failure.astype(int)
Failure = np.amax(temp, axis = 1, keepdims = True)
# Failure = np.asscalar(Failure)
return Failure
# 3: Check that width and thickness are not out of bounds.
# 3.2.1: Test the variable m for input error.
def ex_num(m):
'''
Summary
----------
This function is used to evaluate if the number of training examples typed
    by the user via keyboard is an integer. If not, an error message is
displayed so as to inform the user. It also converts m from string to
integer.
Parameters
----------
m : STRING
Number of training examples.
Returns
-------
m : INTEGER(?)
Number of training examples.
'''
try:
m = int(m)
except:
try:
raise ExamplesNumber("INVALID INPUT",
"The number of training examples " +
"is not an integer.")
except ExamplesNumber as error:
print(str(error))
print("Detail: {}".format(error.payload))
sys.exit()
return m
# 3.2.2: Test the variables b and t for input error.
def bounds(b, t):
'''
Summary
----------
This function is used to evaluate if the section properties (thickness and
width) are out of bounds. If they are, an error message is
    displayed so as to inform the user and the analysis is stopped (the
    program closes).
Parameters
----------
b : (m x 11) ARRAY
The width of the truss' members (mm).
t : (m x 11) ARRAY
The thickness of the truss' members (mm).
Returns
-------
None.
'''
# b = b.reshape((1, len(b)))
# t = t.reshape((1, len(t)))
if np.any(b < 80) | np.any(b > 300):
try:
raise WidthInput("INVALID INPUT", "The width is out of bounds.")
except WidthInput as error:
print(str(error))
print("Detail: {}".format(error.payload))
sys.exit()
if np.any(t < 3) | np.any(t > 20):
try:
raise ThickInput("INVALID INPUT", "The thickness is out of bounds.")
except ThickInput as error:
print(str(error))
print("Detail: {}".format(error.payload))
sys.exit()
# Define function to generate the data:
def generator(m):
'''
Summary
----------
    This function is used to produce m training examples (or analyses).
    Each training example consists of a vector of 25 elements: the first 11
    are the widths of the sections, the next 11 are the thicknesses of the
    sections and the last 3 are the nodal loads. Note that if the code above
    is slightly modified there could be another 3 variables: the length and
    height of the truss and the quality of the steel (L, h, fy). We would
    then have to give them random values for each analysis below.
Parameters
----------
m : INTEGER
Number of training examples.
Returns
-------
b : (m x 11) ARRAY
The width of the truss' members (mm).
t : (m x 11) ARRAY
The thickness of the truss' members (mm).
P : (m x 3) ARRAY
The load of the truss in each of the 3 nodes (kN).
'''
    # b starts at 80mm, stops at 300mm and can be raised in steps of 5mm.
    # randint's upper bound is exclusive, so 45 keeps 300mm reachable.
    b = rnd.randint(0, 45, size = (m, 11)) * 5
    b += 80
    # t starts at 3mm, stops at 20mm and can be raised in steps of 1mm.
    t = rnd.randint(3, 21, size = (m, 11))
# P starts at 0kN and stops at 250kN.
P = rnd.random(size = (m, 3))
P *= 250
P = np.round(P, decimals = 2)
return b, t, P
# Define function to export the data to CSV:
def exporter(path, name, tup, fl_form):
'''
Summary
----------
    This function is used to export the data ((b, t, P) or (Failure,)) to CSV
    format in order to use them in the next programs. The tuple (tup) should
    contain the parameters (b, t, P) or the true values (Failure,), so as to
    keep the input and the output of the Machine Learning algorithms in
    separate files and avoid further confusion. We recommend using fl_form =
    %d (integer format) for the true values (Failure,) and fl_form = %.2f
    (floating point with 2 decimals) for the parameters. The name of the
    file in which the data will be stored should not have the suffix '.csv'.
    We use the pandas library to store the data to the CSV because pandas is
    faster than the standard methods when dealing with big data.
Parameters
----------
path : STRING
The path, where the file containing the data will be stored.
name : STRING
The name of the CSV file containing the data.
tup : TUPLE OF ARRAYS
The tuple containing the arrays with the data to be stored.
fl_form : STRING
The format of the numbers to be stored in the CSV.
Returns
-------
None.
'''
name += '.csv'
data = np.concatenate(tup, axis = 1)
df = pd.DataFrame(data)
path += '/' + name
df.to_csv(path, float_format = fl_form, header = False, index = False)
# ============================Main Program====================================
# 1.2: Define the geometry of the truss.
m = input(
'Please enter the number of training examples ' +
    '(or press enter for default value: 500k)\n')
if len(m) < 1: m = 500 * 10**3
m = ex_num(m)
L = np.ones((m, 1))
L *= 20
h = np.ones((m, 1))
h *= 1.5
# print(L, h)
# 2.1: Calculate the members' length.
S = mem_length(L, h)
# print(S)
# print(S)
# 1.3 & 1.5: Define the load of the truss and the dimensions of it's sections.
# load = np.ones((m, 3))
# load *= 100
# P = np.array(load)
b, t, P = generator(m)
# 3.2: Test the variables b and t for input error.
bounds(b, t)
# 2.2: Calculate the members' internal forces.
F = axial_f(P, S, L, h)
# print(F)
# 1.4: Define the properties of the steel used.
fy = 275 # MPa
k = 0.80
E = 210 # GPa
# print(F)
# 2.3: Calculate the members' resistance.
T_for_cr, C_for_cr = mem_res(S, b, t, fy, E, k)
# print(T_for_cr)
# print(C_for_cr)
# 2.4: Evaluate whether the member fails or not.
Failure = evaluation(F, T_for_cr, C_for_cr)
# print(Failure)
n_fail = np.sum(Failure, axis = 0)
# print(n_fail, m-n_fail)
# 4: Export results to csvs.
message = 'Please enter the path where the file will be stored'
message += ' (Press enter to set path: '
message += 'D:/jkour/Documents/Σχολή/2ο έτος/Εαρινό Εξάμηνο/Προγραμματισμός Η.Υ'
message += '/Εργασία εξαμήνου/MyCode/Truss_analysis)\n'
pth = input(message)
if len(pth) < 1:
pth = 'D:/jkour/Documents/Σχολή/2ο έτος/Εαρινό Εξάμηνο/Προγραμματισμός Η.Υ'
pth += '/Εργασία εξαμήνου/MyCode/Truss_analysis'
tup = (b, t, P)
exporter(pth, 'variables', tup, '%.2f')
tup = (Failure, )
exporter(pth, 'results', tup, '%d')
``` |
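The script above is interactive, but the analysis pipeline itself is a chain of pure functions, so it can be sanity-checked on a tiny batch before generating the full dataset. A non-interactive sketch, assuming the functions defined in solver.py are in scope:
```python
import numpy as np

m = 2                            # tiny batch for a quick check
L = np.full((m, 1), 20.0)        # span length of each truss (m)
h = np.full((m, 1), 1.5)         # height of each truss (m)

b, t, P = generator(m)           # random widths (mm), thicknesses (mm), loads (kN)
bounds(b, t)                     # exits if any section is out of bounds

S = mem_length(L, h)             # member lengths, shape (m, 11)
F = axial_f(P, S, L, h)          # member axial forces (kN)
T_cr, C_cr = mem_res(S, b, t, fy=275, E=210, k=0.80)
Failure = evaluation(F, T_cr, C_cr)

print(Failure.ravel())           # 1 where the truss fails, 0 where it holds
```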