metadata | text |
---|---|
{
"source": "JohannesSMHI/eoana",
"score": 2
} |
#### File: eoana/eoana/main.py
```python
from eoana.config import Settings
# from eoana.handler import some_classes, some_funcs
class App:
"""
Keep it clean, keep it tidy!
- read
- validate
- write
"""
def __init__(self, *args, **kwargs):
self.settings = Settings(**kwargs)
def validate(self, *args, **kwargs):
""""""
raise NotImplementedError
def read(self, *args, **kwargs):
""""""
raise NotImplementedError
def write(self, *args, **kwargs):
""""""
raise NotImplementedError
if __name__ == '__main__':
app = App()
```
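The `App` skeleton above only defines the read/validate/write contract. A minimal sketch of a subclass filling it in might look like the following; the `CsvApp` name and its pandas-based steps are illustrative assumptions, not part of the repository.

```python
# Hypothetical subclass sketch; CsvApp and its pandas steps are assumptions
# used only to illustrate the read/validate/write pattern of App.
import pandas as pd

from eoana.main import App


class CsvApp(App):
    def read(self, path, **kwargs):
        # Load one delivery file into a DataFrame.
        return pd.read_csv(path, **kwargs)

    def validate(self, df, **kwargs):
        # Example check: the frame is not empty after dropping all-NaN rows.
        return not df.dropna(how='all').empty

    def write(self, df, path, **kwargs):
        # Persist the validated data.
        df.to_csv(path, index=False)
```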
#### File: eoana/readers/geotiff.py
```python
import time
import numpy as np
import rasterio as rio
import matplotlib.pyplot as plt
import xarray as xr
class GeoTIFFReader:
"""
"""
@staticmethod
def read(fid, as_type=None, nan_value=None):
as_type = as_type or float
nan_value = nan_value or 255.
rst = rio.open(fid)
array = rst.read()
array = array[0].astype(as_type)
array = np.where(array == nan_value, np.nan, array)
return array
@staticmethod
def read_meta(fid):
rst = rio.open(fid)
meta = rst.meta.copy()
meta.update(compress='lzw')
return meta
def xarray_reader(fid):
xarr = xr.open_rasterio(fid)
# xarr.sizes['x']
# xarr.sizes['y']
# xarr.coords['x'].data
# xarr.coords['y'].data
# xarr.data
if __name__ == "__main__":
fid = 'C:/Temp/Satellit/sentinel_data/kubdata/2021-02-26-.tif'
gf_reader = GeoTIFFReader()
array = gf_reader.read(fid, as_type=float)
meta = gf_reader.read_meta(fid)
```
#### File: eoana/readers/xlsx.py
```python
import pandas as pd
class PandasReaderBase:
"""
"""
def __init__(self, *args, **kwargs):
super(PandasReaderBase, self).__init__()
def get(self, item):
"""
:param item: str
:return:
"""
if item in self.__dict__.keys():
return self.__getattribute__(item)
else:
            print("Warning! Can't find attribute: %s" % item)
return 'None'
@staticmethod
def read(*args, **kwargs):
"""
:param args: tuple
Expects:
file_path
:param kwargs: dict
Addition:
header
encoding
dtype
keep_default_na
:return:
"""
return pd.read_excel(*args, **kwargs)
class PandasXlsxReader(PandasReaderBase):
"""
    Reads xlsx files.
"""
def __init__(self, *args, **kwargs):
super(PandasXlsxReader, self).__init__()
for key, item in kwargs.items():
setattr(self, key, item)
```
#### File: eoana/validators/attributes.py
```python
from eoana.validators.validator import Validator
class MandatoryAttributes(Validator):
"""
"""
def __init__(self, *args, **kwargs):
super(MandatoryAttributes, self).__init__()
for key, item in kwargs.items():
setattr(self, key, item)
def validate(self, list_obj, **kwargs):
"""
:param list_obj: stations.handler.List
:return:
"""
assert self.attributes
report = {'approved': {},
'disapproved': {}}
for attr in self.attributes:
if list_obj.has_attribute(attr):
if list_obj.get(attr).all():
report['approved'].setdefault(attr, 'No missing values')
else:
report['disapproved'].setdefault(attr, 'WARNING! Missing values')
else:
report['disapproved'].setdefault(attr, 'WARNING! Missing attribute')
# ValidatorLog.update_info(
# list_name=list_obj.get('name'),
# validator_name=self.name,
# info=report,
# )
``` |
{
"source": "JohannesSMHI/odv_transformer",
"score": 3
} |
#### File: odv_transformer/readers/discrete.py
```python
import pandas as pd
from pathlib import Path
from odv_transformer.readers.txt import PandasTxtReader
from odv_transformer.utils import decmin_to_decdeg
class PhysicalChemicalArchiveReader(PandasTxtReader):
"""Reader for the PhysicalChemical datatype according to archive format."""
def __init__(self, *args, **kwargs):
"""Initialize."""
super().__init__(*args, **kwargs)
for key, item in kwargs.items():
setattr(self, key, item)
self.arguments = list(args)
self.files = {}
def load(self, *args, **kwargs):
"""Activate files."""
self._activate_files(*args, **kwargs)
def read_element(self, *args, **kwargs):
"""Read data element.
Reading excel sheet into pandas.Dataframe.
"""
df = self._read_file(*args, **kwargs)
if type(df) == pd.DataFrame:
if 'LATIT' in df:
df['LATIT_DD'] = df['LATIT'].apply(decmin_to_decdeg)
if 'LONGI' in df:
df['LONGI_DD'] = df['LONGI'].apply(decmin_to_decdeg)
return df
def _read_file(self, *args, **kwargs):
"""Read file (element) and return dataframe."""
fid = args[0] if type(args) == tuple else args
if fid in self.files:
if kwargs.get('dtype') == '':
kwargs['dtype'] = str
df = self.read(self.files.get(fid), **kwargs)
df = self.eliminate_empty_rows(df)
else:
df = None
print('File {} not found in delivery'.format(fid))
return df
def _activate_files(self, *args, **kwargs):
"""Set folder paths to self.files."""
folder_path = Path(args[0]) if type(args) == tuple else Path(args)
        if not folder_path.exists():
raise FileNotFoundError(
'Could not find the given directory: {}'.format(folder_path)
)
if folder_path.name != 'processed_data':
folder_path = folder_path / 'processed_data'
for file_name in folder_path.glob('**/*'):
self.files.setdefault(file_name.name, file_name)
``` |
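A hedged sketch of how this reader is typically driven: point it at a delivery folder (the reader looks inside `processed_data`) and then read one element by file name. The folder and file names below are placeholders, and it is assumed the underlying `PandasTxtReader` can be constructed without arguments.

```python
# Placeholder paths and file names; illustrative usage only.
from odv_transformer.readers.discrete import PhysicalChemicalArchiveReader

reader = PhysicalChemicalArchiveReader()
reader.load('/data/deliveries/SHARK_PhysicalChemical_2021')  # globs processed_data/
df = reader.read_element('data.txt')
# If LATIT/LONGI (degrees + decimal minutes) are present, the frame also
# gets decimal-degree columns LATIT_DD / LONGI_DD.
```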
{
"source": "JohannesSMHI/sirena",
"score": 2
} |
#### File: sirena/core/calculator.py
```python
import numpy as np
import pandas as pd
from datetime import datetime
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
from sklearn.preprocessing import PolynomialFeatures
from sirena.utils import round_value
class AnnualMeanEquation:
"""Calculator for annual mean values."""
def __init__(self, *args, ref_value_2000=None, k_value=None, year=None, **kwargs):
"""Initialize."""
self.ref_value = ref_value_2000 or np.nan
self.k = k_value or np.nan
self.year = year or np.nan
def __call__(self):
"""Return class calculation.
Reference year for RH2000 is 2000
calculated_mean__yyyy = ref_value_2000 - k * (yyyy-2000).
"""
self.calculated = self.ref_value - self.k * (self.year - 2000)
return self.calculated
def calculate_apparent_land_uplift(self):
"""Return apparent land uplift.
With a linear regression apparent_land_uplift = k
"""
return (self.ref_value - self.k * (self.year - 2001)) - self.calculated
@property
def single_line(self):
"""Return line."""
return '-' * 65
@property
def dubble_line(self):
"""Return dubble line."""
return '=' * 65
def get_summary_string(self, summary):
"""Return statistical data summary."""
start_idx = summary.find('* The condition') - 1
return '\n'.join((
summary[:start_idx],
self.calculation_string,
self.single_line,
self.equation_string,
self.dubble_line,
summary[start_idx + 1:]
))
@property
def equation_string(self):
"""Return equation as string."""
return '{} = {} - {} * ({}-2000)'.format(
round(self.calculated, 3),
self.ref_value,
round(self.k, 3),
self.year
)
@property
def calculation_string(self):
"""Return SMHI calculation string."""
return 'SMHI calculation: M_(yyyy) = ref_value_2000 - k * (yyyy-2000)'
class OLSEquation:
"""Example calculator for an Ordinary Least Squares model.
https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.OLS.html
"""
def __init__(self, *args, data=None, parameter=None, **kwargs):
"""Initialize."""
self.df = data
self.parameter = parameter
qf_boolean = self.df[self.parameter].isna()
if qf_boolean.any():
self.df.drop(self.df[qf_boolean].index, inplace=True)
def __call__(self):
"""Return class calculation."""
res = sm.OLS(
self.df[self.parameter],
self.df.loc[:, ['year', 'intercept']],
hasconst=True
).fit()
return res
class OLSLinearRegression:
"""Example calculator for an Ordinary Least Squares model.
https://www.statsmodels.org/stable/generated/statsmodels.regression.linear_model.OLS.html
"""
def __init__(self, *args, data=None, parameter=None, **kwargs):
"""Initialize."""
self.df = data
self.parameter = parameter
qf_boolean = self.df[self.parameter].isna()
if qf_boolean.any():
self.df.drop(self.df[qf_boolean].index, inplace=True)
def __call__(self):
"""Return class calculation."""
x = self.df['year'].values
y = self.df[self.parameter].values
x = x[:, np.newaxis]
y = y[:, np.newaxis]
x = sm.add_constant(x)
res = sm.OLS(y, x).fit()
return res
class OLSPolynomialRegression:
"""Example calculator for an Polynominal Regression model.
Source:
https://ostwalprasad.github.io/machine-learning/Polynomial-Regression-using-statsmodel.html
"""
def __init__(self, *args, data=None, parameter=None, polynomial_degree=None, **kwargs):
"""Initialize."""
self.df = data
self.parameter = parameter
self.polynomial_degree = polynomial_degree or 2
qf_boolean = self.df[self.parameter].isna()
if qf_boolean.any():
self.df.drop(self.df[qf_boolean].index, inplace=True)
def __call__(self):
"""Return class calculation."""
x = self.df['year'].values
y = self.df[self.parameter].values
x = x[:, np.newaxis]
y = y[:, np.newaxis]
polynomial_features = PolynomialFeatures(degree=self.polynomial_degree)
xp = polynomial_features.fit_transform(x)
res = sm.OLS(y, xp).fit()
return res
class CalculatorBase:
"""Base class for Node Calculator."""
def __init__(self):
"""Initialize."""
super().__init__()
self.attributes = set([])
def __setattr__(self, key, value):
"""Set attribute."""
super().__setattr__(key, value)
if key != 'attributes' and not key.startswith('_'):
self.attributes.add(key)
def update_attributes(self, **kwargs):
"""Update attributes of self."""
for attribute, value in kwargs.items():
setattr(self, attribute, value)
class Calculator(CalculatorBase):
"""Node Calculator."""
def __init__(self, calculation_year=None):
"""Initialize."""
super().__init__()
self.calc_year = calculation_year
@staticmethod
def calculate_wls_prediction_std(result):
"""Calculate and return Weighted Least Squares.
https://www.statsmodels.org/stable/examples/notebooks/generated/wls.html
"""
return wls_prediction_std(result)
@staticmethod
def get_regression_summary(result, conf_int=0.95, columns=None, as_dataframe=False):
"""Return regression summary."""
column_mapper = {
'Obs': 'obs',
'Dep Var\nPopulation': 'dep_var_population',
'Predicted\nValue': 'predicted_value',
'Std Error\nMean Predict': 'std_error_mean_predict',
'Mean ci\n95% low': 'mean_ci_lo',
'Mean ci\n95% upp': 'mean_ci_up',
'Predict ci\n95% low': 'pred_ci_lo',
'Predict ci\n95% upp': 'pred_ci_up',
'Residual': 'residual',
'Std Error\nResidual': 'std_error_residual',
'Student\nResidual': 'student_residual',
"Cook's\nD": 'cooks'
}
# columns = columns or ['Mean ci\n95% low', 'Mean ci\n95% upp']
simple_table, data_table, table_columns = summary_table(result, alpha=conf_int)
table_columns = [column_mapper.get(c) for c in table_columns]
if as_dataframe:
return pd.DataFrame(data_table, columns=table_columns)
def calculate_running_mean(self, data, parameter):
"""Calculate running mean.
Using climatological normal period of 30 years.
"""
running_mean = data[parameter].rolling(
31,
center=True,
# min_periods=3,
).mean()
self.update_attributes(running_mean=running_mean)
def calculate_annual_mean_water_level(self, station_attr):
"""Calculate annual mean water level and update attribute.
Calculations for a specific station.
"""
if station_attr:
try:
k = float(round_value(self.result.params[1], nr_decimals=2)) * -1 or np.nan
except IndexError:
k = np.nan
station_attr.setdefault('ref_value_2000', np.nan)
station_attr.setdefault('k_value', k)
station_attr.setdefault('year', self.calc_year or None)
am = AnnualMeanEquation(**station_attr)
res = am()
station_attr.setdefault('annual_mean', round_value(res, nr_decimals=1))
station_attr.setdefault('apparent_land_uplift', round_value(k, nr_decimals=2))
station_attr.setdefault('summary', am.get_summary_string(self.result.summary2().as_text()))
self.update_attributes(**station_attr)
def calculate_stats(self, data, parameter):
"""Calculate statistics."""
data = data.assign(intercept=1., year=lambda x: x.timestamp.dt.year)
# ols = OLSPolynomialRegression(
# data=data,
# parameter=parameter,
# polynomial_degree=2
# )
ols = OLSLinearRegression(
data=data,
parameter=parameter,
)
res = ols()
reg_sum = self.get_regression_summary(
res,
conf_int=0.05,
as_dataframe=True
)
self.update_attributes(
result=res,
data_values=data[parameter],
year=data['year'],
predstd=reg_sum['predicted_value'],
ci_l=reg_sum['mean_ci_lo'],
ci_u=reg_sum['mean_ci_up'],
iv_l=reg_sum['pred_ci_lo'],
iv_u=reg_sum['pred_ci_up']
)
class Statistics(dict):
"""Dictionary of stations with applied calculations."""
def __init__(self, calculation_year=None):
"""Initialize."""
super().__init__()
self.calc_year = calculation_year or datetime.now().year
print('Calculate annual mean water level for year {}'.format(self.calc_year))
def append_new_station(self, **kwargs):
"""Append data and calculations to self.
Args:
**kwargs: Station data/information.
"""
name = kwargs.get('name')
if name:
print('New station added: {}'.format(name))
self.setdefault(name, Calculator(calculation_year=self.calc_year))
self[name].calculate_stats(kwargs.get('data'), kwargs.get('parameter'))
self[name].calculate_annual_mean_water_level(kwargs.get('station_attr'))
self[name].calculate_running_mean(kwargs.get('data'), kwargs.get('parameter'))
```
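A worked example of the annual-mean formula in `AnnualMeanEquation`, with made-up inputs rather than real station values:

```python
# Made-up inputs, only to show what __call__ computes.
from sirena.core.calculator import AnnualMeanEquation

eq = AnnualMeanEquation(ref_value_2000=500.0, k_value=3.2, year=2021)
mean = eq()                # 500.0 - 3.2 * (2021 - 2000) = 432.8
print(eq.equation_string)  # 432.8 = 500.0 - 3.2 * (2021-2000)
```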
#### File: sirena/core/data_handler.py
```python
from abc import ABC
import numpy as np
import pandas as pd
import datetime as dt
class Frame(pd.DataFrame, ABC):
"""Stores data from one, and only one, station.
We intend to insert information from the WIKSI database.
"""
# datetime start date (timestamp.millisecond = 0)
# https://docs.python.org/3.3/library/datetime.html
# time before this timestamp counts as negative milliseconds
dt_start = dt.datetime(1970, 1, 1)
@property
def _constructor(self):
"""Construct Frame.
        Constructor for Frame, overrides the method in pandas.DataFrame.
"""
return Frame
def convert_formats(self):
"""Convert formats of self."""
self['timestamp'] = self['timestamp'].apply(
lambda x: self.dt_start + dt.timedelta(milliseconds=float(x))
)
self[self.data_columns] = self[self.data_columns].astype(float)
def exclude_flagged_data(self, q_flags=None):
"""Exclude flagged data.
By default we exclude values flagged with:
3 (160) Probably bad
4 (220) Bad
8 (82) Interpolated
..found flag 255, presumably this indicates a bad value
"""
q_flags = q_flags or ['3', '4', '8', '82', '160', '220', '255']
# qf_boolean = self['quality'].isin(q_flags)
# self.drop(self[qf_boolean].index, inplace=True)
for qf_column in self.quality_flag_columns:
qf_boolean = self[qf_column].isin(q_flags)
self.loc[qf_boolean, qf_column.replace('Q_', '')] = np.nan
@property
def data_columns(self):
"""Return (only) data columns."""
cols = []
for c in self.columns:
if c != 'timestamp' and not c.startswith('Q_'):
cols.append(c)
return cols
@property
def quality_flag_columns(self):
"""Return (only) flag columns."""
cols = []
for c in self.columns:
if c.startswith('Q_'):
cols.append(c)
return cols
class DataFrames(dict):
"""Stores information for multiple stations.
Use station name as key in this dictionary of Frame()-objects.
"""
def append_new_frame(self, **kwargs):
"""Append new Frame object to self."""
name = kwargs.get('name')
data = kwargs.get('data')
if name:
self.setdefault(name, Frame(data, columns=kwargs.get('columns')))
self[name].convert_formats()
self[name].exclude_flagged_data()
```
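A minimal sketch of what `DataFrames`/`Frame` expect: a `timestamp` column in epoch milliseconds and data columns paired with `Q_`-prefixed quality-flag columns; flagged values are replaced with NaN. The station name, column names and values below are invented.

```python
# Invented station, columns and values; shows the expected input layout.
from sirena.core.data_handler import DataFrames

rows = [
    [0.0,        '10.5', '0'],   # 1970-01-01, unflagged value
    [86400000.0, '11.2', '3'],   # one day later, flag 3 ("probably bad")
]
frames = DataFrames()
frames.append_new_frame(name='STOCKHOLM', data=rows,
                        columns=['timestamp', 'SEALV', 'Q_SEALV'])

frame = frames['STOCKHOLM']
print(frame.data_columns)            # ['SEALV']
print(frame['SEALV'].isna().sum())   # 1 (the flagged value was set to NaN)
```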
#### File: sirena/readers/samsa.py
```python
import requests
class SAMSABase:
"""Base class for SAMSA reader."""
def __init__(self):
"""Initialize."""
super(SAMSABase, self).__init__()
self.server = None
self._time_limit = None
self._filters = None
self._fields = None
def update_attributes(self, **kwargs):
"""Update attributes."""
for attribute, value in kwargs.items():
setattr(self, attribute, value)
def get_data(self, *args, **kwargs):
"""Return data."""
raise NotImplementedError
@property
def url(self):
"""Return url."""
return ''.join((self.server, self.filter_combo))
@property
def filter_combo(self):
"""Return filter_combo."""
return '&'.join((self.time_limit, self.filters, self.fields))
@property
def time_limit(self):
"""Return time_limit."""
return self._time_limit
@time_limit.setter
def time_limit(self, value):
"""Set time_limit."""
self._time_limit = 'timeLimit=' + str(value)
@property
def filters(self):
"""Return filters."""
return self._filters
@filters.setter
def filters(self, filter_list):
"""Set filters."""
self._filters = 'filters=' + ','.join(filter_list)
@property
def fields(self):
"""Return fields."""
return self._fields
@fields.setter
def fields(self, field_list):
"""Set fields."""
self._fields = 'fields=' + ','.join(field_list)
class SAMSAData(SAMSABase):
"""SAMSA reader."""
def __init__(self):
"""Initialize."""
super(SAMSAData, self).__init__()
def get_data(self):
"""Return data."""
data = None
try:
data = requests.get(self.url).json()
data = data.get('data')
except requests.exceptions.RequestException as e:
print('Could not load data.')
print(e)
# logging.warning('Could not load data.')
return data
``` |
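A sketch of how the property setters compose the request URL; the server address, filters and field names are placeholders, not a real SAMSA endpoint.

```python
# Placeholder endpoint and parameters; shows only how the URL is assembled.
from sirena.readers.samsa import SAMSAData

samsa = SAMSAData()
samsa.server = 'https://example.invalid/api/observations?'
samsa.time_limit = 10000                              # -> 'timeLimit=10000'
samsa.filters = ['station=2130', 'parameter=SEALV']   # -> 'filters=...'
samsa.fields = ['value', 'quality', 'timestamp']      # -> 'fields=...'
print(samsa.url)
# https://example.invalid/api/observations?timeLimit=10000&filters=station=2130,parameter=SEALV&fields=value,quality,timestamp
# get_data() would request this URL and return the 'data' part of the JSON.
```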
{
"source": "JohannesSMHI/svea_experimental_visualization",
"score": 2
} |
#### File: svea_expvis/bokeh_widgets/callbacks.py
```python
from functools import partial
from bokeh.models import Button, FileInput, CustomJS, CrosshairTool
from bokeh.layouts import row, Spacer
from bokeh.models.widgets import Select
from bokeh.plotting import figure
from bokeh.events import ButtonClick
def update_colormapper(fig=None, plot=None, color_mapper=None, data_source=None, x_sel=None):
"""Update the color map."""
code = """
var parameter = cb_obj.value;
console.log('parameter', parameter);
// var data = data_source.data;
const {transform} = renderer.glyph.fill_color;
transform.low = color_mapper[parameter].low;
transform.high = color_mapper[parameter].high;
console.log('transform.low', transform.low);
console.log('transform.high', transform.high);
renderer.glyph.fill_color = {field: parameter, transform: transform};
fig.reset.emit()
"""
return CustomJS(
args=dict(fig=fig, renderer=plot, color_mapper=color_mapper,
data_source=data_source, x_sel=x_sel),
code=code)
def update_colormapper_transect(fig=None, plot=None, color_mapper=None):
"""Update the color map."""
code = """
var parameter = cb_obj.value;
const {transform} = renderer.glyph.fill_color;
var z = 'z';
transform.low = color_mapper[parameter].low;
transform.high = color_mapper[parameter].high;
// console.log('transform.low', transform.low);
// console.log('transform.high', transform.high);
renderer.glyph.fill_color = {field: z, transform: transform};
//renderer.glyph.fill_color = {field: 'z', transform: color_mapper[parameter]};
//color_bar = color_mapper[parameter];
//color_bar.trigger('change')
fig.reset.emit()
"""
return CustomJS(
args=dict(fig=fig, renderer=plot, color_mapper=color_mapper),
code=code)
def update_colormapper_range(fig=None, plot=None, data_source=None):
"""Update the color map."""
code = """
var data = data_source.data;
var parameter = cb_obj.value;
const {transform} = renderer.glyph.fill_color;
var z = 'z';
transform.low = Math.min.apply(Math, data.z)-0.1;
transform.high = Math.max.apply(Math, data.z)+0.1;
renderer.glyph.fill_color = {field: z, transform: transform};
fig.reset.emit()
"""
return CustomJS(
args=dict(fig=fig, renderer=plot, data_source=data_source),
code=code)
def select_callback(data_source=None, axis_objs=None, axis=None):
"""Return JS callback select object."""
code = """
var selector_parameter = this.value;
var data = data_source.data;
var axis_objs = axis_objs;
if (axis == 'x') {
data['x'] = data[selector_parameter];
} else if (axis == 'y') {
data['y'] = data[selector_parameter];
}
for (var i = 0; i < axis_objs.length; i++) {
axis_objs[i].axis_label = selector_parameter
}
data_source.selected.indices = [];
data_source.change.emit();
"""
return CustomJS(
code=code,
args={'data_source': data_source,
'axis_objs': axis_objs,
'axis': axis}
)
def lasso_callback(x_selector=None, y_selector=None, data_source=None, corr_source=None):
"""Return JS callback lasso object."""
code = """
console.log('lasso_callback');
var x_param = x_selector.value;
var y_param = y_selector.value;
var data = data_source.data;
var new_data = {x: [], y: []};
var indices = data_source.selected.indices;
var x_val, y_val;
for (var i = 0; i < indices.length; i++) {
x_val = data[x_param][indices[i]];
y_val = data[y_param][indices[i]];
new_data.x.push(x_val);
new_data.y.push(y_val);
}
corr_source.data = new_data;
"""
return CustomJS(args=dict(
x_selector=x_selector,
y_selector=y_selector,
data_source=data_source,
corr_source=corr_source
),
code=code)
def lasso_transect_callback(z_selector=None,
pos_source=None,
data_source=None,
plot_source=None):
"""Return JS callback lasso object."""
code = """
var x_param = 'timestamp';
var y_param = 'PRES_CTD';
var z_param = z_selector.value;
var time_param = 'timestring';
var data = data_source.data;
var pos = pos_source.data;
var new_data = {x: [], y: [], z: []};
var pos_indices = pos_source.selected.indices;
var selected_keys = [];
for (var i = 0; i < pos_indices.length; i++) {
selected_keys.push(pos[time_param][pos_indices[i]]);
}
var x_val, y_val, z_val, time_val;
for (var i = 0; i < data.x.length; i++) {
time_val = data[time_param][i];
if (selected_keys.indexOf(time_val) !== -1) {
x_val = data[x_param][i];
y_val = data[y_param][i];
z_val = data[z_param][i];
new_data.x.push(x_val);
new_data.y.push(y_val);
new_data.z.push(z_val);
}
}
plot_source.data = new_data;
"""
return CustomJS(args=dict(
z_selector=z_selector,
pos_source=pos_source,
data_source=data_source,
plot_source=plot_source,
),
code=code)
def lasso_corr_callback(x_selector=None, y_selector=None, data_source=None,
position_source=None, corr_source=None, corr_plot=None):
"""Return JS callback lasso object."""
code = """
console.log('lasso_callback');
var x_param = x_selector.value;
var y_param = y_selector.value;
var data = data_source.data;
var pos_data = position_source.data;
var new_data = {x: [], y: [], dep: [], key: []};
var indices = cb_obj.indices;
var selected_keys = [];
for (var i = 0; i < indices.length; i++) {
selected_keys.push(pos_data['KEY'][indices[i]]);
}
var key_val, x_val, y_val, dep_val;
for (var i = 0; i < data.KEY.length; i++) {
key_val = data.KEY[i];
if (selected_keys.indexOf(key_val) !== -1) {
x_val = data[x_param][i];
y_val = data[y_param][i];
dep_val = data['DEPH'][i];
new_data.x.push(x_val);
new_data.y.push(y_val);
new_data.dep.push(dep_val);
new_data.key.push(key_val);
}
}
if (new_data.x.length > 1) {
//console.log('axes_range update!', corr_plot.x_range.start, corr_plot.x_range.end, corr_plot.y_range.start, corr_plot.y_range.end)
var min_value = Math.min(...new_data.x.filter(x => !Number.isNaN(x)), ...new_data.y.filter(x => !Number.isNaN(x)))-0.2;
var max_value = Math.max(...new_data.x.filter(x => !Number.isNaN(x)), ...new_data.y.filter(x => !Number.isNaN(x)))+0.2;
corr_plot.x_range.start = min_value;
corr_plot.x_range.end = max_value;
corr_plot.y_range.start = min_value;
corr_plot.y_range.end = max_value;
corr_plot.change.emit();
corr_plot.reset.emit();
//console.log('after update!', corr_plot.x_range.start, corr_plot.x_range.end, corr_plot.y_range.start, corr_plot.y_range.end)
}
corr_source.data = new_data;
"""
return CustomJS(args=dict(
x_selector=x_selector,
y_selector=y_selector,
data_source=data_source,
position_source=position_source,
corr_source=corr_source,
corr_plot=corr_plot,
),
code=code)
def range_selection_callback(data_source=None):
"""Return JS callback select object."""
code = """
var data = data_source.data;
var min_dep = cb_obj.value[0];
var max_dep = cb_obj.value[1];
var indices = [];
for (var i = 0; i < data.dep.length; i++) {
if ((data.dep[i] >= min_dep) && (data.dep[i] <= max_dep)) {
indices.push(i)
}
}
data_source.selected.indices = indices;
"""
return CustomJS(args={'data_source': data_source},
code=code)
def range_slider_update_callback(slider=None, data_source=None):
"""Return JS callback slider object."""
code = """
var data = data_source.data;
//var values = [];
//var i = 0;
//while ( ! isNaN(data.y[i]) ) {
// values.push(data.y[i])
// i++
//}
slider.start = Math.min.apply(Math, data.dep);
slider.end = Math.max.apply(Math, data.dep);
slider.value = [slider.start, slider.end];
slider.change.emit();
"""
return CustomJS(args={'slider': slider, 'data_source': data_source},
code=code)
def month_selection_callback(position_source=None, position_master_source=None):
"""Return JS callback select object."""
code = """
console.log('month_selection_callback');
// Get data from ColumnDataSource
var selected_data = {LATIT: [], LONGI: [], STATN: [], KEY: [], MONTH: []};
var master = master_source.data;
var month_mapping = {'All': 'All',
'January': 1, 'February': 2,
'March': 3, 'April': 4,
'May': 5, 'June': 6,
'July': 7, 'August': 8,
'September': 9, 'October': 10,
'November': 11, 'December': 12};
var selected_month = month_mapping[month.value];
var key_val, lat_val, lon_val, statn_val, mon_val;
for (var i = 0; i < master.KEY.length; i++) {
key_val = master.KEY[i];
lat_val = master.LATIT[i];
lon_val = master.LONGI[i];
statn_val = master.STATN[i];
mon_val = master.MONTH[i];
if (selected_month == 'All') {
selected_data.KEY.push(key_val);
selected_data.LATIT.push(lat_val);
selected_data.LONGI.push(lon_val);
selected_data.STATN.push(statn_val);
selected_data.MONTH.push(mon_val);
} else if (mon_val == selected_month) {
selected_data.KEY.push(key_val);
selected_data.LATIT.push(lat_val);
selected_data.LONGI.push(lon_val);
selected_data.STATN.push(statn_val);
selected_data.MONTH.push(mon_val);
}
}
source.data = selected_data;
"""
# Create a CustomJS callback with the code and the data
return CustomJS(args={'source': position_source,
'master_source': position_master_source},
code=code)
def lasso_correlation_callback(z_selector=None,
pos_source=None,
data_source=None,
plot_source=None,
line_source=None):
"""Return JS callback lasso object."""
code = """
var mapper = {'DOXY_CTD': 'DOXY_MVP', 'DENS_CTD': 'DENS_MVP'};
var time_param = 'timestring';
var selected_param = z_selector.value;
var selected_param_mvp = mapper[selected_param];
var data = data_source.data;
var pos = pos_source.data;
var pos_indices = pos_source.selected.indices;
var selected_keys = [];
for (var i = 0; i < pos_indices.length; i++) {
selected_keys.push(pos[time_param][pos_indices[i]]);
}
var new_data = {x: [], y: []};
var x_val, y_val, time_val;
for (var i = 0; i < data.PRES_CTD.length; i++) {
time_val = data[time_param][i];
if (selected_keys.indexOf(time_val) !== -1) {
x_val = data[selected_param][i];
y_val = data[selected_param_mvp][i];
new_data.x.push(x_val);
new_data.y.push(y_val);
}
}
var high = Math.max.apply(Math, [Math.max.apply(Math, new_data.x), Math.max.apply(Math, new_data.y)]);
var low = Math.min.apply(Math, [Math.min.apply(Math, new_data.x), Math.min.apply(Math, new_data.y)]);
line_source.data = {x: [low, high], y: [low, high]};
plot_source.data = new_data;
"""
return CustomJS(args=dict(
z_selector=z_selector,
pos_source=pos_source,
data_source=data_source,
plot_source=plot_source,
line_source=line_source,
),
code=code)
```
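These functions only build `CustomJS` objects; they still have to be attached to widgets and data sources. A hedged wiring sketch for `select_callback`, with made-up column names and data:

```python
# Made-up columns and data; shows how the CustomJS objects above get attached.
from bokeh.models import ColumnDataSource, Select
from bokeh.plotting import figure

from svea_expvis.bokeh_widgets.callbacks import select_callback

source = ColumnDataSource({'x': [1, 2], 'y': [3, 4],
                           'TEMP': [1, 2], 'SALT': [3, 4]})
fig = figure()
fig.circle('x', 'y', source=source)

x_select = Select(title='x-axis', value='TEMP', options=['TEMP', 'SALT'])
# On change, the JS code copies the chosen column into data['x'] and
# relabels the axes passed in axis_objs.
x_select.js_on_change('value', select_callback(data_source=source,
                                               axis_objs=list(fig.xaxis),
                                               axis='x'))
```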
#### File: svea_expvis/bokeh_widgets/map.py
```python
from bokeh.plotting import figure
from bokeh.tile_providers import get_provider, Vendors
tile_provider = get_provider('CARTODBPOSITRON')
def get_map(title='', x_range=None, y_range=None, tooltips=None):
    """Return a bokeh figure."""
    x_range = x_range or (500000, 3000000)
    y_range = y_range or (7000000, 10000000)
fig = figure(
x_range=x_range, y_range=y_range,
x_axis_type="mercator", y_axis_type="mercator",
plot_height=500, plot_width=800,
tools="pan,lasso_select,wheel_zoom,reset,save",
active_drag="lasso_select",
active_scroll="wheel_zoom",
tooltips=tooltips,
title=title
)
fig.xgrid.grid_line_color = None
fig.ygrid.grid_line_color = None
fig.add_tile(tile_provider)
return fig
```
#### File: svea_expvis/bokeh_widgets/utils.py
```python
import numpy as np
import pandas as pd
import geopandas as gp
from shapely.geometry import Point, MultiPoint
from shapely.ops import nearest_points
from datetime import datetime as dt
from pyproj import CRS, transform
from decimal import Decimal, ROUND_HALF_UP
from bokeh.models import ColumnDataSource
def round_value(value, nr_decimals=2):
"""Calculate rounded value."""
return str(Decimal(str(value)).quantize(Decimal('%%1.%sf' % nr_decimals % 1), rounding=ROUND_HALF_UP))
def decmin_to_decdeg(pos, string_type=True, decimals=4):
"""Convert position from decimal degrees into degrees and decimal minutes."""
pos = float(pos)
output = np.floor(pos / 100.) + (pos % 100) / 60.
output = round_value(output, nr_decimals=decimals)
if string_type:
return output
else:
return float(output)
def convert_projection(lats, lons):
"""Convert coordinates to a different system."""
project_projection = CRS('EPSG:4326')
google_projection = CRS('EPSG:3857')
x, y = transform(project_projection, google_projection, lons, lats, always_xy=True)
return x, y
def get_columndata_source(df, *args, lat_col=None, lon_col=None):
"""Return a bokeh ColumnDataSource object."""
if lat_col:
xs, ys = convert_projection(df[lat_col].astype(float).values,
df[lon_col].astype(float).values)
df['LONGI'] = xs
df['LATIT'] = ys
if any(args):
params = list(args)
df['x'] = df[params[0]]
df['y'] = df[params[1]]
if lon_col and lat_col and (lon_col != 'LONGI' and lat_col != 'LATIT'):
df = df.drop(columns=[lat_col, lon_col])
return ColumnDataSource(df)
def get_columndata_source_json(indata, xy_params=False):
"""
indata-structure:
{
timestamp: {
lat: value,
lon: value,
data: {
o2: array,
dens: array,
pressure: array
}
}
}
"""
new_data = {
'timestring': [],
'timestamp': [],
'PRES_CTD': [],
'DOXY_CTD': [],
'DENS_CTD': [],
}
for key, item in indata.items():
new_data['DOXY_CTD'].extend(item['data']['o2'])
new_data['DENS_CTD'].extend(item['data']['dens'])
new_data['PRES_CTD'].extend(item['data']['pressure'])
new_data['timestring'].extend([key] * len(item['data']['o2']))
# new_data['timestamp'].extend([dt.strptime(key, "%Y%m%d%H%M%S")] * len(item['data']['o2']))
new_data['timestamp'].extend([pd.Timestamp(key)] * len(item['data']['o2']))
if xy_params:
new_data['x'] = new_data['DOXY_CTD']
new_data['y'] = new_data['PRES_CTD']
return ColumnDataSource(new_data)
def get_position_columndata_source_json(indata):
"""
indata-structure:
{
timestamp: {
lat: value,
lon: value,
data: {
o2: array,
dens: array,
pressure: array
}
}
}
"""
new_data = {
'timestring': [],
'lat': [],
'lon': []
}
for key, item in indata.items():
new_data['timestring'].append(key)
new_data['lat'].append(item['lat'])
new_data['lon'].append(item['lon'])
xs, ys = convert_projection(new_data['lat'], new_data['lon'])
new_data['lon'] = xs
new_data['lat'] = ys
return ColumnDataSource(new_data)
def get_matching_columndata_source(data_ctd=None, data_mvp=None, match_timestrings=None):
"""Pick out matching values between the data sources."""
new_data = {
'timestring': [],
'timestamp': [],
'PRES_CTD': [],
'DOXY_CTD': [],
'DENS_CTD': [],
'DOXY_MVP': [],
'DENS_MVP': [],
}
df_ctd = pd.DataFrame(data_ctd.data)
df_mvp = pd.DataFrame(data_mvp.data)
for ctd_time, mvp_time in match_timestrings.items():
mvp_boolean = df_mvp['timestring'] == mvp_time
ctd_boolean = df_ctd['timestring'] == ctd_time
ctd_boolean = ctd_boolean & (df_ctd['PRES_CTD'].isin(df_mvp.loc[mvp_boolean, 'PRES_CTD']))
for row in df_ctd.loc[ctd_boolean, :].itertuples():
depth_boolean = mvp_boolean & (df_mvp['PRES_CTD'] == row.PRES_CTD)
if depth_boolean.sum() == 1:
new_data['timestring'].append(row.timestring)
new_data['timestamp'].append(row.timestamp)
new_data['PRES_CTD'].append(row.PRES_CTD)
new_data['DOXY_CTD'].append(row.DOXY_CTD)
new_data['DENS_CTD'].append(row.DENS_CTD)
new_data['DOXY_MVP'].append(df_mvp.loc[depth_boolean, 'DOXY_CTD'].values[0])
new_data['DENS_MVP'].append(df_mvp.loc[depth_boolean, 'DENS_CTD'].values[0])
return ColumnDataSource(new_data)
def get_matching_positions(data_ctd=None, data_mvp=None):
gf_ctd = gp.GeoDataFrame(data_ctd.data)
gf_ctd.geometry = gf_ctd[['lat', 'lon']].apply(lambda x: Point(x), axis=1)
gf_mvp = gp.GeoDataFrame(data_mvp.data)
gf_mvp.geometry = gf_mvp[['lat', 'lon']].apply(lambda x: Point(x), axis=1)
mvp_points = MultiPoint(gf_mvp['geometry'])
matching = {}
for row in gf_ctd.itertuples():
mvp_index = np.where(gf_mvp.geometry == nearest_points(row.geometry, mvp_points)[1])[0][0]
if row.geometry.distance(mvp_points[mvp_index]) < 10000:
matching[row.timestring] = gf_mvp['timestring'][mvp_index]
# returning dict. Matching CTD-timestring to MVP-timestring {"CTD-timestring": "MVP-timestring"}
return matching
```
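A worked example of the coordinate helpers: positions stored as degrees and decimal minutes become decimal degrees, and WGS84 lat/lon become Web Mercator x/y for the tiled map. The coordinates are illustrative.

```python
# Illustrative coordinates only.
from svea_expvis.bokeh_widgets.utils import convert_projection, decmin_to_decdeg

# 5730.50 means 57 degrees 30.50 minutes -> 57 + 30.50/60 ~ 57.5083
print(decmin_to_decdeg(5730.50))                     # '57.5083'
print(decmin_to_decdeg(5730.50, string_type=False))  # 57.5083

# WGS84 (lat, lon) -> Web Mercator (x, y) as used by the map figure
x, y = convert_projection([57.5083], [11.65])
```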
#### File: svea_experimental_visualization/svea_expvis/filter.py
```python
import pandas as pd
class Name:
"""Class to extract information from file names."""
splitter = '_'
def __init__(self, file_name):
"""Initialize."""
args = file_name.strip('.txt').split(self.splitter)
self.date = pd.Timestamp(args[2])
self.shipc = args[3]
self.serno = float(args[4])
class SplitNameList:
"""Holds a lists of file name information."""
dates = []
ships = []
sernos = []
def __init__(self, name_list):
"""Initialize."""
self.names = name_list
for name in name_list:
name_obj = Name(name)
self.append_date(name_obj.date)
self.append_shipc(name_obj.shipc)
self.append_serno(name_obj.serno)
@classmethod
def append_date(cls, d):
"""Append date to class list."""
cls.dates.append(d)
@classmethod
def append_shipc(cls, s):
"""Append ship code to class list."""
cls.ships.append(s)
@classmethod
def append_serno(cls, s):
"""Append serno to class list."""
cls.sernos.append(s)
class Filter:
"""Filter filenames according to ctd-standard format eg.
'ctd_profile_20181208_34AR_0171.txt'.
"""
def __init__(self, name_list):
"""Initialize."""
lists = SplitNameList(name_list)
self.serie_names = pd.Series(lists.names)
self.serie_dates = pd.Series(lists.dates)
self.serie_ships = pd.Series(lists.ships)
self.serie_sernos = pd.Series(lists.sernos)
self._boolean = True
def add_filter(self, **kwargs):
"""Add new filter.
If any valid filter arguments: append boolean according
to @boolean.setter (property.setter).
"""
if 'month_list' in kwargs:
self.boolean = self.serie_dates.dt.month.isin(kwargs.get('month_list'))
if 'ship_list' in kwargs:
self.boolean = self.serie_ships.isin(kwargs.get('ship_list'))
if 'serno_max' in kwargs:
self.boolean = self.serie_sernos <= kwargs.get('serno_max')
if 'serno_min' in kwargs:
self.boolean = self.serie_sernos >= kwargs.get('serno_min')
@property
def valid_file_names(self):
"""Return valid file names."""
return self.serie_names[self.boolean].values
@property
def boolean(self):
"""Return boolean."""
return self._boolean
@boolean.setter
def boolean(self, add_bool):
"""Set boolean."""
self._boolean = self._boolean & add_bool
``` |
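A short sketch of the name-based filtering; the file names are invented but follow the documented `ctd_profile_<date>_<ship>_<serno>.txt` pattern.

```python
# Invented file names following the documented pattern.
from svea_expvis.filter import Filter

names = [
    'ctd_profile_20181208_34AR_0171.txt',
    'ctd_profile_20190615_34AR_0042.txt',
    'ctd_profile_20190620_77SE_0007.txt',
]
f = Filter(names)
f.add_filter(month_list=[6], ship_list=['34AR'])  # June cruises on ship 34AR
print(f.valid_file_names)  # ['ctd_profile_20190615_34AR_0042.txt']
```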
{
"source": "JohannesSMHI/weather-dashboard",
"score": 3
} |
#### File: weather-dashboard/data_handler/rainy.py
```python
RAIN_MAPPER = {
'rainh': ['date', 'hour'],
'raind': ['date'],
'rainw': ['week'],
'rainm': ['month'],
'raint': ['year']
}
def get_rainframe(df, parameter):
"""Return a grouped dataframe based on the timing attribute."""
if not df.empty:
return df.groupby(
[df['timestamp'].dt.__getattribute__(t_spec)
for t_spec in RAIN_MAPPER.get(parameter)]
).max().reset_index(drop=True)
else:
return df
``` |
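The rain counters are cumulative within each period, so `get_rainframe` groups on the time unit mapped from the parameter and keeps the maximum per group. A small example with an invented frame; the import path assumes the repository layout.

```python
# Invented data; real frames come from the weather-station source.
import pandas as pd

from data_handler.rainy import get_rainframe

df = pd.DataFrame({
    'timestamp': pd.to_datetime(['2021-06-01 10:00',
                                 '2021-06-01 23:00',   # same day
                                 '2021-06-02 12:00']),
    'raind': [1.2, 3.4, 0.6],   # cumulative rain per day
})
daily = get_rainframe(df, 'raind')
print(daily['raind'].tolist())   # [3.4, 0.6] - one maximum per day
```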
{
"source": "johannessonlab/SpokBlockPaper",
"score": 2
} |
#### File: Annotation/scripts/GFFnumerator.py
```python
import sys # To exit the script
import os # For the input name
import argparse # For the fancy options
# import time
import datetime
import gffutils
import gffutils.inspect as inspect
# ------------------------------------------------------
version = 1
versiondisplay = "{0:.2f}".format(version)
# ============================
# Make a nice menu for the user
# ============================
parser = argparse.ArgumentParser(description="* Script to re-name the IDs of the genes and features of a GFF3 file *") # Create the object using class argparse
# Add options
parser.add_argument('GFF', help="GFF3 file sorted beforehand")
parser.add_argument('--sample', '-s', help="String representing sample that gets appended into the gene IDs")
parser.add_argument("--printdb", "-p", help="Print the database into a file instead of saving it in memory", default=False, action='store_true')
parser.add_argument('--version', '-v', action='version', version='%(prog)s ' + versiondisplay)
try:
# ArgumentParser parses arguments through the parse_args() method You can
# parse the command line by passing a sequence of argument strings to
# parse_args(). By default, the arguments are taken from sys.argv[1:]
args = parser.parse_args()
GFFopen = open(args.GFF, 'r')
except IOError as msg: # Check that the file exists
parser.error(str(msg))
parser.print_help()
# ============================
# ---------------------------------
# Make database
# ---------------------------------
# t0 = time.time()
# This will parse the file, infer the relationships among the features in the file, and store the features and relationships
# See https://pythonhosted.org/gffutils/autodocs/gffutils.create_db.html
# I expect a MAKER gff, where CDS have no unique ID
if args.printdb:
input_base = os.path.splitext(args.GFF)[0] # Taking out the prefix of the file
input_name = os.path.basename(input_base) # Remove the path
dbfnchoice = input_name + '.db'
else:
dbfnchoice = ':memory:'
id_spec={"gene": ["ID", "Name"], "mRNA": ["ID", "transcript_id"]} # http://daler.github.io/gffutils/database-ids.html
db = gffutils.create_db(data = args.GFF,
dbfn = dbfnchoice,
force = True, # force=True overwrite any existing databases.
id_spec = id_spec,
merge_strategy = "create_unique") # Add an underscore an integer at the end for each consecutive occurrence of the same ID
# verbose = True,)
# t1 = time.time()
# db_results = inspect.inspect(db) # Report
# print("\n\nIt took {0:.1f}s to create database".format(t1 - t0))
# ---------------------------------
## Get iterator of all genes
genes = [gene for gene in db.features_of_type("gene")]
nogenes = db.count_features_of_type("gene")
# Get ids of genes in database
allIDsGenes = [gene.id for gene in db.features_of_type("gene")]
## Make new IDs for the genes
if args.sample is not None:
newIDgene = [args.sample + "{0:05d}".format(n) for n in range(1, nogenes + 1)]
else:
newIDgene = ["gene.{0:05d}".format(n) for n in range(1, nogenes + 1)]
# ---------------------------
### Change IDs
# ---------------------------
def getnewID(id):
indexgene = allIDsGenes.index(id)
newid = newIDgene[indexgene]
# print(newid)
return(newid)
def gen():
sys.stdout.write("##gff-version 3\n")
# Add a line to mark the file with this script
now = datetime.datetime.now()
    newhead = '# Original file ' + os.path.basename(args.GFF) + ' modified with GFFnumerator.py v. ' + str(versiondisplay) + ' on ' + str(now) + '\n'
sys.stdout.write(newhead)
# Actual GFF
for gene in db.features_of_type('gene'):
# for gene in db.features_of_type('gene', order_by='start'):
newidgene = getnewID(gene.id)
gene['ID'] = newidgene
print(gene)
mRNA = 1
for child in list(db.children(gene)):
if child.featuretype == "mRNA":
child['Parent'] = newidgene
# make a new ID for the mRNA
newidrna = newidgene + "-mRNA" + str(mRNA)
mRNA += 1
child['ID'] = newidrna
print(child)
# Update all the features that depend on the mRNA, assuming each feature has a SINGLE parent
typeids = {'gene':1, 'mRNA':1, 'exon':1, 'CDS':1, 'five_prime_UTR':1, 'three_prime_UTR':1}
for grandchild in list(db.children(gene, level = 2)):
grandchild['Parent'] = newidrna # TODO Add something here, a conditional, in case there are multiple parents
typefea = grandchild.featuretype # What type are we dealing with?
newidchild = newidrna + '-' + typefea + "{0:01d}".format(typeids[typefea])
grandchild['ID'] = newidchild
typeids[typefea] += 1 # Increase the count of the corresponding type for this gene
print(grandchild)
gen()
``` |
{
"source": "JohannesStutz/beproductive",
"score": 2
} |
#### File: beproductive/beproductive/blocker.py
```python
__all__ = ['APP_NAME', 'REDIRECT', 'WIN_PATH', 'LINUX_PATH', 'NOTIFY_DURATION', 'ICON_PATH', 'host_fp', 'host_fp_copy',
'host_fp_blocked', 'Blocker']
# Cell
from pathlib import Path
from shutil import copy
import beproductive.config as config
import sys
try:
from win10toast import ToastNotifier
win_notify = ToastNotifier()
except:
win_notify = False
# Cell
APP_NAME = 'Be Productive'
REDIRECT = '127.0.0.1'
WIN_PATH = r'C:\Windows\System32\drivers\etc'
LINUX_PATH = r'/etc'
NOTIFY_DURATION = 5  # notification duration in seconds
ICON_PATH = 'icon.ico'
if sys.platform == 'win32':
host_path = Path(WIN_PATH)
else:
host_path = Path(LINUX_PATH)
host_fp = host_path/'hosts'
host_fp_copy = host_path/'hosts.original'
host_fp_blocked = host_path/'hosts.blocked'
# Cell
class Blocker():
"The core of the package. It modifies the hosts file of the OS."
def __init__(self, redirect=REDIRECT):
self.adminrights = False
self.redirect = redirect
self.blocklist = config.load_config()
if not host_fp_copy.exists():
self._setup()
if self._create_blocked_list():
self.adminrights = True
def _setup(self):
"Creates a copy of the `hosts` file and saves it as `hosts.original`"
try:
copy(host_fp, host_fp_copy)
self.notify("Setup successful")
except PermissionError:
self._raise_permission_error()
def _create_blocked_list(self):
"Creates a copy of `hosts.original` and saves it to `hosts.blocked`. Then adds all blocked sites."
try:
copy(host_fp_copy, host_fp_blocked)
with open(host_fp_blocked, "a") as blocked_file:
for url in self.blocklist:
# TODO: refine, add www only if not in url, remove www if in url
blocked_file.write(f"{self.redirect} {url} www.{url} api.{url}\n")
# Special case for Twitter which has a special API URL
if url == 'twitter.com':
blocked_file.write(f"{self.redirect} tpop-api.twitter.com\n")
return True
except PermissionError:
self._raise_permission_error()
return False
def block(self, notify=False):
"Blocks all specified websites by replacing `hosts` file with `hosts.blocked`"
try:
copy(host_fp_blocked, host_fp)
except PermissionError:
self._raise_permission_error()
return False
if notify:
self.notify("Websites blocked, enjoy your work.")
return "Websites blocked"
def unblock(self, notify=False):
"Unblocks all websites by restoring the original `hosts` file"
try:
copy(host_fp_copy, host_fp)
except PermissionError:
self._raise_permission_error()
return False
if notify:
self.notify("All websites unblocked.")
return "Websites unblocked"
def notify(self, message, title=APP_NAME, duration=NOTIFY_DURATION):
"Sends notification to CLI and - if available - to GUI"
print(message)
if win_notify:
win_notify.show_toast(title, message, duration=duration)
def _raise_permission_error(self):
self.notify("Permission Error. Please run the command line tool as ADMINISTRATOR.")
```
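A minimal sketch of driving `Blocker` directly (the pomodoro module below does essentially this); it has to run with administrator/root rights because the hosts file is replaced.

```python
# Requires admin/root rights: the OS hosts file is swapped out and back.
from beproductive.blocker import Blocker

blocker = Blocker()
if blocker.adminrights:
    blocker.block(notify=True)     # hosts -> hosts.blocked
    # ... focused work ...
    blocker.unblock(notify=True)   # hosts -> hosts.original
```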
#### File: beproductive/beproductive/pomodoro.py
```python
__all__ = ['WORK_TIME', 'BREAK_TIME', 'POMODOROS', 'pomodoro']
# Cell
from time import sleep
from .blocker import *
import sys
WORK_TIME = 25 # minutes
BREAK_TIME = 5 # minutes
POMODOROS = 4
# Cell
def pomodoro(work_time=WORK_TIME, break_time=BREAK_TIME, pomodoros=POMODOROS):
blocker = Blocker()
if not blocker.adminrights:
return False
turn = 1
while turn <= pomodoros:
if blocker.block():
blocker.notify(f"Pomodoro no. {turn} of {pomodoros} started, work for {work_time} minutes")
else:
blocker.notify("An error occured. Please exit with ctrl+c")
sleep(work_time*60)
blocker.unblock()
if turn < pomodoros:
blocker.notify(f"Pomodoro no. {turn} ended, take a {break_time} minutes break")
sleep(break_time*60)
else:
blocker.notify(f"Pomodoro session ended, take a longer break. All websites unblocked.", duration=10)
turn += 1
``` |
{
"source": "JohannesStutz/blurry",
"score": 2
} |
#### File: blurry/blurry/core.py
```python
__all__ = ['show_cv2', 'blur', 'pixelate', 'find_faces', 'blur_areas', 'anonymize', 'load_img', 'path',
'get_image_download_link', 'img1', 'img2', 'img3', 'example', 'img_file', 'uploaded_file']
# Cell
import cv2
import numpy as np
import fastcore
import math
from pathlib import Path
import matplotlib.pyplot as plt
from PIL import Image
### For Streamlit ###
import streamlit as st
import base64
from io import BytesIO
# Cell
def show_cv2(img: np.ndarray) -> None:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
display(Image.fromarray(img))
# Cell
def blur(img: np.ndarray, factor=1, sigma_x=0) -> np.ndarray:
h, w, _ = img.shape
kernel_size = max(w,h) / 3
kernel_size *= factor
kernel_size = math.ceil(kernel_size)
# Make sure that kernel size is an odd number
if kernel_size % 2 == 0:
kernel_size += 1
return cv2.GaussianBlur(img, (kernel_size, kernel_size), sigma_x)
# Cell
def pixelate(img: np.ndarray, factor=1) -> np.ndarray:
h, w, _ = img.shape
aspect_ratio = h/w
# New sizes
small_h, small_w = 10/factor*aspect_ratio, 10/factor
# Make sure resized version is at least 1 pixel in both dimensions and Integer
small_h, small_w = int(max(1, small_h)), int(max(1, small_w))
small = cv2.resize(img, (small_w, small_h), interpolation=cv2.INTER_LINEAR)
output = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)
return output
# Cell
def find_faces(img):
"Finds faces in a picture and returns tuples of (x, y, w, h) for each face"
assert Path('haarcascade_frontalface_default.xml').is_file(), "haarcascade file not found"
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
return faces
# Cell
def blur_areas(img, areas, factor=1, blur_func=blur):
"""
Blurs defined areas in a cv2 image.
Inputs:
img: cv2 image in BGR format
areas: tuples of (x, y, w, h)
factor: increase (>1.0) or decrease (<1.0) default blurring
        blur_func: `blur` or `pixelate` (or any function that takes
the arguments `image` and `factor`)
Returns:
cv2 image in BGR format
"""
for (x, y, w, h) in areas:
y = int(y - 0.1*h)
h = int(1.25*h)
img[y:y+h,x:x+w] = blur_func(img[y:y+h,x:x+w], factor=factor)
return img
# Cell
def anonymize(img, factor=1, mode='blur', convert2rgb=False)->np.ndarray:
faces = find_faces(img)
if mode == 'pixelate':
blur_func = pixelate
else:
blur_func = blur
img = blur_areas(img, faces, factor=factor, blur_func=blur_func)
if convert2rgb:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
# Cell
def load_img(fn):
img = cv2.imread(str(fn))
assert isinstance(img, np.ndarray), "Image file not found"
return img
# Cell
path = Path('test_images')
# Cell
def get_image_download_link(img):
"""Generates a link allowing the PIL image to be downloaded
in: PIL image
out: href string
Source: https://discuss.streamlit.io/t/how-to-download-file-in-streamlit/1806/19
"""
buffered = BytesIO()
img.save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue()).decode()
href = f'<a href="data:file/jpg;base64,{img_str}" download="blurredfaces.jpg">Download result</a>'
return href
# Cell
st.title("Blurry Faces");
st.write("Upload any photo with people in it, and this tool will pixelate the faces.")
# Cell
img1 = path/'group.jpg'
img2 = path/'group_closer.jpg'
img3 = path/'crowd.jpg'
st.image([Image.open(img1), Image.open(img2), Image.open(img3)], width=200)
example = st.radio("Choose an example image", ["Group", "Small group", "Large crowd"])
img_file = None
if example == "Group":
img_file = img1
elif example == "Small group":
img_file = img2
elif example == "Large crowd":
img_file = img3
# Cell
uploaded_file = st.file_uploader("Or upload a photo:", type=['png', 'jpg', 'jpeg'], accept_multiple_files=False)
# Cell
if uploaded_file or img_file:
if uploaded_file: file = uploaded_file
elif img_file: file = img_file
#st.write("Processing...")
img = Image.open(file)
img_array = np.array(img)
anon_img = anonymize(img_array, mode='pixelate')
anon_img_pil = Image.fromarray(anon_img)
st.image(anon_img, caption=" ", use_column_width='auto')
st.markdown(get_image_download_link(anon_img_pil), unsafe_allow_html=True)
``` |
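The Streamlit code above is just one front end; below is a minimal sketch of the same pipeline without Streamlit. It assumes the Haar cascade XML sits in the working directory and that the `test_images/group.jpg` example exists in the checkout; note that importing `blurry.core` also executes the module-level Streamlit calls.

```python
# Assumes haarcascade_frontalface_default.xml is in the working directory
# and that test_images/group.jpg exists locally; illustrative sketch only.
import cv2

from blurry.core import anonymize, load_img

img = load_img('test_images/group.jpg')                # BGR ndarray
pixelated = anonymize(img, factor=1, mode='pixelate')  # faces pixelated
cv2.imwrite('group_pixelated.jpg', pixelated)          # still BGR, fine for imwrite
```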
{
"source": "JohannesTheo/SurvivalBox",
"score": 3
} |
#### File: SurvivalBox/survivalbox/card.py
```python
__author__ = '<NAME>'
import pygame
import numpy as np
from .game_objects import Sheep, Wolf, Fireplace
class Card(pygame.Surface):
# Some colors
black = (0,0,0)
white = (255,255,255)
white2 = (244,244,244)
grey = (130,130,130)
grey2 = (230,230,230)
dark = (51,51,51)
dark2 = (42,49,59)
blue = (45,162,225)
blue2 = (35,192,234)
cyan = (21,163,186)
cyan2 = (0,186,175)
orange = (251,140,0)
    FIX_TILE_SIZE = 8  # a fixed size for the card: every observation will be rescaled to a representation with a TileSize of 8 pixels.
def __init__(self, width, height, Title):
# the size of the map in grid points
# self.MAP_WIDTH = map_size[0]
# self.MAP_HEIGHT = map_size[1]
# self.MAP_GRID_H = map_grid_h
# some margins
self.margin_out = 10
self.margin_in = 10
#w = 2 * self.margin_in + 2 * self.margin_out + 100
#h = 2 * self.margin_in + 2 * self.margin_out + self.MAP_HEIGHT * Card.FIX_TILE_SIZE
# w = min_width
# h = min_height
pygame.Surface.__init__(self, (width, height))
self.TitleString = Title
self.Title_y = 0
self.Active = False
#self.TileSize = tile_size
# Standard colors
self.TitleColor = Card.white2
self.TitleBg_Active = Card.orange
self.TitleBg_Inactive = Card.grey
self.StatsColor = Card.white2
self.CardColor = Card.dark2
# Standard font
self.TitleFont = pygame.font.SysFont("monaco", 18, False, False)
self.StatsFont = pygame.font.SysFont("monaco", 15, False, False)
# self.update_static(self.Active)
def update(self, active):
if self.Active != active:
self.Active = not self.Active
self.update_static(self.Active)
def scale_to(self, tile_size):
# maybe some TileSize depending rescaling? hm...
# Currently fixed size
return None
def update_static(self, active):
# Clear the Card
self.fill((0,0,0))
# Draw the Card background
background = (self.margin_out, 0, self.get_width() - 2 * self.margin_out, self.get_height())
# pygame.draw.rect(self, self.CardColor, background)
pygame.draw.rect(self, self.CardColor, background)
# Draw the Card title
self.Title_y = self.draw_title(active)
return self.Title_y
def empty_position(self, position):
ending_at_x = position[0]
ending_at_y = position[1] + self.StatsFont.get_height() #+ self.margin_in
return (ending_at_x, ending_at_y)
def draw_observation(self, raw_image, position):
x = self.margin_in + self.margin_out + position[0]
y = position[1]
self.blit(raw_image, (x,y))
ending_at_x = x + raw_image.get_width()
ending_at_y = y + raw_image.get_height() #+ self.margin_in
return (ending_at_x, ending_at_y)
def draw_statistic(self, text_string, position, color=None, bg_color=None):
'''
This method will render some text_string as statistic.
X wise the outer and inner margin is applied by default, allowing for an extra offset through the position[0] argument.
Y wise no margin is applied
'''
x = self.margin_in + self.margin_out + position[0]
y = position[1]
# set colors by user or default
if color is None: color = self.StatsColor
if bg_color is None: bg_color = self.CardColor
# draw the text to the surface
Text = self.StatsFont.render(text_string, 1, color, bg_color)
self.blit(Text,(x,y))
# returns the ending x and y pixel positions inside the card. Those can be used in later calls.
ending_at_x = x + Text.get_width()
ending_at_y = y + Text.get_height() #+ self.margin_in
return (ending_at_x, ending_at_y)
def draw_line(self, thickness, color, position, full_width=False, y_margin=None):
'''
Draws a line. Before and after the line we apply an y_margin
'''
if y_margin is None: y_margin = self.margin_in
if full_width:
x = 0
width = self.get_width()
else:
x = self.margin_in + self.margin_out + position[0]
width = self.get_width() - 2 * self.margin_in - 2 * self.margin_out
y = position[1] + y_margin
the_line = (x, y, width, thickness)
pygame.draw.rect(self, color, the_line)
ending_at_x = x + width
ending_at_y = y + thickness + y_margin
return (ending_at_x, ending_at_y)
def draw_title(self, active):
# Draw a Title depending on the ACTIVE status
if active:
# Calculate dimensions
TitleText = self.TitleFont.render(self.TitleString, 1, self.TitleColor, self.TitleBg_Active)
TitleBg = (0, 0, self.get_width(), TitleText.get_height() + 2 * self.margin_in)
# Draw background and title
pygame.draw.rect(self, self.TitleBg_Active, TitleBg)
self.blit(TitleText, (self.margin_out + self.margin_in, self.margin_in))
# Calculate and draw decoration
TitleBottom = TitleBg[1] + TitleBg[3]
deco_left = [( 0, TitleBottom ),
(self.margin_out, TitleBottom ),
(self.margin_out, TitleBottom + self.margin_out)]
deco_right = [(self.get_width() , TitleBottom ),
(self.get_width() - self.margin_out, TitleBottom ),
(self.get_width() - self.margin_out, TitleBottom + self.margin_out)]
pygame.draw.polygon(self, self.TitleBg_Active, deco_left)
pygame.draw.polygon(self, self.TitleBg_Active, deco_right)
else:
# Calculate dimensions
TitleText = self.TitleFont.render(self.TitleString, 1, self.TitleColor, self.TitleBg_Inactive)
TitleBg = (self.margin_out, 0, self.get_width() - 2 * self.margin_out, TitleText.get_height() + 2 * self.margin_in)
TitleBottom = TitleBg[1] + TitleBg[3]
# Draw background and title
pygame.draw.rect(self,self.TitleBg_Inactive, TitleBg)
self.blit(TitleText, (self.margin_out + self.margin_in, self.margin_in))
# return the bottom y position so it can be used to place other objects.
ending_at_y = TitleBottom + self.margin_in
return ending_at_y
def reset(self, active=False):
self.update_static(active)
class AgentCard(Card):
def __init__(self, agent_entry, map_dimension_x, map_dimension_y, tile_size, full_map_obs, basic=True, detail=True, observation=True):
self.BASIC_INFO = basic
self.DETAIL_INFO = detail
self.OBSERVATION = observation
# Calculate the total size of the card to init base class
self.FIX_TILE_SIZE = 8
self.MAP_W = map_dimension_x * self.FIX_TILE_SIZE
self.MAP_H = map_dimension_y * self.FIX_TILE_SIZE
self.FULL_MAP_OBS = full_map_obs
if tile_size == self.FIX_TILE_SIZE:
self.OBSERVATION = False
self.TileSize = tile_size
self.AgentEntry = agent_entry
self.ID = agent_entry["ID"]
self.Agent = agent_entry["Agent"]
self.Statistics = self.Agent.Statistics
self.AgentView = agent_entry["AgentView"]
self.VIEW_W = self.AgentView.get_width()
self.VIEW_H = self.AgentView.get_height()
if self.FULL_MAP_OBS:
self.Scaled_VIEW_W = self.MAP_W
self.Scaled_VIEW_H = self.MAP_H
else:
self.ViewPort_GRID_W = agent_entry["ViewPort_Grid"][0]
self.ViewPort_GRID_H = agent_entry["ViewPort_Grid"][1]
self.Scaled_VIEW_W = self.ViewPort_GRID_W * self.FIX_TILE_SIZE
self.Scaled_VIEW_H = self.ViewPort_GRID_H * self.FIX_TILE_SIZE
self.VIEW_Y = 0
self.BASIC_STATS_Y = 0
self.DETAIL_STATS_Y = 0
self.OBSERVATION_Y = 0
self.MIN_WIDTH = 202
self.MIN_HEIGHT = 0
Card.__init__(self, 0,0, "Calculating size...")
self.CARD_WIDTH = self.calculate_card_width()
self.CARD_HEIGHT = self.update_static()
Card.__init__(self, self.CARD_WIDTH, self.CARD_HEIGHT, "Agent: {}".format(self.ID))
self.update_static(self.Active)
def calculate_card_width(self):
Observation_W = (self.VIEW_W + 2 * self.margin_in + 2 * self.margin_out)
Scaled_Observation_W = (self.Scaled_VIEW_W + 2 * self.margin_in + 2 * self.margin_out)
return max(self.MIN_WIDTH, Observation_W, Scaled_Observation_W)
def update(self, active=False):
super(AgentCard,self).update(active)
        # Re-assign our AgentView because the entry's view surface may have been replaced
self.AgentView = self.AgentEntry["AgentView"]
self.Statistics = self.Agent.Statistics
next_x = 0
next_y = self.VIEW_Y
if self.TileSize != self.FIX_TILE_SIZE:
ScaledView = pygame.transform.scale(self.AgentView, ( self.Scaled_VIEW_W, self.Scaled_VIEW_H ))
next_x, next_y = self.draw_observation(ScaledView, (0, next_y))
else:
next_x, next_y = self.draw_observation(self.AgentView, (0, next_y))
# Draw Stats
if self.BASIC_INFO:
next_y = self.BASIC_STATS_Y
if active: color = Card.orange
else: color=None
next_x, next_y = self.draw_statistic("Energy: {:.2f}".format(self.Agent.Energy), (0,next_y), color)
next_x, next_y = self.draw_statistic("Reward: {:3.1f}".format(self.Statistics["rewards"]["reward_total"]), (0,next_y), color)
if self.DETAIL_INFO:
next_y = self.DETAIL_STATS_Y
stats = self.get_detailed_statistics()
next_x, next_y = self.draw_statistic("Food: {} | {}".format(stats[0][0], stats[0][0]), (0,next_y), stats[0][2])
next_x, next_y = self.draw_statistic("Fire: {} | {}".format(stats[1][0], stats[1][1]), (0,next_y), stats[1][2])
next_x, next_y = self.draw_statistic("Sheep: {} | {}".format(stats[2][0], stats[2][1]), (0,next_y), stats[2][2])
next_x, next_y = self.draw_statistic("Wolf: {} | {}".format(stats[3][0], stats[3][1]), (0,next_y), stats[3][2])
if self.OBSERVATION:
self.draw_observation(self.AgentView, (0, self.OBSERVATION_Y))
def get_detailed_statistics(self):
food = [self.Statistics["specialisation"]["collected_food"], self.Statistics["rewards"]["reward_from_food"], None]
fire = [self.Statistics["specialisation"]["steps_as_fireguard"], self.Statistics["rewards"]["reward_from_fire"], None]
sheep = [self.Statistics["specialisation"]["steps_as_shepherd"], self.Statistics["rewards"]["reward_from_sheep"], None]
wolf = [self.Statistics["specialisation"]["catched_wolf"], self.Statistics["rewards"]["reward_from_wolf"], None]
stats = [food, fire, sheep, wolf]
# find the entry the currently contributes the most to the reward and set a highlight color
rewards = [entry[1] for entry in stats]
max_reward = max(rewards)
if max_reward > 0:
index_max_reward = np.argmax(rewards)
stats[index_max_reward][2] = Card.blue
return stats
def scale_to(self, tile_size):
        # maybe some TileSize-dependent rescaling later? hm...
# currently fixed size
return None
def update_static(self, active=False):
next_y = super(AgentCard,self).update_static(active)
self.VIEW_Y = next_y
# draw window
next_y += (self.Scaled_VIEW_H + self.margin_in)
next_x, next_y = self.draw_statistic("({}x{}) pixels".format(self.AgentView.get_width(), self.AgentView.get_height()) , (0,next_y))
if self.BASIC_INFO:
next_x, next_y = self.draw_line(2, Card.grey,(0,next_y))
self.BASIC_STATS_Y = next_y
next_x, next_y = self.empty_position((0,next_y)) # Energy
next_x, next_y = self.empty_position((0,next_y)) # Reward
if self.DETAIL_INFO:
next_x, next_y = self.draw_line(2, Card.grey,(0,next_y))
self.DETAIL_STATS_Y = next_y
next_x, next_y = self.empty_position((0,next_y)) # Food
next_x, next_y = self.empty_position((0,next_y)) # Fire
next_x, next_y = self.empty_position((0,next_y)) # Sheep
next_x, next_y = self.empty_position((0,next_y)) # Wolf
if self.OBSERVATION:
            # if the current y position is in close range to the map end, align for a nicer view!
next_y = self.y_allign_to(self.MAP_H, next_y)
next_x, next_y = self.draw_line(4, Card.black,(0,next_y), True)
next_x, next_y = self.draw_statistic("Observation:" , (0,next_y))
next_y += self.margin_in
self.OBSERVATION_Y = next_y
next_y += (self.AgentView.get_height())
next_y = self.y_allign_to(self.MAP_H, next_y)
return next_y + self.margin_in
def y_allign_to(self, anchor_pos, y_pos, activation_range=150):
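        # If y_pos lies within `activation_range` pixels above `anchor_pos`,
        # snap it to just above the anchor so the card bottom lines up with the
        # map edge; otherwise leave it unchanged.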
if (0 < (anchor_pos - y_pos) < activation_range):
#print("ALLGIN")
return (anchor_pos - self.margin_in)
else:
return y_pos
class StatisticsCard(Card):
def __init__(self, map_statistics, map_meta, npc_list, rewards):
self.MAP_INFO = True
self.NPC_INFO = True
self.map = map_statistics
self.MAP_WIDTH = map_meta["width"] - 2 # minus border
self.MAP_HEIGHT = map_meta["height"] - 2 # minus border
self.rewards = rewards
self.NPCs = npc_list
self.NPC_Y = 0
        # Init a first time only for dimension calculations
Card.__init__(self, 0, 0, "Calculating size...")
self.CARD_HEIGHT = self.update_static()
self.CARD_WIDTH = 490
# Now init with the correct dimensions
Card.__init__(self, self.CARD_WIDTH, self.CARD_HEIGHT, "SurvivalBox statistics")
self.TitleBg_Active = Card.blue
self.TitleBg_Inactive = Card.blue
self.CARD_HEIGHT = self.update_static()
def update(self, npc_list, active=False):
super(StatisticsCard, self).update(active)
if self.NPC_INFO:
# draw npc stats
next_x = 0
next_y = self.NPC_Y
for NPC in npc_list:
stats = NPC.Statistics["specialisation"]
ID = NPC.ID
if isinstance(NPC, Sheep):
FOOD = stats["collected_food"]
WS = stats["steps_with_shepherd"]
WOS = stats["steps_without_shepherd"]
DIED = stats["catched_by_wolf"]
next_x, next_y = self.draw_statistic("Sheep {}: food {} | shepherd ({},{}) | died {}".format(ID, FOOD, WS, WOS, DIED),(0, next_y))
elif isinstance(NPC, Wolf):
HUNTING = stats["steps_hunting"]
SHEEP = stats["catched_sheep"]
CATCHED = stats["catched_by_survivor"]
ATTACK = stats["attacked_survivor"]
next_x, next_y = self.draw_statistic("Wolf {}: chasing {} | sheep {} | attacks {} | died {}".format(ID, HUNTING, SHEEP, ATTACK, CATCHED),(0, next_y))
elif isinstance(NPC, Fireplace):
ON = stats["steps_fire_on"]
OFF = stats["steps_fire_off"]
                    SWITCHES = stats["fire_switches"]
                    next_x, next_y = self.draw_statistic("Fire {}: on {} | off {} | switches {}".format(ID, ON, OFF, SWITCHES), (0, next_y))
else:
raise Exception("Not a valid NPC type!")
def update_static(self, active=False):
# update title active status
next_y = super(StatisticsCard, self).update_static(active)
next_x, next_y = self.draw_statistic("Rewards: Food (+{}) / Fire (+{})".format(self.rewards["grass"], self.rewards["fire"]), (0, next_y))
next_x, next_y = self.draw_statistic(" Sheep (+{}) / Wolf (+{})".format(self.rewards["sheep"], self.rewards["wolf"]), (0, next_y))
if self.MAP_INFO:
next_x, next_y = self.draw_line( 2, Card.grey, (0, next_y))
next_x, next_y = self.draw_statistic("Map: {} x {} ({})".format(self.MAP_WIDTH, self.MAP_HEIGHT, self.map["total"]), (0, next_y))
next_x, next_y = self.draw_statistic("Tiles: {} Water | {} Dirt | {} Grass".format(self.map["water"], self.map["dirt"], self.map["grass"]),(0, next_y))
if self.NPC_INFO:
next_x, next_y = self.draw_line( 4, Card.black, (0, next_y), True)
next_x, next_y = self.draw_statistic("NPCs:",(0, next_y))
self.NPC_Y = next_y + self.margin_in
next_y = self.NPC_Y
for npc in self.NPCs:
next_x, next_y = self.empty_position((next_x, next_y))
return next_y + self.margin_in
``` |
{
"source": "johannestreutlein/op-tie-breaking",
"score": 3
} |
#### File: op-tie-breaking/src/op_with_tie_breaking.py
```python
import argparse
import matplotlib.pyplot as plt
import numpy as np
import datetime
import pandas as pd
import json
import os
from utils.uniquify import uniquify
def op_tie_breaking_evaluation(hash_lists, args):
'''This function evaluates our method, other-play with tie-breaking. It applies the tie-breaking function to different training runs to
choose policies and
then starts an experiment that calculates cross-play values for the chosen policies.
It would probably be better to run this with sacred, as part of an experiment, instead
of an extra python file, etc., but this suffices for now.
'''
chosen_indices_dict = {}
n_seeds_total = args.n_seeds_per_run
number_of_runs = len(hash_lists[args.hash_function_seeds[0]]) // args.n_seeds_per_run
for hash_seed, hashs in hash_lists.items():
print('\n\n=============================================')
print('-----------seed of hash function: {}---------'.format(hash_seed))
n_seeds = 1 #the case of one seed represents simply doing other-play
chosen_indices_dict[hash_seed] = {}
while n_seeds <= n_seeds_total:
print('\n\n----------- seeds per run: {}--------'.format(n_seeds))
print('-------------------------------------')
chosen_indices = []
for index in range(number_of_runs):
print('\n-------- run {} -------'.format(index))
hash_list = np.array(hashs[index*n_seeds_total:index*n_seeds_total+n_seeds_total])
hash_list = hash_list[:n_seeds]
print('\nhash_list:')
print(hash_list)
chosen_indices.append(op_with_tie_breaking(hash_list))
print('\nchosen indices:')
print(chosen_indices)
chosen_indices_dict[hash_seed][n_seeds] = chosen_indices
n_seeds *= 2
print('\nDoing a new cross play evaluation with the chosen models, for each hash-function seed\n\n')
#now, constructing new csv with model paths
#very inefficient with a loop and creating copies, but it doesn't matter at the moment
new_model_paths = pd.DataFrame()
for n_seeds in range(int(np.floor(np.log2(n_seeds_total))) + 1):
for hash_seed in args.hash_function_seeds:
for runs, chosen_policy in enumerate(chosen_indices_dict[hash_seed][2 ** n_seeds]):
new_model_paths = new_model_paths.append(args.model_paths.iloc[runs * n_seeds_total + chosen_policy], ignore_index=True)
print('Prepared model paths:')
print(new_model_paths)
filename = os.path.join('results', 'op_with_tie_breaking', 'chosen_model_list_hash_run_{}.csv'.format(args.hash_run))
filename = uniquify(filename)
new_model_paths.to_csv(filename)
os.system('python3 src/main.py --env-config={} --config=policy_gradient \
with seed={} \
evaluate=True \
cross_play=True \
calculate_hash=False \
test_nepisode={} \
n_seeds_per_run={} \
hash_function_seeds={} \
model_paths={}'.format(args.env, args.seed, args.test_nepisode, number_of_runs,
str(args.hash_function_seeds).replace(' ', ''), filename))
    # here, each run corresponds to a chosen number of seeds and each seed is a chosen policy from a different run
def op_with_tie_breaking(hashes):
    '''This implements other-play with tie-breaking.
    Input is a list of tie-breaking values; output is the index of the policy with the highest value.
    The actual values are calculated in the hash run, by a method of the environment.
'''
best_policy = hashes.argmax()
print('\nIndex chosen policy:')
print(best_policy)
return best_policy
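# Hypothetical example (illustration only): for the hash values of three seeds
# of one run, the seed with the largest value wins the tie-break.
#   op_with_tie_breaking(np.array([0.12, 0.87, 0.45]))  # -> 1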
def load_info(args):
returns = None
filename = os.path.join('results', 'sacred', args.hash_run, 'config.json')
print('\n\nloading config from {}'.format(filename))
with open(filename) as json_file:
config = json.load(json_file)
args.env = config["env"]
args.hash_function_seeds = config["hash_function_seeds"]
print(args.hash_function_seeds)
filename = os.path.join('results', 'sacred', args.hash_run, 'info.json')
print('\n\nloading hashs from ' + filename)
hash_lists = {}
with open(filename) as json_file:
data = json.load(json_file)
for seed in args.hash_function_seeds:
hashs = data['trajectories_hash_{}'.format(seed)]
print('\n')
print('hash values seed {}:'.format(seed))
print(hashs)
hash_lists[seed] = hashs
args.model_paths = data['model_paths']
args.model_paths = pd.DataFrame(args.model_paths)
print('\nModel paths:')
print(args.model_paths)
return hash_lists, args
if __name__ == '__main__':
print(datetime.datetime.now())
# Define the parser
parser = argparse.ArgumentParser(description='Short sample app')
parser.add_argument('--hash_run', action="store", dest='hash_run', default=None) #sacred directory of the run from hash calculation
parser.add_argument('--test_nepisode', action="store", dest='test_nepisode', default='2048') #number of episodes to use to calculate cross-play values
parser.add_argument('--seed', action="store", dest='seed', default='100') # seed for environment and policy randomness
    parser.add_argument('--n_seeds_per_run', action="store", dest='n_seeds_per_run', default='32') # number of seeds per training run
# Now, parse the command line arguments and store the
# values in the `args` variable
args = parser.parse_args()
args.test_nepisode = json.loads(args.test_nepisode)
args.seed = json.loads(args.seed)
args.n_seeds_per_run= json.loads(args.n_seeds_per_run)
hash_lists, args = load_info(args)
op_tie_breaking_evaluation(hash_lists, args)
print('\n\n')
```
#### File: op-tie-breaking/src/plot_cross_play_heatmap.py
```python
import argparse
import numpy as np
import datetime
import pandas as pd
import json
import seaborn as sns
import matplotlib.pyplot as plt
import os
from utils.uniquify import uniquify
import matplotlib as mpl
def plot_cross_play_heatmap(matrices, args):
mpl.rcParams.update({
"font.size": 8,
})
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
grid_kws = {"width_ratios": (.48, .48, .04), "wspace": .3, "hspace": .3}
f, (ax1, ax2, cbar_ax) = plt.subplots(1, 3, gridspec_kw=grid_kws, figsize=(5.8, 2.6))
ax1.title.set_text('Other-play')
ax2.title.set_text('Other-play with tie-breaking')
axes = [ax1, ax2]
maxim = 0
minim = 0
for index, matrix in enumerate(matrices):
print('return matrix:')
print(matrix)
off_diag = matrix[~np.eye(matrix.shape[0], dtype=bool)]
print('off_diagonal_mean:')
print(off_diag.mean())
print('off_diagonal_std:')
print(off_diag.std())
maxim = max([maxim, matrix.max()])
minim = min([minim, matrix.min()])
for index, matrix in enumerate(matrices):
heatmap = sns.heatmap(matrix, cmap='viridis', ax=axes[index], cbar=bool(index), cbar_ax=cbar_ax, vmax=maxim, vmin=minim)#linewidth=0.5,ticklabels=True, yticklabels=True
heatmap.set_xlabel('Player 2 seed')
heatmap.set_ylabel('Player 1 seed')
heatmap.tick_params(length=0, labeltop=True, labelbottom=False)
plt.gcf().subplots_adjust(top=0.8)
if args.save:
filename = 'results/op_with_tie_breaking/evalrun_{}_index_{}_cross_play_heatmap_{}.pdf'.format(args.run, args.index, args.env)
filename = uniquify(filename)
plt.savefig(filename)
plt.show()
def load_data(args):
filename = os.path.join('results/sacred', args.run, 'info.json')
print('\n\nLoading returns from ' + filename)
returns = []
with open(filename) as json_file:
data = json.load(json_file)
for dict in data['test_return_mean']:
returns.append(dict['value'])
filename = os.path.join('results/sacred', args.run, 'config.json')
print('\n\nLoading config from ' + filename)
with open(filename) as json_file:
config = json.load(json_file)
args.env = config["env"]
args.n_seeds_per_run = config["n_seeds_per_run"]
matrices = []
#pick out a run by its index.
for index in args.index:
relevant_returns = returns[index * args.n_seeds_per_run**2: (index + 1) * args.n_seeds_per_run**2]
#make it into a matrix
return_matrix = np.array(relevant_returns).reshape(args.n_seeds_per_run, args.n_seeds_per_run)
matrices.append(return_matrix)
return matrices, args
if __name__ == '__main__':
print(datetime.datetime.now())
# Define the parser
parser = argparse.ArgumentParser(description='Short sample app')
# sacred directory of the run of op_with_tie_breaking.py evaluation
parser.add_argument('--run', action="store", dest='run', default=None)
parser.add_argument('--save', action="store", dest='save', default=True)
# the starting index of the run of which the cross-play value should be plotted to a heatmap
# should be a list of two indices, the first one corresponding to other-play, the other one to op with tie-breaking
parser.add_argument('--index', action="store", dest='index', default='[0,100]')
# Now, parse the command line arguments and store the
# values in the `args` variable
args = parser.parse_args()
print("\nargs:")
print(args)
args.index = json.loads(args.index)
#masks = None
#if args.hash_run is None:
# args.hash_run=args.run
# Individual arguments can be accessed as attributes...
matrices, args = load_data(args)
plot_cross_play_heatmap(matrices, args)
```
#### File: src/utils/hash_function.py
```python
import torch as th
import numpy as np
from zlib import crc32
import torch.nn as nn
import torch.nn.functional as F
class HashFunction(nn.Module):
#this implements the neural network for hashing
def __init__(self, input_shape, seed, args):
with th.random.fork_rng():
#we set the random seed here to coordinate between the different principals
th.manual_seed(seed)
np.random.seed(seed)
super(HashFunction, self).__init__()
self.args = args
self.fc1 = nn.Linear(input_shape, args.hash_hidden_dim).double()
self.fc2 = nn.Linear(args.hash_hidden_dim, args.hash_hidden_dim).double()
self.fc3 = nn.Linear(args.hash_hidden_dim, args.hash_hidden_dim).double()
self.fc4 = nn.Linear(args.hash_hidden_dim, args.hash_hidden_dim).double()
self.fc5 = nn.Linear(args.hash_hidden_dim, 1).double()
def forward(self, inputs):
h1 = F.relu(self.fc1(inputs*np.sqrt(inputs.shape[-1])))
h2 = F.relu(self.fc2(h1*np.sqrt(h1.shape[-1])))
h3 = F.relu(self.fc3(h2*np.sqrt(h2.shape[-1])))
h4 = F.relu(self.fc4(h3*np.sqrt(h3.shape[-1])))
hashs = self.fc5(h4*np.sqrt(h4.shape[-1]))
return hashs
def hash_function(histories, seed, args, logger):
hash_type = args.hash_type
if hash_type == 'cov':
histories = np.array(histories.T,
dtype=np.float64) # prepare for calculating moments
histories -= histories.mean(axis=1, keepdims=True) # center each random variable
logger.console_logger.info(
'Calculating central moments with {} samples of {}-dim RV'.format(histories.shape[0], histories.shape[1]))
cov_matrix = np.cov(histories) #could also calculate higher moments potentially
logger.console_logger.info(
'Cov matrix:\n' + str(cov_matrix))
history_hash = cov_matrix.mean()
logger.console_logger.info(
'Cov matrix mean: ' + str(history_hash))
if hash_type == 'nn':
input_shape = histories.shape[-1]
hash_network = HashFunction(input_shape, seed, args)
input = th.tensor(histories, dtype=th.float64)
hashs = hash_network.forward(input)
history_hash = hashs.mean().item()
return history_hash
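# Usage note: for hash_type='nn', `histories` is treated as an array of shape
# [..., n_features] (the last axis feeds the network input). Forking the RNG
# and seeding it with `seed` gives every principal that uses the same seed an
# identical random network, so the returned scalar is reproducible across runs.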
``` |
{
"source": "JohannesTruongLe/dataset_analysis_framework",
"score": 3
} |
#### File: lib/dataloader/KITTI.py
```python
import logging
import pandas as pd
from lib.dataloader.base_class import DataLoaderBase
from lib.dataloader.constants.KITTI import KITTI_COLS
LOGGER = logging.getLogger(__name__)
class KITTIDataLoader(DataLoaderBase):
"""KITTI DataLoader class."""
def __init__(self,
image_path,
label_path,
save_labels_as_dataframe_path=None):
"""Init.
Args:
image_path (str or pathlib.Path): Path to images of KITTI Dataset.
label_path (str or pathlib.Path): Path of labels of KITTI Dataset.
save_labels_as_dataframe_path (str or pathlib.Path): Path to save the pickled pandas
DataFrame to. Can be None if saving is not needed.
"""
super().__init__()
LOGGER.debug("Build KITTI dataloader ...")
self._image_path = image_path
self._label_path = label_path
self._save_labels_as_dataframe_path = save_labels_as_dataframe_path
def generate_sample(self):
"""Generate sample.
Yields:
pandas.DataFrame: Pandas Dataframe holding labels from one file. Column names can be
seen in dataloader.constants.KITTI_COLS. Index is the FILENAME_LABELPOSITION in file.
"""
for file in list(self._label_path.iterdir()):
LOGGER.debug("Reading from %s", file)
labels_from_one_file = pd.read_csv(file,
index_col=None,
header=None,
names=KITTI_COLS,
delimiter=' ')
# Set file name + label position in file as index
base_name = file.stem
sample_index = [base_name + '_' + str(label_idx)
for label_idx in range(labels_from_one_file.shape[0])]
labels_from_one_file.insert(loc=0, column='name', value=sample_index)
labels_from_one_file.set_index('name', inplace=True)
yield labels_from_one_file
def store_labels(self, output_path=None):
"""Store labels as Data Frame pickle file.
Args:
output_path (str or pathlib.Path or None): Path to store file. If None, take path given
during init.
"""
if not output_path:
output_path = self._save_labels_as_dataframe_path
assert self._save_labels_as_dataframe_path, "No path to store to given."
LOGGER.debug("Store data ...")
labels = self.build_label_dataframe()
output_path.parent.mkdir(parents=True, exist_ok=True)
labels.to_pickle(output_path)
```
#### File: lib/manifold/tsne.py
```python
import numpy as np
import tqdm
from sklearn.neighbors import NearestNeighbors
from lib.manifold.metric import compute_gaussian_similarity
from lib.manifold.base_class import ManifoldBase
class TSNE(ManifoldBase):
"""TSNE class.
    For a detailed description refer to the t-SNE paper (http://www.cs.toronto.edu/~hinton/absps/tsne.pdf).
"""
def __init__(self,
seed=42,
n_components=2,
perplexity=10,
n_iter=200,
learning_rate=0.2,
fine_tune_share=0.1):
"""Init.
Args:
seed (int): Random seed to ensure reproducibility.
n_components (int): Number of components of output space.
perplexity (int): Number of nearest neighbours considered during optimization.
n_iter (int): Number of iterations.
learning_rate (float): Learning rate.
            fine_tune_share (float): Share of fine-tuning epochs. Example: if fine_tune_share is 0.1
                and n_iter is 100, then 100*(1-fine_tune_share) = 90 epochs are trained with the original
                learning rate, and the remaining 0.1*100 = 10 epochs are trained with 0.1*learning_rate.
"""
self._random_state = np.random.RandomState(seed)
self._n_components = n_components
self._perplexity = perplexity
self._n_iter = n_iter
self._learning_rate = learning_rate
self._fine_tune_share = fine_tune_share
self._fine_tune_epoch = int(self._n_iter*(1-self._fine_tune_share))
def fit(self, data):
"""Perform TSNE on data.
Args:
data (numpy.ndarray(numpy.float)): Data to fit of shape [n_samples, n_features].
Returns:
numpy.ndarray(numpy.float): Data in embedded space of shape
[n_samples, self._n_components]
"""
n_samples = data.shape[0]
# Init data into embedded space
data_embedded = self._random_state.randn(n_samples, self._n_components)
        # Compute asymmetric probability function in original/old space
neighbour_distances, neighbour_idx = \
NearestNeighbors(n_neighbors=self._perplexity,
metric='euclidean').fit(data).kneighbors(data)
asym_prob_table_old_space = _compute_asym_prob_table(
similarity_function=compute_gaussian_similarity,
neighbour_distances=neighbour_distances,
neighbour_idx=neighbour_idx)
data_embedded = self._gradient_descent(asym_prop_old_space=asym_prob_table_old_space,
data=data_embedded,
neighbours_old_space=neighbour_idx)
return data_embedded
def _gradient_descent(self,
asym_prop_old_space,
data,
neighbours_old_space):
"""Optimize the model.
Args:
asym_prop_old_space (numpy.ndarray(numpy.float)): Table of size [n_samples, n_samples],
                where each cell shows the asymmetric probability between two samples (indices refer to
                what samples) of the original data.
data (numpy.ndarray(numpy.float)): Data of new space. Data should have shape of
[n_samples, n_features].
neighbours_old_space (numpy.ndarray(numpy.float): Idx in shape [n_samples, n_neighbours]
to the neighbours, for calculation example refer to from sklearn.neighbors import
NearestNeighbors.
Returns:
numpy.ndarray(numpy.float): Data embedded in new space. Data shape is
[n_samples, self._n_components]
"""
neighbour_distances, neighbour_idx_table = \
NearestNeighbors(n_neighbors=self._perplexity,
metric='euclidean').fit(data).kneighbors(data)
asym_prob_new_space = _compute_asym_prob_table(
similarity_function=compute_gaussian_similarity,
neighbour_distances=neighbour_distances,
neighbour_idx=neighbour_idx_table)
learning_rate = self._learning_rate
for iter_idx in tqdm.tqdm(range(self._n_iter), desc="Fit data with TSNE"):
for first_idx in range(data.shape[0]):
sum_value = 0
neighbour_table = np.concatenate([neighbours_old_space, neighbour_idx_table],
axis=1)
for neighbour_idx in range(neighbour_table.shape[1]):
second_idx = neighbour_table[first_idx, neighbour_idx]
sum_value += \
2 * ((data[first_idx] - data[second_idx]) *
(asym_prop_old_space[first_idx, second_idx] -
asym_prob_new_space[first_idx, second_idx] +
asym_prop_old_space[second_idx, first_idx] -
asym_prob_new_space[second_idx, first_idx]))
data[first_idx] -= learning_rate * sum_value
if iter_idx % 5 == 0:
neighbour_distances, neighbour_idx_table = \
NearestNeighbors(n_neighbors=self._perplexity,
metric='euclidean').fit(data).kneighbors(data)
asym_prob_new_space = _compute_asym_prob_table(
similarity_function=compute_gaussian_similarity,
neighbour_distances=neighbour_distances,
neighbour_idx=neighbour_idx_table)
if iter_idx == self._fine_tune_epoch:
learning_rate *= 0.1
data -= np.mean(data)
data /= np.std(data)
return data
def _compute_asym_prob_table(neighbour_distances,
neighbour_idx,
similarity_function=compute_gaussian_similarity):
"""Compute asymetric probability table.
For details refer to the T-SNE paper.
Args:
neighbour_distances (numpy.ndarray(numpy.float)): Distances in shape
[n_samples, n_neighbours]. For calculation example refer to from sklearn.neighbors import
NearestNeighbors.
neighbour_idx (numpy.ndarray(numpy.float)): Idx in shape [n_samples, n_neighbours] to the
neighbours, for example calculation refer to from sklearn.neighbors import NearestNeighbors.
similarity_function (function): Function to call calculate the distance from. For example
refer to manifold.metric.compute_gaussian_similarity().
Returns:
numpy.ndarray(numpy.float): Table of size [n_samples, n_samples], where each cell shows the
        asymmetric probability between two samples (indices refer to what samples).
"""
dissim_table = _compute_dissim_table(similarity_function=similarity_function,
neighbour_idx=neighbour_idx,
neighbour_distances=neighbour_distances)
asym_similarity_table = _compute_asym_prob_table_given_dissim(similarity_table=dissim_table,
neighbour_idx_table=neighbour_idx)
return asym_similarity_table
def _compute_dissim_table(neighbour_idx,
neighbour_distances,
similarity_function=compute_gaussian_similarity):
"""Compute dissimilarity table.
For details refer to the T-SNE paper.
Args:
neighbour_distances (numpy.ndarray(numpy.float)): Refer to
manifold.tsne._compute_asym_prob_table() docstring.
        neighbour_idx (numpy.ndarray(numpy.float)): Refer to
manifold.tsne._compute_asym_prob_table() docstring.
similarity_function (function): Refer to manifold.tsne._compute_asym_prob_table() docstring
Returns:
numpy.ndarray(numpy.float): Table of size [n_samples, n_samples], where each cell shows the
        dissimilarity between two samples (indices refer to what samples).
"""
n_samples = neighbour_idx.shape[0]
table = np.zeros([n_samples, n_samples])
skip_table = np.zeros_like(table, dtype=np.bool)
for first_idx in range(n_samples):
        for neighbour_rank in range(neighbour_idx.shape[1]):
            # neighbour_idx holds the actual sample indices of the neighbours,
            # so map the neighbour rank to the sample index before filling the
            # [n_samples, n_samples] table.
            second_idx = neighbour_idx[first_idx, neighbour_rank]
            if first_idx != second_idx and not skip_table[first_idx][second_idx]:
                skip_table[first_idx][second_idx] = True
                skip_table[second_idx][first_idx] = True
                similarity = similarity_function(
                    distance=neighbour_distances[first_idx, neighbour_rank])
                table[first_idx][second_idx] = similarity
                table[second_idx][first_idx] = similarity
return table
def _compute_asym_prob_table_given_dissim(similarity_table, neighbour_idx_table):
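    """Normalize a dissimilarity table into asymmetric neighbour probabilities.
    For each sample i and each of its neighbours j (given by
    neighbour_idx_table), p(j|i) = exp(-d_ij) / sum_k exp(-d_ik), where the sum
    runs over the neighbours of i; entries for non-neighbours remain zero.
    Args:
        similarity_table (numpy.ndarray(numpy.float)): Dissimilarity table of
            size [n_samples, n_samples], e.g. from _compute_dissim_table().
        neighbour_idx_table (numpy.ndarray): Neighbour sample indices of shape
            [n_samples, n_neighbours].
    Returns:
        numpy.ndarray(numpy.float): Table of size [n_samples, n_samples] holding
        the asymmetric probabilities.
    """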
n_samples = similarity_table.shape[0]
asym_similarity_table = np.zeros([n_samples, n_samples])
for idx in range(n_samples):
neighbours_idx = neighbour_idx_table[idx, :]
denom = np.sum(np.exp(-similarity_table[idx, neighbours_idx]))
for neighbour_idx in neighbours_idx:
asym_similarity_table[idx, neighbour_idx] = \
np.exp(-similarity_table[idx, neighbour_idx]) / denom
return asym_similarity_table
```
#### File: lib/util/argparse_util.py
```python
import argparse
def string_to_bool(source):
"""Translate string to bools.
[yes, true t, y, 1] are interpreted as True, whereas [no, false , f, n, 0] as False. No
sensitive case. Credits go to:
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse/43357954
Args:
source (str): Valid inputs are [('yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0']
Returns:
bool: Translated string.
Raises:
ArgumentTypeError: String was not valid input.
"""
if source.lower() in ('yes', 'true', 't', 'y', '1'):
output = True
elif source.lower() in ('no', 'false', 'f', 'n', '0'):
output = False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
return output
def default_config_parse(default_config_path):
"""Parse inline commands.
Args:
default_config_path (str): Path to default config.
Returns:
argparse.Namespace: Container holding
config (dict): Dictionary holding configs
verbose (bool): Bool value, which can be used for setting verbosity
"""
parser = argparse.ArgumentParser()
parser.add_argument('--config',
help="Path to config file.",
type=str,
default=default_config_path)
parser.add_argument('--verbose',
help="Increase output verbosity.",
required=False,
default='False',
type=str)
args = parser.parse_args()
args.verbose = string_to_bool(args.verbose)
return args
```
#### File: lib/util/matplotblib_util.py
```python
import logging
import matplotlib.pyplot as plt
import numpy as np
LOGGER = logging.getLogger(__name__)
def save_bar_chart(data,
output_path,
y_label,
x_tick_labels,
title,
bar_width=.75,
size_inches=(10, 7)):
"""Save bar chart.
Args:
data (list(int or float)): Each entry in list refers to one bar. Should have same length as
x_tick_labels.
output_path (str or pathlib.Path): Path to save bar chart to.
y_label (str): Label for y axis.
x_tick_labels (list(str)): Name for each bar. This number will be displayed right below the
bar.
        title (str): Title of the bar chart.
bar_width (float): Bar width.
size_inches (tuple(int or float)): Size of plot in inches.
"""
LOGGER.debug("Plot file ...")
ind = np.arange(len(x_tick_labels))
fig, ax = plt.subplots()
fig.set_size_inches(*size_inches, forward=True)
rects = ax.bar(ind, data, bar_width, color='b')
ax.set_ylabel(y_label)
ax.set_title(title)
ax.set_xticks(ind)
ax.set_xticklabels(x_tick_labels)
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.0*height,
'%d' % int(height),
ha='center', va='bottom')
LOGGER.debug("Save file ...")
plt.savefig(output_path)
def save_scatter_plot_with_classes(output_path, types, data, class_color_dict):
"""Saves scatter to disk.
    Each point needs to have a class (which is held in types). Plot each point of a class with the
same color.
Args:
output_path (str or pathlib.Path): Path to save figure to
types (list(str)): Each entry holds a class. Index must match data rows.
data (numpy.ndarray(numpy.float)): Data of size [n_samples, 2]
class_color_dict (dict): Each key refers to class while the entries refer to the color.
"""
for class_types, color in class_color_dict.items():
subset = data[types == class_types, :]
plt.scatter(subset[:, 0], subset[:, 1], c=color, label=class_types, s=10)
plt.legend()
plt.savefig(output_path)
```
#### File: dataset_analysis_framework/scripts/compute_embedded_space.py
```python
import logging
import tqdm
import numpy as np
import pandas as pd
from lib.manifold.tsne import TSNE
from lib.config.general_config import Config
from lib.util.matplotblib_util import save_scatter_plot_with_classes
from lib.util.logging_util import configure_logging_verbosity
from lib.util.argparse_util import default_config_parse
LOGGER = logging.getLogger(__name__)
def _load_data_from_directory(feature_path, type_container=None):
"""Load data from directory and prepare.
Args:
feature_path (Str): Path to bounding box features.
        type_container (pandas.DataFrame or None): pandas DataFrame holding the types of the whole
dataset. Index must match the file names in feature_path.
Returns:
numpy.ndarray(numpy.float): Features of bounding boxes in size [n_samples, n_features]
        types (str): Class type referring to each sample. Only returned if type_container is not None.
        identifier (str): Unique label identifier. Only returned if type_container is not None.
"""
data = []
file_list = list(feature_path.glob('*'))
for path_to_data in tqdm.tqdm(file_list, desc='Load features'):
data.append(np.load(path_to_data))
data = np.array(data)
output = data
if type_container is not None:
identifier = np.array([file.stem for file in file_list])
types = np.array([type_container.loc[file.stem] for file in file_list])
output = (data, types, identifier)
return output
def save_embedded_features(feature_path, label_path, output_path, output_plot_path):
"""Perform TSNE and save features.
Args:
feature_path (str or pathlib.Path): Path to box features.
label_path (str or pathlib.Path): Path to pickled pandas Data Frame of labels.
output_path (str or pathlib.Path or None): Path to save embedded features to. Does not save
if None.
output_plot_path (str or pathlib.Path or None): Path to save plot to. Does not save if None.
"""
LOGGER.info("Save embedded features ... ")
type_container = pd.read_pickle(str(label_path))['type']
data, types, _ = _load_data_from_directory(feature_path, type_container)
# Normalize data
data -= np.mean(data)
data /= np.std(data)
embedded_space = TSNE().fit(data)
if output_path:
np.save(output_path, embedded_space)
if output_plot_path:
class_color_dict = {
'Car': 'b',
'Van': 'g',
'Truck': 'r',
'Pedestrian': 'c',
'Person_sitting': 'm',
'Cyclist': 'k',
'Tram': 'y'
}
save_scatter_plot_with_classes(output_path=output_plot_path,
types=types,
data=embedded_space,
class_color_dict=class_color_dict)
def _main():
"""Main script."""
args = default_config_parse(default_config_path='settings/scripts/compute_embedded_space.yaml')
configure_logging_verbosity(verbose=args.verbose)
config = Config.build_from_yaml(args.config)
save_embedded_features(**config.config)
if __name__ == '__main__':
_main()
```
#### File: dataset_analysis_framework/scripts/compute_feature_maps.py
```python
import logging
import numpy as np
import tqdm
from PIL import Image
from lib.config.general_config import Config
import lib.feature_extractor.resnet as resnet
from lib.util.logging_util import configure_logging_verbosity
from lib.util.argparse_util import default_config_parse
LOGGER = logging.getLogger(__name__)
def save_features(file_list, output_path, model):
"""Perform inference and save feature to disk.
Args:
file_list (list(str or pathlib.Path)): List holding absolute paths to images to perform
inference on.
output_path (str or pathlib.Path): Output path to store feature maps in.
model (feature_extractor.FeatureExtractorBase): Feature extractor model.
"""
output_path.mkdir(parents=True, exist_ok=True)
for file in tqdm.tqdm(file_list, desc="Compute Features"):
img = Image.open(file)
image_np = np.array(img)
image_np_expanded = np.expand_dims(image_np, axis=0)
out = model.inference(input_data=image_np_expanded)
np.save(output_path/file.stem, out)
def _get_label_names(inference_list_path):
"""Get label name out of inference list.
    This method returns the label base name (so the file base name), not the label identifier.
Args:
inference_list_path (str or pathlib.Path): Refer to compute_feature_maps for details.
Returns:
numpy.ndarray: List of unique label base names.
"""
inference_list = np.loadtxt(str(inference_list_path), dtype=np.str)
label_names = [entry.split('_')[0] for entry in inference_list]
label_names = np.unique(label_names)
return label_names
def compute_feature_maps(output_path,
input_path,
inference_list_path,
resnet_config_path,
verbose=False):
"""Compute feature maps and save to disk.
The feature maps will be stored to output_path.
Args:
output_path (str or pathlib.Path): Path to save feature maps to.
input_path (str or pathlib.Path): Path to image folder.
inference_list_path (str or pathlib.Path): Path to inference list
(example creation and details at scripts/computer_inference_list.py).
resnet_config_path (str or pathlib.Path): Path to ResNet config file.
verbose (bool): Set verbosity.
"""
LOGGER.info("Compute feature maps ... ")
configure_logging_verbosity(verbose=verbose)
label_names = _get_label_names(inference_list_path)
image_paths = [input_path/(label_name + '.png') for label_name in label_names]
model = resnet.ResNet.build_from_yaml(resnet_config_path)
save_features(file_list=image_paths,
output_path=output_path,
model=model)
def _main():
"""Main script."""
args = default_config_parse(default_config_path='settings/scripts/compute_feature_maps.yaml')
configure_logging_verbosity(verbose=args.verbose)
config = Config.build_from_yaml(args.config)
compute_feature_maps(**config.config)
if __name__ == '__main__':
_main()
```
#### File: dataset_analysis_framework/scripts/compute_inference_list.py
```python
import logging
import numpy as np
import pandas as pd
from lib.config.general_config import Config
from lib.dataloader.constants.KITTI import TYPE, CLASS_LIST
from lib.util.logging_util import configure_logging_verbosity
from lib.util.argparse_util import default_config_parse
LOGGER = logging.getLogger(__name__)
def compute_inference_list(label_path, output_path, seed=42, verbose=False):
"""Compute inference list and save to disk.
This method will save a .txt file with each column holding an unique identifier for a label.
For each class n amount of samples are written to the file. n is equal to the minimum amount of
    samples for a class. For KITTI, Person_sitting is the class with the fewest occurrences
(222), so for every class 222 samples would be chosen.
Args:
label_path (str or pathlib.Path): Path to labels as pickled pandas Data Frame file.
output_path (str or pathlib.Path): Path to save the inference list to.
seed (int): Random seed to enable reproducibility.
        verbose (bool): Set verbosity.
"""
LOGGER.info("Compute inference list ... ")
configure_logging_verbosity(verbose=verbose)
random_state = np.random.RandomState(seed)
labels = pd.read_pickle(str(label_path))
n_samples_dict = dict()
# Count samples per class
for class_types in CLASS_LIST:
n_samples_dict[class_types] = np.sum(labels[TYPE] == class_types)
# From each class get the same amount of samples like the class with the fewest n of samples
min_n = n_samples_dict[min(n_samples_dict, key=n_samples_dict.get)]
inference_list = []
for class_types in CLASS_LIST:
labels_one_class = labels[labels[TYPE] == class_types]
identifier = random_state.choice(labels_one_class.index.values, size=min_n, replace=False)
inference_list.append(identifier)
inference_list = [item for sublist in inference_list for item in sublist]
np.savetxt(str(output_path), inference_list, fmt='%s')
def _main():
"""Main script."""
args = default_config_parse(default_config_path='settings/scripts/compute_inference_list.yaml')
configure_logging_verbosity(verbose=args.verbose)
config = Config.build_from_yaml(args.config)
compute_inference_list(**config.config,
verbose=args.verbose)
if __name__ == '__main__':
_main()
``` |
{
"source": "johannesu/cnn-cells",
"score": 3
} |
#### File: johannesu/cnn-cells/network.py
```python
import lasagne
def setup(input_var, box_size = 29,):
# Setup network
net = lasagne.layers.InputLayer(
shape=(None, 1, box_size, box_size),
input_var=input_var)
# stage 1 : filter bank -> squashing -> max-pooling
net = lasagne.layers.Conv2DLayer(
net,
num_filters=30,
filter_size=(3, 3),
nonlinearity = lasagne.nonlinearities.rectify)
net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
# stage 2 : filter bank -> squashing -> max-pooling
net = lasagne.layers.Conv2DLayer(
net,
num_filters=30,
filter_size=(3, 3),
nonlinearity = lasagne.nonlinearities.rectify)
net = lasagne.layers.MaxPool2DLayer(net, pool_size=(2, 2))
# last stage: stanard 2-layer fully connected neural network with 50% dropout
net = lasagne.layers.DenseLayer(
lasagne.layers.dropout(net, p=.5),
num_units=100,
nonlinearity=lasagne.nonlinearities.rectify)
# Softmax output
net = lasagne.layers.DenseLayer(
lasagne.layers.dropout(net, p=.5),
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
return net
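# Hypothetical usage sketch (illustration only, not part of this module):
#   import theano.tensor as T
#   input_var = T.tensor4('inputs')
#   net = setup(input_var, box_size=29)
#   prediction = lasagne.layers.get_output(net)
# This builds the symbolic graph for a batch of 29x29 single-channel patches.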
``` |
{
"source": "johannesulf/dsigma",
"score": 3
} |
#### File: dsigma/dsigma/physics.py
```python
import numpy as np
from astropy import constants as c
from astropy import units as u
from scipy.special import jv, jn_zeros
from astropy.cosmology import FlatLambdaCDM
__all__ = ['mpc_per_degree', 'projection_angle', 'critical_surface_density',
'effective_critical_surface_density',
'lens_magnification_shear_bias']
_sigma_crit_factor = (c.c**2 / (4 * np.pi * c.G)).to(u.Msun / u.pc).value
def mpc_per_degree(z, cosmology=FlatLambdaCDM(H0=100, Om0=0.3),
comoving=False):
"""Estimate the angular scale in Mpc/degree at certain redshift.
Parameters
----------
cosmology : astropy.cosmology, optional
Cosmology to assume for calculations.
z : float or numpy array
Redshift of the object.
    comoving : boolean
Use comoving distance instead of physical distance when True.
Default: False
Returns
-------
float or numpy array
Physical scale in unit of Mpc/degree.
"""
if comoving:
return (cosmology.comoving_transverse_distance(z).to(u.Mpc).value *
np.deg2rad(1))
return (cosmology.angular_diameter_distance(z).to(u.Mpc).value *
np.deg2rad(1))
def projection_angle(ra_l, dec_l, ra_s, dec_s):
r"""Calculate projection angle between lens and sources.
Parameters
----------
ra_l, dec_l : float or numpy array
Coordinates of the lens galaxies in degrees.
ra_s, dec_s : float or numpy array
Coordinates of the source galaxies in degrees.
Returns
-------
cos_2phi, sin_2phi : float or numpy array
The :math:`\cos` and :math:`\sin` of :math:`2 \phi`, where
:math:`\phi` is the angle measured from right ascension direction to a
line connecting the lens and source galaxies.
"""
# Convert everything into radians.
ra_l, dec_l = np.deg2rad(ra_l), np.deg2rad(dec_l)
ra_s, dec_s = np.deg2rad(ra_s), np.deg2rad(dec_s)
# Calculate the tan(phi).
mask = np.cos(dec_s) * np.sin(ra_s - ra_l) != 0
if hasattr(mask, "__len__"):
tan_phi = (
(np.cos(dec_l) * np.sin(dec_s) - np.sin(dec_l) * np.cos(dec_s) *
np.cos(ra_s - ra_l))[mask] /
(np.cos(dec_s) * np.sin(ra_s - ra_l))[mask])
cos_2phi = np.repeat(-1.0, len(mask))
sin_2phi = np.repeat(0.0, len(mask))
cos_2phi[mask] = (2.0 / (1.0 + tan_phi * tan_phi)) - 1.0
sin_2phi[mask] = 2.0 * tan_phi / (1.0 + tan_phi * tan_phi)
elif mask:
tan_phi = (
(np.cos(dec_l) * np.sin(dec_s) - np.sin(dec_l) * np.cos(dec_s) *
np.cos(ra_s - ra_l)) / (np.cos(dec_s) * np.sin(ra_s - ra_l)))
cos_2phi = (2.0 / (1.0 + tan_phi * tan_phi)) - 1.0
sin_2phi = (2.0 * tan_phi / (1.0 + tan_phi * tan_phi))
else:
cos_2phi = -1
sin_2phi = 0
return cos_2phi, sin_2phi
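    # Note on the degenerate case: when cos(dec_s) * sin(ra_s - ra_l) == 0
    # (e.g. the source lies due north or south of the lens), tan(phi) is not
    # defined, and the defaults above give cos(2*phi) = -1, sin(2*phi) = 0,
    # i.e. phi = 90 degrees.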
def critical_surface_density(z_l, z_s, cosmology, comoving=True, d_l=None,
d_s=None):
"""The critical surface density for a given lens and source redshift.
Parameters
----------
z_l : float or numpy array
Redshift of lens.
z_s : float or numpy array
Redshift of source.
cosmology : astropy.cosmology
Cosmology to assume for calculations.
comoving : boolean, optional
Flag for using comoving instead of physical units.
d_l : float or numpy array
Comoving transverse distance to the lens. If not given, it is
calculated from the redshift provided.
d_s : float or numpy array
Comoving transverse distance to the source. If not given, it is
calculated from the redshift provided.
Returns
-------
float or numpy array
Critical surface density for each lens-source pair.
"""
if d_l is None:
d_l = cosmology.comoving_transverse_distance(z_l).to(u.Mpc).value
if d_s is None:
d_s = cosmology.comoving_transverse_distance(z_s).to(u.Mpc).value
dist_term = (1e-6 * (d_s / (1 + z_s)) / (d_l / (1 + z_l)) /
(np.where(d_s > d_l, d_s - d_l, 1) / (1 + z_s)))
if np.isscalar(dist_term):
if d_s <= d_l:
dist_term = np.inf
else:
dist_term[d_s <= d_l] = np.inf
if comoving:
dist_term /= (1.0 + z_l)**2
return _sigma_crit_factor * dist_term
def effective_critical_surface_density(z_l, z_s, n_s, cosmology,
comoving=True):
"""The effective critical surface density for a given lens redshift and
source redshift distribution.
Parameters
----------
z_l : float or numpy array
Redshift of lens.
z_s : numpy array
Potential redshifts of sources.
n_s : numpy array
Fraction of source galaxies in each redshift bin. Does not need to be
normalized.
cosmology : astropy.cosmology
Cosmology to assume for calculations.
comoving : boolean, optional
        Flag for using comoving instead of physical units.
Returns
-------
sigma_crit_eff : float or numpy array
Effective critical surface density for the lens redshift given the
source redshift distribution.
"""
d_l = cosmology.comoving_transverse_distance(z_l).to(u.Mpc).value
d_s = cosmology.comoving_transverse_distance(z_s).to(u.Mpc).value
if not np.isscalar(z_l):
z_l = np.repeat(z_l, len(z_s)).reshape((len(z_l), len(z_s)))
d_l = np.repeat(d_l, len(z_s)).reshape(z_l.shape)
z_s = np.tile(z_s, len(z_l)).reshape(z_l.shape)
d_s = np.tile(d_s, len(z_l)).reshape(z_l.shape)
n_s = np.tile(n_s, len(z_l)).reshape(z_l.shape)
sigma_crit = critical_surface_density(z_l, z_s, cosmology=cosmology,
comoving=comoving, d_l=d_l, d_s=d_s)
if not np.isscalar(z_l):
sigma_crit_eff = np.repeat(np.inf, len(z_l))
mask = np.average(sigma_crit**-1, axis=-1, weights=n_s) == 0
sigma_crit_eff[~mask] = np.average(sigma_crit**-1, axis=-1,
weights=n_s)[~mask]**-1
return sigma_crit_eff
else:
if np.average(sigma_crit**-1, weights=n_s) > 0:
return np.average(sigma_crit**-1, weights=n_s)**-1
else:
return np.inf
def lens_magnification_shear_bias(theta, alpha_l, z_l, z_s, camb_results,
n_z=10, n_ell=200, bessel_function_zeros=100,
k_max=1e3):
r"""The lens magnification bias to the mean tangential shear. This function
is based on equations (13) and (14) in Unruh et al. (2020).
Parameters
----------
theta : float
Angular separation :math:`\theta` from the lens sample in radians.
alpha_l : float
Local slope of the flux distribution of lenses near the flux limit.
z_l : float
Redshift of lens.
z_s : float
Redshift of source.
camb_results : camb.results.CAMBdata
CAMB results object that contains information on cosmology and the
matter power spectrum.
n_z : int, optional
Number of redshift bins used in the integral. Larger numbers will be
more accurate.
n_ell : int, optional
Number of :math:`\ell` bins used in the integral. Larger numbers will
be more accurate.
bessel_function_zeros : int, optional
The calculation involves an integral over the second order Bessel
function :math:`J_2 (\ell \theta)` from :math:`\ell = 0` to
:math:`\ell = \infty`. In practice, this function replaces the upper
bound with the bessel_function_zeros-th zero point of the Bessel
        function. Larger numbers should lead to more accurate results. However,
in practice, this also requires larger `n_ell`. Particularly, `n_ell`
should never fall below `bessel_function_zeros`.
k_max : float, optional
The maximum wavenumber beyond which the power spectrum is assumed to be
0.
Returns
-------
float
Bias in the mean tangential shear due to lens magnification effects.
"""
camb_interp = camb_results.get_matter_power_interpolator(
hubble_units=False, k_hunit=False)
ell_min = 0
ell_max = np.amax(jn_zeros(2, bessel_function_zeros)) / theta
z_min = 0
z_max = z_l
z, w_z = np.polynomial.legendre.leggauss(n_z)
z = (z_max - z_min) / 2.0 * z + (z_max + z_min) / 2.0
w_z = w_z * (z_max - z_min) / 2.0
ell, w_ell = np.polynomial.legendre.leggauss(n_ell)
ell = (ell_max - ell_min) / 2.0 * ell + (ell_max + ell_min) / 2.0
w_ell = w_ell * (ell_max - ell_min) / 2.0
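    # Both integrals are evaluated with Gauss-Legendre quadrature: leggauss
    # returns nodes and weights on [-1, 1], which are rescaled above to
    # [z_min, z_max] and [ell_min, ell_max]; the weighted sum over w_z * w_ell
    # below then approximates the nested z-ell integral.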
int_z = np.array([
(1 + z_i)**2 / (2 * np.pi) *
camb_results.hubble_parameter(0) /
camb_results.hubble_parameter(z_i) *
camb_results.angular_diameter_distance2(z_i, z_l) *
camb_results.angular_diameter_distance2(z_i, z_s) /
camb_results.angular_diameter_distance(z_l) /
camb_results.angular_diameter_distance(z_s) for z_i in z])
d_ang = np.array([
camb_results.angular_diameter_distance(z_i) for z_i in z])
z = np.tile(z, n_ell)
int_z = np.tile(int_z, n_ell)
d_ang = np.tile(d_ang, n_ell)
w_z = np.tile(w_z, n_ell)
int_ell = ell * jv(2, ell * theta)
ell = np.repeat(ell, n_z)
int_ell = np.repeat(int_ell, n_z)
w_ell = np.repeat(w_ell, n_z)
k = (ell + 0.5) / ((1 + z) * d_ang)
int_z_ell = np.array([camb_interp.P(z[i], k[i]) for i in range(len(k))])
int_z_ell = np.where(k > k_max, 0, int_z_ell)
gamma = np.sum(int_z * int_ell * int_z_ell * w_z * w_ell)
gamma = ((gamma * u.Mpc**3) * 9 * camb_results.Params.H0**3 * u.km**3 /
u.s**3 / u.Mpc**3 *
(camb_results.Params.omch2 + camb_results.Params.ombh2)**2 /
(camb_results.Params.H0 / 100)**4 / 4 / c.c**3)
return 2 * (alpha_l - 1) * gamma
```
#### File: johannesulf/dsigma/setup.py
```python
from setuptools import setup, find_packages
from distutils.extension import Extension
from distutils.command.sdist import sdist
try:
from Cython.Build import cythonize
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
ext = 'pyx' if USE_CYTHON else 'c'
extensions = [Extension(
'dsigma.precompute_engine', ['dsigma/precompute_engine.{}'.format(ext)],
extra_compile_args=['-Ofast', '-march=native'])]
if USE_CYTHON:
extensions = cythonize(extensions)
class sdist_with_cythonize(sdist):
def run(self):
cythonize(['dsigma/precompute_engine.pyx'])
sdist.run(self)
with open('README.md', 'r') as fstream:
long_description = fstream.read()
setup(
name='dsigma',
version='0.5.0',
description=('A Galaxy-Galaxy Lensing Pipeline'),
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='astronomy, weak-lensing',
url='https://github.com/johannesulf/dsigma',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=['numpy', 'astropy', 'scipy', 'scikit-learn',
'healpy'],
python_requires='>=3.4',
ext_modules=extensions,
cmdclass={'sdist': sdist_with_cythonize}
)
``` |
{
"source": "JohannesVerherstraeten/pypipeline",
"score": 2
} |
#### File: cloningstrategy/clonecell/iclonecell.py
```python
from typing import Sequence, TYPE_CHECKING, Optional
from pypipeline.cell.compositecell.icompositecell import ICompositeCell
if TYPE_CHECKING:
from pypipeline.validation import BoolExplained
from pypipeline.cell.icell import ICell
from pypipeline.cellio import InternalInput, InternalOutput
class ICloneCell(ICompositeCell):
"""
Clone cell interface.
Controlling class in the IObserver-IObservable relation, as observer of the original cell.
-> Relation should be created/deleted in the __init__ and delete() method.
"""
@classmethod
def create(cls, original_cell: "ICell", name: str) -> "ICloneCell":
"""
Factory method to create a new clone.
Args:
original_cell: the original cell to be cloned.
name: the name of the new clone cell.
Returns:
A new clone cell.
Raises:
InvalidInputException
NotImplementedError: if the original cell doesn't support cloning.
"""
raise NotImplementedError
# ------ Original cell ------
def get_original_cell(self) -> "ICell":
"""
Returns:
The original cell, of which this cell is a clone.
"""
raise NotImplementedError
def can_have_as_original_cell(self, original_cell: "ICell") -> "BoolExplained":
"""
Args:
original_cell: original cell to validate.
Returns:
TrueExplained if the given cell is a valid original cell for this clone cell. FalseExplained otherwise.
"""
raise NotImplementedError
def assert_has_proper_original_cell(self) -> None:
"""
Raises:
InvalidStateException: if the original cell is invalid.
"""
raise NotImplementedError
# ------ Inputs & Outputs ------
def get_clone_inputs(self) -> "Sequence[InternalInput]":
"""
Returns:
The (internal) clone inputs of this clone cell.
"""
raise NotImplementedError
def get_clone_input(self, name: str) -> "InternalInput":
"""
Args:
name: the name of the clone input to get.
Returns:
The (internal) clone input with the given name.
Raises:
KeyError: if this cell has no clone input with the given name.
"""
raise NotImplementedError
def get_clone_outputs(self) -> "Sequence[InternalOutput]":
"""
Returns:
The (internal) clone outputs of this clone cell.
"""
raise NotImplementedError
def get_clone_output(self, name: str) -> "InternalOutput":
"""
Args:
name: the name of the clone output to get.
Returns:
The (internal) clone output with the given name.
Raises:
KeyError: if this cell has no clone output with the given name.
"""
raise NotImplementedError
```
#### File: scalablecell/strategy/noscalingstrategy.py
```python
from typing import TYPE_CHECKING, Type
from pypipeline.cell.compositecell.scalablecell.strategy.ascalingstrategy import AScalingStrategy
from pypipeline.exceptions import ScalingNotSupportedException
if TYPE_CHECKING:
from pypipeline.cell.compositecell.scalablecell.scalablecelldeployment import ScalableCellDeployment
from pypipeline.cell.compositecell.scalablecell.strategy.cloningstrategy.clonecell.iclonecell import ICloneCell
class NoScalingStrategy(AScalingStrategy):
"""
No-Scaling strategy class.
    A scaling strategy determines how a scalable cell should be executed. At the moment, 2 strategies are implemented:
    - the CloningStrategy (default): allows scaling up using clones of the internal cell
    - the NoScalingStrategy: doesn't allow scaling and executes the scalable cell's internal cell in the main thread,
      just like a Pipeline. Useful for debugging.
"""
def __init__(self, scalable_cell_deployment: "ScalableCellDeployment"):
"""
Args:
            scalable_cell_deployment: the scalable cell deployment that this scaling strategy belongs to.
Raises:
            NotDeployableException – if the internal cell cannot be deployed.
            AlreadyDeployedException – if the internal cell is already deployed.
Exception – any exception that the user may raise when overriding _on_deploy or _on_undeploy
"""
super(NoScalingStrategy, self).__init__(scalable_cell_deployment)
self.get_internal_cell().deploy()
@classmethod
def create(cls, scalable_cell_deployment: "ScalableCellDeployment") -> "NoScalingStrategy":
return NoScalingStrategy(scalable_cell_deployment)
def delete(self) -> None:
self.get_internal_cell().undeploy()
super(NoScalingStrategy, self).delete()
def _on_pull(self) -> None:
self.logger.debug(f"{self}.pull()")
# Just execute the internal cell in the main thread.
output_queue = self.get_scalable_cell_deployment().get_output_queue()
queue_idx = output_queue.acquire_queue_index()
internal_cell = self.get_internal_cell()
internal_cell.pull()
# assumes scalable cells have no other outputs than output_ports:
for output in self.get_scalable_cell_deployment().get_scalable_cell().get_output_ports():
output_incoming_connections = output.get_incoming_connections()
assert len(output_incoming_connections) == 1
value = output_incoming_connections[0].pull()
output_queue.set(output, queue_idx, value)
output_queue.signal_queue_index_ready(queue_idx)
def reset(self) -> None:
self.get_internal_cell().reset()
def add_clone(self, method: Type["ICloneCell"]) -> None:
raise ScalingNotSupportedException(f"Scalable cell strategy 'NoScalingStrategy' doesn't allow scaling up.")
def remove_clone(self, method: Type["ICloneCell"]) -> None:
raise ScalingNotSupportedException(f"Scalable cell strategy 'NoScalingStrategy' doesn't allow scaling down.")
```
#### File: cellio/acellio/abstractio.py
```python
from typing import TypeVar, Generic, Optional, Dict, TYPE_CHECKING, Any, Callable, Sequence
from threading import RLock, Condition
import logging
import pypipeline
from pypipeline.cell.icellobserver import IOCreatedEvent
from pypipeline.cellio.icellio import IO
from pypipeline.validation import BoolExplained, TrueExplained, FalseExplained, raise_if_not
from pypipeline.exceptions import NoInputProvidedException, InvalidInputException, InvalidStateException, \
NotDeployedException, CannotBeDeletedException
if TYPE_CHECKING:
from pypipeline.cell import ICell
from pypipeline.connection import IConnection
from pypipeline.cellio.connectionendpoint import ConnectionEntryPoint, ConnectionExitPoint
T = TypeVar('T')
class AbstractIO(IO[T], Generic[T]):
"""
Abstract cell IO class.
An IO is owned by its cell.
An IO is the controlling class in the IO-ICell relation, as IO of the cell.
-> Main mutators can be found in __init__ and delete()
"""
def __init__(self, cell: "ICell", name: str, validation_fn: Optional[Callable[[T], bool]] = None):
"""
Args:
cell: the cell of which this IO will be part.
name: the name of this IO. Should be unique within the cell.
validation_fn: An optional validation function that will be used to validate every value that passes
through this IO.
"""
self.logger = logging.getLogger(self.__class__.__name__)
raise_if_not(self.can_have_as_name(name), InvalidInputException)
self.__name = name
# Main mutator in the IO-ICell relation, as IO of the cell.
raise_if_not(self.can_have_as_cell(cell), InvalidInputException)
cell._add_io(self) # Access to protected method on purpose # May raise exceptions
self.__cell = cell
raise_if_not(self.can_have_as_validation_fn(validation_fn), InvalidInputException)
self.__validation_fn = validation_fn
self.__state_lock = RLock()
self.__value: T = None # type: ignore
self.__value_is_set: bool = False
self.__value_is_set_signal = Condition(self.__state_lock)
self.__is_deployed: bool = False
def _notify_observers_of_creation(self) -> None:
"""Should be called after every IO object has been fully created. """
event = IOCreatedEvent(self.get_cell(), debug_message=f"io created with name {self.__name}")
self.get_cell().notify_observers(event)
def _get_cell_pull_lock(self) -> "RLock":
"""
Returns:
The pull lock which makes sure that the cell of this IO is not pulled concurrently.
"""
raise NotImplementedError
def _get_state_lock(self):
"""
Returns:
The lock which makes sure that this IO is not pulled concurrently.
"""
return self.__state_lock
def _get_value_is_set_signal(self):
"""
Returns:
A condition that gets signalled when a new value is available.
"""
return self.__value_is_set_signal
def get_name(self) -> str:
return self.__name
@classmethod
def can_have_as_name(cls, name: str) -> BoolExplained:
if not isinstance(name, str):
return FalseExplained(f"Name should be a string, got {name}")
elif len(name) == 0:
return FalseExplained(f"Name should not be an empty string")
elif "." in name:
return FalseExplained(f"Name should not contain `.`")
return TrueExplained()
def get_full_name(self) -> str:
return f"{self.get_cell().get_full_name()}.{self.get_name()}"
def assert_has_proper_name(self) -> None:
raise_if_not(self.can_have_as_name(self.get_name()), InvalidStateException, f"{self} has invalid name: ")
def get_cell(self) -> "ICell":
return self.__cell
def can_have_as_cell(self, cell: "ICell") -> BoolExplained:
if not isinstance(cell, pypipeline.cell.icell.ICell):
return FalseExplained(f"Cell should be instance of ICell, got {cell}")
return TrueExplained()
def assert_has_proper_cell(self) -> None:
cell = self.get_cell()
raise_if_not(self.can_have_as_cell(cell), InvalidStateException, f"{self} has invalid cell: ")
if not cell.has_as_io(self):
raise InvalidStateException(f"Inconsistent relation: {cell} doesn't have {self} as io")
def get_validation_fn(self) -> Optional[Callable[[T], bool]]:
return self.__validation_fn
def can_have_as_validation_fn(self, validation_fn: Optional[Callable[[T], bool]]) -> BoolExplained:
if validation_fn is None:
return TrueExplained()
if not isinstance(validation_fn, Callable):
return FalseExplained(f"The validation function should be a callable that accepts one argument of type T, "
f"and returns a boolean.")
return TrueExplained()
def assert_has_proper_validation_fn(self) -> None:
validation_fn = self.get_validation_fn()
raise_if_not(self.can_have_as_validation_fn(validation_fn), InvalidStateException,
f"{self} has invalid validation function: ")
def get_all_connections(self) -> "Sequence[IConnection[T]]":
result = list(self.get_incoming_connections())
result.extend(self.get_outgoing_connections())
return result
def get_incoming_connections(self) -> "Sequence[IConnection[T]]":
raise NotImplementedError
def has_as_incoming_connection(self, connection: "IConnection[T]") -> bool:
raise NotImplementedError
def get_nb_incoming_connections(self) -> int:
raise NotImplementedError
def get_outgoing_connections(self) -> "Sequence[IConnection[T]]":
raise NotImplementedError
def has_as_outgoing_connection(self, connection: "IConnection[T]") -> bool:
raise NotImplementedError
def get_nb_outgoing_connections(self) -> int:
raise NotImplementedError
def _get_connection_entry_point(self) -> Optional["ConnectionEntryPoint"]:
raise NotImplementedError
def _get_connection_exit_point(self) -> Optional["ConnectionExitPoint"]:
raise NotImplementedError
def _deploy(self) -> None:
assert not self._is_deployed()
for connection in self.get_all_connections():
if connection.get_source()._is_deployed() or connection.get_target()._is_deployed():
connection._deploy() # access to protected member on purpose
self.__is_deployed = True
def _undeploy(self) -> None:
assert self._is_deployed()
for connection in self.get_all_connections():
if connection.get_source()._is_deployed() and connection.get_target()._is_deployed():
connection._undeploy() # access to protected member on purpose
self.__is_deployed = False
def _is_deployed(self) -> bool:
return self.__is_deployed
def _assert_is_properly_deployed(self) -> None:
for connection in self.get_all_connections():
connection._assert_is_properly_deployed() # access to protected member on purpose
def pull(self) -> T:
raise NotImplementedError
def reset(self) -> None:
self._clear_value()
def get_value(self) -> T:
with self._get_state_lock():
if not self.__value_is_set:
raise NoInputProvidedException(f"{self}.get_value() called, but value has not yet been set.")
return self.__value
def _set_value(self, value: T) -> None:
if not self.can_have_as_value(value):
raise InvalidInputException(f"{self}: Invalid value: {value}")
with self._get_state_lock():
self.logger.debug(f"{self}.set_value( {value} ) @ AbstractIO")
self.__value = value
self.__value_is_set = True
self.__value_is_set_signal.notify()
exit_point = self._get_connection_exit_point()
if exit_point is not None:
exit_point._notify_new_value()
def _clear_value(self) -> None:
with self._get_state_lock():
self.__value_is_set = False
def value_is_set(self) -> bool:
with self._get_state_lock():
return self.__value_is_set
def _wait_for_value(self, interruption_frequency: Optional[float] = None) -> None:
with self._get_state_lock():
while not self.__value_is_set_signal.wait(timeout=interruption_frequency):
self.logger.warning(f"{self}.wait_for_value() waiting... @ AbstractIO level")
if not self._is_deployed():
raise NotDeployedException(f"{self} got undeployed while someone was waiting for the value. ")
def _acknowledge_value(self) -> None:
self._clear_value()
def can_have_as_value(self, value: T) -> bool:
if self.__validation_fn is not None:
return self.__validation_fn(value)
return True
def assert_has_proper_value(self) -> None:
if self.value_is_set() and not self.can_have_as_value(self.get_value()):
raise InvalidStateException(f"{self} has an invalid value: {self.get_value()}")
def _get_sync_state(self) -> Dict[str, Any]:
with self._get_state_lock():
state: Dict[str, Any] = dict()
return state
def _set_sync_state(self, state: Dict) -> None:
pass
def get_nb_available_pulls(self) -> Optional[int]:
raise NotImplementedError
def _is_optional_even_when_typing_says_otherwise(self) -> bool:
if len(self.get_outgoing_connections()) == 0:
return False
is_optional = True
for conn in self.get_outgoing_connections():
target = conn.get_target()
if not target._is_optional_even_when_typing_says_otherwise():
is_optional = False
break
return is_optional
def get_topology_description(self) -> Dict[str, Any]:
result = {
"io": str(self),
"type": self.__class__.__name__,
"name": self.get_name(),
"id": self.get_full_name(),
}
return result
def assert_is_valid(self) -> None:
self.assert_has_proper_name()
self.assert_has_proper_cell()
self.assert_has_proper_validation_fn()
self.assert_has_proper_value()
self._assert_is_properly_deployed()
def delete(self) -> None:
if self._is_deployed():
raise CannotBeDeletedException(f"{self} cannot be deleted while it is deployed.")
if self.get_nb_incoming_connections() > 0:
raise CannotBeDeletedException(f"Cannot delete {self}, as it still has {self.get_nb_incoming_connections()}"
f"incoming connections.")
if self.get_nb_outgoing_connections() > 0:
raise CannotBeDeletedException(f"Cannot delete {self}, as it still has {self.get_nb_outgoing_connections()}"
f"outgoing connections.")
# Main mutator in the IO-ICell relation, as owner of the IO.
self.__cell._remove_io(self) # Access to protected method on purpose
self.__cell = None
def __getstate__(self) -> Dict:
# called during pickling
new_state = dict(self.__dict__)
new_state["_AbstractIO__state_lock"] = None
new_state["_AbstractIO__value_is_set_signal"] = None
return new_state
def __setstate__(self, state: Dict) -> None:
# called during unpickling
self.__dict__ = state
self.__state_lock = RLock()
self.__value_is_set_signal = Condition(self.__state_lock)
def __str__(self) -> str:
return f"{self.__class__.__name__}({self.get_full_name()})"
```
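A minimal usage sketch of the IO behaviour defined above — name validation via `can_have_as_name` and full-name composition — is shown below. It is not part of the repository; the `SketchCell` subclass mirrors the dummy cells used in the test modules later in this document, and running it assumes `pypipeline` is installed.
```python
from typing import Optional
from pypipeline.cell import ASingleCell, ICompositeCell
from pypipeline.cellio import Input

class SketchCell(ASingleCell):
    # Bare-bones cell, just enough to own an IO (mirrors the test dummies below).
    def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
        super(SketchCell, self).__init__(parent_cell, name=name)
    def _on_pull(self) -> None:
        pass
    def supports_scaling(self) -> bool:
        return False

cell = SketchCell(None, "sketch_cell")
io = Input(cell, "my_input")
print(io.get_full_name())                        # "<cell full name>.my_input"
print(bool(Input.can_have_as_name("abc.")))      # False: names may not contain '.'
print(Input.can_have_as_name("abc.").get_reasons_str())
```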
#### File: cellio/compositeio/internalinput.py
```python
from typing import TypeVar, Generic, TYPE_CHECKING, Optional, Dict, Sequence
from threading import BoundedSemaphore
import pypipeline
from pypipeline.cellio.acellio.ainput import AInput
from pypipeline.cellio.icellio import IConnectionEntryPoint, IConnectionExitPoint
from pypipeline.cellio.connectionendpoint import ConnectionExitPoint, ConnectionEntryPoint
from pypipeline.validation import BoolExplained, FalseExplained, TrueExplained
from pypipeline.exceptions import NotDeployedException
if TYPE_CHECKING:
from pypipeline.cell import ICell, ICompositeCell
from pypipeline.connection import IConnection
T = TypeVar('T')
class InternalInput(AInput[T], IConnectionExitPoint[T], Generic[T]):
"""
InternalInput class.
An internal input is a type of input that can only be created on a composite cell.
It accepts no incoming connections and infinite outgoing (internal) connections.
Every time an internal input is pulled, it blocks and wait until a new value is set.
An IO is owned by its cell.
An IO is the controlling class in the IO-ICell relation, as IO of the cell.
An IConnectionExitPoint is the controlled class in the IConnection-IConnectionExitPoint relation, as the
source of the connection.
"""
PULL_TIMEOUT: float = 5.
def __init__(self, cell: "ICompositeCell", name: str):
"""
Args:
cell: the cell of which this IO will be part.
name: the name of this IO. Should be unique within the cell.
"""
super(InternalInput, self).__init__(cell, name)
self.__exit_point: ConnectionExitPoint[T] = ConnectionExitPoint(self, max_outgoing_connections=99999)
self.__value_is_acknowledged: BoundedSemaphore = BoundedSemaphore(1)
self._notify_observers_of_creation()
def can_have_as_cell(self, cell: "ICell") -> BoolExplained:
super_result = super(InternalInput, self).can_have_as_cell(cell)
if not super_result:
return super_result
if not isinstance(cell, pypipeline.cell.compositecell.icompositecell.ICompositeCell):
return FalseExplained(f"An InternalInput can only be created on an instance of ICompositeCell")
return TrueExplained()
def set_value(self, value: T) -> None:
self.logger.debug(f"{self}.set_value( {value} ) waiting for prev value to be acknowledged @ InternalInput")
while not self.__value_is_acknowledged.acquire(timeout=self.PULL_TIMEOUT):
self.logger.warning(f"{self}.set_value() waiting... @ InternalInput level")
if not self._is_deployed():
raise NotDeployedException(f"{self} is set while not deployed")
super(InternalInput, self)._set_value(value)
def _clear_value(self) -> None:
super(InternalInput, self)._clear_value()
try:
self.__value_is_acknowledged.release()
except ValueError:
# The value was not set...
pass
def clear_value(self) -> None:
self._clear_value()
def pull(self) -> T:
self.logger.debug(f"{self}.pull() @ InternalInput level")
if not self.value_is_set():
self._wait_for_value(interruption_frequency=self.PULL_TIMEOUT)
self.logger.debug(f"{self}.pull() got value @ InternalInput level")
value = self.get_value()
self.__exit_point._notify_new_value()
self._acknowledge_value()
return value
def is_provided(self) -> bool:
# Has no more info on whether it will be provided or not.
# (It will always be provided with a value, but this value may be None in case of unconnected scalable
# cell inputs -> differentiate?)
# -> It is required to be provided, otherwise a CloneCell would not be deployable.
return True
def get_incoming_connections(self) -> "Sequence[IConnection[T]]":
return ()
def has_as_incoming_connection(self, connection: "IConnection[T]") -> bool:
return False
def get_nb_incoming_connections(self) -> int:
return 0
def pull_as_connection(self, connection: "IConnection[T]") -> T:
return self.__exit_point.pull_as_connection(connection)
def all_outgoing_connections_have_pulled(self) -> bool:
return self.__exit_point.have_all_outgoing_connections_pulled()
def has_seen_value(self, connection: "IConnection[T]") -> bool:
return self.__exit_point.has_seen_value(connection)
def get_outgoing_connections(self) -> "Sequence[IConnection[T]]":
return self.__exit_point.get_outgoing_connections()
@classmethod
def can_have_as_outgoing_connection(cls, connection: "IConnection[T]") -> BoolExplained:
return ConnectionExitPoint.can_have_as_outgoing_connection(connection)
def can_have_as_nb_outgoing_connections(self, number_of_outgoing_connections: int) -> BoolExplained:
return self.__exit_point.can_have_as_nb_outgoing_connections(number_of_outgoing_connections)
def _add_outgoing_connection(self, connection: "IConnection[T]") -> None:
self.__exit_point._add_outgoing_connection(connection)
def _remove_outgoing_connection(self, connection: "IConnection[T]") -> None:
self.__exit_point._remove_outgoing_connection(connection)
def get_max_nb_outgoing_connections(self) -> int:
return self.__exit_point.get_max_nb_outgoing_connections()
def get_nb_outgoing_connections(self) -> int:
return self.__exit_point.get_nb_outgoing_connections()
def has_as_outgoing_connection(self, connection: "IConnection[T]") -> bool:
return self.__exit_point.has_as_outgoing_connection(connection)
def has_outgoing_connection_to(self, target: "IConnectionEntryPoint[T]") -> bool:
return self.__exit_point.has_outgoing_connection_to(target)
def get_outgoing_connection_to(self, target: "IConnectionEntryPoint[T]") -> "IConnection[T]":
return self.__exit_point.get_outgoing_connection_to(target)
def assert_has_proper_outgoing_connections(self) -> None:
self.__exit_point.assert_has_proper_outgoing_connections()
def has_initial_value(self) -> bool:
return self.__exit_point.has_initial_value()
def get_nb_available_pulls(self) -> Optional[int]:
raise Exception("Not supported by this class?")
def _get_connection_entry_point(self) -> Optional[ConnectionEntryPoint]:
return None
def _get_connection_exit_point(self) -> ConnectionExitPoint:
return self.__exit_point
def assert_is_valid(self) -> None:
super(InternalInput, self).assert_is_valid()
self.__exit_point.assert_is_valid()
def delete(self) -> None:
super(InternalInput, self).delete()
self.__exit_point.delete()
def __getstate__(self) -> Dict:
# called during pickling
new_state = super(InternalInput, self).__getstate__()
new_state["_InternalInput__value_is_acknowledged"] = None
return new_state
def __setstate__(self, state: Dict) -> None:
# called during unpickling
super(InternalInput, self).__setstate__(state)
self.__value_is_acknowledged = BoundedSemaphore(1)
```
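Below is a small sketch (not from the repository) of the blocking hand-over that the `InternalInput` above implements: `set_value()` hands a value to the inside of a composite cell, and `pull()` consumes and acknowledges it. It assumes a bare `Pipeline` subclass can be instantiated on its own, as the test fixtures further down do.
```python
from pypipeline.cell import Pipeline
from pypipeline.cellio import InternalInput

class SketchPipeline(Pipeline):
    def __init__(self):
        super(SketchPipeline, self).__init__(None, name="sketch_pipeline")
        self.internal_in: InternalInput[int] = InternalInput(self, "internal_in")

p = SketchPipeline()
p.internal_in.set_value(42)     # blocks only if a previous value was not yet acknowledged
print(p.internal_in.pull())     # 42; pull() would block until set_value() is called
```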
#### File: cellio/icellio/ioutput.py
```python
from typing import TypeVar, Generic
from pypipeline.cellio.icellio.io import IO
T = TypeVar('T')
class IOutput(IO[T], Generic[T]):
def set_value(self, value: T) -> None:
"""
Provide a new value for this IO.
The public version of IO._set_value(value)
Args:
value: the new value for this IO.
Raises:
TODO may raise exceptions
"""
raise NotImplementedError
def all_outgoing_connections_have_pulled(self) -> bool:
"""
Returns:
True if all outgoing connections have pulled, False otherwise.
"""
raise NotImplementedError
```
#### File: cellio/parameterio/runtimeparameter.py
```python
from typing import TypeVar, Generic, TYPE_CHECKING, Dict, Any, Optional, Callable
from pypipeline.cell.icellobserver import ParameterUpdateEvent
from pypipeline.cellio.standardio import Input
from pypipeline.exceptions import InvalidInputException, NoInputProvidedException, \
InvalidStateException
if TYPE_CHECKING:
from pypipeline.cell import ICell
T = TypeVar('T')
class RuntimeParameter(Input[T], Generic[T]):
"""
Runtime parameter class.
A runtime parameter is a type of input that accepts 1 incoming connection and no outgoing connections.
Every time a runtime parameter is pulled, it will pull the incoming connection, if available.
If no incoming connection is present, it returns the default value.
An IO is owned by its cell.
An IO is the controlling class in the IO-ICell relation, as IO of the cell.
An IConnectionEntryPoint is the controlled class in the IConnection-IConnectionEntryPoint relation, as the
target of the connection.
"""
__DEFAULT_VALUE_KEY: str = "default_value"
def __init__(self,
cell: "ICell",
name: str,
validation_fn: Optional[Callable[[T], bool]] = None):
"""
Args:
cell: the cell of which this IO will be part.
name: the name of this IO. Should be unique within the cell.
validation_fn: An optional validation function that will be used to validate every value that passes
through this IO.
"""
super(RuntimeParameter, self).__init__(cell, name, validation_fn)
self.__default_value: T = None # type: ignore
self.__default_value_is_set: bool = False
def set_default_value(self, value: T) -> None:
"""
Args:
value: the new default value for this runtime parameter. This value will be used when pulling the runtime
parameter if no incoming connection is available.
"""
if not self.can_have_as_value(value):
raise InvalidInputException(f"{self}: Invalid value: {value}")
with self._get_state_lock():
self.logger.debug(f"{self}.set_default_value( {value} ) @ RuntimeParameter")
self.__default_value = value
self.__default_value_is_set = True
event = ParameterUpdateEvent(self.get_cell()) # TODO avoid indirection of cell
self.get_cell().notify_observers(event)
def get_default_value(self) -> T:
"""
Returns:
The default value of this runtime parameter. This value will be used when pulling the runtime
parameter if no incoming connection is available.
"""
with self._get_state_lock():
if not self.__default_value_is_set:
raise NoInputProvidedException(f"{self}.get_default_value() called, but default value has not yet "
f"been set.")
return self.__default_value
def default_value_is_set(self) -> bool:
"""
Returns:
True if a default value is provided for this runtime parameter, False otherwise.
"""
with self._get_state_lock():
return self.__default_value_is_set
def _clear_default_value(self) -> None:
"""
Returns:
Clears the currently configured default value.
"""
with self._get_state_lock():
self.__default_value_is_set = False
def assert_has_proper_default_value(self) -> None:
"""
Raises:
InvalidStateException: if the configured default value is invalid.
"""
if self.default_value_is_set() and not self.can_have_as_value(self.get_default_value()):
raise InvalidStateException(f"{self} has an invalid default value: {self.get_default_value()}")
def pull(self) -> T:
if self.get_nb_incoming_connections() == 0:
return self.get_default_value()
result = super(RuntimeParameter, self).pull()
if result is None:
return self.get_default_value()
return result
def set_value(self, value: T) -> None:
"""
Same as self.set_default_value().
Note: this method is not related to self._set_value(value) which is used by incoming connections to set the
(not-default) value of this RuntimeParameter.
"""
self.set_default_value(value)
def is_provided(self) -> bool:
return super(RuntimeParameter, self).is_provided() or self.default_value_is_set()
def _is_optional_even_when_typing_says_otherwise(self) -> bool:
return True # A RuntimeParameter can handle None values being set: it will return the default value instead
def _get_sync_state(self) -> Dict[str, Any]:
with self._get_state_lock():
state: Dict[str, Any] = super(RuntimeParameter, self)._get_sync_state()
state[self.__DEFAULT_VALUE_KEY] = self.get_default_value() if self.default_value_is_set() else None
return state
def _set_sync_state(self, state: Dict) -> None:
with self._get_state_lock():
super(RuntimeParameter, self)._set_sync_state(state)
if state[self.__DEFAULT_VALUE_KEY] is not None:
self.set_default_value(state[self.__DEFAULT_VALUE_KEY])
def get_nb_available_pulls(self) -> Optional[int]:
if self.get_nb_incoming_connections() == 0:
return None
return super(RuntimeParameter, self).get_nb_available_pulls()
def assert_is_valid(self) -> None:
super(RuntimeParameter, self).assert_is_valid()
self.assert_has_proper_default_value()
```
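The sketch below (not part of the repository) illustrates the default-value fallback described in the docstring above: with no incoming connection, pulling the parameter returns the configured default, and the optional validation function guards the values passing through. The `ThresholdCell` class and its names are made up for illustration.
```python
from typing import Optional
from pypipeline.cell import ASingleCell, ICompositeCell
from pypipeline.cellio import RuntimeParameter

class ThresholdCell(ASingleCell):
    def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
        super(ThresholdCell, self).__init__(parent_cell, name=name)
        # Only non-negative thresholds pass the validation hook of this IO.
        self.threshold: RuntimeParameter[float] = RuntimeParameter(
            self, "threshold", validation_fn=lambda v: v is None or v >= 0)
    def _on_pull(self) -> None:
        pass
    def supports_scaling(self) -> bool:
        return True

cell = ThresholdCell(None, "threshold_cell")
cell.threshold.set_default_value(0.5)
print(cell.threshold.pull())        # 0.5: no incoming connection, so the default is used
```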
#### File: pypipeline/pypipeline/validation.py
```python
from typing import List, Union, Type, Callable
from abc import ABC, abstractmethod
class BoolExplained(ABC):
"""
Boolean including one or more reasons if False.
Doesn't have a reason if True.
Can be used just like a normal bool.
"""
def __bool__(self) -> bool:
return self.value
@property
@abstractmethod
def value(self) -> bool:
raise NotImplementedError
@property
@abstractmethod
def reasons(self) -> List[str]:
raise NotImplementedError
def get_reasons_str(self, pretty: bool = False) -> str:
reasons = self.reasons
if len(reasons) == 0:
return ""
elif len(reasons) == 1:
return reasons[0]
elif pretty:
reasons_str = ""
for reason in reasons:
reasons_str += f"\n - {reason}"
return reasons_str
else:
return str(reasons)
def __str__(self) -> str:
return f"{bool(self)}" + (f"(reasons: {self.get_reasons_str()})" if not self else "")
def pretty(self) -> str:
reasons = self.reasons
if bool(self):
return "True"
assert len(reasons) > 0
if len(reasons) == 1:
return f"False(reasons: {self.get_reasons_str(pretty=True)})"
else:
result = f"False(reasons: "
result += self.get_reasons_str(pretty=True)
result += "\n)"
return result
def __mul__(self, other: object) -> "BoolExplained":
if isinstance(other, BoolExplained):
if bool(self) and bool(other):
return TrueExplained()
else:
return FalseExplained(self.reasons + other.reasons)
elif other:
return self
else:
raise ValueError(f"No reason given why `other` is False")
class TrueExplained(BoolExplained):
def __init__(self) -> None:
pass
@property
def value(self) -> bool:
return True
@property
def reasons(self) -> List[str]:
return []
class FalseExplained(BoolExplained):
def __init__(self, reasons: Union[str, List[str]]) -> None:
if isinstance(reasons, str):
reasons = [reasons]
else:
if len(reasons) == 0:
raise ValueError(f"FalseExplained created without giving a reason")
self.__reasons = reasons
@property
def value(self) -> bool:
return False
@property
def reasons(self) -> List[str]:
return self.__reasons
def raise_if_not(bool_with_explanation: BoolExplained,
exception_type: Type[Exception],
message_prefix: str = "") -> None:
"""
Raises an exception if the given BoolExplained instance is False.
The message of the exception will start with the given (optional) message prefix, followed by the
reason of the FalseExplained.
"""
if not bool_with_explanation:
raise exception_type(message_prefix + bool_with_explanation.get_reasons_str(pretty=True))
def assert_(validation_fn: Callable[[], BoolExplained],
message_prefix: str = "") -> None:
"""
Asserts that the given validation function evaluates to a TrueExplained.
The message of the assertion will start with the given (optional) message prefix, followed by the
reason of the FalseExplained.
"""
assert validation_fn(), message_prefix + validation_fn().get_reasons_str(pretty=True)
```
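A short usage sketch of the `BoolExplained` helpers defined above (not part of the repository):
```python
from pypipeline.validation import TrueExplained, FalseExplained, raise_if_not

ok = TrueExplained()
bad = FalseExplained("name contains a dot") * FalseExplained("name is too long")
print(bool(ok), bool(bad))              # True False
print(bad.get_reasons_str())            # both reasons, as a list-style string
print(bad.pretty())                     # multi-line report with one reason per line
raise_if_not(ok, ValueError)            # no-op: the explained bool is True
raise_if_not(bad, ValueError, "invalid name: ")   # raises ValueError with both reasons
```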
#### File: JohannesVerherstraeten/pypipeline/setup.py
```python
from typing import List
from pathlib import Path
from setuptools import setup
from pkg_resources import parse_requirements
_PATH_ROOT = Path(__file__).parent
_PATH_REQUIREMENTS = _PATH_ROOT / 'requirements'
def read(fname: str) -> str:
return open(_PATH_ROOT / fname).read()
def read_requirements(req_path: Path) -> List[str]:
assert req_path.exists()
with open(req_path, "r") as f:
res = parse_requirements(f.read())
return [str(r) for r in res]
# https://setuptools.readthedocs.io/en/latest/pkg_resources.html#requirements-parsing
# For installing package extras: `pip install pypipeline[ray, tests]`
# If you have the repo cloned locally: `pip install ".[ray, tests]"`
extras = {
'docs': read_requirements(_PATH_REQUIREMENTS / "docs.txt"),
'ray': read_requirements(_PATH_REQUIREMENTS / "ray.txt"),
'tests': read_requirements(_PATH_REQUIREMENTS / "tests.txt"),
'lib_torch': read_requirements(_PATH_REQUIREMENTS / "lib_torch.txt"),
'serve': read_requirements(_PATH_REQUIREMENTS / "serve.txt")
}
extras['dev'] = extras['docs'] + extras['ray'] + extras['tests'] + extras['serve']
extras['all'] = extras['dev'] + extras['lib_torch']
setup(
name="pypipeline",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
description="Encapsulate computations, combine them to algorithms, enable pipeline parallelism and scale up. ",
keywords="pipeline pipelining parallelism parallelization scaling ray threading algorithm throughput building "
"blocks",
python_requires='>=3.7,<3.9',
setup_requires=[],
install_requires=[],
extras_require=extras,
url="https://github.com/JohannesVerherstraeten/pypipeline",
packages=['pypipeline', 'pypipeline_lib', 'pypipeline_serve'],
long_description=read('README.md'),
classifiers=[
'Environment :: Console',
'Natural Language :: English',
# Project maturity
"Development Status :: 3 - Alpha",
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Information Analysis',
# License
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
# Supported Python versions
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
```
#### File: t01_pypipeline_tests/t02_component_tests/test_acellio.py
```python
import pytest
from typing import Optional, List, Type
from pypipeline.connection import Connection
from pypipeline.cell import ASingleCell, ICompositeCell, Pipeline, ScalableCell, ICell
from pypipeline.cellio import (IConnectionExitPoint, IConnectionEntryPoint, Input, Output, InputPort, OutputPort,
RuntimeParameter, ConfigParameter, InternalInput, InternalOutput, IO)
from pypipeline.exceptions import IndeterminableTopologyException, InvalidInputException
single_io_types: List[Type[IO]] = [Input, Output, RuntimeParameter, ConfigParameter]
composite_io_types = [InputPort, InternalInput, OutputPort, InternalOutput]
class DummyCell1(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(DummyCell1, self).__init__(parent_cell, name=name)
def _on_pull(self) -> None:
pass
def supports_scaling(self) -> bool:
raise NotImplementedError
class DummyPipeline(Pipeline):
def __init__(self):
super(DummyPipeline, self).__init__(None, name="pipeline")
class DummyScalableCell(ScalableCell):
def __init__(self):
super(DummyScalableCell, self).__init__(None, name="scalablecell")
@pytest.fixture()
def dummycell1() -> DummyCell1:
return DummyCell1(None, "dummycell1")
@pytest.fixture()
def pipeline() -> DummyPipeline:
return DummyPipeline()
@pytest.fixture()
def scalable_cell() -> DummyScalableCell:
return DummyScalableCell()
@pytest.fixture()
def composite_cells(pipeline: DummyPipeline,
scalable_cell) -> List[ICompositeCell]:
return [pipeline, scalable_cell]
@pytest.fixture()
def single_cells(dummycell1: DummyCell1) -> List[ASingleCell]:
return [dummycell1]
@pytest.fixture()
def all_cells(composite_cells: List[ICompositeCell],
single_cells: List[ASingleCell]) -> List[ICell]:
result: List[ICell] = [*composite_cells, *single_cells]
return result
def test_acellio_validation_name() -> None:
for io_type in single_io_types:
assert not io_type.can_have_as_name(None)
assert not io_type.can_have_as_name("")
assert not io_type.can_have_as_name("abc.")
assert io_type.can_have_as_name("abc_de-fg(456)")
# def test_acellio_validation_cell(all_cells: List[ICell]) -> None:
# for io_type in single_io_types:
# assert not io_type.can_have_as_cell(None)
# for cell in all_cells:
# assert io_type.can_have_as_cell(cell)
def test_cellio_creation() -> None:
for io_type in single_io_types:
cell = DummyCell1(None, "dummy")
io: IO = io_type(cell, "a_name")
assert io.get_name() == "a_name"
assert io.get_full_name() == cell.get_full_name() + ".a_name"
assert io.get_cell() == cell
io.assert_is_valid()
def test_cell_creation_invalid_parent() -> None:
for io_type in single_io_types:
with pytest.raises(InvalidInputException):
io_type(None, "a_name")
def test_cell_creation_invalid_name() -> None:
for io_type in single_io_types:
cell = DummyCell1(None, "dummy")
with pytest.raises(InvalidInputException):
io_type(cell, "")
class DummyCell2(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(DummyCell2, self).__init__(parent_cell, name=name)
self.input: Input[int] = Input(self, "input")
self.output: Output[int] = Output(self, "output")
self.output_with_initial_value: Output[int] = Output(self, "output_with_initial_value", initial_value=321)
# Other input types:
RuntimeParameter(self, "runtime_param")
ConfigParameter(self, "config_param")
def _on_pull(self) -> None:
self.output.set_value(123)
def supports_scaling(self) -> bool:
raise NotImplementedError
def get_nb_available_pulls(self) -> Optional[int]:
return 456
class DummyCell3(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(DummyCell3, self).__init__(parent_cell, name=name)
self.input: Input[int] = Input(self, "input")
self.output: Output[int] = Output(self, "output")
# Other input types:
RuntimeParameter(self, "runtime_param")
ConfigParameter(self, "config_param")
def _on_pull(self) -> None:
self.output.set_value(789)
def supports_scaling(self) -> bool:
raise NotImplementedError
def get_nb_available_pulls(self) -> Optional[int]:
return 654
class DummyPipeline2(Pipeline):
def __init__(self):
super(DummyPipeline2, self).__init__(None, name="toplevel")
self.cell2 = DummyCell2(self, "cell1")
self.cell3 = DummyCell3(self, "cell2")
self.input: InputPort[int] = InputPort(self, "input_port")
self.output: OutputPort[int] = OutputPort(self, "output_port")
# Other input types:
Input(self, "input")
RuntimeParameter(self, "runtime_param")
ConfigParameter(self, "config_param")
InternalInput(self, "internal_input")
# other output types:
Output(self, "output")
InternalOutput(self, "internal_output")
@pytest.fixture()
def dummy_pipeline_2() -> DummyPipeline2:
return DummyPipeline2()
def test_acellio_add_connection(dummy_pipeline_2: DummyPipeline2) -> None:
assert len(dummy_pipeline_2.cell3.input.get_incoming_connections()) == 0
assert len(dummy_pipeline_2.cell2.output.get_outgoing_connections()) == 0
dummy_pipeline_2.cell3.input.assert_has_proper_incoming_connections()
dummy_pipeline_2.cell2.output.assert_has_proper_outgoing_connections()
dummy_pipeline_2.cell2.assert_is_valid()
dummy_pipeline_2.cell3.assert_is_valid()
c = Connection(dummy_pipeline_2.cell2.output, dummy_pipeline_2.cell3.input)
assert c in dummy_pipeline_2.cell3.input.get_incoming_connections()
assert c in dummy_pipeline_2.cell2.output.get_outgoing_connections()
assert dummy_pipeline_2.cell3.input.has_as_incoming_connection(c)
assert dummy_pipeline_2.cell2.output.has_as_outgoing_connection(c)
dummy_pipeline_2.cell3.input.assert_has_proper_incoming_connections()
dummy_pipeline_2.cell2.output.assert_has_proper_outgoing_connections()
assert c == dummy_pipeline_2.cell2.output.get_outgoing_connection_to(dummy_pipeline_2.cell3.input)
assert c == dummy_pipeline_2.cell3.input.get_incoming_connection_with(dummy_pipeline_2.cell2.output)
dummy_pipeline_2.cell2.assert_is_valid()
dummy_pipeline_2.cell3.assert_is_valid()
c.delete()
assert len(dummy_pipeline_2.cell3.input.get_incoming_connections()) == 0
assert len(dummy_pipeline_2.cell2.output.get_outgoing_connections()) == 0
dummy_pipeline_2.cell3.input.assert_has_proper_incoming_connections()
dummy_pipeline_2.cell2.output.assert_has_proper_outgoing_connections()
dummy_pipeline_2.cell2.assert_is_valid()
dummy_pipeline_2.cell3.assert_is_valid()
# # TODO test pulling
```

#### File: t01_pypipeline_tests/t03_integration_tests/test_01_standard_pipeline_and_upscaling.py
```python
import logging
from typing import Optional, TYPE_CHECKING, Generator
import time
import random
import ray
import pytest
from pypipeline.cell import ASingleCell, ScalableCell, Pipeline, RayCloneCell, ThreadCloneCell
from pypipeline.cellio import Input, Output, OutputPort, InputPort
from pypipeline.connection import Connection
if TYPE_CHECKING:
from pypipeline.cell import ICompositeCell
@pytest.fixture
def ray_init_and_shutdown() -> Generator:
ray.shutdown()
ray.init()
yield None
ray.shutdown()
class CellA(ASingleCell):
"""
A source cell providing data.
"""
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(CellA, self).__init__(parent_cell, name=name)
self.output1: Output[float] = Output(self, "output1")
self.output2: Output[int] = Output(self, "output2")
self.counter: int = 1
def supports_scaling(self) -> bool: # This cell has internal state, so should not be scaled up
return False
def _on_pull(self) -> None:
self.output1.set_value(self.counter / 10.) # typechecks!
self.output2.set_value(self.counter)
self.counter += 1
def _on_reset(self) -> None:
super(CellA, self)._on_reset()
self.counter = 1
class CellB(ASingleCell):
"""
Heavy computation cell. (see sleep)
"""
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(CellB, self).__init__(parent_cell, name=name)
self.input1: Input[float] = Input(self, "input1")
self.input2: Input[int] = Input(self, "input2")
self.output: Output[float] = Output(self, "output")
def supports_scaling(self) -> bool:
return False
def _on_pull(self) -> None:
value1: float = self.input1.pull() # typechecks!
value2: int = self.input2.pull()
result_sum = value1 + value2
time.sleep(random.random() * 0.10) # a heavy computation
# print(f"[CellB]: log: {result_sum}")
self.logger.debug(f"setting value: {result_sum}")
self.output.set_value(result_sum)
def test_without_upscaling() -> None:
# print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
    # first test CellA and CellB without parallelization or scaling:
toplevel_pipeline = Pipeline(None, "toplevel")
cell_a = CellA(toplevel_pipeline, "cell_a")
cell_b = CellB(toplevel_pipeline, "cell_b")
Connection(cell_a.output1, cell_b.input1) # typechecks
Connection(cell_a.output2, cell_b.input2) # typechecks
# No regulators, drivers or inference_processes needed, just pull the output you want.
toplevel_pipeline.assert_is_valid()
toplevel_pipeline.deploy()
expected_outputs = [i/10. + i for i in range(1, 6)]
for expected_output in expected_outputs:
assert cell_b.output.pull() == expected_output
toplevel_pipeline.assert_is_valid()
toplevel_pipeline.undeploy()
toplevel_pipeline.delete()
# print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
    # now test CellA and CellB in an upscalable ScalableCell:
    # Note that the scalable cell workers may raise exceptions on exit. TODO: graceful thread exiting.
class BScalableCell(ScalableCell):
"""
A ScalableCell enables pipeline parallelism and multiple clones running in parallel processes.
"""
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(BScalableCell, self).__init__(parent_cell, name=name)
self.cell_b = CellB(self, "cell_b")
self.input_port_1: InputPort[float] = InputPort(self, "input1")
self.input_port_2: InputPort[int] = InputPort(self, "input2")
self.output_port: OutputPort[float] = OutputPort(self, "output")
Connection(self.input_port_1, self.cell_b.input1)
Connection(self.input_port_2, self.cell_b.input2)
Connection(self.cell_b.output, self.output_port)
class ToplevelPipeline(Pipeline):
"""
Note: this is totally equivalent to the following, only implemented as subclass of a Pipeline
(which is a bit more structured and therefore the recommended way).
toplevel_pipeline = Pipeline(None, "toplevel")
cell_a = CellA(toplevel_pipeline, "cell_a")
cell_b_scalable = BScalableCell(toplevel_pipeline, "cell_b_scalable")
output_port: OutputPort[float] = OutputPort(toplevel_pipeline, "pipeline_output")
Connection(cell_a.output1, cell_b_scalable.input_port_1) # typechecks
Connection(cell_a.output2, cell_b_scalable.input_port_2) # typechecks
Connection(cell_b_scalable.output_port, output_port) # typechecks
"""
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(ToplevelPipeline, self).__init__(parent_cell, name=name)
self.cell_a = CellA(parent_cell=self, name="cell_a")
self.cell_b_scalable = BScalableCell(parent_cell=self, name="cell_b_scalable")
self.output_port: OutputPort[float] = OutputPort(self, "pipeline_output")
Connection(self.cell_a.output1, self.cell_b_scalable.input_port_1) # typechecks
Connection(self.cell_a.output2, self.cell_b_scalable.input_port_2) # typechecks
Connection(self.cell_b_scalable.output_port, self.output_port) # typechecks
@pytest.mark.parametrize("do_reset", [
True,
False
])
def test_with_upscaling(ray_init_and_shutdown: None, do_reset: bool) -> None:
# Add 4 clone clones: 4 parallel processes executing the clone.
# We set the queue capacity of the scalable-cell high enough, such that all 4 processes can write their result to this
# queue and don't have to wait for us to pull a value out of this queue before they can start their new run.
ab = ToplevelPipeline(None, "toplevel")
ab.cell_b_scalable.config_queue_capacity.set_value(5)
ab.cell_b_scalable.config_check_quit_interval.set_value(0.5)
ab.assert_is_valid()
ab.deploy()
ab.assert_is_valid()
expected_outputs = [i / 10. + i for i in range(1, 18)]
actual_output = None
for i, scaling_method in enumerate([ThreadCloneCell, RayCloneCell, ThreadCloneCell]):
ab.cell_b_scalable.scale_up(4, scaling_method)
ab.assert_is_valid()
expected_outputs_this_iteration = expected_outputs[:6] if do_reset else expected_outputs[i*6: (i+1)*6]
for expected_output in expected_outputs_this_iteration:
ab.logger.info(f"Pulling for expected output {expected_output}...")
ab.pull()
previous_actual_output = actual_output
actual_output = ab.output_port.get_value()
ab.logger.info(f"Actual output: {actual_output}, expected output: {expected_output}")
if not do_reset:
                # Downscaling while a scalable cell is active may result in data loss.
                # However, the ordering of the results is preserved.
assert previous_actual_output is None or actual_output > previous_actual_output
else:
assert actual_output == expected_output, f"{actual_output} == {expected_output}"
ab.logger.info(f"Start assert_is_valid")
ab.assert_is_valid()
ab.logger.info(f"Start scale_all_down")
ab.cell_b_scalable.scale_all_down()
if do_reset:
ab.logger.info(f"Start reset")
ab.reset()
ab.undeploy()
ab.delete()
logging.info(f"Done")
# # first pull will be slow, since the data must pass the whole pipeline
# # (heavy computation duration is 0.5s)
# # next pull will be fast, because of the extra 3 running processes
#
# # fifth pull will be slower again, but sometimes still less than 0.5s
# # because when the first clone finished its first run,
# # it immediately started a next run, before we executed the fifth
# # pull (=pipeline parallelization).
```
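As a rough, hypothetical follow-up to the commented explanation above, the sketch below times consecutive pulls to make the pipeline parallelism visible. It is not part of the test suite; it reuses the `ToplevelPipeline` class defined in this test module and assumes the same deploy/scale-up calls that the test makes are valid in this order.
```python
import time
from pypipeline.cell import ThreadCloneCell

pipeline = ToplevelPipeline(None, "timing_sketch")     # class from the test module above
pipeline.cell_b_scalable.config_queue_capacity.set_value(5)
pipeline.deploy()
pipeline.cell_b_scalable.scale_up(4, ThreadCloneCell)  # 4 worker threads for CellB
for i in range(8):
    t0 = time.perf_counter()
    pipeline.pull()
    print(f"pull {i}: {time.perf_counter() - t0:.3f}s ->", pipeline.output_port.get_value())
pipeline.cell_b_scalable.scale_all_down()
pipeline.undeploy()
pipeline.delete()
```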
#### File: t01_pypipeline_tests/t03_integration_tests/test_07_cloning_a_cell_without_io.py
```python
from typing import Optional, TYPE_CHECKING, Generator
import ray
import time
import pytest
from pypipeline.cell import ASingleCell, ScalableCell, RayCloneCell
if TYPE_CHECKING:
from pypipeline.cell import ICompositeCell
@pytest.fixture
def ray_init_and_shutdown() -> Generator:
ray.init()
yield None
ray.shutdown()
class CellA(ASingleCell):
"""
A cell without any inputs
"""
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(CellA, self).__init__(parent_cell, name=name)
def supports_scaling(self) -> bool:
return True
def _on_pull(self) -> None:
self.logger.warning(f"I'm executing :)")
time.sleep(0.2)
class AScalableCell(ScalableCell):
"""
A ScalableCell enables pipeline parallelism and multiple clones running in parallel processes.
"""
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(AScalableCell, self).__init__(parent_cell, name=name)
self.cell_a = CellA(self, "cell_a")
def clone(self, new_parent: "Optional[ICompositeCell]") -> "AScalableCell":
return AScalableCell(new_parent, self.get_name())
def test_cloning_cell_without_io(ray_init_and_shutdown: None) -> None:
a = AScalableCell(None, "a")
a.config_queue_capacity.set_value(8)
a.assert_is_valid()
a.deploy() # May be put before or after (or in between) the creations of the clone clones
# a.scale_up(3, method=ThreadCloneCell)
a.scale_up(3, method=RayCloneCell)
a.assert_is_valid()
for i in range(10): # TODO a bit of a silly test...
print(i)
a.pull()
a.undeploy()
a.delete()
```
#### File: t01_pypipeline_tests/t03_integration_tests/test_08_complex_pipeline.py
```python
from typing import Optional, TYPE_CHECKING, Generator
import ray
import numpy as np
from math import sin, pi
import pytest
from tqdm import tqdm
from pypipeline.cell import ASingleCell, ScalableCell, RayCloneCell, ThreadCloneCell, Pipeline
from pypipeline.cellio import Input, Output, InputPort, OutputPort
from pypipeline.connection import Connection
if TYPE_CHECKING:
from pypipeline.cell import ICompositeCell
NB_ITERATIONS = 1000
SQUARE_MATRIX_SIZE = 3
@pytest.fixture
def ray_init_and_shutdown() -> Generator:
ray.init()
yield None
ray.shutdown()
class SourceCell1(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(SourceCell1, self).__init__(parent_cell, name=name)
self.output_counter: Output[int] = Output(self, "counter")
self.output_sine: Output[float] = Output(self, "sine")
self.counter = 1
def supports_scaling(self) -> bool:
return False
def _on_pull(self) -> None:
self.output_counter.set_value(self.counter)
self.output_sine.set_value(sin(self.counter * pi/2))
# self.logger.debug(f" ! Outputs: {self.counter}, {sin(self.counter * pi/2)}")
self.counter += 1
class SourceCell2(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(SourceCell2, self).__init__(parent_cell, name=name)
self.output_matrix: Output[np.ndarray] = Output(self, "matrix")
self.current_diagonal: np.ndarray = np.ones((SQUARE_MATRIX_SIZE, ))
def supports_scaling(self) -> bool:
return False
def _on_pull(self) -> None:
result = np.random.random((SQUARE_MATRIX_SIZE, SQUARE_MATRIX_SIZE))
result[np.where(np.eye(SQUARE_MATRIX_SIZE) > 0)] = self.current_diagonal
self.output_matrix.set_value(result)
# self.logger.debug(f" ! Outputs: {self.current_diagonal}")
self.current_diagonal = self.current_diagonal + 1
class CenterCell1(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(CenterCell1, self).__init__(parent_cell, name=name)
self.input_counter: Input[int] = Input(self, "counter")
self.input_sine: Input[float] = Input(self, "sine")
self.output_multiplication: Output[float] = Output(self, "multiplication")
def supports_scaling(self) -> bool:
return True
def _on_pull(self) -> None:
counter = self.input_counter.pull()
sine = self.input_sine.pull()
multiple = counter * sine
self.output_multiplication.set_value(multiple)
# self.logger.debug(f" ! Inputs: {counter}, {sine}, Outputs: {multiple}")
class ScalableCell1(ScalableCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(ScalableCell1, self).__init__(parent_cell, name=name)
self.input_counter: InputPort[int] = InputPort(self, "counter_in")
self.input_sine: InputPort[float] = InputPort(self, "sine")
self.output_multiplication: OutputPort[float] = OutputPort(self, "multiplication")
self.output_counter: OutputPort[int] = OutputPort(self, "counter_out")
self.center_cell_1 = CenterCell1(self, "center_cell_1")
Connection(self.input_counter, self.center_cell_1.input_counter)
Connection(self.input_counter, self.output_counter)
Connection(self.input_sine, self.center_cell_1.input_sine)
Connection(self.center_cell_1.output_multiplication, self.output_multiplication)
class CenterCell2(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(CenterCell2, self).__init__(parent_cell, name=name)
self.input_matrix: Input[np.ndarray] = Input(self, "matrix")
self.input_sine: Input[float] = Input(self, "sine")
self.output_matrix_multiple: Output[np.ndarray] = Output(self, "matrix_multiple")
def supports_scaling(self) -> bool:
return True
def _on_pull(self) -> None:
sine = self.input_sine.pull()
matrix = self.input_matrix.pull()
matrix_multiple: np.ndarray = matrix * sine
self.output_matrix_multiple.set_value(matrix_multiple)
# self.logger.debug(f" ! Inputs: {matrix}, {sine}, Outputs: {matrix_multiple}")
class ScalableCell2(ScalableCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(ScalableCell2, self).__init__(parent_cell, name=name)
self.input_matrix: InputPort[np.ndarray] = InputPort(self, "matrix")
self.input_sine: InputPort[float] = InputPort(self, "sine")
self.output_matrix_multiplication: OutputPort[np.ndarray] = OutputPort(self, "matrix_multiplication")
self.center_cell_2 = CenterCell2(self, "center_cell_2")
Connection(self.input_matrix, self.center_cell_2.input_matrix)
Connection(self.input_sine, self.center_cell_2.input_sine)
Connection(self.center_cell_2.output_matrix_multiple, self.output_matrix_multiplication)
class SinkCell(ASingleCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(SinkCell, self).__init__(parent_cell, name=name)
self.input_multiplication: Input[int] = Input(self, "multiplication")
self.input_counter: Input[int] = Input(self, "counter")
self.input_matrix_multiple: Input[np.ndarray] = Input(self, "matrix_multiple")
self.output_combination: Output[np.ndarray] = Output(self, "combination")
def supports_scaling(self) -> bool:
return True
def _on_pull(self) -> None:
multiplication = self.input_multiplication.pull()
counter = self.input_counter.pull()
matrix_multiple = self.input_matrix_multiple.pull()
combination: np.ndarray = (matrix_multiple + np.eye(matrix_multiple.shape[0]) * multiplication) / 2.
combination /= counter # Should be just the sine signal again on the diagonal
self.output_combination.set_value(combination)
# self.logger.debug(f" ! Inputs: {multiplication}, {counter}, {matrix_multiple}, Outputs: {combination}")
class SinkScalableCell(ScalableCell):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(SinkScalableCell, self).__init__(parent_cell, name=name)
self.input_multiplication: InputPort[int] = InputPort(self, "multiplication")
self.input_counter: InputPort[int] = InputPort(self, "counter")
self.input_matrix_multiple: InputPort[np.ndarray] = InputPort(self, "matrix_multiple")
self.output_combination: OutputPort[np.ndarray] = OutputPort(self, "combination")
self.center_cell_3 = SinkCell(self, "center_cell_3")
Connection(self.input_multiplication, self.center_cell_3.input_multiplication)
Connection(self.input_counter, self.center_cell_3.input_counter)
Connection(self.input_matrix_multiple, self.center_cell_3.input_matrix_multiple)
Connection(self.center_cell_3.output_combination, self.output_combination)
class ToplevelPipeline(Pipeline):
def __init__(self, parent_cell: "Optional[ICompositeCell]", name: str):
super(ToplevelPipeline, self).__init__(parent_cell, name=name)
self.source_1 = SourceCell1(self, "source_1")
self.source_2 = SourceCell2(self, "source_2")
self.center_1 = ScalableCell1(self, "center_1_scalable")
self.center_2 = ScalableCell2(self, "center_2_scalable")
self.sink = SinkScalableCell(self, "sink_scalable")
Connection(self.source_1.output_counter, self.center_1.input_counter)
Connection(self.source_1.output_sine, self.center_1.input_sine)
Connection(self.source_1.output_sine, self.center_2.input_sine)
Connection(self.source_2.output_matrix, self.center_2.input_matrix)
Connection(self.center_1.output_counter, self.sink.input_counter)
Connection(self.center_1.output_multiplication, self.sink.input_multiplication)
Connection(self.center_2.output_matrix_multiplication, self.sink.input_matrix_multiple)
@pytest.fixture()
def toplevel_pipeline() -> ToplevelPipeline:
return ToplevelPipeline(None, "toplevel")
def test_1(ray_init_and_shutdown: None, toplevel_pipeline: ToplevelPipeline) -> None:
toplevel_pipeline.center_1.scale_up(3, ThreadCloneCell)
toplevel_pipeline.center_2.scale_up(2, RayCloneCell)
toplevel_pipeline.sink.scale_up(4, RayCloneCell)
toplevel_pipeline.assert_is_valid()
toplevel_pipeline.deploy()
toplevel_pipeline.assert_is_valid()
for i in tqdm(range(1, NB_ITERATIONS)):
expected_result: np.ndarray = np.ones((SQUARE_MATRIX_SIZE, )) * sin(i * pi/2)
toplevel_pipeline.pull()
actual_result = toplevel_pipeline.sink.output_combination.get_value()
toplevel_pipeline.logger.info(f"Expected result: {expected_result}")
toplevel_pipeline.logger.info(f"Actual result: {actual_result.diagonal()}")
assert np.isclose(actual_result.diagonal(), expected_result).all()
toplevel_pipeline.undeploy()
toplevel_pipeline.delete()
```
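The following quick check (not part of the test) spells out why the diagonal of the sink output is expected to equal sin(i*pi/2): at step i the matrix diagonal is i, CenterCell1 contributes i*sine, and the sink divides the averaged diagonal by the counter again.
```python
import numpy as np
from math import sin, pi

i = 3
sine = sin(i * pi / 2)
matrix = np.random.random((3, 3))
np.fill_diagonal(matrix, i)                                     # what SourceCell2 emits at step i
combination = (matrix * sine + np.eye(3) * i * sine) / 2. / i   # what SinkCell computes
print(np.isclose(combination.diagonal(), sine).all())           # True: the diagonal is the sine signal
```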
#### File: t01_pypipeline_tests/t03_integration_tests/test_09_scalablecell_exceptions.py
```python
from typing import Optional, Generator, Type
import time
import ray
import pytest
import logging
from pypipeline.cell import ASingleCell, ACompositeCell, ScalableCell, Pipeline, RayCloneCell, ThreadCloneCell, \
ACloneCell
from pypipeline.cellio import Output, Input, InputPort, OutputPort
from pypipeline.connection import Connection
from pypipeline.exceptions import NonCriticalException
class RandomNonCriticalException(NonCriticalException):
pass
class RandomCriticalException(Exception):
pass
class SourceCell(ASingleCell):
def __init__(self, parent_cell: Optional[ACompositeCell], name: str):
super(SourceCell, self).__init__(parent_cell, name)
self.output_1: Output[int] = Output(self, "output_1")
self.output_2: Output[int] = Output(self, "output_2")
self.counter = 0
def _on_pull(self) -> None:
self.logger.warning(f"{self} being pulled...")
time.sleep(0.01)
# if self.counter == 4:
# raise SomeRandomException("some random error message")
self.output_1.set_value(self.counter)
self.output_2.set_value(self.counter)
self.counter += 1
class SinkCell(ASingleCell):
def __init__(self, parent_cell: Optional[ACompositeCell], name: str):
super(SinkCell, self).__init__(parent_cell, name)
self.input_1: Input[int] = Input(self, "input_1")
self.input_2: Input[int] = Input(self, "input_2")
self.output: Output[int] = Output(self, "output")
self.counter = 0
def _on_pull(self) -> None:
self.counter += 1
value_1 = self.input_1.pull()
if self.counter == 3:
raise RandomNonCriticalException("Non-critical errors shouldn't stop the pipeline")
elif self.counter == 6:
raise RandomCriticalException("Critical errors should stop the pipeline")
value_2 = self.input_2.pull()
time.sleep(0.02)
self.output.set_value(value_1 + value_2)
class ScalableSinkCell(ScalableCell):
def __init__(self, parent_cell: Optional[ACompositeCell], name: str):
super(ScalableSinkCell, self).__init__(parent_cell, name)
self.sink_cell = SinkCell(self, "sink_cell")
self.input_1: InputPort[int] = InputPort(self, "input_port_1")
self.input_2: InputPort[int] = InputPort(self, "input_port_2")
self.output: OutputPort[int] = OutputPort(self, "output_port")
Connection(self.input_1, self.sink_cell.input_1)
Connection(self.input_2, self.sink_cell.input_2)
Connection(self.sink_cell.output, self.output)
class TestPipeline(Pipeline):
def __init__(self, parent_cell: Optional[ACompositeCell], name: str):
super(TestPipeline, self).__init__(parent_cell, name)
self.source = SourceCell(self, "source")
self.sink = ScalableSinkCell(self, "scalable_sink")
Connection(self.source.output_1, self.sink.input_1)
Connection(self.source.output_2, self.sink.input_2)
@pytest.fixture
def ray_init_and_shutdown() -> Generator:
ray.init()
yield None
ray.shutdown()
@pytest.mark.parametrize("scaleup_method", [ThreadCloneCell, RayCloneCell])
def test_exceptions_in_clones(ray_init_and_shutdown, scaleup_method: Type[ACloneCell]):
pipeline = TestPipeline(None, "pipeline")
pipeline.sink.scale_up(2, method=scaleup_method)
pipeline.deploy()
nb_noncritical_exceptions = 0
for i in range(11):
logging.warning(f"============= {i} =============")
try:
pipeline.pull()
except RandomNonCriticalException as e:
logging.warning(f"Got non-critical exception {e}")
nb_noncritical_exceptions += 1
except RandomCriticalException as e:
logging.warning(f"Got critical exception {e}")
# In theory the pipeline can arrive here with only one non-critical exception, if only one of the
# clones did most of/all the work... Usually it should be 2
assert nb_noncritical_exceptions == 1 or nb_noncritical_exceptions == 2
break
else:
logging.warning(f"Got result {pipeline.sink.output.get_value()}")
assert pipeline.sink.output.get_value() == 2 * i
pipeline.undeploy()
``` |

{
"source": "Johannes-Vitt/sonypiapi",
"score": 3
} |
#### File: Johannes-Vitt/sonypiapi/sonypyapi.py
```python
import sys
import json
import urllib2
import collections
url = "http://192.168.122.1:8080/sony/"
camera_url = url+"camera"
avContent_url = url+"avContent"
def create_params_dict(method, params=None, id_=1, version="1.0"):
    # Build the JSON-RPC payload for the camera web API.
    params_dict = collections.OrderedDict([
        ("method", method),
        ("params", params if params is not None else []),
        ("id", id_),
        ("version", version)])
    return params_dict
def api_call(url,params):
return urllib2.urlopen(url,json.dumps(params))
def cast_params(param_list):
    # first object on the param list is the method name, the rest are its parameters
    # call api_call with getMethodTypes to learn the expected parameter types
    # cast every argument on the param list to its expected type
## try:
parameter_types = json.load(api_call(camera_url,list_of_method_types()))
for i in parameter_types["results"]:
if(i[0]==param_list[0]):
if (i[1]!=None):
for counter, to_type in enumerate(i[1]):
param_list[counter+1] = cast(param_list[counter+1],to_type)
## except:
## parameter_types = json.load(api_call(avContent_url,list_of_method_types()))
## for i in parameter_types["results"]:
## if(param_list != []):
## if(i[0]==param_list[0]):
## if (i[1]!=None):
## for counter, to_tpye in enumerate(i[1]):
## param_list[counter+1] = cast(param_list[counter+1],to_type)
return param_list
def cast(value, to_type):
if (to_type=="int"):
return int(value)
if (to_type=="bool"):
return bool(value)
if (to_type=="string"):
return str(value)
if (to_type=="int*"):
help_int = []
for i in value:
help_int.append(int(i))
return help_int
if (to_type=="string*"):
help_string = []
for i in value:
help_string.append(str(i))
return help_string
if (to_type=="bool*"):
help_bool = []
for i in value:
help_bool.append(bool(i))
return help_bool
def list_of_method_types():
params_for_api_call = collections.OrderedDict([
("method", "getMethodTypes"),
("params", ["1.0"]),
("id", 1),
("version", "1.0")])
return params_for_api_call
def command_line_call():
if(len(sys.argv)==1):
raw_params = ["getAvailableApiList"]
else:
raw_params = sys.argv[1:]
clean_params = cast_params(raw_params)
if(len(clean_params)>1):
params_for_api_call = create_params_dict(clean_params[0],clean_params[1:])
else:
params_for_api_call = create_params_dict(clean_params[0])
print api_call(camera_url,params_for_api_call).read()
command_line_call()
``` |
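For reference, here is a hedged Python 3 sketch (not part of this repository) of the same JSON-RPC call that the Python 2 script above performs with urllib2: it POSTs a method/params/id/version payload to the camera endpoint hard-coded above and decodes the JSON response.
```python
import json
import urllib.request

CAMERA_URL = "http://192.168.122.1:8080/sony/camera"

def api_call(method, params=None, id_=1, version="1.0"):
    # Build the same JSON-RPC payload layout used above and POST it as bytes.
    payload = {
        "method": method,
        "params": params if params is not None else [],
        "id": id_,
        "version": version,
    }
    request = urllib.request.Request(CAMERA_URL, data=json.dumps(payload).encode("utf-8"))
    with urllib.request.urlopen(request) as response:
        return json.loads(response.read().decode("utf-8"))

if __name__ == "__main__":
    print(api_call("getAvailableApiList"))
```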
{
"source": "JohannesWaltmann/advent_of_code_2021",
"score": 4
} |
#### File: advent_of_code_2021/day_04/day04.py
```python
import numpy as np
# Load draws
test_draw = np.loadtxt('resources/test_input', dtype=str, max_rows=1, delimiter=',')
val_draw = np.loadtxt('resources/val_input', dtype=str, max_rows=1, delimiter=',')
# Load boards
test_boards = np.loadtxt('resources/test_input', skiprows=2)
val_boards = np.loadtxt('resources/val_input', skiprows=2)
def task_01_bingo(boards, draws):
"""
Computes a bingo on one of the given boards based on a given series of drawn numbers.
:param boards: Three dim np-array containing a set of bingo boards.
:param draws: Np-array with the set of drawn numbers.
:returns: The sum of all non-marked numbers on the winning board multiplied by the winning number.
"""
single_boards = np.array(np.split(boards, len(boards) / 5))
bool_mask = np.zeros_like(single_boards, dtype=bool)
for draw in draws:
# Take every drawn number
bool_mask[single_boards == int(draw)] = True
num_board = 0
for masked_board in bool_mask:
# Flip the bool mask of each board to true if a drawn number is matched
row_bingo = np.all(masked_board, axis=1)
col_bingo = np.all(masked_board, axis=0)
row_solved = np.any(row_bingo, axis=0)
col_solved = np.any(col_bingo, axis=0)
if row_solved or col_solved:
                # Compute the board sum if a row or col is fully marked
winning_board = single_boards[num_board]
winning_board[masked_board == True] = 0
return int(np.sum(winning_board) * int(draw))
num_board += 1
def task_02_bingo(boards, draws):
"""
Computes a bingo on one of the given boards based on a given series of drawn numbers.
:param boards: Three dim np-array containing a set of bingo boards.
:param draws: Np-array with the set of drawn numbers.
:returns: The sum of all non-marked numbers on the last winning board multiplied by its winning number.
"""
single_boards = np.array(np.split(boards, len(boards) / 5))
bool_mask = np.zeros_like(single_boards, dtype=bool)
# Extra variables to keep track of the current last winner
last_draw, last_board = -1, np.zeros((5, 5))
last_marked = np.zeros_like(last_board, dtype=bool)
for draw in draws:
# Take every drawn number
bool_mask[single_boards == int(draw)] = True
num_board = 0
for masked_board in bool_mask:
# Flip the bool mask of each board to true if a drawn number is matched
row_bingo = np.all(masked_board, axis=1)
col_bingo = np.all(masked_board, axis=0)
row_solved = np.any(row_bingo, axis=0)
col_solved = np.any(col_bingo, axis=0)
if row_solved or col_solved:
# Set the current winning board as last winning board
last_board = np.copy(single_boards[num_board])
last_draw = draw
last_marked = np.copy(masked_board)
single_boards[num_board] = None
bool_mask[num_board] = None
num_board += 1
# Compute the board sum of the last winning board
last_board[last_marked == True] = 0
return int(np.sum(last_board) * int(last_draw))
print(f"Task 01 test output: {task_01_bingo(test_boards, test_draw)}")
print(f"Task 01 validation output: {task_01_bingo(val_boards, val_draw)}")
print(f"Task 02 test output: {task_02_bingo(test_boards, test_draw)}")
print(f"Task 02 validation output: {task_02_bingo(val_boards, val_draw)}")
```
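A tiny standalone demonstration (not part of the puzzle solution) of the bool-mask bingo check used in both functions above: `np.all` along an axis detects a fully marked row or column, and `np.any` reduces that to a single per-board flag.
```python
import numpy as np

marked = np.zeros((5, 5), dtype=bool)
marked[2, :] = True                           # pretend the whole third row was drawn
row_bingo = np.any(np.all(marked, axis=1))    # True: one row is completely marked
col_bingo = np.any(np.all(marked, axis=0))    # False: no column is completely marked
print(row_bingo, col_bingo)
```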
#### File: advent_of_code_2021/day_08/day_08.py
```python
test_input = [[num.strip().split(" ") for num in line.split("|")]
for line in open('resources/test_input', 'r').readlines()]
test_input_2 = [[num.strip().split(" ") for num in line.split("|")]
for line in open('resources/test_input_2', 'r').readlines()]
val_input = [[num.strip().split(' ') for num in line.split('|')]
for line in open('resources/val_input', 'r').readlines()]
print(test_input[0])
def find_digit_in_segments(display) -> int:
"""
Computes how many instances of a specific digit are located in the given input.
:param display: The input of a seven segment split at '|' as delimiter.
:return: Sum of the counted digits.
"""
num_digits = 0
# Check each row of the segment display
for digit in display:
# Check each single segment
for segment in digit[1]:
# Increment the output if the number read is either of [1, 4, 7, 8]
if len(segment) == 2 or len(segment) == 3 or len(segment) == 4 or len(segment) == 7:
num_digits += 1
return num_digits
def compute_sum_output(display) -> int:
"""
    Decodes the four-digit output value of every line and returns the sum of these outputs.
    The character->digit mapping has to be determined anew for every line.
    :param display: Line-wise seven-segment input, with each line split at the '|' delimiter.
    :return: Sum of the decoded output numbers over all lines.
"""
sum_output = 0
# Filter the obvious numbers from the display input
for digit in display:
known = {}
for segment in digit[0]:
if len(segment) == 2:
known[1] = set(segment)
elif len(segment) == 4:
known[4] = set(segment)
# Filter the numbers from the output
nums = ''
for _segment in digit[1]:
if len(_segment) == 2:
nums += '1'
elif len(_segment) == 3:
nums += '7'
elif len(_segment) == 4:
nums += '4'
elif len(_segment) == 5:
if set(_segment).issuperset(known[1]):
nums += '3'
elif len(set(_segment) & known[4]) == 3:
nums += '5'
else:
nums += '2'
elif len(_segment) == 6:
if not set(_segment).issuperset(known[1]):
nums += '6'
elif set(_segment).issuperset(known[4]):
nums += '9'
else:
nums += '0'
elif len(_segment) == 7:
nums += '8'
sum_output += int(nums)
return sum_output
print(f'Solution test task 01: {find_digit_in_segments(test_input)}')
print(f'Solution test 02 task 01: {find_digit_in_segments(test_input_2)}')
print(f'Solution validation task 01: {find_digit_in_segments(val_input)}')
print('----------------------------')
print(f'Solution test task 02: {compute_sum_output(test_input)}')
print(f'Solution test 2 task 02: {compute_sum_output(test_input_2)}')
print(f'Solution validation task 02: {compute_sum_output(val_input)}')
```
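As an added note (not part of the original file): the length-based shortcuts in both functions rest on the fact that only the digits 1, 7, 4 and 8 light a unique number of segments on a seven-segment display. A minimal, self-contained check of that claim:
```python
SEGMENT_COUNTS = {0: 6, 1: 2, 2: 5, 3: 5, 4: 4, 5: 5, 6: 6, 7: 3, 8: 7, 9: 6}

counts = list(SEGMENT_COUNTS.values())
unique_lengths = sorted(c for c in set(counts) if counts.count(c) == 1)
print(unique_lengths)  # [2, 3, 4, 7] -> the segment counts of 1, 7, 4 and 8
```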
#### File: advent_of_code_2021/day_12/day_12.py
```python
from collections import defaultdict
def count_paths_small_once(source_path: str) -> int:
"""
Counts the different possible paths through a cave system.
Each valid path has to contain a start and an end point.
    A path may contain multiple visits to the same capital (large) cave but only one visit
    per minor (small) cave.
:param source_path: File path to the routing info of a cave system.
:returns: The sum of counted valid paths. Each path increments the sum by 1.
"""
# Create dictionary
connections = defaultdict(list)
for line in open(source_path, 'r'):
        # Record the connection between entry and destination in both directions
entry, destination = line.strip().split('-')
connections[entry].append(destination)
connections[destination].append(entry)
next_cave = [['start']]
valid_paths = 0
# As long as there are valid paths available
while next_cave:
# Take the next path
current_path = next_cave.pop(0)
        # Consider every cave reachable from the end of the current path
for follow_up in connections[current_path[-1]]:
# If next cave is an end cave increment the path counter
if follow_up == 'end':
valid_paths += 1
# Else append it to the list of coming caves
# Only when it is a capital cave or has not been visited yet
elif not follow_up.islower() or follow_up not in current_path:
next_cave.append(current_path + [follow_up])
return valid_paths
def count_paths_one_small_double(source_path: str) -> int:
"""
Counts the different possible paths through a cave system.
Each valid path has to contain a start and an end point.
    A path may contain multiple visits to the same capital (large) cave and may visit
    a single minor (small) cave twice; every other minor cave at most once.
:param source_path: File path to the routing info of a cave system.
:returns: The sum of counted valid paths. Each path increments the sum by 1.
"""
# Create dictionary and fill with cave connections
connections = defaultdict(list)
for line in open(source_path, 'r'):
entry, destination = line.strip().split('-')
connections[entry].append(destination)
connections[destination].append(entry)
valid_paths = 0
_next = [['start']]
# As long as paths are available
while _next:
# Take the next path as current
current_path = _next.pop(0)
# For each cave in the current path
for coming in connections[current_path[-1]]:
            # Flag whether this step would revisit an already visited minor cave
revisit = coming.islower() and coming in current_path
if coming == 'end':
valid_paths += 1
            # Skip the start cave and allow at most one minor-cave revisit per path (marked with '*')
elif coming != 'start' and not (current_path[0] == '*' and revisit):
# Add it to the following caves of the current path
_next.append((['*'] if revisit else []) + current_path + [coming])
return valid_paths
# Output for task 01
print('Solution for first test task 01: ', count_paths_small_once('resources/test_1'))
print('Solution for second test task 01: ', count_paths_small_once('resources/test_2'))
print('Solution for validation task 01: ', count_paths_small_once('resources/val'))
print('-------------------------------')
# Output for task 02
print('Solution for first test task 02: ', count_paths_one_small_double('resources/test_3'))
print('Solution for second test task 02: ', count_paths_one_small_double('resources/test_2'))
print('Solution for validation task 02: ', count_paths_one_small_double('resources/val'))
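# Added sanity-check sketch (not part of the original solution): the small example
# cave system from the puzzle statement should give 10 paths for task 01 and 36 paths
# for task 02. A temporary file is used so nothing about resources/ is assumed.
import tempfile

_example = "start-A\nstart-b\nA-c\nA-b\nb-d\nA-end\nb-end\n"
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as _handle:
    _handle.write(_example)
    _example_path = _handle.name
print('Sanity check task 01 (expect 10): ', count_paths_small_once(_example_path))
print('Sanity check task 02 (expect 36): ', count_paths_one_small_double(_example_path))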
``` |
{
"source": "johanneswd/weather-lights",
"score": 3
} |
#### File: johanneswd/weather-lights/weather-lights.py
```python
from time import sleep
try:
import neopixel
except ImportError:
    print("Could not import neopixel, running in simulation mode")
import neopixel_sim as neopixel
from adds_data import MetarCache
airports = ['EGKB', 'EGHI', 'EGGP', 'EGGD', 'BIEG', 'EDDS']
# LED strip configuration:
LED_COUNT = len(airports) # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 32 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
LED_STRIP = neopixel.ws.WS2811_STRIP_RGB # Strip type and colour ordering
colours = {
'LIFR' : neopixel.Color(191, 44, 214), # purple
'IFR' : neopixel.Color(226, 67, 27), # red
'MVFR' : neopixel.Color(46, 56, 209), # blue
'VFR' : neopixel.Color(41, 178, 45) # green
}
BLANK = neopixel.Color(0,0,0)
def get_colour(airport_id, metars):
metar = metars[airport_id]
if metar is None:
return BLANK
if not metar.noaa_flight_category.valid:
return BLANK
condition = metar.noaa_flight_category.data
    print("%s is %s" % (airport_id, condition))
if condition not in colours:
return BLANK
return colours[condition]
def set_colours(strip, cache, airport_list):
metars = cache.get_metars(airport_list)
    for n, airport in enumerate(airport_list):
colour = get_colour(airport, metars)
strip.setPixelColor(n, colour)
strip.show()
def main(poll_interval):
strip = neopixel.Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL,
LED_STRIP)
strip.begin()
cache = MetarCache()
while True:
set_colours(strip, cache, airports)
sleep(poll_interval)
if __name__=='__main__':
main(5)
``` |
{
"source": "johannes-weidenfeller/qiskit-aqua",
"score": 3
} |
#### File: components/neural_networks/generative_network.py
```python
from abc import ABC, abstractmethod
class GenerativeNetwork(ABC):
"""
Base class for generative Quantum and Classical Neural Networks.
    Subclasses should initialize the module in their constructor, but
    raise an exception if a required component of the module is not available.
"""
@abstractmethod
def __init__(self):
super().__init__()
self._num_parameters = 0
self._num_qubits = 0
self._bounds = list()
@abstractmethod
def set_seed(self, seed):
"""
Set seed.
Args:
seed (int): seed
Raises:
NotImplementedError: not implemented
"""
raise NotImplementedError()
@abstractmethod
def get_output(self, quantum_instance, params, shots):
"""
Apply quantum/classical neural network to given input and get the respective output
Args:
quantum_instance (QuantumInstance): Quantum Instance, used to run the generator circuit.
params (numpy.ndarray): parameters which should be used to run the generator,
if None use self._params
shots (int): if not None use a number of shots that is different from the number
set in quantum_instance
Returns:
Neural network output
Raises:
NotImplementedError: not implemented
"""
raise NotImplementedError()
@abstractmethod
def loss(self):
"""
Loss function used for optimization
"""
raise NotImplementedError()
@abstractmethod
def train(self, quantum_instance=None, shots=None):
"""
Perform one training step w.r.t to the generator's parameters
Args:
quantum_instance (QuantumInstance): used to run generator network.
Ignored for a classical network.
shots (int): Number of shots for hardware or qasm execution.
Ignored for classical network
Returns:
dict: generator loss and updated parameters.
Raises:
NotImplementedError: not implemented
"""
raise NotImplementedError()
```
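A minimal sketch of how a concrete subclass could satisfy this abstract interface; `DummyGenerator` and its toy behaviour are illustrative assumptions only, not part of Qiskit Aqua.
```python
import numpy as np

class DummyGenerator(GenerativeNetwork):
    """Toy generator that stores a parameter vector and shrinks it on each step."""

    def __init__(self, num_parameters=2):
        super().__init__()
        self._num_parameters = num_parameters
        self._params = np.zeros(num_parameters)

    def set_seed(self, seed):
        np.random.seed(seed)

    def get_output(self, quantum_instance, params, shots):
        # Return the parameter vector itself as a stand-in "sample"
        return params if params is not None else self._params

    def loss(self):
        return float(np.sum(self._params ** 2))

    def train(self, quantum_instance=None, shots=None):
        # One toy update step: shrink the parameters towards zero
        self._params = 0.9 * self._params
        return {'loss': self.loss(), 'params': self._params}
```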
#### File: operators/converters/abelian_grouper.py
```python
import warnings
from typing import List, Tuple, Dict, cast, Optional
import numpy as np
import retworkx as rx
from qiskit.aqua import AquaError
from .converter_base import ConverterBase
from ..list_ops.list_op import ListOp
from ..list_ops.summed_op import SummedOp
from ..operator_base import OperatorBase
from ..primitive_ops.pauli_op import PauliOp
from ..primitive_ops.pauli_sum_op import PauliSumOp
from ..state_fns.operator_state_fn import OperatorStateFn
class AbelianGrouper(ConverterBase):
"""The AbelianGrouper converts SummedOps into a sum of Abelian sums.
Meaning, it will traverse the Operator, and when it finds a SummedOp, it will evaluate which of
the summed sub-Operators commute with one another. It will then convert each of the groups of
commuting Operators into their own SummedOps, and return the sum-of-commuting-SummedOps.
This is particularly useful for cases where mutually commuting groups can be handled
similarly, as in the case of Pauli Expectations, where commuting Paulis have the same
diagonalizing circuit rotation, or Pauli Evolutions, where commuting Paulis can be
diagonalized together.
"""
def __init__(self, traverse: bool = True) -> None:
"""
Args:
traverse: Whether to convert only the Operator passed to ``convert``, or traverse
down that Operator.
"""
self._traverse = traverse
def convert(self, operator: OperatorBase) -> OperatorBase:
"""Check if operator is a SummedOp, in which case covert it into a sum of mutually
commuting sums, or if the Operator contains sub-Operators and ``traverse`` is True,
attempt to convert any sub-Operators.
Args:
operator: The Operator to attempt to convert.
Returns:
The converted Operator.
"""
# pylint: disable=cyclic-import,import-outside-toplevel
from ..evolutions.evolved_op import EvolvedOp
# TODO: implement direct way
if isinstance(operator, PauliSumOp):
operator = operator.to_pauli_op()
if isinstance(operator, ListOp):
if isinstance(operator, SummedOp) and all(isinstance(op, PauliOp)
for op in operator.oplist):
# For now, we only support graphs over Paulis.
return self.group_subops(operator)
elif self._traverse:
return operator.traverse(self.convert)
else:
return operator
elif isinstance(operator, OperatorStateFn) and self._traverse:
return OperatorStateFn(self.convert(operator.primitive),
is_measurement=operator.is_measurement,
coeff=operator.coeff)
elif isinstance(operator, EvolvedOp) and self._traverse:
return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff) # type: ignore
else:
return operator
@classmethod
def group_subops(cls, list_op: ListOp, fast: Optional[bool] = None,
use_nx: Optional[bool] = None) -> ListOp:
"""Given a ListOp, attempt to group into Abelian ListOps of the same type.
Args:
list_op: The Operator to group into Abelian groups
fast: Ignored - parameter will be removed in future release
use_nx: Ignored - parameter will be removed in future release
Returns:
The grouped Operator.
Raises:
AquaError: If any of list_op's sub-ops is not ``PauliOp``.
"""
if fast is not None or use_nx is not None:
warnings.warn('Options `fast` and `use_nx` of `AbelianGrouper.group_subops` are '
'no longer used and are now deprecated and will be removed no '
'sooner than 3 months following the 0.8.0 release.')
# TODO: implement direct way
if isinstance(list_op, PauliSumOp):
list_op = list_op.to_pauli_op()
for op in list_op.oplist:
if not isinstance(op, PauliOp):
raise AquaError(
'Cannot determine Abelian groups if any Operator in list_op is not '
'`PauliOp`. E.g., {} ({})'.format(op, type(op)))
edges = cls._commutation_graph(list_op)
nodes = range(len(list_op))
graph = rx.PyGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from_no_data(edges)
# Keys in coloring_dict are nodes, values are colors
coloring_dict = rx.graph_greedy_color(graph)
groups = {} # type: Dict
# sort items so that the output is consistent with all options (fast and use_nx)
for idx, color in sorted(coloring_dict.items()):
groups.setdefault(color, []).append(list_op[idx])
group_ops = [list_op.__class__(group, abelian=True) for group in groups.values()]
if len(group_ops) == 1:
return group_ops[0] * list_op.coeff # type: ignore
return list_op.__class__(group_ops, coeff=list_op.coeff) # type: ignore
@staticmethod
def _commutation_graph(list_op: ListOp) -> List[Tuple[int, int]]:
"""Create edges (i, j) if i and j are not commutable.
Note:
This method is applicable to only PauliOps.
Args:
list_op: list_op
Returns:
A list of pairs of indices of the operators that are not commutable
"""
# convert a Pauli operator into int vector where {I: 0, X: 2, Y: 3, Z: 1}
mat1 = np.array([op.primitive.z + 2 * op.primitive.x for op in list_op], dtype=np.int8)
mat2 = mat1[:, None]
# mat3[i, j] is True if i and j are commutable with TPB
mat3 = (((mat1 * mat2) * (mat1 - mat2)) == 0).all(axis=2)
# return [(i, j) if mat3[i, j] is False and i < j]
return cast(List[Tuple[int, int]], list(zip(*np.where(np.triu(np.logical_not(mat3), k=1)))))
```
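A short usage sketch, assuming the 0.8-era `qiskit.aqua.operators` exports (`X`, `Y`, `Z`, `I`, `AbelianGrouper`) that the test module later in this document also imports:
```python
from qiskit.aqua.operators import I, X, Y, Z, AbelianGrouper

hamiltonian = (X ^ X) + (Y ^ Y) + (Z ^ Z) + (I ^ Z)
grouped = AbelianGrouper().convert(hamiltonian)
# ZZ and IZ commute qubit-wise, so they should land in the same abelian group,
# while XX and YY each form their own group.
print(grouped)
```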
#### File: evolutions/trotterizations/qdrift.py
```python
from typing import cast
import numpy as np
from qiskit.aqua import aqua_globals
from .trotterization_base import TrotterizationBase
from ...operator_base import OperatorBase
from ...list_ops.summed_op import SummedOp
from ...list_ops.composed_op import ComposedOp
from ...primitive_ops.pauli_sum_op import PauliSumOp
# pylint: disable=invalid-name
class QDrift(TrotterizationBase):
    """ The QDrift Trotterization method, which selects each term in the
Trotterization randomly, with a probability proportional to its weight. Based on the work
of <NAME> in https://arxiv.org/abs/1811.08017.
"""
def __init__(self, reps: int = 1) -> None:
r"""
Args:
reps: The number of times to repeat the Trotterization circuit.
"""
super().__init__(reps=reps)
def convert(self, operator: OperatorBase) -> OperatorBase:
# TODO: implement direct way
if isinstance(operator, PauliSumOp):
operator = operator.to_pauli_op()
if not isinstance(operator, SummedOp):
raise TypeError('Trotterization converters can only convert SummedOps.')
summed_op = cast(SummedOp, operator)
# We artificially make the weights positive, TODO check approximation performance
weights = np.abs([op.coeff for op in summed_op.oplist]) # type: ignore
lambd = sum(weights)
N = 2 * (lambd ** 2) * (summed_op.coeff ** 2)
factor = lambd * summed_op.coeff / (N * self.reps)
# The protocol calls for the removal of the individual coefficients,
# and multiplication by a constant factor.
scaled_ops = \
[(op * (factor / op.coeff)).exp_i() for op in summed_op.oplist] # type: ignore
sampled_ops = aqua_globals.random.choice(scaled_ops,
size=(int(N * self.reps),), # type: ignore
p=weights / lambd)
return ComposedOp(sampled_ops).reduce()
```
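A brief usage sketch, under the assumption that the aqua operator globals are importable as in the test module later in this document:
```python
from qiskit.aqua.operators import I, X, Z

hamiltonian = 0.5 * (X ^ I) + 0.25 * (Z ^ Z)      # a SummedOp of weighted Pauli terms
sampled_evolution = QDrift(reps=2).convert(hamiltonian)
print(sampled_evolution)   # a product of randomly sampled exponentiated terms
```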
#### File: operators/legacy/op_converter.py
```python
from typing import Union, Callable, cast
import itertools
import logging
import sys
import numpy as np
from qiskit.quantum_info import Pauli
from qiskit.tools.parallel import parallel_map
from qiskit.tools.events import TextProgressBar
from qiskit.aqua import AquaError, aqua_globals
from .weighted_pauli_operator import WeightedPauliOperator
from .matrix_operator import MatrixOperator
from .tpb_grouped_weighted_pauli_operator import TPBGroupedWeightedPauliOperator
logger = logging.getLogger(__name__)
def _conversion(basis, matrix):
pauli = Pauli.from_label(''.join(basis))
trace_value = np.sum(matrix.dot(pauli.to_spmatrix()).diagonal())
return trace_value, pauli
def to_weighted_pauli_operator(
operator: Union[WeightedPauliOperator, TPBGroupedWeightedPauliOperator, MatrixOperator]) \
-> WeightedPauliOperator:
"""
Converting a given operator to `WeightedPauliOperator`
Args:
operator: one of supported operator type
Returns:
The converted weighted pauli operator
Raises:
AquaError: Unsupported type to convert
Warnings:
        The time needed to convert a MatrixOperator to a Pauli-type Operator grows exponentially
        with the number of qubits. If you are converting a system with a large number of qubits,
        it will take time.
You can turn on DEBUG logging to check the progress.
"""
if operator.__class__ == WeightedPauliOperator:
return cast(WeightedPauliOperator, operator)
elif operator.__class__ == TPBGroupedWeightedPauliOperator:
# destroy the grouping but keep z2 symmetries info
op_tpb = cast(TPBGroupedWeightedPauliOperator, operator)
return WeightedPauliOperator(paulis=op_tpb.paulis, z2_symmetries=op_tpb.z2_symmetries,
name=op_tpb.name)
elif operator.__class__ == MatrixOperator:
op_m = cast(MatrixOperator, operator)
if op_m.is_empty():
return WeightedPauliOperator(paulis=[])
if op_m.num_qubits > 10:
logger.warning("Converting time from a MatrixOperator to a Pauli-type Operator grows "
"exponentially. If you are converting a system with large number of "
"qubits, it will take time. And now you are converting a %s-qubit "
"Hamiltonian. You can turn on DEBUG logging to check the progress."
"", op_m.num_qubits)
num_qubits = op_m.num_qubits
coeff = 2 ** (-num_qubits)
paulis = []
possible_basis = 'IXYZ'
if op_m.dia_matrix is not None:
possible_basis = 'IZ'
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Converting a MatrixOperator to a Pauli-type Operator:")
TextProgressBar(sys.stderr)
results = parallel_map(_conversion,
list(itertools.product(possible_basis, repeat=num_qubits)),
task_kwargs={"matrix": op_m._matrix},
num_processes=aqua_globals.num_processes)
for trace_value, pauli in results:
weight = trace_value * coeff
if weight != 0.0 and np.abs(weight) > op_m.atol:
paulis.append([weight, pauli])
return WeightedPauliOperator(paulis, z2_symmetries=operator.z2_symmetries,
name=operator.name)
else:
raise AquaError("Unsupported type to convert to WeightedPauliOperator: "
"{}".format(operator.__class__))
def to_matrix_operator(
operator: Union[WeightedPauliOperator, TPBGroupedWeightedPauliOperator, MatrixOperator])\
-> MatrixOperator:
"""
Converting a given operator to `MatrixOperator`
Args:
operator: one of supported operator type
Returns:
the converted matrix operator
Raises:
AquaError: Unsupported type to convert
"""
if operator.__class__ == WeightedPauliOperator:
op_w = cast(WeightedPauliOperator, operator)
if op_w.is_empty():
return MatrixOperator(None)
hamiltonian = 0
for weight, pauli in op_w.paulis:
hamiltonian += weight * pauli.to_spmatrix()
return MatrixOperator(matrix=hamiltonian, z2_symmetries=op_w.z2_symmetries,
name=op_w.name)
elif operator.__class__ == TPBGroupedWeightedPauliOperator:
op_tpb = cast(TPBGroupedWeightedPauliOperator, operator)
op = WeightedPauliOperator(paulis=op_tpb.paulis, z2_symmetries=op_tpb.z2_symmetries,
name=op_tpb.name)
return to_matrix_operator(op)
elif operator.__class__ == MatrixOperator:
return cast(MatrixOperator, operator)
else:
raise AquaError("Unsupported type to convert to MatrixOperator: "
"{}".format(operator.__class__))
# pylint: disable=invalid-name
def to_tpb_grouped_weighted_pauli_operator(
operator: Union[WeightedPauliOperator, TPBGroupedWeightedPauliOperator, MatrixOperator],
grouping_func: Callable, **kwargs: int) -> TPBGroupedWeightedPauliOperator:
"""
Args:
operator: one of supported operator type
grouping_func: a callable function that grouped the paulis in the operator.
kwargs: other setting for `grouping_func` function
Returns:
the converted tensor-product-basis grouped weighted pauli operator
Raises:
AquaError: Unsupported type to convert
"""
if operator.__class__ == WeightedPauliOperator:
return grouping_func(operator, **kwargs)
elif operator.__class__ == TPBGroupedWeightedPauliOperator:
# different tpb grouping approach is asked
op_tpb = cast(TPBGroupedWeightedPauliOperator, operator)
if grouping_func != op_tpb.grouping_func and kwargs != op_tpb.kwargs:
return grouping_func(op_tpb, **kwargs)
else:
return op_tpb
elif operator.__class__ == MatrixOperator:
op = to_weighted_pauli_operator(operator)
return grouping_func(op, **kwargs)
else:
raise AquaError("Unsupported type to convert to TPBGroupedWeightedPauliOperator: "
"{}".format(operator.__class__))
```
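A tiny usage sketch of the converters above, assuming a 0.8-era qiskit-aqua install; the example matrix is simply the Pauli Z matrix:
```python
import numpy as np
from qiskit.aqua.operators import MatrixOperator

z_matrix = np.array([[1.0, 0.0], [0.0, -1.0]])
pauli_op = to_weighted_pauli_operator(MatrixOperator(matrix=z_matrix))
print(pauli_op.paulis)   # expected: a single Pauli 'Z' with weight 1.0
```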
#### File: aqua/utils/dataset_helper.py
```python
import operator
from copy import deepcopy
import numpy as np
from sklearn.decomposition import PCA
def get_num_classes(dataset):
"""
Check number of classes in a given dataset
Args:
dataset(dict): key is the class name and value is the data.
Returns:
int: number of classes
"""
return len(list(dataset.keys()))
def get_feature_dimension(dataset):
"""
Check feature dimension of a given dataset
Args:
dataset(dict): key is the class name and value is the data.
Returns:
int: feature dimension, -1 denotes no data in the dataset.
Raises:
TypeError: invalid data set
"""
if not isinstance(dataset, dict):
raise TypeError("Dataset is not formatted as a dict. Please check it.")
feature_dim = -1
for v in dataset.values():
if not isinstance(v, np.ndarray):
v = np.asarray(v)
return v.shape[1]
return feature_dim
# pylint: disable=invalid-name
def split_dataset_to_data_and_labels(dataset, class_names=None):
"""
Split dataset to data and labels numpy array
If `class_names` is given, use the desired label to class name mapping,
or create the mapping based on the keys in the dataset.
Args:
dataset (dict): {'A': numpy.ndarray, 'B': numpy.ndarray, ...}
class_names (dict): class name of dataset, {class_name: label}
Returns:
Union(tuple(list, dict), list):
List contains two arrays of numpy.ndarray type
where the array at index 0 is data, an NxD array, and at
index 1 it is labels, an Nx1 array, containing values in range
0 to K-1, where K is the number of classes. The dict is a map
{str: int}, mapping class name to label. The tuple of list, dict is returned
when `class_names` is not None, otherwise just the list is returned.
Raises:
KeyError: data set invalid
"""
data = []
labels = []
if class_names is None:
sorted_classes_name = sorted(list(dataset.keys()))
class_to_label = {k: idx for idx, k in enumerate(sorted_classes_name)}
else:
class_to_label = class_names
sorted_label = sorted(class_to_label.items(), key=operator.itemgetter(1))
for class_name, _ in sorted_label:
values = dataset[class_name]
for value in values:
data.append(value)
try:
labels.append(class_to_label[class_name])
except Exception as ex: # pylint: disable=broad-except
raise KeyError('The dataset has different class names to '
'the training data. error message: {}'.format(ex)) from ex
data = np.asarray(data)
labels = np.asarray(labels)
if class_names is None:
return [data, labels], class_to_label
else:
return [data, labels]
def map_label_to_class_name(predicted_labels, label_to_class):
"""
Helper converts labels (numeric) to class name (string)
Args:
predicted_labels (numpy.ndarray): Nx1 array
label_to_class (dict or list): a mapping form label (numeric) to class name (str)
Returns:
str: predicted class names of each datum
"""
if not isinstance(predicted_labels, np.ndarray):
predicted_labels = np.asarray([predicted_labels])
predicted_class_names = []
for predicted_label in predicted_labels:
predicted_class_names.append(label_to_class[predicted_label])
return predicted_class_names
def reduce_dim_to_via_pca(x, dim):
"""
Reduce the data dimension via pca
Args:
x (numpy.ndarray): NxD array
dim (int): the targeted dimension D'
Returns:
numpy.ndarray: NxD' array
"""
x_reduced = PCA(n_components=dim).fit_transform(x)
return x_reduced
def discretize_and_truncate(data, bounds, num_qubits, return_data_grid_elements=False,
return_prob=False, prob_non_zero=True):
"""
Discretize & truncate classical data to enable digital encoding in qubit registers
whereby the data grid is [[grid elements dim 0],..., [grid elements dim k]]
Args:
data (list or array or np.array): training data (int or float) of dimension k
bounds (list or array or np.array): k min/max data values
[[min_0,max_0],...,[min_k-1,max_k-1]] if univariate data: [min_0,max_0]
num_qubits (list or array or np.array): k numbers of qubits to determine
representation resolution, i.e. n qubits enable the representation of 2**n
values [num_qubits_0,..., num_qubits_k-1]
return_data_grid_elements (Bool): if True - return an array with the data grid
elements
return_prob (Bool): if True - return a normalized frequency count of the discretized and
truncated data samples
prob_non_zero (Bool): if True - set 0 values in the prob_data to 10^-1 to avoid potential
problems when using the probabilities in loss functions - division by 0
Returns:
array: discretized and truncated data
array: data grid [[grid elements dim 0],..., [grid elements dim k]]
array: grid elements, Product_j=0^k-1 2**num_qubits_j element vectors
array: data probability, normalized frequency count sorted from smallest to biggest element
"""
# Truncate the data
if np.ndim(bounds) == 1:
bounds = np.reshape(bounds, (1, len(bounds)))
data = data.reshape((len(data), len(num_qubits)))
temp = []
for i, data_sample in enumerate(data):
append = True
for j, entry in enumerate(data_sample):
if entry < bounds[j, 0]:
append = False
if entry > bounds[j, 1]:
append = False
if append:
temp.append(list(data_sample))
data = np.array(temp)
# Fit the data to the data element grid
for j, prec in enumerate(num_qubits):
data_row = data[:, j] # dim j of all data samples
# prepare element grid for dim j
elements_current_dim = np.linspace(bounds[j, 0], bounds[j, 1], (2 ** prec))
# find index for data sample in grid
index_grid = np.searchsorted(
elements_current_dim,
data_row - (elements_current_dim[1] - elements_current_dim[0]) * 0.5)
for k, index in enumerate(index_grid):
data[k, j] = elements_current_dim[index]
if j == 0:
if len(num_qubits) > 1:
data_grid = [elements_current_dim]
else:
data_grid = elements_current_dim
grid_elements = elements_current_dim
elif j == 1:
temp = []
for grid_element in grid_elements:
for element_current in elements_current_dim:
temp.append([grid_element, element_current])
grid_elements = temp
data_grid.append(elements_current_dim)
else:
temp = []
for grid_element in grid_elements:
for element_current in elements_current_dim:
temp.append(deepcopy(grid_element).append(element_current))
grid_elements = deepcopy(temp)
data_grid.append(elements_current_dim)
data_grid = np.array(data_grid)
data = np.reshape(data, (len(data), len(data[0])))
if return_prob:
if np.ndim(data) > 1:
prob_data = np.zeros(int(np.prod(np.power(np.ones(len(data[0])) * 2, num_qubits))))
else:
prob_data = np.zeros(int(np.prod(np.power(np.array([2]), num_qubits))))
for data_element in data:
for i, element in enumerate(grid_elements):
if all(data_element == element):
prob_data[i] += 1 / len(data)
if prob_non_zero:
# add epsilon to avoid 0 entries which can be problematic in loss functions (division)
prob_data = [1e-10 if x == 0 else x for x in prob_data]
if return_data_grid_elements:
return data, data_grid, grid_elements, prob_data
else:
return data, data_grid, prob_data
else:
if return_data_grid_elements:
return data, data_grid, grid_elements
else:
return data, data_grid
```
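A short, self-contained usage sketch for the helpers above; the toy two-class dataset is made up for illustration:
```python
import numpy as np

dataset = {
    'A': np.array([[0.0, 1.0], [0.2, 0.8]]),
    'B': np.array([[1.0, 0.0]]),
}
(data, labels), class_to_label = split_dataset_to_data_and_labels(dataset)
print(data.shape, labels, class_to_label)              # (3, 2) [0 0 1] {'A': 0, 'B': 1}
print(get_num_classes(dataset), get_feature_dimension(dataset))   # 2 2
```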
#### File: circuit/library/raw_feature_vector.py
```python
from typing import Set, List, Optional
import numpy as np
from qiskit.circuit import QuantumRegister, ParameterVector, ParameterExpression, Gate
from qiskit.circuit.library import BlueprintCircuit
class RawFeatureVector(BlueprintCircuit):
"""The raw feature vector circuit.
This circuit acts as parameterized initialization for statevectors with ``feature_dimension``
dimensions, thus with ``log2(feature_dimension)`` qubits. As long as there are free parameters,
this circuit holds a placeholder instruction and can not be decomposed.
Once all parameters are bound, the placeholder is replaced by a state initialization and can
be unrolled.
In ML, this circuit can be used to load the training data into qubit amplitudes. It does not
    apply a kernel transformation. (Therefore, it is a "raw" feature vector.)
Examples:
.. code-block::
from qiskit.ml.circuit.library import RawFeatureVector
circuit = RawFeatureVector(4)
print(circuit.num_qubits)
# prints: 2
print(circuit.draw(output='text'))
# prints:
# ┌──────┐
# q_0: ┤0 ├
# │ Raw │
# q_1: ┤1 ├
# └──────┘
print(circuit.ordered_parameters)
# prints: [Parameter(p[0]), Parameter(p[1]), Parameter(p[2]), Parameter(p[3])]
import numpy as np
state = np.array([1, 0, 0, 1]) / np.sqrt(2)
bound = circuit.assign_parameters(state)
print(bound.draw())
# prints:
# ┌──────────────────────────────────┐
# q_0: ┤0 ├
# │ initialize(0.70711,0,0,0.70711) │
# q_1: ┤1 ├
# └──────────────────────────────────┘
"""
def __init__(self, feature_dimension: Optional[int]) -> None:
"""
Args:
feature_dimension: The feature dimension and number of qubits.
"""
super().__init__()
self._num_qubits = None # type: int
self._parameters = None # type: List[ParameterExpression]
if feature_dimension:
self.feature_dimension = feature_dimension
def _build(self):
super()._build()
# if the parameters are fully specified, use the initialize instruction
if len(self.parameters) == 0:
self.initialize(self._parameters, self.qubits) # pylint: disable=no-member
# otherwise get a gate that acts as placeholder
else:
placeholder = Gate('Raw', self.num_qubits, self._parameters[:], label='Raw')
self.append(placeholder, self.qubits)
def _check_configuration(self, raise_on_failure=True):
pass
@property
def num_qubits(self) -> int:
"""Returns the number of qubits in this circuit.
Returns:
The number of qubits.
"""
return self._num_qubits if self._num_qubits is not None else 0
@num_qubits.setter
def num_qubits(self, num_qubits: int) -> None:
"""Set the number of qubits for the n-local circuit.
Args:
The new number of qubits.
"""
if self._num_qubits != num_qubits:
# invalidate the circuit
self._invalidate()
self._num_qubits = num_qubits
self._parameters = list(ParameterVector('p', length=self.feature_dimension))
self.add_register(QuantumRegister(self.num_qubits, 'q'))
@property
def feature_dimension(self) -> int:
"""Return the feature dimension.
Returns:
The feature dimension, which is ``2 ** num_qubits``.
"""
return 2 ** self.num_qubits
@feature_dimension.setter
def feature_dimension(self, feature_dimension: int) -> None:
"""Set the feature dimension.
Args:
feature_dimension: The new feature dimension. Must be a power of 2.
Raises:
ValueError: If ``feature_dimension`` is not a power of 2.
"""
num_qubits = np.log2(feature_dimension)
if int(num_qubits) != num_qubits:
raise ValueError('feature_dimension must be a power of 2!')
if self._num_qubits is None or num_qubits != self._num_qubits:
self._invalidate()
self.num_qubits = int(num_qubits)
def _invalidate(self):
super()._invalidate()
self._parameters = None
self._num_qubits = None
@property
def parameters(self) -> Set[ParameterExpression]:
"""Return the free parameters in the RawFeatureVector.
Returns:
A set of the free parameters.
"""
return set(self.ordered_parameters)
@property
def ordered_parameters(self) -> List[ParameterExpression]:
"""Return the free parameters in the RawFeatureVector.
Returns:
A list of the free parameters.
"""
return list(param for param in self._parameters if isinstance(param, ParameterExpression))
def bind_parameters(self, value_dict):
"""Bind parameters."""
if not isinstance(value_dict, dict):
value_dict = dict(zip(self.ordered_parameters, value_dict))
return super().bind_parameters(value_dict)
def assign_parameters(self, param_dict, inplace=False):
"""Call the initialize instruction."""
if not isinstance(param_dict, dict):
param_dict = dict(zip(self.ordered_parameters, param_dict))
if inplace:
dest = self
else:
dest = RawFeatureVector(2 ** self.num_qubits)
dest._build()
dest._parameters = self._parameters.copy()
# update the parameter list
for i, param in enumerate(dest._parameters):
if param in param_dict.keys():
dest._parameters[i] = param_dict[param]
# if fully bound call the initialize instruction
if len(dest.parameters) == 0:
dest._data = [] # wipe the current data
parameters = dest._parameters / np.linalg.norm(dest._parameters)
dest.initialize(parameters, dest.qubits) # pylint: disable=no-member
# else update the placeholder
else:
dest.data[0][0].params = dest._parameters
return None if inplace else dest
```
#### File: aqua/operators/test_op_construction.py
```python
import unittest
from test.aqua import QiskitAquaTestCase
import itertools
import scipy
from scipy.stats import unitary_group
import numpy as np
from ddt import ddt, data
from qiskit import QiskitError
from qiskit.aqua import AquaError
from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector
from qiskit.extensions.exceptions import ExtensionError
from qiskit.quantum_info import Operator, Pauli, Statevector
from qiskit.circuit.library import CZGate, ZGate
from qiskit.aqua.operators import (
X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn,
CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp,
SummedOp, OperatorBase, Zero
)
from qiskit.aqua.operators import MatrixOperator
# pylint: disable=invalid-name
@ddt
class TestOpConstruction(QiskitAquaTestCase):
"""Operator Construction tests."""
    def test_pauli_primitives(self):
        """ Pauli primitive construction test """
newop = X ^ Y ^ Z ^ I
self.assertEqual(newop.primitive, Pauli(label='XYZI'))
kpower_op = (Y ^ 5) ^ (I ^ 3)
self.assertEqual(kpower_op.primitive, Pauli(label='YYYYYIII'))
kpower_op2 = (Y ^ I) ^ 4
self.assertEqual(kpower_op2.primitive, Pauli(label='YIYIYIYI'))
# Check immutability
self.assertEqual(X.primitive, Pauli(label='X'))
self.assertEqual(Y.primitive, Pauli(label='Y'))
self.assertEqual(Z.primitive, Pauli(label='Z'))
self.assertEqual(I.primitive, Pauli(label='I'))
def test_composed_eval(self):
""" Test eval of ComposedOp """
self.assertAlmostEqual(Minus.eval('1'), -.5 ** .5)
def test_evals(self):
""" evals test """
# pylint: disable=no-member
# TODO: Think about eval names
self.assertEqual(Z.eval('0').eval('0'), 1)
self.assertEqual(Z.eval('1').eval('0'), 0)
self.assertEqual(Z.eval('0').eval('1'), 0)
self.assertEqual(Z.eval('1').eval('1'), -1)
self.assertEqual(X.eval('0').eval('0'), 0)
self.assertEqual(X.eval('1').eval('0'), 1)
self.assertEqual(X.eval('0').eval('1'), 1)
self.assertEqual(X.eval('1').eval('1'), 0)
self.assertEqual(Y.eval('0').eval('0'), 0)
self.assertEqual(Y.eval('1').eval('0'), -1j)
self.assertEqual(Y.eval('0').eval('1'), 1j)
self.assertEqual(Y.eval('1').eval('1'), 0)
with self.assertRaises(ValueError):
Y.eval('11')
with self.assertRaises(ValueError):
(X ^ Y).eval('1111')
with self.assertRaises(ValueError):
Y.eval((X ^ X).to_matrix_op())
# Check that Pauli logic eval returns same as matrix logic
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('0'), 1)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('0'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('1'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('1'), -1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('0'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('1'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('1'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('0'), -1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('1'), 1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('1'), 0)
pauli_op = Z ^ I ^ X ^ Y
mat_op = PrimitiveOp(pauli_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=pauli_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
# print('{} {} {} {}'.format(bstr1, bstr2, pauli_op.eval(bstr1, bstr2),
# mat_op.eval(bstr1, bstr2)))
np.testing.assert_array_almost_equal(pauli_op.eval(bstr1).eval(bstr2),
mat_op.eval(bstr1).eval(bstr2))
gnarly_op = SummedOp([(H ^ I ^ Y).compose(X ^ X ^ Z).tensor(Z),
PrimitiveOp(Operator.from_label('+r0I')),
3 * (X ^ CX ^ T)], coeff=3 + .2j)
gnarly_mat_op = PrimitiveOp(gnarly_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=gnarly_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
np.testing.assert_array_almost_equal(gnarly_op.eval(bstr1).eval(bstr2),
gnarly_mat_op.eval(bstr1).eval(bstr2))
def test_circuit_construction(self):
""" circuit construction test """
hadq2 = H ^ I
cz = hadq2.compose(CX).compose(hadq2)
qc = QuantumCircuit(2)
qc.append(cz.primitive, qargs=range(2))
ref_cz_mat = PrimitiveOp(CZGate()).to_matrix()
np.testing.assert_array_almost_equal(cz.to_matrix(), ref_cz_mat)
def test_io_consistency(self):
""" consistency test """
new_op = X ^ Y ^ I
label = 'XYI'
# label = new_op.primitive.to_label()
self.assertEqual(str(new_op.primitive), label)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
Operator.from_label(label).data)
self.assertEqual(new_op.primitive, Pauli(label=label))
x_mat = X.primitive.to_matrix()
y_mat = Y.primitive.to_matrix()
i_mat = np.eye(2, 2)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
np.kron(np.kron(x_mat, y_mat), i_mat))
hi = np.kron(H.to_matrix(), I.to_matrix())
hi2 = Operator.from_label('HI').data
hi3 = (H ^ I).to_matrix()
np.testing.assert_array_almost_equal(hi, hi2)
np.testing.assert_array_almost_equal(hi2, hi3)
xy = np.kron(X.to_matrix(), Y.to_matrix())
xy2 = Operator.from_label('XY').data
xy3 = (X ^ Y).to_matrix()
np.testing.assert_array_almost_equal(xy, xy2)
np.testing.assert_array_almost_equal(xy2, xy3)
# Check if numpy array instantiation is the same as from Operator
matrix_op = Operator.from_label('+r')
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# Ditto list of lists
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op.data.tolist()).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# TODO make sure this works once we resolve endianness mayhem
# qc = QuantumCircuit(3)
# qc.x(2)
# qc.y(1)
# from qiskit import BasicAer, QuantumCircuit, execute
# unitary = execute(qc, BasicAer.get_backend('unitary_simulator')).result().get_unitary()
# np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(), unitary)
    def test_to_matrix(self):
        """to matrix test """
np.testing.assert_array_equal(X.to_matrix(), Operator.from_label('X').data)
np.testing.assert_array_equal(Y.to_matrix(), Operator.from_label('Y').data)
np.testing.assert_array_equal(Z.to_matrix(), Operator.from_label('Z').data)
op1 = Y + H
np.testing.assert_array_almost_equal(op1.to_matrix(), Y.to_matrix() + H.to_matrix())
op2 = op1 * .5
np.testing.assert_array_almost_equal(op2.to_matrix(), op1.to_matrix() * .5)
op3 = (4 - .6j) * op2
np.testing.assert_array_almost_equal(op3.to_matrix(), op2.to_matrix() * (4 - .6j))
op4 = op3.tensor(X)
np.testing.assert_array_almost_equal(op4.to_matrix(),
np.kron(op3.to_matrix(), X.to_matrix()))
op5 = op4.compose(H ^ I)
np.testing.assert_array_almost_equal(op5.to_matrix(), np.dot(op4.to_matrix(),
(H ^ I).to_matrix()))
op6 = op5 + PrimitiveOp(Operator.from_label('+r').data)
np.testing.assert_array_almost_equal(
op6.to_matrix(), op5.to_matrix() + Operator.from_label('+r').data)
param = Parameter("α")
m = np.array([[0, -1j], [1j, 0]])
op7 = MatrixOp(m, param)
np.testing.assert_array_equal(op7.to_matrix(), m * param)
param = Parameter("β")
op8 = PauliOp(primitive=Pauli(label="Y"), coeff=param)
np.testing.assert_array_equal(op8.to_matrix(), m * param)
param = Parameter("γ")
qc = QuantumCircuit(1)
qc.h(0)
op9 = CircuitOp(qc, coeff=param)
m = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
np.testing.assert_array_equal(op9.to_matrix(), m * param)
def test_circuit_op_to_matrix(self):
""" test CircuitOp.to_matrix """
qc = QuantumCircuit(1)
qc.rz(1.0, 0)
qcop = CircuitOp(qc)
np.testing.assert_array_almost_equal(
qcop.to_matrix(), scipy.linalg.expm(-0.5j * Z.to_matrix()))
def test_matrix_to_instruction(self):
"""Test MatrixOp.to_instruction yields an Instruction object."""
matop = (H ^ 3).to_matrix_op()
with self.subTest('assert to_instruction returns Instruction'):
self.assertIsInstance(matop.to_instruction(), Instruction)
matop = ((H ^ 3) + (Z ^ 3)).to_matrix_op()
with self.subTest('matrix operator is not unitary'):
with self.assertRaises(ExtensionError):
matop.to_instruction()
def test_adjoint(self):
""" adjoint test """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
np.testing.assert_array_almost_equal(np.conj(np.transpose(gnarly_op.to_matrix())),
gnarly_op.adjoint().to_matrix())
def test_primitive_strings(self):
""" get primitives test """
self.assertEqual(X.primitive_strings(), {'Pauli'})
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
self.assertEqual(gnarly_op.primitive_strings(), {'QuantumCircuit', 'Matrix'})
def test_to_pauli_op(self):
""" Test to_pauli_op method """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
mat_op = gnarly_op.to_matrix_op()
pauli_op = gnarly_op.to_pauli_op()
self.assertIsInstance(pauli_op, SummedOp)
for p in pauli_op:
self.assertIsInstance(p, PauliOp)
np.testing.assert_array_almost_equal(mat_op.to_matrix(), pauli_op.to_matrix())
def test_circuit_permute(self):
r""" Test the CircuitOp's .permute method """
perm = range(7)[::-1]
c_op = (((CX ^ 3) ^ X) @
(H ^ 7) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X) @
(Y ^ (CX ^ 3)) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X))
c_op_perm = c_op.permute(perm)
self.assertNotEqual(c_op, c_op_perm)
c_op_id = c_op_perm.permute(perm)
self.assertEqual(c_op, c_op_id)
def test_summed_op_reduce(self):
"""Test SummedOp"""
sum_op = (X ^ X * 2) + (Y ^ Y) # type: PauliSumOp
sum_op = sum_op.to_pauli_op() # type: SummedOp[PauliOp]
with self.subTest('SummedOp test 1'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1])
sum_op = (X ^ X * 2) + (Y ^ Y)
sum_op += Y ^ Y
sum_op = sum_op.to_pauli_op() # type: SummedOp[PauliOp]
with self.subTest('SummedOp test 2-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 2-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 2])
sum_op = (X ^ X * 2) + (Y ^ Y)
sum_op += (Y ^ Y) + (X ^ X * 2)
sum_op = sum_op.to_pauli_op() # type: SummedOp[PauliOp]
with self.subTest('SummedOp test 3-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY', 'XX'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1, 1, 2])
sum_op = sum_op.reduce().to_pauli_op()
with self.subTest('SummedOp test 3-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
with self.subTest('SummedOp test 4-a'):
self.assertEqual(sum_op.coeff, 2)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 4-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += Y ^ Y
with self.subTest('SummedOp test 5-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 5-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 3])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += ((X ^ X) * 2 + (Y ^ Y)).to_pauli_op()
with self.subTest('SummedOp test 6-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 6-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [6, 3])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += sum_op
with self.subTest('SummedOp test 7-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 4, 2])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 7-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [8, 4])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2) + SummedOp([X ^ X * 2, Z ^ Z], 3)
with self.subTest('SummedOp test 8-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 6, 3])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 8-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [10, 2, 3])
def test_compose_op_of_different_dim(self):
"""
Test if smaller operator expands to correct dim when composed with bigger operator.
Test if PrimitiveOps compose methods are consistent.
"""
# PauliOps of different dim
xy_p = (X ^ Y)
xyz_p = (X ^ Y ^ Z)
pauli_op = xy_p @ xyz_p
expected_result = (I ^ I ^ Z)
self.assertEqual(pauli_op, expected_result)
# MatrixOps of different dim
xy_m = xy_p.to_matrix_op()
xyz_m = xyz_p.to_matrix_op()
matrix_op = xy_m @ xyz_m
self.assertEqual(matrix_op, expected_result.to_matrix_op())
# CircuitOps of different dim
xy_c = xy_p.to_circuit_op()
xyz_c = xyz_p.to_circuit_op()
circuit_op = xy_c @ xyz_c
self.assertTrue(np.array_equal(pauli_op.to_matrix(), matrix_op.to_matrix()))
self.assertTrue(np.allclose(pauli_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
self.assertTrue(np.allclose(matrix_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
def test_permute_on_primitive_op(self):
""" Test if permute methods of PrimitiveOps are consistent and work as expected. """
indices = [1, 2, 4]
# PauliOp
pauli_op = (X ^ Y ^ Z)
permuted_pauli_op = pauli_op.permute(indices)
expected_pauli_op = (X ^ I ^ Y ^ Z ^ I)
self.assertEqual(permuted_pauli_op, expected_pauli_op)
# CircuitOp
circuit_op = pauli_op.to_circuit_op()
permuted_circuit_op = circuit_op.permute(indices)
expected_circuit_op = expected_pauli_op.to_circuit_op()
self.assertEqual(permuted_circuit_op.primitive.__str__(),
expected_circuit_op.primitive.__str__())
# MatrixOp
matrix_op = pauli_op.to_matrix_op()
permuted_matrix_op = matrix_op.permute(indices)
expected_matrix_op = expected_pauli_op.to_matrix_op()
equal = np.allclose(permuted_matrix_op.to_matrix(), expected_matrix_op.to_matrix())
self.assertTrue(equal)
def test_permute_on_list_op(self):
""" Test if ListOp permute method is consistent with PrimitiveOps permute methods. """
op1 = (X ^ Y ^ Z).to_circuit_op()
op2 = (Z ^ X ^ Y)
# ComposedOp
indices = [1, 2, 0]
primitive_op = op1 @ op2
primitive_op_perm = primitive_op.permute(indices) # CircuitOp.permute
composed_op = ComposedOp([op1, op2])
composed_op_perm = composed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = composed_op_perm.oplist[0] @ composed_op_perm.oplist[1]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# TensoredOp
indices = [3, 5, 4, 0, 2, 1]
primitive_op = op1 ^ op2
primitive_op_perm = primitive_op.permute(indices)
tensored_op = TensoredOp([op1, op2])
tensored_op_perm = tensored_op.permute(indices)
# reduce the ListOp to PrimitiveOp
composed_oplist = tensored_op_perm.oplist
to_primitive = \
composed_oplist[0] @ (composed_oplist[1].oplist[0] ^ composed_oplist[1].oplist[1]) @ \
composed_oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# SummedOp
primitive_op = (X ^ Y ^ Z)
summed_op = SummedOp([primitive_op])
indices = [1, 2, 0]
primitive_op_perm = primitive_op.permute(indices) # PauliOp.permute
summed_op_perm = summed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = summed_op_perm.oplist[0] @ primitive_op @ summed_op_perm.oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
def test_expand_on_list_op(self):
""" Test if expanded ListOp has expected num_qubits. """
add_qubits = 3
# ComposedOp
composed_op = ComposedOp([(X ^ Y ^ Z), (H ^ T), (Z ^ X ^ Y ^ Z).to_matrix_op()])
expanded = composed_op._expand_dim(add_qubits)
self.assertEqual(composed_op.num_qubits + add_qubits, expanded.num_qubits)
# TensoredOp
tensored_op = TensoredOp([(X ^ Y), (Z ^ I)])
expanded = tensored_op._expand_dim(add_qubits)
self.assertEqual(tensored_op.num_qubits + add_qubits, expanded.num_qubits)
# SummedOp
summed_op = SummedOp([(X ^ Y), (Z ^ I ^ Z)])
expanded = summed_op._expand_dim(add_qubits)
self.assertEqual(summed_op.num_qubits + add_qubits, expanded.num_qubits)
def test_expand_on_state_fn(self):
""" Test if expanded StateFn has expected num_qubits. """
num_qubits = 3
add_qubits = 2
# case CircuitStateFn, with primitive QuantumCircuit
qc2 = QuantumCircuit(num_qubits)
qc2.cx(0, 1)
cfn = CircuitStateFn(qc2, is_measurement=True)
cfn_exp = cfn._expand_dim(add_qubits)
self.assertEqual(cfn_exp.num_qubits, add_qubits + num_qubits)
# case OperatorStateFn, with OperatorBase primitive, in our case CircuitStateFn
osfn = OperatorStateFn(cfn)
osfn_exp = osfn._expand_dim(add_qubits)
self.assertEqual(osfn_exp.num_qubits, add_qubits + num_qubits)
# case DictStateFn
dsfn = DictStateFn('1'*num_qubits, is_measurement=True)
self.assertEqual(dsfn.num_qubits, num_qubits)
dsfn_exp = dsfn._expand_dim(add_qubits)
self.assertEqual(dsfn_exp.num_qubits, num_qubits + add_qubits)
# case VectorStateFn
vsfn = VectorStateFn(np.ones(2**num_qubits, dtype=complex))
self.assertEqual(vsfn.num_qubits, num_qubits)
vsfn_exp = vsfn._expand_dim(add_qubits)
self.assertEqual(vsfn_exp.num_qubits, num_qubits + add_qubits)
def test_permute_on_state_fn(self):
""" Test if StateFns permute are consistent. """
num_qubits = 4
dim = 2**num_qubits
primitive_list = [1.0/(i+1) for i in range(dim)]
primitive_dict = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
dict_fn = DictStateFn(primitive=primitive_dict, is_measurement=True)
vec_fn = VectorStateFn(primitive=primitive_list, is_measurement=True)
# check if dict_fn and vec_fn are equivalent
equivalent = np.allclose(dict_fn.to_matrix(), vec_fn.to_matrix())
self.assertTrue(equivalent)
# permute
indices = [2, 3, 0, 1]
permute_dict = dict_fn.permute(indices)
permute_vect = vec_fn.permute(indices)
equivalent = np.allclose(permute_dict.to_matrix(), permute_vect.to_matrix())
self.assertTrue(equivalent)
def test_compose_consistency(self):
"""Test if PrimitiveOp @ ComposedOp is consistent with ComposedOp @ PrimitiveOp."""
# PauliOp
op1 = (X ^ Y ^ Z)
op2 = (X ^ Y ^ Z)
op3 = (X ^ Y ^ Z).to_circuit_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
        # CircuitOp
op1 = op1.to_circuit_op()
op2 = op2.to_circuit_op()
op3 = op3.to_matrix_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
# MatrixOp
op1 = op1.to_matrix_op()
op2 = op2.to_matrix_op()
op3 = op3.to_pauli_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
def test_compose_with_indices(self):
""" Test compose method using its permutation feature."""
pauli_op = (X ^ Y ^ Z)
circuit_op = (T ^ H)
matrix_op = (X ^ Y ^ H ^ T).to_matrix_op()
evolved_op = EvolvedOp(matrix_op)
# composition of PrimitiveOps
num_qubits = 4
primitive_op = pauli_op @ circuit_op @ matrix_op
composed_op = pauli_op @ circuit_op @ evolved_op
self.assertEqual(primitive_op.num_qubits, num_qubits)
self.assertEqual(composed_op.num_qubits, num_qubits)
# with permutation
num_qubits = 5
indices = [1, 4]
permuted_primitive_op = evolved_op @ circuit_op.permute(indices) @ pauli_op @ matrix_op
composed_primitive_op = \
evolved_op @ pauli_op.compose(circuit_op, permutation=indices, front=True) @ matrix_op
self.assertTrue(np.allclose(permuted_primitive_op.to_matrix(),
composed_primitive_op.to_matrix()))
self.assertEqual(num_qubits, permuted_primitive_op.num_qubits)
# ListOp
num_qubits = 6
tensored_op = TensoredOp([pauli_op, circuit_op])
summed_op = pauli_op + circuit_op.permute([2, 1])
composed_op = circuit_op @ evolved_op @ matrix_op
list_op = summed_op @ composed_op.compose(tensored_op, permutation=[1, 2, 3, 5, 4],
front=True)
self.assertEqual(num_qubits, list_op.num_qubits)
num_qubits = 4
circuit_fn = CircuitStateFn(primitive=circuit_op.primitive, is_measurement=True)
operator_fn = OperatorStateFn(primitive=circuit_op ^ circuit_op, is_measurement=True)
no_perm_op = circuit_fn @ operator_fn
self.assertEqual(no_perm_op.num_qubits, num_qubits)
indices = [0, 4]
perm_op = operator_fn.compose(circuit_fn, permutation=indices, front=True)
self.assertEqual(perm_op.num_qubits, max(indices) + 1)
# StateFn
num_qubits = 3
dim = 2**num_qubits
vec = [1.0/(i+1) for i in range(dim)]
dic = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
is_measurement = True
op_state_fn = OperatorStateFn(matrix_op, is_measurement=is_measurement) # num_qubit = 4
vec_state_fn = VectorStateFn(vec, is_measurement=is_measurement) # 3
dic_state_fn = DictStateFn(dic, is_measurement=is_measurement) # 3
circ_state_fn = CircuitStateFn(circuit_op.to_circuit(), is_measurement=is_measurement) # 2
composed_op = op_state_fn @ vec_state_fn @ dic_state_fn @ circ_state_fn
self.assertEqual(composed_op.num_qubits, op_state_fn.num_qubits)
# with permutation
perm = [2, 4, 6]
composed = \
op_state_fn @ dic_state_fn.compose(vec_state_fn, permutation=perm, front=True) @ \
circ_state_fn
self.assertEqual(composed.num_qubits, max(perm) + 1)
def test_summed_op_equals(self):
"""Test corner cases of SummedOp's equals function."""
with self.subTest('multiplicative factor'):
self.assertEqual(2 * X, X + X)
with self.subTest('commutative'):
self.assertEqual(X + Z, Z + X)
with self.subTest('circuit and paulis'):
z = CircuitOp(ZGate())
self.assertEqual(Z + z, z + Z)
with self.subTest('matrix op and paulis'):
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(Z + z, z + Z)
with self.subTest('matrix multiplicative'):
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(2 * z, z + z)
with self.subTest('parameter coefficients'):
expr = Parameter('theta')
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(expr * z, expr * z)
with self.subTest('different coefficient types'):
expr = Parameter('theta')
z = MatrixOp([[1, 0], [0, -1]])
self.assertNotEqual(expr * z, 2 * z)
with self.subTest('additions aggregation'):
z = MatrixOp([[1, 0], [0, -1]])
a = z + z + Z
b = 2 * z + Z
c = z + Z + z
self.assertEqual(a, b)
self.assertEqual(b, c)
self.assertEqual(a, c)
    def test_circuit_compose_register_independent(self):
        """Test that CircuitOp combines circuits independently of the register.
        I.e. that it uses ``QuantumCircuit.compose`` over ``combine`` or ``extend``.
"""
op = Z ^ 2
qr = QuantumRegister(2, 'my_qr')
circuit = QuantumCircuit(qr)
composed = op.compose(CircuitOp(circuit))
self.assertEqual(composed.num_qubits, 2)
def test_matrix_op_conversions(self):
"""Test to reveal QiskitError when to_instruction or to_circuit method is called on
parametrized matrix op."""
m = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, -1, 0, 0]])
matrix_op = MatrixOp(m, Parameter('beta'))
for method in ['to_instruction', 'to_circuit']:
with self.subTest(method):
# QiskitError: multiplication of Operator with ParameterExpression isn't implemented
self.assertRaises(QiskitError, getattr(matrix_op, method))
def test_list_op_to_circuit(self):
"""Test if unitary ListOps transpile to circuit. """
# generate unitary matrices of dimension 2,4,8, seed is fixed
np.random.seed(233423)
u2 = unitary_group.rvs(2)
u4 = unitary_group.rvs(4)
u8 = unitary_group.rvs(8)
# pauli matrices as numpy.arrays
x = np.array([[0.0, 1.0], [1.0, 0.0]])
y = np.array([[0.0, -1.0j], [1.0j, 0.0]])
z = np.array([[1.0, 0.0], [0.0, -1.0]])
# create MatrixOp and CircuitOp out of matrices
op2 = MatrixOp(u2)
op4 = MatrixOp(u4)
op8 = MatrixOp(u8)
c2 = op2.to_circuit_op()
# algorithm using only matrix operations on numpy.arrays
xu4 = np.kron(x, u4)
zc2 = np.kron(z, u2)
zc2y = np.kron(zc2, y)
matrix = np.matmul(xu4, zc2y)
matrix = np.matmul(matrix, u8)
matrix = np.kron(matrix, u2)
operator = Operator(matrix)
# same algorithm as above, but using PrimitiveOps
list_op = ((X ^ op4) @ (Z ^ c2 ^ Y) @ op8) ^ op2
circuit = list_op.to_circuit()
# verify that ListOp.to_circuit() outputs correct quantum circuit
self.assertTrue(operator.equiv(circuit), "ListOp.to_circuit() outputs wrong circuit!")
def test_composed_op_to_circuit(self):
"""
        Test if a unitary ComposedOp transpiles to a circuit and represents the expected operator.
        Test if to_circuit on a non-unitary ListOp raises an exception.
"""
x = np.array([[0.0, 1.0], [1.0, 0.0]]) # Pauli X as numpy array
y = np.array([[0.0, -1.0j], [1.0j, 0.0]]) # Pauli Y as numpy array
m1 = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]]) # non-unitary
m2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]]) # non-unitary
m_op1 = MatrixOp(m1)
m_op2 = MatrixOp(m2)
pm1 = (X ^ Y) ^ m_op1 # non-unitary TensoredOp
pm2 = (X ^ Y) ^ m_op2 # non-unitary TensoredOp
self.assertRaises(ExtensionError, pm1.to_circuit)
self.assertRaises(ExtensionError, pm2.to_circuit)
summed_op = pm1 + pm2 # unitary SummedOp([TensoredOp, TensoredOp])
circuit = summed_op.to_circuit() # should transpile without any exception
# same algorithm that leads to summed_op above, but using only arrays and matrix operations
unitary = np.kron(np.kron(x, y), m1 + m2)
self.assertTrue(Operator(unitary).equiv(circuit))
def test_op_to_circuit_with_parameters(self):
"""On parametrized SummedOp, to_matrix_op returns ListOp, instead of MatrixOp. To avoid
the infinite recursion, AquaError is raised. """
m1 = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]]) # non-unitary
m2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]]) # non-unitary
op1_with_param = MatrixOp(m1, Parameter('alpha')) # non-unitary
op2_with_param = MatrixOp(m2, Parameter('beta')) # non-unitary
summed_op_with_param = op1_with_param + op2_with_param # unitary
self.assertRaises(AquaError, summed_op_with_param.to_circuit) # should raise Aqua error
def test_permute_list_op_with_inconsistent_num_qubits(self):
"""Test if permute raises error if ListOp contains operators with different num_qubits."""
list_op = ListOp([X, X ^ X])
self.assertRaises(AquaError, list_op.permute, [0, 1])
@data(Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, -1]]))
def test_op_indent(self, op):
"""Test that indentation correctly adds INDENTATION at the beginning of each line"""
initial_str = str(op)
indented_str = op._indent(initial_str)
starts_with_indent = indented_str.startswith(op.INDENTATION)
self.assertTrue(starts_with_indent)
indented_str_content = (
indented_str[len(op.INDENTATION):]
).split("\n{}".format(op.INDENTATION))
self.assertListEqual(indented_str_content, initial_str.split("\n"))
def test_composed_op_immutable_under_eval(self):
"""Test ``ComposedOp.eval`` does not change the operator instance."""
op = 2 * ComposedOp([X])
_ = op.eval()
# previous bug: after op.eval(), op was 2 * ComposedOp([2 * X])
self.assertEqual(op, 2 * ComposedOp([X]))
def test_op_parameters(self):
"""Test that Parameters are stored correctly"""
phi = Parameter('φ')
theta = ParameterVector(name='θ',
length=2)
qc = QuantumCircuit(2)
qc.rz(phi, 0)
qc.rz(phi, 1)
for i in range(2):
qc.rx(theta[i], i)
qc.h(0)
qc.x(1)
l = Parameter('λ')
op = PrimitiveOp(qc,
coeff=l)
params = set([phi, l, *theta.params])
self.assertEqual(params, op.parameters)
self.assertEqual(params, StateFn(op).parameters)
self.assertEqual(params, StateFn(qc, coeff=l).parameters)
def test_list_op_parameters(self):
"""Test that Parameters are stored correctly in a List Operator"""
lam = Parameter('λ')
phi = Parameter('φ')
omega = Parameter('ω')
mat_op = PrimitiveOp([[0, 1],
[1, 0]],
coeff=omega)
qc = QuantumCircuit(1)
qc.rx(phi, 0)
qc_op = PrimitiveOp(qc)
op1 = SummedOp([mat_op, qc_op])
params = [phi, omega]
self.assertEqual(op1.parameters, set(params))
# check list nesting case
op2 = PrimitiveOp([[1, 0],
[0, -1]],
coeff=lam)
list_op = ListOp([op1, op2])
params.append(lam)
self.assertEqual(list_op.parameters, set(params))
@data(VectorStateFn([1, 0]),
DictStateFn({'0': 1}),
CircuitStateFn(QuantumCircuit(1)),
OperatorStateFn(I),
OperatorStateFn(MatrixOp([[1, 0], [0, 1]])),
OperatorStateFn(CircuitOp(QuantumCircuit(1))))
def test_statefn_eval(self, op):
"""Test calling eval on StateFn returns the statevector."""
expected = Statevector([1, 0])
self.assertEqual(op.eval().primitive, expected)
def test_to_circuit_op(self):
"""Test to_circuit_op method."""
vector = np.array([2, 2])
vsfn = VectorStateFn([1, 1], coeff=2)
dsfn = DictStateFn({'0': 1, '1': 1}, coeff=2)
for sfn in [vsfn, dsfn]:
np.testing.assert_array_almost_equal(
sfn.to_circuit_op().eval().primitive.data, vector
)
def test_invalid_primitive(self):
"""Test invalid MatrixOp construction"""
msg = "MatrixOp can only be instantiated with " \
"['list', 'ndarray', 'spmatrix', 'Operator'], not "
with self.assertRaises(TypeError) as cm:
_ = MatrixOp('invalid')
self.assertEqual(str(cm.exception), msg + "'str'")
with self.assertRaises(TypeError) as cm:
_ = MatrixOp(MatrixOperator(np.eye(2)))
self.assertEqual(str(cm.exception), msg + "'MatrixOperator'")
with self.assertRaises(TypeError) as cm:
_ = MatrixOp(None)
self.assertEqual(str(cm.exception), msg + "'NoneType'")
with self.assertRaises(TypeError) as cm:
_ = MatrixOp(2.0)
self.assertEqual(str(cm.exception), msg + "'float'")
def test_summedop_equals(self):
"""Test SummedOp.equals """
ops = [Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, -1]]), Zero, Minus]
sum_op = sum(ops + [ListOp(ops)])
self.assertEqual(sum_op, sum_op)
self.assertEqual(sum_op + sum_op, 2 * sum_op)
self.assertEqual(sum_op + sum_op + sum_op, 3 * sum_op)
ops2 = [Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, 1]]), Zero, Minus]
sum_op2 = sum(ops2 + [ListOp(ops)])
self.assertNotEqual(sum_op, sum_op2)
self.assertEqual(sum_op2, sum_op2)
sum_op3 = sum(ops)
self.assertNotEqual(sum_op, sum_op3)
self.assertNotEqual(sum_op2, sum_op3)
self.assertEqual(sum_op3, sum_op3)
class TestOpMethods(QiskitAquaTestCase):
"""Basic method tests."""
def test_listop_num_qubits(self):
"""Test that ListOp.num_qubits checks that all operators have the same number of qubits."""
op = ListOp([X ^ Y, Y ^ Z])
with self.subTest('All operators have the same numbers of qubits'):
self.assertEqual(op.num_qubits, 2)
op = ListOp([X ^ Y, Y])
with self.subTest('Operators have different numbers of qubits'):
with self.assertRaises(ValueError):
op.num_qubits # pylint: disable=pointless-statement
with self.assertRaises(ValueError):
X @ op # pylint: disable=pointless-statement
@ddt
class TestListOpMethods(QiskitAquaTestCase):
"""Test ListOp accessing methods"""
@data(ListOp, SummedOp, ComposedOp, TensoredOp)
def test_indexing(self, list_op_type):
"""Test indexing and slicing"""
coeff = 3 + .2j
states_op = list_op_type([X, Y, Z, I], coeff=coeff)
single_op = states_op[1]
self.assertIsInstance(single_op, OperatorBase)
self.assertNotIsInstance(single_op, ListOp)
list_one_element = states_op[1:2]
self.assertIsInstance(list_one_element, list_op_type)
self.assertEqual(len(list_one_element), 1)
self.assertEqual(list_one_element[0], Y)
list_two_elements = states_op[::2]
self.assertIsInstance(list_two_elements, list_op_type)
self.assertEqual(len(list_two_elements), 2)
self.assertEqual(list_two_elements[0], X)
self.assertEqual(list_two_elements[1], Z)
self.assertEqual(list_one_element.coeff, coeff)
self.assertEqual(list_two_elements.coeff, coeff)
class TestListOpComboFn(QiskitAquaTestCase):
"""Test combo fn is propagated."""
def setUp(self):
super().setUp()
self.combo_fn = lambda x: [x_i ** 2 for x_i in x]
self.listop = ListOp([X], combo_fn=self.combo_fn)
def assertComboFnPreserved(self, processed_op):
"""Assert the quadratic combo_fn is preserved."""
x = [1, 2, 3]
self.assertListEqual(processed_op.combo_fn(x), self.combo_fn(x))
def test_at_conversion(self):
"""Test after conversion the combo_fn is preserved."""
for method in ['to_matrix_op', 'to_pauli_op', 'to_circuit_op']:
with self.subTest(method):
converted = getattr(self.listop, method)()
self.assertComboFnPreserved(converted)
def test_after_mul(self):
"""Test after multiplication the combo_fn is preserved."""
self.assertComboFnPreserved(2 * self.listop)
def test_at_traverse(self):
"""Test after traversing the combo_fn is preserved."""
def traverse_fn(op):
return -op
traversed = self.listop.traverse(traverse_fn)
self.assertComboFnPreserved(traversed)
def test_after_adjoint(self):
"""Test after traversing the combo_fn is preserved."""
self.assertComboFnPreserved(self.listop.adjoint())
def test_after_reduce(self):
"""Test after reducing the combo_fn is preserved."""
self.assertComboFnPreserved(self.listop.reduce())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johanneswerner/EUKulele",
"score": 3
} |
#### File: EUKulele/scripts/query_busco.py
```python
import pandas as pd
import numpy as np
import os
import sys
import argparse
import chardet
import glob
import multiprocessing
from joblib import Parallel, delayed
__author__ = "<NAME>, <NAME>"
__copyright__ = "EUKulele"
__license__ = "MIT"
__email__ = "<EMAIL>"
level_hierarchy = ['supergroup','division','class','order','family','genus','species']
def evaluate_organism(organism, taxonomy, tax_table, create_fasta, write_transcript_file, busco_out,
taxonomy_file_prefix, busco_threshold, output_dir, sample_name, fasta_file):
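    """
    Score a single organism at a single taxonomic level: walk from the requested level up to
    supergroup, build a mock transcriptome from the transcripts classified to that taxon, and
    report which fraction of the sample's complete/fragmented/duplicated BUSCO markers it
    covers, together with duplication counts.
    """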
organism_format = organism
if organism == "":
print("No organism found", flush=True)
return pd.DataFrame(columns = ["Organism","TaxonomicLevel","BuscoCompleteness","NumberCovered",
"CtTwoCopies","CtThreeCopies","CtFourCopies","CtFivePlusCopies",
"PercentageDuplicated"])
if taxonomy == "species":
organism_format = " ".join(str(organism).split(";"))
full_taxonomy = tax_table.loc[[(organism_format in curr) for curr in list(tax_table[taxonomy])],:]
if len(full_taxonomy.index) < 1:
print("No taxonomy found for that organism " + str(organism) + " and taxonomic level " + str(taxonomy) + ".",
flush=True)
return pd.DataFrame(columns = ["Organism","TaxonomicLevel","BuscoCompleteness","NumberCovered",
"CtTwoCopies","CtThreeCopies","CtFourCopies","CtFivePlusCopies",
"PercentageDuplicated"])
curr_level = [ind for ind in range(len(level_hierarchy)) if level_hierarchy[ind] == taxonomy][0]
max_level = len(level_hierarchy) - 1
success = 0
success_level = ""
busco_scores = []
levels_out = []
percent_multiples = []
number_duplicated = []
number_tripled = []
number_quadrupled = []
number_higher_mult = []
number_covered = []
busco_out_file = pd.read_csv(busco_out, sep = "\t", comment = "#", names = ["BuscoID","Status","Sequence","Score","Length"])
select_inds = [ (busco_out_file.Status[curr] == "Complete") |
(busco_out_file.Status[curr] == "Fragmented") |
(busco_out_file.Status[curr] == "Duplicated") for curr in range(len(busco_out_file.index))]
good_buscos = busco_out_file.loc[select_inds,:]
good_busco_sequences = [curr.split(".")[0] for curr in list(good_buscos.Sequence)]
if len(good_busco_sequences) == 0:
print("No BUSCO matches were made",flush=True)
return pd.DataFrame(columns = ["Organism","TaxonomicLevel","BuscoCompleteness","NumberCovered",
"CtTwoCopies","CtThreeCopies","CtFourCopies","CtFivePlusCopies",
"PercentageDuplicated"])
Covered_IDs = list(good_buscos.BuscoID)
total_buscos = len(set(list(busco_out_file.BuscoID)))
while (curr_level >= 0):
#### GET THE CURRENT LEVEL OF TAXONOMY FROM THE TAX TABLE FILE ####
curr_tax_list = set(list(full_taxonomy[level_hierarchy[curr_level]]))
if len(curr_tax_list) > 1:
print("More than 1 unique match found; using all matches: " + str(", ".join(curr_tax_list)), flush=True)
curr_taxonomy = ";".join(curr_tax_list)
if (curr_taxonomy == "") | (curr_taxonomy.lower() == "nan"):
print("No taxonomy found at level " + level_hierarchy[curr_level], flush=True)
continue
#### CREATE A "MOCK TRANSCRIPTOME" BY PULLING BY TAXONOMIC LEVEL ####
taxonomy_file = pd.read_csv(taxonomy_file_prefix + "_all_" +
str(level_hierarchy[curr_level]) +
"_counts.csv", sep=",",header=0)
taxonomy_file = taxonomy_file.loc[[tax in curr_taxonomy for tax in list(taxonomy_file[level_hierarchy[curr_level].capitalize()])],:]
transcripts_to_search = list(taxonomy_file["GroupedTranscripts"])
transcripts_to_search_sep = []
for transcript_name in transcripts_to_search:
transcripts_to_search_sep.extend([curr.split(".")[0] for curr in transcript_name.split(";")])
set_transcripts_to_search = set(transcripts_to_search_sep)
good_busco_sequences_list = list(good_busco_sequences)
BUSCOs_covered_all = [Covered_IDs[curr] for curr in range(len(good_busco_sequences_list)) if \
good_busco_sequences_list[curr] in list(set_transcripts_to_search)]
BUSCOs_covered = set(BUSCOs_covered_all)
number_appearences = [BUSCOs_covered_all.count(curr_busco) for curr_busco in list(BUSCOs_covered)]
multiples = [curr_appear for curr_appear in number_appearences if curr_appear >= 2]
## KEEP TRACK OF WHETHER DUPLICATES/TRIPLES/ETC. ARE COMMON ##
number_covered.append(number_appearences)
number_duplicated.append(number_appearences.count(2))
number_tripled.append(number_appearences.count(3))
number_quadrupled.append(number_appearences.count(4))
number_higher_mult.append(len([curr_appear for curr_appear in number_appearences if curr_appear > 4]))
prop_duplicated = 0
if len(BUSCOs_covered) > 0:
prop_duplicated = len(multiples) / len(BUSCOs_covered) * 100
percent_multiples.append(prop_duplicated)
busco_completeness = len(BUSCOs_covered) / total_buscos * 100
busco_scores.append(busco_completeness)
levels_out.append(level_hierarchy[curr_level])
if busco_completeness >= busco_threshold:
success = 1
success_level = level_hierarchy[curr_level]
curr_level = curr_level - 1
report_dir = os.path.join(output_dir, "busco_assessment", "output_by_level", taxonomy,
"_".join(organism_format.replace("(", "_").replace(")", "_").replace("'","").split(" ")))
os.system("mkdir -p " + report_dir)
report_file = os.path.join(report_dir, sample_name + "_report.txt")
reported = open(report_file,"w")
reversed_scores = busco_scores
reversed_scores.reverse()
if success == 1:
file_written = os.path.join(report_dir, level_hierarchy[curr_level] + "_" + sample_name + ".txt")
if (write_transcript_file):
with open(file_written, 'w') as filehandle:
for transcript_name in transcripts_to_search_sep:
filehandle.write(transcript_name + '\n')
if (create_fasta):
mock_file_name = os.path.join(report_dir, organism + "_" + level_hierarchy[curr_level] + "_" +
sample_name + "_transcripts.fasta")
os.system("grep -w -A 2 -f " + file_written + " " + fasta_file + " --no-group-separator > " + mock_file_name)
reported.write("Taxonomy file successfully completed with BUSCO completeness " + str(busco_completeness) +
"% at location " + str(file_written) + "\n This was at taxonomic level " + str(success_level) +
". \n The file containing the transcript names for the mock transcriptome corresponding to this "
"taxonomic level is located here: " + str(file_written) + ".\n")
reported.write("The BUSCO scores found at the various taxonomic levels (Supergroup to " +
str(taxonomy) + ") were: " + " ".join([str(curr) for curr in reversed_scores]))
else:
reported.write("Sufficient BUSCO completeness not found at threshold " + str(busco_threshold) + "%. \n")
reported.write("The BUSCO scores found at the various taxonomic levels (Supergroup to " + str(taxonomy) +
") were: " + " ".join([str(curr) for curr in reversed_scores]) + "\n")
reported.close()
reversed_levels = levels_out
reversed_levels.reverse()
return pd.DataFrame({"Organism":[organism] * len(levels_out),"TaxonomicLevel":reversed_levels,
"BuscoCompleteness":busco_scores,"NumberCovered":number_covered,"CtTwoCopies":number_duplicated,
"CtThreeCopies":number_tripled,"CtFourCopies":number_quadrupled,"CtFivePlusCopies":number_higher_mult,
"PercentageDuplicated":percent_multiples})
def read_in_taxonomy(infile):
with open(infile, 'rb') as f:
result = chardet.detect(f.read())
tax_out = pd.read_csv(infile, sep='\t',encoding=result['encoding'])
classes = ['supergroup','division','class','order','family','genus','species']
for c in tax_out.columns:
if c.lower() in classes:
if (np.issubdtype(tax_out[str(c)].dtypes, np.number)) | (np.issubdtype(tax_out[str(c)].dtypes, np.float_)):
tax_out = tax_out.loc[:,~(tax_out.columns == c)]
tax_out.columns = tax_out.columns.str.lower()
tax_out = tax_out.set_index('source_id')
return tax_out
def queryBusco(args=None):
"""
    Search through individual BUSCO outputs to find the number of matches for each organism/taxonomic level.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--busco_out',default="busco", help = "The output from the BUSCO run on the full sample file.")
parser.add_argument('--individual_or_summary','-i',default="summary",choices=["summary","individual"],
help = "Not necessary if we are running in summary mode.")
parser.add_argument('--organism_group', nargs = "+", default = [],
help = "The focal name(s) of species/genus/order/class.")
parser.add_argument('--taxonomic_level', nargs = "+", default = [],
help = "The taxonomic level(s) of the specified focal name.")
parser.add_argument('--fasta_file',
help = "The fasta file from which we pull sequences for the mock transcriptome.")
parser.add_argument('--taxonomy_file_prefix',
help = "The taxonomy file we use to create the mock transcriptome.")
parser.add_argument('--tax_table',
help = "the taxonomy table to get the full classification of the organism, " +
"as assessed by the database being used.")
parser.add_argument('--sample_name', help = "The name of the original sample being assessed.")
parser.add_argument('--download_busco',action='store_true',
help = "If specified, we download BUSCO file from the url in the next argument.")
parser.add_argument('--create_fasta',action='store_true',
help = "If specified, we create a 'transcriptome fasta' when we query for the organisms.")
parser.add_argument('--busco_url',default=0)
parser.add_argument('--busco_location',default="busco",
help = "Location to store the BUSCO tar reference.")
parser.add_argument('--output_dir',default="output")
parser.add_argument('--available_cpus',default=1)
parser.add_argument('--busco_threshold',default=50)
parser.add_argument('--write_transcript_file', default=False, action='store_true',
help = "Whether to write an actual file with the subsetted transcriptome.")
if args != None:
args = parser.parse_args(args)
else:
args = parser.parse_args()
organism = args.organism_group
taxonomy = args.taxonomic_level
tax_table = read_in_taxonomy(args.tax_table)
if (args.individual_or_summary == "individual") & ((len(args.organism_group) == 0) | (len(args.taxonomic_level) == 0)):
print("You specified individual mode, but then did not provide a taxonomic group and/or accompanying taxonomic level.",
flush=True)
sys.exit(1)
    if len(args.organism_group) != len(args.taxonomic_level):
        print("The number of organisms you specified is not equal to the number of taxonomic levels you specified. " +
              str(len(args.organism_group)) + " organisms were specified, while " + str(len(args.taxonomic_level)) +
              " taxonomic levels were specified.", flush=True)
sys.exit(1)
if (args.individual_or_summary == "individual"):
results_frame = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(evaluate_organism)(organism[curr],
taxonomy[curr], tax_table,
args.create_fasta,
args.write_transcript_file,
args.busco_out,
args.taxonomy_file_prefix,
float(args.busco_threshold),
args.output_dir,
args.sample_name,
args.fasta_file) \
for curr in range(len(organism)))
print(results_frame,flush=True)
results_frame = pd.concat(results_frame)
os.system("mkdir -p " + os.path.join(args.output_dir, "busco_assessment", args.sample_name, "individual"))
results_frame.to_csv(path_or_buf = os.path.join(args.output_dir, "busco_assessment",
args.sample_name, "individual", "summary_" +
args.sample_name + ".tsv"), sep = "\t")
else:
for taxonomy in level_hierarchy:
taxonomy_file = pd.read_csv(args.taxonomy_file_prefix + "_all_" + str(taxonomy) +
"_counts.csv", sep=",",header=0)
if len(taxonomy_file.index) > 0:
curr_frame = taxonomy_file.nlargest(multiprocessing.cpu_count(), 'NumTranscripts')
organisms = list(set(list(curr_frame[taxonomy.capitalize()])))
results_frame = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(evaluate_organism)(organism, taxonomy, tax_table, args.create_fasta, args.write_transcript_file, args.busco_out, args.taxonomy_file_prefix, args.busco_threshold,
args.output_dir,
args.sample_name,
args.fasta_file) \
for organism in organisms)
results_frame = pd.concat(results_frame)
else:
results_frame = pd.DataFrame(columns = ["Organism","TaxonomicLevel","BuscoCompleteness",
"NumberCovered","CtTwoCopies",
"CtThreeCopies","CtFourCopies","CtFivePlusCopies",
"PercentageDuplicated"])
os.system("mkdir -p " + os.path.join(args.output_dir, "busco_assessment", args.sample_name,
taxonomy + "_combined"))
results_frame.to_csv(path_or_buf = os.path.join(args.output_dir, "busco_assessment",
args.sample_name, taxonomy + "_combined",
"summary_" + taxonomy + "_" + args.sample_name +
".tsv"), sep = "\t")
return 0
if __name__ == "__main__":
queryBusco()
```
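A minimal invocation sketch (the paths are hypothetical placeholders, and the import path is assumed from the repository layout): `queryBusco` parses an explicit argument list when one is passed, so the summary mode of the script above can be driven directly from Python.
```python
from scripts.query_busco import queryBusco  # assumed import path

# Hypothetical inputs; summary mode scores the most transcript-rich groups at every level.
queryBusco(args=[
    "--busco_out", "busco_run/full_table.tsv",
    "--fasta_file", "samples/sample1.fasta",
    "--taxonomy_file_prefix", "output/taxonomy_counts/sample1",
    "--tax_table", "reference/tax-table.txt",
    "--sample_name", "sample1",
    "--output_dir", "output",
    "--individual_or_summary", "summary",
])
```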
#### File: src/EUKulele/manage_steps.py
```python
import os
import sys
import subprocess
import multiprocessing
from joblib import Parallel, delayed
import shutil
import pathlib
import pandas as pd
import math
import EUKulele
from EUKulele.tax_placement import place_taxonomy
from EUKulele.visualize_results import visualize_all_results
from scripts.mag_stats import magStats
MEM_AVAIL_GB = 0
while MEM_AVAIL_GB == 0:
try:
os.system("free -m > free.csv")
MEM_AVAIL_GB = pd.read_csv("free.csv", sep = "\s+").free[0] / 10**3
except:
pass
# 25 GB memory per GB file size
# add a parameter that changes the memory requirement per GB of file size (reduced to 10 GB for now)
# also add a parameter to EUKulele that decides what fraction of the available memory to use and scales
# MEM_AVAIL_GB by that amount (default to 75%)
def calc_max_jobs(num_files, size_in_bytes = 2147483648, max_mem_per_proc = 10, perc_mem = 0.75):
size_in_gb = size_in_bytes / (1024*1024*1024)
if size_in_gb == 0:
size_in_gb = 0.01
MAX_JOBS = math.floor(MEM_AVAIL_GB * perc_mem / (max_mem_per_proc * size_in_gb * num_files)) #48)
if MAX_JOBS == 0:
MAX_JOBS = 1
return MAX_JOBS
MAX_JOBS = max(1, calc_max_jobs(1))
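# A worked example of the heuristic above (hypothetical numbers, kept as a comment so it never
# runs as part of the pipeline): with 48 GB of free memory, 75% of it usable, 10 GB required per
# GB of input and a single 2 GB file, the cap is floor(48 * 0.75 / (10 * 2 * 1)) = 1 job;
# the max(1, ...) guard keeps at least one job even when memory is scarce, e.g.
#     example_jobs = max(1, calc_max_jobs(num_files=1, size_in_bytes=2 * 1024**3))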
# For DIAMOND: The program can be expected to use roughly six times this number of memory (in GB).
# So for the default value of -b2.0, the memory usage will be about 12 GB.
# So we want alignment to have -b6.0
def manageEukulele(piece, mets_or_mags = "", samples = [], database_dir = "",
output_dir = "", ref_fasta = "", alignment_choice = "diamond",
rerun_rules = False, cutoff_file = "", sample_dir = "", nt_ext = "", pep_ext = "",
consensus_cutoff = 0.75, tax_tab = "", prot_tab = "", use_salmon_counts = False,
names_to_reads = "", alignment_res = "", filter_metric = "evalue",
run_transdecoder = False, transdecoder_orf_size = 100, perc_mem = 0.75):
"""
This function diverts management tasks to the below helper functions.
"""
if piece == "setup_eukulele":
setupEukulele(output_dir)
elif piece == "setup_databases":
createAlignmentDatabase(ref_fasta, rerun_rules, output_dir, alignment_choice, database_dir)
elif piece == "get_samples":
return getSamples(mets_or_mags, sample_dir, nt_ext, pep_ext)
elif piece == "transdecode":
if mets_or_mags == "mets":
manageTrandecode(samples, output_dir, rerun_rules, sample_dir,
mets_or_mags = "mets", transdecoder_orf_size = 100,
nt_ext = "." + nt_ext.strip('.'), pep_ext = "." + pep_ext.strip('.'),
run_transdecoder = run_transdecoder, perc_mem = perc_mem)
elif piece == "align_to_db":
return manageAlignment(alignment_choice, samples, filter_metric, output_dir, ref_fasta,
mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext, core = "full",
perc_mem = perc_mem)
elif piece == "estimate_taxonomy":
manageTaxEstimation(output_dir, mets_or_mags, tax_tab, cutoff_file, consensus_cutoff,
prot_tab, use_salmon_counts, names_to_reads, alignment_res,
rerun_rules, samples, sample_dir, pep_ext, nt_ext, perc_mem)
elif piece == "visualize_taxonomy":
manageTaxVisualization(output_dir, mets_or_mags, sample_dir, pep_ext, nt_ext,
use_salmon_counts, rerun_rules)
elif piece == "assign_taxonomy":
manageTaxAssignment(samples, mets_or_mags, output_dir, sample_dir, pep_ext, core = False)
elif piece == "core_align_to_db":
alignment_res = manageAlignment(alignment_choice, samples, filter_metric, output_dir, ref_fasta,
mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext, core = "core",
perc_mem = perc_mem)
alignment_res = [curr for curr in alignment_res if curr != ""]
return alignment_res
elif piece == "core_estimate_taxonomy":
manageCoreTaxEstimation(output_dir, mets_or_mags, tax_tab, cutoff_file, consensus_cutoff,
prot_tab, use_salmon_counts, names_to_reads, alignment_res,
rerun_rules, samples, sample_dir, pep_ext, nt_ext, perc_mem)
elif piece == "core_visualize_taxonomy":
manageCoreTaxVisualization(output_dir, mets_or_mags, sample_dir, pep_ext, nt_ext,
use_salmon_counts, rerun_rules, core = True)
elif piece == "core_assign_taxonomy":
manageTaxAssignment(samples, mets_or_mags, output_dir, sample_dir, pep_ext, core = True)
else:
print("Not a supported management function.")
sys.exit(1)
def getSamples(mets_or_mags, sample_dir, nt_ext, pep_ext):
"""
Get the names of the metagenomic or metatranscriptomic samples from the provided folder.
"""
if (mets_or_mags == "mets"):
samples_nt = [".".join(curr.split(".")[0:-1]) for curr in os.listdir(sample_dir) if curr.split(".")[-1] == nt_ext]
samples_pep = [".".join(curr.split(".")[0:-1]) for curr in os.listdir(sample_dir) if curr.split(".")[-1] == pep_ext]
samples = list(set(samples_nt + samples_pep))
print(samples)
if len(samples) == 0:
print("No samples found in sample directory with specified nucleotide or peptide extension.")
sys.exit(1)
else:
samples = [".".join(curr.split(".")[0:-1]) for curr in os.listdir(sample_dir) if curr.split(".")[-1] == pep_ext]
if len(samples) == 0:
print("No samples found in sample directory with specified peptide extension.")
sys.exit(1)
return samples
def transdecodeToPeptide(sample_name, output_dir, rerun_rules, sample_dir,
mets_or_mags = "mets", transdecoder_orf_size = 100,
nt_ext = ".fasta", pep_ext = ".faa", run_transdecoder = False):
"""
Use TransDecoder to convert input nucleotide metatranscriptomic sequences to peptide sequences.
"""
if (not run_transdecoder):
return 0
print("Running TransDecoder for sample " + str(sample_name) + "...", flush = True)
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
if (os.path.isfile(os.path.join(output_dir, mets_or_mags,
sample_name + pep_ext))) & (not rerun_rules):
print("TransDecoder file already detected for sample " +
str(sample_name) + "; will not re-run step.", flush = True)
return 0
elif (os.path.isfile(os.path.join(sample_dir, sample_name + pep_ext))) & (not rerun_rules):
print("Protein files detected for sample in sample directory; " +
"will not TransDecode.", flush = True)
os.system("cp " + os.path.join(sample_dir, sample_name + pep_ext) + " " +
os.path.join(output_dir, mets_or_mags, sample_name + pep_ext))
return 0
TD_log = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".log"), "w+")
TD_err = open(os.path.join(output_dir,"log","transdecoder_longorfs_" + sample_name + ".err"), "w+")
if (not os.path.isfile(os.path.join(sample_dir, sample_name + nt_ext))):
print("File: " + os.path.join(sample_dir, sample_name + nt_ext) + " was called by TransDecoder and "
"does not exist. Check for typos.")
sys.exit(1)
rc1 = subprocess.Popen(["TransDecoder.LongOrfs", "-t", os.path.join(sample_dir, sample_name + nt_ext),
"-m", str(transdecoder_orf_size)], stdout = TD_log, stderr = TD_err).wait()
TD_log.close()
TD_err.close()
TD_log = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".log"), "w+")
TD_err = open(os.path.join(output_dir,"log","transdecoder_predict_" + sample_name + ".err"), "w+")
rc2 = subprocess.Popen(["TransDecoder.Predict", "-t", os.path.join(sample_dir, sample_name + nt_ext),
"--no_refine_starts"], stdout = TD_log, stderr = TD_err).wait()
#rc2 = p2.returncode
TD_log.close()
TD_err.close()
if (rc1 + rc2) != 0:
print("TransDecoder did not complete successfully for sample " +
str(sample_name) + ". Check <output_dir>/log/ folder for details.")
sys.exit(1)
merged_name = sample_name + nt_ext
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags))
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags, "transdecoder"))
os.replace(merged_name + ".transdecoder.pep", os.path.join(output_dir, mets_or_mags,
sample_name + pep_ext))
os.replace(merged_name + ".transdecoder.cds", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.cds"))
os.replace(merged_name + ".transdecoder.gff3", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.gff3"))
os.replace(merged_name + ".transdecoder.bed", os.path.join(output_dir, mets_or_mags,
"transdecoder", sample_name +
".fasta.transdecoder.bed"))
#shutil.rmtree
os.system("rm -rf " + merged_name + "*.transdecoder_dir*")
return rc1 + rc2
def manageTrandecode(met_samples, output_dir, rerun_rules, sample_dir,
mets_or_mags = "mets", transdecoder_orf_size = 100,
nt_ext = "fasta", pep_ext = ".faa", run_transdecoder = False, perc_mem = 0.75):
"""
Now for some TransDecoding - a manager for TransDecoder steps.
"""
if (not run_transdecoder):
return 0
print("Running TransDecoder for MET samples...", flush = True)
MAX_JOBS = 1
MAX_JOBS_SAMPS = [calc_max_jobs(len(met_samples), pathlib.Path(os.path.join(sample_dir, sample + nt_ext)).stat().st_size,
max_mem_per_proc = 48, perc_mem = perc_mem) \
for sample in met_samples \
if os.path.isfile(os.path.join(sample_dir, sample + nt_ext))]
if len(MAX_JOBS_SAMPS) > 0:
MAX_JOBS = min([calc_max_jobs(len(met_samples), pathlib.Path(os.path.join(sample_dir, sample + nt_ext)).stat().st_size,
max_mem_per_proc = 48, perc_mem = perc_mem) \
for sample in met_samples \
if os.path.isfile(os.path.join(sample_dir, sample + nt_ext))])
n_jobs_align = min(multiprocessing.cpu_count(), len(met_samples), max(1,MAX_JOBS))
transdecoder_res = Parallel(n_jobs=n_jobs_align)(delayed(transdecodeToPeptide)(sample_name, output_dir,
rerun_rules, sample_dir,
mets_or_mags = "mets", transdecoder_orf_size = 100,
nt_ext = nt_ext, pep_ext = pep_ext,
run_transdecoder = run_transdecoder) for sample_name in met_samples)
all_codes = sum(transdecoder_res)
os.system("rm -f pipeliner*")
if all_codes > 0:
print("TransDecoder did not complete successfully; check log folder for details.")
sys.exit(1)
#rcodes = [os.remove(curr) for curr in glob.glob("pipeliner*")]
def setupEukulele(output_dir):
print("Setting things up...")
os.system("mkdir -p " + output_dir)
os.system("mkdir -p " + os.path.join(output_dir, "log"))
## Download software dependencies
rc1 = os.system("install_dependencies.sh references_bins/ " +
"1> " + os.path.join(output_dir, "log", "dependency_log.txt") + " 2> " +
os.path.join(output_dir, "log", "dependency_err.txt"))
sys.path.append("references_bins/")
os.system("echo $PATH > path_test.txt")
if rc1 != 0:
print("Could not successfully install all external dependent software.\n" +
"Check DIAMOND, BLAST, BUSCO, and TransDecoder installation.")
return 1
return 0
def manageAlignment(alignment_choice, samples, filter_metric, output_dir, ref_fasta,
mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext, core = "full",
perc_mem = 0.75):
"""
Manage the multithreaded management of aligning to either BLAST or DIAMOND database.
"""
print("Aligning to reference database...")
if mets_or_mags == "mets":
fastas = []
for sample in samples:
if os.path.isfile(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext)):
fp = open(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext))
for i, line in enumerate(fp):
if i == 2:
chars = set(list(line))
if len(chars) <= 5:
print("Peptide extension used, but this file, " +
str(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext)) +
", does not appear to be a peptide file.")
break
elif i > 2:
fastas.append(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext))
break
fp.close()
elif os.path.isfile(os.path.join(sample_dir, sample + "." + pep_ext)):
fp = open(os.path.join(sample_dir, sample + "." + pep_ext))
for i, line in enumerate(fp):
if i == 2:
chars = set(list(line))
if len(chars) <= 5:
print("Peptide extension used, but this file, " +
str(os.path.join(sample_dir, sample + "." + pep_ext)) +
", does not appear to be a peptide file.")
break
elif i > 2:
fastas.append(os.path.join(sample_dir, sample + "." + pep_ext))
break
fastas.append(os.path.join(sample_dir, sample + "." + pep_ext))
else:
fastas.append(os.path.join(sample_dir, sample + "." + nt_ext))
else:
fastas = [os.path.join(sample_dir, sample + "." + pep_ext) for sample in samples]
print(fastas)
MAX_JOBS = 1
if len(fastas) > 0:
MAX_JOBS = min([calc_max_jobs(len(fastas), pathlib.Path(sample).stat().st_size,
max_mem_per_proc = 10, perc_mem = perc_mem) for sample in fastas])
n_jobs_align = min(multiprocessing.cpu_count(), len(samples), max(1,MAX_JOBS))
alignment_res = Parallel(n_jobs=n_jobs_align, prefer="threads")(delayed(alignToDatabase)(alignment_choice,
sample_name, filter_metric,
output_dir, ref_fasta,
mets_or_mags, database_dir,
sample_dir, rerun_rules, nt_ext,
pep_ext, core = core) \
for sample_name in samples)
#alignment_res = []
#for sample_name in samples:
# alignment_res.append(alignToDatabase(alignment_choice, sample_name, filter_metric, output_dir, ref_fasta,
# mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext))
if any([((curr == None) | (curr == 1)) for curr in alignment_res]):
print("Alignment did not complete successfully.")
sys.exit(1)
return alignment_res
def createAlignmentDatabase(ref_fasta, rerun_rules, output_dir, alignment_choice="diamond", database_dir=""):
"""
Creates a database from the provided reference fasta file and reference database,
whether or not it has been autogenerated.
"""
rc2 = 0
output_log = os.path.join(output_dir, "log", "alignment_out.log")
error_log = os.path.join(output_dir, "log", "alignment_err.log")
if alignment_choice == "diamond":
align_db = os.path.join(database_dir, "diamond", ref_fasta.strip('.fa') + '.dmnd')
if (not os.path.isfile(align_db)) | (rerun_rules):
## DIAMOND database creation ##
os.system("mkdir -p " + os.path.join(database_dir, "diamond"))
db = os.path.join(database_dir, "diamond", ref_fasta.strip('.fa'))
rc2 = os.system("diamond makedb --in " + os.path.join(database_dir, ref_fasta) + " --db " + db +
" 1> " + output_log + " 2> " + error_log)
else:
print("Diamond database file already created; will not re-create database.", flush = True)
else:
db = os.path.join(database_dir, "blast", ref_fasta.strip('.fa'), "database")
os.system("mkdir -p " + db)
db_type = "prot"
blast_version = 5
database = ref_fasta.strip('.')
#os.system("cut -f 1 -d' ' " + os.path.join(database_dir, ref_fasta) + " > " +
# os.path.join(database_dir, ref_fasta + "decoy.pep.fa"))
#os.system("perl -i -pe 's/$/_$seen{$_}/ if ++$seen{$_}>1 and /^>/; ' " +
# os.path.join(database_dir, ref_fasta + "decoy.pep.fa"))
# os.system("sed -i $'s/ //g' " + os.path.join(database_dir, ref_fasta))
# makeblastdb -in tests/aux_data/mmetsp/sample_ref_MAG/reference.pep.fa -parse_seqids -title referencefa -dbtype prot -out tests/aux_data/mmetsp/sample_ref_MAG/blast/reference.pep/database
rc2 = os.system("makeblastdb -in " + os.path.join(database_dir, ref_fasta) +
" -parse_seqids -title " + database +
" -dbtype " + db_type + " -out " + db + " 1> " + output_log + " 2> " +
error_log)
return rc2
def alignToDatabase(alignment_choice, sample_name, filter_metric, output_dir, ref_fasta,
mets_or_mags, database_dir, sample_dir, rerun_rules, nt_ext, pep_ext, core = "full"):
"""
Align the samples against the created database.
"""
print("Aligning sample " + sample_name + "...")
if alignment_choice == "diamond":
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags + "_" + core, "diamond"))
diamond_out = os.path.join(output_dir, mets_or_mags + "_" + core, "diamond", sample_name + ".diamond.out")
if (os.path.isfile(diamond_out)):
if (pathlib.Path(diamond_out).stat().st_size != 0) & (not rerun_rules):
print("Diamond alignment file already detected; will not re-run step.")
return diamond_out
align_db = os.path.join(database_dir, "diamond", ref_fasta.strip('.fa') + '.dmnd')
alignment_method = "blastp"
if (mets_or_mags == "mets") & (core == "full"):
if os.path.isfile(os.path.join(output_dir, mets_or_mags, sample_name + "." + pep_ext)):
fasta = os.path.join(output_dir, mets_or_mags, sample_name + "." + pep_ext)
elif os.path.isfile(os.path.join(sample_dir, sample_name + "." + pep_ext)):
fasta = os.path.join(sample_dir, sample_name + "." + pep_ext)
else:
fasta = os.path.join(sample_dir, sample_name + "." + nt_ext)
alignment_method = "blastx"
elif core == "full":
fasta = os.path.join(sample_dir, sample_name + "." + pep_ext)
elif core == "core":
# now concatenate the BUSCO output
fasta = os.path.join(output_dir, sample_name + "_busco" + "." + pep_ext)
os.system(" ".join(["concatenate_busco.sh", sample_name, fasta, output_dir]))
if not os.path.isfile(fasta):
print("No BUSCO matches found for sample: " + sample_name)
return ""
other = "--outfmt 6 -k 100 -e 1e-5"
outfmt = 6
k = 100
e = 1e-5
bitscore = 50
pid_cutoff = 75
diamond_log = open(os.path.join(output_dir,"log",core + "_diamond_align_" + sample_name + ".log"), "w+")
diamond_err = open(os.path.join(output_dir,"log",core + "_diamond_align_" + sample_name + ".err"), "w+")
if filter_metric == "bitscore":
rc1 = subprocess.Popen(["diamond", alignment_method, "--db", align_db, "-q", fasta, "-o",
diamond_out, "--outfmt", str(outfmt), "-k", str(k), "--min-score",
str(bitscore), '-b3.0'], stdout = diamond_log, stderr = diamond_err).wait()
print("Diamond process exited for sample " + str(sample_name) + ".", flush = True)
elif filter_metric == "pid":
rc1 = subprocess.Popen(["diamond", alignment_method, "--db", align_db, "-q", fasta, "-o",
diamond_out, "--outfmt", str(outfmt), "-k", str(k), "--id",
str(pid_cutoff), '-b3.0'], stdout = diamond_log, stderr = diamond_err).wait()
print("Diamond process exited for sample " + str(sample_name) + ".", flush = True)
else:
rc1 = subprocess.Popen(["diamond", alignment_method, "--db", align_db, "-q", fasta, "-o",
diamond_out, "--outfmt", str(outfmt), "-k", str(k), "-e",
str(e), '-b3.0'], stdout = diamond_log, stderr = diamond_err).wait()
# For debugging.
#, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#stdout, stderr = p.communicate()
#rc1 = p.returncode
#print(stderr)
#print(stdout)
print("Diamond process exited for sample " + str(sample_name) + ".", flush = True)
if rc1 != 0:
print("Diamond did not complete successfully for sample",str(sample_name),"with rc code",str(rc1))
os.system("rm -f " + diamond_out)
return 1
return diamond_out
else:
blast_out = os.path.join(output_dir, mets_or_mags + "_" + core, "blast", sample_name + ".blast.txt")
os.system("mkdir -p " + os.path.join(output_dir, mets_or_mags + "_" + core, "blast"))
if (os.path.isfile(blast_out)) & (not rerun_rules):
print("BLAST alignment file already detected; will not re-run step.")
return blast_out
align_db = os.path.join(database_dir, "blast", ref_fasta.strip('.fa'), "database")
alignment_method = "blastp"
if mets_or_mags == "mets":
if os.path.isfile(os.path.join(output_dir, mets_or_mags, sample_name + "." + pep_ext)):
fasta = os.path.join(output_dir, mets_or_mags, sample_name + "." + pep_ext)
elif os.path.isfile(os.path.join(sample_dir, sample_name + "." + pep_ext)):
fasta = os.path.join(sample_dir, sample_name + "." + pep_ext)
else:
fasta = os.path.join(sample_dir, sample_name + "." + nt_ext)
alignment_method = "blastx"
elif core == "full":
fasta = os.path.join(sample_dir, sample_name + "." + pep_ext)
elif core == "core":
# now concatenate the BUSCO output
fasta = os.path.join(output_dir, sample_name + "_busco" + "." + pep_ext)
os.system(" ".join(["concatenate_busco.sh", sample_name, fasta, output_dir]))
if not os.path.isfile(fasta):
print("No BUSCO matches found for sample: " + sample_name)
return ""
outfmt = 6 # tabular output format
e = 1e-5
os.system("export BLASTDB=" + align_db)
blast_log = open(os.path.join(output_dir,"log","blast_align_" + sample_name + ".log"), "w+")
blast_err = open(os.path.join(output_dir,"log","blast_align_" + sample_name + ".err"), "w+")
rc1 = subprocess.Popen([alignment_method, "-query", fasta, "-db", align_db, "-out",
blast_out,"-outfmt",str(outfmt),"-evalue", str(e)],
stdout = blast_log, stderr = blast_err).wait()
if rc1 != 0:
print("BLAST did not complete successfully.")
return 1
return blast_out
def manageTaxEstimation(output_dir, mets_or_mags, tax_tab, cutoff_file, consensus_cutoff,
prot_tab, use_salmon_counts, names_to_reads, alignment_res,
rerun_rules, samples, sample_dir, pep_ext, nt_ext, perc_mem):
print("Performing taxonomic estimation steps...", flush=True)
os.system("mkdir -p " + os.path.join(output_dir, "taxonomy_estimation"))
outfiles = [os.path.join(output_dir, "taxonomy_estimation", samp + "-estimated-taxonomy.out") for samp in samples]
if mets_or_mags == "mets":
fastas = []
for sample in samples:
if os.path.isfile(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext)):
fastas.append(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext))
elif os.path.isfile(os.path.join(sample_dir, sample + "." + pep_ext)):
fastas.append(os.path.join(sample_dir, sample + "." + pep_ext))
else:
fastas.append(os.path.join(sample_dir, sample + "." + nt_ext))
else:
fastas = [os.path.join(sample_dir, sample + "." + pep_ext) for sample in samples]
MAX_JOBS = min([calc_max_jobs(len(fastas), pathlib.Path(sample).stat().st_size,
max_mem_per_proc = 5, perc_mem = perc_mem) for sample in fastas])
n_jobs_align = min(multiprocessing.cpu_count(), len(alignment_res), max(1, MAX_JOBS))
for t in range(len(alignment_res)):
#curr_out = place_taxonomy(tax_tab, cutoff_file, consensus_cutoff,\
# prot_tab, use_salmon_counts, names_to_reads,\
# alignment_res[t], outfiles[t], rerun_rules)
try:
sys.stdout = open(os.path.join(output_dir, "log", "tax_est_" +
alignment_res[t].split("/")[-1].split(".")[0] + ".out"), "w")
sys.stderr = open(os.path.join(output_dir, "log", "tax_est_" +
alignment_res[t].split("/")[-1].split(".")[0] + ".err"), "w")
curr_out = place_taxonomy(tax_tab, cutoff_file, consensus_cutoff,\
prot_tab, use_salmon_counts, names_to_reads,\
alignment_res[t], outfiles[t], rerun_rules)
except:
print("Taxonomic estimation did not complete successfully. Check log file for details.")
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def manageCoreTaxEstimation(output_dir, mets_or_mags, tax_tab, cutoff_file, consensus_cutoff,
prot_tab, use_salmon_counts, names_to_reads, alignment_res,
rerun_rules, samples, sample_dir, pep_ext, nt_ext, perc_mem):
print("Performing taxonomic estimation steps...", flush=True)
os.system("mkdir -p " + os.path.join(output_dir, "core_taxonomy_estimation"))
outfiles = [os.path.join(output_dir, "core_taxonomy_estimation", samp + "-estimated-taxonomy.out") for samp in samples]
if mets_or_mags == "mets":
fastas = []
for sample in samples:
if os.path.isfile(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext)):
fastas.append(os.path.join(output_dir, mets_or_mags, sample + "." + pep_ext))
elif os.path.isfile(os.path.join(sample_dir, sample + "." + pep_ext)):
fastas.append(os.path.join(sample_dir, sample + "." + pep_ext))
else:
fastas.append(os.path.join(sample_dir, sample + "." + nt_ext))
else:
fastas = [os.path.join(sample_dir, sample + "." + pep_ext) for sample in samples]
MAX_JOBS = min([calc_max_jobs(len(fastas), pathlib.Path(sample).stat().st_size,
max_mem_per_proc = 10, perc_mem = perc_mem) for sample in fastas])
n_jobs_align = min(multiprocessing.cpu_count(), len(alignment_res), MAX_JOBS)
for t in range(len(alignment_res)):
try:
sys.stdout = open(os.path.join(output_dir, "log", "core_tax_est_" +
alignment_res[t].split("/")[-1].split(".")[0] + ".out"), "w")
sys.stderr = open(os.path.join(output_dir, "log", "core_tax_est_" +
alignment_res[t].split("/")[-1].split(".")[0] + ".err"), "w")
curr_out = place_taxonomy(tax_tab, cutoff_file, consensus_cutoff,\
prot_tab, use_salmon_counts, names_to_reads,\
alignment_res[t], outfiles[t], rerun_rules)
except:
print("Taxonomic estimation for core genes did not complete successfully. Check log file for details.")
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def manageTaxVisualization(output_dir, mets_or_mags, sample_dir, pep_ext, nt_ext, use_salmon_counts, rerun_rules):
print("Performing taxonomic visualization steps...", flush=True)
out_prefix = output_dir.split("/")[-1]
sys.stdout = open(os.path.join(output_dir, "log", "tax_vis.out"), "w")
sys.stderr = open(os.path.join(output_dir, "log", "tax_vis.err"), "w")
visualize_all_results(out_prefix, output_dir, os.path.join(output_dir, "taxonomy_estimation"),
sample_dir, pep_ext, nt_ext, use_salmon_counts, rerun_rules)
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def manageCoreTaxVisualization(output_dir, mets_or_mags, sample_dir, pep_ext, nt_ext, use_salmon_counts,
rerun_rules, core = False):
print("Performing taxonomic visualization steps...", flush=True)
out_prefix = output_dir.split("/")[-1]
sys.stdout = open(os.path.join(output_dir, "log", "core_tax_vis.out"), "w")
sys.stderr = open(os.path.join(output_dir, "log", "core_tax_vis.err"), "w")
visualize_all_results(out_prefix = out_prefix, out_dir = output_dir,
est_dir = os.path.join(output_dir, "core_taxonomy_estimation"),
samples_dir = sample_dir, prot_extension = pep_ext,
nucle_extension = nt_ext, use_counts = use_salmon_counts, rerun = rerun_rules,
core = core)
#except:
# print("Taxonomic visualization of core genes did not complete successfully. Check log files for details.")
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def manageTaxAssignment(samples, mets_or_mags, output_dir, sample_dir, pep_ext, core = False, perc_mem = 0.75):
if mets_or_mags == "mags":
print("Performing taxonomic assignment steps...", flush=True)
MAX_JOBS = min([calc_max_jobs(len(samples),
pathlib.Path(os.path.join(sample_dir, sample + "." + pep_ext)).stat().st_size,
max_mem_per_proc = 10, perc_mem = perc_mem)
for sample in samples])
n_jobs_viz = min(multiprocessing.cpu_count(), len(samples), max(1,MAX_JOBS))
try:
if core:
assign_res = Parallel(n_jobs=n_jobs_viz, prefer="threads")(delayed(assignTaxonomy)(samp, output_dir,
"core_taxonomy_estimation",
mets_or_mags, core = True) \
for samp in samples)
else:
assign_res = Parallel(n_jobs=n_jobs_viz, prefer="threads")(delayed(assignTaxonomy)(samp, output_dir,
"taxonomy_estimation",
mets_or_mags, core = False) \
for samp in samples)
except:
print("Taxonomic assignment did not complete successfully. Check log files for details.")
sys.exit(1)
if sum(assign_res) != 0:
print("Taxonomic assignment did not complete successfully. Check log files for details.")
sys.exit(1)
def assignTaxonomy(sample_name, output_dir, est_dir, mets_or_mags, core = False):
taxfile = os.path.join(output_dir, est_dir, sample_name + "-estimated-taxonomy.out")
levels_directory = os.path.join(output_dir, "levels_mags")
max_dir = os.path.join(output_dir, "max_level_mags")
if core:
levels_directory = os.path.join(output_dir, "core_levels_mags")
max_dir = os.path.join(output_dir, "core_max_level_mags")
error_log = os.path.join(output_dir, "log", "_".join(est_dir.split("_")[0:1]) + "_assign_" + sample_name + ".err")
out_log = os.path.join(output_dir, "log", "_".join(est_dir.split("_")[0:1]) + "_assign_" + sample_name + ".out")
sys.stdout = open(out_log, "w")
sys.stderr = open(error_log, "w")
try:
rc = magStats(["--estimated-taxonomy-file",taxfile,
"--out-prefix",sample_name,"--outdir",
levels_directory,"--max-out-dir",max_dir])
except:
print("Taxonomic assignment did not complete successfully for sample " + str(sample_name) +
". Check log for details.")
sys.exit(1)
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
return rc
```
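A short dispatch sketch (directory names are hypothetical and the import path is assumed from the package layout above): the manager is driven one `piece` at a time, for example to discover metatranscriptome samples and then TransDecode them.
```python
from EUKulele.manage_steps import manageEukulele  # assumed import path

# Hypothetical directories; extensions are given without the leading dot because
# getSamples compares them against the last dot-separated token of each file name.
samples = manageEukulele("get_samples", mets_or_mags="mets",
                         sample_dir="samples/", nt_ext="fasta", pep_ext="faa")
manageEukulele("transdecode", mets_or_mags="mets", samples=samples,
               sample_dir="samples/", output_dir="output/",
               nt_ext="fasta", pep_ext="faa", run_transdecoder=True)
```
The remaining pieces ("align_to_db", "estimate_taxonomy", and so on) follow the same pattern with their respective keyword arguments.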
#### File: tests/code/test_mags_2.py
```python
import pytest
import sys
from unittest import TestCase
sys.path.insert(1, '..')
sys.path.insert(1, '../src/EUKulele')
import EUKulele
from EUKulele.EUKulele_config import eukulele
import yaml
import os
test_reference = "mmetsp"
def test_individual():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_E")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["all", "--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mags", "--out_dir", output_dir, "-i",
'--organisms', 'Chromera', '--taxonomy_organisms', 'genus',
"--reference_dir", reference_dir])
eukulele(string_arguments=string_arguments)
samplenames = [curr.split(".")[0] for curr in os.listdir(sample_dir)]
busco_out = os.path.join(output_dir, "busco_assessment", samplenames[0], "individual",
"summary_" + samplenames[0] + ".tsv")
assert os.path.isfile(busco_out)
def test_error_input():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_F")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mmm", "--out_dir", output_dir, "-i",
'--organisms', 'Chromera', '--taxonomy_organisms', 'genus',
"--reference_dir", reference_dir])
error = 0
try:
eukulele(string_arguments=string_arguments)
except:
error = 1
assert error == 1
def test_error_required_input():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_G")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["--database", "mmetsp", "--sample_dir", sample_dir,
"--out_dir", output_dir, "-i",
'--organisms', 'Chromera', '--taxonomy_organisms', 'genus',
"--reference_dir", reference_dir])
error = 0
try:
eukulele(string_arguments=string_arguments)
except:
error = 1
assert error == 1
def test_error_busco_no_orgs():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_H")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mags", "--out_dir", output_dir, "-i",
"--reference_dir", reference_dir])
error = 0
try:
eukulele(string_arguments=string_arguments)
except:
error = 1
assert error == 1
def test_error_n_extension():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_I")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mets", "--out_dir", output_dir, "-i",
"--n_ext", ".hello",
"--reference_dir", reference_dir])
error = 0
try:
eukulele(string_arguments=string_arguments)
except:
error = 1
assert error == 1
def test_error_p_extension():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_J")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mags", "--out_dir", output_dir, "-i",
"--p_ext", ".hello",
"--reference_dir", reference_dir])
error = 0
try:
eukulele(string_arguments=string_arguments)
except:
error = 1
assert error == 1
def test_error_busco():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_K")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mags", "--out_dir", output_dir, "-i",
'--busco_file', os.path.join(base_dir, test_reference, "samples_MAGs", "busco_file_fake.tsv"),
"--reference_dir", reference_dir])
error = 0
try:
eukulele(string_arguments=string_arguments)
except:
error = 1
assert error == 1
def test_busco_file():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out_K")
os.system("rm -rf " + output_dir)
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
string_arguments = " ".join(["--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mags", "--out_dir", output_dir, "-i",
'--busco_file', os.path.join(base_dir, test_reference, "samples_MAGs", "test_busco.tsv"),
"--reference_dir", reference_dir])
error = 0
eukulele(string_arguments=string_arguments)
samplenames = [curr.split(".")[0] for curr in os.listdir(sample_dir)]
busco_out = os.path.join(output_dir, "busco_assessment", samplenames[0], "individual",
"summary_" + samplenames[0] + ".tsv")
out_prefix = samplenames[0]
assert (os.path.isfile(busco_out))
def test_all():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
base_config = os.path.join(os.path.dirname(__file__), '..', 'aux_data', 'config.yaml')
base_config_curr = os.path.join(os.path.dirname(__file__), '..', 'aux_data', 'config_O.yaml')
os.system("cp " + base_config + " " + base_config_curr)
with open(base_config_curr) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config["mets_or_mags"] = "mags"
config["reference"] = os.path.join(base_dir, test_reference, "sample_ref")
config["samples"] = os.path.join(base_dir, "real-world-samples", "MAGs")
config["subroutine"] = "all"
config["individual_or_summary"] = "summary"
config["cutoff"] = os.path.join("tax-cutoffs.yaml")
config["output"] = os.path.join(base_dir, "test_out_all_K")
config["database"] = test_reference
config["organisms"] = ["Chromera"]
config["taxonomy_organisms"] = ["genus"]
config["download_reference"] = 0
config["column"] = "SOURCE_ID"
config["ref_fasta"] = "reference.pep.fa"
config["protein_map"] = "prot-map.json"
config["tax_table"] = "tax-table.txt"
config_path = os.path.join(base_dir, 'test_configs')
os.system("mkdir -p " + config_path)
config_file = os.path.join(config_path, 'curr_config_busco_O.yaml')
with open(config_file, 'w') as f:
yaml.dump(config, f)
eukulele(string_arguments=" ".join(["--config",config_file]))
samplenames = [curr.split(".")[0] for curr in os.listdir(config["samples"])]
busco_out = os.path.join(config["output"], "busco_assessment", samplenames[0], "species_combined",
"summary_species_" + samplenames[0] + ".tsv")
out_prefix = samplenames[0]
mag_file = os.path.join(config["output"], "levels_mags", out_prefix + '.' + "species")
assert (os.path.isfile(busco_out)) & (os.path.isfile(mag_file))
def test_tester():
base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
sample_dir = os.path.join(base_dir, test_reference, "samples_MAGs")
output_dir = os.path.join(base_dir, "test_out")
reference_dir = os.path.join(base_dir, test_reference, "sample_ref")
os.system("rm -rf " + output_dir)
string_arguments = " ".join(["setup", "--test", "--database", "mmetsp", "--sample_dir", sample_dir,
"--mets_or_mags", "mags", "--out_dir", output_dir, "--ref_fasta",
"reference.pep.fa", "--reference_dir", reference_dir])
eukulele(string_arguments=string_arguments)
assert (not os.path.isdir(output_dir))
#def test_cleanup():
# base_dir = os.path.join(os.path.dirname(__file__), '..', 'aux_data')
# config_path = os.path.join(os.path.dirname(__file__), '..', 'aux_data', 'test_configs')
# base_configs = [os.path.join(config_path, 'curr_config_alignment.yaml'),\
# os.path.join(config_path, 'curr_config_setup.yaml')]
# successful_test = True
# for base_config in base_configs:
# with open(base_config) as f:
# config = yaml.load(f, Loader=yaml.FullLoader)
# config["reference"] = os.path.join(base_dir, test_reference)
# os.system("rm -rf " + os.path.join(config["output"]))
# successful_test = successful_test & (not os.path.isdir(os.path.join(config["output"])))
# successful_test = True
# assert successful_test
``` |
{
"source": "johanneswiesel/Stats-for-superhedging",
"score": 2
} |
#### File: Stats-for-superhedging/GARCH-Example/GARCH.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pylab
import arch
from scipy.optimize import minimize
from Wasserstein_empirical import calculate_wasserstein
import tensorflow as tf
#Calculate AVAR-estimator directly
def percentile(a,p):
return np.sort(a, axis=0)[int(p*len(a))-1:]
def avar(data, g, alpha):
def avar_cpt(x):
return np.mean(percentile(g(data)-x[0]*(data-1), alpha))
res = minimize(avar_cpt, x0=[0], method='BFGS').fun
return(res)
def simulate_garch(dates, garch_parameters, no_samples, starting_val=1): #simulates GARCH(1,1) with student-t distribution according to given dates, starting value 1
omega = garch_parameters[0]
alpha = garch_parameters[1]
beta = garch_parameters[2]
nu = garch_parameters[3]
n = dates.shape[0]
epsilon = np.zeros((n, no_samples))
epsilon[0, :] = starting_val
t_rand = np.sqrt((nu-2)/nu)*np.random.standard_t(df= nu, size=(n, no_samples))
for i in range(0, no_samples):
sigma2 = np.zeros(n)
for k in range(1, n):
sigma2[k] = omega + alpha*(epsilon[k-1,i]**2) + beta*sigma2[k-1]
epsilon[k, i] = t_rand[k,i] * np.sqrt(sigma2[k])
return(pd.DataFrame(data=epsilon, index=dates.index))
def estimate_garch(data, window): #estimate GARCH(1,1) model with running window, data Nx1 dimensional
n = data.shape[0]
garch_parameters = np.zeros((n, 4))
for i in range(0, n-window+1):
print(i)
model = arch.arch_model(data.values[i:i+window],mean='Zero', vol='GARCH', dist='StudentsT')
model_fit = model.fit(disp='off')
garch_parameters[window-1+i,0] = model_fit.params[0]
garch_parameters[window-1+i,1] = model_fit.params[1]
garch_parameters[window-1+i,2] = model_fit.params[2]
garch_parameters[window-1+i,3] = model_fit.params[3]
res = pd.DataFrame(data = garch_parameters, index = data.index)
return(res)
def monte_carlo_garch(data, garch_parameters, no_samples, no_mc, g, alpha):
epsilon = np.zeros((no_mc, no_samples))
sigma2 = np.zeros((no_mc, no_samples))
N = data.shape[0]
avar_out = pd.DataFrame(data=np.zeros((N, no_samples)), index=data.index)
for i in range(0,N):
print(i)
epsilon[0,:] = 1
omega = garch_parameters.iloc[i,0]
gamma = garch_parameters.iloc[i,1]
beta = garch_parameters.iloc[i,2]
nu = garch_parameters.iloc[i,3]
if omega == 0 or gamma == 0 or beta == 0 or nu == 0: continue
t_rand = np.random.standard_t(df= nu, size=(no_mc, no_samples))
for k in range(1, no_mc):
sigma2[k, :] = omega + gamma*(epsilon[k-1, :]**2) + beta*sigma2[k-1, :]
epsilon[k, :] = t_rand[k, :] * np.sqrt(sigma2[k,:])
returns = np.exp(epsilon/100)
for j in range(0, no_samples):
avar_out.iloc[i, j] = avar(returns[:, j],g,alpha)
return(avar_out)
if __name__ == '__main__':
# Test with simulated real data - constant GARCH(1,1) model
garch_params = [0.02, 0.1, 0.83, 5]
N = 10**6 #Data points to calculate true value
dates = pd.DataFrame(index=range(0,N))
garch_simulation = simulate_garch(dates, garch_params, 1)
returns_garch_simulation = np.exp(garch_simulation/100)
# Check model
model = arch.arch_model(garch_simulation,mean='Zero', vol='GARCH', dist='StudentsT')
model_fit = model.fit()
#Concrete application
def g(x):
return(np.maximum(x-1,0))
alpha = 0.95
interval = 50
#Convention price S0=1
true_value = avar(returns_garch_simulation,g, alpha)
# Rolling estimation
n = 10**3 # length of simulated data
dates_est = pd.DataFrame(index=range(0,n))
garch_samples_est = simulate_garch(dates_est, garch_params, 1)
returns_garch_est = np.exp(garch_samples_est/100)
# Plugin estimator on given data
def avar_concrete(data):
return (avar(data,g,alpha))
plugin_avar = returns_garch_est.rolling(interval).apply(avar_concrete)
# Wasserstein estimator
def g_wass(r):
return tf.nn.relu(r-1)
eps_const = 0.5
def wasserstein_concrete(data):
return calculate_wasserstein(data, eps_const, alpha, g_wass)
wasserstein_avar = returns_garch_est.rolling(interval).apply(wasserstein_concrete)
# Monte Carlo GARCH(1,1)
no_samples = 10**2
no_mc = 10**2
garch_params_est = estimate_garch(garch_samples_est, interval)
garch_avar = monte_carlo_garch(dates_est, garch_params_est, no_samples, no_mc, g, alpha)
# Adapt for plot
garch_avar[garch_avar < 0 ] = 0
garch_avar[ garch_avar > 0.04] = 0.04
# Plot
plt.figure(figsize=(16.000, 8.000), dpi=100)
plt.plot(pd.DataFrame(data=np.repeat(true_value, plugin_avar.shape[0]), index=plugin_avar.index), label='True Value', linewidth = 3)
plt.plot(plugin_avar, label='Plugin historical', linewidth=3)
plt.plot(wasserstein_avar, label = 'Wasserstein historical', linewidth=3)
plt.plot((garch_avar.mean(axis=1)).rolling(2).mean(), label = 'Plugin GARCH(1,1)')
plt.legend()
pylab.savefig('garch_fixed_par.pdf')
#########################################################
# Test with simulated data - Changing GARCH(1,1) model
garch_params = [0.02, 0.1, 0.8, 5]
garch_params2 = [0.05, 0.14, 0.83, 20]
N = 10**6 # Data points to calculate true value
dates = pd.DataFrame(index=range(0,N))
garch_simulation2 = simulate_garch(dates, garch_params2, 1)
returns_garch_simulation2 = np.exp(garch_simulation2/100)
# Check model
model2 = arch.arch_model(garch_simulation2,mean='Zero', vol='GARCH', dist='StudentsT')
model_fit2 = model2.fit()
# Concrete application
def g(x):
return(np.maximum(x-1,0))
alpha = 0.95
interval = 50
# Convention price S0=1
true_value2 = avar(returns_garch_simulation2,g, alpha)
# Rolling estimation
n = 10**3 # length of simulated data
dates_est2 = pd.DataFrame(index=range(0,n))
garch_samples_est2 = simulate_garch(dates_est, garch_params, 1, starting_val=1)
garch_samples_est_temp = simulate_garch(dates_est2, garch_params2, 1, starting_val=garch_samples_est2.iloc[n//3-1])
garch_samples_est2.iloc[n//3:2*n//3] = garch_samples_est_temp.iloc[0:n//3].values
garch_samples_est_temp = simulate_garch(dates_est2, garch_params2, 1, starting_val=garch_samples_est2.iloc[2*n//3-1])
garch_samples_est2.iloc[2*n//3:] = garch_samples_est_temp.iloc[0:n//3+1].values
returns_garch_est2 = np.exp(garch_samples_est2/100)
# Plugin estimator on given data
def avar_concrete(data):
return (avar(data,g,alpha))
plugin_avar2 = returns_garch_est2.rolling(interval).apply(avar_concrete)
# Wasserstein estimator
def g_wass(r):
return tf.nn.relu(r-1)
eps_const = 0.5
def wasserstein_concrete(data):
return (calculate_wasserstein(data, eps_const, alpha, g_wass))
wasserstein_avar2 = returns_garch_est2.rolling(interval).apply(wasserstein_concrete)
# Monte Carlo GARCH(1,1)
no_samples = 10**2
no_mc = 10**2
garch_params_est2 = estimate_garch(garch_samples_est2, interval)
garch_avar2 = monte_carlo_garch(dates_est2, garch_params_est2, no_samples, no_mc, g, alpha)
garch_avar2[garch_avar2 < 0 ] = 0
garch_avar2[ garch_avar2 > 0.1] = 0.1
# Plot
plt.figure(figsize=(16.000, 8.000), dpi=100)
true_value_mat = np.append(np.append(np.repeat(true_value,n//3), np.repeat(true_value2, n//3)),np.repeat(true_value,n//3+1))
plt.plot(pd.DataFrame(data = true_value_mat, index = plugin_avar.index), label = 'True value', linewidth =3)
plt.plot(plugin_avar2, label = 'Plugin historical', linewidth = 3)
plt.plot((garch_avar2.mean(axis=1)).rolling(2).mean(), label = 'Plugin GARCH(1,1)')
plt.plot(wasserstein_avar2, label = 'Wasserstein historical', linewidth=3)
plt.legend()
pylab.savefig('garch_changing_par.pdf')
``` |
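The core of the script above is the plugin AVaR estimator: `avar` minimizes the empirical mean of the upper alpha-tail of g(S) - x*(S - 1) over the hedging position x. Below is a minimal, self-contained sketch of that estimator on synthetic lognormal gross returns; the sample size and distribution parameters are illustrative, while the call payoff g and alpha = 0.95 mirror the script.
```python
import numpy as np
from scipy.optimize import minimize
def percentile(a, p):
    # upper (1-p)-tail of the sorted sample, as in the script above
    return np.sort(a, axis=0)[int(p * len(a)) - 1:]
def avar(data, g, alpha):
    # plugin estimator: minimize the tail mean over the hedging position x
    def avar_cpt(x):
        return np.mean(percentile(g(data) - x[0] * (data - 1), alpha))
    return minimize(avar_cpt, x0=[0], method='BFGS').fun
def g(s):
    return np.maximum(s - 1, 0)  # call payoff with strike 1 (price convention S0 = 1)
rng = np.random.default_rng(0)
returns = np.exp(0.01 * rng.standard_normal(10_000))  # illustrative gross returns around S0 = 1
print(avar(returns, g, alpha=0.95))
```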
{
"source": "johanneswiesel/Wasserstein-correlation",
"score": 3
} |
#### File: johanneswiesel/Wasserstein-correlation/fig2.py
```python
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from scipy.stats import pearsonr,spearmanr
from scipy.spatial.distance import pdist, squareform
from xicor.xicor import Xi
import ot
def distcorr(X, Y):
X = np.atleast_1d(X)
Y = np.atleast_1d(Y)
if np.prod(X.shape) == len(X):
X = X[:, None]
if np.prod(Y.shape) == len(Y):
Y = Y[:, None]
X = np.atleast_2d(X)
Y = np.atleast_2d(Y)
n = X.shape[0]
if Y.shape[0] != X.shape[0]:
raise ValueError('Number of samples must match')
a = squareform(pdist(X))
b = squareform(pdist(Y))
A = a - a.mean(axis=0)[None, :] - a.mean(axis=1)[:, None] + a.mean()
B = b - b.mean(axis=0)[None, :] - b.mean(axis=1)[:, None] + b.mean()
dcov2_xy = (A * B).sum()/float(n * n)
dcov2_xx = (A * A).sum()/float(n * n)
dcov2_yy = (B * B).sum()/float(n * n)
dcor = np.sqrt(dcov2_xy)/np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))
return dcor
def adapW1_eot(x,y,N):
x_new = N**(-1/3)*np.floor(N**(1/3)*x)
y_new = N**(-1/3)*np.floor(N**(1/3)*y)
x_val = np.array(list(Counter(x_new).keys()))
x_freq = np.array(list(Counter(x_new).values()))
W = np.zeros(len(x_val))
for i in range(0,len(x_val)):
aux = y_new[x_new==x_val[i]]
aux = aux.reshape((len(aux), 1))
c = np.abs(aux-y_new)
w1 = np.ones(len(aux))/len(aux)
w2 = np.ones(len(y))/len(y)
W[i] = ot.sinkhorn2(w1,w2,c,0.01)
c = np.abs(y_new.reshape((N,1))-y_new)
denom = c.sum()/N**2
return np.dot(W, x_freq)/(N*denom)
N = 1000 #no. of samples
M = 30 #no. of draws
lam = np.linspace(0,1, num = 100)
Wcor = np.zeros(len(lam))
pcor = np.zeros(len(lam))
scor = np.zeros(len(lam))
dcor = np.zeros(len(lam))
ccor = np.zeros(len(lam))
Wcor_aux = np.zeros(M)
pcor_aux = np.zeros(M)
scor_aux = np.zeros(M)
dcor_aux = np.zeros(M)
ccor_aux = np.zeros(M)
#Define function f(x)
def func(x):
return np.abs(x-0.5)
#Compute estimators
for i in range(0,len(lam)):
for j in range(0, M):
print(i,j)
x = np.random.random_sample(N)
y = lam[i]*func(x)+(1-lam[i])*np.random.random_sample(N)
Wcor_aux[j] = adapW1_eot(x,y,N)
pcor_aux[j] , _ = pearsonr(x, y)
dcor_aux[j] = distcorr(x, y)
ccor_aux[j] = Xi(x,y).correlation
scor_aux[j], _ = spearmanr(x,y)
Wcor[i] = np.mean(Wcor_aux)
pcor[i] = np.mean(pcor_aux)
dcor[i] = np.mean(dcor_aux)
ccor[i] = np.mean(ccor_aux)
scor[i] = np.mean(scor_aux)
#Plot
f = plt.figure(figsize=(11.69,8.27))
plt.plot(lam, Wcor, label="Wasserstein correlation")
plt.plot(lam, pcor, label="Pearson's correlation")
plt.plot(lam, scor, label="Spearman's correlation")
plt.plot(lam, dcor, label="Distance correlation")
plt.plot(lam, ccor, label="Chatterjee's correlation")
plt.legend()
plt.show()
``` |
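What fig2.py sweeps over is the mixture y = lam*f(x) + (1 - lam)*noise with the non-monotone f(x) = |x - 0.5|. The following small, self-contained check looks at the two classical coefficients at the extremes of that sweep; the Wasserstein, distance and Chatterjee coefficients from the script are omitted here so that only numpy and scipy are needed.
```python
import numpy as np
from scipy.stats import pearsonr, spearmanr
rng = np.random.default_rng(0)
N = 1000
def func(x):
    # same V-shaped transform as in fig2.py
    return np.abs(x - 0.5)
for lam in (0.0, 1.0):
    x = rng.random(N)
    y = lam * func(x) + (1 - lam) * rng.random(N)
    print(f"lambda={lam}: Pearson={pearsonr(x, y)[0]:.2f}, Spearman={spearmanr(x, y)[0]:.2f}")
# At lambda = 1 the dependence is purely functional but non-monotone, so both classical
# coefficients stay near zero; this is the regime in which the Wasserstein, distance and
# Chatterjee correlations plotted by fig2.py remain informative.
```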
{
"source": "JohannesWiesner/ci-calculator",
"score": 3
} |
#### File: JohannesWiesner/ci-calculator/create_icon.py
```python
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib
import numpy as np
from scipy.stats import norm
from PIL import Image
import os
def callPlotWindow():
# convert matplotlib's intern 'C0' to rgba tuple
default_blue_hex = plt.rcParams['axes.prop_cycle'].by_key()['color'][0]
default_blue_rgba = matplotlib.colors.to_rgba(default_blue_hex)
def lighter(color, percent):
# color = np.array(color)
white = np.array([1,1,1,1])
vector = white-color
return color + vector * percent
plotdata = dict()
plotdata['sd'] = 10
plotdata['mean'] = 50
my_dpi = 144
fig = plt.figure(figsize=(256/my_dpi, 256/my_dpi), dpi=my_dpi,frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# create x values for normal distribution
x_normdist = np.concatenate((
np.linspace(plotdata["mean"] - 3 * plotdata["sd"], plotdata["mean"] - 2 * plotdata["sd"],endpoint=False),
np.linspace(plotdata["mean"] - 2 * plotdata["sd"], plotdata["mean"] - 1 * plotdata["sd"],endpoint=False),
np.linspace(plotdata["mean"] - 1 * plotdata["sd"], plotdata["mean"] + 1 * plotdata["sd"],endpoint=False),
np.linspace(plotdata["mean"] + 1 * plotdata["sd"], plotdata["mean"] + 2 * plotdata["sd"],endpoint=False),
np.linspace(plotdata["mean"] + 2 * plotdata["sd"], plotdata["mean"] + 3 * plotdata["sd"])
))
# plot normal distribution curve
y_normdist = norm.pdf(x_normdist,plotdata["mean"],plotdata["sd"])
ax.plot(x_normdist,y_normdist)
# create logical lists which are used for 'where'-argument in fill-between method
average = (x_normdist >= (plotdata["mean"] - 1 * plotdata["sd"])) & (x_normdist <= (plotdata["mean"] + 1 * plotdata["sd"]))
above_and_below_average = (x_normdist >= (plotdata["mean"] - 2 * plotdata["sd"])) & (x_normdist <= (plotdata["mean"] - 1 * plotdata["sd"])) | (x_normdist >= (plotdata["mean"] + 1 * plotdata["sd"])) & (x_normdist <= (plotdata["mean"] + 2 * plotdata["sd"]))
far_above_and_below_average = (x_normdist >= (plotdata["mean"] - 3 * plotdata["sd"])) & (x_normdist <= (plotdata["mean"] - 2 * plotdata["sd"])) | (x_normdist >= (plotdata["mean"] + 2 * plotdata["sd"])) & (x_normdist <= (plotdata["mean"] + 3 * plotdata["sd"]))
regions = [average,
above_and_below_average,
far_above_and_below_average
]
colors = [
lighter(default_blue_rgba,0.25),
lighter(default_blue_rgba,0.5),
lighter(default_blue_rgba,0.75)
]
# shade regions under the curve with progressively lighter shades of the base color
for idx,region in enumerate(regions):
plt.fill_between(x_normdist, y_normdist,color=colors[idx],where=regions[idx])
# save image as png
plt.savefig('app_icon.png',dpi=my_dpi,transparent=True)
# convert to .ico
img = Image.open('app_icon.png')
img.save('app_icon.ico',sizes=[(255,255)],dpi=my_dpi)
# delete unneeded png
os.remove('app_icon.png')
if __name__ == "__main__":
callPlotWindow()
``` |
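The shading of the three regions relies on the `lighter` helper, which linearly blends an RGBA color toward white. A tiny sketch of that blending on matplotlib's default blue; the 0.25/0.5/0.75 fractions are the ones used above, everything else is illustrative.
```python
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def lighter(color, percent):
    # blend an RGBA color toward white by the given fraction
    white = np.array([1, 1, 1, 1])
    return color + (white - color) * percent
base = np.array(matplotlib.colors.to_rgba(plt.rcParams['axes.prop_cycle'].by_key()['color'][0]))
for p in (0.25, 0.5, 0.75):
    print(p, np.round(lighter(base, p), 3))
```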
{
"source": "JohannesWiesner/control_package",
"score": 3
} |
#### File: lib/network_control/energies.py
```python
import numpy as np
import scipy as sp
def sim_state_eq( A, B, xi, U):
"""This function caclulates the trajectory for the network given our model
if there are no constraints, and the target state is unknown, using the
control equation precess x(t+1) = Ax(t) + BU(t). x(t) is the state vector, A is
the adjacency matrix, U(t) is the time varying input as specified by the
user, and B selects the control set (stimulating electrodes)
Args:
A : NxN state matrix (numpy array), where N is the number of nodes in your
network (for example, a structural connectivity matrix
constructed from DTI). A should be stable to prevent
uncontrolled trajectories.
B : NxN input matrix (numpy array), where N is the number of nodes. B
selects where you want your input energy to be applied to.
For example, if B is the Identity matrix, then input energy
will be applied to all nodes in the network. If B is a
matrix of zeros, but B(1,1) = 1. then energy will only be
applied at the first node.
xi : Nx1 initial state (numpy array) of your system where N is the number of
nodes. xi MUST have N rows.
U : NxT matrix of Energy (numpy array), where N is the number of nodes
and T is the number of
time points. For example, if you want to simulate the
trajectory resulting from stimulation, U could have
log(StimFreq)*StimAmp*StimDur as every element. You can
also enter U's that vary with time
Returns:
x : x is the NxT trajectory (numpy array) that results from simulating
x(t+1) = Ax(t) + Bu(t) the equation with the parameters
above.
@author JStiso
June 2017
"""
# Simulate trajectory
T = np.size(U,1)
N = np.size(A,0)
# initialize x
x = np.zeros((N, T))
xt = xi
for t in range(T):
x[:,t] = np.reshape(xt, N) # annoying python 1d array thing
xt_1 = np.matmul(A,xt) + np.matmul(B,np.reshape(U[:,t],(N,1) ))# state equation
xt = xt_1
return x
def optimal_energy(A, T, B, x0, xf, rho, S):
"""This is a python adaptation of matlab code originally written by <NAME> and <NAME>
compute optimal inputs/trajectories for a system to transition between two states
<NAME> September 2017
Args:
A: (NxN numpy array) Structural connectivity matrix
B: (NxN numpy array) Input matrix: selects which nodes to put input into. Define
so there is a 1 on the diagonal of elements you want to add input to,
and 0 otherwise
S: (NxN numpy array) Selects nodes whose distance you want to constrain. Define so
that there is a 1 on the diagonal of elements you want to
constrain, and a zero otherwise
T: (float) Time horizon: how long you want to control for. Too large will give
large error, too short will not give enough time for control
rho: (float) weights energy and distance constraints. Small rho leads to larger
energy
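x0: (Nx1 numpy array) initial state of the system
xf: (Nx1 numpy array) target (final) state of the system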
Returns:
X_opt: (TxN numpy array)
The optimal trajectory through state space
U_opt: (TxN numpy array)
The optimal energy
n_err: (float)
the error associated with this calculation. Errors will be larger when B is not identity,
and when A is large. Large T and rho will also tend to increase the error
-------------- Change Log -------------
JStiso April 2018
Changed S to be an input, rather than something defined internally
<NAME> January 2021
Changed the forward propagation of states to matrix exponential to
avoid reliance on MATLAB toolboxes. Also changed definition of expanded
input U to save time by avoiding having to resize the matrix.
Also changed the initialization of U_opt for the same reason.
JStiso 2021
Translated to Python
"""
n = np.shape(A)[1]
# state vectors to float if they're bools
if type(x0[0]) == np.bool_:
x0 = x0.astype(float)
if type(xf[0]) == np.bool_:
xf = xf.astype(float)
Sbar = np.eye(n) - S
np.shape(np.dot(-B,B.T)/(2*rho))
Atilde = np.concatenate((np.concatenate((A, np.dot(-B,B.T)/(2*rho)), axis=1),
np.concatenate((-2*S, -A.T), axis=1)), axis=0)
M = sp.linalg.expm(Atilde*T)
M11 = M[0:n,0:n]
M12 = M[0:n,n:]
M21 = M[n:,0:n]
M22 = M[n:,n:]
N = np.linalg.solve(Atilde,(M-np.eye(np.shape(Atilde)[0])))
c = np.dot(np.dot(N,np.concatenate((np.zeros((n,n)),S),axis = 0)),2*xf)
c1 = c[0:n]
c2 = c[n:]
p0 = np.dot(np.linalg.pinv(np.concatenate((np.dot(S,M12),np.dot(Sbar,M22)), axis = 0)),
(-np.dot(np.concatenate((np.dot(S,M11),np.dot(Sbar,M21)),axis=0),x0) -
np.concatenate((np.dot(S,c1),np.dot(Sbar,c2)), axis=0) +
np.concatenate((np.dot(S,xf),np.zeros((n,1))), axis=0)))
n_err = np.linalg.norm(np.dot(np.concatenate((np.dot(S,M12),np.dot(Sbar,M22)), axis = 0),p0) -
(-np.dot(np.concatenate((np.dot(S,M11),np.dot(Sbar,M21)),axis=0),x0) -
np.concatenate((np.dot(S,c1),np.dot(Sbar,c2)), axis=0) +
np.concatenate((np.dot(S,xf),np.zeros((n,1))), axis=0))) # norm(error)
STEP = 0.001
t = np.arange(0,(T+STEP),STEP)
U = np.dot(np.ones((np.size(t),1)),2*xf.T)
# Discretize continuous-time input for convolution
Atilde_d = sp.linalg.expm(Atilde*STEP)
Btilde_d = np.linalg.solve(Atilde,
np.dot((Atilde_d-np.eye(2*n)),np.concatenate((np.zeros((n,n)),S), axis=0)))
# Propagate forward discretized model
xp = np.zeros((2*n,np.size(t)))
xp[:,0:1] = np.concatenate((x0,p0), axis=0)
for i in np.arange(1,np.size(t)):
xp[:,i] = np.dot(Atilde_d,xp[:,i-1]) + np.dot(Btilde_d,U[i-1,:].T)
xp = xp.T
U_opt = np.zeros((np.size(t),np.shape(B)[1]))
for i in range(np.size(t)):
U_opt[i,:] = -(1/(2*rho))*np.dot(B.T,xp[i,n:].T)
X_opt = xp[:,0:n]
return X_opt, U_opt, n_err
def minimum_energy(A, T, B, x0, xf):
""" This function computes the minimum energy required to transition between two states
This is a python adaptation of code originally written by <NAME>
Computes minimum control energy for state transition.
Args:
A: numpy array (N x N)
System adjacency matrix
B: numpy array (N x N)
Control input matrix
x0: numpy array (N x t)
Initial state
xf: numpy array (N x t)
Final state
T: float (1 x 1)
Control horizon
Returns:
x: numpy array (N x t)
State Trajectory
u: numpy array (N x t)
Control Input
"""
# System Size
n = np.shape(A)[0]
# state vectors to float if they're bools
if type(x0[0]) == np.bool_:
x0 = x0.astype(float)
if type(xf[0]) == np.bool_:
xf = xf.astype(float)
# Compute Matrix Exponential
AT = np.concatenate((np.concatenate((A, -.5*(B.dot(B.T))), axis=1),
np.concatenate((np.zeros(np.shape(A)), -A.T), axis=1)), axis=0)
E = sp.linalg.expm(AT*T)
# Compute Costate Initial Condition
E12 = E[0:n,n:]
E11 = E[0:n,0:n]
p0 = np.linalg.pinv(E12).dot(xf - E11.dot(x0))
# Compute Costate Initial Condition Error Induced by Inverse
n_err = np.linalg.norm(E12.dot(p0) - (xf - E11.dot(x0)))
# Prepare Simulation
nStep=1000
t = np.linspace(0,T,nStep+1)
v0 = np.concatenate((x0, p0), axis=0) # Initial Condition
v = np.zeros((2*n,len(t))) # Trajectory
Et = sp.linalg.expm(AT*T/(len(t)-1))
v[:,0] = v0.T
# Simulate State and Costate Trajectories
for i in np.arange(1,len(t)):
v[:,i] = Et.dot(v[:,i-1])
x = v[0:n,:]
u = -0.5*B.T.dot(v[np.arange(0,n)+n,:])
# transpose to be similar to opt_eng_cont
u = u.T
x = x.T
return x, u, n_err
def minimum_energy_fast(A, T, B, x0_mat, xf_mat):
""" This function computes the minimum energy required to transition between all pairs of brain states
encoded in (x0_mat,xf_mat)
Args:
A: numpy array (N x N)
System adjacency matrix
B: numpy array (N x N)
Control input matrix
x0_mat: numpy array (N x n_transitions)
Initial states (see expand_states)
xf_mat: numpy array (N x n_transitions)
Final states (see expand_states)
T: float (1 x 1)
Control horizon
Returns:
E: numpy array (N x n_transitions)
Regional energy for all state transition pairs.
Notes,
np.sum(E, axis=0)
collapse over regions to yield energy associated with all transitions.
np.sum(E, axis=0).reshape(n_states, n_states)
collapse over regions and reshape into a state by state transition matrix.
"""
# System Size
n_parcels = A.shape[0]
if type(x0_mat[0][0]) == np.bool_:
x0_mat = x0_mat.astype(float)
if type(xf_mat[0][0]) == np.bool_:
xf_mat = xf_mat.astype(float)
# Number of integration steps
nt = 1000
dt = T/nt
# Numerical integration with Simpson's 1/3 rule
# Integration step
dE = sp.linalg.expm(A * dt)
# Accumulation of expm(A * dt)
dEA = np.eye(n_parcels)
# Gramian
G = np.zeros((n_parcels, n_parcels))
for i in np.arange(1, nt/2):
# Add odd terms
dEA = np.matmul(dEA, dE)
p1 = np.matmul(dEA, B)
# Add even terms
dEA = np.matmul(dEA, dE)
p2 = np.matmul(dEA, B)
G = G + 4 * (np.matmul(p1, p1.transpose())) + 2 * (np.matmul(p2, p2.transpose()))
# Add final odd term
dEA = np.matmul(dEA, dE)
p1 = np.matmul(dEA, B)
G = G + 4 * (np.matmul(p1, p1.transpose()))
# Divide by integration step
E = sp.linalg.expm(A * T)
G = (G + np.matmul(B, B.transpose()) + np.matmul(np.matmul(E, B), np.matmul(E, B).transpose())) * dt / 3
delx = xf_mat - np.matmul(E, x0_mat)
E = np.multiply(np.matmul(np.linalg.pinv(G), delx), delx)
return E
```
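A minimal usage sketch for `minimum_energy`, assuming the module is importable as `network_control.energies` (as the file path above suggests); the toy matrix is random, and the inline normalization mirrors `matrix_normalization` from utils.py further below (c = 1, continuous-time system).
```python
import numpy as np
from scipy.linalg import svd
from network_control.energies import minimum_energy  # import path assumed from the file header above
rng = np.random.default_rng(0)
n = 5
A = rng.random((n, n))
A = (A + A.T) / 2                              # symmetric toy "connectome"
A_norm = A / (1 + svd(A)[1][0]) - np.eye(n)    # continuous-time normalization, cf. utils.matrix_normalization
B = np.eye(n)                                  # control input at every node
x0 = np.zeros((n, 1))                          # initial state
xf = np.ones((n, 1))                           # target state
x, u, n_err = minimum_energy(A_norm, 3, B, x0, xf)
print(x.shape, u.shape, n_err)                 # state trajectory (t x N), control input (t x N), inversion error
```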
#### File: lib/network_control/metrics.py
```python
import numpy as np
from scipy.linalg import schur
def node_strength(A):
""" Returns strength of the nodes of a network.
Args:
A: np.array (n_parcels, n_parcels)
Adjacency matrix from structural connectome
Returns:
s: np.array (n_parcels,)
vector of strength values across nodes
@author lindenmp
"""
s = np.sum(A, axis=0)
return s
def ave_control(A_norm):
""" Returns values of AVERAGE CONTROLLABILITY for each node in a network, given the adjacency matrix for that
network. Average controllability measures the ease by which input at that node can steer the system into many
easily-reachable states.
Args:
A_norm: np.array (n_parcels, n_parcels)
Normalized adjacency matrix from structural connectome (see matrix_normalization in utils for example)
Returns:
ac: np.array (n_parcels,)
vector of average controllability values for each node
@author lindenmp
Reference: <NAME>, Cieslak, Telesford, Yu, Kahn, Medaglia,
<NAME>, Grafton & Bassett, Nature Communications
6:8414, 2015.
"""
T, U = schur(A_norm, 'real') # Schur stability
midMat = np.multiply(U, U).transpose()
v = np.matrix(np.diag(T)).transpose()
N = A_norm.shape[0]
P = np.diag(1 - np.matmul(v, v.transpose()))
P = np.tile(P.reshape([N, 1]), (1, N))
ac = sum(np.divide(midMat, P))
return ac
def modal_control(A_norm):
""" Returns values of MODAL CONTROLLABILITY for each node in a network, given the adjacency matrix for that network.
Modal controllability indicates the ability of that node to steer the system into difficult-to-reach states,
given input at that node.
Args:
A_norm: np.array (n_parcels, n_parcels)
Normalized adjacency matrix from structural connectome (see matrix_normalization in utils for example)
Returns:
phi: np.array (n_parcels,)
vector of modal controllability values for each node
@author lindenmp
Reference: <NAME>, Cieslak, Telesford, Yu, Kahn, Medaglia,
Vettel, Miller, Grafton & Bassett, Nature Communications
6:8414, 2015.
"""
T, U = schur(A_norm, 'real') # Schur stability
eigVals = np.diag(T)
N = A_norm.shape[0]
phi = np.zeros(N, dtype=float)
for i in range(N):
Al = U[i,] * U[i,]
Ar = (1.0 - np.power(eigVals, 2)).transpose()
phi[i] = np.matmul(Al, Ar)
return phi
```
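A matching sketch for the controllability metrics above, again on a random toy matrix; the import path is assumed from the file header, and the normalization (c = 1, no identity subtraction) corresponds to the discrete-time branch of `matrix_normalization` in utils.py below.
```python
import numpy as np
from scipy.linalg import svd
from network_control.metrics import node_strength, ave_control, modal_control  # import path assumed
rng = np.random.default_rng(1)
n = 8
A = rng.random((n, n))
A = (A + A.T) / 2
np.fill_diagonal(A, 0)              # no self-connections in the toy connectome
A_norm = A / (1 + svd(A)[1][0])     # discrete-time normalization (c = 1)
print(node_strength(A))             # weighted degree of each node
print(ave_control(A_norm))          # average controllability per node
print(modal_control(A_norm))        # modal controllability per node
```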
#### File: src/network_control/utils.py
```python
import numpy as np
import scipy as sp
import scipy.linalg as la
from scipy.linalg import svd
from scipy.linalg import eig
from numpy import matmul as mm
from scipy.linalg import expm as expm
from numpy import transpose as tp
def rank_to_normal(data, c, n):
# Standard quantile function
data = (data - c) / (n - 2 * c + 1)
return sp.stats.norm.ppf(data)
def rank_int(data, c=3.0 / 8):
if data.ndim > 1:
do_reshape = True
dims = data.shape
data = data.flatten()
else:
do_reshape = False
# Set seed
np.random.seed(0)
# Get rank, ties are averaged
data = sp.stats.rankdata(data, method="average")
# Convert rank to normal distribution
transformed = rank_to_normal(data=data, c=c, n=len(data))
if do_reshape:
transformed = transformed.reshape(dims)
return transformed
def matrix_normalization(A, version=None, c=1):
'''
Args:
A: np.array (n_parcels, n_parcels)
adjacency matrix from structural connectome
version: str
options: 'continuous' or 'discrete'. default=None
string variable that determines whether A is normalized for a continuous-time system or a discrete-time
system. If normalizing for a continuous-time system, the identity matrix is subtracted.
c: int
normalization constant, default=1
Returns:
A_norm: np.array (n_parcels, n_parcels)
normalized adjacency matrix
'''
if version == 'continuous':
print("Normalizing A for a continuous-time system")
elif version == 'discrete':
print("Normalizing A for a discrete-time system")
elif version == None:
raise Exception("Time system not specified. "
"Please nominate whether you are normalizing A for a continuous-time or a discrete-time system "
"(see function help).")
# singular value decomposition
u, s, vt = svd(A)
# Matrix normalization for discrete-time systems
A_norm = A / (c + s[0])
if version == 'continuous':
# for continuous-time systems
A_norm = A_norm - np.eye(A.shape[0])
return A_norm
def get_p_val_string(p_val):
if p_val == 0.0:
p_str = "-log10($\mathit{:}$)>25".format('{p}')
elif p_val < 0.001:
p_str = '$\mathit{:}$ < 0.001'.format('{p}')
elif p_val >= 0.001 and p_val < 0.05:
p_str = '$\mathit{:}$ < 0.05'.format('{p}')
else:
p_str = "$\mathit{:}$ = {:.3f}".format('{p}', p_val)
return p_str
def expand_states(states):
"""
This function takes a list of integer values that designate a distinct set of binary brain states and returns
a pair of matrices (x0_mat, xf_mat) that encode all possible pairwise transitions between those states
Args:
states: numpy array (N x 1)
a vector of integers that designate which regions belong to which states. Note, regions cannot belong to
more than one brain state. For example, assuming N = 12, if:
states = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
then the first 4 regions belong to state 0, the next 4 to state 1, and the final 4 to state 2
Returns:
x0_mat: boolean array (N, n_transitions)
boolean array of initial states. In each column, True designates regions belonging to a given initial state
xf_mat: boolean array (N, n_transitions)
boolean array of target states. In each column, True designates regions belonging to a given target state
"""
unique, counts = np.unique(states, return_counts=True)
n_parcels = len(states)
n_states = len(unique)
x0_mat = np.zeros((n_parcels, 1)).astype(bool)
xf_mat = np.zeros((n_parcels, 1)).astype(bool)
for i in np.arange(n_states):
for j in np.arange(n_states):
x0 = states == i
xf = states == j
x0_mat = np.append(x0_mat, x0.reshape(-1, 1), axis=1)
xf_mat = np.append(xf_mat, xf.reshape(-1, 1), axis=1)
x0_mat = x0_mat[:, 1:]
xf_mat = xf_mat[:, 1:]
return x0_mat, xf_mat
``` |
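A short sketch of `expand_states`, whose output is what `minimum_energy_fast` in energies.py expects for its x0_mat/xf_mat arguments; the state assignment below is illustrative.
```python
import numpy as np
from network_control.utils import expand_states  # import path assumed from the file header above
states = np.array([0, 0, 1, 1, 2, 2])            # 6 regions grouped into 3 binary brain states
x0_mat, xf_mat = expand_states(states)
print(x0_mat.shape)                              # (6, 9): one column per ordered pair of the 3 states
print(x0_mat.astype(int))                        # column j marks the regions of the j-th initial state
print(xf_mat.astype(int))                        # column j marks the regions of the j-th target state
# np.sum(minimum_energy_fast(A_norm, T, B, x0_mat, xf_mat), axis=0).reshape(3, 3) would then give
# the state-by-state transition-energy matrix described in the minimum_energy_fast docstring.
```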
{
"source": "JohannesWiesner/demetrius",
"score": 3
} |
#### File: JohannesWiesner/demetrius/demetrius.py
```python
import json
import os
import sys
import shutil
import pandas as pd
import argparse
from spinner import Spinner
# TO-DO: Allow user to decide to save information for all found files. This list
# should either be placed in the same directory as the destination directories or
# separately in each of destination directory (showing only the source files for this
# directory)
# TO-DO: Let the user decide over the fashion of added indices (e.g. '_1' or ' (1)')
# in _get_dst_dirs_df
def _get_suffixes_tuple(which_suffixes='all'):
'''Get a tuple of suffixes based on the attached .json file file or based
on a user input.
Parameters
----------
which_suffixes : str, list, tuple, optional
If str and 'all', all suffixes from the .json file will be used for the search.
If str and not 'all', a single file suffix is provided (e.g. '.png'.).
If list, the strings represent file subsets of the .json file (e.g. ['video','bitmap']).
If tuple, multiple file suffixes are provided (e.g. ('.png','.jpeg')).
Default: 'all'
Returns
-------
suffixes : str,tuple of str
Returns a single suffix or a tuple of file suffixes that should be used
for the file search.
'''
if isinstance(which_suffixes, str) and which_suffixes == 'all':
suffixes_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'suffixes.json')
with open(suffixes_path) as json_file:
suffixes_dict = json.load(json_file)
suffixes = tuple([suffix for suffixes in suffixes_dict.values() for suffix in suffixes])
elif isinstance(which_suffixes, list):
suffixes_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'suffixes.json')
with open(suffixes_path) as json_file:
suffixes_dict = json.load(json_file)
suffixes_dict = {key: suffixes_dict[key] for key in which_suffixes}
suffixes = tuple([suffix for suffixes in suffixes_dict.values() for suffix in suffixes])
elif isinstance(which_suffixes, (str, tuple)):
suffixes = which_suffixes
return suffixes
# FIXME: Is there a better test to check if files are broken? Currently
# only os.path.exists is implemented, but I still get image files
# that can't be opened using IrfanView
# FIXME: Currently, there's lots of repetition because of the verbose
# argument. Maybe it would be better to work with a decorator in the run() function,
# like the one from the halo package?
def _find_files(src_dir,suffixes,exclude_dirs=None,verbose=False):
'''Search for files in a source directory based on one or multiple
file suffixes. This function will only append a found file to the list
if it exists (using os.path.exists) wich serves as a minimal test for
checking if a file is broken.
Parameters
----------
src_dir : str
The source directory.
suffixes : str, tuple of strs
A single file suffix or tuple of file suffixes that should be
searched for (e.g. '.jpeg' or ('.jpeg','.png')).
exclude_dirs : str, list of str, None
Name of a single directory or a list of directory names that should be ignored when searching for files.
All of the specified directories and their children directories will be
ignored (Default: None)
verbose : bool
If true, print spinning cursor
Returns
-------
filepath_list : list
A list of all found filepaths
'''
filepath_list = []
if verbose == True:
with Spinner('Searching for files '):
for (paths,dirs,files) in os.walk(src_dir):
if exclude_dirs:
dirs[:] = [d for d in dirs if d not in exclude_dirs]
for file in files:
filepath = os.path.join(paths,file)
if filepath.lower().endswith(suffixes) and os.path.exists(filepath):
filepath_list.append(filepath)
if verbose == False:
for (paths,dirs,files) in os.walk(src_dir):
if exclude_dirs:
dirs[:] = [d for d in dirs if d not in exclude_dirs]
for file in files:
filepath = os.path.join(paths,file)
if filepath.lower().endswith(suffixes) and os.path.exists(filepath):
filepath_list.append(filepath)
if not filepath_list:
sys.stdout.write('Did not find any files based on the given suffixes')
sys.exit()
return filepath_list
def _get_dst_dirs_df(filepath_list,dst_dir):
'''Create a pandas.DataFrame holding the source filepaths as one column and
corresponding destination directories as another column. In case there are
multiple source directories with the same name, the function will add indices to the
destination directories to maintain unique folder names.
Parameters
----------
filepath_list : list
The list of all found files as returned from _find_files.
dst_dir : str
Path to the master destination directory.
Returns
-------
dst_dirs_df : pd.DataFrame
A data frame that maps each filepath to its corresponding destination
directory.
'''
# create data frame with filepath as column
dst_dirs_df = pd.DataFrame({'filepath':filepath_list})
# get path to parent directory for each source file
dst_dirs_df['src_dir_path'] = dst_dirs_df['filepath'].map(os.path.dirname)
# get only the name of the parent for each source file
dst_dirs_df['src_dir_name'] = dst_dirs_df['src_dir_path'].map(os.path.basename)
# create destination path directory
def create_dst_dir_path(row):
return os.path.join(dst_dir,row['src_dir_name'])
dst_dirs_df['dst_dir_path'] = dst_dirs_df.apply(create_dst_dir_path,axis=1)
# find literal duplicates and modify the respective destination directories
for _,dir_name in dst_dirs_df.groupby('src_dir_name'):
if not dir_name['src_dir_path'].nunique() == 1:
for idx,(_,src_dir_path) in enumerate(dir_name.groupby('src_dir_path'),start=1):
dst_dirs_df.loc[src_dir_path.index,'dst_dir_path'] = dst_dirs_df.loc[src_dir_path.index,'dst_dir_path'] + '_' + str(idx)
# find pseudo duplicates and modify the respective destination directories
dst_dirs_df['dst_dir_path_lower_case'] = dst_dirs_df['dst_dir_path'].map(str.lower)
for _,dst_dir_path in dst_dirs_df.groupby('dst_dir_path_lower_case'):
if dst_dir_path['src_dir_path'].nunique() != 1:
for idx,(_,dir_name) in enumerate(dst_dir_path.groupby('src_dir_name'),start=1):
dst_dirs_df.loc[dir_name.index,'dst_dir_path'] = dst_dirs_df.loc[dir_name.index,'dst_dir_path'] + ' (' + str(idx) + ')'
return dst_dirs_df
def _copy_files(dst_dirs_df,verbose=False):
'''Copy files based on a list of source filepaths and a corresponding
list of destination directories.
Parameters
----------
dst_dirs_df : pd.DataFrame
A dataframe that holds a column with the source filepaths and one
column specifying the destination directories.
verbose : boolean, optional
If True, copy progress is estimated from the total bytes
copied so far and printed to the console (Default: False).
Returns
-------
None.
'''
for dst_dir in set(dst_dirs_df['dst_dir_path']):
os.makedirs(dst_dir)
if verbose == True:
dst_dirs_df['filesize'] = dst_dirs_df['filepath'].map(os.path.getsize)
bytes_total = dst_dirs_df['filesize'].sum()
bytes_copied = 0
for idx,(file,dst_dir) in enumerate(zip(dst_dirs_df['filepath'],dst_dirs_df['dst_dir_path'])):
shutil.copy2(file,dst_dir)
bytes_copied += dst_dirs_df['filesize'][idx]
sys.stdout.write(f"Copied ~ {round(bytes_copied / bytes_total * 100,2)}% of files\r")
elif verbose == False:
for file,dst_dir in zip(dst_dirs_df['filepath'],dst_dirs_df['dst_dir_path']):
shutil.copy2(file,dst_dir)
def run(src_dir,dst_dir,which_suffixes='all',exclude_dirs=None,verbose=False):
'''Find and copy files with their respective parent directories
from a source directory to a destination directory
Parameters
----------
src_dir : str
Path to the source directory.
dst_dir : str
Path to the destination directory.
which_suffixes : str, list, tuple, optional
If str and 'all', all suffixes from the .json file will be used for the search.
If str and not 'all', a single file suffix is provided (e.g. '.png').
If list, the strings represent file subsets of the .json file (e.g. ['video','bitmap']).
If tuple, multiple file suffixes are provided (e.g. ('.png','.jpeg')).
(Default: 'all').
exclude_dirs : str, list of str, None
Name of a single directory or a list of directory names that should be ignored when searching for files.
All of the specified directories and their children directories will be
ignored (Default: None).
verbose: bool
If True, prints user information to console (Default: False).
Returns
-------
None.
'''
# get OS conform separator style
src_dir = os.path.normpath(src_dir)
dst_dir = os.path.normpath(dst_dir)
# check if the input directories exist
if not os.path.isdir(src_dir):
raise NotADirectoryError('The specified source directory does not exist')
if not os.path.isdir(dst_dir):
raise NotADirectoryError('The specified destination directory does not exist')
# get suffixes
suffixes = _get_suffixes_tuple(which_suffixes)
# find files
if exclude_dirs and isinstance(exclude_dirs,str):
exclude_dirs = [exclude_dirs]  # wrap the single name in a list; list() would split the string into characters
filepath_list = _find_files(src_dir,suffixes,exclude_dirs,verbose)
# get data frame with destination directories
dst_dirs_df = _get_dst_dirs_df(filepath_list,dst_dir)
# copy files
_copy_files(dst_dirs_df,verbose)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Find and copy files from \
a source directory to a \
destination directory \
while preserving the original \
parent directories.')
# add required arguments source and destination directory
parser.add_argument('-src','--source_directory',type=str,required=True,help='The source directory which should be searched for files')
parser.add_argument('-dst','--destination_directory',type=str,required=True,help='The destination directory where the files should be copied to within their parent directories')
# add a mutually exlusive group (user should either provide one or multiple file suffixes
# themselves or they should choose from already provided categories (e.g. bitmap,video))
# if they neither provide --suffixes or --categories the default 'all' will be used
suffix_arg_group = parser.add_mutually_exclusive_group()
suffix_arg_group.add_argument('-sfx','--suffixes',type=str,nargs='+',help='File suffixes which should be used for the search. Mutually exlusive with -cat argument')
suffix_arg_group.add_argument('-cat','--categories',type=str,nargs='+',choices=['bitmap','video'],help='Broader file categories (e.g. video or bitmap files) that define the final set of file suffixes. Mutually exclusive with -sfx argument')
# add verbosity argument
parser.add_argument('-v','--verbose',action='store_true',help='Show progress information on finding and copying files when demetrius is run')
# add exclude dirs argument
parser.add_argument('-e','--exclude',type=str,nargs='+',help='One or multiple names of directories that should be ignored when searching for files. All of the specified directories and their children directories will be ignored')
# parse arguments
args = parser.parse_args()
if args.suffixes == None and args.categories == None:
which_suffixes = 'all'
elif args.categories:
which_suffixes = args.categories
elif args.suffixes:
which_suffixes = tuple(args.suffixes)
# run demetrius
run(src_dir=args.source_directory,
dst_dir=args.destination_directory,
which_suffixes=which_suffixes,
exclude_dirs=args.exclude,
verbose=args.verbose)
``` |
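A hedged usage sketch for the module above; the paths are placeholders, and the sketched `suffixes.json` layout is an assumption based on how `_get_suffixes_tuple` reads it (a mapping from category names such as 'bitmap'/'video' to lists of suffixes).
```python
from demetrius import run  # assumes this file is saved as demetrius.py and is on the path
run(src_dir='/path/to/source',           # placeholder paths
    dst_dir='/path/to/destination',
    which_suffixes=('.png', '.jpeg'),    # or 'all', or a category list such as ['bitmap']
    exclude_dirs=['node_modules'],       # directory names to skip while walking
    verbose=True)
# Roughly equivalent command line, per the argparse setup above:
#   python demetrius.py -src /path/to/source -dst /path/to/destination -sfx .png .jpeg -e node_modules -v
# A plausible (assumed) shape for the suffixes.json read by _get_suffixes_tuple:
#   {"bitmap": [".png", ".jpeg", ".bmp"], "video": [".mp4", ".avi"]}
```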
{
"source": "JohannesWiesner/nct_tutorial",
"score": 3
} |
#### File: JohannesWiesner/nct_tutorial/app.py
```python
import json
import numpy as np
import pandas as pd
import networkx as nx
import dash
import dash_cytoscape as cyto
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_table
import itertools
import operator
import plotly.express as px
from network_control.utils import matrix_normalization
from network_control.energies import minimum_input,optimal_input
from nct_utils import state_trajectory
###############################################################################
## Set Default Data ###########################################################
###############################################################################
# set seed
np.random.seed(28)
# create a default adjacency matrix
A = np.array([[0, 1, 2, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 3, 0, 0, 0, 0, 0],
[2, 0, 0, 4, 0, 0, 0, 0, 0],
[1, 3, 4, 0, 5, 0, 0, 0, 0],
[0, 0, 0, 5, 0, 6, 0, 0, 0],
[0, 0, 0, 0, 6, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 0]])
# create default (random) x0 and xf (between 0 and 1)
states_df = pd.DataFrame({'x0':np.round(np.random.rand(len(A)),2),'xf':np.round(np.random.rand(len(A)),2)})
states_df.reset_index(inplace=True)
###############################################################################
## Dash App ###################################################################
###############################################################################
## Topology-Modification ######################################################
# FIXME: transform networkx coordinates into dash/plotly space
# - Positions could however also be irrelevant here, because layout component
# from dash can also decide automatically over node positions
def from_A_to_elements(A):
'''Create a list of elements from a numpy adjacency matrix that can be inter-
preted by dash_cytoscape.Cytoscape. The following steps are implemented from
https://community.plotly.com/t/converting-networkx-graph-object-into-cytoscape-format/23224/2
'''
# create graph object
G = nx.Graph(A)
# get node positions
pos = nx.spring_layout(G)
# convert networkx to cytoscape layout
cy = nx.readwrite.json_graph.cytoscape_data(G)
# Add the dictionary key 'label' to the node dict (this is a required attribute for dash)
# Delete the key 'value' from node dict (not needed)
# Delete the key 'name' from node dict (not needed)
# Add the dictionary key 'controller' to the node dict and set to True
for node_dict in cy['elements']['nodes']:
for _,d in node_dict.items():
d['label'] = d.pop('value')
del d['name']
d['controller'] = True
d['constrain'] = True
# NOTE: in cytoscape, all ids of the nodes must be strings, that's why
# we convert the edge ids also to strings (but check if this is really
# necessary)
for edge_dict in cy['elements']['edges']:
for _,d in edge_dict.items():
d['source'] = str(d['source'])
d['target'] = str(d['target'])
# Add the positions you got from as a value for data in the nodes portion of cy
# NOTE: This might be not necessary, as positions can be automatically
# determined in the layout attribute from cyto.Cytoscape (see FIXME above)
for n,p in zip(cy['elements']['nodes'],pos.values()):
n['pos'] = {'x':p[0],'y':p[1]}
# Take the results and write them to a list
elements = cy['elements']['nodes'] + cy['elements']['edges']
return elements
# NOTE: What's that utils module? https://dash.plotly.com/cytoscape/reference
def get_edge_dicts(elements):
'''Extract all edge dictionaries from elements. Edge dicts are
identified by their 'weight' key'''
edge_dicts = []
for d in elements:
if 'weight' in d['data']:
edge_dicts.append(d)
return edge_dicts
# NOTE: What's that utils module? https://dash.plotly.com/cytoscape/reference
def get_node_dicts(elements):
'''Extract all node dictionaries from elements. Node dicts are
identified by not having a 'weight' key'''
node_dicts = []
for d in elements:
if not 'weight' in d['data']:
node_dicts.append(d)
return node_dicts
def add_edges(selectedNodeData,edge_weight,elements):
'''For each combination of selected nodes, check if this combination is connected
by an edge. If not, create an edge dict for that combination and modify the elements list'''
edge_dicts = get_edge_dicts(elements)
edge_ids = [(d['data']['source'],d['data']['target']) for d in edge_dicts]
# get a list of ids of all nodes that user has currently selected and that
# should be connected by an edge. Sort the list alphanumerically (that ensures
# that we get only get combinations of source and target ids where source id
# is always the lower integer)
node_ids = [d['id'] for d in selectedNodeData]
node_ids.sort()
# create all pair-wise combinations of the selected nodes
source_and_target_ids = list(itertools.combinations(node_ids,2))
# for each source and target tuple, check if this edge already exists. If not,
# create a new edge dict and add it to elements
for (source,target) in source_and_target_ids:
if not (source,target) in edge_ids:
new_edge = {'data':{'weight':edge_weight,'source':source,'target':target}}
elements.append(new_edge)
return elements
def drop_edges(selectedEdgeData,elements):
'''Drop an input list of selected edges from cytoscape elements'''
# get source and target ids for all currently selected edges
source_and_target_ids = [(d['source'],d['target']) for d in selectedEdgeData]
# iterate over all dictionaries in elements, identify edge dicts by their
# 'weight' key and check again if this edge dict belongs to the currently selected
# edges. If yes, add its index to the list of to-be-dropped dictionaries.
drop_these_dicts = []
for idx,d in enumerate(elements):
if 'weight' in d['data']:
if (d['data']['source'],d['data']['target']) in source_and_target_ids:
drop_these_dicts.append(idx)
# drop selected edge dictionaries from elements
elements = [i for j,i in enumerate(elements) if j not in drop_these_dicts]
return elements
def get_edge_min_max(elements):
'''Get minimum and maximum edge weights'''
# get all edges from elements
edge_dicts = get_edge_dicts(elements)
# find minimum and maximum weights
edge_weights = [d['data']['weight'] for d in edge_dicts]
weights_max = max(edge_weights)
weights_min = min(edge_weights)
return weights_min,weights_max
# FIXME: Delete this function if it's not necessary
def set_edge_width(elements,edge_weight):
'''Return the edge width for a single edge'''
weights_min,weights_max = get_edge_min_max(elements)
min_width = 1 # constant (selected by me)
max_width = 10 # constant (selected by me)
edge_width = min_width + ((max_width - min_width) / (weights_max - weights_min)) * (edge_weight - weights_min)
return edge_width
def set_edge_weights(selectedEdgeData,edge_weight,elements):
'''Modify the weights of the selected edges'''
# get source and target ids for all currently selected edges
source_and_target_ids = [(d['source'],d['target']) for d in selectedEdgeData]
# iterate over all dictionaries in elements, identify edge dicts by their
# 'weight' key and check again if this edge dict belongs to the currently selected
# edges. If yes, add its index to list of to be dropped dictionaires.
modify_these_dicts = []
for idx,d in enumerate(elements):
if 'weight' in d['data']:
if (d['data']['source'],d['data']['target']) in source_and_target_ids:
modify_these_dicts.append(idx)
# set the new weight on the selected edge dictionaries
for i in modify_these_dicts:
elements[i]['data']['weight'] = edge_weight
return elements
## Figure Plotting ###########################################################
def from_elements_to_A(elements):
'''Extract nodes and edges from current elements and convert them to
adjacency matrix
'''
# FIXME: This is inefficient, we iterate over the same list twice (see #8)
edge_dicts = get_edge_dicts(elements)
node_dicts = get_node_dicts(elements)
edges = [(d['data']['source'],d['data']['target'],d['data']['weight']) for d in edge_dicts]
nodes = [d['data']['id'] for d in node_dicts]
n_nodes = len(nodes)
A = np.zeros((n_nodes,n_nodes))
for edge in edges:
i = int(edge[0])
j = int(edge[1])
weight = edge[2]
A[i,j] = weight
A[j,i] = weight
return A
# FIXME: lots of repetitions to from_elements_to_S
def from_elements_to_B(elements):
'''Extract nodes from current elements, check which nodes are selected
as controllers and get a corresponding control matrix B that can be
fed to control_package functions.
'''
# get a list of all nodes from current elements (get their ID and their
# controller attribute)
node_dicts = get_node_dicts(elements)
nodes = [(d['data']['id'],d['data']['controller']) for d in node_dicts]
# sort nodes by their ids and get controller attribute
nodes.sort(key=operator.itemgetter(0))
c_attributes = [n[1] for n in nodes]
# create B matrix
B = np.zeros(shape=(len(nodes),len(nodes)))
for idx,c in enumerate(c_attributes):
if c == True:
B[idx,idx] = 1
return B
# FIXME: lots of repetitions to from_elements_to_B
def from_elements_to_S(elements):
'''Extract nodes from current elements, check which nodes are selected
to be constrained and get a corresponding matrix S that can be
fed to control_package functions.
'''
# get a list of all nodes from current elements (get their ID and their
# controller attribute)
node_dicts = get_node_dicts(elements)
nodes = [(d['data']['id'],d['data']['constrain']) for d in node_dicts]
# sort nodes by their ids and get controller attribute
nodes.sort(key=operator.itemgetter(0))
constrain_attributes = [n[1] for n in nodes]
# create B matrix
S = np.zeros(shape=(len(nodes),len(nodes)))
for idx,constrain in enumerate(constrain_attributes):
if constrain == True:
S[idx,idx] = 1
return S
def get_state_trajectory_fig(A,x0,T,c):
'''Generate a plotly figure that plots a state trajectory using an input
matrix, a source state, a time horizon and a normalization constant'''
# simulate state trajectory
x = state_trajectory(A=A,xi=x0,T=T)
# create figure
x_df = pd.DataFrame(x).reset_index().rename(columns={'index':'Node'})
x_df = x_df.melt(id_vars='Node',var_name='t',value_name='Value')
fig = px.line(x_df,x='t',y='Value',color='Node')
fig.update_layout(title='x(t+1) = Ax(t)',title_x=0.5)
return fig
# FIXME: Plotting speed could probably be optimized, take a look at the niplot module
# TO-DO: n_err should also be visualized somewhere
def get_minimum_energy_figure(A,T,B,x0,xf,c):
# compute minimum energy
x,u,n_err = minimum_input(A,T,B,x0,xf)
# create figure (FIXME: could the following be shorted?)
x_df = pd.DataFrame(x).reset_index().rename(columns={'index':'t'})
x_df = x_df.melt(id_vars='t',var_name='Node',value_name='Value')
x_df['Type'] = 'x'
u_df = pd.DataFrame(u).reset_index().rename(columns={'index':'t'})
u_df = u_df.melt(id_vars='t',var_name='Node',value_name='Value')
u_df['Type'] = 'u'
fig_df = pd.concat([x_df,u_df])
fig = px.line(fig_df,x='t',y='Value',color='Node',facet_row='Type')
fig.update_layout(title='Minimum Control Energy',title_x=0.5)
return fig
# FIXME: Plotting speed could probably be optimized, take a look at the niplot module
# TO-DO: n_err should also be visualized somewhere
def get_optimal_energy_figure(A,T,B,x0,xf,rho,S,c):
# compute optimal energy
x,u,n_err = optimal_input(A,T,B,x0,xf,rho,S)
# create figure (FIXME: could the following be shorted?)
x_df = pd.DataFrame(x).reset_index().rename(columns={'index':'t'})
x_df = x_df.melt(id_vars='t',var_name='Node',value_name='Value')
x_df['Type'] = 'x'
u_df = pd.DataFrame(u).reset_index().rename(columns={'index':'t'})
u_df = u_df.melt(id_vars='t',var_name='Node',value_name='Value')
u_df['Type'] = 'u'
fig_df = pd.concat([x_df,u_df])
fig = px.line(fig_df,x='t',y='Value',color='Node',facet_row='Type')
fig.update_layout(title='Optimal Control Energy',title_x=0.5)
return fig
###############################################################################
## Dash App ###################################################################
###############################################################################
# run dash and take in the list of elements
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
# create custom style sheet
# show node labels
# Style the border of the nodes depending on if they are controllers
# and constrained
stylesheet = [{'selector':'node','style':{'content':'data(label)'}},
{'selector': '[?controller][?constrain]','style':{'border-width':2,'border-color':'black','border-style':'solid'}},
{'selector':'[?controller][!constrain]','style':{'border-width':2,'border-color':'black','border-style':'dashed'}},
{'selector':'[!controller][?constrain]','style':{'border-width':2,'border-color':'black','border-style':'dotted'}},
{'selector':'[!controller][!constrain]','style':{}}
]
app.layout = html.Div([
dbc.Row([
dbc.Col([
# cytoscape graph
cyto.Cytoscape(
id='cytoscape-compound',
layout={'name':'cose'},
elements=from_A_to_elements(A), # initialize elements with a function call
stylesheet=stylesheet,
style={'width':'100%','height':'500px'})
],width=6),
dbc.Col([
# x0/xf data table
dbc.Row([dash_table.DataTable(id='states-table',
columns=[{'id':'index','name':'index','type':'numeric'},
{'id':'x0','name':'x0','type':'numeric','editable':True},
{'id':'xf','name':'xf','type':'numeric','editable':True}],
data=states_df.to_dict('records'),
editable=False)],justify='center'),
dbc.Row([html.Button('Switch columns',id='switch-state-cols',n_clicks=0,style={'width':'100%'})],justify='center')],
width=3),
dbc.Col([
dbc.Row([dbc.Col([dcc.Input(id='c',type="number",debounce=True,placeholder='Normalization Constant (c)',value=1)])]),
dbc.Row([dbc.Col([dcc.Input(id='T',type="number",debounce=True,placeholder='Time Horizon (T)',value=3)]),dbc.Col(html.Button('Plot Trajectories',id='plot-button',n_clicks=0))]),
dbc.Row([dbc.Col([dcc.Input(id='rho',type="number",debounce=True,placeholder='rho',value=1)])]),
dbc.Row([dbc.Col([html.Button('(Dis-)Connect Nodes',id='edge-button',n_clicks=0)])]),
dbc.Row([dbc.Col([html.Button('(Un-)set control',id='controll-button',n_clicks=0)])]),
dbc.Row([dbc.Col([html.Button('(Un-)set constraint',id='constrain-button',n_clicks=0)])]),
dbc.Row([dbc.Col([dcc.Input(id='edge-weight',type='number',debounce=True,placeholder='Edge Weight',value=1)]),dbc.Col([html.Button('Set Edge Weight',id='edge-weight-button',n_clicks=0)])])],
width=3)
],align='end'),
dbc.Row([
# figures
dbc.Col([dcc.Graph(id='state-trajectory-fig',figure={})],width=4),
dbc.Col([dcc.Graph(id='minimum-energy-fig',figure={})],width=4),
dbc.Col([dcc.Graph(id='optimal-energy-fig',figure={})],width=4)
],className="g-0"),
dbc.Row([
# debugging fields (can be deleted when not necessary anymore)
dbc.Col([html.Label(children='Selected Nodes:'),html.Pre(id='selected-node-data-json-output')],width=3),
dbc.Col([html.Label(children='Selected Edges:'),html.Pre(id='selected-edge-data-json-output')],width=3),
dbc.Col([html.Label(children='Current Elements:'),html.Pre(id='current-elements')],width=3),
dbc.Col([html.Label(children='Current Stylesheet:'),html.Pre(id='current-stylesheet')],width=3)
],className="g-0")
])
## Just for debugging (can be deleted when not necessary anymore ##############
@app.callback(Output('selected-node-data-json-output','children'),
Input('cytoscape-compound','selectedNodeData'),
prevent_initial_call=True)
def displaySelectedNodeData(data):
return json.dumps(data,indent=2)
@app.callback(Output('selected-edge-data-json-output','children'),
Input('cytoscape-compound','selectedEdgeData'),
prevent_initial_call=True)
def displaySelectedEdgeData(data):
return json.dumps(data,indent=2)
@app.callback(Output('current-elements','children'),
Input('cytoscape-compound','elements'),
prevent_initial_call=True)
def displayCurrentElements(elements):
return json.dumps(elements,indent=2)
@app.callback(Output('current-stylesheet','children'),
Input('cytoscape-compound','stylesheet'),
prevent_initial_call=True)
def displayCurrentStylesheet(elements):
return json.dumps(elements,indent=2)
## Callback Functions #########################################################
@app.callback(Output('cytoscape-compound','elements'),
Input('edge-button','n_clicks'),
Input('controll-button','n_clicks'),
Input('constrain-button','n_clicks'),
Input('edge-weight-button','n_clicks'),
State('edge-weight','value'),
State('cytoscape-compound','selectedNodeData'),
State('cytoscape-compound','selectedEdgeData'),
State('cytoscape-compound','elements'),
prevent_initial_call=True)
def updateElements(edge_button,controll_button,constrain_button,edge_weight_button,edge_weight,selectedNodeData,selectedEdgeData,elements):
print('UpdateElements was fired')
# check which button was triggered
ctx = dash.callback_context
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
# add or delete edges
if button_id == 'edge-button':
# FIXME: Is that true? Couldn't we merge add_edges and drop_edges?
# user must exclusively selecte either at least two nodes or one edge
# but not both nodes and edges at the same time
if len(selectedNodeData) > 0 and len(selectedEdgeData) > 0:
return elements
if len(selectedNodeData) >= 2 and len(selectedEdgeData) == 0:
return add_edges(selectedNodeData,edge_weight,elements)
if len(selectedNodeData) == 0 and len(selectedEdgeData) >= 1:
return drop_edges(selectedEdgeData,elements)
else:
return elements
# modify edge weights
elif button_id == 'edge-weight-button':
return set_edge_weights(selectedEdgeData,edge_weight,elements)
# set or unset controll nodes
elif button_id == 'controll-button':
# get a list of ids of all nodes that user has currently selected and for
# which the control state should be switched
node_ids = [d['id'] for d in selectedNodeData]
# identify node dicts in elements and toggle their 'controller' attribute
# if they are part of the selected nodes
node_dicts = get_node_dicts(elements)
for d in node_dicts:
if d['data']['id'] in node_ids:
d['data']['controller'] = not d['data']['controller']
return elements
elif button_id == 'constrain-button':
# get a list of ids of all nodes that the user has currently selected and for
# which the constrain state should be switched
node_ids = [d['id'] for d in selectedNodeData]
# identify node dicts in elements and toggle their 'constrain' attribute
# if they are part of the selected nodes
node_dicts = get_node_dicts(elements)
for d in node_dicts:
if d['data']['id'] in node_ids:
d['data']['constrain'] = not d['data']['constrain']
return elements
# TODO: This callback is currently always fired when elements are changed.
# But it actually only needs to be fired when either the minimum or maximum
# edge weights change
@app.callback(Output('cytoscape-compound','stylesheet'),
Input('cytoscape-compound','elements'),
State('cytoscape-compound','stylesheet'))
def updateEdgeStyle(elements,stylesheet):
weights_min,weights_max = get_edge_min_max(elements)
# add (or overwrite) edge style
if not any([d['selector'] == 'edge' for d in stylesheet]):
stylesheet.append({'selector':'edge','style':{'width':f"mapData(weight,{weights_min},{weights_max},1,5)"}})
else:
edge_style_idx = [i for i,d in enumerate(stylesheet) if d['selector'] == 'edge'][0]
stylesheet[edge_style_idx] = {'selector':'edge','style':{'width':f"mapData(weight,{weights_min},{weights_max},1,5)"}}
return stylesheet
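# Illustrative sketch added for clarity (not part of the original app): the
# stylesheet entry assembled by updateEdgeStyle uses Cytoscape's mapData(),
# which maps the edge 'weight' attribute from [w_min, w_max] linearly onto a
# width between 1 and 5 pixels. The default range below is an assumed example.
def example_edge_width_style(w_min=0, w_max=10):
    return {'selector': 'edge',
            'style': {'width': f"mapData(weight,{w_min},{w_max},1,5)"}}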
@app.callback(Output('states-table','data'),
Input('switch-state-cols','n_clicks'),
State('states-table','derived_virtual_data'),
prevent_initial_call=True)
def switchStateColums(n_clicks,states_data):
print(states_data)
states_data_copy = states_data.copy()
for d in states_data_copy:
d['x0'],d['xf'] = d['xf'],d['x0']
return states_data_copy
@app.callback(Output('state-trajectory-fig','figure'),
Output('minimum-energy-fig','figure'),
Output('optimal-energy-fig','figure'),
Input('plot-button','n_clicks'),
State('cytoscape-compound','elements'),
State('T','value'),
State('c','value'),
State('rho','value'),
State('states-table','derived_virtual_data'),
prevent_initial_call=True)
def updateFigures(n_clicks,elements,T,c,rho,states_data):
# debugging
print('UpdateFigures was fired')
# digest data for network_control package
A = from_elements_to_A(elements)
B = from_elements_to_B(elements)
S = from_elements_to_S(elements)
# debugging
print(f"This is A:\n{A}\n")
print(f"This is B:\n{B}\n")
print(f"This is S:\n{S}\n")
# normalize A
# FIXME: It should also be possible to not normalize the matrix (in order
# to get an intuition for what happens when you do not apply normalization)
A = matrix_normalization(A,c)
# FIXME: Currently we use reshape(-1,1), but only because of a bug in the
# network_control package. When this is fixed, we don't need reshape anymore
x0 = np.array([d['x0'] for d in states_data]).reshape(-1,1)
xf = np.array([d['xf'] for d in states_data]).reshape(-1,1)
# debugging
print(f"This is x0:\n{x0}\n")
print(f"This is xf:\n{xf}\n")
fig_1 = get_state_trajectory_fig(A,x0=x0,T=T,c=c)
fig_2 = get_minimum_energy_figure(A=A,T=T,B=B,x0=x0,xf=xf,c=c)
fig_3 = get_optimal_energy_figure(A=A,T=T,B=B,x0=x0,xf=xf,rho=rho,S=S,c=c)
return fig_1,fig_2,fig_3
if __name__ == '__main__':
app.run_server(debug=True,use_reloader=False)
``` |
{
"source": "JohannesWiesner/nilearn",
"score": 2
} |
#### File: nilearn/datasets/atlas.py
```python
import os
import warnings
import xml.etree.ElementTree
from tempfile import mkdtemp
import json
import shutil
import nibabel as nb
import numpy as np
from numpy.lib import recfunctions
from sklearn.utils import Bunch
from .utils import _get_dataset_dir, _fetch_files, _get_dataset_descr
from .._utils import check_niimg
from .._utils.compat import _basestring
from ..image import new_img_like, get_data
_TALAIRACH_LEVELS = ['hemisphere', 'lobe', 'gyrus', 'tissue', 'ba']
def fetch_atlas_craddock_2012(data_dir=None, url=None, resume=True, verbose=1):
"""Download and return file names for the Craddock 2012 parcellation
The provided images are in MNI152 space.
Parameters
----------
data_dir: string
directory where data should be downloaded and unpacked.
url: string
url of file to download.
resume: bool
whether to resumed download of a partly-downloaded file.
verbose: int
verbosity level (0 means no message).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, keys are:
scorr_mean, tcorr_mean,
scorr_2level, tcorr_2level,
random
References
----------
Licence: Creative Commons Attribution Non-commercial Share Alike
http://creativecommons.org/licenses/by-nc-sa/2.5/
Craddock, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. "A Whole Brain fMRI Atlas Generated via Spatially
Constrained Spectral Clustering". Human Brain Mapping 33, no 8 (2012):
1914-1928. doi:10.1002/hbm.21333.
See http://www.nitrc.org/projects/cluster_roi/ for more information
on this parcellation.
"""
if url is None:
url = "ftp://www.nitrc.org/home/groups/cluster_roi/htdocs" \
"/Parcellations/craddock_2011_parcellations.tar.gz"
opts = {'uncompress': True}
dataset_name = "craddock_2012"
keys = ("scorr_mean", "tcorr_mean",
"scorr_2level", "tcorr_2level",
"random")
filenames = [
("scorr05_mean_all.nii.gz", url, opts),
("tcorr05_mean_all.nii.gz", url, opts),
("scorr05_2level_all.nii.gz", url, opts),
("tcorr05_2level_all.nii.gz", url, opts),
("random_all.nii.gz", url, opts)
]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume,
verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict([('description', fdescr)] + list(zip(keys, sub_files)))
return Bunch(**params)
def fetch_atlas_destrieux_2009(lateralized=True, data_dir=None, url=None,
resume=True, verbose=1):
"""Download and load the Destrieux cortical atlas (dated 2009)
Parameters
----------
lateralized: boolean, optional
If True, returns an atlas with distinct regions for right and left
hemispheres.
data_dir: string, optional
Path of the data directory. Use to force data storage in a non-
standard location. Default: None (meaning: default)
url: string, optional
Download URL of the dataset. Overwrite the default URL.
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- Cortical ROIs, lateralized or not (maps)
- Labels of the ROIs (labels)
References
----------
<NAME>, et al. "Automatically parcellating the human cerebral
cortex." Cerebral cortex 14.1 (2004): 11-22.
<NAME>., et al. "A sulcal depth-based anatomical parcellation
of the cerebral cortex." NeuroImage 47 (2009): S151.
"""
if url is None:
url = "https://www.nitrc.org/frs/download.php/7739/"
url += "destrieux2009.tgz"
opts = {'uncompress': True}
lat = '_lateralized' if lateralized else ''
files = [
('destrieux2009_rois_labels' + lat + '.csv', url, opts),
('destrieux2009_rois' + lat + '.nii.gz', url, opts),
('destrieux2009.rst', url, opts)
]
dataset_name = 'destrieux_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume,
verbose=verbose)
params = dict(maps=files_[1], labels=np.recfromcsv(files_[0]))
with open(files_[2], 'r') as rst_file:
params['description'] = rst_file.read()
return Bunch(**params)
def fetch_atlas_harvard_oxford(atlas_name, data_dir=None,
symmetric_split=False,
resume=True, verbose=1):
"""Load Harvard-Oxford parcellations from FSL.
This function downloads Harvard Oxford atlas packaged from FSL 5.0
and stores atlases in NILEARN_DATA folder in home directory.
This function can also load Harvard Oxford atlas from your local directory
specified by your FSL installed path given in `data_dir` argument.
See documentation for details.
Parameters
----------
atlas_name: string
Name of atlas to load. Can be:
cort-maxprob-thr0-1mm, cort-maxprob-thr0-2mm,
cort-maxprob-thr25-1mm, cort-maxprob-thr25-2mm,
cort-maxprob-thr50-1mm, cort-maxprob-thr50-2mm,
sub-maxprob-thr0-1mm, sub-maxprob-thr0-2mm,
sub-maxprob-thr25-1mm, sub-maxprob-thr25-2mm,
sub-maxprob-thr50-1mm, sub-maxprob-thr50-2mm,
cort-prob-1mm, cort-prob-2mm,
sub-prob-1mm, sub-prob-2mm
data_dir: string, optional
Path of data directory where data will be stored. Optionally,
it can also be a FSL installation directory (which is dependent
on your installation).
Example, if FSL is installed in /usr/share/fsl/ then
specifying as '/usr/share/' can get you Harvard Oxford atlas
from your installed directory. Since we mimic the same root directory
as FSL, the atlas can be loaded directly from your installation.
symmetric_split: bool, optional, (default False).
If True, lateralized atlases of cort or sub with maxprob will be
returned. For subcortical types (sub-maxprob), we split every
symmetric region in left and right parts. Effectively doubles the
number of regions.
NOTE Not implemented for full probabilistic atlas (*-prob-* atlases).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, keys are:
- "maps": nibabel.Nifti1Image, 4D maps if a probabilistic atlas is
requested and 3D labels if a maximum probabilistic atlas was
requested.
- "labels": string list, labels of the regions in the atlas.
"""
atlas_items = ("cort-maxprob-thr0-1mm", "cort-maxprob-thr0-2mm",
"cort-maxprob-thr25-1mm", "cort-maxprob-thr25-2mm",
"cort-maxprob-thr50-1mm", "cort-maxprob-thr50-2mm",
"sub-maxprob-thr0-1mm", "sub-maxprob-thr0-2mm",
"sub-maxprob-thr25-1mm", "sub-maxprob-thr25-2mm",
"sub-maxprob-thr50-1mm", "sub-maxprob-thr50-2mm",
"cort-prob-1mm", "cort-prob-2mm",
"sub-prob-1mm", "sub-prob-2mm")
if atlas_name not in atlas_items:
raise ValueError("Invalid atlas name: {0}. Please chose an atlas "
"among:\n{1}".format(
atlas_name, '\n'.join(atlas_items)))
url = 'http://www.nitrc.org/frs/download.php/9902/HarvardOxford.tgz'
# For practical reasons, we mimic the FSL data directory here.
dataset_name = 'fsl'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
opts = {'uncompress': True}
root = os.path.join('data', 'atlases')
if atlas_name[0] == 'c':
if 'cort-maxprob' in atlas_name and symmetric_split:
split_name = atlas_name.split('cort')
atlas_name = 'cortl' + split_name[1]
label_file = 'HarvardOxford-Cortical-Lateralized.xml'
lateralized = True
else:
label_file = 'HarvardOxford-Cortical.xml'
lateralized = False
else:
label_file = 'HarvardOxford-Subcortical.xml'
lateralized = False
label_file = os.path.join(root, label_file)
atlas_file = os.path.join(root, 'HarvardOxford',
'HarvardOxford-' + atlas_name + '.nii.gz')
atlas_img, label_file = _fetch_files(
data_dir,
[(atlas_file, url, opts), (label_file, url, opts)],
resume=resume, verbose=verbose)
names = {}
from xml.etree import ElementTree
names[0] = 'Background'
for label in ElementTree.parse(label_file).findall('.//label'):
names[int(label.get('index')) + 1] = label.text
names = list(names.values())
if not symmetric_split:
return Bunch(maps=atlas_img, labels=names)
if atlas_name in ("cort-prob-1mm", "cort-prob-2mm",
"sub-prob-1mm", "sub-prob-2mm"):
raise ValueError("Region splitting not supported for probabilistic "
"atlases")
atlas_img = check_niimg(atlas_img)
if lateralized:
return Bunch(maps=atlas_img, labels=names)
atlas = get_data(atlas_img)
labels = np.unique(atlas)
# Build a mask of both halves of the brain
middle_ind = (atlas.shape[0] - 1) // 2
# Put zeros on the median plane
atlas[middle_ind, ...] = 0
# Split every zone crossing the median plane into two parts.
left_atlas = atlas.copy()
left_atlas[middle_ind:, ...] = 0
right_atlas = atlas.copy()
right_atlas[:middle_ind, ...] = 0
new_label = 0
new_atlas = atlas.copy()
# Assumes that the background label is zero.
new_names = [names[0]]
for label, name in zip(labels[1:], names[1:]):
new_label += 1
left_elements = (left_atlas == label).sum()
right_elements = (right_atlas == label).sum()
n_elements = float(left_elements + right_elements)
if (left_elements / n_elements < 0.05 or
right_elements / n_elements < 0.05):
new_atlas[atlas == label] = new_label
new_names.append(name)
continue
new_atlas[right_atlas == label] = new_label
new_names.append(name + ', left part')
new_label += 1
new_atlas[left_atlas == label] = new_label
new_names.append(name + ', right part')
atlas_img = new_img_like(atlas_img, new_atlas, atlas_img.affine)
return Bunch(maps=atlas_img, labels=new_names)
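# Illustrative usage sketch added for clarity (not part of the original
# module). It assumes network access and the default nilearn data directory;
# the atlas name is one of the valid options documented above.
def _example_fetch_harvard_oxford_split():
    atlas = fetch_atlas_harvard_oxford('sub-maxprob-thr25-2mm',
                                       symmetric_split=True)
    # 'maps' is a 3D label image; 'labels' starts with 'Background' and, after
    # splitting, symmetric regions appear as ', left part' / ', right part'.
    return atlas.maps, atlas.labels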
def fetch_atlas_msdl(data_dir=None, url=None, resume=True, verbose=1):
"""Download and load the MSDL brain atlas.
Parameters
----------
data_dir: string, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
url: string, optional
Override download URL. Used for test only (or if you setup a mirror of
the data).
Returns
-------
data: sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are :
- 'maps': str, path to nifti file containing regions definition.
- 'labels': string list containing the labels of the regions.
- 'region_coords': tuple list (x, y, z) containing coordinates
of each region in MNI space.
- 'networks': string list containing names of the networks.
- 'description': description about the atlas.
References
----------
:Download:
https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip
:Paper to cite:
`Multi-subject dictionary learning to segment an atlas of brain
spontaneous activity <http://hal.inria.fr/inria-00588898/en>`_
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Information Processing in Medical Imaging, 2011,
pp. 562-573, Lecture Notes in Computer Science.
:Other references:
`Learning and comparing functional connectomes across subjects
<http://hal.inria.fr/hal-00812911/en>`_.
<NAME>, <NAME> NeuroImage, 2013.
"""
url = 'https://team.inria.fr/parietal/files/2015/01/MSDL_rois.zip'
opts = {'uncompress': True}
dataset_name = "msdl_atlas"
files = [(os.path.join('MSDL_rois', 'msdl_rois_labels.csv'), url, opts),
(os.path.join('MSDL_rois', 'msdl_rois.nii'), url, opts)]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)
csv_data = np.recfromcsv(files[0])
labels = [name.strip() for name in csv_data['name'].tolist()]
labels = [label.decode("utf-8") for label in labels]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', module='numpy',
category=FutureWarning)
region_coords = csv_data[['x', 'y', 'z']].tolist()
net_names = [net_name.strip() for net_name in csv_data['net_name'].tolist()]
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=files[1], labels=labels, region_coords=region_coords,
networks=net_names, description=fdescr)
def fetch_coords_power_2011():
"""Download and load the Power et al. brain atlas composed of 264 ROIs.
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- "rois": coordinates of 264 ROIs in MNI space
References
----------
Power, <NAME>., et al. "Functional network organization of the human
brain." Neuron 72.4 (2011): 665-678.
"""
dataset_name = 'power_2011'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, "data", "power_2011.csv")
params = dict(rois=np.recfromcsv(csv), description=fdescr)
return Bunch(**params)
def fetch_atlas_smith_2009(data_dir=None, mirror='origin', url=None,
resume=True, verbose=1):
"""Download and load the Smith ICA and BrainMap atlas (dated 2009)
Parameters
----------
data_dir: string, optional
Path of the data directory. Used to force data storage in a non-
standard location. Default: None (meaning: default)
mirror: string, optional
By default, the dataset is downloaded from the original website of the
atlas. Specifying "nitrc" will force download from a mirror, with
potentially higher bandwidth.
url: string, optional
Download URL of the dataset. Overwrite the default URL.
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- 20-dimensional ICA, Resting-FMRI components:
- all 20 components (rsn20)
- 10 well-matched maps from these, as shown in PNAS paper (rsn10)
- 20-dimensional ICA, BrainMap components:
- all 20 components (bm20)
- 10 well-matched maps from these, as shown in PNAS paper (bm10)
- 70-dimensional ICA, Resting-FMRI components (rsn70)
- 70-dimensional ICA, BrainMap components (bm70)
References
----------
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Correspondence of the brain's functional architecture during activation and
rest. Proc Natl Acad Sci USA (PNAS), 106(31):13040-13045, 2009.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Behavioral interpretations
of intrinsic connectivity networks. Journal of Cognitive Neuroscience, 2011
Notes
-----
For more information about this dataset's structure:
http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/
"""
if url is None:
if mirror == 'origin':
url = "http://www.fmrib.ox.ac.uk/datasets/brainmap+rsns/"
elif mirror == 'nitrc':
url = [
'https://www.nitrc.org/frs/download.php/7730/',
'https://www.nitrc.org/frs/download.php/7729/',
'https://www.nitrc.org/frs/download.php/7731/',
'https://www.nitrc.org/frs/download.php/7726/',
'https://www.nitrc.org/frs/download.php/7728/',
'https://www.nitrc.org/frs/download.php/7727/',
]
else:
raise ValueError('Unknown mirror "%s". Mirror must be "origin" '
'or "nitrc"' % str(mirror))
files = [
'rsn20.nii.gz',
'PNAS_Smith09_rsn10.nii.gz',
'rsn70.nii.gz',
'bm20.nii.gz',
'PNAS_Smith09_bm10.nii.gz',
'bm70.nii.gz'
]
if isinstance(url, _basestring):
url = [url] * len(files)
files = [(f, u + f, {}) for f, u in zip(files, url)]
dataset_name = 'smith_2009'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files_ = _fetch_files(data_dir, files, resume=resume,
verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
keys = ['<KEY>
params = dict(zip(keys, files_))
params['description'] = fdescr
return Bunch(**params)
def fetch_atlas_yeo_2011(data_dir=None, url=None, resume=True, verbose=1):
"""Download and return file names for the Yeo 2011 parcellation.
The provided images are in MNI152 space.
Parameters
----------
data_dir: string
directory where data should be downloaded and unpacked.
url: string
url of file to download.
resume: bool
whether to resumed download of a partly-downloaded file.
verbose: int
verbosity level (0 means no message).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, keys are:
- "thin_7", "thick_7": 7-region parcellations,
fitted to resp. thin and thick template cortex segmentations.
- "thin_17", "thick_17": 17-region parcellations.
- "colors_7", "colors_17": colormaps (text files) for 7- and 17-region
parcellation respectively.
- "anat": anatomy image.
Notes
-----
For more information on this dataset's structure, see
http://surfer.nmr.mgh.harvard.edu/fswiki/CorticalParcellation_Yeo2011
Yeo BT, Krienen FM, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>., <NAME>, <NAME>, <NAME>,
<NAME>. The organization of the human cerebral cortex estimated by
intrinsic functional connectivity. J Neurophysiol 106(3):1125-65, 2011.
Licence: unknown.
"""
if url is None:
url = ('ftp://surfer.nmr.mgh.harvard.edu/pub/data/'
'Yeo_JNeurophysiol11_MNI152.zip')
opts = {'uncompress': True}
dataset_name = "yeo_2011"
keys = ("thin_7", "thick_7",
"thin_17", "thick_17",
"colors_7", "colors_17", "anat")
basenames = (
"Yeo2011_7Networks_MNI152_FreeSurferConformed1mm.nii.gz",
"Yeo2011_7Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz",
"Yeo2011_17Networks_MNI152_FreeSurferConformed1mm.nii.gz",
"Yeo2011_17Networks_MNI152_FreeSurferConformed1mm_LiberalMask.nii.gz",
"Yeo2011_7Networks_ColorLUT.txt",
"Yeo2011_17Networks_ColorLUT.txt",
"FSL_MNI152_FreeSurferConformed_1mm.nii.gz")
filenames = [(os.path.join("Yeo_JNeurophysiol11_MNI152", f), url, opts)
for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume,
verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = dict([('description', fdescr)] + list(zip(keys, sub_files)))
return Bunch(**params)
def fetch_atlas_aal(version='SPM12', data_dir=None, url=None, resume=True,
verbose=1):
"""Downloads and returns the AAL template for SPM 12.
This atlas is the result of an automated anatomical parcellation of the
spatially normalized single-subject high-resolution T1 volume provided by
the Montreal Neurological Institute (MNI) (<NAME> al., 1998,
Trans. Med. Imag. 17, 463-468, PubMed).
Parameters
----------
version: string, optional
The version of the AAL atlas. Must be SPM5, SPM8 or SPM12. Default is
SPM12.
data_dir: string
directory where data should be downloaded and unpacked.
url: string
url of file to download.
resume: bool
whether to resumed download of a partly-downloaded file.
verbose: int
verbosity level (0 means no message).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, keys are:
- "maps": str. path to nifti file containing regions.
- "labels": list of the names of the regions
Notes
-----
For more information on this dataset's structure, see
http://www.gin.cnrs.fr/AAL-217?lang=en
Automated Anatomical Labeling of Activations in SPM Using a Macroscopic
Anatomical Parcellation of the MNI MRI Single-Subject Brain.
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, and <NAME>.
NeuroImage 2002. 15 :273-28
Licence: unknown.
"""
versions = ['SPM5', 'SPM8', 'SPM12']
if version not in versions:
raise ValueError('The version of AAL requested "%s" does not exist. '
'Please choose one among %s.' %
(version, str(versions)))
if url is None:
baseurl = "http://www.gin.cnrs.fr/AAL_files/aal_for_%s.tar.gz"
url = baseurl % version
opts = {'uncompress': True}
dataset_name = "aal_" + version
# keys and basenames would need to be handled for each spm_version
# for now spm_version 12 is hardcoded.
basenames = ("AAL.nii", "AAL.xml")
filenames = [(os.path.join('aal', 'atlas', f), url, opts)
for f in basenames]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
atlas_img, labels_file = _fetch_files(data_dir, filenames, resume=resume,
verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
# We return the labels contained in the xml file as a dictionary
xml_tree = xml.etree.ElementTree.parse(labels_file)
root = xml_tree.getroot()
labels = []
indices = []
for label in root.getiterator('label'):
indices.append(label.find('index').text)
labels.append(label.find('name').text)
params = {'description': fdescr, 'maps': atlas_img,
'labels': labels, 'indices': indices}
return Bunch(**params)
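# Illustrative sketch added for clarity (not part of the original module): the
# AAL bunch returns parallel 'indices' and 'labels' lists, so a lookup table
# from image index to region name is a simple zip. The values below are
# stand-ins for demonstration only.
def _example_aal_index_lookup():
    indices = ['2001', '2002']                  # stand-in for bunch.indices
    labels = ['Precentral_L', 'Precentral_R']   # stand-in for bunch.labels
    return dict(zip(indices, labels))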
def fetch_atlas_basc_multiscale_2015(version='sym', data_dir=None,
resume=True, verbose=1):
"""Downloads and loads multiscale functional brain parcellations
This atlas includes group brain parcellations generated from
resting-state functional magnetic resonance images from about
200 young healthy subjects.
Multiple scales (number of networks) are available, among
7, 12, 20, 36, 64, 122, 197, 325, 444. The brain parcellations
have been generated using a method called bootstrap analysis of
stable clusters (BASC; Bellec et al., 2010), and the
scales have been selected using a data-driven method called MSTEPS
(Bellec, 2013).
Note that two versions of the template are available, 'sym' or 'asym'.
The 'asym' type contains brain images that have been registered in the
asymmetric version of the MNI brain template (reflecting that the brain
is asymmetric), while the 'sym' type contains images registered in the
symmetric version of the MNI template. The symmetric template has been
forced to be symmetric anatomically, and is therefore ideally suited to
study homotopic functional connections in fMRI: finding homotopic regions
simply consists of flipping the x-axis of the template.
.. versionadded:: 0.2.3
Parameters
----------
version: str, optional
Available versions are 'sym' or 'asym'. By default all scales of
brain parcellations of version 'sym' will be returned.
data_dir: str, optional
directory where data should be downloaded and unpacked.
url: str, optional
url of file to download.
resume: bool
whether to resumed download of a partly-downloaded file.
verbose: int
verbosity level (0 means no message).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, Keys are:
- "scale007", "scale012", "scale020", "scale036", "scale064",
"scale122", "scale197", "scale325", "scale444": str, path
to Nifti file of various scales of brain parcellations.
- "description": details about the data release.
References
----------
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Jul. 2010.
Multi-level bootstrap analysis of stable clusters in resting-state fMRI.
NeuroImage 51 (3), 1126-1139.
URL http://dx.doi.org/10.1016/j.neuroimage.2010.02.082
<NAME>. 2013. Mining the Hierarchy of Resting-State Brain Networks:
Selection of Representative Clusters in a Multiscale Structure.
Pattern Recognition in Neuroimaging (PRNI), 2013 pp. 54-57.
Notes
-----
For more information on this dataset's structure, see
https://figshare.com/articles/basc/1285615
"""
versions = ['sym', 'asym']
if version not in versions:
raise ValueError('The version of Brain parcellations requested "%s" '
'does not exist. Please choose one among them %s.' %
(version, str(versions)))
keys = ['scale007', 'scale012', 'scale020', 'scale036', 'scale064',
'scale122', 'scale197', 'scale325', 'scale444']
if version == 'sym':
url = "https://ndownloader.figshare.com/files/1861819"
elif version == 'asym':
url = "https://ndownloader.figshare.com/files/1861820"
opts = {'uncompress': True}
dataset_name = "basc_multiscale_2015"
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
folder_name = 'template_cambridge_basc_multiscale_nii_' + version
basenames = ['template_cambridge_basc_multiscale_' + version +
'_' + key + '.nii.gz' for key in keys]
filenames = [(os.path.join(folder_name, basename), url, opts)
for basename in basenames]
data = _fetch_files(data_dir, filenames, resume=resume, verbose=verbose)
descr = _get_dataset_descr(dataset_name)
params = dict(zip(keys, data))
params['description'] = descr
return Bunch(**params)
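# Illustrative sketch added for clarity (not part of the original module): as
# the docstring above notes, homotopic regions in the 'sym' template can be
# found by flipping the x-axis. A small stand-in array replaces a downloaded
# parcellation image here.
def _example_flip_symmetric_parcellation():
    data = np.arange(8).reshape((2, 2, 2))  # stand-in for a 3D parcellation
    flipped = data[::-1, ...]               # mirror across the first (x) axis
    return flipped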
def fetch_coords_dosenbach_2010(ordered_regions=True):
"""Load the Dosenbach et al. 160 ROIs. These ROIs cover
much of the cerebral cortex and cerebellum and are assigned to 6
networks.
Parameters
----------
ordered_regions : bool, optional
ROIs from same networks are grouped together and ordered with respect
to their names and their locations (anterior to posterior).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- "rois": coordinates of 160 ROIs in MNI space
- "labels": ROIs labels
- "networks": networks names
References
----------
<NAME>., <NAME>., et al. "Prediction of individual brain maturity
using fMRI.", 2010, Science 329, 1358-1361.
"""
dataset_name = 'dosenbach_2010'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
csv = os.path.join(package_directory, "data", "dosenbach_2010.csv")
out_csv = np.recfromcsv(csv)
if ordered_regions:
out_csv = np.sort(out_csv, order=['network', 'name', 'y'])
# We add the ROI number to its name, since names are not unique
names = out_csv['name']
numbers = out_csv['number']
labels = np.array(['{0} {1}'.format(name, number) for (name, number) in
zip(names, numbers)])
params = dict(rois=out_csv[['x', 'y', 'z']],
labels=labels,
networks=out_csv['network'], description=fdescr)
return Bunch(**params)
def fetch_coords_seitzman_2018(ordered_regions=True):
"""Load the Seitzman et al. 300 ROIs. These ROIs cover cortical,
subcortical and cerebellar regions and are assigned to one of 13
networks (Auditory, CinguloOpercular, DefaultMode, DorsalAttention,
FrontoParietal, MedialTemporalLobe, ParietoMedial, Reward, Salience,
SomatomotorDorsal, SomatomotorLateral, VentralAttention, Visual) and
have a regional label (cortexL, cortexR, cerebellum, thalamus, hippocampus,
basalGanglia, amygdala, cortexMid).
.. versionadded:: 0.5.1
Parameters
----------
ordered_regions : bool, optional
ROIs from same networks are grouped together and ordered with respect
to their locations (anterior to posterior).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- "rois": Coordinates of 300 ROIs in MNI space
- "radius": Radius of each ROI in mm
- "networks": Network names
- "regions": Region names
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>.,
<NAME>., et al. (2018). A set of functionally-defined brain
regions with improved representation of the subcortex and cerebellum.
bioRxiv, 450452. http://doi.org/10.1101/450452
"""
dataset_name = 'seitzman_2018'
fdescr = _get_dataset_descr(dataset_name)
package_directory = os.path.dirname(os.path.abspath(__file__))
roi_file = os.path.join(package_directory, "data",
"seitzman_2018_ROIs_300inVol_MNI_allInfo.txt")
anatomical_file = os.path.join(package_directory, "data",
"seitzman_2018_ROIs_anatomicalLabels.txt")
rois = np.recfromcsv(roi_file, delimiter=" ")
rois = recfunctions.rename_fields(rois, {"netname": "network",
"radiusmm": "radius"})
rois.network = rois.network.astype(str)
# get integer regional labels and convert to text labels with mapping
# from header line
with open(anatomical_file, 'r') as fi:
header = fi.readline()
region_mapping = {}
for r in header.strip().split(","):
i, region = r.split("=")
region_mapping[int(i)] = region
anatomical = np.genfromtxt(anatomical_file, skip_header=1)
anatomical_names = np.array([region_mapping[a] for a in anatomical])
rois = recfunctions.merge_arrays((rois, anatomical_names),
asrecarray=True, flatten=True)
rois.dtype.names = rois.dtype.names[:-1] + ("region",)
if ordered_regions:
rois = np.sort(rois, order=['network', 'y'])
params = dict(rois=rois[['x', 'y', 'z']],
radius=rois['radius'],
networks=rois['network'].astype(str),
regions=rois['region'], description=fdescr)
return Bunch(**params)
def fetch_atlas_allen_2011(data_dir=None, url=None, resume=True, verbose=1):
"""Download and return file names for the Allen and MIALAB ICA atlas
(dated 2011).
The provided images are in MNI152 space.
Parameters
----------
data_dir: str, optional
directory where data should be downloaded and unpacked.
url: str, optional
url of file to download.
resume: bool
whether to resumed download of a partly-downloaded file.
verbose: int
verbosity level (0 means no message).
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, keys are:
- "maps": T-maps of all 75 unthresholded components.
- "rsn28": T-maps of 28 RSNs included in E. Allen et al.
- "networks": string list containing the names for the 28 RSNs.
- "rsn_indices": dict[rsn_name] -> list of int, indices in the "maps"
file of the 28 RSNs.
- "comps": The aggregate ICA Components.
- "description": details about the data release.
References
----------
<NAME>, et al, "A baseline for the multivariate comparison of resting
state networks," Frontiers in Systems Neuroscience, vol. 5, p. 12, 2011.
Notes
-----
Licence: unknown
See http://mialab.mrn.org/data/index.html for more information
on this dataset.
"""
if url is None:
url = "https://osf.io/hrcku/download"
dataset_name = "allen_rsn_2011"
keys = ("maps",
"rsn28",
"comps")
opts = {'uncompress': True}
files = ["ALL_HC_unthresholded_tmaps.nii.gz",
"RSN_HC_unthresholded_tmaps.nii.gz",
"rest_hcp_agg__component_ica_.nii.gz"]
labels = [('Basal Ganglia', [21]),
('Auditory', [17]),
('Sensorimotor', [7, 23, 24, 38, 56, 29]),
('Visual', [46, 64, 67, 48, 39, 59]),
('Default-Mode', [50, 53, 25, 68]),
('Attentional', [34, 60, 52, 72, 71, 55]),
('Frontal', [42, 20, 47, 49])]
networks = [[name] * len(idxs) for name, idxs in labels]
filenames = [(os.path.join('allen_rsn_2011', f), url, opts) for f in files]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
sub_files = _fetch_files(data_dir, filenames, resume=resume,
verbose=verbose)
fdescr = _get_dataset_descr(dataset_name)
params = [('description', fdescr),
('rsn_indices', labels),
('networks', networks)]
params.extend(list(zip(keys, sub_files)))
return Bunch(**dict(params))
def fetch_atlas_surf_destrieux(data_dir=None, url=None,
resume=True, verbose=1):
"""Download and load Destrieux et al, 2010 cortical atlas.
This atlas returns 76 labels per hemisphere based on sulco-gyral patterns
as distributed with Freesurfer in fsaverage5 surface space.
.. versionadded:: 0.3
Parameters
----------
data_dir: str, optional
Path of the data directory. Use to force data storage in a non-
standard location. Default: None
url: str, optional
Download URL of the dataset. Overwrite the default URL.
resume: bool, optional (default True)
If True, try resuming download if possible.
verbose: int, optional (default 1)
Defines the level of verbosity of the output.
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, contains:
- "labels": list
Contains region labels
- "map_left": numpy.ndarray
Index into 'labels' for each vertex on the
left hemisphere of the fsaverage5 surface
- "map_right": numpy.ndarray
Index into 'labels' for each vertex on the
right hemisphere of the fsaverage5 surface
- "description": str
Details about the dataset
References
----------
Destrieux et al. (2010), Automatic parcellation of human cortical gyri and
sulci using standard anatomical nomenclature. NeuroImage 53, 1-15.
"""
if url is None:
url = "https://www.nitrc.org/frs/download.php/"
dataset_name = 'destrieux_surface'
fdescr = _get_dataset_descr(dataset_name)
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
# Download annot files, fsaverage surfaces and sulcal information
annot_file = '%s.aparc.a2009s.annot'
annot_url = url + '%i/%s.aparc.a2009s.annot'
annot_nids = {'lh annot': 9343, 'rh annot': 9342}
annots = []
for hemi in [('lh', 'left'), ('rh', 'right')]:
annot = _fetch_files(data_dir,
[(annot_file % (hemi[1]),
annot_url % (annot_nids['%s annot' % hemi[0]],
hemi[0]),
{'move': annot_file % (hemi[1])})],
resume=resume, verbose=verbose)[0]
annots.append(annot)
annot_left = nb.freesurfer.read_annot(annots[0])
annot_right = nb.freesurfer.read_annot(annots[1])
return Bunch(labels=annot_left[2], map_left=annot_left[0],
map_right=annot_right[0], description=fdescr)
def _separate_talairach_levels(atlas_img, labels, verbose=1):
"""Separate the multiple annotation levels in talairach raw atlas.
The Talairach atlas has five levels of annotation: hemisphere, lobe, gyrus,
tissue, brodmann area. They are mixed up in the original atlas: each label
in the atlas corresponds to a 5-tuple containing, for each of these levels,
a value or the string '*' (meaning undefined, background).
This function disentangles the levels, and stores each in its own octet of an
int64 image (the level with the most labels, ba, has 72 labels).
This way, any subset of these levels can be accessed by applying a bitwise
mask.
In the created image, the least significant octet contains the hemisphere,
the next one the lobe, then gyrus, tissue, and ba. Background is 0.
The labels contain
[('level name', ['labels', 'for', 'this', 'level' ...]), ...],
where the levels are in the order mentioned above.
The label '*' is replaced by 'Background' for clarity.
"""
labels = np.asarray(labels)
if verbose:
print(
'Separating talairach atlas levels: {}'.format(_TALAIRACH_LEVELS))
levels = []
new_img = np.zeros(atlas_img.shape, dtype=np.int64)
for pos, level in enumerate(_TALAIRACH_LEVELS):
if verbose:
print(level)
level_img = np.zeros(atlas_img.shape, dtype=np.int64)
level_labels = {'*': 0}
for region_nb, region in enumerate(labels[:, pos]):
level_labels.setdefault(region, len(level_labels))
level_img[get_data(atlas_img) == region_nb] = level_labels[
region]
# shift this level to its own octet and add it to the new image
level_img <<= 8 * pos
new_img |= level_img
# order the labels so that image values are indices in the list of
# labels for each level
level_labels = list(list(
zip(*sorted(level_labels.items(), key=lambda t: t[1])))[0])
# rename '*' -> 'Background'
level_labels[0] = 'Background'
levels.append((level, level_labels))
new_img = new_img_like(atlas_img, data=new_img)
return new_img, levels
def _get_talairach_all_levels(data_dir=None, verbose=1):
"""Get the path to Talairach atlas and labels
The atlas is downloaded and the files are created if necessary.
The image contains all five levels of the atlas, each encoded on 8 bits
(least significant octet contains the hemisphere, the next one the lobe,
then gyrus, tissue, and ba).
The labels json file contains
[['level name', ['labels', 'for', 'this', 'level' ...]], ...],
where the levels are in the order mentioned above.
"""
data_dir = _get_dataset_dir(
'talairach_atlas', data_dir=data_dir, verbose=verbose)
img_file = os.path.join(data_dir, 'talairach.nii')
labels_file = os.path.join(data_dir, 'talairach_labels.json')
if os.path.isfile(img_file) and os.path.isfile(labels_file):
return img_file, labels_file
atlas_url = 'http://www.talairach.org/talairach.nii'
temp_dir = mkdtemp()
try:
temp_file = _fetch_files(
temp_dir, [('talairach.nii', atlas_url, {})], verbose=verbose)[0]
atlas_img = nb.load(temp_file, mmap=False)
atlas_img = check_niimg(atlas_img)
finally:
shutil.rmtree(temp_dir)
labels = atlas_img.header.extensions[0].get_content()
labels = labels.strip().decode('utf-8').split('\n')
labels = [l.split('.') for l in labels]
new_img, level_labels = _separate_talairach_levels(
atlas_img, labels, verbose=verbose)
new_img.to_filename(img_file)
with open(labels_file, 'w') as fp:
json.dump(level_labels, fp)
return img_file, labels_file
def fetch_atlas_talairach(level_name, data_dir=None, verbose=1):
"""Download the Talairach atlas.
.. versionadded:: 0.4.0
Parameters
----------
level_name : {'hemisphere', 'lobe', 'gyrus', 'tissue', 'ba'}
Which level of the atlas to use: the hemisphere, the lobe, the gyrus,
the tissue type or the Brodmann area.
data_dir : str, optional (default=None)
Path of the data directory. Used to force data storage in a specified
location.
verbose : int
verbosity level (0 means no message).
Returns
-------
sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: list of strings. Starts with 'Background'.
- description: a short description of the atlas and some references.
References
----------
http://talairach.org/about.html#Labels
`Lancaster JL, <NAME>, <NAME>, <NAME>, Freitas CS, <NAME>,
<NAME>, <NAME>, <NAME>, Fox PT, "Automated Talairach Atlas
labels for functional brain mapping". Human Brain Mapping 10:120-131,
2000.`
`Lancaster JL, <NAME>, <NAME>, Freitas CS, Fox PT, Evans AC, Toga
AW, Mazziotta JC. Automated labeling of the human brain: A preliminary
report on the development and evaluation of a forward-transform method. Hum
Brain Mapp 5, 238-242, 1997.`
"""
if level_name not in _TALAIRACH_LEVELS:
raise ValueError('"level_name" should be one of {}'.format(
_TALAIRACH_LEVELS))
position = _TALAIRACH_LEVELS.index(level_name)
atlas_file, labels_file = _get_talairach_all_levels(data_dir, verbose)
atlas_img = check_niimg(atlas_file)
with open(labels_file) as fp:
labels = json.load(fp)[position][1]
level_data = (get_data(atlas_img) >> 8 * position) & 255
atlas_img = new_img_like(atlas_img, data=level_data)
description = _get_dataset_descr(
'talairach_atlas').decode('utf-8').format(level_name)
return Bunch(maps=atlas_img, labels=labels, description=description)
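# Illustrative sketch added for clarity (not part of the original module): the
# packed Talairach image keeps each annotation level in its own octet, so one
# level is recovered with a right shift and an 8-bit mask, exactly as done in
# fetch_atlas_talairach above. Toy label values are used here.
def _example_talairach_octet_unpacking():
    packed = np.int64(3) | (np.int64(2) << 8)  # hemisphere label 3, lobe label 2
    hemisphere = int(packed & 255)             # -> 3
    lobe = int((packed >> 8) & 255)            # -> 2
    return hemisphere, lobe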
def fetch_atlas_pauli_2017(version='prob', data_dir=None, verbose=1):
"""Download the Pauli et al. (2017) atlas with in total
12 subcortical nodes.
Parameters
----------
version: str, optional (default='prob')
Which version of the atlas should be download. This can be 'prob'
for the probabilistic atlas or 'det' for the deterministic atlas.
data_dir : str, optional (default=None)
Path of the data directory. Used to force data storage in a specified
location.
verbose : int
verbosity level (0 means no message).
Returns
-------
sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: list of strings. Starts with 'Background'.
- description: a short description of the atlas and some references.
References
----------
https://osf.io/r2hvk/
`<NAME>., <NAME>., & <NAME>. (2018). A high-resolution
probabilistic in vivo atlas of human subcortical brain nuclei.
Scientific Data, 5, 180063-13. http://doi.org/10.1038/sdata.2018.63``
"""
if version == 'prob':
url_maps = 'https://osf.io/w8zq2/download'
filename = 'pauli_2017_prob.nii.gz'
elif version == 'det':
url_maps = 'https://osf.io/5mqfx/download'
filename = 'pauli_2017_det.nii.gz'
else:
raise NotImplementedError('{} is not a valid version for '.format(version) +
'the Pauli atlas')
url_labels = 'https://osf.io/6qrcb/download'
dataset_name = 'pauli_2017'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
files = [(filename,
url_maps,
{'move':filename}),
('labels.txt',
url_labels,
{'move':'labels.txt'})]
atlas_file, labels = _fetch_files(data_dir, files)
labels = np.loadtxt(labels, dtype=str)[:, 1].tolist()
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file,
labels=labels,
description=fdescr)
def fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7, resolution_mm=1,
data_dir=None, base_url=None, resume=True,
verbose=1):
"""Download and return file names for the Schaefer 2018 parcellation
.. versionadded:: 0.5.1
The provided images are in MNI152 space.
Parameters
----------
n_rois: int
number of regions of interest {100, 200, 300, 400 (default), 500, 600,
700, 800, 900, 1000}
yeo_networks: int
ROI annotation according to yeo networks {7 (default), 17}
resolution_mm: int
Spatial resolution of atlas image in mm {1 (default), 2}
data_dir: string
directory where data should be downloaded and unpacked.
base_url: string
base_url of files to download (None results in default base_url).
resume: bool
whether to resumed download of a partly-downloaded file.
verbose: int
verbosity level (0 means no message).
Returns
-------
data: sklearn.datasets.base.Bunch
Dictionary-like object, contains:
- maps: 3D Nifti image, values are indices in the list of labels.
- labels: ROI labels including Yeo-network annotation,list of strings.
- description: A short description of the atlas and some references.
References
----------
For more information on this dataset, see
https://github.com/ThomasYeoLab/CBIG/tree/v0.14.3-Update_Yeo2011_Schaefer2018_labelname/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations
<NAME>, <NAME>, <NAME>, Laumann TO, Zuo XN, <NAME>,
<NAME>, Yeo BTT. Local-Global parcellation of the human
cerebral cortex from intrinsic functional connectivity MRI,
Cerebral Cortex, 29:3095-3114, 2018.
Yeo BT, Krienen FM, Sepulcre J, Sabuncu MR, <NAME>, Hollinshead M,
Roffman JL, Smoller JW, Zollei L., <NAME>R, <NAME>, <NAME>,
Buckner RL. The organization of the human cerebral cortex estimated by
intrinsic functional connectivity. J Neurophysiol 106(3):1125-65, 2011.
Licence: MIT.
Notes
-----
Release v0.14.3 of the Schaefer 2018 parcellation is used by
default. Versions prior to v0.14.3 are known to contain erroneous region
label names. For more details, see
https://github.com/ThomasYeoLab/CBIG/blob/master/stable_projects/brain_parcellation/Schaefer2018_LocalGlobal/Parcellations/Updates/Update_20190916_README.md
"""
valid_n_rois = list(range(100, 1100, 100))
valid_yeo_networks = [7, 17]
valid_resolution_mm = [1, 2]
if n_rois not in valid_n_rois:
raise ValueError("Requested n_rois={} not available. Valid "
"options: {}".format(n_rois, valid_n_rois))
if yeo_networks not in valid_yeo_networks:
raise ValueError("Requested yeo_networks={} not available. Valid "
"options: {}".format(yeo_networks,valid_yeo_networks))
if resolution_mm not in valid_resolution_mm:
raise ValueError("Requested resolution_mm={} not available. Valid "
"options: {}".format(resolution_mm,
valid_resolution_mm)
)
if base_url is None:
base_url = ('https://raw.githubusercontent.com/ThomasYeoLab/CBIG/'
'v0.14.3-Update_Yeo2011_Schaefer2018_labelname/'
'stable_projects/brain_parcellation/'
'Schaefer2018_LocalGlobal/Parcellations/MNI/'
)
files = []
labels_file_template = 'Schaefer2018_{}Parcels_{}Networks_order.txt'
img_file_template = ('Schaefer2018_{}Parcels_'
'{}Networks_order_FSLMNI152_{}mm.nii.gz')
for f in [labels_file_template.format(n_rois, yeo_networks),
img_file_template.format(n_rois, yeo_networks, resolution_mm)]:
files.append((f, base_url + f, {}))
dataset_name = 'schaefer_2018'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
labels_file, atlas_file = _fetch_files(data_dir, files, resume=resume,
verbose=verbose)
labels = np.genfromtxt(labels_file, usecols=1, dtype="S", delimiter="\t")
fdescr = _get_dataset_descr(dataset_name)
return Bunch(maps=atlas_file,
labels=labels,
description=fdescr)
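# Illustrative usage sketch added for clarity (not part of the original
# module). It assumes network access and the default nilearn data directory;
# the parameter values shown are simply the documented defaults.
def _example_fetch_schaefer_default():
    atlas = fetch_atlas_schaefer_2018(n_rois=400, yeo_networks=7,
                                      resolution_mm=1)
    # 'labels' is an array of byte strings carrying the Yeo-network annotation.
    return atlas.maps, atlas.labels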
``` |
{
"source": "johanneswilm/coveralls-python-action",
"score": 3
} |
#### File: coveralls-python-action/tests/test_entrypoint.py
```python
import signal
from unittest import mock
import pytest
from coveralls.api import CoverallsException
import entrypoint
def patch_os_envirion(environ):
return mock.patch.dict("os.environ", environ, clear=True)
def patch_coveralls_wear():
return mock.patch("entrypoint.Coveralls.wear")
def patch_log():
return mock.patch("entrypoint.log")
def patch_sys_argv(argv):
return mock.patch("sys.argv", argv)
def patch_requests_post(json_response=None):
new_mock = mock.Mock()
if json_response:
new_mock.return_value.json.return_value = json_response
return mock.patch("entrypoint.requests.post", new_mock)
class TestEntryPoint:
def test_main_no_token(self):
"""Argument `--github-token` is required."""
argv = ["src/entrypoint.py"]
with patch_sys_argv(argv), pytest.raises(SystemExit) as ex_info:
entrypoint.main()
assert ex_info.value.args == (signal.SIGINT.value,)
def test_main(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.run_coveralls"
) as m_run_coveralls:
entrypoint.main()
assert m_run_coveralls.call_args_list == [
mock.call("TOKEN", False, False, False)
]
def test_main_flag_name(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--flag-name", "FLAG"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.run_coveralls"
) as m_run_coveralls:
entrypoint.main()
assert m_run_coveralls.call_args_list == [
mock.call("TOKEN", False, "FLAG", False)
]
def test_main_base_path(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--base-path", "SRC"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.run_coveralls"
) as m_run_coveralls:
entrypoint.main()
assert m_run_coveralls.call_args_list == [
mock.call("TOKEN", False, False, "SRC")
]
def test_main_parallel_finished(self):
argv = ["src/entrypoint.py", "--github-token", "TOKEN", "--parallel-finished"]
with patch_sys_argv(argv), mock.patch(
"entrypoint.post_webhook"
) as m_post_webhook:
entrypoint.main()
assert m_post_webhook.call_args_list == [mock.call("TOKEN")]
def test_try_main(self):
with mock.patch(
"entrypoint.main", side_effect=Exception
) as m_main, pytest.raises(SystemExit) as ex_info:
entrypoint.try_main()
assert m_main.call_args_list == [mock.call()]
assert ex_info.value.args == (entrypoint.ExitCode.FAILURE,)
def test_run_coveralls_github_token(self):
"""Simple case when Coveralls.wear() returns some results."""
url = "https://coveralls.io/jobs/1234"
with patch_coveralls_wear() as m_wear, patch_log() as m_log:
m_wear.return_value = {
"message": "Job ##12.34",
"url": url,
}
entrypoint.run_coveralls(repo_token="TOKEN")
assert m_wear.call_args_list == [mock.call()]
assert m_log.method_calls == [
mock.call.info("Trying submitting coverage with service_name: github..."),
mock.call.debug(
"Patching os.environ with: "
"{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
),
mock.call.debug(m_wear.return_value),
mock.call.info(url),
]
def test_run_coveralls_wear_error_once(self):
"""On Coveralls.wear() error we should try another `service_name`."""
url = "https://coveralls.io/jobs/1234"
side_effect = (
CoverallsException("Error"),
{"message": "Job ##12.34", "url": url},
)
with patch_coveralls_wear() as m_wear, patch_log() as m_log:
m_wear.side_effect = side_effect
entrypoint.run_coveralls(repo_token="TOKEN")
assert m_wear.call_args_list == [mock.call(), mock.call()]
assert m_log.method_calls == [
mock.call.info("Trying submitting coverage with service_name: github..."),
mock.call.debug(
"Patching os.environ with: "
"{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
),
mock.call.warning("Failed submitting coverage with service_name: github"),
mock.call.warning(side_effect[0]),
mock.call.info(
"Trying submitting coverage with service_name: github-actions..."
),
mock.call.debug(
"Patching os.environ with: "
"{'COVERALLS_REPO_TOKEN': 'TOKEN', 'COVERALLS_PARALLEL': ''}"
),
mock.call.debug(side_effect[1]),
mock.call.info(url),
]
def test_run_coveralls_wear_error_twice(self):
"""Exits with error code if Coveralls.wear() fails twice."""
side_effect = (
CoverallsException("Error 1"),
CoverallsException("Error 2"),
)
with patch_coveralls_wear() as m_wear, pytest.raises(SystemExit) as ex_info:
m_wear.side_effect = side_effect
entrypoint.run_coveralls(repo_token="TOKEN")
assert ex_info.value.args == (entrypoint.ExitCode.FAILURE,)
def test_post_webhook(self):
"""
Tests different use cases:
1) default, no environment variable
2) `GITHUB_RUN_ID` and `GITHUB_REPOSITORY` are set
"""
repo_token = "TOKEN"
json_response = {"done": True}
# 1) default, no environment variable
environ = {}
with patch_requests_post(json_response) as m_post, patch_os_envirion(environ):
entrypoint.post_webhook(repo_token)
assert m_post.call_args_list == [
mock.call(
"https://coveralls.io/webhook",
json={
"repo_token": "TOKEN",
"repo_name": None,
"payload": {"build_num": None, "status": "done"},
},
)
]
# 2) `GITHUB_RUN_ID` and `GITHUB_REPOSITORY` are set
environ = {
"GITHUB_RUN_ID": "845347868344",
"GITHUB_REPOSITORY": "AndreMiras/coveralls-python-action",
}
with patch_requests_post(json_response) as m_post, patch_os_envirion(environ):
entrypoint.post_webhook(repo_token)
assert m_post.call_args_list == [
mock.call(
"https://coveralls.io/webhook",
json={
"repo_token": "TOKEN",
"repo_name": "AndreMiras/coveralls-python-action",
"payload": {
"build_num": "845347868344",
"status": "done",
},
},
)
]
def test_post_webhook_error(self):
"""Coveralls.io json error response should raise an exception."""
repo_token = "TOKEN"
json_response = {"error": "Invalid repo token"}
# 1) default, no environment variable
environ = {}
with patch_requests_post(json_response) as m_post, patch_os_envirion(
environ
), pytest.raises(AssertionError) as ex_info:
entrypoint.post_webhook(repo_token)
assert m_post.call_args_list == [
mock.call(
"https://coveralls.io/webhook",
json={
"repo_token": "TOKEN",
"repo_name": None,
"payload": {"build_num": None, "status": "done"},
},
)
]
assert ex_info.value.args == (json_response,)
@pytest.mark.parametrize(
"value,expected",
[
(False, False),
("false", False),
("f", False),
("0", False),
("no", False),
("n", False),
(True, True),
("true", True),
("t", True),
("1", True),
("yes", True),
("y", True),
],
)
def test_str_to_bool(self, value, expected):
"""Possible recognised values."""
assert entrypoint.str_to_bool(value) is expected
@pytest.mark.parametrize("value", ["", "yesn't"])
def test_str_to_bool_value_error(self, value):
"""Other unrecognised string values raise a `ValueError`."""
with pytest.raises(ValueError) as ex_info:
entrypoint.str_to_bool(value)
assert ex_info.value.args == (f"{value} is not a valid boolean value",)
@pytest.mark.parametrize("value", [None, 0])
def test_str_to_bool_attribute_error(self, value):
"""Other unrecognised non-string values raise an `AttributeError`."""
with pytest.raises(AttributeError) as ex_info:
entrypoint.str_to_bool(value)
assert ex_info.value.args[0].endswith(" object has no attribute 'lower'")
``` |
{
"source": "johanneswilm/django-binary-database-files",
"score": 2
} |
#### File: management/commands/database_files_dump.py
```python
import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from binary_database_files.models import File
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
# make_option('-w', '--overwrite', action='store_true',
# dest='overwrite', default=False,
# help='If given, overwrites any existing files.'),
)
help = 'Dumps all files in the database referenced by FileFields ' + \
'or ImageFields onto the filesystem in the directory specified by ' + \
'MEDIA_ROOT.'
def handle(self, *args, **options):
File.dump_files(verbose=True)
``` |
{
"source": "johanneswilm/rapidsms",
"score": 2
} |
#### File: backends/vumi/views.py
```python
import json
import logging
from django.http import HttpResponse
from rapidsms.backends.vumi.forms import VumiForm
from rapidsms.backends.http.views import BaseHttpBackendView
logger = logging.getLogger(__name__)
class VumiBackendView(BaseHttpBackendView):
"""
Backend view for handling inbound SMSes from Vumi (http://vumi.org/)
"""
http_method_names = ['post']
form_class = VumiForm
def get_form_kwargs(self):
"""Load JSON POST data."""
kwargs = super(VumiBackendView, self).get_form_kwargs()
try:
kwargs['data'] = json.loads(self.request.body)
except ValueError:
logger.exception("Failed to parse JSON from Vumi.")
return kwargs
def form_valid(self, form):
super(VumiBackendView, self).form_valid(form)
# return 200 for Vumi
return HttpResponse('')
```
#### File: handlers/handlers/keyword.py
```python
import re
from django.core.exceptions import ObjectDoesNotExist
from ..exceptions import HandlerError
from .base import BaseHandler
class KeywordHandler(BaseHandler):
"""
This handler type can be subclassed to create simple keyword-based
handlers. When a message is received, it is checked against the mandatory
``keyword`` attribute (a regular expression) for a prefix match. For
example::
>>> class AbcHandler(KeywordHandler):
... keyword = "abc"
...
... def help(self):
... self.respond("Here is some help.")
...
... def handle(self, text):
... self.respond("You said: %s." % text)
If the keyword is matched and followed by some text, the ``handle`` method
is called::
>>> AbcHandler.test("abc waffles")
['You said: waffles.']
If *just* the keyword is matched, the ``help`` method is called::
>>> AbcHandler.test("abc")
['Here is some help.']
All other messages are silently ignored (as usual), to allow other apps or
handlers to catch them.
"""
#: A string specifying a regular expression matched against the
#: beginning of the message. Not case sensitive.
keyword = None
def help(self):
"""Called when the keyword matches but no text follows"""
raise NotImplementedError
def handle(self, text):
"""Called when the keyword matches and text follows
:param text: The text that follows the keyword. Any whitespace
between the keyword and the text is not included.
"""
raise NotImplementedError
@classmethod
def _keyword(cls):
if hasattr(cls, "keyword") and cls.keyword:
# The 'keyword' is inside non-grouping parentheses so that a
# user could set the keyword to a regex - e.g.
# keyword = r'one|two|three'
prefix = r"""
^\s* # discard leading whitespace
(?:{keyword}) # require the keyword or regex
[\s,;:]* # consume any whitespace , ; or :
([^\s,;:].*)? # capture rest of line if any, starting
# with the first non-whitespace
$ # match all the way to the end
""".format(keyword=cls.keyword)
return re.compile(prefix, re.IGNORECASE|re.VERBOSE)
raise HandlerError('KeywordHandler must define a keyword.')
@classmethod
def dispatch(cls, router, msg):
keyword = cls._keyword()
match = keyword.match(msg.text)
if match is None:
return False
# spawn an instance of this handler, and stash
# the low(er)-level router and message object
inst = cls(router, msg)
# if any non-whitespace content was send after the keyword, send
# it along to the handle method. the instance can always find
# the original text via self.msg if it really needs it.
text = match.group(1)
if text is not None and text.strip() != "":
try:
inst.handle(text)
# special case: if an object was expected but not found,
# return the (rather appropriate) "%s matching query does
# not exist." message. this can, of course, be overridden by
# catching the exception within the ``handle`` method.
except ObjectDoesNotExist, err:
return inst.respond_error(
unicode(err))
# another special case: if something was miscast to an int
# (it was probably a string from the ``text``), return a
# more friendly (and internationalizable) error.
except ValueError, err:
p = r"^invalid literal for int\(\) with base (\d+?): '(.+?)'$"
m = re.match(p, unicode(err))
# allow other valueerrors to propagate.
if m is None:
raise
return inst.respond_error(
"Not a valid number: %(string)s" % dict(
string=m.group(2)))
# if we received _just_ the keyword, with
# no content, some help should be sent back
else:
inst.help()
return True
```
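A minimal sketch of how the `KeywordHandler` above is meant to be subclassed, following the pattern described in its docstring. The `PingHandler` class, its response strings, and the import path are illustrative assumptions, not part of the original code.

```python
# Hypothetical subclass of the KeywordHandler defined above; the import
# path is an assumption and should point at wherever keyword.py lives
# in your project layout.
from handlers.handlers.keyword import KeywordHandler


class PingHandler(KeywordHandler):
    # Matched case-insensitively against the start of incoming messages;
    # a regex such as r"ping|echo" would also be accepted here.
    keyword = "ping"

    def help(self):
        # Called when the bare keyword arrives with no trailing text.
        self.respond("Send PING <text> and I will echo it back.")

    def handle(self, text):
        # Called with whatever followed the keyword, leading whitespace removed.
        self.respond("You said: %s." % text)


# Mirroring the doctest in the KeywordHandler docstring above, a call like
# PingHandler.test("ping hello") would return ['You said: hello.'].
```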
#### File: contrib/registration/tests.py
```python
from StringIO import StringIO
from django.http import Http404, HttpRequest, HttpResponseRedirect, QueryDict
from django.test import TestCase
from mock import Mock, patch
from rapidsms.models import Connection, Contact
from rapidsms.tests.harness import CreateDataMixin, LoginMixin
from rapidsms.tests.scripted import TestScript
import rapidsms.contrib.registration.views as views
RAPIDSMS_HANDLERS = [
"rapidsms.contrib.registration.handlers.language.LanguageHandler",
"rapidsms.contrib.registration.handlers.register.RegisterHandler",
]
class TestRegister(TestScript):
handlers = RAPIDSMS_HANDLERS
def testRegister(self):
self.assertInteraction("""
8005551212 > register as someuser
8005551212 < Thank you for registering, as someuser!
""")
def testLang(self):
self.assertInteraction("""
8005551212 > lang english
8005551212 < %s
8005551212 > register as someuser
8005551212 < Thank you for registering, as someuser!
8005551212 > lang english
8005551212 < I will speak to you in English.
8005551212 > lang klingon
8005551212 < Sorry, I don't speak "klingon".
""" % ("You must JOIN or REGISTER yourself before you can set " +
"your language preference."))
def testHelp(self):
self.assertInteraction("""
8005551212 > lang
8005551212 < To set your language, send LANGUAGE <CODE>
8005551212 > register
8005551212 < To register, send JOIN <NAME>
""")
class TestViews(TestCase, CreateDataMixin, LoginMixin):
def setUp(self):
# Make some contacts
self.contacts = [self.create_contact() for i in range(2)]
self.backend = self.create_backend()
# Give the first one some connections
for i in range(2):
self.create_connection(data={'contact': self.contacts[0]})
def test_registration(self):
# The registration view calls render with a context that has a
# contacts_table that has the contacts in its data
request = HttpRequest()
request.GET = QueryDict('')
self.login()
request.user = self.user
with patch('rapidsms.contrib.registration.views.render') as render:
views.registration(request)
context = render.call_args[0][2]
table = context["contacts_table"]
self.assertEqual(len(self.contacts), len(list(table.data.queryset)))
def test_registration_render(self):
# render actually works (django_tables2 and all)
request = HttpRequest()
request.GET = QueryDict('')
self.login()
request.user = self.user
retval = views.registration(request)
self.assertEqual(200, retval.status_code)
def test_contact_existing_404(self):
# Trying to edit a non-existing contact raises a 404
with self.assertRaises(Http404):
views.contact(Mock(), pk=27)
def test_contact_existing(self):
# GET on contact form with valid pk renders template with that contact
contact = self.contacts[0]
connection = contact.default_connection
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="GET")
self.login()
request.user = self.user
views.contact(request, pk=contact.pk)
context = render.call_args[0][2]
self.assertEqual(contact.pk, context['contact'].pk)
form = context['contact_form']
data = form.initial
self.assertEqual(contact.name, data['name'])
self.assertEqual(contact.pk, form.instance.pk)
formset = context['connection_formset']
forms = formset.forms
instances = [f.instance for f in forms]
# Connection should be in there
self.assertIn(connection, instances)
# Should be 1 more form than we have connections
self.assertEqual(len(forms), 1 + len(contact.connection_set.all()))
def test_contact_get(self):
# GET on contact form with no pk allows creating new one
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="GET")
self.login()
request.user = self.user
views.contact(request)
context = render.call_args[0][2]
# ModelForms create a new unsaved instance
self.assertIsNotNone(context['contact_form'].instance)
self.assertTrue(context['contact_form'].instance.is_anonymous)
self.assertEqual(1, len(context['connection_formset'].forms))
def test_contact_update(self):
# POST to contact view updates the contact and connections
contact = self.contacts[0]
data = {
u'name': u'<NAME>',
u'language': u'wxyz',
u'submit': u'Save Contact',
u'connection_set-0-id': u'2',
u'connection_set-0-DELETE': u'',
u'connection_set-0-backend': u'1',
u'connection_set-0-contact': u'1',
u'connection_set-0-identity': u'4567',
u'connection_set-1-id': u'',
u'connection_set-1-contact': u'1',
u'connection_set-1-identity': u'',
u'connection_set-1-backend': u'',
u'connection_set-INITIAL_FORMS': u'1',
u'connection_set-TOTAL_FORMS': u'2',
u'connection_set-MAX_NUM_FORMS': u'10',
}
with patch('rapidsms.contrib.registration.views.render'):
request = Mock(method="POST", POST=data)
self.login()
request.user = self.user
retval = views.contact(request, pk=contact.pk)
self.assertTrue(isinstance(retval, HttpResponseRedirect))
self.assertEqual(302, retval.status_code)
new_contact = Contact.objects.get(pk=contact.pk)
self.assertEqual(data['name'], new_contact.name)
self.assertEqual(data['language'], new_contact.language)
identities = [c.identity for c in contact.connection_set.all()]
self.assertIn(data['connection_set-0-identity'], identities)
def test_contact_update_add_connection(self):
# POST to contact view can add a connection
contact = self.contacts[0]
data = {
u'name': u'<NAME>',
u'language': u'wxyz',
u'submit': u'Save Contact',
u'connection_set-0-id': u'2',
u'connection_set-0-DELETE': u'',
u'connection_set-0-backend': u'1',
u'connection_set-0-contact': u'1',
u'connection_set-0-identity': u'4567',
u'connection_set-1-id': u'',
u'connection_set-1-contact': u'1',
u'connection_set-1-identity': u'987654',
u'connection_set-1-backend': u'1',
u'connection_set-INITIAL_FORMS': u'1',
u'connection_set-TOTAL_FORMS': u'2',
u'connection_set-MAX_NUM_FORMS': u'10',
}
identities = [c.identity for c in contact.connection_set.all()]
self.assertNotIn(data['connection_set-1-identity'], identities)
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="POST", POST=data)
self.login()
request.user = self.user
retval = views.contact(request, pk=contact.pk)
self.assertTrue(isinstance(retval, HttpResponseRedirect))
self.assertEqual(302, retval.status_code)
render.assert_called()
new_contact = Contact.objects.get(pk=contact.pk)
self.assertEqual(data['name'], new_contact.name)
self.assertEqual(data['language'], new_contact.language)
identities = [c.identity for c in contact.connection_set.all()]
self.assertIn(data['connection_set-1-identity'], identities)
def test_contact_delete(self):
# Submitting with Delete button deletes the contact
contact = self.contacts[0]
data = {
u'name': u'<NAME>',
u'language': u'wxyz',
u'delete_contact': u"dontcare",
u'connection_set-0-id': u'2',
u'connection_set-0-DELETE': u'',
u'connection_set-0-backend': u'1',
u'connection_set-0-contact': u'1',
u'connection_set-0-identity': u'4567',
u'connection_set-1-id': u'',
u'connection_set-1-contact': u'1',
u'connection_set-1-identity': u'987654',
u'connection_set-1-backend': u'1',
u'connection_set-INITIAL_FORMS': u'1',
u'connection_set-TOTAL_FORMS': u'2',
u'connection_set-MAX_NUM_FORMS': u'10',
}
with patch('rapidsms.contrib.registration.views.render'):
request = Mock(method="POST", POST=data)
self.login()
request.user = self.user
retval = views.contact(request, pk=contact.pk)
self.assertTrue(isinstance(retval, HttpResponseRedirect))
self.assertEqual(302, retval.status_code)
self.assertFalse(Contact.objects.filter(pk=contact.pk).exists())
def test_contact_create(self):
# POST with no existing contact creates a new one
name = u'<NAME>'
data = {
u'name': name,
u'language': u'wxyz',
u'submit': u'Save Contact',
u'connection_set-0-id': u'',
u'connection_set-0-DELETE': u'',
u'connection_set-0-backend': u'1',
u'connection_set-0-contact': u'',
u'connection_set-0-identity': u'4567',
u'connection_set-1-id': u'',
u'connection_set-1-contact': u'',
u'connection_set-1-identity': u'',
u'connection_set-1-backend': u'',
u'connection_set-INITIAL_FORMS': u'0',
u'connection_set-TOTAL_FORMS': u'2',
u'connection_set-MAX_NUM_FORMS': u'10',
}
with patch('rapidsms.contrib.registration.views.render'):
request = Mock(method="POST", POST=data)
self.login()
request.user = self.user
retval = views.contact(request)
self.assertTrue(isinstance(retval, HttpResponseRedirect))
self.assertEqual(302, retval.status_code)
Contact.objects.get(name=name)
def test_delete_connection(self):
# POST can delete one of the connections
contact = self.create_contact()
# Give it two connections
self.create_connection(data={'contact': contact})
self.create_connection(data={'contact': contact})
# Submit form filled out to delete a connection
connections = contact.connection_set.all()
data = {
u'name': u'<NAME>',
u'language': u'en',
u'submit': u"Save Contact",
u'connection_set-0-id': connections[0].pk,
u'connection_set-0-identity': connections[0].identity,
u'connection_set-0-backend': connections[0].backend.pk,
u'connection_set-0-contact': contact.pk,
u'connection_set-1-id': connections[1].pk,
u'connection_set-1-identity': connections[1].identity,
u'connection_set-1-backend': connections[1].backend.pk,
u'connection_set-1-contact': contact.pk,
u'connection_set-1-DELETE': u"connection_set-1-DELETE",
u'connection_set-2-id': u'',
u'connection_set-2-backend': u'',
u'connection_set-2-identity': u'',
u'connection_set-2-contact': u'',
u'connection_set-TOTAL_FORMS': u'3',
u'connection_set-MAX_NUM_FORMS': u'10',
u'connection_set-INITIAL_FORMS': u'2',
}
old_pk = connections[1].pk
with patch('rapidsms.contrib.registration.views.render'):
request = Mock(method="POST", POST=data)
self.login()
request.user = self.user
retval = views.contact(request, pk=contact.pk)
self.assertTrue(isinstance(retval, HttpResponseRedirect))
self.assertEqual(302, retval.status_code)
self.assertFalse(Connection.objects.filter(pk=old_pk).exists())
def test_add_connection(self):
# POST can add a new connection
contact = self.create_contact()
# Give it ONE connection
self.create_connection(data={'contact': contact})
# Submit form filled out to add another connection
connections = contact.connection_set.all()
data = {
u'name': u'<NAME>',
u'language': u'en',
u'submit': u"Save Contact",
u'connection_set-0-id': connections[0].pk,
u'connection_set-0-identity': connections[0].identity,
u'connection_set-0-backend': connections[0].backend.pk,
u'connection_set-0-contact': contact.pk,
u'connection_set-1-id': u'',
u'connection_set-1-identity': 'identity',
u'connection_set-1-backend': connections[0].backend.pk,
u'connection_set-1-contact': contact.pk,
u'connection_set-TOTAL_FORMS': u'2',
u'connection_set-MAX_NUM_FORMS': u'10',
u'connection_set-INITIAL_FORMS': u'1',
}
with patch('rapidsms.contrib.registration.views.render'):
request = Mock(method="POST", POST=data)
self.login()
request.user = self.user
retval = views.contact(request, pk=contact.pk)
self.assertTrue(isinstance(retval, HttpResponseRedirect))
self.assertEqual(302, retval.status_code)
self.assertEqual(2, Connection.objects.filter(contact=contact).count())
conn = Connection.objects.get(identity='identity', contact=contact)
self.assertEqual(connections[0].backend, conn.backend)
class TestBulkAdd(TestCase, CreateDataMixin, LoginMixin):
def test_bulk_get(self):
# Just make sure the page loads
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="GET")
views.contact_bulk_add(request)
render.assert_called()
def test_bulk_add(self):
# We can upload a CSV file to create contacts & connections
backend1 = self.create_backend()
backend2 = self.create_backend()
# Include a unicode name to make sure that works
uname = u'Name 1 ḀḂḈ ᵺ'
data = [
(uname, backend1.name, u'11111'),
(u'Name 2', backend2.name, u'22222'),
(u'Name 3', backend1.name, u'33333'),
(u'Name 4', backend2.name, u'44444'),
]
# Create test file
testfile = u"\n".join([u",".join(parts) for parts in data]) + u"\n"
testfile_data = testfile.encode('utf-8')
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="POST",
FILES={'bulk': StringIO(testfile_data)})
self.login()
request.user = self.user
retval = views.contact_bulk_add(request)
if not isinstance(retval, HttpResponseRedirect):
context = render.call_args[0][2]
self.fail(context['bulk_form'].errors + context['csv_errors'])
self.assertTrue(isinstance(retval, HttpResponseRedirect))
self.assertEqual(302, retval.status_code)
contacts = Contact.objects.all()
self.assertEqual(4, contacts.count())
names = [contact.name for contact in contacts]
self.assertIn(uname, names)
def test_bulk_add_no_lines(self):
testfile = ""
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="POST", FILES={'bulk': StringIO(testfile)})
self.login()
request.user = self.user
retval = views.contact_bulk_add(request)
self.assertFalse(isinstance(retval, HttpResponseRedirect))
context = render.call_args[0][2]
self.assertIn('csv_errors', context)
self.assertEqual('No contacts found in file', context['csv_errors'])
def test_bulk_add_bad_line(self):
testfile = "Field 1, field 2\n"
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="POST", FILES={'bulk': StringIO(testfile)})
self.login()
request.user = self.user
retval = views.contact_bulk_add(request)
self.assertFalse(isinstance(retval, HttpResponseRedirect))
context = render.call_args[0][2]
self.assertIn('csv_errors', context)
self.assertEqual('Could not unpack line 1', context['csv_errors'])
def test_bulk_add_bad_backend(self):
testfile = "Field 1, no_such_backend, 123\n"
with patch('rapidsms.contrib.registration.views.render') as render:
request = Mock(method="POST", FILES={'bulk': StringIO(testfile)})
self.login()
request.user = self.user
retval = views.contact_bulk_add(request)
self.assertFalse(isinstance(retval, HttpResponseRedirect))
context = render.call_args[0][2]
self.assertIn('csv_errors', context)
self.assertEqual("Could not find Backend. Line: 1",
context['csv_errors'])
``` |
{
"source": "johannEze/PROV_ESDF",
"score": 2
} |
#### File: PROV_ESDF/tests/test_dot.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import unittest
# Skipping SVG tests if pydot is not installed
from pkgutil import find_loader
if find_loader("pydot") is not None:
from prov.dot import prov_to_dot
from prov.tests.test_model import AllTestsBase
from prov.tests.utility import DocumentBaseTestCase
class SVGDotOutputTest(DocumentBaseTestCase, AllTestsBase):
"""
One-way output SVG with prov.dot to exercise its code
"""
MIN_SVG_SIZE = 850
def do_tests(self, prov_doc, msg=None):
dot = prov_to_dot(prov_doc)
svg_content = dot.create(format="svg")
# Very naive check of the returned SVG content as we have no way to check the graphical content
self.assertGreater(
len(svg_content), self.MIN_SVG_SIZE,
"The size of the generated SVG content should be greater than %d bytes" % self.MIN_SVG_SIZE
)
if __name__ == '__main__':
unittest.main()
``` |
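The test above only checks the size of the generated SVG. A minimal end-to-end sketch of the same call chain, assuming the `ProvDocument` API of the prov package and an installed pydot; the namespace and record names are made up.

```python
# Sketch only: build a tiny PROV document and render it to SVG bytes,
# mirroring the prov_to_dot(...).create(format="svg") call used in the test.
from prov.model import ProvDocument  # assumed API of the prov package
from prov.dot import prov_to_dot

doc = ProvDocument()
doc.add_namespace("ex", "http://example.org/")   # hypothetical namespace
doc.entity("ex:dataset")                          # hypothetical entity
doc.activity("ex:analysis")                       # hypothetical activity
doc.wasGeneratedBy("ex:dataset", "ex:analysis")

dot = prov_to_dot(doc)
svg_content = dot.create(format="svg")            # same call as in the test
print(len(svg_content), "bytes of SVG")
```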
{
"source": "johannfaouzi/tslearn",
"score": 3
} |
#### File: docs/examples/plot_barycenters.py
```python
import numpy
import matplotlib.pyplot as plt
from tslearn.barycenters import \
euclidean_barycenter, \
dtw_barycenter_averaging, \
dtw_barycenter_averaging_subgradient, \
softdtw_barycenter
from tslearn.datasets import CachedDatasets
# fetch the example data set
numpy.random.seed(0)
X_train, y_train, _, _ = CachedDatasets().load_dataset("Trace")
X = X_train[y_train == 2]
length_of_sequence = X.shape[1]
def plot_helper(barycenter):
# plot all points of the data set
for series in X:
plt.plot(series.ravel(), "k-", alpha=.2)
# plot the given barycenter of them
plt.plot(barycenter.ravel(), "r-", linewidth=2)
# plot the four variants with the same number of iterations and a tolerance of
# 1e-3 where applicable
ax1 = plt.subplot(4, 1, 1)
plt.title("Euclidean barycenter")
plot_helper(euclidean_barycenter(X))
plt.subplot(4, 1, 2, sharex=ax1)
plt.title("DBA (vectorized version of Petitjean's EM)")
plot_helper(dtw_barycenter_averaging(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 3, sharex=ax1)
plt.title("DBA (subgradient descent approach)")
plot_helper(dtw_barycenter_averaging_subgradient(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 4, sharex=ax1)
plt.title("Soft-DTW barycenter ($\gamma$=1.0)")
plot_helper(softdtw_barycenter(X, gamma=1., max_iter=50, tol=1e-3))
# clip the axes for better readability
ax1.set_xlim([0, length_of_sequence])
# show the plot(s)
plt.tight_layout()
plt.show()
``` |
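The gallery script above plots all four variants on the Trace dataset. As a quick standalone sketch, the soft-DTW barycenter alone can be computed on a small hand-made dataset; the shapes follow tslearn's `(n_ts, sz, d)` convention and the numbers are arbitrary.

```python
# Minimal sketch: soft-DTW barycenter of three short, made-up series.
import numpy
from tslearn.barycenters import softdtw_barycenter

X = numpy.array([[1., 2., 3., 4.],
                 [0., 3., 4., 5.],
                 [1., 2., 2., 4.]]).reshape((3, 4, 1))  # (n_ts, sz, d)

bar = softdtw_barycenter(X, gamma=1., max_iter=50, tol=1e-3)
print(bar.ravel())  # the barycenter has the same length as the input series
```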
{
"source": "johannfrias/billboard-charts",
"score": 3
} |
#### File: billboard-charts/tests/test_digital_albums.py
```python
import json
import os
import unittest
import billboard
from utils import get_test_dir
class TestCurrentDigitalAlbums(unittest.TestCase):
"""Checks that the ChartData object for the current Digital Albums chart
has entries and instance variables that are valid and reasonable. Does not
test whether the data is actually correct.
"""
def setUp(self):
self.chart = billboard.ChartData('digital-albums')
def test_date(self):
self.assertIsNotNone(self.chart.date)
def test_ranks(self):
ranks = list(entry.rank for entry in self.chart)
self.assertEqual(ranks, list(range(1, 26)))
def test_entries_validity(self):
self.assertEqual(len(self.chart), 25)
for entry in self.chart:
self.assertGreater(len(entry.title), 0)
self.assertGreater(len(entry.artist), 0)
self.assertTrue(1 <= entry.peakPos <= 100)
self.assertTrue(0 <= entry.lastPos <= 100)
self.assertGreaterEqual(entry.weeks, 0)
# Redundant because of test_ranks
self.assertTrue(1 <= entry.rank <= 25)
self.assertIsInstance(entry.isNew, bool)
def test_entries_consistency(self):
for entry in self.chart:
if entry.isNew:
self.assertEqual(entry.lastPos, 0)
def test_json(self):
self.assertTrue(json.loads(self.chart.json()))
for entry in self.chart:
self.assertTrue(json.loads(entry.json()))
class TestHistoricalDigitalAlbums(TestCurrentDigitalAlbums):
"""Checks that the ChartData object for a previous week's Digital Albums
chart has entries and instance variables that are valid and reasonable.
Also compares the chart data against a previously downloaded "reference"
version. This comparison is done based on the string representation; it
excludes attributes like peakPos and weeks, which can change over time.
"""
def setUp(self):
self.chart = billboard.ChartData('digital-albums', date='2017-03-04')
def test_date(self):
self.assertEqual(self.chart.date, '2017-03-04')
self.assertEqual(self.chart.previousDate, '2017-02-25')
self.assertEqual(self.chart.nextDate, '2017-03-11')
def test_entries_correctness(self):
reference_path = os.path.join(get_test_dir(),
'2017-03-04-digital-albums.txt')
with open(reference_path) as reference:
self.assertEqual(str(self.chart), reference.read())
```
#### File: billboard-charts/tests/test_hot_100.py
```python
import json
import os
import unittest
import billboard
from utils import get_test_dir
class TestCurrentHot100(unittest.TestCase):
"""Checks that the ChartData object for the current Hot 100 chart has
entries and instance variables that are valid and reasonable. Does not test
whether the data is actually correct.
"""
def setUp(self):
self.chart = billboard.ChartData('hot-100')
def test_date(self):
self.assertIsNotNone(self.chart.date)
def test_ranks(self):
ranks = list(entry.rank for entry in self.chart)
self.assertEqual(ranks, list(range(1, 101)))
def test_entries_validity(self):
self.assertEqual(len(self.chart), 100)
for entry in self.chart:
self.assertGreater(len(entry.title), 0)
self.assertGreater(len(entry.artist), 0)
self.assertTrue(1 <= entry.peakPos <= 100)
self.assertTrue(0 <= entry.lastPos <= 100)
self.assertGreaterEqual(entry.weeks, 0)
# Redundant because of test_ranks
self.assertTrue(1 <= entry.rank <= 100)
self.assertIsInstance(entry.isNew, bool)
def test_entries_consistency(self):
for entry in self.chart:
if entry.isNew:
self.assertEqual(entry.lastPos, 0)
def test_json(self):
self.assertTrue(json.loads(self.chart.json()))
for entry in self.chart:
self.assertTrue(json.loads(entry.json()))
class TestHistoricalHot100(TestCurrentHot100):
"""Checks that the ChartData object for a previous week's Hot 100 chart has
entries and instance variables that are valid and reasonable.
Also compares the chart data against a previously downloaded "reference"
version. This comparison is done based on the string representation; it
excludes attributes like peakPos and weeks, which can change over time.
"""
def setUp(self):
self.chart = billboard.ChartData('hot-100', date='2015-11-28')
def test_date(self):
self.assertEqual(self.chart.date, '2015-11-28')
self.assertEqual(self.chart.previousDate, '2015-11-21')
self.assertEqual(self.chart.nextDate, '2015-12-05')
def test_entries_correctness(self):
reference_path = os.path.join(get_test_dir(), '2015-11-28-hot-100.txt')
with open(reference_path) as reference:
self.assertEqual(str(self.chart), reference.read())
```
#### File: billboard-charts/tests/test_misc.py
```python
import billboard
import unittest
from nose.tools import raises
from requests.exceptions import ConnectionError
@raises(ConnectionError)
def test_timeout():
"""Checks that using a very small timeout prevents connection."""
billboard.ChartData('hot-100', timeout=1e-9)
@raises(billboard.BillboardNotFoundException)
def test_non_existent_chart():
"""Checks that requesting a non-existent chart fails."""
billboard.ChartData('does-not-exist')
``` |
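Beyond the assertions above, the same `ChartData` API can be used directly. A small sketch restricted to the attributes the tests rely on; the chosen chart and date simply reuse values from the historical test.

```python
# Sketch using only the ChartData attributes asserted on in the tests above.
import billboard

chart = billboard.ChartData('hot-100', date='2015-11-28')
print(chart.date, chart.previousDate, chart.nextDate)

for i, entry in enumerate(chart):
    if i == 5:
        break
    # rank, title, artist, peakPos, lastPos, weeks and isNew are the
    # attributes the test suite checks.
    print(entry.rank, entry.title, '-', entry.artist,
          '(peak %d, weeks %d)' % (entry.peakPos, entry.weeks))

# Both the chart and each entry serialize to JSON strings.
chart_json = chart.json()
```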
{
"source": "johanngan/special_relativity",
"score": 3
} |
#### File: specrel/tests/test_geom.py
```python
import unittest
import specrel.geom as geom
from specrel.graphics.basegraph import STPlotter
class _MockSTPlotter(STPlotter):
"""Mock plotter for draw() methods.
Just tabulates the objects it's told to draw, but doesn't actually draw
anything.
"""
def __init__(self):
# Tuples with all the given object info
self.points = []
self.segments = []
self.polygons = []
self.tlim = [None, None]
self.xlim = [None, None]
def draw_point(self, point, tag, **kwargs):
self.points.append((tuple(point), tag, kwargs))
def draw_line_segment(self, point1, point2, tag, **kwargs):
self.segments.append((tuple(point1), tuple(point2), tag, kwargs))
def draw_shaded_polygon(self, vertices, tag, **kwargs):
self.polygons.append(([tuple(v) for v in vertices], tag, kwargs))
def set_lims(self, tlim, xlim):
self.tlim = tlim
self.xlim = xlim
# Only for interactive mode; doesn't matter here, but abstract method must
# be overridden
def show(self):
pass
# For testing stored draw objects within numerical precision
@staticmethod
def points_equal(testcase, p1, p2):
for x, y in zip(p1[0], p2[0]):
testcase.assertAlmostEqual(x, y)
testcase.assertEqual(p1[1:], p2[1:])
@staticmethod
def segments_equal(testcase, s1, s2):
for x, y in zip(s1[0] + s1[1], s2[0] + s2[1]):
testcase.assertAlmostEqual(x, y)
testcase.assertEqual(s1[2:], s2[2:])
@staticmethod
def polygons_equal(testcase, p1, p2):
for v1, v2 in zip(p1[0], p2[0]):
for x, y in zip(v1, v2):
testcase.assertAlmostEqual(x, y)
testcase.assertEqual(p1[1:], p2[1:])
class STVectorInitTests(unittest.TestCase):
"""Instance initialization."""
def test_init_from_timepos(self):
stvec = geom.STVector(2, 3)
self.assertEqual(stvec.t, 2)
self.assertEqual(stvec.x, 3)
def test_init_from_iterable(self):
stvec = geom.STVector((2, 3))
self.assertEqual(stvec.t, 2)
self.assertEqual(stvec.x, 3)
def test_copy_ctor(self):
stvec = geom.STVector(2, 3,
tag='test', precision=4, draw_options={'color': 'red'})
stvec_cpy = geom.STVector(stvec)
self.assertEqual(stvec.t, stvec_cpy.t)
self.assertEqual(stvec.x, stvec_cpy.x)
self.assertEqual(stvec.tag, 'test')
self.assertEqual(stvec.precision, stvec_cpy.precision)
self.assertEqual(stvec.draw_options, {'color': 'red'})
def test_copy_ctor_with_override(self):
stvec = geom.STVector(2, 3, precision=4)
stvec_cpy = geom.STVector(stvec, precision=6, tag='test')
self.assertEqual(stvec_cpy.precision, 6)
self.assertEqual(stvec_cpy.tag, 'test')
def test_init_invalid_nargs(self):
self.assertRaises(TypeError, geom.STVector, 2, 3, 4)
class STVectorOverloadTests(unittest.TestCase):
"""Overloaded operators and special methods."""
def test_getitem(self):
stvec = geom.STVector(2, 3)
self.assertEqual(stvec[0], 2)
self.assertEqual(stvec[1], 3)
def test_str(self):
self.assertEqual(str(geom.STVector(2, 3)), 'STVector(2, 3)')
def test_str_within_precision(self):
self.assertEqual(str(geom.STVector(2.001, 2.999, precision=3)),
'STVector(2.001, 2.999)')
self.assertEqual(str(geom.STVector(2.0001, 2.9999, precision=3)),
'STVector(2.0, 3.0)')
def test_iter(self):
for cmp, answer in zip(geom.STVector(2, 3), [2, 3]):
self.assertEqual(cmp, answer)
def test_eq(self):
self.assertEqual(geom.STVector(2, 3), (2, 3))
def test_eq_within_precision(self):
self.assertEqual(geom.STVector(2, 3, precision=3), (2.0001, 3))
self.assertNotEqual(geom.STVector(2, 3, precision=3), (2.001, 3))
def test_neg(self):
self.assertEqual(-geom.STVector(2, 3), geom.STVector(-2, -3))
def test_add(self):
self.assertEqual(geom.STVector(2, 3) + geom.STVector(3, 3), (5, 6))
def test_abs(self):
self.assertEqual(abs(geom.STVector(2, 3)), 5)
class STVectorCoreTests(unittest.TestCase):
"""Actual core functionality as spacetime objects."""
def test_lorentz_transform(self):
stvec = geom.STVector(2, 3)
stvec.lorentz_transform(3/5)
self.assertAlmostEqual(stvec.t, 1/4)
self.assertAlmostEqual(stvec.x, 9/4)
def test_lorentz_transform_origin_1_1(self):
stvec = geom.STVector(3, 4)
stvec.lorentz_transform(3/5, origin=(1, 1))
self.assertAlmostEqual(stvec.t, 1/4 + 1)
self.assertAlmostEqual(stvec.x, 9/4 + 1)
def test_draw_in_bounds(self):
p = _MockSTPlotter()
geom.STVector(3, 4, tag='test', draw_options={'color': 'red'}).draw(p)
self.assertEqual(len(p.points), 1)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 0)
self.assertEqual(p.points[0], ((3, 4), 'test', {'color': 'red'}))
self.assertEqual(p.tlim, (3, 3))
self.assertEqual(p.xlim, (4, 4))
def test_draw_out_of_bounds(self):
p = _MockSTPlotter()
geom.STVector(3, 4).draw(p, tlim=(4, 5))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 0)
def test_auto_draw_lims(self):
self.assertEqual(geom.STVector(2, 3)._auto_draw_lims(),
((2, 2), (3, 3)))
def test_in_bounds(self):
stvec = geom.STVector(2, 3)
self.assertTrue(stvec._in_bounds((0, 3), (2, 4)))
self.assertFalse(stvec._in_bounds((3, 4), (1, 2)))
def test_in_bounds_within_precision(self):
stvec1 = geom.STVector(2.0001, 3.0001, precision=3)
self.assertTrue(stvec1._in_bounds((0, 2), (2, 3)))
stvec2 = geom.STVector(2.001, 3.001, precision=3)
self.assertFalse(stvec2._in_bounds((0, 2), (2, 3)))
def test_in_bounds_exact_equality(self):
self.assertTrue(geom.STVector(2, 3)._in_bounds((2, 2), (3, 3)))
# Tests specifically for the gamma_factor() static method
def test_gamma_factor_stationary(self):
self.assertEqual(geom.STVector.gamma_factor(0), 1)
def test_gamma_factor_at_c(self):
self.assertRaises(ZeroDivisionError, geom.STVector.gamma_factor, 1)
def test_gamma_factor_ftl(self):
self.assertIsInstance(geom.STVector.gamma_factor(2), complex)
def test_gamma_factor_three_fifths(self):
self.assertAlmostEqual(geom.STVector.gamma_factor(3/5), 5/4)
class test_lorentz_transformed(unittest.TestCase):
def setUp(self):
self.original = geom.STVector(2, 3)
self.transformed = geom.lorentz_transformed(self.original, 3/5)
def test_transformed_values(self):
self.assertAlmostEqual(self.transformed[0], 1/4)
self.assertAlmostEqual(self.transformed[1], 9/4)
def test_deep_copied(self):
# Check that the original wasn't mutated
self.assertNotAlmostEqual(self.original[0], self.transformed[0])
self.assertNotAlmostEqual(self.original[1], self.transformed[1])
class CollectionTests(unittest.TestCase):
def setUp(self):
group = geom.PointGroup([(0, 1), (2, 3)])
line = geom.Line((0, 1), (2, 3))
ribbon = geom.Ribbon(geom.Line((0, 1), (2, 3)),
geom.Line((0, 1), (0, 0)))
self.collection = geom.Collection([group, line, ribbon])
def test_init(self):
self.assertEqual(self.collection[0][0], (0, 1))
self.assertEqual(self.collection[0][1], (2, 3))
self.assertEqual(self.collection[1].direction(), (0, 1))
self.assertEqual(self.collection[1].point(), (2, 3))
self.assertEqual(self.collection[2][0].direction(), (0, 1))
self.assertEqual(self.collection[2][0].point(), (2, 3))
self.assertEqual(self.collection[2][1].direction(), (0, 1))
self.assertEqual(self.collection[2][1].point(), (0, 0))
def test_append(self):
self.collection.append(geom.STVector(5, 5))
self.assertEqual(len(self.collection), 4)
self.assertEqual(self.collection[3], (5, 5))
def test_append_nontransformable(self):
self.assertRaises(ValueError, self.collection.append, 1)
def test_lorentz_transform(self):
v = 3/5
group_transformed = geom.lorentz_transformed(
geom.PointGroup([(0, 1), (2, 3)]), v)
line_transformed = geom.lorentz_transformed(
geom.Line((0, 1), (2, 3)), v)
ribbon_transformed = geom.lorentz_transformed(
geom.Ribbon(geom.Line((0, 1), (2, 3)), geom.Line((0, 1), (0, 0))),
v)
self.collection.lorentz_transform(v)
self.assertAlmostEqual(self.collection[0][0], group_transformed[0])
self.assertAlmostEqual(self.collection[0][1], group_transformed[1])
self.assertAlmostEqual(self.collection[1].direction(),
line_transformed.direction())
self.assertAlmostEqual(self.collection[1].point(),
line_transformed.point())
self.assertAlmostEqual(self.collection[2][0].direction(),
ribbon_transformed[0].direction())
self.assertAlmostEqual(self.collection[2][0].point(),
ribbon_transformed[0].point())
self.assertAlmostEqual(self.collection[2][1].direction(),
ribbon_transformed[1].direction())
self.assertAlmostEqual(self.collection[2][1].point(),
ribbon_transformed[1].point())
def test_draw(self):
p = _MockSTPlotter()
self.collection.draw(p, tlim=(-5, 5), xlim=(-5, 5))
self.assertEqual(len(p.points), 2)
p.points_equal(self, p.points[0], ((0, 1), None, {}))
p.points_equal(self, p.points[1], ((2, 3), None, {}))
self.assertEqual(len(p.segments), 3)
p.segments_equal(self, p.segments[0], ((2, -5), (2, 5), None, {}))
self.assertEqual(len(p.polygons), 1)
p.polygons_equal(self, p.polygons[0],
([(0, -5), (0, 5), (2, 5), (2, -5)], None, {}))
p.segments_equal(self, p.segments[1], ((2, -5), (2, 5), None,
{'color': geom.geomrc['ribbon.default_edgecolor'], 'zorder': 1}))
p.segments_equal(self, p.segments[2], ((0, -5), (0, 5), None,
{'color': geom.geomrc['ribbon.default_edgecolor'], 'zorder': 1}))
self.assertEqual(p.tlim, (-5, 5))
self.assertEqual(p.xlim, (-5, 5))
def test_auto_draw_lims(self):
self.assertEqual(self.collection._auto_draw_lims(), ((0, 2), (-1, 4)))
class PointGroupTests(unittest.TestCase):
def setUp(self):
self.group = geom.PointGroup([(0, 0), (0, 1), (1, 0)], tag='test')
def test_init(self):
self.assertEqual(self.group[0].t, 0)
self.assertEqual(self.group[0].x, 0)
self.assertEqual(self.group[1].t, 0)
self.assertEqual(self.group[1].x, 1)
self.assertEqual(self.group[2].t, 1)
self.assertEqual(self.group[2].x, 0)
self.assertEqual(self.group.mode, geom.PointGroup.POINT)
def test_draw(self):
p = _MockSTPlotter()
self.group.draw(p)
self.assertEqual(len(p.points), 3)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 0)
self.assertEqual(p.points[0], ((0, 0), None, {}))
self.assertEqual(p.points[1], ((0, 1), None, {}))
self.assertEqual(p.points[2], ((1, 0), None, {}))
self.assertEqual(p.tlim, (0, 1))
self.assertEqual(p.xlim, (0, 1))
def test_draw_connect(self):
self.group.mode = geom.PointGroup.CONNECT
p = _MockSTPlotter()
self.group.draw(p)
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 2)
self.assertEqual(len(p.polygons), 0)
p.segments_equal(self, p.segments[0], ((0, 0), (0, 1), 'test', {}))
p.segments_equal(self, p.segments[1], ((0, 1), (1, 0), 'test', {}))
self.assertEqual(p.tlim, (0, 1))
self.assertEqual(p.xlim, (0, 1))
def test_draw_polygon(self):
self.group.mode = geom.PointGroup.POLYGON
p = _MockSTPlotter()
self.group.draw(p)
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 1)
p.polygons_equal(self, p.polygons[0],
([(0, 0), (0, 1), (1, 0)], 'test', {}))
self.assertEqual(p.tlim, (0, 1))
self.assertEqual(p.xlim, (0, 1))
class LineInitTests(unittest.TestCase):
"""Instance initialization."""
def test_init(self):
line = geom.Line((0, 1), (2, 3), precision=7, tag='test',
draw_options={'color': 'red'})
self.assertEqual(line.direction().t, 0)
self.assertEqual(line.direction().x, 1)
self.assertEqual(line.point().t, 2)
self.assertEqual(line.point().x, 3)
self.assertEqual(line.precision(), 7)
self.assertEqual(line.tag, 'test')
self.assertEqual(line.draw_options, {'color': 'red'})
def test_init_error_on_zero_direction(self):
self.assertRaises(ValueError, geom.Line, (0, 0), (2, 3))
def test_init_with_override(self):
line = geom.Line(geom.STVector(0, 1, precision=7),
geom.STVector(2, 3, precision=7), precision=5, tag='test')
self.assertEqual(line.precision(), 5)
self.assertEqual(line.tag, 'test')
class LineOverloadTests(unittest.TestCase):
"""Overloaded operators and special methods."""
def test_str(self):
self.assertEqual(str(geom.Line((0, 1), (2, 3))),
'Line( [t, x] = [2, 3] + k*[0, 1] )')
def test_str_within_precision(self):
self.assertEqual(str(
geom.Line((0.001, 1.001), (2.001, 2.999), precision=3)),
'Line( [t, x] = [2.001, 2.999] + k*[0.001, 1.001] )')
self.assertEqual(str(
geom.Line((0.0001, 1.0001), (2.0001, 2.9999), precision=3)),
'Line( [t, x] = [2.0, 3.0] + k*[0.0, 1.0] )')
def test_eq_same(self):
self.assertEqual(geom.Line((1, 1), (2, 3)), geom.Line((1, 1), (2, 3)))
def test_eq_diff_params(self):
self.assertEqual(geom.Line((1, 1), (2, 3)), geom.Line((2, 2), (1, 2)))
def test_eq_within_precision(self):
self.assertEqual(geom.Line((1.0001, 1), (2, 3), precision=3),
geom.Line((1, 1), (2, 3)))
self.assertNotEqual(geom.Line((1.001, 1), (2, 3), precision=3),
geom.Line((1, 1), (2, 3)))
def test_cannot_append(self):
self.assertRaises(TypeError, geom.Line((0, 1), (2, 3)).append,
geom.STVector(1, 1))
class LineCoreTests(unittest.TestCase):
"""Actual core functionality as spacetime objects."""
def test_lorentz_transform_origin_1_1(self):
line = geom.Line((2, 3), (3, 4))
line.lorentz_transform(3/5, origin=(1, 1))
self.assertAlmostEqual(line.direction().t, 1/4)
self.assertAlmostEqual(line.direction().x, 9/4)
self.assertAlmostEqual(line.point().t, 1/4 + 1)
self.assertAlmostEqual(line.point().x, 9/4 + 1)
def test_draw_in_bounds(self):
line = geom.Line((1, 1), (2, 3))
p = _MockSTPlotter()
line.draw(p, tlim=(-1, 2), xlim=(0, 2))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 1)
self.assertEqual(len(p.polygons), 0)
p.segments_equal(self, p.segments[0], ((-1, 0), (1, 2), None, {}))
self.assertEqual(p.tlim, (-1, 2))
self.assertEqual(p.xlim, (0, 2))
def test_draw_out_of_bounds(self):
line = geom.Line((1, 1), (2, 3))
p = _MockSTPlotter()
line.draw(p, tlim=(2, 3), xlim=(0, 1))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 0)
def test_auto_draw_lims(self):
line = geom.Line((1, -1), (2, 3))
self.assertEqual(line._auto_draw_lims(), ((1, 3), (2, 4)))
def test_slope_nonvertical(self):
self.assertEqual(geom.Line((1, 2), (2, 3)).slope(), 0.5)
def test_slope_vertical(self):
self.assertIsNone(geom.Line((1, 0), (2, 3)).slope())
class LineIntersectTests(unittest.TestCase):
"""Line intersection logic."""
def test_boundary_intersections_diag(self):
line = geom.Line((1, 1), (0.5, 0))
tlim = (0, 1)
xlim = (0, 1)
self.assertEqual(line._boundary_intersections(tlim, xlim),
[(0, -0.5), (0.5, 0), (1, 0.5), (1.5, 1)])
def test_boundary_intersections_horz(self):
line = geom.Line((0, 1), (0.5, 0))
tlim = (0, 1)
xlim = (0, 1)
self.assertEqual(line._boundary_intersections(tlim, xlim),
[(0.5, 0), (0.5, 1)])
def test_boundary_intersections_corner(self):
line = geom.Line((1, -1), (1, 0))
tlim = (0, 1)
xlim = (0, 1)
self.assertEqual(line._boundary_intersections(tlim, xlim),
[(0, 1), (1, 0)])
def test_intersect_is_symmetric(self):
line1 = geom.Line((1, 1), (0, 0))
line2 = geom.Line((1, -1), (0, 2))
self.assertEqual(line1.intersect(line2), line2.intersect(line1))
def test_intersect_point_slanted_lines(self):
line1 = geom.Line((1, 1), (0, 0))
line2 = geom.Line((1, -1), (0, 2))
self.assertEqual(line1.intersect(line2), (1, 1))
def test_intersect_point_one_vertical(self):
vertline = geom.Line((1, 0), (0, 0))
otherline = geom.Line((1, -1), (0, 2))
self.assertEqual(otherline.intersect(vertline), (2, 0))
def test_intersect_point_one_horizontal(self):
horzline = geom.Line((0, 1), (0, 0))
otherline = geom.Line((1, -1), (2, 0))
self.assertEqual(otherline.intersect(horzline), (0, 2))
def test_intersect_point_vertical_and_horizontal(self):
vertline = geom.Line((1, 0), (0, 1))
horzline = geom.Line((0, 1), (1, 0))
self.assertEqual(vertline.intersect(horzline), (1, 1))
def test_intersect_parallel(self):
self.assertIsNone(
geom.Line((1, 1), (0, 0)).intersect(geom.Line((2, 2), (0, 1))))
def test_intersect_equal_lines_diff_direction_diff_point(self):
line1 = geom.Line((1, 1), (2, 3))
line2 = geom.Line((2, 2), (3, 4))
intersection = line1.intersect(line2)
self.assertIsInstance(intersection, geom.Line)
self.assertEqual(intersection.direction(), line1.direction())
self.assertEqual(intersection.point(), line1.point())
class RayOverloadTests(unittest.TestCase):
"""Overloaded operators and special methods."""
def test_str(self):
self.assertEqual(str(geom.Ray((0, 1), (2, 3))),
'Ray( [t, x] = [2, 3] + k*[0, 1] where k >= 0 )')
def test_eq(self):
self.assertEqual(geom.Ray((1, 1), (2, 3)), geom.Ray((1, 1), (2, 3)))
def test_eq_scaled_dir(self):
self.assertEqual(geom.Ray((1, 1), (2, 3)), geom.Ray((2, 2), (2, 3)))
def test_neq_diff_point(self):
self.assertNotEqual(geom.Ray((1, 1), (2, 3)), geom.Ray((1, 1), (1, 2)))
def test_neq_opp_dir(self):
self.assertNotEqual(geom.Ray((1, 1), (2, 3)),
geom.Ray((-1, -1), (2, 3)))
class RayCoreTests(unittest.TestCase):
"""Actual core functionality as spacetime objects."""
def test_draw_endpoint_out_of_bounds(self):
ray = geom.Ray((-1, -1), (2, 3))
p = _MockSTPlotter()
ray.draw(p, tlim=(-1, 2), xlim=(0, 2))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 1)
self.assertEqual(len(p.polygons), 0)
p.segments_equal(self, p.segments[0], ((-1, 0), (1, 2), None, {}))
def test_draw_endpoint_in_bounds(self):
ray = geom.Ray((1, 1), (0, 1))
p = _MockSTPlotter()
ray.draw(p, tlim=(-1, 2), xlim=(0, 2))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 1)
self.assertEqual(len(p.polygons), 0)
p.segments_equal(self, p.segments[0], ((0, 1), (1, 2), None, {}))
def test_draw_totally_out_of_bounds(self):
ray = geom.Ray((1, 1), (2, 3))
p = _MockSTPlotter()
ray.draw(p, tlim=(-1, 2), xlim=(0, 2))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 0)
def test_auto_draw_lims(self):
ray = geom.Ray((1, -1), (2, 3))
self.assertEqual(ray._auto_draw_lims(), ((2, 3), (2, 3)))
class RayIntersectTests(unittest.TestCase):
"""Ray-line intersection logic."""
def test_boundary_intersections_exterior_endpoint(self):
ray = geom.Ray((0, 1), (0.5, -1))
tlim = (0, 1)
xlim = (0, 1)
self.assertEqual(ray._boundary_intersections(tlim, xlim),
[(0.5, -1), (0.5, 0), (0.5, 1)])
def test_boundary_intersections_interior_endpoint(self):
ray = geom.Ray((0, 1), (0.5, 0.5))
tlim = (0, 1)
xlim = (0, 1)
self.assertEqual(ray._boundary_intersections(tlim, xlim),
[(0.5, 0.5), (0.5, 1)])
def test_intersect_parallel(self):
ray = geom.Ray((1, 1), (0, 0))
line = geom.Line((2, 2), (0, 1))
self.assertIsNone(ray.intersect(line))
def test_intersect_equal_lines_diff_direction_diff_point(self):
ray = geom.Ray((1, 1), (2, 3))
line = geom.Line((2, 2), (3, 4))
intersection = ray.intersect(line)
self.assertIsInstance(intersection, geom.Ray)
self.assertEqual(intersection.direction(), ray.direction())
self.assertEqual(intersection.point(), ray.point())
def test_actual_intersection_full_crossing(self):
ray = geom.Ray((1, 0), (0, 1))
line = geom.Line((0, 1), (1, 0))
self.assertEqual(ray.intersect(line), (1, 1))
def test_actual_intersection_tangent(self):
ray = geom.Ray((1, 0), (1, 1))
line = geom.Line((0, 1), (1, 0))
self.assertEqual(ray.intersect(line), (1, 1))
def test_ghost_intersection(self):
ray = geom.Ray((1, 0), (2, 1))
line = geom.Line((0, 1), (1, 0))
self.assertIsNone(ray.intersect(line))
class RibbonBasicTests(unittest.TestCase):
"""Basic functionality tests."""
def test_init(self):
ribbon = geom.Ribbon(geom.Line((0, 1), (2, 3)),
geom.Line((0, 1), (0, 0)), tag='test',
draw_options={'color': 'red'})
self.assertEqual(ribbon[0].direction().t, 0)
self.assertEqual(ribbon[0].direction().x, 1)
self.assertEqual(ribbon[0].point().t, 2)
self.assertEqual(ribbon[0].point().x, 3)
self.assertEqual(ribbon[1].direction().t, 0)
self.assertEqual(ribbon[1].direction().x, 1)
self.assertEqual(ribbon[1].point().t, 0)
self.assertEqual(ribbon[1].point().x, 0)
self.assertEqual(ribbon.tag, 'test')
self.assertEqual(ribbon.draw_options, {'color': 'red'})
def test_init_not_parallel(self):
self.assertRaises(ValueError, geom.Ribbon,
geom.Line((0, 1), (2, 3)), geom.Line((1, 1), (2, 3)))
def test_cannot_append(self):
self.assertRaises(TypeError, geom.Line((0, 1), (2, 3)).append,
geom.STVector(1, 1))
def test_draw(self):
p = _MockSTPlotter()
ribbon = geom.Ribbon(geom.Line((0, 1), (2, 3)),
geom.Line((0, 1), (0, 0)), tag='test',
draw_options={'facecolor': 'red', 'label': 'test2'})
ribbon.draw(p, tlim=(-2, 3), xlim=(0, 1))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 2)
self.assertEqual(len(p.polygons), 1)
p.polygons_equal(self, p.polygons[0],
([(0, 0), (0, 1), (2, 1), (2, 0)], 'test',
{'facecolor': 'red', 'label': 'test2'}))
p.segments_equal(self, p.segments[0], ((2, 0), (2, 1), None,
{'color': geom.geomrc['ribbon.default_edgecolor'], 'zorder': 1}))
p.segments_equal(self, p.segments[1], ((0, 0), (0, 1), None,
{'color': geom.geomrc['ribbon.default_edgecolor'], 'zorder': 1}))
self.assertEqual(p.tlim, (-2, 3))
self.assertEqual(p.xlim, (0, 1))
def test_draw_no_edges(self):
p = _MockSTPlotter()
ribbon = geom.Ribbon(geom.Line((0, 1), (2, 3)),
geom.Line((0, 1), (0, 0)), draw_options={'edgecolor': 'None'})
ribbon.draw(p, tlim=(-2, 3), xlim=(0, 1))
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 1)
def test_draw_out_of_bounds(self):
p = _MockSTPlotter()
ribbon = geom.Ribbon(geom.Line((0, 1), (2, 3)),
geom.Line((0, 1), (0, 0)))
ribbon.draw(p, tlim=(4, 5), xlim=(0, 1))
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 0)
class RibbonBoundaryTests(unittest.TestCase):
"""Tests for bounds checking logic."""
def test_point_inside(self):
ribbon = geom.Ribbon(geom.Line((1, 1), (0, 2)),
geom.Line((1, 1), (0, 0)))
self.assertTrue(ribbon._point_inside((0, 1)))
def test_point_not_inside(self):
ribbon = geom.Ribbon(geom.Line((1, 1), (0, 2)),
geom.Line((1, 1), (0, 0)))
self.assertFalse(ribbon._point_inside((0, 3)))
def test_point_on_boundary(self):
ribbon = geom.Ribbon(
geom.Line((1, 1), (0, 2)),
geom.Line((1, 1), (0, 0))
)
self.assertTrue(ribbon._point_inside((0, 2)))
class RibbonGetVerticesTests(unittest.TestCase):
def test_get_vertices_flat_lines(self):
ribbon = geom.Ribbon(
geom.Line((0, 1), (1, 0)),
geom.Line((0, 1), (2, 0)),
)
self.assertEqual(ribbon._get_vertices((0, 3), (0, 3)),
[
(1, 0),
(1, 3),
(2, 3),
(2, 0)
]
)
def test_get_vertices_exact_boundary(self):
ribbon = geom.Ribbon(
geom.Line((0, 1), (1, 0)),
geom.Line((0, 1), (3, 0)),
)
self.assertEqual(ribbon._get_vertices((0, 3), (0, 3)),
[
(1, 0),
(1, 3),
(3, 3),
(3, 0)
]
)
def test_get_vertices_lines_straddle_corner(self):
ribbon = geom.Ribbon(
geom.Line((1, 1), (1, 0)),
geom.Line((1, 1), (0, 1)),
)
self.assertEqual(ribbon._get_vertices((0, 2), (0, 2)),
[
(0, 0),
(0, 1),
(1, 2),
(2, 2),
(2, 1),
(1, 0)
]
)
def test_get_vertices_line_on_corner(self):
ribbon = geom.Ribbon(
geom.Line((1, 1), (1, 0)),
geom.Line((1, 1), (0, 2)),
)
self.assertEqual(ribbon._get_vertices((0, 2), (0, 2)),
[
(0, 0),
(0, 2),
(2, 2),
(2, 1),
(1, 0)
]
)
def test_get_vertices_one_line_out_of_bounds(self):
ribbon = geom.Ribbon(
geom.Line((1, 1), (1, 0)),
geom.Line((1, 1), (0, 3)),
)
self.assertEqual(ribbon._get_vertices((0, 2), (0, 2)),
[
(0, 0),
(0, 2),
(2, 2),
(2, 1),
(1, 0)
]
)
def test_get_vertices_both_lines_out_of_bounds(self):
ribbon = geom.Ribbon(
geom.Line((1, 1), (3, 0)),
geom.Line((1, 1), (0, 3)),
)
self.assertEqual(ribbon._get_vertices((0, 2), (0, 2)),
[
(0, 0),
(0, 2),
(2, 2),
(2, 0)
]
)
class HalfRibbonBasicTests(unittest.TestCase):
"""Basic functionality tests."""
def test_init_error_on_antiparallel(self):
self.assertRaises(ValueError, geom.HalfRibbon,
geom.Ray((0, 1), (0, 0)), geom.Ray((0, -1), (2, 3)))
def test_draw_endpoints_out_of_bounds(self):
p = _MockSTPlotter()
h = geom.HalfRibbon(geom.Ray((0, 1), (2, -3)),
geom.Ray((0, 1), (0, -1)), draw_options={'edgecolor': 'None'})
h.draw(p, tlim=(-2, 3), xlim=(0, 1))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 1)
p.polygons_equal(self, p.polygons[0],
([(0, 0), (0, 1), (2, 1), (2, 0)], None, {}))
def test_draw_totally_out_of_bounds(self):
p = _MockSTPlotter()
h = geom.HalfRibbon(geom.Ray((0, 1), (2, 3)), geom.Ray((0, 1), (0, 2)))
h.draw(p, tlim=(-2, 3), xlim=(0, 1))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 0)
def test_draw_endpoints_both_in_bounds(self):
p = _MockSTPlotter()
h = geom.HalfRibbon(geom.Ray((0, 1), (2, 0.5)),
geom.Ray((0, 1), (0, 0.25)), draw_options={'edgecolor': 'None'})
h.draw(p, tlim=(-2, 3), xlim=(0, 1))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 1)
p.polygons_equal(self, p.polygons[0],
([(0, 0.25), (0, 1), (2, 1), (2, 0.5)], None, {}))
def test_draw_endpoints_one_endpoint_out_of_bounds(self):
p = _MockSTPlotter()
h = geom.HalfRibbon(geom.Ray((0, 1), (2, 0.5)),
geom.Ray((0, 1), (0, -0.5)), draw_options={'edgecolor': 'None'})
h.draw(p, tlim=(-2, 3), xlim=(0, 1))
self.assertEqual(len(p.points), 0)
self.assertEqual(len(p.segments), 0)
self.assertEqual(len(p.polygons), 1)
p.polygons_equal(self, p.polygons[0],
([(0, 0), (0, 1), (2, 1), (2, 0.5), (1, 0)], None, {}))
``` |
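The geometry tests above pin down the numerical behaviour of the Lorentz-transform helpers. A short sketch using only calls that appear in those tests, with the expected values taken from the test assertions.

```python
# Sketch restricted to the specrel.geom calls exercised by the tests above.
import specrel.geom as geom

event = geom.STVector(2, 3)                 # (t, x)
moved = geom.lorentz_transformed(event, 3/5)
print(moved)                                 # approximately STVector(0.25, 2.25)

# gamma factor for v = 3/5 is 5/4, as asserted in the tests
print(geom.STVector.gamma_factor(3/5))

# Lines are (direction, point) pairs; two slanted lines intersect in a point
line1 = geom.Line((1, 1), (0, 0))
line2 = geom.Line((1, -1), (0, 2))
print(line1.intersect(line2))                # (1, 1) per the test expectations
```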
{
"source": "johanngerberding/cookiecutter-data-science",
"score": 2
} |
#### File: src/config/config.py
```python
import os
import warnings
from dotenv import find_dotenv, load_dotenv
from yacs.config import CfgNode as ConfigurationNode
from pathlib import Path
# Please configure your own settings here #
# YACS overwrite these settings using YAML
__C = ConfigurationNode()
### EXAMPLE ###
"""
# data augmentation parameters with albumentations library
__C.DATASET.AUGMENTATION = ConfigurationNode()
__C.DATASET.AUGMENTATION.BLURRING_PROB = 0.25
__C.DATASET.AUGMENTATION.GAUSS_NOISE_PROB = 0.25
__C.DATASET.AUGMENTATION.GAUSS_VAR_LIMIT =(10.0, 40.0)
__C.DATASET.AUGMENTATION.BLUR_LIMIT = 7
...
# model backbone configs
__C.MODEL.BACKBONE = ConfigurationNode()
__C.MODEL.BACKBONE.NAME = 'mobilenet_v2'
__C.MODEL.BACKBONE.RGB = True
__C.MODEL.BACKBONE.PRETRAINED_PATH = 'C:/data-science/kaggle/bengali.ai/models/mobilenet_v2-b0353104.pth'
# model head configs
__C.MODEL.HEAD = ConfigurationNode()
__C.MODEL.HEAD.NAME = 'simple_head_module'
__C.MODEL.HEAD.ACTIVATION = 'leaky_relu'
__C.MODEL.HEAD.OUTPUT_DIMS = [168, 11, 7]
__C.MODEL.HEAD.INPUT_DIM = 1280 # mobilenet_v2
__C.MODEL.HEAD.HIDDEN_DIMS = [512, 256]
__C.MODEL.HEAD.BATCH_NORM = True
__C.MODEL.HEAD.DROPOUT = 0.4
"""
def get_cfg_defaults():
"""
Get a yacs CfgNode object with default values for my_project.
"""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern recommended by the YACS repo.
# It will be subsequently overwritten with local YAML.
return __C.clone()
def combine_cfgs(path_cfg_data: Path=None, path_cfg_override: Path=None):
"""
    An internal-facing routine that combines CFGs in the order provided.
    :param path_cfg_data: path to the base/dataset YAML config file
    :param path_cfg_override: path to the experiment-specific override YAML file
:return: cfg_base incorporating the overwrite.
"""
if path_cfg_data is not None:
path_cfg_data=Path(path_cfg_data)
if path_cfg_override is not None:
path_cfg_override=Path(path_cfg_override)
# Path order of precedence is:
# Priority 1, 2, 3, 4 respectively
# .env > other CFG YAML > data.yaml > default.yaml
# Load default lowest tier one:
# Priority 4:
cfg_base = get_cfg_defaults()
# Merge from the path_data
# Priority 3:
if path_cfg_data is not None and path_cfg_data.exists():
cfg_base.merge_from_file(path_cfg_data.absolute())
# Merge from other cfg_path files to further reduce effort
# Priority 2:
if path_cfg_override is not None and path_cfg_override.exists():
cfg_base.merge_from_file(path_cfg_override.absolute())
# Merge from .env
# Priority 1:
list_cfg = update_cfg_using_dotenv()
    if list_cfg:
cfg_base.merge_from_list(list_cfg)
return cfg_base
def update_cfg_using_dotenv() -> list:
"""
    If a .env file is present, return its settings as a flat key/value list.
    This list is a hard overwrite (it has the highest priority).
:return: empty list or overwriting information
"""
# If .env not found, bail
if find_dotenv() == '':
warnings.warn(".env files not found. YACS config file merging aborted.")
return []
# Load env.
load_dotenv(find_dotenv(), verbose=True)
# Load variables
list_key_env = {
"DATASET.TRAIN_DATA_PATH",
"DATASET.VAL_DATA_PATH",
"MODEL.BACKBONE.PRETRAINED_PATH",
"MODEL.SOLVER.LOSS.LABELS_WEIGHTS_PATH"
}
# Instantiate return list.
path_overwrite_keys = []
# Go through the list of key to be overwritten.
for key in list_key_env:
# Get value from the env.
        value = os.getenv(key)
# If it is none, skip. As some keys are only needed during training and others during the prediction stage.
if value is None:
continue
# Otherwise, adding the key and the value to the dictionary.
path_overwrite_keys.append(key)
path_overwrite_keys.append(value)
return path_overwrite_keys
``` |
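A hedged usage sketch of the merging helpers above. The module import path and the YAML file names are placeholders, and the YAML keys are assumed to match the defaults declared on `__C`.

```python
# Sketch: build the final experiment config, assuming this module is
# importable as src.config.config and that the YAML paths below exist.
from pathlib import Path

from src.config.config import combine_cfgs  # assumed import path

cfg = combine_cfgs(
    path_cfg_data=Path("configs/data.yaml"),            # placeholder path
    path_cfg_override=Path("configs/experiment.yaml"),  # placeholder path
)
cfg.freeze()  # a yacs CfgNode can be frozen once merging is finished
print(cfg)
```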
{
"source": "JohannGillium/modern_english_ed",
"score": 3
} |
#### File: modern_english_ed/Python/xslt_handler.py
```python
import lxml.etree as etree
class xslt:
def __init__(self, book, xml_path, xslt_path, path_output):
#Dans le constructeur d'une classe, on declare tous les attributs des objets qui peupleront cette classe
self.book = book
self.xml_path = xml_path
self.xslt_path = xslt_path
self.path_output = path_output
def transform(self):
# parser = etree.XMLParser(recover=True)
# doc = etree.parse(self.xml_path, parser)
parser = etree.XMLParser(no_network=False)
doc = etree.parse(self.xml_path, parser)
# parser.error_log
xslt_root = etree.fromstring(open(self.xslt_path).read())
transform = etree.XSLT(xslt_root)
result_tree = transform(doc)
output_file = open(self.path_output, "w")
output_file.write(str(result_tree))
def process_xml(book, xml_path, xslt_path, path_output):
param_stylesheet = xslt(book, xml_path, xslt_path, path_output)
param_stylesheet.transform()
``` |
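A small usage sketch of the wrapper above; the module name, file paths, and book label are all placeholders.

```python
# Sketch: run an XSLT transformation through the process_xml helper above.
from xslt_handler import process_xml  # assumes the module is importable

process_xml(
    book="sample_edition",                    # placeholder label
    xml_path="xml/sample_edition.xml",        # placeholder input path
    xslt_path="xslt/tei_to_html.xsl",         # placeholder stylesheet path
    path_output="html/sample_edition.html",   # placeholder output path
)
```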
{
"source": "johanngoltz/helpling-scraper",
"score": 3
} |
#### File: johanngoltz/helpling-scraper/client.py
```python
from typing import List, Dict
import requests
from sgqlc.endpoint.http import HTTPEndpoint
from sgqlc.operation import Operation
from tabulate import tabulate
import helpling_schema
CandidateList = List[helpling_schema.DecoratedPotentialCandidateEdge]
BASE_URL = "https://www.helpling.de/api/"
DEFAULT_SEARCH_PARAMETERS = {
"time": "14:00null",
"date": "30/11/2019",
"repeat": "true",
"frequency": "week",
"duration": 12600,
"ironing": False,
"pets": False,
"materials_required": False,
"workday_flexibility": False,
"provider_type": "",
"notes": ""
}
gql_endpoint = HTTPEndpoint(BASE_URL + "v2/rr", base_headers={
# CloudFlare blocks the default user agent. Pretend to be IE 6.
"User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
})
def create_bid(postcode: int, **kwargs) -> str:
"""
Query the helpling API to create a new bid. Then, set the given parameters (or default values configured in
``DEFAULT_SEARCH_PARAMETERS``) to it. This is the first step to scraping offers for a region.
:param postcode: The postcode for which to request a new bid
:param kwargs: The parameters to set for the search.
:return: The bidCode of the created bid
"""
response = requests.post(BASE_URL + "v1/bids", {"bid[postcode]": postcode, "bid[checkout_version]": 1})
bid_id = response.json().get("data").get("code")
if bid_id is None:
raise Exception("Bid for postcode " + str(postcode) + " could not be created: " + response.text)
print("Create bid for " + str(postcode) + " (" + bid_id + "): OK")
op = Operation(helpling_schema.Mutation)
op.transition_bid_to_provider_selection(**{**DEFAULT_SEARCH_PARAMETERS, **kwargs, "bid_code": bid_id})
result = gql_endpoint(op).get("data").get("transitionBidToProviderSelection")
if result.get("success") is False:
raise Exception("Bid " + bid_id + " could not be parametrized: " + result.get("errors"))
print("Parametrize bid " + bid_id + ": OK")
return bid_id
def get_candidates_for_bid(bid_id: str) -> List[helpling_schema.DecoratedPotentialCandidateEdge]:
"""
Query the API for all (i.e. the first 1000) candidates for a given bid. The bid must have been parametrized already.
Note that not all fields are actually requested from the backend.
:param bid_id: Id of an already-parametrized bid
:return: First 1000 candidates available for the bid
"""
op = Operation(helpling_schema.Query)
candidates = op.customer_bid(code=bid_id).potential_candidates(first=1000)
candidates.edges.node.price_per_hour()
provider = candidates.edges.node.provider
provider.__fields__("id", "firstname", "shortname", "default_profile_image", "pets", "windows", "ironing",
"ratings_received_count", "verification_level", "documents", "performed_cleanings_count",
"language_skills", "instabook_enabled")
provider.avg_rating.total()
provider.experience.__fields__()
provider.distance_to_bid(bid_code=bid_id)
data = gql_endpoint(op)
return (op + data).customer_bid.potential_candidates.edges
def get_bid(bid_id: str) -> helpling_schema.CustomerBid:
"""
Get basic information on the given bid.
:param bid_id: Id of a bid, parametrized or not.
:return:
"""
op = Operation(helpling_schema.Query)
op.customer_bid(code=bid_id).__fields__("code", "duration", "start_time")
data = gql_endpoint(op)
return (op + data).customer_bid
def get_candidates(postcodes: List[int], parameters: Dict = None) -> List[CandidateList]:
"""
Find potential candidates for a list of postcodes.
:param postcodes:
:param parameters: see DEFAULT_SEARCH_PARAMETERS
"""
parameters = parameters or {}
for postcode in postcodes:
yield get_candidates_for_bid(create_bid(postcode, **parameters))
if __name__ == "__main__":
for c in get_candidates([90425], {"date": "30/11/2019"}):
as_dicts = [{"price_per_hour": c.node.price_per_hour, **c.node.provider.__dict__} for c in c]
for d in as_dicts:
d.pop("__selection_list__")
d.pop("__fields_cache__")
d.pop("__json_data__")
print(tabulate(as_dicts))
``` |
{
"source": "JohannGordillo/Computacion-Distribuida",
"score": 4
} |
#### File: Practicas/Practica 1/bfs_secuencial.py
```python
class GraphNode(object):
"""Implementación simple para el nodo de una gráfica"""
def __init__(self, val: int) -> None:
"""Constructor para el nodo
Args:
val (int): El elemento dentro del nodo
"""
self.neighbors = list()
self.visited = False
self.val = val
def connect(self, *nodes) -> None:
"""Conecta al nodo N con los nodos dados y
a cada uno de estos nodos con N
Args:
nodes (List[GraphNode]): Una lista de nodos
"""
for n in nodes:
if n is not self and n not in self.neighbors:
self.neighbors.append(n)
n.connect(self)
def __str__(self) -> str:
"""Regresa la representación en cadena del nodo
Regresa:
str: La representación en cadena del nodo
"""
return f"({self.val})"
def bfs(root: GraphNode) -> None:
"""Algoritmo de búsqueda por amplitud
Complejidad en Tiempo: O(|V| + |E|)
Complejidad en Espacio: O(n)
Args:
root (GraphNode): El nodo raíz
"""
root.visited = True
queue = [root]
while queue:
n = queue.pop(0)
print(n, end=" ")
for m in n.neighbors:
if not m.visited:
m.visited = True
queue.append(m)
def main() -> None:
"""Función principal
Ejecuta BFS con un grafo de ejemplo"""
# Ejemplo:
#
# (1)------(2)-----(6)
# \ \ /
# \ \ /
# (3) (4)
# \ / \
# \ / \
# \ / \
# (5)-----(7) G(V, E)
#
# BFS: (1)--(2)--(3)--(4)--(6)--(5)--(7)
    # Create 7 nodes.
n1, n2, n3, n4, n5, n6, n7 = [GraphNode(x) for x in range(1, 8)]
    # Connect the nodes.
    # Since calling n.connect(m) also applies m.connect(n),
    # there is no need to call m.connect(n) again.
n1.connect(n2, n3)
n2.connect(n4, n6)
n3.connect(n5)
n4.connect(n5, n6, n7)
n5.connect(n7)
    # Take n1 as the root and run BFS.
bfs(n1)
if __name__ == "__main__":
main()
```
#### File: Practica 2/Canales/Canal.py
```python
import simpy
class Canal(object):
"""Implementación de un canal."""
def __init__(self, env, capacity=simpy.core.Infinity):
self.env = env
self.capacity = capacity
self.channels = list()
self.output_channel = None
def send(self, msg, neighbors):
"""Envía un mensaje a los canales de salida de los vecinos."""
if self.output_channel is None:
raise RuntimeError("No hay canales de salida.")
events = list()
l = len(self.channels)
for i in range(l):
if i in neighbors:
events.append(self.channels[i].put(msg))
return self.env.all_of(events)
def create_input_channel(self):
"""Creamos un objeto Store en el que recibiremos mensajes."""
channel = simpy.Store(self.env, capacity=self.capacity)
self.channels.append(channel)
self.output_channel = channel
return channel
def get_output_channel(self):
"""Regresa el objeto Store en el cual recibiremos los mensajes."""
return self.output_channel
```
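A minimal usage sketch of the `Canal` class above, assuming only that `simpy` is installed and that the module is importable as `Canales.Canal`; the node names, timing and message payload are illustrative and not part of the original exercise.
```python
import simpy
from Canales.Canal import Canal

def receiver(env, channel, name):
    # Block until a message arrives on this node's input Store.
    msg = yield channel.get()
    print(f"{name} received {msg!r} at t={env.now}")

def sender(env, canal):
    yield env.timeout(1)
    # Deliver "hello" to the nodes whose channel indices are 0 and 1.
    yield canal.send("hello", [0, 1])

env = simpy.Environment()
canal = Canal(env)
ch0 = canal.create_input_channel()
ch1 = canal.create_input_channel()
env.process(receiver(env, ch0, "node 0"))
env.process(receiver(env, ch1, "node 1"))
env.process(sender(env, canal))
env.run(until=5)
```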
#### File: Practicas/Practica 2/NodoSpanning.py
```python
import simpy
from Canales.Canal import Canal
from Nodo import Nodo
class NodoSpanning(Nodo):
"""Implementa la interfaz de Nodo para el algoritmo para conocer a
los vecinos de mis vecinos."""
def __init__(self, id: int, neighbors: list,
input_channel: simpy.Store,
output_channel: simpy.Store):
"""Constructor para el nodo."""
self.id = id
self.neighbors = neighbors
self.input_channel = input_channel
self.output_channel = output_channel
def span(self, env: simpy.Environment):
"""Función para que un nodo colabore en la construcción
de un árbol generador."""
# Solo el nodo raiz (id = 0) envía el primer msj
if self.id == 0:
self.parent = self.id
self.expected_msg = len(self.neighbors)
data = ("GO", self.id, None)
print(f'El nodo {self.id} inicializa sus variables en la ronda {env.now}')
yield env.timeout(1)
self.output_channel.send(data, self.neighbors)
        # Non-distinguished nodes start with an empty parent.
        else:
            self.parent = None
        # Make the set of children empty for every node.
        self.children = set()
        while True:
            # Wait for a message to arrive.
msg_type, sender, val_set = yield self.input_channel.get()
print(f'El nodo {self.id} recibió el mensaje {msg_type}({val_set}) de {sender} en la ronda {env.now}')
yield env.timeout(1)
            # When a GO() message is received.
if msg_type == "GO":
if self.parent is None:
self.parent = sender
self.expected_msg = len(self.neighbors) - 1
if self.expected_msg == 0:
data = ("BACK", self.id, self.id)
self.output_channel.send(data, [sender])
else:
data = ("GO", self.id, None)
receivers = [k for k in self.neighbors if k != sender]
self.output_channel.send(data, receivers)
else:
data = ("BACK", self.id, None)
self.output_channel.send(data, [sender])
            # When a BACK(val_set) message is received.
elif msg_type == "BACK":
self.expected_msg -= 1
if val_set is not None:
self.children.add(sender)
if self.expected_msg == 0:
if self.parent != self.id:
data = ("BACK", self.id, self.id)
self.output_channel.send(data, [self.parent])
            # If the message type does not exist, raise an exception.
else:
raise Exception("El tipo de mensaje no existe.")
if __name__ == "__main__":
    # Take the following graph as an example:
example = '''Para la siguiente gráfica:
(6)
(1) / |
/ \ / |
/ \ / |
(0)-----(2)-----(3)--(4)
\ |
\ |
\ |
(5)
'''
    # Create the nodes.
graph = list()
adjacencies = [[1, 2], [0, 2], [0, 1, 3], [2, 4, 5, 6],
[3, 5, 6], [3, 4], [3, 4]]
order = len(adjacencies)
    # Initialize the environment and the channel.
    env = simpy.Environment()
    pipe = Canal(env)
    # Populate the graph.
for i in range(order):
input_channel = pipe.create_input_channel()
neighbors = adjacencies[i]
n = NodoSpanning(i, neighbors, input_channel, pipe)
graph.append(n)
    # And tell the environment to process it.
    for n in graph:
        env.process(n.span(env))
    # Print the example graph.
print(example)
env.run(until=30)
    # Initialize the resulting tree.
    tree = list()
    # Populate the spanning tree.
    for n in graph:
if len(n.children) > 0:
for c in n.children:
tree.append([n.id, c])
print(f"\nFinalmente, las aristas del árbol generador son:\n{tree}\n")
result = '''Visualmente, se ve como sigue:
(6)
(1) /
/ /
/ /
(0)-----(2)-----(3)--(4)
\
\
\
(5)
'''
print(result)
```
#### File: Practicas/Practica 3/NodoBFS.py
```python
import simpy
from Nodo import Nodo
from Canales.CanalRecorridos import CanalRecorridos
# The unit of time.
TICK = 1
class NodoBFS(Nodo):
"""Implementa la interfaz de Nodo para el algoritmo BFS."""
def __init__(self, id_nodo: int, vecinos: set,
canal_entrada: simpy.Store,
canal_salida: simpy.Store):
"""Constructor para el nodo."""
self.id_nodo = id_nodo
self.vecinos = vecinos
self.canal_entrada = canal_entrada
self.canal_salida = canal_salida
self.padre = id_nodo
self.distancia = float('inf')
def bfs(self, env: simpy.Store):
"""Implementación del algoritmo BFS."""
if self.id_nodo == 0:
self.distancia = 0
data = (self.distancia, self.id_nodo)
yield env.timeout(TICK)
self.canal_salida.envia(data, self.vecinos)
while True:
            # Wait for a message to arrive.
d, sender = yield self.canal_entrada.get()
if d + 1 < self.distancia:
self.distancia = d + 1
self.padre = sender
data = (self.distancia, self.id_nodo)
yield env.timeout(TICK)
self.canal_salida.envia(data, self.vecinos)
if __name__ == "__main__":
    # Create the environment and the Canal object.
    env = simpy.Environment()
    bc_pipe = CanalRecorridos(env)
    # Adjacencies.
adyacencias = [{1, 3, 4, 6}, {0, 3, 5, 7}, {3, 5, 6},
{0, 1, 2}, {0}, {1, 2}, {0, 2}, {1}]
    # The list that represents the graph.
grafica = []
# Creamos los nodos
for i in range(0, len(adyacencias)):
grafica.append(NodoBFS(i, adyacencias[i],
bc_pipe.crea_canal_de_entrada(), bc_pipe))
    # Tell the environment what to process.
    for nodo in grafica:
        env.process(nodo.bfs(env))
    # And run it.
    env.run(until=50)
    # Check that a BFS was indeed performed.
padres_esperados = [0, 0, 3, 0, 0, 1, 0, 1]
distancias_esperadas = [0, 1, 2, 1, 1, 2, 1, 2]
    # For each node, verify that its parent and distance are as expected.
for i in range(len(grafica)):
n = grafica[i]
assert n.padre == padres_esperados[i], ('El nodo %d tiene mal padre' % n.id_nodo)
assert n.distancia == distancias_esperadas[i], ('El nodo %d tiene distancia equivocada' % n.id_nodo)
```
#### File: Practicas/Practica 4/Test.py
```python
from Canales.CanalRecorridos import *
from NodoConsenso import *
class TestPractica2:
    ''' Class for the unit tests of practical assignment 2. '''
    # The adjacency lists of the graph.
adyacencias = [[1, 2, 3, 4, 5, 6], [0, 2, 3, 4, 5, 6], [0, 1, 3, 4, 5, 6],
[0, 1, 2, 4, 5, 6], [0, 1, 2, 3, 5, 6], [0, 1, 2, 3, 4, 6],
[0, 1, 2, 3, 4, 5]]
def test_ejercicio_uno(self):
        ''' Method that tests the consensus algorithm. '''
        # Create the environment and the Canal object
        env = simpy.Environment()
        bc_pipe = CanalRecorridos(env)
        # The list that represents the graph
grafica = []
        # Create the nodes
for i in range(0, len(self.adyacencias)):
grafica.append(NodoConsenso(i, self.adyacencias[i],
bc_pipe.crea_canal_de_entrada(), bc_pipe))
        # Tell the environment what to process ...
        f = 2  # The number of failures
        for nodo in grafica:
            env.process(nodo.consenso(env, f))
        # ...and run it
env.run()
nodos_fallidos = 0
lider_elegido = None
for i in range(0, len(grafica)):
nodo = grafica[i]
if nodo.fallare:
nodos_fallidos += 1
else:
lider_elegido = nodo.lider if lider_elegido is None else lider_elegido
assert lider_elegido == nodo.lider
assert nodo.lider == next(item for item in nodo.V if item is not None)
assert nodos_fallidos == f
``` |
{
"source": "JohannGordillo/RSA-Cryptosystem",
"score": 3
} |
#### File: RSA-Cryptosystem/rsa/main.py
```python
from rsa.gui import FileBrowser
from rsa import rsa
import platform
import os
# Generates a frame of "=" symbols.
genera_marco = (lambda: print("=" * 80))
# Operation to clear the console.
if platform.system() == "Windows":
clear_op = "cls"
else:
clear_op = "clear"
clean_console = (lambda: os.system(clear_op))
def select_file():
"""Permite al usuario seleccionar un archivo
con ayuda de una interfaz gráfica.
>> Argumentos:
Ninguno.
>> Regresa:
La ruta del archivo.
"""
fb = FileBrowser()
fb.search_path()
return fb.get_path()
def print_menu():
"""Imprime el menú de selección."""
clean_console()
genera_marco()
print("Menú de Selección".center(80, ' '))
genera_marco()
print("1) Cifrar un mensaje.\n")
print("2) Descifrar un mensaje.\n")
print("3) Salir.")
genera_marco()
def main():
"""Funcion principal del programa."""
while True:
print_menu()
op = int(input("\nSeleccione una opcion: "))
        # Encryption.
        if op == 1:
            # Let the user select the file.
            src = select_file()
            # Read the file.
            with open(src) as f:
                msg = f.read()
            # Generate two distinct random primes of between 50 and 60 digits each.
p = rsa.generate_prime_number(50, 60)
q = p
while q == p:
q = rsa.generate_prime_number(50, 60)
            # Get the public and private keys.
            public_key, private_key = rsa.generate_keys(p, q)
            # String with the ciphertext.
            ciphertext = ' '.join([str(c) for c in rsa.encrypt(public_key, msg)])
            print(f"Su llave pública es: {public_key}\nSu llave privada es: {private_key}")
            # Write the ciphertext to a salida.txt output file.
            # It is created in the current directory.
with open("salida.txt", "w+") as out:
out.write(ciphertext)
print(f"\nEl archivo salida.txt se ha creado en {os.getcwd()}")
input("\nPresione <ENTER> para continuar... ")
clean_console()
        # Decryption.
        elif op == 2:
            # Let the user select the file.
            src = select_file()
            # Read the file with the encrypted text.
            with open(src) as f:
                text = f.read()
            # Get the public key.
print(">> Ingrese la llave pública [n, e] (ejemplo: 567 785): ")
n, e = map(int, input().split())
public_key = (n, e)
            # Get the private key.
print("\n>> Ingrese la llave privada: ")
private_key = int(input())
keys = (public_key, private_key)
            # List of integers associated with the ciphertext.
            ciphertext = [int(c) for c in text.split()]
            # Get the decrypted message as a string.
msg = rsa.decrypt(ciphertext, keys)
print(f"\nSu texto descifrado es:\n{msg}")
input("\nPresione <ENTER> para continuar... ")
clean_console()
        # Exit the program.
elif op == 3:
clean_console()
print("Hasta luego! Gracias por utilizar el programa :D")
break
        # Invalid option.
else:
print("Opción no válida.")
input("\nPresione <ENTER> para continuar... ")
clean_console()
``` |
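The `rsa.rsa` module driven by `main.py` is not part of this excerpt, so only the `generate_prime_number`/`generate_keys`/`encrypt`/`decrypt` calls above are known about it. As a reference for the arithmetic such a module typically performs, here is a textbook-style sketch with deliberately tiny primes; the numbers and names are illustrative and do not reflect the repository's actual implementation.
```python
# Textbook RSA on toy numbers (illustrative only; real keys use primes of
# hundreds of digits plus padding, which this sketch deliberately omits).
from math import gcd

p, q = 61, 53                   # two distinct small primes
n = p * q                       # public modulus
phi = (p - 1) * (q - 1)         # Euler's totient of n
e = 17                          # public exponent, coprime with phi
assert gcd(e, phi) == 1
d = pow(e, -1, phi)             # private exponent (modular inverse, Python 3.8+)

message = [ord(ch) for ch in "hola"]                # characters as integers < n
cipher = [pow(m, e, n) for m in message]            # c = m^e mod n
plain = ''.join(chr(pow(c, d, n)) for c in cipher)  # m = c^d mod n
assert plain == "hola"
```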
{
"source": "JohannHospice/quico",
"score": 2
} |
#### File: quico/src/quico.py
```python
import argparse
import subprocess
def main():
args, unknown_joined = parse_known_args()
build_cmd = CommandBuilder(['docker', 'build'])
build_cmd.append_with_option('-f', args.file)
build_cmd.append_with_option('-t', args.tag)
build_cmd.append(args.directory)
run_cmd = CommandBuilder(['docker', 'run'])
run_cmd.append_with_option('--network', args.network)
for volume in args.volume if args.volume else []:
run_cmd.append_with_option('-v', volume)
run_cmd.append_with_option('-p', args.publish)
run_cmd.append_with_option('-ti', args.tag)
run_cmd.append(unknown_joined)
    try:
read_proc('build', build_cmd.build())
read_proc('run', run_cmd.build())
except Exception as e:
print(e)
def parse_known_args():
parser = argparse.ArgumentParser(description='Quico ou quick-container permet de compiler puis lancer rapidement un conteneur docker.')
parser.add_argument('directory', help='Dossier ou compiler l\'image docker.')
parser.add_argument('-t', '--tag', required=True)
parser.add_argument('-n', '--network', help="Réseau ou lancer le conteneur docker", default='bridge', required=False)
parser.add_argument('-f', '--file', help="Chemin vers le Dockerfile à compiler", default='Dockerfile', required=False)
parser.add_argument('-p', '--publish', required=False)
parser.add_argument('-v', '--volume', action='append', required=False)
args, unknown = parser.parse_known_args()
unknown_joined = ' '.join(unknown)
return args, unknown_joined
def read_proc(title, cmd):
print(f"+{title}: start ({cmd})")
try:
subprocess.check_call(cmd, shell=True)
except:
raise Exception(f"+{title}: raised error")
class CommandBuilder:
cmd = []
def __init__(self, cmd):
self.cmd = cmd
def append_with_option(self, option, value):
if value:
self.cmd.append(option)
self.cmd.append(value)
return self
def append(self, text):
self.cmd.append(text)
return self
def build(self):
return " ".join(self.cmd)
if __name__ == '__main__':
main()
``` |
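For reference, a sketch of how `CommandBuilder` assembles the two docker command lines; the image name, paths and flags are made-up examples and the import path is only assumed from the file location.
```python
# Roughly what `quico ./app -t myimage -p 8080:80 -- --debug` would build.
from quico import CommandBuilder  # assumed import; adjust to your layout

build = CommandBuilder(['docker', 'build'])
build.append_with_option('-f', 'Dockerfile')
build.append_with_option('-t', 'myimage')
build.append('./app')
print(build.build())  # docker build -f Dockerfile -t myimage ./app

run = CommandBuilder(['docker', 'run'])
run.append_with_option('--network', 'bridge')
run.append_with_option('-p', '8080:80')
run.append_with_option('-ti', 'myimage')
run.append('--debug')
print(run.build())    # docker run --network bridge -p 8080:80 -ti myimage --debug
```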
{
"source": "johannilsson/bakery",
"score": 2
} |
#### File: bakery/bakery/bakery.py
```python
from __future__ import with_statement
__author__ = '<NAME>'
__version__ = '0.4.dev'
__license__ = 'MIT'
import sys
import pystache
import os
import shutil
import errno
import fnmatch
import markdown
import codecs
import re
import yaml
import hashlib
import threading
import time
import socket
import filecmp
import typogrify
import math
import copy
from unicodedata import normalize
from functools import partial
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
# From Bottle.
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
def mkdir_p(path):
""" Create intermediate directories as required.
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
# Helper to slugify paths.
# http://flask.pocoo.org/snippets/5/
_punct_re = re.compile(r'[\t !"#$%&\'()*\-<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an slightly worse ASCII-only slug."""
if type(text) == str:
text = unicode(text)
result = []
for word in _punct_re.split(text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delim.join(result))
class TemplateView(pystache.TemplateSpec):
pass
class Config(object):
""" Configuration for the site to build. """
paths = {
'assets': u'assets',
'layouts': u'layouts',
'media': u'media',
'pages': u'pages',
}
def __init__(self, path=None, **config):
c = None
if path is not None:
with codecs.open(path, 'r', encoding='utf-8') as f:
content = f.read()
c = yaml.load(content)
if c is not None:
c.update(config)
else:
c = config
if config.get('no_compress'):
c['compress'] = False
self.site_context = c.get('site_context', {})
self.media = c.get('media', {})
self.compress = c.get('compress', False)
self.source_dir = c.get('source_dir', None)
self.build_dir = c.get('build_dir', None)
self.production = c.get('production', False)
self.pagination = c.get('pagination', {})
self.site_context.update({'production': self.production})
if self.source_dir is None:
self.source_dir = os.getcwd()
if self.build_dir is None:
self.build_dir = os.getcwd() + '/_out'
class Loader(object):
""" Content and context loader for resources.
"""
def __init__(self, source=''):
self.source = source
def load(self, path):
""" Return content and context from path.
Return a tuple consisting of the content as a string and a dict
representing the context extracted from a yaml front matter if
present in the content.
"""
context = {}
with codecs.open(self.source + path, 'r', encoding='utf-8') as f:
content = f.read()
result = re.search(r'^(---\s*\n.*?\n?)^(---\s*$\n?)', content, re.DOTALL|re.MULTILINE)
if result:
front_matter = result.group(1)
context = yaml.load(front_matter)
content = content[result.end(0):len(content)]
return content, context
class Resource(object):
""" Base resource
"""
def __init__(self, config, source):
self.config = config
self.source = source
def _clean_source(self):
""" Clears the first directory from the destination path.
"""
i = self.source.find(os.sep) + 1
if i == 1:
i = self.source[i:].find(os.sep) + 1
return self.source[i:]
@property
def destination(self):
return self._clean_source()
# Alias url to destination
url = destination
@property
def belongs_to(self):
return os.path.basename(os.path.dirname(self.destination))
@property
def belongs_to_parent(self):
root, last = os.path.split(os.path.dirname(self.destination))
parent = os.path.basename(root)
if parent == "":
parent = None
return parent
@property
def order(self):
return self.destination
class PageResource(Resource):
def __init__(self, config, source, context=None):
super(PageResource, self).__init__(config, source)
if not context:
context = {}
self.context = context
self.id = hashlib.md5(self.source).hexdigest()
self.layout_path = u'default.html'
self.page_layout_path = None
self.pager = None
self.content = None
self.rendered_content = None
self.rendered_page = None
l = Loader(source=config.source_dir)
content, context = l.load(self.source)
self.context.update(context)
self.page_content = content
if 'layout' in self.context:
self.layout_path = self.context['layout']
if 'page_layout' in self.context:
self.page_layout_path = self.context['page_layout']
if 'title_slug' not in self.context:
self.context['title_slug'] = slugify(self.title, delim=u'-')
def __repr__(self):
return '<PageResource {0}>'.format(self.title)
def is_markdown(self):
ext = os.path.splitext(self.source)[1]
return ext == '.md'
@property
def layout(self):
return self.config.source_dir + os.sep + self.config.paths['layouts'] + os.sep + self.layout_path
@property
def page_layout(self):
return self.config.source_dir + os.sep + self.config.paths['layouts'] + os.sep + self.page_layout_path
@property
def title(self):
return self.context.get('title', u'')
@property
def destination(self):
root, ext = os.path.splitext(self._clean_source())
return root + u'.html'
url = destination
@property
def order(self):
if 'order' in self.context:
return self.context.get('order')
return self.destination
def should_build(self):
""" Check if this resource should be built out to a html doc.
"""
return self.context.get('build', True)
def build(self):
""" Build this resource using the passed renderer and optional context.
"""
if self.pager is not None:
self.context.update({u'pager': self.pager.to_dict()})
dst = self.config.build_dir + os.sep + self.destination
dst_dir = os.path.dirname(dst)
if not os.path.exists(dst_dir):
mkdir_p(dst_dir)
with codecs.open(dst, 'w', encoding='utf-8') as f:
f.write(self.rendered_page)
def render(self, renderer, site_context):
view = TemplateView()
if self.page_layout_path:
view.template_rel_path = self.page_layout
else:
view.template = self.page_content
part = renderer.render(view, self.context, site=site_context)
if self.is_markdown():
part = markdown.markdown(part)
part = typogrify.typogrify(part)
page_context = {u'content': part}
self.rendered_content = part
page_context.update(self.context)
view = TemplateView()
view.template_rel_path = self.layout
page = renderer.render(view, self.context, page=page_context, site=site_context)
self.rendered_page = page
class MediaResource(Resource):
""" A media resource
    This is a special type of resource that groups images into collections based
    on the directory they are placed in.
"""
def __init__(self, config, source):
super(MediaResource, self).__init__(config, source)
self.source = self.source.replace(self.config.source_dir, '', 1)
def __repr__(self):
return '<MediaResource {0}>'.format(self.source)
def get_image_url(self, size_name):
root, ext = os.path.splitext(self.destination)
path = slugify(root + u'-' + size_name) + ext
return path
def create_image(self, name, size):
try:
import Image
except ImportError:
raise Exception('Image configuration requires PIL to be installed.')
path = self.get_image_url(name)
if path.startswith(os.sep):
path = path[1:]
src = os.sep.join([self.config.source_dir, self.source])
dst = os.sep.join([self.config.build_dir, path])
dst_dir = os.path.dirname(dst)
if not os.path.isdir(dst_dir):
mkdir_p(dst_dir)
if not os.path.isfile(dst):
try:
img = Image.open(src)
img.thumbnail((
size.get('width'),
size.get('height')
), Image.ANTIALIAS)
img.save(dst)
except Exception, e:
_stderr('! Error while processing media "{0}", {1}\n'.format(src, e))
return False
return True
def build_original(self):
""" Build this resource with the original media.
"""
src = os.sep.join([self.config.source_dir, self.source])
dst = os.sep.join([self.config.build_dir, self.destination])
dst_dir = os.path.dirname(dst)
if not os.path.isdir(dst_dir):
mkdir_p(dst_dir)
shutil.copy2(src, dst)
return True
def build(self):
""" Build this resource.
"""
if 'image' not in self.config.media:
return self.build_original()
for size_name, sizes in self.config.media['image'].items():
if not self.create_image(size_name, sizes):
return False
setattr(self, '%s_image_url' % size_name, partial(self.get_image_url, size_name=size_name))
return True
class ResourceTree(dict):
def __init__(self, nodes, **kwargs):
dict.__init__(self, **kwargs)
self.build(self, None, nodes)
self[u'all'] = self.all()
def build(self, tree, parent, nodes):
if parent is not None:
parent = parent.belongs_to
children = [n for n in nodes if n.belongs_to_parent == parent]
for child in children:
if child.belongs_to not in tree:
tree[child.belongs_to] = {u'list': []}
if child not in tree[child.belongs_to][u'list']:
tree[child.belongs_to][u'list'].append(child)
            # TODO: simplify this key handling.
tree[child.belongs_to][u'list'] = sorted(
tree[child.belongs_to][u'list'],
key=lambda r: r.order,
reverse=False)
self.build(tree[child.belongs_to], child, nodes)
def all(self):
a = self._all(self, [])
return sorted(a, key=lambda r: r.order, reverse=False)
def _all(self, d, l):
for k, v in d.iteritems():
if k == 'list':
for a in v:
l.append(a)
if isinstance(v, dict):
self._all(v, l)
return l
class Pager(object):
"""
Page the provided list of resources.
"""
def __init__(self, page, all_resources, config):
self.page = page
self.config = config
self.per_page = config.get('per_page', 20)
self.total_pages = self.total_pages(all_resources, self.per_page)
start_index = 0
if self.total_pages > 0:
start_index = (self.per_page * (self.page - 1))
stop_index = self.page * self.per_page
if self.page == self.total_pages:
stop_index = len(all_resources)
self.belongs_to, path_tail = os.path.split(all_resources[0].destination)
self.belongs_to += '/' if not self.belongs_to.endswith('/') else ''
self.total_resources = len(all_resources)
self.resources = all_resources[start_index:stop_index]
self.previous_page = self.page - 1 if self.page != 1 else None
self.previous_page_path = self.path(self.previous_page)
self.next_page = self.page + 1 if self.page != self.total_pages else None
self.next_page_path = self.path(self.next_page)
def __repr__(self):
return '<Page %s of %s>' % (self.page, self.total_pages)
def to_dict(self):
return {
'total_resources': self.total_resources,
'total_pages': self.total_pages,
'page': self.page,
'resources': self.resources,
'previous_page': self.previous_page,
'previous_page_path': self.previous_page_path,
'next_page': self.next_page,
'next_page_path': self.next_page_path
}
def pageurl(self, page):
path = self.config.get('url', 'page-{0}')
return path.format(page)
def path(self, page):
if page is None or page < 1:
return None
if page == 1:
return self.belongs_to
return self.belongs_to + self.pageurl(page)
@staticmethod
def total_pages(all_resources, per_page):
"""
Calculate total number of pages.
"""
return int(math.ceil(float(len(all_resources)) / float(per_page)))
class Paginator(object):
def __init__(self, site):
self.site = site
def paginate(self, config):
name, c = config
to_paginate = [r for r in self.site.resources if fnmatch.fnmatch(
r.destination,
c.get('pattern')
)]
self._paginate(to_paginate, config)
def _paginate(self, resources, config):
name, c = config
per_page = c.get('per_page', 20)
num_pages = Pager.total_pages(resources, per_page)
for idx, r in enumerate(resources):
#if r.destination.endswith('index.html'):
if idx == 0:
for page_num in range(1, num_pages + 1):
pager = Pager(page_num, resources, c)
if page_num > 1:
# Create new destination
r_copy = copy.deepcopy(r)
r_copy.pager = pager
path_head, path_tail = os.path.split(r_copy.source)
r_copy.source = path_head + u'/' + pager.pageurl(page_num) + u'/' + path_tail
self.site.resources.append(r_copy)
else:
r.pager = pager
class Site(object):
""" Represent a Site to be built.
"""
def __init__(self, config):
self.config = config
self.resources = list()
self.context = self.config.site_context if self.config.site_context else dict()
self.articles = list()
self.media = list()
self.renderer = pystache.Renderer(
search_dirs=[
self.config.source_dir + os.sep + self.config.paths['layouts'],
],
file_extension='html',
file_encoding='utf-8',
string_encoding='utf-8'
)
def _new_resource(self, path):
""" Internal factory for creating a resource from path.
"""
source_path = path.replace(self.config.source_dir, '', 1)
if source_path.startswith(u'/_') or source_path.startswith(u'_'):
return None
if source_path.endswith('.md'):
a = PageResource(self.config, source=source_path)
if 'articles' not in self.context:
self.context['articles'] = {}
self.articles.append(a)
return a
elif path.endswith('.html'):
r = PageResource(self.config, source=source_path)
return r
def read_directories(self):
""" Scan directories for resources.
"""
page_includes = [
'*.html',
'*.md',
'*.txt',
]
excludes = [
os.path.basename(self.config.build_dir),
self.config.paths['layouts'],
self.config.paths['media']
]
for root, dirs, files in os.walk(self.config.source_dir, topdown=True):
dirs[:] = [d for d in dirs if d not in excludes]
for pat in page_includes:
for f in fnmatch.filter(files, pat):
r = self._new_resource(os.path.join(root, f))
if r:
                        # Add resources at the top; this forces children to be rendered before their parents.
self.resources.insert(0, r)
# TODO: Do these things in the loop above instead...
for root, dirs, files in os.walk(
os.path.join(self.config.source_dir,
self.config.paths['media']), topdown=True):
files[:] = [f for f in files if not f.startswith(u'.')]
for f in files:
m = MediaResource(self.config, os.path.join(root, f))
self.media.append(m)
self.articles.sort(key=lambda r: len(r.destination))
self.context['articles'] = ResourceTree(self.articles)
paginator = Paginator(self)
for c in self.config.pagination.items():
paginator.paginate(c)
def _build_media(self):
failed = []
for m in self.media:
if not m.build():
failed.append(m)
# Remove all resources that we failed to build from media.
self.media = list(set(self.media).difference(set(failed)))
#self.media.sort(key=lambda r: len(r.destination))
self.media = sorted(self.media, key=lambda r: r.destination, reverse=True)
self.context[u'media'] = ResourceTree(self.media)
def _build_static(self):
""" Create directories needed for the structure.
This step is done before most of the resources is moved to the build
directory. It involves creation of directories for the structure and
copying of assets.
"""
modified_files = []
# Create assets directory
mkdir_p(os.path.join(self.config.build_dir, self.config.paths['assets']))
for root, dirs, files in os.walk(
os.path.join(self.config.source_dir,
self.config.paths['assets']), topdown=True):
for d in dirs:
src = os.path.join(root, d)
dst = os.path.join(self.config.build_dir, self.config.paths['assets'], d)
if not os.path.exists(dst):
mkdir_p(dst)
shutil.copystat(src, dst)
# Ignore files starting with dot.
files[:] = [f for f in files if not f.startswith('.')]
for f in files:
srcname = os.path.join(root, f)
dstname = srcname.replace(self.config.source_dir, self.config.build_dir, 1)
# Copy file if does not exists in build dir or if it has changed.
if not os.path.exists(dstname) \
or os.path.exists(dstname) \
and os.stat(srcname).st_mtime != os.stat(dstname).st_mtime:
shutil.copy2(srcname, dstname)
modified_files.append(dstname)
        # Compare the asset directory between source and build and remove files
        # and directories that do not match.
        # TODO: Adapt this to be a more generic comparison so we can diff all
        # resources.
asset_dir = os.path.join(self.config.build_dir, self.config.paths['assets'])
for root, dirs, files in os.walk(asset_dir, topdown=True):
for d in dirs:
compare = filecmp.dircmp(
os.path.join(self.config.source_dir, self.config.paths['assets'], d),
os.path.join(root, d)
)
for diff in compare.right_only:
p = os.path.join(asset_dir, d, diff)
if os.path.isdir(p):
try:
shutil.rmtree(p)
except OSError, e:
_stderr('** Could not remove directory {0} {1}\n'.format(p, e))
else:
try:
os.remove(p)
except OSError, e:
_stderr('** Could not remove file {0} {1}\n'.format(p, e))
# If compression is enabled run it.
if self.config.compress:
import yuicompressor
_stdout('** With compressing\n')
for root, dirs, files in os.walk(self.config.build_dir + os.sep + self.config.paths['assets'], topdown=True):
for pat in self.config.compress:
for f in fnmatch.filter(files, pat):
                        if not f.endswith('min.js') and not f.endswith('min.css'):
_stdout('>> {0}\n'.format(f))
yuicompressor.run(
os.path.join(root, f),
"-o", os.path.join(root, f)
)
def build(self):
""" Build this site and it resources.
"""
_stdout('** Building site\n')
# We start fresh on each build.
self.context = self.config.site_context if self.config.site_context else dict()
self.resources = list()
self.articles = list()
self.media = list()
if not os.path.exists(self.config.build_dir):
mkdir_p(self.config.build_dir)
self.read_directories()
self._build_media()
self._build_static()
_stdout('** Render resources\n')
for r in self.resources:
_stdout('>> {0}\n'.format(r.destination))
r.render(self.renderer, self.context)
_stdout('** Building resources\n')
for r in self.resources:
if r.should_build():
_stdout('>> {0}\n'.format(r.destination))
r.build()
def find_resource(self, resource_id):
""" Return an instance based on the id.
"""
for r in self.resources:
if r.id == resource_id:
return r
return None
class ResourceMonitor(threading.Thread):
""" Monitor resources for changes.
Example usage.
>>> def onchange(paths):
... print 'changed', paths
...
>>> ResourceMonitor(['.'], onchange).start()
"""
def __init__(self, paths, onchange):
threading.Thread.__init__(self)
self.daemon = True
self.paths = paths
self.onchange = onchange
self.modified_paths = {}
def diff(self, path):
""" Check for modifications returns a dict of paths and times of change.
"""
modified_paths = {}
for root, dirs, files in os.walk(path, topdown=True):
for f in files:
path = os.path.join(root, f)
try:
modified = os.stat(path).st_mtime
except Exception, e:
continue
if path not in self.modified_paths or self.modified_paths[path] != modified:
modified_paths[path] = modified
return modified_paths
def _diffall(self):
""" Run diff through all paths.
"""
modified_paths = {}
for p in self.paths:
modified_paths.update(self.diff(p))
return modified_paths
def run(self):
""" Starts monitoring, onchange is called if an resource is modified.
"""
self.modified_paths = self._diffall()
while True:
modified_paths = self._diffall()
if modified_paths:
self.modified_paths.update(modified_paths)
self.onchange(modified_paths)
time.sleep(0.5)
def bootstrap():
for p in Config.paths:
mkdir_p(p)
_stdout('+ ' + p + "\n")
_stdout('"If you can see it, I can shoot it." - Cordero (Skeleton Man)' + "\n")
def build(config_path, **config):
c = Config(config_path, **config)
_stdout('Building to %s\n' % (c.build_dir))
# TODO: Add a clean target...
#try:
# shutil.rmtree(c.build_dir)
#except OSError, e:
# pass
site = Site(c)
site.build()
def serve(config_path, port=8000, **config):
c = Config(config_path, **config)
c.source_dir = os.path.abspath(c.source_dir)
c.build_dir = os.path.abspath(c.build_dir)
site = Site(c)
site.build()
mkdir_p(c.build_dir)
import SimpleHTTPServer
import SocketServer
class Server(SocketServer.ForkingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
pass
try:
server = Server(('', port), RequestHandler)
except socket.error, e:
_stderr('Could not start webserver. Are you running another one on the same port?')
return
def rebuild(modified_paths):
_stdout('Rebuilding\n')
for p in modified_paths:
_stdout('Changed {0}\n'.format(p))
# TODO: Pass the modified paths
site.build()
paths = [os.path.join(c.source_dir, p) for p in c.paths]
monitor = ResourceMonitor(paths, rebuild)
monitor.start()
# Run server from our build directory.
os.chdir(c.build_dir)
_stdout('Running webserver at 0.0.0.0:%s for %s\n' % (port, c.build_dir))
_stdout('Type control-c to exit\n')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def main():
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options]", version="%prog {0}".format(__version__))
_opt = _cmd_parser.add_option
_opt("-c", "--config", action="store", help="path to yaml configuration [default: %default].", default="config.yaml")
_opt("-s", "--serve", action="store_true", help="start a webserver.")
_opt("-p", "--port", action="store", help="set port for webserver [default: %default].", default=8000, dest="port")
_opt("--bootstrap", action="store_true", help="create a new site here.")
_opt("--build", action="store_true", help="build this site.")
_opt("--debug", action="store_true", help="set debug mode.")
_opt("--no-compress", action="store_true", help="do not compress css and js.", dest="no_compress", default=False)
_cmd_options, _cmd_args = _cmd_parser.parse_args()
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
sys.path.insert(0, '.')
sys.modules.setdefault('bakery', sys.modules['__main__'])
if opt.bootstrap:
bootstrap()
sys.exit(0)
elif opt.serve:
try:
port = int(opt.port)
except ValueError, e:
_stderr('Invalid value for port: {0}'.format(e))
sys.exit(1)
serve(opt.config, port, no_compress=opt.no_compress)
elif opt.build:
build(opt.config, no_compress=opt.no_compress)
sys.exit(0)
else:
parser.print_help()
_stderr('\nError: No options specified.\n')
sys.exit(1)
if __name__ == '__main__':
main()
``` |
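A small sketch of driving bakery programmatically instead of through `main()`; the module path and directory names are assumptions, and the keyword arguments simply mirror the keys consumed by `Config.__init__`.
```python
# Assumed import; adjust to wherever bakery.py sits on your PYTHONPATH.
from bakery import build, serve

# One-off build: kwargs are merged into Config just like a config.yaml would be.
build(None,
      source_dir='site',
      build_dir='_out',
      site_context={'title': 'My Site'},
      production=True)

# Or run the dev server, which rebuilds whenever files under the watched paths change.
# serve(None, port=8000, source_dir='site', build_dir='_out')
```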
{
"source": "johannkm/dagster-okteto",
"score": 3
} |
#### File: airline_demo/airline_demo/unzip_file_handle.py
```python
import zipfile
from dagster import FileHandle, String, solid
@solid(
    description='''Unzip a file that is resident in an archive file as a member.
    This solid operates on FileHandles, meaning that their physical location is
    dependent on what system storage is operating in the pipeline. The physical file
    could be on local disk, or it could be in s3. If on s3, this solid will download
    that file to local disk, perform the unzip, upload that file back to s3, and
    then return that file handle for downstream use in the computations.
    ''',
required_resource_keys={'file_manager'},
)
def unzip_file_handle(
context, archive_file_handle: FileHandle, archive_member: String
) -> FileHandle:
with context.resources.file_manager.read(archive_file_handle) as local_obj:
with zipfile.ZipFile(local_obj) as zip_file:
# boto requires a file object with seek(), but zip_file.open() would return a
# stream without seek(), so stage on the local filesystem first
local_extracted_path = zip_file.extract(archive_member)
with open(local_extracted_path, 'rb') as local_extracted_file:
return context.resources.file_manager.write(local_extracted_file)
```
#### File: overview_tests/repositories_workspaces_tests/test_repository_definition.py
```python
from docs_snippets.overview.repositories_workspaces.lazy_repository_definition import (
my_lazy_repository,
)
from docs_snippets.overview.repositories_workspaces.repository_definition import (
addition_pipeline,
my_repository,
subtraction_pipeline,
)
from dagster import execute_pipeline
def test_pipelines():
result = execute_pipeline(addition_pipeline)
assert result.success
assert result.result_for_solid('add').output_value() == 3
result = execute_pipeline(subtraction_pipeline)
assert result.success
assert result.result_for_solid('subtract').output_value() == -1
def test_my_repository():
assert my_repository
assert len(my_repository.get_all_pipelines()) == 2
assert len(my_repository.schedule_defs) == 1
def test_my_lazy_repository():
assert my_lazy_repository
assert len(my_lazy_repository.get_all_pipelines()) == 2
assert len(my_lazy_repository.schedule_defs) == 1
```
#### File: simple_lakehouse/simple_lakehouse/lakehouse_def.py
```python
import os
from typing import Tuple
import pandas as pd
from lakehouse import AssetStorage, Lakehouse, asset_storage
from dagster import ModeDefinition, StringSource
class LocalFileSystem:
def __init__(self, config):
self._root = config['root']
def get_fs_path(self, path: Tuple[str, ...]) -> str:
rpath = os.path.join(self._root, *(path[:-1]), path[-1] + '.csv')
return os.path.abspath(rpath)
@asset_storage(config_schema={'root': StringSource})
def pandas_df_local_filesystem_storage(init_context):
local_fs = LocalFileSystem(init_context.resource_config)
class Storage(AssetStorage):
def save(self, obj: pd.DataFrame, path: Tuple[str, ...], _resources) -> None:
'''This saves the dataframe as a CSV.'''
fpath = local_fs.get_fs_path(path)
obj.to_csv(fpath)
def load(self, _python_type, path: Tuple[str, ...], _resources):
'''This reads a dataframe from a CSV.'''
fpath = local_fs.get_fs_path(path)
return pd.read_csv(fpath)
return Storage()
def make_simple_lakehouse():
dev_mode = ModeDefinition(
name='dev',
resource_defs={'filesystem': pandas_df_local_filesystem_storage.configured({'root': '.'}),},
)
return Lakehouse(mode_defs=[dev_mode])
simple_lakehouse = make_simple_lakehouse()
```
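A quick illustration of the path mapping performed by `LocalFileSystem.get_fs_path` above: asset path tuples become nested CSV paths under the configured root. The root and asset path used here are made up, and the snippet reuses the `LocalFileSystem` class defined in the file above.
```python
# Reuses LocalFileSystem from lakehouse_def.py; the values below are hypothetical.
fs = LocalFileSystem({'root': '/tmp/lakehouse'})
print(fs.get_fs_path(('raw', 'daily_temperature')))
# -> /tmp/lakehouse/raw/daily_temperature.csv (returned as an absolute path)
```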
#### File: automation/docs/cli.py
```python
import os
import shutil
import click
from .check_library_docs import validate_library_readmes
from .repo import DagsterDocsRepo, DagsterRepo
CLI_HELP = '''This CLI is used for validating and updating Dagster docs.
'''
DEFAULT_DOCS_DIR = '/tmp/dagster-docs'
@click.group(help=CLI_HELP)
def cli():
pass
@cli.command()
@click.option('-v', '--docs-version', type=click.STRING, required=True)
@click.option('-d', '--docs-dir', type=click.STRING, required=False, default=DEFAULT_DOCS_DIR)
def build(docs_version, docs_dir):
ddr = DagsterDocsRepo(docs_dir)
dr = DagsterRepo()
ddr.check_new_version_dir(docs_version)
ddr.remove_existing_docs_files()
# Build and copy new docs files into dagster-docs
dr.build_docs(docs_version)
copytree(dr.out_path, docs_dir)
dr.commit(docs_version)
ddr.commit(docs_version)
@cli.command()
def validate_libraries():
validate_library_readmes()
def copytree(src, dst):
'''https://stackoverflow.com/a/12514470/11295366'''
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d)
else:
shutil.copy2(s, d)
def main():
click_cli = click.CommandCollection(sources=[cli], help=CLI_HELP)
click_cli()
if __name__ == '__main__':
main()
```
#### File: core/storage/init.py
```python
from collections import namedtuple
from dagster import check
from dagster.core.definitions import (
IntermediateStorageDefinition,
ModeDefinition,
PipelineDefinition,
SystemStorageDefinition,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.storage.type_storage import TypeStoragePluginRegistry
from dagster.core.system_config.objects import EnvironmentConfig
class InitSystemStorageContext(
namedtuple(
'InitSystemStorageContext',
(
'pipeline_def mode_def system_storage_def pipeline_run instance environment_config '
'type_storage_plugin_registry resources system_storage_config'
),
)
):
'''System storage-specific initialization context.
Attributes:
pipeline_def (PipelineDefinition): The definition of the pipeline in context.
        mode_def (ModeDefinition): The definition of the mode in context.
system_storage_def (SystemStorageDefinition): The definition of the system storage to be
constructed.
pipeline_run (PipelineRun): The pipeline run in context.
instance (DagsterInstance): The instance.
environment_config (EnvironmentConfig): The environment config.
type_storage_plugin_registry (TypeStoragePluginRegistry): Registry containing custom type
storage plugins.
resources (Any): Resources available in context.
system_storage_config (Dict[str, Any]): The system storage-specific configuration data
provided by the environment config. The schema for this data is defined by the
``config_field`` argument to :py:class:`SystemStorageDefinition`.
'''
def __new__(
cls,
pipeline_def,
mode_def,
system_storage_def,
pipeline_run,
instance,
environment_config,
type_storage_plugin_registry,
resources,
system_storage_config,
):
return super(InitSystemStorageContext, cls).__new__(
cls,
pipeline_def=check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition),
mode_def=check.inst_param(mode_def, 'mode_def', ModeDefinition),
system_storage_def=check.inst_param(
system_storage_def, 'system_storage_def', SystemStorageDefinition
),
pipeline_run=check.inst_param(pipeline_run, 'pipeline_run', PipelineRun),
instance=check.inst_param(instance, 'instance', DagsterInstance),
environment_config=check.inst_param(
environment_config, 'environment_config', EnvironmentConfig
),
type_storage_plugin_registry=check.inst_param(
type_storage_plugin_registry,
'type_storage_plugin_registry',
TypeStoragePluginRegistry,
),
resources=check.not_none_param(resources, 'resources'),
system_storage_config=check.dict_param(
                system_storage_config, 'system_storage_config', key_type=str
),
)
class InitIntermediateStorageContext(
namedtuple(
'InitIntermediateStorageContext',
(
'pipeline_def mode_def intermediate_storage_def pipeline_run instance environment_config '
'type_storage_plugin_registry resources intermediate_storage_config'
),
)
):
'''Intermediate storage-specific initialization context.
Attributes:
pipeline_def (PipelineDefinition): The definition of the pipeline in context.
        mode_def (ModeDefinition): The definition of the mode in context.
intermediate_storage_def (IntermediateStorageDefinition): The definition of the intermediate storage to be
constructed.
pipeline_run (PipelineRun): The pipeline run in context.
instance (DagsterInstance): The instance.
environment_config (EnvironmentConfig): The environment config.
type_storage_plugin_registry (TypeStoragePluginRegistry): Registry containing custom type
storage plugins.
resources (Any): Resources available in context.
intermediate_storage_config (Dict[str, Any]): The intermediate storage-specific configuration data
provided by the environment config. The schema for this data is defined by the
``config_field`` argument to :py:class:`IntermediateStorageDefinition`.
'''
def __new__(
cls,
pipeline_def,
mode_def,
intermediate_storage_def,
pipeline_run,
instance,
environment_config,
type_storage_plugin_registry,
resources,
intermediate_storage_config,
):
return super(InitIntermediateStorageContext, cls).__new__(
cls,
pipeline_def=check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition),
mode_def=check.inst_param(mode_def, 'mode_def', ModeDefinition),
intermediate_storage_def=check.inst_param(
intermediate_storage_def, 'intermediate_storage_def', IntermediateStorageDefinition
),
pipeline_run=check.inst_param(pipeline_run, 'pipeline_run', PipelineRun),
instance=check.inst_param(instance, 'instance', DagsterInstance),
environment_config=check.inst_param(
environment_config, 'environment_config', EnvironmentConfig
),
type_storage_plugin_registry=check.inst_param(
type_storage_plugin_registry,
'type_storage_plugin_registry',
TypeStoragePluginRegistry,
),
resources=check.not_none_param(resources, 'resources'),
intermediate_storage_config=check.dict_param(
                intermediate_storage_config, 'intermediate_storage_config', key_type=str
),
)
```
#### File: dagster_tests/api_tests/utils.py
```python
from dagster import file_relative_path
from dagster.core.code_pointer import FileCodePointer
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.host_representation.handle import PipelineHandle, RepositoryLocationHandle
from dagster.core.host_representation.repository_location import (
GrpcServerRepositoryLocation,
InProcessRepositoryLocation,
PythonEnvRepositoryLocation,
)
from dagster.grpc.types import LoadableTargetOrigin
def get_bar_repo_repository_location_handle():
return RepositoryLocationHandle.create_out_of_process_location(
location_name='bar_repo_location',
repository_code_pointer_dict={
'bar_repo': FileCodePointer(
file_relative_path(__file__, 'api_tests_repo.py'), 'bar_repo'
)
},
)
def get_bar_repo_grpc_repository_location_handle():
return RepositoryLocationHandle.create_process_bound_grpc_server_location(
loadable_target_origin=LoadableTargetOrigin(
attribute='bar_repo', python_file=file_relative_path(__file__, 'api_tests_repo.py'),
),
location_name='bar_repo',
)
def get_bar_repo_handle():
return (
PythonEnvRepositoryLocation(get_bar_repo_repository_location_handle())
.get_repository('bar_repo')
.handle
)
def get_bar_grpc_repo_handle():
return (
GrpcServerRepositoryLocation(get_bar_repo_grpc_repository_location_handle())
.get_repository('bar_repo')
.handle
)
def get_foo_pipeline_handle():
return PipelineHandle('foo', get_bar_repo_handle())
def get_foo_grpc_pipeline_handle():
return PipelineHandle('foo', get_bar_grpc_repo_handle())
def legacy_get_bar_repo_handle():
recon_repo = ReconstructableRepository.from_legacy_repository_yaml(
file_relative_path(__file__, 'repository_file.yaml')
)
return InProcessRepositoryLocation(recon_repo).get_repository('bar_repo').handle
def legacy_get_foo_pipeline_handle():
return PipelineHandle('foo', legacy_get_bar_repo_handle())
```
#### File: core_tests/definitions_tests/test_logger_definition.py
```python
import logging
from dagster import (
Enum,
EnumValue,
Field,
Int,
ModeDefinition,
configured,
execute_pipeline,
logger,
pipeline,
)
from dagster.core.log_manager import coerce_valid_log_level
def assert_pipeline_runs_with_logger(logger_def, logger_config):
@pipeline(mode_defs=[ModeDefinition(logger_defs={'test_logger': logger_def})])
def pass_pipeline():
pass
# give us an opportunity to try an empty dict here
config_value = {'config': logger_config} if logger_config else {}
result = execute_pipeline(pass_pipeline, {'loggers': {'test_logger': config_value}})
assert result.success
def test_dagster_type_logger_decorator_config():
@logger(Int)
def dagster_type_logger_config(_):
raise Exception('not called')
assert dagster_type_logger_config.config_schema.config_type.given_name == 'Int'
@logger(int)
def python_type_logger_config(_):
raise Exception('not called')
assert python_type_logger_config.config_schema.config_type.given_name == 'Int'
def test_logger_using_configured():
it = {'ran': False}
@logger(config_schema=Field(str))
def test_logger(init_context):
assert init_context.logger_config == 'secret testing value!!'
it['ran'] = True
logger_ = logging.Logger('test', level=coerce_valid_log_level('INFO'))
return logger_
test_logger_configured = configured(test_logger)('secret testing value!!')
assert_pipeline_runs_with_logger(test_logger_configured, {})
assert it['ran']
def test_logger_with_enum_in_schema_using_configured():
from enum import Enum as PythonEnum
class TestPythonEnum(PythonEnum):
VALUE_ONE = 0
OTHER = 1
DagsterEnumType = Enum(
'TestEnum',
[
EnumValue('VALUE_ONE', TestPythonEnum.VALUE_ONE),
EnumValue('OTHER', TestPythonEnum.OTHER),
],
)
it = {}
@logger(config_schema={'enum': DagsterEnumType})
def test_logger(init_context):
assert init_context.logger_config['enum'] == TestPythonEnum.OTHER
it['ran test_logger'] = True
logger_ = logging.Logger('test', level=coerce_valid_log_level('INFO'))
return logger_
@configured(test_logger, {'enum': DagsterEnumType})
def pick_different_enum_value(config):
it['ran pick_different_enum_value'] = True
return {'enum': 'OTHER' if config['enum'] == TestPythonEnum.VALUE_ONE else 'VALUE_ONE'}
assert_pipeline_runs_with_logger(pick_different_enum_value, {'enum': 'VALUE_ONE'})
assert it['ran test_logger']
assert it['ran pick_different_enum_value']
```
#### File: runtime_types_tests/config_schema_tests/test_config_schema.py
```python
import re
import pytest
from dagster import String
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.types.config_schema import (
dagster_type_loader,
input_hydration_config,
output_materialization_config,
)
def test_dagster_type_loader_one():
@dagster_type_loader(String)
def _foo(_, hello):
return hello
def test_dagster_type_loader_missing_context():
with pytest.raises(DagsterInvalidDefinitionError):
@dagster_type_loader(String)
def _foo(hello):
return hello
def test_dagster_type_loader_missing_variable():
with pytest.raises(DagsterInvalidDefinitionError):
@dagster_type_loader(String)
def _foo(_):
return 1
def test_input_hydration_config_backcompat_args():
with pytest.warns(
UserWarning,
match=re.escape(
'"input_hydration_config" is deprecated and will be removed in 0.10.0, use '
'"dagster_type_loader" instead.'
),
):
@input_hydration_config(config_cls=String)
def _foo(_, hello):
return hello
def test_output_materialization_config_backcompat_args():
with pytest.warns(
UserWarning,
match=re.escape(
'"output_materialization_config" is deprecated and will be removed in 0.10.0, use '
'"dagster_type_materializer" instead.'
),
):
@output_materialization_config(config_cls=String)
def _foo(_, _a, _b):
pass
```
#### File: general_tests/grpc_tests/test_heartbeat.py
```python
import time
from dagster.grpc.server import GrpcServerProcess
from dagster.grpc.types import LoadableTargetOrigin
from dagster.utils import file_relative_path
def test_heartbeat():
loadable_target_origin = LoadableTargetOrigin(
attribute='bar_repo', python_file=file_relative_path(__file__, 'grpc_repo.py'),
)
server = GrpcServerProcess(
loadable_target_origin=loadable_target_origin,
max_workers=2,
heartbeat=True,
heartbeat_timeout=1,
)
with server.create_ephemeral_client() as client:
client.heartbeat()
assert server.server_process.poll() is None
time.sleep(2)
assert server.server_process.poll() is not None
```
#### File: general_tests/grpc_tests/test_server.py
```python
import threading
import time
from dagster.grpc.client import ephemeral_grpc_api_client
def _stream_events_target(results, api_client):
for result in api_client.streaming_ping(sequence_length=100000, echo='foo'):
results.append(result)
def test_streaming_terminate():
with ephemeral_grpc_api_client() as api_client:
streaming_results = []
stream_events_result_thread = threading.Thread(
target=_stream_events_target, args=[streaming_results, api_client]
)
stream_events_result_thread.daemon = True
stream_events_result_thread.start()
while not streaming_results:
time.sleep(0.001)
res = api_client.shutdown_server()
assert res.success
assert res.serializable_error_info is None
stream_events_result_thread.join()
assert len(streaming_results) == 100000
api_client._server_process.wait() # pylint: disable=protected-access
assert api_client._server_process.poll() == 0 # pylint: disable=protected-access
```
#### File: dagster_airflow/operators/kubernetes_operator.py
```python
import sys
import time
from airflow.contrib.kubernetes import kube_client, pod_generator, pod_launcher
from airflow.exceptions import AirflowException
from airflow.utils.state import State
from dagster_airflow.vendor.kubernetes_pod_operator import KubernetesPodOperator
from dagster_graphql.client.query import RAW_EXECUTE_PLAN_MUTATION
from dagster_graphql.client.util import construct_execute_plan_variables, parse_raw_log_lines
from dagster import __version__ as dagster_version
from dagster import check, seven
from dagster.core.events import EngineEventData
from dagster.core.instance import AIRFLOW_EXECUTION_DATE_STR, DagsterInstance
from dagster.utils.error import serializable_error_info_from_exc_info
from .util import (
airflow_tags_for_ts,
check_events_for_failures,
check_events_for_skips,
get_aws_environment,
)
# For retries on log retrieval
LOG_RETRIEVAL_MAX_ATTEMPTS = 5
LOG_RETRIEVAL_WAITS_BETWEEN_ATTEMPTS_SEC = 5
class DagsterKubernetesPodOperator(KubernetesPodOperator):
'''Dagster operator for Apache Airflow.
Wraps a modified KubernetesPodOperator.
'''
# py2 compat
# pylint: disable=keyword-arg-before-vararg
def __init__(self, operator_parameters, *args):
kwargs = operator_parameters.op_kwargs
self.pipeline_name = operator_parameters.pipeline_name
self.pipeline_snapshot = operator_parameters.pipeline_snapshot
self.execution_plan_snapshot = operator_parameters.execution_plan_snapshot
self.parent_pipeline_snapshot = operator_parameters.parent_pipeline_snapshot
kwargs['name'] = 'dagster.{pipeline_name}.{task_id}'.format(
pipeline_name=self.pipeline_name, task_id=operator_parameters.task_id
).replace(
'_', '-' # underscores are not permissible DNS names
)
self.run_config = operator_parameters.run_config
self.mode = operator_parameters.mode
self.step_keys = operator_parameters.step_keys
self.recon_repo = operator_parameters.recon_repo
self._run_id = None
# self.instance might be None in, for instance, a unit test setting where the operator
# was being directly instantiated without passing through make_airflow_dag
self.instance = (
DagsterInstance.from_ref(operator_parameters.instance_ref)
if operator_parameters.instance_ref
else None
)
# Add AWS creds
self.env_vars = kwargs.get('env_vars', {})
for k, v in get_aws_environment().items():
self.env_vars.setdefault(k, v)
kwargs.setdefault('labels', {})
kwargs['labels'].setdefault('dagster_pipeline', self.pipeline_name)
kwargs['labels'].setdefault('app.kubernetes.io/name', 'dagster')
kwargs['labels'].setdefault('app.kubernetes.io/instance', self.pipeline_name)
kwargs['labels'].setdefault('app.kubernetes.io/version', dagster_version)
kwargs['labels'].setdefault('app.kubernetes.io/component', 'pipeline-execution')
kwargs['labels'].setdefault('app.kubernetes.io/part-of', 'dagster-airflow')
kwargs['labels'].setdefault('app.kubernetes.io/managed-by', 'dagster-airflow')
# The xcom mechanism for the pod operator is very unlike that of the Docker operator, so
# we disable it
if 'xcom_push' in kwargs:
self.log.warning(
'xcom_push cannot be enabled with the DagsterKubernetesPodOperator, disabling'
)
kwargs['xcom_push'] = False
super(DagsterKubernetesPodOperator, self).__init__(
task_id=operator_parameters.task_id, dag=operator_parameters.dag, *args, **kwargs
)
@property
def run_id(self):
return getattr(self, '_run_id', '')
def query(self, airflow_ts):
check.opt_str_param(airflow_ts, 'airflow_ts')
variables = construct_execute_plan_variables(
self.recon_repo,
self.mode,
self.run_config,
self.pipeline_name,
self.run_id,
self.step_keys,
)
tags = airflow_tags_for_ts(airflow_ts)
variables['executionParams']['executionMetadata']['tags'] = tags
self.log.info(
'Executing GraphQL query: {query}\n'.format(query=RAW_EXECUTE_PLAN_MUTATION)
+ 'with variables:\n'
+ seven.json.dumps(variables, indent=2)
)
return [
'dagster-graphql',
'-v',
'{}'.format(seven.json.dumps(variables)),
'-t',
'{}'.format(RAW_EXECUTE_PLAN_MUTATION),
]
def execute(self, context):
try:
from dagster_graphql.client.mutations import (
DagsterGraphQLClientError,
handle_execution_errors,
handle_execute_plan_result_raw,
)
except ImportError:
raise AirflowException(
'To use the DagsterKubernetesPodOperator, dagster and dagster_graphql must be'
' installed in your Airflow environment.'
)
if 'run_id' in self.params:
self._run_id = self.params['run_id']
elif 'dag_run' in context and context['dag_run'] is not None:
self._run_id = context['dag_run'].run_id
# return to original execute code:
try:
client = kube_client.get_kube_client(
in_cluster=self.in_cluster,
cluster_context=self.cluster_context,
config_file=self.config_file,
)
gen = pod_generator.PodGenerator()
for mount in self.volume_mounts:
gen.add_mount(mount)
for volume in self.volumes:
gen.add_volume(volume)
pod = gen.make_pod(
namespace=self.namespace,
image=self.image,
pod_id=self.name,
cmds=self.cmds,
arguments=self.query(context.get('ts')),
labels=self.labels,
)
pod.service_account_name = self.service_account_name
pod.secrets = self.secrets
pod.envs = self.env_vars
pod.image_pull_policy = self.image_pull_policy
pod.image_pull_secrets = self.image_pull_secrets
pod.annotations = self.annotations
pod.resources = self.resources
pod.affinity = self.affinity
pod.node_selectors = self.node_selectors
pod.hostnetwork = self.hostnetwork
pod.tolerations = self.tolerations
pod.configmaps = self.configmaps
pod.security_context = self.security_context
launcher = pod_launcher.PodLauncher(kube_client=client, extract_xcom=self.xcom_push)
try:
if self.instance:
tags = (
{AIRFLOW_EXECUTION_DATE_STR: context.get('ts')} if 'ts' in context else {}
)
run = self.instance.register_managed_run(
pipeline_name=self.pipeline_name,
run_id=self.run_id,
run_config=self.run_config,
mode=self.mode,
solids_to_execute=None,
step_keys_to_execute=None,
tags=tags,
root_run_id=None,
parent_run_id=None,
pipeline_snapshot=self.pipeline_snapshot,
execution_plan_snapshot=self.execution_plan_snapshot,
parent_pipeline_snapshot=self.parent_pipeline_snapshot,
)
# we won't use the "result", which is the pod's xcom json file
(final_state, _) = launcher.run_pod(
pod, startup_timeout=self.startup_timeout_seconds, get_logs=self.get_logs
)
# fetch the last line independently of whether logs were read
# unbelievably, if you set tail_lines=1, the returned json has its double quotes
# turned into unparseable single quotes
res = None
num_attempts = 0
while not res and num_attempts < LOG_RETRIEVAL_MAX_ATTEMPTS:
raw_res = client.read_namespaced_pod_log(
name=pod.name, namespace=pod.namespace, container='base'
)
res = parse_raw_log_lines(raw_res.split('\n'))
time.sleep(LOG_RETRIEVAL_WAITS_BETWEEN_ATTEMPTS_SEC)
num_attempts += 1
try:
handle_execution_errors(res, 'executePlan')
except DagsterGraphQLClientError as err:
self.instance.report_engine_event(
str(err),
run,
EngineEventData.engine_error(
serializable_error_info_from_exc_info(sys.exc_info())
),
self.__class__,
)
raise
events = handle_execute_plan_result_raw(res)
if self.instance:
for event in events:
self.instance.handle_new_event(event)
events = [e.dagster_event for e in events]
check_events_for_failures(events)
check_events_for_skips(events)
return events
finally:
self._run_id = None
if self.is_delete_operator_pod:
launcher.delete_pod(pod)
if final_state != State.SUCCESS:
raise AirflowException('Pod returned a failure: {state}'.format(state=final_state))
# note the lack of returning the default xcom
except AirflowException as ex:
raise AirflowException('Pod Launching failed: {error}'.format(error=ex))
```
#### File: dagster-airflow/dagster_airflow_tests/test_compile.py
```python
from dagster_airflow.compile import coalesce_execution_steps
from dagster_test.toys.composition import composition
from dagster.core.definitions.executable import InMemoryExecutablePipeline
from dagster.core.execution.plan.plan import ExecutionPlan
from dagster.core.system_config.objects import EnvironmentConfig
def test_compile():
environment_config = EnvironmentConfig.build(
composition, {'solids': {'add_four': {'inputs': {'num': {'value': 1}}}}},
)
plan = ExecutionPlan.build(InMemoryExecutablePipeline(composition), environment_config)
res = coalesce_execution_steps(plan)
assert set(res.keys()) == {
'add_four.add_two.add_one',
'add_four.add_two.add_one_2',
'add_four.add_two_2.add_one',
'add_four.add_two_2.add_one_2',
'div_four.div_two',
'div_four.div_two_2',
'int_to_float',
}
```
#### File: dagster_aws/s3/solids.py
```python
from dagster import (
AssetMaterialization,
EventMetadataEntry,
Field,
FileHandle,
InputDefinition,
Output,
OutputDefinition,
StringSource,
check,
dagster_type_loader,
solid,
)
from dagster.core.types.dagster_type import PythonObjectDagsterType
from .file_manager import S3FileHandle
def dict_with_fields(name, fields):
check.str_param(name, 'name')
check.dict_param(fields, 'fields', key_type=str)
field_names = set(fields.keys())
@dagster_type_loader(fields)
def _input_schema(_context, value):
check.dict_param(value, 'value')
check.param_invariant(set(value.keys()) == field_names, 'value')
return value
class _DictWithSchema(PythonObjectDagsterType):
def __init__(self):
super(_DictWithSchema, self).__init__(python_type=dict, name=name, loader=_input_schema)
return _DictWithSchema()
S3Coordinate = dict_with_fields(
'S3Coordinate',
fields={
'bucket': Field(StringSource, description='S3 bucket name'),
'key': Field(StringSource, description='S3 key name'),
},
)
def last_key(key):
if '/' not in key:
return key
comps = key.split('/')
return comps[-1]
@solid(
config_schema={
'Bucket': Field(
StringSource, description='The name of the bucket to upload to.', is_required=True
),
'Key': Field(
StringSource, description='The name of the key to upload to.', is_required=True
),
},
input_defs=[InputDefinition('file_handle', FileHandle, description='The file to upload.')],
output_defs=[OutputDefinition(name='s3_file_handle', dagster_type=S3FileHandle)],
description='''Take a file handle and upload it to s3. Returns an S3FileHandle.''',
required_resource_keys={'s3'},
)
def file_handle_to_s3(context, file_handle):
bucket = context.solid_config['Bucket']
key = context.solid_config['Key']
with context.file_manager.read(file_handle, 'rb') as fileobj:
context.resources.s3.upload_fileobj(fileobj, bucket, key)
s3_file_handle = S3FileHandle(bucket, key)
yield AssetMaterialization(
asset_key=s3_file_handle.s3_path,
metadata_entries=[EventMetadataEntry.path(s3_file_handle.s3_path, label=last_key(key))],
)
yield Output(value=s3_file_handle, output_name='s3_file_handle')
```
#### File: dagstermill/dagstermill/serialize.py
```python
from dagster import check, seven
from dagster.core.types.dagster_type import DagsterType, DagsterTypeKind
PICKLE_PROTOCOL = 2
def is_json_serializable(value):
try:
seven.json.dumps(value)
return True
except TypeError:
return False
def read_value(dagster_type, value):
check.inst_param(dagster_type, 'dagster_type', DagsterType)
check.dict_param(value, 'value')
check.invariant(
list(value.keys()) in [['value'], ['file']],
'Malformed value received with keys: {bad_keys}, expected [\'value\'] or [\'file\']'.format(
bad_keys='[{keys}]'.format(
keys=', '.join(['\'{key}\''.format(key=key) for key in value.keys()])
)
),
)
if 'value' in value:
return value['value']
else:
return dagster_type.serialization_strategy.deserialize_from_file(value['file'])
def write_value(dagster_type, value, target_file):
check.inst_param(dagster_type, 'dagster_type', DagsterType)
if dagster_type.kind == DagsterTypeKind.SCALAR or (
dagster_type.kind == DagsterTypeKind.ANY and is_json_serializable(value)
):
return {'value': value}
else:
dagster_type.serialization_strategy.serialize_to_file(value, target_file)
return {'file': target_file}
```
#### File: lakehouse/lakehouse/computation.py
```python
from collections import namedtuple
from dagster import check
class AssetDependency(namedtuple('_AssetDependency', 'asset in_memory_type')):
'''An asset dependency describes how the contents of another asset are provided to a
Computation's compute_fn.
Attributes:
asset (Asset): The asset that we depend on.
in_memory_type (Type): The type, e.g. pandas.DataFrame, that the asset's contents should be
hydrated into to be passed as the compute_fn.
'''
class Computation(namedtuple('_Computation', 'compute_fn deps output_in_memory_type')):
'''A description of the computation responsible for producing an asset.
E.g. a SQL select statement or python code that trains an ML model.
The computation is agnostic to the physical details of where it's stored and how it's saved to
and loaded from that storage. The Lakehouse that the asset resides in is responsible for
those.
Attributes:
compute_fn (Callable): A python function with no side effects that produces an in-memory
            representation of the asset's contents from in-memory representations of the
            asset's inputs.
deps (Dict[str, AssetDependency]): The assets that the compute_fn depends on
to produce the asset's contents, keyed by their arg names in the compute_fn
definition.
output_in_memory_type (Type): The python type that the compute_fn will return.
'''
def __new__(cls, compute_fn, deps, output_in_memory_type):
return super(Computation, cls).__new__(
cls,
compute_fn=check.callable_param(compute_fn, 'compute_fn'),
deps=check.dict_param(deps, 'deps', key_type=str, value_type=AssetDependency),
output_in_memory_type=check.inst_param(
output_in_memory_type, 'output_in_memory_type', type
),
)
``` |
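To make the data structure above concrete, here is a minimal, hedged sketch of wiring the two namedtuples together. The import path `lakehouse.computation`, the pandas in-memory type, and the placeholder asset objects are assumptions for illustration; the real `Asset` class lives elsewhere in lakehouse and is not shown in this file.

```python
# Hypothetical usage sketch -- import path and asset stand-ins are assumptions.
import pandas as pd

from lakehouse.computation import AssetDependency, Computation


def join_orders_and_customers(orders, customers):
    """Pure function producing the asset's contents from its inputs."""
    return orders.merge(customers, on="customer_id")


# Placeholder stand-ins for upstream Asset objects.
orders_asset = object()
customers_asset = object()

computation = Computation(
    compute_fn=join_orders_and_customers,
    deps={
        "orders": AssetDependency(asset=orders_asset, in_memory_type=pd.DataFrame),
        "customers": AssetDependency(asset=customers_asset, in_memory_type=pd.DataFrame),
    },
    output_in_memory_type=pd.DataFrame,
)
```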
{
"source": "JohannKT/cti-stix-validator",
"score": 2
} |
#### File: test/v20/bundle_tests.py
```python
import copy
import json
import pytest
from . import ValidatorTest
from ... import ValidationError
VALID_BUNDLE = u"""
{
"type": "bundle",
"id": "bundle--44af6c39-c09b-49c5-9de2-394224b04982",
"spec_version": "2.0",
"objects": [
{
"type": "identity",
"id": "identity--8ae20dde-83d4-4218-88fd-41ef0dabf9d1",
"created": "2016-08-22T14:09:00.123Z",
"modified": "2016-08-22T14:09:00.123Z",
"name": "mitre.org",
"identity_class": "organization"
}
]
}
"""
class BundleTestCases(ValidatorTest):
valid_bundle = json.loads(VALID_BUNDLE)
def test_wellformed_bundle(self):
self.assertTrueWithOptions(self.valid_bundle)
def test_bundle_object_categories(self):
bundle = copy.deepcopy(self.valid_bundle)
bundle['identities'] = bundle['objects']
del bundle['objects']
self.assertFalseWithOptions(bundle)
def test_bundle_created(self):
bundle = copy.deepcopy(self.valid_bundle)
bundle['created'] = "2016-08-22T14:09:00.123456Z"
self.assertFalseWithOptions(bundle)
def test_bundle_version(self):
bundle = copy.deepcopy(self.valid_bundle)
bundle['version'] = 1
self.assertFalseWithOptions(bundle)
def test_bundle_duplicate_ids(self):
bundle = copy.deepcopy(self.valid_bundle)
bundle['objects'].append(bundle['objects'][0].copy())
self.assertFalseWithOptions(bundle)
bundle['objects'][1]['modified'] = "2017-06-22T14:09:00.123Z"
self.assertTrueWithOptions(bundle)
def test_silent_and_verbose(self):
bundle = json.loads(VALID_BUNDLE)
with pytest.raises(ValueError) as exc:
self.assertFalseWithOptions(bundle, silent=True, verbose=True)
assert 'silent or verbose, but not both' in str(exc)
def test_bundle_sdo_missing_type(self):
bundle = copy.deepcopy(self.valid_bundle)
del bundle['objects'][0]['type']
with pytest.raises(ValidationError):
self.assertFalseWithOptions(bundle)
```
#### File: stix2validator/v21/errors.py
```python
from collections import deque
from jsonschema import exceptions as schema_exceptions
from ..errors import (NoJSONFileFoundError, PatternError, SchemaError, # noqa
SchemaInvalidError, ValidationError, pretty_error)
from .enums import CHECK_CODES
class JSONError(schema_exceptions.ValidationError):
"""Wrapper for errors thrown by iter_errors() in the jsonschema module.
Makes errors generated by our functions look like those from jsonschema.
"""
def __init__(self, msg=None, instance_id=None, check_code=None):
if check_code is not None:
            # Get code number from name
code = list(CHECK_CODES.keys())[list(CHECK_CODES.values()).index(check_code)]
msg = '{%s} %s' % (code, msg)
super(JSONError, self).__init__(msg, path=deque([instance_id]))
``` |
{
"source": "JohannKT/cti-taxii-server",
"score": 2
} |
#### File: medallion/backends/base.py
```python
class Backend(object):
def server_discovery(self):
"""
Fill:
Returns the discovery information (api_roots, etc) for this server
Args:
-none-
Returns:
discovery information
"""
raise NotImplementedError()
def get_collections(self, api_root):
"""
Fill:
implement the get_collections TAXII endpoint by obtaining the collection metadata
for this api_root
Args:
api_root (str): the name of the api_root.
Returns:
metadata for all collections at this api root
"""
raise NotImplementedError()
def get_collection(self, api_root, collection_id):
"""
Fill:
implement the get_collection TAXII service by obtaining the collection metadata
for collection
Args:
api_root (str): the name of the api_root.
collection_id (str): the id of the collection
Returns:
collection metadata
"""
raise NotImplementedError()
def get_object_manifest(self, api_root, collection_id, filter_args, allowed_filters):
"""
Fill:
implement the get_object_manifest TAXII endpoint by obtaining the metadata
for the selected objects
Args:
api_root (str): the name of the api_root.
collection_id (str): the id of the collection
filter_args (str): query string from URL containing filter args
allowed_filters (list): STIX properties which are allowed in the filter for this endpoint
Returns:
metadata for the objects
"""
raise NotImplementedError()
def get_api_root_information(self, api_root):
"""
Fill:
implement the get_api_root_information TAXII endpoint by obtaining api_root metadata
Args:
api_root (str): the name of the api_root.
Returns:
metadata for the api root
"""
raise NotImplementedError()
def get_status(self, api_root, status_id):
"""
Fill:
implement the get_status TAXII endpoint by obtaining the status of an add_objects request
Args:
api_root (str): the name of the api_root.
status_id (str): the id of the add_objects request
Returns:
status of the request (including):
                how many objects were successfully saved
how many objects failed to be saved
how many objects are pending
"""
raise NotImplementedError()
def get_objects(self, api_root, collection_id, filter_args, allowed_filters):
"""
Fill:
implement the get_objects TAXII endpoint by obtaining the data from a collection
Args:
api_root (str): the name of the api_root.
collection_id (str): the id of the collection
filter_args (str): query string from URL containing filter args
allowed_filters (list): STIX properties which are allowed in the filter for this endpoint
Returns:
data from the collection that satisfies the filter
"""
raise NotImplementedError()
def add_objects(self, api_root, collection_id, objs):
"""
Fill:
implement the add_objects TAXII endpoint by save data into a collection
Args:
api_root (str): the name of the api_root.
collection_id (str): the id of the collection
objs (list): objects to insert into the collection
Returns:
status of the request (including):
                how many objects were successfully saved
how many objects failed to be saved
how many objects are pending
METADATA ABOUT EACH SUCCESSFUL OBJECT SAVED MUST BE AVAILABLE VIA THE get_object_manifest API CALL
THIS CAN BE IMPLEMENTED AS A SEPARATE STORE, OTHERWISE IT NEEDS TO BE GENERATABLE DYNAMICALLY
"""
raise NotImplementedError()
def get_object(self, api_root, collection_id, object_id, filter_args, allowed_filters):
"""
Fill:
implement the get_object TAXII endpoint by obtaining the data from a collection related
to the object_id
Args:
api_root (str): the name of the api_root.
collection_id (str): the id of the collection
object_id (str): the id of the requested object
filter_args (str): query string from URL containing filter args
allowed_filters (list): STIX properties which are allowed in the filter for this endpoint
Returns:
data from the collection that satisfies the filter
"""
raise NotImplementedError()
``` |
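To illustrate the contract above, here is a minimal, hedged sketch of a concrete backend that serves discovery and collection metadata from an in-memory dict. The import path `medallion.backends.base` and the dict layout are assumptions for illustration, not part of medallion itself; the endpoints that are not overridden still raise `NotImplementedError`.

```python
# Hypothetical in-memory backend sketch; the data layout below is an assumption.
from medallion.backends.base import Backend


class DictBackend(Backend):
    """Serves discovery and collection metadata from a plain dict."""

    def __init__(self, data):
        # Assumed shape: {"/discovery": {...},
        #                 "api_roots": {root: {"collections": [{"id": ..., ...}, ...]}}}
        self.data = data

    def server_discovery(self):
        return self.data.get("/discovery", {})

    def get_collections(self, api_root):
        root = self.data.get("api_roots", {}).get(api_root, {})
        return {"collections": root.get("collections", [])}

    def get_collection(self, api_root, collection_id):
        root = self.data.get("api_roots", {}).get(api_root, {})
        for collection in root.get("collections", []):
            if collection.get("id") == collection_id:
                return collection
        return None
```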
{
"source": "johann-lau/2048",
"score": 4
} |
#### File: johann-lau/2048/2048.py
```python
from enum import Enum
import random
class Direction(Enum):
W = UP = 1
D = RIGHT = 2
S = DOWN = 3
A = LEFT = 4
class Game2048:
def __init__(self):
        self.score = 0 # Incremented when tiles merge in input_left
self.grid = [[None, None, None, None],
[None, None, None, None],
[None, None, None, None],
[None, None, None, None]]
self.spawn_random() # Two random tiles at initialization
self.spawn_random()
def pretty_print(self): # Returns a beautified string for monospace printing.
flattened_grid = [j for i in self.grid for j in i if j != None]
box_length = len(str(max(flattened_grid))) # String length of maximum value in self.grid
box_length = max(box_length, 4) # At least 4
desc = f"Score: {self.score}\n"
for i in self.grid:
desc += "|".join([f"{j: ^{box_length}}" if j else " " * box_length for j in i]) + "\n"
return desc[:-1]
def spawn_random(self): # Adds a random tile 2 or 4.
new = random.choice([2, 4])
empties = [[i, j] for i in range(0, 4) for j in range(0, 4) if not self.grid[i][j]]
chosen = random.choice(empties)
self.grid[chosen[0]][chosen[1]] = new
def input_left(self): # Handles input to the left. Other directions are done through reflection.
for i in range(4): # Let's say self.grid[0] is [4, None, 4, 8]
for j in range(3): # Loop through first three items [4, None, 4]
if self.grid[i][j]:
righters = list(filter(lambda x: x, self.grid[i][(j+1):])) # Values to the right excluding Nones
if righters:
dir_r = righters[0] # First value directly to the right excluding Nones
if self.grid[i][j] == dir_r:
self.grid[i][j] *= 2 # After all iterations should give [8, None, 4, 8]
self.score += self.grid[i][j]
                            # Clear the merged tile: find the first non-None slot to the
                            # right of j in the original row (Nones may sit in between).
                            for k in range(j + 1, 4):
                                if self.grid[i][k] is not None:
                                    self.grid[i][k] = None
                                    break
# After all iterations should give [8, None, None, 8]
self.grid[i] = list(filter(lambda x: x, self.grid[i])) # Remove Nones
self.grid[i].extend([None] * (4 - len(self.grid[i])))
def twod_flip(self):
# ORIGINAL AFTER
# a | b | c | d a | e | i | m
# e | f | g | h b | f | j | n
# i | j | k | l c | g | k | o
# m | n | o | p d | h | l | p
new_grid = []
for j in range(4): # Note that this is iterating through the x-axis
new_grid.append([self.grid[i][j] for i in range(4)])
self.grid = new_grid
def input(self, direction): # Handle user inputs
if direction == Direction.LEFT:
self.input_left()
elif direction == Direction.RIGHT:
for i in range(4):
self.grid[i].reverse()
self.input_left()
for i in range(4):
self.grid[i].reverse()
else:
self.twod_flip()
if direction == Direction.DOWN:
for i in range(4):
self.grid[i].reverse()
self.input_left()
if direction == Direction.DOWN:
for i in range(4):
self.grid[i].reverse()
self.twod_flip()
        self.spawn_random()
game = Game2048()
inp = ""
print(game.pretty_print() + "\n\n")
inp = input("Direction (WASD, or Q to quit): ").lower()
while inp != "q":
if inp in ['w', 'a', 's', 'd']:
        game.input(Direction[inp.upper()])  # Look up the enum member by name instead of eval
print(game.pretty_print())
inp = input("Direction (WASD, or Q to quit): ").lower()
``` |
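A short worked example of the row-merge logic in `input_left`, assuming the `Game2048` class above is available without running the interactive loop at the bottom of the module (for instance by pasting the class into a REPL). The grid values and expected outputs below are illustrative, worked out from the code above.

```python
# Worked example of a single left move on a hand-crafted grid.
g = Game2048()           # spawns two random tiles, which we overwrite below
g.score = 0
g.grid = [[4, None, 4, 8],
          [2, 2, 2, 2],
          [None, None, None, None],
          [None, None, None, None]]
g.input_left()
print(g.grid[0])  # expected: [8, 8, None, None]  (the matching 4s merge, everything slides left)
print(g.grid[1])  # expected: [4, 4, None, None]  (each pair merges once per move)
print(g.score)    # expected: 16                  (8 from the first row + 4 + 4 from the second)
```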
{
"source": "johann-lau/googlesearch",
"score": 3
} |
#### File: googlesearch/googlesearch/googlesearch.py
```python
import urllib.request
import urllib.parse
import math
import re
from bs4 import BeautifulSoup
from pprint import pprint
from threading import Thread
from collections import deque
from time import sleep
class GoogleSearch:
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/ 58.0.3029.81 Safari/537.36"
SEARCH_URL = "https://google.com/search"
RESULT_SELECTOR = "h3.r a"
TOTAL_SELECTOR = "#resultStats"
RESULTS_PER_PAGE = 10
DEFAULT_HEADERS = [
('User-Agent', USER_AGENT),
("Accept-Language", "en-US,en;q=0.5"),
]
def search(self, query, num_results = 10, prefetch_pages = True, prefetch_threads = 10):
searchResults = []
pages = int(math.ceil(num_results / float(GoogleSearch.RESULTS_PER_PAGE)));
fetcher_threads = deque([])
total = None;
for i in range(pages) :
start = i * GoogleSearch.RESULTS_PER_PAGE
opener = urllib.request.build_opener()
opener.addheaders = GoogleSearch.DEFAULT_HEADERS
            response = opener.open(GoogleSearch.SEARCH_URL + "?q=" + urllib.parse.quote(query) + ("" if start == 0 else ("&start=" + str(start))))
soup = BeautifulSoup(response.read(), "lxml")
response.close()
if total is None:
                totalText = next(iter(soup.select(GoogleSearch.TOTAL_SELECTOR)[0].children))
                total = int(re.sub("[', ]", "", re.search("(([0-9]+[', ])*[0-9]+)", totalText).group(1)))
results = self.parseResults(soup.select(GoogleSearch.RESULT_SELECTOR))
if len(searchResults) + len(results) > num_results:
del results[num_results - len(searchResults):]
searchResults += results
if prefetch_pages:
for result in results:
while True:
running = 0
for thread in fetcher_threads:
if thread.is_alive():
running += 1
if running < prefetch_threads:
break
sleep(1)
fetcher_thread = Thread(target=result.getText)
fetcher_thread.start()
fetcher_threads.append(fetcher_thread)
for thread in fetcher_threads:
thread.join()
return SearchResponse(searchResults, total);
def parseResults(self, results):
searchResults = [];
for result in results:
url = result["href"];
title = result.text
searchResults.append(SearchResult(title, url))
return searchResults
class SearchResponse:
def __init__(self, results, total):
self.results = results;
self.total = total;
class SearchResult:
def __init__(self, title, url):
self.title = title
self.url = url
self.__text = None
self.__markup = None
def getText(self):
if self.__text is None:
soup = BeautifulSoup(self.getMarkup(), "lxml")
for junk in soup(["script", "style"]):
junk.extract()
self.__text = soup.get_text()
return self.__text
def getMarkup(self):
if self.__markup is None:
opener = urllib.request.build_opener()
opener.addheaders = GoogleSearch.DEFAULT_HEADERS
response = opener.open(self.url);
self.__markup = response.read()
return self.__markup
def __str__(self):
return str(self.__dict__)
def __unicode__(self):
        return self.__str__()
def __repr__(self):
return self.__str__()
if __name__ == "__main__":
import sys
search = GoogleSearch()
i=1
query = " ".join(sys.argv[1:])
if len(query) == 0:
query = "python"
count = 10
print ("Fetching first " + str(count) + " results for \"" + query + "\"...")
response = search.search(query, count)
print ("TOTAL: " + str(response.total) + " RESULTS")
for result in response.results:
print("RESULT #" +str (i) + ": "+ (result._SearchResult__text if result._SearchResult__text is not None else "[None]") + "\n\n")
i+=1
``` |
{
"source": "JohannLiebert1130/Flask_simple_blog",
"score": 3
} |
#### File: Flask_simple_blog/src/app.py
```python
from src.common.database import Database
from src.models.blog import Blog
from src.models.post import Post
from src.models.user import User
from flask import Flask, render_template, request, session, make_response
app = Flask(__name__) # '__main__'
app.secret_key = "Johann"
@app.route('/')
def home_template():
return render_template('home.html')
@app.route('/login')
def login_template():
return render_template('login.html')
@app.route('/register')
def register_template():
return render_template('register.html')
@app.before_first_request
def initialize_database():
Database.initialize()
@app.route('/auth/login', methods=['POST'])
def login_user():
email = request.form['email']
password = request.form['password']
if User.login_valid(email, password):
User.login(email)
else:
session['email'] = None
return render_template("profile.html", email=session['email'])
@app.route('/auth/register', methods=['POST'])
def register_user():
email = request.form['email']
password = request.form['password']
User.register(email, password)
return render_template("profile.html", email=session['email'])
@app.route('/blogs/<string:user_id>')
@app.route('/blogs')
def user_blogs(user_id=None):
if user_id is not None:
user = User.get_by_id(user_id)
else:
user = User.get_by_email(session['email'])
blogs = user.get_blogs()
return render_template("user_blogs.html", blogs=blogs, email=user.email)
@app.route('/blogs/new', methods=['POST', 'GET'])
def create_new_blog():
if request.method == 'GET':
return render_template('new_blog.html')
else:
title = request.form['title']
description = request.form['description']
user = User.get_by_email(session['email'])
new_blog = Blog(user.email, title, description, user._id)
new_blog.save_to_mongo()
return make_response(user_blogs(user._id))
@app.route('/posts/<string:blog_id>')
def blog_posts(blog_id):
blog = Blog.from_mongo(blog_id)
posts = blog.get_posts()
return render_template('posts.html', posts=posts, blog_title=blog.title, blog_id=blog._id)
@app.route('/posts/new/<string:blog_id>', methods=['POST', 'GET'])
def create_new_post(blog_id):
if request.method == 'GET':
return render_template('new_post.html', blog_id=blog_id)
else:
title = request.form['title']
content = request.form['content']
user = User.get_by_email(session['email'])
new_post = Post(blog_id, title, content, user.email)
new_post.save_to_mongo()
return make_response(blog_posts(blog_id))
if __name__ == '__main__':
app.run(port=4995, debug=True)
``` |
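A hedged sketch of exercising these routes over HTTP with `requests`, assuming the app is running locally on port 4995 as in the `__main__` block and that its MongoDB backend is available. The email, password, and blog fields are made-up values; the form field names are taken from the route handlers above.

```python
# Hypothetical client-side walkthrough of the routes defined above.
import requests

BASE = "http://localhost:4995"
session = requests.Session()  # keeps the Flask session cookie between calls

# Register a user, which also renders the profile page.
session.post(f"{BASE}/auth/register",
             data={"email": "me@example.com", "password": "secret"})

# Log in, create a blog, then list the current user's blogs.
session.post(f"{BASE}/auth/login",
             data={"email": "me@example.com", "password": "secret"})
session.post(f"{BASE}/blogs/new",
             data={"title": "My blog", "description": "First try"})
print(session.get(f"{BASE}/blogs").status_code)  # expected: 200
```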
{
"source": "johannlilly/tf-quant-finance",
"score": 3
} |
#### File: tf_quant_finance/math/piecewise.py
```python
import numpy as np
import tensorflow.compat.v2 as tf
class PiecewiseConstantFunc(object):
"""Creates a piecewise constant function."""
def __init__(self, jump_locations, values, dtype=None, name=None):
r"""Initializes jumps of the piecewise constant function.
Sets jump locations and values for a piecewise constant function.
`jump_locations` split real line into intervals
`[-inf, jump_locations[..., 0]], ..,
[jump_locations[..., i], jump_locations[..., i + 1]], ...,
[jump_locations[..., -1], inf]`
so that the piecewise constant function takes corresponding `values` on the
intervals, i.e.,
```
f(x) = \sum_i values[..., i]
* I_{x \in [jump_locations[..., i -1], jump_locations[..., i])}
```
#### Example. Simple scalar-valued piecewise constant function.
```python
dtype = np.float64
jump_locations = [0.1, 10]
values = [3, 4, 5]
piecewise_func = piecewise.PiecewiseConstantFunc(jump_locations, values,
dtype=dtype)
# Locations at which to evaluate the function assuming it is
# left-continuous.
x = np.array([0., 0.1, 2., 11.])
value = piecewise_func(x)
# Expected values: [3, 3, 4, 5]
integral = piecewise_func.integrate(x, x + 1)
# Expected integrals: [3.9, 4, 4, 5]
```
#### Example. Matrix-valued piecewise constant function.
```python
dtype = np.float64
jump_locations = [0.1, 10]
# The function takes values [[1, 2], [3, 4]] on (-inf, 0.1),
# [[5, 6], [7, 8]] on (0.1, 0.5), and [[9, 10], [11, 12]] on (0.5, +inf).
values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
piecewise_func = piecewise.PiecewiseConstantFunc(
jump_locations, values, dtype=dtype)
# Locations at which to evaluate the function assuming it is
# left-continuous.
x = np.array([0., 0.1, 2, 11])
value = piecewise_func(x)
# Expected values: [[[1, 2], [3, 4]], [[1, 2], [3, 4]],
# [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
integral = piecewise_func.integrate(x, x + 1)
# Expected integrals: [[[4.6, 5.6], [6.6, 7.6]],
# [[5, 6], [7, 8]],
# [[5, 6], [7, 8]],
# [[9, 10], [11, 12]]]
```
Args:
jump_locations: A real `Tensor` of shape
`batch_shape + [num_jump_points]`. The locations where the function
changes its values. Note that the values are expected to be ordered
along the last dimension. Repeated values are allowed but it is
up to the user to ensure that the corresponding `values` are also
repeated.
values: A `Tensor` of the same `dtype` as `jump_locations` and shape
`batch_shape + [num_jump_points + 1] + event_shape`. Defines
`values[batch_rank * slice(None), i]` on intervals
`(jump_locations[..., i - 1], jump_locations[..., i])`. Here
`event_shape` allows for array-valued piecewise constant functions
and `batch_rank = len(batch_shape)`.
dtype: Optional dtype for `jump_locations` and `values`.
Default value: `None` which maps to the default dtype inferred by
TensorFlow.
name: Python `str` name prefixed to ops created by this class.
Default value: `None` which is mapped to the default name
`PiecewiseConstantFunc`.
Raises:
ValueError:
If `jump_locations` and `values` have different batch shapes or,
in case of static shapes, if the event shape of `values` is different
from `num_jump_points + 1`.
"""
self._name = name or 'PiecewiseConstantFunc'
# Add a property that indicates that the class instance is a
# piecewise constant function
self.is_piecewise_constant = True
with tf.compat.v1.name_scope(self._name, values=[jump_locations, values]):
self._jump_locations = tf.convert_to_tensor(jump_locations, dtype=dtype,
name='jump_locations')
self._values = tf.convert_to_tensor(values, dtype=dtype,
name='values')
shape_values = self._values.shape.as_list()
shape_jump_locations = self._jump_locations.shape.as_list()
batch_rank = len(shape_jump_locations[:-1])
self._batch_rank = batch_rank
if shape_values[:batch_rank] != shape_jump_locations[:-1]:
raise ValueError('Batch shapes of `values` and `jump_locations` should '
'be the same but are {0} and {1}'.format(
                                 shape_values[:batch_rank], shape_jump_locations[:-1]))
if shape_values[batch_rank] - 1 != shape_jump_locations[-1]:
raise ValueError('Event shape of `values` should have one more '
'element than the event shape of `jump_locations` '
'but are {0} and {1}'.format(
                                 shape_values[batch_rank], shape_jump_locations[-1]))
def values(self):
"""The value of the piecewise constant function between jump locations."""
return self._values
def jump_locations(self):
"""The jump locations of the piecewise constant function."""
return self._jump_locations
def name(self):
"""The name to give to the ops created by this class."""
return self._name
def __call__(self, x, left_continuous=True, name=None):
"""Computes value of the piecewise constant function.
Returns a value of the piecewise function with jump locations and values
given by the initializer.
Args:
x: A real `Tensor` of shape `batch_shape + [num_points]`. Points at which
the function has to be evaluated.
left_continuous: Python `bool`. Whether the function is left- or right-
continuous, i.e., at the `jump_locations[..., i]` left-continuity means
that the function has the same value
`values[batch_rank * slice(None), i]`, whereas for
right-continuity, the value is
`values[batch_rank * slice(None), i + 1]`.
Default value: `True` which means that the function is left-continuous.
name: Python `str` name prefixed to ops created by this method.
Default value: `None` which is mapped to the default name
`self.name() + _call`.
Returns:
A `Tensor` of the same `dtype` as `x` and shape
`batch_shape + [num_points] + event_shape` containing values of the
piecewise constant function.
Raises:
ValueError:
If `batch_shape` of `x` is incompatible with the batch shape of
`self.jump_locations()`.
"""
with tf.compat.v1.name_scope(name, self._name + '_call', [x]):
x = tf.convert_to_tensor(x, dtype=self._jump_locations.dtype, name='x')
batch_shape = self._jump_locations.shape.as_list()[:-1]
x = _try_broadcast_to(x, batch_shape, name='x')
side = 'left' if left_continuous else 'right'
return _piecewise_constant_function(
x, self._jump_locations, self._values, self._batch_rank, side=side)
def integrate(self, x1, x2, name=None):
"""Integrates the piecewise constant function between end points.
Returns a value of the integral on the interval `[x1, x2]` of a piecewise
constant function with jump locations and values given by the initializer.
Args:
x1: A real `Tensor` of shape `batch_shape + [num_points]`. Left end points
at which the function has to be integrated.
x2: A `Tensor` of the same shape and `dtype` as `x1`. Right end points at
which the function has to be integrated.
name: Python `str` name prefixed to ops created by this method.
Default value: `None` which is mapped to the default name
`self.name() + `_integrate``.
Returns:
A `Tensor` of the same `dtype` as `x` and shape
`batch_shape + [num_points] + event_shape` containing values of the
integral of the piecewise constant function between `[x1, x2]`.
Raises:
ValueError:
If `batch_shape` of `x1` and `x2` are incompatible with the batch shape
of `self.jump_locations()`.
"""
with tf.compat.v1.name_scope(name, self._name + '_integrate', [x1, x2]):
x1 = tf.convert_to_tensor(x1, dtype=self._jump_locations.dtype,
name='x1')
x2 = tf.convert_to_tensor(x2, dtype=self._jump_locations.dtype,
name='x2')
batch_shape = self._jump_locations.shape.as_list()[:-1]
x1 = _try_broadcast_to(x1, batch_shape, name='x1')
            x2 = _try_broadcast_to(x2, batch_shape, name='x2')
return _piecewise_constant_integrate(
x1, x2, self._jump_locations, self._values, self._batch_rank)
def find_interval_index(query_xs,
interval_lower_xs,
last_interval_is_closed=False,
dtype=None,
name=None):
"""Function to find the index of the interval where query points lies.
Given a list of adjacent half-open intervals [x_0, x_1), [x_1, x_2), ...,
[x_{n-1}, x_n), [x_n, inf), described by a list [x_0, x_1, ..., x_{n-1}, x_n].
Return the index where the input query points lie. If x >= x_n, n is returned,
and if x < x_0, -1 is returned. If `last_interval_is_closed` is set to `True`,
the last interval [x_{n-1}, x_n] is interpreted as closed (including x_n).
#### Example
```python
interval_lower_xs = [0.25, 0.5, 1.0, 2.0, 3.0]
query_xs = [0.25, 3.0, 5.0, 0.0, 0.5, 0.8]
result = find_interval_index(query_xs, interval_lower_xs)
# result == [0, 4, 4, -1, 1, 1]
```
Args:
query_xs: Rank 1 real `Tensor` of any size, the list of x coordinates for
which the interval index is to be found. The values must be strictly
increasing.
interval_lower_xs: Rank 1 `Tensor` of the same shape and dtype as
`query_xs`. The values x_0, ..., x_n that define the interval starts.
last_interval_is_closed: If set to `True`, the last interval is interpreted
as closed.
dtype: Optional `tf.Dtype`. If supplied, the dtype for `query_xs` and
`interval_lower_xs`.
Default value: None which maps to the default dtype inferred by TensorFlow
(float32).
name: Optional name of the operation.
Returns:
A tensor that matches the shape of `query_xs` with dtype=int32 containing
the indices of the intervals containing query points. `-1` means the query
point lies before all intervals and `n-1` means that the point lies in the
last half-open interval (if `last_interval_is_closed` is `False`) or that
the point lies to the right of all intervals (if `last_interval_is_closed`
is `True`).
"""
with tf.compat.v1.name_scope(
name,
default_name='find_interval_index',
values=[query_xs, interval_lower_xs, last_interval_is_closed]):
# TODO(b/138988951): add ability to validate that intervals are increasing.
# TODO(b/138988951): validate that if last_interval_is_closed, input size
# must be > 1.
query_xs = tf.convert_to_tensor(query_xs, dtype=dtype)
interval_lower_xs = tf.convert_to_tensor(interval_lower_xs, dtype=dtype)
# Result assuming that last interval is half-open.
indices = tf.searchsorted(interval_lower_xs, query_xs, side='right') - 1
# Handling the branch if the last interval is closed.
last_index = tf.shape(interval_lower_xs)[-1] - 1
last_x = tf.gather(interval_lower_xs, [last_index], axis=-1)
# should_cap is a tensor true where a cell is true iff indices is the last
# index at that cell and the query x <= the right boundary of the last
# interval.
should_cap = tf.logical_and(
tf.equal(indices, last_index), tf.less_equal(query_xs, last_x))
# cap to last_index if the query x is not in the last interval, otherwise,
# cap to last_index - 1.
caps = last_index - tf.cast(should_cap, dtype=tf.dtypes.int32)
return tf.compat.v1.where(last_interval_is_closed,
tf.minimum(indices, caps), indices)
def _piecewise_constant_function(x, jump_locations, values,
batch_rank, side='left'):
"""Computes value of the piecewise constant function."""
# Initializer already verified that `jump_locations` and `values` have the
# same shape
batch_shape = jump_locations.shape.as_list()[:-1]
# Check that the batch shape of `x` is the same as of `jump_locations` and
# `values`
batch_shape_x = x.shape.as_list()[:batch_rank]
if batch_shape_x != batch_shape:
raise ValueError('Batch shape of `x` is {1} but should be {0}'.format(
batch_shape, batch_shape_x))
if x.shape.as_list()[:batch_rank]:
no_batch_shape = False
else:
no_batch_shape = True
x = tf.expand_dims(x, 0)
# Expand batch size to one if there is no batch shape
if not batch_shape:
jump_locations = tf.expand_dims(jump_locations, 0)
values = tf.expand_dims(values, 0)
indices = tf.searchsorted(jump_locations, x, side=side)
index_matrix = _prepare_index_matrix(
indices.shape.as_list()[:-1], indices.shape.as_list()[-1], indices.dtype)
indices_nd = tf.concat(
[index_matrix, tf.expand_dims(indices, -1)], -1)
res = tf.gather_nd(values, indices_nd)
if no_batch_shape:
return tf.squeeze(res, 0)
else:
return res
def _piecewise_constant_integrate(x1, x2, jump_locations, values, batch_rank):
"""Integrates piecewise constant function between `x1` and `x2`."""
# Initializer already verified that `jump_locations` and `values` have the
# same shape.
# Expand batch size to one if there is no batch shape.
if x1.shape.as_list()[:batch_rank]:
no_batch_shape = False
else:
no_batch_shape = True
x1 = tf.expand_dims(x1, 0)
x2 = tf.expand_dims(x2, 0)
if not jump_locations.shape.as_list()[:-1]:
jump_locations = tf.expand_dims(jump_locations, 0)
values = tf.expand_dims(values, 0)
batch_rank += 1
# Compute the index matrix that is later used for `tf.gather_nd`.
index_matrix = _prepare_index_matrix(
x1.shape.as_list()[:-1], x1.shape.as_list()[-1], tf.int32)
# Compute integral values at the jump locations starting from the first jump
# location.
event_shape = values.shape[(batch_rank+1):]
num_data_points = values.shape.as_list()[batch_rank]
diff = jump_locations[..., 1:] - jump_locations[..., :-1]
# Broadcast `diff` to the shape of
# `batch_shape + [num_data_points - 2] + [1] * sample_rank`.
for _ in event_shape:
diff = tf.expand_dims(diff, -1)
slice_indices = batch_rank * [slice(None)]
slice_indices += [slice(1, num_data_points - 1)]
integrals = tf.cumsum(values[slice_indices] * diff, batch_rank)
# Pad integrals with zero values on left and right.
batch_shape = integrals.shape.as_list()[:batch_rank]
zeros = tf.zeros(batch_shape + [1] + event_shape, dtype=integrals.dtype)
integrals = tf.concat([zeros, integrals, zeros], axis=batch_rank)
# Get jump locations and values and the integration end points
value1, jump_location1, indices_nd1 = _get_indices_and_values(
x1, index_matrix, jump_locations, values, 'left', batch_rank)
value2, jump_location2, indices_nd2 = _get_indices_and_values(
x2, index_matrix, jump_locations, values, 'right', batch_rank)
integrals1 = tf.gather_nd(integrals, indices_nd1)
integrals2 = tf.gather_nd(integrals, indices_nd2)
# Broadcast `x1`, `x2`, `jump_location1`, `jump_location2` to the shape
# `batch_shape + [num_points] + [1] * sample_rank`.
for _ in event_shape:
x1 = tf.expand_dims(x1, -1)
x2 = tf.expand_dims(x2, -1)
jump_location1 = tf.expand_dims(jump_location1, -1)
jump_location2 = tf.expand_dims(jump_location2, -1)
# Compute the value of the integral.
res = ((jump_location1 - x1) * value1
+ (x2 - jump_location2) * value2
+ integrals2 - integrals1)
if no_batch_shape:
return tf.squeeze(res, 0)
else:
return res
def _get_indices_and_values(x, index_matrix, jump_locations, values, side,
batch_rank):
"""Computes values and jump locations of the piecewise constant function.
Given `jump_locations` and the `values` on the corresponding segments of the
piecewise constant function, the function identifies the nearest jump to `x`
from the right or left (which is determined by the `side` argument) and the
corresponding value of the piecewise constant function at `x`
Args:
x: A real `Tensor` of shape `batch_shape + [num_points]`. Points at which
the function has to be evaluated.
index_matrix: An `int32` `Tensor` of shape
`batch_shape + [num_points] + [len(batch_shape)]` such that if
`batch_shape = [i1, .., in]`, then for all `j1, ..., jn, l`,
`index_matrix[j1,..,jn, l] = [j1, ..., jn]`.
jump_locations: A `Tensor` of the same `dtype` as `x` and shape
`batch_shape + [num_jump_points]`. The locations where the function
changes its values. Note that the values are expected to be ordered
along the last dimension.
values: A `Tensor` of the same `dtype` as `x` and shape
`batch_shape + [num_jump_points + 1]`. Defines `values[..., i]` on
`jump_locations[..., i - 1], jump_locations[..., i]`.
side: A Python string. Whether the function is left- or right- continuous.
The corresponding values for side should be `left` and `right`.
batch_rank: A Python scalar stating the batch rank of `x`.
Returns:
A tuple of three `Tensor` of the same `dtype` as `x` and shapes
`batch_shape + [num_points] + event_shape`, `batch_shape + [num_points]`,
and `batch_shape + [num_points] + [2 * len(batch_shape)]`. The `Tensor`s
correspond to the values, jump locations at `x`, and the corresponding
indices used to obtain jump locations via `tf.gather_nd`.
"""
indices = tf.searchsorted(jump_locations, x, side=side)
num_data_points = tf.shape(values)[batch_rank] - 2
if side == 'right':
indices_jump = indices - 1
indices_jump = tf.maximum(indices_jump, 0)
else:
indices_jump = tf.minimum(indices, num_data_points)
indices_nd = tf.concat(
[index_matrix, tf.expand_dims(indices, -1)], -1)
indices_jump_nd = tf.concat(
[index_matrix, tf.expand_dims(indices_jump, -1)], -1)
value = tf.gather_nd(values, indices_nd)
jump_location = tf.gather_nd(jump_locations, indices_jump_nd)
return value, jump_location, indices_jump_nd
def _prepare_index_matrix(batch_shape, num_points, dtype):
"""Prepares index matrix for index argument of `tf.gather_nd`."""
batch_shape_reverse = batch_shape.copy()
batch_shape_reverse.reverse()
index_matrix = tf.constant(
np.flip(np.transpose(np.indices(batch_shape_reverse)), -1),
dtype=dtype)
batch_rank = len(batch_shape)
# Broadcast index matrix to the shape of
# `batch_shape + [num_points] + [batch_rank]`.
broadcasted_shape = batch_shape + [num_points] + [batch_rank]
index_matrix = tf.expand_dims(index_matrix, -2) + tf.zeros(
tf.TensorShape(broadcasted_shape), dtype=dtype)
return index_matrix
def _try_broadcast_to(x, batch_shape, name):
"""Broadcasts batch shape of `x` to a `batch_shape` if possible."""
batch_shape_x = x.shape.as_list()[:-1]
if batch_shape_x != batch_shape:
try:
np.broadcast_to(np.zeros(batch_shape_x), batch_shape)
except ValueError:
raise ValueError('Batch shapes of `{2}` should be broadcastable with {0} '
'but it is {1} instead'.format(
batch_shape, batch_shape_x, name))
return tf.broadcast_to(x, batch_shape + x.shape[-1:])
return x
def convert_to_tensor_or_func(x, dtype=None, name=None):
"""Returns either a `PiecewiseConstantFunc` or x converted to a `Tensor`.
Converts the input argument into a `Tensor` unless the input is of type
`PiecewiseConstantFunc` when the input is returned unchanged. This function
helps pricing models to preprocess inputs that can be either constant or time
dependent as represented by a PiecewiseConstantFunc.
Example:
class GeometricBrownianMotion(object)
def __init__(self, mu, sigma):
self._mu, self._mu_is_constant = piecewise.convert_to_tensor_or_func(
mu)
self._sigma, self._sigma_is_constant =
piecewise.convert_to_tensor_or_func(sigma)
Args:
x: Either a 'Tensor' or 'PiecewiseConstantFunc'.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this class.
Default value: `None` which maps to the default name
'PiecewiseConstantFunc_tensor_or_func'.
Returns:
A tuple (y, flag) where y is either a Tensor or PiecewiseConstantFunc
and flag which is False if x is of type PiecewiseConstantFunc and True
otherwise.
"""
name = name or ('PiecewiseConstantFunc_tensor_or_func')
if isinstance(x, PiecewiseConstantFunc):
return x, False
return tf.convert_to_tensor(x, dtype=dtype, name=name), True
``` |
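The docstring example for `find_interval_index` above only covers the default half-open behaviour. Below is a minimal sketch of the `last_interval_is_closed=True` case, assuming the module above is importable (the `tf_quant_finance.math.piecewise` path is an assumption); the expected values in the comments are worked out from the logic shown, not taken from the library's documentation.

```python
# Sketch: half-open vs. closed last interval (import path is an assumption).
from tf_quant_finance.math.piecewise import find_interval_index

interval_lower_xs = [0.25, 0.5, 1.0, 2.0, 3.0]
query_xs = [0.0, 3.0, 5.0]

open_last = find_interval_index(query_xs, interval_lower_xs)
closed_last = find_interval_index(query_xs, interval_lower_xs,
                                  last_interval_is_closed=True)
# open_last   -> [-1, 4, 4]  (3.0 falls into the half-open interval [3.0, inf))
# closed_last -> [-1, 3, 4]  (3.0 is the right endpoint of the closed interval [2.0, 3.0])
```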
{
"source": "JohannLULW/get_time",
"score": 3
} |
#### File: get_time/get_datetime/__init__.py
```python
import datetime
def get_date_year():
current_time = datetime.datetime.now()
return current_time.year
def get_date_month():
current_time = datetime.datetime.now()
return current_time.month
def get_date_day():
current_time = datetime.datetime.now()
return current_time.day
def get_date_DM(character):
current_time = datetime.datetime.now()
    return str(current_time.day) + character + str(current_time.month)
def get_date_MD(character):
current_time = datetime.datetime.now()
    return str(current_time.month) + character + str(current_time.day)
def get_date_DMY(character):
current_time = datetime.datetime.now()
return str(current_time.day) + character + str(current_time.month) + character + str(current_time.year)
def get_date_MDY(character):
current_time = datetime.datetime.now()
return str(current_time.month) + character + str(current_time.day) + character + str(current_time.year)
def get_time_hour():
current_time = datetime.datetime.now()
return current_time.hour
def get_time_minute():
current_time = datetime.datetime.now()
return current_time.minute
def get_time_second():
current_time = datetime.datetime.now()
return current_time.second
def get_time_HM(character):
current_time = datetime.datetime.now()
return str(current_time.hour) + character + str(current_time.minute)
def get_time_HMS(character):
current_time = datetime.datetime.now()
return str(current_time.hour) + character + str(current_time.minute) + character + str(current_time.second)
``` |
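A small usage sketch, assuming the package directory above is importable as `get_datetime`. Note that each piece is a plain `str(int)` concatenation, so there is no zero padding; the printed values depend on the wall clock and are only examples.

```python
# Output depends on the current date and time; the comments show example values only.
import get_datetime

print(get_datetime.get_date_DMY("/"))  # e.g. "5/3/2021" (day/month/year, no zero padding)
print(get_datetime.get_date_MD("-"))   # e.g. "3-5"
print(get_datetime.get_time_HMS(":"))  # e.g. "9:4:7"
```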
{
"source": "johannmartin/shapash",
"score": 2
} |
#### File: shapash/explainer/smart_predictor.py
```python
from shapash.utils.check import check_consistency_model_features, check_consistency_model_label
from shapash.utils.check import check_model, check_preprocessing, check_preprocessing_options
from shapash.utils.check import check_label_dict, check_mask_params, check_ypred, check_contribution_object,\
check_features_name
from .smart_state import SmartState
from .multi_decorator import MultiDecorator
import pandas as pd
from shapash.utils.transform import adapt_contributions
from shapash.utils.shap_backend import check_explainer, shap_contributions
from shapash.manipulation.select_lines import keep_right_contributions
from shapash.utils.model import predict_proba
from shapash.utils.io import save_pickle
from shapash.utils.transform import apply_preprocessing, apply_postprocessing
from shapash.manipulation.filters import hide_contributions
from shapash.manipulation.filters import cap_contributions
from shapash.manipulation.filters import sign_contributions
from shapash.manipulation.filters import cutoff_contributions
from shapash.manipulation.filters import combine_masks
from shapash.manipulation.mask import init_mask
from shapash.manipulation.mask import compute_masked_contributions
from shapash.manipulation.summarize import summarize
from shapash.decomposition.contributions import rank_contributions, assign_contributions
class SmartPredictor :
"""
    The SmartPredictor class is a lighter object than the SmartExplainer object, with
    additional consistency checks.
    The SmartPredictor object is provided to deploy the summary of local explanation
    for operational needs.
    Switching from SmartExplainer to SmartPredictor allows users to reproduce
    the same results automatically on datasets with the right structure.
SmartPredictor is designed to make new results understandable:
- It checks consistency of all parameters
- It applies preprocessing and postprocessing
        - It computes model contributions
- It makes predictions
- It summarizes local explainability
This class allows the user to automatically summarize the results of his model
on new datasets (prediction, preprocessing and postprocessing linking,
explainability).
The SmartPredictor has several methods described below.
The SmartPredictor Attributes :
features_dict: dict
Dictionary mapping technical feature names to domain names.
model: model object
        model object used to perform predictions and, for classification, to estimate predict_proba values
explainer: explainer object
explainer must be a shap object (TreeExplainer, LinearExplainer, KernelExplainer)
columns_dict: dict
Dictionary mapping integer column number (in the same order of the trained dataset) to technical feature names.
features_types: dict
Dictionary mapping features with the right types needed.
label_dict: dict (optional)
Dictionary mapping integer labels to domain names (classification - target values).
preprocessing: category_encoders, ColumnTransformer, list or dict (optional)
The processing apply to the original data.
postprocessing: dict (optional)
Dictionary of postprocessing modifications to apply in x_pred dataframe.
_case: string
String that informs if the model used is for classification or regression problem.
_classes: list, None
List of labels if the model used is for classification problem, None otherwise.
mask_params: dict (optional)
Dictionary that specify how to summarize the explainability.
How to declare a new SmartPredictor object?
Example
-------
>>> predictor = SmartPredictor(features_dict=my_features_dict,
>>> model=my_model,
>>> explainer=my_explainer,
>>> columns_dict=my_columns_dict,
>>> features_types=my_features_type_dict,
>>> label_dict=my_label_dict,
>>> preprocessing=my_preprocess,
>>> postprocessing=my_postprocess)
or the most common syntax
>>> predictor = xpl.to_smartpredictor()
xpl, explainer: object
SmartExplainer instance to point to.
"""
def __init__(self, features_dict, model,
columns_dict, explainer, features_types,
label_dict=None, preprocessing=None,
postprocessing=None,
mask_params = {"features_to_hide": None,
"threshold": None,
"positive": None,
"max_contrib": None
}
):
params_dict = [features_dict, features_types, label_dict, columns_dict, postprocessing]
for params in params_dict:
if params is not None and isinstance(params, dict) == False:
raise ValueError(
"""
{0} must be a dict.
""".format(str(params))
)
self.model = model
self._case, self._classes = self.check_model()
self.explainer = self.check_explainer(explainer)
check_preprocessing_options(preprocessing)
self.preprocessing = preprocessing
self.check_preprocessing()
self.features_dict = features_dict
self.features_types = features_types
self.label_dict = label_dict
self.check_label_dict()
self.columns_dict = columns_dict
self.mask_params = mask_params
self.check_mask_params()
self.postprocessing = postprocessing
check_consistency_model_features(self.features_dict, self.model, self.columns_dict,
self.features_types, self.mask_params, self.preprocessing,
self.postprocessing)
check_consistency_model_label(self.columns_dict, self.label_dict)
def check_model(self):
"""
        Check that the model is supported and determine whether the use case
        is a regression or a classification problem.
Returns
-------
string:
'regression' or 'classification' according to the attributes of the model
"""
_case, _classes = check_model(self.model)
return _case, _classes
def check_preprocessing(self):
"""
Check that all transformation of the preprocessing are supported.
"""
return check_preprocessing(self.preprocessing)
def check_label_dict(self):
"""
Check if label_dict and model _classes match
"""
if self._case != "regression":
return check_label_dict(self.label_dict, self._case, self._classes)
def check_mask_params(self):
"""
Check if mask_params given respect the expected format.
"""
return check_mask_params(self.mask_params)
def add_input(self, x=None, ypred=None, contributions=None):
"""
The add_input method is the first step to add a dataset for prediction and explainability.
        add_input applies to the x parameter:
        - consistency checks
        - the preprocessing and postprocessing specified during initialisation
        - feature reordering into the order expected by the model
        If you don't specify ypred or contributions, add_input computes them.
        It is possible to omit a parameter if it has already been defined before.
        For example, the user can specify a new ypred without reinitialising the dataset x already defined.
        If the user declares a new input x, all the stored parameters are cleaned.
Example
--------
>>> predictor.add_input(x=xtest_df)
>>> predictor.add_input(ypred=ytest_df)
Parameters
----------
x: dict, pandas.DataFrame (optional)
Raw dataset used by the model to perform the prediction (not preprocessed).
ypred: pandas.DataFrame (optional)
User-specified prediction values.
contributions: pandas.DataFrame (regression) or list (classification) (optional)
local contributions aggregated if the preprocessing part requires it (e.g. one-hot encoding).
"""
if x is not None:
x = self.check_dataset_features(self.check_dataset_type(x))
self.data = self.clean_data(x)
self.data["x_postprocessed"] = self.apply_postprocessing()
try :
self.data["x_preprocessed"] = self.apply_preprocessing()
except BaseException :
raise ValueError(
"""
                    Preprocessing has failed. The specified preprocessing and the dataset don't match.
"""
)
else:
if not hasattr(self,"data"):
raise ValueError ("No dataset x specified.")
if ypred is not None:
self.data["ypred_init"] = self.check_ypred(ypred)
if contributions is not None:
self.data["ypred"], self.data["contributions"] = self.compute_contributions(contributions=contributions)
else:
self.data["ypred"], self.data["contributions"] = self.compute_contributions()
def check_dataset_type(self, x=None):
"""
Check if dataset x given respect the expected format.
Parameters
----------
x: dict, pandas.DataFrame (optional)
Raw dataset used by the model to perform the prediction (not preprocessed).
Returns
-------
x: pandas.DataFrame
Raw dataset used by the model to perform the prediction (not preprocessed).
"""
if not (type(x) in [pd.DataFrame, dict]):
raise ValueError(
"""
x must be a dict or a pandas.DataFrame.
"""
)
else :
x = self.convert_dict_dataset(x)
return x
def convert_dict_dataset(self, x):
"""
Convert a dict to a dataframe if the dataset specified is a dict.
Parameters
----------
x: dict
Raw dataset used by the model to perform the prediction (not preprocessed).
Returns
-------
x: pandas.DataFrame
Raw dataset used by the model to perform the prediction (not preprocessed).
"""
if type(x) == dict:
if not all([column in self.features_types.keys() for column in x.keys()]):
raise ValueError("""
All features from dataset x must be in the features_types dict initialized.
""")
try:
x = pd.DataFrame.from_dict(x, orient="index").T
for feature, type_feature in self.features_types.items():
x[feature] = x[feature].astype(type_feature)
except BaseException:
raise ValueError(
"""
The structure of the given dict x isn't at the right format.
"""
)
return x
def check_dataset_features(self, x):
"""
Check if the features of the dataset x has the expected types before using preprocessing and model.
Parameters
----------
x: pandas.DataFrame (optional)
Raw dataset used by the model to perform the prediction (not preprocessed).
"""
assert all(column in self.columns_dict.values() for column in x.columns)
if not all([type(key) == int for key in self.columns_dict.keys()]):
raise ValueError("columns_dict must have only integers keys for features order.")
features_order = []
for order in range(min(self.columns_dict.keys()), max(self.columns_dict.keys()) + 1):
features_order.append(self.columns_dict[order])
x = x[features_order]
assert all(column in self.features_types.keys() for column in x.columns)
if not all([str(x[feature].dtypes) == self.features_types[feature] for feature in x.columns]):
raise ValueError("Types of features in x doesn't match with the expected one in features_types.")
return x
def check_ypred(self, ypred=None):
"""
Check that ypred given has the right shape and expected value.
Parameters
----------
ypred: pandas.DataFrame (optional)
User-specified prediction values.
"""
return check_ypred(self.data["x"],ypred)
def choose_state(self, contributions):
"""
Select implementation of the smart predictor. Typically check if it is a
multi-class problem, in which case the implementation should be adapted
to lists of contributions.
Parameters
----------
contributions : object
Local contributions. Could also be a list of local contributions.
Returns
-------
object
SmartState or SmartMultiState, depending on the nature of the input.
"""
if isinstance(contributions, list):
return MultiDecorator(SmartState())
else:
return SmartState()
def adapt_contributions(self, contributions):
"""
If _case is "classification" and contributions a np.array or pd.DataFrame
this function transform contributions matrix in a list of 2 contributions
matrices: Opposite contributions and contributions matrices.
Parameters
----------
contributions : pandas.DataFrame, np.ndarray or list
Returns
-------
pandas.DataFrame, np.ndarray or list
contributions object modified
"""
return adapt_contributions(self._case, contributions)
def validate_contributions(self, contributions):
"""
Check len of list if _case is "classification"
Check contributions object type if _case is "regression"
Check type of contributions and transform into (list of) pd.Dataframe if necessary
Parameters
----------
contributions : pandas.DataFrame, np.ndarray or list
Returns
-------
pandas.DataFrame or list
"""
check_contribution_object(self._case, self._classes, contributions)
return self.state.validate_contributions(contributions, self.data["x_preprocessed"])
def check_contributions(self, contributions):
"""
Check if contributions and prediction set match in terms of shape and index.
"""
if not self.state.check_contributions(contributions, self.data["x"], features_names=False):
raise ValueError(
"""
Prediction set and contributions should have exactly the same number of lines
and number of columns. the order of the columns must be the same
Please check x, contributions and preprocessing arguments.
"""
)
def clean_data(self, x):
"""
Clean data stored if x is defined and not None.
Parameters
----------
x: pandas.DataFrame
Raw dataset used by the model to perform the prediction (not preprocessed).
Returns
-------
dict of data stored
"""
return {"x" : x,
"ypred_init": None,
"ypred" : None,
"contributions" : None,
"x_preprocessed": None,
"x_postprocessed": None
}
def check_explainer(self, explainer):
"""
Check if explainer class correspond to a shap explainer object
"""
return check_explainer(explainer)
def predict_proba(self):
"""
        The predict_proba method computes the probabilities predicted for each x row defined in add_input.
Returns
-------
pandas.DataFrame
A dataset with all probabilities of each label if there is no ypred data or a dataset with ypred and the associated probability.
Example
--------
>>> predictor.add_input(x=xtest_df)
>>> predictor.predict_proba()
"""
return predict_proba(self.model, self.data["x_preprocessed"], self._classes)
def compute_contributions(self, contributions=None):
"""
        The compute_contributions method computes the contributions associated with the specified ypred data.
        Needs a ypred dataset specified via add_input to display detail_contributions.
Parameters
-------
contributions : object (optional)
Local contributions, or list of local contributions.
Returns
-------
pandas.DataFrame
Data with contributions associated to the ypred specified.
pandas.DataFrame
ypred data with right probabilities associated.
"""
if not hasattr(self, "data"):
raise ValueError("add_input method must be called at least once.")
if self.data["x"] is None:
raise ValueError(
"""
x must be specified in an add_input method to apply detail_contributions.
"""
)
if self.data["ypred_init"] is None:
self.predict()
if contributions is None:
contributions, explainer = shap_contributions(self.model,
self.data["x_preprocessed"],
self.explainer)
adapt_contrib = self.adapt_contributions(contributions)
self.state = self.choose_state(adapt_contrib)
contributions = self.validate_contributions(adapt_contrib)
contributions = self.apply_preprocessing_for_contributions(contributions,
self.preprocessing
)
self.check_contributions(contributions)
proba_values = self.predict_proba() if self._case == "classification" else None
y_pred, match_contrib = keep_right_contributions(self.data["ypred_init"], contributions,
self._case, self._classes,
self.label_dict, proba_values)
return y_pred, match_contrib
def detail_contributions(self, contributions=None):
"""
The detail_contributions method associates the right contributions with the right predicted data
(with ypred specified in add_input or computed automatically).
Parameters
-------
contributions : object (optional)
Local contributions, or list of local contributions.
Returns
-------
pandas.DataFrame
A dataset with ypred and the right associated contributions.
Example
--------
>>> predictor.add_input(x=xtest_df)
>>> predictor.detail_contributions()
"""
y_pred, detail_contrib = self.compute_contributions(contributions=contributions)
return pd.concat([y_pred, detail_contrib], axis=1)
def apply_preprocessing_for_contributions(self, contributions, preprocessing=None):
"""
Reconstruct contributions for the original features, taking the preprocessing into account.
Parameters
----------
contributions : object
Local contributions, or list of local contributions.
preprocessing : object
Encoder taken from scikit-learn or category_encoders
Returns
-------
object
Reconstructed local contributions in the original space. Can be a list.
"""
if preprocessing:
return self.state.inverse_transform_contributions(
contributions,
preprocessing
)
else:
return contributions
def save(self, path):
"""
The save method allows users to save the SmartPredictor object to disk as a pickle file.
This can be useful: you don't have to recompile to display results later.
The load_smartpredictor method allows you to load a saved SmartPredictor object (see example below).
Parameters
----------
path : str
File path to store the pickle file
Example
--------
>>> predictor.save('path_to_pkl/predictor.pkl')
>>> from shapash.utils.load_smartpredictor import load_smartpredictor
>>> predictor_load = load_smartpredictor('path_to_pkl/predictor.pkl')
"""
dict_to_save = {}
for att in self.__dict__.keys():
if (isinstance(getattr(self, att), (list, dict, pd.DataFrame, pd.Series, type(None))) or att == "model"
or att == "explainer" or att == "preprocessing") and not att == "data" :
dict_to_save.update({att: getattr(self, att)})
save_pickle(dict_to_save, path)
def apply_preprocessing(self):
"""
Apply preprocessing to the new input dataset specified.
"""
return apply_preprocessing(self.data["x"], self.model, self.preprocessing)
def filter(self):
"""
The filter method summarizes the local explainability by applying the
user-defined mask_params parameters that correspond to the use case.
"""
mask = [init_mask(self.summary['contrib_sorted'], True)]
if self.mask_params["features_to_hide"] is not None:
mask.append(
hide_contributions(
self.summary['var_dict'],
features_list=self.check_features_name(self.mask_params["features_to_hide"])
)
)
if self.mask_params["threshold"] is not None:
mask.append(
cap_contributions(
self.summary['contrib_sorted'],
threshold=self.mask_params["threshold"]
)
)
if self.mask_params["positive"] is not None:
mask.append(
sign_contributions(
self.summary['contrib_sorted'],
positive=self.mask_params["positive"]
)
)
self.mask = combine_masks(mask)
if self.mask_params["max_contrib"] is not None:
self.mask = cutoff_contributions(mask=self.mask, k=self.mask_params["max_contrib"])
self.masked_contributions = compute_masked_contributions(
self.summary['contrib_sorted'],
self.mask
)
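# Illustration (not part of the original source): with mask_params such as
# {"positive": True, "max_contrib": 2}, filter() keeps, for each row, only the
# positive contributions and at most the two largest ones; the total of the
# hidden contributions is accumulated in self.masked_contributions.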
def summarize(self):
"""
The summarize method displays the summary of local explainability.
It can be configured with the modify_mask method to tailor the explainability summary to your needs.
If modify_mask is not used, the summarize method uses the mask_params parameters specified during
the initialisation of the SmartPredictor.
In the classification case, the summarize method summarizes the explainability which corresponds to:
- the predicted values specified by the user or computed automatically (with the add_input method)
- the right probabilities from predict_proba associated with the right predicted values
- the right contributions, ranked and filtered as specified with the modify_mask method
Returns
-------
pandas.DataFrame
- the selected explanation of each row in the classification case
Examples
--------
>>> summary_df = predictor.summarize()
>>> summary_df
pred proba feature_1 value_1 contribution_1 feature_2 value_2 contribution_2
0 0 0.756416 Sex 1.0 0.322308 Pclass 3.0 0.155069
1 3 0.628911 Sex 2.0 0.585475 Pclass 1.0 0.370504
2 0 0.543308 Sex 2.0 -0.486667 Pclass 3.0 0.255072
>>> predictor.modify_mask(max_contrib=1)
>>> summary_df = predictor.summarize()
>>> summary_df
pred proba feature_1 value_1 contribution_1
0 0 0.756416 Sex 1.0 0.322308
1 3 0.628911 Sex 2.0 0.585475
2 0 0.543308 Sex 2.0 -0.486667
"""
# data is needed : add_input() method must be called at least once
if not hasattr(self, "data"):
raise ValueError("You have to specify dataset x and y_pred arguments. Please use add_input() method.")
self.summary = assign_contributions(
rank_contributions(
self.data["contributions"],
self.data["x_postprocessed"]
)
)
# Apply filter method with mask_params attributes parameters
self.filter()
# Summarize information
self.data['summary'] = summarize(self.summary['contrib_sorted'],
self.summary['var_dict'],
self.summary['x_sorted'],
self.mask,
self.columns_dict,
self.features_dict)
# Matching with y_pred
return pd.concat([self.data["ypred"], self.data['summary']], axis=1)
def modify_mask(
self,
features_to_hide=None,
threshold=None,
positive=None,
max_contrib=None
):
"""
This method allows users to modify the mask_params values.
Each parameter is optional; modify_mask only modifies the values specified in its parameters.
Use this method to configure the summary displayed with the summarize method.
Parameters
----------
features_to_hide : list, optional (default: None)
List of strings, containing features to hide.
threshold : float, optional (default: None)
Absolute threshold below which any contribution is hidden.
positive: bool, optional (default: None)
If True, hide negative values; if False, hide positive values.
If None, hide nothing.
max_contrib : int, optional (default: None)
Maximum number of contributions to show.
Examples
--------
>>> predictor.modify_mask(max_contrib=1)
>>> summary_df = predictor.summarize()
>>> summary_df
pred proba feature_1 value_1 contribution_1
0 0 0.756416 Sex 1.0 0.322308
1 3 0.628911 Sex 2.0 0.585475
2 0 0.543308 Sex 2.0 -0.486667
"""
Attributes = {"features_to_hide": features_to_hide,
"threshold": threshold,
"positive": positive,
"max_contrib": max_contrib}
for label, attribute in Attributes.items() :
if attribute is not None:
self.mask_params[label] = attribute
def predict(self):
"""
The predict method computes the predicted values for each row of x defined in add_input.
Returns
-------
pandas.DataFrame
A dataset with predicted values for each x row.
Example
--------
>>> predictor.add_input(x=xtest_df)
>>> predictor.predict()
"""
if not hasattr(self, "data"):
raise ValueError("add_input method must be called at least once.")
if self.data["x_preprocessed"] is None:
raise ValueError(
"""
x must be specified in an add_input method to apply predict.
"""
)
if hasattr(self.model, 'predict'):
self.data["ypred_init"] = pd.DataFrame(
self.model.predict(self.data["x_preprocessed"]),
columns=['ypred'],
index=self.data["x_preprocessed"].index)
else:
raise ValueError("model has no predict method")
return self.data["ypred_init"]
def apply_postprocessing(self):
"""
Modifies the x DataFrame according to the postprocessing modifications, if any.
Parameters
----------
postprocessing : dict
Dictionary of postprocessing modifications to apply to x.
Returns
-------
pandas.DataFrame
Returns x unchanged if postprocessing is empty, the modified DataFrame otherwise.
"""
if self.postprocessing:
return apply_postprocessing(self.data["x"], self.postprocessing)
else:
return self.data["x"]
def check_features_name(self, features):
"""
Convert a list of feature names (strings) or feature ids into feature ids.
Feature names can be part of columns_dict or features_dict.
Parameters
----------
features : List
List of ints (column ids) or strings (business names)
Returns
-------
list of ints
Columns ids compatible with var_dict
"""
return check_features_name(self.columns_dict, self.features_dict, features)
```
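Taken together, the SmartPredictor methods above form a single consumption workflow: register the data, get detailed contributions, tune the mask, then summarize. A minimal usage sketch, assuming a compiled SmartExplainer `xpl` and a raw feature DataFrame `xtest_df` (both hypothetical, names chosen for illustration):
```python
# Minimal usage sketch (assumed objects: a compiled SmartExplainer `xpl`
# and a raw feature DataFrame `xtest_df` whose columns match features_types).
predictor = xpl.to_smartpredictor()
predictor.add_input(x=xtest_df)                       # register the data to explain
detailed = predictor.detail_contributions()           # ypred (+ proba) with one contribution per feature
predictor.modify_mask(positive=True, max_contrib=3)   # tune the mask used by summarize()
summary = predictor.summarize()                       # compact, filtered explanation per row
predictor.save('predictor.pkl')                       # reload later with load_smartpredictor()
```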
#### File: webapp/utils/utils.py
```python
import pandas as pd
import numpy as np
from math import log10, floor
def round_to_1(x):
"""
Round a float to 1 significant figure.
Parameters
----------
x : float
number to round
Returns
-------
int
"""
if x == 0:
return 0
else:
return round(x, -int(floor(log10(abs(x)))))
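# Illustration (not in the original source): round_to_1(0.0473) -> 0.05,
# round_to_1(1234) -> 1000, round_to_1(0) -> 0.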
def check_row(data, index):
"""
Identify the row number of the datatable for a specific index.
Parameters
----------
data : dash_table.DataTable.data
data displayed in the datatable
index : int or str
index from the dataset to identify
Returns
-------
int:
row number corresponding to index
"""
df = pd.DataFrame.from_records(data, index='_index_')
if np.issubdtype(type(df.index[0]), np.dtype(int).type):
index = int(index)
row = df.index.get_loc(index) if index in list(df.index) else None
return row
def split_filter_part(filter_part):
"""
Transform a dash.datatable filter part into a pandas.DataFrame filter (source code: Dash documentation)
Parameters
----------
filter_part : str
filter applied to a column of the datatable
Returns
-------
tuple :
column, operator, value of the filter part
"""
operators = [['ge ', '>='],
['le ', '<='],
['lt ', '<'],
['gt ', '>'],
['ne ', '!='],
['eq ', '='],
['contains '],
['datestartswith ']]
for operator_type in operators:
for operator in operator_type:
if operator in filter_part:
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
value_part = value_part.strip()
v0 = value_part[0]
if v0 == value_part[-1] and v0 in ("'", '"', '`'):
value = value_part[1: -1].replace('\\' + v0, v0)
else:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, operator_type[0].strip(), value
return [None] * 3
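# Illustration (not in the original source): split_filter_part('{age} ge 30')
# returns ('age', 'ge', 30.0); split_filter_part('{city} contains "paris"')
# returns ('city', 'contains', 'paris').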
def apply_filter(df, filter_query):
"""
Apply a filter query from dash.datatable to a pandas.DataFrame (source code: Dash documentation)
Parameters
----------
df : pandas.DataFrame
DataFrame to be filtered
filter_query : dcc.datatable.filter_query
query from dcc.datatable to apply to the DataFrame
Returns
-------
pandas.DataFrame
"""
filtering_expressions = filter_query.split(' && ')
for filter_part in filtering_expressions:
col_name, operator, filter_value = split_filter_part(filter_part)
if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
# these operators match pandas series operator method names
df = df.loc[getattr(df[col_name], operator)(filter_value)]
elif operator == 'contains':
df = df.loc[df[col_name].str.contains(filter_value)]
elif operator == 'datestartswith':
# this is a simplification of the front-end filtering logic,
# only works with complete fields in standard format
df = df.loc[df[col_name].str.startswith(filter_value)]
return df
```
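The two helpers above are meant to be chained: apply_filter splits a Dash filter_query into parts with split_filter_part and applies each one to the DataFrame. A minimal, self-contained sketch (the column names and query are illustrative, and the import path is assumed to mirror the repository layout):
```python
import pandas as pd
# Assumed import path for the module above.
from shapash.webapp.utils.utils import apply_filter

# Hypothetical data and Dash-style filter query, for illustration only.
df = pd.DataFrame({"age": [25, 39, 50], "city": ["paris", "lyon", "paris"]})
query = '{age} ge 30 && {city} contains "paris"'

filtered = apply_filter(df, query)
# Keeps the single row where age >= 30 and city contains "paris".
print(filtered)
```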
#### File: unit_tests/explainer/test_smart_predictor.py
```python
import unittest
from shapash.explainer.smart_predictor import SmartPredictor
from shapash.explainer.smart_explainer import SmartExplainer
from shapash.explainer.smart_state import SmartState
from shapash.explainer.multi_decorator import MultiDecorator
import os
from os import path
from pathlib import Path
import pandas as pd
import numpy as np
import catboost as cb
from catboost import Pool
import category_encoders as ce
from unittest.mock import patch
import types
from sklearn.compose import ColumnTransformer
import sklearn.preprocessing as skp
import shap
def init_sme_to_pickle_test():
"""
Init a SmartExplainer and its SmartPredictor for the pickle test.
Returns
-------
str
Path of the pickle file used by the test.
SmartPredictor
Predictor built from the compiled SmartExplainer.
"""
current = Path(path.abspath(__file__)).parent.parent.parent
pkl_file = path.join(current, 'data/predictor.pkl')
xpl = SmartExplainer(features_dict={})
y_pred = pd.DataFrame(data=np.array([1, 2]), columns=['pred'])
dataframe_x = pd.DataFrame([[1, 2, 4], [1, 2, 3]])
clf = cb.CatBoostClassifier(n_estimators=1).fit(dataframe_x, y_pred)
xpl.compile(x=dataframe_x, y_pred=y_pred, model=clf)
predictor = xpl.to_smartpredictor()
return pkl_file, predictor
class TestSmartPredictor(unittest.TestCase):
"""
Unit test Smart Predictor class
"""
def setUp(self):
df = pd.DataFrame(range(0, 5), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 2 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = ["S", "M", "S", "D", "M"]
df = df.set_index('id')
encoder = ce.OrdinalEncoder(cols=["x2"], handle_unknown="None")
encoder_fitted = encoder.fit(df)
df_encoded = encoder_fitted.transform(df)
clf = cb.CatBoostClassifier(n_estimators=1).fit(df_encoded[['x1', 'x2']], df_encoded['y'])
clf_explainer = shap.TreeExplainer(clf)
columns_dict = {0: "x1", 1: "x2"}
label_dict = {0: "Yes", 1: "No"}
postprocessing = {"x2": {
"type": "transcoding",
"rule": {"S": "single", "M": "married", "D": "divorced"}}}
features_dict = {"x1": "age", "x2": "family_situation"}
features_types = {features: str(df[features].dtypes) for features in df[['x1', 'x2']]}
self.df_1 = df
self.preprocessing_1 = encoder_fitted
self.df_encoded_1 = df_encoded
self.clf_1 = clf
self.clf_explainer_1 = clf_explainer
self.columns_dict_1 = columns_dict
self.label_dict_1 = label_dict
self.postprocessing_1 = postprocessing
self.features_dict_1 = features_dict
self.features_types_1 = features_types
self.predictor_1 = SmartPredictor(features_dict, clf,
columns_dict, clf_explainer, features_types, label_dict,
encoder_fitted, postprocessing)
df['x2'] = np.random.randint(1, 100, df.shape[0])
encoder = ce.OrdinalEncoder(cols=["x2"], handle_unknown="None")
encoder_fitted = encoder.fit(df[["x1", "x2"]])
df_encoded = encoder_fitted.transform(df[["x1", "x2"]])
clf = cb.CatBoostClassifier(n_estimators=1).fit(df[['x1', 'x2']], df['y'])
clf_explainer = shap.TreeExplainer(clf)
features_dict = {"x1": "age", "x2": "weight"}
features_types = {features: str(df[features].dtypes) for features in df[["x1", "x2"]].columns}
self.df_2 = df
self.preprocessing_2 = encoder_fitted
self.df_encoded_2 = df_encoded
self.clf_2 = clf
self.clf_explainer_2 = clf_explainer
self.columns_dict_2 = columns_dict
self.label_dict_2 = label_dict
self.postprocessing_2 = postprocessing
self.features_dict_2 = features_dict
self.features_types_2 = features_types
self.predictor_2 = SmartPredictor(features_dict, clf,
columns_dict, clf_explainer, features_types, label_dict,
encoder_fitted, postprocessing)
df['x1'] = [25, 39, 50, 43, 67]
df['x2'] = [90, 78, 84, 85, 53]
columns_dict = {0: "x1", 1: "x2"}
label_dict = {0: "No", 1: "Yes"}
features_dict = {"x1": "age", "x2": "weight"}
features_types = {features: str(df[features].dtypes) for features in df[['x1', 'x2']].columns}
clf = cb.CatBoostRegressor(n_estimators=1).fit(df[['x1', 'x2']], df['y'])
clf_explainer = shap.TreeExplainer(clf)
self.df_3 = df
self.preprocessing_3 = None
self.df_encoded_3 = df
self.clf_3 = clf
self.clf_explainer_3 = clf_explainer
self.columns_dict_3 = columns_dict
self.label_dict_3 = label_dict
self.postprocessing_3 = None
self.features_dict_3 = features_dict
self.features_types_3 = features_types
self.predictor_3 = SmartPredictor(features_dict, clf,
columns_dict, clf_explainer,
features_types, label_dict)
def predict_proba(self, arg1, arg2):
"""
predict_proba method
"""
matrx = np.array(
[[0.2, 0.8],
[0.3, 0.7],
[0.4, 0.6]]
)
return matrx
def predict(self, arg1, arg2):
"""
predict method
"""
matrx = np.array(
[12, 3, 7]
)
return matrx
def test_init_1(self):
"""
Test init smart predictor
"""
predictor_1 = SmartPredictor(self.features_dict_1,
self.clf_1,
self.columns_dict_1,
self.clf_explainer_1,
self.features_types_1,
self.label_dict_1,
self.preprocessing_1,
self.postprocessing_1)
assert hasattr(predictor_1, 'model')
assert hasattr(predictor_1, 'explainer')
assert hasattr(predictor_1, 'features_dict')
assert hasattr(predictor_1, 'label_dict')
assert hasattr(predictor_1, '_case')
assert hasattr(predictor_1, '_classes')
assert hasattr(predictor_1, 'columns_dict')
assert hasattr(predictor_1, 'features_types')
assert hasattr(predictor_1, 'preprocessing')
assert hasattr(predictor_1, 'postprocessing')
assert hasattr(predictor_1, 'mask_params')
assert predictor_1.model == self.clf_1
assert predictor_1.explainer == self.clf_explainer_1
assert predictor_1.features_dict == self.features_dict_1
assert predictor_1.label_dict == self.label_dict_1
assert predictor_1._case == "classification"
assert predictor_1._classes == [0,1]
assert predictor_1.columns_dict == self.columns_dict_1
assert predictor_1.preprocessing == self.preprocessing_1
assert predictor_1.postprocessing == self.postprocessing_1
mask_params = {'features_to_hide': None,
'threshold': None,
'positive': True,
'max_contrib': 1
}
predictor_1.mask_params = mask_params
assert predictor_1.mask_params == mask_params
def add_input_1(self):
"""
Test add_input method from smart predictor
"""
ypred = self.df_1['y']
shap_values = self.clf_1.get_feature_importance(Pool(self.df_encoded_1), type="ShapValues")
predictor_1 = self.predictor_1
predictor_1.add_input(x=self.df_1[["x1", "x2"]], contributions=shap_values[:, :-1])
predictor_1_contrib = predictor_1.data["contributions"]
assert all(attribute in predictor_1.data.keys()
for attribute in ["x", "x_preprocessed", "contributions", "ypred"])
assert predictor_1.data["x"].shape == predictor_1.data["x_preprocessed"].shape
assert all(feature in predictor_1.data["x"].columns
for feature in predictor_1.data["x_preprocessed"].columns)
assert predictor_1_contrib.shape == predictor_1.data["x"].shape
predictor_1.add_input(ypred=ypred)
assert "ypred" in predictor_1.data.keys()
assert predictor_1.data["ypred"].shape[0] == predictor_1.data["x"].shape[0]
assert all(predictor_1.data["ypred"].index == predictor_1.data["x"].index)
@patch('shapash.explainer.smart_predictor.SmartState')
def test_choose_state_1(self, mock_smart_state):
"""
Unit test choose state 1
Parameters
----------
mock_smart_state : [type]
[description]
"""
predictor_1 = self.predictor_1
predictor_1.choose_state('contributions')
mock_smart_state.assert_called()
@patch('shapash.explainer.smart_predictor.MultiDecorator')
def test_choose_state_2(self, mock_multi_decorator):
"""
Unit test choose state 2
Parameters
----------
mock_multi_decorator : [type]
[description]
"""
predictor_1 = self.predictor_1
predictor_1.choose_state('contributions')
predictor_1.choose_state([1, 2, 3])
mock_multi_decorator.assert_called()
@patch('shapash.explainer.smart_predictor.SmartPredictor.choose_state')
def test_validate_contributions_1(self, choose_state):
"""
Unit test validate contributions 1
"""
choose_state.return_value = MultiDecorator(SmartState())
predictor_1 = self.predictor_1
contributions = [
np.array([[2, 1], [8, 4]]),
np.array([[5, 5], [0, 0]])
]
predictor_1.state = predictor_1.choose_state(contributions)
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x_preprocessed"] = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
expected_output = [
pd.DataFrame(
[[2, 1], [8, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
),
pd.DataFrame(
[[5, 5], [0, 0]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
]
output = predictor_1.validate_contributions(contributions)
assert len(expected_output) == len(output)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in zip(expected_output, output)]
assert all(x is None for x in test_list)
@patch('shapash.explainer.smart_predictor.SmartPredictor.choose_state')
def test_check_contributions(self, choose_state):
"""
Unit test check_shape_contributions 1
"""
choose_state.return_value = MultiDecorator(SmartState())
shap_values = self.clf_2.get_feature_importance(Pool(self.df_encoded_2), type="ShapValues")
predictor_1 = self.predictor_2
predictor_1.data = {"x": None, "ypred": None, "contributions": None, "x_preprocessed": None}
predictor_1.data["x"] = self.df_2[["x1", "x2"]]
predictor_1.data["x_preprocessed"] = self.df_2[["x1", "x2"]]
predictor_1.data["ypred"] = self.df_2["y"]
adapt_contrib = [np.array([[-0.04395604, 0.13186813],
[-0.04395604, 0.13186813],
[-0.0021978, 0.01318681],
[-0.0021978, 0.01318681],
[-0.04395604, 0.13186813]]),
np.array([[0.04395604, -0.13186813],
[0.04395604, -0.13186813],
[0.0021978, -0.01318681],
[0.0021978, -0.01318681],
[0.04395604, -0.13186813]])]
contributions = list()
for element in adapt_contrib:
contributions.append(pd.DataFrame(element, columns=["x1", "x2"]))
predictor_1.state = predictor_1.choose_state(adapt_contrib)
predictor_1.check_contributions(contributions)
with self.assertRaises(ValueError):
predictor_1.check_contributions(shap_values[:, :-1])
def test_check_model_1(self):
"""
Unit test check model 1
"""
predictor_1 = self.predictor_1
model = lambda: None
model.n_features_in_ = 2
model.predict = types.MethodType(self.predict, model)
predictor_1.model = model
_case, _classes = predictor_1.check_model()
assert _case == 'regression'
assert _classes is None
def test_check_model_2(self):
"""
Unit test check model 2
"""
predictor_1 = self.predictor_1
model = lambda: None
model._classes = np.array([1, 2])
model.n_features_in_ = 2
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
predictor_1.model = model
_case, _classes = predictor_1.check_model()
assert _case == 'classification'
self.assertListEqual(_classes, [1, 2])
@patch('shapash.explainer.smart_predictor.SmartPredictor.check_model')
@patch('shapash.explainer.smart_predictor.SmartPredictor.check_explainer')
@patch('shapash.utils.check.check_preprocessing_options')
@patch('shapash.utils.check.check_consistency_model_features')
@patch('shapash.utils.check.check_consistency_model_label')
def test_check_preprocessing_1(self, check_consistency_model_label,
check_consistency_model_features,
check_preprocessing_options,
check_explainer,
check_model):
"""
Test check preprocessing on multiple preprocessing
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
features_dict = None
columns_dict = {i:features for i,features in enumerate(train.columns)}
features_types = {features: str(train[features].dtypes) for features in train.columns}
label_dict = None
enc_ordinal_all = ce.OrdinalEncoder(cols=['Onehot1', 'Onehot2', 'Binary1', 'Binary2', 'Ordinal1', 'Ordinal2',
'BaseN1', 'BaseN2', 'Target1', 'Target2', 'other']).fit(train)
train_ordinal_all = enc_ordinal_all.transform(train)
y = pd.DataFrame({'y_class': [0, 0, 0, 1]})
model = cb.CatBoostClassifier(n_estimators=1).fit(train_ordinal_all, y)
clf_explainer = shap.TreeExplainer(model)
check_preprocessing_options.return_value = True
check_consistency_model_features.return_value = True
check_consistency_model_label.return_value = True
check_explainer.return_value = clf_explainer
check_model.return_value = "classification", [0, 1]
predictor_1 = SmartPredictor(features_dict, model,
columns_dict, clf_explainer, features_types, label_dict)
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
enc_onehot = ce.OneHotEncoder(cols=['Onehot1', 'Onehot2']).fit(train)
train_onehot = enc_onehot.transform(train)
enc_binary = ce.BinaryEncoder(cols=['Binary1', 'Binary2']).fit(train_onehot)
train_binary = enc_binary.transform(train_onehot)
enc_ordinal = ce.OrdinalEncoder(cols=['Ordinal1', 'Ordinal2']).fit(train_binary)
train_ordinal = enc_ordinal.transform(train_binary)
enc_basen = ce.BaseNEncoder(cols=['BaseN1', 'BaseN2']).fit(train_ordinal)
train_basen = enc_basen.transform(train_ordinal)
enc_target = ce.TargetEncoder(cols=['Target1', 'Target2']).fit(train_basen, y)
input_dict1 = dict()
input_dict1['col'] = 'Onehot2'
input_dict1['mapping'] = pd.Series(data=['C', 'D', np.nan], index=['C', 'D', 'missing'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'Binary2'
input_dict2['mapping'] = pd.Series(data=['G', 'H', np.nan], index=['G', 'H', 'missing'])
input_dict2['data_type'] = 'object'
input_dict = dict()
input_dict['col'] = 'state'
input_dict['mapping'] = pd.Series(data=['US', 'FR-1', 'FR-2'], index=['US', 'FR', 'FR'])
input_dict['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'Ordinal2'
input_dict3['mapping'] = pd.Series(data=['K', 'L', np.nan], index=['K', 'L', 'missing'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
wrong_prepro = skp.OneHotEncoder().fit(train, y)
predictor_1.preprocessing = [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target, input_dict1,
list_dict]
predictor_1.check_preprocessing()
for preprocessing in [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target]:
predictor_1.preprocessing = preprocessing
predictor_1.check_preprocessing()
predictor_1.preprocessing = input_dict2
predictor_1.check_preprocessing()
predictor_1.preprocessing = enc
predictor_1.check_preprocessing()
predictor_1.preprocessing = None
predictor_1.check_preprocessing()
with self.assertRaises(Exception):
predictor_1.preprocessing = wrong_prepro
predictor_1.check_preprocessing()
def test_check_label_dict_1(self):
"""
Unit test check label dict 1
"""
predictor_1 = self.predictor_1
predictor_1.check_label_dict()
def test_check_label_dict_2(self):
"""
Unit test check label dict 2
"""
predictor_1 = self.predictor_1
predictor_1.label_dict = None
predictor_1._case = 'regression'
predictor_1.check_label_dict()
@patch('shapash.explainer.smart_predictor.SmartPredictor.check_model')
@patch('shapash.explainer.smart_predictor.SmartPredictor.check_explainer')
@patch('shapash.utils.check.check_preprocessing_options')
@patch('shapash.utils.check.check_consistency_model_features')
@patch('shapash.utils.check.check_consistency_model_label')
def test_check_mask_params(self, check_consistency_model_label,
check_consistency_model_features,
check_preprocessing_options,
check_explainer,
check_model):
"""
Unit test check mask params
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
features_dict = None
columns_dict = {i: features for i, features in enumerate(train.columns)}
features_types = {features: str(train[features].dtypes) for features in train.columns}
label_dict = None
enc_ordinal = ce.OrdinalEncoder(cols=['Onehot1', 'Onehot2', 'Binary1', 'Binary2', 'Ordinal1', 'Ordinal2',
'BaseN1', 'BaseN2', 'Target1', 'Target2', 'other']).fit(train)
train_ordinal = enc_ordinal.transform(train)
y = pd.DataFrame({'y_class': [0, 0, 0, 1]})
model = cb.CatBoostClassifier(n_estimators=1).fit(train_ordinal, y)
clf_explainer = shap.TreeExplainer(model)
check_preprocessing_options.return_value = True
check_consistency_model_features.return_value = True
check_consistency_model_label.return_value = True
check_explainer.return_value = clf_explainer
check_model.return_value = "classification", [0, 1]
wrong_mask_params_1 = list()
wrong_mask_params_2 = None
wrong_mask_params_3 = {
"features_to_hide": None,
"threshold": None,
"positive": None
}
right_mask_params = {
"features_to_hide": None,
"threshold": None,
"positive": True,
"max_contrib": 5
}
with self.assertRaises(ValueError):
predictor_1 = SmartPredictor(features_dict, model,
columns_dict, clf_explainer, features_types, label_dict,
mask_params=wrong_mask_params_1)
predictor_1 = SmartPredictor(features_dict, model,
columns_dict, clf_explainer, features_types, label_dict,
mask_params=wrong_mask_params_2)
predictor_1 = SmartPredictor(features_dict, model,
columns_dict, clf_explainer, features_types, label_dict,
mask_params=wrong_mask_params_3)
predictor_1 = SmartPredictor(features_dict, model,
columns_dict, clf_explainer, features_types, label_dict,
mask_params=right_mask_params)
def test_check_ypred_1(self):
"""
Unit test check y pred
"""
predictor_1 = self.predictor_1
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x"] = self.df_1[["x1","x2"]]
y_pred = None
predictor_1.check_ypred(ypred=y_pred)
def test_check_ypred_2(self):
"""
Unit test check y pred 2
"""
y_pred = pd.DataFrame(
data=np.array(['1', 0, 0, 1, 0]),
columns=['Y']
)
predictor_1 = self.predictor_1
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x"] = self.df_1
with self.assertRaises(ValueError):
predictor_1.check_ypred(y_pred)
def test_check_ypred_3(self):
"""
Unit test check y pred 3
"""
predictor_1 = self.predictor_1
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x"] = self.df_1[["x1","x2"]]
y_pred = pd.DataFrame(
data=np.array([0]),
columns=['Y']
)
with self.assertRaises(ValueError):
predictor_1.check_ypred(y_pred)
def test_check_y_pred_4(self):
"""
Unit test check y pred 4
"""
predictor_1 = self.predictor_1
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
y_pred = [0, 1, 0, 1, 0]
with self.assertRaises(ValueError):
predictor_1.check_ypred(ypred=y_pred)
def test_check_ypred_5(self):
"""
Unit test check y pred 5
"""
predictor_1 = self.predictor_1
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x"] = self.df_1[["x1","x2"]]
y_pred = pd.Series(
data=np.array(['0'])
)
with self.assertRaises(ValueError):
predictor_1.check_ypred(y_pred)
def test_predict_proba_1(self):
"""
Unit test of predict_proba method.
"""
predictor_1 = self.predictor_1
clf = cb.CatBoostRegressor(n_estimators=1).fit(self.df_encoded_1[['x1', 'x2']], self.df_encoded_1['y'])
clf_explainer = shap.TreeExplainer(clf)
predictor_1.model = clf
predictor_1.explainer = clf_explainer
predictor_1._case = "regression"
predictor_1._classes = None
with self.assertRaises(AttributeError):
predictor_1.predict_proba()
predictor_1 = self.predictor_1
with self.assertRaises(AttributeError):
predictor_1.predict_proba()
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
with self.assertRaises(KeyError):
predictor_1.predict_proba()
def test_predict_proba_2(self):
"""
Unit test of predict_proba method.
"""
clf = cb.CatBoostClassifier(n_estimators=1).fit(self.df_2[['x1', 'x2']], self.df_2['y'])
predictor_1 = self.predictor_2
predictor_1.model = clf
predictor_1.explainer = shap.TreeExplainer(clf)
predictor_1.preprocessing = None
predictor_1.data = {"x": None, "ypred": None, "contributions": None, "x_preprocessed":None}
predictor_1.data["x"] = self.df_2[["x1", "x2"]]
predictor_1.data["x_preprocessed"] = self.df_2[["x1", "x2"]]
prediction = predictor_1.predict_proba()
assert prediction.shape[0] == predictor_1.data["x"].shape[0]
predictor_1.data["ypred"] = pd.DataFrame(self.df_2["y"])
prediction = predictor_1.predict_proba()
assert prediction.shape[0] == predictor_1.data["x"].shape[0]
def test_detail_contributions_1(self):
"""
Unit test of detail_contributions method.
"""
predictor_1 = self.predictor_1
with self.assertRaises(ValueError):
predictor_1.detail_contributions()
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
with self.assertRaises(ValueError):
predictor_1.detail_contributions()
predictor_1.data["x_preprocessed"] = self.df_1[["x1", "x2"]]
with self.assertRaises(ValueError):
predictor_1.detail_contributions()
def test_detail_contributions_2(self):
"""
Unit test 2 of detail_contributions method.
"""
clf = cb.CatBoostRegressor(n_estimators=1).fit(self.df_2[['x1', 'x2']], self.df_2['y'])
predictor_1 = self.predictor_2
predictor_1.model = clf
predictor_1.explainer = shap.TreeExplainer(clf)
predictor_1.preprocessing = None
predictor_1._case = "regression"
predictor_1._classes = None
predictor_1.data = {"x": None, "ypred": None, "contributions": None, "x_preprocessed": None}
predictor_1.data["x"] = self.df_2[["x1", "x2"]]
predictor_1.data["x_preprocessed"] = self.df_2[["x1", "x2"]]
predictor_1.data["ypred_init"] = pd.DataFrame(self.df_2["y"])
contributions = predictor_1.detail_contributions()
assert contributions.shape[0] == predictor_1.data["x"].shape[0]
assert all(contributions.index == predictor_1.data["x"].index)
assert contributions.shape[1] == predictor_1.data["x"].shape[1] + 1
clf = cb.CatBoostClassifier(n_estimators=1).fit(self.df_2[['x1', 'x2']], self.df_2['y'])
clf_explainer = shap.TreeExplainer(clf)
predictor_1 = self.predictor_2
predictor_1.preprocessing = None
predictor_1.model = clf
predictor_1.explainer = clf_explainer
predictor_1._case = "classification"
predictor_1._classes = [0, 1]
false_y = pd.DataFrame({"y_false": [2, 2, 1, 1, 1]})
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x_preprocessed"] = self.df_2[["x1", "x2"]]
predictor_1.data["x"] = self.df_2[["x1", "x2"]]
predictor_1.data["ypred_init"] = false_y
with self.assertRaises(ValueError):
predictor_1.detail_contributions()
predictor_1.data["ypred_init"] = pd.DataFrame(self.df_2["y"])
contributions = predictor_1.detail_contributions()
assert contributions.shape[0] == predictor_1.data["x"].shape[0]
assert all(contributions.index == predictor_1.data["x"].index)
assert contributions.shape[1] == predictor_1.data["x"].shape[1] + 2
def test_save_1(self):
"""
Unit test save 1
"""
pkl_file, predictor = init_sme_to_pickle_test()
predictor.save(pkl_file)
assert path.exists(pkl_file)
os.remove(pkl_file)
@patch('shapash.explainer.smart_predictor.SmartPredictor.check_model')
@patch('shapash.explainer.smart_predictor.SmartPredictor.check_explainer')
@patch('shapash.utils.check.check_preprocessing_options')
@patch('shapash.utils.check.check_consistency_model_features')
@patch('shapash.utils.check.check_consistency_model_label')
def test_apply_preprocessing_1(self, check_consistency_model_label,
check_consistency_model_features,
check_preprocessing_options,
check_explainer,
check_model ):
"""
Unit test for apply preprocessing method
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2]})
enc = ColumnTransformer(transformers=[('power', skp.QuantileTransformer(n_quantiles=2), ['num1', 'num2'])],
remainder='passthrough')
enc.fit(train, y)
train_preprocessed = pd.DataFrame(enc.transform(train), index=train.index)
clf = cb.CatBoostClassifier(n_estimators=1).fit(train_preprocessed, y)
features_types = {features: str(train[features].dtypes) for features in train.columns}
clf_explainer = shap.TreeExplainer(clf)
columns_dict = {0: "num1", 1: "num2"}
label_dict = {0: "Yes", 1: "No"}
features_dict = {"num1": "city", "num2": "state"}
check_preprocessing_options.return_value = True
check_consistency_model_features.return_value = True
check_consistency_model_label.return_value = True
check_explainer.return_value = clf_explainer
check_model.return_value = "classification", [0, 1]
predictor_1 = SmartPredictor(features_dict, clf,
columns_dict, clf_explainer,
features_types, label_dict, enc)
predictor_1.data = {"x":None}
predictor_1.data["x"] = train
predictor_1.data["x_preprocessed"] = predictor_1.apply_preprocessing()
output_preprocessed = predictor_1.data["x_preprocessed"]
assert output_preprocessed.shape == train_preprocessed.shape
assert [column in clf.feature_names_ for column in output_preprocessed.columns]
assert all(train.index == output_preprocessed.index)
assert all([str(type_result) == str(train_preprocessed.dtypes[index])
for index, type_result in enumerate(output_preprocessed.dtypes)])
def test_summarize_1(self):
"""
Unit test 1 summarize method
"""
clf = cb.CatBoostRegressor(n_estimators=1).fit(self.df_3[['x1', 'x2']], self.df_3['y'])
clf_explainer = shap.TreeExplainer(clf)
predictor_1 = self.predictor_3
predictor_1.model = clf
predictor_1.explainer = clf_explainer
predictor_1.data = {"x": None,
"x_preprocessed": None,
"x_postprocessed": None,
"ypred": None,
"contributions": None}
predictor_1.data["x"] = self.df_3[['x1', 'x2']]
predictor_1.data["x_postprocessed"] = self.df_3[['x1', 'x2']]
predictor_1.data["x_preprocessed"] = self.df_3[['x1', 'x2']]
predictor_1.data["ypred"] = self.df_3["y"]
contribution = pd.DataFrame([[0.0, 0.094286],
[0.0, -0.023571],
[0.0, -0.023571],
[0.0, -0.023571],
[0.0, -0.023571]], columns=["x1", "x2"])
predictor_1.data["contributions"] = contribution
output = predictor_1.summarize()
print(output)
expected_output = pd.DataFrame({
"y": [1, 1, 0, 0, 0],
"feature_1": ["weight", "weight", "weight", "weight", "weight"],
"value_1": ["90", "78", "84", "85", "53"],
"contribution_1": ["0.0942857", "-0.0235714", "-0.0235714", "-0.0235714", "-0.0235714"],
"feature_2": ["age", "age", "age", "age", "age"],
"value_2": ["25", "39", "50", "43", "67"],
"contribution_2": ["0", "0", "0", "0", "0"]
}, dtype=object)
expected_output["y"] = expected_output["y"].astype(int)
feature_expected = [column for column in expected_output.columns if column.startswith("feature_")]
feature_output = [column for column in output.columns if column.startswith("feature_")]
value_expected = [column for column in expected_output.columns if column.startswith("value_")]
value_output = [column for column in output.columns if column.startswith("value_")]
contribution_expected = [column for column in expected_output.columns if column.startswith("contribution_")]
contribution_output = [column for column in output.columns if column.startswith("contribution_")]
assert expected_output.shape == output.shape
assert len(feature_expected) == len(feature_output)
assert len(value_expected) == len(value_output)
assert len(contribution_expected) == len(contribution_output)
def test_summarize_2(self):
"""
Unit test 2 summarize method
"""
predictor_1 = self.predictor_3
predictor_1._case = "classification"
predictor_1._classes = [0, 1]
clf = cb.CatBoostClassifier(n_estimators=1).fit(self.df_3[['x1', 'x2']], self.df_3['y'])
clf_explainer = shap.TreeExplainer(clf)
predictor_1.model = clf
predictor_1.explainer = clf_explainer
with self.assertRaises(ValueError):
predictor_1.summarize()
predictor_1.data = {"x": None,
"x_preprocessed": None,
"x_postprocessed": None,
"ypred": None,
"contributions": None}
predictor_1.data["x"] = self.df_3[["x1", "x2"]]
predictor_1.data["x_preprocessed"] = self.df_3[["x1", "x2"]]
predictor_1.data["x_postprocessed"] = self.df_3[["x1", "x2"]]
predictor_1.data["ypred"] = pd.DataFrame(
{"y": ["Yes", "Yes", "No", "No", "No"],
"proba": [0.519221, 0.468791, 0.531209, 0.531209, 0.531209]}
)
predictor_1.data["contributions"] = pd.DataFrame(
{"x1": [0, 0, -0, -0, -0],
"x2": [0.161538, -0.0403846, 0.0403846, 0.0403846, 0.0403846]}
)
output = predictor_1.summarize()
expected_output = pd.DataFrame({
"y": ["Yes", "Yes", "No", "No", "No"],
"proba": [0.519221, 0.468791, 0.531209, 0.531209, 0.531209],
"feature_1": ["weight", "weight", "weight", "weight", "weight"],
"value_1": ["90", "78", "84", "85", "53"],
"contribution_1": ["0.161538", "-0.0403846", "0.0403846", "0.0403846", "0.0403846"],
"feature_2": ["age", "age", "age", "age", "age"],
"value_2": ["25", "39", "50", "43", "67"],
"contribution_2": ["0", "0", "0", "0", "0"]
}, dtype=object)
expected_output["proba"] = expected_output["proba"].astype(float)
feature_expected = [column for column in expected_output.columns if column.startswith("feature_")]
feature_output = [column for column in output.columns if column.startswith("feature_")]
value_expected = [column for column in expected_output.columns if column.startswith("value_")]
value_output = [column for column in output.columns if column.startswith("value_")]
contribution_expected = [column for column in expected_output.columns if column.startswith("contribution_")]
contribution_output = [column for column in output.columns if column.startswith("contribution_")]
assert expected_output.shape == output.shape
assert len(feature_expected) == len(feature_output)
assert len(value_expected) == len(value_output)
assert len(contribution_expected) == len(contribution_output)
assert all(output.columns == expected_output.columns)
def test_summarize_3(self):
"""
Unit test 3 summarize method
"""
predictor_1 = self.predictor_3
predictor_1.mask_params = {"features_to_hide": None,
"threshold": None,
"positive": None,
"max_contrib": 1
}
predictor_1.data = {"x": None,
"x_preprocessed": None,
"x_postprocessed": None,
"ypred": None,
"contributions": None}
predictor_1.data["x"] = self.df_3[["x1", "x2"]]
predictor_1.data["x_preprocessed"] = self.df_3[["x1", "x2"]]
predictor_1.data["x_postprocessed"] = self.df_3[["x1", "x2"]]
predictor_1.data["ypred"] = pd.DataFrame(
{"y": ["Yes", "Yes", "No", "No", "No"],
"proba": [0.519221, 0.468791, 0.531209, 0.531209, 0.531209]}
)
predictor_1.data["contributions"] = pd.DataFrame(
{"x1": [0, 0, -0, -0, -0],
"x2": [0.161538, -0.0403846, 0.0403846, 0.0403846, 0.0403846]}
)
output = predictor_1.summarize()
expected_output = pd.DataFrame({
"y": ["Yes", "Yes", "No", "No", "No"],
"proba": [0.519221, 0.468791, 0.531209, 0.531209, 0.531209],
"feature_1": ["weight", "weight", "weight", "weight", "weight"],
"value_1": ["90", "78", "84", "85", "53"],
"contribution_1": ["0.161538", "-0.0403846", "0.0403846", "0.0403846", "0.0403846"],
"feature_2": ["age", "age", "age", "age", "age"],
"value_2": ["25", "39", "50", "43", "67"],
"contribution_2": ["0", "0", "0", "0", "0"]
}, dtype=object)
expected_output["proba"] = expected_output["proba"].astype(float)
feature_expected = [column for column in expected_output.columns if column.startswith("feature_")]
feature_output = [column for column in output.columns if column.startswith("feature_")]
value_expected = [column for column in expected_output.columns if column.startswith("value_")]
value_output = [column for column in output.columns if column.startswith("value_")]
contribution_expected = [column for column in expected_output.columns if column.startswith("contribution_")]
contribution_output = [column for column in output.columns if column.startswith("contribution_")]
assert not expected_output.shape == output.shape
assert not len(feature_expected) == len(feature_output)
assert not len(value_expected) == len(value_output)
assert not len(contribution_expected) == len(contribution_output)
assert not len(output.columns) == len(expected_output.columns)
predictor_1.mask_params = {"features_to_hide": None,
"threshold": None,
"positive": None,
"max_contrib": None}
def test_modify_mask(self):
"""
Unit test modify_mask method
"""
predictor_1 = self.predictor_2
assert all([value is None for value in predictor_1.mask_params.values()])
predictor_1.modify_mask(max_contrib=1)
assert not all([value is None for value in predictor_1.mask_params.values()])
assert predictor_1.mask_params["max_contrib"] == 1
assert predictor_1.mask_params["positive"] == None
predictor_1.modify_mask(max_contrib=2)
def test_apply_postprocessing_1(self):
"""
Unit test apply_postprocessing 1
"""
predictor_1 = self.predictor_3
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x"] = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
assert np.array_equal(predictor_1.data["x"], predictor_1.apply_postprocessing())
def test_apply_postprocessing_2(self):
"""
Unit test apply_postprocessing 2
"""
postprocessing = {'x1': {'type': 'suffix', 'rule': ' t'},
'x2': {'type': 'prefix', 'rule': 'test'}}
predictor_1 = self.predictor_3
predictor_1.postprocessing = postprocessing
predictor_1.data = {"x": None, "ypred": None, "contributions": None}
predictor_1.data["x"] = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['x1', 'x2'],
index=['Id1', 'Id2']
)
expected_output = pd.DataFrame(
data=[['1 t', 'test2'],
['3 t', 'test4']],
columns=['x1', 'x2'],
index=['Id1', 'Id2']
)
output = predictor_1.apply_postprocessing()
assert np.array_equal(output, expected_output)
def test_convert_dict_dataset(self):
"""
Unit test convert_dict_dataset
"""
predictor_1 = self.predictor_1
x = predictor_1.convert_dict_dataset(x={"x1": 1, "x2": "M"})
assert all([str(x[feature].dtypes) == predictor_1.features_types[feature]
for feature in predictor_1.features_types.keys()])
with self.assertRaises(ValueError):
predictor_1.convert_dict_dataset(x={"x1":"M", "x2":"M"})
predictor_1.convert_dict_dataset(x={"x1": 1, "x2": "M", "x3": "M"})
@patch('shapash.explainer.smart_predictor.SmartPredictor.convert_dict_dataset')
def test_check_dataset_type(self, convert_dict_dataset):
"""
Unit test check_dataset_type
"""
convert_dict_dataset.return_value = pd.DataFrame({"x1": [1], "x2": ["M"]})
predictor_1 = self.predictor_1
with self.assertRaises(ValueError):
predictor_1.check_dataset_type(x=1)
predictor_1.check_dataset_type(x=["x1", "x2"])
predictor_1.check_dataset_type(x=("x1", "x2"))
predictor_1.check_dataset_type(x=pd.DataFrame({"x1": [1], "x2": ["M"]}))
predictor_1.check_dataset_type(x={"x1": 1, "x2": "M"})
def test_check_dataset_features(self):
"""
Unit test check_dataset_features
"""
predictor_1 = self.predictor_1
with self.assertRaises(AssertionError):
predictor_1.check_dataset_features(x=pd.DataFrame({"x1": [1], "x2": ["M"], "x3": ["M"]}))
with self.assertRaises(ValueError):
predictor_1.check_dataset_features(x=pd.DataFrame({"x1": [1], "x2": [1]}))
predictor_1.check_dataset_features(x=pd.DataFrame({"x1": ["M"], "x2": ["M"]}))
x = predictor_1.check_dataset_features(x=pd.DataFrame({"x2": ["M"], "x1": [1]}))
assert all([str(x[feature].dtypes) == predictor_1.features_types[feature]
for feature in predictor_1.features_types.keys()])
features_order = []
for order in range(min(predictor_1.columns_dict.keys()), max(predictor_1.columns_dict.keys()) + 1):
features_order.append(predictor_1.columns_dict[order])
assert all(x.columns == features_order)
predictor_1.check_dataset_features(x=pd.DataFrame({"x1": [1], "x2": ["M"]}))
```
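The test class above leans heavily on unittest.mock.patch decorators to neutralise shapash's consistency checks so that SmartPredictor can be built from small toy objects. A stripped-down sketch of that pattern (the patch targets are taken from the tests above; the test body is illustrative and assumes shapash is installed so the targets resolve):
```python
import unittest
from unittest.mock import patch

class ExamplePatchPattern(unittest.TestCase):
    # Decorators are applied bottom-up, so the mocks arrive in reverse order.
    @patch('shapash.explainer.smart_predictor.SmartPredictor.check_model')
    @patch('shapash.explainer.smart_predictor.SmartPredictor.check_explainer')
    def test_pattern(self, check_explainer, check_model):
        check_model.return_value = "classification", [0, 1]  # stub the model check
        check_explainer.return_value = None                  # stub the explainer check
        # ... instantiate SmartPredictor with toy data here, as in the tests above.
        self.assertTrue(check_model.return_value[0] == "classification")

if __name__ == '__main__':
    unittest.main()
```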
#### File: unit_tests/utils/test_check.py
```python
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
from shapash.utils.check import check_preprocessing, check_model, check_label_dict,\
check_mask_params, check_ypred, check_contribution_object,\
check_preprocessing_options, check_postprocessing, \
check_consistency_model_features, check_consistency_model_label
from sklearn.compose import ColumnTransformer
import sklearn.preprocessing as skp
import types
import sklearn.ensemble as ske
import sklearn.svm as svm
import sklearn.linear_model as skl
import xgboost as xgb
import lightgbm as lgb
import catboost as cb
class TestCheck(unittest.TestCase):
def setUp(self):
self.modellist = [
lgb.LGBMRegressor(n_estimators=1), lgb.LGBMClassifier(n_estimators=1),
xgb.XGBRegressor(n_estimators=1), xgb.XGBRegressor(n_estimators=1),
cb.CatBoostRegressor(n_estimators=1), cb.CatBoostClassifier(n_estimators=1),
ske.GradientBoostingRegressor(n_estimators=1), ske.GradientBoostingClassifier(n_estimators=1),
ske.ExtraTreesRegressor(n_estimators=1), ske.ExtraTreesClassifier(n_estimators=1),
ske.RandomForestRegressor(n_estimators=1), ske.RandomForestClassifier(n_estimators=1),
skl.LogisticRegression(), skl.LinearRegression(),
svm.SVR(kernel='linear'), svm.SVC(kernel='linear')
]
def test_check_preprocessing_1(self):
"""
Test check preprocessing on multiple preprocessing
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
enc_onehot = ce.OneHotEncoder(cols=['Onehot1', 'Onehot2']).fit(train)
train_onehot = enc_onehot.transform(train)
enc_binary = ce.BinaryEncoder(cols=['Binary1', 'Binary2']).fit(train_onehot)
train_binary = enc_binary.transform(train_onehot)
enc_ordinal = ce.OrdinalEncoder(cols=['Ordinal1', 'Ordinal2']).fit(train_binary)
train_ordinal = enc_ordinal.transform(train_binary)
enc_basen = ce.BaseNEncoder(cols=['BaseN1', 'BaseN2']).fit(train_ordinal)
train_basen = enc_basen.transform(train_ordinal)
enc_target = ce.TargetEncoder(cols=['Target1', 'Target2']).fit(train_basen, y)
input_dict1 = dict()
input_dict1['col'] = 'Onehot2'
input_dict1['mapping'] = pd.Series(data=['C', 'D', np.nan], index=['C', 'D', 'missing'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'Binary2'
input_dict2['mapping'] = pd.Series(data=['G', 'H', np.nan], index=['G', 'H', 'missing'])
input_dict2['data_type'] = 'object'
input_dict = dict()
input_dict['col'] = 'state'
input_dict['mapping'] = pd.Series(data=['US', 'FR-1', 'FR-2'], index=['US', 'FR', 'FR'])
input_dict['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'Ordinal2'
input_dict3['mapping'] = pd.Series(data=['K', 'L', np.nan], index=['K', 'L', 'missing'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
wrong_prepro = skp.OneHotEncoder().fit(train, y)
check_preprocessing([enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target, input_dict1,
list_dict])
for preprocessing in [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target]:
check_preprocessing(preprocessing)
check_preprocessing(input_dict2)
check_preprocessing(enc)
check_preprocessing(None)
with self.assertRaises(Exception):
check_preprocessing(wrong_prepro)
def test_check_model_1(self):
"""
Unit test check model 1
"""
model = lambda: None
model.predict = types.MethodType(self.predict, model)
_case, _classes = check_model(model)
assert _case == 'regression'
assert _classes is None
def predict_proba(self, arg1, arg2):
"""
predict_proba method
"""
matrx = np.array(
[[0.2, 0.8],
[0.3, 0.7],
[0.4, 0.6]]
)
return matrx
def predict(self, arg1, arg2):
"""
predict method
"""
matrx = np.array(
[12, 3, 7]
)
return matrx
def test_check_model_2(self):
"""
Unit test check model 2
"""
model = lambda: None
model._classes = np.array([1, 2])
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
_case, _classes = check_model(model)
assert _case == 'classification'
self.assertListEqual(_classes, [1, 2])
def test_check_label_dict_1(self):
"""
Unit test check label dict 1
"""
label_dict={1: 'Yes', 0: 'No'}
_classes = [0, 1]
_case = 'classification'
check_label_dict(label_dict, _case, _classes)
def test_check_label_dict_2(self):
"""
Unit test check label dict 2
"""
label_dict = {}
_case = 'regression'
check_label_dict(label_dict, _case)
def test_check_mask_params(self):
"""
Unit test check mask params
"""
wrong_mask_params_1 = list()
wrong_mask_params_2 = None
wrong_mask_params_3 = {
"features_to_hide": None,
"threshold": None,
"positive": None
}
right_mask_params = {
"features_to_hide": None,
"threshold": None,
"positive": True,
"max_contrib": 5
}
with self.assertRaises(ValueError):
check_mask_params(wrong_mask_params_1)
check_mask_params(wrong_mask_params_2)
check_mask_params(wrong_mask_params_3)
check_mask_params(right_mask_params)
def test_check_ypred_1(self):
"""
Unit test check y pred
"""
y_pred = None
check_ypred(ypred=y_pred)
def test_check_ypred_2(self):
"""
Unit test check y pred 2
"""
x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
y_pred = pd.DataFrame(
data=np.array(['1', 0]),
columns=['Y']
)
with self.assertRaises(ValueError):
check_ypred(x_pred, y_pred)
def test_check_ypred_3(self):
"""
Unit test check y pred 3
"""
x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
y_pred = pd.DataFrame(
data=np.array([0]),
columns=['Y']
)
with self.assertRaises(ValueError):
check_ypred(x_pred, y_pred)
def test_check_y_pred_4(self):
"""
Unit test check y pred 4
"""
y_pred = [0, 1]
with self.assertRaises(ValueError):
check_ypred(ypred=y_pred)
def test_check_y_pred_5(self):
"""
Unit test check y pred 5
"""
x_pred = pd.DataFrame(
data=np.array([[1, 2], [3, 4]]),
columns=['Col1', 'Col2']
)
y_pred = pd.Series(
data=np.array(['0'])
)
with self.assertRaises(ValueError):
check_ypred(x_pred, y_pred)
def test_check_contribution_object_1(self):
"""
Unit test check_contribution_object 1
"""
contributions_1 = [
np.array([[2, 1], [8, 4]]),
np.array([[5, 5], [0, 0]])
]
contributions_2 = np.array([[2, 1], [8, 4]])
model = lambda: None
model._classes = np.array([1, 3])
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
_case = "classification"
_classes = list(model._classes)
check_contribution_object(_case, _classes, contributions_1)
assert len(contributions_1) == len(_classes)
assert isinstance(contributions_1, list)
check_contribution_object("regression", None, contributions_2)
assert isinstance(contributions_2, np.ndarray)
with self.assertRaises(ValueError):
check_contribution_object(_case, _classes, contributions_2)
check_mask_params("regression", None, contributions_1)
def test_check_consistency_model_features_1(self):
"""
Test check_consistency_model_features 1
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
features_dict = None
columns_dict = {i:features for i,features in enumerate(train.columns)}
features_types = {features: str(train[features].dtypes) for features in train.columns}
label_dict = None
mask_params = None
enc_ordinal_all = ce.OrdinalEncoder(cols=['Onehot1', 'Onehot2', 'Binary1', 'Binary2', 'Ordinal1', 'Ordinal2',
'BaseN1', 'BaseN2', 'Target1', 'Target2', 'other']).fit(train)
train_ordinal_all = enc_ordinal_all.transform(train)
preprocessing = enc_ordinal_all
y = pd.DataFrame({'y_class': [0, 0, 0, 1]})
model = cb.CatBoostClassifier(n_estimators=1).fit(train_ordinal_all, y)
check_consistency_model_features(features_dict, model, columns_dict,
features_types, mask_params, preprocessing)
def test_check_consistency_model_features_2(self):
"""
Test check_consistency_model_features 2
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
features_dict = None
columns_dict = {i: features for i, features in enumerate(train.columns)}
features_types = {features: str(train[features].dtypes) for features in train.columns}
mask_params = {
"features_to_hide": 'Binary3',
"threshold": None,
"positive": True,
"max_contrib": 5
}
enc_ordinal_all = ce.OrdinalEncoder(cols=['Onehot1', 'Onehot2', 'Binary1', 'Binary2', 'Ordinal1', 'Ordinal2',
'BaseN1', 'BaseN2', 'Target1', 'Target2', 'other']).fit(train)
train_ordinal_all = enc_ordinal_all.transform(train)
preprocessing = enc_ordinal_all
y = pd.DataFrame({'y_class': [0, 0, 0, 1]})
model = cb.CatBoostClassifier(n_estimators=1).fit(train_ordinal_all, y)
with self.assertRaises(ValueError):
check_consistency_model_features(features_dict, model, columns_dict,
features_types, mask_params, preprocessing)
def test_check_preprocessing_options_1(self):
"""
Unit test 1 for check_preprocessing_options
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': ['A', 'B']})
enc = ColumnTransformer(transformers=[('power', skp.QuantileTransformer(n_quantiles=2), ['num1', 'num2'])],
remainder='drop')
enc.fit(train, y)
with self.assertRaises(ValueError):
check_preprocessing_options(enc)
enc = ColumnTransformer(transformers=[('power', skp.QuantileTransformer(n_quantiles=2), ['num1', 'num2'])],
remainder='passthrough')
enc.fit(train, y)
check_preprocessing_options(enc)
def test_check_consistency_model_features_4(self):
"""
        Test check_consistency_model_features 4
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
features_dict = None
columns_dict = {i:features for i,features in enumerate(train.columns)}
features_types = {features: str(train[features].dtypes) for features in train.columns}
label_dict = None
mask_params = None
enc_ordinal_all = ce.OrdinalEncoder(cols=['Onehot1', 'Onehot2', 'Binary1', 'Binary2', 'Ordinal1', 'Ordinal2',
'BaseN1', 'BaseN2', 'Target1', 'Target2', 'other']).fit(train)
train_ordinal_all = enc_ordinal_all.transform(train)
preprocessing = enc_ordinal_all
y = pd.DataFrame({'y_class': [0, 0, 0, 1]})
for model in self.modellist:
print(type(model))
model.fit(train_ordinal_all, y)
check_consistency_model_features(features_dict, model, columns_dict,
features_types, mask_params, preprocessing)
def test_check_consistency_model_label_1(self):
"""
Test check_consistency_model_label 1
"""
columns_dict = {0: "x1", 1: "x2"}
label_dict = {0: "Yes", 1: "No"}
check_consistency_model_label(columns_dict, label_dict)
def test_check_consistency_model_label_2(self):
"""
Test check_consistency_model_label 2
"""
columns_dict = {0: "x1", 1: "x2"}
label_dict = {0: "Yes", 2: "No"}
with self.assertRaises(ValueError):
check_consistency_model_label(columns_dict, label_dict)
def test_check_postprocessing_1(self):
"""
Unit test check_consistency_postprocessing
"""
x = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
columns_dict = {0: 'Col1', 1: 'Col2'}
features_types = {features: str(x[features].dtypes) for features in x.columns}
postprocessing1 = {0: {'Error': 'suffix', 'rule': ' t'}}
postprocessing2 = {0: {'type': 'Error', 'rule': ' t'}}
postprocessing3 = {0: {'type': 'suffix', 'Error': ' t'}}
postprocessing4 = {0: {'type': 'suffix', 'rule': ' '}}
postprocessing5 = {0: {'type': 'case', 'rule': 'lower'}}
postprocessing6 = {0: {'type': 'case', 'rule': 'Error'}}
with self.assertRaises(ValueError):
check_postprocessing(features_types, postprocessing1)
check_postprocessing(features_types, postprocessing2)
check_postprocessing(features_types, postprocessing3)
check_postprocessing(features_types, postprocessing4)
check_postprocessing(features_types, postprocessing5)
check_postprocessing(features_types, postprocessing6)
``` |
{
"source": "JohannMG/OM2MC-Python",
"score": 2
} |
#### File: JohannMG/OM2MC-Python/dex-sample.py
```python
import mailchimp
omUser = 'userlogin'
omPass = '<PASSWORD>'
mailchimpList = 'xxxxxxxxxx'
# Edit the merge tags for all Mailchimp additions here:
def mailchimp_merge_tags():
return {
'METER' : 'Opinion Meter'
}
# Edit the location merge tag for Mailchimp additions here:
def GET_Mailchimp_Location_Tag():
return "LOC"
# surveyID : Name to go into above location Merge Tag
Mailchimp_Location_Ids = {
    10 : "Baltimore",
    4 : "Baltimore",
    48654 : "Gatlinburg",
    54458 : "Gatlinburg",
    78954 : "BION",
    13248 : "Orlando",
    96345 : "Orlando"
}
#other vars to look for AND how to look for it
# <Mailchimp Merge Tag> : {strings, to, find, it}
#case insensitive
def getMailchimpOtherMergeVars():
return {
'ZIPPOSTAL': ['zip', 'zipcode', 'postal'],
}
def get_mailchimp_api():
return mailchimp.Mailchimp('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-us1')
def getLocationName(id):
    try:
        return Mailchimp_Location_Ids[id]
    except KeyError:
        return ""
``` |
{
"source": "JohannOberleitner/pdesolver",
"score": 2
} |
#### File: sources/experiments/calc_central_charge2.py
```python
import time
from enum import Enum
import matplotlib.pyplot as plt
from sources.experiments.charges_generators import make_central_charge
from sources.experiments.fdm_helper import plotSurface
from sources.pdesolver.finite_differences_method.FiniteDifferencesSolver_V2 import GridConfiguration, \
ConstantGridValueProvider, FiniteDifferencesMethod4, FunctionGridValueProvider
from sources.pdesolver.finite_differences_method.boundaryconditions import RectangularBoundaryCondition
from sources.pdesolver.finite_differences_method.charge_distribution import ChargeDistribution
from sources.pdesolver.finite_differences_method.geometry import Geometry
from sources.pdesolver.finite_differences_method.rectangle import Rectangle
from sources.pdesolver.formula_parser.lexer import Lexer
from sources.pdesolver.formula_parser.parser import Parser, NumberExpression, UnaryOperatorExpression, \
BinaryOperatorExpression, VariableExpression, FunctionCallExpression, InnerExpression
from sources.pdesolver.formula_parser.visitor import Visitor
from sources.pdesolver.pde.PDE import PDEExpressionType, PDE
def solvePDE(geometry, charges, gridConfig):
g = geometry
boundaryCondition = RectangularBoundaryCondition(geometry)
start = time.time()
fdm = FiniteDifferencesMethod4(g, boundaryCondition, gridConfig, charges)
fdm.solve()
resulting_matrix = fdm.values
duration = time.time() - start
print('Total duration for solving the PDE lasted {0} sec'.format(duration))
return resulting_matrix
# class SimpleExpressionEvaluator(Visitor):
#
# def __init__(self, variables, functions={}):
# self.values = []
# self.variables = variables
# self.functions = functions
# self.result = None
#
# def get_result(self):
# if self.result is None:
# self.result = self.values.pop()
# return self.result
#
# def visit_number(self, number_expr):
# self.values.append(number_expr.get_value())
#
# def visit_function_call(self, function_call_expr):
# parameter_values = []
# for parameter in function_call_expr.get_parameter_expr_list():
# parameter.accept(self)
# parameter_values.append(self.values.pop())
#
# function_name = function_call_expr.get_function_name()
# if function_name in self.functions:
# fn = self.functions[function_name]
# function_result = fn(parameter_values)
# self.values.append(function_result)
# else:
# raise Exception("Function not provided for evaluation:" + function_name)
#
# def visit_variable(self, variable_expr):
# name = variable_expr.get_name()
# if name in self.variables:
# self.values.append(self.variables[name])
# else:
# raise Exception("Variable has no value:"+name)
#
# def visit_child_expression(self, child_expr):
# child_expr.get_child().accept(self)
#
# def visit_binary_operator(self, binary_expr):
# symbol = binary_expr.get_symbol()
#
# binary_expr.get_left_child_expr().accept(self)
# binary_expr.get_right_child_expr().accept(self)
#
# right_value = self.values.pop()
# left_value = self.values.pop()
#
# if symbol == '+':
# self.values.append(left_value + right_value)
# elif symbol == '-':
# self.values.append(left_value - right_value)
# elif symbol == '*':
# self.values.append(left_value * right_value)
# elif symbol == '/':
# self.values.append(left_value / right_value)
# else:
# raise Exception('Unsupported operator symbol:'+symbol)
#
# def visit_unary_operator(self, unary_expr):
# symbol = unary_expr.get_symbol()
# unary_expr.get_child_expr().accept(self)
#
# child_value = self.values.pop()
#
# if symbol == '+':
# self.values.append(child_value)
# elif symbol == '-':
# self.values.append(-child_value)
# else:
# raise Exception('Unsupported operator symbol:' + symbol)
# class ExpressionType(Enum):
# NUMBER = 0,
# COMPLICATED= 1
# class SimpleExpressionOptimizerVisitor(Visitor):
# def __init__(self):
# self.values = []
# self.valueTypes = []
# self.result = None
#
# def get_result(self):
# if self.result is None:
# assert len(self.values) == 1 and len(self.valueTypes) == 1
# valueType = self.valueTypes.pop()
# if valueType == ExpressionType.NUMBER:
# self.result = NumberExpression(self.values.pop())
# else:
# self.result = self.values.pop()
# return self.result
#
# def visit_variable(self, variable_expr):
# self.valueTypes.append(ExpressionType.COMPLICATED)
# self.values.append(variable_expr)
#
# def visit_number(self, number_expr):
# self.valueTypes.append(ExpressionType.NUMBER)
# self.values.append(number_expr.get_value())
#
# def visit_child_expression(self, child_expr):
# child_expr.get_child().accept(self)
#
# def visit_binary_operator(self, binary_expr):
# symbol = binary_expr.get_symbol()
#
# binary_expr.get_left_child_expr().accept(self)
# binary_expr.get_right_child_expr().accept(self)
#
# right_value = self.values.pop()
# right_value_type = self.valueTypes.pop()
# left_value = self.values.pop()
# left_value_type = self.valueTypes.pop()
#
# if left_value_type == ExpressionType.NUMBER and right_value_type == ExpressionType.NUMBER:
# self.values.append(self.calc_binary_operators_on_numbers(left_value, right_value, symbol))
# self.valueTypes.append(ExpressionType.NUMBER)
# elif left_value_type == ExpressionType.COMPLICATED or right_value_type == ExpressionType.COMPLICATED:
# self.values.append(self.calc_binary_operators_on_function_and_number(left_value, left_value_type, right_value, right_value_type, symbol))
# self.valueTypes.append(ExpressionType.COMPLICATED)
# else:
# raise Exception('Unsupported combination:' + symbol)
#
# def calc_binary_operators_on_numbers(self, left_value, right_value, symbol):
# if symbol == '+':
# return left_value + right_value
# elif symbol == '-':
# return left_value - right_value
# elif symbol == '*':
# return left_value * right_value
# elif symbol == '/':
# return left_value / right_value
# else:
# raise Exception('Unsupported operator symbol:' + symbol)
#
# def calc_binary_operators_on_function_and_number(self, first, first_type, second, second_type, symbol):
# if symbol == '*':
# if first_type == ExpressionType.NUMBER and first == 1.0:
# return second
# elif second_type == ExpressionType.NUMBER and second == 1.0:
# return first
# elif first_type == ExpressionType.NUMBER and first == -1.0:
# return UnaryOperatorExpression(second, '-')
# elif second_type == ExpressionType.NUMBER and second == -1.0:
# return UnaryOperatorExpression(first, '-')
# elif symbol == '+':
# if type(second) is UnaryOperatorExpression and second.symbol == '-':
# return BinaryOperatorExpression(first, second.get_child_expr(), '-')
#
# # TODO: + operator
#
# return BinaryOperatorExpression(first, second, symbol)
#
# def visit_function_call(self, function_call_expr):
# self.values.append(function_call_expr)
# self.valueTypes.append(ExpressionType.COMPLICATED)
#
# def visit_unary_operator(self, unary_expr):
# symbol = unary_expr.get_symbol()
# unary_expr.get_child_expr().accept(self)
#
# child_value = self.values.pop()
# child_value_type = self.valueTypes.pop()
#
# if symbol == '+':
# self.values.append(child_value)
# self.valueTypes.append(child_value_type )
# elif symbol == '-':
# # check for duplicated negation:
# if type(child_value) is UnaryOperatorExpression and child_value.symbol =='-':
# self.values.append(child_value.get_child_expr())
# self.valueTypes.append(child_value_type)
# elif child_value_type == ExpressionType.NUMBER:
# self.values.append(-child_value )
# self.valueTypes.append(child_value_type)
# else:
# self.values.append(UnaryOperatorExpression(child_value, '-'))
# self.valueTypes.append(ExpressionType.COMPLICATED)
# else:
# raise Exception('Unsupported operator symbol:' + symbol)
# class Function2DOffsets:
# def __init__(self, parameter_expr_list):
# assert len(parameter_expr_list)==2
# evaluator_x = self.evaluateParameterExpression(parameter_expr_list[0], 'i')
# evaluator_y = self.evaluateParameterExpression(parameter_expr_list[1], 'j')
# self.x = evaluator_x
# self.y = evaluator_y
#
# def evaluateParameterExpression(self, parameter_expr, positional_variable_name):
# variables = {positional_variable_name:0.0}
# evaluator = SimpleExpressionEvaluator(variables)
# parameter_expr.accept(evaluator)
# return evaluator.get_result()
#
# def __eq__(self, another):
# return self.x == another.x and self.y == another.y
#
# def __hash__(self):
# return hash(self.x) * hash(self.y)
#
# def __repr__(self):
# return 'i+{0} j+{1}'.format(self.x, self.y)
#
# def __str__(self):
# return 'i+{0} j+{1}'.format(self.x, self.y)
# class FunctionCall:
# def __init__(self, offset):
# self.offset = offset
# self.prefactorExpr = NumberExpression(1.0)
# self.simpleExpression = True
#
# def get_offset(self):
# return self.offset
#
# def get_prefactor(self):
# return self.prefactorExpr
#
# def negate(self):
# self.prefactorExpr = UnaryOperatorExpression(self.prefactorExpr, '-')
#
# def multiplyPrefactor(self, expr):
# self.prefactorExpr = BinaryOperatorExpression(expr, self.prefactorExpr, '*')
# self.simpleExpression = False
#
# def __str__(self):
# return '({0})u({1})'.format(str(self.prefactorExpr), self.offset)
#
# def __repr__(self):
# return '({0})u({1})'.format(repr(self.prefactorExpr), self.offset)
#
# def add(self, expr):
# self.prefactorExpr = BinaryOperatorExpression(self.prefactorExpr, expr, '+')
#
# def makeFunction(self, functionDict):
# def evaluatorFn(col, row):
# evaluator = SimpleExpressionEvaluator(variables={'i':col, 'j':row}, functions=functionDict)
# self.prefactorExpr.accept(evaluator)
# return evaluator.get_result()
#
# return evaluatorFn
# class FiniteDifferencesVisitor(Visitor):
#
# def __init__(self):
# self.solution_function_name = 'u'
# self.values = []
#
# def get_solution_function_name(self):
# return self.solution_function_name
#
# def set_solution_function_name(self, new_function_name):
# self.solution_function_name = new_function_name
#
#     # 1. collect all functions with u(...)
#     #    in lists
#     #    u(...) with the same parameters are identical
#     # 2. collect their parameter values in a separate object
#     # 3. and evaluate their prefactors
#     # 4. sum up the prefactors
# #
# #
# def visit_function_call(self, expr):
# if expr.get_function_name() == self.solution_function_name:
# parameter_list = expr.get_parameter_expr_list()
# # offets_for_function_call are eg: (i,j+1)
# offsets_for_function_call = Function2DOffsets(parameter_list)
# function_call = FunctionCall(offsets_for_function_call)
#
# self.values.append([function_call])
# else:
# self.values.append([expr])
#
# def visit_child_expression(self, child_expr):
# child_expr.get_child().accept(self)
#
# def visit_binary_operator(self, binary_expr):
# symbol = binary_expr.get_symbol()
#
# binary_expr.get_left_child_expr().accept(self)
# binary_expr.get_right_child_expr().accept(self)
#
# right_function_calls = self.values.pop()
# left_function_calls = self.values.pop()
#
# if symbol == '+':
# left_function_calls.extend(right_function_calls)
# self.values.append(left_function_calls)
# elif symbol == '-':
# for fn in right_function_calls:
# fn.negate()
# left_function_calls.extend(right_function_calls)
# self.values.append(left_function_calls)
# elif symbol == '*':
# if len(left_function_calls) == 1:
# for fn in right_function_calls:
# fn.multiplyPrefactor(left_function_calls[0])
# self.values.append(right_function_calls)
# else:
# raise Exception("More than 1 element in left_function_calls")
# else:
# raise Exception("Not supported operator:"+symbol)
#
# def visit_unary_operator(self, unary_expr):
# unary_expr.get_child_expr().accept(self)
# child_function_calls = self.values.pop()
#
# for fn in child_function_calls:
# fn.negate()
#
# self.values.append(child_function_calls)
#
# def combineExpressions(self):
# simplified = {}
# for functionCall in self.values[0]:
# if functionCall.offset in simplified:
# simplified[functionCall.offset].add(functionCall.prefactorExpr)
# else:
# simplified[functionCall.offset] = functionCall
# self.values = list(simplified.values())
#
# def simplifyExpressions(self):
# for functionCall in self.values:
# simplifier = SimpleExpressionOptimizerVisitor()
# functionCall.prefactorExpr.accept(simplifier)
# functionCall.prefactorExpr = simplifier.get_result()
#
#
# def make_grid_config(self, eps):
# gridConfig = GridConfiguration()
# for functionCall in self.values:
# if type(functionCall.prefactorExpr) is NumberExpression:
# gridConfig.add(ConstantGridValueProvider(functionCall.prefactorExpr.value),
# int(functionCall.offset.x), int(functionCall.offset.y))
# else:
# gridConfig.add(FunctionGridValueProvider(functionCall.makeFunction(eps)),
# int(functionCall.offset.x), int(functionCall.offset.y))
# return gridConfig
#
# class Sign(Enum):
# Plus=1,
# Minus=2
# class VectorCalculusExpressionVisitor(Visitor):
#
# def __init__(self, vectorVariableNames, dimension):
# self.vectorVariableNames = vectorVariableNames
# self.dimension = dimension
# self.rewritten_expressions = []
# self.rewritten_expressions_count = []
# self.visitor_i_plus = RewriteFiniteDifferencesVisitor('i', sign=Sign.Plus)
# self.visitor_j_plus = RewriteFiniteDifferencesVisitor('j', sign=Sign.Plus)
# self.visitor_k_plus = RewriteFiniteDifferencesVisitor('k', sign=Sign.Plus)
# self.visitor_i_minus = RewriteFiniteDifferencesVisitor('i', sign=Sign.Minus)
# self.visitor_j_minus = RewriteFiniteDifferencesVisitor('j', sign=Sign.Minus)
# self.visitor_k_minus = RewriteFiniteDifferencesVisitor('k', sign=Sign.Minus)
#
# def get_result(self):
# return self.rewritten_expressions.pop()
#
# def getVectorCoordinateNames(self, vectorVariableName):
# # at time being ignore all variable names
# if self.dimension == 2:
# return ['i', 'j']
# elif self.dimension == 3:
# return ['i', 'j', 'k']
# else:
# raise Exception('Vectors of dimension 1 or dimension > 3 not supported!')
#
# def visit_number(self, number_expr):
# self.rewritten_expressions.append(number_expr)
# self.rewritten_expressions_count.append(1)
#
# def visit_variable(self, variable_expr):
# if variable_expr.get_name() in self.vectorVariableNames:
# for coordinateName in self.getVectorCoordinateNames(variable_expr.get_name()):
# self.rewritten_expressions.append(VariableExpression(coordinateName))
# self.rewritten_expressions_count.append(self.dimension)
# else:
# self.rewritten_expressions.append(variable_expr)
# self.rewritten_expressions_count.append(1)
#
# def visit_binary_operator(self, binary_expr):
# symbol = binary_expr.get_symbol()
#
# binary_expr.get_left_child_expr().accept(self)
# binary_expr.get_right_child_expr().accept(self)
#
# right_expr = self.rewritten_expressions.pop()
# left_expr = self.rewritten_expressions.pop()
#
# self.rewritten_expressions.append(BinaryOperatorExpression(left_expr, right_expr, symbol))
#
# def visit_function_call(self, function_call_expr):
# rewritten_argument_list = []
# for parameter in function_call_expr.get_parameter_expr_list():
# parameter.accept(self)
# count = self.rewritten_expressions_count.pop()
# for i in range(count):
# rewritten_argument_list.insert(0, self.rewritten_expressions.pop())
#
# if function_call_expr.get_function_name() == 'grad':
# self.visit_grad(rewritten_argument_list.pop())
#
# elif function_call_expr.get_function_name() == 'div':
# self.visit_div(rewritten_argument_list.pop())
# elif function_call_expr.get_function_name() == 'rot':
# raise Exception("rot not implemented")
# else:
# new_function_call_expr = FunctionCallExpression(function_call_expr.get_function_name(),rewritten_argument_list )
# self.rewritten_expressions.append(new_function_call_expr)
# self.rewritten_expressions_count.append(1)
#
# def apply_finite_differences(self, operand_expr, visitor_plus, visitor_minus):
# expr = operand_expr
# expr.accept(visitor_plus)
# left_expr = visitor_plus.get_result()
# expr.accept(visitor_minus)
# right_expr = visitor_minus.get_result()
# return BinaryOperatorExpression(left_expr, right_expr, '-')
#
# def visit_div(self, operand_expr):
# # TODO: split on all u functions and combine them
# # TODO: do for eps as well
#
# #expr_coord_i = operand_expr.get_parameter_expr_list()[0]
# expr_coord_i = operand_expr
# expr_i = self.apply_finite_differences(expr_coord_i, self.visitor_i_plus, self.visitor_i_minus)
#
# expr_coord_j = operand_expr
# expr_j = self.apply_finite_differences(expr_coord_j, self.visitor_j_plus, self.visitor_j_minus)
#
#
# # expr_coord_k = operand.get_parameter_expr_list()[2]
#
# expr = BinaryOperatorExpression(expr_i, expr_j, '+')
#
# self.rewritten_expressions.append(expr)
# self.rewritten_expressions_count.append(1)
#
# def visit_grad(self, operand_expr):
#
# expr_i = self.apply_finite_differences(operand_expr, self.visitor_i_plus, self.visitor_i_minus)
# expr_j = self.apply_finite_differences(operand_expr, self.visitor_j_plus, self.visitor_j_minus)
#
# expr = FunctionCallExpression('gradHelper', [expr_i, expr_j])
#
# self.rewritten_expressions.append(expr)
# self.rewritten_expressions_count.append(1)
#
# # u(i,j) -> (u(i+1/2,j) - u(i-1/2,j)) + (u(i,j+1/2) - u(i,j-1/2)
# class RewriteFiniteDifferencesVisitor(Visitor):
#
# def __init__(self, variableName, sign):
# self.variableName = variableName
# self.sign = sign
# self.rewritten_expressions = []
#
# def get_result(self):
# return self.rewritten_expressions.pop()
#
# def visit_variable(self, variable_expr):
# if variable_expr.get_name() == self.variableName:
# if self.sign == Sign.Plus:
# expr = BinaryOperatorExpression(variable_expr,
# BinaryOperatorExpression(NumberExpression(1.0), NumberExpression(2.0), '/'),'+')
# elif self.sign == Sign.Minus:
# expr = BinaryOperatorExpression(variable_expr,
# BinaryOperatorExpression(NumberExpression(1.0), NumberExpression(2.0),
# '/'), '-')
# else:
# raise Exception("Invalid Sign")
# self.rewritten_expressions.append(expr)
# else:
# self.rewritten_expressions.append(variable_expr)
#
# def visit_function_call(self, function_call_expr):
# rewritten_argument_list = []
#
# if function_call_expr.get_function_name() == 'gradHelper':
# for i,parameter in enumerate(function_call_expr.get_parameter_expr_list()):
# if i==0 and self.variableName == 'i':
# parameter.accept(self)
# paramExpr = self.rewritten_expressions.pop()
# elif i==1 and self.variableName == 'j':
# parameter.accept(self)
# paramExpr = self.rewritten_expressions.pop()
# elif i == 2 and self.variableName == 'k':
# parameter.accept(self)
# paramExpr = self.rewritten_expressions.pop()
# else:
# pass
#
# self.rewritten_expressions.append(paramExpr)
# else:
#
# for parameter in function_call_expr.get_parameter_expr_list():
# parameter.accept(self)
# rewritten_argument_list.append(self.rewritten_expressions.pop())
#
# new_function_call_expr = FunctionCallExpression(function_call_expr.get_function_name(),
# rewritten_argument_list)
# self.rewritten_expressions.append(new_function_call_expr)
#
# def visit_binary_operator(self, binary_expr):
# symbol = binary_expr.get_symbol()
#
# binary_expr.get_left_child_expr().accept(self)
# binary_expr.get_right_child_expr().accept(self)
#
# right_expr = self.rewritten_expressions.pop()
# left_expr = self.rewritten_expressions.pop()
#
# self.rewritten_expressions.append(BinaryOperatorExpression(left_expr, right_expr, symbol))
#
# def visit_unary_operator(self, unary_expr):
# symbol = unary_expr.get_symbol()
# unary_expr.get_child_expr().accept(self)
#
# child_expr = self.rewritten_expressions.pop()
# self.rewritten_expressions.append(UnaryOperatorExpression(child_expr, symbol))
#
# def visit_number(self, number_expr):
# self.rewritten_expressions.append(number_expr)
#
# def visit_child_expression(self, child_expr):
# child_expr.accept(self)
# new_child = self.rewritten_expressions.pop()
# self.rewritten_expressions.append(InnerExpression(new_child))
# class PDEExpressionType(Enum):
# NONE = 0
# FINITE_DIFFERENCES = 1,
# VECTOR_CALCULUS = 2
#
# class PDE:
#
# def __init__(self, gridWidth, gridHeight):
# self.gridWidth = gridWidth
# self.gridHeight = gridHeight
# self.delta = 1.0
# self.rect = Rectangle(0, 0, gridWidth, gridHeight)
# self.geometry = Geometry(self.rect, self.delta)
# self.boundaryCondition = RectangularBoundaryCondition(self.geometry)
# self.auxiliaryFunctions = {}
#
# def setEquationExpression(self, expressionType, expressionString):
# self.expressionType = expressionType
# lexer = Lexer(expressionString)
# l = list(lexer.parse())
# parser = Parser(l)
# self.expression = parser.parse()
#
# def setVectorVariable(self, vectorVariableName, dimension=2):
# if self.expressionType != PDEExpressionType.VECTOR_CALCULUS:
# raise Exception("Expression type must be set to VECTOR_EXPRESSION")
#
# self.vectorVariableName = vectorVariableName
# self.dimension = dimension
#
# def setAuxiliaryFunctions(self, functionDictionary):
# self.auxiliaryFunctions = functionDictionary
#
# def configureGrid(self):
# if self.expressionType == PDEExpressionType.NONE:
# raise Exception("Expression not set")
#
# if self.expressionType == PDEExpressionType.VECTOR_CALCULUS:
# finiteDifferencesExpression = self.evaluateVectorCalculusExpression(self.expression)
# self.configureFiniteDifferences(finiteDifferencesExpression)
# else:
# self.configureFiniteDifferences(self.expression)
#
# def evaluateVectorCalculusExpression(self, vectorCalculusExpression):
# visitor = VectorCalculusExpressionVisitor([self.vectorVariableName], self.dimension)
# vectorCalculusExpression.accept(visitor)
# finiteDifferencesExpression = visitor.get_result()
# return finiteDifferencesExpression
#
# def configureFiniteDifferences(self, finiteDifferencesExpression):
# visitor = FiniteDifferencesVisitor()
# finiteDifferencesExpression.accept(visitor)
# visitor.combineExpressions()
# visitor.simplifyExpressions()
# self.gridConfig = visitor.make_grid_config(self.auxiliaryFunctions)
#
#
# # TODO: replace charges -> rightSide
# def solve(self, charges):
#
# start = time.time()
#
# fdm = FiniteDifferencesMethod4(self.geometry, self.boundaryCondition, self.gridConfig, charges)
# fdm.solve()
#
# resulting_matrix = fdm.values
#
# self.duration = time.time() - start
# #print('Total duration for solving the PDE lasted {0} sec'.format(duration))
# return resulting_matrix
def eps(params):
col = params[0] #i
row = params[1] #j
if (col > 10 and col < 54 and row > 10 and row < 54): # and (col < 28 or col > 36) and (row < 28 or row > 36):
if col > 15 and row > 15 and col < 49 and row < 48:
if col > 25 and row > 25 and col < 39 and row < 39:
return 10.0
else:
return 3.0
else:
return 20.0
else:
return 1.0
def setupPDE_finite_differences():
# 1. Finite Differences without aux function eps
pde = PDE(64.0, 64.0)
pde.setEquationExpression(PDEExpressionType.FINITE_DIFFERENCES,
'(u(i+1,j)-u(i,j)) - (u(i,j)-u(i-1,j)) + (u(i,j+1)-u(i,j)) - (u(i,j)-u(i,j-1))')
pde.configureGrid()
return pde
def setupPDE_finite_differences_with_eps():
# 2. Finite Differences with aux function eps
pde = PDE(64.0, 64.0)
pde.setEquationExpression(PDEExpressionType.FINITE_DIFFERENCES,
'eps(i+1/2,j)*(u(i+1,j)-u(i,j)) - eps(i-1/2,j)*(u(i,j)-u(i-1,j)) + ' + \
'eps(i,j+1/2)*(u(i,j+1)-u(i,j)) - eps(i,j-1/2)*(u(i,j)-u(i,j-1))')
pde.setAuxiliaryFunctions({'eps':eps})
pde.configureGrid()
return pde
def setupPDE_vector_calculus():
# 3. Equation as vector calculus without aux function eps
pde = PDE(64.0, 64.0)
pde.setEquationExpression(PDEExpressionType.VECTOR_CALCULUS, "div(grad( u(r) ))")
pde.setVectorVariable("r", dimension=2)
pde.configureGrid()
return pde
def setupPDE_vector_calculus_with_eps():
# 4. Equation as vector calculus with aux function eps
pde = PDE(64.0, 64.0)
pde.setEquationExpression(PDEExpressionType.VECTOR_CALCULUS, "div(eps(r) * grad( u(r) ))")
pde.setVectorVariable("r", dimension=2)
pde.setAuxiliaryFunctions({'eps': eps})
pde.configureGrid()
return pde
if __name__ == '__main__':
pdeNr = 4
pde = None
if pdeNr == 1:
pde = setupPDE_finite_differences()
elif pdeNr == 2:
pde = setupPDE_finite_differences_with_eps()
elif pdeNr == 3:
pde = setupPDE_vector_calculus()
elif pdeNr == 4:
pde = setupPDE_vector_calculus_with_eps()
else:
raise Exception("Invalid pdeNr:"+pdeNr)
charges = make_central_charge(pde.geometry)
resulting_matrix = pde.solve(charges)
showGraph = 1
if showGraph:
plotSurface(pde.geometry.X, pde.geometry.Y, resulting_matrix)
plt.show()
```
#### File: experiments/data_generation/trainings_data.py
```python
import json
import numpy as np
import datetime
from sources.experiments.ellipsis_data_support.make_ellipsis import create_ellipsis_grid
from sources.pdesolver.finite_differences_method.charge_distribution import ChargeDistribution
def make_permeability_matrix_one(gridWidth, gridHeight, innerGridWidth, innerGridHeight, majorSemiAxis, minorSemiAxis, permeability, angle):
eps_data = create_ellipsis_grid(gridWidth, gridHeight, innerGridWidth, innerGridHeight, majorSemiAxis,\
minorSemiAxis, permeability, angle)
return eps_data
def encode_ndarray(array, columns, rows):
return array.tolist()
#resulting_array = []
#current_array = []
#i = 0
#for value in np.nditer(array):
# if i==0:
# current_array = []
# resulting_array.append(current_array)
# current_array.append(value.item(0))
# i += 1
# if i == columns:
# i = 0
#return resulting_array
def as_ndarray(array):
return np.asarray(array, dtype=float)
def encode_TrainingsSet(obj):
if isinstance(obj, TrainingsSet):
return ''
else:
type_name = obj.__class__.__name__
raise TypeError(f"Object of type '{type_name}' is not JSON serializable")
def as_TrainingsSet(json_data):
if '__TrainingsSet__' in json_data:
return TrainingsSetDecoder().decode(json_data)
return json_data
class TrainingsSetEncoder(json.JSONEncoder):
def default(self, data):
if isinstance(data, TrainingsSet):
return data.encode()
elif isinstance(data, TrainingsSetGeometry):
return data.encode()
elif isinstance(data, ChargeDistribution):
return data.chargesList
else:
            return super().default(data)
class TrainingsSetDecoder:
def decode(self, json_data):
count = json_data["count"]
items = json_data["items"]
label = json_data["label"]
geometry = json_data["geometry"]
chargesList = json_data["charges"]
timestamp = json_data["createdAt"]
trainingsSet = self.init_data(TrainingsSetGeometry(geometry), chargesList, count, label, timestamp)
trainingsSet.decode(items)
return trainingsSet
def init_data(self, geometry, chargesList, count, label, timestamp):
semiMajorAxis = [None] * count
semiMinorAxis = [None] * count
permittivities = [None] * count
angles = [None] * count
return TrainingsSet(geometry, chargesList, semiMajorAxis, semiMinorAxis, permittivities, angles, label=label, timestamp=timestamp)
class TrainingsSetGeometry:
def __init__(self, *args, **kwargs):
self.gridWidth = (kwargs['gridWidth'] if 'gridWidth' in kwargs else args[0][0])
self.gridHeight = (kwargs['gridHeight'] if 'gridHeight' in kwargs else args[0][1])
self.innerGridWidth = (kwargs['innerGridWidth'] if 'innerGridWidth' in kwargs else args[0][2])
self.innerGridHeight = (kwargs['innerGridHeight'] if 'innerGridHeight' in kwargs else args[0][3])
def encode(self):
return [self.gridWidth, self.gridHeight, self.innerGridWidth, self.innerGridHeight ]
class TrainingsElement:
def __init__(self, trainingsSet, index):
self.trainingsSet = trainingsSet
self.index = index
def get_semiMajorAxis(self):
return self.trainingsSet.semiMajorAxises[self.index]
def set_semiMajorAxis(self, value):
self.trainingsSet.semiMajorAxises[self.index] = value
def get_semiMinorAxis(self):
return self.trainingsSet.semiMinorAxises[self.index]
def set_semiMinorAxis(self, value):
self.trainingsSet.semiMinorAxises[self.index] = value
def get_permittivity(self):
return self.trainingsSet.permittivities[self.index]
def set_permittivity(self, value):
self.trainingsSet.permittivities[self.index] = value
def get_angle(self):
return self.trainingsSet.angles[self.index]
def set_angle(self, value):
self.trainingsSet.angles[self.index] = value
def calc_permittivity_matrix(self, gridWidth, gridHeight, innerGridWidth, innerGridHeight):
self.trainingsSet.permittivity_matrix[self.index] = make_permeability_matrix_one(gridWidth, gridHeight, innerGridWidth, innerGridHeight,
majorSemiAxis=self.get_semiMajorAxis(),
minorSemiAxis=self.get_semiMinorAxis(),
permeability=self.get_permittivity(),
angle=self.get_angle())
def is_permittivity_matrix_calculated(self):
return self.trainingsSet.permittivity_matrix[self.index] != None
def get_permittivity_matrix(self):
return self.trainingsSet.permittivity_matrix[self.index]
class TrainingsSet:
def __init__(self, geometry, chargesList, semiMajorAxises, semiMinorAxises, permittivities, angles, label=None, timestamp=None):
self.geometry = geometry
self.chargesList = chargesList
self.semiMajorAxises = semiMajorAxises
self.semiMinorAxises = semiMinorAxises
self.permittivities = permittivities
self.angles = angles
c = len(permittivities)
self.permittivity_matrix = [None] * len(permittivities)
self.label = label
self.timestamp = timestamp or datetime.datetime.utcnow()
def count(self):
return len(self.permittivities)
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self.angles):
element = TrainingsElement(self, self.index)
self.index += 1
return element
else:
raise StopIteration
def get_element(self, index):
return TrainingsElement(self, index)
def encode(self):
items = []
for item in self:
items.append({'index':item.index+1,
'semiMajorAxis':item.get_semiMajorAxis(),
'semiMinorAxis':item.get_semiMinorAxis(),
'eps':item.get_permittivity(),
'permittivity_matrix':encode_ndarray(item.get_permittivity_matrix(), int(self.geometry.gridWidth), int(self.geometry.gridHeight)),
'angle':item.get_angle()
})
return { '__TrainingsSet__':True, 'label':self.label, 'createdAt':str(self.timestamp), 'geometry':self.geometry, 'charges':self.chargesList, 'count': self.count(), 'items':items }
def decode(self, itemsArray):
for item in self:
itemInArray = itemsArray[item.index]
item.set_semiMajorAxis(itemInArray["semiMajorAxis"])
item.set_semiMinorAxis(itemInArray["semiMinorAxis"])
item.set_permittivity(itemInArray["eps"])
item.set_angle(itemInArray["angle"])
self.permittivity_matrix[item.index] = as_ndarray(itemInArray["permittivity_matrix"])
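# A minimal round-trip sketch, assuming a single 64x64 element whose permittivity
# matrix is filled with a dummy array instead of calling calc_permittivity_matrix();
# all concrete values below are illustrative assumptions.
if __name__ == "__main__":
    geom = TrainingsSetGeometry(gridWidth=64.0, gridHeight=64.0,
                                innerGridWidth=32.0, innerGridHeight=32.0)
    ts = TrainingsSet(geom, chargesList=[], semiMajorAxises=[8.0], semiMinorAxises=[4.0],
                      permittivities=[3.0], angles=[0.0], label="demo")
    ts.permittivity_matrix[0] = np.zeros((64, 64))  # stand-in for the real ellipsis grid
    encoded = json.dumps(ts, cls=TrainingsSetEncoder)
    restored = json.loads(encoded, object_hook=as_TrainingsSet)
    print(restored.count(), restored.get_element(0).get_permittivity())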
```
#### File: sources/experiments/render_results.py
```python
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib.widgets import Button
from sources.experiments.data_generation.results_data import as_ResultsSet
from sources.experiments.data_generation.trainings_data import as_TrainingsSet
from sources.experiments.ellipsis_data_support.make_ellipsis import plot_ellipsis
class UICallback(object):
def __init__(self, permittivity_axes, initial_data, figure1, figure2, current_index):
if figure1[1] != None:
self.figure1 = figure1
self.figure1_axes = figure1[0]
self.figure1_results = figure1[1]
#self.ndValues = np.array(self.figure1_results.resultValues[current_index])
x = np.linspace(0.0, 64.0, 64)
y = np.linspace(0.0, 64.0, 64)
#x = np.linspace(0.0, 32.0, 32)
#y = np.linspace(0.0, 32.0, 32)
self.figure1_X, self.figure1_Y = np.meshgrid(x,y)
else:
self.figure1 = None
if figure2[1] != None:
self.figure2 = figure2
self.figure2_axes = figure2[0]
self.figure2_results = figure2[1]
else:
self.figure2 = None
self.permittivity_axes = permittivity_axes
self.initial_data = initial_data
self.current_index = current_index
self.count = self.figure1_results.count()
self.colorbar = 0
self.cmap = plt.get_cmap('PiYG')
def next(self, event):
if self.current_index+1 < self.count:
self.update(self.current_index + 1)
def prev(self, event):
if self.current_index - 1 >= 0:
self.update(self.current_index - 1)
def plotSurface_subplot(self, axes, x, y, values):
axes.set_zlim(-5.0, 40.0)
surf = axes.plot_surface(x, y, values, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
def redraw_figure(self, results_axes, ndValues, x, y):
results_axes.cla()
self.plotSurface_subplot(results_axes, x, y, ndValues)
def update(self, new_index):
self.current_index = new_index
self.redraw_permittivity()
self.redraw_figure1()
self.redraw_figure2()
plt.draw()
def redraw_permittivity(self):
self.permittivity_axes.cla()
pdata = self.initial_data.get_element(self.current_index)
#permittivity_title = 'id = {0}, eps = {1}, \naxisMaj = {2}, axisMin = {3},\n angle={4:6.4f} '.format(
# self.current_index, pdata['permittivities'][self.current_index], pdata['majorSemiAxis'][self.current_index],
# pdata['minorSemiAxis'][self.current_index], pdata['angles'][self.current_index])
#print(permittivity_title)
plot_ellipsis(self.permittivity_axes, pdata.get_permittivity_matrix(), 'permittivity')
def redraw_figure1(self):
if self.figure1 != None:
self.figure1_values = np.array(self.figure1_results.resultValues[self.current_index])
self.redraw_figure(self.figure1_axes, self.figure1_values, self.figure1_X, self.figure1_Y)
def redraw_figure2(self):
if self.figure2 != None:
self.figure2_values = np.array(self.figure2_results.resultValues[self.current_index])
self.redraw_figure(self.figure2_axes, self.figure2_values, self.figure1_X, self.figure1_Y)
# def redraw_permittivity(self):
# self.permittivity_axes.cla()
# pdata = self.permittivity_data[0]
# permittivity_title = 'id = {0}, eps = {1}, \naxisMaj = {2}, axisMin = {3},\n angle={4:6.4f} '.format(
# self.current_index, pdata['permittivities'][self.current_index],
# pdata['majorSemiAxis'][self.current_index], pdata['minorSemiAxis'][self.current_index],
# pdata['angles'][self.current_index])
# print(permittivity_title)
# plot_ellipsis(self.permittivity_axes, self.permittivity_data[1][self.current_index], permittivity_title)
# plt.draw()
#
# def redraw_errors(self, fdm):
# self.error_axes.cla()
# if self.colorbar != 0:
# self.colorbar.remove()
# self.colorbar = 0
# levels = MaxNLocator(nbins=20).tick_values(fdm.minValue - 2.0, fdm.maxValue + 2.0)
# norm = BoundaryNorm(levels, ncolors=self.cmap.N, clip=True)
# im = self.error_axes.pcolormesh(fdm.geometry.X, fdm.geometry.Y, fdm.error, norm=norm, cmap=self.cmap)
# self.error_axes.set_title('Errors', fontsize=9)
# self.colorbar = fig.colorbar(im, ax=self.error_axes)
def loadResultsFile(file):
if file == None:
return None
file = open(file, mode='r')
results = json.load(file, object_hook=as_ResultsSet)
return results
def loadPermittivities(file):
if file == None:
return None
file = open(file, mode='r')
results = json.load(file, object_hook=as_TrainingsSet)
return results
def parseArguments(argv):
supportedOptions = "hp:"
supportLongOptions = []
usage = 'render_results.py <inputfile1> <inputfile2> ... <trainingsDataFile>'
inputFile1 = None
inputFile2 = None
permittivityFile = None
if len(argv) == 0:
print(usage)
sys.exit(2)
if len(argv) >= 1:
inputFile1 = argv[0]
if len(argv) >= 2:
if len(argv) == 3:
inputFile2 = argv[1]
permittivityFile = argv[-1]
return permittivityFile, inputFile1, inputFile2
if __name__ == '__main__':
permittivity_file, inputFileName1, inputFileName2 = parseArguments(sys.argv[1:])
print(inputFileName1, inputFileName2)
resultsFile1 = loadResultsFile(inputFileName1)
resultsFile2 = loadResultsFile(inputFileName2)
initial_data = loadPermittivities(permittivity_file)
fig = plt.figure()
permittivity_axes = fig.add_subplot(1, 3, 1)
results1_axes = fig.add_subplot(1, 3, 2, projection='3d')
results2_axes = fig.add_subplot(1, 3, 3, projection='3d')
callback = UICallback(permittivity_axes, initial_data, (results1_axes, resultsFile1), (results2_axes, resultsFile2), 0)
callback.update(0)
axnext = plt.axes([0.81, 0.0, 0.1, 0.075])
bnext = Button(axnext, 'Next')
bnext.on_clicked(callback.next)
axprev = plt.axes([0.7, 0.0, 0.1, 0.075])
bprev = Button(axprev, 'Prev')
bprev.on_clicked(callback.prev)
plt.show()
```
#### File: pdesolver/finite_differences_method/geometry.py
```python
import numpy as np
class Geometry:
def __init__(self, rect, delta):
self.delta = delta
self.rect = rect
numX = (int)(self.rect.width/delta) #+1
numY = (int)(self.rect.height/delta) #+1
self.x = np.linspace(self.rect.x1, self.rect.x2, numX, dtype=np.double)
self.y = np.linspace(self.rect.y1, self.rect.y2, numY, dtype=np.double)
self.X, self.Y = np.meshgrid(self.x, self.y)
self.numX = len(self.X[0])
self.numY = len(self.X)
def indexFromCoords(self, x, y):
i = (int)((x - self.rect.x1) / self.delta)
j = (int)((y - self.rect.y1) / self.delta)
# because numpy's meshgrid returns the arrays in different order change this here:
return (j,i)
    def coordFromHorizontalIndex(self, column):
        return self.rect.x1 + column * self.delta
    def coordFromVerticalIndex(self, row):
        return self.rect.y1 + row * self.delta
    def coordFromIndices(self, column, row):
        return self.coordFromHorizontalIndex(column), self.coordFromVerticalIndex(row)
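# A minimal sketch of the index/coordinate mapping, assuming the project root is on
# the path so that Rectangle can be imported as elsewhere in this package; the grid
# size and coordinates are illustrative.
if __name__ == "__main__":
    from sources.pdesolver.finite_differences_method.rectangle import Rectangle
    g = Geometry(Rectangle(0, 0, 64.0, 64.0), delta=1.0)
    print(g.numX, g.numY)                  # number of grid points per axis
    print(g.indexFromCoords(10.0, 5.0))    # -> (row, column) into the meshgrid arrays
    print(g.coordFromHorizontalIndex(10))  # inverse mapping back to an x coordinate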
``` |
{
"source": "johann-petrak/farm-tools",
"score": 2
} |
#### File: farm-tools/farm_tools/farm_head_coral.py
```python
import os
import torch
import importlib
from torch import nn
from transformers import AutoModelForSequenceClassification
from farm.modeling.prediction_head import PredictionHead
from farm_tools.farm_coral.dataset import levels_from_labelbatch
from farm_tools.farm_coral.losses import coral_loss
from farm_tools.utils import init_logger
from farm_tools.farm_utils import OurFeedForwardBlock
logger = init_logger("FARM-CORAL-head")
class CoralLoss:
def __init__(self, num_labels, reduction="mean"):
self.reduction = reduction
self.num_labels = num_labels
if reduction == "none":
self.reduction = None
def __call__(self, logits, target):
# logger.info(f"logits={logits}")
# logger.info(f"target={target}")
levels = levels_from_labelbatch(target, self.num_labels)
theloss = coral_loss(logits, levels, importance_weights=None, reduction=self.reduction)
# logger.info(f"Running MyLoss.forward on {logits.shape}/{target.shape}, returning {theloss.shape}")
return theloss
class CoralOrdinalRegressionHead(PredictionHead):
def __init__(
self,
layer_dims=None,
hd_dim=768,
label_list=None,
loss_reduction="none",
nonlinearity="ReLU",
dropoutrate=None,
task_name="text_classification",
**kwargs,
):
"""
"""
# TODO: why does the original text classification head a
# label list attribute?
logger.info(f"Initializing Coral Head: layer_dims={layer_dims}, hd_dim={hd_dim}, label_list={label_list}")
super().__init__()
assert isinstance(label_list, list)
assert len(label_list) > 1
if layer_dims is None or len(layer_dims) == 0:
self.coral_weights = nn.Linear(hd_dim, 1, bias=False)
else:
self.nonlinearity = nonlinearity
mod = importlib.import_module("torch.nn")
nonlin = getattr(mod, nonlinearity)
self.coral_weights = nn.Sequential(
OurFeedForwardBlock(layer_dims, dropoutrate=dropoutrate, nonlinearity=nonlin),
nn.Linear(layer_dims[-1], 1, bias=False)
)
self.label_list = label_list
self.num_labels = len(label_list)
self.layer_dims = layer_dims
self.hd_dim = hd_dim
self.nonlinearity = nonlinearity
self.dropoutrate = dropoutrate
self.coral_bias = torch.nn.Parameter(
torch.arange(self.num_labels - 1, 0, -1).float() / (self.num_labels-1))
self.ph_output_type = "per_sequence"
self.model_type = "text_classification"
self.task_name = task_name #used for connecting with the right output of the processor
self.loss_fct = CoralLoss(
num_labels=self.num_labels,
reduction=loss_reduction,
)
if "label_list" in kwargs:
logger.warning(f"Ignoring label list from kwargs: {kwargs['label_list']}")
# TODO: maybe check if equal to the one we pass?
# self.label_list = kwargs["label_list"]
self.generate_config()
logger.info(f"Generated config: {self.config}")
logger.info(f"Created CoralOrdinalRegressionHead, ignored kwargs={kwargs}")
logger.info(f"Created head:\n{self}")
@classmethod
def load(cls, pretrained_model_name_or_path, revision=None, **kwargs):
"""
Load a prediction head from a saved FARM or transformers model. `pretrained_model_name_or_path`
can be one of the following:
a) Local path to a FARM prediction head config (e.g. my-bert/prediction_head_0_config.json)
b) Local path to a Transformers model (e.g. my-bert)
c) Name of a public model from https://huggingface.co/models (e.g. distilbert-base-uncased-distilled-squad)
:param pretrained_model_name_or_path: local path of a saved model or name of a publicly available model.
Exemplary public name:
- deepset/bert-base-german-cased-hatespeech-GermEval18Coarse
See https://huggingface.co/models for full list
:param revision: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:type revision: str
"""
logger.info(f"Running HEAD.load for {pretrained_model_name_or_path}")
if os.path.exists(pretrained_model_name_or_path) \
and "config.json" in pretrained_model_name_or_path \
and "prediction_head" in pretrained_model_name_or_path:
# a) FARM style
head = super(CoralOrdinalRegressionHead, cls).load(pretrained_model_name_or_path)
else:
# b) transformers style
# load all weights from model
full_model = AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path, revision=revision, **kwargs)
# init empty head
head = cls(label_list=full_model.label_list)
# transfer weights for head from full model
head.feed_forward.feed_forward[0].load_state_dict(full_model.classifier.state_dict())
# add label list
head.label_list = list(full_model.config.id2label.values())
del full_model
return head
def forward(self, X):
logits = self.coral_weights(X) + self.coral_bias
#logger.info(f"Running forward on {X.shape}, returning {logits.shape}")
#logger.info(f"forward got logits={logits}")
return logits
def logits_to_loss(self, logits, **kwargs):
# after forward: gets logits as (batchsize, outputs) plus kwargs:
# input_ids, padding_mask, setment_ids (all batchsize,inputdim size)
# text_classification_ids (batchsize, 1)
# returns batchsize losses
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids
ret = self.loss_fct(logits, label_ids.view(-1))
# logger.info(f"Running logits_to_loss on {logits.shape}/kwargs={kwargs}, returning {ret.shape}")
return ret
def logits_to_probs(self, logits, return_class_probs, **kwargs):
probs = torch.sigmoid(logits)
if return_class_probs:
probs = probs
else:
probs = torch.max(probs, dim=1)[0]
probs = probs.cpu().numpy()
# logger.info(f"Running logits_to_probs on {logits.shape}/{return_class_probs}/kwargs={kwargs}, returning {probs.shape}")
return probs
def logits_to_preds(self, logits, **kwargs):
# this gets batchsize,1 logits
logits = logits.cpu().numpy()
# logger.info(f"LOGITS={logits}")
probas = torch.sigmoid(torch.tensor(logits))
# logger.info(f"PROBAS={probas}")
predict_levels = probas > 0.5
pred_ids = torch.sum(predict_levels, dim=1)
# logger.info(f"PRED_IDS={pred_ids}")
preds = [self.label_list[int(x)] for x in pred_ids]
# logger.info(f"Running logits_to_preds on {logits.shape}/kwargs={kwargs}, returning {preds}")
return preds
def prepare_labels(self, **kwargs):
label_ids = kwargs.get(self.label_tensor_name)
label_ids = label_ids.cpu().numpy()
# This is the standard doc classification case
try:
labels = [self.label_list[int(x)] for x in label_ids]
# This case is triggered in Natural Questions where each example can have multiple labels
except TypeError:
labels = [self.label_list[int(x[0])] for x in label_ids]
# logger.info(f"Running prepare_labels on kwargs={kwargs}, returning {labels}")
return labels
def formatted_preds(self, logits=None, preds=None, samples=None, return_class_probs=False, **kwargs):
""" Like QuestionAnsweringHead.formatted_preds(), this fn can operate on either logits or preds. This
is needed since at inference, the order of operations is very different depending on whether we are performing
aggregation or not (compare Inferencer._get_predictions() vs Inferencer._get_predictions_and_aggregate())"""
assert (logits is not None) or (preds is not None)
# When this method is used along side a QAHead at inference (e.g. Natural Questions), preds is the input and
# there is currently no good way of generating probs
if logits is not None:
preds = self.logits_to_preds(logits)
probs = self.logits_to_probs(logits, return_class_probs)
else:
probs = [None] * len(preds)
# TODO this block has to do with the difference in Basket and Sample structure between SQuAD and NQ
try:
contexts = [sample.clear_text["text"] for sample in samples]
# This case covers Natural Questions where the sample is in a QA style
except KeyError:
contexts = [sample.clear_text["question_text"] + " | " + sample.clear_text["passage_text"] for sample in samples]
contexts_b = [sample.clear_text["text_b"] for sample in samples if "text_b" in sample.clear_text]
if len(contexts_b) != 0:
contexts = ["|".join([a, b]) for a,b in zip(contexts, contexts_b)]
res = {"task": "text_classification",
"task_name": self.task_name,
"predictions": []}
for pred, prob, context in zip(preds, probs, contexts):
if not return_class_probs:
pred_dict = {
"start": None,
"end": None,
"context": f"{context}",
"label": f"{pred}",
"probability": prob,
}
else:
pred_dict = {
"start": None,
"end": None,
"context": f"{context}",
"label": "class_probabilities",
"probability": prob,
}
res["predictions"].append(pred_dict)
return res
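# A minimal sketch of the CORAL pieces on dummy tensors, assuming four ordinal labels;
# the head itself needs the full FARM pipeline, so only the standalone loss and the
# threshold-counting decoding used in logits_to_preds() are exercised here.
if __name__ == "__main__":
    loss_fn = CoralLoss(num_labels=4, reduction="mean")
    dummy_logits = torch.randn(2, 3)      # one logit per threshold (num_labels - 1)
    dummy_targets = torch.tensor([0, 3])  # ordinal class indices
    print(loss_fn(dummy_logits, dummy_targets))
    pred_ids = torch.sum(torch.sigmoid(dummy_logits) > 0.5, dim=1)
    print(pred_ids)  # predicted ordinal indices, as in logits_to_preds()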
``` |
{
"source": "johann-petrak/nlp-ml",
"score": 4
} |
#### File: nlp-ml/nlpml/destination.py
```python
from abc import ABC, abstractmethod
import json
class SerialDestination(ABC):
"""
Base class for all implementations of something that needs serial writing.
"""
def __init__(self):
self.data = None
@abstractmethod
def write(self, item):
"""
Write the next item to the destination.
:param item: the data to write
:return:
"""
pass
def close(self):
"""
Close the destination if necessary, otherwise do nothing.
:return:
"""
pass
def get_data(self):
"""
Returns the data from the destination if this is an in-memory destination.
For other kinds of destination, this could return None or whatever makes sense.
:return:
"""
return self.data
def set_data(self, data):
"""
This can be used to set the data in the original instance if a copy of the instance was used
in a different process to create the data.
:param data: the data to set
:return:
"""
self.data = data
def size(self):
"""
Returns the number of items written to that destination.
:return: number of items
"""
class SdJsonLinesFile(SerialDestination):
"""
A destination which writes each item as json to a line in a destination file.
"""
def __init__(self, file):
"""
Destination for writing items to a line of JSON each in a file.
:param file: the file to write to.
"""
self.file = file
self.fh = open(file, "wt", encoding="utf8")
self.n = 0
def write(self, item):
self.fh.write(json.dumps(item))
self.n += 1
self.fh.write("\n")
def size(self):
return self.n
class SdList(SerialDestination):
"""
A destination for lists
"""
def __init__(self, thelist):
if not isinstance(thelist, list):
raise Exception("Must be a list")
self.data = thelist
def write(self, item):
        self.data.append(item)
    def size(self):
        return len(self.data)
class SdMap(SerialDestination):
"""
A destination for maps/dictionaries. For these, the destination needs to receive the tuple (id, item) from
the processor!
"""
def __init__(self, themap):
        if not isinstance(themap, dict):
raise Exception("Must be a map")
self.data = themap
def write(self, item):
if not isinstance(item, tuple) or not len(item) == 2:
raise Exception("write must get a tuple (id, item) instead of item!")
self.data[item[0]] = item[1]
def size(self):
return len(self.data)
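# A minimal usage sketch: write items to the in-memory list destination and to a
# JSON-lines file in a temporary directory; the file name is an illustrative assumption.
if __name__ == "__main__":
    import os
    import tempfile
    dest = SdList([])
    for item in [{"id": 1}, {"id": 2}]:
        dest.write(item)
    print(dest.size(), dest.get_data())
    tmpfile = os.path.join(tempfile.mkdtemp(), "items.jsonl")
    jl = SdJsonLinesFile(tmpfile)
    jl.write({"id": 3})
    jl.close()
    print(jl.size(), tmpfile)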
``` |
{
"source": "johann-petrak/python-matchtext",
"score": 2
} |
#### File: python-matchtext/matchtext/utils.py
```python
def thisorthat(this, that):
"""
If this is None takes that otherwise takes this.
:param this:
:param that:
:return:
"""
if this is None:
return that
else:
return this
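# A couple of illustrative calls: the first argument wins unless it is None.
if __name__ == "__main__":
    print(thisorthat(None, "default"))        # -> default
    print(thisorthat(0, "default"))           # -> 0 (only None falls through, not falsy values)
    print(thisorthat("override", "default"))  # -> override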
```
#### File: python-matchtext/tests/test_stringmatcher.py
```python
from matchtext.stringmatcher import StringMatcher
import sys
def test_sm_find1():
sm = StringMatcher()
for i, e in enumerate(["this", "word", "words", "thisis", "his"]):
sm.add(e, data=i, append=False)
assert sm["this"] == 0
assert sm["his"] == 4
assert sm.get("this") == 0
assert sm.get("asasas", "x") == "x"
assert sm.get("asaasa") is None
t1 = "this is a word"
ms1 = sm.find(t1, all=True, skip=True)
assert len(ms1) == 2
m1 = ms1[0]
assert m1.entrydata == 0
assert m1.start == 0
assert m1.end == 4
assert m1.match == "this"
assert m1.matcherdata is None
m2 = ms1[1]
assert m2.entrydata == 1
assert m2.match == "word"
assert m2.start == 10
def test_sm_find2():
sm = StringMatcher()
for i, e in enumerate(["this", "word", "words", "thisis", "his"]):
sm.add(e, data=i, append=False)
t1 = "this is a word"
ms1 = sm.find(t1, all=True, skip=False)
assert len(ms1) == 3
m1 = ms1[0]
assert m1.entrydata == 0
assert m1.start == 0
assert m1.end == 4
assert m1.match == "this"
assert m1.matcherdata is None
m2 = ms1[1]
assert m2.entrydata == 4
assert m2.match == "his"
assert m2.start == 1
m3 = ms1[2]
assert m3.entrydata == 1
assert m3.match == "word"
assert m3.start == 10
def test_sm_find3():
sm = StringMatcher()
for i, e in enumerate(["this", "word", "words", "thisis", "his"]):
sm.add(e, data=i, append=False)
t1 = "thisis a word"
ms1 = sm.find(t1, all=True, skip=False)
assert len(ms1) == 4
m1 = ms1[0]
assert m1.entrydata == 0
assert m1.start == 0
assert m1.end == 4
assert m1.match == "this"
assert m1.matcherdata is None
m2 = ms1[1]
assert m2.entrydata == 3
assert m2.match == "thisis"
assert m2.start == 0
m3 = ms1[2]
assert m3.entrydata == 4
assert m3.match == "his"
assert m3.start == 1
m4 = ms1[3]
assert m4.entrydata == 1
assert m4.match == "word"
assert m4.start == 9
def test_sm_find4():
sm = StringMatcher()
for i, e in enumerate(["this", "word", "words", "thisis", "his"]):
sm.add(e, data=i, append=False)
t1 = "thisis a word"
ms1 = sm.find(t1, all=False, skip=True)
assert len(ms1) == 2
m1 = ms1[0]
assert m1.entrydata == 3
assert m1.match == "thisis"
assert m1.start == 0
m2 = ms1[1]
assert m2.entrydata == 1
assert m2.match == "word"
assert m2.start == 9
def test_sm_find5():
sm = StringMatcher()
for i, e in enumerate(["this", "word", "words", "thisis", "his", "word"]):
sm.add(e, data=i, append=True)
t1 = "thisis a word"
ms1 = sm.find(t1, all=False, skip=True)
assert len(ms1) == 2
m1 = ms1[0]
assert m1.entrydata == [3]
assert m1.match == "thisis"
assert m1.start == 0
m2 = ms1[1]
assert m2.entrydata == [1, 5]
assert m2.match == "word"
assert m2.start == 9
def test_sm_find6():
def f_ign(x):
return x in "io"
sm = StringMatcher(ignorefunc=f_ign)
for i, e in enumerate(["this", "word", "words", "thisis", "his", "word"]):
sm.add(e, data=i, append=True)
#print(f"!!!!!!!!!!!DEBUG: nodes: ", file=sys.stderr)
#sm._root.print_node()
#print(file=sys.stderr)
# In the following "thoss" should match because o gets ignored and and i got ignored for "thisis" so
# we really match "thss"
t1 = "thoss a wiiiiiiiird"
ms1 = sm.find(t1, all=False, skip=True)
assert len(ms1) == 2
m1 = ms1[0]
assert m1.entrydata == [3]
assert m1.match == "thoss"
assert m1.start == 0
assert m1.end == 5
m2 = ms1[1]
assert m2.entrydata == [1, 5]
assert m2.match == "wiiiiiiiird"
assert m2.start == 8
assert m2.end == 19
def test_sm_replace1():
sm = StringMatcher()
for i, e in enumerate(["this", "word", "words", "thisis", "his"]):
sm.add(e, data=i, append=False)
t1 = "thisis a word"
rep = sm.replace(t1)
assert rep == "3 a 1"
```
#### File: python-matchtext/tests/test_tokenmatcher.py
```python
from matchtext.tokenmatcher import TokenMatcher, Node
ENTRIES = ["Some", "word", "to", "add", ["some", "word"], ["some", "word"]]
def test_tm_find1():
tm = TokenMatcher()
for i, e in enumerate(ENTRIES):
tm.add(e, data=i, append=False)
t1 = ["This", "contains", "Some", "text"]
ms1 = tm.find(t1, all=False, skip=True)
assert len(ms1) == 1
m1 = ms1[0]
assert m1.entrydata == 0
assert m1.start == 2
assert m1.end == 3
assert m1.matcherdata is None
def test_tm_find2():
tm = TokenMatcher(mapfunc=str.lower, matcherdata="x")
for i, e in enumerate(ENTRIES):
tm.add(e, data=i, append=True)
t1 = ["this", "contains", "some", "word", "of", "text", "to", "add"]
ms = tm.find(t1, all=True, skip=False)
# print("Matches:", ms)
assert len(ms) == 5
m = ms[0]
assert m.entrydata == [0]
assert m.start == 2
assert m.end == 3
assert m.matcherdata == "x"
m = ms[1]
assert m.match == ["some", "word"]
assert m.entrydata == [4, 5]
assert m.start == 2
assert m.end == 4
assert m.matcherdata == "x"
m = ms[2]
assert m.match == ["word"]
assert m.entrydata == [1]
assert m.start == 3
assert m.end == 4
assert m.matcherdata == "x"
def test_tm_replace1():
tm = TokenMatcher(mapfunc=str.lower)
for i, e in enumerate(ENTRIES):
tm.add(e, data=i, append=False)
t1 = ["this", "contains", "some", "word", "of", "text", "to", "add"]
rep = tm.replace(t1)
assert rep == ['this', 'contains', 5, 'of', 'text', 2, 3]
def test_tm_replace2():
tm = TokenMatcher(mapfunc=str.lower)
for i, e in enumerate(ENTRIES):
tm.add(e, data=i, append=False)
t1 = ["THIS", "CONTAINS", "SOME", "WORD", "OF", "TEXT", "TO", "ADD"]
rep = tm.replace(t1, replacer=lambda x: x.match)
assert rep == ['THIS', 'CONTAINS', 'some', 'word', 'OF', 'TEXT', 'to', 'add']
assert t1 == ["THIS", "CONTAINS", "SOME", "WORD", "OF", "TEXT", "TO", "ADD"]
def test_tm_find3():
tm = TokenMatcher()
for i, e in enumerate(ENTRIES):
tm.add(e, data=i, append=False)
t1 = ["This", "contains", "Some", "text"]
def mm(*args):
return args
ms1 = tm.find(t1, all=False, skip=True, matchmaker=mm)
assert len(ms1) == 1
m1 = ms1[0]
assert m1 == (2, 3, ["Some"], 0, None)
def test_tm_find4():
"""
Test finding 2 longest matches
:return:
"""
tm = TokenMatcher()
for i, e in enumerate([["some", "word"], ["some", "word"]]):
tm.add(e, data=i, append=True)
t1 = ["this", "contains", "some", "word", "yes"]
ms1 = tm.find(t1, all=True, skip=False)
assert len(ms1) == 1
m1 = ms1[0]
assert m1.start == 2
assert m1.end == 4
assert m1.match == ["some", "word"]
assert m1.entrydata == [0,1]
def test_tm_find5():
"""
Test finding 2 longest matches
:return:
"""
tm = TokenMatcher()
for i, e in enumerate([["some", "word"], ["some", "word"], "word"]):
tm.add(e, data=i, append=True)
t1 = ["this", "contains", "some", "word", "yes"]
ms1 = tm.find(t1, all=True, skip=False)
assert len(ms1) == 2
m1 = ms1[0]
assert m1.start == 2
assert m1.end == 4
assert m1.match == ["some", "word"]
assert m1.entrydata == [0, 1]
m2 = ms1[1]
assert m2.match == ["word"]
def test_tm_replace3():
tm = TokenMatcher()
tm.add(["this", "and", "that"], "ENTRY1")
tm.add(["she", "and", "he"], "ENTRY2")
tm.add(["other", "stuff"], "ENTRY3")
tokens = ["because", "this", "and", "that", "should", "also"]
ms1 = tm.find(tokens)
assert len(ms1) == 1
m1 = ms1[0]
assert m1.match == ["this", "and", "that"] # note: the ignored token is NOT part of the result match!
assert m1.start == 1
assert m1.end == 4
assert tokens[1:4] == ["this", "and", "that"] # note: but the range fits the original tokens!
assert tm.replace(["and", "also"]) == ["and", "also"]
assert tm.replace(tokens) == ['because', 'ENTRY1', 'should', 'also']
assert tm.replace(["other", "stuff"]) == ['ENTRY3']
assert tm.replace(["and", "other", "stuff"]) == ['and', 'ENTRY3']
assert tm.replace(["word1", "word2", "other", "word3"]) == ['word1', 'word2', 'other', 'word3']
assert tm.replace(["word1", "word2", "stuff", "word3"]) == ['word1', 'word2', 'stuff', 'word3']
assert tm.replace(["word1", "word2", "and", "word3"]) == ['word1', 'word2', 'and', 'word3']
assert tm.replace(["this", "and", "that", "other", "stuff"]) == ['ENTRY1', 'ENTRY3']
def test_tm_find6():
"""
Test ignoring tokens
:return:
"""
def ign1(x):
return x in ["and"]
tm = TokenMatcher(ignorefunc=ign1)
tm.add(["this", "and", "that"], "ENTRY1")
tm.add(["she", "and", "he"], "ENTRY2")
tm.add(["other", "stuff"], "ENTRY3")
tokens = ["because", "this", "and", "that", "should", "also"]
ms1 = tm.find(tokens)
assert len(ms1) == 1
m1 = ms1[0]
assert m1.match == ["this", "that"] # note: the ignored token is NOT part of the result match!
assert m1.start == 1
assert m1.end == 4
assert tokens[m1.start:m1.end] == ["this", "and", "that"] # note: but the range fits the original tokens!
assert tm.replace(tokens) == ['because', 'ENTRY1', 'should', 'also']
def test_tm_replace4():
def ign1(x):
return x in ["and"]
tm = TokenMatcher(ignorefunc=ign1)
tm.add(["this", "and", "that"], "ENTRY1")
tm.add(["she", "and", "he"], "ENTRY2")
tm.add(["other", "stuff"], "ENTRY3")
assert tm.replace(["and", "also"]) == ["and", "also"]
assert tm.replace(["because", "this", "and", "that", "should", "also"]) == ['because', 'ENTRY1', 'should', 'also']
assert tm.replace(["other", "stuff"]) == ['ENTRY3']
assert tm.replace(["and", "other", "stuff"]) == ['and', 'ENTRY3']
assert tm.replace(["word1", "word2", "other", "word3"]) == ['word1', 'word2', 'other', 'word3']
assert tm.replace(["word1", "word2", "stuff", "word3"]) == ['word1', 'word2', 'stuff', 'word3']
assert tm.replace(["word1", "word2", "and", "word3"]) == ['word1', 'word2', 'and', 'word3']
assert tm.replace(["this", "and", "that", "other", "stuff"]) == ['ENTRY1', 'ENTRY3']
def test_tmp_repr1():
tm = TokenMatcher()
tm.add(["this", "and", "that"], "ENTRY1")
tm.add(["she", "and", "he"], "ENTRY2")
tm.add(["other", "stuff"], "ENTRY3")
assert Node.dict_repr(tm.nodes) == """[('this', Node(is_match=None,data=None,nodes=[('and', Node(is_match=None,data=None,nodes=[('that', Node(is_match=True,data=ENTRY1,nodes=None))]))])), ('she', Node(is_match=None,data=None,nodes=[('and', Node(is_match=None,data=None,nodes=[('he', Node(is_match=True,data=ENTRY2,nodes=None))]))])), ('other', Node(is_match=None,data=None,nodes=[('stuff', Node(is_match=True,data=ENTRY3,nodes=None))]))]"""
``` |
{
"source": "johann-petrak/python-sparsevectors",
"score": 2
} |
#### File: johann-petrak/python-sparsevectors/sparsevector.py
```python
import os
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'sparsevectors.cpython-35m-x86_64-linux-gnu.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
os.environ['PYTHON_EGG_CACHE'] = '~/'
__bootstrap__()
```
#### File: python-sparsevectors/tests/tests1.py
```python
from __future__ import print_function
from sparsevectors import SparseVector
import unittest
class SparseVectorTestsBasics(unittest.TestCase):
def test_sv_basics1(self):
v1 = SparseVector()
e1 = v1[0]
self.assertEqual(e1, 0.0)
self.assertNotIn(0, v1)
class SparseVectorTestsDot(unittest.TestCase):
def test_sv_dot1(self):
v1 = SparseVector()
v1[2] = 2.0
v2 = SparseVector()
v2[2] = 3.0
self.assertEqual(type(v1[2]), float)
d1 = v1.dot(v2)
self.assertEqual(d1, 6.0)
def test_sv_dot2(self):
v1 = SparseVector()
v2 = SparseVector()
v2[2] = 3.0
d1 = v1.dot(v2)
self.assertEqual(d1, 0.0)
def test_sv_dot3(self):
v1 = SparseVector()
v1[1] = 2.0
v2 = SparseVector()
v2[2] = 3.0
self.assertEqual(type(v1[2]), float)
d1 = v1.dot(v2)
self.assertEqual(d1,0.0)
def test_sv_dot4(self):
v1 = SparseVector({1: 1.0, 2: 2.0})
v2 = SparseVector({2: 3.0, 4: 5.0})
d1 = v1.dot(v2)
self.assertEqual(d1,6.0)
def test_sv_dot5(self):
v1 = SparseVector()
v2 = SparseVector()
d1 = v1.dot(v2)
self.assertEqual(d1,0.0)
class SparseVectorTestsIaddc(unittest.TestCase):
def test_sv_iaddc1(self):
v1 = SparseVector({1: 1.0, 2: 2.0})
v2 = SparseVector({2: 3.0, 4: 5.0})
v1.iaddc(v2)
self.assertEqual(len(v1), 3)
self.assertEqual(len(v2), 2)
self.assertEqual(v1[1], 1.0)
self.assertEqual(v1[2], 5.0)
self.assertEqual(v1[4], 5.0)
def test_sv_iaddc2(self):
v1 = SparseVector()
v2 = SparseVector()
v2[2] = 2.0
v1.iaddc(v2)
self.assertEqual(len(v1), 1)
self.assertEqual(len(v2), 1)
self.assertEqual(v1[2], 2.0)
def test_sv_iaddc3(self):
v1 = SparseVector()
v2 = SparseVector()
v1.iaddc(v2)
self.assertEqual(len(v1), 0)
self.assertEqual(len(v2), 0)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "johannrichard/hypothesis-zotero",
"score": 2
} |
#### File: johannrichard/hypothesis-zotero/hypothesis_zotero.py
```python
import json
import os
from tkinter import *
import h_annot
from pyzotero import zotero
ZOTERO_API_UPDATE_LIMIT = 50
def save_transfer_settings(settings_path):
"""Save the currently entered transfer settings from the application form to
settings_path."""
try:
with open(settings_path, "w") as outfile:
settings = {"library_id": libraryid_w.get(),
"zot_api_key":zot_api_key_w.get(),
"hyp_username":hyp_username_w.get(),
"hyp_api_key":hyp_api_key_w.get(),
"num2grab":number_to_grab_w.get()}
json.dump(settings, outfile)
progress_indicator.set("Transfer settings saved!")
return True
except FileNotFoundError:
try:
os.mkdir(os.path.split(settings_path)[0])
save_transfer_settings(settings_path)
except PermissionError:
progress_indicator.set("No permission to save to home folder.")
return False
def load_transfer_settings(settings_path):
"""Load the settings used to transfer annotations from Hypothesis into Zotero
into the application form."""
try:
with open(settings_path) as infile:
settings = json.load(infile)
libraryid_w.insert(0, settings["library_id"])
zot_api_key_w.insert(0,settings["zot_api_key"])
hyp_username_w.insert(0,settings["hyp_username"])
hyp_api_key_w.insert(0,settings["hyp_api_key"])
number_to_grab_w.delete(0,END)
number_to_grab_w.insert(0,settings["num2grab"])
return True
except FileNotFoundError:
return False
def format_converted_note(annotation):
"""Format an annotation so that it translates properly into Zotero note markup."""
annotated_text = extract_exact(annotation)
annotation_text = annotation["text"]
return """<p style="color: green; text-align: center;">{}</p>
<br>
<p>{}</p>""".format(annotated_text, annotation_text)
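# Illustrative sketch (added; the sample annotation dict is an assumption mirroring the fields used
# above): an annotation whose highlighted text is "quoted passage" and whose body is "my comment"
# becomes a centred green quote followed by the comment, as Zotero note HTML.
def _format_converted_note_example():
    annotation = {"text": "my comment",
                  "target": [{"selector": [{"exact": "quoted passage"}]}]}
    return format_converted_note(annotation)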
def extract_exact(annotation):
try:
annotation["target"][0]["selector"]
except KeyError as e:
print(annotation)
return "<text not available>"
for selector in annotation["target"][0]["selector"]:
try:
return selector["exact"]
except KeyError:
continue
return None
def extract_note_tags(notes):
tags = set()
for note in notes:
for tag in note['data']['tags']:
tags.add(tag['tag'])
return tags
def grab():
grab_button.config(state=DISABLED)
progress_indicator.set("In progress...")
root.update()
library_id = libraryid_w.get()
zot_api_key = zot_api_key_w.get()
hyp_username = hyp_username_w.get()
hyp_api_key = hyp_api_key_w.get()
zot = zotero.Zotero(library_id, 'user', zot_api_key)
num2grab = number_to_grab_w.get()
items = zot.top(limit=num2grab)
progress_indicator.set("Zotero library downloaded")
root.update()
for entry_i in enumerate(items):
progress_indicator.set("Processing notes...({} of {})".format(entry_i[0]+1,len(items)))
root.update()
entry = entry_i[1]
entry_children = zot.children(entry['key'])
notes = [note for note in entry_children if note['data']['itemType'] == 'note']
tags = extract_note_tags(notes)
try:
entry_annotations = json.loads(h_annot.api.search(hyp_api_key,
url=entry['data']['url'],
user=hyp_username))["rows"]
note_imports = []
for annotation in entry_annotations:
if annotation["id"] in tags:
continue
else:
template = zot.item_template("note")
template['tags'] = (annotation['tags'].copy() +
[{"tag": annotation["id"], "type":1}] +
[{"tag": "hyp-annotation", "type":1}])
template['note'] = format_converted_note(annotation)
note_imports.append(template)
#TODO: Fix this so it doesn't break if you have more than 50 annotations on a document
zot.create_items(note_imports,entry["key"])
except:
# TODO: Output error details
continue
progress_indicator.set("Done!")
grab_button.config(state=NORMAL)
root = Tk()
root.title("Zotero Hypothesis Importer")
frame = Frame(root, width=100, height=100)
frame.pack()
# Add widgets to get auth info for API calls
libraryid_label = Label(frame, text="Library ID:")
libraryid_w = Entry(frame, width=25)
zot_api_key_label = Label(frame, text="Zotero API Key:")
zot_api_key_w = Entry(frame, width=25)
hyp_username_label = Label(frame, text="Hypothesis Username:")
hyp_username_w = Entry(frame, width=25)
hyp_api_key_label = Label(frame, text="Hypothesis API Key:")
hyp_api_key_w = Entry(frame, width=25)
number_to_grab_label = Label(frame,text="Grab last N items:")
number_to_grab_w = Entry(frame, width=25)
number_to_grab_w.insert(0,"50")
# Lay out widgets on application window
libraryid_label.grid(row=1)
libraryid_w.grid(row=1,column=1)
zot_api_key_label.grid(row=2)
zot_api_key_w.grid(row=2,column=1)
hyp_username_label.grid(row=3)
hyp_username_w.grid(row=3,column=1)
hyp_api_key_label.grid(row=4)
hyp_api_key_w.grid(row=4,column=1)
number_to_grab_label.grid(row=5)
number_to_grab_w.grid(row=5,column=1)
grab_button = Button(frame, text="Grab", command=grab)
grab_button.grid(row=6)
# Button to save transfer settings
save_button = Button(frame, text="Save Settings",
command=lambda: save_transfer_settings(
                         os.path.expanduser("~/.hypzot/transfer_settings.json")
))
save_button.grid(row=6,column=1)
# Add progress indicators
progress_indicator = StringVar()
progress_indicator.set("Waiting...")
grab_zotero_library_label = Label(frame, text="Progress:")
grab_zotero_library_i = Label(frame, textvariable=progress_indicator)
grab_zotero_library_label.grid(row=7)
grab_zotero_library_i.grid(row=7,column=1)
load_transfer_settings(os.path.expanduser("~/.hypzot/transfer_settings.json"))
root.mainloop()
``` |
{
"source": "johannscanaval/toblackboard",
"score": 3
} |
#### File: toblackboard/GUI Forms/App GUI.py
```python
from appJar import gui
import forms_app as api
app = gui()
app.addLabel("title", "Hola")
app.setLabelBg("title", "red")
app.addLabelEntry("Nombre de la columna")
def press(button):
if button == "Salir":
app.stop()
else:
ruta_forms=app.openBox(title="Por favor, ingresa el archivo de salida de Microsoft Forms", dirName=None, fileTypes=None, asFile=False, parent=None, multiple=False, mode='r')
ruta_sicua=app.openBox(title="Por favor, ingresa el archivo del Centro de Calificaciones", dirName=None, fileTypes=None, asFile=False, parent=None, multiple=False, mode='r')
column_name = app.getEntry("Nombre de la columna")
api.pasar(ruta_sicua, ruta_forms, column_name)
app.infoBox("Información", "Aparentemente, todo se hizo", parent=None)
app.addButtons(["Ingresar archivos", "Salir"], press)
app.go()
```
#### File: toblackboard/GUI version Nearpod/App GUI.py
```python
from appJar import gui
import nearpod_app as api
app = gui()
app.addLabel("title", "Hola")
app.setLabelBg("title", "red")
app.addLabelEntry("Nombre de la columna")
def press(button):
if button == "Salir":
app.stop()
else:
ruta_nearpod=app.openBox(title="Por favor, ingresa el archivo de salida de Nearpod", dirName=None, fileTypes=None, asFile=False, parent=None, multiple=False, mode='r')
ruta_sicua=app.openBox(title="Por favor, ingresa el archivo del Centro de Calificaciones", dirName=None, fileTypes=None, asFile=False, parent=None, multiple=False, mode='r')
column_name = app.getEntry("Nombre de la columna")
api.pasar(ruta_sicua, ruta_nearpod, column_name)
app.infoBox("Información", "Aparentemente, todo se hizo", parent=None)
app.addButtons(["Ingresar archivos", "Salir"], press)
app.go()
``` |
{
"source": "johannsdg/johann_web_basic",
"score": 2
} |
#### File: johann_web_basic/johann_web_basic/main.py
```python
import secrets
import logzero
import requests
from flask import abort, jsonify, render_template, request
from flask_bootstrap import Bootstrap
from werkzeug.middleware.proxy_fix import ProxyFix
from johann_web_basic import app
from johann_web_basic.scenarios import scenarios_bp
logger = logzero.setup_logger(__name__)
app.secret_key = secrets.token_hex(16) # WTF CSRF
app.config["BOOTSTRAP_SERVE_LOCAL"] = True
logger.debug(app.config)
bootstrap = Bootstrap(app)
logger.debug(bootstrap)
app.register_blueprint(scenarios_bp)
app.wsgi_app = ProxyFix(app.wsgi_app)
logger.debug(ProxyFix)
@app.route("/")
@app.route("/index")
@app.route("/about")
def index():
return render_template("about.html")
@app.route("/johann/scores/<scenario_name>/status_alt")
def get_score_status_alt(scenario_name):
try:
url = f"http://johann_conductor:5000/scores/{scenario_name}/status_alt"
r = requests.get(url)
if not r.ok:
msg = f"Failed to get status_alt for score '{scenario_name}': {r.reason}"
logger.warn(msg)
abort(r.status_code)
else:
resp_json = r.json()
# logger.debug(resp_json)
except Exception as e:
msg = f"Exception getting status_alt for score '{scenario_name}': {str(e)}"
logger.warning(msg, exc_info=True)
abort(502)
return jsonify(resp_json)
@app.route("/johann/read_score/<scenario_name>")
def read_score(scenario_name):
try:
query_string = request.query_string.decode()
url = f"http://johann_conductor:5000/read_score/{scenario_name}?{query_string}"
logger.debug(f"read_score() request URL: {url}")
r = requests.get(url)
if not r.ok:
msg = f"Failed to read score '{scenario_name}': {r.reason}"
logger.warn(msg)
abort(r.status_code)
else:
resp_json = r.json()
# logger.debug(scenario_json)
except Exception as e:
msg = f"Exception reading score '{scenario_name}': {str(e)}"
logger.warning(msg, exc_info=True)
abort(502)
return jsonify(resp_json)
@app.errorhandler(404)
def handle_404(_):
return render_template("404.html"), 404
@app.errorhandler(500)
def handle_500(_):
return render_template("500.html"), 500
@app.errorhandler(502)
def handle_502(_):
return render_template("502.html"), 502
if __name__ == "__main__":
# Only for debugging while developing, i.e., `make dev`
app.run(host="0.0.0.0", debug=True, port=80) # nosec
``` |
{
"source": "JohannSuarez/fastapi_backend",
"score": 2
} |
#### File: blog/config/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from blog import Config
addr: str = Config.sql_address()
name: str = Config.sql_name()
password: str = Config.sql_pass()
db: str = Config.sql_db()
'''
MARIADB_DATABASE_URL = f"mariadb+mariadbconnector://{name}:{password}@{addr}:3306/{db}"
engine = create_engine(
MARIADB_DATABASE_URL,
)
'''
# SQLite DB
SQLITE_DATABASE_URL = "sqlite:///./app.db"
engine = create_engine(
SQLITE_DATABASE_URL,
connect_args={"check_same_thread": False},
)
SessionLocal = sessionmaker(
autocommit=False,
autoflush=False,
bind=engine,
)
Base = declarative_base()
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
def create_tables():
Base.metadata.create_all(bind=engine)
```
#### File: fastapi_backend/blog/__init__.py
```python
from pathlib import Path
import sys
from threading import Lock
from typing import Any, Dict
from dotenv import dotenv_values
class ThreadSafeMeta(type):
"""
Thread safe implementation of Singleton.
"""
_instances: Dict[Any, Any] = {}
_lock: Lock = Lock()
def __call__(cls, *args, **kwargs):
"""
Possible changes to the value of the __init__
do not affect the returned instance.
"""
with cls._lock:
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class Config(metaclass=ThreadSafeMeta):
"""
    Global program configuration. Uses the dotenv package
    to load runtime configuration from a .env file, once
    and only once, into this object; the object can then be used
    throughout the code base.
"""
try:
__config: Dict[str, Any] = dotenv_values('.env')
__sql_addr = str(__config["SQL_ADDR"])
__sql_name = str(__config["SQL_NAME"])
__sql_pass = str(__config["SQL_PASS"])
__sql_db = str(__config["SQL_DB"])
__sql_table = str(__config["SQL_TABLE"])
except KeyError as error:
sys.stderr.write(f'Dotenv config error: {error} is missing \n')
sys.exit(1)
@classmethod
def sql_address(cls) -> str:
"""
@description: getter for config
"""
return cls.__sql_addr
@classmethod
def sql_name(cls) -> str:
"""
@description: getter for config
"""
return cls.__sql_name
@classmethod
def sql_pass(cls) -> str:
"""
@description: getter for config
"""
return cls.__sql_pass
@classmethod
def sql_db(cls) -> str:
"""
@description: getter for config
"""
return cls.__sql_db
@classmethod
def sql_table(cls) -> str:
"""
@description: getter for config
"""
return cls.__sql_table
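# Minimal sketch (illustrative addition, not part of the original module): because Config uses
# ThreadSafeMeta, constructing it repeatedly always returns the single cached instance, so every
# caller reads the same .env-backed settings.
def _singleton_sketch():
    return Config() is Config()  # -> True: both calls yield the one shared instance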
```
#### File: blog/old_files/db.py
```python
from os.path import join, dirname
from dotenv import load_dotenv
from typing import Union, Any
import mysql.connector
from mysql.connector.connection_cext import CMySQLConnection, CMySQLCursor
'''
Class that wraps around mysql.connector.
The interface has all the methods
needed to fetch data from the database.
'''
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
class DBInterface():
def __init__(self, db_address: str,
db_username: str,
db_password: str,
db_name: str,
db_table: str
) -> None:
self._db_address: str = db_address # IP Address
self._db_username: str = db_username # User's Name
        self._db_password: str = db_password  # Password
self._db_name: str = db_name # Name of the database in the server.
self._db_table: str = db_table # The table within the database.
self._connection: Union[None, CMySQLConnection] = None
self._cursor: Union[None, CMySQLCursor] = None
self._connect_db()
def _connect_db(self) -> None:
self.connection = mysql.connector.connect(
host=self._db_address,
user=self._db_username,
password=self._db_password
)
self.cursor = self.connection.cursor()
self.cursor.execute(f"USE {self._db_name}")
def fetch_tables(self) -> Union[list, None]:
if self.cursor:
self.cursor.execute("SHOW DATABASES")
return self.cursor.fetchall()
def list_table_data(self) -> Any:
if self.cursor:
try:
self.cursor.execute(f"SELECT * FROM {self._db_table}")
return self.cursor.fetchall()
except Exception as error:
print(f"Caught Exception in db.py's list_table_data - {error}")
# return ( self.cursor.fetchall() )
```
#### File: blog/old_files/sqlalchem.py
```python
from . import Config
from sqlalchemy import create_engine, text
def sql_alch_test():
sqldb_name: str = Config.sql_name()
sqldb_addr: str = Config.sql_address()
sqldb_pass: str = Config.sql_pass()
engine = create_engine(f"mysql+pymysql://{sqldb_name}:{sqldb_pass}@{sqldb_addr}:3306");
with engine.connect() as conn:
result = conn.execute(text("select 'hello world'"))
print(result.all())
```
#### File: blog/services/main.py
```python
from sqlalchemy.orm import Session
class DBSessionMixin:
def __init__(self, db: Session):
self.db = db
class AppService(DBSessionMixin):
pass
class AppCRUD(DBSessionMixin):
pass
```
#### File: blog/utils/request_exceptions.py
```python
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
async def http_exception_handler(
request: Request, exc: HTTPException
) -> JSONResponse:
return JSONResponse({"detail": exc.detail}, status_code=exc.status_code)
async def request_validation_exception_handler(
request: Request, exc: RequestValidationError
) -> JSONResponse:
    return JSONResponse(
status_code=HTTP_422_UNPROCESSABLE_ENTITY,
content={"detail": jsonable_encoder(exc.errors())}
)
``` |
{
"source": "JohannSuarez/tokenspice",
"score": 2
} |
#### File: agents/test/test_DataconsumerAgent.py
```python
import pytest
from assets.agents.PoolAgent import PoolAgent
from assets.agents.DataconsumerAgent import DataconsumerAgent
from assets.agents.PublisherAgent import PublisherAgent
from engine.AgentBase import AgentBase
from engine.AgentDict import AgentDict
from util.constants import S_PER_HOUR
class MockSS:
def __init__(self):
#seconds per tick
self.time_step: int = S_PER_HOUR
self.pool_weight_DT: float = 1.0
self.pool_weight_OCEAN: float = 1.0
class MockState:
def __init__(self):
self.agents = AgentDict({})
self.ss = MockSS()
def addAgent(self, agent):
self.agents[agent.name] = agent
class MockAgent(AgentBase):
def takeStep(self, state):
pass
def test_doBuyAndConsumeDT(alice_pool):
state = MockState()
agent = DataconsumerAgent("agent1",USD=0.0,OCEAN=1000.0)
assert agent._s_since_buy == 0
assert agent._s_between_buys > 0
assert not agent._doBuyAndConsumeDT(state)
agent._s_since_buy += agent._s_between_buys
assert not state.agents.filterToPool().values()
assert not agent._doBuyAndConsumeDT(state) #still no, since no pools
state.agents["pool1"] = PoolAgent("pool1", alice_pool)
assert state.agents.filterToPool().values() #have pools
assert agent._candPoolAgents(state) #have useful pools
assert agent._doBuyAndConsumeDT(state)
def test_buyAndConsumeDT(alice_info):
state = MockState()
publisher_agent = MockAgent("agent1", USD=0.0, OCEAN=0.0)
publisher_agent._wallet = alice_info.agent_wallet
state.addAgent(publisher_agent)
OCEAN_before = 1000.0
consumer_agent = DataconsumerAgent("consumer1", USD=0.0, OCEAN=OCEAN_before)
consumer_agent._s_since_buy += consumer_agent._s_between_buys
state.addAgent(consumer_agent)
pool_agent = PoolAgent("pool1", alice_info.pool)
state.addAgent(pool_agent)
assert state.agents.filterToPool().values() #have pools
assert consumer_agent._candPoolAgents(state) #have useful pools
assert consumer_agent._doBuyAndConsumeDT(state)
# buyAndConsumeDT
dt = state.agents["pool1"].datatoken
assert consumer_agent.OCEAN() == OCEAN_before
assert consumer_agent.DT(dt) == 0.0
OCEAN_spend = consumer_agent._buyAndConsumeDT(state)
OCEAN_after = consumer_agent.OCEAN()
OCEAN_gained = OCEAN_spend * (1.0 + consumer_agent.profit_margin_on_consume)
assert OCEAN_after == (OCEAN_before - OCEAN_spend + OCEAN_gained)
assert consumer_agent.DT(dt) == 0.0 #bought 1.0, then consumed it
# consumeDT
assert state.agents.agentByAddress(pool_agent.controller_address)
```
#### File: engine/test/test_SimEngine.py
```python
from enforce_typing import enforce_types
import os
import shutil
from engine import AgentBase
from engine import SimEngine, SimStateBase, SimStrategyBase, KPIsBase
PATH1 = '/tmp/test_outpath1'
# ==================================================================
# testing stubs
class SimStrategy(SimStrategyBase.SimStrategyBase):
pass
class KPIs(KPIsBase.KPIsBase):
def takeStep(self, state):
pass
@staticmethod
def tick():
pass
class SimpleAgent(AgentBase.AgentBase):
def takeStep(self, state):
pass
class SimState(SimStateBase.SimStateBase):
def __init__(self):
super().__init__()
self.ss = SimStrategy()
self.kpis = KPIs(time_step=3)
# ==================================================================
# actual tests
@enforce_types
def setUp():
# possible cleanup from prev run
if os.path.exists(PATH1):
shutil.rmtree(PATH1)
@enforce_types
def testRunLonger():
_testRunLonger(15)
@enforce_types
def _testRunLonger(max_ticks):
state = SimState()
state.ss.setMaxTicks(max_ticks)
engine = SimEngine.SimEngine(state, PATH1)
engine.run()
@enforce_types
def testRunEngine():
state = SimState()
state.ss.setMaxTicks(3)
engine = SimEngine.SimEngine(state, PATH1)
engine.run()
assert os.path.exists(PATH1)
assert engine.state.tick == 3
n_agents = engine.state.numAgents()
@enforce_types
def tearDown():
if os.path.exists(PATH1):
shutil.rmtree(PATH1)
```
#### File: engine/test/test_SimStateBase.py
```python
from enforce_typing import enforce_types
from engine import SimStateBase, SimStrategyBase, KPIsBase
from engine import AgentBase
from util.constants import S_PER_DAY
# ==================================================================
# testing stubs
class SimStrategy(SimStrategyBase.SimStrategyBase):
pass
class KPIs(KPIsBase.KPIsBase):
def takeStep(self, state):
pass
@staticmethod
def tick():
pass
class SimpleAgent(AgentBase.AgentBase):
def takeStep(self, state):
pass
class SimState(SimStateBase.SimStateBase):
def __init__(self):
super().__init__()
self.ss = SimStrategy()
self.kpis = KPIs(time_step=3)
# FIXME: BUG: it's creating a dict of dict of dict...
self.addAgent(SimpleAgent("agent1", 0.0, 0.0))
self.addAgent(SimpleAgent("agent2", 0.0, 0.0))
# ==================================================================
# actual tests
@enforce_types
def test1():
state = SimState()
assert state.tick == 0
assert state.numAgents() == 2
assert isinstance(state.kpis, KPIs)
state.takeStep()
assert id(state.getAgent("agent1")) == id(state.agents["agent1"])
assert len(state.allAgents()) == 2
assert state.numAgents() == 2
agent3 = SimpleAgent("agent3", 0.0, 0.0)
state.addAgent(agent3)
assert state.numAgents() == 3
```
#### File: web3tools/test/test_account.py
```python
from web3tools import account
def test_Account():
private_key_str = '0x9c75ed156c45b5a365bde30dc8871e9c6d2b5dc08a7b47572e0354afb859cb15'
account1 = account.Account(private_key=private_key_str)
account2 = account.Account(private_key=private_key_str)
assert account1.private_key == account2.private_key
assert account1.address == account2.address
assert account1.keysStr() == account2.keysStr()
def test_randomPrivateKey():
private_key1 = account.randomPrivateKey()
assert len(private_key1) == 32
private_key2 = account.randomPrivateKey()
assert len(private_key2) == 32
assert private_key1 != private_key2
def test_privateKeyToAddress():
private_key1 = account.randomPrivateKey()
address1 = account.Account(private_key=private_key1).address
address2 = account.privateKeyToAddress(private_key1)
assert address1 == address2
``` |
{
"source": "johann-su/cov_charts",
"score": 2
} |
#### File: covid_charts/bot/bot.py
```python
import logging
from typing import Dict, List, Callable, Optional
from telegram.utils.request import Request
from telegram import Update, Bot
from telegram.ext import Updater, CallbackContext, PicklePersistence
LOGGER = logging.getLogger(__name__)
def log_error(update: Update, context):
LOGGER.fatal(context.error, exc_info=True)
def run(token: str, handlers: List[Callable]):
bot = Bot(token, request=Request(
con_pool_size=10, connect_timeout=40))
updater = Updater(bot=bot, use_context=True, persistence=PicklePersistence(filename='bot_data'))
for handler in handlers:
updater.dispatcher.add_handler(handler)
updater.dispatcher.add_error_handler(log_error)
updater.start_polling()
updater.idle()
```
#### File: covid_charts/bot/setup_conv.py
```python
import difflib
import re
from telegram import Update, KeyboardButton, ReplyKeyboardMarkup, ReplyKeyboardRemove, ParseMode
from telegram.ext import CallbackContext
from covid_charts.bot.state import States
from covid_charts.bot.utils import is_sender_admin
from covid_charts.vars import choices_state, choices_county
def chart_type(update: Update, context: CallbackContext) -> str:
if is_sender_admin(update.message, context.bot):
chart_buttons = ReplyKeyboardMarkup([
[KeyboardButton("line", callback_data='line'), KeyboardButton("bar", callback_data='bar')],
[KeyboardButton("geo", callback_data='geo')],
], one_time_keyboard=True)
update.message.reply_text(
'Das wichtigste zuerst: Welches Chart gefällt dir am besten?',
reply_markup=chart_buttons
)
return States.TF
else:
update.message.reply_text(
"Du bist nicht der Admin der Gruppe und kannst daher keine Commands an den Bot senden.\nDu kannst mich zu deinen Kontakten hinzufügen https://t.me/CovGermanyBot um alle Funktionen nutzen zu können")
def timeframe(update: Update, context: CallbackContext) -> str:
regex = r'(line|bar|geo)'
# set data from prev step
if re.match(regex, update.message.text):
context.user_data['chart'] = update.message.text
update.message.reply_text(f"{context.user_data['chart']} charts? 👍\n\nAls nächstes brauche ich einen Zeitraum für den du Infos haben möchtest\.\n`7D` \= eine Woche, `4W` \= ein Monat, `52W` \= ein Jahr usw\.", reply_markup=ReplyKeyboardRemove(), parse_mode=ParseMode.MARKDOWN_V2)
return States.REGION
else:
update.message.reply_text("Huch 🤔\nDamit kann ich nichts anfangen\n\nBitte wähle ein Chart aus der Liste")
return States.CHART_TYPE
def region(update: Update, context: CallbackContext) -> str:
regex = r'[0-9][0-9]?[0-9]?(D|W)'
# set data from prev step
if re.match(regex, update.message.text):
context.user_data['tf'] = update.message.text
update.message.reply_text(
"Welche Region möchtest du sehen? Die `Bundesrepublik Deutschland`, `Sachsen` oder doch lieber nur `Dresden`?\n\n**Wenn du ein geo chart haben möchtest kannst du nicht daten auf Landkreisebene anfordern\!**", parse_mode=ParseMode.MARKDOWN_V2)
return States.DATA
else:
update.message.reply_text(
"Huch 🤔\nDamit kann ich nichts anfangen\n\nZur Erinnerung: Das Format war `7D` \= eine Woche, `4W` \= ein Monat, `52W` \= ein Jahr usw\.", parse_mode=ParseMode.MARKDOWN_V2)
return States.TIMEFRAME
def data(update: Update, context: CallbackContext) -> str:
if update.message.text in choices_county or update.message.text in choices_state or update.message.text == 'Bundesrepublik Deutschland':
context.user_data['region'] = update.message.text
update.message.reply_text("Letzte Frage: Welche Daten möchtest du sehen?\nIch kann dir Infos über die Infektionen und Tote geben\.\n\n`cases` \= Infektionen, `deaths` \= Tote\, `incidence` \= 7\-Tage\-Inzidenz\.\n**Tipp:** Du kannst auch mehrere Werte sehen indem du die Werte mit einem Komma trennst", parse_mode=ParseMode.MARKDOWN_V2)
return States.FINISHED
else:
        valid_choices = list(choices_county) + list(choices_state) + ['Bundesrepublik Deutschland']
suggestion = difflib.get_close_matches(update.message.text, valid_choices)
print(suggestion)
update.message.reply_text(f"Huch 🤔\nDamit kann ich nichts anfangen\n\nMeintest du vlt. {suggestion}?")
return States.REGION
def finished(update: Update, context: CallbackContext) -> None:
regex = r'(cases|deaths|incidence),? ?(cases|deaths|incidence)?'
if re.match(regex, update.message.text):
context.user_data['data'] = update.message.text.replace(' ', '').split(',')
update.message.reply_text("Das war alles 👍. Ich merke mir deine Einstellungen, aber du kannst sie jederzeit mit /setup wieder ändern.")
return States.END
else:
update.message.reply_text(
"Huch 🤔\nDamit kann ich nichts anfangen\n\nZur Erinnerung: Die Daten können `cases`, `deaths` und `incidence` sein und müssen mit einem , getrennt werden")
return States.DATA
def cancel_setup(update: Update, context: CallbackContext) -> None:
update.message.reply_text("Du hast das Setup abgebrochen. Bis zum nächsten mal 👋")
return States.END
```
#### File: covid_charts/bot/utils.py
```python
def is_sender_admin(message, bot):
user = message.from_user
if message.chat.type != 'private':
# get chat member and user group info
chatmember = bot.get_chat_member(message.chat.id, user.id)
if(chatmember.status == 'administrator' or chatmember.status == 'creator'):
return True
else:
return False
else:
return True
``` |
{
"source": "johann-su/K-nstliche-Intelligenz---12.-klassarbeit",
"score": 2
} |
#### File: Reinforcement Learning/Flappy Birds/Q-Learning_Test.py
```python
import flappy
import numpy as np
import sys
from collections import defaultdict
import pickle
rewardAlive = 1
rewardKill = -10000
alpha = 0.1
gamma = 1
# Q[state] = (do not jump, jump)
with open("/Users/johann/github/Artificial_intelligence/Reinforcement Learning/Flappy Birds/Q/Q.pickle", "rb") as file:
Q = defaultdict(lambda: [0, 0], pickle.load(file))
def paramsToState(params):
playerVelY = params['playerVelY']
playery = params["playery"]
if int(params['upperPipes'][0]['x']) < 40:
index = 1
else:
index = 0
upperPipeX = round(int(params["upperPipes"][index]['x']) / 3) * 3
upperPipeY = int(params["upperPipes"][index]['y'])
yDiff = round((playery - upperPipeY) / 3) * 3
return str(playerVelY) + "_" + str(yDiff) + "_" + str(upperPipeX)
oldState = None
oldAction = None
gameCounter = 0
gameScores = []
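# Hedged illustration (added, not part of the original script): both callbacks below apply the
# tabular Q-learning update Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (r + gamma * max_a' Q(s', a')),
# with r = rewardAlive while the bird survives and r = rewardKill (no future term) on game over.
def q_update_sketch(q_sa, reward, best_next_q, alpha=alpha, gamma=gamma):
    return (1 - alpha) * q_sa + alpha * (reward + gamma * best_next_q)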
def onGameover(gameInfo):
global oldState
global oldAction
global gameCounter
global gameScores
gameScores.append(gameInfo['score'])
if gameCounter % 100 == 0:
print(str(gameCounter) + " - " + str(np.mean(gameScores[-100:])))
    # Update Q for the previous action
    # -> the previous action was not successful!
prevReward = Q[oldState]
index = None
if oldAction == False:
index = 0
else:
index = 1
prevReward[index] = (1 - alpha) * prevReward[index] + \
alpha * rewardKill
Q[oldState] = prevReward
oldState = None
oldAction = None
#if gameCounter % 50000 == 0:
# with open("Q/" + str(gameCounter) + ".pickle", "wb") as file:
# pickle.dump(dict(Q), file)
gameCounter+=1
def shouldEmulateKeyPress(params):
global oldState
global oldAction
state = paramsToState(params)
estReward = Q[state]
    # Update Q for the previous action
    # -> the previous action was successful!
prevReward = Q[oldState]
index = None
if oldAction == False:
index = 0
else:
index = 1
prevReward[index] = (1 - alpha) * prevReward[index] + \
alpha * (rewardAlive + gamma * max(estReward))
Q[oldState] = prevReward
oldState = state
if estReward[0] >= estReward[1]:
oldAction = False
return False
else:
oldAction = True
return True
flappy.main(shouldEmulateKeyPress, onGameover)
```
#### File: Reinforcement Learning/gym/CartPole_v1_PER.py
```python
import gym
import random
import numpy as np
import tensorflow as tf
from collections import deque
print("Gym:", gym.__version__)
print("Tensorflow:", tf.__version__)
env_name = "CartPole-v1"
env = gym.make(env_name)
print("Observation space:", env.observation_space)
print("Action space:", env.action_space)
class QNetwork():
def __init__(self, state_dim, action_size, tau=0.01):
tf.reset_default_graph()
self.state_in = tf.placeholder(tf.float32, shape=[None, *state_dim])
self.action_in = tf.placeholder(tf.int32, shape=[None])
self.q_target_in = tf.placeholder(tf.float32, shape=[None])
self.importance_in = tf.placeholder(tf.float32, shape=[None])
action_one_hot = tf.one_hot(self.action_in, depth=action_size)
self.q_state_local = self.build_model(action_size, "local")
self.q_state_target = self.build_model(action_size, "target")
self.q_state_action = tf.reduce_sum(tf.multiply(self.q_state_local, action_one_hot), axis=1)
self.error = self.q_state_action - self.q_target_in
self.loss = tf.reduce_mean(tf.multiply(tf.square(self.error), self.importance_in))
self.optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(self.loss)
self.local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="local")
self.target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="target")
self.updater = tf.group([tf.assign(t, t + tau*(l-t)) for t,l in zip(self.target_vars, self.local_vars)])
def build_model(self, action_size, scope):
with tf.variable_scope(scope):
hidden1 = tf.layers.dense(self.state_in, 100, activation=tf.nn.relu)
q_state = tf.layers.dense(hidden1, action_size, activation=None)
return q_state
def update_model(self, session, state, action, q_target, importance):
feed = {self.state_in: state, self.action_in: action, self.q_target_in: q_target, self.importance_in: importance}
error, _, _ = session.run([self.error, self.optimizer, self.updater], feed_dict=feed)
return error
def get_q_state(self, session, state, use_target=False):
q_state_op = self.q_state_target if use_target else self.q_state_local
q_state = session.run(q_state_op, feed_dict={self.state_in: state})
return q_state
class PrioritizedReplayBuffer():
def __init__(self, maxlen):
self.buffer = deque(maxlen=maxlen)
self.priorities = deque(maxlen=maxlen)
def add(self, experience):
self.buffer.append(experience)
self.priorities.append(max(self.priorities, default=1))
def get_probabilities(self, priority_scale):
scaled_priorities = np.array(self.priorities) ** priority_scale
sample_probabilities = scaled_priorities / sum(scaled_priorities)
return sample_probabilities
def get_importance(self, probabilities):
importance = 1/len(self.buffer) * 1/probabilities
importance_normalized = importance / max(importance)
return importance_normalized
def sample(self, batch_size, priority_scale=1.0):
sample_size = min(len(self.buffer), batch_size)
sample_probs = self.get_probabilities(priority_scale)
sample_indices = random.choices(range(len(self.buffer)), k=sample_size, weights=sample_probs)
samples = np.array(self.buffer)[sample_indices]
importance = self.get_importance(sample_probs[sample_indices])
return map(list, zip(*samples)), importance, sample_indices
def set_priorities(self, indices, errors, offset=0.1):
for i,e in zip(indices, errors):
self.priorities[i] = abs(e) + offset
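# Hedged illustration (added, not from the original): with priority_scale a, transition i is sampled
# with probability p_i**a / sum_j p_j**a, and the importance weight 1/(N*P(i)) is normalised by its
# maximum; this is exactly what get_probabilities() and get_importance() above compute.
def _per_sampling_sketch(priorities=(1.0, 2.0, 4.0), a=0.7):
    scaled = np.array(priorities) ** a
    return scaled / scaled.sum()  # sampling probabilities for the three example transitions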
class DoubleDQNAgent():
def __init__(self, env):
self.state_dim = env.observation_space.shape
self.action_size = env.action_space.n
self.q_network = QNetwork(self.state_dim, self.action_size)
self.replay_buffer = PrioritizedReplayBuffer(maxlen=100000)
self.gamma = 0.97
self.eps = 1.0
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
def get_action(self, state):
q_state = self.q_network.get_q_state(self.sess, [state])
action_greedy = np.argmax(q_state)
action_random = np.random.randint(self.action_size)
action = action_random if random.random() < self.eps else action_greedy
return action
def train(self, state, action, next_state, reward, done, use_DDQN=True, a=0.0):
self.replay_buffer.add((state, action, next_state, reward, done))
(states, actions, next_states, rewards, dones), importance, indices = self.replay_buffer.sample(50, priority_scale=a)
next_actions = np.argmax(self.q_network.get_q_state(self.sess, next_states, use_target=False), axis=1)
q_next_states = self.q_network.get_q_state(self.sess, next_states, use_target=use_DDQN)
q_next_states[dones] = np.zeros([self.action_size])
q_next_states_next_actions = q_next_states[np.arange(next_actions.shape[0]), next_actions]
q_targets = rewards + self.gamma * q_next_states_next_actions
errors = self.q_network.update_model(self.sess, states, actions, q_targets, importance**(1-self.eps))
self.replay_buffer.set_priorities(indices, errors)
if done: self.eps = max(0.1, 0.99*self.eps)
def __del__(self):
self.sess.close()
num_runs = 10
run_rewards = []
for n in range(num_runs):
print("Run {}".format(n))
ep_rewards = []
agent = None
agent = DoubleDQNAgent(env)
num_episodes = 20000
for ep in range(num_episodes):
state = env.reset()
total_reward = 0
done = False
step = 0
while not done:
step += 1
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
agent.train(state, action, next_state, reward, done, a=(n%2==0)*0.7)
env.render()
total_reward += reward
state = next_state
if done:
print("score: " + str(step))
ep_rewards.append(total_reward)
#print("Episode: {}, total_reward: {:.2f}".format(ep, total_reward))
run_rewards.append(ep_rewards)
import matplotlib.pyplot as plt
for n, ep_rewards in enumerate(run_rewards):
x = range(len(ep_rewards))
cumsum = np.cumsum(ep_rewards)
avgs = [cumsum[ep]/(ep+1) if ep<100 else (cumsum[ep]-cumsum[ep-100])/100 for ep in x]
col = "r" if (n%2==0) else "b"
plt.plot(x, avgs, color=col, label=n)
plt.title("Prioritized Replay performance")
plt.xlabel("Episode")
plt.ylabel("Last 100 episode average rewards")
plt.legend()
``` |
{
"source": "johannvk/ProjectEuler",
"score": 3
} |
#### File: johannvk/ProjectEuler/Prob_11.py
```python
import numpy as np
#with open('Data_prob_11.txt', 'r') as f:
data_grid = np.loadtxt('Data_prob_11.txt', dtype='int64', ndmin=2)
def find_sum_ndarray_rowwise(ndarray, row, col):
ret_value = 1
for i in range(4): ret_value*=ndarray[row, col+i]
return ret_value
def find_sum_ndarray_colwise(ndarray, row, col):
ret_value = 1
for i in range(4): ret_value *= ndarray[row + i, col]
return ret_value
def sum_ndarray_right_diagonal(data, row, col):
ret_value = 1
for i in range(4):
ret_value *= data[row - i, col + i]
return ret_value
def sum_ndarray_left_diagonal(data, row, col):
ret_value = 1
for i in range(4):
# print(data[row + i, col + i])
ret_value *= data[row + i, col + i]
return ret_value
def find_largest_set_4():
max_value = 0
pos_row = (0, 0)
pos_col = (0, 0)
pos_ldiag = (0, 0)
pos_rdiag = (0, 0)
max_row_value = 0
max_col_value = 0
max_rdiag_value = 0
max_ldiag_value = 0
# horizontal and vertical directions
for i in range(0, 20):
for j in range(0, 17):
temp_value1 = find_sum_ndarray_rowwise(data_grid, i, j)
if temp_value1 > max_row_value:
max_row_value = temp_value1
pos_row = (i, j)
temp_value2 = find_sum_ndarray_colwise(data_grid, j, i)
if temp_value2 > max_col_value:
pos_col = (i, j)
                max_col_value = temp_value2
# right-diagonal
for i in range(3, 20):
for j in range(0, 17):
temp_value1 = sum_ndarray_right_diagonal(data_grid, i, j)
if temp_value1 > max_rdiag_value:
max_rdiag_value = temp_value1
pos_rdiag = (i, j)
# left-diagonal
for i in range(0, 17):
for j in range(0, 17):
temp_value2 = sum_ndarray_left_diagonal(data_grid, i, j)
if temp_value2 > max_ldiag_value:
max_ldiag_value = temp_value2
pos_ldiag = (i, (j+20))
max_value = max(max_row_value, max_col_value, max_rdiag_value, max_ldiag_value)
return max_value, [pos_row, pos_col, pos_rdiag, pos_ldiag], [max_row_value, max_col_value, max_rdiag_value, max_ldiag_value]
largest_sum, list_pos, list_max_values = find_largest_set_4()
print("Largest sum:", largest_sum)
print("List of pos: ", list_pos)
print("List of values: ", list_max_values)
```
#### File: johannvk/ProjectEuler/Prob_17.py
```python
one_to_twenty_dict = {1: "one", 2: "two", 3: "three", 4: "four", 5: "five", 6: "six", 7: "seven", 8: "eight",
9: "nine", 10: "ten", 11: "eleven", 12: "twelve", 13: "thirteen", 14: "fourteen",
15: "fifteen", 16: "sixteen", 17: "seventeen", 18: "eighteen", 19: "nineteen"}
tens_dict = {2: "twenty", 3: "thirty", 4: "forty", 5: "fifty", 6: "sixty", 7: "seventy", 8: "eighty", 9: "ninety"}
hundred = "hundred"
thousand = "thousand"
def integer_to_string(num):
if num == 1000: return "onethousand"
if num < 100:
return integer_under_hundred_to_string(num)
ret_string = ""
ret_string += (integer_under_hundred_to_string(num//100) + "hundred")
if num % 100 != 0: ret_string += "and" + integer_under_hundred_to_string(num % 100)
return ret_string
def integer_under_hundred_to_string(num):
ret_string = ""
if num < 20:
ret_string = one_to_twenty_dict[num]
return ret_string
ret_string = tens_dict[(num//10)]
if num % 10 != 0: ret_string += one_to_twenty_dict[(num % 10)]
return ret_string
number_string = ""
for i in range(1, 1001):
number_string += integer_to_string(i)
print(len(number_string))
```
#### File: johannvk/ProjectEuler/Prob_24.py
```python
import numpy as np
def permutation_value(num_list, max_num):
return sum([num_list[i]*max_num**(len(num_list)-1-i) for i in range(len(num_list))])
def permutation_update(in_list):
pass
init_array = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
init_array.astype('int64')
# UNSOLVED!
```
#### File: johannvk/ProjectEuler/Prob_39_unsolved.py
```python
import operator
# Integer Right Triangles
# Implementing stupid brute force trials:
def brute_force_triangle_check(max_perim):
    # For each perimeter p, count right triangles with integer sides a <= b <= c and a + b + c = p.
    counts = {}
    for p in range(3, max_perim + 1):
        for a in range(1, p - 2):
            for b in range(a, (p - a) // 2 + 1):
                c = p - a - b
                if a * a + b * b == c * c:
                    counts[p] = counts.get(p, 0) + 1
    return counts
def is_perfect_square(num):
return (int(num**(1/2)))**2 == num
# perimeter_length_solutions = {}
#
#
# def len_hyp(a, b):
# return (a**2 + b**2)**(1/2)
#
#
# def check_right_triangle(kat1, kat2, hyp):
# return kat1**2 + kat2**2 - hyp**2 == 0
#
#
# base_pairs = []
# triangle_perimeter = 125
#
# # Real part of the imaginary number
# a = 2
#
#
# while a**2 + a < triangle_perimeter - 2:
# # Complex part of the imaginary number
# b = 1
# while b < a:
# if (a, b) not in base_pairs and (b, a) not in base_pairs:
# base_pairs.append((a, b))
# base_pairs.append((b, a))
# perim = (a**2 - b**2) + 2*a*b + (len_hyp(a, b))
# if perim in perimeter_length_solutions.keys():
# perimeter_length_solutions[perim] += 1
# else:
# perimeter_length_solutions[perim] = 1
#
# b += 1
#
# # Want to implement scaling of the triplets formed by multiplying (a + ib) by itself.
#
# a += 1
#
#
# max_key = max(perimeter_length_solutions.keys(), key=lambda k: perimeter_length_solutions[k])
#
# print("The perimeter value with the most amount of solutions was: {}\nWith {} solutions"
# .format(max_key, perimeter_length_solutions[max_key]))
``` |
{
"source": "johannwalder/content-reputation",
"score": 3
} |
#### File: content-reputation/models/content_type.py
```python
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import relationship
from base import Base
class ContentType(Base):
"""Contains the different content types (web page, file)"""
__tablename__ = 'content_type'
title = Column(String(80), unique=True)
contentratings = relationship('ContentRating',backref='contentType',lazy='dynamic')
def serialize(self):
return {
'id': self.id,
'title': self.title
}
def __init__(self, id, title):
self.id = id
self.title = title
def __repr__(self):
return '<ContentType: Type: %s>' % (self.title)
``` |
{
"source": "johanofverstedt/sdt",
"score": 2
} |
#### File: johanofverstedt/sdt/sdt.py
```python
import numpy as np
import scipy as sp
import scipy.spatial as spatial
from scipy.spatial import cKDTree
import scipy.ndimage as ndimage
import scipy.ndimage.morphology
import time
# Module parameters
MB_TEMP_SIZE = 64.0
# det_sdt - Computes the deterministic SDT for a given image
# ---------------------------------------------------------
# A - numpy array of arbitrary dimension
# bool (sets) / integer typed (multisets)
# p - the uncertainty factor (see SDT paper)
# k - number of nearest neighbours to consider (if k is not None,
# m must be None)
# m - the probability mass to capture (if m is not None,
# k must be None)
# dmax - the maximum distance; a value of 0 means that
# the diameter of the domain of image A will be used
# spacing - the pixel size (scalar or vector with the same
# number of elements as dimensions in A)
# processors - the number of processors used in the tree
# lookup stage
def det_sdt(A, p, k, m = None, dmax=0, spacing=None, processors=1):
# Exactly one of k and m must be none
assert((k is None or m is None) and not (k is None and m is None))
if m is not None:
k = compute_k(p, m)
input_shape = A.shape
if spacing is None:
spacing_vec = np.ones([A.ndim])
else:
spacing_vec = spacing.reshape([A.ndim])
# If no dmax is specified, use the diameter of the image grid
if dmax <= 0:
dmax = _grid_diameter(input_shape, spacing_vec)
if A.dtype == np.dtype('bool'):
A = A.astype('int8')
assert(issubclass(A.dtype.type, np.integer))
pnts_multiplicities = A.reshape([A.size])
grid_points = _array_grid(A, indexing = 'ij', spacing=spacing)
G = np.reshape(grid_points, [A.size, A.ndim])
pnts = point_set_to_multiset(G, pnts_multiplicities)
# If we have an empty set, just return
if pnts.shape[0] == 0:
return np.full(input_shape, fill_value=dmax, dtype='float32')
# If we have a singleton set, just return the distances to this point
if pnts.shape[0] == 1:
return (np.sqrt(np.sum(np.square(G - pnts), axis=-1)) * (1.0-p) + p * dmax).reshape(input_shape)
# If we have fewer than k points, reduce k
if pnts.shape[0] < k:
k = pnts.shape[0]
tree = cKDTree(pnts)
prob = np.array([np.power(p, (i-1)) * (1-p) for i in range(1, k+1)])
subset_size = int((MB_TEMP_SIZE * 1024.0 * 1024.0) / (k*8))
subsets = int(np.ceil(A.size / float(subset_size)))
drem = dmax * np.power(p, k)
res = np.full(shape=[A.size], fill_value=drem, dtype = 'float32')
# To save on required working memory, we only do the k-NN search
# and subsequent computation of the distances aggregate distances
# on subsets of the points, which are then written to the result
# array.
for ss_ind in range(subsets):
start_index = ss_ind * subset_size
end_index = min(start_index + subset_size, A.size)
res[start_index:end_index] += _knn_query(tree, G[start_index:end_index, :], k, dmax, prob, processors)
return res.reshape(A.shape)
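# Minimal usage sketch (illustrative addition, not part of the original module): compute the SDT of
# a small random binary image with uncertainty p = 0.5, capturing probability mass m = 0.99.
def _det_sdt_usage_sketch():
    A = np.random.rand(32, 32) >= 0.9                    # random binary set on a 32x32 grid
    D = det_sdt(A, p=0.5, k=None, m=0.99, processors=1)  # default spacing; dmax = grid diameter
    return D.shape                                       # same shape as the input image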
"""
# Computes the sum pooling of an image subject to the neighborhood
# corresponding to a downsampling factor, and then downsample
# the image.
def sum_pool(A, pool_size, stride, padding_mode='valid'):
if stride is None:
stride = pool_size
# Create the convolution kernel
kernel = np.ones([pool_size]*A.ndim)
# Perform the convolution
B = ndimage.convolve(A, kernel, mode='constant', cval = 0)
# Create a slicer object depending on the chosen padding mode
if padding_mode == 'zero':
slicer = tuple(slice(0, A.shape[i], stride) for i in range(A.ndim))
elif padding_mode == 'valid':
slicer = tuple(slice(0, A.shape[i] - A.shape[i] % stride, stride) for i in range(A.ndim))
else:
raise ValueError('padding_mode must be either \'zero\' or \'valid\'')
# Slice and return the output
return B[slicer]
"""
# Computes the sum pooling of an image subject to the neighborhood
# corresponding to a downsampling factor, and then downsample
# the image.
def sum_pool(A, pool_size, stride, padding_mode='valid'):
if stride is None:
stride = pool_size
if A.dtype == np.dtype('bool'):
A = A.astype(dtype='int32')
# Create the convolution kernel
kernel = np.ones([pool_size]*A.ndim)
# Perform the convolution
origin = int(pool_size/2) - (1 - (pool_size % 2))
B = ndimage.convolve(A, kernel, mode='constant', cval = 0, origin = origin)
# Create a slicer object depending on the chosen padding mode
if padding_mode == 'zero':
slicer = tuple(slice(0, A.shape[i], stride) for i in range(A.ndim))
elif padding_mode == 'valid':
slicer = tuple(slice(0, A.shape[i] - A.shape[i] % stride, stride) for i in range(A.ndim))
else:
raise ValueError('padding_mode must be either \'zero\' or \'valid\'')
# Slice and return the output
return B[slicer]
# Computes the OR pooling (presence/absence) of an image subject to the
# neighborhood corresponding to a downsampling factor, and then downsample
# the image.
def or_pool(A, pool_size, stride, padding_mode='valid'):
return sum_pool(A, pool_size, stride, padding_mode) > 0
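# Brief sketch (illustrative addition): downsample a binary mask by a factor of 4 before the SDT,
# keeping either multiplicities (sum_pool, a multiset) or only presence/absence (or_pool, a set).
def _pooling_sketch(mask):
    counts = sum_pool(mask, pool_size=4, stride=4, padding_mode='valid')   # integer count per 4x4 block
    present = or_pool(mask, pool_size=4, stride=4, padding_mode='valid')   # True where any pixel was set
    return counts, present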
def compute_k(p, m):
if np.isclose(p, 0.0):
return 1
else:
return int(np.ceil(np.log(1-m)/np.log(p)))
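# Worked example (illustrative addition): with p = 0.9 and probability mass m = 0.999,
# k = ceil(log(1 - 0.999) / log(0.9)) = ceil(65.56...) = 66 nearest neighbours.
def _compute_k_example():
    return compute_k(0.9, 0.999)  # -> 66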
def det_sdt_multiset_naive(A, p, k, dmax=0, spacing=None):
if spacing is None:
spacing_vec = np.ones([A.ndim])
else:
spacing_vec = spacing.reshape([A.ndim])
# If no dmax is specified, use the diameter of the image grid
if dmax <= 0:
dmax = _grid_diameter(A.shape, spacing_vec)#np.sqrt(np.sum([np.power((A.shape[i]*spacing_vec[i])-1, 2.0) for i in range(A.ndim)]))
pnt_tup = np.nonzero(A)
pnts_multiplicities = A[pnt_tup].astype(dtype='int32')
total_pnts_count = np.sum(pnts_multiplicities)
if k > total_pnts_count:
k = total_pnts_count
G = np.reshape(_array_grid(A, indexing = 'xy', spacing=spacing), [A.size, A.ndim])
pnts = point_set_to_multiset(np.transpose(pnt_tup) * (spacing_vec), pnts_multiplicities)
prob = np.array([np.power(p, (i-1)) * (1-p) for i in range(1, k+1)])
res = np.zeros([A.size])
for i in range(A.size):
grid_p = G[i, :]
dists = np.zeros([total_pnts_count])
#print(total_pnts_count)
#print(np.sum(pnts_multiplicities))
#print(pnts.shape[0])
for j in range(total_pnts_count):
obj_p = pnts[j, :]
d = np.sqrt(np.sum(np.square(obj_p-grid_p)))
dists[j] = d
dists=np.sort(dists)
res[i] = np.sum(prob * dists[:k])
drem = dmax * np.power(p, k)
res = res + drem
return res.reshape(A.shape)
# Helper functions
def _array_grid(A, indexing = 'ij', spacing=None):
if spacing is None:
spacing = np.ones([A.ndim])
ranges = [spacing[i] * np.arange(0, A.shape[i]) for i in range(A.ndim)]
    ax = list(range(1, A.ndim + 1)) + [0]
return np.transpose(np.meshgrid(*ranges, indexing = indexing), axes=ax)
#return np.transpose(np.meshgrid(*ranges, indexing = indexing))
def _grid_diameter(Sz, spacing=None):
n = len(Sz)
if spacing is None:
spacing = np.ones([n])
assert(spacing.size == n)
return np.sqrt(np.sum([np.square((Sz[i]-1)*spacing[i]) for i in range(n)])).astype('float32')
def _knn_query(T, pnts, k, dmax, probabilities, processors):
d,_ = T.query(pnts, k=k, distance_upper_bound=dmax, n_jobs=processors)
np.clip(d, a_min=None, a_max=dmax, out = d)
dp = d.dot(probabilities)
return dp
def point_set_to_multiset(pnts, multiplicities):
return np.repeat(pnts, repeats=multiplicities, axis=0)
# Test cases
def test_empty_set_2d():
A = np.zeros([3, 4], dtype='int32')
dmax = _grid_diameter(A.shape, spacing=np.array([1.0, 1.0]))
D = det_sdt(A, 0.5, k = None, m = 0.99, dmax = 0, spacing = None, processors = 1)
assert(np.all(D == dmax))
def test_singleton_set_2d():
A = np.zeros([4, 5], dtype='int32')
A[1, 2] = 1
dmax = _grid_diameter(A.shape, spacing=np.array([1.0, 1.0]))
D = det_sdt(A, 0.5, k = None, m = 0.99, dmax = 0, spacing = None, processors = 1)
print(D)
#assert(np.all(D == dmax))
def test_empty_set_3d():
A = np.zeros([3, 4, 5], dtype='int32')
dmax = _grid_diameter(A.shape, spacing=np.array([1.0, 1.0, 1.0]))
D = det_sdt(A, 0.5, k = None, m = 0.99, dmax = 0, spacing = None, processors = 1)
assert(np.all(D == dmax))
def test_singleton_set_3d():
A = np.zeros([4, 5, 6], dtype='int32')
A[1, 2, 2] = 1
dmax = _grid_diameter(A.shape, spacing=np.array([1.0, 1.0, 1.0]))
D = det_sdt(A, 0.5, k = None, m = 0.99, dmax = 0, spacing = None, processors = 1)
print(D)
#assert(np.all(D == dmax))
def main():
# run test-cases
test_empty_set_2d()
test_singleton_set_2d()
#return
sz = 1024
np.random.seed(1024)
#B = np.random.rand(sz, sz) >= 0.75
shp = (256, 256, 124)
dim = len(shp)
B = np.random.rand(*shp) >= 0.75
print(B)
start1 = time.time()
    res_dt = sp.ndimage.morphology.distance_transform_edt(~B)  # ~B: distance to the nearest set pixel
end1 = time.time()
start2 = time.time()
print(end1-start1)
dmax = 0
downsampling = 4
spacing = float(downsampling)
C = sum_pool(B, downsampling, downsampling, 'zero')
slicer = tuple(slice(int(C.shape[i]/2)-5, int(C.shape[i]/2)+5, 1) for i in range(C.ndim))
slicer2 = tuple(slice(int(C.shape[i]/2)-5, int(C.shape[i]/2)+5, 1) for i in range(C.ndim))
print('C')
print(C[slicer])
p = 0.9
m = 0.999
k = compute_k(p, m)
processors = 2
spacing_vec = np.full([dim], fill_value=spacing)
start2 = time.time()
res = det_sdt(C, p, k = None, m = m, dmax = dmax, spacing = spacing_vec, processors = processors)
end2 = time.time()
print('res')
print(res[slicer])
print(end2-start2)
return
start3 = time.time()
res2 = det_sdt(B, p, k, dmax=dmax, spacing=np.ones([dim]), processors=processors)
end3 = time.time()
res2 = res2[int(downsampling / 2)::downsampling, int(downsampling / 2)::downsampling, int(downsampling / 2)::downsampling]
#end2 = time.time()
#print res.shape
#print res2.shape
#return
print('res2')
print(res2[slicer2])
print(end3-start3)
if __name__ == '__main__':
main()
``` |
{
"source": "johanoren/IncrementalNumbers_Fusion360",
"score": 3
} |
#### File: fontTools/misc/py23.py
```python
from __future__ import print_function, division, absolute_import
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
try:
unichr
bytechr = chr
byteord = ord
except:
unichr = chr
def bytechr(n):
return bytes([n])
def byteord(c):
return c if isinstance(c, int) else ord(c)
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
def strjoin(iterable, joiner=''):
return tostr(joiner).join(iterable)
def tobytes(s, encoding='ascii', errors='strict'):
if not isinstance(s, bytes):
return s.encode(encoding, errors)
else:
return s
def tounicode(s, encoding='ascii', errors='strict'):
if not isinstance(s, unicode):
return s.decode(encoding, errors)
else:
return s
if str == bytes:
class Tag(str):
def tobytes(self):
if isinstance(self, bytes):
return self
else:
return self.encode('latin1')
tostr = tobytes
bytesjoin = strjoin
else:
class Tag(str):
@staticmethod
def transcode(blob):
if not isinstance(blob, str):
blob = blob.decode('latin-1')
return blob
def __new__(self, content):
return str.__new__(self, self.transcode(content))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return str.__eq__(self, self.transcode(other))
def __hash__(self):
return str.__hash__(self)
def tobytes(self):
return self.encode('latin-1')
tostr = tounicode
def bytesjoin(iterable, joiner=b''):
return tobytes(joiner).join(tobytes(item) for item in iterable)
```
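A minimal usage sketch for the compatibility helpers above (not part of the original module); it assumes the shim is importable as `fontTools.misc.py23` and only exercises names defined in this file:
```python
from fontTools.misc.py23 import Tag, tobytes, tounicode, bytesjoin, strjoin

# str/bytes round-trips (ASCII encoding by default)
assert tobytes("name") == b"name"
assert tounicode(b"name") == "name"

# joiners defined above
assert bytesjoin([b"cm", b"ap"]) == b"cmap"
assert strjoin(["c", "m", "a", "p"]) == "cmap"

# Tag compares equal across str and bytes and converts back to bytes
tag = Tag(b"glyf")
assert tag == "glyf"
assert tag.tobytes() == b"glyf"
```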
#### File: fontTools/misc/xmlReader.py
```python
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import os
class TTXParseError(Exception): pass
BUFSIZE = 0x4000
class XMLReader(object):
def __init__(self, fileName, ttFont, progress=None, quiet=False):
self.ttFont = ttFont
self.fileName = fileName
self.progress = progress
self.quiet = quiet
self.root = None
self.contentStack = []
self.stackSize = 0
def read(self):
if self.progress:
import stat
self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1)
file = open(self.fileName)
self._parseFile(file)
file.close()
def _parseFile(self, file):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
parser.CharacterDataHandler = self._characterDataHandler
pos = 0
while True:
chunk = file.read(BUFSIZE)
if not chunk:
parser.Parse(chunk, 1)
break
pos = pos + len(chunk)
if self.progress:
self.progress.set(pos // 100)
parser.Parse(chunk, 0)
def _startElementHandler(self, name, attrs):
stackSize = self.stackSize
self.stackSize = stackSize + 1
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
sfntVersion = attrs.get("sfntVersion")
if sfntVersion is not None:
if len(sfntVersion) != 4:
sfntVersion = safeEval('"' + sfntVersion + '"')
self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
subFile = attrs.get("src")
if subFile is not None:
subFile = os.path.join(os.path.dirname(self.fileName), subFile)
subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet)
subReader.read()
self.contentStack.append([])
return
tag = ttLib.xmlToTag(name)
msg = "Parsing '%s' table..." % tag
if self.progress:
self.progress.setlabel(msg)
elif self.ttFont.verbose:
ttLib.debugmsg(msg)
else:
if not self.quiet:
print(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == 'loca' and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
else:
self.currentTable = tableClass(tag)
self.ttFont[tag] = self.currentTable
self.contentStack.append([])
elif stackSize == 2:
self.contentStack.append([])
self.root = (name, attrs, self.contentStack[-1])
else:
l = []
self.contentStack[-1].append((name, attrs, l))
self.contentStack.append(l)
def _characterDataHandler(self, data):
if self.stackSize > 1:
self.contentStack[-1].append(data)
def _endElementHandler(self, name):
self.stackSize = self.stackSize - 1
del self.contentStack[-1]
if self.stackSize == 1:
self.root = None
elif self.stackSize == 2:
name, attrs, content = self.root
self.currentTable.fromXML(name, attrs, content, self.ttFont)
self.root = None
class ProgressPrinter(object):
def __init__(self, title, maxval=100):
print(title)
def set(self, val, maxval=None):
pass
def increment(self, val=1):
pass
def setLabel(self, text):
print(text)
```
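A short, hedged sketch of how the reader above is typically driven; the TTX path is a placeholder and the example assumes the old-style `fontTools.ttLib.TTFont` that this reader targets:
```python
from fontTools.ttLib import TTFont
from fontTools.misc.xmlReader import XMLReader

font = TTFont()                       # empty font object to populate
reader = XMLReader("font.ttx", font)  # "font.ttx" is a hypothetical TTX dump
reader.read()                         # parse the TTX and fill in the tables
print(sorted(font.keys()))            # table tags that were read
```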
#### File: ttLib/tables/_f_p_g_m.py
```python
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from . import DefaultTable
from . import ttProgram
class table__f_p_g_m(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
program = ttProgram.Program()
program.fromBytecode(data)
self.program = program
def compile(self, ttFont):
return self.program.getBytecode()
def toXML(self, writer, ttFont):
self.program.toXML(writer, ttFont)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
program = ttProgram.Program()
program.fromXML(name, attrs, content, ttFont)
self.program = program
def __len__(self):
return len(self.program)
``` |
{
"source": "JohanOstlund/plex_assistant",
"score": 2
} |
#### File: custom_components/plex_assistant/helpers.py
```python
import re
import time
import uuid
import pychromecast
from rapidfuzz import fuzz, process
from gtts import gTTS
from json import JSONDecodeError, loads
from homeassistant.components.plex.services import get_plex_server
from homeassistant.exceptions import HomeAssistantError, ServiceNotFound
from homeassistant.core import Context
from pychromecast.controllers.plex import PlexController
from .const import DOMAIN, _LOGGER
def fuzzy(media, lib, scorer=fuzz.QRatio):
if isinstance(lib, list) and len(lib) > 0:
return process.extractOne(media, lib, scorer=scorer) or ["", 0]
return ["", 0]
def process_config_item(options, option_type):
option = options.get(option_type)
if not option:
return {}
try:
option = loads("{" + option + "}")
for i in option.keys():
_LOGGER.debug(f"{option_type} {i}: {option[i]}")
except (TypeError, AttributeError, KeyError, JSONDecodeError):
_LOGGER.warning(f"There is a formatting error in the {option_type.replace('_', ' ')} config.")
option = {}
return option
async def get_server(hass, config, server_name):
try:
await hass.helpers.discovery.async_discover(None, None, "plex", config)
return get_plex_server(hass, server_name)._plex_server
except HomeAssistantError as error:
server_name_str = ", the server_name is correct," if server_name else ""
_LOGGER.warning(
f"Plex Assistant: {error.args[0]}. Ensure that you've setup the HA "
f"Plex integration{server_name_str} and the server is reachable. "
)
def get_devices(hass, pa):
for entity in list(hass.data["media_player"].entities):
info = str(entity.device_info.get("identifiers", "")) if entity.device_info else ""
dev_type = [x for x in ["cast", "sonos", "plex", ""] if x in info][0]
if not dev_type:
continue
try:
name = hass.states.get(entity.entity_id).attributes.get("friendly_name")
except AttributeError:
continue
pa.devices[name] = {"entity_id": entity.entity_id, "device_type": dev_type}
def run_start_script(hass, pa, command, start_script, device, default_device):
if device[0] in start_script.keys():
start = hass.data["script"].get_entity(start_script[device[0]])
start.script.run(context=Context())
get_devices(hass, pa)
return fuzzy(command["device"] or default_device, list(pa.devices.keys()))
return device
async def listeners(hass):
def ifttt_webhook_callback(event):
if event.data["service"] == "plex_assistant.command":
_LOGGER.debug("IFTTT Call: %s", event.data["command"])
hass.services.call(DOMAIN, "command", {"command": event.data["command"]})
listener = hass.bus.async_listen("ifttt_webhook_received", ifttt_webhook_callback)
try:
await hass.services.async_call("conversation", "process", {"text": "tell plex to initialize_plex_intent"})
except ServiceNotFound:
pass
return listener
def media_service(hass, entity_id, call, payload=None):
args = {"entity_id": entity_id}
if call == "play_media":
args = {**args, **{"media_content_type": "video", "media_content_id": payload}}
elif call == "media_seek":
args = {**args, **{"seek_position": payload}}
hass.services.call("media_player", call, args)
def jump(hass, device, amount):
if device["device_type"] == "plex":
media_service(hass, device["entity_id"], "media_pause")
time.sleep(0.5)
offset = hass.states.get(device["entity_id"]).attributes.get("media_position", 0) + amount
media_service(hass, device["entity_id"], "media_seek", offset)
if device["device_type"] == "plex":
media_service(hass, device["entity_id"], "media_play")
def cast_next_prev(hass, zeroconf, plex_c, device, direction):
entity = hass.data["media_player"].get_entity(device["entity_id"])
cast, browser = pychromecast.get_listed_chromecasts(
uuids=[uuid.UUID(entity._cast_info.uuid)], zeroconf_instance=zeroconf
)
pychromecast.discovery.stop_discovery(browser)
cast[0].register_handler(plex_c)
cast[0].wait()
if direction == "next":
plex_c.next()
else:
plex_c.previous()
def remote_control(hass, zeroconf, control, device, jump_amount):
plex_c = PlexController()
if control == "jump_forward":
jump(hass, device, jump_amount[0])
elif control == "jump_back":
jump(hass, device, -jump_amount[1])
elif control == "next_track" and device["device_type"] == "cast":
cast_next_prev(hass, zeroconf, plex_c, device, "next")
elif control == "previous_track" and device["device_type"] == "cast":
cast_next_prev(hass, zeroconf, plex_c, device, "previous")
else:
media_service(hass, device["entity_id"], f"media_{control}")
def seek_to_offset(hass, offset, entity):
if offset < 1:
return
timeout = 0
while not hass.states.is_state(entity, "playing") and timeout < 100:
time.sleep(0.10)
timeout += 1
timeout = 0
if hass.states.is_state(entity, "playing"):
media_service(hass, entity, "media_pause")
while not hass.states.is_state(entity, "paused") and timeout < 100:
time.sleep(0.10)
timeout += 1
if hass.states.is_state(entity, "paused"):
if hass.states.get(entity).attributes.get("media_position", 0) < 9:
media_service(hass, entity, "media_seek", offset)
media_service(hass, entity, "media_play")
def no_device_error(localize, device=None):
device = f': "{device.title()}".' if device else "."
_LOGGER.warning(f"{localize['cast_device'].capitalize()} {localize['not_found']}{device}")
def media_error(command, localize):
error = "".join(
f"{localize[keyword]['keywords'][0]} " for keyword in ["latest", "unwatched", "ondeck"] if command[keyword]
)
if command["media"]:
media = command["media"]
media = media if isinstance(media, str) else getattr(media, "title", str(media))
error += f"{media.capitalize()} "
elif command["library"]:
error += f"{localize[command['library']+'s'][0]} "
for keyword in ["season", "episode"]:
if command[keyword]:
error += f"{localize[keyword]['keywords'][0]} {command[keyword]} "
error += f"{localize['not_found']}."
return error.capitalize()
def play_tts_error(hass, tts_dir, device, error, lang):
tts = gTTS(error, lang=lang)
tts.save(tts_dir + "error.mp3")
hass.services.call(
"media_player",
"play_media",
{
"entity_id": device,
"media_content_type": "audio/mp3",
"media_content_id": "/local/plex_assist_tts/error.mp3",
},
)
def filter_media(pa, command, media, library):
offset = 0
if library == "playlist":
media = pa.server.playlist(media) if media else pa.server.playlists()
elif media or library:
media = pa.library.search(title=media or None, libtype=library or None)
if isinstance(media, list) and len(media) == 1:
media = media[0]
if command["episode"]:
media = media.episode(season=int(command["season"] or 1), episode=int(command["episode"]))
elif command["season"]:
media = media.season(season=int(command["season"]))
if command["ondeck"]:
title, libtype = [command["media"], command["library"]]
if getattr(media, "onDeck", None):
media = media.onDeck()
elif title or libtype:
search_result = pa.library.search(title=title or None, libtype=libtype or None, limit=1)[0]
if getattr(search_result, "onDeck", None):
media = search_result.onDeck()
else:
media = pa.library.sectionByID(search_result.librarySectionID).onDeck()
else:
media = pa.library.sectionByID(pa.tv_id).onDeck() + pa.library.sectionByID(pa.movie_id).onDeck()
media.sort(key=lambda x: getattr(x, "addedAt", None), reverse=False)
if command["unwatched"]:
if isinstance(media, list) or (not media and not library):
media = media[:200] if isinstance(media, list) else pa.library.recentlyAdded()
media = [x for x in media if getattr(x, "viewCount", 0) == 0]
elif getattr(media, "unwatched", None):
media = media.unwatched()[:200]
if command["latest"] and not command["unwatched"]:
if library and not media and pa.section_id[library]:
media = pa.library.sectionByID(pa.section_id[library]).recentlyAdded()[:200]
elif not media:
media = pa.library.sectionByID(pa.tv_id).recentlyAdded()
media += pa.library.sectionByID(pa.mov_id).recentlyAdded()
media.sort(key=lambda x: getattr(x, "addedAt", None), reverse=True)
media = media[:200]
elif command["latest"]:
if getattr(media, "type", None) in ["show", "season"]:
media = media.episodes()[-1]
elif isinstance(media, list):
media = media[:200]
media.sort(key=lambda x: getattr(x, "addedAt", None), reverse=True)
if not command["random"] and media:
pos = getattr(media[0], "viewOffset", 0) if isinstance(media, list) else getattr(media, "viewOffset", 0)
offset = (pos / 1000) - 5 if pos > 15 else 0
if getattr(media, "TYPE", None) == "show":
unwatched = media.unwatched()[:30]
media = unwatched if unwatched and not command["random"] else media.episodes()[:30]
elif getattr(media, "TYPE", None) == "episode":
episodes = media.show().episodes()
episodes = episodes[episodes.index(media) : episodes.index(media) + 30]
media = pa.server.createPlayQueue(episodes, shuffle=int(command["random"]))
elif getattr(media, "TYPE", None) in ["artist", "album"]:
tracks = media.tracks()
media = pa.server.createPlayQueue(tracks, shuffle=int(command["random"]))
elif getattr(media, "TYPE", None) == "track":
tracks = media.album().tracks()
tracks = tracks[tracks.index(media) :]
media = pa.server.createPlayQueue(tracks, shuffle=int(command["random"]))
if getattr(media, "TYPE", None) != "playqueue" and media:
media = pa.server.createPlayQueue(media, shuffle=int(command["random"]))
return [media, 0 if media and media.items[0].listType == "audio" else offset]
def roman_numeral_test(media, lib):
regex = re.compile(r"\b(\d|(10))\b")
replacements = {
"1": "I",
"2": "II",
"3": "III",
"4": "IV",
"5": "V",
"6": "VI",
"7": "VII",
"8": "VIII",
"9": "IX",
"10": "X",
}
if len(re.findall(regex, media)) > 0:
replaced = re.sub(regex, lambda m: replacements[m.group(1)], media)
return fuzzy(replaced, lib, fuzz.WRatio)
return ["", 0]
def find_media(pa, command):
result = ""
lib = ""
if getattr(command["media"], "type", None) in ["artist", "album", "track"]:
return [command["media"], command["media"].type]
if command["library"]:
lib_titles = pa.media[f"{command['library']}_titles"]
if command["media"]:
result = fuzzy(command["media"], lib_titles, fuzz.WRatio)
roman_test = roman_numeral_test(command["media"], lib_titles)
result = result[0] if result[1] > roman_test[1] else roman_test[0]
elif command["media"]:
item = {}
score = {}
for category in ["show", "movie", "artist", "album", "track", "playlist"]:
lib_titles = pa.media[f"{category}_titles"]
standard = fuzzy(command["media"], lib_titles, fuzz.WRatio) if lib_titles else ["", 0]
roman = roman_numeral_test(command["media"], lib_titles) if lib_titles else ["", 0]
winner = standard if standard[1] > roman[1] else roman
item[category] = winner[0]
score[category] = winner[1]
winning_category = max(score, key=score.get)
result = item[winning_category]
lib = winning_category
return [result, lib or command["library"]]
``` |
{
"source": "johanot/certmgr",
"score": 2
} |
#### File: ct/client/reporter_test.py
```python
import unittest
import sys
from collections import defaultdict
from ct.client import reporter
from ct.client.db import cert_desc
from ct.client.db import sqlite_cert_db
from ct.client.db import sqlite_connection as sqlitecon
from ct.crypto import cert
from ct.proto import certificate_pb2
from ct.proto import client_pb2
from ct.test import test_config
import gflags
STRICT_DER = cert.Certificate.from_der_file(
test_config.get_test_file_path('google_cert.der'), False).to_der()
NON_STRICT_DER = cert.Certificate.from_pem_file(
test_config.get_test_file_path('invalid_ip.pem'), False).to_der()
CHAIN_FILE = test_config.get_test_file_path('google_chain.pem')
CHAIN_DERS = [c.to_der() for c in cert.certs_from_pem_file(CHAIN_FILE)]
SELF_SIGNED_ROOT_DER = cert.Certificate.from_pem_file(
test_config.get_test_file_path('subrigo_net.pem'), False).to_der()
def readable_dn(dn_attribs):
return ",".join(["%s=%s" % (attr.type, attr.value) for attr in dn_attribs])
class CertificateReportTest(unittest.TestCase):
class CertificateReportBase(reporter.CertificateReport):
def __init__(self):
super(CertificateReportTest.CertificateReportBase, self).__init__()
def report(self):
super(CertificateReportTest.CertificateReportBase, self).report()
return self._certs
def reset(self):
self._certs = {}
def _batch_scanned_callback(self, result):
for desc, log_index in result:
self._certs[log_index] = desc
def setUp(self):
self.cert_db = sqlite_cert_db.SQLiteCertDB(
sqlitecon.SQLiteConnectionManager(":memory:", keepalive=True))
def test_scan_der_cert(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, STRICT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
def test_scan_der_cert_broken_cert(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, "asdf", [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertFalse(results[0].subject)
def test_scan_der_cert_check_non_strict(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, NON_STRICT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertTrue(results[0].subject)
def test_entry_type_propogated(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, STRICT_DER, [], client_pb2.PRECERT_ENTRY),
(1, STRICT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 2)
self.assertEquals(results[0].entry_type, client_pb2.PRECERT_ENTRY)
self.assertEquals(results[1].entry_type, client_pb2.X509_ENTRY)
def test_issuer_and_root_issuer_populated_from_chain(self):
self.assertEqual(3, len(CHAIN_DERS))
report = self.CertificateReportBase()
report.scan_der_certs([(0, CHAIN_DERS[0], CHAIN_DERS[1:],
client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
issuer_cert = cert_desc.from_cert(cert.Certificate(CHAIN_DERS[1]))
root_cert = cert_desc.from_cert(cert.Certificate(CHAIN_DERS[2]))
self.assertEqual(readable_dn(results[0].issuer),
'C=US,O=Google Inc,CN=Google Internet Authority')
self.assertEqual(readable_dn(results[0].root_issuer),
'C=US,O=Equifax,OU=Equifax Secure Certificate Authority')
def test_chain_containing_only_root_handled(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, SELF_SIGNED_ROOT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertEquals(results[0].entry_type, client_pb2.X509_ENTRY)
def test_issuer_public_key_populated_from_chain(self):
# Verify the test data is what is expected for this unit test.
self.assertEqual(3, len(CHAIN_DERS))
self.assertEqual(
cert.Certificate(CHAIN_DERS[1]).key_hash(hashfunc="sha256").encode('hex'),
'b6b95432abae57fe020cb2b74f4f9f9173c8c708afc9e732ace23279047c6d05')
report = self.CertificateReportBase()
report.scan_der_certs([(0, CHAIN_DERS[0], CHAIN_DERS[1:],
client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].issuer_pk_sha256_hash.encode('hex'),
'b6b95432abae57fe020cb2b74f4f9f9173c8c708afc9e732ace23279047c6d05')
if __name__ == '__main__':
sys.argv = gflags.FLAGS(sys.argv)
unittest.main()
```
#### File: utilities/log_list/openssl_generator.py
```python
def _log_id(log):
# Use log URL as its ID because it should be unique and is probably
# shorter and more readable in a comma-separated list than the log
# description.
return log["url"].replace(",", "")
def _openssl_list(items):
'''
Strip commas from any items used in a list in the OpenSSL CONF format,
becayse they would be interpreted as delimiters.
'''
return ", ".join(x.replace(",", "") for x in items)
def _enabled_logs_conf(logs):
return "enabled_logs = %s\n" % (
_openssl_list(_log_id(log) for log in logs)
)
def _log_conf(log):
return (
"[%(id)s]\n"
"description = %(description)s\n"
"key = %(key)s\n" % {
"id": _log_id(log),
"description": log["description"],
"key": log["key"],
})
def generate_openssl_conf(json_log_list, output_path):
'''Given a log list read from JSON, writes an OpenSSL log list to a file'''
with open(output_path, "w") as output:
logs = json_log_list["logs"]
log_confs = (_log_conf(log) for log in logs)
output.write(_enabled_logs_conf(logs) + "\n")
output.write("\n".join(log_confs))
``` |
{
"source": "johanovic/django-rated",
"score": 2
} |
#### File: django-rated/rated/decorators.py
```python
from functools import partial, wraps
from .settings import DEFAULT_REALM
from .utils import BACKEND
def rate_limit(func=None, realm=None):
'''Apply rate limiting directly to any view-like function.'''
if realm is None:
realm = DEFAULT_REALM
if func is None:
return partial(rate_limit, realm=realm)
@wraps(func)
def _inner(request, *args, **kwargs):
source = BACKEND.source_for_request(request)
if BACKEND.check_realm(source, realm):
return BACKEND.make_limit_response(realm)
return func(request, *args, **kwargs)
return _inner
def rate_limit_method(func=None, realm=None):
'''Rate limit a view-like method'''
if func is None:
return partial(rate_limit_method, realm=realm)
@wraps(func)
def _inner(self, request, *args, **kwargs):
source = BACKEND.source_for_request(request)
if BACKEND.check_realm(source, realm):
return BACKEND.make_limit_response(realm)
return func(self, request, *args, **kwargs)
return _inner
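# Hedged usage sketch (view and realm names below are hypothetical):
#
#   @rate_limit(realm='api')
#   def my_view(request):
#       ...
#
#   class MyView(View):
#       @rate_limit_method(realm='api')
#       def get(self, request):
#           ...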
``` |
{
"source": "johanpel/j2a",
"score": 2
} |
#### File: j2a/experiments/battery.py
```python
import numpy as np
def gen_schema(file, num_values_max=1024, value_max=np.iinfo(np.uint64).max, num_values_min=1, value_min=0):
import pyarrow as pa
output_schema = pa.schema([
pa.field("voltage", pa.list_(
pa.field("item", pa.uint64(), False).with_metadata(
{"illex_MIN": str(value_min), "illex_MAX": str(value_max)})
), False).with_metadata(
{"illex_MIN_LENGTH": str(num_values_min), "illex_MAX_LENGTH": str(num_values_max)}
)
])
pa.output_stream(file).write(output_schema.serialize())
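# Hedged usage sketch (the output path and limits are hypothetical): write the
# Arrow schema, including the illex metadata set above, to a file:
#
#   gen_schema("battery.schema", num_values_max=16, value_max=2047)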
``` |
{
"source": "JohanpG/tf-object-counting",
"score": 4
} |
#### File: JohanpG/tf-object-counting/ShowCamera.py
```python
import cv2
def show_webcam(mirror=False):
cam = cv2.VideoCapture(0)
while True:
ret_val, img = cam.read()
if mirror:
img = cv2.flip(img, 1)
cv2.imshow('my webcam', img)
if cv2.waitKey(1) == 27:
break # esc to quit
cv2.destroyAllWindows()
def main():
show_webcam(mirror=True)
if __name__ == '__main__':
main()
``` |
{
"source": "johanrhodin/aiorabbit",
"score": 2
} |
#### File: aiorabbit/aiorabbit/__init__.py
```python
import asyncio
import contextlib
import logging
import typing
from aiorabbit import exceptions
from aiorabbit.__version__ import version
DEFAULT_LOCALE = 'en-US'
DEFAULT_PRODUCT = 'aiorabbit/{}'.format(version)
DEFAULT_URL = 'amqp://guest:guest@localhost:5672/%2f'
LOGGER = logging.getLogger('aiorabbit')
@contextlib.asynccontextmanager
async def connect(url: str = DEFAULT_URL,
locale: str = DEFAULT_LOCALE,
product: str = DEFAULT_PRODUCT,
loop: typing.Optional[asyncio.AbstractEventLoop] = None):
"""Asynchronous :ref:`context-manager <python:typecontextmanager>` that
connects to RabbitMQ, returning a connected
:class:`~aiorabbit.client.Client` as the target.
.. code-block:: python3
:caption: Example Usage
async with aiorabbit.connect(RABBITMQ_URL) as client:
await client.exchange_declare('test', 'topic')
:param url: The URL to connect to RabbitMQ with
:param locale: The locale for the connection, default `en-US`
:param product: The product name for the connection, default `aiorabbit`
:param loop: Optional :mod:`asyncio` event loop to use
"""
from aiorabbit import client
rmq_client = client.Client(url, locale, product, loop)
await rmq_client.connect()
try:
yield rmq_client
finally:
if not rmq_client.is_closed:
await rmq_client.close()
__all__ = [
'client',
'connect',
'DEFAULT_PRODUCT',
'DEFAULT_LOCALE',
'DEFAULT_URL',
'exceptions',
'message',
'types',
'version'
]
```
#### File: aiorabbit/aiorabbit/state.py
```python
import asyncio
import inspect
import logging
import time
import typing
from aiorabbit import exceptions
STATE_UNINITIALIZED = 0x00
STATE_EXCEPTION = 0x01
class StateManager:
"""Base Class used to implement state management"""
STATE_MAP: dict = {
STATE_UNINITIALIZED: 'Uninitialized',
STATE_EXCEPTION: 'Exception Raised'
}
STATE_TRANSITIONS: dict = {
STATE_UNINITIALIZED: [STATE_EXCEPTION]
}
def __init__(self, loop: asyncio.AbstractEventLoop):
self._logger = logging.getLogger(
dict(inspect.getmembers(self))['__module__'])
self._exception: typing.Optional[Exception] = None
self._loop: asyncio.AbstractEventLoop = loop
self._loop.set_exception_handler(self._on_exception)
self._state: int = STATE_UNINITIALIZED
self._state_start: float = self._loop.time()
self._waits: dict = {}
@property
def exception(self) -> typing.Optional[Exception]:
"""If an exception was set with the state, return the value"""
return self._exception
@property
def state(self) -> str:
"""Return the current state as descriptive string"""
return self.state_description(self._state)
def state_description(self, state: int) -> str:
"""Return a state description for a given state"""
return self.STATE_MAP[state]
@property
def time_in_state(self) -> float:
"""Return how long the current state has been active"""
return self._loop.time() - self._state_start
def _clear_waits(self, wait_id: int) -> None:
for state in self._waits.keys():
if wait_id in self._waits[state].keys():
del self._waits[state][wait_id]
def _on_exception(self,
_loop: asyncio.AbstractEventLoop,
context: typing.Dict[str, typing.Any]) -> None:
self._logger.debug('Exception on IOLoop: %r', context)
self._set_state(STATE_EXCEPTION, context.get('exception'))
def _reset_state(self, value: int) -> None:
self._logger.debug(
'Reset state %r while state is %r - %r',
self.state_description(value), self.state, self._waits)
self._state = value
self._state_start = self._loop.time()
self._exception = None
self._waits = {}
def _set_state(self, value: int,
exc: typing.Optional[Exception] = None) -> None:
self._logger.debug(
'Set state to %i: %s while state is %i: %s - %r [%r]',
value, self.state_description(value), self._state, self.state,
self._waits, exc)
if value == self._state and exc == self._exception:
return
elif value != STATE_EXCEPTION \
and value not in self.STATE_TRANSITIONS[self._state]:
raise exceptions.StateTransitionError(
'Invalid state transition from {!r} to {!r}'.format(
self.state, self.state_description(value)))
self._logger.debug(
'Transition to %i: %s from %i: %s after %.4f seconds',
value, self.state_description(value),
self._state, self.state, self.time_in_state)
self._exception = exc
self._state = value
self._state_start = self._loop.time()
if self._state in self._waits:
[self._loop.call_soon(event.set)
for event in self._waits[self._state].values()]
async def _wait_on_state(self, *args) -> int:
"""Wait on a specific state value to transition"""
wait_id, waits = time.monotonic_ns(), []
self._logger.debug(
'Waiter %i waiting on (%s) while in %i: %s',
wait_id, ' || '.join(
'{}: {}'.format(s, self.state_description(s))
for s in args), self._state, self.state)
for state in args:
if state not in self._waits:
self._waits[state] = {}
self._waits[state][wait_id] = asyncio.Event()
waits.append((state, self._waits[state][wait_id]))
while not self._exception:
for state, event in waits:
if event.is_set():
self._logger.debug(
'Waiter %r wait on %i: %s has finished [%r]', wait_id,
state, self.state_description(state), self._exception)
self._clear_waits(wait_id)
return state
await asyncio.sleep(0.001)
self._clear_waits(wait_id)
exc = self._exception
self._exception = None
raise exc
```
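A self-contained sketch showing how the state machine above can be exercised; the `Connected` state, its value, and the demo coroutine are invented for illustration, and only `StateManager`, `STATE_UNINITIALIZED` and `STATE_EXCEPTION` come from the module above:
```python
import asyncio

from aiorabbit.state import STATE_EXCEPTION, STATE_UNINITIALIZED, StateManager

STATE_CONNECTED = 0x02  # hypothetical state value for this demo


class DemoStateManager(StateManager):
    STATE_MAP = dict(StateManager.STATE_MAP, **{STATE_CONNECTED: 'Connected'})
    STATE_TRANSITIONS = {
        STATE_UNINITIALIZED: [STATE_CONNECTED, STATE_EXCEPTION],
        STATE_CONNECTED: [STATE_EXCEPTION],
    }


async def demo() -> None:
    loop = asyncio.get_running_loop()
    manager = DemoStateManager(loop)
    # schedule the transition a little later, then block on it
    loop.call_later(0.01, manager._set_state, STATE_CONNECTED)
    await manager._wait_on_state(STATE_CONNECTED)
    print(manager.state)          # "Connected"
    print(manager.time_in_state)  # seconds spent in the current state


asyncio.run(demo())
```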
#### File: aiorabbit/tests/test_basic.py
```python
import uuid
from pamqp import body, commands, header
from aiorabbit import exceptions, message
from . import testing
class BasicAckTestCase(testing.ClientTestCase):
@testing.async_test
async def test_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.basic_ack('foo')
with self.assertRaises(TypeError):
await self.client.basic_ack(1, 1)
class BasicCancelTestCase(testing.ClientTestCase):
@testing.async_test
async def test_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.basic_cancel(1)
class BasicConsumeTestCase(testing.ClientTestCase):
def setUp(self) -> None:
super().setUp()
self.queue = self.uuid4()
self.exchange = 'amq.topic'
self.routing_key = self.uuid4()
self.body = uuid.uuid4().bytes
async def on_message(self, msg):
self.assertEqual(msg.exchange, self.exchange)
self.assertEqual(msg.routing_key, self.routing_key)
self.assertEqual(msg.body, self.body)
await self.client.basic_ack(msg.delivery_tag)
self.test_finished.set()
@testing.async_test
async def test_consume(self):
await self.connect()
await self.client.queue_declare(self.queue)
await self.client.queue_bind(self.queue, self.exchange, '#')
ctag = await self.client.basic_consume(
self.queue, callback=self.on_message)
await self.client.publish(self.exchange, self.routing_key, self.body)
await self.test_finished.wait()
await self.client.basic_cancel(ctag)
@testing.async_test
async def test_consume_large_message(self):
self.body = '-'.join([
self.uuid4() for _i in range(0, 100000)]).encode('utf-8')
await self.connect()
await self.client.queue_declare(self.queue)
await self.client.queue_bind(self.queue, self.exchange, '#')
ctag = await self.client.basic_consume(
self.queue, callback=self.on_message)
await self.client.publish(self.exchange, self.routing_key, self.body)
await self.test_finished.wait()
await self.client.basic_cancel(ctag)
@testing.async_test
async def test_consume_message_pending(self):
await self.connect()
await self.client.queue_declare(self.queue)
await self.client.queue_bind(self.queue, self.exchange, '#')
await self.client.publish(self.exchange, self.routing_key, self.body)
ctag = await self.client.basic_consume(
self.queue, callback=self.on_message)
await self.test_finished.wait()
await self.client.basic_cancel(ctag)
@testing.async_test
async def test_consume_sync_callback(self):
def on_message(msg):
self.assertEqual(msg.exchange, self.exchange)
self.assertEqual(msg.routing_key, self.routing_key)
self.assertEqual(msg.body, self.body)
self.test_finished.set()
await self.connect()
await self.client.queue_declare(self.queue)
await self.client.queue_bind(self.queue, self.exchange, '#')
await self.client.publish(self.exchange, self.routing_key, self.body)
ctag = await self.client.basic_consume(self.queue, callback=on_message)
await self.test_finished.wait()
await self.client.basic_cancel(ctag)
@testing.async_test
async def test_not_found(self):
await self.connect()
with self.assertRaises(exceptions.NotFound):
await self.client.basic_consume('foo', callback=lambda x: x)
@testing.async_test
async def test_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.basic_consume(1, callback=lambda x: x)
with self.assertRaises(TypeError):
await self.client.basic_consume('foo', 1, callback=lambda x: x)
with self.assertRaises(TypeError):
await self.client.basic_consume(
'foo', False, 1, callback=lambda x: x)
with self.assertRaises(TypeError):
await self.client.basic_consume(
'foo', False, False, 1, callback=lambda x: x)
with self.assertRaises(TypeError):
await self.client.basic_consume(
'foo', False, False, False, 1, callback=lambda x: x)
with self.assertRaises(TypeError):
await self.client.basic_consume('foo', callback=True)
with self.assertRaises(ValueError):
await self.client.basic_consume('foo')
with self.assertRaises(TypeError):
await self.client.basic_consume(
'foo', callback=lambda x: x, consumer_tag=1)
class BasicGetTestCase(testing.ClientTestCase):
@testing.async_test
async def test_basic_get(self):
queue = self.uuid4()
exchange = 'amq.direct'
routing_key = '#'
msg_body = uuid.uuid4().bytes
await self.connect()
msg_count, consumer_count = await self.client.queue_declare(queue)
self.assertEqual(msg_count, 0)
self.assertEqual(consumer_count, 0)
result = await self.client.basic_get(queue)
self.assertIsNone(result)
await self.client.queue_bind(queue, exchange, routing_key)
await self.client.publish(exchange, routing_key, msg_body)
result = await self.client.basic_get(queue)
self.assertIsInstance(result, message.Message)
self.assertEqual(result.body, msg_body)
self.assertEqual(result.message_count, 0)
await self.client.basic_ack(result.delivery_tag)
await self.client.queue_delete(queue)
@testing.async_test
async def test_basic_getok_message_count(self):
queue = self.uuid4()
exchange = 'amq.direct'
routing_key = '#'
msg_body = uuid.uuid4().bytes
await self.connect()
await self.client.queue_declare(queue)
result = await self.client.basic_get(queue)
self.assertIsNone(result)
await self.client.queue_bind(queue, exchange, routing_key)
await self.client.publish(exchange, routing_key, msg_body)
await self.client.publish(exchange, routing_key, uuid.uuid4().bytes)
await self.client.publish(exchange, routing_key, uuid.uuid4().bytes)
result = await self.client.basic_get(queue)
self.assertIsInstance(result, message.Message)
self.assertEqual(result.body, msg_body)
self.assertEqual(result.message_count, 2)
await self.client.basic_ack(result.delivery_tag)
result = await self.client.basic_get(queue)
self.assertIsInstance(result, message.Message)
self.assertEqual(result.message_count, 1)
await self.client.basic_nack(result.delivery_tag, requeue=False)
result = await self.client.basic_get(queue)
self.assertIsInstance(result, message.Message)
self.assertEqual(result.message_count, 0)
await self.client.basic_reject(result.delivery_tag)
await self.client.queue_delete(queue)
@testing.async_test
async def test_basic_get_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.basic_get(1)
with self.assertRaises(TypeError):
await self.client.basic_get('foo', 1)
class BasicNackTestCase(testing.ClientTestCase):
@testing.async_test
async def test_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.basic_nack('foo')
with self.assertRaises(TypeError):
await self.client.basic_nack(1, 1)
with self.assertRaises(TypeError):
await self.client.basic_nack(1, False, 1)
class BasicPublishTestCase(testing.ClientTestCase):
@testing.async_test
async def test_basic_publish_raises(self):
with self.assertRaises(NotImplementedError):
await self.client.basic_publish()
class BasicQosTestCase(testing.ClientTestCase):
@testing.async_test
async def test_basic_qos_raises(self):
self.raises = self.assertRaises(NotImplementedError)
with self.raises:
await self.client.basic_qos()
class BasicRecoverTestCase(testing.ClientTestCase):
@testing.async_test
async def test_basic_recover(self):
await self.connect()
await self.client.basic_recover(True)
@testing.async_test
async def test_basic_recover_false_raises(self):
await self.connect()
with self.assertRaises(exceptions.NotImplemented):
await self.client.basic_recover(False)
@testing.async_test
async def test_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.basic_recover(1)
class BasicRejectTestCase(testing.ClientTestCase):
@testing.async_test
async def test_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.basic_reject('foo')
with self.assertRaises(TypeError):
await self.client.basic_reject(1, 1)
class BasicReturnTestCase(testing.ClientTestCase):
def setUp(self) -> None:
super().setUp()
self.exchange = 'amq.topic'
self.routing_key = self.uuid4()
self.body = uuid.uuid4().bytes
@testing.async_test
async def test_basic_return(self):
async def on_return(msg: message.Message) -> None:
self.assertEqual(msg.reply_code, 404)
self.assertEqual(msg.reply_text, 'Not Found')
self.assertEqual(msg.exchange, self.exchange)
self.assertEqual(msg.body, self.body)
self.test_finished.set()
self.client.register_basic_return_callback(on_return)
await self.connect()
await self.client.publish(self.exchange, self.routing_key, self.body)
# Fake the Basic.Return
self.client._on_frame(self.client._channel, commands.Basic.Return(
404, 'Not Found', self.exchange, self.routing_key))
self.client._on_frame(self.client._channel, header.ContentHeader(
0, len(self.body), commands.Basic.Properties()))
self.client._on_frame(
self.client._channel, body.ContentBody(self.body))
await self.test_finished.wait()
```
#### File: aiorabbit/tests/test_exchange.py
```python
import uuid
from aiorabbit import exceptions
from . import testing
class ExchangeDeclareTestCase(testing.ClientTestCase):
@testing.async_test
async def test_exchange_declare(self):
name = str(uuid.uuid4().hex)
await self.connect()
await self.client.exchange_declare(name, 'direct', durable=True)
await self.client.exchange_declare(name, 'direct', passive=True)
@testing.async_test
async def test_exchange_declare_passive_raises(self):
await self.connect()
with self.assertRaises(exceptions.NotFound):
await self.client.exchange_declare(
str(uuid.uuid4().hex), 'direct', passive=True)
class ExchangeTestCase(testing.ClientTestCase):
@testing.async_test
async def test_exchange_declare(self):
await self.connect()
await self.client.exchange_declare(self.uuid4(), 'direct')
@testing.async_test
async def test_exchange_declare_invalid_exchange_type(self):
await self.connect()
with self.assertRaises(exceptions.CommandInvalid):
await self.client.exchange_declare(self.uuid4(), self.uuid4())
self.assertEqual(self.client.state, 'Channel Open')
# Ensure a command will properly work after the error
await self.client.exchange_declare(self.uuid4(), 'direct')
@testing.async_test
async def test_exchange_declare_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.exchange_declare(1)
with self.assertRaises(TypeError):
await self.client.exchange_declare(self.uuid4(), 1)
with self.assertRaises(TypeError):
await self.client.exchange_declare(self.uuid4(), passive='1')
with self.assertRaises(TypeError):
await self.client.exchange_declare(self.uuid4(), durable='1')
with self.assertRaises(TypeError):
await self.client.exchange_declare(self.uuid4(), auto_delete='1')
with self.assertRaises(TypeError):
await self.client.exchange_declare(self.uuid4(), internal='1')
with self.assertRaises(TypeError):
await self.client.exchange_declare(self.uuid4(), arguments='1')
@testing.async_test
async def test_exchange_bind_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.exchange_bind(1, self.uuid4(), self.uuid4())
with self.assertRaises(TypeError):
await self.client.exchange_bind(self.uuid4(), 1, self.uuid4())
with self.assertRaises(TypeError):
await self.client.exchange_bind(self.uuid4(), self.uuid4(), 1)
with self.assertRaises(TypeError):
await self.client.exchange_bind(
self.uuid4(), self.uuid4(), self.uuid4(), self.uuid4())
@testing.async_test
async def test_exchange_bind_raises_exchange_not_found(self):
await self.connect()
self.assertEqual(self.client._channel, 1)
with self.assertRaises(exceptions.NotFound):
await self.client.exchange_bind(
self.uuid4(), self.uuid4(), self.uuid4())
self.assertEqual(self.client._channel, 2)
# Ensure a command will properly work after the error
await self.client.exchange_declare(self.uuid4(), 'direct')
@testing.async_test
async def test_exchange_bind(self):
await self.connect()
exchange_1 = self.uuid4()
exchange_2 = self.uuid4()
await self.client.exchange_declare(exchange_1, 'topic')
await self.client.exchange_declare(exchange_2, 'topic')
await self.client.exchange_bind(exchange_1, exchange_2, '#')
await self.client.exchange_unbind(exchange_1, exchange_2, '#')
await self.client.exchange_delete(exchange_2)
await self.client.exchange_delete(exchange_1)
@testing.async_test
async def test_exchange_delete_invalid_exchange_name(self):
await self.connect()
self.assertEqual(self.client._channel, 1)
with self.assertRaises(TypeError):
await self.client.exchange_delete(327687)
@testing.async_test
async def test_exchange_unbind_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.exchange_unbind(1, self.uuid4(), self.uuid4())
with self.assertRaises(TypeError):
await self.client.exchange_unbind(self.uuid4(), 1, self.uuid4())
with self.assertRaises(TypeError):
await self.client.exchange_unbind(self.uuid4(), self.uuid4(), 1)
with self.assertRaises(TypeError):
await self.client.exchange_unbind(
self.uuid4(), self.uuid4(), self.uuid4(), self.uuid4())
@testing.async_test
async def test_exchange_unbind_invalid_exchange(self):
await self.connect()
self.assertEqual(self.client._channel, 1)
await self.client.exchange_unbind(
self.uuid4(), self.uuid4(), self.uuid4())
self.assertEqual(self.client._channel, 1)
```
#### File: aiorabbit/tests/test_integration.py
```python
import asyncio
import logging
import os
from unittest import mock
from pamqp import commands
import aiorabbit
from aiorabbit import client, exceptions
from tests import testing
LOGGER = logging.getLogger(__name__)
class ContextManagerTestCase(testing.AsyncTestCase):
@testing.async_test
async def test_context_manager_open(self):
async with aiorabbit.connect(
os.environ['RABBITMQ_URI'], loop=self.loop) as client_:
await client_.confirm_select()
self.assertEqual(client_._state,
client.STATE_CONFIRM_SELECTOK_RECEIVED)
self.assertEqual(client_._state, client.STATE_CLOSED)
@testing.async_test
async def test_context_manager_exception(self):
async with aiorabbit.connect(
os.environ['RABBITMQ_URI'], loop=self.loop) as client_:
await client_.confirm_select()
with self.assertRaises(RuntimeError):
await client_.confirm_select()
self.assertEqual(client_._state, client.STATE_CLOSED)
@testing.async_test
async def test_context_manager_remote_close(self):
async with aiorabbit.connect(
os.environ['RABBITMQ_URI'], loop=self.loop) as client_:
LOGGER.debug('Sending admin shutdown frame')
client_._on_frame(
0, commands.Connection.Close(200, 'Admin Shutdown'))
while not client_.is_closed:
await asyncio.sleep(0.1)
self.assertEqual(client_._state, client.STATE_CLOSED)
@testing.async_test
async def test_context_manager_already_closed_on_exit(self):
async with aiorabbit.connect(
os.environ['RABBITMQ_URI'], loop=self.loop) as client_:
self.assertFalse(client_.is_closed)
client_._state = client.STATE_CLOSED
self.assertTrue(client_.is_closed)
async with aiorabbit.connect(
os.environ['RABBITMQ_URI'], loop=self.loop) as client_:
self.assertFalse(client_.is_closed)
self.assertTrue(client_.is_closed)
class IntegrationTestCase(testing.ClientTestCase):
@testing.async_test
async def test_channel_recycling(self):
await self.connect()
self.assertEqual(self.client._channel, 1)
await self.close()
await self.connect()
self.assertEqual(self.client._channel, 1)
await self.close()
@testing.async_test
async def test_double_close(self):
await self.connect()
await self.close()
await self.close()
@testing.async_test
async def test_confirm_select(self):
await self.connect()
await self.client.confirm_select()
self.assert_state(client.STATE_CONFIRM_SELECTOK_RECEIVED)
await self.close()
@testing.async_test
async def test_connect_timeout(self):
with mock.patch.object(self.loop, 'create_connection') as create_conn:
create_conn.side_effect = asyncio.TimeoutError()
with self.assertRaises(asyncio.TimeoutError):
await self.connect()
@testing.async_test
async def test_client_close_error(self):
await self.connect()
with mock.patch.object(self.client, 'close') as close:
close.side_effect = RuntimeError('Faux Exception')
with self.assertRaises(RuntimeError):
await self.close()
@testing.async_test
async def test_update_secret_raises(self):
await self.connect()
with self.assertRaises(exceptions.CommandInvalid):
self.client._write_frames(
commands.Connection.UpdateSecret('foo', 'bar'))
await self.client._wait_on_state(
client.STATE_UPDATE_SECRETOK_RECEIVED)
class ReconnectPublisherConfirmsTestCase(testing.ClientTestCase):
@testing.async_test
async def test_confirm_select_already_invoked_on_reconnect(self):
await self.connect()
await self.client.confirm_select()
self.assertTrue(self.client._publisher_confirms)
with self.assertRaises(exceptions.CommandInvalid):
await self.client.exchange_declare(self.uuid4(), self.uuid4())
self.assertTrue(self.client._publisher_confirms)
class QosPrefetchTestCase(testing.ClientTestCase):
@testing.async_test
async def test_basic_qos(self):
await self.connect()
await self.client.qos_prefetch(100, False)
await self.client.qos_prefetch(125, True)
@testing.async_test
async def test_validation_errors(self):
await self.connect()
with self.assertRaises(TypeError):
await self.client.qos_prefetch('foo')
with self.assertRaises(TypeError):
await self.client.qos_prefetch(0, 'foo')
class ConsumeTestCase(testing.ClientTestCase):
def setUp(self) -> None:
super().setUp()
self.queue = self.uuid4()
self.exchange = 'amq.topic'
async def rmq_setup(self):
await self.connect()
await self.client.queue_declare(self.queue)
await self.client.queue_bind(self.queue, self.exchange, '#')
await self.client.qos_prefetch(1, True)
messages = [self.uuid4().encode('utf-8') for _offset in range(0, 5)]
for message in messages:
await self.client.publish(self.exchange, self.queue, message)
msgs, _consumers = await self.client.queue_declare(self.queue)
while msgs < len(messages):
await asyncio.sleep(0.5)
msgs, consumers = await self.client.queue_declare(self.queue)
return messages
@testing.async_test
async def test_consume(self):
messages = await self.rmq_setup()
async for message in self.client.consume(self.queue):
messages.remove(message.body)
await self.client.basic_ack(message.delivery_tag)
if not messages:
break
msgs, _consumers = await self.client.queue_declare(self.queue)
self.assertEqual(msgs, 0)
class ContextManagerConsumeTestCase(ConsumeTestCase):
@testing.async_test
async def test_consume(self):
messages = await self.rmq_setup()
async with aiorabbit.connect(self.rabbitmq_url) as rabbitmq:
async for message in rabbitmq.consume(self.queue):
messages.remove(message.body)
await rabbitmq.basic_ack(message.delivery_tag)
if not messages:
break
msgs, _consumers = await self.client.queue_declare(self.queue)
self.assertEqual(msgs, 0)
@testing.async_test
async def test_emulated_heartbeat_timeout_while_consuming(self):
messages = await self.rmq_setup()
async def consume_messages():
async with aiorabbit.connect(self.rabbitmq_url) as rabbitmq:
async for message in rabbitmq.consume(self.queue):
messages.remove(message.body)
await rabbitmq.basic_ack(message.delivery_tag)
if not messages:
rabbitmq._on_remote_close(599, 'Test Close')
with self.assertRaises(exceptions.ConnectionClosedException):
await consume_messages()
msgs, _consumers = await self.client.queue_declare(self.queue)
self.assertEqual(msgs, 0)
@testing.async_test
async def test_disconnected_while_consuming(self):
messages = await self.rmq_setup()
async def consume_messages():
async with aiorabbit.connect(self.rabbitmq_url) as rabbitmq:
async for message in rabbitmq.consume(self.queue):
messages.remove(message.body)
await rabbitmq.basic_ack(message.delivery_tag)
if not messages:
rabbitmq._on_disconnected(None)
with self.assertRaises(exceptions.ConnectionClosedException):
await consume_messages()
msgs, _consumers = await self.client.queue_declare(self.queue)
self.assertEqual(msgs, 0)
``` |
{
"source": "johanrhodin/rabbitmq-website",
"score": 3
} |
#### File: rabbitmq-website/code/render.py
```python
from lxml import etree
import re
import os
import os.path
import markdown
import codecs
import sys
reload(sys)
sys.setdefaultencoding('utf8')
try:
from mod_python import apache
except ImportError:
class StubApache:
def __init__(self):
self.HTTP_NOT_FOUND = 404
self.HTTP_INTERNAL_SERVER_ERROR = 500
self.OK = 0
apache = StubApache()
SITE_DIR='define_me_before_use'
def preprocess_markdown(fpath):
contents = open(fpath).read()
## Markdown will treat the whole file as markdown, whereas
## we want to only transform the body text.
title = re.search("^#\s*(\S.*\S)\s*$", contents, re.M)
contents = contents[0:title.start()] + contents[title.end():]
title = title.group(1)
entities = open(os.path.join(SITE_DIR, 'rabbit.ent')).read()
entities = '\n'.join(entities.split('\n')[1:])
nosyntax = re.search("NOSYNTAX", title)
if nosyntax:
title = re.sub("NOSYNTAX", "", title)
suppressRHS = re.search("SUPPRESS-RHS", title)
if suppressRHS:
title = re.sub("SUPPRESS-RHS", "", title)
pre = """<?xml-stylesheet type="text/xml" href="page.xsl"?>
<!DOCTYPE html [
%s
<!ENTITY nbsp " ">
]>
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:xi="http://www.w3.org/2003/XInclude">""" % entities
head = """<head>
<title>%s</title>
</head>
<body%s>
""" % (title, suppressRHS and ' suppress-rhs="true"' or '')
post = """</body>
</html>
"""
if nosyntax:
args = ["tables"]
else:
args = ["codehilite(css_class=highlight)", "tables"]
processed = markdown.markdown(contents, args)
# Unfortunately we can't stop markdown escaping entities. Unescape them.
processed = re.sub(r'&([a-z0-9-_.:]+);', r'&\1;', processed)
tutorial = re.search(r'tutorials/(tutorial-[a-z]*)-[a-z]*.md$', fpath)
if tutorial is not None:
tutorial_head = """<div id="left-content" class="tutorial">"""
tutorial_foot = """</div><div id="right-nav" class="{0}">
<xi:include href="site/tutorials/tutorials-menu.xml.inc"/>
</div>""".format(tutorial.group(1))
processed = tutorial_head + processed + tutorial_foot
utf8_parser = etree.XMLParser(encoding='utf-8')
s = (pre + head + processed + post).encode("utf-8")
return etree.fromstring(s, parser = utf8_parser).getroottree()
def parse(fpath):
class MissingFeedResolver(etree.Resolver):
def resolve(self, url, id, context):
if not '://' in url and not os.path.exists(url):
print "Ignoring missing file ", url
return self.resolve_empty(context)
return None # Defer to other resolvers
# TODO cache the blog feed and revert to no_network = True
parser = etree.XMLParser(ns_clean = True, no_network = False)
parser.resolvers.add(MissingFeedResolver())
try:
return etree.parse(fpath, parser)
except Exception as e:
print "\n\nException rendering {0}".format(fpath)
raise e
MARKUPS={'.xml': parse,
'.md': preprocess_markdown}
class Error404(Exception):
pass
class Error500(Exception):
pass
def render_page(page_name, site_mode, version = None):
"""
Look for the XML file with this name. If found, look inside it for a
stylesheet processing instruction, apply the transformation and return
the transformed document.
"""
# Simple security check to prevent walking the filesystem
if page_name.find("../") != -1:
raise Error404
match = re.match('/(.*?)(\.html)?$', page_name)
if match:
page_name = match.group(1)
page_id = match.group(1)
else:
raise Error404
if page_name == '':
page_name = 'index'
xml_doc = read_file(page_name)
xml_doc.xinclude()
query = '/processing-instruction(\'xml-stylesheet\')'
xslt_file_name = xml_doc.xpath(query)[0].get('href')
xslt_doc = parse(os.path.join(SITE_DIR, xslt_file_name))
params = {'page-name': "'/%s.html'" % page_name,
'site-mode': "'%s'" % site_mode,
'page-id': "'%s'" % page_id}
transform = etree.XSLT(xslt_doc)
xhtml_doc = transform(xml_doc, **params)
if version:
xslt_rebase = parse(os.path.join(SITE_DIR, 'rebase.xsl'))
param = {'link-prefix': "'%s'" % version}
transform = etree.XSLT(xslt_rebase)
result = transform(xhtml_doc, **param)
else:
result = xhtml_doc
return str(result)
def read_file(page_name):
for ext in MARKUPS:
preprocess = MARKUPS[ext]
file_name = page_name + ext
fpath = os.path.join(SITE_DIR, file_name)
if os.path.exists(fpath):
return preprocess(fpath)
raise Error404, page_name
def handler(req, site_mode):
req.content_type = "text/html; charset=utf-8"
uri = getattr(req, "path", req.uri)
try:
req.write(render_page(uri, site_mode))
except Error404:
req.status = apache.HTTP_NOT_FOUND
req.write(render_page('/404', site_mode))
except Error500:
req.status = apache.HTTP_INTERNAL_SERVER_ERROR
req.write(render_page('/500', site_mode))
return apache.OK
``` |
{
"source": "johanribeiro/notion-to-ics",
"score": 3
} |
#### File: johanribeiro/notion-to-ics/timezone_builder.py
```python
from datetime import datetime, timedelta
import pytz
from icalendar import Timezone, TimezoneStandard
class TimezoneBuilder:
__tzid: str
def __init__(self, tzid):
self.__tzid = tzid
@property
def tzid(self):
return self.__tzid
def to_icalendar(self):
timezone = Timezone()
standard = TimezoneStandard()
tzinfo = pytz.timezone(self.__tzid)
offset = self.__get_tz_offset(datetime.now(tzinfo))
standard.add("dtstart", datetime(2000, 1, 1))
standard.add("tzoffsetfrom", offset)
standard.add("tzoffsetto", offset)
timezone.add("tzid", self.tzid.upper())
timezone.add_component(standard)
return timezone
def __get_tz_offset(self, dt):
offset = dt.strftime("%z")
has_signal = offset[0] == "-" or offset[0] == "+"
multiplier = 1
start = 0
pos = 2
if has_signal:
start = 1
pos = 3
if offset[0] == "-":
multiplier = -1
hours = int(offset[start:pos])
minutes = int(offset[pos:])
return timedelta(minutes=minutes * multiplier, hours=hours * multiplier)
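# Hedged usage sketch (the zone id is just an example): build the VTIMEZONE
# component and serialize it with icalendar:
#
#   builder = TimezoneBuilder("America/Sao_Paulo")
#   print(builder.to_icalendar().to_ical().decode())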
``` |
{
"source": "JohanRodhe/scarf",
"score": 2
} |
#### File: scarf/tests/conftest.py
```python
import pytest
import os
@pytest.fixture
def pbmc_reader(scope="module"):
from ..readers import CrH5Reader
fn = os.path.join('scarf', 'tests', 'datasets', '1K_pbmc_citeseq.h5')
return CrH5Reader(fn, 'rna')
``` |
{
"source": "JohanRuuskanen/FedApp",
"score": 2
} |
#### File: deploy_istio/validate/deploy_test.py
```python
import os
import yaml
CLUSTER_1 = "cluster-1"
CLUSTER_2 = "cluster-2"
CLUSTER_ROOT = "/home/ubuntu/run_on_gateway/clusters"
def deploy_test(clusters):
for cluster in clusters:
print("====== {} ======".format(cluster))
os.system("kubectl --context={} create namespace sample".format(cluster))
os.system("kubectl --context={} label namespace sample istio-injection=enabled".format(cluster))
os.system("kubectl --context={} -n sample create secret generic gitlab-auth".format(cluster) \
+ " --from-file=.dockerconfigjson=/home/ubuntu/.docker/config.json" \
+ " --type=kubernetes.io/dockerconfigjson")
os.system("kubectl --context={} apply -n sample -f src/helloworld.yaml".format(cluster))
os.system("kubectl --context={} apply -n sample -f src/sleep.yaml".format(cluster))
if __name__ == '__main__':
clusters = [dir for dir in os.listdir(CLUSTER_ROOT) if "cluster-" in dir]
clusters.sort()
deploy_test(clusters)
```
#### File: deploy_istio/validate/remove_test.py
```python
import os
CLUSTER_ROOT = "/home/ubuntu/run_on_gateway/clusters"
def remove_test(clusters):
for cluster in clusters:
os.system("kubectl --context={} -n sample delete -f src/sleep.yaml".format(cluster))
os.system("kubectl --context={} -n sample delete -f src/helloworld.yaml".format(cluster))
os.system("kubectl --context={} -n sample delete secret gitlab-auth".format(cluster))
os.system("kubectl --context={} delete namespace sample".format(cluster))
if __name__ == '__main__':
clusters = [dir for dir in os.listdir(CLUSTER_ROOT) if "cluster-" in dir]
clusters.sort()
remove_test(clusters)
```
#### File: run_on_gateway/deploy_monitoring/remove_deployment.py
```python
import os
import time
import subprocess
ROOT = "/home/ubuntu/run_on_gateway/clusters"
def remove_prom_operator(clusters):
for cluster in clusters:
print("For %s" % cluster)
os.system("helm --kube-context=%s --namespace monitoring uninstall po" % cluster)
os.system("kubectl --context=%s delete crd prometheuses.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete crd prometheusrules.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete crd servicemonitors.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete crd podmonitors.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete crd probes.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete crd alertmanagers.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete crd alertmanagerconfigs.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete crd thanosrulers.monitoring.coreos.com" % cluster)
os.system("kubectl --context=%s delete namespace monitoring" % cluster)
print("\n")
if __name__ == '__main__':
clusters = [dir for dir in os.listdir(ROOT) if "cluster-" in dir]
clusters.sort()
print("Removing the cluster prom-operators")
remove_prom_operator(clusters)
print("Removing local grafana")
os.system("docker rm -f grafana")
print("\n")
```
#### File: run_on_gateway/netem/network_mapping.py
```python
import sys
import os
import shutil
import argparse
import numpy as np
import subprocess
import csv
DELAY_MATRIX = "/home/ubuntu/run_on_gateway/chaos_netem/matrix.csv"
def read_map(networks):
dim = len(networks)
default_matrix = np.zeros((dim, dim))
delay_matrix = []
try:
with open(DELAY_MATRIX, "r") as file:
reader = csv.reader(file, quoting=csv.QUOTE_NONNUMERIC) # change contents to floats
for row in reader: # each row is a list
delay_matrix.append(row)
delay_matrix = np.array(delay_matrix)
if np.shape(delay_matrix) != np.shape(default_matrix):
print("Wrong matrix dimension, use defualt")
delay_matrix = default_matrix
for i in range(dim):
if delay_matrix[i][i] != 0:
print("Wrong matrix, use defualt")
delay_matrix = default_matrix
break
except FileNotFoundError:
print("No delay matrix found, use default")
delay_matrix = default_matrix
return delay_matrix
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Apply the netem delay matrix to the cluster network interfaces.')
    parser.add_argument('networks', metavar='N', nargs='+',
                        help='list of network interfaces')
args = parser.parse_args()
networks = args.networks
    # Filter out networks not related to the clusters, e.g. docker0. This could be
    # made smarter by filtering on the IPs instead, to only choose networks with
    # IPs 10.0.i.0 where i > 0
networks = [n for n in networks if n.find("ens") == 0 and n != "ens3"]
matrix = read_map(networks)
# Restore
print("If old config exist, restore to default first")
for dst in range(len(networks)):
subprocess.run(["tc", "qdisc", "del", "dev", networks[dst], "root"])
for dst in range(len(networks)):
subprocess.run(["tc", "qdisc", "add", "dev", networks[dst], "handle", "1:", "root", "htb", "r2q", "1700"])
subprocess.run(["tc", "class", "add", "dev", networks[dst], "parent", "1:", "classid", "1:1", "htb", "rate", "10Gbps", "ceil", "10Gbps"])
flow = 11
for src in range(len(networks)):
if matrix[src][dst] == 0:
pass
else:
src_address = "10.0."+str(src+1)+".0/24"
delay = str(matrix[src][dst]) + "ms"
print("Add delay of {} from {} ({}) to {}".format(delay, networks[src], src_address, networks[dst]))
classid = "1:"+str(flow)
handle_nbr = str((flow-10)*10)+":"
subprocess.run(["tc", "class", "add", "dev", networks[dst], "parent", "1:1", "classid", classid, "htb", "rate", "10Gbps"])
subprocess.run(["tc", "qdisc", "add", "dev", networks[dst], "parent", classid, "handle", handle_nbr, "netem", "delay", delay])
subprocess.run(["tc", "filter", "add", "dev", networks[dst], "parent", "1:", "protocol", 'ip', "prio", "1", "u32", "match", "ip", "src", src_address, "flowid", classid])
flow += 1
``` |
{
"source": "johans1jo/wunderground_scraper",
"score": 3
} |
#### File: johans1jo/wunderground_scraper/main.py
```python
import os
import re
from numpy.lib.function_base import iterable
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import pandas as pd
from dateutil import parser, rrule
from datetime import datetime, time, date
from tqdm import tqdm
import time
def getWundergroundData(station, day, month, year):
"""
Function to return a data frame of hour-level weather data for a single Wunderground PWS station.
Args:
station (string): Station code from the Wunderground website
day (int): Day of month for which data is requested
month (int): Month for which data is requested
year (int): Year for which data is requested
returns:
Pandas Dataframe with weather data for specified station and date.
"""
url = "https://www.wunderground.com/history/daily/np/kathmandu/{station}/date/{year}-{month}-{day}"
full_url = url.format(station=station, day=day, month=month, year=year)
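    # Wunderground renders the observation table with JavaScript, so use headless Chrome via Selenium and wait for the table elements to appear.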
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome("/usr/bin/chromedriver", options=chrome_options)
driver.get(full_url)
tables = WebDriverWait(driver,20).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "table")))
for table in tables:
dataframe = pd.read_html(table.get_attribute('outerHTML'))
# Adding date to the data
date_insert = "{year}-{month}-{day}".format(day=day, month=month, year=year)
dataframe[0].insert(0, "Date", date_insert)
return dataframe[0][:24]
def getDates(start_date, end_date):
"""Generates a list of dates.
Args:
start_date (string): yyyy-mm-dd
end_date (string): yyyy-mm-dd
returns:
        a list of datetime objects
"""
# Generate a list of all of the dates we want data for
start = parser.parse(start_date)
end = parser.parse(end_date)
dates = list(rrule.rrule(rrule.DAILY, dtstart=start, until=end))
return dates
def weatherStationToCSV(data, station):
"""Saves data from one weather station to a Comma Separated Values file (CSV file)
Args:
data (2D list): a list with weather stations and their data
station (string): the station in question
"""
# Combine all of the individual days and output to CSV for analysis.
outname = '{}_raw_weather.csv'.format(station)
outdir = './data/'
if not os.path.exists(outdir):
os.mkdir(outdir)
fullname = os.path.join(outdir, outname)
pd.concat(data[station]).to_csv(fullname)
def weatherStationToEXCEL(data, station):
"""Saves data from one weather station to an Excel file for use in Unscrambler
Args:
data (2D list): a list with weather stations and their data
station (string): the station in question
"""
    # Combine all of the individual days and output to Excel for analysis.
    outname = '{}_raw_weather.xlsx'.format(station)
outdir = './data/'
if not os.path.exists(outdir):
os.mkdir(outdir)
fullname = os.path.join(outdir, outname)
pd.concat(data[station]).to_excel(fullname)
def scrapeDataToCSV(station, dates):
"""A function to scrape all data corresponding to "dates" and save the result
Args:
station (string): the station in question
        dates (list of datetime): a list of dates to scrape
"""
# Set a backoff time in seconds if a request fails
backoff_time = 10
data = {}
# Gather data for each station in turn and save to CSV.
print("Working on {}".format(station))
data[station] = []
for date in tqdm(dates):
# Print period status update messages
if date.day % 10 == 0:
print("\nWorking on date: {} for station {}".format(date, station))
done = False
while done == False:
try:
weather_data = getWundergroundData(station, date.day, date.month, date.year)
#print(weather_data)
done = True
except ConnectionError as e:
# May get rate limited by Wunderground.com, backoff if so.
print("Got connection error on {}".format(date))
print("Will retry in {} seconds".format(backoff_time))
                time.sleep(backoff_time)
# Add each processed date to the overall data
data[station].append(weather_data)
#print(data)
    weatherStationToCSV(data, station)
    weatherStationToEXCEL(data, station)
def processData(station):
"""Prosessing the data to make it suitable to further analysis.
Args:
station (string): the station in question
"""
csv_name = '{}_raw_weather.csv'.format(station)
# Loading CSV
data_raw = pd.read_csv('data/' + csv_name)
# Changing column names to something nice
cols = {'Date' : 'Date',
'Temperature' : 'Temperature [F]',
'Dew Point' : 'Dew Point [F]',
'Humidity' : 'Humidity [%]',
'Wind' : 'Wind Direction',
'Wind Speed' : 'Wind Speed [mph]',
'Wind Gust' : 'Wind Gust [mph]',
'Pressure' : 'Pressure [in]',
'Precip.' : 'Precipitation [inch]'}
data_raw.rename(columns = cols, inplace=True)
# Converting the data from string with unit to only a float number
data_raw['Time'] = data_raw['Time'].str.split(expand = True)[:][0]#.astype(float)
data_raw['Time'] = data_raw['Time'].str.replace(':', '.')
data_raw['Temperature [F]'] = data_raw['Temperature [F]'].str.split(expand = True)[:][0]#.astype(float)
data_raw['Dew Point [F]'] = data_raw['Dew Point [F]'].str.split(expand = True)[:][0]#.astype(float)
data_raw['Humidity [%]'] = data_raw['Humidity [%]'].str.split(expand = True)[:][0]#.astype(float)
data_raw['Wind Speed [mph]'] = data_raw['Wind Speed [mph]'].str.split(expand = True)[:][0]#.astype(float)
data_raw['Wind Gust [mph]'] = data_raw['Wind Gust [mph]'].str.split(expand = True)[:][0]#.astype(float)
data_raw['Pressure [in]'] = data_raw['Pressure [in]'].str.split(expand = True)[:][0].str.replace('.', ',')#.astype(float)
data_raw['Pressure [in]'] = data_raw['Pressure [in]'].str.replace('.', ',')
data_raw['Precipitation [inch]'] = data_raw['Precipitation [inch]'].str.split(expand = True)[:][0]#.astype(float)
    # Kathmandu has no precipitation sensor; dropping the column
data_raw = data_raw.drop('Precipitation [inch]', axis = 1)
    # Updating CSV with processed data
data_raw.to_csv('data/{}_processed_weather.csv'.format(station))
data_raw.to_excel('data/{}_processed_weather.xlsx'.format(station))
def oneHotEncode(station):
"""One HHot encodes the string variables that Wunderground returns.
"""
csv_name = '{}_processed_weather.csv'.format(station)
# Loading CSV
data_raw = pd.read_csv('data/' + csv_name)
one_hot = pd.get_dummies(data_raw['Wind Direction'])
data_raw = data_raw.drop('Wind Direction', axis = 1)
data_raw = data_raw.join(one_hot)
# Possible conditions :
# ['Cloudy', 'Drizzle', 'Fair', 'Fog', 'Heavy Rain', 'Heavy T-Storm', 'Light Rain', 'Light Rain with Thunder', 'Mostly Cloudy', 'Partly Cloudy', 'Rain', 'T-Storm', 'Thunder']
generalized_conditions = []
for item in data_raw['Condition']:
if type(item) != str:
generalized_conditions.append("Other")
elif "rain" in item.lower() or "drizzle" in item.lower() or "misty" in item.lower() or "storm" in item.lower() or "thunder" in item.lower():
generalized_conditions.append("Rain")
elif "cloudy" in item.lower() or "fog" in item.lower():
generalized_conditions.append("Cloudy")
elif "fair" in item.lower():
generalized_conditions.append("Sun")
else:
generalized_conditions.append("Other")
one_hot = pd.get_dummies(generalized_conditions)
data_raw = data_raw.drop('Condition', axis = 1)
data_raw = data_raw.join(one_hot)
    # Updating CSV with processed data
data_raw.to_csv('data/{}_onehot_weather.csv'.format(station))
data_raw.to_excel('data/{}_onehot_weather.xlsx'.format(station))
def main():
start_date = "2019-01-01"
end_date = "2019-12-28"
    station = 'VNKT'  # Kathmandu
dates = getDates(start_date, end_date)
#scrapeDataToCSV(station, dates)
processData(station)
oneHotEncode(station)
if __name__== "__main__":
main()
``` |
{
"source": "JohanSamir/rainbow_extend",
"score": 3
} |
#### File: lifting_veil/agents/opt_utils.py
```python
import gin
import optax
from absl import logging
@gin.configurable
def create_opt(name='adamw', learning_rate=6.25e-5, beta1=0.9, beta2=0.999,
eps=1.5e-4, weight_decay=0.0, centered=False):
"""Create an optimizer for training.
Currently, only the Adam and RMSProp optimizers are supported.
Args:
name: str, name of the optimizer to create.
learning_rate: float, learning rate to use in the optimizer.
beta1: float, beta1 parameter for the optimizer.
beta2: float, beta2 parameter for the optimizer.
    eps: float, epsilon parameter for the optimizer.
    weight_decay: float, weight decay parameter for AdamW.
    centered: bool, centered parameter for RMSProp.
Returns:
    An optax optimizer.
"""
  if name in ('adam', 'adamw'):
    # Both names map to optax.adamw; with weight_decay=0.0 this is equivalent to plain Adam.
    logging.info('Creating AdamW optimizer with settings lr=%f, beta1=%f, '
                 'beta2=%f, eps=%f, weight decay=%f', learning_rate, beta1,
                 beta2, eps, weight_decay)
    return optax.adamw(learning_rate, b1=beta1, b2=beta2, eps=eps,
                       weight_decay=weight_decay)
elif name == 'rmsprop':
logging.info('Creating RMSProp optimizer with settings lr=%f, beta2=%f, '
'eps=%f', learning_rate, beta2, eps)
return optax.rmsprop(learning_rate, decay=beta2, eps=eps,
centered=centered)
else:
raise ValueError('Unsupported optimizer {}'.format(name))
```
#### File: rainbow_extend/lifting_veil/utils.py
```python
from agents.dqn_agent_new import *
from agents.rainbow_agent_new import *
import numpy as np
import itertools
agents = {
'dqn': JaxDQNAgentNew,
'rainbow': JaxRainbowAgentNew,
# 'quantile': JaxQuantileAgentNew,
# 'implicit': JaxImplicitQuantileAgentNew,
}
inits = {
'orthogonal': {
'function': jax.nn.initializers.orthogonal
},
'zeros': {
'function': jax.nn.initializers.zeros
},
'ones': {
'function': jax.nn.initializers.ones
},
'xavier_uni': {
'function': jax.nn.initializers.variance_scaling,
'scale': 1,
'mode': 'fan_avg',
'distribution': 'uniform'
},
'xavier_nor': {
'function': jax.nn.initializers.variance_scaling,
'scale': 1,
'mode': 'fan_avg',
'distribution': 'truncated_normal'
},
'lecun_uni': {
'function': jax.nn.initializers.variance_scaling,
'scale': 1,
'mode': 'fan_in',
'distribution': 'uniform'
},
'lecun_nor': {
'function': jax.nn.initializers.variance_scaling,
'scale': 1,
'mode': 'fan_in',
'distribution': 'truncated_normal'
},
'he_uni': {
'function': jax.nn.initializers.variance_scaling,
'scale': 2,
'mode': 'fan_in',
'distribution': 'uniform'
},
'he_nor': {
'function': jax.nn.initializers.variance_scaling,
'scale': 2,
'mode': 'fan_in',
'distribution': 'truncated_normal'
},
'variance_baseline': {
'function': jax.nn.initializers.variance_scaling,
'scale': 1.0 / np.sqrt(3.0),
'mode': 'fan_in',
'distribution': 'uniform'
},
'variance_0.1': {
'function': jax.nn.initializers.variance_scaling,
'scale': 0.1,
'mode': 'fan_in',
'distribution': 'uniform'
},
'variance_0.3': {
'function': jax.nn.initializers.variance_scaling,
'scale': 0.3,
'mode': 'fan_in',
'distribution': 'uniform'
},
'variance_0.8': {
'function': jax.nn.initializers.variance_scaling,
'scale': 0.8,
'mode': 'fan_in',
'distribution': 'uniform'
},
'variance_3': {
'function': jax.nn.initializers.variance_scaling,
'scale': 3,
'mode': 'fan_in',
'distribution': 'uniform'
},
'variance_5': {
'function': jax.nn.initializers.variance_scaling,
'scale': 5,
'mode': 'fan_in',
'distribution': 'uniform'
},
'variance_10': {
'function': jax.nn.initializers.variance_scaling,
'scale': 10,
'mode': 'fan_in',
'distribution': 'uniform'
}
}
activations = ['non_activation', 'relu', 'relu6', 'sigmoid', 'softplus', 'soft_sign', 'silu', 'swish', 'log_sigmoid', 'hard_sigmoid', 'hard_silu', 'hard_swish', 'hard_tanh', 'elu', 'celu', 'selu', 'gelu', 'glu']
normalizations = ['non_normalization', 'BatchNorm', 'LayerNorm']
learning_rates = [10, 5, 2, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
batch_sizes = [32, 64, 128, 256, 512]
epsilons = [1, 0.5, 0.3125, 0.03125, 0.003125, 0.0003125, 0.00003125, 0.000003125]
widths = [32, 64, 128, 256, 512, 1024]
depths = [1, 2, 3, 4]
update_periods = [1 ,2 ,3, 4, 8, 10, 12]
target_update_periods = [10, 25, 50, 100, 200, 400, 800, 1600]
gammas = [0.1, 0.5, 0.9, 0.99, 0.995, 0.999]
min_replay_historys = [125, 250, 375, 500, 625, 750, 875, 1000]
num_atoms = [11, 21, 31, 41, 51, 61]
update_horizon = [1, 2, 3, 4, 5, 8, 10]
noisy_net = [True, False]
clip_rewards = ["True", "False"]
experiments = {
"epsilon": epsilons,
"learning_rate": learning_rates,
"width": widths,
"depth": depths,
"normalization": normalizations,
"init": inits,
"activation": activations,
"update_period": update_periods,
"target_update_period": target_update_periods,
"gamma": gammas,
"min_replay_history": min_replay_historys,
"num_atoms": num_atoms,
"update_horizon": update_horizon,
"clip_rewards": clip_rewards,
}
groups = { "effective_horizon" : [update_periods, gammas],
"constancy_of_parameters" : [inits, update_periods, noisy_net],
"network_starting point" : [inits, activations, depths, normalizations],
"network_architecture" : [depths, widths, normalizations],
#"algorithmic_parameters" : [update_period", "gamma],
#"distribution_parameterization" : [update_period", "gamma],
"optimizer_parameters" : [learning_rates, epsilons, batch_sizes]
#"bellman_updates" : ["update_period", "gamma"]
}
def get_init_bidings(agent_name, init, seed=None):
initializer = inits[init]['function'].__name__
if init == 'zeros' or init == 'ones':
gin_bindings = [f"{agent_name}.seed={seed}",
f"{agent_name}.initzer = @{initializer}"]
elif init == "orthogonal":
gin_bindings = [f"{agent_name}.seed={seed}",
f"{agent_name}.initzer = @{initializer}()",
f"{initializer}.scale = 1"]
else:
mode = '"'+inits[init]['mode']+'"'
scale = inits[init]['scale']
distribution = '"'+inits[init]['distribution']+'"'
gin_bindings = [f"{agent_name}.seed={seed}",
f"{agent_name}.initzer = @{initializer}()",
f"{initializer}.scale = {scale}",
f"{initializer}.mode = {mode}",
f"{initializer}.distribution = {distribution}"
]
return gin_bindings
def get_gin_bindings(exp, agent_name, initial_seed, value, test):
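    # Translate an experiment name and value into the gin bindings that override that hyperparameter for the given agent and seed.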
if exp == "epsilon":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"create_opt.eps = {value}"]
elif exp == "learning_rate":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"create_opt.learning_rate = {value}"]
elif exp == "width":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.neurons = {value}"]
elif exp == "depth":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.hidden_layer = {value}"]
elif exp == "normalization":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.normalization = '{value}'"]
elif exp == "init":
gin_bindings = get_init_bidings(agent_name, value, initial_seed)
elif exp == "activation":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.layer_funct = '{value}'"]
elif exp == "update_period":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.update_period = {value}"]
elif exp == "target_update_period":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.target_update_period = {value}"]
elif exp == "gamma":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.gamma = {value}"]
elif exp == "min_replay_history":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.min_replay_history = {value}"]
elif exp == "num_atoms":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.num_atoms = {value}"]
elif exp == "update_horizon":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"{agent_name}.update_horizon = {value}"]
elif exp == "clip_rewards":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"Runner.clip_rewards = {value}"]
elif exp == "batch_size":
gin_bindings = [f"{agent_name}.seed={initial_seed}", f"OutOfGraphPrioritizedReplayBuffer.batch_size = {value}"]
else:
print("Error! Check the kind of experiment")
if test:
gin_bindings.extend(["Runner.num_iterations=4", "Runner.training_steps=200"])
return gin_bindings
def sample_group(grp, num=1):
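    # Build the Cartesian product of all value lists in the group and draw `num` distinct combinations without replacement.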
total = list(itertools.product(*[exp for exp in groups[grp]]))
total = np.array(total)
indices = np.random.choice(len(total), num, replace=False)
sample = total[indices]
return sample
``` |
{
"source": "johanseaxis/acap-native-sdk-examples",
"score": 3
} |
#### File: env/training/train.py
```python
import argparse
import tensorflow as tf
from model import create_model
from utils import SimpleCOCODataGenerator as DataGenerator
def train(image_dir, annotation_path):
""" Initiates a model and and trains it using a data generator. The model
is then saved to the output path.
Args:
image_dir (str): Path to the directory holding the dataset images.
annotation_path (str): Path to the dataset annotation json-file.
"""
person_car_indicator = create_model()
person_car_indicator.compile(optimizer='adam', metrics=['binary_accuracy'],
loss=['bce', 'bce'])
person_car_indicator.summary()
data_generator = DataGenerator(image_dir, annotation_path, batch_size=16)
person_car_indicator.fit(data_generator, epochs=10)
tf.saved_model.save(person_car_indicator, 'models/saved_model')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train a basic model.')
parser.add_argument('-i', '--images', type=str, required=True,
help='path to the directory containing training \
images')
parser.add_argument('-a', '--annotations', type=str, required=True,
help='path to the .json-file containing COCO instance \
annotations')
args = parser.parse_args()
train(args.images, args.annotations)
``` |
{
"source": "johansetiawan/telebot",
"score": 3
} |
#### File: johansetiawan/telebot/bot_local.py
```python
from dotenv import load_dotenv
import os
## for chatbot functionalities
import telebot
from string import Template
import emoji
from gtts import gTTS
## for data analysis
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
# SETUP: TELEGRAM BOT API TOKEN
load_dotenv()
TOKEN = os.environ['TOKEN']
bot = telebot.TeleBot(TOKEN)
# -------------------- CHECKPOINT 1 --------------------
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
# TO DO: chat_id, full_name, message_text
chat_id = message.from_user.id
first_name = message.from_user.first_name
last_name = message.from_user.last_name
full_name = f'{first_name} {last_name}' if last_name is not None else first_name
    # TO DO: substitute text with variable
with open('template_text/welcome.txt', mode='r', encoding='utf-8') as f:
content = f.read()
temp = Template(content)
welcome = temp.substitute(FULL_NAME = full_name)
bot.send_message(
chat_id,
welcome,
parse_mode='Markdown'
)
@bot.message_handler(commands=['about'])
def send_about(message):
# TO DO: chat_id
chat_id = message.from_user.id
    # TO DO: substitute text with static values
with open('template_text/about.txt', mode='r', encoding='utf-8') as f:
content = f.read()
temp = Template(content)
about = temp.substitute(
STUDENT_NAME = "<NAME>",
BATCH_ACADEMY = "Phoenix Day Online",
GITHUB_REPO_LINK = "https://github.com/johansetiawan/telebot"
)
bot.send_message(
chat_id,
about,
parse_mode='Markdown'
)
# -------------------- CHECKPOINT 2 --------------------
# TO DO: read data and convert data type
df = pd.read_csv("data_input/facebook_ads_v2.csv", parse_dates=["reporting_date"])
# Convert the dtypes of gender, interest 1 to 3 from object to category to lower the memory usage
df[['gender', 'interest1', 'interest2', 'interest3']] = df[['gender', 'interest1', 'interest2', 'interest3']].astype('category')
# TO DO: get unique values of campaign_id
df['campaign_id'] = df['campaign_id'].astype('str')
unique_campaign = df['campaign_id'].unique()
@bot.message_handler(commands=['summary'])
def ask_id_summary(message):
# TO DO: chat_id (SAME AS CHECKPOINT 1)
chat_id = message.from_user.id
markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
for i in unique_campaign:
markup.add(i)
sent = bot.send_message(chat_id, 'Choose campaign to be summarized:', reply_markup=markup)
bot.register_next_step_handler(sent, send_summary)
def send_summary(message):
# TO DO: chat_id (SAME AS CHECKPOINT 1)
chat_id = message.from_user.id
selected_campaign_id = message.text
if selected_campaign_id in unique_campaign:
# TO DO: find the range date
df_campaign = df[df['campaign_id'] == selected_campaign_id]
start_date = df_campaign['reporting_date'].min().strftime(format='%d %b %Y')
end_date = df_campaign['reporting_date'].max().strftime(format='%d %b %Y')
# TO DO: perform calculation
total_spent = df_campaign['spent'].sum().astype('int64')
total_conversion = df_campaign['total_conversion'].sum().astype('int64')
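        # CPC here is computed as cost per conversion: total spent divided by total conversions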
cpc = (total_spent/total_conversion).round(1)
        # TO DO: substitute text with variables
with open('template_text/summary.txt', mode='r', encoding='utf-8') as f:
content = f.read()
temp = Template(content)
summary = temp.substitute(
CAMPAIGN_ID = selected_campaign_id,
START_DATE = start_date,
END_DATE = end_date,
TOTAL_SPENT = f"${total_spent:,}",
TOTAL_CONVERSION = f"{total_conversion:,}",
CPC = f"${cpc:,.1f}"
)
bot.send_message(chat_id, summary)
else:
bot.send_message(chat_id, 'Campaign ID not found. Please try again!')
ask_id_summary(message)
# -------------------- CHECKPOINT 3 --------------------
@bot.message_handler(commands=['plot'])
def ask_id_plot(message):
# TO DO: chat_id (SAME AS CHECKPOINT 1)
chat_id = message.from_user.id
markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
for i in unique_campaign:
markup.add(i)
sent = bot.send_message(chat_id, 'Choose campaign to be visualized:', reply_markup=markup)
bot.register_next_step_handler(sent, send_plot)
def send_plot(message):
# TO DO: chat_id (SAME AS CHECKPOINT 1)
chat_id = message.from_user.id
selected_campaign_id = message.text
if selected_campaign_id in unique_campaign:
# TO DO: prepare data for visualization
df_campaign = df[df['campaign_id'] == selected_campaign_id]
df_plot = df_campaign.groupby('age')[['spent', 'approved_conversion']].sum()
df_plot['cpc'] = df_plot['spent']/df_plot['approved_conversion']
# TO DO: visualization
# prepare 3 subplots vertically
fig, axes = plt.subplots(3, sharex=True, dpi=300)
# create frameless plot
for ax in axes:
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
# first subplot: total spent per age group
axes[0].bar(x=df_plot.index, height=df_plot['spent'], color="#AE2024")
axes[0].set_ylabel("Total Spent", fontsize=8)
# second subplot: total approved conversion per age group
axes[1].bar(x=df_plot.index, height=df_plot['approved_conversion'], color="#000000")
axes[1].set_ylabel("Total Approved\n Conversion", fontsize=8)
# third subplot: average CPC per age group
axes[2].bar(x=df_plot.index, height=df_plot['cpc'], color="#AE2024")
axes[2].set_ylabel("Average CPC", fontsize=8)
# set the label and title for plots
plt.xlabel("Age Group")
axes[0].set_title(
f'''Average CPC, Total Spent, and Total Approved Conversion
across Age Group for Campaign ID: {selected_campaign_id}''')
# create output folder
if not os.path.exists('output'):
os.makedirs('output')
# save plot
plt.savefig('output/plot.png', bbox_inches='tight')
# send plot
bot.send_chat_action(chat_id, 'upload_photo')
with open('output/plot.png', 'rb') as img:
bot.send_photo(chat_id, img)
# (EXTRA CHALLENGE) Voice Message
plot_info = list(zip(
['total spent', 'total approved conversion', 'average cpc'],
df_plot.idxmax(),
df_plot.idxmin()))
plot_text = f'This is your requested plot for Campaign ID {selected_campaign_id}.\n'
for col, maxi, mini in plot_info:
text = f"Age group with the highest {col} is {maxi}, while the lowest is {mini}.\n"
plot_text += text
# save voice message
speech = gTTS(text = plot_text)
speech.save('output/plot_info.ogg')
# send voice message
with open('output/plot_info.ogg', 'rb') as f:
bot.send_voice(chat_id, f)
else:
bot.send_message(chat_id, 'Campaign ID not found. Please try again!')
ask_id_plot(message)
# -------------------- CHECKPOINT 4 --------------------
@bot.message_handler(func=lambda message: True)
def echo_all(message):
# TO DO: emoji
with open('template_text/default.txt', mode='r', encoding='utf-8') as f:
temp = Template(f.read())
default = temp.substitute(EMOJI = emoji.emojize(':hear_no_evil:', use_aliases=True))
bot.reply_to(message, default)
if __name__ == "__main__":
bot.polling()
``` |
{
"source": "johansettlin/Flask-MonitoringDashboard-Tutorial",
"score": 2
} |
#### File: johansettlin/Flask-MonitoringDashboard-Tutorial/app.py
```python
from flask import Flask
import time
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Flask-Monitoring-Dashboard tutorial'
@app.route('/endpoint1')
def endpoint1():
time.sleep(0.20)
return 'Endpoint1', 400
@app.route('/endpoint2')
def endpoint2():
time.sleep(5)
return 'Endpoint2'
``` |
{
"source": "johansettlin/thesis-work",
"score": 2
} |
#### File: johansettlin/thesis-work/filter.py
```python
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from graph_api import *
def exchangePattern1(g):
## BAD PATTERN ##
p = g.V().match(__.as_("cred").where(__.out("instanceOf").hasLabel("Credentials")),\
__.as_("cred").out("identities").where(__.out("instanceOf").hasLabel("Identity")).as_("id"),\
__.not_(__.as_("cred").out().\
where(__.out("instanceOf").hasLabel("Data").out().as_("id"))))\
.select("cred", "id").toList()
## REWRITE PATTERN
for a in p:
removeAssociation(g, a['cred'], a['id'], "identities")
data = addNewObject(g, "Data", "data")
addNewAssociation(g, a['cred'], data, "EncryptionCredentials")
addNewAssociation(g, data, a['id'], "ReadPrivileges")
success = validatePatternExchange(g)
if(success):
activateDefense(g, data, "authenticated")
def getExchangePatterns1():
return [exchangePattern1]
```
#### File: johansettlin/thesis-work/graphModels.py
```python
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
import scad
import requests
import csv
import json
import re
from os import getcwd
def drop_all(g):
# Delete all vertices
g.V().drop().iterate()
########### MAL layer ##############
def mal(g):
# Add the root asset which is a starting point for schemantic queries
root = g.addV("root").next()
# Add the main building blocks of MAL
assets = g.addV("assets").next()
attackSteps = g.addV("attackSteps").next()
defenses = g.addV("defenses").next()
associations = g.addV("associations").next()
# Add root edges
g.V(root.id).addE("attackSteps").to(attackSteps).iterate()
g.V(root.id).addE("assets").to(assets).iterate()
g.V(root.id).addE("defenses").to(defenses).iterate()
g.V(root.id).addE("associations").to(associations).iterate()
#### Add DSL and instance properties ####
#DSL properties for attack steps
a_type = g.addV("type").next()
g.V(attackSteps.id).addE("DSLProperties").to(a_type).iterate()
#Instance properties for attack steps
ttc5 = g.addV("TTC-5%").next()
ttc50 = g.addV("TTC-50%").next()
ttc95 = g.addV("TTC-95%").next()
g.V(attackSteps.id).addE("instanceProperties").to(ttc5).iterate()
g.V(attackSteps.id).addE("instanceProperties").to(ttc50).iterate()
g.V(attackSteps.id).addE("instanceProperties").to(ttc95).iterate()
#DSL Porperties for associations
role = g.addV("role").next()
cardi_begin = g.addV("carinality_begin").next()
g.V(associations.id).addE("DSLProperties").to(role).iterate()
g.V(associations.id).addE("DSLProperties").to(cardi_begin).iterate()
#Instance Properties for assets
tag = g.addV("tag").next()
name = g.addV("name").next()
g.V(assets.id).addE("instanceProperties").to(tag).iterate()
g.V(assets.id).addE("instanceProperties").to(name).iterate()
#Instance properties for defenses
active = g.addV("active").next()
g.V(defenses.id).addE("instanceProperties").to(active).iterate()
def readMALSpec(url):
#url = "https://raw.githubusercontent.com/mal-lang/coreLang/master/src/main/mal/coreLang.mal"
#url = "https://raw.githubusercontent.com/mal-lang/coreLang/master/src/main/mal/coreLang.mal"
directory = getcwd()
filename = 'mal.txt'
r = requests.get(url)
f = open(filename,'w')
f.write(r.text)
f.close()
with open(filename) as infile, open('output.txt', 'w') as outfile:
for line in infile:
if not line.strip(): continue # skip the empty line
outfile.write(line) # non-empty line. Write it to output
spec = open('output.txt', "r")
content = spec.readlines()
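    # Parse the MAL specification line by line: collect each asset with its attack steps and defenses,
    # then parse association lines once the 'associations' keyword is reached.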
asso = False
assocs = []
assets = []
currentAsset = {"name": "", "attackSteps":[], "defenses": [], "extends": "", "abstract": False}
for line in content:
words = line.split()
if(asso == False):
if(len(words) >= 2):
if(words[0] == "asset"):
if(currentAsset["name"] != ""):
assets.append(currentAsset)
#the asset extends another asset
if(len(words) >= 4 and words[2] == "extends"):
currentAsset = {"name": "", "attackSteps":[], "defenses": [], "extends": "", "abstract": False}
currentAsset["name"] = words[1]
print(words[1], " extends ", words[3])
currentAsset['extends'] = words[3]
else:
currentAsset = {"name": "", "attackSteps":[], "defenses": [], "extends": "", "abstract": False}
currentAsset["name"] = words[1]
## Create a new asset
if(words[1] == "asset"):
if(currentAsset["name"] != ""):
assets.append(currentAsset)
currentAsset = {"name": "", "attackSteps":[], "defenses": [], "extends": "", "abstract": False}
currentAsset["name"] = words[2]
if(words[0] == "abstract"):
print("asset ", words[2], " is abstarct")
currentAsset["abstract"] = True
                #All defenses and attack steps until a new asset is
                # present belong to the previous asset
# Needs more functionality for E, !E, @hidden etc..
if(words[0] == "|"): #Attack step of type OR
currentAsset["attackSteps"].append({"name": words[1], "type": "OR"})
if(words[0] == "&"): #Attack step of type AND
currentAsset["attackSteps"].append({"name": words[1], "type": "AND"})
if(words[0] == "#"): #Defense
currentAsset["defenses"].append({"name": words[1]})
#spec.close()
## no more assets, just associations
if(words[0] == "associations"):
asso = True
assets.append(currentAsset)
else:
if(len(words) >= 2):
for d in assets:
if(d["name"] == words[0]):
line = ''.join(words)
lineContent = re.split('\[|\]|<--|-->', line)
# print(lineContent)
assoc = {}
assoc["linkName"] = lineContent[3]
assoc["asset1"] = lineContent[0]
assoc["asset2"] = lineContent[6]
assoc["role1"] = lineContent[1]
assoc["role2"] = lineContent[5]
assoc["cardinality1"] = lineContent[2]
assoc["cardinality2"] = lineContent[4]
assocs.append(assoc)
break
if(asso == False):
assets.append(currentAsset)
return assets, assocs
########### DSL Layer ###############
#Adds an assets in the DSL layer
def addAssets(g, assets):
#Create all the assets
for x in assets:
for asset in x:
#create the asset with the name as label
a = g.addV(asset["name"]).next()
rootA = g.V().hasLabel("root").out("assets").next()
#The asset is an instance of the asset node in the MAL layer
g.V(a.id).addE("instanceOf").to(rootA).iterate()
#Add defenses
addDefenses(g, a, asset["defenses"])
#Add attack steps
addAttackSteps(g, a, asset["attackSteps"])
#Add functionality for extends and abtract type assets
for x in assets:
for asset in x:
#check if the asset extends another asset
if(asset['extends'] != ""):
a = g.V().hasLabel(asset['name']).next()
#print("a", a.id)
extendedAsset = g.V().hasLabel(asset['extends']).next()
#print("ea", extendedAsset.id)
#add an extends edgen between the assets
g.V(a.id).addE("extends").to(extendedAsset).iterate()
#check if the asset is abstarct
if(asset['abstract']):
g.V().hasLabel(asset['name']).property('type', 'abstract').next()
def addAssociations(g, assocs):
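    # Each association is stored as two vertices, one per end, holding that end's role and cardinality
    # and linked to its owning asset and target asset type.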
for x in assocs:
for a in x:
role1 = a['role1'].strip("[]")
role2 = a['role2'].strip("[]")
#add two new assocs vertices containing information about both sides
a1 = g.addV(a["linkName"]).property("role", role1).property("cardinality_begin", a["cardinality1"]).next()
a2 = g.addV(a["linkName"]).property("role", role2).property("cardinality_begin", a["cardinality2"]).next()
#add instanceOf edges to associations in the MAL Layer
rootAs = g.V().hasLabel("root").out("associations").next()
g.V(a1.id).addE("instanceOf").to(rootAs).iterate()
g.V(a2.id).addE("instanceOf").to(rootAs).iterate()
#Add association edges from the asset to its role and target edges
g.V().hasLabel(a["asset1"]).addE("associations").to(a1).iterate()
g.V().hasLabel(a["asset2"]).addE("associations").to(a2).iterate()
asset2 = g.V().hasLabel(a["asset2"]).next()
g.V(a1.id).addE("targetType").to(asset2).iterate()
asset1 = g.V().hasLabel(a["asset1"]).next()
g.V(a2.id).addE("targetType").to(asset1).iterate()
# Adds defenses to an asset in DSL layer
def addDefenses(g, asset, defenses):
# g : Graph traversal source to access the database
# asset : is a vertex in the DSL layer representing
# the asset the defenses should relate to
#
# defenses : list of dictionaries {"name" : "nameOfDefense"}
for defense in defenses:
#Add a new vertex representing the defense
d = g.addV(defense["name"]).next()
#Add an edge (defense relation) from the asset having the defense
g.V(asset.id).addE("defenses").to(d).iterate()
# Adds attack steps to an asset on the DSL layer
def addAttackSteps(g, asset, attackSteps):
# g : Graph traversal source to access the database
# asset : is a vertex in the DSL layer representing
# the asset the defenses should relate to
#
# attackSteps : list of dictionaries {"name" : "nameOfAttackStep", "type": "type"}
for step in attackSteps:
a = g.addV(step["name"]).property("type", step["type"]).next()
rootAt = g.V().hasLabel("root").out("attackSteps").next()
g.V(a.id).addE("instanceOf").to(rootAt).iterate()
g.V(asset.id).addE("attackSteps").to(a).iterate()
###################### Instance Layer #############################
def readCSV(file):
# initializing the titles and rows list
fields = []
rows = []
headerInfo = []
# reading csv file
with open(file, 'r') as csvfile:
# creating a csv reader object
csvreader = csv.reader(csvfile)
# extracting field names through first row
headerInfo.append(next(csvreader))
headerInfo.append(next(csvreader))
headerInfo.append(next(csvreader))
headerInfo.append(next(csvreader))
fields = next(csvreader)
# extracting each data row one by one
for row in csvreader:
rows.append(row)
return rows
#Add attack steps with TTC values and set active defenses
def getActiveDefenses(file, oid, metaConcept):
assets = scad.open(file)
#Get the meta concept
activeDefenses = []
for o in assets['eom_etree'].findall('objects'):
if (o.get('id') == oid):
for evidence in o.findall('evidenceAttributes'):
if evidence.findall('evidenceDistribution'):
defense = evidence.get("metaConcept")[0].lower() + evidence.get("metaConcept")[1:]
#might fix to handle probability, on, off etc
activeDefenses.append(defense)
return activeDefenses
def addInstanceDefenses(g, vertex, oid, metaConcept, file):
#get the defenses in the DSL layer associated to the object type
defenses = g.V().hasLabel("root").out("assets").in_("instanceOf").hasLabel(metaConcept).out("defenses").project("id", "label").by(T.id).by(T.label).toList()
activeDefenses = getActiveDefenses(file, oid, metaConcept)
for defense in defenses:
# check if the defense is active in the oem file
if(defense['label'] in activeDefenses):
#create an active defense vertex
d = g.addV().property('active', 1).next()
#The instance object has an edge to the defense
g.V(vertex.id).addE("defense").to(d).iterate()
#The defense is an instance of a defence in the DSL layer
metaD = g.V(defense['id']).next()
g.V(d.id).addE("instanceOf").to(metaD).iterate()
else:
#create an inactive defense vertex
d = g.addV().property('active', 0).next()
#The instance object has an edge to the defense
g.V(vertex.id).addE("defense").to(d).iterate()
#The defense is an instance of a defence in the DSL layer
mD = g.V(defense['id']).next()
g.V(d.id).addE("instanceOf").to(mD).iterate()
# Gets the TTC value for an attack step, if there is none return 0
def getTTC(oid, name, attackStep, simulation):
#TTC values is on index 6,7,8 and id of the object id is on index 1
#and the name of the attack step is on index 5
#print(name, attackStep)
for row in simulation:
if((row[1] == oid) and (row[5].lower() == attackStep.lower())):
return row[6], row[7], row[8]
return 0, 0, 0
def addInstanceAttackSteps(g, vertex, metaConcept, oid , name , simulation):
#for each attack step get the TTC values from the simulation and create the
#attack step vertex
#get the attack steps from the DSL layer as a list
attackSteps = g.V().hasLabel("root").out("assets").in_("instanceOf").hasLabel(metaConcept).out("attackSteps").project("id", "label").by(T.id).by(T.label).toList()
for attackStep in attackSteps:
TTC5, TTC50, TTC95 = getTTC(oid, name, attackStep['label'], simulation)
#Add the attack step as a vertex in the instance layer with the ttc values
aStep = g.addV().property('TTC-5%', TTC5).property('TTC-50%', TTC50).property('TTC-95%', TTC95).next()
#connect to the model
#add an edge between the object and the attack step
g.V(vertex.id).addE("attackStep").to(aStep).next()
#the attack step is an instance of the attack step in the DSL layer
attackStep = g.V(attackStep['id']).next()
g.V(aStep.id).addE("instanceOf").to(attackStep).iterate()
def xmlToModel(g, file, csv):
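    # Build the instance layer from a securiCAD model file and a simulation CSV: create one vertex per object
    # (skipping attackers) with its tags, attack steps and defenses, then add the association edges.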
eom = scad.open(file)
assets = scad.get_assets(eom)
simulation = readCSV(csv)
for o in assets['objects']:
if(not (o['metaConcept'] == 'Attacker')):
#add the instance object, need to keep the securiCAD id for the associations
vertex = g.addV().property('name', o['name']).property('id', o['id']).next()
#Check if there is any tags present
if('attributesJsonString' in o):
for k, v in json.loads(o['attributesJsonString']).items():
g.V(vertex.id).property(k,v).next()
#the object vertex is an instance of a DSL asset
metaAs = g.V().hasLabel("root").out("assets").in_("instanceOf").hasLabel(o['metaConcept']).next()
g.V(vertex.id).addE("instanceOf").to(metaAs).iterate()
addInstanceAttackSteps(g, vertex, o['metaConcept'], o['exportedId'], o['name'] , simulation)
addInstanceDefenses(g, vertex, o['id'], o['metaConcept'], file)
#assumes that associations in the instance model is correct in respect to the DSL
for a in assets['assocs']:
if( '.attacker' not in a['targetProperty'] ):
#getLinkName(g.V().has('id', a['sourceObject']), a['targetProperty'], a['sourceProperty'])
target = g.V().has('id', a['targetObject']).next()
g.V().has('id', a['sourceObject']).addE(a['sourceProperty']).to(target).iterate()
source = g.V().has('id', a['sourceObject']).next()
g.V().has('id', a['targetObject']).addE(a['targetProperty']).to(source).iterate()
return eom
def addVertex(g, className, name, defenses):
#Need to check rules in the MAL language
return g.addV(className)
def addEdge(g, associationName, fromAssetId, toAssetId):
return g.addE(associationName)
#Creation of the example graph provided in Chapter 2, representing a band.
def exampleModel(g):
#drop the old graph
drop_all(g)
#Creation of all the vertecies
johan = g.addV("Person").property("Name", "Johan").property("Gender", "Male").next()
noomi = g.addV("Person").property("Name", "Noomi").property("Gender", "Female").next()
band = g.addV("Band").property("Name", "Soulmates").property("Genre", "Indie").property("Founded", "2019").next()
piano = g.addV("Instrument").property("Type", "Piano").next()
guitar = g.addV("Instrument").property("Type", "Guitar").next()
festival = g.addV("Festival").property("Location", "Stockholm").next()
#Creation of all the edges
g.addE("member").property("weight", 0.5).from_(johan).to(band).iterate()
g.addE("member").property("weight", 0.5).from_(noomi).to(band).iterate()
g.addE("plays").property("weight", 0.8).from_(johan).to(piano).iterate()
g.addE("plays").property("weight", 0.5).from_(noomi).to(guitar).iterate()
g.addE("played_at").property("weight", 0.8).from_(band).to(festival).iterate()
```
#### File: johansettlin/thesis-work/patterns.py
```python
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.traversal import P, T
from graph_api import *
#Caller for pw-reuse pattern
def passwordReuse(g):
ap = passwordReuseAP(g)
passwordReuseEP(g, ap)
#An anti pattern (AP) returns a list of affected vertices (or edges)
def passwordReuseAP(g):
ap = g.V().match(\
__.as_("credential").out("instanceOf").hasLabel("Credentials"),\
__.as_("credential").out().where(__.out("instanceOf").hasLabel("Identity")).as_("Identity")).\
select("credential", "Identity").toList()
#__.as_("Identity").out().where(__.out("instanceOf").hasLabel("Application")).count().is_(P.gte(2))).\
#print("Pattern: ",ap)
    return ap
#Exchange Patterns is a set of funtion calls to the graph_api
def passwordReuseEP(g, ap):
for p in ap:
changes = {"objects": [], "associations": []}
        #Do something with the vertices in ap
## (1) add new objects
obj1, valid = addObject(g, "idx", "Identity")
## (2) add new assocs
assoc1, valid = addAssociation(g, obj1, p['Identity'], "CanAssume")
## (3) set properties /remove or assets
activateDefense(g, obj1, "twoFactorAuthentication")
```
#### File: johansettlin/thesis-work/proertyGraphToXML.py
```python
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.traversal import P, T
import scad
from graph_api import *
def convertPropertyGraphToSecuriCAD(g,s):
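    # Clear the existing securiCAD model, rebuild its objects and associations from the property graph,
    # and write the result to a new .sCAD file.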
scad.delete_all_objects_and_assocs(s)
addObjectsToEOM(g, s)
addAssociationsToEOM(g, s)
scad.to_file(s, "newModelAgain.sCAD")
## Adds all objects from the property graph to the XML
def addObjectsToEOM(g, s):
objects = g.V().hasLabel("root").out("assets").in_("instanceOf").in_("instanceOf").toList()
counter = 1000000000
for o in objects:
obj = {}
obj['id'] = str(o.id)
obj['metaConcept'] = g.V(o.id).out("instanceOf").label().next()
obj['name'] = g.V(o.id).values("name").next()
obj['tag'] = g.V(o.id).valueMap().next()
obj['exportedId'] = str(counter)
scad.add_object(s, obj)
counter = counter + 1
#add active defenses
defenses = g.V(o.id).out("defense").has('active', 1).out("instanceOf").label().toList()
unactiveDefenses = g.V(o.id).out("defense").has('active', 0).out("instanceOf").label().toList()
asset = g.V(o.id).out("instanceOf").label().next()
defensList = []
for d in defenses:
name = d.capitalize()
scad.set_activated_defense(s, str(o.id),name)
#add attacksteps
addAttackSteps(g, s)
## Adds all associations from the property graph to the XML
def addAssociationsToEOM(g,s):
counter = -1000000000
#assoc = {'id': counter, 'targetId': , 'sourceId': , 'targetRole': , 'sourceRole':}
objects = g.V().hasLabel("root").out("assets").in_("instanceOf").in_("instanceOf").toList()
for o in objects:
assocs = g.V(o.id).match(__.as_('sourceId')\
.out()\
.where(__.out("instanceOf").out("instanceOf").hasLabel("assets"))\
.as_("targetId"))\
.select("sourceId", "targetId").toList()
for a in assocs:
#print(g.V(a["sourceId"]).properties('name').next(), g.V(a["targetId"]).properties('name').next())
            if(assocExists(s, a["sourceId"], a["targetId"])):
continue
else:
#Get roles
#print(g.V(a["targetId"].id).properties('name').next(), g.V(a["sourceId"].id).properties('name').next())
sourceRole = getRoleInAssociation(g, a["targetId"], a["sourceId"])
targetRole = getRoleInAssociation(g, a["sourceId"], a["targetId"])
association = {'id': str(counter), 'targetId': str(a["targetId"].id), 'sourceId': str(a["sourceId"].id), 'targetRole': targetRole, 'sourceRole': sourceRole}
scad.add_association(s, association)
counter = counter -1
#checks if an association exists between two objects
def assocExists(s, obj1, obj2):
assets = scad.get_assets(s)
for a in assets['assocs']:
if((a['sourceObject'] == str(obj1.id) and a['targetObject'] == str(obj2.id)) or (a['sourceObject'] == str(obj2.id) and a['targetObject'] == str(obj1.id))):
            #association exists
return True
return False
#gets all attack steps and adds them to the objects
def addAttackSteps(g, s):
attacksteps = g.V().hasLabel("root").out("attackSteps").in_("instanceOf").project("name", "metaConcept").by(__.label()).by(__.in_("attackSteps").label()).toList()
j = []
for x in attacksteps:
lis = [x['metaConcept'], x['name']]
j.append(lis)
scad.add_attacksteps(s, j)
``` |
{
"source": "johansi/nnlib",
"score": 3
} |
#### File: nnlib/tools/heatmap_to_points.py
```python
from .helper import *
try:
import cv2
except ModuleNotFoundError:
    printing("UNABLE TO IMPORT OpenCV", print_types.WARNING)
import numpy as np
from numba import njit
import numba
import pdb
from scipy.interpolate import splprep, splev
def heatmap_to_multiple_points(pred, thres=0.5, max_points=100, cut_off_area=0.5):
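    # Threshold the heatmap, extract contours, and return each contour's centroid as a detected point,
    # discarding contours whose area falls below cut_off_area times the mean contour area.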
mask = (pred > thres).astype(np.uint8)
if int(cv2.__version__[0]) < 4:
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if (len(contours) == 0) or (len(contours) > max_points):
return None
nut_points = np.zeros((len(contours),3))
for i in range(len(contours)):
nut_points[i,0:2] = np.mean(contours[i][:,0,:], axis=0)
nut_points[i,2] = cv2.contourArea(contours[i])
if len(contours) > 0:
cut_off = nut_points[:,2].mean()*cut_off_area
nut_points = nut_points[nut_points[:,2] > cut_off]
return nut_points[:,0:2]
def heatmap_to_max_confidence_point(heatmap, thres=0.5):
center_point = None
if heatmap.max() > thres:
center_point = np.flip(np.unravel_index(heatmap.argmax(), heatmap.shape))
return center_point
def heatmap_to_circle_HoughCircles(mask, cp):
radius = get_radius(mask,cp[0],cp[1], add_factor=0.0)
if not is_valid_radius(radius,cp,mask.shape):
return None
radius_range = 40
mind_circle_center_dist = 100
param1 = 100
param2 = 30
circle = np.squeeze(cv2.HoughCircles(image=np.round(mask*255).astype(np.uint8), method=cv2.HOUGH_GRADIENT, dp=1,
minDist=mind_circle_center_dist,param1=param1,
param2=param2,minRadius=radius-radius_range,
maxRadius=radius+radius_range).astype(np.int))
return circle
@njit
def check_validity(circle_points):
zero_counter = 0
len_c = int(len(circle_points)*0.3)
for idx in range(len(circle_points)):
if (circle_points[idx,0] == 0) and (circle_points[idx,1] == 0):
zero_counter += 1
if zero_counter == len_c:
return False
else:
zero_counter = 0
return True
def heatmap_to_circle(mask, cp=None):
#pdb.set_trace()
#radius = get_radius(mask,cp[0],cp[1])
radius = mask.shape[0]
#if not is_valid_radius(radius,cp,mask.shape):
#return None
if cp is None:
thresholded_mask = mask>0.5
if len(np.unique(thresholded_mask)) == 1:
return None
cp = np.flip(np.mean(np.array(np.where(thresholded_mask>0.5)).T, axis=0).astype(np.int))
scan_points = get_scan_points(10,cp[0],cp[1],mask.shape[0],radius)
circle_points = get_circle_points(scan_points,cp[0],cp[1],mask)
if not check_validity(circle_points):
return None
len_points = circle_points
circle_filter = circle_points[np.logical_and(circle_points[:,0] > 0, circle_points[:,1] > 0)]
try:
tck, u = splprep([circle_filter[:,0], circle_filter[:,1]], s=0)
new_points = splev(np.linspace(0,1,len(len_points)), tck)
new_points = np.array(new_points).T
except:
return None
return new_points
@njit
def get_radius(mask,p0,p1, add_factor=0.1):
radiuss = np.zeros(4,dtype=numba.int64)
# detect circle radius
m_north = np.flip(mask[:p1+1,p0])
m_east = mask[p1,p0:]
m_south = mask[p1:,p0]
m_west = np.flip(mask[p1,:p0+1])
radiuss[0] = np.argsort(m_north)[-1:][0]
radiuss[1] = np.argsort(m_east)[-1:][0]
radiuss[2] = np.argsort(m_south)[-1:][0]
radiuss[3] = np.argsort(m_west)[-1:][0]
radius = np.median(radiuss)
return int(radius + round(radius*add_factor))
def is_valid_radius(radius,cp,shape):
return (((cp[0] + radius) < shape[1]) and ((cp[1] + radius) < shape[0]) and ((cp[0] - radius) >= 0) and ((cp[1] - radius) >= 0))
@njit
def get_scan_points(step,cp0,cp1,shape,radius):
angles = np.arange(0,360,step)
scan_points = np.zeros((len(angles),2), dtype=numba.int64)
for i in range(len(angles)):
x = round(radius*np.sin(np.deg2rad(angles[i]))+cp0)
y = round(radius*np.sin(np.deg2rad(90-angles[i]))+cp1)
scan_points[i,0] = x
scan_points[i,1] = y
return scan_points
@njit
def line(r0, c0, r1, c1):
steep = 0
r = r0
c = c0
dr = abs(r1 - r0)
dc = abs(c1 - c0)
sr=0
sc=0
d=0
i=0
rr = np.zeros(max(dc, dr) + 1, dtype=np.intp)
cc = np.zeros(max(dc, dr) + 1, dtype=np.intp)
if (c1 - c) > 0:
sc = 1
else:
sc = -1
if (r1 - r) > 0:
sr = 1
else:
sr = -1
if dr > dc:
steep = 1
c, r = r, c
dc, dr = dr, dc
sc, sr = sr, sc
d = (2 * dr) - dc
for i in range(dc):
if steep:
rr[i] = c
cc[i] = r
else:
rr[i] = r
cc[i] = c
while d >= 0:
r = r + sr
d = d - (2 * dc)
c = c + sc
d = d + (2 * dr)
rr[dc] = r1
cc[dc] = c1
return rr, cc
@njit
def get_circle_points(scan_points,cp0,cp1,mask):
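    # For each scan direction, walk the discrete line from the center point outwards and keep the pixel
    # with the strongest heatmap response as the circle boundary candidate.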
circle_points = np.zeros((len(scan_points),2))
point_diffs = np.zeros(len(scan_points)-1)
shape = mask.shape[0]
p_idx = 0
for i in range(len(scan_points)):
#pdb.set_trace()
p = scan_points[i]
l = line(cp0,cp1,p[0],p[1])
discrete_line = np.zeros((len(l[0]),2),dtype=np.int64)
discrete_line[:,0] = l[0]
discrete_line[:,1] = l[1]
x_cond = np.where(np.logical_or(discrete_line[:,0] < 0, discrete_line[:,0] > shape-1))[0]
y_cond = np.where(np.logical_or(discrete_line[:,1] < 0, discrete_line[:,1] > shape-1))[0]
idx_x = len(discrete_line) if len(x_cond) == 0 else x_cond[0]
idx_y = len(discrete_line) if len(y_cond) == 0 else y_cond[0]
discrete_line = discrete_line[:min(idx_x,idx_y)]
intens = np.zeros(len(discrete_line))
for lp in range(len(discrete_line)):
intens[lp] = mask[discrete_line[lp,1],discrete_line[lp,0]]
circle_point_idx = np.argsort(intens)[-1]
circle_point = discrete_line[circle_point_idx]
        # skip this scan direction if the heatmap confidence at the candidate circle point is below 0.3
if mask[circle_point[1], circle_point[0]] < 0.3:
#return None
continue
        # skip the candidate if it is more than 3x farther from the previous point than the mean of the earlier point-to-point distances
#pdb.set_trace()
if i > 0:
if (circle_points[i-1][0] != 0) and (circle_points[i-1][1] != 0):
point_diff = np.sqrt(np.sum((circle_points[i-1] - circle_point)**2))
if p_idx > 0:
if (point_diffs[0:p_idx].mean()*3) < point_diff:
#return None
continue
point_diffs[p_idx] = point_diff
p_idx += 1
circle_points[i] = circle_point
return circle_points
```
#### File: nnlib/tools/inference.py
```python
import os
import numpy as np
from .image_tools import *
import pdb
import time
import PIL
import cv2
from .helper import *
try:
import pycuda.autoinit
import tensorrt as trt
import pycuda.driver as cuda
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
except:
printing("UNALBE TO IMPORT TENSORRT", print_types.WARNING)
try:
import torch
from torchvision import transforms
except:
printing("UNALBE TO IMPORT PYTORCH", print_types.WARNING)
def predict_classification(runtime, image, size, class_types):
times= {}
if type(image) == PIL.Image.Image:
image = np.asarray(image)
t_resize = time.time()
image = cv2.resize(image, (size, size))
times["resize"] = round((time.time() - t_resize)*1000)
t_prediction = time.time()
prediction = runtime.inference(image)
times["prediction"] = round((time.time() - t_prediction)*1000)
#t_img_points = time.time()
#img_points = get_image_points(prediction, heatmap_types)
#times["classification"] = round((time.time() - t_img_points)*1000)
return image, prediction, times
def predict_heatmap_image(runtime, image, size, heatmap_types, enhance=False):#dist_image,y_pos_dist
times = {}
if type(image) == PIL.Image.Image:
image = np.asarray(image)
t_resize = time.time()
image = cv2.resize(image, (size, size))
times["resize"] = round((time.time() - t_resize)*1000)
if enhance:
t_enhance = time.time()
image = image_enhance(image)
times["enhance"] = round((time.time() - t_enhance)*1000)
t_prediction = time.time()
prediction = np.squeeze(runtime.inference(image))
times["prediction"] = round((time.time() - t_prediction)*1000)
t_img_points = time.time()
img_points = get_image_points(prediction, heatmap_types)
times["img_points"] = round((time.time() - t_img_points)*1000)
return image, prediction, img_points, times
class PYTORCH_CONTEXT:
def __init__(self, model, state_dict_file, norm_stats_mean=None, norm_stats_std=None, batch_norm=False, device="cpu", model_key=None, convert_out_to_np=False):
self.device = device
self.model = model.to(self.device)
checkpoint = torch.load(state_dict_file, map_location=torch.device(self.device))
checkpoint = checkpoint if model_key is None else checkpoint["model"]
self.model.load_state_dict(checkpoint)
self.model = self.model.eval()
self.batch_norm = batch_norm
self.convert_out_to_np = convert_out_to_np
if batch_norm:
self.normalize = transforms.Normalize(norm_stats_mean,norm_stats_std)
self.to_tensor = transforms.ToTensor()
def inference(self, inputs):
inputs = [inputs] if type(inputs) != list else inputs
for idx in range(len(inputs)):
if (type(inputs[idx]) == np.ndarray) and self.batch_norm:
inputs[idx] = PIL.Image.fromarray(inputs[idx])
if self.batch_norm:
for idx in range(len(inputs)):
inputs[idx] = self.normalize(self.to_tensor(inputs[idx]))[None].to(self.device)
else:
for idx in range(len(inputs)):
inputs[idx] = inputs[idx].to(self.device)
with torch.no_grad():
out = self.model(*inputs)
if self.convert_out_to_np:
return np.array(out.cpu())
else:
return out
class TENSOR_RT_CONTEXT:
def __init__(self, onnxfile, input_names, output_names, norm_stats_mean=None, norm_stats_std=None,
fp16=True, max_workspace_size=3<<28, batch_norm=True):
#print("CREATE TENSOR_RT_CONTEXT")
trt.init_libnvinfer_plugins(None, "")
self._fp16 = fp16
self.norm_stats_mean = norm_stats_mean
self.norm_stats_std = norm_stats_std
self._max_workspace_size = max_workspace_size
self.input_names = input_names
self.output_names = output_names
self.engine_path = onnxfile.parent/(onnxfile.stem+".engine")
self.batch_norm = batch_norm
if os.path.exists(self.engine_path):
#print("ENINGE EXISTS")
self.load_tensorrt_engine()
else:
if trt.__version__[0] == "8":
self.onnx_to_tensorrt_8(onnxfile)
else:
self.onnx_to_tensorrt_7(onnxfile)
self.create_execution_context()
def create_execution_context(self):
# Determine dimensions and create page-locked memory buffers (i.e. won't be swapped to disk) to hold host inputs/outputs.
self.host_inputs = []
self.host_outputs = []
self.device_inputs = []
self.device_outputs = []
self.bindings = []
for name in self.input_names:
idx = self.engine.get_binding_index(name)
host_input = cuda.pagelocked_empty(trt.volume(self.engine.get_binding_shape(idx)), dtype=np.float32)
device_input = cuda.mem_alloc(host_input.nbytes)
self.bindings.append(int(device_input))
self.host_inputs.append(host_input)
self.device_inputs.append(device_input)
for name in self.output_names:
idx = self.engine.get_binding_index(name)
host_output = cuda.pagelocked_empty(trt.volume(self.engine.get_binding_shape(idx)), dtype=np.float32)
device_output = cuda.mem_alloc(host_output.nbytes)
self.bindings.append(int(device_output))
self.host_outputs.append(host_output)
self.device_outputs.append(device_output)
# Create a stream in which to copy inputs/outputs and run inference.
self.stream = cuda.Stream()
# create execution context
self.context = self.engine.create_execution_context()
def inference(self, inputs):
inputs = [inputs] if type(inputs) != list else inputs
for idx in range(len(inputs)):
if self.batch_norm:
inputs[idx] = batch_and_normalize(inputs[idx], mean=self.norm_stats_mean, std=self.norm_stats_std)
np.copyto(self.host_inputs[idx], inputs[idx].ravel())
cuda.memcpy_htod_async(self.device_inputs[idx], self.host_inputs[idx], self.stream)
self.context.execute_async(batch_size= 1, bindings=self.bindings, stream_handle=self.stream.handle)
for idx in range(len(self.host_outputs)):
cuda.memcpy_dtoh_async(self.host_outputs[idx], self.device_outputs[idx], self.stream)
self.stream.synchronize()
outs = []
for idx in range(len(self.output_names)):
binding_idx = self.engine.get_binding_index(self.output_names[idx])
outs.append(np.reshape(self.host_outputs[idx],self.engine.get_binding_shape(binding_idx)))
return tuple(outs) if len(outs) > 1 else outs
def load_tensorrt_engine(self):
with open(self.engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read())
def onnx_to_tensorrt_8(self,onnx_file):
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, TRT_LOGGER)
config = builder.create_builder_config()
success = parser.parse_from_file(str(onnx_file))
for idx in range(parser.num_errors):
print(parser.get_error(idx))
if self._fp16:
config.set_flag(trt.BuilderFlag.FP16)
builder.max_batch_size = 1
serialized_engine = builder.build_serialized_network(network, config)
with open(self.engine_path, "wb") as f:
f.write(serialized_engine)
with trt.Runtime(TRT_LOGGER) as runtime:
self.engine = runtime.deserialize_cuda_engine(serialized_engine)
def onnx_to_tensorrt_7(self,onnx_file):
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(EXPLICIT_BATCH)
parser = trt.OnnxParser(network,TRT_LOGGER)
model = open(onnx_file, 'rb')
parser.parse(model.read())
for error in range(parser.num_errors):
print(parser.get_error(error))
builder.max_batch_size = 1
builder.fp16_mode = self._fp16
builder.max_workspace_size = self._max_workspace_size
self.engine = builder.build_cuda_engine(network)
with open(self.engine_path, "wb") as f:
f.write(self.engine.serialize())
```
#### File: nnlib/training/unet.py
```python
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
import pdb
__all__ = ["UNet","res34_downsample", "downsample_none", "downsample_maxpool", "downsample_stride", "upsample_none", "upsample_upsample", "upsample_conv"]
res34_downsample = [3,4,6,3]
downsample_none = 0
downsample_maxpool = 1
downsample_stride = 2
upsample_none = 0
upsample_upsample = 1
upsample_conv = 2
def init_default(m, func=nn.init.kaiming_normal_):
"Initialize `m` weights with `func` and set `bias` to 0."
if func:
if hasattr(m, 'weight'): func(m.weight)
if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
return m
def conv_2d(ni, nf, kernel_size, bias=True, stride=1, padding=0, dilation=1, transpose=False):
conv_func = nn.Conv2d if not transpose else nn.ConvTranspose2d
conv = conv_func(ni, nf, kernel_size, bias=bias, padding=padding,stride=stride, dilation=dilation)
#conv = init_default(conv)
#conv = weight_norm(conv)
return conv
class AttentionBlock(nn.Module):
def __init__(self, in_channels_encoder, in_channels_decoder, features):
super(AttentionBlock, self).__init__()
self.conv_encoder = nn.Sequential(
nn.BatchNorm2d(in_channels_encoder),
nn.ReLU(),
conv_2d(in_channels_encoder, features, 3, padding=1),
nn.MaxPool2d(2, 2),
)
self.conv_decoder = nn.Sequential(
nn.BatchNorm2d(in_channels_decoder),
nn.ReLU(),
conv_2d(in_channels_decoder, features, 3, padding=1),
)
self.conv_attn = nn.Sequential(
nn.BatchNorm2d(features),
nn.ReLU(),
conv_2d(features, 1, 1),
)
def forward(self, decoder, encoder):
out = self.conv_encoder(encoder) + self.conv_decoder(decoder)
out = self.conv_attn(out)
return out * decoder
class ASPP(nn.Module):
def __init__(self, in_channels, features, downsampling, rate=[6, 12, 18]):
super(ASPP, self).__init__()
if downsampling == downsample_stride:
self.down = nn.Sequential(
                conv_2d(in_channels, in_channels, 3, stride=2, padding=1),
                nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
)
elif downsampling == downsample_maxpool:
self.down = nn.MaxPool2d(kernel_size=2, stride=2)
self.aspp_block1 = nn.Sequential(
conv_2d(
in_channels, features, 3, stride=1, padding=rate[0], dilation=rate[0]
),
nn.ReLU(inplace=True),
nn.BatchNorm2d(features),
)
self.aspp_block2 = nn.Sequential(
conv_2d(
in_channels, features, 3, stride=1, padding=rate[1], dilation=rate[1]
),
nn.ReLU(inplace=True),
nn.BatchNorm2d(features),
)
self.aspp_block3 = nn.Sequential(
conv_2d(
in_channels, features, 3, stride=1, padding=rate[2], dilation=rate[2]
),
nn.ReLU(inplace=True),
nn.BatchNorm2d(features),
)
self.output = conv_2d(len(rate) * features, features, 1)
self._init_weights()
self.downsampling=downsampling
def forward(self, x):
if self.downsampling != downsample_none:
x = self.down(x)
x1 = self.aspp_block1(x)
x2 = self.aspp_block2(x)
x3 = self.aspp_block3(x)
out = torch.cat([x1, x2, x3], dim=1)
return self.output(out)
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class Squeeze_Excite_Block(nn.Module):
def __init__(self, channel, reduction=16):
super(Squeeze_Excite_Block, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid(),
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
def _layerBlocks(in_channels, features, downsample_method=downsample_maxpool, blocksize=1, squeeze_excite=None,
resblock=False, bn_relu_at_first=False, bn_relu_at_end=False):
layers = OrderedDict([])
for idx in range(blocksize):
downsample = downsample_method if idx == 0 else downsample_none
in_channels = in_channels if idx == 0 else features
layers["block_"+str(idx)] = Block(in_channels,features,downsample,squeeze_excite,resblock,bn_relu_at_first, bn_relu_at_end)
return nn.Sequential(layers)
class UpSample(nn.Module):
def __init__(self, in_channels, features, upsample_method=upsample_conv, bias=True):
super(UpSample, self).__init__()
if upsample_method == upsample_upsample:
self.up = nn.Upsample(scale_factor=2, mode='bilinear')
else:
self.up = conv_2d(in_channels, features, kernel_size=2, stride=2, bias=bias, transpose=True)
def forward(self, x):
return self.up(x)
class ConvLayer(nn.Module):
def __init__(self, in_channels, features,kernel_size=3,padding=1,stride=1, bn_relu=True,bias=True):
super(ConvLayer, self).__init__()
layers = OrderedDict([])
layers["conv"] = conv_2d(in_channels,features,kernel_size,padding=padding,stride=stride,bias=bias)
if bn_relu:
layers["bn_relu"] = nn.Sequential(nn.BatchNorm2d(num_features=features),nn.ReLU(inplace=True))
self.layer = nn.Sequential(layers)
def forward(self, x):
return self.layer(x)
class Block(nn.Module):
def __init__(self, in_channels, features, downsample=downsample_none, do_squeeze_excite=False, resblock=False,
bn_relu_at_first=False, bn_relu_at_end=False):
super(Block, self).__init__()
layers = OrderedDict([])
if bn_relu_at_first:
self.bn_relu = nn.Sequential(nn.BatchNorm2d(num_features=in_channels),nn.ReLU(inplace=True))
if downsample == downsample_maxpool:
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
if (downsample == downsample_none) or (downsample == downsample_maxpool):
layers["conv1"] = ConvLayer(in_channels,features)
elif downsample == downsample_stride:
layers["conv1"] = ConvLayer(in_channels,features, kernel_size=2, padding=0,stride=2)
layers["conv2"] = ConvLayer(features,features, bn_relu=bn_relu_at_end)
if do_squeeze_excite:
layers["squeeze_excite"] = Squeeze_Excite_Block(features)
self.block = nn.Sequential(layers)
if resblock:
params_skip_conn = (1,1) if (downsample == downsample_none) or (downsample == downsample_maxpool) else (1,2)
self.skip_conn = nn.Sequential(conv_2d(in_channels,features,kernel_size=3,padding=params_skip_conn[0],
stride=params_skip_conn[1],bias=True),nn.BatchNorm2d(num_features=features))
self.resblock = resblock
self.downsample = downsample
self.bn_relu_at_first = bn_relu_at_first
def forward(self, x):
if self.bn_relu_at_first:
x = self.bn_relu(x)
if self.downsample == downsample_maxpool:
x = self.maxpool(x)
out = self.block(x)
if self.resblock:
out = self.skip_conn(x)+out
return out
class UNet(nn.Module):
def __init__(self, in_channels=3, out_channels=1, init_features=32, block_sizes_down=[1,1,1,1], blocksize_bottleneck=1, block_sizes_up=[1,1,1,1],
downsample_method=downsample_maxpool, upsample_method=upsample_conv, resblock=False, squeeze_excite=False, aspp=False, attention=False,
bn_relu_at_first=False, bn_relu_at_end=False):
super(UNet, self).__init__()
features = init_features
self.squeeze_excite = squeeze_excite
# ENCODER
self.encoder1 = _layerBlocks(in_channels,features,downsample_none,block_sizes_down[0],squeeze_excite,
resblock, False, bn_relu_at_end)
self.encoder2 = _layerBlocks(features,features*2,downsample_method,block_sizes_down[1],squeeze_excite,
resblock, bn_relu_at_first, bn_relu_at_end)
self.encoder3 = _layerBlocks(features*2,features*4,downsample_method,block_sizes_down[2],squeeze_excite,
resblock, bn_relu_at_first, bn_relu_at_end)
self.encoder4 = _layerBlocks(features*4,features*8,downsample_method,block_sizes_down[3],squeeze_excite,
resblock, bn_relu_at_first, bn_relu_at_end)
# BOTTLENECK
if aspp:
self.bottleneck = ASPP(features*8, features*16, downsample_method)
else:
self.bottleneck = _layerBlocks(features*8,features*16,downsample_method,blocksize_bottleneck,False,resblock)
# DECODER
if attention:
self.attn1 = AttentionBlock(features*8, features*16, features * 16)
self.upsample4 = UpSample(features * 16, features * 8, upsample_method)
smaple_factor = 8 if upsample_method==upsample_conv else 16
self.decoder4 = _layerBlocks((features * smaple_factor) * 2,features*smaple_factor,downsample_none,
block_sizes_up[3], False, resblock, bn_relu_at_first, bn_relu_at_end)
if attention:
self.attn2 = AttentionBlock(features*4, features*8, features * 8)
self.upsample3 = UpSample(features * 8, features * 4, upsample_method)
smaple_factor = 4 if upsample_method==upsample_conv else 8
self.decoder3 = _layerBlocks((features * smaple_factor) * 2,features*smaple_factor,downsample_none,
block_sizes_up[2], False, resblock, bn_relu_at_first, bn_relu_at_end)
if attention:
self.attn3 = AttentionBlock(features*2, features*4, features * 4)
self.upsample2 = UpSample(features * 4, features * 2, upsample_method)
smaple_factor = 2 if upsample_method==upsample_conv else 4
self.decoder2 = _layerBlocks((features * smaple_factor) * 2,features*smaple_factor,downsample_none,
block_sizes_up[1], False, resblock, bn_relu_at_first, bn_relu_at_end)
if attention:
self.attn4 = AttentionBlock(features, features*2, features * 2)
self.upsample1 = UpSample(features * 2, features, upsample_method)
smaple_factor = 1 if upsample_method==upsample_conv else 2
self.decoder1 = _layerBlocks((features * smaple_factor) * 2,features*smaple_factor,downsample_none,
block_sizes_up[0], False, resblock, bn_relu_at_first, bn_relu_at_end)
if aspp:
smaple_factor = 1 if upsample_method==upsample_conv else 2
self.out_aspp = ASPP(features*smaple_factor, features*smaple_factor, downsample_none)
smaple_factor = 1 if upsample_method==upsample_conv else 2
self.out_conv = conv_2d(features*smaple_factor, out_channels, 1)
self.aspp = aspp
self.attention = attention
def forward(self, x):
enc1 = self.encoder1(x)
enc2 = self.encoder2(enc1)
enc3 = self.encoder3(enc2)
enc4 = self.encoder4(enc3)
bottle = self.bottleneck(enc4)
if self.attention:
bottle = self.attn1(bottle,enc4)
dec4 = self.upsample4(bottle)
dec4 = torch.cat((dec4, enc4), dim=1)
dec4 = self.decoder4(dec4)
if self.attention:
dec4 = self.attn2(dec4,enc3)
dec3 = self.upsample3(dec4)
dec3 = torch.cat((dec3, enc3), dim=1)
dec3 = self.decoder3(dec3)
if self.attention:
dec3 = self.attn3(dec3,enc2)
dec2 = self.upsample2(dec3)
dec2 = torch.cat((dec2, enc2), dim=1)
dec2 = self.decoder2(dec2)
if self.attention:
dec2 = self.attn4(dec2,enc1)
dec1 = self.upsample1(dec2)
dec1 = torch.cat((dec1, enc1), dim=1)
dec1 = self.decoder1(dec1)
if self.aspp:
dec1 = self.out_aspp(dec1)
return torch.sigmoid(self.out_conv(dec1))
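# A minimal smoke test of the default configuration -- a sketch for checking
# tensor shapes, not part of the original module:
if __name__ == "__main__":
    # Plain UNet: maxpool downsampling, transposed-conv upsampling,
    # no attention/ASPP/squeeze-excite blocks.
    model = UNet(in_channels=3, out_channels=1, init_features=32)
    x = torch.randn(1, 3, 64, 64)
    y = model(x)
    print(y.shape)  # expected: torch.Size([1, 1, 64, 64])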
``` |
{
"source": "JohanSmet/lsim",
"score": 3
} |
#### File: src/bench/bench_utils.py
```python
count_check = 0
count_failure = 0
def CHECK(was, expected, op_str):
global count_check, count_failure
count_check = count_check + 1
if was != expected:
count_failure = count_failure + 1
print("FAILURE: {} = {} (expected {})".format(op_str, was, expected))
def print_stats():
global count_check, count_failure
print("======================================================")
if count_failure == 0:
print("All tests passed ({} checks executed)".format(count_check))
else:
print("{} out of {} checks failed!".format(count_failure, count_check))
def instantiate_circuit(lsim, name):
circuit_desc = lsim.user_library().circuit_by_name(name)
circuit = circuit_desc.instantiate(lsim.sim())
lsim.sim().init()
return circuit
def run_thruth_table(lsim, circuit_name, truth_table):
print ("* Testing {}".format(circuit_name))
circuit = instantiate_circuit(lsim, circuit_name)
for test in truth_table:
for v in test[0].items():
circuit.write_port(v[0], v[1])
lsim.sim().run_until_stable(5)
for v in test[1].items():
CHECK(circuit.read_port(v[0]), v[1], "{}: {}".format(circuit_name, test[0]))
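# Sketch of the expected truth_table layout (the circuit and port names here
# are hypothetical and must match the pins of the circuit under test; values
# are shown as 0/1 for brevity):
#
#   and_table = [
#       ({"A": 0, "B": 0}, {"Y": 0}),
#       ({"A": 0, "B": 1}, {"Y": 0}),
#       ({"A": 1, "B": 0}, {"Y": 0}),
#       ({"A": 1, "B": 1}, {"Y": 1}),
#   ]
#   run_thruth_table(lsim, "and_gate", and_table)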
``` |
{
"source": "johanso-au/HaloBot",
"score": 3
} |
#### File: johanso-au/HaloBot/halobot.py
```python
import socket
import json
import random
import time
DEBUG = False
with open('config.json') as configFile:
botSettings = json.load(configFile)
# Set all the variables necessary to connect to Twitch IRC
HOST = "irc.chat.twitch.tv"
NICK = "TwitchAccountUsername"
PASS = str(botSettings['OAUTH'])
PORT = 6667
CHANNEL = "#twitchchannelnamehere"
readbuffer = ""
MODT = False
botRun = True
userlist = []
greetings = ["hai", "Hey", "Welcome", "o hai", "hello"]
# Function for converting to UTF-8 (Required by Python 3 version of Socket)
def ConvertSend(command):
s.sendall(command.encode('utf-8'))
# Function for sending a message
def sendMessage(message):
ConvertSend("PRIVMSG " + CHANNEL + " :" + message + "\r\n")
if DEBUG: print(NICK + ": " + message)
# Connecting to Twitch IRC by passing credentials and joining a certain channel
s = socket.socket()
s.connect((HOST, PORT))
ConvertSend("PASS " + PASS + "\r\n")
ConvertSend("NICK " + NICK + "\r\n")
ConvertSend("JOIN " + CHANNEL + "\r\n")
# Main Loop - Receive, decode, parse.
while True:
received = s.recv(1024)
readbuffer = readbuffer + received.decode('utf-8')
temp = readbuffer.split("\r\n")
readbuffer = temp.pop()
if DEBUG: print("Temp: " + str(temp))
for line in temp:
# Splits the given string so we can work with it better
parts = line.split(":")
if DEBUG: print("Parts: " + str(parts))
# Checks whether the message is PING because its a method of Twitch to check if you're afk
if "PING" in parts[0]:
ConvertSend("PONG %s\r\n" % line[1])
print("PONG")
elif "QUIT" not in parts[1] and "JOIN" not in parts[1] and "PART" not in parts[1]:
try:
# Sets the message variable to the actual message sent
message = parts[2]
except:
message = ""
# Sets the username variable to the actual username
usernamesplit = parts[1].split("!")
username = usernamesplit[0]
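            # For example, ':nick!nick@nick.tmi.twitch.tv PRIVMSG #channel :hello'
            # splits on ':' into ['', 'nick!...#channel ', 'hello'], so parts[2]
            # is the message and parts[1] up to the '!' is the username.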
# Only works after twitch is done announcing stuff (MODT = Message of the day)
if MODT:
print(username + ": " + message + "\r\n")
# 'On' Command.
if username == "twitchusername" and message == "!halobotstart":
botRun = True
sendMessage("I got you, fam.")
if botRun:
# You can add all your plain commands here
# 'Off' Command - username is a user who can control with a command.
if username == "twitchusername" and message == "!halobotstop":
botRun = False
sendMessage("Fine, I didn't want to do your job anyway.")
if username not in userlist:
userlist.append(username)
time.sleep(1)
sendMessage(random.choice(greetings) + " @" + username)
for l in parts:
if "End of /NAMES list" in l:
MODT = True
``` |
{
"source": "johansoula/malware_playground",
"score": 3
} |
#### File: johansoula/malware_playground/virusshare_fetch.py
```python
import re
import requests
import sys
import os
def virusshare_fetch(fname, i):
url = "http://virusshare.com/hashes/VirusShare_%s.md5" % i
r = requests.get(url, stream = True)
if r.status_code != 200:
return False
    with open(fname, 'a') as fd:
        chunk_size = 8192
        for chunk in r.iter_content(chunk_size):
            fd.write(chunk)
return True
def virusshare_fetch_all(fname):
for i in range(0, 99999):
print i
if not virusshare_fetch(fname, "%05d" % i):
break
virusshare_fetch_all("virusshare_hashlist.csv")
``` |
{
"source": "johanssone/nlbaas2octavia-lb-replicator",
"score": 3
} |
#### File: nlbaas2octavia_lb_replicator/common/utils.py
```python
def _remove_empty(lb_dict):
"""Recursively Removes blank str values from the dictionary"
Removes keys from dictionary and sub objs such as dictionaries and
list of dictionaries, if the value is an empty string.
:param lb_dict: dict
"""
    # Iterate over a copy of the items so keys can be popped while iterating
    # (required on Python 3).
    for key, val in list(lb_dict.items()):
if isinstance(val, dict):
_remove_empty(val)
if isinstance(val, list):
for x in val:
if isinstance(x, dict):
_remove_empty(x)
if val in ['', u'']:
lb_dict.pop(key)
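# A small illustration of the in-place pruning (hypothetical load balancer
# dict, not taken from a real export):
#
#   lb = {'name': 'lb1', 'description': '',
#         'listeners': [{'name': 'listener1', 'sni_container_refs': ''}]}
#   _remove_empty(lb)
#   # lb is now {'name': 'lb1', 'listeners': [{'name': 'listener1'}]}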
``` |
{
"source": "johanste/carbonite",
"score": 3
} |
#### File: carbonite/carbon/__init__.py
```python
from typing import Tuple
import cv2
class CameraStream:
def __init__(self, *, camera_no:int=0, buffer_frames=1024):
self._video_capture = cv2.VideoCapture(camera_no)
self.last_frame = None
self.buffer_frames = buffer_frames
self.frame_buf = [None] * buffer_frames
self.frame_buf_no = 0
self.frames_per_second = self._video_capture.get(cv2.CAP_PROP_FPS)
def get_frame(self, frame_no):
return self.frame_buf[frame_no]
def frames(self):
while True:
ret, frame = self._video_capture.read()
frame_buf_no = (self.frame_buf_no + 1) % self.buffer_frames
self.frame_buf_no = frame_buf_no
self.frame_buf[frame_buf_no] = frame
if ret:
yield frame_buf_no, frame
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self._video_capture.release()
self._video_capture = None
class Session:
def __init__(self, *configurations):
self._detectors = []
self._handlers = {}
self._closed = True
for configuration in configurations:
try:
self._detectors.append(configuration.create_detector())
except AttributeError:
configuration.session = self
self._detectors.append(configuration)
def close(self):
print('Closing')
self._closed = True
def open(self):
...
def emit(self, event):
try:
self._handlers[type(event)](event)
except KeyError:
print(f'No handler for {type(event)}')
def handler(self, event_type: type):
"""Used as a decorator for functions that want to handle a specific event type
:param event_type type: The type of event to handle
"""
def wrapped(func):
self._handlers[event_type] = func
return func
return wrapped
def analyze_frame(self, frame_no, frame):
events = []
for analyzer in self._detectors:
for detected_event in analyzer.analyze_frame(self, frame_no, frame):
events.append(detected_event)
return events
def analyze(self, stream):
self._closed = False
for frame_no, frame in stream.frames():
if self._closed:
return
self.analyze_frame(frame_no, frame)
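# A minimal usage sketch (MotionDetectorConfig and MotionEvent are hypothetical
# stand-ins for a configuration object exposing create_detector() or
# analyze_frame(session, frame_no, frame) and the event type it emits;
# neither is defined in this module):
#
#   session = Session(MotionDetectorConfig())
#
#   @session.handler(MotionEvent)
#   def on_motion(event):
#       print("motion detected", event)
#
#   with CameraStream(camera_no=0) as stream:
#       session.analyze(stream)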
``` |
{
"source": "johansten/rtxp-py",
"score": 2
} |
#### File: rtxp/core/dsa.py
```python
from ecdsa import curves, SigningKey
from ecdsa.util import sigencode_der
from rtxp.core import hashes
from rtxp.core import utils
def sign(signing_hash, root_key, **kw):
""" Signs hash with root_key. """
number = utils.bytes_to_int(signing_hash)
r, s = root_key.sign_number(number, **kw)
r, s = _get_canonical_signature(r, s)
return sigencode_der(r, s, None)
def get_root_key(seed):
""" Gets the root key from the seed. """
# :TODO: add support for more than key #0
# see https://github.com/ripple/ripple-lib/blob/develop/src/js/ripple/seed.js
def generate_key(seed):
i = 0
res = 0
while True:
res = from_bytes(hashes.sha512half(seed + to_bytes(i, 4)))
i += 1
if curves.SECP256k1.order >= res:
break
return res
private_generator = generate_key(seed)
public_generator = curves.SECP256k1.generator * private_generator
# use public + private generators to generate a secret
sequence = 0 #
public_compressed = _get_compressed_point(public_generator)
secret = generate_key(public_compressed + to_bytes(sequence, 4))
secret += private_generator
secret %= curves.SECP256k1.order
# use secret to generate a secp256k1 key
return SigningKey.from_secret_exponent(secret, curves.SECP256k1)
def get_public_key_from_root(root_key):
return _get_compressed_point(root_key.privkey.public_key.point, pad=True)
def get_public_key(seed):
return get_public_key_from_root(get_root_key(seed))
#-------------------------------------------------------------------------------
def to_bytes(value, size):
return utils.int_to_bytes(value, size=size)
def from_bytes(value):
return utils.bytes_to_int(value)
def _get_compressed_point(point, pad=False):
""" Returns the compressed ecc point. """
header = '\x03' if point.y() & 1 else '\x02'
bytes = to_bytes(
point.x(),
curves.SECP256k1.order.bit_length() // 8 if pad else None
)
return ''.join([header, bytes])
def _get_canonical_signature(r, s):
""" returns a canonical signature. """
N = curves.SECP256k1.order
if not N / 2 >= s:
s = N - s
return r, s
```
#### File: rtxp/core/hashes.py
```python
import hashlib
def sha512half(s):
return hashlib.sha512(s).digest()[0:32]
def hash160(s):
h = hashlib.sha256(s).digest()
m = hashlib.new('ripemd160')
m.update(h)
t = m.digest()
return t
def sha256hash(s):
s = s if s else ' '
hash1 = hashlib.sha256(s).digest()
hash2 = hashlib.sha256(hash1).digest()
return hash2
```
#### File: rtxp/core/serialize.py
```python
from decimal import Decimal
import utils
#-------------------------------------------------------------------------------
FIELD_MAP = {
'Account': (8, 1),
'Amount': (6, 1),
'ClearFlag': (2, 34),
'Destination': (8, 3),
'DestinationTag': (2, 14),
'Fee': (6, 8),
'Flags': (2, 2),
'InflationDest': (8, 9), #stellar
'InvoiceID': (5, 17),
'LastLedgerSequence': (2, 27),
'LimitAmount': (6, 3),
'Memo': (14, 10),
'MemoData': (7, 13),
'MemoFormat': (7, 14),
'Memos': (15, 9),
'MemoType': (7, 12),
'OfferSequence': (2, 25),
'Paths': (18, 1),
'RegularKey': (8, 8),
'SendMax': (6, 9),
'Sequence': (2, 4),
'SetAuthKey': (8, 10), #stellar
'SetFlag': (2, 33),
'SigningPubKey': (7, 3),
'SourceTag': (2, 3),
'TakerGets': (6, 5),
'TakerPays': (6, 4),
'TransactionType': (1, 2),
'TransferRate': (2, 11),
'TxnSignature': (7, 4),
}
TRANSACTION_TYPES = {
'AccountMerge': 4, #stellar
'AccountSet': 3,
'OfferCancel': 8,
'OfferCreate': 7,
'Payment': 0,
'SetRegularKey': 5,
'TrustSet': 20
}
#-------------------------------------------------------------------------------
class Serializer(object):
def __init__(self, native_currency, account_from_human):
self.native_currency = native_currency
self.account_from_human = account_from_human
def serialize_kv_pair(self, key, value):
blob = ''
if key == 'TransactionType':
value = TRANSACTION_TYPES[value]
type_id, field_id = FIELD_MAP[key]
tag = ((type_id << 4 if type_id < 16 else 0) |
(field_id if field_id < 16 else 0))
blob += chr(tag)
if type_id >= 16:
blob += chr(type_id)
if field_id >= 16:
blob += chr(field_id)
if type_id in self.serializer_dict:
blob += self.serializer_dict[type_id](self, value)
return blob
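    # Example of the header produced above: 'TransactionType' maps to
    # (type_id=1, field_id=2); both are < 16, so the header is the single
    # byte 0x12, followed by the int16-encoded transaction type.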
def serialize_json(self, tx_json):
def comparator(a, b):
return 1 if FIELD_MAP[a] > FIELD_MAP[b] else -1
blob = ''
keys = sorted(tx_json.keys(), cmp=comparator)
for key in keys:
value = tx_json[key]
blob += self.serialize_kv_pair(key, value)
return blob
def parse_non_native_amount(self, string):
amount = Decimal(string).normalize()
parts = amount.as_tuple()
exponent = parts.exponent
value = ''.join(map(str, parts.digits))
exponent += len(value)
value = value.ljust(16, '0')
exponent -= 16
value = int(value)
if value == 0:
exponent = -99
return parts.sign, value, exponent
def serialize_non_native_amount(self, amount):
negative, value, exponent = self.parse_non_native_amount(amount)
hi = 1 << 31
if value:
if not negative:
hi |= 1 << 30
hi |= ((97 + exponent) & 0xff) << 22
value |= hi << 32
return self.serialize_int64(value)
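    # Worked example: the string '1' parses to sign=0, a 16-digit mantissa of
    # 1000000000000000 and exponent -15; the header sets bit 63 ("not native"),
    # bit 62 ("positive") and stores the exponent with a +97 offset
    # (97 - 15 = 82) in bits 54-61, with the mantissa in the low 54 bits.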
def serialize_currency(self, currency):
if currency == self.native_currency:
data = 20 * chr(0)
else:
data = 12 * chr(0) + currency + 5 * chr(0)
return data
def serialize_native_amount(self, amount):
amount = int(amount)
pos = True if amount > 0 else False
amount &= (1 << 62) - 1
if pos:
amount |= 1 << 62
return self.serialize_int64(amount)
def serialize_account_id(self, human):
return self.account_from_human(human)
def serialize_amount(self, value):
if type(value) == int:
amount = value # tx fee in provided as an int
else:
amount = value.to_json()
if type(amount) == dict:
blob = self.serialize_non_native_amount(amount['value'])
blob += self.serialize_currency(amount['currency'])
blob += self.serialize_account_id(amount['issuer'])
else:
blob = self.serialize_native_amount(amount)
return blob
def serialize_account(self, value):
return chr(20) + self.serialize_account_id(value)
def serialize_path(self, path):
FLAG_ACCOUNT = 0x01
FLAG_CURRENCY = 0x10
FLAG_ISSUER = 0x20
blob = ''
for entry in path:
flags = 0
if 'account' in entry:
flags |= FLAG_ACCOUNT
if 'currency' in entry:
flags |= FLAG_CURRENCY
if 'issuer' in entry:
flags |= FLAG_ISSUER
if entry['type'] != flags:
#raise hell
pass
flags = entry['type']
blob += chr(flags)
if flags & FLAG_ACCOUNT:
blob += self.serialize_account_id(entry['account'])
if flags & FLAG_CURRENCY:
blob += self.serialize_currency(entry['currency'])
if flags & FLAG_ISSUER:
blob += self.serialize_account_id(entry['issuer'])
return blob
def serialize_pathset(self, pathset):
PATH_BOUNDARY = chr(0xff)
PATH_END = chr(0x00)
blobs = [
self.serialize_path(path)
for path in pathset
]
return PATH_BOUNDARY.join(blobs) + PATH_END
def serialize_int16(self, value):
return utils.int_to_bytes(value, size=2)
def serialize_int32(self, value):
return utils.int_to_bytes(value, size=4)
def serialize_int64(self, value):
return utils.int_to_bytes(value, size=8)
def serialize_hash256(self, value):
return value.decode('hex')
def serialize_vl(self, value):
value = value.decode('hex')
return chr(len(value)) + value
def serialize_array(self, array):
blob = ''
for t in array:
key, value = t.items()[0]
blob += self.serialize_kv_pair(key, value)
blob += chr((15 << 4) | 1)
return blob
def serialize_object(self, tx_json):
def comparator(a, b):
return 1 if FIELD_MAP[a] > FIELD_MAP[b] else -1
blob = ''
keys = sorted(tx_json.keys(), cmp=comparator)
for key in keys:
value = tx_json[key]
blob += self.serialize_kv_pair(key, value)
blob += chr((14 << 4) | 1)
return blob
serializer_dict = {
1: serialize_int16,
2: serialize_int32,
5: serialize_hash256,
6: serialize_amount,
7: serialize_vl,
8: serialize_account,
14: serialize_object,
15: serialize_array,
18: serialize_pathset,
}
#-------------------------------------------------------------------------------
```
#### File: rtxp/stellar/server.py
```python
import rtxp.core.server
class Server(rtxp.core.server.Server):
def __init__(self, url, callback):
super(Server, self).__init__(url, callback)
def clear_subscriptions(self):
self.subscriptions = {
'streams': [],
'accounts': [],
'accounts_rt': [],
'books': []
}
```
#### File: rtxp-py/tests/test_amount.py
```python
import unittest
from rtxp import stellar
class AmountTest(unittest.TestCase):
def test_str_currency(self):
amount = stellar.Amount(1)
self.assertEqual('STR', amount.currency)
amount = stellar.Amount(10, 'USD')
self.assertNotEqual('STR', amount.currency)
def test_value_normalization(self):
amount = stellar.Amount('1')
self.assertEqual('1', amount.value)
amount = stellar.Amount('1.')
self.assertEqual('1', amount.value)
amount = stellar.Amount('1.000')
self.assertEqual('1', amount.value)
def test_json_import_str(self):
amount = stellar.Amount.from_json('1000000')
self.assertEqual('STR', amount.currency)
self.assertEqual('1', amount.value)
def test_json_import_usd(self):
amount = stellar.Amount.from_json({
'value': 100,
'currency': 'USD',
'issuer': 'issuer'
})
self.assertEqual('100', amount.value)
self.assertEqual('USD', amount.currency)
def test_json_export_str(self):
res = stellar.Amount(1).to_json()
self.assertEqual(res, '1000000')
def test_json_export_usd(self):
res = stellar.Amount(1, 'USD', 'issuer').to_json()
self.assertEqual(res, {
'value': '1',
'currency': 'USD',
'issuer': 'issuer'
})
```
#### File: rtxp-py/tests/test_remote.py
```python
import unittest
from mock import MagicMock as Mock
from rtxp import stellar
class DummyPromise(object):
def __init__(self):
pass
def then(self, ok, not_ok):
self.success = ok
self.failure = not_ok
return self
def fulfill(self, res):
if hasattr(self, 'success'):
self.callback = self.success
self.value = res
def reject(self, err):
if hasattr(self, 'failure'):
self.callback = self.failure
self.value = err
def get(self):
if hasattr(self, 'callback'):
return self.callback(self.value)
else:
return self.value
class RemotePromiseTest(unittest.TestCase):
def setUp(self):
self.remote = stellar.Remote(None, server=Mock)
self.server = self.remote.server
def test_promise_ok(self):
self.server.request.return_value = DummyPromise()
p = self.remote.get_account_currencies('account', async=True)
p.fulfill({'result':'ok'})
self.assertEqual(p.get(), 'ok')
def test_promise_not_ok(self):
self.server.request.return_value = DummyPromise()
p = self.remote.get_account_currencies('account', async=True)
p.reject("failure")
self.assertRaises(Exception, p.get)
def test_get_fee(self):
self.server.fee_promise = DummyPromise()
p = self.remote.get_fee(async=True)
p.fulfill(10)
self.assertEqual(p.get(), 10)
class RemoteParamTest(unittest.TestCase):
def setUp(self):
self.remote = stellar.Remote(None, server=Mock)
self.server = self.remote.server
def test_cancel(self):
res = self.remote.cancel("promise")
self.server.cancel.assert_called_with(
"promise"
)
def test_add_callback(self):
self.remote.add_callback("tx_type", "callback")
self.server.add_callback.assert_called_with(
"tx_type",
"callback"
)
def test_account_currencies_params(self):
self.remote.get_account_currencies('account')
self.server.request.assert_called_with(
'account_currencies',
account='account'
)
def test_account_info_params(self):
self.remote.get_account_info('account')
self.server.request.assert_called_with(
'account_info',
account='account'
)
def test_account_lines_params(self):
self.remote.get_account_lines('account')
self.server.request.assert_called_with(
'account_lines',
account='account'
)
def test_account_offers_params(self):
self.remote.get_account_offers('account')
self.server.request.assert_called_with(
'account_offers',
account='account'
)
def test_account_tx_params(self):
self.remote.get_account_tx('account')
self.server.request.assert_called_with(
'account_tx',
account='account'
)
def test_book_offers_params(self):
self.remote.get_book_offers('taker_gets', 'taker_pays')
self.server.request.assert_called_with(
'book_offers',
taker_gets='taker_gets',
taker_pays='taker_pays',
)
def test_create_find_path(self):
self.remote.create_find_path('source', 'dest', 'dest_amount', None)
self.server.request.assert_called_with(
'find_path',
subcommand='create',
source_account='source',
destination_account='dest',
destination_amount='dest_amount'
)
def test_close_find_path(self):
self.remote.close_find_path()
self.server.request.assert_called_with(
'find_path',
subcommand='close'
)
def test_ledger_params(self):
self.remote.get_ledger(ledger_index='index')
self.server.request.assert_called_with(
'ledger',
ledger_index='index'
)
def test_static_path_params(self):
self.remote.get_static_path(
'source',
'dest',
'dest_amount'
)
self.server.request.assert_called_with(
'static_path_find',
source_account='source',
destination_account='dest',
destination_amount='dest_amount'
)
def test_transaction_entry_params(self):
self.remote.get_transaction_entry('hash', 'index')
self.server.request.assert_called_with(
'transaction_entry',
tx_hash='hash',
ledger_index='index'
)
def test_tx_params(self):
self.remote.get_tx('tx')
self.server.request.assert_called_with(
'tx',
transaction='tx'
)
def test_tx_history_params(self):
self.remote.get_tx_history('start')
self.server.request.assert_called_with(
'tx_history',
start='start'
)
def test_submit_params(self):
self.remote.submit_transaction('blob')
self.server.request.assert_called_with(
'submit',
tx_blob='blob'
)
def test_subscribe_params(self):
self.remote.subscribe(streams=['stream1', 'stream2'])
self.server.subscribe.assert_called_with(
streams=['stream1', 'stream2']
)
def test_unsubscribe_params(self):
self.remote.unsubscribe(streams=['stream1', 'stream2'])
self.server.unsubscribe.assert_called_with(
streams=['stream1', 'stream2']
)
``` |
{
"source": "johan-uden/CMSIS_5",
"score": 2
} |
#### File: Tests/buildutils/dscmd.py
```python
import os
from buildcmd import BuildCmd
class DsCmd(BuildCmd):
def __init__(self, project, config):
BuildCmd.__init__(self)
self._project = project
self._config = config
workspace = os.getenv('WORKSPACE')
if workspace:
self._workspace = os.path.join(workspace, "eclipse")
else:
self._workspace = os.getenv('DSMDK_WORKSPACE')
if not self._workspace:
raise RuntimeError("No DS-MDK workspace found, set either DSMDK_WORKSPACE or WORKSPACE in environment!")
def getCommand(self):
return "eclipsec.exe"
def getArguments(self):
return [
"-nosplash",
"-application", "org.eclipse.cdt.managedbuilder.core.headlessbuild",
"-data", self._workspace,
"-import", os.path.dirname(os.path.abspath(self._project)),
"-cleanBuild", self._config
]
def isSuccess(self):
return self._result == 0
```
#### File: Tests/buildutils/fvpcmd.py
```python
from buildcmd import BuildCmd
class FvpCmd(BuildCmd):
def __init__(self, model, app, **args):
BuildCmd.__init__(self)
self._model = model
self._app = app
self._args = args
def getCommand(self):
return self._model
def getArguments(self):
args = []
if 'limit' in self._args: args += [ "--cyclelimit", self._args['limit'] ]
if 'config' in self._args: args += [ "-f", self._args['config'] ]
if 'target' in self._args:
for a in self._app:
args += [ "-a", "{0}={1}".format(self._args['target'], a ) ]
else:
args += [ self._app[0] ]
return args
``` |
{
"source": "Johanu/MDAnalysis_scripts",
"score": 2
} |
#### File: Johanu/MDAnalysis_scripts/coil_COMdistance.py
```python
from __future__ import division
import matplotlib.pyplot as plt
import MDAnalysis as md
import numpy as np
def calculate_dists(gro_file, xtc_file):
u = md.Universe(gro_file, xtc_file)
select_group1 = u.selectAtoms("backbone and (resnum 50 or resnum 51)")
select_group2 = u.selectAtoms("backbone and (resnum 149 or resnum 150)")
select_group3 = u.selectAtoms("backbone and (resnum 50 or resnum 51 or resnum 149 or resnum 150)")
select_group4 = u.selectAtoms("backbone and (resnum 25 or resnum 124)")
for i in select_group1:
print "Loop1 ", i
for i in select_group2:
print "Loop2 ", i
for i in select_group4:
print "ASP ", i
COM_distance = []
COM_distance_ASP = []
COM_distance_ASP1 = []
COM_distance_ASP2 = []
max_dist = 0
index = 0
min_dist = 100
index_min = 0
max_dist_1 = 0
index_1 = 0
min_dist_1 = 100
index_min_1 = 0
max_dist_2 = 0
index_2 = 0
min_dist_2 = 100
index_min_2 = 0
max_dist_3 = 0
index_3 = 0
min_dist_3 = 100
index_min_3 = 0
#group1_COM = select_group1.centerOfMass()
#group2_COM = select_group2.centerOfMass()
#print group1_COM
#print group2_COM
#print np.sqrt(np.dot(group1_COM-group2_COM, group1_COM-group2_COM))
#print np.linalg.norm(group1_COM - group2_COM)
for i in u.trajectory:
group1_COM = select_group1.centerOfMass()
group2_COM = select_group2.centerOfMass()
dist = np.linalg.norm(group1_COM - group2_COM)
COM_distance.append(dist)
if dist > max_dist:
max_dist = dist
index = i.frame
if dist < min_dist:
min_dist = dist
index_min = i.frame
group3_COM = select_group3.centerOfMass()
group4_COM = select_group4.centerOfMass()
dist1 = np.linalg.norm(group3_COM - group4_COM)
COM_distance_ASP.append(dist1)
if dist1 > max_dist_1:
max_dist_1 = dist1
index_1 = i.frame
if dist1 < min_dist_1:
min_dist_1 = dist1
index_min_1 = i.frame
dist2 = np.linalg.norm(group1_COM - group4_COM)
dist3 = np.linalg.norm(group2_COM - group4_COM)
COM_distance_ASP1.append(dist2)
COM_distance_ASP2.append(dist3)
if dist2 > max_dist_2:
max_dist_2 = dist2
index_2 = i.frame
if dist2 < min_dist_2:
min_dist_2 = dist2
index_min_2 = i.frame
if dist3 > max_dist_3:
max_dist_3 = dist3
index_3 = i.frame
if dist3 < min_dist_3:
min_dist_3 = dist3
index_min_3 = i.frame
print 'Max interloop distance: ', max_dist, index
print 'Min interloop distance: ', min_dist, index_min
print 'Max loops-ASP distance: ', max_dist_1, index_1
print 'Min loops-ASP distance: ', min_dist_1, index_min_1
print 'Max loop1-ASP distance: ', max_dist_2, index_2
print 'Min loop1-ASP distance: ', min_dist_2, index_min_2
print 'Max loop2-ASP distance: ', max_dist_3, index_3
print 'Min loop2-ASP distance: ', min_dist_3, index_min_3
return COM_distance, COM_distance_ASP, COM_distance_ASP1, COM_distance_ASP2
coil_distance, ASP_distance, ASP_distance1, ASP_distance2 = calculate_dists('structure.pdb', 'equ.dcd')
x_vals = [x / 10 for x in range(0, len(coil_distance))]
plt.plot(x_vals, coil_distance, linewidth=0.5)
#leg = plt.legend(ncol=3, loc=9, fancybox=True)
#leg.get_frame().set_alpha(0.5)
plt.xlabel('Time / ns')
plt.ylabel(ur'Loop COM distance / $\AA$')
plt.axhline(y=9.84, linewidth=1, color = 'red')
plt.axhline(y=11.11, linewidth=1, color = 'green')
plt.savefig('coil_COMdistance.png', dpi=300)
plt.close()
plt.plot(x_vals, ASP_distance, linewidth=0.5)
plt.plot(x_vals, ASP_distance1, linewidth=0.5)
plt.plot(x_vals, ASP_distance2, linewidth=0.5)
print 'Loop1 average: ', np.average(ASP_distance1[500:]), np.std(ASP_distance1[500:])
print 'Loop2 average: ', np.average(ASP_distance2[500:]), np.std(ASP_distance2[500:])
plt.xlabel('Time / ns')
plt.ylabel(ur'Loop COM distance / $\AA$')
plt.axhline(y=21.29, linewidth=1, color = '#C45AEC', label='PR20')
plt.axhline(y=15.18, linewidth=1, color = '#C45AEC')
plt.axhline(y=20.36, linewidth=1, color = '#EAC117', label='PR')
plt.axhline(y=15.11, linewidth=1, color = '#EAC117')
plt.axhline(y=np.average(ASP_distance1), linewidth=1, color = 'green', label='Loop1 average')
plt.axhline(y=np.average(ASP_distance2), linewidth=1, color = 'red', label='Loop2 average')
leg = plt.legend(fancybox=True, loc=2, framealpha=0.5)
#leg.get_frame().set_alpha(0.5)
plt.savefig('ASP_COMdistance.png', dpi=300)
plt.close()
```
#### File: Johanu/MDAnalysis_scripts/SPRs_scatter.py
```python
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import MDAnalysis as md
from matplotlib.mlab import griddata
from matplotlib.colors import LogNorm
import pylab
def calculate_dists(gro_file, xtc_file, ligand_name):
u = md.Universe(gro_file, xtc_file)
select_ligand = u.selectAtoms("resname " + ligand_name)
select_res31 = u.selectAtoms("backbone and resnum 31")
COM_distance_x = []
COM_distance_y = []
COM_distance_z = []
print gro_file[:-4], select_ligand
for i in u.trajectory:
ligand_COM = select_ligand.centerOfMass()
res31_COM = select_res31.centerOfMass()
#COM_distance.append(res31_COM[2] - ligand_COM[2])
COM_distance_x.append(ligand_COM[0] - res31_COM[0])
COM_distance_y.append(ligand_COM[1] - res31_COM[1])
COM_distance_z.append(ligand_COM[2] - res31_COM[2])
return [COM_distance_x, COM_distance_y, COM_distance_z]
S31N = calculate_dists('./mutants/2HSP/2HSP_S31N/ligands/2HSP_S31N_SPRP/minim_new.gro', './mutants/2HSP/2HSP_S31N/ligands/2HSP_S31N_SPRP/equil_new.xtc', 'SPR')
V27A = calculate_dists('./mutants/2HSP/2HSP_V27A/ligands/2HSP_V27A_SPRP/minim_new.gro', './mutants/2HSP/2HSP_V27A/ligands/2HSP_V27A_SPRP/equil_new.xtc', 'SPR')
SPRP = calculate_dists('./WT_2HSP/ligands/WT_2HSP_SPRP/minim_new.gro', './WT_2HSP/ligands/WT_2HSP_SPRP/equil_new.xtc', 'SPR')
SPRC = calculate_dists('./WT_2HSP/ligands/WT_2HSP_SPRC/minim.gro', './WT_2HSP/ligands/WT_2HSP_SPRC/equil.xtc', 'SPRC')
SPRN = calculate_dists('./WT_2HSP/ligands/WT_2HSP_SPRN/minim.gro', './WT_2HSP/ligands/WT_2HSP_SPRN/equil.xtc', 'SPRN')
#x_vals = [x / 100 for x in range(0, len(SPRP))]
#x_S31N = [x / 100 for x in range(0, len(S31N))]
#plt.plot(x_vals, SPRN, label='SPRN WT')
data = [[], [], []]
for i, val in enumerate(SPRP[0]):
data[0].append(SPRP[0][i])
data[0].append(S31N[0][i])
data[0].append(V27A[0][i])
data[0].append(SPRC[0][i])
data[0].append(SPRN[0][i])
data[1].append(SPRP[1][i])
data[1].append(S31N[1][i])
data[1].append(V27A[1][i])
data[1].append(SPRC[1][i])
data[1].append(SPRN[1][i])
plt.scatter(data[0], data[1], marker='o', c='b', s=5, zorder=10)
#leg.get_frame().set_alpha(0.5)
plt.xlabel(ur'X difference to resnum 31 COM / $\AA$')
plt.ylabel(ur'Y difference to resnum 31 COM / $\AA$')
# plt.show()
plt.savefig('SPRs_scatter.png', dpi=300)
plt.close()
#H, xedges, yedges, img = plt.hist2d(data[0], data[1], norm=LogNorm())
#extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
#fig = plt.figure()
#ax = fig.add_subplot(1, 1, 1)
#im = ax.imshow(H, cmap=plt.cm.jet, interpolation='spline36', norm=LogNorm(), extent=extent)
#fig.colorbar(im, ax=ax)
#plt.savefig('SPRs_hist.png', dpi=300)
#plt.close()
plt.hist2d(data[0], data[1], bins=40)
plt.colorbar()
plt.savefig('SPRs_hist2.png', dpi=300)
plt.close()
``` |
{
"source": "johanvandegriff/midi",
"score": 3
} |
#### File: johanvandegriff/midi/midi2.py
```python
from pygame import midi
import time
import os
"""
this code is for a custom midi device that my brother made
here's a video of the wiring process: https://www.youtube.com/watch?v=jEtFtwcCJcM
reference for the device's code:
https://learn.adafruit.com/grand-central-usb-midi-controller-in-circuitpython/code-usb-midi-in-circuitpython
references I used to make this code:
https://stackoverflow.com/questions/1554896/getting-input-from-midi-devices-live-python
https://www.pygame.org/docs/ref/midi.html
"""
midi.init()
print(midi.get_count())
for id in range(midi.get_count()):
interf, name, is_input, is_output, is_opened = midi.get_device_info(id)
#s = "id: {}".format(id)
s = interf.decode("utf-8") + " " + name.decode("utf-8")
if is_input == 1:
s += " input"
if is_output == 1:
s += " output"
if is_opened == 1:
s += " (opened)"
print(id, s)
#input_id = midi.get_default_input_id()
#input_id = 5
print("Enter the number above corresponding to the input of the mixer device")
input_id = int(input())
input_device = midi.Input(input_id)
#output_id = midi.get_default_input_id()
#output_id = 4
print("Enter the number above corresponding to the output of the mixer device")
output_id = int(input())
output_device = midi.Output(output_id)
output_device.note_off(1)
output_device.note_off(2)
output_device.note_off(3)
output_device.note_off(4)
output_device.note_off(5)
def process_data(isButton, channel, value):
if isButton:
if value:
print("button {} pressed".format(channel))
output_device.note_on(channel+1, 100)
os.system("/home/user/nextcloud/bin/m-keys {} &".format(channel+1))
else:
print("button {} released".format(channel))
output_device.note_off(channel+1)
else: #isButton = false, it is a slider
print("slider {} value: {}".format(channel, value))
while True:
while not input_device.poll():
time.sleep(0.05)
data = input_device.read(1)
print(data[0][0])
button_or_slider, channel, value, _ = data[0][0]
if button_or_slider == 176: #slider value
process_data(False, channel, value)
elif button_or_slider == 144: #button pressed
process_data(True, channel, True)
elif button_or_slider == 128: #button released
process_data(True, channel, False)
#[[[176, 4, 79, 0], 0]] slider 4 value: 79
#[[[144, 3, 127, 0], 0]] button 3 pressed
#[[[128, 3, 127, 0], 1]] button 3 released
``` |
{
"source": "johanvandegriff/network-scenery",
"score": 3
} |
#### File: johanvandegriff/network-scenery/networkScanner.py
```python
import subprocess, datetime
# this function will return a list of lists describing the list of devices in the network
# format: [IP, MAC, NAME/MANUFACTURER]
# find out interface by running ifconfig and determining which interface is being used, like eth0 or wlp3s0
def scan(interface):
# use subprocess.run to run the command
cliOutput = subprocess.run(['sudo', 'arp-scan', '--localnet', '--interface=' + interface], stdout=subprocess.PIPE)
# extract the output from the subprocess
strOutput = cliOutput.stdout.decode()
# generate a list of lists in the specified format and return it
#return strOutput
return [i.split('\t') for i in strOutput.split('\n')][2:-4]
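# Illustrative shape of the parsed result (addresses below are made up):
#   [['192.168.1.10', 'aa:bb:cc:dd:ee:ff', 'Some Manufacturer'], ...]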
def scanDict(interfaces):
ts = datetime.datetime.now().timestamp() #seconds since the epoch (floating point)
data = {}
for interface, network in interfaces.items():
data[network] = scan(interface)
result = []
for network, lines in data.items():
for item in lines:
if len(item) < 3: continue
ip = item[0]
mac = item[1]
device = item[2]
dup = False
for other in result:
if mac == other['MAC']:
dup = True
continue
if dup:
continue
result.append({
"IP": ip,
"MAC": mac,
"device": device,
"network": network,
"time": ts
})
return result
if __name__ == "__main__":
from sense_hat import SenseHat
sense = SenseHat()
s = scan("wlan1")
print(len(s))
print(s[0:3])
sense.clear()
sense.set_pixel(2, 2, (0, min(len(s), 255), 0))
```
#### File: johanvandegriff/network-scenery/old_dashboard.py
```python
from flask import Flask, request, render_template, url_for, jsonify
import os, re, sys
from sense_hat import SenseHat
import json
from pymongo import MongoClient
ROOT = "/home/pi/Nextcloud/NetworkVisualizer/network-scenery/"
with open(ROOT + '/secret.json') as f:
key = json.load(f)['mongodb']
client = MongoClient(key)
db = client.networkviz
sense = SenseHat()
app = Flask(__name__)
def getMacFromIP(ip):
device = db.scans.find_one({'IP': ip})
if device is None:
return None
else:
return device['MAC']
@app.route("/")
def main():
sense.clear()
    sense.set_pixel(0,0,(197, 8, 255)) #1 purple
    sense.set_pixel(0,1,(197, 8, 255)) #1 purple
    sense.set_pixel(1,0,(197, 8, 255)) #1 purple
    sense.set_pixel(1,1,(197, 8, 255)) #1 purple
sense.set_pixel(0,2,(255, 243, 8)) #2 yellow
sense.set_pixel(0,3,(255, 243, 8)) #2 yellow
sense.set_pixel(1,2,(255, 243, 8)) #2 yellow
sense.set_pixel(1,3,(255, 243, 8)) #2 yellow
sense.set_pixel(0,4,(82, 255, 8)) #3 green
sense.set_pixel(0,5,(82, 255, 8)) #3 green
sense.set_pixel(1,4,(82, 255, 8)) #3 green
sense.set_pixel(1,5,(82, 255, 8)) #3 green
sense.set_pixel(0,6,(3, 45, 255)) #4 blue
sense.set_pixel(0,7,(3, 45, 255)) #4 blue
sense.set_pixel(1,6,(3, 45, 255)) #4 blue
sense.set_pixel(1,7,(3, 45, 255)) #4 blue
table = ""
for doc in db.scans.find({}):
table += "<tr>"
table += "<td>" + doc["IP"] + "</td>\n"
table += "<td>" + doc["MAC"] + "</td>\n"
table += "<td>" + doc["device"] + "</td>\n"
table+= "</tr>"
ip = request.remote_addr
return render_template("index.html", data=table, ip=ip, mac=getMacFromIP(ip))
@app.route("/profile")
def test():
sense.set_pixel(0,0,(82, 255, 8)) #1 green
sense.set_pixel(0,1,(82, 255, 8)) #1 green
sense.set_pixel(1,0,(82, 255, 8)) #1 green
sense.set_pixel(1,1,(82, 255, 8)) #1 green
sense.set_pixel(0,2,(3, 45, 255)) #2 blue
sense.set_pixel(0,3,(3, 45, 255)) #2 blue
sense.set_pixel(1,2,(3, 45, 255)) #2 blue
sense.set_pixel(1,3,(3, 45, 255)) #2 blue
    sense.set_pixel(0,4,(197, 8, 255)) #3 purple
    sense.set_pixel(0,5,(197, 8, 255)) #3 purple
    sense.set_pixel(1,4,(197, 8, 255)) #3 purple
    sense.set_pixel(1,5,(197, 8, 255)) #3 purple
sense.set_pixel(0,6,(255, 243, 8)) #4 yellow
sense.set_pixel(0,7,(255, 243, 8)) #4 yellow
sense.set_pixel(1,6,(255, 243, 8)) #4 yellow
sense.set_pixel(1,7,(255, 243, 8)) #4 yellow
ip = request.remote_addr
return render_template("profile.html", ip=ip, mac=getMacFromIP(ip))
@app.route("/get_my_ip", methods=["GET"])
def get_my_ip():
return jsonify({'ip': request.remote_addr}), 200
if __name__ == "__main__":
# app.run()
app.run(host= '0.0.0.0') # for local testing
``` |
{
"source": "johanvandegriff/quest",
"score": 3
} |
#### File: quest/old/dbl.py
```python
import itertools
def doublerange(xlen, ylen=0):
if ylen == 0 and xlen.__class__ in [tuple, list]:
return doublerange(xlen[0], xlen[1])
return itertools.product(range(xlen), range(ylen))
def get23():
return 2,3
for x, y in doublerange(2,3):
print x, y
print
print get23()
print
for x, y in doublerange(get23()):
print x, y
```
#### File: johanvandegriff/quest/old.py
```python
def rescale(a, b, x):
sign=0
if x>(a-1)/2: sign= 1
if x<(a-1)/2: sign=-1
return(2*b*x+b-a-b*sign)/(2*a)
#...
def add(self, doors):
self.doors = [mine or add for mine, add in zip(self.doors, doors.doors)]
# self.doors[0] = self.doors[0] or doors.doors[0]
# self.doors[1] = self.doors[1] or doors.doors[1]
# self.doors[2] = self.doors[2] or doors.doors[2]
# self.doors[3] = self.doors[3] or doors.doors[3]
#...
class Level:
'Contains a 2-D list of rooms'
# def __init__(self, width, length, minRoomWidth, maxRoomWidth, minRoomLength, maxRoomLength):
def __init__(self, type, name, width, length, roomWidth, roomLength):
# def __init__(self, arg0, arg1=0, arg2=0, arg3=0, arg4=0):
# type = arg0
# if type == DEFAULT_LEVEL: width, length, roomWidth, roomLength = arg1, arg2, arg3, arg4
# if type == CUSTOM_LEVEL:
# level = arg1
# width = len(level[0])
# length = len(level)
# return
log(7, ' ===== new Level '+str(width)+'x'+str(length)+' =====')
log(7, ' type (unused)', type)
log(7, ' name', name)
self.name = name
self.width = width
self.length = length
self.level = []
for y in range(length): # make a 2-D list of rooms
row = []
for x in range(width):
# roomWidth = random.randint(minRoomWidth, maxRoomWidth)
# roomLength = random.randint(minRoomLength, maxRoomLength)
room = Room(roomWidth, roomLength)
# room = Room(roomWidth+random.randint(0,5), roomLength+random.randint(0,5))
row.append(room)
self.level.append(row)
#...
dx = [-1, 0, 1, 1, 1, 0, -1, -1]
dy = [ 1, 1, 1, 0, -1, -1, -1, 0]
for i in range(8):
if key == options[i][2]: return self.move(dx[i], dy[i], creatures)
# if key == options[0][2]: return self.move(-1, 1, creatures)
# if key == options[1][2]: return self.move( 0, 1, creatures)
# if key == options[2][2]: return self.move( 1, 1, creatures)
# if key == options[3][2]: return self.move( 1, 0, creatures)
# if key == options[4][2]: return self.move( 1,-1, creatures)
# if key == options[5][2]: return self.move( 0,-1, creatures)
# if key == options[6][2]: return self.move(-1,-1, creatures)
# if key == options[7][2]: return self.move(-1, 0, creatures)
#...
miniWin.addch(2+y*3,5+x*5, char)
#if doors.getTop() and y>0: miniWin.addch(roomDispY-1+y*3,3+x*5, '|')
#if doors.getBottom() and y<levelLength-1: miniWin.addch(roomDispY+1+y*3,3+x*5, '|')
#if doors.getLeft() and x>0: miniWin.addch(roomDispY+y*3,1+x*5, '-')
#if doors.getRight() and x<levelWidth-1: miniWin.addch(roomDispY+y*3,5+x*5, '-')
#...
upX = random.randrange(self.width)
upY = random.randrange(self.length)
downX = upX
downY = upY
while upX==downX and upY==downY:
downX = random.randrange(self.width)
downY = random.randrange(self.length)
#...
def findStairs(self, up):
char = DOWN_STAIRS_ID
if up: char = UP_STAIRS_ID
return self.findChar(char)
#...
def findStairs(self, location, upStairs): #TODO remove this
z = location.getZ()
level = self.getLevel(z)
levelX, levelY, roomX, roomY = level.findStairs(upStairs)
location.setLevelXY(levelX, levelY)
location.setRoomXY(roomX, roomY)
return location
def loadLevel(self, i, upStairs): #TODO remove this
level = self.getLevel(i)
levelWidth, levelLength = level.getWL()
levelX, levelY, roomX, roomY = level.findStairs(upStairs)
room, roomWidth, roomLength = level.loadRoom(levelX, levelY)
return level, levelWidth, levelLength, levelX, levelY, room, roomWidth, roomLength, roomX, roomY
#...
def getLevelXY(self): return self.levelX, self.levelY
def setLevelXY(self, levelX, levelY):
self.levelX = levelX
self.levelY = levelY
def getRoomXY(self): return self.roomX, self.roomY
def setRoomXY(self, roomX, roomY):
self.roomX = roomX
self.roomY = roomY
def getZ(self): return self.z
#...
def setZ(self, z):
difference = z - self.z
self.z = z
if difference > 0: self.loadLevel(True)
if difference < 0: self.loadLevel(False)
def move(self, dx, dy, creatures):
if dx==0 and dy==0: return False
level = self.getLevel()
levelWidth, levelLength = level.getWL()
room = self.getRoom()
roomWidth, roomLength = room.getWL()
x2=self.roomX+dx
y2=self.roomY+dy
if x2<-1 or x2>roomWidth or \
y2<-1 or y2>roomLength:
return False
terrain = room.getTile(self.roomX, self.roomY).getTerrain()
newTerrain = room.getTile(limit(x2, 0, roomWidth-1), limit(y2, 0, roomLength-1)).getTerrain()
if newTerrain in solid:
# if self.getName() != 'player': #uncomment to walk through walls
return False
#if moving diagonally and standing on a door or attempting to move on to one
if dx != 0 and dy != 0 and DOOR_ID in [terrain, newTerrain]:
return False
enemy = creatures.findAt(self.z, self.levelX, self.levelY, x2, y2)
if not enemy == None:
#if self.attack(enemy): return True
enemy.getStats().takeDamage(1) #TODO different amounts of damage, only attack enemies
return True
self.setRoomXY(x2, y2)
if self.roomX<0 and self.levelX>0:
self.levelX-=1
oldRoomLength = roomLength
room, roomWidth, roomLength = level.loadRoom(self.levelX, self.levelY)
self.roomX=roomWidth-1
if not roomLength == oldRoomLength: self.roomY = int(roomLength/2)
elif self.roomX>roomWidth-1 and self.levelX<levelWidth-1:
self.levelX+=1
oldRoomLength = roomLength
room, roomWidth, roomLength = level.loadRoom(self.levelX, self.levelY)
self.roomX=0
if not roomLength == oldRoomLength: self.roomY = int(roomLength/2)
elif self.roomY<0 and self.levelY>0:
self.levelY-=1
oldRoomWidth = roomWidth
room, roomWidth, roomLength = level.loadRoom(self.levelX, self.levelY)
if not roomWidth == oldRoomWidth: self.roomX = int(roomWidth/2)
self.roomY=roomLength-1
elif self.roomY>roomLength-1 and self.levelY<levelLength-1:
self.levelY+=1
oldRoomWidth = roomWidth
room, roomWidth, roomLength = level.loadRoom(self.levelX, self.levelY)
if not roomWidth == oldRoomWidth: self.roomX = int(roomWidth/2)
self.roomY=0
self.roomX = limit(self.roomX, 0, roomWidth-1)
self.roomY = limit(self.roomY, 0, roomLength-1)
return True
#...
def doublerange(arg0, arg1=0):
if arg1 == 0 and arg0.__class__ in [tuple, list]:
xlen = arg0[0]
ylen = arg0[1]
else:
xlen = arg0
ylen = arg1
return itertools.product(range(xlen), range(ylen))
#...
def removeDead(self): # Bring out your dead!
# i=0
# while i < len(self.creatures)-1:
# if not self.creatures[i].getStats().isAlive():
# self.creatures.pop(i)
# else:
# i += 1
#rebuild the list, excluding creatures that return false for isAlive()
self.creatures = [creature for creature in self.creatures if creature.getStats().isAlive()]
#...
def findAt(self, location):
for creature in self.creatures:
if location == creature.getLocation(): return creature
return None
#...
log(4, ' Level layout:')
for y in range(self.length):
line = ''
for x in range(self.width):
if (x==self.upX and y==self.upY) or (x==self.downX and y==self.downY):
line += '2'
else:
line += str(minimap.getMiniroom(x,y).getMiddle())
log(4,' '+line+' y:'+str(y))
log(4, '')
#...
def getStartingLocation(self):
level = self.getLevel(0)
levelX, levelY, roomX, roomY = level.findChar(UP_STA$
location = Location(0, levelX, levelY, roomX, roomY)
return location
#...
def randomLevel(name):
levelWidth = random.randint(4,6)
levelheight = random.randint(3,5)
roomWidth = randSkip(7,17)
roomLength = randSkip(7,15)
level = Level(name, DEFAULT_LEVEL, levelWidth, levelhe$
return level
class Location:
'The location of a creature'
def __init__(self, z, levelX, levelY, roomX, roomY):
self.z = z
self.setLevelXY(levelX, levelY)
self.setRoomXY(roomX, roomY)
def getZ(self): return self.z
def setZ(self, z): self.z = z
def changeZ(self, amount): self.z += amount
def getLevelXY(self): return self.levelX, self.levelY
def setLevelXY(self, levelX, levelY):
self.levelX = levelX
self.levelY = levelY
def changeLevelX(self, amount): self.levelX += amount
def changeLevelY(self, amount): self.levelY += amount
def getRoomXY(self): return self.roomX, self.roomY
def setRoomXY(self, roomX, roomY):
self.roomX = roomX
self.roomY = roomY
def changeRoomX(self, amount): self.roomX += amount
def changeRoomY(self, amount): self.roomY += amount
#...
#TODO change stats to a dictionary
class Stats: #TODO add a lot of stats including difficulty/experience levels
'All the stats of a creature, including health and attributes but not location.'
def __init__(self, health, maxHealth):
self.health = health
self.maxHealth = maxHealth
self.healthTimer = 0
def getHealth(self): return self.health
def getMaxHealth(self): return self.maxHealth
def changeMaxHealth(self, amount):
self.maxHealth += amount
if self.maxHealth < 0: self.maxHealth = 0
self.health = limit(self.health, 0, self.maxHealth)
def takeDamage(self, amount):
#TODO apply armor
self.changeHealth(-amount)
def changeHealth(self, amount):
self.health += amount
self.health = limit(self.health, 0, self.maxHealth)
def isAlive(self): return self.health > 0
def display(self, screen, y, x):
i = int(self.health * (len(healthColors)-1.0) / self.maxHealth)
screen.addstr(y,x, 'HP:')
screen.addstr(y,x+3, str(self.health)+'/'+str(self.maxHealth), curses.color_pair(healthColors[i]))
def act(self):
self.healthTimer -= 1
if self.healthTimer < 1:
self.healthTimer = 10
self.changeHealth(1)
#...
class Doors:
'Stores info about which of the 4 directions are active'
def __init__(self, left, right, top, bottom):
self.doors = [left, right, top, bottom]
def getIndex(self, i): return self.doors[i]
def setIndex(self, i, value): self.doors[i] = value
def getLeft(self): return self.doors[0]
def getRight(self): return self.doors[1]
def getTop(self): return self.doors[2]
def getBottom(self): return self.doors[3]
def setLeft(self, left): self.doors[0] = left
def setRight(self, right): self.doors[1] = right
def setTop(self, top): self.doors[2] = top
def setBottom(self, bottom): self.doors[3] = bottom
def getInverse(self): #swap left with right and top with bottom
return Doors(self.doors[1], self.doors[0], self.doors[3], self.doors[2])
def add(self, doors):
self.doors = [mine or add for mine, add in zip(self.doors, doors.doors)]
#...
class Miniroom:
def __init__(self, shown, middle, color, doors, visible):
self.shown = shown
self.middle = middle
self.color = color
self.doors = doors #openings in the wall
self.visible = visible #visible walls
def isShown(self): return self.shown
def getMiddle(self): return self.middle
def setMiddle(self, middle): self.middle = middle
def getColor(self): return self.color
def setColor(self, color): self.color = color
def getDoors(self): return self.doors
def getVisibleWalls(self): return self.visible
#...
template = ( #template defines the smallest room
( 8, 6, 9),
( 7, 1, 7),
(10, 6,11))
self.room = []
for y in range(self.length): #construct the room row by row
row = []
for x in range(self.width):
templateX = (x > 0) + (x == self.width - 1) #use template to find out which terrain to use
templateY = (y > 0) + (y == self.length - 1)
terrainID = template[templateY][templateX]
row.append(Tile(terrainID, 0, [], True))
self.room.append(row)
self.fillCorners(corners, hallwayWidth)
def fillCorners(self, corners, hW):
#...
def jsondefault(o):
if isinstance(o, set):
return list(o)
return o.__dict__
def jsonLog(priority, obj):
log(priority, repr(json.dumps(obj, indent=4, default=jsondefault)))
def jsonWrite(obj, filename):
# jsonLog(1, obj)
# return
with open(filename, 'w') as f:
f.write(json.dumps(obj, default=jsondefault))
def jsonRead(filename):
with open(filename, 'r') as f:
#...
#TODO use *args for Level.__init__()
# def __init__(self, type, name, args*arg0=0, arg1=0, arg2=0, arg3=0): #TODO use *args for Level.__init__()
# def __init__(self, type, name, width, length, roomWidth, roomLength):
# log(7, ' == == = new Level == == =')
# log(7, ' type', type)
#...
#...
z = plpos['z']
levelX = plpos['levelX']
levelY = plpos['levelY']
roomX = plpos['roomX']
roomY = plpos['roomY']
#...
#Disabled since it does not work on weirdly-shaped rooms
"""for x in range(self.width): #top row
topTile = self.room[0][x] #get the tile
if not topTile.isShown(): shown.setTop(False) #if the tile is hidden, the wall is considered not visible
elif topTile.getTerrain() == DOOR_ID: shown.setTop(True); break
#otherwise if the wall has a door, the wall is visible and it overrides hidden tiles
for x in range(self.width): # bottom row
bottomTile = self.room[self.length-1][x]
if not bottomTile.isShown(): shown.setBottom(False)
elif bottomTile.getTerrain() == DOOR_ID: shown.setBottom(True); break
for y in range(self.length): #left row
leftTile = self.room[y][0]
if not leftTile.isShown(): shown.setLeft(False)
elif leftTile.getTerrain() == DOOR_ID: shown.setLeft(True); break
for y in range(self.length): # right row
rightTile = self.room[y][self.width-1]
if not rightTile.isShown(): shown.setRight(False)
elif rightTile.getTerrain() == DOOR_ID: shown.setRight(True); break
"""
#...
class Tile:
'Contains the terrain and a list of items.'
def __init__(self, terrainID, color, items, shown):
#TODO make terrain an item
self.terrainID = terrainID
self.color = color
self.items = items
self.shown = shown
#terrain is the material that is seen
def getTopID(self):
if len(self.items) == 0: return self.terrainID
return self.items[-1].getID()
def getTopColor(self):
if len(self.items) == 0: return self.color
return self.items[-1].getColor()
def setTerrain(self, ID): self.terrainID = ID
def getTerrain(self): return self.terrainID
def setColor(self, color): self.color = color
def getColor(self): return self.color
def setItems(self, items): self.items = items
def getItems(self): return self.items
def addItem(self, item): self.items.append(item)
def popItem(self): return self.items.pop()
def show(self): self.shown = True
def hide(self): self.shown = False
def isShown(self): return self.shown #TODO Tile.isShown() might need to be on each creature
#...
def addDoors(self, doors):
self.doors = addCards(self.doors, doors) #combine the current doors and the new doors info
for x, y in doublerange(self.width, self.length): #add the doors to the actual room
#TODO door creation is incorrect
if doors[WEST] and abs(y - (self.length - 1) / 2.0) < 1 and x == 0 \
or doors[EAST] and abs(y - (self.length - 1) / 2.0) < 1 and x == self.width - 1 \
or doors[NORTH] and abs(x - (self.width - 1) / 2.0) < 1 and y == 0 \
or doors[SOUTH] and abs(x - (self.width - 1) / 2.0) < 1 and y == self.length - 1: #TODO improve door creation $
tile = self.getTile(x, y)
tile.setTerrain(DOOR_ID)
tile.setColor(DOOR_COLOR)
#...
z = pos['z']
levelX = pos['levelX']
levelY = pos['levelY']
roomX = pos['roomX']
roomY = pos['roomY']
#...
def findAt(self, z, levelX, levelY, roomX, roomY): #TODO replace findAt() with findAtPos()
for creature in self.creatures:
pos = creature.getPos()
if pos['z'] != z: continue
if pos['levelX'] != levelX: continue
if pos['levelY'] != levelY: continue
if pos['roomX'] != roomX: continue
if pos['roomY'] != roomY: continue
return creature
return None
#...
``` |
{
"source": "johanvandegriff/Stoichiometry",
"score": 3
} |
#### File: Stoichiometry/elements/compile_data.py
```python
import os, json
def jsonFormat(o):
return json.dumps(o, sort_keys=True, indent=4, separators=(',', ': '))
dataDir = "data"
files = os.listdir(dataDir)
#print files
elements = {}
#structure {'H': {'propertyName':'value'}}
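# Illustrative sketch of the data-file layout this loop assumes (the example
# values below are hypothetical, not taken from the repository's data files):
# line 1: the number of fields, e.g. "3"
# next 3 lines: the field titles, one per line, e.g. "symbol", "name", "mass"
# then repeated 3-line records, e.g. "H", "Hydrogen", "1,008"
# Values are parsed as int, then as float (a decimal comma is converted to a
# point), otherwise kept as strings; one title must be "symbol", and
# conflicting values for the same symbol/title across files abort the run.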
for filename in files:
# print filename
filepath = os.path.join(dataDir, filename)
with open(filepath, 'r') as f:
lines = f.read().splitlines()
i = 1
numfields = int(lines[0])
symbolIndex = -1
titles = []
for j in range(i, i + numfields):
titles.append(lines[j])
if lines[j] == "symbol":
symbolIndex = j-i
if symbolIndex == -1:
print "no 'symbol' column"
print " file: " + str(filepath)
print " titles: " + str(titles)
print " numfields: " + str(numfields)
quit()
i += numfields
while i+numfields < len(lines):
fields = []
for j in range(i, i + numfields):
fields.append(lines[j])
if j-i == symbolIndex:
symbol = lines[j]
# print symbol
for j in range(numfields):
if j != symbolIndex:
value = fields[j]
if not value == "":
try:
value = int(value)
except ValueError:
try:
value = float(value.replace(",", "."))
except ValueError:
pass
if not symbol in elements:
elements[symbol] = {}
if titles[j] in elements[symbol]:
prevValue = elements[symbol][titles[j]]
if value != prevValue:
print 'conflicting data! (new value not equal to previous)'
print " file: " + str(filepath)
print ' symbol: ' + str(symbol)
print ' title: ' + str(titles[j])
print ' prevValue: ' + str(prevValue)
print ' value: ' + str(value)
quit()
elements[symbol][titles[j]] = value
i += numfields
#print jsonFormat(elements)
print jsonFormat(elements['K'])
print jsonFormat(elements['Kr'])
``` |
{
"source": "JohanvandenHeuvel/AggregationOfLocalExplanations",
"score": 3
} |
#### File: AggregationOfLocalExplanations/models/model.py
```python
from torchvision import transforms, models
import torch
import torch.nn as nn
from models.resnet import resnet34, resnet18
from models.simple_model import MNIST_Net, Cifar_Net
def get_model(model_name, device="cpu"):
"""
loads the given model
"""
if model_name == "Inceptionv3":
net = models.inception_v3(pretrained=True)
elif model_name == "Resnet34":
net = resnet34(pretrained=True)
elif model_name == "Resnet18":
net = resnet18(pretrained=True)
elif model_name == 'Resnet18_cifar10':
net = resnet18(pretrained=True)
# cifar10 has only 10 classes, while resnet has 1000 by default
net.fc = nn.Linear(512, 10)
net.load_state_dict(torch.load("cifar10.pt", map_location=device))
elif model_name == "VGG-19":
net = models.vgg19(pretrained=True)
elif model_name == "mnist_model":
net = MNIST_Net()
net.load_model()
elif model_name == "cifar10_model":
net = Cifar_Net()
net.load_model()
else:
raise ValueError
net.to(device)
net.eval()
return net
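# Minimal usage sketch (illustrative, not part of the original repository);
# assumes the pretrained torchvision weights can be fetched in this environment:
# net = get_model("Resnet18", device="cpu")
# logits = net(torch.randn(1, 3, 224, 224))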
```
#### File: AggregationOfLocalExplanations/models/simple_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms, models
from torchvision.models import resnet18
from models.train import train
# from scripts.load_data import get_data
from models.resnet import resnet34, resnet18
class MNIST_Net(nn.Module):
"""
Simple CNN model for the MNIST dataset
"""
def __init__(self, model_name="mnist_net", device="cpu"):
super(MNIST_Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
self.model_name = model_name
self.device = device
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def save_model(self, path=None):
if path is None:
path = self.model_name
path = path + ".pt"
torch.save(self.state_dict(), path)
print("models saved")
def load_model(self, path=None):
try:
if path is None:
path = self.model_name
self.load_state_dict(torch.load(path + '.pt'))
print("models loaded")
except:
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
trainset = torchvision.datasets.MNIST(
root="./data", train=True, download=True, transform=transform
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=64, shuffle=True, num_workers=2
)
train(self, trainloader)
class Cifar_Net(nn.Module):
"""
Simple CNN model for CIFAR10
"""
def __init__(self, model_name="cifar_net"):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.model_name = model_name
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def save_model(self, path=None):
if path is None:
path = self.model_name
path = path + ".pt"
torch.save(self.state_dict(), path)
print("models saved")
def load_model(self, path=None):
try:
if path is None:
path = self.model_name
self.load_state_dict(torch.load(path + '.pt'))
print("models loaded")
except:
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
trainset = torchvision.datasets.CIFAR10(
root="./data", train=True, download=True, transform=transform
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=64, shuffle=True, num_workers=2
)
train(self, trainloader)
# if __name__ == "__main__":
# net = Cifar_Net()
# trainloader, testloader, classes = get_data()
# train(net, trainloader)
``` |
{
"source": "johanvdw/Fiona",
"score": 3
} |
#### File: Fiona/fiona/collection.py
```python
import os
import sys
from fiona.ogrext import Iterator, ItemsIterator, KeysIterator
from fiona.ogrext import Session, WritingSession
from fiona.ogrext import calc_gdal_version_num, get_gdal_version_num, get_gdal_release_name
from fiona.errors import DriverError, SchemaError, CRSError
from fiona._drivers import driver_count, GDALEnv
from six import string_types
class Collection(object):
"""A file-like interface to features in the form of GeoJSON-like
mappings."""
def __init__(
self, path, mode='r',
driver=None, schema=None, crs=None,
encoding=None,
layer=None,
vsi=None,
archive=None,
**kwargs):
"""The required ``path`` is the absolute or relative path to
a file, such as '/data/test_uk.shp'. In ``mode`` 'r', data can
be read only. In ``mode`` 'a', data can be appended to a file.
In ``mode`` 'w', data overwrites the existing contents of
a file.
In ``mode`` 'w', an OGR ``driver`` name and a ``schema`` are
required. A Proj4 ``crs`` string is recommended.
In 'w' mode, kwargs will be mapped to OGR layer creation
options.
"""
if not isinstance(path, string_types):
raise TypeError("invalid path: %r" % path)
if not isinstance(mode, string_types) or mode not in ('r', 'w', 'a'):
raise TypeError("invalid mode: %r" % mode)
if driver and not isinstance(driver, string_types):
raise TypeError("invalid driver: %r" % driver)
if schema and not hasattr(schema, 'get'):
raise TypeError("invalid schema: %r" % schema)
if crs and not isinstance(crs, (dict,) + string_types):
raise TypeError("invalid crs: %r" % crs)
if encoding and not isinstance(encoding, string_types):
raise TypeError("invalid encoding: %r" % encoding)
if layer and not isinstance(layer, tuple(list(string_types) + [int])):
raise TypeError("invalid name: %r" % layer)
if vsi:
if not isinstance(vsi, string_types) or vsi not in ('zip', 'tar', 'gzip'):
raise TypeError("invalid vsi: %r" % vsi)
if archive and not isinstance(archive, string_types):
raise TypeError("invalid archive: %r" % archive)
# Check GDAL version against drivers
if (driver == "GPKG" and
get_gdal_version_num() < calc_gdal_version_num(1, 11, 0)):
raise DriverError(
"GPKG driver requires GDAL 1.11.0, "
"fiona was compiled against: {}".format(get_gdal_release_name()))
self.session = None
self.iterator = None
self._len = 0
self._bounds = None
self._driver = None
self._schema = None
self._crs = None
self._crs_wkt = None
self.env = None
self.path = vsi_path(path, vsi, archive)
if mode == 'w':
if layer and not isinstance(layer, string_types):
raise ValueError("in 'w' mode, layer names must be strings")
if driver == 'GeoJSON':
if layer is not None:
raise ValueError("the GeoJSON format does not have layers")
self.name = 'OgrGeoJSON'
# TODO: raise ValueError as above for other single-layer formats.
else:
self.name = layer or os.path.basename(os.path.splitext(path)[0])
else:
if layer in (0, None):
self.name = 0
else:
self.name = layer or os.path.basename(os.path.splitext(path)[0])
self.mode = mode
if self.mode == 'w':
if driver == 'Shapefile':
driver = 'ESRI Shapefile'
if not driver:
raise DriverError("no driver")
elif driver not in supported_drivers:
raise DriverError(
"unsupported driver: %r" % driver)
elif self.mode not in supported_drivers[driver]:
raise DriverError(
"unsupported mode: %r" % self.mode)
self._driver = driver
if not schema:
raise SchemaError("no schema")
elif 'properties' not in schema:
raise SchemaError("schema lacks: properties")
elif 'geometry' not in schema:
raise SchemaError("schema lacks: geometry")
self._schema = schema
if crs:
if 'init' in crs or 'proj' in crs or 'epsg' in crs.lower():
self._crs = crs
else:
raise CRSError("crs lacks init or proj parameter")
if driver_count == 0:
# create a local manager and enter
self.env = GDALEnv(True)
else:
self.env = GDALEnv(False)
self.env.__enter__()
if self.mode == "r":
self.encoding = encoding
self.session = Session()
self.session.start(self)
# If encoding param is None, we'll use what the session
# suggests.
self.encoding = encoding or self.session.get_fileencoding().lower()
elif self.mode in ("a", "w"):
self.encoding = encoding
self.session = WritingSession()
self.session.start(self, **kwargs)
self.encoding = encoding or self.session.get_fileencoding().lower()
if self.session:
self.guard_driver_mode()
def __repr__(self):
return "<%s Collection '%s', mode '%s' at %s>" % (
self.closed and "closed" or "open",
self.path + ":" + str(self.name),
self.mode,
hex(id(self)))
def guard_driver_mode(self):
driver = self.session.get_driver()
if driver not in supported_drivers:
raise DriverError("unsupported driver: %r" % driver)
if self.mode not in supported_drivers[driver]:
raise DriverError("unsupported mode: %r" % self.mode)
@property
def driver(self):
"""Returns the name of the proper OGR driver."""
if not self._driver and self.mode in ("a", "r") and self.session:
self._driver = self.session.get_driver()
return self._driver
@property
def schema(self):
"""Returns a mapping describing the data schema.
The mapping has 'geometry' and 'properties' items. The former is a
string such as 'Point' and the latter is an ordered mapping that
follows the order of fields in the data file.
"""
if not self._schema and self.mode in ("a", "r") and self.session:
self._schema = self.session.get_schema()
return self._schema
@property
def crs(self):
"""Returns a Proj4 string."""
if self._crs is None and self.mode in ("a", "r") and self.session:
self._crs = self.session.get_crs()
return self._crs
@property
def crs_wkt(self):
"""Returns a WKT string."""
if self._crs_wkt is None and self.mode in ("a", "r") and self.session:
self._crs_wkt = self.session.get_crs_wkt()
return self._crs_wkt
@property
def meta(self):
"""Returns a mapping with the driver, schema, and crs properties."""
return {
'driver': self.driver,
'schema': self.schema,
'crs': self.crs }
def filter(self, *args, **kwds):
"""Returns an iterator over records, but filtered by a test for
spatial intersection with the provided ``bbox``, a (minx, miny,
maxx, maxy) tuple or a geometry ``mask``.
Positional arguments ``stop`` or ``start, stop[, step]`` allows
iteration to skip over items or stop at a specific item.
"""
if self.closed:
raise ValueError("I/O operation on closed collection")
elif self.mode != 'r':
raise IOError("collection not open for reading")
if args:
s = slice(*args)
start = s.start
stop = s.stop
step = s.step
else:
start = stop = step = None
bbox = kwds.get('bbox')
mask = kwds.get('mask')
if bbox and mask:
raise ValueError("mask and bbox can not be set together")
self.iterator = Iterator(
self, start, stop, step, bbox, mask)
return self.iterator
def items(self, *args, **kwds):
"""Returns an iterator over FID, record pairs, optionally
filtered by a test for spatial intersection with the provided
``bbox``, a (minx, miny, maxx, maxy) tuple or a geometry
``mask``.
Positional arguments ``stop`` or ``start, stop[, step]`` allows
iteration to skip over items or stop at a specific item.
"""
if self.closed:
raise ValueError("I/O operation on closed collection")
elif self.mode != 'r':
raise IOError("collection not open for reading")
if args:
s = slice(*args)
start = s.start
stop = s.stop
step = s.step
else:
start = stop = step = None
bbox = kwds.get('bbox')
mask = kwds.get('mask')
if bbox and mask:
raise ValueError("mask and bbox can not be set together")
self.iterator = ItemsIterator(
self, start, stop, step, bbox, mask)
return self.iterator
def keys(self, *args, **kwds):
"""Returns an iterator over FIDs, optionally
filtered by a test for spatial intersection with the provided
``bbox``, a (minx, miny, maxx, maxy) tuple or a geometry
``mask``.
Positional arguments ``stop`` or ``start, stop[, step]`` allows
iteration to skip over items or stop at a specific item.
"""
if self.closed:
raise ValueError("I/O operation on closed collection")
elif self.mode != 'r':
raise IOError("collection not open for reading")
if args:
s = slice(*args)
start = s.start
stop = s.stop
step = s.step
else:
start = stop = step = None
bbox = kwds.get('bbox')
mask = kwds.get('mask')
if bbox and mask:
raise ValueError("mask and bbox can not be set together")
self.iterator = KeysIterator(
self, start, stop, step, bbox, mask)
return self.iterator
def __contains__(self, fid):
return self.session.has_feature(fid)
values = filter
def __iter__(self):
"""Returns an iterator over records."""
return self.filter()
def __next__(self):
"""Returns next record from iterator."""
if not self.iterator:
iter(self)
return next(self.iterator)
next = __next__
def __getitem__(self, item):
return self.session.__getitem__(item)
def writerecords(self, records):
"""Stages multiple records for writing to disk."""
if self.closed:
raise ValueError("I/O operation on closed collection")
if self.mode not in ('a', 'w'):
raise IOError("collection not open for writing")
self.session.writerecs(records, self)
self._len = self.session.get_length()
self._bounds = self.session.get_extent()
def write(self, record):
"""Stages a record for writing to disk."""
self.writerecords([record])
def validate_record(self, record):
"""Compares the record to the collection's schema.
Returns ``True`` if the record matches, else ``False``.
"""
# Currently we only compare keys of properties, not the types of
# values.
return set(record['properties'].keys()
) == set(self.schema['properties'].keys()
) and self.validate_record_geometry(record)
def validate_record_geometry(self, record):
"""Compares the record's geometry to the collection's schema.
Returns ``True`` if the record matches, else ``False``.
"""
# Shapefiles welcome mixes of line/multis and polygon/multis.
# OGR reports these mixed files as type "Polygon" or "LineString"
# but will return either these or their multi counterparts when
# reading features.
if (self.driver == "ESRI Shapefile" and
"Point" not in record['geometry']['type']):
return record['geometry']['type'].lstrip(
"Multi") == self.schema['geometry'].lstrip("3D ").lstrip(
"Multi")
else:
return (record['geometry']['type'] ==
self.schema['geometry'].lstrip("3D "))
def __len__(self):
if self._len <= 0 and self.session is not None:
self._len = self.session.get_length()
return self._len
@property
def bounds(self):
"""Returns (minx, miny, maxx, maxy)."""
if self._bounds is None and self.session is not None:
self._bounds = self.session.get_extent()
return self._bounds
def flush(self):
"""Flush the buffer."""
if self.session is not None and self.session.get_length() > 0:
self.session.sync(self)
new_len = self.session.get_length()
self._len = new_len > self._len and new_len or self._len
self._bounds = self.session.get_extent()
def close(self):
"""In append or write mode, flushes data to disk, then ends
access."""
if self.session is not None:
if self.mode in ('a', 'w'):
self.flush()
self.session.stop()
self.session = None
self.iterator = None
if self.env:
self.env.__exit__()
@property
def closed(self):
"""``False`` if data can be accessed, otherwise ``True``."""
return self.session is None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
# Note: you can't count on this being called. Call close() explicitly
# or use the context manager protocol ("with").
self.__exit__(None, None, None)
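# Illustrative usage sketch (not part of the upstream module): a Collection is
# normally obtained through fiona.open(), which wraps this class, and supports
# the context-manager protocol implemented above. The path matches the test
# data used elsewhere in this repository; the bbox values are made up.
# with fiona.open("docs/data/test_uk.shp", "r") as src:
# print(src.driver, src.crs, len(src))
# for f in src.filter(bbox=(-8.0, 49.0, 2.0, 61.0)):
# pass # each f is a GeoJSON-like mapping with 'geometry' and 'properties'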
def vsi_path(path, vsi=None, archive=None):
# If a VSF and archive file are specified, we convert the path to
# an OGR VSI path (see cpl_vsi.h).
if vsi:
if archive:
result = "/vsi%s/%s%s" % (vsi, archive, path)
else:
result = "/vsi%s/%s" % (vsi, path)
else:
result = path
return result
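# Worked examples for vsi_path (file names are hypothetical):
# vsi_path("/data/test_uk.shp") == "/data/test_uk.shp"
# vsi_path("/test_uk.shp", vsi="zip", archive="/tmp/data.zip")
# == "/vsizip//tmp/data.zip/test_uk.shp"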
# Here is the list of available drivers as (name, modes) tuples. Currently,
# we only expose the defaults (excepting FileGDB). We also don't expose
# the CSV or GeoJSON drivers. Use Python's csv and json modules instead.
# Might still exclude a few more of these after making a pass through the
# entries for each at http://www.gdal.org/ogr/ogr_formats.html to screen
# out the multi-layer formats.
supported_drivers = dict([
#OGR Vector Formats
#Format Name Code Creation Georeferencing Compiled by default
#Aeronav FAA files AeronavFAA No Yes Yes
("AeronavFAA", "r"),
#ESRI ArcObjects ArcObjects No Yes No, needs ESRI ArcObjects
#Arc/Info Binary Coverage AVCBin No Yes Yes
# multi-layer
# ("AVCBin", "r"),
#Arc/Info .E00 (ASCII) Coverage AVCE00 No Yes Yes
# multi-layer
# ("AVCE00", "r"),
#Arc/Info Generate ARCGEN No No Yes
("ARCGEN", "r"),
#Atlas BNA BNA Yes No Yes
("BNA", "raw"),
#AutoCAD DWG DWG No No No
#AutoCAD DXF DXF Yes No Yes
("DXF", "raw"),
#Comma Separated Value (.csv) CSV Yes No Yes
#CouchDB / GeoCouch CouchDB Yes Yes No, needs libcurl
#DODS/OPeNDAP DODS No Yes No, needs libdap
#EDIGEO EDIGEO No Yes Yes
# multi-layer? Hard to tell from the OGR docs
# ("EDIGEO", "r"),
#ElasticSearch ElasticSearch Yes (write-only) - No, needs libcurl
#ESRI FileGDB FileGDB Yes Yes No, needs FileGDB API library
# multi-layer
("FileGDB", "raw"),
#ESRI Personal GeoDatabase PGeo No Yes No, needs ODBC library
#ESRI ArcSDE SDE No Yes No, needs ESRI SDE
#ESRI Shapefile ESRI Shapefile Yes Yes Yes
("ESRI Shapefile", "raw"),
#FMEObjects Gateway FMEObjects Gateway No Yes No, needs FME
#GeoJSON GeoJSON Yes Yes Yes
("GeoJSON", "rw"),
#Géoconcept Export Geoconcept Yes Yes Yes
# multi-layers
# ("Geoconcept", "raw"),
#Geomedia .mdb Geomedia No No No, needs ODBC library
#GeoPackage GPKG Yes Yes No, needs libsqlite3
("GPKG", "rw"),
#GeoRSS GeoRSS Yes Yes Yes (read support needs libexpat)
#Google Fusion Tables GFT Yes Yes No, needs libcurl
#GML GML Yes Yes Yes (read support needs Xerces or libexpat)
#GMT GMT Yes Yes Yes
("GMT", "raw"),
#GPSBabel GPSBabel Yes Yes Yes (needs GPSBabel and GPX driver)
#GPX GPX Yes Yes Yes (read support needs libexpat)
("GPX", "raw"),
#GRASS GRASS No Yes No, needs libgrass
#GPSTrackMaker (.gtm, .gtz) GPSTrackMaker Yes Yes Yes
("GPSTrackMaker", "raw"),
#Hydrographic Transfer Format HTF No Yes Yes
# TODO: Fiona is not ready for multi-layer formats: ("HTF", "r"),
#Idrisi Vector (.VCT) Idrisi No Yes Yes
("Idrisi", "r"),
#Informix DataBlade IDB Yes Yes No, needs Informix DataBlade
#INTERLIS "Interlis 1" and "Interlis 2" Yes Yes No, needs Xerces (INTERLIS model reading needs ili2c.jar)
#INGRES INGRES Yes No No, needs INGRESS
#KML KML Yes Yes Yes (read support needs libexpat)
#LIBKML LIBKML Yes Yes No, needs libkml
#Mapinfo File MapInfo File Yes Yes Yes
("MapInfo File", "raw"),
#Microstation DGN DGN Yes No Yes
("DGN", "raw"),
#Access MDB (PGeo and Geomedia capable) MDB No Yes No, needs JDK/JRE
#Memory Memory Yes Yes Yes
#MySQL MySQL No Yes No, needs MySQL library
#NAS - ALKIS NAS No Yes No, needs Xerces
#Oracle Spatial OCI Yes Yes No, needs OCI library
#ODBC ODBC No Yes No, needs ODBC library
#MS SQL Spatial MSSQLSpatial Yes Yes No, needs ODBC library
#Open Document Spreadsheet ODS Yes No No, needs libexpat
#OGDI Vectors (VPF, VMAP, DCW) OGDI No Yes No, needs OGDI library
#OpenAir OpenAir No Yes Yes
# multi-layer
# ("OpenAir", "r"),
#PCI Geomatics Database File PCIDSK No No Yes, using internal PCIDSK SDK (from GDAL 1.7.0)
("PCIDSK", "r"),
#PDS PDS No Yes Yes
("PDS", "r"),
#PGDump PostgreSQL SQL dump Yes Yes Yes
#PostgreSQL/PostGIS PostgreSQL/PostGIS Yes Yes No, needs PostgreSQL client library (libpq)
#EPIInfo .REC REC No No Yes
#S-57 (ENC) S57 No Yes Yes
# multi-layer
# ("S57", "r"),
#SDTS SDTS No Yes Yes
# multi-layer
# ("SDTS", "r"),
#SEG-P1 / UKOOA P1/90 SEGUKOOA No Yes Yes
# multi-layers
# ("SEGUKOOA", "r"),
#SEG-Y SEGY No No Yes
("SEGY", "r"),
#Norwegian SOSI Standard SOSI No Yes No, needs FYBA library
#SQLite/SpatiaLite SQLite Yes Yes No, needs libsqlite3 or libspatialite
#SUA SUA No Yes Yes
("SUA", "r"),
#SVG SVG No Yes No, needs libexpat
#UK .NTF UK. NTF No Yes Yes
# multi-layer
# ("UK. NTF", "r"),
#U.S. Census TIGER/Line TIGER No Yes Yes
# multi-layer
# ("TIGER", "r"),
#VFK data VFK No Yes Yes
# multi-layer
# ("VFK", "r"),
#VRT - Virtual Datasource VRT No Yes Yes
# multi-layer
# ("VRT", "r"),
#OGC WFS (Web Feature Service) WFS Yes Yes No, needs libcurl
#MS Excel format XLS No No No, needs libfreexl
#Office Open XML spreadsheet XLSX Yes No No, needs libexpat
#X-Plane/Flighgear aeronautical data XPLANE No Yes Yes
# multi-layer
# ("XPLANE", "r")
])
```
#### File: Fiona/tests/test_multiconxn.py
```python
import logging
import os
import shutil
import sys
import tempfile
import unittest
import fiona
from fiona.odict import OrderedDict
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
class ReadAccess(unittest.TestCase):
# To check that we'll be able to get multiple 'r' connections to layers
# in a single file.
def setUp(self):
self.c = fiona.open("docs/data/test_uk.shp", "r", layer="test_uk")
def tearDown(self):
self.c.close()
def test_meta(self):
with fiona.open("docs/data/test_uk.shp", "r", layer="test_uk") as c2:
self.assertEqual(len(self.c), len(c2))
self.assertEqual(sorted(self.c.schema.items()), sorted(c2.schema.items()))
def test_feature(self):
f1 = next(self.c)
with fiona.open("docs/data/test_uk.shp", "r", layer="test_uk") as c2:
f2 = next(c2)
self.assertEqual(f1, f2)
class ReadWriteAccess(unittest.TestCase):
# To check that we'll be able to read from a file that we're
# writing to.
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.c = fiona.open(
os.path.join(self.tempdir, "multi_write_test.shp"),
"w",
driver="ESRI Shapefile",
schema={
'geometry': 'Point',
'properties': [('title', 'str:80'), ('date', 'date')]},
crs={'init': "epsg:4326", 'no_defs': True},
encoding='utf-8')
self.f = {
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': (0.0, 0.1)},
'properties': OrderedDict([('title', 'point one'), ('date', '2012-01-29')])}
self.c.writerecords([self.f])
self.c.flush()
def tearDown(self):
self.c.close()
shutil.rmtree(self.tempdir)
def test_meta(self):
c2 = fiona.open(os.path.join(self.tempdir, "multi_write_test.shp"), "r")
self.assertEqual(len(self.c), len(c2))
self.assertEqual(sorted(self.c.schema.items()), sorted(c2.schema.items()))
def test_read(self):
c2 = fiona.open(os.path.join(self.tempdir, "multi_write_test.shp"), "r")
f2 = next(c2)
del f2['id']
self.assertEqual(self.f, f2)
def test_read_after_close(self):
c2 = fiona.open(os.path.join(self.tempdir, "multi_write_test.shp"), "r")
self.c.close()
f2 = next(c2)
del f2['id']
self.assertEqual(self.f, f2)
class LayerCreation(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.dir = os.path.join(self.tempdir, 'layer_creation')
if os.path.exists(self.dir):
shutil.rmtree(self.dir)
os.mkdir(self.dir)
self.c = fiona.open(
self.dir,
'w',
layer='write_test',
driver='ESRI Shapefile',
schema={
'geometry': 'Point',
'properties': [('title', 'str:80'), ('date', 'date')]},
crs={'init': "epsg:4326", 'no_defs': True},
encoding='utf-8')
self.f = {
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': (0.0, 0.1)},
'properties': OrderedDict([('title', 'point one'), ('date', '2012-01-29')])}
self.c.writerecords([self.f])
self.c.flush()
def tearDown(self):
self.c.close()
shutil.rmtree(self.tempdir)
def test_meta(self):
c2 = fiona.open(os.path.join(self.dir, "write_test.shp"), "r")
self.assertEqual(len(self.c), len(c2))
self.assertEqual(sorted(self.c.schema.items()), sorted(c2.schema.items()))
def test_read(self):
c2 = fiona.open(os.path.join(self.dir, "write_test.shp"), "r")
f2 = next(c2)
del f2['id']
self.assertEqual(self.f, f2)
def test_read_after_close(self):
c2 = fiona.open(os.path.join(self.dir, "write_test.shp"), "r")
self.c.close()
f2 = next(c2)
del f2['id']
self.assertEqual(self.f, f2)
```
#### File: Fiona/tests/test_revolvingdoor.py
```python
import logging
import os.path
import shutil
import subprocess
import sys
import tempfile
import unittest
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
log = logging.getLogger('fiona.tests')
class RevolvingDoorTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_write_revolving_door(self):
with fiona.open('docs/data/test_uk.shp') as src:
meta = src.meta
features = list(src)
shpname = os.path.join(self.tempdir, 'foo.shp')
with fiona.open(shpname, 'w', **meta) as dst:
dst.writerecords(features)
with fiona.open(shpname) as src:
pass
```
#### File: Fiona/tests/test_unicode.py
```python
import logging
import os
import shutil
import sys
import tempfile
import unittest
import six
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
class UnicodePathTest(unittest.TestCase):
def setUp(self):
tempdir = tempfile.mkdtemp()
self.dir = os.path.join(tempdir, 'français')
shutil.copytree('docs/data/', self.dir)
def tearDown(self):
shutil.rmtree(os.path.dirname(self.dir))
def test_unicode_path(self):
path = self.dir + '/test_uk.shp'
if sys.version_info < (3,):
path = path.decode('utf-8')
with fiona.open(path) as c:
assert len(c) == 48
def test_unicode_path_layer(self):
path = self.dir
layer = 'test_uk'
if sys.version_info < (3,):
path = path.decode('utf-8')
layer = layer.decode('utf-8')
with fiona.open(path, layer=layer) as c:
assert len(c) == 48
def test_utf8_path(self):
path = self.dir + '/test_uk.shp'
if sys.version_info < (3,):
with fiona.open(path) as c:
assert len(c) == 48
``` |
{
"source": "johanvdw/niche_vlaanderen",
"score": 3
} |
#### File: niche_vlaanderen/niche_vlaanderen/acidity.py
```python
from pkg_resources import resource_filename
import numpy as np
import pandas as pd
from .codetables import validate_tables_acidity, check_codes_used
class Acidity(object):
'''
Calculate the acidity class from soil code, water level (mlw), seepage,
mineral richness, rainwater influence and inundation.
'''
nodata = 255 # uint8 data type
def __init__(self, ct_acidity=None,
ct_soil_mlw_class=None,
ct_soil_codes=None,
lnk_acidity=None,
ct_seepage=None):
if ct_acidity is None:
ct_acidity = resource_filename(
"niche_vlaanderen", "system_tables/acidity.csv")
if ct_soil_mlw_class is None:
ct_soil_mlw_class = resource_filename(
"niche_vlaanderen", "system_tables/soil_mlw_class.csv")
if ct_soil_codes is None:
ct_soil_codes = resource_filename(
"niche_vlaanderen", "system_tables/soil_codes.csv")
if lnk_acidity is None:
lnk_acidity = resource_filename(
"niche_vlaanderen", "system_tables/lnk_acidity.csv")
if ct_seepage is None:
ct_seepage = resource_filename(
"niche_vlaanderen", "system_tables/seepage.csv")
self._ct_acidity = pd.read_csv(ct_acidity)
self._ct_soil_mlw = pd.read_csv(ct_soil_mlw_class)
self._ct_soil_codes = pd.read_csv(ct_soil_codes)
self._lnk_acidity = pd.read_csv(lnk_acidity)
self._ct_seepage = pd.read_csv(ct_seepage)
inner = all(v is None for v in self.__init__.__code__.co_varnames[1:])
validate_tables_acidity(ct_acidity=self._ct_acidity,
ct_soil_mlw_class=self._ct_soil_mlw,
ct_soil_codes=self._ct_soil_codes,
lnk_acidity=self._lnk_acidity,
ct_seepage=self._ct_seepage,
inner=inner)
self._ct_soil_codes = self._ct_soil_codes.set_index("soil_code")
def _calculate_soil_mlw(self, soil_code, mlw):
check_codes_used("soil_code", soil_code, self._ct_soil_codes.index)
# determine soil_group for soil_code
orig_shape = mlw.shape
soil_code = soil_code.flatten()
mlw = mlw.flatten()
soil_group = self._ct_soil_codes.soil_group.reindex(soil_code)\
.values.astype("int8")
# the function above gives 0 for no data
soil_group[soil_group == 0] = -99
result = np.full(soil_code.shape, -99)
for sel_group, subtable in self._ct_soil_mlw.groupby(["soil_group"]):
subtable = subtable.copy().reset_index(drop=True)
index = np.digitize(mlw, subtable.mlw_max, right=True)
selection = (soil_group == sel_group)
result[selection] = \
subtable.soil_mlw_class.reindex(index)[selection]
result[mlw == -99] = -99
result = result.reshape(orig_shape)
return result
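# Worked sketch of the lookup above (the boundary values are assumptions; the
# real ones come from soil_mlw_class.csv): if a soil group's subtable has
# mlw_max = [80, 110, inf] with soil_mlw_class = [1, 2, 3], then
# np.digitize([79, 80, 100, 111], mlw_max, right=True) gives [0, 0, 1, 2],
# which reindexes to the classes [1, 1, 2, 3]; -99 (nodata) inputs stay -99.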
def _get_acidity(self, rainwater, minerality, inundation, seepage,
soil_mlw_class):
orig_shape = inundation.shape
check_codes_used("rainwater", rainwater, {0, 1})
check_codes_used("minerality", minerality,
self._lnk_acidity["mineral_richness"])
check_codes_used("inundation", inundation,
self._lnk_acidity["inundation"])
check_codes_used("seepage", seepage,
self._ct_seepage["seepage"])
rainwater = rainwater.flatten()
minerality = minerality.flatten()
inundation = inundation.flatten()
seepage = seepage.flatten()
soil_mlw_class = soil_mlw_class.flatten()
result = np.full(soil_mlw_class.shape, self.nodata, dtype="uint8")
for labels, subtable in self._lnk_acidity.groupby(
["rainwater", "mineral_richness", "inundation", "seepage",
"soil_mlw_class"]):
sel_rainwater, sel_mr, sel_inundation, \
sel_seepage, sel_soil_mlw_class = labels
subtable = subtable.copy().reset_index(drop=True)
selection = ((rainwater == sel_rainwater)
& (minerality == sel_mr)
& (inundation == sel_inundation)
& (seepage == sel_seepage)
& (soil_mlw_class == sel_soil_mlw_class))
result[(selection)] = subtable.acidity[0]
result = result.reshape(orig_shape)
return result
def _get_seepage(self, seepage):
"""Classify seepage values
"""
orig_shape = seepage.shape
seepage = seepage.flatten()
index = np.digitize(seepage, self._ct_seepage.seepage_max, right=True)
seepage_class = self._ct_seepage.seepage.reindex(index)
seepage_class[(np.isnan(seepage) | (seepage == -99))]
return seepage_class.values.reshape(orig_shape)
def calculate(self, soil_class, mlw, inundation, seepage, minerality,
rainwater):
soil_mlw = self._calculate_soil_mlw(soil_class, mlw)
seepage = self._get_seepage(seepage)
acidity = self._get_acidity(rainwater, minerality, inundation,
seepage, soil_mlw)
return acidity
```
#### File: niche_vlaanderen/niche_vlaanderen/cli.py
```python
import click
import niche_vlaanderen
from pkg_resources import resource_filename
@click.command()
@click.pass_context
@click.option('--example', is_flag=True,
help='prints an example configuration file')
@click.option('--version', is_flag=True,
help='prints the version number')
@click.argument('config', required=False, type=click.Path(exists=True))
def cli(ctx, config, example, version):
"""Command line interface to the NICHE vegetation model
"""
if example:
ex = resource_filename(
"niche_vlaanderen",
"system_tables/example.yaml")
with open(ex) as f:
print(f.read())
if config is not None:
n = niche_vlaanderen.Niche()
n.run_config_file(config, overwrite_ct=True)
click.echo(n)
if config is None and not example:
# we should really find a neater way to show --help here by default.
print("No config file added. Use --help for more info")
if version:
print("niche_vlaanderen version: " + niche_vlaanderen.__version__)
```
#### File: niche_vlaanderen/tests/test_acidity.py
```python
from unittest import TestCase
import rasterio
import numpy as np
import niche_vlaanderen
import pytest
from niche_vlaanderen.exception import NicheException
def raster_to_numpy(filename):
"""Read a GDAL grid as numpy array
Notes
------
No-data values are -99 for integer types and np.nan for real types.
"""
with rasterio.open(filename) as ds:
data = ds.read(1)
nodata = ds.nodatavals[0]
print(nodata)
# create a mask for no-data values, taking into account the data-types
if data.dtype == 'float32':
data[np.isclose(data, nodata)] = np.nan
else:
data[np.isclose(data, nodata)] = -99
return data
class testAcidity(TestCase):
def test_get_soil_mlw(self):
mlw = np.array([50, 66])
soil_code = np.array([14, 7])
a = niche_vlaanderen.Acidity()
result = a._calculate_soil_mlw(soil_code, mlw)
np.testing.assert_equal(np.array([1, 9]), result)
def test_get_soil_mlw_borders(self):
mlw = np.array([79, 80, 100, 110, 111])
soil_code = np.array([14, 14, 14, 14, 14])
a = niche_vlaanderen.Acidity()
result = a._calculate_soil_mlw(soil_code, mlw)
expected = np.array([1, 1, 2, 2, 3])
np.testing.assert_equal(expected, result)
def test_acidity_partial(self):
rainwater = np.array([0])
minerality = np.array([1])
inundation = np.array([1])
seepage = np.array([1])
soil_mlw = np.array([1])
a = niche_vlaanderen.Acidity()
result = a._get_acidity(rainwater, minerality, inundation,
seepage, soil_mlw)
np.testing.assert_equal(np.array([3]), result)
def test_seepage_code(self):
seepage = np.array([5, 0.3, 0.05, -0.04, -0.2, -5])
a = niche_vlaanderen.Acidity()
result = a._get_seepage(seepage)
expected = np.array([1, 1, 1, 1, 2, 3])
np.testing.assert_equal(expected, result)
def test_acidity(self):
rainwater = np.array([0])
minerality = np.array([0])
soilcode = np.array([14])
inundation = np.array([1])
seepage = np.array([20])
mlw = np.array([50])
a = niche_vlaanderen.Acidity()
result = a.calculate(soilcode, mlw, inundation, seepage, minerality,
rainwater)
np.testing.assert_equal(3, result)
def test_acidity_testcase(self):
a = niche_vlaanderen.Acidity()
inputdir = "testcase/zwarte_beek/input/"
soil_code = raster_to_numpy(inputdir + "soil_code.asc")
soil_code_r = soil_code
soil_code_r[soil_code > 0] = np.round(soil_code / 10000)[soil_code > 0]
mlw = raster_to_numpy(inputdir + "mlw.asc")
inundation = \
raster_to_numpy(inputdir + "inundation.asc")
rainwater = raster_to_numpy(inputdir + "nullgrid.asc")
seepage = raster_to_numpy(inputdir + "seepage.asc")
minerality = raster_to_numpy(inputdir + "minerality.asc")
acidity = raster_to_numpy("testcase/zwarte_beek/abiotic/acidity.asc")
acidity[np.isnan(acidity)] = 255
acidity[acidity == -99] = 255
result = a.calculate(soil_code_r, mlw, inundation, seepage,
minerality, rainwater)
np.testing.assert_equal(acidity, result)
def test_acidity_invalidsoil(self):
a = niche_vlaanderen.Acidity()
rainwater = np.array([0])
minerality = np.array([0])
soilcode = np.array([-1])
inundation = np.array([1])
seepage = np.array([20])
mlw = np.array([50])
a = niche_vlaanderen.Acidity()
with pytest.raises(NicheException):
a.calculate(soilcode, mlw, inundation, seepage, minerality,
rainwater)
def test_acidity_invalidminerality(self):
a = niche_vlaanderen.Acidity()
rainwater = np.array([0])
minerality = np.array([500])
soilcode = np.array([14])
inundation = np.array([1])
seepage = np.array([20])
mlw = np.array([50])
with pytest.raises(NicheException):
a.calculate(soilcode, mlw, inundation, seepage, minerality,
rainwater)
```
#### File: niche_vlaanderen/tests/test_nutrient_level.py
```python
from unittest import TestCase
import numpy as np
import rasterio
import niche_vlaanderen
def raster_to_numpy(filename):
'''Read a GDAL grid as numpy array
Notes
------
No-data values are -99 for integer types and np.nan for real types.
'''
with rasterio.open(filename) as ds:
data = ds.read(1)
nodata = ds.nodatavals[0]
# create a mask for no-data values, taking into account the data-types
if data.dtype == 'float32':
data[data == nodata] = np.nan
else:
data[data == nodata] = -99
return data
class TestNutrientLevel(TestCase):
def test_nitrogen_mineralisation(self):
soil_code = np.array([14])
msw = np.array([33])
nl = niche_vlaanderen.NutrientLevel()
result = nl._calculate_mineralisation(soil_code, msw)
np.testing.assert_equal(np.array([75]), result)
def test_borders(self):
soil_code = np.array([7, 7, 7, 7, 7])
msw = np.array([4, 5, 7, 10, 11])
nl = niche_vlaanderen.NutrientLevel()
result_nm = nl._calculate_mineralisation(soil_code, msw)
expected_nm = np.array([50, 50, 55, 55, 76])
np.testing.assert_equal(expected_nm, result_nm)
nuls = np.array([0, 0, 0, 0, 0])
# we want to check the boundaries ]156, 293]
nitrogen_sum = np.array([155, 156, 200, 293, 294])
# so we substract the nitrogen_sum from the expected mineralisation
nitrogen_animal = nitrogen_sum - expected_nm
management = np.array([2, 2, 2, 2, 2])
result = nl.calculate(soil_code=soil_code,
msw=msw,
management=management,
nitrogen_animal=nitrogen_animal,
nitrogen_atmospheric=nuls,
nitrogen_fertilizer=nuls,
inundation=nuls)
expected = np.array([2, 2, 3, 3, 4])
np.testing.assert_equal(expected, result)
def test__get(self):
management = np.array([2])
soil_code = np.array([14])
nitrogen = np.array([445])
inundation = np.array([1])
nl = niche_vlaanderen.NutrientLevel()
result = nl._calculate(management, soil_code, nitrogen, inundation)
np.testing.assert_equal(np.array(5), result)
def test_calculate(self):
nl = niche_vlaanderen.NutrientLevel()
management = np.array([2])
soil_code = np.array([14])
msw = np.array([33])
nitrogen_deposition = np.array([20])
nitrogen_animal = np.array([350])
nitrogen_fertilizer = np.array([0])
inundation = np.array([1])
result = nl.calculate(soil_code, msw, nitrogen_deposition,
nitrogen_animal, nitrogen_fertilizer, management,
inundation)
np.testing.assert_equal(np.array([5]), result)
def test_nutrient_level_testcase(self):
nl = niche_vlaanderen.NutrientLevel()
soil_code = raster_to_numpy("testcase/zwarte_beek/input/soil_code.asc")
soil_code_r = soil_code
soil_code_r[soil_code > 0] = np.round(soil_code / 10000)[soil_code > 0]
input_dir = "testcase/zwarte_beek/input/"
msw = raster_to_numpy(input_dir + "msw.asc")
nitrogen_deposition = \
raster_to_numpy(input_dir + "nitrogen_atmospheric.asc")
nitrogen_animal = raster_to_numpy(input_dir + "nullgrid.asc")
nitrogen_fertilizer = raster_to_numpy(input_dir + "nullgrid.asc")
inundation = raster_to_numpy(input_dir + "inundation.asc")
management = raster_to_numpy(input_dir + "management.asc")
nutrient_level = \
raster_to_numpy("testcase/zwarte_beek/abiotic/nutrient_level.asc")
# convert nodata value from -99 to 255
nutrient_level[nutrient_level == -99] = 255
result = nl.calculate(soil_code_r, msw, nitrogen_deposition,
nitrogen_animal, nitrogen_fertilizer, management,
inundation)
np.testing.assert_equal(nutrient_level, result)
``` |
{
"source": "johanvdw/rasterio",
"score": 3
} |
#### File: rasterio/rasterio/crs.py
```python
from rasterio._base import is_geographic_crs, is_projected_crs
from rasterio.five import string_types
def to_string(crs):
"""Turn a parameter mapping into a more conventional PROJ.4 string.
Mapping keys are tested against the ``all_proj_keys`` list. Values of
``True`` are omitted, leaving the key bare: {'no_defs': True} -> "+no_defs"
and items where the value is otherwise not a str, int, or float are
omitted.
"""
items = []
for k, v in sorted(filter(
lambda x: x[0] in all_proj_keys and x[1] is not False and (
isinstance(x[1], (bool, int, float)) or
isinstance(x[1], string_types)),
crs.items() )):
items.append(
"+" + "=".join(
map(str, filter(
lambda y: (y or y == 0) and y is not True, (k, v)))) )
return " ".join(items)
def from_string(prjs):
"""Turn a PROJ.4 string into a mapping of parameters.
Bare parameters like "+no_defs" are given a value of ``True``. All keys
are checked against the ``all_proj_keys`` list.
"""
parts = [o.lstrip('+') for o in prjs.strip().split()]
def parse(v):
if v in ('True', 'true'):
return True
elif v in ('False', 'false'):
return False
else:
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
items = map(
lambda kv: len(kv) == 2 and (kv[0], parse(kv[1])) or (kv[0], True),
(p.split('=') for p in parts) )
return dict((k,v) for k, v in items if k in all_proj_keys)
def from_epsg(code):
"""Given an integer code, returns an EPSG-like mapping.
Note: the input code is not validated against an EPSG database.
"""
if int(code) <= 0:
raise ValueError("EPSG codes are positive integers")
return {'init': "epsg:%s" % code, 'no_defs': True}
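# Illustrative round trip (not part of the upstream module): the mapping and
# PROJ.4 string forms are interchangeable for the known keys listed below.
# from_epsg(4326) == {'init': 'epsg:4326', 'no_defs': True}
# to_string({'proj': 'longlat', 'ellps': 'WGS84', 'no_defs': True})
#   == '+ellps=WGS84 +no_defs +proj=longlat'
# from_string('+proj=longlat +ellps=WGS84 +no_defs')
#   == {'proj': 'longlat', 'ellps': 'WGS84', 'no_defs': True}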
# Below is the big list of PROJ4 parameters from
# http://trac.osgeo.org/proj/wiki/GenParms.
# It is parsed into a list of parameter keys ``all_proj_keys``.
_param_data = """
+a Semimajor radius of the ellipsoid axis
+alpha ? Used with Oblique Mercator and possibly a few others
+axis Axis orientation (new in 4.8.0)
+b Semiminor radius of the ellipsoid axis
+datum Datum name (see `proj -ld`)
+ellps Ellipsoid name (see `proj -le`)
+init Initialize from a named CRS
+k Scaling factor (old name)
+k_0 Scaling factor (new name)
+lat_0 Latitude of origin
+lat_1 Latitude of first standard parallel
+lat_2 Latitude of second standard parallel
+lat_ts Latitude of true scale
+lon_0 Central meridian
+lonc ? Longitude used with Oblique Mercator and possibly a few others
+lon_wrap Center longitude to use for wrapping (see below)
+nadgrids Filename of NTv2 grid file to use for datum transforms (see below)
+no_defs Don't use the /usr/share/proj/proj_def.dat defaults file
+over Allow longitude output outside -180 to 180 range, disables wrapping (see below)
+pm Alternate prime meridian (typically a city name, see below)
+proj Projection name (see `proj -l`)
+south Denotes southern hemisphere UTM zone
+to_meter Multiplier to convert map units to 1.0m
+towgs84 3 or 7 term datum transform parameters (see below)
+units meters, US survey feet, etc.
+vto_meter vertical conversion to meters.
+vunits vertical units.
+x_0 False easting
+y_0 False northing
+zone UTM zone
+a Semimajor radius of the ellipsoid axis
+alpha ? Used with Oblique Mercator and possibly a few others
+azi
+b Semiminor radius of the ellipsoid axis
+belgium
+beta
+czech
+e Eccentricity of the ellipsoid = sqrt(1 - b^2/a^2) = sqrt( f*(2-f) )
+ellps Ellipsoid name (see `proj -le`)
+es Eccentricity of the ellipsoid squared
+f Flattening of the ellipsoid (often presented as an inverse, e.g. 1/298)
+gamma
+geoc
+guam
+h
+k Scaling factor (old name)
+K
+k_0 Scaling factor (new name)
+lat_0 Latitude of origin
+lat_1 Latitude of first standard parallel
+lat_2 Latitude of second standard parallel
+lat_b
+lat_t
+lat_ts Latitude of true scale
+lon_0 Central meridian
+lon_1
+lon_2
+lonc ? Longitude used with Oblique Mercator and possibly a few others
+lsat
+m
+M
+n
+no_cut
+no_off
+no_rot
+ns
+o_alpha
+o_lat_1
+o_lat_2
+o_lat_c
+o_lat_p
+o_lon_1
+o_lon_2
+o_lon_c
+o_lon_p
+o_proj
+over
+p
+path
+proj Projection name (see `proj -l`)
+q
+R
+R_a
+R_A Compute radius such that the area of the sphere is the same as the area of the ellipsoid
+rf Reciprocal of the ellipsoid flattening term (e.g. 298)
+R_g
+R_h
+R_lat_a
+R_lat_g
+rot
+R_V
+s
+south Denotes southern hemisphere UTM zone
+sym
+t
+theta
+tilt
+to_meter Multiplier to convert map units to 1.0m
+units meters, US survey feet, etc.
+vopt
+W
+westo
+x_0 False easting
+y_0 False northing
+zone UTM zone
"""
_lines = filter(lambda x: len(x) > 1, _param_data.split("\n"))
all_proj_keys = list(
set(line.split()[0].lstrip("+").strip() for line in _lines)
) + ['no_mayo']
```
#### File: rasterio/tests/test_rio_helpers.py
```python
from rasterio.rio import helpers
def test_resolve_files_inout__output():
assert helpers.resolve_inout(input='in', output='out') == ('out', ['in'])
def test_resolve_files_inout__input():
assert helpers.resolve_inout(input='in') == (None, ['in'])
def test_resolve_files_inout__inout_files():
assert helpers.resolve_inout(files=('a', 'b', 'c')) == ('c', ['a', 'b'])
def test_resolve_files_inout__inout_files_output_o():
assert helpers.resolve_inout(
files=('a', 'b', 'c'), output='out') == ('out', ['a', 'b', 'c'])
```
#### File: rasterio/tests/test_sampling.py
```python
import rasterio
def test_sampling():
with rasterio.open('tests/data/RGB.byte.tif') as src:
data = next(src.sample([(220650.0, 2719200.0)]))
assert list(data) == [18, 25, 14]
def test_sampling_beyond_bounds():
with rasterio.open('tests/data/RGB.byte.tif') as src:
data = next(src.sample([(-10, 2719200.0)]))
assert list(data) == [0, 0, 0]
def test_sampling_indexes():
with rasterio.open('tests/data/RGB.byte.tif') as src:
data = next(src.sample([(220650.0, 2719200.0)], indexes=[2]))
assert list(data) == [25]
``` |
{
"source": "johanvdw/scikit-gstat",
"score": 3
} |
#### File: scikit-gstat/skgstat/binning.py
```python
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from scipy.optimize import minimize, OptimizeWarning
from skgstat.util import shannon_entropy
def even_width_lags(distances, n, maxlag):
"""Even lag edges
Calculate the lag edges for a given amount of bins using the same lag
step width for all bins.
.. versionchanged:: 0.3.8
Function returns `None` as second value to indicate that
The number of lag classes was not changed
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
Amount of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
"""
# maxlags larger than the maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
return np.linspace(0, maxlag, n + 1)[1:], None
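# Worked example (illustrative): for distances with a maximum of 100 and n=5,
# the upper edges are np.linspace(0, 100, 6)[1:] == [20., 40., 60., 80., 100.],
# i.e. five classes of equal width 20.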
def uniform_count_lags(distances, n, maxlag):
"""Uniform lag counts
Calculate the lag edges for a given amount of bins with the same amount
of observations in each lag class. The lag step width will be variable.
.. versionchanged:: 0.3.8
Function returns `None` as second value to indicate that
The number of lag classes was not changed
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
Amount of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
"""
# maxlags larger than the maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
# filter for distances < maxlag
d = distances[np.where(distances <= maxlag)]
return np.fromiter(
(np.nanpercentile(d, (i / n) * 100) for i in range(1, n + 1)),
dtype=float
), None
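# Worked example (illustrative): for distances 1..10 and n=2, the edges are
# the 50th and 100th percentiles (5.5 and 10.0), so both classes contain the
# same number of point pairs while their widths differ.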
def auto_derived_lags(distances, method_name, maxlag):
"""Derive bins automatically
.. versionadded:: 0.3.8
Uses `histogram_bin_edges <numpy.histogram_bin_edges>` to derive the
lag classes automatically. Supports any method supported by
`histogram_bin_edges <numpy.histogram_bin_edges>`. It is recommended
to use `'sturges'`, `'doane'` or `'fd'`.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
maxlag : integer, float
Limit the last lag class to this separating distance.
method_name : str
Any method supported by
`histogram_bin_edges <numpy.histogram_bin_edges>`
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
See Also
--------
numpy.histogram_bin_edges
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
# filter for distances < maxlag
d = distances[np.where(distances <= maxlag)]
# calculate the edges
edges = np.histogram_bin_edges(d, bins=method_name)[1:]
return edges, len(edges)
def kmeans(distances, n, maxlag, binning_random_state=42, **kwargs):
"""KMeans binning
.. versionadded:: 0.3.9
Clustering of pairwise separating distances between locations up to
maxlag. The lag class edges are formed equidistant from each cluster
    center. Note: this does not necessarily result in equidistant lag classes.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
        Number of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
Note
----
The `KMeans <sklearn.cluster.KMeans>` that is used under the hood is not
a deterministic algorithm, as the starting cluster centroids are seeded
    randomly. This can yield slightly different results on each run.
Thus, for this application, the random_state on KMeans is fixed to a
specific value. You can change the seed by passing another seed to
`Variogram <skgstat.Variogram>` as `binning_random_state`.
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
# filter for distances < maxlag
d = distances[np.where(distances <= maxlag)]
# cluster the filtered distances
km = KMeans(n_clusters=n, random_state=binning_random_state).fit(d.reshape(-1, 1))
# get the centers
_centers = np.sort(km.cluster_centers_.flatten())
# build the upper edges
bounds = zip([0] + list(_centers)[:-1], _centers)
edges = np.fromiter(((low + up) / 2 for low, up in bounds), dtype=float)
return edges, None
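# Usage sketch (illustrative only): each returned edge is the midpoint between
# a sorted cluster center and its predecessor (zero for the first center), e.g.
#
#     edges, _ = kmeans(distances, 10, 100, binning_random_state=42)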
def ward(distances, n, maxlag, **kwargs):
"""Agglomerative binning
.. versionadded:: 0.3.9
Clustering of pairwise separating distances between locations up to
maxlag. The lag class edges are formed equidistant from each cluster
    center. Note: this does not necessarily result in equidistant lag classes.
The clustering is done by merging pairs of clusters that minimize the
    variance for the merged clusters, until `n` clusters are found.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
        Number of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
# filter for distances < maxlag
d = distances[np.where(distances <= maxlag)]
# cluster the filtered distances
w = AgglomerativeClustering(linkage='ward', n_clusters=n).fit(d.reshape(-1, 1))
# get the aggregation function
if kwargs.get('binning_agg_func', False) == 'median':
agg = np.median
else:
agg = np.mean
# get the centers
_centers = np.sort([agg(d[np.where(w.labels_ == i)[0]]) for i in np.unique(w.labels_)])
# build the upper edges
bounds = zip([0] + list(_centers)[:-1], _centers)
edges = np.fromiter(((low + up) / 2 for low, up in bounds), dtype=float)
return edges, None
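# Usage sketch (illustrative only): by default each cluster is aggregated by
# its mean; pass binning_agg_func='median' to use the median instead, e.g.
#
#     edges, _ = ward(distances, 10, 100, binning_agg_func='median')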
def stable_entropy_lags(distances, n, maxlag, **kwargs):
"""Stable lags
    .. versionadded:: 0.4.0
Optimizes the lag class edges for `n` lag classes.
    The algorithm minimizes the difference in Shannon
    entropy between the lag classes. Consequently, the final
lag classes should be of comparable uncertainty.
Parameters
----------
distances : numpy.array
Flat numpy array representing the upper triangle of
the distance matrix.
n : integer
        Number of lag classes to find
maxlag : integer, float
Limit the last lag class to this separating distance.
Keyword Arguments
-----------------
binning_maxiter : int
Maximum iterations before the optimization is stopped,
if the lag edges do not converge.
binning_entropy_bins : int, str
Binning method for calculating the shannon entropy
on each iteration.
Returns
-------
bin_edges : numpy.ndarray
The **upper** bin edges of the lag classes
"""
# maxlags larger than maximum separating distance will be ignored
if maxlag is None or maxlag > np.nanmax(distances):
maxlag = np.nanmax(distances)
# filter for distances < maxlag
d = distances[np.where(distances <= maxlag)]
# create a global binning and initial guess
bins = np.histogram_bin_edges(d, bins=kwargs.get('binning_entropy_bins', 'sqrt'))
initial_guess = np.linspace(0, np.nanmax(d), n + 1)[1:]
# define the loss function
def loss(edges):
# get the shannon entropy for the current binning
h = np.ones(len(edges) - 1) * 9999
for i, bnd in enumerate(zip(edges, edges[1:])):
l, u = bnd
x = d[np.where((d >= l) & (d < u))[0]]
if len(x) == 0:
continue
else:
h[i] = shannon_entropy(x, bins)
        # return the sum of absolute entropy differences between neighboring bins
return np.sum(np.abs(np.diff(h)))
# minimize the loss function
opt = dict(maxiter=kwargs.get('binning_maxiter', 5000))
res = minimize(loss, initial_guess, method='Nelder-Mead', options=opt)
if res.success:
return res.x, None
else: # pragma: no cover
raise OptimizeWarning("Failed to find optimal lag classes.")
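if __name__ == '__main__':  # pragma: no cover
    # Minimal comparison sketch (illustrative only, uses synthetic data): run a
    # few of the binning strategies defined above on random pairwise distances.
    rng = np.random.default_rng(42)
    dists = rng.gamma(shape=2.0, scale=15.0, size=2000)
    for name, (edges, _) in (
        ('even width', even_width_lags(dists, 10, 100.0)),
        ('uniform count', uniform_count_lags(dists, 10, 100.0)),
        ('kmeans', kmeans(dists, 10, 100.0)),
        ('sturges (auto)', auto_derived_lags(dists, 'sturges', 100.0)),
    ):
        print(f'{name:>14}: {len(edges)} classes, upper edges {np.round(edges, 1)}')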
``` |