{
"source": "joesider9/forecasting_library",
"score": 2
}
#### File: version2/cluster_predict_manager/rbf_cnn_model.py
```python
import os
import numpy as np
from Fuzzy_clustering.version2.cluster_predict_manager.deep_models import model3d
from Fuzzy_clustering.version2.rbf_ols_manager.rbf_ols import rbf_ols_module
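# Combines the trained RBF predictors of a cluster (plain OLS, GA-optimised OLS and the
# RBFNN deep model) and, when cnn=True, the RBF-CNN model, stacking their predictions
# column-wise in predict().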
class RBF_CNN_model(object):
def __init__(self, static_data, cluster, cnn=False):
self.static_data = static_data
self.cluster_dir = cluster.cluster_dir
self.cnn = cnn
self.model_dir_rbfols = os.path.join(self.cluster_dir, 'RBF_OLS')
self.model_rbf_ols = rbf_ols_module(self.static_data, self.model_dir_rbfols, self.static_data['rated'],
self.static_data['sklearn']['njobs'], GA=False)
self.model_rbf_ga = rbf_ols_module(self.static_data, self.model_dir_rbfols, self.static_data['rated'],
self.static_data['sklearn']['njobs'],
GA=True)
self.model_rbf_nn = model3d(self.static_data, cluster, 'RBFNN')
        if not self.model_rbf_nn.istrained:
            raise ImportError('Cannot find RBFNN model for cluster %s of project %s'
                              % (cluster.cluster_name, static_data['_id']))
        if not self.model_rbf_ols.istrained:
            raise ImportError('Cannot find RBF_OLS model for cluster %s of project %s'
                              % (cluster.cluster_name, static_data['_id']))
        if not self.model_rbf_ga.istrained:
            raise ImportError('Cannot find RBF_GA_OLS model for cluster %s of project %s'
                              % (cluster.cluster_name, static_data['_id']))
        if cnn:
            self.model_cnn = model3d(self.static_data, cluster, 'RBF-CNN')
            if not self.model_cnn.istrained:
                raise ImportError('Cannot find RBF_CNN model for cluster %s of project %s'
                                  % (cluster.cluster_name, static_data['_id']))
def predict(self, X):
        if self.model_rbf_ols.istrained:
            pred1 = self.model_rbf_ols.predict(X)
        if self.model_rbf_ga.istrained:
            pred2 = self.model_rbf_ga.predict(X)
        if self.model_rbf_nn.istrained:
            pred3 = self.model_rbf_nn.predict(X)
        # Clip every prediction to the normalised output range [0, 1].
        pred1 = np.clip(pred1, 0, 1)
        pred2 = np.clip(pred2, 0, 1)
        pred3 = np.clip(pred3, 0, 1)
rbf_models = [self.model_rbf_ols.models, self.model_rbf_ga.models, self.model_rbf_nn.model]
if self.cnn:
            if self.model_cnn.istrained:
                pred4 = self.model_cnn.predict(X, rbf_models=rbf_models)
                pred4 = np.clip(pred4, 0, 1)
return np.hstack([pred1, pred2, pred3, pred4])
else:
return np.hstack([pred1, pred2, pred3])
```
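A minimal usage sketch for `RBF_CNN_model`, assuming the cluster's RBF (and optionally RBF-CNN) models have already been trained by the rest of the library; the `static_data` dict and the stand-in cluster object below are hypothetical placeholders carrying only the keys this class touches, so treat it purely as an illustration of the call pattern.
```python
import numpy as np

class FakeCluster:  # hypothetical stand-in for the library's cluster object
    cluster_dir = '/tmp/my_project/cluster_0'  # assumed path holding the trained RBF/CNN models
    cluster_name = 'cluster_0'

# Assumed minimal configuration; the real static_data dict carries many more keys.
static_data = {'rated': 1.0, 'sklearn': {'njobs': 1}, '_id': 'my_project'}

model = RBF_CNN_model(static_data, FakeCluster(), cnn=False)  # raises ImportError if models are missing
X = np.random.rand(10, 24)  # feature matrix; the true width depends on how the cluster's inputs were built
preds = model.predict(X)    # one column per RBF predictor (plus an RBF-CNN column when cnn=True)
print(preds.shape)
```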
#### File: version2/dataset_manager/create_dataset_for_load.py
```python
import logging
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from scipy.interpolate import interp2d
from workalendar.europe import Portugal, Greece
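# Helper functions: rescale() resamples a 2-D NWP grid to a fixed (nrows, ncol) shape with
# bilinear interpolation, while stack_2d() / stack_3d() accumulate per-timestamp samples
# along a new leading axis.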
def rescale(arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
def stack_2d(X, sample, compress):
if compress:
sample = rescale(sample, 8, 8)
if len(sample.shape) == 3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape) == 2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
return X
def stack_3d(X, sample):
if X.shape[0] == 0:
X = sample
elif len(sample.shape) != len(X.shape):
X = np.vstack((X, sample[np.newaxis]))
else:
X = np.vstack((X, sample))
return X
class st_miguel(Portugal):
FIXED_HOLIDAYS = Portugal.FIXED_HOLIDAYS + (
(4, 11, "<NAME>"),
(7, 18, "Dia de Portugal"),
(6, 29, " <NAME>"),
)
def get_fixed_holidays(self, year):
days = super().get_fixed_holidays(year)
return days
def get_variable_days(self, year):
days = super().get_variable_days(year)
if year > 2015 or year < 2013:
days.append((self.get_easter_sunday(year) + pd.DateOffset(days=36), "Santo Cristo"))
days.append((self.get_easter_sunday(year) + pd.DateOffset(days=50), "Pombinha"))
days.append((self.get_easter_sunday(year) + pd.DateOffset(days=64), "Dia do Corpo de Deus"))
return days
def get_extras(self, year):
days = []
days.append(self.get_easter_sunday(year) + pd.DateOffset(days=36))
days.append(self.get_easter_sunday(year) + pd.DateOffset(days=50))
days.append(self.get_easter_sunday(year) + pd.DateOffset(days=64))
return days
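# Builds the load-forecasting datasets: reads the daily NWP pickles, extracts area-averaged
# weather features, adds calendar and special-day indices plus lagged load values, and writes
# dataset_X / dataset_y / LSTM arrays to each project's data path.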
class dataset_creator_load():
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False):
self.projects = projects
self.isfortest = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.create_logger()
self.check_dates()
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
if self.nwp_resolution == 0.05:
self.compress = True
else:
self.compress = False
self.njobs = njobs
self.variables = data_variables
def create_logger(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(
os.path.join(os.path.dirname(self.path_nwp), 'log_' + self.projects_group + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
        self.logger.info('Dates checked. Number of time samples: %s', str(len(dates)))
self.dates = pd.DatetimeIndex(dates)
def check_empty_nwp(self, nwp, variables):
flag = True
for var in variables:
if nwp[var].shape[0] == 0:
flag = False
break
return flag
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, compress):
X = dict()
fname = os.path.join(path_nwp, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H').strftime(
'%d%m%y%H%M')
for project in projects:
X[project['_id']] = pd.DataFrame()
areas = project['static_data']['areas']
x = pd.DataFrame()
for date in pdates:
try:
nwp = nwps[date]
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
date = pd.to_datetime(date, format='%d%m%y%H%M')
if self.check_empty_nwp(nwp, variables):
inp = self.create_sample_nwp(date, nwp, lats[project['_id']], longs[project['_id']])
x = pd.concat([x, inp])
                    except Exception:
                        continue
if x.shape[0] > 0:
X[project['_id']] = x
cols = ['Temp' + '_' + area for area in lats[project['_id']].keys()]
X[project['_id']]['Temp_max'] = x[cols].mean(axis=1).max()
X[project['_id']]['Temp_min'] = x[cols].mean(axis=1).min()
print(t.strftime('%d%m%y%H%M'), ' extracted')
return (X, t.strftime('%d%m%y%H%M'))
def lats_longs(self):
lats = dict()
longs = dict()
flag = False
for t in self.dates:
fname = os.path.join(self.path_nwp, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=48), freq='H').strftime(
'%d%m%y%H%M')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
flag = True
break
                    except Exception:
                        continue
if flag:
break
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2, resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2, resolution).reshape(-1, 1).T
for project in self.projects:
areas = project['static_data']['areas']
if isinstance(areas, list):
lats[project['_id']] = \
(np.where((nwp['lat'][:, 0] >= areas[0][0]) & (nwp['lat'][:, 0] <= areas[1][0])))[0]
longs[project['_id']] = \
(np.where((nwp['long'][0, :] >= areas[0][1]) & (nwp['long'][0, :] <= areas[1][1])))[
0]
else:
lats[project['_id']] = dict()
longs[project['_id']] = dict()
for area in sorted(areas.keys()):
lats[project['_id']][area] = \
(np.where((nwp['lat'][:, 0] >= areas[area][0][0]) & (nwp['lat'][:, 0] <= areas[area][1][0])))[0]
longs[project['_id']][area] = \
(np.where((nwp['long'][0, :] >= areas[area][0][1]) & (nwp['long'][0, :] <= areas[area][1][1])))[
0]
return lats, longs
def make_dataset_load_short_term(self):
X = dict()
project = self.projects[0]
X[project['_id']] = pd.DataFrame()
file_nwp = 'weather_data_test.csv'
X[project['_id']] = pd.read_csv(os.path.join(project['static_data']['path_data'], file_nwp), header=0,
index_col=0, parse_dates=True, dayfirst=True)
data_path = project['static_data']['path_data']
predictions = dict()
predictions[project['_id']] = joblib.load(os.path.join(project['static_data']['path_data']
, 'predictions_short_term.pickle'))
dataset_X, dataset_y, X_3d = self.create_dataset_short_term_eval(X[project['_id']], predictions[project['_id']]
, data_path, start_index=9001,
test=self.isfortest)
if dataset_y.isna().any().values[0]:
dataset_X = dataset_X.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if len(X_3d.shape) > 1:
X_3d = np.delete(X_3d, np.where(dataset_y.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if dataset_X.isna().any().values[0]:
dataset_y = dataset_y.drop(dataset_X.index[np.where(dataset_X.isna())[0]])
if len(X_3d.shape) > 1:
X_3d = np.delete(X_3d, np.where(dataset_X.isna())[0], axis=0)
dataset_X = dataset_X.drop(dataset_X.index[np.where(dataset_X.isna())[0]])
dataset_X.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_y_test.csv'))
joblib.dump(X_3d, os.path.join(project['static_data']['path_data'], 'dataset_lstm_test.pickle'))
def make_dataset_load(self):
X = dict()
for project in self.projects:
X[project['_id']] = pd.DataFrame()
if self.isfortest:
file_nwp = 'weather_data_test.csv'
else:
file_nwp = 'weather_data.csv'
if not os.path.exists(os.path.join(self.projects[0]['static_data']['path_data'], file_nwp)):
lats, longs = self.lats_longs()
nwp = self.stack_daily_nwps(self.dates[-1], self.data, lats, longs, self.path_nwp, self.nwp_model,
self.projects, self.variables,
self.compress)
nwp_daily = Parallel(n_jobs=self.njobs)(
delayed(self.stack_daily_nwps)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables,
self.compress) for t in self.dates)
for nwp in nwp_daily:
for project in self.projects:
if nwp[0][project['_id']].shape[0] != 0:
X[project['_id']] = pd.concat([X[project['_id']], nwp[0][project['_id']]])
self.logger.info('All Inputs stacked for date %s', nwp[1])
for project in self.projects:
X[project['_id']].to_csv(os.path.join(project['static_data']['path_data'], file_nwp))
else:
for project in self.projects:
X[project['_id']] = pd.read_csv(os.path.join(project['static_data']['path_data'], file_nwp), header=0,
index_col=0, parse_dates=True, dayfirst=True)
for project in self.projects:
data_path = project['static_data']['path_data']
if self.isfortest:
if project['static_data']['horizon'] == 'day_ahead':
dataset_X, dataset_y, X_3d = self.create_dataset(X[project['_id']], data_path, start_index = 9001,
test=self.isfortest)
elif project['static_data']['horizon'] == 'short-term':
dataset_X, dataset_y, X_3d = self.create_dataset_short_term(X[project['_id']], data_path, start_index = 200,
test=self.isfortest)
if dataset_y.isna().any().values[0]:
dataset_X = dataset_X.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if len(X_3d.shape) > 1:
X_3d = np.delete(X_3d, np.where(dataset_y.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
dataset_X.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_y_test.csv'))
joblib.dump(X_3d, os.path.join(project['static_data']['path_data'], 'dataset_lstm_test.pickle'))
self.logger.info('Datasets saved for project %s', project['_id'])
else:
if project['static_data']['horizon'] == 'day_ahead':
dataset_X, dataset_y, X_3d = self.create_dataset(X[project['_id']], data_path, start_index=9001,
test=self.isfortest)
elif project['static_data']['horizon'] == 'short-term':
dataset_X, dataset_y, X_3d = self.create_dataset_short_term(X[project['_id']], data_path, start_index = 200,
test=self.isfortest)
if dataset_y.isna().any().values[0]:
dataset_X = dataset_X.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if len(X_3d.shape) > 1:
X_3d = np.delete(X_3d, np.where(dataset_y.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
dataset_X.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(project['static_data']['path_data'], 'dataset_y.csv'))
joblib.dump(X_3d, os.path.join(project['static_data']['path_data'], 'dataset_lstm.pickle'))
self.logger.info('Datasets saved for project %s', project['_id'])
def sp_index(self, r):
        # Special-day index (modified): 100 on holidays, 50 on Sundays, 0 otherwise.
if self.projects[0]['_id'] == 'St_Miguel':
cal = st_miguel()
extra = cal.get_extras(r.year)
if cal.is_holiday(r, extra):
sp = 100
else:
if r.dayofweek == 6:
sp = 50
else:
sp = 0
else:
cal = Greece()
# {'New year','Epiphany','Clean Monday','Independence Day','Good Friday','Easter Saturday','Easter Sunday',
# 'Easter Monday','Labour Day','Pentecost','Whit Monday','Assumption of Mary to Heaven','Ohi Day',
# 'Christmas Eve','Christmas Day','Glorifying Mother of God','Last day of year'}
if cal.is_holiday(r):
sp = 100
else:
if r.dayofweek == 6:
sp = 50
else:
sp = 0
return sp
def create_dataset(self, nwps, data_path, start_index=9001, test=False):
self.data['dayweek'] = self.data.index.dayofweek
self.data['month'] = self.data.index.month
self.data['hour'] = self.data.index.hour
self.data['sp_index'] = [self.sp_index(d) for d in self.data.index]
col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(columns=['Temp_max']).columns.tolist() + [
'Temp_month', 'Temp_sp_days']
col += ['load_' + str(i) for i in range(43)]
col += ['load_' + str(i) for i in range(43, 51)]
col += ['Temp_max_' + str(i) for i in range(7)]
col += ['Temp_min_' + str(i) for i in range(7)]
col += ['sp_index_' + str(i) for i in range(7)]
dataset = pd.DataFrame(columns=col)
target = pd.DataFrame(columns=['target'])
dataset_3d = np.array([])
nwps_lstm = nwps.copy(deep=True)
for var in self.variables:
if var == 'WS':
var = 'wind'
elif var == 'WD':
var = 'direction'
elif var == 'Temperature':
var = 'Temp'
cols = [col for col in nwps.columns if str.lower(var) in str.lower(col)]
nwps_lstm[str.lower(var)] = nwps_lstm[cols].mean(axis=1).values
lags1 = np.hstack(
[np.arange(48, 75), np.arange(95, 98), 96, 120, 144, np.arange(166, 175), 192, ])
lags2 = np.hstack(
[np.arange(8735, 8741), 8760, 8736 + 168])
lags_days = np.arange(1, 8)
for date in self.data.index[start_index:]:
print('Input for ', date)
date_inp1 = [date - pd.DateOffset(hours=int(l)) for l in lags1]
date_inp2 = [date - pd.DateOffset(hours=int(l)) for l in lags2]
date_days = [date - pd.DateOffset(days=int(l)) for l in lags_days]
try:
temp_max = nwps[['Temp_max']].loc[date].values
var_imp = np.hstack((temp_max, self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
nwps.drop(columns=['Temp_max']).loc[date].values,
np.power(self.data['month'].loc[date] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(
columns=['Temp_max']).columns.tolist() + ['Temp_month', 'Temp_sp_days']
var_unimp = np.hstack((
self.data.loc[date_inp1, self.projects[0]['_id']].values,
self.data.loc[date_inp2, self.projects[0]['_id']].values,
nwps.loc[date_days, 'Temp_max'].values,
nwps.loc[date_days, 'Temp_min'].values,
[self.sp_index(d) for d in date_days]
))
col += ['load_' + str(i) for i in range(43)]
col += ['load_' + str(i) for i in range(43, 51)]
col += ['Temp_max_' + str(i) for i in range(7)]
col += ['Temp_min_' + str(i) for i in range(7)]
col += ['sp_index_' + str(i) for i in range(7)]
temp_max = nwps[['Temp_max']].loc[date].values
var_3d = np.hstack((np.array([0]),
nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min',
'Temp_' + self.projects[0]['_id']]].loc[
date].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
np.power(self.data['month'].loc[date] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
for d in date_inp1:
temp_max = nwps[['Temp_max']].loc[d].values
v = np.hstack(
(self.data.loc[d, self.projects[0]['_id']],
nwps_lstm[
['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min',
'Temp_' + self.projects[0]['_id']]].loc[d].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
np.power(self.data['month'].loc[d] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
var_3d = np.vstack((var_3d, v))
            except Exception:
                continue
inp = np.hstack((var_imp, var_unimp))
inp1 = pd.DataFrame(inp.reshape(-1, 1).T, index=[date], columns=col)
targ1 = pd.DataFrame(self.data[self.projects[0]['_id']].loc[date], index=[date], columns=['target'])
if not inp1.isnull().any(axis=1).values and not targ1.isnull().any().values:
dataset = pd.concat([dataset, inp1])
target = pd.concat([target, targ1])
if dataset_3d.shape[0] == 0:
dataset_3d = var_3d
elif len(dataset_3d.shape) == 2:
dataset_3d = np.stack((dataset_3d, var_3d))
else:
dataset_3d = np.vstack((dataset_3d, var_3d[np.newaxis, :, :]))
if not test:
corr = []
for f in range(dataset.shape[1]):
corr.append(np.abs(np.corrcoef(dataset.values[:, f], target.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset.columns[ind]
dataset = dataset[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
else:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset.columns[ind]
dataset = dataset[columns]
return dataset, target, dataset_3d
    def smooth(self, x, window_len=11, window='hanning'):
        # Reflect the signal at both ends so the convolution window stays full-length.
        s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
        if window == 'flat':  # moving average
            w = np.ones(window_len, 'd')
        else:
            # Look the window function up on numpy (np.hanning, np.hamming, ...) instead of eval().
            w = getattr(np, window)(window_len)
        y = np.convolve(w / w.sum(), s, mode='valid')
        return y
def create_dataset_short_term(self, nwps, data_path, start_index=9001, test=False):
self.data['dayweek'] = self.data.index.dayofweek
self.data['month'] = self.data.index.month
self.data['hour'] = self.data.index.hour
self.data['sp_index'] = [self.sp_index(d) for d in self.data.index]
col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(columns=['Temp_max']).columns.tolist() + [
'Temp_month', 'Temp_sp_days']
col += ['load_' + str(i) for i in range(30)]
col += ['Temp_max_' + str(i) for i in range(7)]
col += ['Temp_min_' + str(i) for i in range(7)]
col += ['sp_index_' + str(i) for i in range(7)]
dataset = pd.DataFrame(columns=col)
target = pd.DataFrame(columns=['target'])
dataset_3d = np.array([])
nwps_lstm = nwps.copy(deep=True)
for var in self.variables:
if var == 'WS':
var = 'wind'
elif var == 'WD':
var = 'direction'
elif var == 'Temperature':
var = 'Temp'
cols = [col for col in nwps.columns if str.lower(var) in str.lower(col)]
nwps_lstm[str.lower(var)] = nwps_lstm[cols].mean(axis=1).values
res = self.create_inp(self.data.index[start_index], nwps, nwps_lstm)
        results = Parallel(n_jobs=self.njobs)(
delayed(self.create_inp)(t, nwps, nwps_lstm) for t in self.data.index[start_index:])
for res in results:
if len(res[0].shape) > 1:
inp1 = res[0]
targ1 = res[1]
var_3d = res[2]
if not inp1.isnull().any(axis=1).values and not targ1.isnull().any().values:
dataset = pd.concat([dataset, inp1])
target = pd.concat([target, targ1])
if dataset_3d.shape[0] == 0:
dataset_3d = var_3d
elif len(dataset_3d.shape) == 2:
dataset_3d = np.stack((dataset_3d, var_3d))
else:
dataset_3d = np.vstack((dataset_3d, var_3d[np.newaxis, :, :]))
if not test:
corr = []
for f in range(dataset.shape[1]):
corr.append(np.abs(np.corrcoef(dataset.values[:, f], target.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset.columns[ind]
dataset = dataset[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
else:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset.columns[ind]
dataset = dataset[columns]
return dataset, target, dataset_3d
def create_inp(self, date, nwps, nwps_lstm):
print('Input for ', date)
lags1 = np.hstack(
[np.arange(1, 4), np.arange(4, 12), np.arange(22, 26), np.arange(47, 52), np.arange(166, 175), 192, ])
lags_days = np.arange(1, 8)
date_inp1 = [date - pd.DateOffset(hours=int(l)) for l in lags1]
date_days = [date.round('H') - pd.DateOffset(days=int(l)) for l in lags_days]
try:
temp_max = nwps[['Temp_max']].loc[date.round('H')].values
var_imp = np.hstack((temp_max, self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
nwps.drop(columns=['Temp_max']).loc[date.round('H')].values,
np.power(self.data['month'].loc[date] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(
columns=['Temp_max']).columns.tolist() + ['Temp_month', 'Temp_sp_days']
var_unimp = np.hstack((
self.data.loc[date_inp1, self.projects[0]['_id']].values,
nwps.loc[date_days, 'Temp_max'].values,
nwps.loc[date_days, 'Temp_min'].values,
[self.sp_index(d) for d in date_days]
))
col += ['load_' + str(i) for i in range(30)]
col += ['Temp_max_' + str(i) for i in range(7)]
col += ['Temp_min_' + str(i) for i in range(7)]
col += ['sp_index_' + str(i) for i in range(7)]
temp_max = nwps[['Temp_max']].loc[date.round('H')].values
var_3d = np.hstack((np.array([0]),
nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min',
'Temp_' + self.projects[0]['_id']]].loc[
date.round('H')].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
np.power(self.data['month'].loc[date] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
for d in date_inp1:
temp_max = nwps[['Temp_max']].loc[d.round('H')].values
v = np.hstack(
(self.data.loc[d, self.projects[0]['_id']],
nwps_lstm[
['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_' + self.projects[0]['_id']]].loc[
d.round('H')].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
np.power(self.data['month'].loc[d] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
var_3d = np.vstack((var_3d, v))
        except Exception:
            return np.array([]), np.array([]), np.array([])
inp = np.hstack((var_imp, var_unimp))
inp1 = pd.DataFrame(inp.reshape(-1, 1).T, index=[date], columns=col)
targ1 = pd.DataFrame(self.data[self.projects[0]['_id']].loc[date], index=[date], columns=['target'])
return inp1, targ1, var_3d
def create_sample_nwp(self, date, nwp, lats, longs):
inp = pd.DataFrame()
for var in sorted(self.variables):
if var in {'WS', 'Flux', 'WD', 'Cloud', 'Temperature'}:
if isinstance(lats, dict) and isinstance(longs, dict):
for area in lats.keys():
X0 = nwp[var][np.ix_(lats[area], longs[area])]
X = np.mean(X0)
if var == 'Flux':
var_name = 'flux'
elif var == 'WS':
var_name = 'wind'
elif var == 'Cloud':
var_name = 'cloud'
elif var == 'Temperature':
var_name = 'Temp'
else:
var_name = 'direction'
col = [var_name + '_' + area]
inp = pd.concat([inp, pd.DataFrame(X.reshape(-1, 1).T, index=[date], columns=col)], axis=1)
else:
X0 = nwp[var][np.ix_(lats, longs)]
X = np.mean(X0)
if var == 'Flux':
var_name = 'flux'
elif var == 'WS':
var_name = 'wind'
elif var == 'Cloud':
var_name = 'cloud'
elif var == 'Temperature':
var_name = 'Temp'
else:
var_name = 'direction'
col = [var_name]
inp = pd.concat([inp, pd.DataFrame(X.reshape(-1, 1).T, index=[date], columns=col)], axis=1)
else:
continue
return inp
def create_dataset_short_term_eval(self, nwps, predictions, data_path, start_index=9001, test=False):
self.data['dayweek'] = self.data.index.dayofweek
self.data['month'] = self.data.index.month
self.data['hour'] = self.data.index.hour
self.data['sp_index'] = [self.sp_index(d) for d in self.data.index]
col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(columns=['Temp_max']).columns.tolist() + [
'Temp_month', 'Temp_sp_days']
col += ['load_' + str(i) for i in range(30)]
col += ['Temp_max_' + str(i) for i in range(7)]
col += ['Temp_min_' + str(i) for i in range(7)]
col += ['sp_index_' + str(i) for i in range(7)]
dataset = pd.DataFrame(columns=col)
target = pd.DataFrame(columns=['target'])
dataset_3d = np.array([])
nwps_lstm = nwps.copy(deep=True)
for var in self.variables:
if var == 'WS':
var = 'wind'
elif var == 'WD':
var = 'direction'
elif var == 'Temperature':
var = 'Temp'
cols = [col for col in nwps.columns if str.lower(var) in str.lower(col)]
nwps_lstm[str.lower(var)] = nwps_lstm[cols].mean(axis=1).values
res = self.create_inp_eval(self.data.index[start_index], nwps, nwps_lstm, predictions)
        results = Parallel(n_jobs=self.njobs)(
delayed(self.create_inp_eval)(t, nwps, nwps_lstm, predictions) for t in self.data.index[start_index:])
for res in results:
if len(res[0].shape) > 1:
inp1 = res[0]
targ1 = res[1]
var_3d = res[2]
if not inp1.isnull().any(axis=1).values and not targ1.isnull().any().values:
dataset = pd.concat([dataset, inp1])
target = pd.concat([target, targ1])
if dataset_3d.shape[0] == 0:
dataset_3d = var_3d
elif len(dataset_3d.shape) == 2:
dataset_3d = np.stack((dataset_3d, var_3d))
else:
dataset_3d = np.vstack((dataset_3d, var_3d[np.newaxis, :, :]))
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset.columns[ind]
dataset = dataset[columns]
return dataset, target, dataset_3d
def create_inp_eval(self, date, nwps, nwps_lstm, predictions):
lags1 = np.hstack(
[np.arange(1, 4), np.arange(4, 12), np.arange(22, 26), np.arange(47, 52), np.arange(166, 175), 192, ])
lags_days = np.arange(1, 8)
timestep = 60
preds = predictions
hor = preds.columns[-1] + timestep
t = date - pd.DateOffset(minutes=hor)
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[self.projects[0]['_id']])
data_temp = pd.concat([self.data[self.projects[0]['_id']].iloc[np.where(self.data.index < t)].to_frame(), pred])
print('Input for ', date)
date_inp1 = [date - pd.DateOffset(hours=int(l)) for l in lags1]
date_days = [date.round('H') - pd.DateOffset(days=int(l)) for l in lags_days]
try:
temp_max = nwps[['Temp_max']].loc[date.round('H')].values
var_imp = np.hstack((temp_max, self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
nwps.drop(columns=['Temp_max']).loc[date.round('H')].values,
np.power(self.data['month'].loc[date] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
col = ['Temp', 'hour', 'month', 'sp_index', 'dayweek'] + nwps.drop(
columns=['Temp_max']).columns.tolist() + ['Temp_month', 'Temp_sp_days']
var_unimp = np.hstack((
data_temp.loc[date_inp1, self.projects[0]['_id']].values,
nwps.loc[date_days, 'Temp_max'].values,
nwps.loc[date_days, 'Temp_min'].values,
[self.sp_index(d) for d in date_days]
))
col += ['load_' + str(i) for i in range(30)]
col += ['Temp_max_' + str(i) for i in range(7)]
col += ['Temp_min_' + str(i) for i in range(7)]
col += ['sp_index_' + str(i) for i in range(7)]
temp_max = nwps[['Temp_max']].loc[date.round('H')].values
var_3d = np.hstack((np.array([0]),
nwps_lstm[['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_' + self.projects[0]['_id']]].loc[
date.round('H')].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[date].values,
np.power(self.data['month'].loc[date] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[date] * temp_max / 100, 3)))
for d in date_inp1:
temp_max = nwps[['Temp_max']].loc[d.round('H')].values
v = np.hstack(
(data_temp.loc[d, self.projects[0]['_id']],
nwps_lstm[
['cloud', 'wind', 'direction', 'Temp_max', 'Temp_min', 'Temp_' + self.projects[0]['_id']]].loc[d.round('H')].values,
self.data[['hour', 'month', 'sp_index', 'dayweek']].loc[d].values,
np.power(self.data['month'].loc[d] * temp_max / 12, 3),
np.power(self.data['sp_index'].loc[d] * temp_max / 100, 3)))
var_3d = np.vstack((var_3d, v))
        except Exception:
            return np.array([]), np.array([]), np.array([])
inp = np.hstack((var_imp, var_unimp))
inp1 = pd.DataFrame(inp.reshape(-1, 1).T, index=[date], columns=col)
targ1 = pd.DataFrame(self.data[self.projects[0]['_id']].loc[date], index=[date], columns=['target'])
return inp1, targ1, var_3d
```
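A hedged sketch of how `dataset_creator_load` might be driven; the project dictionary and the measured-load DataFrame are normally assembled by the library's project-setup step, so every value below is a placeholder and only the constructor arguments and entry points mirror the code above.
```python
import pandas as pd

# Placeholder project configuration: one project dict with the keys this module reads.
projects = [{'_id': 'my_area',
             'static_data': {'path_data': '/tmp/my_area/data',  # hypothetical output folder
                             'horizon': 'day_ahead',
                             'areas': [[37.0, 25.0], [38.0, 26.0]],
                             'area_group': [[36.5, 24.5], [38.5, 26.5]],
                             'NWP_resolution': 0.05}}]
# Hypothetical hourly load measurements indexed by timestamp, one column per project id.
load_data = pd.read_csv('load_measurements.csv', index_col=0, parse_dates=True)

creator = dataset_creator_load(projects_group='my_group',
                               projects=projects,
                               data=load_data,
                               path_nwp='/tmp/nwp',  # folder holding <nwp_model>_<ddmmyy>.pickle files
                               nwp_model='ecmwf',    # tag used in the pickle file names
                               nwp_resolution=0.05,
                               data_variables=['Temperature', 'Cloud', 'WS', 'WD'],
                               njobs=4,
                               test=False)
creator.make_dataset_load()  # writes dataset_X / dataset_y / dataset_lstm files to each project's path_data
```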
#### File: version2/dataset_manager/create_datasets_pca.py
```python
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from pytz import timezone
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import my_scorer
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_daily_nwps
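# Builds per-project datasets whose NWP grid patches are compressed with kernel PCA:
# get_3d_dataset() stacks the daily NWP arrays, train_pca() / pca_transform() fit and apply a
# MinMax-scaled KernelPCA per variable and level, and the dataset_for_*_farm* methods
# assemble the final feature tables.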
class DatasetCreatorPCA:
def __init__(self, project, data=None, n_jobs=1, test=False, dates=None):
        if test is None:
            raise NotImplementedError('test is None for short-term; not implemented for PCA')
self.data = data
self.is_for_test = test
self.project_name = project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = n_jobs
self.variables = self.static_data['data_variables']
self.logger = create_logger(logger_name=f"log_{self.static_data['project_group']}",
abs_path=self.path_nwp_project,
logger_path=f"log_{self.static_data['project_group']}.log", write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
# Extract dates of power measurements.
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
        self.logger.info('Dates checked. Number of time samples: %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def make_dataset_res(self):
nwp_3d_pickle = 'nwps_3d_test.pickle' if self.is_for_test else 'nwps_3d.pickle'
dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
nwp_3d_pickle = os.path.join(self.path_data, nwp_3d_pickle)
dataset_cnn_pickle = os.path.join(self.path_data, dataset_cnn_pickle)
if not (os.path.exists(nwp_3d_pickle) and os.path.exists(dataset_cnn_pickle)):
data, x_3d = self.get_3d_dataset()
else:
data = joblib.load(nwp_3d_pickle)
x_3d = joblib.load(dataset_cnn_pickle) # FIXME: unused variable
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates)
flag = False
for i, p_dates in enumerate(dates_stack):
t = self.dates[i]
file_name = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except Exception:
continue
if flag:
break
self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
            # DatasetCreatorPCA keeps these on the instance (there is no self.projects here).
            area_group = self.area_group
            resolution = self.nwp_resolution
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def get_3d_dataset(self):
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47),
freq='H') # 47 hours: 00:00 -> 23:00
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates) # For each date we have prediction append the next 47 hours
area = self.area_group if isinstance(self.areas, dict) else self.areas
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], p_dates, self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
for i, p_dates in enumerate(dates_stack))
x = np.array([])
data_var = dict()
for var in self.variables:
if (var == 'WS' and self.static_data['type'] == 'wind') or \
(var == 'Flux' and self.static_data['type'] == 'pv'):
data_var[var + '_prev'] = x
data_var[var] = x
data_var[var + '_next'] = x
else:
data_var[var] = x
data_var['dates'] = x
x_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
x_3d = stack_3d(x_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
if self.is_for_test:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
self.logger.info('NWP stacked data saved')
return data_var, x_3d
def train_pca(self, data, components, level):
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
param_grid = [{
"gamma": np.logspace(-3, 0, 20),
}]
kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.n_jobs)
grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.n_jobs)
grid_search.fit(data_scaled)
kpca = grid_search.best_estimator_
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
joblib.dump({'scaler': scaler, 'kpca': kpca}, fname)
def pca_transform(self, data, components, level):
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_pca(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
def dataset_for_single_farm(self, data, data_path):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
            self.logger.info('Dataset successfully created for evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
            self.logger.info('Dataset successfully created for training for %s', self.project_name)
def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
                # Keep the prev/next aggregates separate (p_wind / n_wind) instead of overwriting 'wind'.
                var_name = 'p_flux' if var == 'Flux' else 'p_wind'
                dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'n_wind'
                dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
            self.logger.info('Dataset successfully created for evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
            self.logger.info('Dataset successfully created for training for %s', self.project_name)
def make_dataset_res_offline(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
            except Exception:
                return False
data, X_3d = self.get_3d_dataset_offline(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_offline(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
                        except Exception:
                            continue
if flag:
break
X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_offline(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
            except Exception:
                return False
dates_stack = []
for dt in self.dates:
if utc:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
dates = [t.strftime('%d%m%y%H%M') for t in pdates]
dates_stack.append(dates)
else:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.areas, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
else:
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.area_group, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_offline(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
return dataset_X
def dataset_for_multiple_farms_offline(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
                var_name = 'p_flux' if var == 'Flux' else 'p_wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'n_wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
return dataset_X
def make_dataset_res_online(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_online(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_online(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_online(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_online(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
dates_stack = []
if utc:
pdates = pd.date_range(self.dates[-1] + pd.DateOffset(hours=25), self.dates[-1] + pd.DateOffset(hours=48),
freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
dates_stack.append(dates)
else:
pdates = pd.date_range(self.dates[-1] + pd.DateOffset(hours=25), self.dates[-1] + pd.DateOffset(hours=48),
freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
arrays = stack_daily_nwps(self.dates[-1], dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas,
self.variables, self.compress, self.static_data['type'])
else:
arrays = stack_daily_nwps(self.dates[-1], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_online(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
        self.logger.info('Dataset created successfully for %s', self.project_name)
return dataset_X
def dataset_for_multiple_farms_online(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
                    lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
                    longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
                var_name = 'p_flux' if var == 'Flux' else 'p_wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'n_wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
        self.logger.info('Dataset created successfully for %s', self.project_name)
return dataset_X
```
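
The builders above lean on a `pca_transform(data, components, level)` helper that is defined elsewhere in this file. Below is a minimal sketch of how such a helper could behave; the cache filename, the use of scikit-learn's `PCA`, and the fit-or-load logic are assumptions inferred from how the method is called, not the project's actual implementation.

```python
import os

import joblib
from sklearn.decomposition import PCA


def pca_transform(self, data, components, level):
    # Hypothetical sketch: fit one PCA per column group ('level') the first time it
    # is seen, persist it under path_data, and reuse it for the online datasets.
    fname = os.path.join(self.path_data, 'pca_' + level + '.pickle')
    if os.path.exists(fname):
        pca = joblib.load(fname)            # online path: reuse the trained model
    else:
        pca = PCA(n_components=components)  # offline path: train and persist it
        pca.fit(data)
        joblib.dump(pca, fname)
    return pca.transform(data)
```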
#### File: version2/feature_selection_manager/fs_manager.py
```python
import os
import pickle
import joblib
class FeatSelManager(object):
def __init__(self, cluster):
self.istrained = False
self.static_data = cluster.static_data
self.cluster_name = cluster.cluster_name
self.method = cluster.static_data['sklearn']['fs_method']
self.njobs = cluster.static_data['njobs_feat_sel']
self.inner_jobs = cluster.static_data['inner_jobs_feat_sel']
self.data_dir = cluster.data_dir
self.cluster_dir = cluster.cluster_dir
if self.method == 'boruta':
self.model_dir = os.path.join(cluster.cluster_dir, 'FS/boruta')
else:
self.model_dir = os.path.join(cluster.cluster_dir, 'FS/PERM')
try:
self.load()
except:
pass
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
def load_data(self):
data_path = self.data_dir
if os.path.exists(os.path.join(data_path, 'cvs_full.pickle')):
cvs = joblib.load(os.path.join(data_path, 'cvs_full.pickle'))
else:
raise ImportError('Cannot find data for cluster %s of %s', self.cluster_name, self.static_data['_id'])
return cvs
def save_data(self, cvs):
data_path = self.data_dir
for i in range(3):
if self.pca is None:
cvs[i][0] = cvs[i][0][:, self.features]
cvs[i][2] = cvs[i][2][:, self.features]
cvs[i][4] = cvs[i][4][:, self.features]
else:
cvs[i][0] = self.pca.transform(cvs[i][0][:, self.features])
cvs[i][2] = self.pca.transform(cvs[i][2][:, self.features])
cvs[i][4] = self.pca.transform(cvs[i][4][:, self.features])
joblib.dump(cvs, os.path.join(data_path, 'cvs.pickle'))
def fit(self):
cvs = self.load_data()
if self.method == 'boruta':
from Fuzzy_clustering.version2.feature_selection_manager.feature_selection_boruta import FS
fs = FS(self.static_data, self.cluster_dir, self.njobs, path_group=self.static_data['path_group'])
elif self.method == 'linearsearch':
from Fuzzy_clustering.version2.feature_selection_manager.feature_selection_linearsearch import FS
fs = FS(self.static_data, self.cluster_dir, self.njobs, self.inner_jobs,
path_group=self.static_data['path_group'])
else:
from Fuzzy_clustering.version2.feature_selection_manager.feature_selection_permutation import FS
fs = FS(self.static_data, self.cluster_dir, self.njobs, self.inner_jobs,
path_group=self.static_data['path_group'])
self.features, self.pca = fs.fit(cvs)
self.save_data(cvs)
self.istrained = True
self.save()
return 'Done'
def fit_TL(self):
cvs = self.load_data()
static_data_tl = self.static_data['tl_project']['static_data']
cluster_dir_tl = os.path.join(static_data_tl['path_model'], 'Regressor_layer/' + self.cluster_name)
if self.method == 'boruta':
from Fuzzy_clustering.version2.feature_selection_manager.feature_selection_boruta import FS
fs_trained = FS(static_data_tl, cluster_dir_tl, self.njobs, path_group=self.static_data['path_group'])
elif self.method == 'linearsearch':
from Fuzzy_clustering.version2.feature_selection_manager.feature_selection_linearsearch import FS
fs_trained = FS(static_data_tl, cluster_dir_tl, self.njobs, self.inner_jobs,
path_group=self.static_data['path_group'])
else:
from Fuzzy_clustering.version2.feature_selection_manager.feature_selection_permutation import FS
fs_trained = FS(static_data_tl, cluster_dir_tl, self.njobs, self.inner_jobs,
path_group=self.static_data['path_group'])
self.features = fs_trained.features
self.pca = fs_trained.pca
self.save_data(cvs)
self.istrained = True
self.save()
return 'Done'
def transform(self, X):
if self.pca is None:
return X[:, self.features]
else:
return self.pca.transform(X[:, self.features])
def to_dict(self):
        tmp_dict = {}
        for k in self.__dict__.keys():
            if k not in ['logger', 'data_dir', 'cluster_dir', 'model_dir']:
                tmp_dict[k] = self.__dict__[k]
        return tmp_dict
def load(self):
if os.path.exists(os.path.join(self.model_dir, 'model_fs.pickle')):
try:
f = open(os.path.join(self.model_dir, 'model_fs.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
self.__dict__.update(tmp_dict)
self.pca = joblib.load(os.path.join(self.model_dir, 'pca.pickle'))
except:
raise ValueError('Cannot find model for %s', self.model_dir)
else:
raise ValueError('Cannot find model for %s', self.model_dir)
def save(self):
joblib.dump(self.pca, os.path.join(self.model_dir, 'pca.pickle'))
f = open(os.path.join(self.model_dir, 'model_fs.pickle'), 'wb')
        tmp_dict = {}
        for k in self.__dict__.keys():
            if k not in ['logger', 'data_dir', 'cluster_dir', 'model_dir', 'pca']:
                tmp_dict[k] = self.__dict__[k]
        pickle.dump(tmp_dict, f)
f.close()
```
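
A hedged usage sketch for `FeatSelManager`: it only needs a cluster-like object exposing `static_data`, `cluster_name`, `data_dir` and `cluster_dir`, so the snippet stubs one out with `SimpleNamespace`. All paths and settings below are illustrative.

```python
from types import SimpleNamespace

cluster = SimpleNamespace(
    static_data={'sklearn': {'fs_method': 'boruta'}, 'njobs_feat_sel': 4,
                 'inner_jobs_feat_sel': 1, 'path_group': '/tmp/group', '_id': 'demo'},
    cluster_name='rule_1',
    data_dir='/tmp/demo/data',       # must already contain cvs_full.pickle
    cluster_dir='/tmp/demo/cluster',
)

fs = FeatSelManager(cluster)
if not fs.istrained:
    fs.fit()                         # selects features and writes the reduced cvs.pickle
# X_reduced = fs.transform(X)        # apply the same selection to new samples
```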
#### File: version2/project_manager/create_tasks_tl.py
```python
import numpy as np
class TaskCreator_TL():
def __init__(self, static_data):
self.static_data = static_data
def create_tasks_TL_stage_for_sklearn(self, projects, sklearn_methods):
# Train in parallel SKLEARN models
tasks_sk_ols = []
for method in sklearn_methods:
for project in projects:
for cluster_name, cluster in project.clusters.items():
if cluster.istrained == False:
task = {'project': project.static_data['_id'], 'static_data': project.static_data,
'cluster': cluster, 'method': method,
'optimize_method': project.static_data['sklearn']['optimizer']}
tasks_sk_ols.append(task)
return tasks_sk_ols
def create_tasks_TL_rbfcnn_stage1(self, projects, njobs_cnn):
# Train in parallel RBF-CNN
task_rbfcnn_stage1 = dict()
gpu = 0
task_count = 0
task_rbfcnn_stage1['task' + str(task_count)] = dict()
for n in range(self.static_data['ngpus']):
task_rbfcnn_stage1['task' + str(task_count)]['/device:GPU:' + str(n)] = []
def task_check(task_rbfcnn_stage1, task_count, njobs, ngpus):
flag = 0
for n in range(ngpus):
if len(task_rbfcnn_stage1['task' + str(task_count)]['/device:GPU:' + str(n)]) >= njobs:
flag += 1
if flag == ngpus:
task_count += 1
task_rbfcnn_stage1['task' + str(task_count)] = dict()
for n in range(project.static_data['ngpus']):
task_rbfcnn_stage1['task' + str(task_count)]['/device:GPU:' + str(n)] = []
return task_rbfcnn_stage1, task_count
for project in projects:
for cluster_name, cluster in project.clusters.items():
if cluster.istrained == False:
if ('RBF_ALL_CNN' in cluster.methods):
task = {'method': 'RBF-CNN', 'project': project.static_data['_id'], 'cluster': cluster,
'static_data': project.static_data,
'params': {'test': 1, 'gpu': gpu}}
task_rbfcnn_stage1['task' + str(task_count)]['/device:GPU:' + str(gpu)].append(task)
task_rbfcnn_stage1, task_count = task_check(task_rbfcnn_stage1, task_count, njobs_cnn,
project.static_data['ngpus'])
gpu += 1
if gpu == project.static_data['ngpus']:
gpu = 0
return task_rbfcnn_stage1
def create_tasks_TL_stage_for_rbfs(self, projects, njobs_rbf):
# Train in parallel deep_models and Feature Selection
tasks_rbf_ols = []
for project in projects:
for cluster_name, cluster in project.clusters.items():
if cluster.istrained == False:
task = {'project': project.static_data['_id'], 'static_data': project.static_data,
'cluster': cluster}
tasks_rbf_ols.append(task)
task_rbf_stage1 = dict()
gpu = 0
task_count = 0
task_rbf_stage1['task' + str(task_count)] = dict()
for n in range(self.static_data['ngpus']):
task_rbf_stage1['task' + str(task_count)]['/device:GPU:' + str(n)] = []
def task_check(task_rbf_stage1, task_count, njobs, ngpus):
flag = 0
for n in range(ngpus):
if len(task_rbf_stage1['task' + str(task_count)]['/device:GPU:' + str(n)]) >= njobs:
flag += 1
if flag == ngpus:
task_count += 1
task_rbf_stage1['task' + str(task_count)] = dict()
for n in range(project.static_data['ngpus']):
task_rbf_stage1['task' + str(task_count)]['/device:GPU:' + str(n)] = []
return task_rbf_stage1, task_count
for project in projects:
for cluster_name, cluster in project.clusters.items():
if cluster.istrained == False:
if ('RBF_ALL_CNN' in cluster.methods) or ('RBF_ALL' in cluster.methods):
task = {'method': 'RBFNN', 'project': project.static_data['_id'], 'cluster': cluster,
'static_data': project.static_data,
'params': {'test': 1, 'gpu': gpu}}
task_rbf_stage1['task' + str(task_count)]['/device:GPU:' + str(gpu)].append(task)
task_rbf_stage1, task_count = task_check(task_rbf_stage1, task_count, njobs_rbf,
project.static_data['ngpus'])
gpu += 1
if gpu == project.static_data['ngpus']:
gpu = 0
return tasks_rbf_ols, task_rbf_stage1
def create_tasks_TL_3d_fs(self, projects, njobs_cnn, njobs_lstm):
# Train in parallel deep_models and Feature Selection
tasks_fs = []
for project in projects:
for cluster_name, cluster in project.clusters.items():
if cluster.istrained == False:
task = {'project': project.static_data['_id'], 'cluster': cluster}
tasks_fs.append(task)
task_3d_stage1 = dict()
gpu = 0
task_count = 0
task_3d_stage1['task' + str(task_count)] = dict()
for n in range(self.static_data['ngpus']):
task_3d_stage1['task' + str(task_count)]['/device:GPU:' + str(n)] = []
def task_check(task_3d_stage1, task_count, njobs, ngpus):
flag = 0
for n in range(ngpus):
if len(task_3d_stage1['task' + str(task_count)]['/device:GPU:' + str(n)]) >= njobs:
flag += 1
if flag == ngpus:
task_count += 1
task_3d_stage1['task' + str(task_count)] = dict()
for n in range(project.static_data['ngpus']):
task_3d_stage1['task' + str(task_count)]['/device:GPU:' + str(n)] = []
return task_3d_stage1, task_count
for project in projects:
for cluster_name, cluster in project.clusters.items():
if cluster.istrained == False:
if 'LSTM' in cluster.methods:
min_units = np.maximum(cluster.D, 64)
lr = project.static_data['LSTM']['learning_rate']
task = {'method': 'LSTM', 'project': project.static_data['_id'], 'cluster': cluster,
'static_data': project.static_data,
'params': {'test': 1, 'gpu': gpu}}
task_3d_stage1['task' + str(task_count)]['/device:GPU:' + str(gpu)].append(task)
task_3d_stage1, task_count = task_check(task_3d_stage1, task_count, njobs_lstm,
project.static_data['ngpus'])
gpu += 1
if gpu == project.static_data['ngpus']:
gpu = 0
gpu = 0
for project in projects:
for cluster_name, cluster in project.clusters.items():
if cluster.istrained == False:
if 'CNN' in cluster.methods:
task = {'method': 'CNN', 'project': project.static_data['_id'], 'cluster': cluster,
'static_data': project.static_data,
'params': {'test': 1, 'gpu': gpu}}
task_3d_stage1['task' + str(task_count)]['/device:GPU:' + str(gpu)].append(task)
task_3d_stage1, task_count = task_check(task_3d_stage1, task_count, njobs_cnn,
project.static_data['ngpus'])
gpu += 1
if gpu == project.static_data['ngpus']:
gpu = 0
return tasks_fs, task_3d_stage1
```
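
The stage-one builders above return nested dictionaries keyed first by a batch name (`'task0'`, `'task1'`, ...) and then by a TensorFlow device string, each holding a list of task descriptors. The sketch below shows how a caller might walk that structure; `run_on_gpu` is a hypothetical dispatcher, not part of this module.

```python
def dispatch(task_batches, run_on_gpu):
    # task_batches looks like:
    # {'task0': {'/device:GPU:0': [task, ...], '/device:GPU:1': [...]}, 'task1': {...}}
    for batch_name in sorted(task_batches):
        for device, tasks in task_batches[batch_name].items():
            for task in tasks:
                # each descriptor carries the method name, the cluster object,
                # the project's static_data and the assigned gpu index
                run_on_gpu(device, task['method'], task['cluster'], task['params'])
```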
#### File: version2/project_manager/project_eval_manager.py
```python
import joblib
import os
import sys
import numpy as np
import pandas as pd
from Fuzzy_clustering.version2.model_manager.models_predict_manager import ModelPredictManager
from Fuzzy_clustering.version2.project_manager.projects_data_manager import ProjectsDataManager
class ProjectsEvalManager:
def __init__(self, static_data):
self.static_data = static_data
self.nwp_model = static_data['NWP_model']
self.nwp_resolution = static_data['NWP_resolution']
self.project_owner = static_data['project_owner']
self.projects_group = static_data['projects_group']
self.area_group = static_data['area_group']
self.version_group = static_data['version_group']
self.version_model = static_data['version_model']
self.data_variables = static_data['data_variables']
self.methods = [method for method in static_data['project_methods'].keys() if
static_data['project_methods'][method] == True]
self.path_group = self.static_data['path_group']
self.group_static_data = joblib.load(os.path.join(self.path_group, 'static_data_projects.pickle'))
self.model_type = self.static_data['type']
self.sys_folder = self.static_data['sys_folder']
self.path_nwp = self.static_data['path_nwp']
self.path_nwp_group = self.static_data['path_nwp_group']
def evaluate(self):
projects = self.collect_projects()
for project in projects:
project.evaluate_all()
    def eval_short_term(self, horizon=4, best_method='average'):
projects = self.collect_projects()
        project_data_manager = ProjectsDataManager(self.static_data, is_test=None)
if hasattr(project_data_manager, 'data_eval'):
nwp_response = project_data_manager.nwp_extractor()
data_eval = project_data_manager.data_eval
if self.static_data['ts_resolution'] == '15min':
window = np.arange(0, 60 * horizon + 0.2, 15)
else:
window = np.arange(0, 60 * horizon + 0.2, 60)
for hor in window:
for project in projects:
if hor == 0:
predictions = pd.DataFrame(data_eval.values, index=data_eval.index, columns=[hor])
observations = pd.DataFrame(data_eval.values, index=data_eval.index, columns=[hor])
joblib.dump(predictions,
os.path.join(project.static_data['path_data'], 'predictions_short_term.pickle'))
joblib.dump(observations,
os.path.join(project.static_data['path_data'], 'observations_short_term.pickle'))
else:
pred, y = project.evaluate_short_term(best_method)
pred.index = pred.index - pd.DateOffset(minutes=hor)
y.index = y.index - pd.DateOffset(minutes=hor)
predictions[hor] = np.nan
predictions[hor].loc[pred.index] = pred.values.ravel()
observations[hor] = np.nan
observations[hor].loc[y.index] = y.values.ravel()
joblib.dump(predictions,
os.path.join(project.static_data['path_data'], 'predictions_short_term.pickle'))
joblib.dump(observations,
os.path.join(project.static_data['path_data'], 'observations_short_term.pickle'))
result = pd.DataFrame(index=[project.static_data['_id']],
columns=window)
if project.static_data['rated'] is None:
rated = observations
else:
rated = project.static_data['rated']
err = np.abs(predictions - observations) / rated
mae = np.mean(err, axis=0)
print(mae)
_ = project_data_manager.create_short_term_datasets(data_eval)
for project in projects:
predictions = joblib.load(os.path.join(project.static_data['path_data'], 'predictions_short_term.pickle'))
observations = joblib.load(os.path.join(project.static_data['path_data'], 'observations_short_term.pickle'))
result = pd.DataFrame(index=[project.static_data['_id']],
columns=window)
if project.static_data['rated'] is None:
rated = observations
else:
rated = project.static_data['rated']
err = np.abs(predictions - observations) / rated
mae = np.mean(err, axis=0)
result.loc[project.static_data['_id']] = mae
result.to_csv(os.path.join(project.static_data['path_data'], 'result_short_term.csv'))
def collect_projects(self):
projects = []
for project in self.group_static_data:
if project['_id'] != project['static_data']['projects_group'] + '_' + project['static_data']['type']:
project_model = ModelPredictManager(project['static_data'])
if project_model.istrained == True:
projects.append(project_model)
else:
raise ValueError('Project is not trained ', project['_id'])
return projects
```
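
The score reported by `eval_short_term` is a normalized mean absolute error per look-ahead step: the absolute prediction error divided by the rated power, or by the observations themselves when no rated power is configured. A toy example of the computation:

```python
import numpy as np
import pandas as pd

# columns are look-ahead horizons in minutes, rows are evaluation timestamps
predictions = pd.DataFrame({0: [0.50, 0.80], 60: [0.45, 0.90]})
observations = pd.DataFrame({0: [0.52, 0.78], 60: [0.50, 0.85]})
rated = 1.0   # assume the series is already normalized to rated power

err = np.abs(predictions - observations) / rated
mae = np.mean(err, axis=0)
print(mae)    # horizon 0 -> 0.02, horizon 60 -> 0.05
```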
#### File: version2/project_manager/project_group_init.py
```python
import os
import joblib
import numpy as np
import pandas as pd
from Fuzzy_clustering.version2.common_utils.logging import create_logger
def compute_area_grid(lat, long, resolution, round_coord, levels):
lat_range = np.arange(np.around(lat, round_coord) - 20, np.around(lat, round_coord) + 20,
resolution)
lat1 = lat_range[np.abs(lat_range - lat).argmin()] - resolution / 10
lat2 = lat_range[np.abs(lat_range - lat).argmin()] + resolution / 10
long_range = np.arange(np.around(long, round_coord) - 20, np.around(long, round_coord) + 20,
resolution)
long1 = long_range[np.abs(long_range - long).argmin()] - resolution / 10
long2 = long_range[np.abs(long_range - long).argmin()] + resolution / 10
return [[lat1 - resolution * levels, long1 - resolution * levels],
[lat2 + resolution * levels, long2 + resolution * levels]]
class ProjectGroupInit:
"""
Class responsible for managing and loading the
power output or load.
"""
def __init__(self, static_data):
"""
Parameters
----------
static_data: python dict
contains all the information required to load the power measurement for specific project(s).
"""
self.static_data = static_data # dict containing information about project paths, model structure and training
# params, input file, see in util_database_timos.py and config_timos.py
self.file_data = static_data['data_file_name'] # input .csv file PROBLEM_TYPE + '_ts.csv' i.e. wind_ts.csv
self.project_owner = static_data[
'project_owner'] # Name of project owner or research program i.e. my_projects or CROSSBOW
self.projects_group = static_data['projects_group'] # Name of the country
self.area_group = static_data['area_group'] # coordinates of the country
self.version_group = static_data['version_group']
self.version_model = static_data['version_model']
self.weather_in_data = static_data[
'weather_in_data'] # True if input file contains more columns than the power output column
self.nwp_model = static_data['NWP_model']
self.nwp_resolution = static_data['NWP_resolution']
self.data_variables = static_data['data_variables'] # Variable names used
self.projects = [] # list containing all the parks, we're interested in. Each park is considered as a project.
self.use_rated = True
self.model_type = self.static_data['type']
self.sys_folder = self.static_data['sys_folder']
self.path_nwp = self.static_data['path_nwp']
self.path_group = self.static_data['path_group']
self.path_nwp_group = self.static_data['path_nwp_group']
self.group_static_data = []
self.logger = create_logger(logger_name=f'ProjectInitManager_{self.model_type}', abs_path=self.path_group,
logger_path=f'log_{self.projects_group}.log', write_type='a')
def initialize(self):
if os.path.exists(os.path.join(os.path.dirname(self.file_data), 'coord_auto_' + self.model_type + '.csv')):
self.file_coord = os.path.join(os.path.dirname(self.file_data), 'coord_auto_' + self.model_type + '.csv')
else:
self.file_coord = os.path.join(os.path.dirname(self.file_data), 'coord_' + self.model_type + '.csv')
if not os.path.exists(self.file_coord) and not self.weather_in_data:
raise IOError('File with coordinates does not exist')
self.file_rated = os.path.join(os.path.dirname(self.file_data), 'rated_' + self.model_type + '.csv')
if not os.path.exists(self.file_rated):
if self.model_type in {'wind', 'pv'} and self.projects_group not in {'IPTO'}:
raise ValueError('Provide rated_power for each project. The type of projects is %s', self.model_type)
self.use_rated = False
else:
self.use_rated = True
self.load_power_of_parks() # Loads power output, coordinates and rated power.
if len(self.projects) == 0:
raise ImportError('No project loaded. check the input file in configuration')
if self.check_project_names():
for project_name in self.projects:
path_project = self.path_group + '/' + project_name
if not os.path.exists(path_project):
os.makedirs(path_project)
path_model = path_project + '/model_ver' + str(self.version_model)
if not os.path.exists(path_model):
os.makedirs(path_model)
path_backup = self.path_group + '/backup_models/' + project_name + '/model_ver' + str(
self.version_model)
if not os.path.exists(path_backup):
os.makedirs(path_backup)
path_data = path_model + '/DATA'
if not os.path.exists(path_data):
os.makedirs(path_data)
path_fuzzy_models = path_model + '/fuzzy_models'
if not os.path.exists(path_fuzzy_models):
os.makedirs(path_fuzzy_models)
if self.use_rated:
if project_name == self.projects_group + '_' + self.model_type and project_name not in self.rated.index.to_list():
rated = self.rated.sum().to_list()[0]
else:
rated = self.rated.loc[project_name].to_list()[0]
else:
rated = None
if hasattr(self, 'coord'):
if project_name == 'APE_net' or self.model_type == 'load' or project_name == self.projects_group + '_' + self.model_type:
coord = dict()
for name, lat_long in self.coord.iterrows():
coord[name] = lat_long.values.tolist()
else:
coord = self.coord.loc[project_name].to_list() # [lat, long]
else:
coord = None
area = self.create_area(coord)
temp = {'_id': project_name,
'owner': self.project_owner,
'project_group': self.projects_group,
'type': self.model_type,
'location': coord,
'areas': area,
'rated': rated,
'path_project': path_project,
'path_model': path_model,
'path_group': self.path_group,
'version_group': self.version_group,
'version_model': self.version_model,
'path_backup': path_backup,
'path_data': path_data,
'pathnwp': self.path_nwp_group,
'path_fuzzy_models': path_fuzzy_models,
'run_on_platform': False,
}
static_data = dict()
for key, value in self.static_data.items():
static_data[key] = value
for key, value in temp.items():
static_data[key] = value
self.group_static_data.append({'_id': project_name, 'static_data': static_data})
joblib.dump(static_data, os.path.join(path_model, 'static_data.pickle'))
with open(os.path.join(path_model, 'static_data.txt'), 'w') as file:
for k, v in static_data.items():
if not isinstance(v, dict):
file.write(str(k) + ' >>> ' + str(v) + '\n\n')
else:
file.write(str(k) + ' >>> ' + '\n')
for kk, vv in v.items():
file.write('\t' + str(kk) + ' >>> ' + str(vv) + '\n')
joblib.dump(self.group_static_data, os.path.join(self.path_group, 'static_data_projects.pickle'))
self.logger.info('Static data of all projects created')
def check_project_names(self):
flag = True
if self.model_type in {'wind', 'pv'}:
for name in self.projects:
if name not in self.coord.index.to_list() and name != self.projects_group + '_' + self.model_type and name != 'APE_net':
flag = False
                    self.logger.info('Inconsistency between the data file and the coordinates file for project %s', name)
if not flag:
raise ValueError('Inconsistency in project names between data and coord')
if self.use_rated:
for name in self.projects:
if name not in self.rated.index.to_list() and name != self.projects_group + '_' + self.model_type:
flag = False
                    self.logger.info('Inconsistency between the data file and the rated-power file for project %s', name)
if not flag:
raise ValueError('Inconsistency in project names between data and rated')
return flag
def load_power_of_parks(self):
try:
# Data containing power output or load. Each column refers to a different wind, pv park.
self.data = pd.read_csv(self.file_data, header=0, index_col=0, parse_dates=True, dayfirst=True)
except Exception:
self.logger.info(f'Cannot import timeseries from the file {self.file_data}')
raise IOError(f'Cannot import timeseries from the file {self.file_data}')
self.logger.info('Timeseries imported successfully from the file %s', self.file_data)
if 'total' in self.data.columns: # In some cases, the total output of all parks is included.
self.data = self.data.rename(
columns={'total': self.projects_group + '_' + self.model_type}) # e.g group = 'Greece'
if self.static_data['Evaluation_start']:
valid_combination = True
time_offset = pd.DateOffset(hours=0)
if self.model_type == 'fa':
time_offset = pd.DateOffset(days=372)
elif self.model_type == 'load':
if self.data.columns[0] == 'lv_load':
time_offset = pd.DateOffset(days=9001)
else:
if self.static_data['horizon'] == 'short-term':
                        time_offset = pd.DateOffset(hours=350)
                    else:
                        if self.static_data['ts_resolution'] == 'hourly':
                            time_offset = pd.DateOffset(hours=9001)
                        elif self.static_data['ts_resolution'] == '15min':
                            time_offset = pd.DateOffset(minutes=60 * 9001)
if valid_combination:
try:
eval_date = pd.to_datetime(self.static_data['Evaluation_start'], format='%d%m%Y %H:%M')
self.data_eval = self.data.iloc[np.where(self.data.index > eval_date - time_offset)]
self.data = self.data.iloc[np.where(self.data.index <= eval_date)]
except Exception:
raise ValueError('Wrong date format, use %d%m%Y %H:%M. Or the date does not exist in the dataset')
if self.model_type == 'load':
self.projects.append(self.data.columns[0])
elif self.model_type == 'fa':
if self.version_model == 0:
self.projects.append('fa_curr_morning')
elif self.version_model == 1:
self.projects.append('fa_ahead_morning')
else:
raise ValueError(
'Version model should be 0 for current day and 1 for day ahead otherwise choose another group version')
else:
for name in self.data.columns:
var = f'{self.projects_group}_{self.model_type}' if name == 'total' else name
self.projects.append(var)
if not self.weather_in_data:
try:
# For each of the park, load its coordinates. (lat,long) single tuple
self.coord = pd.read_csv(self.file_coord, header=None, index_col=0)
except Exception:
self.logger.info('Cannot import coordinates from the file %s', self.file_coord)
raise IOError('Cannot import coordinates from the file %s', self.file_coord)
self.logger.info('Coordinates imported successfully from the file %s', self.file_coord)
else:
self.logger.info('Coordinates in the data')
if self.use_rated:
try:
# For each park, read its rated (maximum) power.
self.rated = pd.read_csv(self.file_rated, header=None, index_col=0)
except Exception:
self.logger.info('Cannot import Rated Power from the file %s', self.file_rated)
raise IOError('Cannot import Rated Power from the file %s', self.file_rated)
self.logger.info('Rated Power imported successfully from the file %s', self.file_rated)
self.logger.info('Data loaded successfully')
def create_area(self, coord):
levels = 4 if self.nwp_resolution == 0.05 else 2
round_coord = 1 if self.nwp_resolution == 0.05 else 0
if coord is None:
area = dict()
elif isinstance(coord, list):
if len(coord) == 2:
lat, long = coord[0], coord[1]
area = compute_area_grid(lat, long, self.nwp_resolution, round_coord, levels)
elif len(coord) == 4:
area = list(np.array(coord).reshape(2, 2))
else:
raise ValueError(
'Wrong coordinates. Should be point (lat, long) or area [lat1, long1, lat2, long2]')
elif isinstance(coord, dict):
area = dict()
for key, value in coord.items():
if len(value) == 2:
lat, long = value[0], value[1]
area[key] = compute_area_grid(lat, long, self.nwp_resolution, round_coord, levels)
else:
area[key] = np.array(value).reshape(2, 2)
else:
raise ValueError('Wrong coordinates. Should be dict or list')
self.logger.info('Areas created successfully')
return area
```
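
`compute_area_grid` snaps a park's coordinates to the nearest NWP grid cell and then pads that cell by `levels` grid steps on each side, returning `[[lat_min, long_min], [lat_max, long_max]]`. An illustrative call (the coordinates are made up):

```python
# 0.05-degree grid with 4 surrounding levels, as chosen in create_area for that resolution
area = compute_area_grid(lat=37.98, long=23.72, resolution=0.05, round_coord=1, levels=4)
print(area)   # approximately [[37.795, 23.495], [38.205, 23.905]], a ~0.4 degree box
```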
#### File: version2/rbf_ols_manager/rbf_ols_manager.py
```python
import os
import joblib
from Fuzzy_clustering.version2.rbf_ols_manager.rbf_ols import rbf_ols_module
class RbfOlsManager(object):
def __init__(self, static_data, cluster):
self.static_data = static_data
self.istrained = False
self.njobs = cluster.static_data['sklearn']['njobs']
self.models = dict()
self.cluster_name = cluster.cluster_name
self.data_dir = cluster.data_dir
self.cluster_dir = cluster.cluster_dir
self.model_dir = os.path.join(self.cluster_dir, 'RBF_OLS')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
try:
self.load()
except:
pass
def load_data(self):
data_path = self.data_dir
cvs = joblib.load(os.path.join(data_path, 'cvs.pickle'))
return cvs
def fit(self):
if self.istrained == False:
cvs = self.load_data()
model_rbf_ols = rbf_ols_module(self.static_data, self.model_dir, self.static_data['rated'],
self.static_data['sklearn']['njobs'], GA=False
, path_group=self.static_data['path_group'])
model_rbf_ga = rbf_ols_module(self.static_data, self.model_dir, self.static_data['rated'],
self.static_data['sklearn']['njobs'], GA=True
, path_group=self.static_data['path_group'])
if model_rbf_ols.istrained == False:
max_samples = 1500
print('Train RBFOLS ', self.cluster_name)
self.models['RBF_OLS'] = model_rbf_ols.optimize_rbf(cvs, max_samples=max_samples)
else:
self.models['RBF_OLS'] = model_rbf_ols.to_dict()
if model_rbf_ga.istrained == False:
max_samples = 1500
print('Train GA-RBF ', self.cluster_name)
self.models['GA_RBF_OLS'] = model_rbf_ga.optimize_rbf(cvs, max_samples=max_samples)
else:
self.models['GA_RBF_OLS'] = model_rbf_ga.to_dict()
self.istrained = True
self.save()
return 'Done'
def fit_TL(self):
if self.istrained == False:
static_data_tl = self.static_data['tl_project']['static_data']
cluster_dir_tl = os.path.join(static_data_tl['path_model'], 'Regressor_layer/' + self.cluster_name)
model_rbf_ols_TL = rbf_ols_module(static_data_tl, cluster_dir_tl, static_data_tl['rated'],
self.static_data['sklearn']['njobs'], GA=False)
model_rbf_ga_TL = rbf_ols_module(static_data_tl, cluster_dir_tl, static_data_tl['rated'],
self.static_data['sklearn']['njobs'], GA=True)
cvs = self.load_data()
model_rbf_ols = rbf_ols_module(self.static_data, self.model_dir, self.static_data['rated'],
self.static_data['sklearn']['njobs'], GA=False)
model_rbf_ga = rbf_ols_module(self.static_data, self.model_dir, self.static_data['rated'],
self.static_data['sklearn']['njobs'], GA=True)
if model_rbf_ols.istrained == False:
self.models['RBF_OLS'] = model_rbf_ols.optimize_rbf_TL(cvs, model_rbf_ols_TL.models)
else:
self.models['RBF_OLS'] = model_rbf_ols.to_dict()
if model_rbf_ga.istrained == False:
self.models['GA_RBF_OLS'] = model_rbf_ga.optimize_rbf_TL(cvs, model_rbf_ga_TL.models)
else:
self.models['GA_RBF_OLS'] = model_rbf_ga.to_dict()
self.istrained = True
self.save()
return 'Done'
def load(self):
if os.path.exists(os.path.join(self.model_dir, 'RBFolsManager.pickle')):
try:
tmp_dict = joblib.load(os.path.join(self.model_dir, 'RBFolsManager.pickle'))
self.__dict__.update(tmp_dict)
except:
raise ImportError('Cannot open CNN model')
else:
raise ImportError('Cannot find CNN model')
def save(self):
tmp_dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data', 'model_dir', 'cluster_dir', 'data_dir']:
tmp_dict[k] = self.__dict__[k]
joblib.dump(tmp_dict, os.path.join(self.model_dir, 'RBFolsManager.pickle'))
```
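
`RbfOlsManager` follows the same cluster-object contract as the other per-cluster managers; a hedged usage sketch with illustrative paths and settings:

```python
from types import SimpleNamespace

static_data = {'sklearn': {'njobs': 4}, 'rated': None, 'path_group': '/tmp/group'}
cluster = SimpleNamespace(
    static_data=static_data,
    cluster_name='rule_1',
    data_dir='/tmp/demo/data',       # must contain cvs.pickle with the cross-validation folds
    cluster_dir='/tmp/demo/cluster',
)

manager = RbfOlsManager(static_data, cluster)
manager.fit()   # trains RBF_OLS and GA_RBF_OLS if needed, then persists RBFolsManager.pickle
```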
#### File: version2/sklearn_models/sklearn_predict.py
```python
import os
import joblib
import numpy as np
import pickle
class sklearn_model_predict(object):
def __init__(self, cluster_dir, rated, model_type, njobs):
self.njobs = njobs
self.rated = rated
self.cluster = os.path.basename(cluster_dir)
self.model_dir = os.path.join(cluster_dir, str.upper(model_type))
self.istrained = False
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
self.model_type = model_type
try:
self.load(self.model_dir)
except:
pass
def compute_metrics(self, pred, y, rated):
if rated is None:
rated = y.ravel()
else:
rated = 1
err = np.abs(pred.ravel() - y.ravel()) / rated
sse = np.sum(np.square(pred.ravel() - y.ravel()))
rms = np.sqrt(np.mean(np.square(err)))
mae = np.mean(err)
mse = sse / y.shape[0]
return [sse, rms, mae, mse]
def predict(self, X):
self.load(self.model_dir)
if self.istrained:
pred = self.model.predict(X).reshape(-1, 1)
else:
raise ModuleNotFoundError("Error on prediction of %s cluster. The model %s seems not properly trained",
self.cluster, self.model_type)
return pred
def load(self, model_dir):
self.model = joblib.load(os.path.join(model_dir, 'model.pkl'))
if os.path.exists(os.path.join(model_dir, 'model_all' + '.pickle')):
try:
f = open(os.path.join(model_dir, 'model_all' + '.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
del tmp_dict['model_dir']
self.__dict__.update(tmp_dict)
except:
raise ImportError('Cannot open model_all model')
else:
raise ImportError('Cannot find model_all model')
```
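
`compute_metrics` returns `[sse, rms, mae, mse]` of the (optionally normalized) error; note that when a rated value is supplied the divisor collapses to 1, i.e. the error stays in the units of the normalized series. A small worked example:

```python
import numpy as np

pred = np.array([0.5, 0.7, 0.9])
y = np.array([0.4, 0.8, 0.9])

err = np.abs(pred - y) / 1                # rated supplied -> divisor is 1
sse = np.sum(np.square(pred - y))         # 0.02
rms = np.sqrt(np.mean(np.square(err)))    # ~0.0816
mae = np.mean(err)                        # ~0.0667
mse = sse / y.shape[0]                    # ~0.0067
```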
#### File: version3/ClusterCombineManager/Adasyn_var2d.py
```python
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils import _safe_indexing
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.utils import check_neighbors_object
from imblearn.utils import check_sampling_strategy
class ADASYN():
def __init__(
self,
sampling_strategy="auto",
variables=None,
variables_3d=None,
random_state=None,
n_neighbors=5,
n_jobs=None,
):
        self.sampling_strategy = sampling_strategy
self.random_state = random_state
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
self.variables = variables
self.variables_3d = variables_3d
def _validate_estimator(self):
"""Create the necessary objects for ADASYN"""
self.nn_ = check_neighbors_object(
"n_neighbors", self.n_neighbors, additional_neighbor=1
)
self.nn_.set_params(**{"n_jobs": self.n_jobs})
def fit_resample(self, X, X_3d, y, y_org):
self.sampling_strategy_ = check_sampling_strategy(
self.sampling_strategy, y, 'over-sampling'
)
self._validate_estimator()
random_state = check_random_state(self.random_state)
X_resampled = [X.copy()]
X_3d_resampled = [X_3d.copy()]
y_resampled = [y.copy()]
y_org_resampled = [y_org.copy()]
for class_sample, n_samples in self.sampling_strategy_.items():
if n_samples == 0:
continue
target_class_indices = np.flatnonzero(y == class_sample)
X_class = _safe_indexing(X, target_class_indices)
X_class_3d = _safe_indexing(X_3d, target_class_indices)
y_class_org = _safe_indexing(y_org, target_class_indices)
# self.nn_.set_params(**{"n_neighbors": self.n_neighbors})
self.nn_.fit(X[:, self.variables])
nns = self.nn_.kneighbors(X_class[:, self.variables], return_distance=False)[:, 1:]
# The ratio is computed using a one-vs-rest manner. Using majority
# in multi-class would lead to slightly different results at the
# cost of introducing a new parameter.
n_neighbors = self.nn_.n_neighbors - 1
ratio_nn = np.sum(y[nns] != class_sample, axis=1) / n_neighbors
if not np.sum(ratio_nn):
raise RuntimeError(
"Not any neigbours belong to the majority"
" class. This case will induce a NaN case"
" with a division by zero. ADASYN is not"
" suited for this specific dataset."
" Use SMOTE instead."
)
ratio_nn /= np.sum(ratio_nn)
n_samples_generate = np.rint(ratio_nn * n_samples).astype(int)
# rounding may cause new amount for n_samples
n_samples = np.sum(n_samples_generate)
if not n_samples:
raise ValueError(
"No samples will be generated with the"
" provided ratio settings."
)
# the nearest neighbors need to be fitted only on the current class
# to find the class NN to generate new samples
# self.nn_.set_params(**{"n_neighbors": np.minimum(int(X_class.shape[0]-1), self.n_neighbors)})
self.nn_.fit(X_class[:, self.variables])
nns = self.nn_.kneighbors(X_class[:, self.variables], return_distance=False)[:, 1:]
enumerated_class_indices = np.arange(len(target_class_indices))
rows = np.repeat(enumerated_class_indices, n_samples_generate)
cols = random_state.choice(n_neighbors, size=n_samples)
diffs = X_class[nns[rows, cols]][:, self.variables] - X_class[rows][:, self.variables]
diffs_3d = X_class_3d[nns[rows, cols]][:, self.variables_3d, :] - X_class_3d[rows][:, self.variables_3d, :]
            steps = random_state.uniform(size=(n_samples, 1))
X_new = X_class[rows]
X_new_3d = X_class_3d[rows]
y_new_org = y_class_org[rows]
if sparse.issparse(X):
sparse_func = type(X).__name__
steps = getattr(sparse, sparse_func)(steps)
X_new[:, self.variables] = X_class[rows][:, self.variables] + steps.multiply(diffs)
                X_new_3d[:, self.variables_3d, :] = X_class_3d[rows][:, self.variables_3d, :] \
                                                    + steps[:, :, np.newaxis].multiply(diffs_3d)
else:
X_new[:, self.variables] = X_class[rows][:, self.variables] + steps * diffs
X_new_3d[:, self.variables_3d, :] = X_class_3d[rows][:, self.variables_3d, :] + steps[:, :,
np.newaxis] * diffs_3d
X_new = X_new.astype(X.dtype)
X_new_3d = X_new_3d.astype(X.dtype)
y_new = np.full(n_samples, fill_value=class_sample, dtype=y.dtype)
X_resampled.append(X_new)
X_3d_resampled.append(X_new_3d)
y_resampled.append(y_new)
y_org_resampled.append(y_new_org)
if sparse.issparse(X):
X_resampled = sparse.vstack(X_resampled, format=X.format)
X_3d_resampled = sparse.vstack(X_3d_resampled, format=X.format)
else:
X_resampled = np.vstack(X_resampled)
X_3d_resampled = np.vstack(X_3d_resampled)
y_resampled = np.hstack(y_resampled)
y_org_resampled = np.hstack(y_org_resampled)
return X_resampled, X_3d_resampled, y_org_resampled
```
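
A hedged example of driving this ADASYN variant on toy data. The shapes and the choice of interpolated columns are illustrative; `y` carries the class labels that define the imbalance, `y_org` the continuous targets copied to the synthetic rows, and only the listed `variables` / `variables_3d` are interpolated while the remaining columns are inherited from the seed sample.

```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(60, 6)                      # tabular features
X_3d = rng.rand(60, 6, 24)               # matching 3-D tensor (samples x features x lags)
y = np.array([0] * 50 + [1] * 10)        # imbalanced class labels
y_org = rng.rand(60)                     # original continuous targets

ada = ADASYN(variables=[0, 1, 2],        # columns of X to interpolate
             variables_3d=[0, 1, 2],     # rows of X_3d to interpolate
             n_neighbors=5, random_state=0)
X_res, X_3d_res, y_org_res = ada.fit_resample(X, X_3d, y, y_org)
print(X_res.shape, X_3d_res.shape, y_org_res.shape)   # roughly (100, 6) (100, 6, 24) (100,)
```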
#### File: version3/common_utils/logging.py
```python
import logging
import os
def create_logger(logger_name, abs_path, logger_path, write_type='w'):
assert write_type in ('w', 'a')
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(abs_path, logger_path), write_type)
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
return logger
```
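
A short usage example for the logger factory above (paths are illustrative):

```python
logger = create_logger(logger_name='dataset_manager', abs_path='/tmp',
                       logger_path='log_demo.log', write_type='a')
logger.info('Dataset created successfully for %s', 'my_project')
```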
#### File: version3/DatasetManager/create_datasets_PCA.py
```python
import numpy as np
import pandas as pd
import joblib, os, logging
from joblib import Parallel, delayed
from scipy.interpolate import interp2d
from sklearn.decomposition import KernelPCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from pytz import timezone
def my_scorer(estimator, X, y=None):
X_reduced = estimator.transform(X)
X_preimage = estimator.inverse_transform(X_reduced)
return -1 * mean_squared_error(X, X_preimage)
def rescale(arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
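# Coarser alternative to rescale(): halves the grid resolution by averaging non-overlapping
# 2x2 blocks (edge blocks may be smaller when a dimension is odd).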
def rescale_mean(arr):
arr_new = np.zeros([int(np.ceil(arr.shape[0]/2)), int(np.ceil(arr.shape[1]/2))])
for i in range(0, arr.shape[0], 2):
for j in range(0, arr.shape[1], 2):
arr_new[int((i+1)/2),int((j+1)/2)] = np.mean(arr[i:i+2, j:j+2])
return arr_new
def stack_2d(X, sample, compress):
if compress:
sample = rescale_mean(sample)
if len(sample.shape) == 3:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 3:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :]))
elif len(sample.shape) == 2:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 2:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :]))
elif len(sample.shape) == 4:
if X.shape[0] == 0:
X = sample
elif len(X.shape) == 4:
X = np.stack((X, sample))
else:
X = np.vstack((X, sample[np.newaxis, :, :, :, :]))
return X
def stack_3d(X, sample):
if X.shape[0] == 0:
X = sample
elif len(sample.shape)!=len(X.shape):
X = np.vstack((X, sample[np.newaxis]))
else:
X = np.vstack((X, sample))
return X
def check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
flag = True
for var in variables:
if nwp[var].shape[0]==0 and nwp_next[var].shape[0]==0 and nwp_prev[var].shape[0]==0:
flag = False
break
return flag
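# Builds one day's worth of inputs: a dict of per-variable 2-d NWP grids (with previous/next
# hour for the primary variable of the technology) plus a stacked 3-d tensor of all grids,
# restricted to the requested latitude/longitude window.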
def stack_daily_nwps(t, pdates, path_nwp_project, nwp_model, areas, variables, compress, model_type):
X = np.array([])
X_3d = np.array([])
data_var = dict()
for var in variables:
if ((var == 'WS') and (model_type=='wind')) or ((var == 'Flux') and (model_type=='pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
fname = os.path.join(path_nwp_project, nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:,0]>=areas[0][0]) & (nwp['lat'][:,0]<=areas[1][0])))[0]
longs = (np.where((nwp['long'][0,:]>=areas[0][1]) & (nwp['long'][0,:]<=areas[1][1])))[0]
break
except:
continue
try:
for date in pdates:
nwp = nwps[date]
date = pd.to_datetime(date, format='%d%m%y%H%M')
nwp_prev = nwps[(date - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
data_var['dates'] = np.hstack((data_var['dates'], date))
x_2d = np.array([])
for var in sorted(variables):
if ((var == 'WS') and (model_type=='wind')) or ((var == 'Flux') and (model_type=='pv')):
data_var[var + '_prev'] = stack_2d(data_var[var + '_prev'], nwp_prev[var][np.ix_(lats, longs)], compress)
data_var[var] = stack_2d(data_var[var], nwp[var][np.ix_(lats, longs)], compress)
data_var[var + '_next'] = stack_2d(data_var[var + '_next'], nwp_next[var][np.ix_(lats, longs)], compress)
x_2d = stack_2d(x_2d, nwp_prev[var][np.ix_(lats, longs)], compress)
x_2d = stack_2d(x_2d, nwp[var][np.ix_(lats, longs)], compress)
x_2d = stack_2d(x_2d, nwp_next[var][np.ix_(lats, longs)], compress)
else:
data_var[var] = stack_2d(data_var[var], nwp[var][np.ix_(lats, longs)], compress)
x_2d = stack_2d(x_2d, nwp[var][np.ix_(lats, longs)], compress)
X_3d = stack_2d(X_3d, x_2d, False)
except:
pass
print(t.strftime('%d%m%y%H%M'), ' extracted')
return (data_var, X_3d, t.strftime('%d%m%y%H%M'))
class dataset_creator_PCA():
def __init__(self, project, data=None, njobs=1, test=False, dates=None):
self.data = data
self.isfortest = test
self.project_name= project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
if self.nwp_resolution == 0.05:
self.compress = True
else:
self.compress = False
self.njobs = njobs
self.variables = self.static_data['data_variables']
self.create_logger()
if not self.data is None:
self.check_dates()
elif not dates is None:
self.dates = dates
def create_logger(self):
self.logger = logging.getLogger('log_' + self.static_data['project_group'] + '.log')
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(os.path.dirname(self.path_nwp_project), 'log_' + self.static_data['project_group'] + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
self.dates = pd.DatetimeIndex(dates)
def get_3d_dataset(self):
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas, self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.njobs)(delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.areas, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
else:
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model, self.area_group,
self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.njobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.area_group, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
data_var[var+'_prev'] = X
data_var[var] = X
data_var[var+'_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0]!=0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
if self.isfortest:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
joblib.dump(X_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
joblib.dump(X_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
self.logger.info('NWP stacked data saved')
return data_var, X_3d
def create_sample(self):
pass
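    # Kernel PCA is fitted per "level" (spatial sub-grid or area): features are min-max scaled,
    # the RBF gamma is chosen by grid search on reconstruction error (my_scorer), and the
    # scaler + fitted KernelPCA are persisted so PCA_transform can reuse them.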
def train_PCA(self, data, components, level):
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
param_grid = [{
"gamma": np.logspace(-3, 0, 20),
}]
kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.njobs)
grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.njobs)
grid_search.fit(data_scaled)
kpca = grid_search.best_estimator_
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
joblib.dump({'scaler':scaler, 'kpca':kpca}, fname)
def PCA_transform(self, data, components, level):
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_PCA(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
def make_dataset_res(self):
if self.isfortest:
if not os.path.exists(os.path.join(self.path_data, 'nwps_3d_test.pickle')) or not os.path.exists(os.path.join(self.path_data, 'dataset_cnn_test.pickle')):
data, X_3d = self.get_3d_dataset()
else:
data = joblib.load(os.path.join(self.path_data, 'nwps_3d_test.pickle'))
X_3d = joblib.load(os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
if not os.path.exists(os.path.join(self.path_data, 'nwps_3d.pickle')) or not os.path.exists(os.path.join(self.path_data, 'dataset_cnn.pickle')):
data, X_3d = self.get_3d_dataset()
else:
data = joblib.load(os.path.join(self.path_data, 'nwps_3d.pickle'))
X_3d = joblib.load(os.path.join(self.path_data, 'dataset_cnn.pickle'))
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t= self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def dataset_for_single_farm(self, data, data_path):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = np.transpose(data[var + '_prev'],[0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
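                # The 5x5 NWP grid around the site is decomposed into rings of cells: the centre
                # cell (2, 2), a middle ring split into "down"/"up" halves, and the outer ring
                # split the same way; each ring is then compressed with kernel PCA.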
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var=='Flux' else 'wind'
var_sort = 'fl' if var=='Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var=='Cloud' else 'direction'
var_sort = 'cl' if var=='Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X2 = np.transpose(data[var],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.isfortest:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat([dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour','month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area)>1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.PCA_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.PCA_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var=='Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var=='Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var=='Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var=='Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
                var_name = 'p_flux' if var == 'Flux' else 'p_wind'
                dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'n_wind'
                dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.isfortest:
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def make_dataset_res_offline(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_offline(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_offline(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t= self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_offline(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
dates_stack = []
for dt in self.dates:
if utc:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
dates = [t.strftime('%d%m%y%H%M') for t in pdates]
dates_stack.append(dates)
else:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas,
self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.njobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.areas, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
else:
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.njobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.area_group, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_offline(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = np.transpose(data[var + '_prev'],[0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var=='Flux' else 'wind'
var_sort = 'fl' if var=='Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var=='Cloud' else 'direction'
var_sort = 'cl' if var=='Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X2 = np.transpose(data[var],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
return dataset_X
def dataset_for_multiple_farms_offline(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat([dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour','month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.PCA_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.PCA_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var=='Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var=='Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var=='Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var=='Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
                var_name = 'p_flux' if var == 'Flux' else 'p_wind'
                dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'n_wind'
                dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
return dataset_X
def make_dataset_res_online(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_online(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_online(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t= self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_online(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_online(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
dates_stack = []
if utc:
pdates = pd.date_range(self.dates + pd.DateOffset(hours=25), self.dates + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
dates_stack.append(dates)
else:
pdates = pd.date_range(self.dates + pd.DateOffset(hours=25), self.dates + pd.DateOffset(hours=48), freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
arrays = stack_daily_nwps(self.dates, dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas, self.variables, self.compress, self.static_data['type'])
else:
arrays = stack_daily_nwps(self.dates, dates_stack[0], self.path_nwp_project, self.nwp_model, self.area_group,
self.variables, self.compress, self.static_data['type'])
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
data_var[var+'_prev'] = X
data_var[var] = X
data_var[var+'_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0]!=0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_online(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = np.transpose(data[var + '_prev'],[0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var=='Flux' else 'wind'
var_sort = 'fl' if var=='Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1,1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var=='Cloud' else 'direction'
var_sort = 'cl' if var=='Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X2 = np.transpose(data[var],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
self.logger.info('Successfully dataset created for training for %s', self.project_name)
return dataset_X
def dataset_for_multiple_farms_online(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat([dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour','month'])])
for var in self.variables:
for area_name, area in areas.items():
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.PCA_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.PCA_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var=='Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var=='Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var=='Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var=='Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
                var_name = 'p_flux' if var == 'Flux' else 'p_wind'
                dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
                var_name = 'n_flux' if var == 'Flux' else 'n_wind'
                dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
self.logger.info('Successfully dataset created for training for %s', self.project_name)
return dataset_X
```
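A minimal usage sketch of `dataset_creator_PCA`, assuming the package layout implied by the file path above. The project dictionary shows only the keys the class actually reads; every value (paths, coordinates, NWP model name, measurement column) is a placeholder, and the real schema comes from the project configuration elsewhere in the repository.

```python
import pandas as pd
from Fuzzy_clustering.version3.DatasetManager.create_datasets_PCA import dataset_creator_PCA

# Illustrative project description; only the keys read by dataset_creator_PCA are shown.
project = {
    '_id': 'pv_park_1',
    'static_data': {
        'pathnwp': '/data/nwp/pv_park_1',
        'path_data': '/data/datasets/pv_park_1',
        'areas': [[37.9, 23.6], [38.1, 23.8]],       # [[lat_min, long_min], [lat_max, long_max]]
        'area_group': [[37.5, 23.0], [38.5, 24.0]],
        'NWP_model': 'skiron',
        'NWP_resolution': 0.05,                      # 0.05 triggers grid compression
        'location': [38.0, 23.7],
        'data_variables': ['Flux', 'Cloud', 'Temperature', 'WS'],
        'type': 'pv',
        'project_group': 'Greece',
    },
}

# Hourly production measurements indexed by timestamp (column name is illustrative).
data = pd.read_csv('/data/measurements/pv_park_1.csv', index_col=0, parse_dates=True)['power']

creator = dataset_creator_PCA(project, data=data, njobs=4)
creator.make_dataset_res()   # writes dataset_X.csv / dataset_y.csv under path_data
```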
#### File: version3/MLP_Manager/MLP_manager.py
```python
import joblib
from Fuzzy_clustering.version3.MLP_Manager.MLP_tf_core import MLP
import pika, uuid, time, json, os
import numpy as np
from rabbitmq_rpc.server import RPCServer
from Fuzzy_clustering.version3.MLP_Manager.Cluster_object import cluster_object
RABBIT_MQ_HOST = os.getenv('RABBIT_MQ_HOST')
RABBIT_MQ_PASS = os.getenv('RABBIT_MQ_PASS')
RABBIT_MQ_PORT = int(os.getenv('RABBIT_MQ_PORT'))
server = RPCServer(queue_name='MLPmanager', host=RABBIT_MQ_HOST, port=RABBIT_MQ_PORT, threaded=False)
class mlp_manager():
def __init__(self, static_data, cluster, method, params):
self.params = params
self.test = params['test']
self.method = str.lower(method)
self.cluster = cluster
self.istrained = False
if self.method == 'mlp_3d':
self.model_dir = os.path.join(cluster.cluster_dir, 'MLP_3D')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
self.test_dir = self.model_dir
try:
self.load()
except:
pass
if not self.istrained:
self.test_dir = os.path.join(self.model_dir, 'test_' + str(self.test))
try:
self.load()
except:
if not os.path.exists(self.test_dir):
os.makedirs(self.test_dir)
pass
self.static_data = static_data
self.cluster_name = cluster.cluster_name
self.rated = static_data['rated']
self.data_dir = cluster.data_dir
self.probabilistic = False
def fit(self):
if self.istrained == False:
return self.optimize_mlp()
else:
return self.acc
def fit_TL(self):
if self.istrained == False:
return self.optimize_mlp_TL()
else:
return self.acc
def load_data(self):
if os.path.exists(os.path.join(self.data_dir, 'dataset_X.csv')):
cvs = joblib.load(os.path.join(self.data_dir, 'cvs.pickle'))
else:
cvs = np.array([])
return cvs
def optimize_mlp(self):
self.trial = self.params['trial']
self.units = self.params['units']
self.act_func = self.params['act_func']
self.lr = self.params['lr']
mlp_max_iterations = self.static_data['LSTM']['max_iterations']
self.hold_prob = self.static_data['LSTM']['hold_prob']
cvs = self.load_data()
mlp = MLP(self.static_data, self.rated, cvs[0], cvs[1], cvs[2], cvs[3], cvs[4], cvs[5], trial=self.trial,
probabilistc=self.probabilistic)
# try:
self.acc, self.model = mlp.train(max_iterations=mlp_max_iterations,
learning_rate=self.lr, units=self.units,
hold_prob=self.hold_prob, act_func=self.act_func)
# except:
# acc_old_lstm=np.inf
# scale_lstm=None
# model_lstm=None
# pass
self.istrained=True
self.save()
return self.acc
def load(self):
if os.path.exists(os.path.join(self.test_dir, self.method + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(self.test_dir, self.method + '.pickle'))
self.__dict__.update(tmp_dict)
except:
                raise ImportError('Cannot open MLP model')
        else:
            raise ImportError('Cannot find MLP model')
def save(self):
tmp_dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data_all', 'static_data', 'temp_dir', 'cluster_cnn_dir', 'cluster_dir']:
tmp_dict[k] = self.__dict__[k]
joblib.dump(tmp_dict,os.path.join(self.test_dir, self.method + '.pickle'), compress=9)
def optimize_mlp_TL(self):
static_data_tl = self.static_data['tl_project']['static_data']
cluster_dir_tl = os.path.join(static_data_tl['path_model'], 'Regressor_layer/' + self.cluster_name)
        model_TL_dir = os.path.join(cluster_dir_tl, 'MLP_3D')
model_TL = joblib.load(os.path.join(model_TL_dir, self.method + '.pickle'))
self.trial = model_TL['trial']
self.units = model_TL['units']
self.act_func = model_TL['act_func']
self.lr = model_TL['lr']
mlp_max_iterations = self.static_data['LSTM']['max_iterations']
self.hold_prob = model_TL['hold_prob']
cvs = self.load_data()
mlp = MLP(self.static_data, self.rated, cvs[0], cvs[1], cvs[2], cvs[3], cvs[4], cvs[5], trial=self.trial,
probabilistc=self.probabilistic)
# try:
self.acc, self.model = mlp.train(max_iterations=mlp_max_iterations,
learning_rate=self.lr, units=self.units,
hold_prob=self.hold_prob)
# except:
# acc_old_lstm=np.inf
# scale_lstm=None
# model_lstm=None
# pass
self.istrained=True
self.save()
return self.acc
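# json cannot serialise numpy scalars/arrays directly; this encoder converts them to plain
# Python types so that RPC responses containing numpy values can be JSON-encoded.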
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.integer) or isinstance(obj, int):
return int(obj)
elif isinstance(obj, np.floating) or isinstance(obj, float):
return float(obj)
        elif isinstance(obj, (np.str_, str)):
            return str(obj)
        elif isinstance(obj, (np.bool_, bool)):
            return bool(obj)
try:
return json.JSONEncoder.default(self, obj)
except:
print(obj)
raise TypeError('Object is not JSON serializable')
@server.consumer()
def deep_manager(static_data):
print(" [.] Receive cluster %s)" % static_data['cluster_name'])
cluster = cluster_object(static_data, static_data['cluster_name'])
model_method = static_data['method']
params = static_data['params']
model_3d = mlp_manager(static_data, cluster, model_method, params=params)
if model_3d.istrained == False:
response = {'result': model_3d.fit(), 'cluster_name': cluster.cluster_name, 'project': static_data['_id'],
'test': params['test'], 'method': model_method}
else:
response = {'result': model_3d.acc, 'cluster_name': cluster.cluster_name, 'project': static_data['_id'],
'test': params['test'], 'method': model_method}
return response
if __name__=='__main__':
server.run()
```
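For orientation, an illustrative request body for the 'MLPmanager' queue: the keys shown are the ones read by `deep_manager` and `mlp_manager`, while the values and the omitted `static_data` fields are placeholders.

```python
# Illustrative RPC request; values are placeholders.
request = {
    '_id': 'pv_park_1',
    'cluster_name': 'rule_3',
    'method': 'MLP_3D',
    'params': {
        'test': 1,
        'trial': 0,
        'units': [64, 32],
        'act_func': 'elu',
        'lr': 1e-4,
    },
    # ...plus the remaining static_data fields (rated, LSTM settings, paths, ...)
    # that cluster_object and mlp_manager expect.
}
```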
#### File: project_manager/PredictModelManager/ClusterPredictManager.py
```python
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from Fuzzy_clustering.version3.project_manager.PredictModelManager.Model_3d_object import model3d_object
from Fuzzy_clustering.version3.project_manager.PredictModelManager.RBF_CNN_model import RBF_CNN_model
from Fuzzy_clustering.version3.project_manager.PredictModelManager.SKlearn_object import SKLearn_object
from Fuzzy_clustering.version3.project_manager.PredictModelManager.FS_object import FeatSelobject
class MultiEvaluator():
def __init__(self, processes: int = 8):
self.processes = processes
def predict(self, i, x, model):
return i, model.predict(x)
def evaluate(self, X, model):
partitions = 3000
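        # Split the input into chunks of `partitions` rows, score them in parallel and
        # re-assemble the predictions in submission order below.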
X_list=[]
for i in range(0, X.shape[0], partitions):
if (i+partitions+1)>X.shape[0]:
X_list.append(X[i:])
else:
X_list.append(X[i:i+partitions])
pred =Parallel(self.processes)(delayed(self.predict)(i, x, model) for i, x in enumerate(X_list))
indices = np.array([p[0] for p in pred])
predictions = np.array([])
for ind in indices:
if len(predictions.shape)==1:
predictions = pred[ind][1]
else:
predictions = np.vstack((predictions, pred[ind][1]))
return predictions
class ClusterPredict():
def __init__(self, static_data, cluster):
self.cluster = cluster
self.cluster_dir = cluster.cluster_dir
self.cluster_name = cluster.cluster_name
self.static_data = static_data
self.model_type = static_data['type']
self.methods = cluster.methods
self.combine_methods = static_data['combine_methods']
self.rated = static_data['rated']
self.n_jobs = static_data['njobs']
def pred_model(self, X, method, X_cnn=np.array([]), X_lstm=np.array([])):
if (method == 'RBF_ALL'):
model = RBF_CNN_model(self.static_data, self.cluster)
pred = model.predict(X)
elif (method == 'RBF_ALL_CNN'):
model = RBF_CNN_model(self.static_data, self.cluster, cnn=True)
pred = model.predict(X)
elif method in {'CNN'}:
model = model3d_object(self.static_data, self.cluster, method)
pred = model.predict(X_cnn)
pred[np.where(pred < 0)] = 0
elif method in {'LSTM'}:
model = model3d_object(self.static_data, self.cluster, method)
pred = model.predict(X_lstm)
pred[np.where(pred < 0)] = 0
elif method in {'SVM', 'NUSVM', 'MLP', 'RF', 'XGB', 'elasticnet'}:
model = SKLearn_object(self.static_data, self.cluster, method)
pred = model.predict(X)
pred[np.where(pred < 0)] = 0
return pred
def parallel_pred_model(self, X, method, X_cnn=np.array([]), X_lstm=np.array([])):
parallel = MultiEvaluator(self.n_jobs)
if (method == 'RBF_ALL'):
model = RBF_CNN_model(self.static_data, self.cluster)
pred = parallel.evaluate(X, model)
elif (method == 'RBF_ALL_CNN'):
model = RBF_CNN_model(self.static_data, self.cluster, cnn=True)
pred = parallel.evaluate(X, model)
elif method in {'CNN'}:
model = model3d_object(self.static_data, self.cluster, method)
pred = parallel.evaluate(X_cnn, model)
elif method in {'LSTM'}:
model = model3d_object(self.static_data, self.cluster, method)
pred = parallel.evaluate(X_lstm, model)
elif method in {'SVM', 'NUSVM', 'MLP', 'RF', 'XGB', 'elasticnet'}:
model = SKLearn_object(self.static_data, self.cluster, method)
pred = parallel.evaluate(X, model)
return pred
def compute_metrics(self, pred, y, rated):
        # Normalise errors by the rated power; when no rated power is given,
        # normalise by the observed values instead (relative error).
        if rated is None:
            rated = y.ravel()
err = np.abs(pred.ravel() - y.ravel())/rated
sse = np.sum(np.square(pred.ravel() - y.ravel()))
rms = np.sqrt(np.mean(np.square(err)))
mae = np.mean(err)
mse = sse / y.shape[0]
return [sse, rms, mae, mse]
def evaluate(self, pred_all, y):
result = pd.DataFrame(index=[method for method in pred_all.keys()], columns=['sse', 'rms', 'mae', 'mse'])
for method, pred in pred_all.items():
if not method in {'metrics'}:
result.loc[method] = self.compute_metrics(pred, y, self.rated)
return result
def spark_predict(self, X, X_cnn=np.array([]), X_lstm=np.array([]), fs_reduced=False):
if fs_reduced==False:
fs_manager = FeatSelobject(self.cluster)
if fs_manager.istrained==True:
X = fs_manager.transform(X)
predictions=dict()
for method in self.methods:
if X.shape[0]>0:
pred = self.parallel_pred_model(X, method, X_cnn=X_cnn, X_lstm=X_lstm)
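                # Impute NaNs in a prediction column with the column mean, but only if at
                # most one third of its rows are NaN; otherwise the model is considered
                # badly trained and an error is raised.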
for j in range(pred.shape[1]):
if np.any(np.isnan(pred[:, j])):
if np.sum(np.isnan(pred[:, j]))<=X.shape[0]/3:
pred[:, j][np.where(np.isnan(pred[:, j]))] = np.nanmean(pred[:, j])
else:
                        raise ValueError('There are NaNs in the predictions of cluster %s; the %s model may not be trained well' % (self.cluster_name, method))
if method == 'RBF_ALL_CNN':
predictions['RBF_OLS'] = pred[:, 0].reshape(-1, 1)
predictions['GA_RBF_OLS'] = pred[:, 1].reshape(-1, 1)
predictions['RBFNN'] = pred[:, 2].reshape(-1, 1)
predictions['RBF-CNN'] = pred[:, 3].reshape(-1, 1)
elif method == 'RBF_ALL':
predictions['RBF_OLS'] = pred[:, 0].reshape(-1, 1)
predictions['GA_RBF_OLS'] = pred[:, 1].reshape(-1, 1)
predictions['RBFNN'] = pred[:, 2].reshape(-1, 1)
else:
if len(pred.shape)==1:
pred = pred.reshape(-1, 1)
predictions[method] = pred
else:
if method == 'RBF_ALL_CNN':
predictions['RBF_OLS'] = np.array([])
predictions['GA_RBF_OLS'] = np.array([])
predictions['RBFNN'] = np.array([])
predictions['RBF-CNN'] = np.array([])
elif method == 'RBF_ALL':
predictions['RBF_OLS'] = np.array([])
predictions['GA_RBF_OLS'] = np.array([])
predictions['RBFNN'] = np.array([])
else:
predictions[method] = np.array([])
return predictions
def predict(self, X, X_cnn=np.array([]), X_lstm=np.array([]), fs_reduced=False):
if fs_reduced==False:
fs_manager = FeatSelobject(self.cluster)
if fs_manager.istrained==True:
X = fs_manager.transform(X)
predictions=dict()
for method in self.methods:
if X.shape[0]>0:
pred = self.pred_model(X, method, X_cnn=X_cnn, X_lstm=X_lstm)
for j in range(pred.shape[1]):
if np.any(np.isnan(pred[:, j])):
if np.sum(np.isnan(pred[:, j]))<=X.shape[0]/3:
pred[:, j][np.where(np.isnan(pred[:, j]))] = np.nanmean(pred[:, j])
else:
                        raise ValueError('There are NaNs in the predictions of cluster %s; the %s model may not be trained well' % (self.cluster_name, method))
if method == 'RBF_ALL_CNN':
predictions['RBF_OLS'] = pred[:, 0].reshape(-1, 1)
predictions['GA_RBF_OLS'] = pred[:, 1].reshape(-1, 1)
predictions['RBFNN'] = pred[:, 2].reshape(-1, 1)
predictions['RBF-CNN'] = pred[:, 3].reshape(-1, 1)
elif method == 'RBF_ALL':
predictions['RBF_OLS'] = pred[:, 0].reshape(-1, 1)
predictions['GA_RBF_OLS'] = pred[:, 1].reshape(-1, 1)
predictions['RBFNN'] = pred[:, 2].reshape(-1, 1)
else:
if len(pred.shape)==1:
pred = pred.reshape(-1, 1)
predictions[method] = pred
else:
if method == 'RBF_ALL_CNN':
predictions['RBF_OLS'] = np.array([])
predictions['GA_RBF_OLS'] = np.array([])
predictions['RBFNN'] = np.array([])
predictions['RBF-CNN'] = np.array([])
elif method == 'RBF_ALL':
predictions['RBF_OLS'] = np.array([])
predictions['GA_RBF_OLS'] = np.array([])
predictions['RBFNN'] = np.array([])
else:
predictions[method] = np.array([])
return predictions
```
#### File: project_manager/PredictModelManager/FS_object.py
```python
import pickle
import os
class FeatSelobject(object):
def __init__(self, cluster):
self.istrained = False
self.static_data = cluster.static_data['sklearn']
self.cluster_name = cluster.cluster_name
self.method = cluster.static_data['sklearn']['fs_method']
self.njobs = cluster.static_data['njobs_feat_sel']
self.inner_jobs = cluster.static_data['inner_jobs_feat_sel']
self.data_dir = cluster.data_dir
self.cluster_dir = cluster.cluster_dir
if self.method == 'boruta':
self.model_dir = os.path.join(cluster.cluster_dir, 'FS/boruta')
else:
self.model_dir = os.path.join(cluster.cluster_dir, 'FS/PERM')
try:
self.load()
except:
            raise ImportError('Cannot find feature selection model for %s' % self.cluster_name)
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
def transform(self, X):
return X[:, self.features]
def load(self):
if os.path.exists(os.path.join(self.model_dir, 'model_fs.pickle')):
try:
f = open(os.path.join(self.model_dir, 'model_fs.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
self.__dict__.update(tmp_dict)
except:
raise ValueError('Cannot find model for %s', self.model_dir)
else:
raise ValueError('Cannot find model for %s', self.model_dir)
```
#### File: project_manager/PredictModelManager/FullClusterPredictManager.py
```python
import numpy as np
import pandas as pd
import joblib, os, pickle
from Fuzzy_clustering.version3.project_manager.PredictModelManager.Clusterer import clusterer
from Fuzzy_clustering.version3.project_manager.PredictModelManager.ClusterPredictManager import ClusterPredict
class FullClusterPredictManager(object):
def __init__(self, path_model, static_data):
self.path_model = path_model
self.static_data = static_data
self.thres_split = static_data['clustering']['thres_split']
self.thres_act = static_data['clustering']['thres_act']
self.n_clusters = static_data['clustering']['n_clusters']
self.rated = static_data['rated']
self.var_imp = static_data['clustering']['var_imp']
self.var_lin = static_data['clustering']['var_lin']
self.var_nonreg = static_data['clustering']['var_nonreg']
try:
self.load()
except:
pass
def load_data(self):
data_path = self.static_data['path_data']
X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))
X_cnn = X_cnn.transpose([0, 2, 3, 1])
else:
X_cnn = np.array([])
index = X.index
index_all = X.index
if self.static_data['type'] == 'pv' and self.static_data['NWP_model'] == 'skiron':
index = np.where(X['flux'] > 1e-8)[0]
X = X.iloc[index]
y_reduced = y.iloc[index]
if X_cnn.shape[0]>0:
X_cnn = X_cnn[index]
else:
y_reduced = y
if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):
X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))
else:
X_lstm = np.array([])
return X, y, y_reduced, X_cnn, X_lstm, index, index_all
def load_test_data(self):
X, y, y_reduced, X_cnn, X_lstm, index, index_all = self.load_data()
test_ind = np.where(X.index >= self.split_test)[0]
test_ind_all = np.where(index_all >= self.split_test)[0]
index = index[index >= self.split_test]
index_all = index_all[index_all >= self.split_test]
indices_test = test_ind
X_test = X.iloc[test_ind]
y_test = y.iloc[test_ind_all]
y_test_reduced = y_reduced.iloc[test_ind]
if len(X_cnn.shape) > 1:
X_cnn_test = X_cnn[indices_test]
else:
X_cnn_test = np.array([])
if len(X_lstm.shape) > 1:
X_lstm_test = X_lstm[indices_test]
else:
X_lstm_test = np.array([])
return X_test, y_test, y_test_reduced, X_cnn_test, X_lstm_test, index, index_all
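    # Samples whose activations are NaN for every cluster fall outside all fuzzy rules.
    # If there are too many of them the model is considered broken; otherwise each such
    # sample is given a small activation (0.1) on one cluster so it can still be predicted.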
def check_if_all_nans(self, activations):
if activations.isna().all(axis=1).any() == True:
indices = activations.index[activations.isna().all(axis=1).to_numpy().ravel()]
if indices.shape[0] > 50:
raise RuntimeError('Too many nans. Please check your model')
for ind in indices:
act = activations.loc[ind]
clust = act.idxmax()
activations.loc[ind, clust] = 0.1
return activations
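    # predict_clusters(): scale the inputs with the stored scalers, compute the fuzzy
    # activations, run every cluster whose activation exceeds thres_act through its
    # ClusterPredict instance, and collect per-method predictions and per-cluster metrics.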
def predict_clusters(self, X_test = pd.DataFrame([]), y_test = pd.DataFrame([]), X_cnn_test = np.array([]), X_lstm_test = np.array([]), test = True):
if X_test.shape[0]==0:
offline = True
else:
offline = False
if offline:
if test:
X_test, y_test_all, y_test, X_cnn_test, X_lstm_test, index, index_all = self.load_test_data()
else:
X_test, y_test_all, y_test, X_cnn_test, X_lstm_test, index, index_all = self.load_data()
else:
index = X_test.index
index_all = X_test.index
y_test_all = y_test
sc = joblib.load(os.path.join(self.static_data['path_data'], 'X_scaler.pickle'))
scale_y = joblib.load(os.path.join(self.static_data['path_data'], 'Y_scaler.pickle'))
pred_cluster = dict()
X_test = pd.DataFrame(sc.transform(X_test.values), columns=X_test.columns, index=X_test.index)
if y_test.shape[0]>0:
y_test = pd.DataFrame(scale_y.transform(y_test.values), columns=y_test.columns, index=y_test.index)
if not hasattr(self, 'clusterer'):
self.clusterer = clusterer(self.static_data['path_fuzzy_models'])
act_test = self.clusterer.compute_activations(X_test)
act_test = self.check_if_all_nans(act_test)
for clust in self.clusters.keys():
predict_module = ClusterPredict(self.static_data, self.clusters[clust])
if clust == 'global':
if len(self.clusters[clust].methods) > 0:
pred_cluster[clust] = predict_module.predict(X_test.values, X_cnn=X_cnn_test, X_lstm=X_lstm_test)
if y_test.shape[0] > 0:
pred_cluster[clust]['metrics'] = predict_module.evaluate(pred_cluster['global'], y_test.values)
pred_cluster[clust]['dates'] = X_test.index
pred_cluster[clust]['index'] = np.arange(0, X_test.shape[0])
else:
dates = X_test.index[act_test[clust] >= self.thres_act]
nind = np.where(act_test[clust] >= self.thres_act)[0]
nind.sort()
x = X_test.loc[dates]
if y_test.shape[0] > 0:
targ = y_test.loc[dates].values
if len(X_cnn_test.shape) > 1:
x_cnn = X_cnn_test[nind]
else:
x_cnn = np.array([])
if len(X_lstm_test.shape) > 1:
x_lstm = X_lstm_test[nind]
else:
x_lstm = np.array([])
pred_cluster[clust] = predict_module.predict(x.values, X_cnn=x_cnn, X_lstm=x_lstm)
if y_test.shape[0] > 0:
pred_cluster[clust]['metrics'] = predict_module.evaluate(pred_cluster[clust], targ)
pred_cluster[clust]['dates'] = dates
pred_cluster[clust]['index'] = nind
predictions = dict()
result_clust = pd.DataFrame()
for clust in pred_cluster.keys():
for method in pred_cluster[clust].keys():
if not method in {'dates', 'index', 'metrics'}:
if not method in predictions.keys():
predictions[method] = pd.DataFrame(index=X_test.index,
columns=[cl for cl in pred_cluster.keys()])
predictions[method].loc[pred_cluster[clust]['dates'], clust] = pred_cluster[clust][method].ravel()
elif method in {'metrics'}:
result_clust = pd.concat([result_clust, pred_cluster[clust][method]['mae'].rename(clust)], axis=1)
result_clust.to_csv(os.path.join(self.static_data['path_data'], 'result_of_clusters.csv'))
if y_test.shape[0] > 0:
return pred_cluster, predictions, y_test_all, y_test, index, index_all
else:
return pred_cluster, predictions, index, index_all
def load(self):
if os.path.exists(os.path.join(self.path_model, 'manager' + '.pickle')):
try:
f = open(os.path.join(self.path_model, 'manager' + '.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
if 'path_model' in tmp_dict.keys():
del tmp_dict['path_model']
self.__dict__.update(tmp_dict)
except:
raise ValueError('Cannot find model for %s', self.path_model)
else:
raise ValueError('Cannot find model for %s', self.path_model)
```
#### File: project_manager/PredictModelManager/LSTM_predict_3d.py
```python
import tensorflow as tf
class LSTM_3d_predict():
def __init__(self, model, scale_lstm, trial, probabilistic):
self.scale_lstm = scale_lstm
self.trial = trial
self.probabilistic = probabilistic
self.model = model
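    # Prediction rebuilds the TensorFlow graph used at training time (selected by
    # `trial`), restores the stored best_weights into the layers and runs a single
    # forward pass on CPU.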
def create_inputs(self, X_train):
self.N, self.D1, self.D2 = X_train.shape
H = X_train
H = H.reshape(-1, self.D1 * self.D2)
sc = self.scale_lstm
H = sc.transform(H.reshape(-1, self.D1 * self.D2))
H = H.reshape(-1, self.D1, self.D2)
return H
def init_weights(self, init_w):
init_random_dist = tf.convert_to_tensor(init_w)
return tf.Variable(init_random_dist)
def init_bias(self, init_b):
init_bias_vals = tf.convert_to_tensor(init_b)
return tf.Variable(init_bias_vals)
def normal_full_layer(self,input_layer, init_w, init_b):
W = self.init_weights(init_w)
b = self.init_bias(init_b)
return tf.add(tf.matmul(input_layer, W), b, name='prediction'), W, b
def build_graph(self, x1, best_weights, units, hold_prob):
with tf.name_scope("build_lstm") as scope:
if self.trial == 0:
lstm_1 = tf.keras.layers.LSTM(
units[0],
name='lstm1',
return_sequences=True,
activation=tf.nn.elu)
full_out_dropout = tf.nn.dropout(lstm_1(x1), rate=1 - hold_prob)
shape = full_out_dropout.get_shape().as_list()
full_out_dropout = tf.reshape(full_out_dropout, [-1, shape[1] * shape[2]])
elif self.trial == 1:
lstm_1 = tf.keras.layers.LSTM(
units[0],
name='lstm1',
return_sequences=True,
activation=tf.nn.elu)
full_one_dropout = tf.nn.dropout(lstm_1(x1), rate=1 - hold_prob)
shape = full_one_dropout.get_shape().as_list()
lstm_1_flat = tf.reshape(full_one_dropout, [-1, shape[1] * shape[2]])
full_layer_one = tf.keras.layers.Dense(units=shape[1] * shape[2], activation=tf.nn.elu,
name='dense1')
full_out_dropout = tf.nn.dropout(full_layer_one(lstm_1_flat), rate=1 - hold_prob)
elif self.trial == 2:
lstm_1 = tf.keras.layers.LSTM(
units[0],
name='lstm1',
return_sequences=True,
activation=tf.nn.elu)
full_one_dropout = tf.nn.dropout(lstm_1(x1), rate=1 - hold_prob)
shape = full_one_dropout.get_shape().as_list()
lstm_2_flat = tf.reshape(full_one_dropout, [-1, shape[1] * shape[2]])
full_layer_two = tf.keras.layers.Dense(units=shape[1] * shape[2], activation=tf.nn.elu,
name='dense1')
full_two_dropout = tf.nn.dropout(full_layer_two(lstm_2_flat), rate=1 - hold_prob)
full_two_dropout = tf.reshape(full_two_dropout, [-1, shape[1], shape[2]])
lstm_2 = tf.keras.layers.LSTM(
units[2],
name='lstm2',
return_sequences=True,
activation=tf.nn.elu)
full_out_dropout = tf.nn.dropout(lstm_2(full_two_dropout), rate=1 - hold_prob)
shape = full_out_dropout.get_shape().as_list()
full_out_dropout = tf.reshape(full_out_dropout, [-1, shape[1] * shape[2]])
elif self.trial == 3:
lstm_1 = tf.keras.layers.LSTM(
units[0],
name='lstm1',
return_sequences=True,
activation=tf.nn.elu)
full_one_dropout = tf.nn.dropout(lstm_1(x1), rate=1 - hold_prob)
shape = full_one_dropout.get_shape().as_list()
lstm_2_flat = tf.reshape(full_one_dropout, [-1, shape[1] * shape[2]])
full_layer_two = tf.keras.layers.Dense(units=shape[1] * shape[2], activation=tf.nn.elu,
name='dense1')
full_two_dropout = tf.nn.dropout(full_layer_two(lstm_2_flat), rate=1 - hold_prob)
full_two_dropout = tf.reshape(full_two_dropout, [-1, shape[1], shape[2]])
lstm_2 = tf.keras.layers.LSTM(
units[2],
name='lstm2',
return_sequences=True,
activation=tf.nn.elu)
full_three_dropout = tf.nn.dropout(lstm_2(full_two_dropout), rate=1 - hold_prob)
shape = full_three_dropout.get_shape().as_list()
lstm_2_flat = tf.reshape(full_three_dropout, [-1, shape[1] * shape[2]])
full_layer_three = tf.keras.layers.Dense(units=shape[1] * shape[2], activation=tf.nn.elu,
name='dense2')
full_three_dropout = tf.nn.dropout(full_layer_three(lstm_2_flat), rate=1 - hold_prob)
full_three_dropout = tf.reshape(full_three_dropout, [-1, shape[1], shape[2]])
lstm_3 = tf.keras.layers.LSTM(
units[2],
name='lstm3',
return_sequences=True,
activation=tf.nn.elu)
full_out_dropout = tf.nn.dropout(lstm_3(full_three_dropout), rate=1 - hold_prob)
shape = full_out_dropout.get_shape().as_list()
full_out_dropout = tf.reshape(full_out_dropout, [-1, shape[1] * shape[2]])
if self.probabilistic:
prob_layer = tf.keras.layers.Dense(100, activation=tf.nn.softmax, name='dense_prob')
y_pred = prob_layer(full_out_dropout)
else:
y_pred, W, b = self.normal_full_layer(full_out_dropout, best_weights['build_lstm/Variable:0'],best_weights['build_lstm/Variable_1:0'] )
if self.trial == 0:
weights = lstm_1.trainable_weights
if self.probabilistic:
weights += prob_layer.trainable_weights
return y_pred, weights, lstm_1, prob_layer
else:
weights += [W, b]
return y_pred, weights, lstm_1
elif self.trial == 1:
weights = lstm_1.trainable_weights + full_layer_one.trainable_weights
if self.probabilistic:
weights += prob_layer.trainable_weights
return y_pred, weights, lstm_1, full_layer_one, prob_layer
else:
weights += [W, b]
return y_pred, weights, lstm_1, full_layer_one
elif self.trial == 2:
weights = lstm_1.trainable_weights + full_layer_two.trainable_weights + lstm_2.trainable_weights
if self.probabilistic:
weights += prob_layer.trainable_weights
return y_pred, weights, lstm_1, full_layer_two, lstm_2, prob_layer
else:
weights += [W, b]
return y_pred, weights, lstm_1, full_layer_two, lstm_2
elif self.trial == 3:
weights = lstm_1.trainable_weights + full_layer_two.trainable_weights + lstm_2.trainable_weights + full_layer_three.trainable_weights +lstm_3.trainable_weights
if self.probabilistic:
weights += prob_layer.trainable_weights
return y_pred, weights, lstm_1, full_layer_two, lstm_2, full_layer_three, lstm_3, prob_layer
else:
weights += [W, b]
return y_pred, weights, lstm_1, full_layer_two, lstm_2, full_layer_three, lstm_3
def predict(self, X):
units = self.model['units']
best_weights = self.model['best_weights']
H = self.create_inputs(X)
tf.compat.v1.reset_default_graph()
graph_lstm = tf.Graph()
with graph_lstm.as_default():
with tf.device("/cpu:0"):
x1 = tf.compat.v1.placeholder('float', shape=[None, self.D1, self.D2], name='input_data')
with tf.device("/cpu:0"):
if self.trial == 0:
if self.probabilistic:
y_pred_, weights, lstm_1, prob_layer = self.build_graph(x1, best_weights, units, 1)
else:
y_pred_, weights, lstm_1 = self.build_graph(x1, best_weights, units, 1)
elif self.trial == 1:
if self.probabilistic:
y_pred_, weights, lstm_1, full_layer_one, prob_layer = self.build_graph(x1, best_weights, units, 1)
else:
y_pred_, weights, lstm_1, full_layer_one = self.build_graph(x1, best_weights, units, 1)
elif self.trial == 2:
if self.probabilistic:
y_pred_, weights, lstm_1, full_layer_two, lstm_2, prob_layer = self.build_graph(x1, best_weights, units, 1)
else:
y_pred_, weights, lstm_1, full_layer_two, lstm_2 = self.build_graph(x1, best_weights, units, 1)
elif self.trial == 3:
if self.probabilistic:
y_pred_, weights, lstm_1, full_layer_two, lstm_2, full_layer_three, lstm_3, prob_layer = self.build_graph(x1, best_weights, units, 1)
else:
y_pred_, weights, lstm_1, full_layer_two, lstm_2, full_layer_three, lstm_3 = self.build_graph(x1, best_weights, units, 1)
config_tf = tf.compat.v1.ConfigProto(allow_soft_placement=True)
config_tf.gpu_options.allow_growth = True
with tf.compat.v1.Session(graph=graph_lstm, config=config_tf) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'], best_weights['build_lstm/lstm1/recurrent_kernel:0'], best_weights['build_lstm/lstm1/bias:0']])
if self.trial == 0:
if self.probabilistic:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'],
best_weights['build_lstm/lstm1/recurrent_kernel:0'],
best_weights['build_lstm/lstm1/bias:0']])
prob_layer.set_weights(
[best_weights['build_lstm/dense_prob/kernel:0'], best_weights['build_lstm/dense_prob/bias:0']])
else:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'], best_weights['build_lstm/lstm1/recurrent_kernel:0'], best_weights['build_lstm/lstm1/bias:0']])
elif self.trial == 1:
if self.probabilistic:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'],
best_weights['build_lstm/lstm1/recurrent_kernel:0'],
best_weights['build_lstm/lstm1/bias:0']])
full_layer_one.set_weights(
[best_weights['build_lstm/dense1/kernel:0'], best_weights['build_lstm/dense1/bias:0']])
prob_layer.set_weights(
[best_weights['build_lstm/dense_prob/kernel:0'],
best_weights['build_lstm/dense_prob/bias:0']])
else:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'],
best_weights['build_lstm/lstm1/recurrent_kernel:0'],
best_weights['build_lstm/lstm1/bias:0']])
full_layer_one.set_weights(
[best_weights['build_lstm/dense1/kernel:0'], best_weights['build_lstm/dense1/bias:0']])
elif self.trial == 2:
if self.probabilistic:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'],
best_weights['build_lstm/lstm1/recurrent_kernel:0'],
best_weights['build_lstm/lstm1/bias:0']])
full_layer_two.set_weights(
[best_weights['build_lstm/dense1/kernel:0'], best_weights['build_lstm/dense1/bias:0']])
lstm_2.set_weights([best_weights['build_lstm/lstm2/kernel:0'],
best_weights['build_lstm/lstm2/recurrent_kernel:0'],
best_weights['build_lstm/lstm2/bias:0']])
prob_layer.set_weights(
[best_weights['build_lstm/dense_prob/kernel:0'],
best_weights['build_lstm/dense_prob/bias:0']])
else:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'],
best_weights['build_lstm/lstm1/recurrent_kernel:0'],
best_weights['build_lstm/lstm1/bias:0']])
full_layer_two.set_weights(
[best_weights['build_lstm/dense1/kernel:0'], best_weights['build_lstm/dense1/bias:0']])
lstm_2.set_weights([best_weights['build_lstm/lstm2/kernel:0'],
best_weights['build_lstm/lstm2/recurrent_kernel:0'],
best_weights['build_lstm/lstm2/bias:0']])
elif self.trial == 3:
if self.probabilistic:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'],
best_weights['build_lstm/lstm1/recurrent_kernel:0'],
best_weights['build_lstm/lstm1/bias:0']])
full_layer_two.set_weights(
[best_weights['build_lstm/dense1/kernel:0'], best_weights['build_lstm/dense1/bias:0']])
lstm_2.set_weights([best_weights['build_lstm/lstm2/kernel:0'],
best_weights['build_lstm/lstm2/recurrent_kernel:0'],
best_weights['build_lstm/lstm2/bias:0']])
full_layer_three.set_weights(
[best_weights['build_lstm/dense2/kernel:0'], best_weights['build_lstm/dense2/bias:0']])
lstm_3.set_weights([best_weights['build_lstm/lstm3/kernel:0'],
best_weights['build_lstm/lstm3/recurrent_kernel:0'],
best_weights['build_lstm/lstm3/bias:0']])
prob_layer.set_weights(
[best_weights['build_lstm/dense_prob/kernel:0'],
best_weights['build_lstm/dense_prob/bias:0']])
else:
lstm_1.set_weights([best_weights['build_lstm/lstm1/kernel:0'],
best_weights['build_lstm/lstm1/recurrent_kernel:0'],
best_weights['build_lstm/lstm1/bias:0']])
full_layer_two.set_weights(
[best_weights['build_lstm/dense1/kernel:0'], best_weights['build_lstm/dense1/bias:0']])
lstm_2.set_weights([best_weights['build_lstm/lstm2/kernel:0'],
best_weights['build_lstm/lstm2/recurrent_kernel:0'],
best_weights['build_lstm/lstm2/bias:0']])
full_layer_three.set_weights(
[best_weights['build_lstm/dense2/kernel:0'], best_weights['build_lstm/dense2/bias:0']])
lstm_3.set_weights([best_weights['build_lstm/lstm3/kernel:0'],
best_weights['build_lstm/lstm3/recurrent_kernel:0'],
best_weights['build_lstm/lstm3/bias:0']])
y_pred, weights_run= sess.run([y_pred_, weights],
feed_dict={x1: H})
sess.close()
return y_pred
```
#### File: version3/project_manager/Proba_Model_manager.py
```python
import os
import numpy as np
import joblib
class proba_model_manager():
    def __init__(self, static_data, params={}):
        self.istrained = False
        self.method = 'mlp'
        # model_dir must be defined before it is used to build test_dir below.
        self.model_dir = os.path.join(static_data['path_model'], 'Probabilistic')
        self.data_dir = static_data['path_data']
        if len(params) > 0:
            self.params = params
            self.test = params['test']
            self.test_dir = os.path.join(self.model_dir, 'test_' + str(self.test))
        if hasattr(self, 'test'):
            try:
                self.load(self.test_dir)
            except:
                pass
        else:
            try:
                self.load(self.model_dir)
            except:
                pass
        self.static_data = static_data
        self.cluster_name = static_data['_id']
        self.rated = static_data['rated']
        self.probabilistic = True
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        if hasattr(self, 'test_dir') and not os.path.exists(self.test_dir):
            os.makedirs(self.test_dir)
def load(self, path):
if os.path.exists(os.path.join(path, self.method + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(path, self.method + '.pickle'))
self.__dict__.update(tmp_dict)
except:
                raise ImportError('Cannot open the probabilistic MLP model')
        else:
            raise ImportError('Cannot find the probabilistic MLP model')
```
#### File: version3/project_manager/project_group_init.py
```python
import os, joblib,sys, logging
import numpy as np
import pandas as pd
class ProjectGroupInit():
def __init__(self, static_data):
self.static_data = static_data
self.file_data = static_data['data_file_name']
self.project_owner = static_data['project_owner']
self.projects_group = static_data['projects_group']
self.area_group = static_data['area_group']
self.version_group = static_data['version_group']
self.version_model = static_data['version_model']
self.weather_in_data = static_data['weather_in_data']
self.nwp_model = static_data['NWP_model']
self.nwp_resolution = static_data['NWP_resolution']
self.data_variables = static_data['data_variables']
data_file_name = os.path.basename(self.file_data)
if 'load' in data_file_name:
self.model_type = 'load'
elif 'pv' in data_file_name:
self.model_type = 'pv'
elif 'wind' in data_file_name:
self.model_type = 'wind'
elif 'fa' in data_file_name:
self.model_type = 'fa'
else:
raise IOError('Wrong data file name. Use one of load_ts.csv, wind_ts.csv, pv_ts.csv')
if self.static_data['Docker']:
if sys.platform == 'linux':
self.sys_folder = '/models/'
if self.nwp_model == 'skiron' and self.nwp_resolution == 0.05:
self.path_nwp = '/nwp_grib/SKIRON'
elif self.nwp_model == 'skiron' and self.nwp_resolution == 0.1:
self.path_nwp = '/nwp_grib/SKIRON_low'
elif self.nwp_model == 'ecmwf':
self.path_nwp = '/nwp_grib/ECMWF'
else:
self.path_nwp = None
else:
if self.nwp_model == 'ecmwf':
self.sys_folder = '/models/'
self.path_nwp = '/nwp_grib/ECMWF'
else:
self.sys_folder = '/models/'
self.path_nwp = None
else:
if sys.platform == 'linux':
self.sys_folder = '/media/smartrue/HHD1/George/models/'
if self.nwp_model == 'skiron' and self.nwp_resolution == 0.05:
self.path_nwp = '/media/smartrue/HHD2/SKIRON'
elif self.nwp_model == 'skiron' and self.nwp_resolution == 0.1:
self.path_nwp = '/media/smartrue/HHD2/SKIRON_low'
elif self.nwp_model == 'ecmwf':
self.path_nwp = '/media/smartrue/HHD2/ECMWF'
else:
self.path_nwp = None
else:
if self.nwp_model == 'ecmwf':
self.sys_folder = 'D:/models/'
self.path_nwp = 'D:/Dropbox/ECMWF'
else:
self.sys_folder = 'D:/models/'
self.path_nwp = None
self.path_group = self.sys_folder + self.project_owner + '/' + self.projects_group + '_ver' + str(
self.version_group) + '/' + self.model_type
if not os.path.exists(self.path_group):
os.makedirs(self.path_group)
self.path_nwp_group = self.sys_folder + self.project_owner + '/' + self.projects_group + '_ver' + str(
self.version_group) + '/nwp'
if not os.path.exists(self.path_nwp_group):
os.makedirs(self.path_nwp_group)
self.create_logger()
def create_logger(self):
self.logger = logging.getLogger('ProjectInitManager_' + self.model_type)
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.path_group, 'log_' + self.projects_group + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def check_project_names(self):
flag = True
if self.model_type in {'wind', 'pv'}:
for name in self.projects:
if name not in self.coord.index.to_list() and name != self.projects_group + '_' + self.model_type and name != 'APE_net':
flag = False
                    self.logger.info('There is an inconsistency between the data and coordinates files for project %s', name)
            if flag == False:
                raise ValueError('Inconsistency in project names between data and coordinates')
if self.use_rated:
for name in self.projects:
if name not in self.rated.index.to_list() and name != self.projects_group + '_' + self.model_type:
flag = False
                    self.logger.info('There is an inconsistency between the data and rated power files for project %s', name)
            if flag == False:
                raise ValueError('Inconsistency in project names between data and rated power')
return flag
def load_data(self):
try:
self.data = pd.read_csv(self.file_data, header=0, index_col=0, parse_dates=True, dayfirst=True)
except:
self.logger.info('Cannot import timeseries from the file %s', self.file_data)
raise IOError('Cannot import timeseries from the file %s', self.file_data)
self.logger.info('Timeseries imported successfully from the file %s', self.file_data)
if 'total' in self.data.columns:
self.data = self.data.rename(columns={'total':self.projects_group + '_' + self.model_type})
if self.static_data['Evaluation_start'] != None:
if self.model_type == 'fa':
try:
eval_date = pd.to_datetime(self.static_data['Evaluation_start'], format='%d%m%Y %H:%M')
self.data_eval = self.data.iloc[np.where(self.data.index>eval_date-pd.DateOffset(days=372))]
self.data = self.data.iloc[np.where(self.data.index<=eval_date)]
except:
raise ValueError('Wrong date format, use %d%m%Y %H:%M. Or the date does not exist in the dataset')
elif self.model_type == 'load':
if self.data.columns[0]=='lv_load':
try:
eval_date = pd.to_datetime(self.static_data['Evaluation_start'], format='%d%m%Y %H:%M')
self.data_eval = self.data.iloc[np.where(self.data.index > eval_date - pd.DateOffset(hours=372))]
self.data = self.data.iloc[np.where(self.data.index <= eval_date)]
except:
raise ValueError(
'Wrong date format, use %d%m%Y %H:%M. Or the date does not exist in the dataset')
elif self.data.columns[0]=='SCADA':
try:
eval_date = pd.to_datetime(self.static_data['Evaluation_start'], format='%d%m%Y %H:%M')
self.data_eval = self.data.iloc[np.where(self.data.index > eval_date - pd.DateOffset(hours=9001))]
self.data = self.data.iloc[np.where(self.data.index <= eval_date)]
except:
raise ValueError(
'Wrong date format, use %d%m%Y %H:%M. Or the date does not exist in the dataset')
else:
try:
eval_date = pd.to_datetime(self.static_data['Evaluation_start'], format='%d%m%Y %H:%M')
self.data_eval = self.data.iloc[np.where(self.data.index > eval_date)]
self.data = self.data.iloc[np.where(self.data.index <= eval_date)]
except:
raise ValueError(
'Wrong date format, use %d%m%Y %H:%M. Or the date does not exist in the dataset')
self.projects = []
if self.model_type == 'load':
self.projects.append(self.data.columns[0])
elif self.model_type == 'fa':
if self.version_model==0:
self.projects.append('fa_curr_morning')
elif self.version_model==1:
self.projects.append('fa_ahead_morning')
else:
raise ValueError('Version model should be 0 for current day and 1 for day ahead otherwise choose another group version')
else:
for name in self.data.columns:
if name=='total':
name = self.projects_group + '_' + self.model_type
self.projects.append(name)
if self.weather_in_data == False:
try:
self.coord = pd.read_csv(self.file_coord, header=None, index_col=0)
except:
self.logger.info('Cannot import coordinates from the file %s', self.file_coord)
raise IOError('Cannot import coordinates from the file %s', self.file_coord)
self.logger.info('Coordinates imported successfully from the file %s', self.file_coord)
else:
self.logger.info('Coordinates in the data')
if self.use_rated:
try:
self.rated = pd.read_csv(self.file_rated, header=None, index_col=0)
except:
self.logger.info('Cannot import Rated Power from the file %s', self.file_rated)
raise IOError('Cannot import Rated Power from the file %s', self.file_rated)
self.logger.info('Rated Power imported successfully from the file %s', self.file_rated)
self.logger.info('Data loaded successfully')
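    # create_area() builds a lat/long bounding box on the NWP grid for each site: the
    # given point is snapped to the nearest grid node and padded by `levels` grid cells
    # on every side; a 4-value coordinate (or a 2x2 array) is used as an area directly.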
def create_area(self, coord, resolution):
if self.nwp_resolution == 0.05:
levels = 4
round_coord = 1
else:
levels = 2
round_coord = 0
if coord != None:
if isinstance(coord, list):
if len(coord) == 2:
lat = coord[0]
long = coord[1]
lat_range = np.arange(np.around(lat, round_coord) - 20, np.around(lat, round_coord) + 20, resolution)
lat1 = lat_range[np.abs(lat_range - lat).argmin()] - self.nwp_resolution / 10
lat2 = lat_range[np.abs(lat_range - lat).argmin()] + self.nwp_resolution / 10
long_range = np.arange(np.around(long, round_coord) - 20, np.around(long, round_coord) + 20, resolution)
long1 = long_range[np.abs(long_range - long).argmin()] - self.nwp_resolution / 10
long2 = long_range[np.abs(long_range - long).argmin()] + self.nwp_resolution / 10
area = [[lat1 - self.nwp_resolution * levels, long1 - self.nwp_resolution * levels],
[lat2 + self.nwp_resolution * levels, long2 + self.nwp_resolution * levels]]
elif len(coord) == 4:
area = list(np.array(coord).reshape(2, 2))
else:
raise ValueError('Wrong coordinates. Should be point (lat, long) or area [lat1, long1, lat2, long2]')
elif isinstance(coord, dict):
area = dict()
for key, value in coord.items():
if len(value) == 2:
lat = value[0]
long = value[1]
lat_range = np.arange(np.around(lat, round_coord) - 20, np.around(lat, round_coord) + 20,
resolution)
lat1 = lat_range[np.abs(lat_range - lat).argmin()] - self.nwp_resolution / 10
lat2 = lat_range[np.abs(lat_range - lat).argmin()] + self.nwp_resolution / 10
long_range = np.arange(np.around(long, round_coord) - 20, np.around(long, round_coord) + 20,
resolution)
long1 = long_range[np.abs(long_range - long).argmin()] - self.nwp_resolution / 10
long2 = long_range[np.abs(long_range - long).argmin()] + self.nwp_resolution / 10
area[key] = [[lat1 - self.nwp_resolution * levels, long1 - self.nwp_resolution * levels],
[lat2 + self.nwp_resolution * levels, long2 + self.nwp_resolution * levels]]
else:
area[key] = np.array(value).reshape(2, 2)
else:
raise ValueError('Wrong coordinates. Should be dict or list')
else:
area = dict()
        self.logger.info('Areas created successfully')
return area
def initialize(self):
data_file_name = os.path.basename(self.file_data)
if os.path.exists(os.path.join(os.path.dirname(self.file_data), 'coord_auto_' + self.model_type + '.csv')):
self.file_coord = os.path.join(os.path.dirname(self.file_data), 'coord_auto_' + self.model_type + '.csv')
else:
self.file_coord = os.path.join(os.path.dirname(self.file_data), 'coord_' + self.model_type + '.csv')
if not os.path.exists(self.file_coord) and self.weather_in_data == False:
raise IOError('File with coordinates does not exist')
self.file_rated = os.path.join(os.path.dirname(self.file_data), 'rated_' + self.model_type + '.csv')
if not os.path.exists(self.file_rated):
if self.model_type in {'wind', 'pv'} and self.projects_group not in {'APE_net'}:
                raise ValueError('Provide rated power for each project. The type of the projects is %s' % self.model_type)
self.use_rated = False
else:
self.use_rated = True
self.load_data()
self.group_static_data = []
if self.check_project_names():
for project_name in self.projects:
path_project = self.path_group + '/' + project_name
if not os.path.exists(path_project):
os.makedirs(path_project)
path_model = path_project + '/model_ver' + str(self.version_model)
if not os.path.exists(path_model):
os.makedirs(path_model)
path_backup = self.path_group + '/backup_models/' + project_name + '/model_ver' + str(self.version_model)
if not os.path.exists(path_backup):
os.makedirs(path_backup)
path_data = path_model + '/DATA'
if not os.path.exists(path_data):
os.makedirs(path_data)
path_fuzzy_models = path_model + '/fuzzy_models'
if not os.path.exists(path_fuzzy_models):
os.makedirs(path_fuzzy_models)
if self.use_rated:
if project_name == self.projects_group + '_' + self.model_type and project_name not in self.rated.index.to_list():
rated = self.rated.sum().to_list()[0]
else:
rated = self.rated.loc[project_name].to_list()[0]
else:
rated = None
if hasattr(self, 'coord'):
if project_name == 'APE_net' or self.model_type == 'load' or project_name == self.projects_group + '_' + self.model_type:
coord = dict()
for name, latlong in self.coord.iterrows():
coord[name] = latlong.values.tolist()
else:
coord = self.coord.loc[project_name].to_list()
else:
coord = None
area = self.create_area(coord, self.nwp_resolution)
temp = {'_id': project_name,
'owner': self.project_owner,
'project_group': self.projects_group,
'type': self.model_type,
'location': coord,
'areas': area,
'rated': rated,
'path_project': path_project,
'path_model': path_model,
'path_group': self.path_group,
'version_group': self.version_group,
'version_model': self.version_model,
'path_backup': path_backup,
'path_data': path_data,
'pathnwp': self.path_nwp_group,
'path_fuzzy_models': path_fuzzy_models,
'run_on_platform': False,
}
static_data = dict()
for key, value in self.static_data.items():
static_data[key] = value
for key, value in temp.items():
static_data[key] = value
self.group_static_data.append({'_id': project_name, 'static_data': static_data})
joblib.dump(static_data, os.path.join(path_model, 'static_data.pickle'))
with open(os.path.join(path_model, 'static_data.txt'), 'w') as file:
for k, v in static_data.items():
if not isinstance(v, dict):
file.write(str(k) + ' >>> ' + str(v) + '\n\n')
else:
file.write(str(k) + ' >>> ' + '\n')
for kk, vv in v.items():
file.write('\t' + str(kk) + ' >>> ' + str(vv) + '\n')
joblib.dump(self.group_static_data, os.path.join(self.path_group, 'static_data_projects.pickle'))
self.logger.info('Static data of all projects created')
```
#### File: version3/rabbitmq_rpc/queue.py
```python
class Queue(object):
def __init__(self, name, dispatcher, exclusive=False):
self.name = name
self.exclusive = exclusive
self.dispatcher = dispatcher
def add_consumer(self, consumer):
return self.dispatcher.register(consumer)
```
#### File: version3/RBF_CNN_Manager/RBF_CNN_manager.py
```python
import joblib
from Fuzzy_clustering.version3.RBF_CNN_Manager.CNN_tf_core import CNN
from Fuzzy_clustering.version3.RBF_CNN_Manager.RBF_CNN_model import RBF_CNN_model
import pika, uuid, time, json, os
import numpy as np
from rabbitmq_rpc.server import RPCServer
from Fuzzy_clustering.version3.RBF_CNN_Manager.Cluster_object import cluster_object
RABBIT_MQ_HOST = os.getenv('RABBIT_MQ_HOST')
RABBIT_MQ_PASS = os.getenv('RABBIT_MQ_PASS')
RABBIT_MQ_PORT = int(os.getenv('RABBIT_MQ_PORT'))
server = RPCServer(queue_name='RBF_CNN_manager', host=RABBIT_MQ_HOST, port=RABBIT_MQ_PORT, threaded=False)
class rbf_cnn_manager():
def __init__(self, static_data, cluster, method, params):
self.params = params
self.test = params['test']
self.method = str.lower(method)
self.cluster = cluster
self.istrained = False
self.model_dir = os.path.join(cluster.cluster_dir, 'RBF_CNN')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
self.test_dir = self.model_dir
try:
self.load()
except:
pass
if not self.istrained:
self.test_dir = os.path.join(self.model_dir, 'test_' + str(self.test))
try:
self.load()
except:
if not os.path.exists(self.test_dir):
os.makedirs(self.test_dir)
pass
self.static_data = static_data
self.cluster_name = cluster.cluster_name
self.rated = static_data['rated']
self.data_dir = cluster.data_dir
self.probabilistic = False
def fit(self):
if self.istrained == False:
return self.optimize_rbf_cnn()
else:
return self.acc
def fit_TL(self):
if self.istrained == False:
return self.optimize_rbf_cnn_TL()
else:
return self.acc
def load_data(self):
if os.path.exists(os.path.join(self.data_dir, 'dataset_X.csv')):
cvs = joblib.load(os.path.join(self.data_dir, 'cvs.pickle'))
else:
cvs = np.array([])
return cvs
def load_rbf_models(self):
model_rbfs = RBF_CNN_model(self.static_data, self.cluster, cnn=False)
rbf_models = [model_rbfs.model_rbf_ols.models, model_rbfs.model_rbf_ga.models, model_rbfs.model_rbfnn.model]
return rbf_models
def optimize_rbf_cnn(self):
self.trial = self.params['trial']
self.pool_size = self.params['pool_size']
self.kernels = self.params['kernels']
self.lr = self.params['lr']
self.hsize = self.params['h_size']
cnn_max_iterations = self.static_data['CNN']['max_iterations']
self.filters = self.static_data['CNN']['filters']
cvs = self.load_data()
self.N = cvs[0][0].shape[1]
self.D = cvs[0][0].shape[0] + cvs[0][2].shape[0] + cvs[0][4].shape[0]
self.static_data_cnn = self.static_data['CNN']
self.static_data_rbf = self.static_data['RBF']
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
self.rbf_models = self.load_rbf_models()
cnn = CNN(self.static_data, self.rated, self.rbf_models, X_train, y_train, X_val, y_val, X_test, y_test, self.pool_size, self.trial)
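        # Training is attempted up to three times; on any failure the number of CNN
        # filters is halved before retrying, and the accuracy is set to np.inf if all
        # attempts fail.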
flag = False
for _ in range(3):
try:
self.acc, self.scale_cnn, self.model = cnn.train_cnn(max_iterations=cnn_max_iterations,
learning_rate=self.lr, kernels=self.kernels,
h_size=self.hsize, filters=self.filters)
flag = True
break
except:
self.filters = int(self.filters / 2)
pass
if not flag:
self.acc = np.inf
self.scale_cnn = None
self.model = None
self.istrained=True
self.save()
return self.acc
def load(self):
if os.path.exists(os.path.join(self.test_dir, self.method + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(self.test_dir, self.method + '.pickle'))
self.__dict__.update(tmp_dict)
            except:
                raise ImportError('Cannot open RBF-CNN model')
        else:
            raise ImportError('Cannot find RBF-CNN model')
def save(self):
tmp_dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data_all', 'static_data', 'temp_dir', 'cluster_cnn_dir', 'cluster_dir']:
tmp_dict[k] = self.__dict__[k]
joblib.dump(tmp_dict,os.path.join(self.test_dir, self.method + '.pickle'), compress=9)
def optimize_rbf_cnn_TL(self):
static_data_tl = self.static_data['tl_project']['static_data']
cluster_dir_tl = os.path.join(static_data_tl['path_model'], 'Regressor_layer/' + self.cluster_name)
model_TL_dir = os.path.join(cluster_dir_tl, 'RBF_CNN')
model_TL = joblib.load(os.path.join(model_TL_dir, self.method + '.pickle'))
self.trial = model_TL['trial']
self.pool_size = model_TL['pool_size']
self.kernels = model_TL['kernels']
self.lr = model_TL['lr']
self.hsize = model_TL['h_size']
cnn_max_iterations = self.static_data['CNN']['max_iterations']
self.filters = model_TL['filters']
cvs = self.load_data()
self.N = cvs[0][0].shape[1]
self.D = cvs[0][0].shape[0] + cvs[0][2].shape[0] + cvs[0][4].shape[0]
self.static_data_cnn = self.static_data['CNN']
self.static_data_rbf = self.static_data['RBF']
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
self.rbf_models = self.load_rbf_models()
cnn = CNN(self.static_data, self.rated, self.rbf_models, X_train, y_train, X_val, y_val, X_test, y_test, self.pool_size, self.trial)
flag = False
for _ in range(3):
try:
self.acc, self.scale_cnn, self.model = cnn.train_cnn(max_iterations=cnn_max_iterations,
learning_rate=self.lr, kernels=self.kernels,
h_size=self.hsize, filters=self.filters)
flag = True
break
except:
self.filters = int(self.filters / 2)
pass
if not flag:
self.acc = np.inf
self.scale_cnn = None
self.model = None
self.istrained=True
self.save()
return self.acc
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.integer) or isinstance(obj, int):
return int(obj)
elif isinstance(obj, np.floating) or isinstance(obj, float):
return float(obj)
        elif isinstance(obj, (np.str_, str)):
            return str(obj)
        elif isinstance(obj, (np.bool_, bool)):
            return bool(obj)
try:
return json.JSONEncoder.default(self, obj)
except:
print(obj)
raise TypeError('Object is not JSON serializable')
@server.consumer()
def deep_manager(static_data):
print(" [.] Receive cluster %s)" % static_data['cluster_name'])
cluster = cluster_object(static_data, static_data['cluster_name'])
model_method = static_data['method']
params = static_data['params']
model_3d = rbf_cnn_manager(static_data, cluster, model_method, params=params)
if model_3d.istrained == False:
response = {'result': model_3d.fit(), 'cluster_name': cluster.cluster_name, 'project': static_data['_id'],
'test': params['test'], 'method': model_method}
else:
response = {'result': model_3d.acc, 'cluster_name': cluster.cluster_name, 'project': static_data['_id'],
'test': params['test'], 'method': model_method}
return response
if __name__=='__main__':
server.run()
```
#### File: version3/RBFNN_Manager/RBFNN_manager.py
```python
import joblib
from Fuzzy_clustering.version3.RBFNN_Manager.RBFNN_tf_core import RBFNN
import pika, uuid, time, json, os
import numpy as np
from Fuzzy_clustering.version3.rabbitmq_rpc.server import RPCServer
from Fuzzy_clustering.version3.RBFNN_Manager.Cluster_object import cluster_object
RABBIT_MQ_HOST = os.getenv('RABBIT_MQ_HOST')
RABBIT_MQ_PASS = os.getenv('RABBIT_MQ_PASS')
RABBIT_MQ_PORT = int(os.getenv('RABBIT_MQ_PORT'))
server = RPCServer(queue_name='RBFNNmanager', host=RABBIT_MQ_HOST, port=RABBIT_MQ_PORT, threaded=False)
class rbfnn_manager():
def __init__(self, static_data, cluster, method, params):
self.params = params
self.test = params['test']
self.method = str.lower(method)
self.cluster = cluster
self.istrained = False
self.model_dir = os.path.join(cluster.cluster_dir, 'RBFNN')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
self.test_dir = self.model_dir
try:
self.load()
except:
pass
if not self.istrained:
self.test_dir = os.path.join(self.model_dir, 'test_' + str(self.test))
try:
self.load()
except:
if not os.path.exists(self.test_dir):
os.makedirs(self.test_dir)
pass
self.static_data = static_data
self.cluster_name = cluster.cluster_name
self.rated = static_data['rated']
self.data_dir = cluster.data_dir
self.probabilistic = False
def fit(self):
if self.istrained == False:
return self.optimize_rbf()
else:
return self.acc
def fit_TL(self):
if self.istrained == False:
return self.optimize_rbf_TL()
else:
return self.acc
def load_data(self):
if os.path.exists(os.path.join(self.data_dir, 'dataset_X.csv')):
cvs = joblib.load(os.path.join(self.data_dir, 'cvs.pickle'))
else:
cvs = np.array([])
return cvs
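    # cvs.pickle is assumed to hold the cross-validation folds, each one a tuple
    # (X_train, y_train, X_val, y_val, X_test, y_test); only the first fold is used below.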
def optimize_rbf(self):
self.num_centr = self.params['num_centr']
self.lr = self.params['lr']
self.mean_var = self.static_data['RBF']['mean_var']
self.std_var = self.static_data['RBF']['std_var']
max_iterations = self.static_data['RBF']['max_iterations']
cvs = self.load_data()
self.N = cvs[0][0].shape[1]
self.D = cvs[0][0].shape[0] + cvs[0][2].shape[0] + cvs[0][4].shape[0]
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
rbf = RBFNN(self.static_data, max_iterations=max_iterations)
self.acc, self.centroids, self.radius, self.w, self.model = rbf.train(X_train, y_train, X_val, y_val, X_test, y_test, self.num_centr, self.lr)
self.istrained=True
self.save()
return self.acc
def load(self):
if os.path.exists(os.path.join(self.test_dir, self.method + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(self.test_dir, self.method + '.pickle'))
self.__dict__.update(tmp_dict)
            except:
                raise ImportError('Cannot open RBFNN model')
        else:
            raise ImportError('Cannot find RBFNN model')
def save(self):
tmp_dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data_all', 'static_data', 'temp_dir', 'cluster_cnn_dir', 'cluster_dir']:
tmp_dict[k] = self.__dict__[k]
joblib.dump(tmp_dict,os.path.join(self.test_dir, self.method + '.pickle'), compress=9)
def optimize_rbf_TL(self):
static_data_tl = self.static_data['tl_project']['static_data']
cluster_dir_tl = os.path.join(static_data_tl['path_model'], 'Regressor_layer/' + self.cluster_name)
model_TL_dir = os.path.join(cluster_dir_tl, 'RBFNN')
model_TL = joblib.load(os.path.join(model_TL_dir, self.method + '.pickle'))
self.num_centr = model_TL['num_centr']
self.lr = model_TL['lr']
self.mean_var = model_TL['mean_var']
self.std_var = model_TL['std_var']
max_iterations = self.static_data['RBF']['max_iterations']
cvs = self.load_data()
self.N = cvs[0][0].shape[1]
self.D = cvs[0][0].shape[0] + cvs[0][2].shape[0] + cvs[0][4].shape[0]
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
rbf = RBFNN(self.static_data, max_iterations=max_iterations)
self.acc, self.centroids, self.radius, self.w, self.model = rbf.train(X_train, y_train, X_val, y_val, X_test, y_test, self.num_centr, self.lr)
self.istrained=True
self.save()
return self.acc
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, np.integer) or isinstance(obj, int):
return int(obj)
elif isinstance(obj, np.floating) or isinstance(obj, float):
return float(obj)
        elif isinstance(obj, (np.str_, str)):
            return str(obj)
        elif isinstance(obj, (np.bool_, bool)):
            return bool(obj)
try:
return json.JSONEncoder.default(self, obj)
except:
print(obj)
raise TypeError('Object is not JSON serializable')
@server.consumer()
def deep_manager(static_data):
print(" [.] Receive cluster %s)" % static_data['cluster_name'])
cluster = cluster_object(static_data, static_data['cluster_name'])
model_method = static_data['method']
params = static_data['params']
model_3d = rbfnn_manager(static_data, cluster, model_method, params=params)
if model_3d.istrained == False:
response = {'result': model_3d.fit(), 'cluster_name': cluster.cluster_name, 'project': static_data['_id'],
'test': params['test'], 'method': model_method}
else:
response = {'result': model_3d.acc, 'cluster_name': cluster.cluster_name, 'project': static_data['_id'],
'test': params['test'], 'method': model_method}
return response
if __name__=='__main__':
server.run()
```
#### File: Fuzzy_clustering/ver_tf2/Clusterer_optimize_deep.py
```python
import numpy as np
import pandas as pd
import os, copy
import joblib
import skfuzzy as fuzz
import difflib, random
from deap import base, creator, tools, algorithms
from itertools import repeat
from collections.abc import Sequence
import re
import logging
from sklearn.linear_model import LinearRegression
from Fuzzy_clustering.ver_tf2.imblearn.over_sampling import BorderlineSMOTE, SVMSMOTE, SMOTE,ADASYN
from joblib import Parallel, delayed
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
MAX_EVALUATIONS = 30000
JOBS = 5
POPULATION_SIZE = 50
class cluster_optimize():
def __init__(self, static_data):
self.static_data = static_data
self.train_online = static_data['train_online']
self.add_individual_rules = static_data['clustering']['add_rules_indvidual']
self.import_external_rules = static_data['clustering']['import_external_rules']
self.njobs=2*static_data['RBF']['njobs']
self.resampling = static_data['resampling']
self.path_fuzzy = static_data['path_fuzzy_models']
self.file_fuzzy = static_data['clustering']['cluster_file']
self.type = static_data['type']
logger = logging.getLogger('log_fuzzy.log')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.path_fuzzy, 'log_fuzzy.log'), 'w')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
self.logger = logger
def create_mfs(self, model_mfs, type_mf, num_mf, var_range, var_name):
mfs = []
if type_mf in {'hdd_h2', 'temp_max', 'flux', 'wind', 'temp', 'Temp', 'load', 'power'}:
mean = np.linspace(var_range[0], var_range[1], num=num_mf)
std = var_range[1] / num_mf
for i in range(num_mf):
mfs.append({'name': 'mf_' + type_mf + str(i),
'var_name': var_name,
'type': 'gauss',
'param': [mean[i], 1.25 * std],
'universe': np.arange(var_range[0] - std - .01, var_range[1] + std + .01, .001),
'func': fuzz.gaussmf(np.arange(var_range[0] - std - .01, var_range[1] + std + .01, .001),
mean[i], std)})
elif type_mf in {'sp_index', 'dayweek', 'cloud', 'hour', 'month', 'direction', 'sp_days'}:
mean = np.linspace(var_range[0], var_range[1], num=num_mf)
std = var_range[1] / num_mf
std1 = var_range[1] / (2 * num_mf)
for i in range(num_mf):
param = [mean[i] - 1.5 * std, mean[i] - 1.25 * std1, mean[i] + 1.25 * std1, mean[i] + 1.5 * std]
mfs.append({'name': 'mf_' + type_mf + str(i),
'var_name': var_name,
'type': 'trap',
'param': param,
'universe': np.arange(var_range[0] - .01 - std, var_range[1] + std + .01, .001),
'func': fuzz.trapmf(np.arange(var_range[0] - .01 - std, var_range[1] + std + .01, .001, ),
param)})
else:
            raise NameError('MF type not recognized')
model_mfs[var_name] = mfs
return model_mfs
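    # create_rules() builds one fuzzy rule per combination of membership functions,
    # i.e. the Cartesian product of the MFs of all variables; when add_individual_rules
    # is set, an extra single-MF rule is appended for every membership function.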
def create_rules(self, model_mfs):
rules = []
for mf in sorted(model_mfs.keys()):
if len(rules) == 0:
for f in model_mfs[mf]:
rules.append([f])
else:
new_rules = []
for rule in rules:
for f in model_mfs[mf]:
new_rules.append(rule + [f])
rules = new_rules
        # Optionally also add single-variable rules (one rule per membership function).
if self.add_individual_rules:
for mf in sorted(model_mfs.keys()):
for f in model_mfs[mf]:
rules.append([f])
final_rules = dict()
for i in range(len(rules)):
final_rules['rule.' + str(i)] = rules[i]
return final_rules
def run(self, X_train, y_train, X_test, y_test, rated, num_samples=200):
if rated is None:
rated = y_test.values.ravel()
else:
rated = 20
self.var_names = [v for v in sorted(self.static_data['clustering']['var_imp'].keys())]
self.p = len(self.var_names)
self.range_mfs = dict()
self.num_mfs = dict()
for k, v in self.static_data['clustering']['var_imp'].items():
self.range_mfs[k] = v['range']
self.num_mfs[k] = [v['mfs']]
self.var_lin = self.static_data['clustering']['var_lin']
if self.train_online:
try:
fuzzy_model = joblib.load(os.path.join(self.path_fuzzy, self.file_fuzzy))
self.var_names = [var for var in self.var_names if var not in sorted(fuzzy_model['mfs'].keys())]
except:
fuzzy_model = dict()
fuzzy_model['mfs'] = dict()
else:
fuzzy_model = dict()
fuzzy_model['mfs'] = dict()
for fuzzy_var_name in self.var_names:
for n in self.num_mfs[fuzzy_var_name]:
fuzzy_model['mfs'] = self.create_mfs(fuzzy_model['mfs'], fuzzy_var_name, n,
self.range_mfs[fuzzy_var_name], fuzzy_var_name)
var_del=[]
for var in self.var_names:
if var not in X_train.columns:
var_names = [c for c in X_train.columns if var in c]
X_train[var] = X_train[var_names].mean(axis=1)
X_test[var] = X_test[var_names].mean(axis=1)
var_del.append(var)
if var not in self.var_lin:
self.var_lin.append(var)
lin_models = LinearRegression().fit(X_train[self.var_lin].values, y_train.values.ravel())
preds = lin_models.predict(X_test[self.var_lin].values).ravel()
err = (preds - y_test.values.ravel()) / rated
rms = np.sum(np.square(err))
mae = np.mean(np.abs(err))
        print('rms = %s' % rms)
        print('mae = %s' % mae)
self.logger.info("Objective before train: %s", mae)
problem = cluster_problem(fuzzy_model['mfs'], X_train[self.var_lin], y_train, X_test[self.var_lin],
y_test, self.p, rated, self.resampling, self.add_individual_rules, self.logger, self.njobs, num_samples=num_samples)
problem.run(50, 50)
fuzzy_model = problem.fmodel
self.logger.info("Objective after train: %s", str(fuzzy_model['result']))
best_fuzzy_model = copy.deepcopy(fuzzy_model)
joblib.dump(best_fuzzy_model, os.path.join(self.path_fuzzy, self.file_fuzzy))
if 'horizon' in self.import_external_rules:
self.compact_external_mfs()
if len(var_del) > 0:
X_train = X_train.drop(columns=var_del)
X_test = X_test.drop(columns=var_del)
def compact_external_mfs(self):
self.fuzzy_file = os.path.join(self.path_fuzzy, self.file_fuzzy)
fmodel = joblib.load(self.fuzzy_file)
type_mf = 'horizon'
var_name = 'horizon'
params = [
[0.5, 0.9, 1.1, 1.5],
[1.5, 1.9, 2.1, 2.5],
[2.5, 2.9, 3.1, 3.5],
[3.5, 3.9, 4.1, 4.5],
[4.5, 4.9, 5.1, 5.5],
[5.5, 5.9, 6.1, 6.5],
[6.5, 6.9, 7.1, 7.5],
[7.5, 7.9, 8.1, 8.5],
[8.5, 8.9, 12.1, 15.5],
[12, 13.2, 22.1, 27.5],
[22.1, 25.2, 36.1, 42.5],
[38.1, 42.2, 48.1, 52.5],
]
mfs = []
i = 0
for param in params:
mfs.append({'name': 'mf_' + type_mf + str(i),
'var_name': var_name,
'type': 'trap',
'param': param,
'universe': np.arange(0, 49, .01),
'func': fuzz.trapmf(np.arange(0, 49, .01), param)})
i += 1
fmodel['mfs']['horizon'] = mfs
i = 0
rules = dict()
for mf in mfs:
for rule in fmodel['rules']:
rules['rule.' + str(i)] = fmodel['rules'][rule] + [mf]
i += 1
fmodel['rules'] = rules
joblib.dump(fmodel, os.path.join(self.path_fuzzy, self.file_fuzzy))
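# GA operators used by the fuzzy-clustering optimizer below. cx_fun mixes two
# crossover strategies at random: a two-point swap of gene segments, or a
# BLX-alpha style blend that interpolates between the parents. mut_fun likewise
# alternates between Gaussian perturbation and a bounded polynomial mutation,
# and the checkBounds decorator clips offspring back into the
# [lower_bound, upper_bound] box after mating and mutation.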
def cx_fun(ind1, ind2, alpha):
    if random.random() > 0.5:
size = min(len(ind1), len(ind2))
cxpoint1 = random.randint(1, size)
cxpoint2 = random.randint(1, size - 1)
if cxpoint2 >= cxpoint1:
cxpoint2 += 1
else: # Swap the two cx points
cxpoint1, cxpoint2 = cxpoint2, cxpoint1
ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] \
= ind2[cxpoint1:cxpoint2], ind1[cxpoint1:cxpoint2]
else:
for i, (x1, x2) in enumerate(zip(ind1, ind2)):
gamma = (1. + 2. * alpha) * random.random() - alpha
ind1[i] = (1. - gamma) * x1 + gamma * x2
ind2[i] = gamma * x1 + (1. - gamma) * x2
return ind1, ind2
def mut_fun(individual, mu, sigma, eta, low, up, indpb):
if random.random() > 0.5:
size = len(individual)
if not isinstance(mu, Sequence):
mu = repeat(mu, size)
elif len(mu) < size:
raise IndexError("mu must be at least the size of individual: %d < %d" % (len(mu), size))
if not isinstance(sigma, Sequence):
sigma = repeat(sigma, size)
elif len(sigma) < size:
raise IndexError("sigma must be at least the size of individual: %d < %d" % (len(sigma), size))
for i, m, s in zip(range(size), mu, sigma):
if random.random() < indpb:
individual[i] += random.gauss(m, s)
else:
size = len(individual)
if not isinstance(low, Sequence):
low = repeat(low, size)
elif len(low) < size:
raise IndexError("low must be at least the size of individual: %d < %d" % (len(low), size))
if not isinstance(up, Sequence):
up = repeat(up, size)
elif len(up) < size:
raise IndexError("up must be at least the size of individual: %d < %d" % (len(up), size))
for i, xl, xu in zip(range(size), low, up):
if random.random() <= indpb:
x = individual[i]
delta_1 = (x - xl) / (xu - xl)
delta_2 = (xu - x) / (xu - xl)
rand = random.random()
mut_pow = 1.0 / (eta + 1.)
if rand < 0.5:
xy = 1.0 - delta_1
                    if xy < 0:
                        xy = 1e-6
val = 2.0 * rand + (1.0 - 2.0 * rand) * xy ** (eta + 1)
delta_q = val ** mut_pow - 1.0
else:
xy = 1.0 - delta_2
                    if xy < 0:
                        xy = 1e-6
val = 2.0 * (1.0 - rand) + 2.0 * (rand - 0.5) * xy ** (eta + 1)
delta_q = 1.0 - val ** mut_pow
x = x + delta_q * (xu - xl)
x = min(max(x, xl), xu)
individual[i] = x
return individual,
def checkBounds(mn, mx):
def decorator(func):
        def wrapper(*args, **kwargs):
            offspring = func(*args, **kwargs)
for child in offspring:
for i in range(len(child)):
if child[i] > mx[i]:
child[i] = mx[i]
elif child[i] < mn[i]:
child[i] = mn[i]
return offspring
        return wrapper
return decorator
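# cluster_problem flattens every membership-function parameter into a single
# chromosome, derives per-gene bounds and mutation sigmas from the variable
# universes, and minimises two objectives (sum of squared errors and MAE on the
# test set) with a (mu + lambda)-style DEAP evolutionary loop.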
class cluster_problem():
def __init__(self, mfs, X, y, X_test, y_test, p, rated, resampling, add_individual_rules, logger, njobs, num_samples=500, n_ratio=0.6):
self.logger = logger
self.njobs=njobs
self.resampling = resampling
self.add_individual_rules = add_individual_rules
self.num_samples = num_samples
self.n_ratio = n_ratio
self.rules = self.create_rules(mfs)
self.mfs = mfs
self.X = X
self.y = y
self.X_test = X_test
self.y_test = y_test
self.p = p
if rated is None:
self.rated = self.y_test.values.ravel()
else:
self.rated = rated
x = []
self.lower_bound = []
self.upper_bound = []
self.sigma = []
self.index_constrains = []
self.number_of_constraints = 0
for var in sorted(self.mfs.keys()):
for mf in self.mfs[var]:
param = mf['param']
xrange = [mf['universe'][0], mf['universe'][-1]]
prange = (xrange[1] - xrange[0]) / 5
x = x + param
if len(param) == 2:
self.index_constrains.append(np.arange(len(x) - 2, len(x)))
self.number_of_constraints = self.number_of_constraints + 3
lo = param[0] - prange if (param[0] - prange)<-0.05 else -0.05
up = param[0] + prange if (param[0] + prange)>1.05 else 1.05
self.lower_bound.extend([lo, 0.01])
self.upper_bound.extend([up, prange])
self.sigma.extend([X[var].std() / 3, X[var].std() / 3])
elif len(param) == 4:
self.index_constrains.append(np.arange(len(x) - 4, len(x)))
self.number_of_constraints = self.number_of_constraints + 7
for i in param:
lo = param[0] - prange if (param[0] - prange) < -0.05 else -0.05
up = param[3] + prange if (param[3] + prange) > 1.05 else 1.05
self.lower_bound.append(lo)
self.upper_bound.append(up)
self.sigma.append(X[var].std() / 3)
self.number_of_variables = len(x)
self.number_of_objectives = 2
self.x = x
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", np.ndarray, fitness=creator.FitnessMin)
self.toolbox = base.Toolbox()
attributes=[]
for i in range(self.number_of_variables):
self.toolbox.register("attribute"+str(i), random.gauss, self.lower_bound[i], self.upper_bound[i])
attributes.append(self.toolbox.__getattribute__("attribute"+str(i)))
self.toolbox.register("individual1", tools.initCycle, creator.Individual, tuple(attributes), n=1)
self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual1, n=100)
self.toolbox.register("mate", cx_fun, alpha=0.05)
self.toolbox.register("mutate", mut_fun, mu=0, sigma=self.sigma, eta=0.8, low=self.lower_bound, up=self.upper_bound, indpb=0.6)
self.toolbox.register("select", tools.selTournament, tournsize=3)
self.toolbox.register("evaluate", evaluate)
self.hof = tools.ParetoFront(lambda x, y: (x == y).all())
self.stats = tools.Statistics(lambda ind: ind.fitness.values)
self.stats.register("Avg", np.mean)
self.stats.register("Std", np.std)
self.stats.register("Min", np.min)
self.stats.register("Max", np.max)
self.toolbox.decorate("mate", checkBounds(self.lower_bound, self.upper_bound))
self.toolbox.decorate("mutate", checkBounds(self.lower_bound, self.upper_bound))
def create_rules(self, model_mfs):
rules = []
for mf in sorted(model_mfs.keys()):
if len(rules) == 0:
for f in model_mfs[mf]:
rules.append([f])
else:
new_rules = []
for rule in rules:
for f in model_mfs[mf]:
new_rules.append(rule + [f])
rules = new_rules
if self.add_individual_rules:
for mf in sorted(model_mfs.keys()):
for f in model_mfs[mf]:
rules.append([f])
final_rules = dict()
for i in range(len(rules)):
final_rules['rule.' + str(i)] = rules[i]
return final_rules
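    # run() seeds the population with the current MF parameters, evaluates
    # fitnesses in parallel with joblib, and after ngen generations rebuilds
    # the fuzzy model from the individual with the lowest SSE.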
def run(self, mu, lambda_, cxpb=0.6, mutpb=0.4, ngen=300):
self.population=self.toolbox.population()
param_ind = creator.Individual(self.x)
self.population.pop()
self.population.insert(0, param_ind)
assert lambda_ >= mu, "lambda must be greater or equal to mu."
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in self.population if not ind.fitness.valid]
fit1 = evaluate(np.array(invalid_ind[0]).ravel(),
self.X, self.y, self.X_test, self.y_test, self.rated,
self.mfs, self.rules, self.p, self.resampling, self.num_samples, self.n_ratio)
fitnesses = Parallel(n_jobs=self.njobs)(delayed(evaluate)(np.array(individual).ravel(),
self.X, self.y, self.X_test, self.y_test, self.rated,
self.mfs, self.rules, self.p, self.resampling, self.num_samples, self.n_ratio) for individual in invalid_ind)
# fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if self.hof is not None:
self.hof.update(self.population)
self.logbook = tools.Logbook()
# Gather all the fitnesses in one list and compute the stats
fits = np.array([ind.fitness.values for ind in self.population])
maximums = np.nanmax(fits, axis=0)
minimums = np.nanmin(fits, axis=0)
self.logbook.header = ['gen', 'nevals'] + ['Max_sse:', 'Min_sse:', 'Max_mae:', 'Min_mae:']
        self.logger.info('Iter: %s, Min_sse: %s, Min_mae: %s', 0, *minimums)
record = {'Max_sse:': maximums[0], 'Min_sse:': minimums[0], 'Max_mae:': maximums[1], 'Min_mae:': minimums[1]}
print('GA rbf running generation 0')
print(record)
self.logbook.record(gen=0, nevals=len(invalid_ind), **record)
print(self.logbook.stream)
# Begin the generational process
for gen in range(1, ngen + 1):
# Vary the population
offspring = algorithms.varOr(self.population, self.toolbox, lambda_, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = Parallel(n_jobs=self.njobs)(delayed(evaluate)(np.array(individual).ravel(),
                                                                      self.X, self.y, self.X_test, self.y_test, self.rated,
                                                                      self.mfs, self.rules, self.p, self.resampling, self.num_samples, self.n_ratio) for
                                                    individual in invalid_ind)
# fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
fits = np.array([ind.fitness.values for ind in self.population])
maximums = np.nanmax(fits, axis=0)
minimums = np.nanmin(fits, axis=0)
# Update the hall of fame with the generated individuals
if self.hof is not None:
self.hof.update(self.population)
# Select the next generation population
self.population[:] = self.toolbox.select(offspring, mu)
# Update the statistics with the new population
record = {'Max_sse:': maximums[0], 'Min_sse:': minimums[0], 'Max_mae:': maximums[1],
'Min_mae:': minimums[1]}
print('GA rbf running generation ', str(gen))
print(record)
self.logbook.record(gen=gen, nevals=len(invalid_ind), **record)
            self.logger.info('Iter: %s, Min_sse: %s, Min_mae: %s', str(gen), *minimums)
front = self.population
perf = np.inf
best = 0
for i in range(len(front)):
if front[i].fitness.getValues()[0] < perf:
best = i
perf = front[i].fitness.getValues()[0]
self.fmodel = self.evaluate(np.array(front[best]).ravel(), self.X, self.y, self.X_test, self.y_test, self.rated,
self.mfs, self.rules, self.p, self.resampling)
def evaluate(self, x, X, y, X_test, y_test, rated, mfs, rules, p, resampling):
# print(solution.variables)
i = 0
for var in sorted(mfs.keys()):
for mf in range(len(mfs[var])):
if mfs[var][mf]['type'] == 'gauss':
mfs[var][mf]['param'] = x[i:i + 2]
mfs[var][mf]['func'] = fuzz.gaussmf(mfs[var][mf]['universe'],
mfs[var][mf]['param'][0],
np.abs(mfs[var][mf]['param'][1]))
i += 2
elif mfs[var][mf]['type'] == 'trap':
mfs[var][mf]['param'] = sorted(x[i:i + 4])
mfs[var][mf]['func'] = fuzz.trapmf(mfs[var][mf]['universe'], mfs[var][mf]['param'])
i += 4
for r in sorted(rules.keys()):
for i in range(len(rules[r])):
                ind = int(re.sub(r"\D", "", rules[r][i]['name']))
rules[r][i]['param'] = mfs[rules[r][i]['var_name']][ind]['param']
rules[r][i]['func'] = mfs[rules[r][i]['var_name']][ind]['func']
activations = pd.DataFrame(index=X.index, columns=[rule for rule in sorted(rules.keys())])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X[mf['var_name']]))
activations[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
lin_models = dict()
remove_null_rules = []
for rule in sorted(activations.columns):
indices = activations[rule].index[activations[rule] >= 0.01].tolist()
if len(indices) > self.num_samples and len(indices) < self.n_ratio * X.shape[0] :
X1 = X.loc[indices].values
y1 = y.loc[indices].values
lin_models[rule] = LinearRegression().fit(X1, y1.ravel())
elif len(indices) > 0:
lin_models[rule] = 'null'
remove_null_rules.append(rule)
else:
lin_models[rule] = None
remove_null_rules.append(rule)
total = 0
for rule in sorted(rules.keys()):
indices = activations[rule].index[activations[rule] >= 0.01].tolist()
act = activations.loc[indices].copy(deep=True)
act = act.drop(columns=[rule])
if len(indices) <= self.num_samples and len(indices) < self.n_ratio * X.shape[0] and not act.isnull().all(axis=1).any():
del rules[rule]
del lin_models[rule]
else:
print(len(indices))
self.logger.info("Number of samples of rule %s is %s", rule, len(indices))
total += len(indices)
print(total)
self.logger.info("Number of samples of dataset with %s is %s", X.shape[0], total)
activations_test = pd.DataFrame(index=X_test.index,
columns=[rule for rule in sorted(rules.keys())])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X_test[mf['var_name']]))
activations_test[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
preds = pd.DataFrame(index=X_test.index, columns=sorted(lin_models.keys()))
for rule in sorted(rules.keys()):
indices = activations_test[rule].index[activations_test[rule] >= 0.01].tolist()
if len(indices) != 0:
X1 = X_test.loc[indices].values
y1 = y_test.loc[indices].values
if (lin_models[rule] != 'null' and not lin_models[rule] is None):
preds.loc[indices, rule] = lin_models[rule].predict(X1).ravel()
elif lin_models[rule] == 'null':
preds.loc[indices, rule] = 1e+15
if isinstance(rated, float) or isinstance(rated, int):
err = (preds.loc[indices, rule].values.ravel() - y1.ravel()) / rated
else:
err = (preds.loc[indices, rule].values.ravel() - y1.ravel()) / y1.ravel()
self.logger.info("MAE of rule %s is %s", rule, np.mean(np.abs(err)))
pred = preds.mean(axis=1)
pred[pred.isnull()] = 1e+15
# pred.name='target'
# pred=pred.to_frame()
err = (pred.values.ravel() - y_test.values.ravel()) / rated
self.objectives = [np.sum(np.square(err)),np.mean(np.abs(err))]
self.rules = rules
self.mfs = mfs
fmodel = dict()
fmodel['mfs'] = self.mfs
fmodel['rules'] = self.rules
fmodel['result'] = self.objectives[1]
print('Error = ', self.objectives[1])
return fmodel
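# resampling_fun bins the continuous target into classes and oversamples the
# sparse bins with ADASYN; the bin width is widened (Std *= 10) until the
# resampling succeeds, otherwise a RuntimeError is raised.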
def resampling_fun(X, y, random_state=42):
flag = False
Std = 0.01
while (flag == False and Std <= 1):
try:
std = np.maximum(Std * np.std(y), 0.2)
yy = np.digitize(y, np.arange(np.min(y), np.max(y), std), right=True)
bins = np.arange(np.min(y), np.max(y), std)
bins = bins[(np.bincount(yy.ravel()) >= 2)[:-1]]
yy = np.digitize(y, bins, right=True)
# if Std==0.01 and np.max(yy)!=0:
# strategy = {cl:int(100*X.shape[0]/np.max(yy)) for cl in np.unique(yy)}
# else:
strategy = "auto"
if np.unique(yy).shape[0]==1:
X2 = X
yy2 = y
return X2, yy2
if np.any(np.bincount(yy.ravel())<2):
for cl in np.where(np.bincount(yy.ravel())<2)[0]:
X = X[np.where(yy!=cl)[0]]
y = y[np.where(yy!=cl)[0]]
yy = yy[np.where(yy!=cl)[0]]
sm = ADASYN(sampling_strategy=strategy, random_state=random_state, n_neighbors=np.min(np.bincount(yy.ravel()) - 1))
try:
X2, yy2 = sm.fit_resample(X, yy.ravel())
except:
pass
X2[np.where(X2<0)] = 0
yy2 = bins[yy2 - 1]
flag = True
except:
Std *= 10
if flag == True:
return X2, yy2
else:
raise RuntimeError('Cannot make resampling ')
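# Module-level fitness function executed by the GA workers: it rebuilds the
# membership functions from the chromosome x, computes each rule's activation
# as the p-th root of the product of its memberships, fits a linear model per
# sufficiently populated rule, and returns [SSE, MAE] on the test set.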
def evaluate(x, X, y, X_test, y_test, rated, mfs, rules, p, resampling, num_samples=200, n_ratio=0.6):
# print(solution.variables)
i = 0
for var in sorted(mfs.keys()):
for mf in range(len(mfs[var])):
if mfs[var][mf]['type'] == 'gauss':
mfs[var][mf]['param'] = x[i:i + 2]
mfs[var][mf]['func'] = fuzz.gaussmf(mfs[var][mf]['universe'],
mfs[var][mf]['param'][0],
np.abs(mfs[var][mf]['param'][1]))
i += 2
elif mfs[var][mf]['type'] == 'trap':
mfs[var][mf]['param'] = sorted(x[i:i + 4])
mfs[var][mf]['func'] = fuzz.trapmf(mfs[var][mf]['universe'], mfs[var][mf]['param'])
i += 4
for r in sorted(rules.keys()):
for i in range(len(rules[r])):
            ind = int(re.sub(r"\D", "", rules[r][i]['name']))
rules[r][i]['param'] = mfs[rules[r][i]['var_name']][ind]['param']
rules[r][i]['func'] = mfs[rules[r][i]['var_name']][ind]['func']
activations = pd.DataFrame(index=X.index, columns=['rule.' + str(i) for i in range(len(rules))])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X[mf['var_name']]))
activations[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
lin_models = dict()
for rule in activations.columns:
indices = activations[rule].index[activations[rule] >= 0.01].tolist()
if len(indices) > num_samples and len(indices) < n_ratio * X.shape[0] :
X1 = X.loc[indices].values
y1 = y.loc[indices].values
# if resampling:
# if X1.shape[0] < 300:
# X1, y1 = resampling_fun(X1,y1)
lin_models[rule] = LinearRegression().fit(X1, y1.ravel())
elif len(indices) > 0:
lin_models[rule] = 'null'
else:
lin_models[rule] = None
for rule in sorted(rules.keys()):
indices = activations[rule].index[activations[rule] >= 0.01].tolist()
        if len(indices) <= num_samples and len(indices) < n_ratio * X.shape[0]:
del rules[rule]
del lin_models[rule]
    activations_test = pd.DataFrame(index=X_test.index,
                                    columns=[rule for rule in sorted(rules.keys())])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X_test[mf['var_name']]))
activations_test[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
preds = pd.DataFrame(index=X_test.index, columns=sorted(rules.keys()))
for rule in sorted(rules.keys()):
indices = activations_test[rule].index[activations_test[rule] >= 0.01].tolist()
if len(indices) != 0:
X1 = X_test.loc[indices].values
if (lin_models[rule] != 'null' and not lin_models[rule] is None):
preds.loc[indices, rule] = lin_models[rule].predict(X1).ravel()
elif lin_models[rule] == 'null':
preds.loc[indices, rule] = 1e+15
pred = preds.mean(axis=1)
pred[pred.isnull()] = 1e+15
# pred.name='target'
# pred=pred.to_frame()
err = (pred.values.ravel() - y_test.values.ravel()) / rated
objectives = [np.sum(np.square(err)),np.mean(np.abs(err))]
return objectives
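# clusterer is the lightweight inference-side counterpart: it loads the saved
# fuzzy rules and returns, for every sample, the activation degree of each rule
# (p depends on the project type: 4 for pv/load, 3 for wind/fa).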
class clusterer(object):
def __init__(self, fuzzy_path, fuzzy_file, type):
self.fuzzy_file = os.path.join(fuzzy_path, fuzzy_file)
fmodel = joblib.load(self.fuzzy_file)
self.rules = fmodel['rules']
if type == 'pv':
self.p = 4
elif type == 'wind':
self.p = 3
elif type == 'load':
self.p = 4
elif type == 'fa':
self.p = 3
def compute_activations(self, X):
activations = pd.DataFrame(index=X.index, columns=[i for i in sorted(self.rules.keys())])
var_del=[]
for rule in sorted(self.rules.keys()):
act = []
for mf in self.rules[rule]:
if mf['var_name'] not in X.columns:
var_names = [c for c in X.columns if mf['var_name'] in c]
X[mf['var_name']] = X[var_names].mean(axis=1)
var_del.append(mf['var_name'])
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X[mf['var_name']]))
activations[rule] = np.power(np.prod(np.array(act), axis=0), 1 / self.p)
if len(var_del)>0:
X = X.drop(columns=var_del)
return activations
#
# if __name__ == '__main__':
# import sys
# warnings.filterwarnings("ignore", category=FutureWarning)
# if sys.platform == 'linux':
# sys_folder = '/media/smartrue/HHD1/George/models/'
# else:
# sys_folder = 'D:/models/'
# project_name = 'APE_net_ver2'
# project_country = 'APE_net_ver2'
# project_owner = '4cast_models'
# path_project = sys_folder + project_owner + '/' + project_country + '/' + project_name
# cluster_dir = path_project +'/Regressor_layer/rule.12'
# data_dir = path_project + '/Regressor_layer/rule.12/data'
# # logger = logging.getLogger(__name__)
# # logger.setLevel(logging.INFO)
# # handler = logging.FileHandler(os.path.join(cluster_dir, 'log_rbf_cnn_test.log'), 'a')
# # handler.setLevel(logging.INFO)
# #
# # # create a logging format
# # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# # handler.setFormatter(formatter)
# #
# # # add the handlers to the logger
# # logger.addHandler(handler)
#
# rated = None
#
# static_data = write_database()
# X = pd.read_csv(os.path.join(static_data['path_data'], 'training_inputs.csv'), index_col=0,
# parse_dates=True, dayfirst=True)
# y = pd.read_csv(os.path.join(static_data['path_data'], 'training_target.csv'), index_col=0,
# header=None,
# names=['target'], parse_dates=True, dayfirst=True)
# X_train = X.loc[X.index <= pd.to_datetime('2019-01-01 00:00')]
# X_test = X.loc[X.index > pd.to_datetime('2019-01-01 00:00')]
# y_train = y.loc[y.index <= pd.to_datetime('2019-01-01 00:00')]
# y_test = y.loc[y.index > pd.to_datetime('2019-01-01 00:00')]
#
# optimizer = cluster_optimize(static_data)
# optimizer.run(X_train, y_train, X_test, y_test, rated)
```
#### File: Fuzzy_clustering/ver_tf2/Cluster_predict_regressors.py
```python
import os
import pickle
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from Fuzzy_clustering.ver_tf2.RBFNN_predict import rbf_model_predict
from Fuzzy_clustering.ver_tf2.RBF_ols_predict import rbf_ols_predict
from Fuzzy_clustering.ver_tf2.Sklearn_predict import sklearn_model_predict
from Fuzzy_clustering.ver_tf2.CNN_predict import CNN_predict
from Fuzzy_clustering.ver_tf2.CNN_predict_3d import CNN_3d_predict
from Fuzzy_clustering.ver_tf2.LSTM_predict_3d import LSTM_3d_predict
from Fuzzy_clustering.ver_tf2.Combine_module_predict import combine_model_predict
from datetime import datetime
import time, logging
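# MultiEvaluator splits the input matrix into chunks of 3000 rows, predicts the
# chunks in parallel with joblib and stitches the results back together in the
# original order.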
class MultiEvaluator():
def __init__(self, processes: int = 8):
self.processes = processes
def predict(self, i, x, model):
return i, model.predict(x)
def evaluate(self, X, model):
partitions = 3000
        X_list = []
        for i in range(0, X.shape[0], partitions):
            if (i + partitions + 1) > X.shape[0]:
                X_list.append(X[i:])
            else:
                X_list.append(X[i:i + partitions])
        pred = Parallel(n_jobs=self.processes)(delayed(self.predict)(i, x, model) for i, x in enumerate(X_list))
indices = np.array([p[0] for p in pred])
predictions = np.array([])
for ind in indices:
if len(predictions.shape)==1:
predictions = pred[ind][1]
else:
predictions = np.vstack((predictions, pred[ind][1]))
return predictions
class cluster_predict(object):
def __init__(self, static_data, clust):
self.istrained = False
self.cluster_dir = os.path.join(static_data['path_model'], 'Regressor_layer/' + clust)
self.cluster_name = clust
try:
self.load(self.cluster_dir)
except:
pass
        self.static_data = static_data
        self.model_type = static_data['type']
        self.methods = static_data['project_methods']
        self.combine_methods = static_data['combine_methods']
        self.rated = static_data['rated']
        self.n_jobs = static_data['njobs']
self.data_dir = os.path.join(self.cluster_dir, 'data')
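    # pred_model dispatches to the trained sub-model(s) that correspond to the
    # given method string ('ML_RBF_ALL', 'ML_RBF_ALL_CNN', 'ML_SVM', ...);
    # every branch clips negative predictions to zero and returns a list of
    # prediction arrays.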
def pred_model(self, X, method, static_data, rated, cluster_dir, X_cnn=np.array([]), X_lstm=np.array([])):
if method == 'ML_RBF_ALL':
model_rbf = rbf_model_predict(static_data['RBF'], rated, cluster_dir)
model_rbf_ols = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=False)
model_rbf_ga = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=True)
if model_rbf_ols.istrained == True:
pred1 = model_rbf_ols.predict(X)
if model_rbf_ga.istrained == True:
pred2 = model_rbf_ga.predict(X)
if model_rbf.istrained == True:
pred3 = model_rbf.predict(X)
pred1[np.where(pred1 < 0)] = 0
pred2[np.where(pred2 < 0)] = 0
pred3[np.where(pred3 < 0)] = 0
return [pred1, pred2, pred3]
elif method == 'ML_RBF_ALL_CNN':
model_rbf = rbf_model_predict(static_data['RBF'], rated, cluster_dir)
model_rbf_ols = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=False)
model_rbf_ga = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=True)
if model_rbf_ols.istrained == True:
pred1 = model_rbf_ols.predict(X)
if model_rbf_ga.istrained == True:
pred2 = model_rbf_ga.predict(X)
if model_rbf.istrained == True:
pred3 = model_rbf.predict(X)
rbf_dir = [model_rbf_ols.cluster_dir, model_rbf_ga.cluster_dir, model_rbf.cluster_dir]
model_cnn = CNN_predict(static_data, rated, cluster_dir, rbf_dir)
if model_cnn.istrained == True:
pred4 = model_cnn.predict(X)
pred1[np.where(pred1 < 0)] = 0
pred2[np.where(pred2 < 0)] = 0
pred3[np.where(pred3 < 0)] = 0
pred4[np.where(pred4 < 0)] = 0
return [pred1, pred2, pred3, pred4]
elif method == 'ML_NUSVM':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = model_sklearn.predict(X)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_MLP':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = model_sklearn.predict(X)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_SVM':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = model_sklearn.predict(X)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_RF':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = model_sklearn.predict(X)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_XGB':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = model_sklearn.predict(X)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_CNN_3d':
model_cnn3d = CNN_3d_predict(static_data, rated, cluster_dir)
if model_cnn3d.istrained == True:
pred = model_cnn3d.predict(X_cnn)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_LSTM_3d':
model_lstm_3d = LSTM_3d_predict(static_data, rated, cluster_dir)
if model_lstm_3d.istrained == True:
pred = model_lstm_3d.predict(X_lstm)
pred[np.where(pred < 0)] = 0
return [pred]
else:
return [np.nan]
def parallel_pred_model(self, X, method, static_data, rated, cluster_dir, X_cnn=np.array([]), X_lstm=np.array([])):
parallel = MultiEvaluator(2*self.n_jobs)
if method == 'ML_RBF_ALL':
model_rbf = rbf_model_predict(static_data['RBF'], rated, cluster_dir)
model_rbf_ols = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=False)
model_rbf_ga = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=True)
if model_rbf_ols.istrained == True:
pred1 = parallel.evaluate(X, model_rbf_ols)
if model_rbf_ga.istrained == True:
pred2 = parallel.evaluate(X, model_rbf_ga)
if model_rbf.istrained == True:
pred3 = parallel.evaluate(X, model_rbf)
pred1[np.where(pred1 < 0)] = 0
pred2[np.where(pred2 < 0)] = 0
pred3[np.where(pred3 < 0)] = 0
return [pred1, pred2, pred3]
elif method == 'ML_RBF_ALL_CNN':
model_rbf = rbf_model_predict(static_data['RBF'], rated, cluster_dir)
model_rbf_ols = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=False)
model_rbf_ga = rbf_ols_predict(cluster_dir, rated, static_data['sklearn']['njobs'], GA=True)
if model_rbf_ols.istrained == True:
pred1 = parallel.evaluate(X, model_rbf_ols)
if model_rbf_ga.istrained == True:
pred2 = parallel.evaluate(X, model_rbf_ga)
if model_rbf.istrained == True:
pred3 = parallel.evaluate(X, model_rbf)
rbf_dir = [model_rbf_ols.cluster_dir, model_rbf_ga.cluster_dir, model_rbf.cluster_dir]
model_cnn = CNN_predict(static_data, rated, cluster_dir, rbf_dir)
if model_cnn.istrained == True:
pred4 = parallel.evaluate(X, model_cnn)
pred1[np.where(pred1 < 0)] = 0
pred2[np.where(pred2 < 0)] = 0
pred3[np.where(pred3 < 0)] = 0
pred4[np.where(pred4 < 0)] = 0
return [pred1, pred2, pred3, pred4]
elif method == 'ML_NUSVM':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = parallel.evaluate(X, model_sklearn)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_MLP':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = parallel.evaluate(X, model_sklearn)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_SVM':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = parallel.evaluate(X, model_sklearn)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_RF':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = parallel.evaluate(X, model_sklearn)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_XGB':
method = method.replace('ML_', '')
model_sklearn = sklearn_model_predict(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained == True:
pred = parallel.evaluate(X, model_sklearn)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_CNN_3d':
model_cnn3d = CNN_3d_predict(static_data, rated, cluster_dir)
if model_cnn3d.istrained == True:
pred = parallel.evaluate(X_cnn, model_cnn3d)
pred[np.where(pred < 0)] = 0
return [pred]
elif method == 'ML_LSTM_3d':
model_lstm_3d = LSTM_3d_predict(static_data, rated, cluster_dir)
if model_lstm_3d.istrained == True:
pred = parallel.evaluate(X_lstm, model_lstm_3d)
pred[np.where(pred < 0)] = 0
return [pred]
else:
return [np.nan]
def compute_metrics(self, pred, y, rated):
        if rated is None:
rated = y.ravel()
else:
rated = 20
err = np.abs(pred.ravel() - y.ravel()) / rated
sse = np.sum(np.square(pred.ravel() - y.ravel()))
rms = np.sqrt(np.mean(np.square(err)))
mae = np.mean(err)
mse = sse / y.shape[0]
return [sse, rms, mae, mse]
def evaluate(self, pred_all, y, y_cnn=None):
result = pd.DataFrame(index=[method for method in pred_all.keys()], columns=['sse', 'rms', 'mae', 'mse'])
for method, pred in pred_all.items():
if method == 'ML_CNN_3d' and not y_cnn is None:
result.loc[method] = self.compute_metrics(pred, y_cnn, self.rated)
else:
result.loc[method] = self.compute_metrics(pred, y, self.rated)
return result
def spark_predict(self, X, X_cnn=np.array([]), X_lstm=np.array([]), fs_reduced=False):
if hasattr(self, 'features') and fs_reduced == False:
X = X[:, self.features]
methods = []
for model in self.static_data['project_methods'].keys():
if self.static_data['project_methods'][model]['status'] == 'train':
methods.append(model)
predictions = dict()
for method in methods:
pred = self.parallel_pred_model(X, method, self.static_data, self.rated, self.cluster_dir, X_cnn=X_cnn,
X_lstm=X_lstm)
for p in pred:
if np.any(np.isnan(p)):
if np.sum(np.isnan(p)) <= 3:
p[np.where(np.isnan(p))] = np.nanmean(p)
else:
                        raise ValueError(
                            'There are nans in dataset of %s clust or model of method %s is not trained well'
                            % (self.cluster_name, method))
if method == 'ML_RBF_ALL_CNN':
predictions['RBF_OLS'] = 20 * pred[0]
predictions['GA_RBF_OLS'] = 20 * pred[1]
predictions['RBFNN'] = 20 * pred[2]
predictions['RBF-CNN'] = 20 * pred[3]
elif method == 'ML_RBF_ALL':
predictions['RBF_OLS'] = 20 * pred[0]
predictions['GA_RBF_OLS'] = 20 * pred[1]
predictions['RBFNN'] = 20 * pred[2]
else:
predictions[method] = 20 * pred[0]
comb_model = combine_model_predict(self.static_data, self.cluster_dir)
if comb_model.istrained==True and len(predictions.keys())>1:
pred_combine = comb_model.predict(predictions)
predictions.update(pred_combine)
return predictions
def predict(self, X, X_cnn=np.array([]), X_lstm=np.array([]), fs_reduced=False):
if hasattr(self, 'features') and fs_reduced==False:
X = X[:, self.features]
methods = []
for model in self.static_data['project_methods'].keys():
if self.static_data['project_methods'][model]['status'] == 'train':
methods.append(model)
predictions=dict()
for method in methods:
if X.shape[0]>0:
pred = self.pred_model(X, method, self.static_data, self.rated, self.cluster_dir, X_cnn=X_cnn, X_lstm=X_lstm)
for p in pred:
if np.any(np.isnan(p)):
if np.sum(np.isnan(p))<=X.shape[0]/3:
p[np.where(np.isnan(p))] = np.nanmean(p)
else:
                            raise ValueError('There are nans in dataset of %s clust or model of method %s is not trained well' % (self.cluster_name, method))
if method == 'ML_RBF_ALL_CNN':
predictions['RBF_OLS'] = 20 * pred[0]
predictions['GA_RBF_OLS'] = 20 * pred[1]
predictions['RBFNN'] = 20 * pred[2]
predictions['RBF-CNN'] = 20 * pred[3]
elif method == 'ML_RBF_ALL':
predictions['RBF_OLS'] = 20 * pred[0]
predictions['GA_RBF_OLS'] = 20 * pred[1]
predictions['RBFNN'] = 20 * pred[2]
else:
predictions[method] = 20 * pred[0]
else:
if method == 'ML_RBF_ALL_CNN':
predictions['RBF_OLS'] = np.array([])
predictions['GA_RBF_OLS'] = np.array([])
predictions['RBFNN'] = np.array([])
predictions['RBF-CNN'] = np.array([])
elif method == 'ML_RBF_ALL':
predictions['RBF_OLS'] = np.array([])
predictions['GA_RBF_OLS'] = np.array([])
predictions['RBFNN'] = np.array([])
else:
predictions[method] = np.array([])
comb_model = combine_model_predict(self.static_data, self.cluster_dir)
if comb_model.istrained == True and len(predictions.keys())>1:
pred_combine = comb_model.predict(predictions)
predictions.update(pred_combine)
elif len(predictions.keys())>1:
pred_combine = comb_model.averaged(predictions)
predictions.update(pred_combine)
return predictions
def load(self, cluster_dir):
if os.path.exists(os.path.join(cluster_dir, 'model_' + self.cluster_name +'.pickle')):
try:
f = open(os.path.join(cluster_dir, 'model_' + self.cluster_name +'.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
tdict = {}
for k in tmp_dict.keys():
if not k in ['logger', 'static_data', 'data_dir', 'cluster_dir', 'n_jobs']:
tdict[k] = tmp_dict[k]
self.__dict__.update(tdict)
except:
raise ImportError('Cannot open rule model %s', self.cluster_name)
else:
raise ImportError('Cannot find rule model %s', self.cluster_name)
if __name__=='__main__':
from Fuzzy_clustering.ver_tf2.train_with_docker import ModelTrain
from Fuzzy_clustering.ver_tf2.Models_train_manager import ModelTrainManager
from Fuzzy_clustering.ver_tf2.Cluster_train_regressors import cluster_train
import logging, os
project_owner = 'my_projects'
projects_group = 'APE_net'
project_name = 'SCADA'
version = 0
model_type = 'load'
njobs = 3
gpus = ['/device:GPU:0']
model = ModelTrain(project_owner, projects_group, project_name, version, model_type, njobs, gpus)
project_model = ModelTrainManager(model.project['static_data']['path_model'])
# if project_model.istrained == False:
data_variables = model.project['static_data']['data_variables']
project_model.init(model.project['static_data'], data_variables)
clust_regressor = cluster_train(project_model.static_data, 'rule.101', project_model.sc)
clust_regressor.fit()
```
#### File: Fuzzy_clustering/ver_tf2/CNN_module.py
```python
import os
import numpy as np
import pandas as pd
import pickle
import glob
import shutil
import logging, joblib
import re
import multiprocessing as mp
from joblib import Parallel, delayed
import tensorflow as tf
from Fuzzy_clustering.ver_tf2.RBFNN_module import rbf_model
from Fuzzy_clustering.ver_tf2.CNN_tf_core import CNN
from sklearn.model_selection import train_test_split
from scipy.interpolate import interp2d
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
from Fuzzy_clustering.ver_tf2.RBF_ols import rbf_ols_module
from Fuzzy_clustering.ver_tf2.CNN_predict import CNN_predict
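# optimize_cnn wraps a single CNN training run; on failure (typically a GPU
# memory error) it retries up to three times with half the number of filters
# and falls back to an infinite objective if all attempts fail.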
def optimize_cnn(cnn, kernels, hsize, cnn_max_iterations, cnn_learning_rate, gpu, filters):
flag = False
for _ in range(3):
try:
acc_old_cnn, scale_cnn, model_cnn = cnn.train_cnn(max_iterations=cnn_max_iterations,
learning_rate=cnn_learning_rate, kernels=kernels, h_size=hsize, gpu_id=gpu, filters=filters)
flag = True
except:
filters = int(filters / 2)
pass
if not flag:
acc_old_cnn = np.inf
scale_cnn = None
model_cnn = None
return acc_old_cnn, kernels[1], hsize, cnn_learning_rate, scale_cnn, model_cnn
def predict(q, H, model):
tf.config.set_soft_device_placement(True)
pred = model.predict(H)
q.put((pred[0]))
class cnn_model():
def __init__(self, static_data, rated, cluster_dir, rbf_dir):
self.static_data_all = static_data
self.static_data = static_data['CNN']
self.static_data_rbf = static_data['RBF']
self.rated = rated
self.cluster = os.path.basename(cluster_dir)
self.istrained = False
self.rbf_dir = rbf_dir
self.cluster_dir = cluster_dir
if isinstance(rbf_dir, list):
self.rbf_method = 'RBF_ALL'
self.cluster_cnn_dir = os.path.join(cluster_dir, 'RBF_ALL/CNN')
self.model_dir = os.path.join(self.cluster_cnn_dir, 'model')
self.rbf = rbf_model(self.static_data_rbf, self.rated, cluster_dir)
self.rbf.models=[]
for dir in rbf_dir:
rbf_method = os.path.basename(dir)
cluster_rbf_dir = os.path.join(dir, 'model')
if rbf_method == 'RBFNN':
rbf = rbf_model(self.static_data_rbf, self.rated, cluster_dir)
try:
rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
self.rbf.models.append(rbf.models[0])
elif rbf_method == 'RBF_OLS':
rbf = rbf_ols_module(cluster_dir, rated, self.static_data_rbf['njobs'], GA=False)
try:
rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
self.rbf.models.append(rbf.models[-1])
elif rbf_method == 'GA_RBF_OLS':
rbf = rbf_ols_module(cluster_dir, rated, self.static_data_rbf['njobs'], GA=True)
try:
rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
self.rbf.models.append(rbf.models[0])
else:
raise ValueError('Cannot recognize RBF method')
else:
self.rbf_method = os.path.basename(rbf_dir)
cluster_rbf_dir = os.path.join(rbf_dir, 'model')
self.cluster_cnn_dir = os.path.join(rbf_dir, 'CNN')
self.model_dir = os.path.join(self.cluster_cnn_dir, 'model')
if self.rbf_method == 'RBFNN':
self.rbf = rbf_model(self.static_data_rbf, self.rated, cluster_dir)
elif self.rbf_method == 'RBF_OLS':
self.rbf = rbf_ols_module(cluster_dir, rated, self.static_data_rbf['njobs'], GA=False)
elif self.rbf_method == 'GA_RBF_OLS':
self.rbf = rbf_ols_module(cluster_dir, rated, self.static_data_rbf['njobs'], GA=True)
else:
raise ValueError('Cannot recognize RBF method')
try:
self.rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
try:
self.load(self.model_dir)
except:
pass
def train_cnn(self, cvs):
logger = logging.getLogger('log_train_' + self.cluster + '.log')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.model_dir, 'log_train_' + self.cluster + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
print('CNN training...begin for %s ', self.cluster)
logger.info('CNN training...begin for %s ', self.cluster)
logger.info('CNN training...begin for method %s ', self.rbf_method)
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
cnn = CNN(self.static_data, self.rated, self.rbf.models, X_train, y_train, X_val, y_val, X_test, y_test)
self.acc_cnn = np.inf
gpus = np.tile(self.static_data['gpus'], 4)
# k=2
# optimize_cnn(cnn, self.temp_dir + str(k), [2, k],
# self.static_data['max_iterations'], self.static_data['learning_rate'],
# gpus[int(k / 4)])
# pool = mp.Pool(processes=len(self.static_data['gpus']))
# result = [pool.apply_async(optimize_cnn, args=(cnn, self.temp_dir + str(k), [2, k], self.static_data['h_size'],
# self.static_data['max_iterations'],
# self.static_data['learning_rate'],
# gpus[int(k / 4)])) for k in [2, 12]]
# results = [p.get() for p in result]
# pool.close()
# pool.terminate()
# pool.join()
results = Parallel(n_jobs=len(self.static_data['gpus']))(
delayed(optimize_cnn)(cnn, [2, k], self.static_data['h_size'],
self.static_data['max_iterations'],
self.static_data['learning_rate'],
gpus[i], int(self.static_data['filters'])) for i, k in enumerate([4, 12]))
for r in results:
logger.info("kernel: %s accuracy cnn: %s", r[1], r[0])
acc_cnn = np.array([r[0] for r in results])
self.acc_cnn, self.best_kernel, hsize, lr, self.scale_cnn, model_cnn = results[acc_cnn.argmin()]
self.model = model_cnn
train_res = pd.DataFrame.from_dict(model_cnn['error_func'], orient='index')
train_res.to_csv(os.path.join(self.model_dir, 'train_result.csv'), header=None)
self.save(self.model_dir)
try:
self.acc_cnn = np.inf
gpus = np.tile(self.static_data['gpus'], 4)
h_size=[
[1024, 256],
[512, 128],
]
results1 = Parallel(n_jobs=len(self.static_data['gpus']))(
delayed(optimize_cnn)(cnn, [2, self.best_kernel], h_size[k],
self.static_data['max_iterations'],
self.static_data['learning_rate'],
gpus[int(k)], int(self.static_data['filters'])) for k in range(2))
for r in results1:
logger.info("num neurons: 1st %s and 2nd %s with accuracy cnn: %s", *r[2], r[0])
results += results1
acc_cnn = np.array([r[0] for r in results])
self.acc_cnn, self.best_kernel, self.best_h_size, self.lr, self.scale_cnn, model_cnn = results[acc_cnn.argmin()]
self.model = model_cnn
train_res = pd.DataFrame.from_dict(model_cnn['error_func'], orient='index')
train_res.to_csv(os.path.join(self.model_dir, 'train_result_hsize.csv'), header=None)
logger.info("Best kernel: %s", self.best_kernel)
logger.info("accuracy cnn: %s", self.acc_cnn)
logger.info("num neurons: 1st %s and 2nd %s", *self.best_h_size)
logger.info("with accuracy cnn: %s", self.acc_cnn)
logger.info("Best learning rate: %s", self.lr)
logger.info("Total accuracy cnn: %s", self.acc_cnn)
logger.info('\n')
self.istrained = True
self.save(self.model_dir)
except:
pass
# self.acc_cnn = np.inf
# gpus = np.tile(self.static_data['gpus'], 4)
# lrs = [1e-6, 1e-4]
# k=2
# optimize_cnn(cnn, self.temp_dir + str(k), [2, k],
# self.static_data['max_iterations'], self.static_data['learning_rate'],
# gpus[int(k / 4)])
# pool = mp.Pool(processes=len(self.static_data['gpus']))
# result = [pool.apply_async(optimize_cnn, args=(cnn, self.temp_dir + str(k), [2, self.best_kernel], self.best_h_size,
# self.static_data['max_iterations'],
# lrs[k],
# gpus[k])) for k in [0, 1]]
# results = [p.get() for p in result]
# pool.close()
# pool.terminate()
# pool.join()
# results1 = Parallel(n_jobs=len(self.static_data['gpus']))(
# delayed(optimize_cnn)(cnn, [2, self.best_kernel], self.best_h_size,
# self.static_data['max_iterations'],
# lrs[k],
# gpus[k], int(self.static_data['filters'])) for k in [0, 1])
# for r in results1:
# lr = r[3]
# logger.info("Learning rate: %s accuracy cnn: %s", lr, r[0])
#
# results += results1
# acc_cnn = np.array([r[0] for r in results])
# self.acc_cnn, self.best_kernel, self.best_h_size, self.lr, self.scale_cnn, model_cnn = results[acc_cnn.argmin()]
# self.model = model_cnn
return self.to_dict()
def to_dict(self):
        d = {}
        for k in self.__dict__.keys():
            if k not in ['logger', 'static_data_all', 'model_dir', 'temp_dir', 'cluster_cnn_dir', 'cluster_dir', 'rbf_dir', 'model']:
                d[k] = self.__dict__[k]
        return d
def train_cnn_TL(self, cvs, model, gpu):
print('CNN training...begin for %s ', self.cluster)
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
cnn = CNN(self.static_data, self.rated, self.rbf.models, X_train, y_train, X_val, y_val, X_test, y_test)
self.acc_cnn = np.inf
gpus = np.tile(gpu, 1)
results = Parallel(n_jobs=len(self.static_data['gpus']))(
delayed(optimize_cnn)(cnn, [2, model['best_kernel']], model['best_h_size'],
self.static_data['max_iterations'],
model['lr'],
gpus[k], int(self.static_data['filters'])) for k in [0])
acc_cnn = np.array([r[0] for r in results])
self.acc_cnn, best_kernel, best_h_size, lr, self.scale_cnn, model_cnn = results[acc_cnn.argmin()]
self.model = model_cnn
self.lr = model['lr']
self.best_h_size = model['best_h_size']
self.best_kernel = model['best_kernel']
self.istrained = True
self.save(self.model_dir)
return self.to_dict()
def rbf_map(self,X, num_centr, centroids, radius):
hmap_list = []
s = X.shape
d1 = np.transpose(np.tile(np.expand_dims(X, axis=0), [num_centr, 1, 1]), [1, 0, 2]) - np.tile(
np.expand_dims(centroids, axis=0), [s[0], 1, 1])
d = np.sqrt(np.power(np.multiply(d1, np.tile(np.expand_dims(radius, axis=0), [s[0], 1, 1])), 2))
phi = np.exp((-1) * np.power(d, 2))
return np.transpose(phi,[1, 0, 2])
def rescale(self, arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
def predict(self,X):
cnn = CNN_predict(self.static_data_all, self.rated, self.cluster_dir, self.rbf_dir)
return cnn.predict(X)
def move_files(self, path1, path2):
for filename in glob.glob(os.path.join(path1, '*.*')):
shutil.copy(filename, path2)
def compute_metrics(self, pred, y, rated):
        if rated is None:
rated = y.ravel()
else:
rated = 1
err = np.abs(pred.ravel() - y.ravel()) / rated
sse = np.sum(np.square(pred.ravel() - y.ravel()))
rms = np.sqrt(np.mean(np.square(err)))
mae = np.mean(err)
mse = sse / y.shape[0]
return [sse, rms, mae, mse]
def load(self, pathname):
cluster_dir = pathname
if os.path.exists(os.path.join(cluster_dir, 'cnn' + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(cluster_dir, 'cnn' + '.pickle'))
self.__dict__.update(tmp_dict)
except:
raise ImportError('Cannot open CNN model')
else:
raise ImportError('Cannot find CNN model')
def save(self, pathname):
tmp_dict = {}
for k in self.__dict__.keys():
if k not in ['logger','static_data_all','model_dir', 'temp_dir', 'cluster_cnn_dir', 'cluster_dir','rbf_dir']:
tmp_dict[k] = self.__dict__[k]
joblib.dump(tmp_dict,os.path.join(pathname, 'cnn' + '.pickle'), compress=9)
```
#### File: Fuzzy_clustering/ver_tf2/CNN_predict.py
```python
import tensorflow as tf
import numpy as np
from scipy.interpolate import interp2d
import os, joblib
from Fuzzy_clustering.ver_tf2.RBFNN_predict import rbf_model_predict
from Fuzzy_clustering.ver_tf2.RBF_ols_predict import rbf_ols_predict
class CNN_predict():
def __init__(self, static_data, rated, cluster_dir, rbf_dir):
self.static_data = static_data['CNN']
self.static_data_rbf = static_data['RBF']
self.rated = rated
self.cluster = os.path.basename(cluster_dir)
self.istrained = False
if isinstance(rbf_dir, list):
self.rbf_method = 'RBF_ALL'
self.cluster_cnn_dir = os.path.join(cluster_dir, 'RBF_ALL/CNN')
self.model_dir = os.path.join(self.cluster_cnn_dir, 'model')
self.rbf = rbf_model_predict(self.static_data_rbf, self.rated, cluster_dir)
self.rbf.models=[]
for dir in rbf_dir:
rbf_method = os.path.basename(dir)
cluster_rbf_dir = os.path.join(dir, 'model')
if rbf_method == 'RBFNN':
rbf = rbf_model_predict(self.static_data_rbf, self.rated, cluster_dir)
try:
rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
self.rbf.models.append(rbf.models[0])
elif rbf_method == 'RBF_OLS':
rbf = rbf_ols_predict(cluster_dir, rated, self.static_data_rbf['njobs'], GA=False)
try:
rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
self.rbf.models.append(rbf.models[-1])
elif rbf_method == 'GA_RBF_OLS':
rbf = rbf_ols_predict(cluster_dir, rated, self.static_data_rbf['njobs'], GA=True)
try:
rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
self.rbf.models.append(rbf.models[0])
else:
raise ValueError('Cannot recognize RBF method')
else:
self.rbf_method = os.path.basename(rbf_dir)
cluster_rbf_dir = os.path.join(rbf_dir, 'model')
self.cluster_cnn_dir = os.path.join(rbf_dir, 'CNN')
self.model_dir = os.path.join(self.cluster_cnn_dir, 'model')
if self.rbf_method == 'RBFNN':
self.rbf = rbf_model_predict(self.static_data_rbf, self.rated, cluster_dir)
elif self.rbf_method == 'RBF_OLS':
self.rbf = rbf_ols_predict(cluster_dir, rated, self.static_data_rbf['njobs'], GA=False)
elif self.rbf_method == 'GA_RBF_OLS':
self.rbf = rbf_ols_predict(cluster_dir, rated, self.static_data_rbf['njobs'], GA=True)
else:
raise ValueError('Cannot recognize RBF method')
try:
self.rbf.load(cluster_rbf_dir)
except:
raise ImportError('Cannot load RBFNN models')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
self.temp_dir = os.path.join(self.static_data['CNN_path_temp'], 'temp')
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
try:
self.load(self.model_dir)
self.istrained = True
except:
pass
def rbf_map(self, X, num_centr, centroids, radius):
hmap_list = []
s = X.shape
d1 = np.transpose(np.tile(np.expand_dims(X, axis=0), [num_centr, 1, 1]), [1, 0, 2]) - np.tile(
np.expand_dims(centroids, axis=0), [s[0], 1, 1])
d = np.sqrt(np.power(np.multiply(d1, np.tile(np.expand_dims(radius, axis=0), [s[0], 1, 1])), 2))
phi = np.exp((-1) * np.power(d, 2))
return np.transpose(phi,[1, 0, 2])
def rescale(self,arr, nrows, ncol):
W, H = arr.shape
new_W, new_H = (nrows, ncol)
xrange = lambda x: np.linspace(0, 1, x)
f = interp2d(xrange(H), xrange(W), arr, kind="linear")
new_arr = f(xrange(new_H), xrange(new_W))
return new_arr
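    # create_inputs turns the raw feature matrix into the CNN input tensor:
    # one RBF activation map per trained RBF model (the tensor depth), with
    # centroids/radii rescaled so every map has the same number of centres,
    # then normalised with the scalers stored at training time.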
def create_inputs(self, X_train):
self.N, self.D = X_train.shape
H = []
self.depth = len(self.rbf.models)
self.num_centr=0
for i in range(self.depth):
if self.rbf.models[i]['centroids'].shape[0]>self.num_centr:
self.num_centr = self.rbf.models[i]['centroids'].shape[0]
for i in range(self.depth):
if len(self.rbf.models[i]['Radius'].shape) == 1:
self.rbf.models[i]['Radius'] = np.tile(self.rbf.models[i]['Radius'].reshape(1, -1), [self.num_centr,1])
if self.rbf.models[i]['centroids'].shape[0] < self.num_centr:
centroids=self.rescale(self.rbf.models[i]['centroids'], self.num_centr, self.D)
else:
centroids = self.rbf.models[i]['centroids']
if np.isscalar(self.rbf.models[i]['Radius']):
Radius = self.rbf.models[i]['Radius']
elif self.rbf.models[i]['Radius'].shape[0] == self.num_centr:
Radius = self.rbf.models[i]['Radius']
elif self.rbf.models[i]['Radius'].shape[0] < self.num_centr:
Radius = self.rescale(self.rbf.models[i]['Radius'], self.num_centr, self.D)
else:
                raise ValueError('Unknown shape')
H.append(np.transpose(self.rbf_map(X_train, self.num_centr, centroids, Radius), [1, 2, 0]))
H[i] = np.array(H[i])
H[i] = H[i].reshape(-1, self.D * self.num_centr)
sc=self.scale_cnn[i]
H[i] = sc.transform(H[i].reshape(-1, self.D * self.num_centr))
H[i] = np.nan_to_num(H[i].reshape(-1, self.D, self.num_centr))
H = np.transpose(np.stack(H), [1, 2, 3, 0])
return H
def init_weights(self, init_w):
init_random_dist = tf.convert_to_tensor(init_w)
return tf.Variable(init_random_dist)
def init_bias(self, init_b):
init_bias_vals = tf.convert_to_tensor(init_b)
return tf.Variable(init_bias_vals)
def normal_full_layer(self,input_layer, init_w, init_b):
W = self.init_weights(init_w)
b = self.init_bias(init_b)
return tf.add(tf.matmul(input_layer, W), b, name='prediction'), W, b
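    # build_graph reconstructs the conv + dense architecture used at training
    # time and restores the saved weights (best_weights) so that predict() can
    # run the frozen network on CPU.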
def build_graph(self, x1, best_weights, kernels, h_size, hold_prob, filters):
with tf.name_scope("build_cnn") as scope:
H = tf.reshape(x1, [-1, self.D, self.num_centr, self.depth],name='reshape_1')
convo_1= tf.keras.layers.Conv2D(filters=int(filters),
kernel_size=kernels,
padding="same",
name='cnn1',
# kernel_initializer=lambda shape, dtype: best_weights['build_cnn/cnn1/kernel:0'],
# bias_initializer=lambda shape, dtype: best_weights['build_cnn/cnn1/bias:0'],
activation=tf.nn.elu)
convo_1_pool = tf.keras.layers.AveragePooling2D(pool_size=self.static_data['pool_size'], strides=1, name='pool1')
cnn_output=convo_1_pool(convo_1(H))
full_one_dropout = tf.nn.dropout(cnn_output, keep_prob=hold_prob)
shape = full_one_dropout.get_shape().as_list()
s = shape[1] * shape[2] * shape[3]
convo_2_flat = tf.reshape(full_one_dropout, [-1, s])
full_layer_one = tf.keras.layers.Dense(units=h_size[0],activation=tf.nn.elu, name='dense1')
# , kernel_initializer = lambda x: best_weights['build_cnn/dense1/kernel:0'],
# bias_initializer = lambda x: best_weights['build_cnn/dense1/bias:0'],
full_layer_two = tf.keras.layers.Dense(units=h_size[1], activation=tf.nn.elu, name='dense2')
# , kernel_initializer = lambda x: best_weights['build_cnn/dense2/kernel:0'],
# bias_initializer = lambda x: best_weights['build_cnn/dense2/bias:0']
full_two_dropout = tf.nn.dropout(full_layer_one(convo_2_flat), keep_prob=hold_prob)
dense_output = tf.nn.dropout(full_layer_two(full_two_dropout), keep_prob=hold_prob)
convo_1.set_weights([best_weights['build_cnn/cnn1/kernel:0'], best_weights['build_cnn/cnn1/bias:0']])
full_layer_one.set_weights([best_weights['build_cnn/dense1/kernel:0'], best_weights['build_cnn/dense1/bias:0']])
full_layer_two.set_weights([best_weights['build_cnn/dense2/kernel:0'], best_weights['build_cnn/dense2/bias:0']])
y_pred, W, b = self.normal_full_layer(dense_output, best_weights['build_cnn/Variable:0'],best_weights['build_cnn/Variable_1:0'] )
weights = convo_1.trainable_weights + full_layer_one.trainable_weights + full_layer_two.trainable_weights + [W, b]
return y_pred, weights, convo_1, full_layer_one, full_layer_two
def predict(self, X):
if self.istrained:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
            if 'filters' in self.model:
filters = self.model['filters']
else:
filters = int(self.static_data['filters'])
kernels = self.model['kernels']
h_size = self.model['h_size']
best_weights = self.model['best_weights']
H = self.create_inputs(X)
tf.compat.v1.reset_default_graph()
graph_cnn = tf.Graph()
with graph_cnn.as_default():
with tf.device("/cpu:0"):
x1 = tf.compat.v1.placeholder('float', shape=[None, self.D, self.num_centr, self.depth], name='input_data')
hold_prob = tf.compat.v1.placeholder(tf.float32, name='drop')
with tf.device("/cpu:0"):
y_pred_, weights, convo_1, full_layer_one, full_layer_two = self.build_graph(x1, best_weights, kernels, h_size, hold_prob, filters)
config_tf = tf.compat.v1.ConfigProto(allow_soft_placement=True)
config_tf.gpu_options.allow_growth = True
with tf.compat.v1.Session(graph=graph_cnn, config=config_tf) as sess:
print('Open an rbf-cnn network with %s' % self.num_centr)
sess.run(tf.compat.v1.global_variables_initializer())
convo_1.set_weights([best_weights['build_cnn/cnn1/kernel:0'], best_weights['build_cnn/cnn1/bias:0']])
full_layer_one.set_weights(
[best_weights['build_cnn/dense1/kernel:0'], best_weights['build_cnn/dense1/bias:0']])
full_layer_two.set_weights(
[best_weights['build_cnn/dense2/kernel:0'], best_weights['build_cnn/dense2/bias:0']])
y_pred, weights_run= sess.run([y_pred_, weights],
feed_dict={x1: H, hold_prob:1})
sess.close()
else:
            raise ModuleNotFoundError("Error on prediction of %s cluster. The model CNN seems not properly trained" % self.cluster)
return y_pred
def load(self, pathname):
cluster_dir = pathname
if os.path.exists(os.path.join(cluster_dir, 'cnn' + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(cluster_dir, 'cnn' + '.pickle'))
self.__dict__.update(tmp_dict)
except:
raise ImportError('Cannot open CNN model')
else:
raise ImportError('Cannot find CNN model')
```
#### File: Fuzzy_clustering/ver_tf2/Combine_module_predict.py
```python
import os
import numpy as np
import joblib
from Fuzzy_clustering.ver_tf2.Sklearn_predict import sklearn_model_predict
class combine_model_predict(object):
def __init__(self, static_data, cluster_dir, is_global=False):
self.istrained = False
self.combine_methods = static_data['combine_methods']
self.cluster_name = os.path.basename(cluster_dir)
self.cluster_dir = cluster_dir
self.model_dir = os.path.join(self.cluster_dir, 'Combine')
try:
self.load(self.model_dir)
except:
pass
self.static_data = static_data
self.model_type = static_data['type']
self.methods = []
if is_global:
for method in static_data['project_methods'].keys():
if self.static_data['project_methods'][method]['Global'] == True:
if method == 'ML_RBF_ALL_CNN':
self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN', 'RBF-CNN'])
elif method == 'ML_RBF_ALL':
self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN'])
else:
self.methods.append(method)
else:
for method in static_data['project_methods'].keys():
if static_data['project_methods'][method]['status'] == 'train':
if method == 'ML_RBF_ALL_CNN':
self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN', 'RBF-CNN'])
elif method == 'ML_RBF_ALL':
self.methods.extend(['RBF_OLS', 'GA_RBF_OLS', 'RBFNN'])
else:
self.methods.append(method)
self.rated = static_data['rated']
self.n_jobs = 2 * static_data['njobs']
self.data_dir = os.path.join(self.cluster_dir, 'data')
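    # averaged() is the fallback combiner (simple mean of the per-method
    # predictions); predict() applies the trained combine models instead
    # (rls/bcp weight vectors, or sklearn regressors such as ridge and mlp).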
def averaged(self,X):
pred_combine = dict()
X_pred = np.array([])
self.best_methods = X.keys()
for method in sorted(self.best_methods):
if X_pred.shape[0]==0:
X_pred = X[method]
else:
X_pred = np.hstack((X_pred, X[method]))
if X_pred.shape[0]>0:
pred = np.mean(X_pred, axis=1).reshape(-1, 1)
if len(pred.shape)==1:
pred = pred.reshape(-1,1)
pred[np.where(pred<0)] = 0
pred_combine['average'] = pred
else:
pred_combine['average'] = np.array([])
return pred_combine
def predict(self, X):
pred_combine = dict()
if len(self.methods) > 1:
X_pred = np.array([])
if not hasattr(self, 'best_methods'):
self.best_methods = X.keys()
for method in sorted(self.best_methods):
if X_pred.shape[0]==0:
X_pred = X[method]
else:
X_pred = np.hstack((X_pred, X[method]))
X_pred /= 20
if not hasattr(self, 'model'):
                raise ValueError('The combine model does not exist')
for combine_method in self.combine_methods:
if X_pred.shape[0]>0:
if combine_method == 'rls':
pred = np.matmul(self.model[combine_method]['w'], X_pred.T).T
elif combine_method == 'bcp':
pred =np.matmul(self.model[combine_method]['w'], X_pred.T).T
elif combine_method == 'mlp':
self.model[combine_method] = sklearn_model_predict(self.model_dir, self.rated, 'mlp', self.n_jobs)
pred = self.model[combine_method].predict(X_pred)
elif combine_method == 'bayesian_ridge':
pred = self.model[combine_method].predict(X_pred)
elif combine_method == 'elastic_net':
pred = self.model[combine_method].predict(X_pred)
elif combine_method == 'ridge':
pred = self.model[combine_method].predict(X_pred)
else:
pred = np.mean(X_pred, axis=1).reshape(-1, 1)
if len(pred.shape)==1:
pred = pred.reshape(-1,1)
pred[np.where(pred<0)] = 0
pred_combine[combine_method] = 20 * pred
else:
pred_combine[combine_method] = np.array([])
return pred_combine
def load(self, pathname):
cluster_dir = pathname
if os.path.exists(os.path.join(cluster_dir, 'combine_models.pickle')):
try:
f = open(os.path.join(cluster_dir, 'combine_models.pickle'), 'rb')
tmp_dict = joblib.load(f)
f.close()
del tmp_dict['model_dir']
self.__dict__.update(tmp_dict)
except:
                raise ImportError('Cannot open combine model')
else:
            raise ImportError('Cannot find combine model')
```
#### File: Fuzzy_clustering/ver_tf2/Global_train_regressor.py
```python
import os
import pickle, glob, shutil
import numpy as np
import pandas as pd
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
from Fuzzy_clustering.ver_tf2.RBFNN_module import rbf_model
from Fuzzy_clustering.ver_tf2.RBF_ols import rbf_ols_module
from Fuzzy_clustering.ver_tf2.CNN_module import cnn_model
from Fuzzy_clustering.ver_tf2.CNN_module_3d import cnn_3d_model
from Fuzzy_clustering.ver_tf2.LSTM_module_3d import lstm_3d_model
from Fuzzy_clustering.ver_tf2.Combine_module_train import combine_model
from Fuzzy_clustering.ver_tf2.Clusterer import clusterer
from Fuzzy_clustering.ver_tf2.Global_predict_regressor import global_predict
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from datetime import datetime
from Fuzzy_clustering.ver_tf2.imblearn.over_sampling import BorderlineSMOTE, SVMSMOTE, SMOTE,ADASYN
import time, logging, warnings, joblib
class global_train(object):
def __init__(self, static_data, x_scaler):
self.istrained = False
self.cluster_dir=os.path.join(static_data['path_model'], 'Global_regressor')
try:
self.load(self.cluster_dir)
except:
pass
self.static_data=static_data
self.model_type=static_data['type']
self.x_scaler = x_scaler
self.methods=static_data['project_methods']
self.combine_methods=static_data['combine_methods']
self.rated=static_data['rated']
self.n_jobs=static_data['njobs']
self.var_lin = static_data['clustering']['var_lin']
self.cluster_dir=os.path.join(static_data['path_model'], 'Global_regressor')
self.data_dir = os.path.join(self.cluster_dir, 'data')
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
logger = logging.getLogger('Glob_train_procedure' + '_' +self.model_type)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.cluster_dir, 'log_train_procedure.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
self.logger = logger
def move_files(self, path1, path2):
for filename in glob.glob(os.path.join(path1, '*.*')):
shutil.copy(filename, path2)
def split_dataset(self, X, y, act, X_cnn=np.array([]), X_lstm=np.array([])):
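        # Build three CV folds: a continuous 15% test split first, then three random train/validation
        # splits of the remainder; the same test fold is shared across all three CV sets.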
if len(y.shape)>1:
y=y.ravel()
if len(act.shape)>1:
act=act.ravel()
self.N_tot, self.D = X.shape
X_train, X_test1, y_train, y_test1, mask_test1 = split_continuous(X, y, test_size=0.15, random_state=42, mask=False)
cvs = []
for _ in range(3):
X_train1 = np.copy(X_train)
y_train1 = np.copy(y_train)
X_train1, X_val, y_train1, y_val = train_test_split(X_train1, y_train1, test_size=0.15)
cvs.append([X_train1, y_train1, X_val, y_val, X_test1, y_test1])
self.N_train = cvs[0][0].shape[0]
self.N_val = cvs[0][2].shape[0] + cvs[0][4].shape[0]
return cvs, mask_test1, X, y, act, X_cnn, X_lstm
def find_features(self, cvs, method, njobs):
if method=='boruta':
from Fuzzy_clustering.ver_tf2.Feature_selection_boruta import FS
else:
from Fuzzy_clustering.ver_tf2.Feature_selection_permutation import FS
fs=FS(self.cluster_dir, 2*njobs)
self.features=fs.fit(cvs)
self.save(self.cluster_dir)
def split_test_data(self, X, y, act, X_cnn=np.array([]), X_lstm=np.array([]), test_indices =None):
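        # Separate the held-out test set using the persisted test_indices (dates for the tabular
        # frames, positional indices for the 3-D CNN/LSTM arrays).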
self.N_tot, self.D = X.shape
if not test_indices is None:
X_test = X.loc[test_indices['dates_test']]
y_test = y.loc[test_indices['dates_test']]
act_test = act.loc[test_indices['dates_test']]
X = X.loc[test_indices['dates_train']]
y = y.loc[test_indices['dates_train']]
act = act.loc[test_indices['dates_train']]
if len(X_cnn.shape) > 1:
X_cnn_test = X_cnn[test_indices['indices_test']]
X_cnn = X_cnn[test_indices['indices_train']]
else:
X_cnn_test = np.array([])
if len(X_lstm.shape) > 1:
X_lstm_test = X_lstm[test_indices['indices_test']]
X_lstm = X_lstm[test_indices['indices_train']]
else:
X_lstm_test = np.array([])
else:
X_test = pd.DataFrame([])
y_test = pd.DataFrame([])
act_test = pd.DataFrame([])
X_cnn_test = np.array([])
X_lstm_test = np.array([])
self.N_test = X_test.shape[0]
return X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test
def load_data(self):
data_path = self.data_dir
X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
act = pd.read_csv(os.path.join(data_path, 'dataset_act.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))
if X_cnn.shape[1]==6:
X_cnn = X_cnn.transpose([0, 2, 3, 1])
else:
X_cnn = np.array([])
if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):
X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))
else:
X_lstm = np.array([])
if os.path.exists(os.path.join(self.data_dir, 'test_indices.pickle')):
test_indices = joblib.load(os.path.join(self.data_dir, 'test_indices.pickle'))
else:
test_indices = None
return X, y, act, X_cnn, X_lstm, test_indices
def fit(self):
self.logger.info('Start training Global models')
        self.logger.info('\n')
X, y, act, X_cnn, X_lstm, test_indices = self.load_data()
self.variables = X.columns
indices = X.index
X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test = self.split_test_data(X, y,
act,
X_cnn=X_cnn,
X_lstm=X_lstm,
test_indices=test_indices)
if X_test.shape[0]>0:
lin_models = LinearRegression().fit(X[self.var_lin].values, y.values.ravel())
preds = lin_models.predict(X_test[self.var_lin].values).ravel()
err = (preds - y_test.values.ravel()) / 20
rms = np.sum(np.square(err))
mae = np.mean(np.abs(err))
            print('rms = %s' % rms)
            print('mae = %s' % mae)
self.logger.info("Objective from linear models: %s", mae)
X = X.values
y = y.values / 20
act = act.values
if len(y.shape)==1:
y = y[:, np.newaxis]
if len(act.shape)==1:
act = act[:, np.newaxis]
try:
self.load(self.cluster_dir)
except:
pass
if hasattr(self, 'features') and self.static_data['train_online'] == False:
pass
else:
if self.static_data['sklearn']['fs_status'] != 'ok':
X_train, X_test1, y_train, y_test1 = split_continuous(X, y, test_size=0.15, random_state=42)
cvs = []
for _ in range(3):
X_train1 = np.copy(X_train)
y_train1 = np.copy(y_train)
X_train1, X_val, y_train1, y_val = train_test_split(X_train1, y_train1, test_size=0.15)
cvs.append([X_train1, y_train1, X_val, y_val, X_test1, y_test1])
self.find_features(cvs, self.static_data['sklearn']['fs_method'], self.static_data['sklearn']['njobs'])
cvs, mask_test1, X, y, act, X_cnn, X_lstm = self.split_dataset(X, y, act, X_cnn, X_lstm)
self.indices = indices[:X.shape[0]]
for i in range(3):
cvs[i][0] = cvs[i][0][:, self.features]
cvs[i][2] = cvs[i][2][:, self.features]
cvs[i][4] = cvs[i][4][:, self.features]
self.logger.info('Data info for Global models')
self.logger.info('Number of variables %s', str(self.D))
self.logger.info('Number of total samples %s', str(self.N_tot))
self.logger.info('Number of training samples %s', str(self.N_train))
self.logger.info('Number of validation samples %s', str(self.N_val))
self.logger.info('Number of testing samples %s', str(self.N_test))
        self.logger.info('\n')
self.models = dict()
for method in self.static_data['project_methods'].keys():
if self.static_data['project_methods'][method]['Global'] == True:
self.logger.info('Training start of method %s', method)
                self.logger.info('\n')
if 'sklearn_method' in self.static_data['project_methods'][method].keys():
optimize_method = self.static_data['project_methods'][method]['sklearn_method']
else:
optimize_method = []
self.fit_model(cvs, method, self.static_data, self.cluster_dir, optimize_method, X_cnn=X_cnn, X_lstm=X_lstm, y=y, rated=1)
self.logger.info('Training end of method %s', method)
comb_model = combine_model(self.static_data, self.cluster_dir, x_scaler=self.x_scaler,is_global=True)
if comb_model.istrained == False and X_test.shape[0] > 0:
comb_model.train(X_test, y_test, act_test, X_cnn_test, X_lstm_test)
predict_module = global_predict(self.static_data)
predictions = predict_module.predict(X_test.values, X_cnn=X_cnn_test, X_lstm= X_lstm_test)
result = predict_module.evaluate(predictions, y_test.values)
result.to_csv(os.path.join(self.data_dir, 'result_test.csv'))
self.logger.info('Training end for Global models')
        self.logger.info('\n')
self.istrained = True
self.save(self.cluster_dir)
return self.to_dict()
def to_dict(self):
dict = {}
for k in self.__dict__.keys():
if k not in ['logger']:
dict[k] = self.__dict__[k]
return dict
def fit_model(self, cvs, method, static_data, cluster_dir, optimize_method, X_cnn=np.array([]), X_lstm=np.array([]), y=np.array([]), rated=1):
# deap, optuna, skopt, grid_search
if optimize_method=='deap':
from Fuzzy_clustering.ver_tf2.Sklearn_models_deap import sklearn_model
elif optimize_method=='optuna':
from Fuzzy_clustering.ver_tf2.Sklearn_models_optuna import sklearn_model
elif optimize_method=='skopt':
from Fuzzy_clustering.ver_tf2.Sklearn_models_skopt import sklearn_model
else:
from Fuzzy_clustering.ver_tf2.SKlearn_models import sklearn_model
# if (datetime.now().hour>=8 and datetime.now().hour<10):
# time.sleep(2*60*60)
if method == 'ML_RBF_ALL':
model_rbf = rbf_model(static_data['RBF'], rated, cluster_dir)
model_rbf_ols = rbf_ols_module(cluster_dir, rated, static_data['sklearn']['njobs'], GA=False)
model_rbf_ga = rbf_ols_module(cluster_dir, rated, static_data['sklearn']['njobs'], GA=True)
if model_rbf_ols.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of model_rbf_ols')
self.models['RBF_OLS'] = model_rbf_ols.optimize_rbf(cvs)
else:
self.models['RBF_OLS'] = model_rbf_ols.to_dict()
if model_rbf_ga.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of model_rbf_ga')
self.models['GA_RBF_OLS'] = model_rbf_ga.optimize_rbf(cvs)
else:
self.models['GA_RBF_OLS'] = model_rbf_ga.to_dict()
if model_rbf.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of model_rbf_adam')
self.models['RBFNN'] = model_rbf.rbf_train(cvs)
else:
self.models['RBFNN'] = model_rbf.to_dict()
elif method == 'ML_RBF_ALL_CNN':
model_rbf = rbf_model(static_data['RBF'], rated, cluster_dir)
model_rbf_ols = rbf_ols_module(cluster_dir, rated, static_data['sklearn']['njobs'], GA=False)
model_rbf_ga = rbf_ols_module(cluster_dir, rated, static_data['sklearn']['njobs'], GA=True)
if model_rbf_ols.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of model_rbf_ols')
self.models['RBF_OLS'] = model_rbf_ols.optimize_rbf(cvs)
else:
self.models['RBF_OLS'] = model_rbf_ols.to_dict()
if model_rbf_ga.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of model_rbf_ga')
self.models['GA_RBF_OLS'] = model_rbf_ga.optimize_rbf(cvs)
else:
self.models['GA_RBF_OLS'] = model_rbf_ga.to_dict()
if model_rbf.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of model_rbf_adam')
self.models['RBFNN'] = model_rbf.rbf_train(cvs)
else:
self.models['RBFNN'] = model_rbf.to_dict()
rbf_dir = [model_rbf_ols.cluster_dir, model_rbf_ga.cluster_dir, model_rbf.cluster_dir]
model_cnn = cnn_model(static_data, rated, cluster_dir, rbf_dir)
if model_cnn.istrained==False or static_data['train_online']==True:
self.logger.info('Start of training of model_cnn')
self.models['RBF-CNN'] = model_cnn.train_cnn(cvs)
else:
self.models['RBF-CNN'] = model_cnn.to_dict()
elif method == 'ML_NUSVM':
method =method.replace('ML_','')
model_sklearn = sklearn_model(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained==False or static_data['train_online']==True:
self.logger.info('Start of training of NUSVM')
self.models['NUSVM'] = model_sklearn.train(cvs)
else:
self.models['NUSVM'] = model_sklearn.to_dict()
elif method == 'ML_MLP':
method = method.replace('ML_','')
model_sklearn = sklearn_model(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained==False or static_data['train_online']==True:
self.logger.info('Start of training of MLP')
self.models['MLP'] = model_sklearn.train(cvs)
else:
self.models['MLP'] = model_sklearn.to_dict()
elif method == 'ML_SVM':
method = method.replace('ML_','')
model_sklearn = sklearn_model(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained==False or static_data['train_online']==True:
self.logger.info('Start of training of SVM')
self.models['SVM'] = model_sklearn.train(cvs)
else:
self.models['SVM'] = model_sklearn.to_dict()
elif method == 'ML_RF':
method = method.replace('ML_','')
model_sklearn = sklearn_model(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained==False or static_data['train_online']==True:
self.logger.info('Start of training of RF')
self.models['RF'] = model_sklearn.train(cvs)
else:
self.models['RF'] = model_sklearn.to_dict()
elif method == 'ML_XGB':
method = method.replace('ML_','')
model_sklearn = sklearn_model(cluster_dir, rated, method, static_data['sklearn']['njobs'])
if model_sklearn.istrained==False or static_data['train_online']==True:
self.logger.info('Start of training of XGB')
self.models['XGB'] = model_sklearn.train(cvs)
else:
self.models['XGB'] = model_sklearn.to_dict()
elif method == 'ML_CNN_3d':
cnn_model_3d = cnn_3d_model(static_data, rated, cluster_dir)
if cnn_model_3d.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of CNN_3d')
self.models['CNN_3d'] = cnn_model_3d.train_cnn(X_cnn, y)
else:
self.models['CNN_3d'] = cnn_model_3d.to_dict()
elif method == 'ML_LSTM_3d':
lstm_model_3d = lstm_3d_model(static_data, rated, cluster_dir)
if lstm_model_3d.istrained == False or static_data['train_online'] == True:
self.logger.info('Start of training of LSTM_3d')
self.models['LSTM_3d'] = lstm_model_3d.train_lstm(X_lstm, y)
else:
self.models['LSTM_3d'] = lstm_model_3d.to_dict()
self.save(self.cluster_dir)
def load(self, cluster_dir):
if os.path.exists(os.path.join(cluster_dir, 'Global_models.pickle')):
try:
f = open(os.path.join(cluster_dir, 'Global_models.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
tdict = {}
for k in tmp_dict.keys():
if k not in ['logger', 'static_data', 'data_dir', 'cluster_dir']:
tdict[k] = tmp_dict[k]
self.__dict__.update(tdict)
except:
raise ImportError('Cannot open Global models')
else:
raise ImportError('Cannot find Global models')
def save(self, pathname):
if not os.path.exists(pathname):
os.makedirs(pathname)
f = open(os.path.join(pathname, 'Global_models.pickle'), 'wb')
dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data', 'data_dir', 'cluster_dir']:
dict[k] = self.__dict__[k]
pickle.dump(dict, f)
f.close()
```
#### File: Fuzzy_clustering/ver_tf2/NWP_sampler.py
```python
import tensorflow as tf
from tqdm import tqdm
import numpy as np
import os, joblib, logging, sys
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
from sklearn.model_selection import train_test_split
class nwp_sampler():
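    # Trains one small dense decoder per NWP variable, mapping the scaled tabular inputs to the
    # corresponding 2-D NWP grid (apparently so grid-like NWP samples can be generated from tabular data alone).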
def __init__(self, static_data):
self.static_data = static_data
self.model_type = self.static_data['type']
self.data_variables = self.static_data['data_variables']
self.model_dir = os.path.join(self.static_data['path_model'], 'NWP_sampler')
self.istrained = False
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
try:
self.load(self.model_dir)
except:
pass
def train(self, X_inp, X, gpu_id='/cpu:0'):
if gpu_id == '/device:GPU:0':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
elif gpu_id == '/device:GPU:1':
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
self.gpu_id = gpu_id
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.static_data['path_model'], 'log_model.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
print('CNN training...begin')
logger.info('CNN training...begin')
i=0
X1 = []
self.columns = []
self.model_name = []
self.max_val = []
for var in sorted(self.data_variables):
if ((var == 'WS') and (self.model_type =='wind')) or ((var == 'Flux') and (self.model_type == 'pv')):
var_name = 'flux' if var == 'Flux' else 'wind'
self.columns.extend(['p_' + var_name] + [var_name] + ['n_' + var_name])
X1.append(X[:, :, :, i])
model1_name = 'p_' + var_name
self.model_name.append(model1_name)
self.max_val.append(1000 if var == 'Flux' else 30)
X1.append(X[:, :, :, i+1])
model1_name = var_name
self.model_name.append(model1_name)
self.max_val.append(1000 if var == 'Flux' else 30)
X1.append(X[:, :, :, i + 2])
model1_name = 'n_' + var_name
self.model_name.append(model1_name)
self.max_val.append(1000 if var == 'Flux' else 30)
i += 3
elif var in {'WD', 'Cloud'}:
X1.append(X[:, :, :, i])
self.columns.append('cloud' if var == 'Cloud' else 'direction')
model2_name = 'cloud' if var == 'Cloud' else 'direction'
self.model_name.append(model2_name)
self.max_val.append(100 if var == 'Cloud' else 360)
i += 1
elif (var in {'Temperature'}):
X1.append(X[:, :, :, i])
self.columns.append('Temp' if var == 'Temperature' else 'wind')
model2_name = 'Temp' if var == 'Temperature' else 'wind'
self.model_name.append(model2_name)
self.max_val.append(320 if var == 'Temperature' else 30)
i+=1
elif ((var == 'WS') and (self.model_type == 'pv')):
X1.append(X[:, :, :, i])
self.columns.append('Temp' if var == 'Temperature' else 'wind')
model2_name = 'Temp' if var == 'Temperature' else 'wind'
self.model_name.append(model2_name)
self.max_val.append(320 if var == 'Temperature' else 30)
i += 1
else:
i += 1
X_inp = X_inp[self.columns].values
self.models = dict()
for x, var, max_val in zip(X1, self.model_name,self.max_val):
self.models[var] = self.train_model(X_inp, (x / max_val)[:, :, :, np.newaxis])
self.istrained = True
self.save(self.model_dir)
def build_graph(self, inp, nwps, learning_rate):
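        # Three dense (elu) layers map the input features to a D1 x D2 grid; Gaussian noise on the
        # inputs acts as regularisation and the network is trained with an MSE cost via Adam.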
with tf.name_scope("build_cnn") as scope:
inp = tf.keras.layers.GaussianNoise(0.05)(inp)
full_layer_one = tf.keras.layers.Dense(units=32, activation=tf.nn.elu, name='dense1')
full_layer_two = tf.keras.layers.Dense(units=96, activation=tf.nn.elu,
name='dense2')
full_layer_three = tf.keras.layers.Dense(units=self.D1 * self.D2, activation=tf.nn.elu,
name='dense3')
full_two_out = full_layer_one(inp)
full_three_out = full_layer_two(full_two_out)
y_pred = full_layer_three(full_three_out)
y_pred = tf.reshape(y_pred, [-1, self.D1, self.D2, self.depth], name='reshape')
weights = full_layer_one.trainable_weights + full_layer_two.trainable_weights + full_layer_three.trainable_weights
with tf.name_scope("train_cnn") as scope:
cost_cnn = tf.reduce_mean(tf.square(y_pred - nwps))
optimizer_cnn = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_cnn = optimizer_cnn.minimize(cost_cnn)
accuracy_cnn = tf.reduce_mean(tf.abs(y_pred - nwps))
sse_cnn = tf.reduce_sum(tf.square(y_pred - nwps))
rse_cnn = tf.sqrt(tf.reduce_mean(tf.square(y_pred - nwps)))
return train_cnn, cost_cnn, accuracy_cnn, sse_cnn, rse_cnn, weights, full_layer_one, full_layer_two, full_layer_three
def build_graph_predict(self, inp):
with tf.name_scope("build_cnn") as scope:
full_layer_one = tf.keras.layers.Dense(units=32, activation=tf.nn.elu, name='dense1')
full_layer_two = tf.keras.layers.Dense(units=96, activation=tf.nn.elu,
name='dense2')
full_layer_three = tf.keras.layers.Dense(units=self.D1 * self.D2, activation=tf.nn.elu,
name='dense3')
full_two_out = full_layer_one(inp)
full_three_out = full_layer_two(full_two_out)
y_pred = full_layer_three(full_three_out)
y_pred = tf.reshape(y_pred, [-1, self.D1, self.D2, self.depth], name='reshape')
return y_pred, full_layer_one, full_layer_two, full_layer_three
def distance(self, obj_new, obj_old, obj_max, obj_min):
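        # Multi-objective early-stopping test: each criterion is normalised by its observed range and
        # the candidate weights are accepted only if the summed normalised change is below -0.05.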
if np.any(np.isinf(obj_old)):
obj_old = obj_new.copy()
obj_max = obj_new.copy()
return True, obj_old, obj_max, obj_min
if np.any(np.isinf(obj_min)) and not np.all(obj_max == obj_new):
obj_min = obj_new.copy()
d = 0
for i in range(obj_new.shape[0]):
if obj_max[i] < obj_new[i]:
obj_max[i] = obj_new[i]
if obj_min[i] > obj_new[i]:
obj_min[i] = obj_new[i]
d += (obj_new[i] - obj_old[i]) / (obj_max[i] - obj_min[i])
if d < -0.05:
obj_old = obj_new.copy()
return True, obj_old, obj_max, obj_min
else:
return False, obj_old, obj_max, obj_min
def train_model(self, X_inp, X):
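        # Fit one decoder on a continuous train/val/test split with a patience-based early-stopping
        # loop; returns the best weights, the chosen iteration count and the error history.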
if len(X.shape)==3:
X=X[:, :, :, np.newaxis]
if X_inp.shape[0] != X.shape[0]:
            raise ValueError('dataset_X and dataset_cnn do not have the same number of samples')
X_train, X_test, y_train, y_test = split_continuous(X_inp, X, test_size=0.15, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, random_state=42)
N, self.D1, self.D2, self.depth = y_train.shape
batch_size = 100
tf.compat.v1.reset_default_graph()
graph_cnn = tf.Graph()
with graph_cnn.as_default():
with tf.device("/cpu:0"):
x1 = tf.compat.v1.placeholder('float', shape=[None, X_inp.shape[1]],
name='input_data')
y_pred_ = tf.compat.v1.placeholder(tf.float32, shape=[None, self.D1, self.D2, self.depth], name='target_cnn')
with tf.device(self.gpu_id):
train_cnn, cost_cnn, accuracy_cnn, sse_cnn, rse_cnn, weights, full_layer_one, full_layer_two, full_layer_three = self.build_graph(x1, y_pred_, self.static_data['CNN']['learning_rate'])
obj_old = np.inf * np.ones(4)
obj_max = np.inf * np.ones(4)
obj_min = np.inf * np.ones(4)
batches = [np.random.choice(N, batch_size, replace=False) for _ in range(self.static_data['CNN']['max_iterations'] + 1)]
config_tf = tf.compat.v1.ConfigProto(allow_soft_placement=True, log_device_placement=True)
config_tf.gpu_options.allow_growth = True
res = dict()
self.best_weights = dict()
best_iteration = 0
best_glob_iterations = 0
ext_iterations = self.static_data['CNN']['max_iterations']
train_flag = True
patience = 10000
wait = 0
max_iterations =self.static_data['CNN']['max_iterations']
with tf.compat.v1.Session(graph=graph_cnn, config=config_tf) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
while train_flag:
for i in tqdm(range(max_iterations)):
if i % 1000 == 0:
sess.run([train_cnn],
feed_dict={x1: X_train[batches[i]], y_pred_: y_train[batches[i]]})
acc_new_v, mse_new_v, sse_new_v, rse_new_v, weights_cnn = sess.run(
[accuracy_cnn, cost_cnn, sse_cnn, rse_cnn, weights],
feed_dict={x1: X_val, y_pred_: y_val})
acc_new_t, mse_new_t, sse_new_t, rse_new_t = sess.run([accuracy_cnn, cost_cnn, sse_cnn, rse_cnn],
feed_dict={x1: X_test, y_pred_: y_test})
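                        # Combined objective: weighted blend of validation and test errors plus a
                        # penalty on their gap, to avoid favouring one split over the other.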
acc_new = 0.4 * acc_new_v + 0.6 * acc_new_t + 2 * np.abs(acc_new_v - acc_new_t)
                        mse_new = 0.4 * mse_new_v + 0.6 * mse_new_t + 2 * np.abs(mse_new_v - mse_new_t)
sse_new = 0.4 * sse_new_v + 0.6 * sse_new_t + 2 * np.abs(sse_new_t - sse_new_v)
rse_new = 0.4 * rse_new_v + 0.6 * rse_new_t + 2 * np.abs(rse_new_t - rse_new_v)
obj_new = np.array([acc_new, mse_new, sse_new, rse_new])
flag, obj_old, obj_max, obj_min = self.distance(obj_new, obj_old, obj_max, obj_min)
if flag:
variables_names = [v.name for v in tf.compat.v1.trainable_variables()]
for k, v in zip(variables_names, weights_cnn):
self.best_weights[k] = v
res[str(i)] = obj_old
print(acc_new)
best_iteration = i
wait = 0
else:
wait += 1
if wait > patience:
train_flag = False
break
else:
sess.run(train_cnn,
feed_dict={x1: X_train[batches[i]], y_pred_: y_train[batches[i]]})
wait += 1
best_glob_iterations = ext_iterations + best_iteration
if (max_iterations - best_iteration) <= 5000 and max_iterations > 2000:
ext_iterations += 10000
max_iterations = 10000
best_iteration = 0
else:
best_glob_iterations = ext_iterations + best_iteration
train_flag = False
sess.close()
model_dict = dict()
model_dict['best_weights'] = self.best_weights
model_dict['static_data'] = self.static_data
model_dict['n_vars'] = self.D1 * self.D2
model_dict['depth'] = self.depth
model_dict['best_iteration'] = best_glob_iterations
model_dict['metrics'] = obj_old
model_dict['error_func'] = res
print("Total accuracy cnn-3d: %s" % obj_old[0])
return model_dict
def run_models(self, X_inp, model_name):
if self.istrained:
best_weights = self.models[model_name]['best_weights']
tf.compat.v1.reset_default_graph()
graph_cnn = tf.Graph()
with graph_cnn.as_default():
with tf.device("/cpu:0"):
x1 = tf.compat.v1.placeholder('float', shape=[None, X_inp.shape[1]],
name='input_data')
with tf.device(self.gpu_id):
y_pred_, full_layer_one, full_layer_two, full_layer_three = self.build_graph_predict(x1)
config_tf = tf.compat.v1.ConfigProto(allow_soft_placement=True)
config_tf.gpu_options.allow_growth = True
with tf.compat.v1.Session(graph=graph_cnn, config=config_tf) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
full_layer_one.set_weights(
[best_weights['build_cnn/dense1/kernel:0'], best_weights['build_cnn/dense1/bias:0']])
full_layer_two.set_weights(
[best_weights['build_cnn/dense2/kernel:0'], best_weights['build_cnn/dense2/bias:0']])
full_layer_three.set_weights(
[best_weights['build_cnn/dense3/kernel:0'], best_weights['build_cnn/dense3/bias:0']])
y_pred = sess.run(y_pred_, feed_dict={x1: X_inp})
sess.close()
else:
            raise ModuleNotFoundError("Error in prediction: the nwp_sampler model does not seem to be properly trained")
return y_pred
def load(self, pathname):
cluster_dir = pathname
if os.path.exists(os.path.join(cluster_dir, 'nwp_sampler' + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(cluster_dir, 'nwp_sampler' + '.pickle'))
self.__dict__.update(tmp_dict)
except:
raise ImportError('Cannot open nwp_sampler model')
else:
raise ImportError('Cannot find nwp_sampler model')
def save(self, pathname):
tmp_dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data_all', 'static_data', 'model_dir', 'temp_dir', 'cluster_cnn_dir', 'cluster_dir']:
tmp_dict[k] = self.__dict__[k]
joblib.dump(tmp_dict,os.path.join(pathname, 'nwp_sampler' + '.pickle'), compress=9)
if __name__ == '__main__':
if sys.platform == 'linux':
sys_folder = '/media/smartrue/HHD1/George/models/'
else:
sys_folder = 'D:/models/'
path_project = sys_folder + '/Crossbow/Bulgaria_ver2/pv/Lach/model_ver0'
static_data = joblib.load(os.path.join(path_project, 'static_data.pickle'))
data_path = static_data['path_data']
X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))
else:
X_cnn = np.array([])
sc = MinMaxScaler(feature_range=(0, 1)).fit(X.values)
X = pd.DataFrame(sc.transform(X.values),columns=X.columns,index=X.index)
create_nwps = nwp_sampler(static_data)
create_nwps.train(X, X_cnn, gpu_id=static_data['CNN']['gpus'][0])
wind_samples = create_nwps.run_models(X[create_nwps.columns].values,'flux')
dir_samples = create_nwps.run_models(X[create_nwps.columns].values,'cloud')
print('End')
```
#### File: Fuzzy_clustering/ver_tf2/pvlib_power_curve.py
```python
import pvlib
import numpy as np
import pandas as pd
def sol_pos(times_index,latitude,longitude,tz):
times_index=pd.DatetimeIndex([times_index])
altitude = 42 # above the sea level in meters
sand_point = pvlib.location.Location(latitude, longitude, tz=tz, altitude=altitude, name=tz)
solpos = pvlib.solarposition.get_solarposition(times_index, sand_point.latitude, sand_point.longitude)
return solpos['apparent_zenith'].values[0]
```
#### File: Fuzzy_clustering/ver_tf2/RBFNN_module.py
```python
import os
import gc
import tensorflow as tf
import numpy as np
import pickle
import glob
import shutil
import multiprocessing as mp
import pandas as pd
from Fuzzy_clustering.ver_tf2.RBFNN_tf_core import RBFNN
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
import logging
from joblib import Parallel, delayed
# from util_database import write_database
# from Fuzzy_clustering.ver_tf2.Forecast_model import forecast_model
# from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
def optimize_rbf(rbf, X_train, y_train, X_val, y_val, X_test, y_test, num_centr, lr, gpu):
acc_old = np.inf
acc_old, centroids, radius, w, model = rbf.train(X_train, y_train, X_val, y_val, X_test, y_test, num_centr, lr, gpu_id=gpu)
return num_centr, lr, acc_old, model
class rbf_model(object):
def __init__(self, static_data, rated, cluster_dir):
self.static_data=static_data
self.cluster = os.path.basename(cluster_dir)
self.rated=rated
self.cluster_dir=os.path.join(cluster_dir, 'RBFNN')
self.model_dir = os.path.join(self.cluster_dir, 'model')
self.istrained = False
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
try:
self.load(self.model_dir)
except:
pass
def train_core(self, X_train, y_train, X_val, y_val, X_test, y_test, ncs, lrs):
self.gpu = True
nproc = self.static_data['njobs']
gpus = np.tile(self.static_data['gpus'], ncs.shape[0]*lrs.shape[0])
RBFnn = RBFNN(self.model_dir, rated=self.rated, max_iterations=self.static_data['max_iterations'])
# n=0
# optimize_rbf(RBFnn, cvs[n][0], cvs[n][1], cvs[n][2], cvs[n][3], X_test, y_test, nc[n], gpus[n])
# pool = mp.Pool(processes=nproc)
#
# result = []
# k=0
# for n in range(ncs.shape[0]):
# for lr in range(lrs.shape[0]):
# optimize_rbf(RBFnn, X_train, y_train, X_val, y_val, X_test, y_test, ncs[n], lrs[lr], gpus[k])
# result.append(pool.apply_async(optimize_rbf,
# args=(RBFnn, X_train, y_train, X_val, y_val, X_test, y_test, ncs[n], lrs[lr], gpus[k])))
k = np.arange(ncs.shape[0]*lrs.shape[0])
# optimize_rbf(RBFnn, X_train, y_train, X_val, y_val, X_test, y_test, ncs[0], lrs[0], gpus[0])
results = Parallel(n_jobs=nproc)(
delayed(optimize_rbf)(RBFnn, X_train, y_train, X_val, y_val, X_test, y_test, ncs[n], lrs[lr], gpus[i+j]) for i, n in enumerate(range(ncs.shape[0])) for j, lr in enumerate(range(lrs.shape[0])))
# k+=1
# results = [p.get() for p in result]
# pool.close()
# pool.terminate()
# pool.join()
r = pd.DataFrame(results, columns=['num_centr', 'lr', 'acc', 'model'])
self.num_centr = r.loc[r['acc'].idxmin()]['num_centr']
self.lr = r.loc[r['acc'].idxmin()]['lr']
self.rbf_performance = r['acc'].min()
self.save(self.model_dir)
gc.collect()
models = [r2[3] for r2 in results]
return models
def rbf_train(self, cvs):
logger = logging.getLogger('RBFNN ADAM_train_' + self.cluster)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.model_dir, 'log_train_' + self.cluster + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
print('RBFNN ADAM training...begin')
logger.info('RBFNN ADAM training...begin for %s', self.cluster)
nc = [8, 12, 16, 20, 24, 28, 32, 36, 40, 48, 52]
# nc = [12]
self.N = cvs[0][0].shape[1]
self.D = cvs[0][0].shape[0] + cvs[0][2].shape[0] + cvs[0][4].shape[0]
X_train = cvs[0][0]
y_train = cvs[0][1].reshape(-1, 1)
X_val = cvs[0][2]
y_val = cvs[0][3].reshape(-1, 1)
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
ncs = np.array(nc)
lrs=np.array([self.static_data['learning_rate']])
models = self.train_core(X_train, y_train, X_val, y_val, X_test, y_test, ncs, lrs)
same = 1
for model in models:
logger.info('Best number of centers training ')
logger.info('Model with num centers %s ', str(model['num_centr']))
logger.info('val_mae %s, val_mse %s, val_sse %s, val_rms %s ', *model['metrics'])
logger.info('Model trained with max iterations %s ', str(model['best_iteration']))
train_res = pd.DataFrame.from_dict(model['error_func'], orient='index')
if not os.path.exists(
os.path.join(self.model_dir, 'train_centers_result_' + str(model['num_centr']) + '.csv')):
train_res.to_csv(
os.path.join(self.model_dir, 'train_centers_result_' + str(model['num_centr']) + '.csv'),
header=None)
else:
train_res.to_csv(os.path.join(self.model_dir,
'train_centers_result_' + str(model['num_centr']) + '_' + str(
same) + '.csv'), header=None)
same += 1
logger.info('temporary performance %s ', str(self.rbf_performance))
logger.info('temporary RBF number %s ', str(self.num_centr))
logger.info('\n')
logger.info('\n')
if self.num_centr >= 5 and self.static_data['Fine_tuning']:
logger.info('Begin fine tuning....')
print('Begin fine tuning....')
ncs = np.hstack(
[np.arange(self.num_centr - 2, self.num_centr - 1), np.arange(self.num_centr + 1, self.num_centr + 3)])
models = self.train_core(X_train, y_train, X_val, y_val, X_test, y_test, ncs, lrs)
same = 1
for model in models:
logger.info('fine tunninig training ')
logger.info('Model with num centers %s ', str(model['num_centr']))
logger.info('val_mae %s, val_mse %s, val_sse %s, val_rms %s ', *model['metrics'])
logger.info('Model trained with max iterations %s ', str(model['best_iteration']))
train_res = pd.DataFrame.from_dict(model['error_func'], orient='index')
if not os.path.exists(
os.path.join(self.model_dir, 'train_fine_tune_result_' + str(model['num_centr']) + '.csv')):
train_res.to_csv(
os.path.join(self.model_dir, 'train_fine_tune_result_' + str(model['num_centr']) + '.csv'),
header=None)
else:
train_res.to_csv(os.path.join(self.model_dir,
'train_fine_tune_result_' + str(model['num_centr']) + '_' + str(
same) + '.csv'), header=None)
same += 1
logger.info('After fine tuning performance %s ', str(self.rbf_performance))
logger.info('After fine tuning RBF number %s ', str(self.num_centr))
logger.info('\n')
ncs = np.array([self.num_centr])
lrs=np.array([1e-3, 5e-4, 1e-4, 5e-5])
models = self.train_core(X_train, y_train, X_val, y_val, X_test, y_test, ncs, lrs)
same=1
for model in models:
logger.info('Best Learning rate training ')
logger.info('Model with num centers %s ', str(model['num_centr']))
logger.info('val_mae %s, val_mse %s, val_sse %s, val_rms %s ', *model['metrics'])
logger.info('Model trained with max iterations %s ', str(model['best_iteration']))
train_res = pd.DataFrame.from_dict(model['error_func'], orient='index')
if not os.path.exists(os.path.join(self.model_dir,'train_lr_result_' + str(model['num_centr']) + '.csv')):
train_res.to_csv(os.path.join(self.model_dir,'train_lr_result_' + str(model['num_centr']) + '.csv'), header=None)
else:
train_res.to_csv(os.path.join(self.model_dir, 'train_lr_result_' + str(model['num_centr']) + '_'+ str(same) + '.csv'), header=None)
same+=1
logger.info('Tuning lr performance %s ', str(self.rbf_performance))
logger.info('Tuning lr is %s ', str(self.lr))
logger.info('\n')
ncs = np.array([self.num_centr])
ncs = np.repeat(ncs, 3)
gpus = np.tile(self.static_data['gpus'], ncs.shape[0])
RBFnn = RBFNN(self.model_dir, rated=self.rated, max_iterations=self.static_data['max_iterations'])
nproc = self.static_data['njobs']
# pool = mp.Pool(processes=nproc)
#
# result = [pool.apply_async(optimize_rbf, args=(
# RBFnn, cvs[n][0], cvs[n][1].reshape(-1, 1), cvs[n][2], cvs[n][3].reshape(-1, 1), X_test, y_test, ncs[n], self.lr, gpus[n])) for n in
# range(ncs.shape[0])]
#
# results = [p.get() for p in result]
# pool.close()
# pool.terminate()
# pool.join()
#
results = Parallel(n_jobs=nproc)(
delayed(optimize_rbf)(RBFnn, cvs[n][0], cvs[n][1].reshape(-1, 1), cvs[n][2], cvs[n][3].reshape(-1, 1), X_test, y_test, ncs[n], self.lr, gpus[n]) for n in range(ncs.shape[0]))
r = pd.DataFrame(results, columns=['num_centr','lr', 'acc', 'model'])
r2 = r.groupby(['num_centr'])['model'].apply(lambda x: np.squeeze([x]))
r1 = r.groupby(['num_centr']).mean()
self.acc_old = r1['acc'].values[0]
r2 = r2[self.num_centr]
self.models = [r2[i] for i in range(3)]
self.rbf_performance = self.acc_old
self.istrained = True
self.save(self.model_dir)
gc.collect()
same=1
for model in self.models:
logger.info('Final training ')
logger.info('Model with num centers %s ', str(model['num_centr']))
logger.info('val_mae %s, val_mse %s, val_sse %s, val_rms %s ', *model['metrics'])
logger.info('Model trained with max iterations %s ', str(model['best_iteration']))
train_res = pd.DataFrame.from_dict(model['error_func'], orient='index')
if not os.path.exists(os.path.join(self.model_dir,'train_fin_result_' + str(model['num_centr']) + '.csv')):
train_res.to_csv(os.path.join(self.model_dir,'train_fin_result_' + str(model['num_centr']) + '.csv'), header=None)
else:
train_res.to_csv(os.path.join(self.model_dir, 'train_fin_result_' + str(model['num_centr']) + '_'+ str(same) + '.csv'), header=None)
same+=1
logger.info('final performance %s ', str(self.rbf_performance))
logger.info('final RBF number %s ', str(self.num_centr))
logger.info('RBFNN training...end for %s', self.cluster)
logger.info('\n')
return self.to_dict()
def to_dict(self):
dict = {}
for k in self.__dict__.keys():
if k not in ['static_data', 'logger', 'cluster_dir','model_dir', 'model']:
dict[k] = self.__dict__[k]
return dict
def predict(self,X):
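        # Average the predictions of the stored RBF networks: Gaussian activations of X w.r.t. each
        # model's centres and radii, followed by that model's linear output weights.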
p=[]
self.load(self.model_dir)
for i in range(len(self.models)):
centroids=self.models[i]['centroids']
radius=self.models[i]['Radius']
w=self.models[i]['W']
s = X.shape
d1 = np.transpose(np.tile(np.expand_dims(X, axis=0), [self.num_centr, 1, 1]), [1, 0, 2]) - np.tile(
np.expand_dims(centroids, axis=0), [s[0], 1, 1])
d = np.sqrt(np.sum(np.power(np.multiply(d1, np.tile(np.expand_dims(radius, axis=0), [s[0], 1, 1])),2), axis=2))
phi = np.exp((-1) * np.power(d,2))
p.append(np.matmul(phi, w))
p=np.mean(np.array(p),axis=0)
return p
def rbf_train_TL(self, cvs, model, gpu):
print('RBFNN ADAM training...begin')
self.N = cvs[0][0].shape[1]
self.D = cvs[0][0].shape[0] + cvs[0][2].shape[0] + cvs[0][4].shape[0]
X_test = cvs[0][4]
y_test = cvs[0][5].reshape(-1, 1)
ncs = np.array([model['num_centr']])
ncs = np.repeat(ncs, 3)
gpus = np.tile(gpu, ncs.shape[0])
RBFnn = RBFNN(self.model_dir, rated=self.rated, max_iterations=self.static_data['max_iterations'])
nproc = self.static_data['njobs']
results = Parallel(n_jobs=nproc)(
delayed(optimize_rbf)(RBFnn, cvs[n][0], cvs[n][1].reshape(-1, 1), cvs[n][2], cvs[n][3].reshape(-1, 1),
X_test, y_test, ncs[n], model['lr'], gpus[n]) for n in range(ncs.shape[0]))
r = pd.DataFrame(results, columns=['num_centr', 'lr', 'acc', 'model'])
r2 = r.groupby(['num_centr'])['model'].apply(lambda x: np.squeeze([x]))
r1 = r.groupby(['num_centr']).mean()
self.num_centr = model['num_centr']
self.lr = model['lr']
self.acc_old = r1['acc'].values[0]
r2 = r2[self.num_centr]
self.models = [r2[i] for i in range(3)]
self.rbf_performance = self.acc_old
self.istrained = True
self.save(self.model_dir)
gc.collect()
return self.to_dict()
def move_files(self, path1, path2):
for filename in glob.glob(os.path.join(path1, '*.*')):
shutil.copy(filename, path2)
def compute_metrics(self, pred, y, rated):
if rated == None:
rated = y.ravel()
else:
rated = 1
err = np.abs(pred.ravel() - y.ravel()) / rated
sse=np.sum(np.square(pred.ravel()-y.ravel()))
rms=np.sqrt(np.mean(np.square(err)))
mae=np.mean(err)
mse = sse/y.shape[0]
return [sse, rms, mae, mse]
def load(self, pathname):
cluster_dir = pathname
if os.path.exists(os.path.join(cluster_dir, 'rbfnn' + '.pickle')):
try:
f = open(os.path.join(cluster_dir, 'rbfnn' + '.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
self.__dict__.update(tmp_dict)
except:
raise ImportError('Cannot open RBFNN model')
else:
raise ImportError('Cannot find RBFNN model')
def save(self, pathname):
f = open(os.path.join(pathname, 'rbfnn' + '.pickle'), 'wb')
dict = {}
for k in self.__dict__.keys():
if k not in ['static_data', 'logger', 'cluster_dir','model_dir']:
dict[k] = self.__dict__[k]
pickle.dump(dict, f)
f.close()
# if __name__=='__main__':
# cluster_dir='D:/APE_net_ver2/Regressor_layer/rule.2'
# data_dir='D:/APE_net_ver2/Regressor_layer/rule.2/data'
#
# rated = None
#
# static_data = write_database()
# X=np.load(os.path.join(data_dir, 'X_train.npy'))
# y=np.load(os.path.join(data_dir, 'y_train.npy'))
# forecast = forecast_model(static_data, use_db=False)
# forecast.load()
# X=X[:,0:-1]
# X = forecast.sc.transform(X)
# y= forecast.scale_y.transform(y)
# # scy = MinMaxScaler(feature_range=(0, 1)).fit(y.reshape(-1,1))
# #
# # y = scy.transform(y.reshape(-1,1)).ravel()
# N, D = X.shape
# n_split = int(np.round(N * 0.85))
# X_test1 = X[n_split + 1:, :]
# y_test1 = y[n_split + 1:]
# X = X[:n_split, :]
# y = y[:n_split]
#
# # X_train, X_val, y_train, y_val=split_continuous(X, y, test_size=0.15, random_state=42)
# X_train, X_test, y_train, y_test=split_continuous(X, y, test_size=0.15, random_state=42)
#
# model_rbf = rbf_model(static_data['RBF'], static_data['type'], static_data['rated'], cluster_dir)
# model_rbf.rbf_train(X, y, X_test, y_test)
# pred = model_rbf.predict(X_test1)
# metrics_single = model_rbf.compute_metrics(pred, y_test1, 0)
#
# print('Single width')
# print('sse, rms, mae, mse')
# print(metrics_single)
```
#### File: Fuzzy_clustering/ver_tf2/RBF_ols_predict.py
```python
import numpy as np
import joblib, os, pickle
from deap import base, creator, tools, algorithms
from itertools import repeat
from collections import Sequence
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
class rbf_ols_predict(object):
def __init__(self, cluster_dir, rated, njobs, GA=False):
self.cluster = os.path.basename(cluster_dir)
self.njobs = 2 * njobs
self.rated = rated
self.GA=GA
self.istrained = False
if GA==False:
self.cluster_dir = os.path.join(cluster_dir, 'RBF_OLS')
self.model_dir = os.path.join(self.cluster_dir, 'model')
else:
self.cluster_dir = os.path.join(cluster_dir, 'GA_RBF_OLS')
self.model_dir = os.path.join(self.cluster_dir, 'model')
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
try:
self.load(self.model_dir)
except:
pass
def compute_metrics(self, pred, y, rated):
if rated== None:
rated=y.ravel()
else:
rated=20
err=np.abs(pred.ravel()-y.ravel())/rated
sse=np.sum(np.square(pred.ravel()-y.ravel()))
rms=np.sqrt(np.mean(np.square(err)))
mae=np.mean(err)
mse = sse/y.shape[0]
return [sse, rms, mae, mse]
def predict(self, x):
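        # Gaussian RBF activations of x for each stored model; the last element of W is the bias term.
        # The final prediction is the average over all stored models.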
if len(x.shape)==1:
x=x.reshape(1,-1)
self.load(self.model_dir)
pred=[]
for model in self.models:
v = (np.atleast_2d(x)[:, np.newaxis] - model['centroids'][np.newaxis, :]) * model['Radius']
v = np.sqrt((v ** 2.).sum(-1))
v = np.exp(-(v ** 2.))
v = np.matmul(v,model['W'][:-1])+model['W'][-1]
pred.append(v)
return np.mean(np.array(pred), axis=0)
def load(self, pathname):
# creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, -1.0, -1.0))
# creator.create("Individual", np.ndarray, fitness=creator.FitnessMin)
# toolbox = base.Toolbox()
cluster_dir = pathname
if os.path.exists(os.path.join(cluster_dir, 'rbf_ols' + '.pickle')):
try:
f = open(os.path.join(cluster_dir, 'rbf_ols' + '.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
del tmp_dict['cluster_dir'], tmp_dict[
'model_dir']
self.__dict__.update(tmp_dict)
except:
                raise ImportError('Cannot open RBF_OLS model')
else:
            raise ImportError('Cannot find RBF_OLS model')
```
#### File: ver_tf2/scripts_for_tf2/RBFNN_model.py
```python
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import ops
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from clustering.algorithms import FCV
from tqdm import tqdm
import numpy as np
import os
import pandas as pd
from Fuzzy_clustering.ver_0.utils_for_forecast import split_continuous
from util_database import write_database
from Fuzzy_clustering.ver_tf2.Forecast_model import forecast_model
class RBF_model(tf.keras.Model):
def __init__(self, num_centr):
super(RBF_model, self).__init__()
self.num_centr = num_centr
def find_centers(self,X_train):
self.N, self.D = X_train.shape
self.batch_size = self.N
try:
centers = FCV(X_train, n_clusters=self.num_centr, r=4).optimize()
c = centers.C
except:
c = KMeans(n_clusters=self.num_centr, random_state=0).fit(X_train)
c = c.cluster_centers_
centroids = c.astype(np.float32)
return centroids
def initialize(self,inputs):
centroids = self.find_centers(inputs)
cnt = pd.DataFrame(centroids, index=['c' + str(i) for i in range(centroids.shape[0])],
columns=['v' + str(i) for i in range(centroids.shape[1])])
var_init = pd.DataFrame(columns=['v' + str(i) for i in range(centroids.shape[1])])
for r in cnt.index:
v = (cnt.loc[r] - cnt.drop(r)).min()
v[v == 0] = 0.0001
v.name = r
var_init = var_init.append(v)
var_init = tf.convert_to_tensor(var_init.values, dtype=tf.float32, name='var_init')
self.var = tf.Variable(var_init,
dtype=tf.float32, name='RBF_variance')
self.centroids = tf.convert_to_tensor(centroids, dtype=tf.float32, name='centroids')
@tf.function
def lin_out(self, x, y):
return tf.linalg.lstsq(x, y, l2_regularizer=0)
@tf.function
def rbf_map(self, x):
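        # Gaussian RBF layer: scaled distances of every sample to every centre, mapped through exp(-d^2).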
s = tf.shape(x)
d1 = tf.transpose(tf.tile(tf.expand_dims(x, 0), [self.num_centr, 1, 1]), perm=[1, 0, 2]) - tf.tile(
tf.expand_dims(self.centroids, 0), [s[0], 1, 1])
d = tf.sqrt(
tf.reduce_sum(tf.pow(tf.multiply(d1, tf.tile(tf.expand_dims(self.var, 0), [s[0], 1, 1])), 2), axis=2))
return tf.cast(tf.exp(tf.multiply(tf.constant(-1, dtype=tf.float32), tf.square(d))), tf.float32)
def call(self, inputs, training=None, mask=None):
if training:
x = inputs[:, :-1]
y = tf.expand_dims(inputs[:, -1], 1)
else:
x=inputs
phi = self.rbf_map(x)
if training:
self.w = self.lin_out(phi, y)
h = tf.matmul(phi, self.w)
else:
h = tf.matmul(phi, self.w)
return h
class sum_square_loss(tf.keras.losses.Loss):
def __init__(self, name='SSE', **kwargs):
super(sum_square_loss, self).__init__(name=name, **kwargs)
def call(self, y_true, y_pred, sample_weight=None):
return math_ops.reduce_sum(math_ops.square(y_true - y_pred))
class RBF_train():
def __init__(self, path_model, rated=None, max_iterations=10000,):
self.path_model = path_model
self.rated = rated
self.max_iterations = max_iterations
def distance(self, obj_new, obj_old, obj_max, obj_min):
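        # Same range-normalised multi-objective improvement test as in the other modules; here any
        # negative summed change (d < 0) accepts the candidate weights.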
if np.any(np.isinf(obj_old)):
obj_old = obj_new.copy()
obj_max = obj_new.copy()
return True, obj_old, obj_max, obj_min
if np.any(np.isinf(obj_min)) and not np.all(obj_max == obj_new):
obj_min = obj_new.copy()
d = 0
for i in range(obj_new.shape[0]):
if obj_max[i] < obj_new[i]:
obj_max[i] = obj_new[i]
if obj_min[i] > obj_new[i]:
obj_min[i] = obj_new[i]
d += (obj_new[i] - obj_old[i]) / (obj_max[i] - obj_min[i])
if d < 0:
obj_old = obj_new.copy()
return True, obj_old, obj_max, obj_min
else:
return False, obj_old, obj_max, obj_min
def train(self, X_train, y_train, X_val, y_val, X_test, y_test, num_centr, lr, gpu_id=[-1]):
tf.config.experimental.set_visible_devices(gpu_id[0], 'GPU')
tf.config.experimental.set_memory_growth(gpu_id[0], True)
tf.config.set_soft_device_placement(True)
tf.debugging.set_log_device_placement(True)
self.N, self.D = X_train.shape
X_val = np.vstack((X_val, X_test))
y_val = np.vstack((y_val, y_test))
X_train = X_train.astype('float32',casting='same_kind')
X_val = X_val.astype('float32',casting='same_kind')
y_train = y_train.astype('float32',casting='same_kind')
y_val = y_val.astype('float32',casting='same_kind')
batch_size = self.N
model = RBF_model(num_centr)
model.initialize(X_train)
optimizer = tf.keras.optimizers.Adam(lr)
batches = [np.random.choice(self.N, batch_size, replace=False) for _ in range(self.max_iterations)]
obj_old = np.inf * np.ones(4)
obj_max = np.inf * np.ones(4)
obj_min = np.inf * np.ones(4)
if self.rated is None:
loss_fn = sum_square_loss()
mae = tf.keras.metrics.MeanAbsolutePercentageError(name='mae')
mse = tf.keras.metrics.MeanSquaredLogarithmicError(name='mse')
rms = tf.keras.metrics.RootMeanSquaredError(name='rms')
else:
loss_fn = sum_square_loss()
mae = tf.keras.metrics.MeanAbsolutePercentageError(name='mae')
mse = tf.keras.metrics.MeanSquaredLogarithmicError(name='mse')
rms = tf.keras.metrics.RootMeanSquaredError(name='rms')
res = dict()
self.best_weights = None
best_iteration = 0
best_glob_iterations = 0
max_iterations = self.max_iterations
ext_iterations = self.max_iterations
train_flag = True
patience = 20000
wait = 0
while train_flag:
for i in tqdm(range(max_iterations)):
if i % 500 == 0:
with tf.GradientTape() as tape:
predictions = model(np.hstack((X_train,y_train)), training=True)
loss = loss_fn(y_train, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
pred_val = model(X_val, training=False)
val_mae = mae(y_val, pred_val)
val_mse = mse(y_val, pred_val)
val_rms = rms(y_val, pred_val)
val_sse = loss_fn(y_val, pred_val)
obj_new = np.array([val_mae, val_mse, val_sse, val_rms])
flag, obj_old, obj_max, obj_min = self.distance(obj_new, obj_old, obj_max, obj_min)
if flag:
res[str(i)] = obj_old
print(val_mae.numpy())
self.best_weights = model.get_weights()
best_iteration = i
wait = 0
else:
wait += 1
if wait > patience:
train_flag = False
break
else:
with tf.GradientTape() as tape:
predictions = model(np.hstack((X_train,y_train)), training=True)
loss = loss_fn(y_train, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
wait += 1
best_glob_iterations = ext_iterations + best_iteration
if (max_iterations - best_iteration) <= 10000:
ext_iterations += 20000
max_iterations = 20000
else:
best_glob_iterations = ext_iterations + best_iteration
train_flag = False
model.set_weights(self.best_weights)
model.save_weights(self.path_model + '/rbf_model.h5')
model_dict = dict()
model_dict['centroids'] = model.centroids.numpy()
model_dict['Radius'] = model.var.numpy()
model_dict['n_vars'] = self.D
model_dict['num_centr'] = num_centr
model_dict['W'] = model.w.numpy()
model_dict['best_iteration'] = best_glob_iterations
model_dict['metrics'] = obj_old
model_dict['error_func'] = res
print("Total accuracy cnn: %s" % obj_old[0])
return obj_old[3], model.centroids.numpy(), model.var.numpy(), model.w.numpy(), model_dict
if __name__ == '__main__':
cluster_dir = 'D:/APE_net_ver2/Regressor_layer/rule.2'
data_dir = 'D:/APE_net_ver2/Regressor_layer/rule.2/data'
rated = None
X = np.load(os.path.join(data_dir, 'X_train.npy'))
y = np.load(os.path.join(data_dir, 'y_train.npy'))
static_data = write_database()
forecast = forecast_model(static_data, use_db=False)
forecast.load()
X = X[:, 0:-1]
X = forecast.sc.transform(X)
y = forecast.scale_y.transform(y)
X_train, X_test, y_train, y_test = split_continuous(X, y, test_size=0.15, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.15, random_state=42)
rbf=RBF_train(cluster_dir+'/RBFNN',rated=rated, max_iterations=1000)
rbf.train(X_train, y_train, X_val, y_val, X_test, y_test, 12, 0.0001, gpu_id=tf.config.experimental.list_physical_devices('GPU'))
```
#### File: Fuzzy_clustering/ver_tf2/skiron_extractor.py
```python
import os
import ftplib
import pandas as pd
import pygrib, joblib, logging
import numpy as np
from joblib import Parallel, delayed
class skiron_Extractor():
def __init__(self, projects_group, pathnwp, nwp_resolution, path_nwp_group, dates_ts, area_group, njobs=1):
self.pathnwp = pathnwp
self.pathnwp_group = path_nwp_group
self.nwp_resolution = nwp_resolution
self.area = area_group
self.projects_group = projects_group
self.njobs = njobs
self.create_logger()
self.dates_ts = self.check_dates(dates_ts)
def create_logger(self):
self.logger = logging.getLogger('log_' + self.projects_group + '.log')
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(os.path.dirname(self.pathnwp_group), 'log_' + self.projects_group + '.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def check_dates(self, dates_ts):
start_date = pd.to_datetime(dates_ts[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(dates_ts[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(dates_ts.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def skiron_download(self, dt):
with ftplib.FTP('ftp.mg.uoa.gr') as ftp:
try:
ftp.login('mfstep', '!lam')
ftp.set_pasv(True)
except:
print('Error in connection to FTP')
local_dir=self.pathnwp +dt.strftime('%Y')+'/'+ dt.strftime('%d%m%y')
if not os.path.exists(local_dir):
os.makedirs(local_dir)
try:
for hor in range(76):
target_filename='/forecasts/Skiron/daily/005X005/'+dt.strftime('%d%m%y')+'/MFSTEP005_00'+ dt.strftime('%d%m%y')+'_'+str(hor).zfill(3)+'.grb'
self.logger.info('Trying to download nwp file %s', '/MFSTEP005_00'+ dt.strftime('%d%m%y')+'_'+str(hor).zfill(3)+'.grb')
local_filename = local_dir + '/MFSTEP005_00' + dt.strftime('%d%m%y') + '_' + str(hor).zfill(3) + '.grb'
if not os.path.exists(local_filename):
with open(local_filename, 'w+b') as f:
res = ftp.retrbinary('RETR %s' % target_filename, f.write)
                        count = 0
                        while not res.startswith('226 Transfer complete') and count <= 4:
                            print('Download of file {0} did not complete, retrying.'.format(target_filename))
                            os.remove(local_filename)
                            with open(local_filename, 'w+b') as f:
                                res = ftp.retrbinary('RETR %s' % target_filename, f.write)
                            count += 1
                        if res.startswith('226 Transfer complete'):
                            self.logger.info('Success to download nwp file %s',
                                             '/MFSTEP005_00' + dt.strftime('%d%m%y') + '_' + str(hor).zfill(
                                                 3) + '.grb')
                        else:
                            self.logger.info('Failed to download nwp file %s',
                                             '/MFSTEP005_00' + dt.strftime('%d%m%y') + '_' + str(hor).zfill(
                                                 3) + '.grb')
except:
print('Error downloading {0} '.format(local_filename))
ftp.quit()
def extract(self, grb, la1,la2,lo1,lo2):
nwps=dict()
if self.nwp_resolution == 0.05:
g = grb.message(21)
Uwind, lat, long = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)
g = grb.message(22)
Vwind = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)[0]
else:
g = grb.message(1)
Uwind, lat, long = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)
g = grb.message(2)
Vwind = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)[0]
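        # Convert the U/V wind components to wind speed and meteorological direction (degrees).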
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(Uwind) + np.square(Vwind))
wdir = np.arctan2(Uwind, Vwind) * r2d + 180
nwps['lat'] = lat
nwps['long'] = long
nwps['Uwind'] = Uwind
nwps['Vwind'] = Vwind
nwps['WS'] = wspeed
nwps['WD'] = wdir
g = grb.message(3)
x = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)
nwps['Temperature'] = x[0]
g = grb.message(7)
x = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)
nwps['Precipitation'] = x[0]
g = grb.message(5)
x = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)
nwps['Cloud'] = x[0]
g = grb.message(8)
x = g.data(lat1=la1, lat2=la2, lon1=lo1, lon2=lo2)
nwps['Flux'] = x[0]
del x
return nwps
def nwps_extract_for_train(self, t):
nwps = dict()
dates = pd.date_range(start=t + pd.DateOffset(hours=19), end=t + pd.DateOffset(hours=53), freq='H')
hors = [int(hor) for hor in range(20, 49)]
for hor, dt in zip(hors, dates):
if self.nwp_resolution == 0.05:
fname = os.path.join(self.pathnwp,
t.strftime('%Y') + '/' + t.strftime('%d%m%y') + '/MFSTEP005_00' + t.strftime(
'%d%m%y') + '_' + str(hor).zfill(3) + '.grb')
else:
fname = os.path.join(self.pathnwp,
t.strftime('%Y') + '/MFSTEP_IASA_00' + t.strftime(
'%d%m%y') + '_' + str(hor).zfill(3) + '.grb')
#MFSTEP_IASA_00010117_000
if os.path.exists(fname):
try:
grb = pygrib.open(fname)
la1 = self.area[0][0]
la2 = self.area[1][0]
lo1 = self.area[0][1]
lo2 = self.area[1][1]
nwps[dt.strftime('%d%m%y%H%M')] = self.extract(grb, la1,la2,lo1,lo2)
grb.close()
del grb
print('nwps exrtacted from ', fname)
except:
pass
return (t.strftime('%d%m%y'), nwps)
def grib2dict_for_train(self):
res = self.nwps_extract_for_train(self.dates_ts[0])
results = Parallel(n_jobs=self.njobs)(delayed(self.nwps_extract_for_train)(t) for t in self.dates_ts)
for res in results:
joblib.dump(res[1], os.path.join(self.pathnwp_group, 'skiron_' +res[0] + '.pickle'))
print('NWPs extracted for', res[0])
self.logger.info('Nwp pickle file created for date %s', res[0])
def grib2dict_for_train_online(self):
res = self.nwps_extract_for_train(self.dates_ts)
joblib.dump(res[1], os.path.join(self.pathnwp_group, 'skiron_' +res[0] + '.pickle'))
print('NWPs extracted for', res[0])
self.logger.info('Nwp pickle file created for date %s', res[0])
def extract_nwps(self,train=True):
if train:
dates = []
for dt in self.dates_ts:
if not os.path.exists(os.path.join(self.pathnwp_group, 'skiron_' +dt.strftime('%d%m%y') + '.pickle')):
dates.append(dt)
self.dates_ts = pd.DatetimeIndex(dates)
self.grib2dict_for_train()
else:
self.grib2dict_for_train_online()
```
#### File: Fuzzy_clustering/ver_tf2/test_modules.py
```python
import joblib, os
import pandas as pd
import numpy as np
from Fuzzy_clustering.ver_tf2.Models_train_manager import ModelTrainManager
model_path = 'D:/models/my_projects/APE_net_ver1/pv/APE_net/model_ver0'
rule = 'rule.8'
cluster_dir = os.path.join(model_path, 'Regressor_layer/' + rule)
data_path = os.path.join(cluster_dir, 'data')
static_data = joblib.load(os.path.join(model_path, 'static_data.pickle'))
model = ModelTrainManager(path_model=model_path)
model.load()
def split_test_data(X, y, act, X_cnn=np.array([]), X_lstm=np.array([]), test_indices=None):
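# split the dataset into train and test parts using the stored test indices; returns empty test sets when no indices are given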
N_tot, D = X.shape
if not test_indices is None:
X_test = X.loc[test_indices['dates_test']]
y_test = y.loc[test_indices['dates_test']]
act_test = act.loc[test_indices['dates_test']]
X = X.loc[test_indices['dates_train']]
y = y.loc[test_indices['dates_train']]
act = act.loc[test_indices['dates_train']]
if len(X_cnn.shape) > 1:
X_cnn_test = X_cnn[test_indices['indices_test']]
X_cnn = X_cnn[test_indices['indices_train']]
else:
X_cnn_test = np.array([])
if len(X_lstm.shape) > 1:
X_lstm_test = X_lstm[test_indices['indices_test']]
X_lstm = X_lstm[test_indices['indices_train']]
else:
X_lstm_test = np.array([])
else:
X_test = pd.DataFrame([])
y_test = pd.DataFrame([])
act_test = pd.DataFrame([])
X_cnn_test = np.array([])
X_lstm_test = np.array([])
N_test = X_test.shape[0]
return X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test
def load_data():
X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
act = pd.read_csv(os.path.join(data_path, 'dataset_act.csv'), index_col=0, header=0, parse_dates=True,
dayfirst=True)
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))
if X_cnn.shape[1] == 6:
X_cnn = X_cnn.transpose([0, 2, 3, 1])
else:
X_cnn = np.array([])
if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):
X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))
else:
X_lstm = np.array([])
if os.path.exists(os.path.join(data_path, 'test_indices.pickle')):
test_indices = joblib.load(os.path.join(data_path, 'test_indices.pickle'))
else:
test_indices = None
return X, y, act, X_cnn, X_lstm, test_indices
def test_combine_module():
from Fuzzy_clustering.ver_tf2.Combine_module_train import combine_model
X, y, act, X_cnn, X_lstm, test_indices = load_data()
X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test = split_test_data(X, y,
act,
X_cnn=X_cnn,
X_lstm=X_lstm,
test_indices=test_indices)
comb_model = combine_model(static_data, cluster_dir, model.sc)
comb_model.istrained = False
comb_model.train(X_test, y_test, act_test, X_cnn_test, X_lstm_test)
def test_cluster_module():
from Fuzzy_clustering.ver_tf2.Cluster_train_regressors import cluster_train
cluster_model = cluster_train(static_data, rule, model.sc)
cluster_model.istrained=False
cluster_model.fit()
if __name__ == '__main__':
# test_combine_module()
test_cluster_module()
``` |
{
"source": "joesie/io",
"score": 2
} |
#### File: io/bin/gpio2mqtt.py
```python
from MqttClient import MqttClient
from Channel import Channel
from datetime import datetime
import RPi.GPIO as GPIO
import logging
import getopt
import sys
# ======================
##
# Mapping Loglevel from loxberry log to python logging
##
def map_loglevel(loxlevel):
switcher={
0:logging.NOTSET,
3:logging.ERROR,
4:logging.WARNING,
6:logging.INFO,
7:logging.DEBUG
}
return switcher.get(int(loxlevel),"unsupported loglevel")
# ======================
##
# handle start arguments
##
inputs = None
outputs = None
loglevel=logging.ERROR
logfile=""
logfileArg = ""
lbhomedir = ""
configfile = ""
opts, args = getopt.getopt(sys.argv[1:], 'f:l:c:h:', ['logfile=', 'loglevel=', 'configfile=', 'lbhomedir='])
for opt, arg in opts:
if opt in ('-f', '--logfile'):
logfile=arg
logfileArg = arg
elif opt in ('-l', '--loglevel'):
loglevel=map_loglevel(arg)
elif opt in ('-c', '--configfile'):
configfile=arg
elif opt in ('-h', '--lbhomedir'):
lbhomedir=arg
# ==============
##
# Setup logger function
##
def setup_logger(name):
global loglevel
global logfile
logging.captureWarnings(True)
logger = logging.getLogger(name)
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(loglevel)
if not logfile:
logfile="/tmp/"+datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')[:-3]+"_gpio2mqtt.log"
logging.basicConfig(filename=logfile,level=loglevel,format='%(asctime)s.%(msecs)03d <%(levelname)s> %(message)s',datefmt='%H:%M:%S')
return logger
_LOGGER = setup_logger("GPIO2MQTT")
_LOGGER.debug("logfile: " + logfileArg)
_LOGGER.info("loglevel: " + logging.getLevelName(_LOGGER.level))
# ============================
# init MQTT
# ============================
client = MqttClient(lbhomedir + '/data/system/plugindatabase.json', _LOGGER)
# ============================
##
# setup GPIOS
##
Channel.init(configfile, _LOGGER, client)
Channel.sendChannelStates()
# ============================
##
# start mqtt Client
##
try:
_LOGGER.info("start MQTT Client")
client.loop_forever()
except Exception as e:
_LOGGER.exception(str(e))
finally:
_LOGGER.info("Stop MQTT Client")
client.disconnect() # disconnect gracefully
GPIO.cleanup()
logging.shutdown()
# ============================
```
#### File: io/bin/MqttClient.py
```python
import threading
from Channel import Channel, OutputChannel
import socket
import time
import json
import paho.mqtt.client as mqtt
class MqttClient():
MQTT_DEFAULT_PORT = 1883
client = None
mqtt_heatbeatThread = None
#Configuration MQTT Topics
hostname = socket.gethostname()
MQTT_TOPIC_OUTPUT = hostname + "/gpio/set/"
MQTT_TOPIC_PWM = hostname + "/gpio/pwm/"
MQTT_TOPIC_PWM_FREQUENCY = hostname + "/gpio/pwm/freq/"
MQTT_TOPIC_PWM_DC = hostname + "/gpio/pwm/dc/"
MQTT_RESPONSE_STATE = hostname + "/gpio/"
_LOGGER = None
#=============================
# publish MQTT
##
def publish(self, topic, payload=None, qos=0, retain=False, properties=None):
# self.client.publish(topic, payload=payload, qos=qos, retain=retain, properties=properties)
self.client.publish(topic, payload=payload, qos=qos, retain=retain)
# ============================
##
# get configuration from mqtt broker and store config in mqttconf variable
##
def createMqttConfig(self, configfile):
try:
mqttconf = None
with open(configfile) as json_plugindatabase_file:
plugindatabase = json.load(json_plugindatabase_file)
mqttconfigdir = plugindatabase['plugins']['07a6053111afa90479675dbcd29d54b5']['directories']['lbpconfigdir']
mqttPluginconfig = None
with open(mqttconfigdir + '/mqtt.json') as json_mqttconfig_file:
mqttPluginconfig = json.load(json_mqttconfig_file)
mqttcred = None
with open(mqttconfigdir + '/cred.json') as json_mqttcred_file:
mqttcred = json.load(json_mqttcred_file)
mqttuser = mqttcred['Credentials']['brokeruser']
mqttpass = mqttcred['Credentials']['brokerpass']
mqttaddressArray = mqttPluginconfig['Main']['brokeraddress'].split(":")
mqttPort = MqttClient.MQTT_DEFAULT_PORT
if len(mqttaddressArray) > 1:
mqttPort = int(mqttaddressArray[1])
mqttconf = {
'username':mqttuser,
'password': mqttpass,
'address': mqttaddressArray[0],
'port': mqttPort
}
self._LOGGER.debug("MQTT config" + str(mqttconf))
return mqttconf
except Exception as e:
self._LOGGER.exception(str(e))
# ============================
##
# MQTT Heartbeat
#
def mqtt_heatbeat(self, name):
while(1):
self.client.publish(MqttClient.MQTT_RESPONSE_STATE+'status', "Online", retain = True)
time.sleep(10)
# ============================
##
# definition of mqtt callbacks
##
def on_connectCallback(self, client, userdata, flags, rc):
client.subscribe(MqttClient.MQTT_TOPIC_OUTPUT + "#")
self._LOGGER.debug("Subscribe topic: " + MqttClient.MQTT_TOPIC_OUTPUT + "#")
client.subscribe(MqttClient.MQTT_TOPIC_PWM + "#")
self._LOGGER.debug("Subscribe topic: " + MqttClient.MQTT_TOPIC_PWM + "#")
def on_messageCallback(self, client, userdata, msg):
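# route incoming messages: JSON batches and single pins on the output topic, frequency and duty-cycle updates on the PWM topic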
mymsg = str(msg.payload.decode("utf-8"))
mytopic = str(msg.topic)
self._LOGGER.debug("Topic: " + mytopic + " with Payload: " + mymsg + "!")
if(mytopic.startswith(MqttClient.MQTT_TOPIC_OUTPUT + "json")):
if(not mymsg):
self.client.publish(MqttClient.MQTT_RESPONSE_STATE + "error" + "/stateText", "Error, can't set GPIO. For more information read the logfile!", retain = True)
self._LOGGER.error('If you use json to set outputs, you have to transmit a json like {"5":"off","6":"on"}. For more information read the manual!')
return
try:
msg_json = json.loads(mymsg)
self._LOGGER.debug("msg_json: " + str(msg_json))
for key in msg_json:
OutputChannel.handle_setOutput(key, msg_json[key])
self._LOGGER.debug("key: " + key + " msg_jsonkey] "+ str(msg_json[key]))
return
except json.decoder.JSONDecodeError as ex:
self._LOGGER.exception("Malformed json given. Cannot parse string: " + mymsg)
except Exception as e:
self._LOGGER.exception(str(e))
self.client.publish(MqttClient.MQTT_RESPONSE_STATE + "error" + "/stateText", "Error, can't set GPIO. For more information read the logfile!", retain = True)
# Handle output Topic
if(mytopic.startswith(MqttClient.MQTT_TOPIC_OUTPUT)):
switched = False
# Search for topic in List of available output pins and set gpio to state LOW or HIGH
for channel in Channel.outputChannels:
self._LOGGER.debug("mytopic : " + mytopic)
self._LOGGER.debug("testtopic: " + MqttClient.MQTT_TOPIC_OUTPUT + str(channel.pin))
if mytopic.strip() == MqttClient.MQTT_TOPIC_OUTPUT + str(channel.pin) :
try:
channel.setOutput(mymsg)
switched = True
except Exception as e:
self._LOGGER.exception(str(e))
self.client.publish(MqttClient.MQTT_RESPONSE_STATE + str(channel.pin) + "/stateText", "Error, can't set GPIO. For more information read the logfile!", retain = True)
if switched == False:
self.client.publish(MqttClient.MQTT_RESPONSE_STATE + "error" + "/stateText", "Error, unknown Topic: " + mytopic, retain = True)
# Handle PWM Topic
if(mytopic.startswith(MqttClient.MQTT_TOPIC_PWM)):
for channel in Channel.outputChannels:
try:
#set frequency
if mytopic.strip() == MqttClient.MQTT_TOPIC_PWM_FREQUENCY + str(channel.pin) :
channel.setFrequency(mymsg)
self._LOGGER.debug("set frequency " + mymsg + " channel " + str(channel.pin))
#set duty cycle
if mytopic.strip() == MqttClient.MQTT_TOPIC_PWM_DC + str(channel.pin) :
channel.setDutyCycle(mymsg)
self._LOGGER.debug("set frequency " + mymsg + " channel " + str(channel.pin))
except Exception as e:
self._LOGGER.exception(str(e))
self.client.publish(MqttClient.MQTT_RESPONSE_STATE + str(channel.pin) + "/stateText", "Error, can't set PWM. For more information read the logfile!", retain = True)
def loop_forever(self):
self.client.loop_forever()
def disconnect(self):
self.client.disconnect()
self.client.loop_stop()
# the heartbeat thread is a daemon thread, so it ends when the process exits
def __init__(self, configfile, _LOGGER):
self._LOGGER = _LOGGER
self.client = mqtt.Client()
mqttconf = self.createMqttConfig(configfile)
if mqttconf is None:
raise ValueError("No MQTT config found.")
_LOGGER.info("start MQTT Client")
self.client.on_connect = self.on_connectCallback
self.client.on_message = self.on_messageCallback
self.client.username_pw_set(mqttconf['username'], mqttconf['password'])
self.client.will_set(MqttClient.MQTT_RESPONSE_STATE+'status', 'Offline', qos=0, retain=True)
self.client.connect(mqttconf['address'], mqttconf['port'], 60)
self.mqtt_heatbeatThread = threading.Thread(target=self.mqtt_heatbeat, args=(1,), daemon=True)
self.mqtt_heatbeatThread.start()
``` |
{
"source": "Joe-Sin7h/web_scraper",
"score": 2
} |
#### File: Joe-Sin7h/web_scraper/results.py
```python
import typing_extensions
from selenium import webdriver
from selenium import common
import selenium
from threading import Thread
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.common import exceptions
from tkinter import *
optionschrome = webdriver.ChromeOptions()
optionschrome.add_argument("headless")
def get_score_block(driver,sport='football'):
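# wait up to 120s (retrying once after a refresh) for the element that holds the scores; the boxing page uses a different container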
if sport.lower()!='boxing':
try:
section = WebDriverWait(driver, 120).until(EC.presence_of_element_located((By.CLASS_NAME ,"event")))
except exceptions.TimeoutException:
driver.refresh()
section = WebDriverWait(driver, 120).until(EC.presence_of_element_located((By.CLASS_NAME ,"event")))
else:
try:
section = WebDriverWait(driver, 120).until(EC.presence_of_element_located((By.ID ,"main")))
except exceptions.TimeoutException:
driver.refresh()
section = WebDriverWait(driver, 120).until(EC.presence_of_element_located((By.ID ,"main")))
return section
def get_results(section,sport,driver):
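# scrape the relevant fields for the given sport; each sport returns a different tuple of element lists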
try:
if sport.lower()=='football':
home = section.find_elements_by_class_name('event__participant.event__participant--home')
away = section.find_elements_by_class_name('event__participant.event__participant--away')
result = section.find_elements_by_class_name('event__scores')
return home, away, result
elif sport.lower()=='cricket':
# finished = WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH,'/html/body/div[5]/div[1]/div/div[1]/div[2]/div[5]/div/div[1]/div[1]/div[4]/div')))
# finished.click()
home = section.find_elements_by_class_name('event__participant.event__participant--home')
home_score = section.find_elements_by_class_name('event__score.event__score--home')
away = section.find_elements_by_class_name('event__participant.event__participant--away')
away_score = section.find_elements_by_class_name('event__score.event__score--away')
return home, home_score, away, away_score
elif sport.lower()=='basketball' or sport.lower()=='rugby':
home = section.find_elements_by_class_name('event__participant.event__participant--home')
home_score = section.find_elements_by_class_name('event__score.event__score--home')
away = section.find_elements_by_class_name('event__participant.event__participant--away')
away_score = section.find_elements_by_class_name('event__score.event__score--away')
return home, home_score, away, away_score
elif sport.lower()=='boxing':
date = section.find_elements_by_tag_name('li')
result = section.find_elements_by_tag_name('a')
return date, result
elif sport.lower()=='golf':
rank = section.find_elements_by_class_name('event__rating.rank')
name = section.find_elements_by_class_name('event__participantName')
par = section.find_elements_by_class_name('event__center.event__result--par')
today = section.find_elements_by_class_name('event__center.event__result--today')
result = section.find_elements_by_class_name('event__center.event__result--roundAll.event__result--grey')
country = section.find_elements_by_class_name('flag')
return rank, name, par, today, result, country
except Exception as e:
print(e)
return None, None, None
def get_football(link="https://www.flashscore.co.uk"):
football_driver = webdriver.Chrome('chromedriver.exe',options=optionschrome)
football_driver.get(link)
football_section = get_score_block(football_driver,'football')
return football_driver, football_section
def get_cricket(link="https://www.flashscore.co.uk/cricket/"):
cricket_driver = webdriver.Chrome('chromedriver.exe',options=optionschrome)
cricket_driver.get(link)
section = get_score_block(cricket_driver,'cricket')
return cricket_driver, section
def get_basketball(link="https://www.flashscore.co.uk/basketball/"):
basketball_driver = webdriver.Chrome('chromedriver.exe',options=optionschrome)
basketball_driver.get(link)
section = get_score_block(basketball_driver,'basketball')
return basketball_driver, section
def get_rugby(link="https://www.flashscore.co.uk/rugby-union/"):
rugby_driver = webdriver.Chrome('chromedriver.exe',options=optionschrome)
rugby_driver.get(link)
section = get_score_block(rugby_driver,'rugby')
return rugby_driver, section
def get_golf(link="https://www.flashscore.co.uk/golf/"):
golf_driver = webdriver.Chrome('chromedriver.exe',options=optionschrome)
golf_driver.get(link)
section = get_score_block(golf_driver,'golf')
return golf_driver, section
def get_boxing(link="https://www.boxing247.com/boxing-results"):
boxing_driver = webdriver.Chrome('chromedriver.exe',options=optionschrome)
boxing_driver.get(link)
section = get_score_block(boxing_driver,'boxing')
return boxing_driver, section
class Football(object):
def __init__(self, frame, home, away, result):
self.homedata = home
self.awaydata = away
self.resultdata = result
self.frame = frame
def create_widgets(self):
self.home = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.home.insert(END,self.homedata)
self.home.place(relwidth=0.22,height=40,relx=0.2,y=300)
self.result = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.result.insert(END, self.resultdata)
self.result.place(height=40,relx=0.43,y=300,relwidth=0.14)
self.away = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.away.insert(END, self.awaydata)
self.away.place(relwidth=0.22,height=40,relx=0.58,y=300)
class BasketballRugby(object):
def __init__(self, frame, home, home_score, away, away_score):
self.frame = frame
self.home_score = home_score
self.home = home
self.away_score = away_score
self.away = away
def create_widgets(self):
self.homew = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.homew.insert(END,self.home)
self.homew.place(relwidth=0.22,height=40,relx=0.2,y=300)
self.result = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.result.insert(END, self.home_score+" - "+self.away_score)
self.result.place(height=40,relx=0.43,y=300,relwidth=0.14)
self.awayw = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.awayw.insert(END, self.away)
self.awayw.place(relwidth=0.22,height=40,relx=0.58,y=300)
class Cricket(object):
def __init__(self, frame, home, home_score, away, away_score):
self.frame = frame
self.home_score = home_score
self.home = home
self.away_score = away_score
self.away = away
def create_widgets(self):
self.homew = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.homew.insert(END,self.home)
self.homew.place(relwidth=0.22,height=40,relx=0.2,y=300)
self.result = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.result.insert(END, self.home_score+" - "+self.away_score)
self.result.place(height=40,relx=0.43,y=300,relwidth=0.14)
self.awayw = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.awayw.insert(END, self.away)
self.awayw.place(relwidth=0.22,height=40,relx=0.58,y=300)
class Boxing(object):
def __init__(self, frame, date, result):
self.frame = frame
self.datevalue = date
self.news = result
def create_widgets(self):
self.newsw = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.newsw.insert(END, self.news)
self.newsw.place(height=100,relx=0.2,relwidth=0.6, y=300)
class Golf(object):
def __init__(self,frame, rank, name, par, today, result, country):
self.frame = frame
self.rank = rank
self.name = name
self.par = par
self.today = today
self.result = result
self.country = country
def create_widgets(self):
self.rankw = Text(self.frame, wrap=WORD,exportselection=False)
self.rankw.insert(END, self.rank)
# self.rankw.place(relx=0.02,width=5,height=70)
self.countryw = Text(self.frame,wrap=WORD,exportselection=False)
self.countryw.insert(END,self.country)
# self.countryw.place(relx=0.1,width=70,height=70)
self.namew = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.namew.insert(END,self.name)
self.namew.place(relx=0.2,relwidth=0.2,height=70,y=320)
self.parw = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.parw.insert(END,self.par)
self.parw.place(relx=0.42,relwidth=0.1,height=70,y=320)
self.todayw = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.todayw.insert(END,self.today)
self.todayw.place(relx=0.54,relwidth=0.1,height=70,y=320)
self.resultw = Text(self.frame,wrap=WORD,exportselection=False,relief='flat')
self.resultw.insert(END,self.result)
self.resultw.place(relx=0.66,relwidth=0.1,height=70,y=320)
if __name__=='__main__':
driver = webdriver.Chrome('chromedriver.exe')
driver.get('https://www.flashscore.co.uk/golf/')
section = get_score_block(driver,'golf')
# print(section)
# home, home_score, away= get_results(section,'golf',driver)
# home, home_score = get_results(section,'basketball',driver)
# print(len(home),len(result)
# print(home)
# print(away)
# print(result)
# for i in range(len(home)):
# print(home[i].text,home_score[i].text)
rank, name, par, today, result, country = get_results(section,'golf',driver)
print(len(result),len(rank),len(name),len(par),len(today),len(country))
# print(len(rank),print(len(par)))
# print(today,result)
# print(len(country))
for i in range(len(rank)):
print(par[i].text,country[i].get_attribute('title'))
driver.quit()
``` |
{
"source": "joesinghh/AlgoApp",
"score": 3
} |
#### File: joesinghh/AlgoApp/handlefile.py
```python
import pandas as pd
import numpy as np
from openpyxl import load_workbook
from threading import Thread
def insert_data_main(data,cols,filename=".\\OrderData\\order_data.xlsx"):
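# append one row below the last used row of the existing Excel workbook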
# dataframe = pd.read_excel(filename)
df = pd.DataFrame([data],columns=cols)
writer = pd.ExcelWriter(filename,engine='openpyxl')
writer.book = load_workbook(filename)
writer.sheets = {ws.title: ws for ws in writer.book.worksheets}
last_row = writer.sheets['Sheet1'].max_row
df.to_excel(writer,index=False,header=False,startrow=last_row,)
writer.save()
print("Done",filename)
def insert_data(data,cols,filename):
t = Thread(target=insert_data_main,args=(data,cols,filename))
t.start()
def fetch_data(filename):
df = pd.read_excel(filename)
return df.to_numpy().tolist()
def delete_data(row_index):
book = load_workbook('.\\OrderData\\notcompleted.xlsx')
sheet = book['Sheet1']
sheet.delete_rows(row_index)
book.save('.\\OrderData\\notcompleted.xlsx')
def change_data(data, new_sl, new_target):
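# overwrite the stop-loss and target cells of the matching row (columns 15 and 16; +2 skips the header row and pandas' zero-based index)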
workbook = load_workbook('.\\OrderData\\notcompleted.xlsx')
sheet = workbook['Sheet1']
sheet.cell(data.index[0]+2,15).value = new_sl
sheet.cell(data.index[0]+2,16).value = new_target
workbook.save('.\\OrderData\\notcompleted.xlsx')
if __name__=="__main__":
# dataframe = pd.read_excel(".\\OrderData\\order_data.xlsx")
# last_num = dataframe.values
# print(last_num)
delete_data(2)
```
#### File: joesinghh/AlgoApp/kite_api.py
```python
from kiteconnect import KiteConnect
import re
import requests
import pandas as pd
import json
requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
class Kite():
instruments = None
def __init__(self):
f = open('.\\Config\\kite.json')
data = json.load(f)
self.key = data["kitekey"]
self.secretkey = data["secretkey"]
self.cls = KiteConnect(api_key=self.key)
self.r_key = None
self.session = None
self.set_token()
def set_token(self):
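# load the previously saved request token and access token and register the access token with KiteConnect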
with open("kite_access.txt","r") as f:
self.r_key = f.read()
with open("kite_session.txt","r") as f:
self.session = f.read()
self.cls.set_access_token(self.session)
def login(self):
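# start a new login: persist the request token and exchange it for an access token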
with open("kite.txt" ,"w") as f:
self.r_key = int(re.findall(r"[0-9]+",self.cls.login_url())[0])
f.write(self.r_key)
with open("kite_session.txt","w") as f:
self.session = self.cls.generate_session(self.r_key,self.secretkey)['access_token']
f.write(self.session)
def get_id(self,symbol):
return Kite.instruments['instrument_token'][Kite.instruments.tradingsymbol == symbol]
def get_symbol(self,instru_id):
return Kite.instruments['tradingsymbol'][Kite.instruments.instrument_token == instru_id]
def get_lot(self,symbol):
return Kite.instruments['lot_size'][Kite.instruments.tradingsymbol == symbol]
def get_expiry(self,symbol):
return Kite.instruments['expiry'][Kite.instruments.tradingsymbol == symbol]
def get_quote(self,symbol,exchange):
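# return the top three bid and ask prices from the market depth of the given instrument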
ins = symbol+":"+exchange
r = self.cls.quote(ins)
bids = [r["data"][ins]["depth"]["buy"][0]["price"],r["data"][ins]["depth"]["buy"][1]["price"],r["data"][ins]["depth"]["buy"][2]["price"]]
sells = [r["data"][ins]["depth"]["sell"][0]["price"],r["data"][ins]["depth"]["sell"][1]["price"],r["data"][ins]["depth"]["sell"][2]["price"]]
return bids, sells
def place_order(self, variety='regular', exchange='NFO', symbol='FSL', t_type='BUY', quantity=1, product="MIS", order_type="MARKET"):
self.cls.place_order(variety, exchange, symbol, t_type, quantity, product, order_type)
def set_instruments(self):
Kite.instruments=pd.DataFrame(self.cls.instruments("NSE"))
``` |
{
"source": "joesinghh/Text-Extractor",
"score": 3
} |
#### File: joesinghh/Text-Extractor/main.py
```python
from tkinter import *
from tkinter.messagebox import showwarning,showerror
from tkinter.filedialog import askopenfile
import cv2
from PIL import Image, ImageTk
from threading import Thread
from tesseract import ProcessImage
def stop_():
camera.destroy()
cap.release()
#display result
def extractimg(img):
result = Toplevel(mainframe)
result.title('Result')
result.geometry('300x300')
text = Text(result,wrap=WORD)
text.place(relwidth=1,relheight=1)
text.insert(END,ProcessImage(img))
#camera capture display
def display():
rep,frame = cap.read()
frame = cv2.flip(frame,1)
cv2img = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
img = Image.fromarray(cv2img)
imgtk = ImageTk.PhotoImage(image=img)
screen.imgtk = imgtk
screen.configure(image=imgtk)
if val==0:
camera.destroy()
cap.release()
img = img.convert('RGB')
img = img.save("test.jpg","JPEG",quality=80, optimize=True, progressive=True)
extractimg('test.jpg')
else:
camera.after(10,display)
#select image and extract text
def open_img():
path = askopenfile(title="Select Image",filetypes=[("Jpg",".jpg"),("Png",".png")])
if path!=None:
extractimg(path.name)
#stop camera display
def snap_():
global val
val=0
#open camera window
def opencam():
global val ,screen,cap,camera
camera = Toplevel(mainframe)
camera.title("camera")
camera.geometry('500x500')
val = 1
snap = Button(camera,text='Snap',command=snap_,bg='#41abf2')
snap.place(relheight=0.1,relwidth=0.5,relx=0.5,rely=0.9)
stop = Button(camera,text="Stop",command = stop_,bg='#f50c46',activebackground='#f2417f')
stop.place(relheight=0.1,relwidth=0.5,relx=0.0,rely=0.9)
screen = Label(camera)
screen.place(relheight=0.90,relwidth=1)
cap = cv2.VideoCapture(0) #Video capture object ( 0 - default camera , 1 - second camera , and so on)
display()
#main window
root = Tk()
#size of window
root.geometry('600x600');
root.title("Text Extractor");
icon = PhotoImage(file='icon.png')
root.iconphoto(True,icon)
#Frames
startframe = Frame(root,bg='#34cceb')
mainframe = Frame(root,bg='#348feb')
startframe.place(relheight=1,relwidth=1)
mainframe.place(relheight=1,relwidth=1)
startframe.tkraise()
#Mainframe
opencam = Button(mainframe,text='Open Cam',command=opencam,bg='#37e6e6',activebackground='#3777e6',fg='black',activeforeground='#37e6e6')
opencam.place(relheight=0.1,relwidth=0.3,relx=0.15,rely=0.3)
selectimg = Button(mainframe,text='Select Image',command = open_img,bg='#37e6e6',activebackground='#3777e6',fg='black',activeforeground='#37e6e6');
selectimg.place(relheight=0.1,relwidth=0.3,relx=0.53,rely=0.3)
#start Frame
Start = Button(startframe,text='Start',command= mainframe.tkraise,bg='#3777e6',fg='#000000',font=('Arial',25,'bold'))
Start.place(relheight=0.15,relwidth=0.3,relx=0.35,rely=0.4)
root.mainloop()
```
#### File: joesinghh/Text-Extractor/tesseract.py
```python
import cv2
import numpy as np
import pytesseract as pt
pt.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
# get grayscale image
def get_grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# noise removal
def remove_noise(image):
return cv2.medianBlur(image,5)
#Thresholding
def thresholding(image):
return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
def opening(image):
kernel = np.ones((5,5),np.uint8)
return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
def ProcessImage(img):
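# preprocessing pipeline before OCR: denoise -> grayscale -> morphological opening -> Otsu threshold -> Tesseract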
image = cv2.imread(img)
rn = remove_noise(image)
gray = get_grayscale(rn)
openin = opening(gray)
thresh = thresholding(openin)
# cv2.imwrite('testout2.jpg',thresh)
text = pt.image_to_string(thresh,lang='eng')
text = text.split(' ')
textoutput = ''
for i in text:
textoutput+=' '+i
return textoutput
if __name__=='__main__':
print(ProcessImage('test.jpg'))
``` |
{
"source": "joesingo/tom_astrosource",
"score": 2
} |
#### File: tom_astrosource/tom_astrosource/models.py
```python
from io import StringIO
import logging
import os
from pathlib import Path
from astrosource import TimeSeries
from astrosource.detrend import detrend_data
from astrosource.eebls import plot_bls
from astrosource.utils import AstrosourceException
import numpy as np
from tom_dataproducts.models import DataProduct, ReducedDatum
from tom_education.models import AsyncError, PipelineProcess, PipelineOutput
from django.conf import settings
class AstrosourceLogBuffer(StringIO):
"""
Thin wrapper around StringIO that logs messages against a `AstrosourceProcess`
on write
"""
def __init__(self, process, *args, **kwargs):
super().__init__(*args, **kwargs)
self.process = process
def write(self, s):
self.process.log(s, end='')
return super().write(s)
class AstrosourceProcess(PipelineProcess):
short_name = 'astrosource'
allowed_suffixes = ['.fz', '.fits.fz', '.psx']
flags = {
'plot': {
'default': False,
'long_name': 'Create plot files'
},
'period': {
'default': False,
'long_name': 'Perform automatic period finding'
},
'eebls': {
'default': False,
'long_name': 'EEBLS - box fitting to search for periodic transits'
},
'detrend': {
'default': False,
'long_name': 'Detrend exoplanet data'
},
}
class Meta:
proxy = True
def copy_input_files(self, tmpdir):
"""
Return the file objects of the input data products (copying into the given temporary directory is currently disabled)
"""
return [prod.data.file for prod in self.input_files.all()]
# for prod in self.input_files.all():
# dest = tmpdir / os.path.basename(prod.data.file.name) # Use basename of original file
# dest.write_bytes(prod.data.read())
def do_pipeline(self, tmpdir, **flags):
"""
Call astrosource to perform the actual analysis
"""
with self.update_status('Gathering data files'):
filelist = self.copy_input_files(tmpdir)
buf = AstrosourceLogBuffer(self)
logger = logging.getLogger('astrosource')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(buf))
targets = np.array([self.target.ra, self.target.dec, 0, 0])
# Get file type from the first input file (this assumes that all input
# files are the same type!)
filetype = Path(self.input_files.first().data.name).suffix[1:] # remove the leading '.'
try:
with self.update_status('Initialising'):
ts = TimeSeries(indir=tmpdir, filelist=filelist, targets=targets, verbose=True)
with self.update_status('Analysing input data files'):
ts.analyse(calib=True)
with self.update_status('Calculating curves'):
ts.find_stable()
with self.update_status('Performing photometric calculations'):
ts.photometry()
if flags['plot']:
with self.update_status('Plotting results and finding period'):
ts.plot(period=flags['period'], filesave=True)
if flags['detrend']:
with self.update_status('Detrending'):
detrend_data(ts.paths, filterCode=ts.filtercode)
if flags['eebls']:
with self.update_status('Doing EEBLS'):
plot_bls(paths=ts.paths)
except AstrosourceException as ex:
raise AsyncError(str(ex))
yield from self.gather_outputs(ts, tmpdir)
def gather_outputs(self, timeseries, tmpdir):
"""
Yield PipelineOutput objects for astrosource output files
"""
timeseries.usedimages.sort()
filesused = [timeseries.files[i] for i in timeseries.usedimages]
photdata = [x for x in zip(timeseries.data[0][:,6],timeseries.data[0][:,10],timeseries.data[0][:,11],filesused)]
outputs = []
for pd in photdata:
yield PipelineOutput(path=None, data=pd, output_type=ReducedDatum, data_product_type=settings.DATA_PRODUCT_TYPES['photometry'][0])
# These data products are PLOTS
outfiles = [
# (dirname, filename format string, output type, modes)
('outputplots', 'V1_EnsembleVar{}Mag.png', DataProduct, ['Calib', 'Diff']),
('periods', 'V1_StringTestPeriodPlot{}.png', DataProduct, ['_calibrated', '']),
]
for dirname, filename, output_type, modes in outfiles:
outdir = tmpdir / Path(dirname)
if not outdir.is_dir():
self.log(f"Output directory '{dirname}' not found")
continue
found_file = False
for mode in modes:
p = outdir / filename.format(mode)
if p.is_file():
yield PipelineOutput(path=p, output_type=output_type, data_product_type=settings.DATA_PRODUCT_TYPES['plot'][0], data=None)
found_file = True
break
if not found_file:
glob = filename.format('(' + " | ".join(modes) + ')')
self.log(f"No files matching '{glob}' found in '{dirname}'")
``` |
{
"source": "joesingo/tom_education",
"score": 4
} |
#### File: tom_education/tom_education/utils.py
```python
def assert_valid_suffix(filename, allowed_suffixes):
"""
Check that `filename` has one of the strings in `allowed_suffixes` as a
suffix. Raises an AssertionError if not.
"""
if not any(filename.endswith(suffix) for suffix in allowed_suffixes):
err_msg = (
"File '{}' does not end an allowed filename suffix ({})"
.format(filename, ', '.join(allowed_suffixes))
)
raise AssertionError(err_msg)
``` |
{
"source": "joesitton/Ciphey",
"score": 3
} |
#### File: basemods/Decoders/a1z26.py
```python
from typing import Optional, Dict, List
from ciphey.iface import Config, ParamSpec, T, U, Decoder, registry
from loguru import logger
import re
@registry.register
class A1z26(Decoder[str, str]):
def decode(self, ctext: T) -> Optional[U]:
"""
Performs A1Z26 decoding
"""
logger.trace("Attempting A1Z26")
ctext_converted = []
ctext_split = re.split(r"[ ,;:\-\n]", ctext)
delimiters = set(sorted(re.sub(r"[^ ,;:\-\n]", "", ctext)))
ctext_num = re.sub(r"[,;:\-\s]", "", ctext)
ctext_decoded = ""
if ctext_num.isnumeric() is False:
logger.trace("Failed to decode A1Z26 due to non numeric character(s)")
return None
try:
for i in ctext_split:
val = int(i)
if val > 26 or val < 1:
logger.trace(
f"Failed to decode A1Z26 due to invalid number '{val}'"
)
return None
val2 = int(i) + 96
ctext_converted.append(chr(val2))
ctext_decoded = "".join(ctext_converted)
logger.debug(
f"A1Z26 successful, returning '{ctext_decoded}' with delimiter(s) {delimiters}"
)
return ctext_decoded
except Exception:
return None
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return None
@staticmethod
def getTarget() -> str:
return "a1z26"
```
#### File: basemods/Decoders/hexadecimal.py
```python
from typing import Optional, Dict, Any
import ciphey
from ciphey.iface import registry
@registry.register
class Hex(ciphey.iface.Decoder[str, bytes]):
def decode(self, text: str) -> Optional[bytes]:
"""
It takes a hex string and returns a string
:text: hex str like "48656c6c6f"
"""
try:
str_converted = bytearray.fromhex(text).decode()
return str_converted
# Catch bad octal chars
except ValueError:
return None
@staticmethod
def priority() -> float:
return 0.015
def __init__(self, config: ciphey.iface.Config):
super().__init__(config)
@staticmethod
def getParams() -> Optional[Dict[str, Dict[str, Any]]]:
pass
@staticmethod
def getTarget() -> str:
return "hex"
```
#### File: basemods/Decoders/leet.py
```python
from typing import Optional, Dict, Any
import ciphey
from ciphey.iface import registry, Translation, ParamSpec
@registry.register
class Leet(ciphey.iface.Decoder[str, str]):
def decode(self, text: str) -> Optional[str]:
for src, dst in self.translate.items():
text = text.replace(src, dst)
return text
@staticmethod
def priority() -> float:
return 0.05
def __init__(self, config: ciphey.iface.Config):
super().__init__(config)
self.translate = config.get_resource(self._params()["dict"], Translation)
@staticmethod
def getParams() -> Optional[Dict[str, ParamSpec]]:
return {
"dict": ParamSpec(
desc="The leetspeak dictionary to use",
req=False,
default="cipheydists::translate::leet",
)
}
@staticmethod
def getTarget() -> str:
return "leet"
``` |
{
"source": "joesmall37/Algorithms",
"score": 4
} |
#### File: Algorithms/codingchallenges/first_three_multiples.py
```python
def first_three_multiples(num):
print(num * 1)
print(num * 2)
print(num * 3)
return (num * 3)
print(first_three_multiples(10))
```
#### File: Algorithms/codingchallenges/squareRoot.py
```python
def square_root(num):
root = num ** 0.5
return root
print(square_root(10))
```
#### File: Algorithms/codingchallenges/tenthPower.py
```python
def tenth_power(num):
power_ten = num ** 10
return power_ten
print(tenth_power(10))
```
#### File: Algorithms/codingchallenges/winPerentage.py
```python
def win_percentage(wins, losses):
total = wins + losses
percentage = (wins / total) * 100
return percentage
print(win_percentage(5, 5))
```
#### File: Algorithms/QuickSort/intro.py
```python
def quicksort(list, start, end):
if start >= end:
return
print(list[start])
start += 1
quicksort(list, start, end)
colors = ["blue", "red", "green", "purple", "orange"]
quicksort(colors, 0, len(colors) - 1)
``` |
{
"source": "joesmall37/E-Commerce-",
"score": 2
} |
#### File: E-Commerce-/main/views.py
```python
from django.shortcuts import render, redirect
from main.models import Item
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
# Create your views here.
def homepage(request):
return render(request, template_name='main/home.html')
def itemspage(request):
if request.method == 'GET':
items = Item.objects.filter(owner=None)
return render(request, template_name='main/items.html', context= {'items' : items})
if request.method == 'POST':
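# mark the purchased item as owned by the logged-in user and confirm the purchase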
purchased_item = request.POST.get('purchased-item')
if purchased_item:
purchased_item_object = Item.objects.get(name=purchased_item)
purchased_item_object.owner = request.user
purchased_item_object.save()
messages.success(request, f'Congratulations. You just bought {purchased_item_object.name} for {purchased_item_object.price}')
return redirect('items')
def loginpage(request):
if request.method == 'GET':
return render(request, template_name='main/login.html')
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.success(request, f'You are logged in as {user.username}')
return redirect('items')
else:
messages.error(request, 'The combination of the user name and the password is wrong!')
return redirect('login')
def registerpage(request):
if request.method == 'GET':
return render(request, template_name='main/register.html')
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=password)
login(request, user)
messages.success(request, f'You have registered your account successfully! Logged in as {user.username}')
return redirect('home')
else:
messages.error(request, form.errors)
return redirect('register')
def logoutpage(request):
logout(request)
messages.success(request, f'You have been logged out!')
return redirect('home')
``` |
{
"source": "joe-smith-py/searchPdf",
"score": 3
} |
#### File: joe-smith-py/searchPdf/searchPdf2.py
```python
import PyPDF2
import re
import os
from multiprocessing import Pool
example = '/home/joe/PycharmProjects/PDFsearch'
def find_pdfs(directory):
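# walk the directory tree and collect the paths of all PDF files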
pdfs = []
for foldername, subfolder, files in os.walk(directory):
for file in files:
if '.pdf' in file:
pdfs.append(foldername + '/' + file)
return pdfs
def search_pdf(critera):
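# scan the PDF page by page and print its path as soon as the search term is found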
search_term, path = critera
pdf = PyPDF2.PdfFileReader(path)
for n in range(0, pdf.getNumPages()):
page = pdf.getPage(n)
text = page.extractText().lower()
search = re.search(search_term.lower(), text)
if search != None:
print(path)
break
term = input('Enter search word: ')
choice = int(input('1. SC\n2. HSC \nSelect a folder: '))
thread_count = int(input('How many threads would you like to devote? '))
if choice == 1:
cho = '/media/raja/316E396D3C478D8A/pdrv/computer science/SC/'
else:
cho = '/media/raja/316E396D3C478D8A/pdrv/computer science/HSC/'
pdfs = find_pdfs(cho)
search_crit = []
for pdf in pdfs:
search_crit.append((term, pdf))
with Pool(thread_count) as p:
p.map(search_pdf, search_crit)
``` |
{
"source": "joesoftheart/Metagenomic",
"score": 3
} |
#### File: Metagenomic/ScriptQiime2018/extractQiime2Html.py
```python
"""Import Required Modules"""
import argparse, os, subprocess, shutil
"""Create arguments for parsing parameters"""
#Instantiate the parser
parser = argparse.ArgumentParser(description='Description: The script for extracting qiime2 artifact')
#Add Positional Arguments [required parameters]/No Positional arguments
##Add Optional Arguments [optional parameters]
parser.add_argument('--inPath', type=str, default="./", help="Input folder to extract results")
##Get value for each parameters
args = parser.parse_args()
path = args.inPath
"""Summary Input and Output Files"""
print("="*50)
print("SUMMARY INPUT")
print("Input Path:",path)
print("="*50)
######################################################################################################################
###[1] Define Function
######################################################################################################################
"""function to get the index.html link"""
def getLink(initPath):
fileList = os.listdir(initPath)
folderName = fileList[0]
#link = initPath + "/" + folderName + "/data/index.html"
link = folderName + "/data/index.html"
return link
######################################################################################################################
###[2] Check Condition and Run Extraction
######################################################################################################################
if os.path.exists(path):
print("Input path is exists. ==> OK, starting extract QIIME2 results")
##################################################################################################################
"""Create HTML output folder"""
os.mkdir(path+"/outHTML")
os.mkdir(path+"/outHTML/1_sequence")
os.mkdir(path+"/outHTML/2_otuTable")
os.mkdir(path+"/outHTML/3_alphaBetaDiversity")
os.mkdir(path+"/outHTML/4_taxonomy")
os.mkdir(path+"/outHTML/5_file")
##################################################################################################################
"""[1] Extract raw sequence"""
cmd = "unzip "+path+"/2_dereplicate_seq.qzv -d "+path+"/outHTML/1_sequence/dereplicate_seq"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link11 = "1_sequence/dereplicate_seq/"+getLink(path+"/outHTML/1_sequence/dereplicate_seq")##dereplicated sequence
cmd = "unzip "+path+"/2_dereplicate_table.qzv -d "+path+"/outHTML/1_sequence/dereplicate_table"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link12 = "1_sequence/dereplicate_table/"+getLink(path+"/outHTML/1_sequence/dereplicate_table")##dereplicated table
##################################################################################################################
"""[2] Extract OTU table"""
cmd = "unzip "+path+"/4_table_nonchimeric.qzv -d "+path+"/outHTML/2_otuTable/otu_table"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link21 = "2_otuTable/otu_table/"+getLink(path+"/outHTML/2_otuTable/otu_table")##OTU table
cmd = "unzip "+path+"/4_rep_seq_nonchimeric.qzv -d "+path+"/outHTML/2_otuTable/rep_seq"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link22 = "2_otuTable/rep_seq/"+getLink(path+"/outHTML/2_otuTable/rep_seq")##representative sequence
cmd = "unzip "+path+"/checkingChimera/stats.qzv -d "+path+"/outHTML/2_otuTable/chimera_stat"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link23 = "2_otuTable/chimera_stat/"+getLink(path+"/outHTML/2_otuTable/chimera_stat")##statistics of chimera checking
##################################################################################################################
"""[3] Extract Alpha/Beta Diversity"""
cmd = "unzip "+path+"/diversityAnalysisResults/alpha-rarefaction.qzv -d "+path+"/outHTML/3_alphaBetaDiversity/alpha_rarefaction"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link31 = "3_alphaBetaDiversity/alpha_rarefaction/"+getLink(path+"/outHTML/3_alphaBetaDiversity/alpha_rarefaction")##alpha rarefaction
cmd = "unzip "+path+"/diversityAnalysisResults/shannon_vector.qzv -d "+path+"/outHTML/3_alphaBetaDiversity/shannon_vector"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link32 = "3_alphaBetaDiversity/shannon_vector/"+getLink(path+"/outHTML/3_alphaBetaDiversity/shannon_vector")##shannon diversity index
cmd = "unzip "+path+"/diversityAnalysisResults/observed_otus_vector.qzv -d "+path+"/outHTML/3_alphaBetaDiversity/observed_otus"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link33 = "3_alphaBetaDiversity/observed_otus/"+getLink(path+"/outHTML/3_alphaBetaDiversity/observed_otus")##observed OTUs
link34 = ""#link for weighted_unifrac_pcoa_results
link35 = ""#unweighted_unifrac_pcoa_results
link36 = ""#bray_curtis_pcoa_results
if os.path.exists(path+"/diversityAnalysisResults/weighted_unifrac_pcoa_results.qzv"):
cmd = "unzip "+path+"/diversityAnalysisResults/weighted_unifrac_pcoa_results.qzv -d "+path+"/outHTML/3_alphaBetaDiversity/pcoa_weighted_unifrac"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link34 = "3_alphaBetaDiversity/pcoa_weighted_unifrac/"+getLink(path+"/outHTML/3_alphaBetaDiversity/pcoa_weighted_unifrac")##link for weighted_unifrac_pcoa_results
if os.path.exists(path+"/diversityAnalysisResults/unweighted_unifrac_pcoa_results.qzv"):
cmd = "unzip "+path+"/diversityAnalysisResults/unweighted_unifrac_pcoa_results.qzv -d "+path+"/outHTML/3_alphaBetaDiversity/pcoa_unweighted_unifrac"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link35 = "3_alphaBetaDiversity/pcoa_unweighted_unifrac/"+getLink(path+"/outHTML/3_alphaBetaDiversity/pcoa_unweighted_unifrac")##link for weighted_unifrac_pcoa_results
if os.path.exists(path+"/diversityAnalysisResults/bray_curtis_pcoa_results.qzv"):
cmd = "unzip "+path+"/diversityAnalysisResults/bray_curtis_pcoa_results.qzv -d "+path+"/outHTML/3_alphaBetaDiversity/pcoa_bray_curtis"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link36 = "3_alphaBetaDiversity/pcoa_bray_curtis/"+getLink(path+"/outHTML/3_alphaBetaDiversity/pcoa_bray_curtis")##link for weighted_unifrac_pcoa_results
##################################################################################################################
"""[4] Extract Taxonomy"""
cmd = "unzip "+path+"/7_taxonomy.qzv -d "+path+"/outHTML/4_taxonomy/taxonomy"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link41 = "4_taxonomy/taxonomy/"+getLink(path+"/outHTML/4_taxonomy/taxonomy")##taxonomy
cmd = "unzip "+path+"/7_taxa_bar_plots.qzv -d "+path+"/outHTML/4_taxonomy/taxonomy_bar_plot"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
link42 = "4_taxonomy/taxonomy_bar_plot/"+getLink(path+"/outHTML/4_taxonomy/taxonomy_bar_plot")##barchart taxonomy
##################################################################################################################
"""Save output path into HTML file"""
saveFile = open(path+"/outHTML/index.html","w")
"""Define header and Footer"""
header = """<html><title>Qiime2 Output HTML</title><meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="https://www.w3schools.com/w3css/4/w3.css">
<link href='https://fonts.googleapis.com/css?family=RobotoDraft' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<body><style>p {text-indent:20px;}</style>
"""
footer = "</body></html>"
"""Write Data"""
saveFile.write(header)
saveFile.write("<p><strong>###QIIME2 OUTPUT List###</strong></p>")
saveFile.write("<hr/>")
saveFile.write("<p><strong>[1] Input Sequence Summary</strong></p>")
saveFile.write('<p><a href="'+link11+'">1.1 Dereplicated Sequence</a></p>')
saveFile.write('<p><a href="'+link12+'">1.2 Dereplicated Sequence Table</a></p>')
saveFile.write("<p><strong>[2] OTU table</strong></p>")
saveFile.write('<p><a href="'+link21+'">2.1 OTU table</a></p>')
saveFile.write('<p><a href="'+link22+'">2.2 Representative sequence</a></p>')
saveFile.write('<p><a href="'+link23+'">2.3 Chimera checking Statistics</a></p>')
saveFile.write("<p><strong>[3] Alpha/Beta Diversity</strong></p>")
saveFile.write('<p><a href="'+link31+'">3.1 Rarefaction curve</a></p>')
saveFile.write('<p><a href="'+link32+'">3.2 Shannon diversity index</a></p>')
saveFile.write('<p><a href="'+link33+'">3.3 Observed OTU</a></p>')
if link34 != "":
saveFile.write('<p><a href="'+link34+'">3.4 PCoA (weighted unifrac)</a></p>')
if link35 != "":
saveFile.write('<p><a href="'+link35+'">3.5 PCoA (unweighted unifrac)</a></p>')
if link36 != "":
saveFile.write('<p><a href="'+link36+'">3.6 PCoA (bray curtis)</a></p>')
saveFile.write("<p><strong>[4] Taxonomy</strong></p>")
saveFile.write('<p><a href="'+link41+'">4.1 Taxonomy</a></p>')
saveFile.write('<p><a href="'+link42+'">4.2 Barchart plot</a></p>')
saveFile.write("<hr/>")
saveFile.write(footer)
saveFile.close()
##################################################################################################################
"""Copy file into folder"""
#Dereplicated sequence
fileIn = path+"/outHTML/"+link11.replace("index.html","sequences.fasta")
fileOut = path+"/outHTML/5_file/dereplicated_sequence.fasta"
shutil.copy(fileIn,fileOut)
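# the blocks below stream single members out of the .qza/.qzv zip archives: unzip -l lists the archive, grep finds the member, unzip -c extracts it to stdout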
#Representative sequence
cmd = "unzip -l "+path+"/4_rep_seq_nonchimeric.qza | grep 'dna-sequences.fasta' | rev | cut -d \" \" -f 1 | rev | xargs -n 1 unzip -c -q "+path+"/4_rep_seq_nonchimeric.qza > "+path+"/outHTML/5_file/representative_sequence.fasta"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
#feature-table.biom
cmd = "unzip -l "+path+"/4_table_nonchimeric.qza | grep 'feature-table.biom' | rev | cut -d \" \" -f 1 | rev | xargs -n 1 unzip -c -q "+path+"/4_table_nonchimeric.qza > "+path+"/outHTML/5_file/feature-table.biom"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
#convert .biom to .tsv
cmd = "biom convert -i "+path+"/outHTML/5_file/feature-table.biom -o "+path+"/outHTML/5_file/feature-table.tsv"+" --to-tsv"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
#feature-frequency-detail
cmd = "unzip -l "+path+"/4_table_nonchimeric.qzv | grep 'feature-frequency-detail.csv' | rev | cut -d \" \" -f 1 | rev | xargs -n 1 unzip -c -q "+path+"/4_table_nonchimeric.qzv > "+path+"/outHTML/5_file/feature-frequency-detail.csv"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
#sample-frequency-detail
cmd = "unzip -l "+path+"/4_table_nonchimeric.qzv | grep 'sample-frequency-detail.csv' | rev | cut -d \" \" -f 1 | rev | xargs -n 1 unzip -c -q "+path+"/4_table_nonchimeric.qzv > "+path+"/outHTML/5_file/sample-frequency-detail.csv"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
#Taxonomy mapping for biom table
cmd = "unzip -l "+path+"/7_taxonomy.qza | grep 'taxonomy.tsv' | rev | cut -d \" \" -f 1 | rev | xargs -n 1 unzip -c -q "+path+"/7_taxonomy.qza > "+path+"/outHTML/5_file/taxonomy_mapping_for_OTU.tsv"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
#Taxonomy
rank = {1:"kingdom",2:"phylum",3:"class",4:"order",5:"family",6:"genus",7:"species"}
for i in range(1,8):
cmd = "unzip -l "+path+"/7_taxa_bar_plots.qzv | grep 'level-"+str(i)+".csv' | rev | cut -d \" \" -f 1 | rev | xargs -n 1 unzip -c -q "+path+"/7_taxa_bar_plots.qzv > "+path+"/outHTML/5_file/taxonomy_level_"+str(i)+"_"+rank[i]+".csv"
run = subprocess.Popen(cmd,shell="TRUE")
run.wait()
##End of code
``` |
{
"source": "joesolly/neopixel_fft",
"score": 3
} |
#### File: joesolly/neopixel_fft/recorder.py
```python
import numpy
import pyaudio
import threading
class SwhRecorder:
"""Simple, cross-platform class to record from the microphone."""
MAX_FREQUENCY = 5000 # sounds above this are just annoying
MIN_FREQUENCY = 16 # can't hear anything less than this
def __init__(self, buckets=300, min_freq=16, max_freq=5000):
"""minimal garb is executed when class is loaded."""
self.buckets = buckets
self.MIN_FREQUENCY = min_freq
self.MAX_FREQUENCY = max_freq
self.p = pyaudio.PyAudio()
self.input_device = self.p.get_default_input_device_info()
self.secToRecord = 0.08
self.RATE = int(self.input_device['defaultSampleRate'])
self.BUFFERSIZE = int(self.secToRecord * self.RATE) # should be a power of 2 and at least double buckets
self.threadsDieNow = False
self.newData = False
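# work out how the FFT bins map onto the requested number of buckets and the allowed frequency range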
self.buckets_within_frequency = (self.MAX_FREQUENCY * self.BUFFERSIZE) / self.RATE
self.buckets_per_final_bucket = max(int(self.buckets_within_frequency / buckets), 1)
self.buckets_below_frequency = int((self.MIN_FREQUENCY * self.BUFFERSIZE) / self.RATE)
self.buffersToRecord = int(self.RATE * self.secToRecord / self.BUFFERSIZE)
if self.buffersToRecord == 0:
self.buffersToRecord = 1
def setup(self):
"""initialize sound card."""
self.inStream = self.p.open(
format=pyaudio.paInt16,
channels=1,
rate=self.RATE,
input=True,
frames_per_buffer=self.BUFFERSIZE,
input_device_index=self.input_device['index'])
self.audio = numpy.empty((self.buffersToRecord * self.BUFFERSIZE), dtype=numpy.int16)
def close(self):
"""cleanly back out and release sound card."""
self.continuousEnd()
self.inStream.stop_stream()
self.inStream.close()
self.p.terminate()
def getAudio(self):
"""get a single buffer size worth of audio."""
audioString = self.inStream.read(self.BUFFERSIZE)
return numpy.frombuffer(audioString, dtype=numpy.int16)
def record(self, forever=True):
"""record secToRecord seconds of audio."""
while True:
if self.threadsDieNow:
break
for i in range(self.buffersToRecord):
try:
audio = self.getAudio()
self.audio[i * self.BUFFERSIZE:(i + 1) * self.BUFFERSIZE] = audio
except: #OSError input overflowed
print('OSError: input overflowed')
self.newData = True
if forever is False:
break
def continuousStart(self):
"""CALL THIS to start running forever."""
self.t = threading.Thread(target=self.record)
self.t.start()
def continuousEnd(self):
"""shut down continuous recording."""
self.threadsDieNow = True
if hasattr(self, 't') and self.t:
self.t.join()
def fft(self):
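# magnitude spectrum of the newest audio buffer, folded to positive frequencies and condensed into the requested buckets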
if not self.newData:
return None
data = self.audio.flatten()
self.newData = False
left, right = numpy.split(numpy.abs(numpy.fft.fft(data)), 2)
ys = numpy.add(left, right[::-1]) # don't lose power, add negative to positive
ys = ys[self.buckets_below_frequency:]
# Shorten to requested number of buckets within MAX_FREQUENCY
final = numpy.copy(ys[::self.buckets_per_final_bucket])
final_size = len(final)
for i in range(1, self.buckets_per_final_bucket):
data_to_combine = numpy.copy(ys[i::self.buckets_per_final_bucket])
data_to_combine.resize(final_size)
final = numpy.add(final, data_to_combine)
return final[:int(self.buckets_within_frequency)]
``` |
{
"source": "joesolly/vrs-python",
"score": 2
} |
#### File: vrs-python/tests/test_vcr.py
```python
import pytest
@pytest.mark.vcr
def test_vcrtest(dataproxy):
seq = dataproxy.get_sequence("NC_000013.11",50_000_000,50_000_050)
assert len(seq) == 50
assert seq == "TTAGGTGTTTAGATGATTTCTAAGATGCTTTTAAGCCCAGTATTTCTATT"
```
#### File: vrs-python/tests/test_vrs.py
```python
from ga4gh.core import sha512t24u, ga4gh_digest, ga4gh_serialize, ga4gh_identify
from ga4gh.vrs import models, vr_deref, vr_enref
allele_dict = {
'location': {'interval': {
'end': 55181320,
'start': 55181319,
'type': 'SimpleInterval'},
'sequence_id': 'ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul',
'type': 'SequenceLocation'},
'state': {'sequence': 'T', 'type': 'SequenceState'},
'type': 'Allele'}
a = models.Allele(**allele_dict)
def test_vr():
assert a.as_dict() == allele_dict
assert ga4gh_serialize(a.location.interval) == b'{"end":55181320,"start":55181319,"type":"SimpleInterval"}'
assert ga4gh_serialize(a.location) == b'{"interval":{"end":55181320,"start":55181319,"type":"SimpleInterval"},"sequence_id":"F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul","type":"SequenceLocation"}'
assert sha512t24u(ga4gh_serialize(a.location)) == '5D9eG-ev4fA7mYIpOpDEe-4Am1lzPZlQ'
assert ga4gh_digest(a.location) == '5D9eG-ev4fA7mYIpOpDEe-4Am1lzPZlQ'
assert ga4gh_identify(a.location) == 'ga4gh:VSL.5D9eG-ev4fA7mYIpOpDEe-4Am1lzPZlQ'
assert ga4gh_serialize(a) == b'{"location":"5D9eG-ev4fA7mYIpOpDEe-4Am1lzPZlQ","state":{"sequence":"T","type":"SequenceState"},"type":"Allele"}'
assert ga4gh_digest(a) == 'vU0meY5wGjpyRLCjSxCfb2Jlruyn2adL'
assert ga4gh_identify(a) == 'ga4gh:VA.vU0meY5wGjpyRLCjSxCfb2Jlruyn2adL'
assert a.as_dict() == {'location': {'interval': {'end': 55181320, 'start': 55181319, 'type': 'SimpleInterval'},
'sequence_id': 'ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul',
'type': 'SequenceLocation'},
'state': {'sequence': 'T', 'type': 'SequenceState'},
'type': 'Allele'}
vros = {}
a2 = vr_enref(a, vros)
assert ga4gh_identify(a) == ga4gh_identify(a2)
assert a2.location == "ga4gh:VSL.5D9eG-ev4fA7mYIpOpDEe-4Am1lzPZlQ"
assert a2.location in vros
assert ga4gh_identify(a) in vros
a3 = vr_deref(a2, vros)
assert a == a3
```
#### File: tests/validation/test_models.py
```python
import os
import pytest
import yaml
from ga4gh.core import ga4gh_serialize
from ga4gh.vrs import models
validation_fn = os.path.join(os.path.dirname(__file__), "data", "models.yaml")
validation_tests = yaml.load(open(validation_fn), Loader=yaml.SafeLoader)
@pytest.mark.parametrize("test", validation_tests["SimpleInterval"])
def test_SimpleInterval(test):
o = models.SimpleInterval(**test["in"])
assert test["out"]["ga4gh_serialize"] == ga4gh_serialize(o).decode()
#@pytest.mark.parametrize("test", validation_tests["NestedInterval"])
#def test_NestedInterval(test):
# o = models.NestedInterval(**test["in"])
# assert test["out"]["ga4gh_serialize"] == ga4gh_serialize(o).decode()
@pytest.mark.parametrize("test", validation_tests["SequenceLocation"])
def test_SequenceLocation(test):
o = models.SequenceLocation(**test["in"])
assert test["out"]["ga4gh_serialize"] == ga4gh_serialize(o).decode()
@pytest.mark.parametrize("test", validation_tests["Allele"])
def test_Allele(test):
o = models.Allele(**test["in"])
assert test["out"]["ga4gh_serialize"] == ga4gh_serialize(o).decode()
``` |
{
"source": "joesonitaly/JLicence",
"score": 3
} |
#### File: JLicence/jlicence/client.py
```python
import datetime
import json
from os.path import exists, isfile
from django.core.signing import BadSignature, Signer
from django.utils.dateparse import parse_datetime
from . import setting, licenceFile, timestampFile, client, provider
class IncorrectSystemClock(Exception):
pass
def licenceExists():
"""
Checks if the licence file exists and is a regular file.
Returns:
- Boolean
"""
updateTimestamp()
return exists(licenceFile) and isfile(licenceFile)
def licenceIsValid():
"""
Checks if the licence validity period has not been exceeded.
Returns:
- Boolean
"""
updateTimestamp()
if not licenceExists():
return False
licence = getLicense()
with open(timestampFile, "r") as f:
savedTimestamp = parse_datetime(f.read().strip())
return (savedTimestamp >= parse_datetime(licence["CREATED"])) and (savedTimestamp < parse_datetime(licence["EXPIRES"])) and (provider == licence["PROVIDER"]) and (client == licence["CLIENT"])
def updateTimestamp():
"""
Must be called after every login or logout attempt and/or periodically.
Ensures that the system clock continues to tick forward in time, using a saved timestamp.
Returns:
- None
Exceptions:
- IncorrectSystemClock(), if the current system time is less than the saved timestamp.
"""
now = datetime.datetime.now()
try:
with open(timestampFile, "r+") as f:
savedTimestamp = parse_datetime(f.read().strip())
if savedTimestamp:
if now >= savedTimestamp:
f.seek(0, 0)
f.truncate()
f.write(str(now)[:-7])
else:
raise IncorrectSystemClock("Please set your system clock to at least %s." % str(savedTimestamp)[:-7])
else:
raise IOError
except IOError:
with open(timestampFile, "w") as f:
licence = getLicense()
if now >= parse_datetime(licence["CREATED"]):
f.write(str(now)[:-7])
else:
f.write(str(parse_datetime(licence["CREATED"]))[:-7])
def getLicense():
with open(licenceFile, "r") as f:
return json.loads(Signer().unsign(f.read().strip()))
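# Usage sketch (not part of the original module): a typical client-side check
# built from the functions above. The enforce_licence() wrapper is hypothetical;
# licenceFile and timestampFile come from the package configuration imported at
# the top of this module.
#
#   def enforce_licence():
#       if not licenceExists():
#           raise RuntimeError("No licence file installed.")
#       if not licenceIsValid():
#           raise RuntimeError("Licence expired or does not match this client/provider.")
#       # updateTimestamp() is already invoked by both checks, so the saved
#       # timestamp keeps moving forward on every enforcement call.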
``` |
{
"source": "joesonw/pulumi-mysql",
"score": 2
} |
#### File: python/pulumi_mysql/user.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
user: pulumi.Input[str],
auth_plugin: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
plaintext_password: Optional[pulumi.Input[str]] = None,
tls_option: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[str] user: The name of the user.
:param pulumi.Input[str] auth_plugin: Use an [authentication plugin][ref-auth-plugins] to authenticate the user instead of using password authentication. Description of the fields allowed in the block below. Conflicts with `password` and `plaintext_password`.
:param pulumi.Input[str] host: The source host of the user. Defaults to "localhost".
:param pulumi.Input[str] password: Deprecated alias of `plaintext_password`, whose value is *stored as plaintext in state*. Prefer to use `plaintext_password` instead, which stores the password as an unsalted hash. Conflicts with `auth_plugin`.
:param pulumi.Input[str] plaintext_password: The password for the user. This must be provided in plain text, so the data source for it must be secured. An _unsalted_ hash of the provided password is stored in state. Conflicts with `auth_plugin`.
        :param pulumi.Input[str] tls_option: A TLS-Option for the `CREATE USER` or `ALTER USER` statement. The value is suffixed to `REQUIRE`. A value of 'SSL' will generate a `CREATE USER ... REQUIRE SSL` statement. See the [MYSQL `CREATE USER` documentation](https://dev.mysql.com/doc/refman/5.7/en/create-user.html) for more. Ignored if MySQL version is under 5.7.0.
"""
pulumi.set(__self__, "user", user)
if auth_plugin is not None:
pulumi.set(__self__, "auth_plugin", auth_plugin)
if host is not None:
pulumi.set(__self__, "host", host)
if password is not None:
warnings.warn("""Please use plaintext_password instead""", DeprecationWarning)
pulumi.log.warn("""password is deprecated: Please use plaintext_password instead""")
if password is not None:
pulumi.set(__self__, "password", password)
if plaintext_password is not None:
pulumi.set(__self__, "plaintext_password", plaintext_password)
if tls_option is not None:
pulumi.set(__self__, "tls_option", tls_option)
@property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
"""
The name of the user.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: pulumi.Input[str]):
pulumi.set(self, "user", value)
@property
@pulumi.getter(name="authPlugin")
def auth_plugin(self) -> Optional[pulumi.Input[str]]:
"""
        Use an [authentication plugin][ref-auth-plugins] to authenticate the user instead of using password authentication. Description of the fields allowed in the block below. Conflicts with `password` and `plaintext_password`.
"""
return pulumi.get(self, "auth_plugin")
@auth_plugin.setter
def auth_plugin(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_plugin", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
The source host of the user. Defaults to "localhost".
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Deprecated alias of `plaintext_password`, whose value is *stored as plaintext in state*. Prefer to use `plaintext_password` instead, which stores the password as an unsalted hash. Conflicts with `auth_plugin`.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="plaintextPassword")
def plaintext_password(self) -> Optional[pulumi.Input[str]]:
"""
The password for the user. This must be provided in plain text, so the data source for it must be secured. An _unsalted_ hash of the provided password is stored in state. Conflicts with `auth_plugin`.
"""
return pulumi.get(self, "plaintext_password")
@plaintext_password.setter
def plaintext_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plaintext_password", value)
@property
@pulumi.getter(name="tlsOption")
def tls_option(self) -> Optional[pulumi.Input[str]]:
"""
        A TLS-Option for the `CREATE USER` or `ALTER USER` statement. The value is suffixed to `REQUIRE`. A value of 'SSL' will generate a `CREATE USER ... REQUIRE SSL` statement. See the [MYSQL `CREATE USER` documentation](https://dev.mysql.com/doc/refman/5.7/en/create-user.html) for more. Ignored if MySQL version is under 5.7.0.
"""
return pulumi.get(self, "tls_option")
@tls_option.setter
def tls_option(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tls_option", value)
@pulumi.input_type
class _UserState:
def __init__(__self__, *,
auth_plugin: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
plaintext_password: Optional[pulumi.Input[str]] = None,
tls_option: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering User resources.
:param pulumi.Input[str] auth_plugin: Use an [authentication plugin][ref-auth-plugins] to authenticate the user instead of using password authentication. Description of the fields allowed in the block below. Conflicts with `password` and `plaintext_password`.
:param pulumi.Input[str] host: The source host of the user. Defaults to "localhost".
:param pulumi.Input[str] password: Deprecated alias of `plaintext_password`, whose value is *stored as plaintext in state*. Prefer to use `plaintext_password` instead, which stores the password as an unsalted hash. Conflicts with `auth_plugin`.
        :param pulumi.Input[str] plaintext_password: The password for the user. This must be provided in plain text, so the data source for it must be secured. An _unsalted_ hash of the provided password is stored in state. Conflicts with `auth_plugin`.
        :param pulumi.Input[str] tls_option: A TLS-Option for the `CREATE USER` or `ALTER USER` statement. The value is suffixed to `REQUIRE`. A value of 'SSL' will generate a `CREATE USER ... REQUIRE SSL` statement. See the [MYSQL `CREATE USER` documentation](https://dev.mysql.com/doc/refman/5.7/en/create-user.html) for more. Ignored if MySQL version is under 5.7.0.
:param pulumi.Input[str] user: The name of the user.
"""
if auth_plugin is not None:
pulumi.set(__self__, "auth_plugin", auth_plugin)
if host is not None:
pulumi.set(__self__, "host", host)
if password is not None:
warnings.warn("""Please use plaintext_password instead""", DeprecationWarning)
pulumi.log.warn("""password is deprecated: Please use plaintext_password instead""")
if password is not None:
pulumi.set(__self__, "password", password)
if plaintext_password is not None:
pulumi.set(__self__, "plaintext_password", plaintext_password)
if tls_option is not None:
pulumi.set(__self__, "tls_option", tls_option)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter(name="authPlugin")
def auth_plugin(self) -> Optional[pulumi.Input[str]]:
"""
Use an [authentication plugin][ref-auth-plugins] to authenticate the user instead of using password authentication. Description of the fields allowed in the block below. Conflicts with `password` and `plaintext_password`.
"""
return pulumi.get(self, "auth_plugin")
@auth_plugin.setter
def auth_plugin(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_plugin", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
The source host of the user. Defaults to "localhost".
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Deprecated alias of `plaintext_password`, whose value is *stored as plaintext in state*. Prefer to use `plaintext_password` instead, which stores the password as an unsalted hash. Conflicts with `auth_plugin`.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="plaintextPassword")
def plaintext_password(self) -> Optional[pulumi.Input[str]]:
"""
The password for the user. This must be provided in plain text, so the data source for it must be secured. An _unsalted_ hash of the provided password is stored in state. Conflicts with `auth_plugin`.
"""
return pulumi.get(self, "plaintext_password")
@plaintext_password.setter
def plaintext_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plaintext_password", value)
@property
@pulumi.getter(name="tlsOption")
def tls_option(self) -> Optional[pulumi.Input[str]]:
"""
        A TLS-Option for the `CREATE USER` or `ALTER USER` statement. The value is suffixed to `REQUIRE`. A value of 'SSL' will generate a `CREATE USER ... REQUIRE SSL` statement. See the [MYSQL `CREATE USER` documentation](https://dev.mysql.com/doc/refman/5.7/en/create-user.html) for more. Ignored if MySQL version is under 5.7.0.
"""
return pulumi.get(self, "tls_option")
@tls_option.setter
def tls_option(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tls_option", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The name of the user.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
class User(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auth_plugin: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
plaintext_password: Optional[pulumi.Input[str]] = None,
tls_option: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The ``User`` resource creates and manages a user on a MySQL
server.
## Examples
### Basic Usage
```python
import pulumi
import pulumi_mysql as mysql
jdoe = mysql.User("jdoe",
host="example.com",
plaintext_password="password",
user="jdoe")
```
### Example Usage with an Authentication Plugin
```python
import pulumi
import pulumi_mysql as mysql
nologin = mysql.User("nologin",
auth_plugin="mysql_no_login",
host="example.com",
user="nologin")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auth_plugin: Use an [authentication plugin][ref-auth-plugins] to authenticate the user instead of using password authentication. Description of the fields allowed in the block below. Conflicts with `password` and `plaintext_password`.
:param pulumi.Input[str] host: The source host of the user. Defaults to "localhost".
:param pulumi.Input[str] password: Deprecated alias of `plaintext_password`, whose value is *stored as plaintext in state*. Prefer to use `plaintext_password` instead, which stores the password as an unsalted hash. Conflicts with `auth_plugin`.
        :param pulumi.Input[str] plaintext_password: The password for the user. This must be provided in plain text, so the data source for it must be secured. An _unsalted_ hash of the provided password is stored in state. Conflicts with `auth_plugin`.
        :param pulumi.Input[str] tls_option: A TLS-Option for the `CREATE USER` or `ALTER USER` statement. The value is suffixed to `REQUIRE`. A value of 'SSL' will generate a `CREATE USER ... REQUIRE SSL` statement. See the [MYSQL `CREATE USER` documentation](https://dev.mysql.com/doc/refman/5.7/en/create-user.html) for more. Ignored if MySQL version is under 5.7.0.
:param pulumi.Input[str] user: The name of the user.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The ``User`` resource creates and manages a user on a MySQL
server.
## Examples
### Basic Usage
```python
import pulumi
import pulumi_mysql as mysql
jdoe = mysql.User("jdoe",
host="example.com",
plaintext_password="password",
user="jdoe")
```
### Example Usage with an Authentication Plugin
```python
import pulumi
import pulumi_mysql as mysql
nologin = mysql.User("nologin",
auth_plugin="mysql_no_login",
host="<EMAIL>",
user="nologin")
```
:param str resource_name: The name of the resource.
:param UserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auth_plugin: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
plaintext_password: Optional[pulumi.Input[str]] = None,
tls_option: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserArgs.__new__(UserArgs)
__props__.__dict__["auth_plugin"] = auth_plugin
__props__.__dict__["host"] = host
if password is not None and not opts.urn:
warnings.warn("""Please use plaintext_password instead""", DeprecationWarning)
pulumi.log.warn("""password is deprecated: Please use plaintext_password instead""")
__props__.__dict__["password"] = password
__props__.__dict__["plaintext_password"] = <PASSWORD>
__props__.__dict__["tls_option"] = tls_option
if user is None and not opts.urn:
raise TypeError("Missing required property 'user'")
__props__.__dict__["user"] = user
super(User, __self__).__init__(
'mysql:index/user:User',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
auth_plugin: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
plaintext_password: Optional[pulumi.Input[str]] = None,
tls_option: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None) -> 'User':
"""
Get an existing User resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] auth_plugin: Use an [authentication plugin][ref-auth-plugins] to authenticate the user instead of using password authentication. Description of the fields allowed in the block below. Conflicts with `password` and `plaintext_password`.
:param pulumi.Input[str] host: The source host of the user. Defaults to "localhost".
:param pulumi.Input[str] password: Deprecated alias of `plaintext_password`, whose value is *stored as plaintext in state*. Prefer to use `plaintext_password` instead, which stores the password as an unsalted hash. Conflicts with `auth_plugin`.
        :param pulumi.Input[str] plaintext_password: The password for the user. This must be provided in plain text, so the data source for it must be secured. An _unsalted_ hash of the provided password is stored in state. Conflicts with `auth_plugin`.
        :param pulumi.Input[str] tls_option: A TLS-Option for the `CREATE USER` or `ALTER USER` statement. The value is suffixed to `REQUIRE`. A value of 'SSL' will generate a `CREATE USER ... REQUIRE SSL` statement. See the [MYSQL `CREATE USER` documentation](https://dev.mysql.com/doc/refman/5.7/en/create-user.html) for more. Ignored if MySQL version is under 5.7.0.
:param pulumi.Input[str] user: The name of the user.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _UserState.__new__(_UserState)
__props__.__dict__["auth_plugin"] = auth_plugin
__props__.__dict__["host"] = host
__props__.__dict__["password"] = password
__props__.__dict__["plaintext_password"] = <PASSWORD>_password
__props__.__dict__["tls_option"] = tls_option
__props__.__dict__["user"] = user
return User(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authPlugin")
def auth_plugin(self) -> pulumi.Output[Optional[str]]:
"""
Use an [authentication plugin][ref-auth-plugins] to authenticate the user instead of using password authentication. Description of the fields allowed in the block below. Conflicts with `password` and `plaintext_password`.
"""
return pulumi.get(self, "auth_plugin")
@property
@pulumi.getter
def host(self) -> pulumi.Output[Optional[str]]:
"""
The source host of the user. Defaults to "localhost".
"""
return pulumi.get(self, "host")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
Deprecated alias of `plaintext_password`, whose value is *stored as plaintext in state*. Prefer to use `plaintext_password` instead, which stores the password as an unsalted hash. Conflicts with `auth_plugin`.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="plaintextPassword")
def plaintext_password(self) -> pulumi.Output[Optional[str]]:
"""
The password for the user. This must be provided in plain text, so the data source for it must be secured. An _unsalted_ hash of the provided password is stored in state. Conflicts with `auth_plugin`.
"""
return pulumi.get(self, "plaintext_password")
@property
@pulumi.getter(name="tlsOption")
def tls_option(self) -> pulumi.Output[Optional[str]]:
"""
        A TLS-Option for the `CREATE USER` or `ALTER USER` statement. The value is suffixed to `REQUIRE`. A value of 'SSL' will generate a `CREATE USER ... REQUIRE SSL` statement. See the [MYSQL `CREATE USER` documentation](https://dev.mysql.com/doc/refman/5.7/en/create-user.html) for more. Ignored if MySQL version is under 5.7.0.
"""
return pulumi.get(self, "tls_option")
@property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
"""
The name of the user.
"""
return pulumi.get(self, "user")
``` |
{
"source": "joesox/raspiAI",
"score": 3
} |
#### File: raspiAI/raspiAI/demomenu.py
```python
import os
import sys
import time
import TXT2SPEECH
import TwitterAI
import SpeechRecognition
import Vision
import Jokes
import AIMLAI
import Pandorabot
import aiml
__version__ = '0.1.20150502'
__author__ = "JPSIII and sjs_20012001"
__url__ = 'https://github.com/joesox/raspiAI'
__doc__ = 'Demo Menu for raspiAI'
__raspiaibotid__ = "f3d034f13e34d592"
def printline(length, div, line):
"""
Print a line of length <length> with a * at each end and <line> in the body
<div> determins where the line should be justified
1 right justified
2 centered
use any large number for left
"""
pad = max(0, (length-len(line))/div)
print "*%*s%-*s *" %(pad+1, ' ', length-pad, line)
def box(length, justify, *lines):
"""
    Display <lines> in a box of *'s width <length>
    Split each line at < > if too long
Justify gives the overall text justification
c: center
r: right
default left
"""
div = {'c':2, 'r':1}.get(justify, 100)
print "*"*(length+4)
for line in lines:
if len(line) <= length or ' ' not in line:
printline(length, div, line)
else:
store = ''
for word in line.split():
if len(store)+len(word)+1 > length:
printline(length, div, store.strip())
store = ''
store = store+' '+word
printline(length, div, store.strip())
print "*"*(length+4)
def run_menu(width, values, *options):
"""
    Create a menu of width <width> from (description, function/method) tuples, one per option
    Add an exit option at the end of the list
    Display with each option being given an integer key in order starting from 1
    Repeatedly ask for an option and call the linked function until asked to stop
The menu can also be left by entering 'exit'
"""
while True:
tmp = ['%i) %s'%(i+1, s[0]%values) for i, s in enumerate(options)]
box(width, 'l', 'Choose one of the below:', *tmp+['%i) exit'%(len(options)+1)])
entered = raw_input("> ")
try:
choice = int(entered)
except:
if(str(entered).lower() == "exit"):
break
else:
if choice == len(options)+1:
break
elif 0 < choice <= len(options):
options[choice-1][1]()
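# Usage sketch (not part of the original module): box() and run_menu() compose
# like this; the banner text, option label, and handler are made up for
# illustration.
#
#   def _say_hello():
#       print "Hello from raspiAI"
#
#   box(60, 'c', 'raspiAI', 'demo banner')
#   run_menu(60, {}, ('Say hello', _say_hello))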
class demomenu(object):
""" """
def __init__(self, bloadaiml, aimlfolder):
"""
Initialize global class variables
Create aiml object with file
"""
if (bloadaiml == True):
#Let the user know we need to pre-load the AIML files for knowledge
print "LOADING ALL AIML FILES FROM '" + aimlfolder + "'\r\nTHIS WILL TAKE A FEW MINUTES..."
time.sleep(2)
self.AIMLFOLDER = aimlfolder
self._k = aiml.Kernel()
#self._k.bootstrap(learnFiles = aimlfile)
#self._k.saveBrain("raspiai.brn")
full_file_paths = self.get_filepaths(aimlfolder)
self._k.bootstrap(learnFiles = full_file_paths)
def get_filepaths(self, directory):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
return file_paths # Self-explanatory.
def _txttospeech(self):
""" Demo TXT2SPEECH """
tts = TXT2SPEECH.demo()
def _twitterauto(self):
""" Demo TwitterAI [PostTweet-auto]"""
twitter = TwitterAI.demo()
def _twitterprompt(self):
""" Demo TwitterAI [PostTweet-prompt]"""
twitter = TwitterAI.demoTwo()
def _speechrecognition(self):
""" Demo speechrecognition"""
sr = SpeechRecognition.demo()
def _speechmicrecognition(self):
""" Demo speechrecognition"""
sr = SpeechRecognition.demo3()
def _visionphoto(self):
""" Demo Vision class """
v = Vision.demo()
def _visionvideo(self):
""" Demo Vision class """
v = Vision.demo2()
def _tellrandomjoke(self):
""" Demo Joke class """
j = Jokes.demo()
def _tellrandomjokeandtweet(self):
""" Demo Joke class """
j = Jokes.demo2()
def _realTimeTalk(self):
""" Pandorabot Personality System """
p = Pandorabot.Pandorabot(__raspiaibotid__)
sr = SpeechRecognition.SpeechRecognition()
print "\nEntering interactive mode (Say exit to excape)"
while True:
print "LISTENING>> "
input = sr.Micinput()
print " RASPAI HEARD: " + str(input)
#"exit" is a command; it is the first command.
if(str(input).lower() == "exit"):
break
else:
response = p._botsession.think(input)
speech = TXT2SPEECH.txttospeech(120, 0)
print "Started!"
speech.Say(response, True) #True=print
print "Finished Pandorabot Personality System with SpeechRecognition!"
def _aimldemo(self):
""" Demo Joke class """
print "RUNNING PRE-SET TESTS..."
#Tell who you are
print "INPUT:My name is Joe.|OUTPUT:" + AIMLAI.Say(self, "My name is Joe.", self._k)
#who
print "INPUT:What is your name?|OUTPUT:" + AIMLAI.Say(self, "What is your name?", self._k)
#what
print "INPUT:What is your purpose?|OUTPUT:" + AIMLAI.Say(self, "What is your purpose?", self._k)
#when
print "INPUT:When where you born?|OUTPUT:" + AIMLAI.Say(self, "When where you born?", self._k)
#why
print "INPUT:Why are you here?|OUTPUT:" + AIMLAI.Say(self, "Why are you here?", self._k)
print "INPUT:What is your favorite hobby?|OUTPUT:" + AIMLAI.Say(self, "What is your favorite hobby?", self._k)
print "INPUT:Do you know Ben?|OUTPUT:" + AIMLAI.Say(self, "Do you know Ben?", self._k)
print "INPUT:Do you know Joe?|OUTPUT:" + AIMLAI.Say(self, "Do you know Joe?", self._k)
print "=====\r\nTESTS COMPLETED."
def _aimlpandorabot(self):
""" Pandorabot Personality System """
p = Pandorabot.Pandorabot(__raspiaibotid__)
print "\nEntering interactive mode (ctrl-c to exit)"
while True:
input = raw_input("TYPE HERE:>> ")
if(str(input).lower() == "exit"):
break
else:
response = p._botsession.think(input)
speech = TXT2SPEECH.txttospeech(110, 0)
print "Started!"
speech.Say(response, True) #True=print
print "Finished Pandorabot Personality System!"
def menu(self):
"""Main Menu loop"""
box(60, 'c', 'raspiAI', __version__, __url__, '-'*40, __doc__)
run_menu(60, self.__dict__,
('Text2Speech demo', self._txttospeech),
('Twitter [PostTweet-auto] demo', self._twitterauto),
('Twitter [PostTweet-prompt] demo', self._twitterprompt),
('SpeechRecognition demo', self._speechrecognition),
('SpeechmicRecognition demo', self._speechmicrecognition),
('Vision demo [photo]', self._visionphoto),
('Vision demo [video]', self._visionvideo),
('Joke demo [random joke]', self._tellrandomjoke),
('Joke demo [random joke and Tweet it]', self._tellrandomjokeandtweet),
('AIML demo [Pre-set questions]', self._aimldemo),
('Activate Pandorabot Personality System [raspiAI bot]', self._aimlpandorabot),
('Activate Pandorabot Personality System with SpeechmicRecognition', self._realTimeTalk))
def start():
i = demomenu(True, "aiml-en-us-foundation-alice") #Load local AIML files for chatbot system
#i = demomenu(False, "aiml-en-us-foundation-alice") #Skip local AIML files because going to use Pandorabot for chatbot system #setting False creates some issues for _aimldemo & _aimlpandorabot
i.menu()
if __name__ == '__main__':
start()
``` |
{
"source": "joesphramkishun/falcon",
"score": 2
} |
#### File: falcon/falcon/response.py
```python
from http import cookies as http_cookies
import mimetypes
from falcon import DEFAULT_MEDIA_TYPE
from falcon.errors import HeaderNotSupported
from falcon.media import Handlers
from falcon.response_helpers import (
format_content_disposition,
format_etag_header,
format_header_value_list,
format_range,
header_property,
is_ascii_encodable,
)
from falcon.util import dt_to_http, structures, TimezoneGMT
from falcon.util.uri import encode as uri_encode
from falcon.util.uri import encode_value as uri_encode_value
GMT_TIMEZONE = TimezoneGMT()
_STREAM_LEN_REMOVED_MSG = (
'The deprecated stream_len property was removed in Falcon 3.0. '
'Please use Response.set_stream() or Response.content_length instead.'
)
class Response:
"""Represents an HTTP response to a client request.
Note:
`Response` is not meant to be instantiated directly by responders.
Keyword Arguments:
options (dict): Set of global options passed from the API handler.
Attributes:
status (str): HTTP status line (e.g., '200 OK'). Falcon requires the
full status line, not just the code (e.g., 200). This design
makes the framework more efficient because it does not have to
do any kind of conversion or lookup when composing the WSGI
response.
If not set explicitly, the status defaults to '200 OK'.
Note:
Falcon provides a number of constants for common status
codes. They all start with the ``HTTP_`` prefix, as in:
``falcon.HTTP_204``.
media (object): A serializable object supported by the media handlers
configured via :class:`falcon.RequestOptions`.
See :ref:`media` for more information regarding media handling.
body (str): String representing response content.
Note:
Falcon will encode the given text as UTF-8
in the response. If the content is already a byte string,
use the :attr:`data` attribute instead (it's faster).
data (bytes): Byte string representing response content.
Use this attribute in lieu of `body` when your content is
already a byte string (of type ``bytes``). See also the note below.
Warning:
Always use the `body` attribute for text, or encode it
first to ``bytes`` when using the `data` attribute, to
ensure Unicode characters are properly encoded in the
HTTP response.
stream: Either a file-like object with a `read()` method that takes
an optional size argument and returns a block of bytes, or an
iterable object, representing response content, and yielding
blocks as byte strings. Falcon will use *wsgi.file_wrapper*, if
provided by the WSGI server, in order to efficiently serve
file-like objects.
Note:
If the stream is set to an iterable object that requires
resource cleanup, it can implement a close() method to do so.
The close() method will be called upon completion of the request.
context (object): Empty object to hold any data (in its attributes)
about the response which is specific to your app (e.g. session
object). Falcon itself will not interact with this attribute after
it has been initialized.
Note:
**New in 2.0:** the default `context_type` (see below) was
changed from dict to a bare class, and the preferred way to
pass response-specific data is now to set attributes directly
on the `context` object, for example::
resp.context.cache_strategy = 'lru'
context_type (class): Class variable that determines the factory or
type to use for initializing the `context` attribute. By default,
the framework will instantiate bare objects (instances of the bare
:class:`falcon.Context` class). However, you may override this
behavior by creating a custom child class of ``falcon.Response``,
and then passing that new class to `falcon.API()` by way of the
latter's `response_type` parameter.
Note:
When overriding `context_type` with a factory function (as
opposed to a class), the function is called like a method of
the current Response instance. Therefore the first argument is
the Response instance itself (self).
options (dict): Set of global options passed from the API handler.
headers (dict): Copy of all headers set for the response,
sans cookies. Note that a new copy is created and returned each
time this property is referenced.
complete (bool): Set to ``True`` from within a middleware method to
signal to the framework that request processing should be
short-circuited (see also :ref:`Middleware <middleware>`).
"""
__slots__ = (
'body',
'context',
'options',
'status',
'stream',
'_cookies',
'_data',
'_extra_headers',
'_headers',
'_media',
'__dict__',
)
complete = False
# Child classes may override this
context_type = structures.Context
def __init__(self, options=None):
self.status = '200 OK'
self._headers = {}
# NOTE(kgriffs): Collection of additional headers as a list of raw
# tuples, to use in cases where we need more control over setting
# headers and duplicates are allowable or even necessary.
#
# PERF(kgriffs): Save some CPU cycles and a few bytes of RAM by
# only instantiating the list object later on IFF it is needed.
self._extra_headers = None
self.options = options if options else ResponseOptions()
# NOTE(tbug): will be set to a SimpleCookie object
# when cookie is set via set_cookie
self._cookies = None
self.body = None
self.stream = None
self._data = None
self._media = None
self.context = self.context_type()
@property
def data(self):
# NOTE(kgriffs): Test explicitly against None since the
# app may have set it to an empty binary string.
if self._data is not None:
return self._data
# NOTE(kgriffs): Test explicitly against None since the
# app may have set it to an empty string that should still
# be serialized.
if self._media is None:
return None
if not self.content_type:
self.content_type = self.options.default_media_type
handler = self.options.media_handlers.find_by_media_type(
self.content_type,
self.options.default_media_type
)
# NOTE(kgriffs): Set _data to avoid re-serializing if the
# data() property is called multiple times.
self._data = handler.serialize(
self._media,
self.content_type
)
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def headers(self):
return self._headers.copy()
@property
def media(self):
return self._media
@media.setter
def media(self, obj):
self._media = obj
# NOTE(kgriffs): This will be set just-in-time by the data() property,
# rather than serializing immediately. That way, if media() is called
# multiple times we don't waste time serializing objects that will
# just be thrown away.
self._data = None
@property
def stream_len(self):
# NOTE(kgriffs): Provide some additional information by raising the
# error explicitly.
raise AttributeError(_STREAM_LEN_REMOVED_MSG)
@stream_len.setter
def stream_len(self, value):
# NOTE(kgriffs): We explicitly disallow setting the deprecated attribute
# so that apps relying on it do not fail silently.
raise AttributeError(_STREAM_LEN_REMOVED_MSG)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.status)
def set_stream(self, stream, content_length):
"""Convenience method for setting both `stream` and `content_length`.
Although the `stream` and `content_length` properties may be set
directly, using this method ensures `content_length` is not
accidentally neglected when the length of the stream is known in
advance. Using this method is also slightly more performant
as compared to setting the properties individually.
Note:
If the stream length is unknown, you can set `stream`
directly, and ignore `content_length`. In this case, the
WSGI server may choose to use chunked encoding or one
of the other strategies suggested by PEP-3333.
Args:
stream: A readable file-like object.
content_length (int): Length of the stream, used for the
Content-Length header in the response.
"""
self.stream = stream
# PERF(kgriffs): Set directly rather than incur the overhead of
# the self.content_length property.
self._headers['content-length'] = str(content_length)
def set_cookie(self, name, value, expires=None, max_age=None,
domain=None, path=None, secure=None, http_only=True):
"""Set a response cookie.
Note:
This method can be called multiple times to add one or
more cookies to the response.
See Also:
To learn more about setting cookies, see
:ref:`Setting Cookies <setting-cookies>`. The parameters
listed below correspond to those defined in `RFC 6265`_.
Args:
name (str): Cookie name
value (str): Cookie value
Keyword Args:
expires (datetime): Specifies when the cookie should expire.
By default, cookies expire when the user agent exits.
(See also: RFC 6265, Section 4.1.2.1)
max_age (int): Defines the lifetime of the cookie in
seconds. By default, cookies expire when the user agent
exits. If both `max_age` and `expires` are set, the
latter is ignored by the user agent.
Note:
Coercion to ``int`` is attempted if provided with
``float`` or ``str``.
(See also: RFC 6265, Section 4.1.2.2)
domain (str): Restricts the cookie to a specific domain and
any subdomains of that domain. By default, the user
agent will return the cookie only to the origin server.
When overriding this default behavior, the specified
domain must include the origin server. Otherwise, the
user agent will reject the cookie.
(See also: RFC 6265, Section 4.1.2.3)
path (str): Scopes the cookie to the given path plus any
subdirectories under that path (the "/" character is
interpreted as a directory separator). If the cookie
does not specify a path, the user agent defaults to the
path component of the requested URI.
Warning:
User agent interfaces do not always isolate
cookies by path, and so this should not be
considered an effective security measure.
(See also: RFC 6265, Section 4.1.2.4)
secure (bool): Direct the client to only return the cookie
in subsequent requests if they are made over HTTPS
(default: ``True``). This prevents attackers from
reading sensitive cookie data.
Note:
The default value for this argument is normally
``True``, but can be modified by setting
:py:attr:`~.ResponseOptions.secure_cookies_by_default`
via :any:`API.resp_options`.
Warning:
For the `secure` cookie attribute to be effective,
your application will need to enforce HTTPS.
(See also: RFC 6265, Section 4.1.2.5)
http_only (bool): Direct the client to only transfer the
cookie with unscripted HTTP requests
(default: ``True``). This is intended to mitigate some
forms of cross-site scripting.
(See also: RFC 6265, Section 4.1.2.6)
Raises:
KeyError: `name` is not a valid cookie name.
ValueError: `value` is not a valid cookie value.
.. _RFC 6265:
http://tools.ietf.org/html/rfc6265
"""
if not is_ascii_encodable(name):
raise KeyError('"name" is not ascii encodable')
if not is_ascii_encodable(value):
raise ValueError('"value" is not ascii encodable')
value = str(value)
if self._cookies is None:
self._cookies = http_cookies.SimpleCookie()
try:
self._cookies[name] = value
except http_cookies.CookieError as e: # pragma: no cover
# NOTE(tbug): we raise a KeyError here, to avoid leaking
# the CookieError to the user. SimpleCookie (well, BaseCookie)
# only throws CookieError on issues with the cookie key
raise KeyError(str(e))
if expires:
# set Expires on cookie. Format is Wdy, DD Mon YYYY HH:MM:SS GMT
# NOTE(tbug): we never actually need to
# know that GMT is named GMT when formatting cookies.
# It is a function call less to just write "GMT" in the fmt string:
fmt = '%a, %d %b %Y %H:%M:%S GMT'
if expires.tzinfo is None:
# naive
self._cookies[name]['expires'] = expires.strftime(fmt)
else:
# aware
gmt_expires = expires.astimezone(GMT_TIMEZONE)
self._cookies[name]['expires'] = gmt_expires.strftime(fmt)
if max_age:
# RFC 6265 section 5.2.2 says about the max-age value:
# "If the remainder of attribute-value contains a non-DIGIT
# character, ignore the cookie-av."
# That is, RFC-compliant response parsers will ignore the max-age
# attribute if the value contains a dot, as in floating point
# numbers. Therefore, attempt to convert the value to an integer.
self._cookies[name]['max-age'] = int(max_age)
if domain:
self._cookies[name]['domain'] = domain
if path:
self._cookies[name]['path'] = path
if secure is None:
is_secure = self.options.secure_cookies_by_default
else:
is_secure = secure
if is_secure:
self._cookies[name]['secure'] = True
if http_only:
self._cookies[name]['httponly'] = http_only
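    # Usage sketch (not part of the original source): a responder might set a
    # session cookie roughly like this; the cookie name, value, and lifetime
    # are illustrative only.
    #
    #   resp.set_cookie('session-id', token, max_age=3600,
    #                   path='/', secure=True, http_only=True)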
def unset_cookie(self, name):
"""Unset a cookie in the response
Clears the contents of the cookie, and instructs the user
agent to immediately expire its own copy of the cookie.
Warning:
In order to successfully remove a cookie, both the
path and the domain must match the values that were
used when the cookie was created.
"""
if self._cookies is None:
self._cookies = http_cookies.SimpleCookie()
self._cookies[name] = ''
# NOTE(Freezerburn): SimpleCookie apparently special cases the
# expires attribute to automatically use strftime and set the
# time as a delta from the current time. We use -1 here to
# basically tell the browser to immediately expire the cookie,
# thus removing it from future request objects.
self._cookies[name]['expires'] = -1
def get_header(self, name, default=None):
"""Retrieve the raw string value for the given header.
Normally, when a header has multiple values, they will be
returned as a single, comma-delimited string. However, the
Set-Cookie header does not support this format, and so
attempting to retrieve it will raise an error.
Args:
name (str): Header name, case-insensitive. Must be of type ``str``
or ``StringType``, and only character values 0x00 through 0xFF
may be used on platforms that use wide characters.
Keyword Args:
default: Value to return if the header
is not found (default ``None``).
Raises:
ValueError: The value of the 'Set-Cookie' header(s) was requested.
Returns:
str: The value of the specified header if set, or
the default value if not set.
"""
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('Getting Set-Cookie is not currently supported.')
return self._headers.get(name, default)
def set_header(self, name, value):
"""Set a header for this response to a given value.
Warning:
Calling this method overwrites any values already set for this
header. To append an additional value for this header, use
:meth:`~.append_header` instead.
Warning:
This method cannot be used to set cookies; instead, use
:meth:`~.append_header` or :meth:`~.set_cookie`.
Args:
name (str): Header name (case-insensitive). The name may contain
only US-ASCII characters.
value (str): Value for the header. As with the header's name, the
value may contain only US-ASCII characters.
Raises:
ValueError: `name` cannot be ``'Set-Cookie'``.
"""
# NOTE(kgriffs): uwsgi fails with a TypeError if any header
# is not a str, so do the conversion here. It's actually
# faster to not do an isinstance check. str() will encode
# to US-ASCII.
value = str(value)
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('This method cannot be used to set cookies')
self._headers[name] = value
def delete_header(self, name):
"""Delete a header that was previously set for this response.
If the header was not previously set, nothing is done (no error is
raised). Otherwise, all values set for the header will be removed
from the response.
Note that calling this method is equivalent to setting the
corresponding header property (when said property is available) to
``None``. For example::
resp.etag = None
Warning:
This method cannot be used with the Set-Cookie header. Instead,
use :meth:`~.unset_cookie` to remove a cookie and ensure that the
user agent expires its own copy of the data as well.
Args:
name (str): Header name (case-insensitive). The name may
contain only US-ASCII characters.
Raises:
ValueError: `name` cannot be ``'Set-Cookie'``.
"""
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('This method cannot be used to remove cookies')
self._headers.pop(name, None)
def append_header(self, name, value):
"""Set or append a header for this response.
If the header already exists, the new value will normally be appended
to it, delimited by a comma. The notable exception to this rule is
Set-Cookie, in which case a separate header line for each value will be
included in the response.
Note:
While this method can be used to efficiently append raw
Set-Cookie headers to the response, you may find
:py:meth:`~.set_cookie` to be more convenient.
Args:
name (str): Header name (case-insensitive). The name may contain
only US-ASCII characters.
value (str): Value for the header. As with the header's name, the
value may contain only US-ASCII characters.
"""
# NOTE(kgriffs): uwsgi fails with a TypeError if any header
# is not a str, so do the conversion here. It's actually
# faster to not do an isinstance check. str() will encode
# to US-ASCII.
value = str(value)
# NOTE(kgriffs): normalize name by lowercasing it
name = name.lower()
if name == 'set-cookie':
if not self._extra_headers:
self._extra_headers = [(name, value)]
else:
self._extra_headers.append((name, value))
else:
if name in self._headers:
value = self._headers[name] + ', ' + value
self._headers[name] = value
def set_headers(self, headers):
"""Set several headers at once.
This method can be used to set a collection of raw header names and
values all at once.
Warning:
Calling this method overwrites any existing values for the given
header. If a list containing multiple instances of the same header
is provided, only the last value will be used. To add multiple
values to the response for a given header, see
:meth:`~.append_header`.
Warning:
This method cannot be used to set cookies; instead, use
:meth:`~.append_header` or :meth:`~.set_cookie`.
Args:
headers (dict or list): A dictionary of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both
*name* and *value* must be of type ``str`` and
contain only US-ASCII characters.
Note:
Falcon can process a list of tuples slightly faster
than a dict.
Raises:
ValueError: `headers` was not a ``dict`` or ``list`` of ``tuple``.
"""
if isinstance(headers, dict):
headers = headers.items()
# NOTE(kgriffs): We can't use dict.update because we have to
# normalize the header names.
_headers = self._headers
for name, value in headers:
# NOTE(kgriffs): uwsgi fails with a TypeError if any header
# is not a str, so do the conversion here. It's actually
# faster to not do an isinstance check. str() will encode
# to US-ASCII.
value = str(value)
name = name.lower()
if name == 'set-cookie':
raise HeaderNotSupported('This method cannot be used to set cookies')
_headers[name] = value
def add_link(self, target, rel, title=None, title_star=None,
anchor=None, hreflang=None, type_hint=None):
"""Add a link header to the response.
(See also: RFC 5988, Section 1)
Note:
Calling this method repeatedly will cause each link to be
appended to the Link header value, separated by commas.
Note:
So-called "link-extension" elements, as defined by RFC 5988,
are not yet supported. See also Issue #288.
Args:
target (str): Target IRI for the resource identified by the
link. Will be converted to a URI, if necessary, per
RFC 3987, Section 3.1.
rel (str): Relation type of the link, such as "next" or
"bookmark".
(See also: http://www.iana.org/assignments/link-relations/link-relations.xhtml)
Keyword Args:
title (str): Human-readable label for the destination of
the link (default ``None``). If the title includes non-ASCII
characters, you will need to use `title_star` instead, or
provide both a US-ASCII version using `title` and a
Unicode version using `title_star`.
title_star (tuple of str): Localized title describing the
destination of the link (default ``None``). The value must be a
two-member tuple in the form of (*language-tag*, *text*),
where *language-tag* is a standard language identifier as
defined in RFC 5646, Section 2.1, and *text* is a Unicode
string.
Note:
*language-tag* may be an empty string, in which case the
client will assume the language from the general context
of the current request.
Note:
*text* will always be encoded as UTF-8.
anchor (str): Override the context IRI with a different URI
(default None). By default, the context IRI for the link is
simply the IRI of the requested resource. The value
provided may be a relative URI.
hreflang (str or iterable): Either a single *language-tag*, or
a ``list`` or ``tuple`` of such tags to provide a hint to the
client as to the language of the result of following the link.
A list of tags may be given in order to indicate to the
client that the target resource is available in multiple
languages.
type_hint(str): Provides a hint as to the media type of the
result of dereferencing the link (default ``None``). As noted
in RFC 5988, this is only a hint and does not override the
Content-Type header returned when the link is followed.
"""
        # PERF(kgriffs): Heuristic to detect possibility of an extension
# relation type, in which case it will be a URL that may contain
# reserved characters. Otherwise, don't waste time running the
# string through uri.encode
#
# Example values for rel:
#
# "next"
# "http://example.com/ext-type"
# "https://example.com/ext-type"
# "alternate http://example.com/ext-type"
# "http://example.com/ext-type alternate"
#
if '//' in rel:
if ' ' in rel:
rel = ('"' +
' '.join([uri_encode(r) for r in rel.split()]) +
'"')
else:
rel = '"' + uri_encode(rel) + '"'
value = '<' + uri_encode(target) + '>; rel=' + rel
if title is not None:
value += '; title="' + title + '"'
if title_star is not None:
value += ("; title*=UTF-8'" + title_star[0] + "'" +
uri_encode_value(title_star[1]))
if type_hint is not None:
value += '; type="' + type_hint + '"'
if hreflang is not None:
if isinstance(hreflang, str):
value += '; hreflang=' + hreflang
else:
value += '; '
value += '; '.join(['hreflang=' + lang for lang in hreflang])
if anchor is not None:
value += '; anchor="' + uri_encode(anchor) + '"'
_headers = self._headers
if 'link' in _headers:
_headers['link'] += ', ' + value
else:
_headers['link'] = value
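    # Usage sketch (not part of the original source): calling add_link() twice
    # appends both links to a single Link header (paths illustrative only):
    #
    #   resp.add_link('/things?page=2', 'next')
    #   resp.add_link('/things?page=1', 'prev')
    #   # -> Link: </things?page=2>; rel=next, </things?page=1>; rel=prev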
cache_control = header_property(
'Cache-Control',
"""Set the Cache-Control header.
Used to set a list of cache directives to use as the value of the
Cache-Control header. The list will be joined with ", " to produce
the value for the header.
""",
format_header_value_list)
content_location = header_property(
'Content-Location',
"""Set the Content-Location header.
This value will be URI encoded per RFC 3986. If the value that is
being set is already URI encoded it should be decoded first or the
header should be set manually using the set_header method.
""",
uri_encode)
content_length = header_property(
'Content-Length',
"""Set the Content-Length header.
This property can be used for responding to HEAD requests when you
aren't actually providing the response body, or when streaming the
response. If either the `body` property or the `data` property is set
on the response, the framework will force Content-Length to be the
length of the given body bytes. Therefore, it is only necessary to
manually set the content length when those properties are not used.
Note:
In cases where the response content is a stream (readable
file-like object), Falcon will not supply a Content-Length header
to the WSGI server unless `content_length` is explicitly set.
Consequently, the server may choose to use chunked encoding or one of the
other strategies suggested by PEP-3333.
""",
)
content_range = header_property(
'Content-Range',
"""A tuple to use in constructing a value for the Content-Range header.
The tuple has the form (*start*, *end*, *length*, [*unit*]), where *start* and
*end* designate the range (inclusive), and *length* is the
total length, or '\\*' if unknown. You may pass ``int``'s for
these numbers (no need to convert to ``str`` beforehand). The optional value
*unit* describes the range unit and defaults to 'bytes'
Note:
You only need to use the alternate form, 'bytes \\*/1234', for
responses that use the status '416 Range Not Satisfiable'. In this
case, raising ``falcon.HTTPRangeNotSatisfiable`` will do the right
thing.
(See also: RFC 7233, Section 4.2)
""",
format_range)
content_type = header_property(
'Content-Type',
"""Sets the Content-Type header.
The ``falcon`` module provides a number of constants for
common media types, including ``falcon.MEDIA_JSON``,
``falcon.MEDIA_MSGPACK``, ``falcon.MEDIA_YAML``,
``falcon.MEDIA_XML``, ``falcon.MEDIA_HTML``,
``falcon.MEDIA_JS``, ``falcon.MEDIA_TEXT``,
``falcon.MEDIA_JPEG``, ``falcon.MEDIA_PNG``,
and ``falcon.MEDIA_GIF``.
""")
downloadable_as = header_property(
'Content-Disposition',
"""Set the Content-Disposition header using the given filename.
The value will be used for the *filename* directive. For example,
given ``'report.pdf'``, the Content-Disposition header would be set
to: ``'attachment; filename="report.pdf"'``.
""",
format_content_disposition)
etag = header_property(
'ETag',
"""Set the ETag header.
The ETag header will be wrapped with double quotes ``"value"`` in case
the user didn't pass it.
""",
format_etag_header)
expires = header_property(
'Expires',
"""Set the Expires header. Set to a ``datetime`` (UTC) instance.
Note:
Falcon will format the ``datetime`` as an HTTP date string.
""",
dt_to_http)
last_modified = header_property(
'Last-Modified',
"""Set the Last-Modified header. Set to a ``datetime`` (UTC) instance.
Note:
Falcon will format the ``datetime`` as an HTTP date string.
""",
dt_to_http)
location = header_property(
'Location',
"""Set the Location header.
This value will be URI encoded per RFC 3986. If the value that is
being set is already URI encoded it should be decoded first or the
header should be set manually using the set_header method.
""",
uri_encode)
retry_after = header_property(
'Retry-After',
"""Set the Retry-After header.
The expected value is an integral number of seconds to use as the
value for the header. The HTTP-date syntax is not supported.
""",
str)
vary = header_property(
'Vary',
"""Value to use for the Vary header.
Set this property to an iterable of header names. For a single
asterisk or field value, simply pass a single-element ``list``
or ``tuple``.
The "Vary" header field in a response describes what parts of
a request message, aside from the method, Host header field,
and request target, might influence the origin server's
process for selecting and representing this response. The
value consists of either a single asterisk ("*") or a list of
header field names (case-insensitive).
(See also: RFC 7231, Section 7.1.4)
""",
format_header_value_list)
accept_ranges = header_property(
'Accept-Ranges',
"""Set the Accept-Ranges header.
The Accept-Ranges header field indicates to the client which
range units are supported (e.g. "bytes") for the target
resource.
If range requests are not supported for the target resource,
the header may be set to "none" to advise the client not to
attempt any such requests.
Note:
"none" is the literal string, not Python's built-in ``None``
type.
""")
def _set_media_type(self, media_type=None):
"""Wrapper around set_header to set a content-type.
Args:
media_type: Media type to use for the Content-Type
header.
"""
# PERF(kgriffs): Using "in" like this is faster than dict.setdefault()
# in most cases, except on PyPy where it is only a fraction of a
# nanosecond slower. Last tested on Python versions 3.5-3.7.
if media_type is not None and 'content-type' not in self._headers:
self._headers['content-type'] = media_type
def _wsgi_headers(self, media_type=None):
"""Convert headers into the format expected by WSGI servers.
Args:
media_type: Default media type to use for the Content-Type
header if the header was not set explicitly (default ``None``).
"""
headers = self._headers
# PERF(vytas): uglier inline version of Response._set_media_type
if media_type is not None and 'content-type' not in headers:
headers['content-type'] = media_type
items = list(headers.items())
if self._extra_headers:
items += self._extra_headers
# NOTE(kgriffs): It is important to append these after self._extra_headers
# in case the latter contains Set-Cookie headers that should be
# overridden by a call to unset_cookie().
if self._cookies is not None:
# PERF(tbug):
# The below implementation is ~23% faster than
# the alternative:
#
# self._cookies.output().split("\\r\\n")
#
# Even without the .split("\\r\\n"), the below
# is still ~17% faster, so don't use .output()
items += [('set-cookie', c.OutputString())
for c in self._cookies.values()]
return items
class ResponseOptions:
"""Defines a set of configurable response options.
An instance of this class is exposed via :any:`API.resp_options` for
configuring certain :py:class:`~.Response` behaviors.
Attributes:
secure_cookies_by_default (bool): Set to ``False`` in development
environments to make the `secure` attribute for all cookies
default to ``False``. This can make testing easier by
not requiring HTTPS. Note, however, that this setting can
be overridden via `set_cookie()`'s `secure` kwarg.
default_media_type (str): The default Internet media type (RFC 2046) to
            use when serializing a response. This value is normally set to the
media type provided when a :class:`falcon.API` is initialized;
however, if created independently, this will default to the
``DEFAULT_MEDIA_TYPE`` specified by Falcon.
media_handlers (Handlers): A dict-like object that allows you to
configure the media-types that you would like to handle.
By default, a handler is provided for the ``application/json``
media type.
static_media_types (dict): A mapping of dot-prefixed file extensions to
Internet media types (RFC 2046). Defaults to ``mimetypes.types_map``
after calling ``mimetypes.init()``.
"""
__slots__ = (
'secure_cookies_by_default',
'default_media_type',
'media_handlers',
'static_media_types',
)
def __init__(self):
self.secure_cookies_by_default = False
self.default_media_type = DEFAULT_MEDIA_TYPE
self.media_handlers = Handlers()
mimetypes.init()
self.static_media_types = mimetypes.types_map
```
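The header properties above all funnel through `set_header` with a formatting callable. A minimal usage sketch follows; the resource class, route, and values are illustrative only and not part of the Falcon source:
```python
import falcon
from datetime import datetime

class ReportResource:
    """Illustrative resource exercising the header properties defined above."""
    def on_get(self, req, resp):
        resp.content_type = falcon.MEDIA_JSON           # Content-Type
        resp.downloadable_as = 'report.json'            # Content-Disposition: attachment; filename="report.json"
        resp.etag = 'abc123'                            # emitted as "abc123" if not already quoted
        resp.last_modified = datetime.utcnow()          # formatted as an HTTP date string
        resp.retry_after = 120                          # integral seconds, stringified into the header
        resp.vary = ('accept-encoding', 'user-agent')   # joined into a comma-separated field list
        resp.media = {'status': 'ok'}

app = falcon.API()
app.add_route('/report', ReportResource())
```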
#### File: falcon/tests/test_response_context.py
```python
import pytest
from falcon import Response
class TestResponseContext:
def test_default_response_context(self):
resp = Response()
resp.context.hello = 'World!'
assert resp.context.hello == 'World!'
assert resp.context['hello'] == 'World!'
resp.context['note'] = 'Default Response.context_type used to be dict.'
assert 'note' in resp.context
assert hasattr(resp.context, 'note')
assert resp.context.get('note') == resp.context['note']
def test_custom_response_context(self):
class MyCustomContextType:
pass
class MyCustomResponse(Response):
context_type = MyCustomContextType
resp = MyCustomResponse()
assert isinstance(resp.context, MyCustomContextType)
def test_custom_response_context_failure(self):
class MyCustomResponse(Response):
context_type = False
with pytest.raises(TypeError):
MyCustomResponse()
def test_custom_response_context_factory(self):
def create_context(resp):
return {'resp': resp}
class MyCustomResponse(Response):
context_type = create_context
resp = MyCustomResponse()
assert isinstance(resp.context, dict)
assert resp.context['resp'] is resp
```
#### File: falcon/tests/test_slots.py
```python
import pytest
from falcon import Request, Response
import falcon.testing as testing
class TestSlots:
def test_slots_request(self):
env = testing.create_environ()
req = Request(env)
try:
req.doesnt = 'exist'
except AttributeError:
pytest.fail('Unable to add additional variables dynamically')
def test_slots_response(self):
resp = Response()
try:
resp.doesnt = 'exist'
except AttributeError:
pytest.fail('Unable to add additional variables dynamically')
``` |
{
"source": "joesphramkishun/hd-example",
"score": 2
} |
#### File: app/components/config.py
```python
import os
from datetime import datetime
import click
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import Column, Sequence, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from werkzeug.security import generate_password_hash, check_password_hash
class Config(object):
def __init__(self):
self._site_version = os.getenv('SITE_VERSION')
self._ROGUE_SHOPS_USER = os.getenv('ROGUE_SHOPS_USER')
self._ROGUE_SHOPS_PASS = os.getenv('ROGUE_SHOPS_PASS')
self._store_hash = os.getenv('store_hash')
self._X_Auth_Client = os.getenv('X-Auth-Client')
self._X_Auth_Token = os.getenv('X-Auth-Token')
self._DOMAIN = os.getenv('DOMAIN')
self._SEND_GRID_KEY = os.getenv('SEND_GRID_KEY')
self._PAYMENT_METHOD = os.getenv('PAYMENT_METHOD')
@property
def site_version(self):
return self._site_version
@property
def database_user(self):
return self._ROGUE_SHOPS_USER
@property
def database_pass(self):
return self._ROGUE_SHOPS_PASS
@property
def store_hash(self):
return self._store_hash
@property
def auth_client(self):
return self._X_Auth_Client
@property
def auth_token(self):
return self._X_Auth_Token
@property
def domain(self):
return self._DOMAIN
@property
def send_grid_key(self):
return self._SEND_GRID_KEY
@property
def payment_method(self):
return self._PAYMENT_METHOD
```
#### File: app/resources/SendGridSDK.py
```python
from app.components.config import Config
import sendgrid
import re
def send_sg_email(t, f, subj, msg):
sg = sendgrid.SendGridClient(Config().send_grid_key)
message = sendgrid.Mail()
message.add_to(t)
message.set_subject(subj)
message.set_text(msg)
message.set_from(f)
status, msg = sg.send(message)
``` |
{
"source": "joesponde/ymodem",
"score": 3
} |
#### File: joesponde/ymodem/ymodem_sender.py
```python
import os
import sys
import time
import random
import serial
from YModem import YModem
def main(argv):
serial_io = serial.Serial()
try:
filename = argv[0]
serial_io.port = argv[1]
serial_io.baudrate = argv[2]
except Exception as e:
        print('Usage: python ymodem_sender.py <filename> <device> <baudrate>')
        return
serial_io.parity = "N"
serial_io.bytesize = 8
serial_io.stopbits = 1
serial_io.timeout = 2
try:
serial_io.open()
except Exception as e:
raise Exception("Failed to open serial port!")
def sender_getc(size):
return serial_io.read(size) or None
def sender_putc(data, timeout=15):
return serial_io.write(data)
os.chdir(sys.path[0])
file_path = os.path.abspath(filename)
sender = YModem(sender_getc, sender_putc)
sent = sender.send_file(file_path)
serial_io.close()
if __name__ == '__main__':
main(sys.argv[1:])
``` |
{
"source": "JoesSattes/Thai-Biaffine-Dependency-Parsing",
"score": 3
} |
#### File: JoesSattes/Thai-Biaffine-Dependency-Parsing/optimizer.py
```python
import torch
class NoamOpt:
"""Optim wrapper that implements rate."""
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"""Update parameters and rate."""
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def zero_grad(self):
"""Delegate zero grad to underlying optimizer."""
self.optimizer.zero_grad()
def rate(self, step = None):
"""Implement `lrate` above."""
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def get_std_transformer_opt(args, model):
return NoamOpt(args.d_model, 2, 4000,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
```
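`rate()` implements the Noam schedule, lr = factor * d_model**-0.5 * min(step**-0.5, step * warmup**-1.5): a linear warm-up for `warmup` steps followed by inverse-square-root decay. A small sketch of that shape, assuming the `NoamOpt` class from the file above is importable and using a throwaway linear model just to supply parameters:
```python
import torch
import torch.nn as nn
from optimizer import NoamOpt  # the class defined above

model = nn.Linear(512, 512)  # stand-in model: only its parameters matter here
opt = NoamOpt(model_size=512, factor=2, warmup=4000,
              optimizer=torch.optim.Adam(model.parameters(), lr=0,
                                         betas=(0.9, 0.98), eps=1e-9))
# Learning rate climbs linearly until `warmup`, then decays as step**-0.5.
for step in (1, 1000, 4000, 16000, 100000):
    print(step, round(opt.rate(step), 6))
```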
#### File: JoesSattes/Thai-Biaffine-Dependency-Parsing/predict.py
```python
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
from data import Dictionary, Corpus, PAD_INDEX
from mst import mst
import os
from train import evaluate_predict, arc_accuracy, lab_accuracy
import inspect
def plot(args, S_arc, heads):
fig, ax = plt.subplots()
# Make a 0/1 gold adjacency matrix.
n = heads.size(1)
G = np.zeros((n, n))
heads = heads.squeeze().data.numpy()
G[heads, np.arange(n)] = 1.
im = ax.imshow(G, vmin=0, vmax=1)
fig.colorbar(im)
plt.savefig('img'+"/"+args.data+'/gold.pdf')
plt.cla()
# Plot the predicted adjacency matrix
A = F.softmax(S_arc.squeeze(0), dim=0)
fig, ax = plt.subplots()
im = ax.imshow(A.data.numpy(), vmin=0, vmax=1)
fig.colorbar(im)
plt.savefig('img'+"/"+args.data+'/a.pdf')
plt.cla()
plt.clf()
def predict(model, words, tags):
assert type(words) == type(tags)
if type(words) == type(tags) == list:
# Convert the lists into input for the PyTorch model.
words = Variable(torch.LongTensor([words]))
tags = Variable(torch.LongTensor([tags]))
    # Disable dropout.
model.eval()
# Predict arc and label score matrices.
S_arc, S_lab = model(words=words, tags=tags)
# Predict heads
S = S_arc[0].data.numpy()
heads = mst(S)
# Predict labels
S_lab = S_lab[0]
select = torch.LongTensor(heads).unsqueeze(0).expand(S_lab.size(0), -1)
select = Variable(select)
selected = torch.gather(S_lab, 1, select.unsqueeze(1)).squeeze(1)
_, labels = selected.max(dim=0)
labels = labels.data.numpy()
return heads, labels
def predict_a(model, words, tags, oriheads, orilabels, arc_acc_ls, lab_acc_ls):
assert type(words) == type(tags)
if type(words) == type(tags) == list:
# Convert the lists into input for the PyTorch model.
words = Variable(torch.LongTensor([words]))
tags = Variable(torch.LongTensor([tags]))
arc_acc, lab_acc = 0, 0
    # Disable dropout.
model.eval()
# Predict arc and label score matrices.
S_arc1, S_lab1 = model(words=words, tags=tags)
# Predict heads
S = S_arc1[0].data.numpy()
heads = mst(S)
# Predict labels
S_lab = S_lab1[0]
select = torch.LongTensor(heads).unsqueeze(0).expand(S_lab.size(0), -1)
select = Variable(select)
selected = torch.gather(S_lab, 1, select.unsqueeze(1)).squeeze(1)
_, labels = selected.max(dim=0)
labels = labels.data.numpy()
arc_acc += arc_accuracy(S_arc1, oriheads)
lab_acc += lab_accuracy(S_lab1, oriheads, orilabels)
arc_acc_ls.append(arc_acc)
lab_acc_ls.append(lab_acc)
return heads, labels, arc_acc_ls, lab_acc_ls
def predict_all(args, model, test_batches):
# heads_list, labels_list = [], []
print("Start Predict & Evaluate in {} ...".format('Test'))
model.eval()
# test_batches = batch
# test_batches = corpus.test.batches(256, length_ordered=True)
arc_acc, lab_acc = [], []
arc_word, arc_pos, arc_pred, arc_ans, lab_pred, lab_ans = [], [], [], [], [], []
# k_out = 0
# print('Test BACTH: '+str(test_batches)
# for i in test_batches:
# print(i)
for k, batch in enumerate(test_batches, 1):
words, tags, heads, labels = batch
if args.cuda:
words, tags, heads, labels = words.cuda(), tags.cuda(), heads.cuda(), labels.cuda()
arcpred, labpred, arc_acc, lab_acc = predict_a(model, words, tags, heads, labels, arc_acc, lab_acc)
# S_arc, S_lab = model(words=words, tags=tags)
# print(words, S_arc)
# _, arcpred = S_arc.max(dim=-2)
arc_pred.append(arcpred)
arc_ans.append(heads)
arc_word.append(words)
arc_pos.append(tags)
lab_ans.append(labels)
# select = torch.LongTensor(mst(S_arc[0].data.numpy())).unsqueeze(0).expand(S_lab.size(0), -1)
# select = Variable(select)
# selected = torch.gather(S_lab, 1, select.unsqueeze(1)).squeeze(1)
# _, labpred = selected.max(dim=0)
lab_pred.append(labpred)
# arc_acc += arc_accuracy(S_arc, heads)
# lab_acc += lab_accuracy(S_lab, heads, labels)
# k_out += k
# print(k_out)
print(k)
print(arc_pred)
print(arc_ans[0].data[0])
print(arc_acc, lab_acc)
arc_acc[0] /= k
lab_acc[0] /= k
# heads, labels = predict(model, words, tags)
# heads_list.append(heads)
# labels_list.append(labels)
return arc_acc, lab_acc, arc_pred, lab_pred, arc_ans, lab_ans
def predict_batch(S_arc, S_lab, tags):
# Predict heads
S = S_arc.data.numpy()
heads = mst(S)
# Predict labels
select = torch.LongTensor(heads).unsqueeze(0).expand(S_lab.size(0), -1)
select = Variable(select)
selected = torch.gather(S_lab, 1, select.unsqueeze(1)).squeeze(1)
_, labels = selected.max(dim=0)
labels = labels.data.numpy()
return heads, labels
def predict_define(args):
# print(args)
data_path = args.data
vocab_path = args.vocab
model_path = args.checkpoints
# data_path = 'data/ud/UD_English-EWT'
# vocab_path = 'vocab/train'
# model_path = 'checkpoints/enmodel.pt'
corpus = Corpus(data_path=data_path, vocab_path=vocab_path)
index2word = corpus.dictionary.i2w
index2pos = corpus.dictionary.i2t
index2label = corpus.dictionary.i2l
model = torch.load(model_path)
print(model)
batches = corpus.test.batches(1, shuffle=False)
# print(index2word[0])
# print([(step, batch) for step, batch in enumerate(batches, 1)])
word_all, pos_all, head_all, label_all, label_predall = [], [], [], [], []
for i in batches:
# print(i)
# words, tags, heads, labels = next(batches)
words, tags, heads, labels = i
# print(words, tags, heads, labels)
S_arc, S_lab = model(words=words, tags=tags)
args.cuda = torch.cuda.is_available()
# arc_acc, lab_acc, _, _, _, _ = evaluate_predict(args, model, corpus)
plot(args, S_arc, heads)
heads_pred, labels_pred = predict(model, words, tags)
word_data, pos_data, label_data, label_pred = [], [], [], []
for i in words[0].data.numpy():
word_data.append(index2word[i])
for j in tags[0].data.numpy():
pos_data.append(index2pos[j])
for k in labels[0].data.numpy():
label_data.append(index2label[k])
for l in labels_pred:
label_pred.append(index2label[l])
print("Word: ", word_data, '\n', 'POS :', pos_data)
print("Head Pred: ", heads_pred, '\n', 'Head Data :', heads[0].data.numpy())
print("Label Pred: ", label_pred, '\n', 'Label Data :', label_data)
word_all.append(word_data)
pos_all.append(pos_data)
head_all.append(heads_pred)
label_all.append(label_data)
label_predall.append(label_pred)
# break
arc_acc, lab_acc, _, _, _, _ = evaluate_predict(args, model, corpus)
print('Arc Accuracy: {} , Label Accuracy: {}'.format(arc_acc, lab_acc))
to_conllu(word_all, pos_all, head_all, label_predall)
def to_conllu(word_sen, pos_sen, head_sen, label_predsen):
for i in range(len(word_sen)):
for idx, word in enumerate(word_sen[i]):
# print(idx)
print('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}'.format(idx, word, '_', pos_sen[i][idx], '_', '_', head_sen[i][idx], label_predsen[i][idx], '_', '_'))
print('\n')
print('\n')
# if __name__ == '__main__':
# # parser = argparse.ArgumentParser()
# # parser.add_argument('--data', default='~/data/ptb-stanford')
# # parser.add_argument('--out', default='vocab')
# # args = parser.parse_args()
# # main(args)
# data_path = 'data/ud/UD_English-EWT'
# vocab_path = 'vocab/train'
# model_path = 'checkpoints/enmodel.pt'
# main(args)
```
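After `mst` picks a head for every token, the label is chosen by gathering, for each dependent, the label-score column of its predicted head and taking the argmax over labels. A toy sketch of just that gather/argmax step on random tensors (no trained model or `mst` call involved):
```python
import torch

n_labels, sent_len = 4, 6
S_lab = torch.randn(n_labels, sent_len, sent_len)  # [label, head, dependent] scores for one sentence
heads = torch.randint(0, sent_len, (sent_len,))    # pretend these came from mst()
# For every dependent, pick the score column of its predicted head...
select = heads.unsqueeze(0).expand(S_lab.size(0), -1)                # [n_labels, sent_len]
selected = torch.gather(S_lab, 1, select.unsqueeze(1)).squeeze(1)    # [n_labels, sent_len]
# ...then take the best-scoring label per dependent.
_, labels = selected.max(dim=0)
print(heads.tolist(), labels.tolist())
```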
#### File: JoesSattes/Thai-Biaffine-Dependency-Parsing/train.py
```python
import os
import time
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from model import PAD_INDEX
from data import Corpus
from model import make_model
from optimizer import get_std_transformer_opt
from util import Timer, write_losses
from earlystop import EarlyStopping
### In newer PyTorch versions tensor.data[index] is not available; use tensor.data.item() instead
LOSSES = dict(train_loss=[], train_acc=[], val_acc=[], test_acc=[])
def arc_accuracy(S_arc, heads, eps=1e-10):
"""Accuracy of the arc predictions based on gready head prediction."""
_, pred = S_arc.max(dim=-2)
mask = (heads != PAD_INDEX).float()
accuracy = torch.sum((pred == heads).float() * mask, dim=-1) / (torch.sum(mask, dim=-1) + eps)
return torch.mean(accuracy).data.item()
def lab_accuracy(S_lab, heads, labels, eps=1e-10):
"""Accuracy of label predictions on the gold arcs."""
_, pred = S_lab.max(dim=1)
pred = torch.gather(pred, 1, heads.unsqueeze(1)).squeeze(1)
mask = (heads != PAD_INDEX).float()
accuracy = torch.sum((pred == labels).float() * mask, dim=-1) / (torch.sum(mask, dim=-1) + eps)
return torch.mean(accuracy).data.item()
def evaluate(args, model, corpus):
"""Evaluate the arc and label accuracy of the model on the development corpus."""
# Turn on evaluation mode to disable dropout.
print("Start Evaluate in {} ...".format('Dev'))
model.eval()
dev_batches = corpus.dev.batches(256, length_ordered=True)
arc_acc, lab_acc = 0, 0
for k, batch in enumerate(dev_batches, 1):
words, tags, heads, labels = batch
if args.cuda:
words, tags, heads, labels = words.cuda(), tags.cuda(), heads.cuda(), labels.cuda()
S_arc, S_lab = model(words=words, tags=tags)
arc_acc += arc_accuracy(S_arc, heads)
lab_acc += lab_accuracy(S_lab, heads, labels)
arc_acc /= k
lab_acc /= k
return arc_acc, lab_acc
def evaluate_predict(args, model, corpus):
"""Evaluate the arc and label accuracy of the model on the test corpus."""
# Turn on evaluation mode to disable dropout.
print("Start Evaluate in {} ...".format('Test'))
model.eval()
test_batches = corpus.test.batches(256, length_ordered=True)
arc_acc, lab_acc = 0, 0
arc_word, arc_pos, arc_pred, arc_ans = [], [], [], []
# k_out = 0
for k, batch in enumerate(test_batches, 1):
words, tags, heads, labels = batch
if args.cuda:
words, tags, heads, labels = words.cuda(), tags.cuda(), heads.cuda(), labels.cuda()
S_arc, S_lab = model(words=words, tags=tags)
# print(words, S_arc)
_, pred = S_arc.max(dim=-2)
arc_pred.append(pred)
arc_ans.append(heads)
arc_word.append(words)
arc_pos.append(tags)
arc_acc += arc_accuracy(S_arc, heads)
lab_acc += lab_accuracy(S_lab, heads, labels)
# k_out += k
# print(k_out)
# print(arc_pred)
# print(arc_ans)
arc_acc /= k
lab_acc /= k
return arc_acc, lab_acc, arc_word, arc_pos, arc_pred, arc_ans
class SimpleLossCompute:
"""A simple loss compute and train function on one device."""
def __init__(self, model, optimizer):
self.model = model
self.optimizer = optimizer
def __call__(self, words, tags, heads, labels):
# Forward pass.
S_arc, S_lab = self.model(words=words, tags=tags)
# Compute loss.
arc_loss = self.model.arc_loss(S_arc, heads)
lab_loss = self.model.lab_loss(S_lab, heads, labels)
loss = arc_loss + lab_loss
# print("Tensor : "+str(arc_loss.data.item()), lab_loss, loss)
# Update parameters.
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
loss_dict = dict(loss=loss.data.item(), arc_loss=arc_loss.data.item(), lab_loss=lab_loss.data.item())
return S_arc, S_lab, loss_dict
class MultiGPULossCompute:
"""A multi-gpu loss compute and train function.
Only difference with SimpleLossCompute is we need to access loss
through model.module.
"""
def __init__(self, model, optimizer, devices, output_device=None):
self.model = model
self.optimizer = optimizer
self.devices = devices
self.output_device = output_device if output_device is not None else devices[0]
def __call__(self, words, tags, heads, labels):
# Forward pass.
S_arc, S_lab = self.model(words=words, tags=tags)
# Compute loss.
arc_loss = self.model.module.arc_loss(S_arc, heads)
lab_loss = self.model.module.lab_loss(S_lab, heads, labels)
loss = arc_loss + lab_loss
# Update parameters.
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
loss_dict = dict(loss=loss.data.item(), arc_loss=arc_loss.data.item(), lab_loss=lab_loss.data.item())
return S_arc, S_lab, loss_dict
def run_epoch(args, model, corpus, train_step):
model.train()
nbatches = len(corpus.train.words) // args.batch_size
start_time = time.time()
# Get a new set of shuffled training batches.
# print(args.batch_size, length_ordered=args.disable_length_ordered)
# print('joe'+str(args.batch_size),corpus.train.words)
train_batches = corpus.train.batches(args.batch_size, length_ordered=args.disable_length_ordered)
ntokens = 0
for step, batch in enumerate(train_batches, 1):
words, tags, heads, labels = batch
if args.cuda:
words, tags, heads, labels = words.cuda(), tags.cuda(), heads.cuda(), labels.cuda()
S_arc, S_lab, loss_dict = train_step(words, tags, heads, labels)
# es = EarlyStopping(patience=2)
# if es.step(loss_dict['loss']):
# print('Es Stop!!!!')
# break
ntokens += words.size(0) * words.size(1)
LOSSES['train_loss'].append(loss_dict['loss'])
print("Epoch (Step, Print): "+str(step)+", "+str(args.print_every))
if step % args.print_every == 0:
arc_train_acc = arc_accuracy(S_arc, heads)
lab_train_acc = lab_accuracy(S_lab, heads, labels)
LOSSES['train_acc'].append([arc_train_acc, lab_train_acc])
print(
'| Step {:5d}/{:5d} ({:.0f}%)| Avg loss {:3.4f} | Arc acc {:4.2f}% '
'| Label acc {:4.2f}% | {:4.0f} tokens/sec |'.format(
step,
nbatches,
100*step/nbatches,
np.mean(LOSSES['train_loss'][-args.print_every:]),
100*arc_train_acc,
100*lab_train_acc,
ntokens/(time.time() - start_time)),
)
def train(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
args.cuda = torch.cuda.is_available()
print('Using cuda: {}'.format(args.cuda))
# Initialize the data, model, and optimizer.
corpus = Corpus(data_path=args.data, vocab_path=args.vocab, char=args.use_chars)
model = make_model(
args,
word_vocab_size=len(corpus.dictionary.w2i),
tag_vocab_size=len(corpus.dictionary.t2i),
num_labels=len(corpus.dictionary.l2i)
)
print('Embedding parameters: {:,}'.format(model.embedding.num_parameters))
print('Encoder parameters: {:,}'.format(model.encoder.num_parameters))
print('Total model parameters: {:,}'.format(model.num_parameters))
if args.cuda:
model.cuda()
if args.encoder == 'transformer':
optimizer = get_std_transformer_opt(args, model)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
device_count = torch.cuda.device_count()
if args.multi_gpu:
devices = list(range(device_count))
model = nn.DataParallel(model, device_ids=devices)
train_step = MultiGPULossCompute(model, optimizer, devices)
print('Training on {} GPUs: {}.'.format(device_count, devices))
else:
train_step = SimpleLossCompute(model, optimizer)
            print('Training on 1 device out of {} available.'.format(device_count))
else:
train_step = SimpleLossCompute(model, optimizer)
timer = Timer()
best_val_acc = 0.
best_epoch = 0
print('Start of training..')
try:
for epoch in range(1, args.epochs+1):
run_epoch(args, model, corpus, train_step)
# Evaluate model on validation set.
# TODO: replace this with a UAS and LAS eval instead of this proxy
arc_val_acc, lab_val_acc = evaluate(args, model, corpus)
LOSSES['val_acc'].append([arc_val_acc, lab_val_acc])
# Save model if it is the best so far.
if arc_val_acc > best_val_acc:
torch.save(model, args.checkpoints)
best_val_acc = arc_val_acc
best_epoch = epoch
print("Train Acc : "+str(LOSSES['train_acc']))
write_losses(LOSSES['train_loss'], LOSSES['train_acc'], LOSSES['val_acc'], args.logdir)
# End epoch with some useful info in the terminal.
print('-' * 89)
print(
'| End of epoch {:3d}/{} | Time {:5.2f}s | Valid accuracy {:3.2f}% |'
' Best accuracy {:3.2f}% (epoch {:3d}) |'.format(
epoch,
args.epochs,
timer.elapsed(),
100*arc_val_acc,
100*best_val_acc,
best_epoch)
)
print('-' * 89)
except KeyboardInterrupt:
print()
print('-' * 89)
print('Exiting from training early')
write_losses(LOSSES['train_loss'], LOSSES['train_acc'], LOSSES['val_acc'], args.logdir)
arc_val_acc, lab_val_acc = evaluate(args, model, corpus)
if arc_val_acc > best_val_acc:
torch.save(model, args.checkpoints)
best_val_acc = arc_val_acc
best_epoch = epoch
print('=' * 89)
print('| End of training | Best validation accuracy {:3.2f} (epoch {}) |'.format(
100*best_val_acc, best_epoch))
print('=' * 89)
```
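Both accuracy helpers mask out padding positions before averaging, so padded tokens cannot inflate the score. A small hand-checkable sketch of that masking, assuming `PAD_INDEX == 0` (the real constant is imported from `model` above):
```python
import torch

PAD_INDEX = 0  # assumed value; the real constant lives in model.py

def masked_arc_accuracy(pred, heads, eps=1e-10):
    """Fraction of non-padding tokens whose predicted head matches the gold head."""
    mask = (heads != PAD_INDEX).float()
    correct = (pred == heads).float() * mask
    return (correct.sum(dim=-1) / (mask.sum(dim=-1) + eps)).mean().item()

heads = torch.tensor([[3, 1, 1, 0, 0]])  # gold heads; the two trailing zeros are padding
pred = torch.tensor([[3, 1, 2, 0, 0]])   # one real mistake; pad positions never count
print(masked_arc_accuracy(pred, heads))  # 2 of 3 real tokens correct -> ~0.667
```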
#### File: JoesSattes/Thai-Biaffine-Dependency-Parsing/transformer.py
```python
import math, copy, time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def clones(module, N):
"""Produce N identical layers."""
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def attention(query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class Encoder(nn.Module):
"""Core encoder is a stack of N layers"""
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"""Pass the input (and mask) through each layer in turn."""
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"""Encoder is made up of self-attn and feed forward (defined below)"""
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"""Follow Figure 1 (left) for connections."""
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"""Take in model size and number of heads."""
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"""Implements Figure 2."""
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"""Implements FFN equation."""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class PositionalEncoding(nn.Module):
"""Implement the PE function."""
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
class TransformerEncoder(nn.Module):
"""A Transformer encoder."""
def __init__(self, input_size, N, d_model, d_ff, h, dropout):
super(TransformerEncoder, self).__init__()
attn = MultiHeadedAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
self.d_model = d_model
self.encoder = Encoder(EncoderLayer(d_model, attn, ff, dropout), N)
self.projection = nn.Linear(input_size, d_model) # to get to the proper input size
self.positional = PositionalEncoding(d_model, dropout)
self.initialize_parameters()
def initialize_parameters(self):
"""Initialize parameters with Glorot."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform(p)
def forward(self, x, mask):
"""Take in and process masked src and target sequences."""
return self.encode(x, mask)
def embed(self, x):
x = self.projection(x) * math.sqrt(self.d_model)
return self.positional(x)
def encode(self, x, mask):
return self.encoder(self.embed(x), mask)
@property
def num_parameters(self):
"""Returns the number of trainable parameters of the model."""
return sum(np.prod(p.shape) for p in self.parameters() if p.requires_grad)
if __name__ == '__main__':
    # Hyperparameters here are arbitrary; they only need to be mutually consistent for this smoke test.
    encoder = TransformerEncoder(input_size=512, N=2, d_model=512, d_ff=2048, h=8, dropout=0.1)
embedding = nn.Embedding(100, 512)
x = Variable(torch.arange(1, 16).long().view(3, 5))
mask = (x != 0).unsqueeze(-2) # Why unsqueeze?
print(x)
print(mask)
out = encoder(embedding(x), mask)
print(out)
```
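The `attention` helper is standard scaled dot-product attention, softmax(QK^T / sqrt(d_k)) V, with masked positions pushed to -1e9 before the softmax. A self-contained sketch on tiny tensors that restates the same formula inline (shapes and the padding mask are the point, not the values):
```python
import math
import torch
import torch.nn.functional as F

batch, length, d_k = 1, 4, 8
q = torch.randn(batch, length, d_k)
k = torch.randn(batch, length, d_k)
v = torch.randn(batch, length, d_k)
mask = torch.tensor([[1, 1, 1, 0]]).unsqueeze(-2)               # last key position is padding
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)  # [batch, length, length]
scores = scores.masked_fill(mask == 0, -1e9)                    # padded keys get ~zero weight
p_attn = F.softmax(scores, dim=-1)
out = torch.matmul(p_attn, v)                                   # [batch, length, d_k]
print(p_attn[0].sum(dim=-1))  # each query's attention weights sum to 1
print(out.shape)
```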
#### File: JoesSattes/Thai-Biaffine-Dependency-Parsing/util.py
```python
import os
import time
import csv
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
class Timer:
"""A simple timer to use during training."""
def __init__(self):
self.time0 = time.time()
def elapsed(self):
time1 = time.time()
elapsed = time1 - self.time0
self.time0 = time1
return elapsed
def write_losses(train_loss, train_acc, val_acc, outdir):
"""Write out the loss and accuracy to CSV files."""
with open(os.path.join(outdir, 'loss.train.csv'), 'w') as f:
writer = csv.writer(f)
names = [['loss']]
losses = [[l] for l in train_loss]
writer.writerows(names + losses)
with open(os.path.join(outdir, 'acc.train.csv'), 'w') as f:
writer = csv.writer(f)
names = [["train_arc_acc", "train_lab_acc"]]
writer.writerows(names + train_acc)
with open(os.path.join(outdir, 'acc.val.csv'), 'w') as f:
writer = csv.writer(f)
names = [["val_arc_acc", "val_lab_acc"]]
writer.writerows(names + val_acc)
# def plot(corpus, model, fig, ax, step, sent=2):
# words = Variable(torch.LongTensor([corpus.train.words[sent]]))
# tags = Variable(torch.LongTensor([corpus.train.tags[sent]]))
# heads = Variable(torch.LongTensor([corpus.train.heads[sent]]))
# labels = Variable(torch.LongTensor([corpus.train.labels[sent]]))
# # Disable dropout.
# model.eval()
# S_arc, S_lab = model(words, tags)
# # Turn dropout back on.
# model.train()
# # Plot the gold adjacency matrix, if does not yet exist.
# if not os.path.exists('img/gold.pdf'):
# # Make a 0/1 gold adjacency matrix.
# n = words.size(1)
# G = np.zeros((n, n))
# heads = heads.squeeze().data.numpy()
# G[heads, np.arange(n)] = 1.
# im = ax.imshow(G, vmin=0, vmax=1)
# fig.colorbar(im)
# plt.savefig('img/gold.pdf'.format(step))
# plt.cla()
# plt.clf()
# # Plot the predicted adjacency matrix
# A = F.softmax(S_arc.squeeze(0), dim=0)
# fig, ax = plt.subplots()
# im = ax.imshow(A.data.numpy(), vmin=0, vmax=1)
# fig.colorbar(im)
# plt.savefig('img/a.{}.pdf'.format(step))
# plt.cla()
# plt.clf()
``` |
{
"source": "joest67/my-beancount-scripts",
"score": 3
} |
#### File: modules/imports/yuebao.py
```python
import calendar
import csv
import datetime
from datetime import date
from io import StringIO
import xlrd
from beancount.core import data
from beancount.core.data import Note, Transaction
from . import (DictReaderStrip, get_account_by_guess,
get_income_account_by_guess)
from .base import Base
from .deduplicate import Deduplicate
Account余额宝 = 'Assets:Company:Alipay:MonetaryFund'
incomes = ['余额自动转入', '收益', '单次转入']
class YuEBao(Base):
def __init__(self, filename, byte_content, entries, option_map):
if not filename.endswith('xls'):
            raise ValueError('Not YuEBao!')
data = xlrd.open_workbook(filename)
table = data.sheets()[0]
rows_value = table.row_values(0)
if rows_value[0] != '余额宝收支明细查询':
            raise ValueError('Not YuEBao!')
self.book = data
self.table = table
self.deduplicate = Deduplicate(entries, option_map)
def parse(self):
table = self.table
rows = table.nrows
for i in range(5, rows - 4):
row = table.row_values(i)
time = datetime.datetime(
*xlrd.xldate_as_tuple(table.cell_value(rowx=i, colx=0), self.book.datemode))
print("Importing {} price = {} balance = {}".format(
time, row[2], row[3]))
meta = {}
amount = float(row[1])
entry = Transaction(
meta,
date(time.year, time.month, time.day),
'*',
'余额宝',
'余额宝',
data.EMPTY_SET,
data.EMPTY_SET, []
)
if not row[2] in incomes:
amount = -amount
if self.deduplicate.find_duplicate(entry, amount, None, Account余额宝):
print(
"Unknown transaction for {}, check if Alipay transaction exists.".format(time))
self.deduplicate.apply_beans()
return []
``` |
{
"source": "joest67/qsProj",
"score": 3
} |
#### File: qsBack/app/functions.py
```python
from qs.models import Person, Answer
from django.forms.models import model_to_dict
def newPerson(dicts):
default = ''
imei = dicts.get('imei',default)
name = dicts.get('name', default)
mobile = dicts.get('mobile', default)
qq = dicts.get('qq', default)
email = dicts.get('email', default)
a = Answer.objects.get(imei=imei)
try:
p = Person.objects.get(imei = a)
updatePerson(dicts, p)
except Person.DoesNotExist:
p = Person(name=name,mobile=mobile,qq=qq,email=email)
p.imei = a
p.save()
def newAnswer(imei="", sex="", age="", address="", edu="", position="",
payment="",useMobileTime="",webTime="",webMoment="",webUsage=""):
a = Answer(imei=imei,sex=sex,age=age,address=address,edu=edu,
position=position,payment=payment)
a.save()
def updatePerson(dicts, person):
try:
person.name = dicts.get("name", person.name)
person.mobile = dicts.get("mobile", person.mobile)
person.qq = dicts.get("qq", person.qq)
person.email = dicts.get("email", person.email)
person.save()
except:
        print("update person error.")
def updateAnswer(dicts, answer):
try:
answer.sex = dicts.get("sex", answer.sex)
answer.sex = dicts.get("age", answer.age)
answer.address = dicts.get("address", answer.address)
answer.edu = dicts.get("edu",answer.edu)
answer.position = dicts.get("position",answer.position)
answer.payment = dicts.get("payment", answer.payment)
answer.useMobileTime = dicts.get("useMobileTime", answer.useMobileTime)
answer.webTime = dicts.get("webTime", answer.webTime)
answer.webMoment = dicts.get("webMoment", answer.webMoment)
answer.webUsage = dicts.get("webUsage", answer.webUsage )
answer.save()
except:
        print("update answer error.")
def update(imei, dicts):
try:
a = Answer.objects.get(imei=imei)
if checkForm(dicts) == 2:
updateAnswer(dicts, a)
newPerson(dicts)
else:
updateAnswer(dicts, a)
except Answer.DoesNotExist:
newAnswer(**dicts)
def store(dicts):
try:
imei = dicts['imei']
flag = isAnswered(imei)
if flag == 2:
            print("You have already answered this questionnaire.")
elif flag == 1:
update(imei, dicts)
else:
newAnswer(**dicts)
except KeyError:
        print("This dict does not have an imei value.")
# check which form is posted
# 1 => form1
# 2 => form2
def checkForm(dicts):
if dicts.get('sex') != None:
return 1
return 2
# check whether the user has answered this questionnaire
# 0 => not answered
# 1 => have answered q1
# 2 => have answered all
def isAnswered(imei):
try:
a = Answer.objects.get(imei=imei)
if a.webTime != '' and a.sex != '':
return 2
return 1
except Answer.DoesNotExist:
return 0
def getObject():
answers = {}
persons = {}
for e in Answer.objects.all():
answers[e.imei] = model_to_dict(e)
persons[e.imei] = getPerson(e.imei)
result = {"answers":answers,"persons":persons}
return result
def getPerson(imei):
try:
p = Person.objects.get(imei_id=imei)
return p.getDic()
except Person.DoesNotExist:
return {}
```
#### File: qsBack/app/views.py
```python
from django.http import HttpResponse
from qs.functions import store, getObject
from qs.models import Person, Answer
from django.template import loader,Context
def index(request):
return HttpResponse("Hello,World!")
def sendqs(request):
if(request.POST['dic']):
dicts = request.POST.get('dic')
store(eval(dicts))
return HttpResponse("hehe")
def showall(request):
t = loader.get_template('show.html')
c = Context(getObject())
return HttpResponse(t.render(c))
``` |
{
"source": "joestarhu/jhml",
"score": 3
} |
#### File: jhml/src/knn.py
```python
import numpy as np
import pandas as pd
class KNN:
@staticmethod
def predict(X,y,T,k=3,p=2,normalize=False) -> pd.DataFrame:
"""
        KNN using brute-force search (computes the distance between each test point and every training sample).
        Parameters:
        ----------------
        X: np.ndarray
            Training dataset (features)
        y: np.ndarray
            Training dataset labels
        T: np.ndarray
            Test dataset (features)
        k: int
            Number of neighbours K, default: 3
        p: int
            Order p of the Minkowski distance, default: 2
            p = 1 gives the Manhattan distance
            p = 2 gives the Euclidean distance
        normalize: bool
            Min-max normalization flag, default: False
            True: normalize the data before computing distances
            False: use the input data as-is
        Returns:
        ---------
        pd.DataFrame
            Example:
                动作        爱情 CLS
            0  0.333333  0.666667  爱情
            1  1.000000  0.000000  动作
        """
if normalize:
ds = np.r_[X,T]
ds_min,ds_max = ds.min(axis=0),ds.max(axis=0)
ds = (ds - ds_min)/(ds_max-ds_min)
X = ds[:X.shape[0]]
T = ds[X.shape[0]:]
lbl = np.unique(y)
n_lp = [np.sum(np.abs(t-X)**p,axis=1)**(1/p) for t in T]
n_idx = np.argsort(n_lp,axis=1)[:,:k]
proba = [[n[n==v].size / n.size for v in lbl] for n in y[n_idx]]
df = pd.DataFrame(proba,columns=lbl)
df['CLS'] = lbl[np.argmax(proba,axis=1)]
return df
if __name__ == '__main__':
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
iris = load_iris()
X = np.r_[iris.data[:30],iris.data[50:80],iris.data[100:130]]
y = np.r_[iris.target[:30],iris.target[50:80],iris.target[100:130]]
tX = np.r_[iris.data[30:50],iris.data[80:100],iris.data[130:]]
ty = np.r_[iris.target[30:50],iris.target[80:100],iris.target[130:]]
for k in range(3,20,2):
error_size = ty.size - ty[KNN.predict(X,y,tX,k=k)['CLS'] == ty].size
print(1 - error_size/ty.size)
``` |
{
"source": "joestasks/gainterc-ardvis",
"score": 2
} |
#### File: gainterc-ardvis/data_comparison/nbar_lam_ratio_plot.py
```python
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def plot_nbar_lam_ratio(nbar_ratio_dfs, lam_ratio_dfs, plot_plan, **ack):
plt.close('all')
plt.style.use(ack.get('plot_style'))
temp_a_df = None
temp_b_df = None
oa_temp_a_df = None
oa_temp_b_df = None
for site in [*nbar_ratio_dfs]:
for ga_band in [*nbar_ratio_dfs[site]]:
temp_a_df = nbar_ratio_dfs[site][ga_band][1]
oa_temp_a_df = nbar_ratio_dfs[site][ga_band][4]
if site in [*lam_ratio_dfs]:
lam_ga_band = ga_band.replace('nbar', 'lambertian')
temp_b_df = lam_ratio_dfs[site][lam_ga_band][1]
oa_temp_b_df = lam_ratio_dfs[site][lam_ga_band][6]
res = pd.merge(
temp_a_df.assign(
grouper=pd.to_datetime(
temp_a_df[ack.get('date_col')]
).dt.to_period('D')),
temp_b_df.assign(
grouper=pd.to_datetime(
temp_b_df[ack.get('date_col')]
).dt.to_period('D')),
how='inner', on='grouper',
suffixes=('_nbar', '_lam'))
res['ratio'] = res['Mean_sr_nbar'] / res['Mean_sr_lam']
m_fig, m_axs = plt.subplots(2, 1, figsize=(12, 10), squeeze=False)
# Do plotting.
m_axs[0][0].set(
xlabel=ack.get('date_col'),
ylabel='NBAR/LAM',
title='NBAR / LAM Ratio for ' + ((ack.get('in_a_source_name') == 'GA' and ga_band.split('_nbar_')[1]) or ga_band) + ' at ' + site.replace('NBAR_', ''),
xlim=[np.datetime64(ack.get('plot_start_date')), np.datetime64(ack.get('plot_end_date'))]
)
if temp_a_df is not None:
ax = res.plot(
kind=ack.get(
'measurements_plot_type'
), x='Date_nbar',
y='ratio', label=((ack.get('in_a_source_name') == 'GA' and ga_band.split('_nbar_')[1]) or ga_band),
#marker='o',
ax=m_axs[0][0],
# sharex=m_axs[1][0]
)
#if temp_b_df is not None:
# ax = temp_b_df.plot(
# kind=ack.get(
# 'measurements_plot_type'
# ), x=ack.get(
# 'date_col'
# ), y='Mean_sr', label='B',
# #marker='o',
# ax=m_axs[0][0],
# # sharex=m_axs[1][0]
# )
m_axs[1][0].set(
xlabel=ack.get('date_col'),
ylabel=ack.get('oa_plot_y_label'),
title='GA Additional Attribute(s)',
xlim=[np.datetime64(ack.get('plot_start_date')), np.datetime64(ack.get('plot_end_date'))]
)
if oa_temp_a_df is not None:
ax = oa_temp_a_df.plot(
kind=ack.get(
'measurements_plot_type'
), x=ack.get('date_col'), y='Mean_sr', label=nbar_ratio_dfs[site][ga_band][3][7:],
ax=m_axs[1][0],
# sharex=m_axs[0][0]
)
if oa_temp_b_df is not None:
ax = oa_temp_b_df.plot(
kind=ack.get(
'measurements_plot_type'
), x=ack.get('date_col'), y='Mean_sr', label=nbar_ratio_dfs[site][ga_band][5][7:],
ax=m_axs[1][0],
# sharex=m_axs[0][0]
)
plot_target = plot_plan.get_plot_target('NBAR', site.replace('NBAR_', ''), **ack)
plot_path = plot_target + ga_band + '_nbar_lam_ratio.png'
print('Writing plot image: ' + plot_path)
m_fig.autofmt_xdate()
#plt.show()
plt.savefig(plot_path)
plt.close(m_fig)
return True
```
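The core of the plot is an inner merge of the NBAR and Lambertian mean-reflectance tables on a daily period key, followed by an element-wise ratio. A minimal pandas sketch of that join with made-up dates and values (column names mirror the `_nbar`/`_lam` suffixes used above):
```python
import pandas as pd

nbar = pd.DataFrame({'Date': ['2020-01-01', '2020-01-06'], 'Mean_sr': [0.42, 0.40]})
lam = pd.DataFrame({'Date': ['2020-01-01', '2020-01-06'], 'Mean_sr': [0.38, 0.41]})
# Group both tables onto a common daily period so rows from the same acquisition day line up.
res = pd.merge(
    nbar.assign(grouper=pd.to_datetime(nbar['Date']).dt.to_period('D')),
    lam.assign(grouper=pd.to_datetime(lam['Date']).dt.to_period('D')),
    how='inner', on='grouper', suffixes=('_nbar', '_lam'))
res['ratio'] = res['Mean_sr_nbar'] / res['Mean_sr_lam']
print(res[['Date_nbar', 'ratio']])
```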
#### File: gainterc-ardvis/data_comparison/plot_plan.py
```python
import os
from pathlib import Path
import numpy as np
def get_plans(app_configuration, subproject_name):
"""Return a flattened list of standardised plans."""
ack = _make_app_config_kwargs(app_configuration, subproject_name)
plans = []
products = get_products(**ack)
print('Using these products:-')
print(products)
for product in products:
sites = _get_sites(product, **ack)
print('Processing these sites:-')
print(sites)
for site in sites:
m_title, oa_title, i_title, esa_oa_title = _get_plot_titles(product, site, **ack)
plan_properties = {
"product": product,
"site": site,
"in_a_source_name": ack.get('in_a_source_name'),
"in_b_source_name": ack.get('in_b_source_name'),
"in_c_source_name": ack.get('in_c_source_name'),
"in_a_satellite_name": ack.get('in_a_satellite_name'),
"in_b_satellite_name": ack.get('in_b_satellite_name'),
"in_c_satellite_name": ack.get('in_c_satellite_name'),
"in_a_site_path": get_in_site_path('a', product, **ack),
"in_b_site_path": get_in_site_path('b', product, **ack),
"in_c_site_path": get_in_site_path('c', product, **ack),
"in_a_sr_measurements_file": ack.get('in_a_measurements_file'),
"in_b_sr_measurements_file": ack.get('in_b_measurements_file'),
"in_a_indices_file": ack.get('in_a_indices_file'),
"in_b_indices_file": ack.get('in_b_indices_file'),
"in_c_indices_file": ack.get('in_c_indices_file'),
"plot_target": get_plot_target(product, site, **ack),
"all_sites_plot_target": get_all_sites_plot_target(product, **ack),
"srm_title": m_title,
"srm_oa_title": oa_title,
"srm_esa_oa_title": esa_oa_title,
"indices_title": i_title,
"rec_max": ack.get('rec_max'),
"plot_sr_measurements": ack.get('plot_sr_measurements'),
"plot_indices": ack.get('plot_indices'),
"ga_bands": ack.get('ga_bands'),
"ga_band_mappings": ack.get('ga_band_mappings'),
"ga_oas": ack.get('ga_oas'),
"ga_oa_mappings": ack.get('ga_oa_mappings'),
"ga_band_lam_name": ack.get('ga_band_lam_name'),
"ga_band_oa_name": ack.get('ga_band_oa_name'),
"plot_start_date": np.datetime64(ack.get('plot_start_date')),
"plot_end_date": np.datetime64(ack.get('plot_end_date')),
"in_a_measurements_min_valid_pixel_percentage": ack.get('in_a_measurements_min_valid_pixel_percentage'),
"in_b_measurements_min_valid_pixel_percentage": ack.get('in_b_measurements_min_valid_pixel_percentage'),
"in_c_measurements_min_valid_pixel_percentage": ack.get('in_c_measurements_min_valid_pixel_percentage'),
"in_a_indices_min_valid_pixel_percentage": ack.get('in_a_indices_min_valid_pixel_percentage'),
"in_b_indices_min_valid_pixel_percentage": ack.get('in_b_indices_min_valid_pixel_percentage'),
"in_c_indices_min_valid_pixel_percentage": ack.get('in_c_indices_min_valid_pixel_percentage'),
"sr_measurements_date_filtering": ack.get('sr_measurements_date_filtering'),
"date_col": ack.get('date_col'),
"band_col": ack.get('band_col'),
"standardised_date_format": ack.get('standardised_date_format'),
"esa_oa_mappings": ack.get('esa_oa_mappings'),
"plot_style": ack.get('plot_style'),
"measurements_plot_y_label": ack.get('measurements_plot_y_label'),
"measurements_plot_type": ack.get('measurements_plot_type'),
"oa_plot_y_label": ack.get('oa_plot_y_label'),
"acd": ack.get('acd'),
"indices_col": ack.get('indices_col'),
"indices_plot_type": ack.get('indices_plot_type'),
"in_a_data_path": ack.get('in_a_data_path'),
"in_a_prefix": ack.get('in_a_prefix'),
"in_a_same_sensor_date_filter_source": ack.get('in_a_same_sensor_date_filter_source'),
"in_b_data_path": ack.get('in_b_data_path'),
"in_b_prefix": ack.get('in_b_prefix'),
"in_b_same_sensor_date_filter_source": ack.get('in_b_same_sensor_date_filter_source'),
"in_c_data_path": ack.get('in_c_data_path'),
"in_c_prefix": ack.get('in_c_prefix'),
"in_c_same_sensor_date_filter_source": ack.get('in_c_same_sensor_date_filter_source'),
"product_label": get_product_label(product, **ack),
"spectral_indices": ack.get('spectral_indices'),
"test_ref_base": ack.get('test_ref_base'),
"test_ref_path": get_test_ref_path(product, site, **ack),
"indices_same_sensor_date_filtering": ack.get('indices_same_sensor_date_filtering'),
"in_a_same_sensor_date_filter_source": ack.get('in_a_same_sensor_date_filter_source'),
"in_b_same_sensor_date_filter_source": ack.get('in_b_same_sensor_date_filter_source'),
"in_c_same_sensor_date_filter_source": ack.get('in_c_same_sensor_date_filter_source'),
}
plans.append(plan_properties)
return (plans, ack)
def _make_app_config_kwargs(app_c, subp_name):
acd = app_c['APP_SOURCE']['SUBPROJECTS'][subp_name]['DATA']
ack = {
"acd": acd,
"in_a_measurements_min_valid_pixel_percentage": acd['COMPARISON_SOURCES']['A']['MEASUREMENTS_MIN_VALID_PIXEL_PERCENTAGE'],
"in_b_measurements_min_valid_pixel_percentage": acd['COMPARISON_SOURCES']['B']['MEASUREMENTS_MIN_VALID_PIXEL_PERCENTAGE'],
"in_c_measurements_min_valid_pixel_percentage": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['MEASUREMENTS_MIN_VALID_PIXEL_PERCENTAGE']) or None,
"in_a_indices_min_valid_pixel_percentage": acd['COMPARISON_SOURCES']['A']['INDICES_MIN_VALID_PIXEL_PERCENTAGE'],
"in_b_indices_min_valid_pixel_percentage": acd['COMPARISON_SOURCES']['B']['INDICES_MIN_VALID_PIXEL_PERCENTAGE'],
"in_c_indices_min_valid_pixel_percentage": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['INDICES_MIN_VALID_PIXEL_PERCENTAGE']) or None,
"plot_sr_measurements": acd['PLOT_SR_MEASUREMENTS'],
"plot_indices": acd['PLOT_INDICES'],
"sr_measurements_date_filtering": acd['SR_MEASUREMENTS_DATE_FILTERING'],
"indices_same_sensor_date_filtering": acd['INDICES_SAME_SENSOR_DATE_FILTERING'],
"standardised_date_format": acd['STANDARDISED_DATE_FORMAT'],
"plot_start_date": acd['PLOT_START_DATE'],
"plot_end_date": acd['PLOT_END_DATE'],
"plot_style": acd['PLOT_STYLE'],
"measurements_plot_type": acd['MEASUREMENTS_PLOT_TYPE'],
"indices_plot_type": acd['INDICES_PLOT_TYPE'],
"measurements_plot_y_label": acd['MEASUREMENTS_PLOT_Y_LABEL'],
"oa_plot_y_label": acd['OA_PLOT_Y_LABEL'],
"ga_algorithms": [*acd['GA_ALGORITHMS']],
"ga_band_lam_name": acd['GA_BAND_LAM_NAME'],
"ga_band_oa_name": acd['GA_BAND_OA_NAME'],
"spectral_indices": [*acd['SPECTRAL_INDICES']],
"date_col": acd['DATE_COL'],
"band_col": acd['BAND_COL'],
"indices_col": acd['INDICES_COL'],
"ga_oa_mappings": acd['GA_OA_MAPPINGS'],
"ga_oas": [*acd['GA_OA_MAPPINGS']],
"esa_oa_mappings": ('ESA_OA_MAPPINGS' in [*acd] and acd['ESA_OA_MAPPINGS']) or {},
"ga_band_mappings": acd['GA_BAND_MAPPINGS'],
"ga_bands": [*acd['GA_BAND_MAPPINGS']],
"in_a_data_path": acd['IN_BASE'],
"in_a_prefix": acd['COMPARISON_SOURCES']['A']['PREFIX'],
"in_a_source_name": acd['COMPARISON_SOURCES']['A']['SOURCE_NAME'],
"in_a_satellite_name": acd['COMPARISON_SOURCES']['A']['SATELLITE_NAME'],
"in_a_product": acd['COMPARISON_SOURCES']['A']['PRODUCT'],
"in_a_site": acd['COMPARISON_SOURCES']['A']['SITE'],
"in_a_measurements_file": acd['COMPARISON_SOURCES']['A']['MEASUREMENTS_FILE'],
"in_a_indices_file": acd['COMPARISON_SOURCES']['A']['INDICES_FILE'],
"in_a_same_sensor_date_filter_source": ('SAME_SENSOR_DATE_FILTER_SOURCE' in [*acd['COMPARISON_SOURCES']['A']] and acd['COMPARISON_SOURCES']['A']['SAME_SENSOR_DATE_FILTER_SOURCE']) or '',
"in_b_data_path": acd['IN_BASE'],
"in_b_prefix": acd['COMPARISON_SOURCES']['B']['PREFIX'],
"in_b_source_name": acd['COMPARISON_SOURCES']['B']['SOURCE_NAME'],
"in_b_satellite_name": acd['COMPARISON_SOURCES']['B']['SATELLITE_NAME'],
"in_b_product": acd['COMPARISON_SOURCES']['B']['PRODUCT'],
"in_b_site": acd['COMPARISON_SOURCES']['B']['SITE'],
"in_b_measurements_file": acd['COMPARISON_SOURCES']['B']['MEASUREMENTS_FILE'],
"in_b_indices_file": acd['COMPARISON_SOURCES']['B']['INDICES_FILE'],
"in_b_same_sensor_date_filter_source": ('SAME_SENSOR_DATE_FILTER_SOURCE' in [*acd['COMPARISON_SOURCES']['B']] and acd['COMPARISON_SOURCES']['B']['SAME_SENSOR_DATE_FILTER_SOURCE']) or '',
"in_c_data_path": acd['IN_BASE'],
"in_c_prefix": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['PREFIX']) or '',
"in_c_source_name": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['SOURCE_NAME']) or '',
"in_c_satellite_name": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['SATELLITE_NAME']) or '',
"in_c_product": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['PRODUCT']) or '',
"in_c_site": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['SITE']) or '',
"in_c_measurements_file": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['MEASUREMENTS_FILE']) or '',
"in_c_indices_file": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['INDICES_FILE']) or '',
"in_c_same_sensor_date_filter_source": ('C' in [*acd['COMPARISON_SOURCES']] and acd['COMPARISON_SOURCES']['C']['SAME_SENSOR_DATE_FILTER_SOURCE']) or '',
"out_path": acd['OUT_BASE'],
"rec_max": app_c['APP_SOURCE']['DATA_RECORD_MAX_LIMIT'],
"test_ref_base": acd['TEST_REF_BASE'],
}
ack['in_a_site_path'] = ack.get(
'in_a_data_path'
) + '/' + ack.get(
'in_a_prefix'
) + '_' + ack.get(
'in_a_source_name'
) + '_' + ack.get(
'in_a_satellite_name'
) + '/'
ack['in_b_site_path'] = ack.get(
'in_b_data_path'
) + '/' + ack.get(
'in_b_prefix'
) + '_' + ack.get(
'in_b_source_name'
) + '_' + ack.get(
'in_b_satellite_name'
) + (
lambda x: (x is not None and '_' + x) or ('')
)(ack.get('in_b_product')) + '/'
if ack.get('in_c_source_name'):
ack['in_c_site_path'] = ack.get(
'in_c_data_path'
) + '/' + ack.get(
'in_c_prefix'
) + '_' + ack.get(
'in_c_source_name'
) + '_' + ack.get(
'in_c_satellite_name'
) + (
lambda x: (len(x) > 0 and '_' + x) or ('')
)(ack.get('in_c_product')) + '/'
else:
ack['in_c_site_path'] = ''
return ack
def get_products(**ack):
products = None
if ack.get('in_a_product') is None:
products = ack.get('ga_algorithms')
else:
products = list((ack.get('in_a_product'),))
return products
def _get_sites(s_product, **ack):
site_paths = None
if ack.get('in_a_site') is None:
site_paths = list(Path(get_in_site_path('a', s_product, **ack)).glob('**'))
site_paths.pop(0)
else:
site_paths = list((get_in_site_path('a', s_product, **ack) + ack.get('in_a_site'),))
print('Found these site paths:-')
print(site_paths)
sites = list(map(lambda x: os.path.basename(os.path.normpath(x)), site_paths))
return sites
def get_product_label(s_product, **ack):
product_label = ''
if s_product:
product_label = ' for ' + ack.get('acd')['GA_ALGORITHMS'][s_product]
return product_label
def get_in_site_path(inn, s_product, **ack):
site_path = ack.get(f"in_{inn}_site_path")
if ack.get(f"in_{inn}_source_name").upper() == 'GA':
site_path = ack.get(
f"in_{inn}_data_path"
) + '/' + ack.get(
f"in_{inn}_prefix"
) + '_' + ack.get(
f"in_{inn}_source_name"
) + '_' + ack.get(
f"in_{inn}_satellite_name"
) + '_' + s_product + '/'
return site_path
def _get_plot_titles(t_product, t_site, **ack):
"""Make plot titles."""
m_title = ack.get(
'in_a_source_name'
) + ' ' + ack.get(
'in_a_satellite_name'
) + ' VS ' + ack.get(
'in_b_source_name'
) + ' ' + ack.get(
'in_b_satellite_name'
) + get_product_label(
t_product, **ack
) + ' at ' + t_site
oa_title = ack.get(
'in_a_source_name'
) + ' ' + ack.get(
'in_a_satellite_name'
) + get_product_label(
t_product, **ack
) + ' at ' + t_site
i_title = m_title
if len(ack.get(
'in_c_source_name'
)) > 0 and len(ack.get(
'in_c_satellite_name'
)) > 0:
i_title = ack.get(
'in_a_source_name'
) + ' ' + ack.get(
'in_a_satellite_name'
) + ' VS ' + ack.get(
'in_b_source_name'
) + ' ' + ack.get(
'in_b_satellite_name'
) + ' VS ' + ack.get(
'in_c_source_name'
) + ' ' + ack.get(
'in_c_satellite_name'
) + ' at ' + t_site
esa_oa_title = ack.get(
'in_b_source_name'
) + ' ' + ack.get(
'in_b_satellite_name'
) + ' at ' + t_site
return (m_title, oa_title, i_title, esa_oa_title)
def get_plot_target(t_product, t_site, **ack):
plot_target = ack.get(
'out_path'
) + '/' + ack.get(
'in_a_source_name'
).lower() + '_' + ack.get(
'in_a_satellite_name'
).lower() + '_vs_' + ack.get(
'in_b_source_name'
).lower() + '_' + ack.get(
'in_b_satellite_name'
).lower() + '/' + t_product.lower() + '/' + t_site.lower() + '/'
return plot_target
def get_test_ref_path(t_product, t_site, **ack):
plot_target = ack.get(
'test_ref_base'
) + '/' + ack.get(
'in_a_source_name'
).lower() + '_' + ack.get(
'in_a_satellite_name'
).lower() + '_vs_' + ack.get(
'in_b_source_name'
).lower() + '_' + ack.get(
'in_b_satellite_name'
).lower() + '/' + t_product.lower() + '/' + (
(t_site is not None and t_site.lower() + '/') or ''
)
return plot_target
def get_all_sites_plot_target(t_product, **ack):
all_sites_plot_target = ack.get(
'out_path'
) + '/' + ack.get(
'in_a_source_name'
).lower() + '_' + ack.get(
'in_a_satellite_name'
).lower() + '_vs_' + ack.get(
'in_b_source_name'
).lower() + '_' + ack.get(
'in_b_satellite_name'
).lower() + '/' + t_product.lower() + '/'
return all_sites_plot_target
``` |
{
"source": "JoeStratton/home-assistant-config",
"score": 2
} |
#### File: custom_components/modernforms/__init__.py
```python
import json
import requests
from datetime import timedelta
from homeassistant.const import (CONF_NAME, CONF_HOST, CONF_SCAN_INTERVAL)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (async_call_later, async_track_time_interval)
from homeassistant.helpers import discovery
import logging
_LOGGER = logging.getLogger(__name__)
DOMAIN = "modernforms"
DEVICES = "devices"
CONF_LIGHT = "light"
SCAN_INTERVAL = timedelta(seconds=10)
def setup(hass, config):
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DEVICES] = []
fans = config[DOMAIN]
for fan in fans:
scan_interval = fan.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
name = fan.get(CONF_NAME)
host = fan.get(CONF_HOST)
has_light = fan.get(CONF_LIGHT, False)
hass.data[DOMAIN][DEVICES].append(ModernFormsDevice(hass, name, host, has_light, scan_interval))
discovery.load_platform(hass, 'fan', DOMAIN, None, config)
discovery.load_platform(hass, 'light', DOMAIN, None, config)
return True
class ModernFormsBaseEntity(Entity):
def __init__(self, hass, device):
self.hass = hass
self.device = device
self.device._attach(self)
def _device_updated(self):
self.schedule_update_ha_state()
@property
def should_poll(self):
return False
@property
def device_state_attributes(self):
return self.device.data
class ModernFormsDevice:
def __init__(self, hass, name, host, has_light, interval):
self.url = "http://{}/mf".format(host)
self.name = name
self.data = {}
self.has_light = has_light
self.subscribers = []
def update_action(time):
self.update_status()
async_call_later(hass, 0, update_action)
self.poll = async_track_time_interval(hass, update_action, interval)
def _attach(self, sub):
self.subscribers.append(sub)
def _notify(self):
for sub in self.subscribers:
sub._device_updated()
def clientId(self):
return self.data.get("clientId", None)
def fanOn(self):
return self.data.get("fanOn", False)
def fanSpeed(self):
return self.data.get("fanSpeed", None)
def fanDirection(self):
return self.data.get("fanDirection", None)
def lightOn(self):
return self.data.get("lightOn", False)
def lightBrightness(self):
        return self.data.get("lightBrightness", 0)
def set_fan_on(self):
self._send_request({"fanOn": 1})
def set_fan_off(self):
self._send_request({"fanOn": 0})
def set_fan_speed(self, speed):
if speed < 1:
speed = 1
elif speed > 6:
speed = 6
self._send_request({"fanOn": 1, "fanSpeed": speed})
def set_fan_direction(self, direction):
self._send_request({"fanDirection": direction})
def set_light_on(self):
self._send_request({"lightOn": 1})
def set_light_off(self):
self._send_request({"lightOn": 0})
def set_light_brightness(self, level):
if level < 1:
level = 1
elif level > 100:
level = 100
self._send_request({"lightOn": 1, "lightBrightness": level})
def update_status(self):
self._send_request({"queryDynamicShadowData":1})
def _send_request(self, data):
r = requests.post(self.url, json=data)
r.raise_for_status()
self.data = r.json();
self._notify()
```
#### File: custom_components/unifiprotect/camera.py
```python
import logging
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_LAST_TRIP_TIME
from homeassistant.helpers import entity_platform
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ATTR_CAMERA_ID,
ATTR_ONLINE,
ATTR_UP_SINCE,
DEFAULT_ATTRIBUTION,
DEFAULT_BRAND,
DEVICE_TYPE_DOORBELL,
DOMAIN,
SAVE_THUMBNAIL_SCHEMA,
SERVICE_SAVE_THUMBNAIL,
SERVICE_SET_DOORBELL_LCD_MESSAGE,
SERVICE_SET_HDR_MODE,
SERVICE_SET_HIGHFPS_VIDEO_MODE,
SERVICE_SET_IR_MODE,
SERVICE_SET_RECORDING_MODE,
SERVICE_SET_STATUS_LIGHT,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
SET_HDR_MODE_SCHEMA,
SET_HIGHFPS_VIDEO_MODE_SCHEMA,
SET_IR_MODE_SCHEMA,
SET_RECORDING_MODE_SCHEMA,
SET_STATUS_LIGHT_SCHEMA,
)
from .entity import UnifiProtectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Discover cameras on a Unifi Protect NVR."""
entry_data = hass.data[DOMAIN][entry.entry_id]
upv_object = entry_data["upv"]
protect_data = entry_data["protect_data"]
snapshot_direct = entry_data["snapshot_direct"]
if not protect_data.data:
return
async_add_entities(
[
UnifiProtectCamera(upv_object, protect_data, camera_id, snapshot_direct)
for camera_id in protect_data.data
]
)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SET_RECORDING_MODE,
SET_RECORDING_MODE_SCHEMA,
"async_set_recording_mode",
)
platform.async_register_entity_service(
SERVICE_SET_IR_MODE, SET_IR_MODE_SCHEMA, "async_set_ir_mode"
)
platform.async_register_entity_service(
SERVICE_SET_STATUS_LIGHT, SET_STATUS_LIGHT_SCHEMA, "async_set_status_light"
)
platform.async_register_entity_service(
SERVICE_SET_HDR_MODE, SET_HDR_MODE_SCHEMA, "async_set_hdr_mode"
)
platform.async_register_entity_service(
SERVICE_SET_HIGHFPS_VIDEO_MODE,
SET_HIGHFPS_VIDEO_MODE_SCHEMA,
"async_set_highfps_video_mode",
)
platform.async_register_entity_service(
SERVICE_SET_DOORBELL_LCD_MESSAGE,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
"async_set_doorbell_lcd_message",
)
platform.async_register_entity_service(
SERVICE_SAVE_THUMBNAIL, SAVE_THUMBNAIL_SCHEMA, "async_save_thumbnail"
)
return True
class UnifiProtectCamera(UnifiProtectEntity, Camera):
"""A Ubiquiti Unifi Protect Camera."""
def __init__(self, upv_object, protect_data, camera_id, snapshot_direct):
"""Initialize an Unifi camera."""
super().__init__(upv_object, protect_data, camera_id, None)
self._snapshot_direct = snapshot_direct
self._name = self._camera_data["name"]
self._stream_source = self._camera_data["rtsp"]
self._last_image = None
self._supported_features = SUPPORT_STREAM if self._stream_source else 0
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def supported_features(self):
"""Return supported features for this camera."""
return self._supported_features
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
return self._camera_data["recording_mode"]
@property
def brand(self):
"""Return the Cameras Brand."""
return DEFAULT_BRAND
@property
def model(self):
"""Return the camera model."""
return self._model
@property
def is_recording(self):
"""Return true if the device is recording."""
return (
True
if self._camera_data["recording_mode"] != "never"
and self._camera_data["online"]
else False
)
@property
def device_state_attributes(self):
"""Add additional Attributes to Camera."""
if self._device_type == DEVICE_TYPE_DOORBELL:
last_trip_time = self._camera_data["last_ring"]
else:
last_trip_time = self._camera_data["last_motion"]
return {
ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
ATTR_UP_SINCE: self._camera_data["up_since"],
ATTR_ONLINE: self._camera_data["online"],
ATTR_CAMERA_ID: self._camera_id,
ATTR_LAST_TRIP_TIME: last_trip_time,
}
async def async_set_recording_mode(self, recording_mode):
"""Set Camera Recording Mode."""
await self.upv_object.set_camera_recording(self._camera_id, recording_mode)
async def async_save_thumbnail(self, filename, image_width):
"""Save Thumbnail Image."""
if not self.hass.config.is_allowed_path(filename):
_LOGGER.error("Can't write %s, no access to path!", filename)
return
image = await self.upv_object.get_thumbnail(self._camera_id, image_width)
if image is None:
_LOGGER.error("Last recording not found for Camera %s", self.name)
return
try:
await self.hass.async_add_executor_job(_write_image, filename, image)
except OSError as err:
_LOGGER.error("Can't write image to file: %s", err)
async def async_set_ir_mode(self, ir_mode):
"""Set camera ir mode."""
await self.upv_object.set_camera_ir(self._camera_id, ir_mode)
async def async_set_status_light(self, light_on):
"""Set camera Status Light."""
await self.upv_object.set_camera_status_light(self._camera_id, light_on)
async def async_set_hdr_mode(self, hdr_on):
"""Set camera HDR mode."""
await self.upv_object.set_camera_hdr_mode(self._camera_id, hdr_on)
async def async_set_highfps_video_mode(self, high_fps_on):
"""Set camera High FPS video mode."""
await self.upv_object.set_camera_video_mode_highfps(
self._camera_id, high_fps_on
)
async def async_set_doorbell_lcd_message(self, message, duration):
"""Set LCD Message on Doorbell display."""
if not duration.isnumeric():
duration = None
await self.upv_object.set_doorbell_custom_text(
self._camera_id, message, duration
)
async def async_enable_motion_detection(self):
"""Enable motion detection in camera."""
if not await self.upv_object.set_camera_recording(self._camera_id, "motion"):
return
_LOGGER.debug("Motion Detection Enabled for Camera: %s", self._name)
async def async_disable_motion_detection(self):
"""Disable motion detection in camera."""
if not await self.upv_object.set_camera_recording(self._camera_id, "never"):
return
_LOGGER.debug("Motion Detection Disabled for Camera: %s", self._name)
async def async_camera_image(self):
"""Return the Camera Image."""
if self._snapshot_direct:
last_image = await self.upv_object.get_snapshot_image_direct(
self._camera_id
)
else:
last_image = await self.upv_object.get_snapshot_image(self._camera_id)
self._last_image = last_image
return self._last_image
async def stream_source(self):
"""Return the Stream Source."""
return self._stream_source
def _write_image(to_file, image_data):
"""Executor helper to write image."""
with open(to_file, "wb") as img_file:
img_file.write(image_data)
_LOGGER.debug("Thumbnail Image written to %s", to_file)
``` |
{
"source": "joestrouth1/tinkerpop",
"score": 2
} |
#### File: gremlin_python/process/anonymous_traversal.py
```python
__author__ = '<NAME> (http://stephen.genoprime.com)'
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import GraphTraversalSource
from gremlin_python.process.traversal import TraversalStrategies
from .. import statics
class AnonymousTraversalSource(object):
def __init__(self, traversal_source_class=GraphTraversalSource):
self.traversal_source_class = traversal_source_class
@classmethod
def traversal(cls, traversal_source_class=GraphTraversalSource):
return AnonymousTraversalSource(traversal_source_class)
def withGraph(self, graph):
return self.traversal_source_class(graph, TraversalStrategies.global_cache[graph.__class__])
def withRemote(self, remote_connection):
return self.withGraph(Graph()).withRemote(remote_connection)
def traversal(traversal_source_class=GraphTraversalSource):
return AnonymousTraversalSource.traversal(traversal_source_class)
statics.add_static('traversal', traversal)
``` |
{
"source": "joestump/django-ajax",
"score": 2
} |
#### File: django-ajax/ajax/decorators.py
```python
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from ajax.compat import getLogger
from django.http import Http404
from django.conf import settings
from decorator import decorator
from ajax.exceptions import AJAXError, PrimaryKeyMissing
from functools import wraps
from django.utils.decorators import available_attrs
logger = getLogger('django.request')
@decorator
def login_required(f, *args, **kwargs):
if not args[0].user.is_authenticated():
raise AJAXError(403, _('User must be authenticated.'))
return f(*args, **kwargs)
@decorator
def require_pk(func, *args, **kwargs):
if not hasattr(args[0], 'pk') or args[0].pk is None:
raise PrimaryKeyMissing()
return func(*args, **kwargs)
def allowed_methods(*args,**kwargs):
request_method_list = args
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
if request.method not in request_method_list:
raise AJAXError(403, _('Access denied.'))
return func(request, *args, **kwargs)
return inner
return decorator
@decorator
def json_response(f, *args, **kwargs):
"""Wrap a view in JSON.
This decorator runs the given function and looks out for ajax.AJAXError's,
which it encodes into a proper HttpResponse object. If an unknown error
is thrown it's encoded as a 500.
All errors are then packaged up with an appropriate Content-Type and a JSON
body that you can inspect in JavaScript on the client. They look like:
{
"message": "Error message here.",
"code": 500
}
Please keep in mind that raw exception messages could very well be exposed
to the client if a non-AJAXError is thrown.
"""
try:
result = f(*args, **kwargs)
if isinstance(result, AJAXError):
raise result
except AJAXError as e:
result = e.get_response()
request = args[0]
logger.warn('AJAXError: %d %s - %s', e.code, request.path, e.msg,
exc_info=True,
extra={
'status_code': e.code,
'request': request
}
)
except Http404 as e:
result = AJAXError(404, e.__str__()).get_response()
except Exception as e:
import sys
exc_info = sys.exc_info()
type, message, trace = exc_info
if settings.DEBUG:
import traceback
tb = [{'file': l[0], 'line': l[1], 'in': l[2], 'code': l[3]} for
l in traceback.extract_tb(trace)]
result = AJAXError(500, message, traceback=tb).get_response()
else:
result = AJAXError(500, "Internal server error.").get_response()
request = args[0]
logger.error('Internal Server Error: %s' % request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
result['Content-Type'] = 'application/json'
return result
```
#### File: django-ajax/ajax/encoders.py
```python
from __future__ import absolute_import
from django.core import serializers
from ajax.exceptions import AlreadyRegistered, NotRegistered
from django.db.models.fields import FieldDoesNotExist
from django.db import models
from django.conf import settings
from django.utils.html import escape
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
import collections
import six
# Used to change the field name for the Model's pk.
AJAX_PK_ATTR_NAME = getattr(settings, 'AJAX_PK_ATTR_NAME', 'pk')
def _fields_from_model(model):
return [field.name for field in model.__class__._meta.fields]
class DefaultEncoder(object):
_mapping = {
'IntegerField': int,
'PositiveIntegerField': int,
'AutoField': int,
'FloatField': float,
}
def to_dict(self, record, expand=False, html_escape=False, fields=None):
self.html_escape = html_escape
if hasattr(record, '__exclude__') and callable(record.__exclude__):
try:
exclude = record.__exclude__()
if fields is None:
fields = _fields_from_model(record)
fields = set(fields) - set(exclude)
except TypeError:
pass
data = serializers.serialize('python', [record], fields=fields)[0]
if hasattr(record, 'extra_fields'):
ret = record.extra_fields
else:
ret = {}
ret.update(data['fields'])
ret[AJAX_PK_ATTR_NAME] = data['pk']
for field, val in six.iteritems(ret):
try:
f = record.__class__._meta.get_field(field)
if expand and isinstance(f, models.ForeignKey):
try:
row = f.rel.to.objects.get(pk=val)
new_value = self.to_dict(row, False)
except f.rel.to.DoesNotExist:
new_value = None # Changed this to None from {} -G
else:
new_value = self._encode_value(f, val)
ret[smart_str(field)] = new_value
except FieldDoesNotExist as e:
pass # Assume extra fields are already safe.
if expand and hasattr(record, 'tags') and \
record.tags.__class__.__name__.endswith('TaggableManager'):
# Looks like this model is using taggit.
ret['tags'] = [{'name': self._escape(t.name),
'slug': self._escape(t.slug)} for t in record.tags.all()]
return ret
__call__ = to_dict
def _encode_value(self, field, value):
if value is None:
return value # Leave all None's as-is as they encode fine.
try:
return self._mapping[field.__class__.__name__](value)
except KeyError:
if isinstance(field, models.ForeignKey):
f = field.rel.to._meta.get_field(field.rel.field_name)
return self._encode_value(f, value)
elif isinstance(field, models.BooleanField):
# If someone could explain to me why the fuck the Python
# serializer appears to serialize BooleanField to a string
# with "True" or "False" in it, please let me know.
return (value == "True" or (type(value) == bool and value))
return self._escape(value)
def _escape(self, value):
if self.html_escape:
return escape(value)
return value
class HTMLEscapeEncoder(DefaultEncoder):
"""Encodes all values using Django's HTML escape function."""
def _escape(self, value):
return escape(value)
class ExcludeEncoder(DefaultEncoder):
def __init__(self, exclude):
self.exclude = exclude
def __call__(self, record, html_escape=False):
fields = set(_fields_from_model(record)) - set(self.exclude)
return self.to_dict(record, html_escape=html_escape, fields=fields)
class IncludeEncoder(DefaultEncoder):
def __init__(self, include):
self.include = include
def __call__(self, record, html_escape=False):
return self.to_dict(record, html_escape=html_escape, fields=self.include)
class Encoders(object):
def __init__(self):
self._registry = {}
def register(self, model, encoder):
if model in self._registry:
raise AlreadyRegistered()
self._registry[model] = encoder
def unregister(self, model):
if model not in self._registry:
raise NotRegistered()
del self._registry[model]
def get_encoder_from_record(self, record):
if isinstance(record, models.Model) and \
record.__class__ in self._registry:
encoder = self._registry[record.__class__]
else:
encoder = DefaultEncoder()
return encoder
def encode(self, record, encoder=None, html_escape=False):
if isinstance(record, collections.Iterable):
ret = []
for i in record:
if not encoder:
encoder = self.get_encoder_from_record(i)
ret.append(self.encode(i, html_escape=html_escape))
else:
if not encoder:
encoder = self.get_encoder_from_record(record)
ret = encoder(record, html_escape=html_escape)
return ret
encoder = Encoders()
``` |
{
"source": "joestump/lazyboy",
"score": 2
} |
#### File: lazyboy/lazyboy/connection.py
```python
from __future__ import with_statement
import random
import os
import threading
import socket
import time
from cassandra import Cassandra
from thrift import Thrift
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol
import thrift
import lazyboy.exceptions as exc
from contextlib import contextmanager
_SERVERS = {}
_CLIENTS = {}
def _retry_default_callback(attempt, exc):
"""Retry an attempt five times, then give up."""
return attempt < 5
def retry(callback=None):
"""Retry an operation."""
callback = callback or _retry_default_callback
assert callable(callback)
def __closure__(func):
def __inner__(*args, **kwargs):
attempt = 0
while True:
try:
return func(*args, **kwargs)
except Exception, exc:
if not callback(attempt, exc):
raise exc
attempt += 1
return __inner__
return __closure__
def add_pool(name, servers, timeout=None, recycle=None):
"""Add a connection."""
_SERVERS[name] = dict(servers=servers, timeout=timeout, recycle=recycle)
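# Illustrative sketch (pool name and hosts are hypothetical): register a pool once
# at startup, then fetch a per-process/per-thread client by name.
#   add_pool('Keyspace1', ['127.0.0.1:9160', '127.0.0.2:9160'], timeout=2.5)
#   client = get_pool('Keyspace1')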
def get_pool(name):
"""Return a client for the given pool name."""
key = str(os.getpid()) + threading.currentThread().getName() + name
if key in _CLIENTS:
return _CLIENTS[key]
try:
_CLIENTS[key] = Client(**_SERVERS[name])
return _CLIENTS[key]
except Exception, e:
raise exc.ErrorCassandraClientNotFound(
"Pool `%s' is not defined." % name)
class Client(object):
"""A wrapper around the Cassandra client which load-balances."""
def __init__(self, servers, timeout=None, recycle=None):
"""Initialize the client."""
self._servers = servers
self._recycle = recycle
self._timeout = timeout
self._clients = [s for s in [self._build_server(*server.split(":")) \
for server in servers] if s]
self._current_server = random.randint(0, len(self._clients))
@retry()
def get(self, *args, **kwargs):
"""
Parameters:
- keyspace
- key
- column_path
- consistency_level
"""
with self.get_client() as client:
return client.get(*args, **kwargs)
@retry()
def get_slice(self, *args, **kwargs):
"""
Parameters:
- keyspace
- key
- column_parent
- predicate
- consistency_level
"""
with self.get_client() as client:
return client.get_slice(*args, **kwargs)
@retry()
def multiget(self, *args, **kwargs):
"""
Parameters:
- keyspace
- keys
- column_path
- consistency_level
"""
with self.get_client() as client:
return client.multiget(*args, **kwargs)
@retry()
def multiget_slice(self, *args, **kwargs):
"""
Parameters:
- keyspace
- keys
- column_parent
- predicate
- consistency_level
"""
with self.get_client() as client:
return client.multiget_slice(*args, **kwargs)
@retry()
def get_count(self, *args, **kwargs):
"""
Parameters:
- keyspace
- key
- column_parent
- consistency_level
"""
with self.get_client() as client:
return client.get_count(*args, **kwargs)
@retry()
def get_key_range(self, *args, **kwargs):
"""
Parameters:
- keyspace
- column_family
- start
- finish
- count
- consistency_level
"""
with self.get_client() as client:
return client.get_key_range(*args, **kwargs)
@retry()
def remove(self, *args, **kwargs):
"""
Parameters:
- keyspace
- key
- column_path
- timestamp
- consistency_level
"""
with self.get_client() as client:
return client.remove(*args, **kwargs)
@retry()
def get_string_property(self, *args, **kwargs):
"""
Parameters:
- property
"""
with self.get_client() as client:
return client.get_string_property(*args, **kwargs)
@retry()
def get_string_list_property(self, *args, **kwargs):
"""
Parameters:
- property
"""
with self.get_client() as client:
return client.get_string_list_property(*args, **kwargs)
@retry()
def describe_keyspace(self, *args, **kwargs):
"""
Parameters:
- keyspace
"""
with self.get_client() as client:
return client.describe_keyspace(*args, **kwargs)
@retry()
def batch_insert(self, *args, **kwargs):
"""
Parameters:
- keyspace
- key
- cfmap
- consistency_level
"""
with self.get_client() as client:
return client.batch_insert(*args, **kwargs)
@retry()
def insert(self, *args, **kwargs):
"""
Parameters:
- keyspace
- key
- column_path
- value
- timestamp
- consistency_level
"""
with self.get_client() as client:
return client.insert(*args, **kwargs)
def _build_server(self, host, port):
"""Return a client for the given host and port."""
try:
socket = TSocket.TSocket(host, int(port))
if self._timeout:
socket.setTimeout(self._timeout)
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Cassandra.Client(protocol)
client.transport = transport
return client
except Exception:
return None
def _get_server(self):
"""Return the next server (round-robin) from the list."""
if self._clients is None or len(self._clients) == 0:
raise exc.ErrorCassandraNoServersConfigured
next_server = self._current_server % len(self._clients)
self._current_server += 1
return self._clients[next_server]
def list_servers(self):
"""Return all servers we know about."""
return self._clients
def _connect(self):
"""Connect to Cassandra if not connected."""
client = self._get_server()
if client.transport.isOpen() and self._recycle:
if (client.connect_time + self._recycle) > time.time():
return client
else:
client.transport.close()
elif client.transport.isOpen():
return client
try:
client.transport.open()
client.connect_time = time.time()
except thrift.transport.TTransport.TTransportException, e:
client.transport.close()
raise exc.ErrorThriftMessage(e.message)
return client
@contextmanager
def get_client(self):
"""Yield a Cassandra client connection."""
client = None
try:
client = self._connect()
yield client
except (socket.error, Thrift.TException), e:
message = e.message or "Transport error, reconnect"
if client:
client.transport.close()
raise exc.ErrorThriftMessage(message)
```
#### File: lazyboy/lazyboy/iterators.py
```python
from itertools import groupby
from operator import attrgetter
from collections import defaultdict
from lazyboy.connection import get_pool
import lazyboy.exceptions as exc
from cassandra.ttypes import SlicePredicate, SliceRange, ConsistencyLevel, \
ColumnOrSuperColumn, Column, ColumnParent
GET_KEYSPACE = attrgetter("keyspace")
GET_COLFAM = attrgetter("column_family")
GET_KEY = attrgetter("key")
GET_SUPERCOL = attrgetter("super_column")
def groupsort(iterable, keyfunc):
"""Return a generator which sort and groups a list."""
return groupby(sorted(iterable, key=keyfunc), keyfunc)
def slice_iterator(key, consistency, **range_args):
"""Return an iterator over a row."""
kwargs = {'start': "", 'finish': "",
'count': 100000, 'reversed': 0}
kwargs.update(range_args)
consistency = consistency or ConsistencyLevel.ONE
client = get_pool(key.keyspace)
res = client.get_slice(
key.keyspace, key.key, key,
SlicePredicate(slice_range=SliceRange(**kwargs)),
consistency)
if not res:
raise exc.ErrorNoSuchRecord("No record matching key %s" % key)
return unpack(res)
def multigetterator(keys, consistency, **range_args):
"""Return a dictionary of data from Cassandra.
    This fetches data with the minimum number of network requests. It
DOES NOT preserve order.
If you depend on ordering, use list_multigetterator. This may
require more requests.
"""
kwargs = {'start': "", 'finish': "",
'count': 100000, 'reversed': 0}
kwargs.update(range_args)
predicate = SlicePredicate(slice_range=SliceRange(**kwargs))
consistency = consistency or ConsistencyLevel.ONE
out = {}
for (keyspace, ks_keys) in groupsort(keys, GET_KEYSPACE):
client = get_pool(keyspace)
out[keyspace] = {}
for (colfam, cf_keys) in groupsort(ks_keys, GET_COLFAM):
            if colfam not in out[keyspace]:
out[keyspace][colfam] = defaultdict(lambda: dict())
for (supercol, sc_keys) in groupsort(cf_keys, GET_SUPERCOL):
records = client.multiget_slice(
keyspace, map(GET_KEY, sc_keys),
ColumnParent(colfam, supercol), predicate, consistency)
for (row_key, cols) in records.iteritems():
cols = unpack(cols)
if supercol is None:
out[keyspace][colfam][row_key] = cols
else:
out[keyspace][colfam][row_key][supercol] = cols
return out
def sparse_get(key, columns):
"""Return an iterator over a specific set of columns."""
client = get_pool(key.keyspace)
res = client.get_slice(
key.keyspace, key.key, key, SlicePredicate(column_names=columns),
ConsistencyLevel.ONE)
return unpack(res)
def sparse_multiget(keys, columns):
"""Return an iterator over a specific set of columns."""
first_key = iter(keys).next()
client = get_pool(first_key.keyspace)
row_keys = [key.key for key in keys]
res = client.multiget_slice(
first_key.keyspace, row_keys, first_key,
SlicePredicate(column_names=columns), ConsistencyLevel.ONE)
out = {}
for (row_key, cols) in res.iteritems():
out[row_key] = [corsc.column or corsc.super_column for corsc in cols]
return out
def key_range(key, start="", finish="", count=100):
"""Return an iterator over a range of keys."""
cas = get_pool(key.keyspace)
return cas.get_key_range(key.keyspace, key.column_family, start,
finish, count, ConsistencyLevel.ONE)
def key_range_iterator(key, start="", finish="", count=100):
"""Return an iterator which produces Key instances for a key range."""
return (key.clone(key=k) for k in key_range(key, start, finish, count))
def pack(objects):
"""Return a generator which packs objects into ColumnOrSuperColumns."""
for object_ in objects:
key = 'column' if isinstance(object_, Column) else 'super_column'
yield ColumnOrSuperColumn(**{key: object_})
def unpack(records):
"""Return a generator which unpacks objects from ColumnOrSuperColumns."""
return (corsc.column or corsc.super_column for corsc in records)
```
#### File: lazyboy/lazyboy/record.py
```python
import time
import copy
from itertools import ifilterfalse as filternot
from cassandra.ttypes import Column, SuperColumn
from lazyboy.base import CassandraBase
from lazyboy.key import Key
import lazyboy.iterators as iterators
import lazyboy.exceptions as exc
class Record(CassandraBase, dict):
"""An object backed by a record in Cassandra."""
# A tuple of items which must be present for the object to be valid
_required = ()
# The keyspace this record should be saved in
_keyspace = None
# The column family this record should be saved in
_column_family = None
# Indexes which this record should be added to on save
_indexes = []
# Denormalized copies of this record
_mirrors = []
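    # Illustrative subclass sketch (keyspace/column family names are hypothetical):
    #   class UserRecord(Record):
    #       _keyspace = 'Keyspace1'
    #       _column_family = 'Users'
    #       _required = ('username',)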
def __init__(self, *args, **kwargs):
dict.__init__(self)
CassandraBase.__init__(self)
self._clean()
if args or kwargs:
self.update(*args, **kwargs)
def make_key(self, key=None, super_column=None, **kwargs):
"""Return a new key."""
args = {'keyspace': self._keyspace,
'column_family': self._column_family,
'key': key,
'super_column': super_column}
args.update(**kwargs)
return Key(**args)
def default_key(self):
"""Return a default key for this record."""
raise exc.ErrorMissingKey("There is no key set for this record.")
def set_key(self, key, super_column=None):
"""Set the key for this record."""
self.key = self.make_key(key=key, super_column=super_column)
return self
def get_indexes(self):
"""Return indexes this record should be stored in."""
return [index() if isinstance(index, type) else index
for index in self._indexes]
def get_mirrors(self):
"""Return mirrors this record should be stored in."""
        return [mirror() if isinstance(mirror, type) else mirror
for mirror in self._mirrors]
def valid(self):
"""Return a boolean indicating whether the record is valid."""
return len(self.missing()) == 0
def missing(self):
"""Return a tuple of required items which are missing."""
return tuple(filternot(self.get, self._required))
def is_modified(self):
"""Return True if the record has been modified since it was loaded."""
return bool(len(self._modified) + len(self._deleted))
def _clean(self):
"""Remove every item from the object"""
map(self.__delitem__, self.keys())
self._original, self._columns = {}, {}
self._modified, self._deleted = {}, {}
self.key = None
def update(self, arg=None, **kwargs):
"""Update the object as with dict.update. Returns None."""
if arg:
if hasattr(arg, 'keys'):
for key in arg:
self[key] = arg[key]
else:
for (key, val) in arg:
self[key] = val
if kwargs:
for key in kwargs:
self[key] = kwargs[key]
return self
def sanitize(self, value):
"""Return a value appropriate for sending to Cassandra."""
if value.__class__ is unicode:
value = value.encode('utf-8')
return str(value)
def __repr__(self):
"""Return a printable representation of this record."""
return "%s: %s" % (self.__class__.__name__, dict.__repr__(self))
def timestamp(self):
"""Return a GMT UNIX timestamp."""
return int(time.time()*10e6)
def __setitem__(self, item, value):
"""Set an item, storing it into the _columns backing store."""
if value is None:
raise exc.ErrorInvalidValue("You may not set an item to None.")
value = self.sanitize(value)
# If this doesn't change anything, don't record it
_orig = self._original.get(item)
if _orig and _orig.value == value:
return
dict.__setitem__(self, item, value)
if item not in self._columns:
self._columns[item] = Column(name=item)
col = self._columns[item]
if item in self._deleted:
del self._deleted[item]
self._modified[item] = True
col.value, col.timestamp = value, self.timestamp()
def __delitem__(self, item):
dict.__delitem__(self, item)
# Don't record this as a deletion if it wouldn't require a remove()
self._deleted[item] = item in self._original
if item in self._modified:
del self._modified[item]
del self._columns[item]
def _inject(self, key, columns):
"""Inject columns into the record after they have been fetched.."""
self.key = key
if not isinstance(columns, dict):
columns = dict((col.name, col) for col in iter(columns))
self._original = columns
self.revert()
return self
def _marshal(self):
"""Marshal deleted and changed columns."""
return {'deleted': tuple(self.key.get_path(column=col)
for col in self._deleted.keys()),
'changed': tuple(self._columns[key]
for key in self._modified.keys())}
def load(self, key, consistency=None):
"""Load this record from primary key"""
if not isinstance(key, Key):
key = self.make_key(key)
self._clean()
consistency = consistency or self.consistency
columns = iterators.slice_iterator(key, consistency)
self._inject(key, dict([(column.name, column) for column in columns]))
return self
def save(self, consistency=None):
"""Save the record, returns self."""
if not self.valid():
raise exc.ErrorMissingField("Missing required field(s):",
self.missing())
if not hasattr(self, 'key') or not self.key:
self.key = self.default_key()
assert isinstance(self.key, Key), "Bad record key in save()"
# Marshal and save changes
changes = self._marshal()
self._save_internal(self.key, changes, consistency)
try:
try:
# Save mirrors
for mirror in self.get_mirrors():
self._save_internal(mirror.mirror_key(self), changes,
consistency)
finally:
# Update indexes
for index in self.get_indexes():
index.append(self)
finally:
# Clean up internal state
if changes['changed']:
self._modified.clear()
self._original = copy.deepcopy(self._columns)
return self
def _save_internal(self, key, changes, consistency=None):
"""Internal save method."""
consistency = consistency or self.consistency
client = self._get_cas(key.keyspace)
# Delete items
for path in changes['deleted']:
client.remove(key.keyspace, key.key, path,
self.timestamp(), consistency)
self._deleted.clear()
# Update items
if changes['changed']:
client.batch_insert(*self._get_batch_args(
key, changes['changed'], consistency))
def _get_batch_args(self, key, columns, consistency=None):
"""Return a BatchMutation for the given key and columns."""
consistency = consistency or self.consistency
if key.is_super():
columns = [SuperColumn(name=key.super_column, columns=columns)]
return (key.keyspace, key.key,
{key.column_family: tuple(iterators.pack(columns))},
consistency)
def remove(self, consistency=None):
"""Remove this record from Cassandra."""
consistency = consistency or self.consistency
self._get_cas().remove(self.key.keyspace, self.key.key,
self.key.get_path(), self.timestamp(),
consistency)
self._clean()
return self
def revert(self):
"""Revert changes, restoring to the state we were in when loaded."""
for col in self._original.values():
dict.__setitem__(self, col.name, col.value)
self._columns[col.name] = col
self._modified, self._deleted = {}, {}
class MirroredRecord(Record):
"""A mirrored (denormalized) record."""
def mirror_key(self, parent_record):
"""Return the key this mirror should be saved into."""
assert isinstance(parent_record, Record)
raise exc.ErrorMissingKey("Please implement a mirror_key method.")
def save(self):
"""Refuse to save this record."""
raise exc.ErrorImmutable("Mirrored records are immutable.")
``` |
{
"source": "joestump/python-antminer",
"score": 3
} |
#### File: python-antminer/antminer/base.py
```python
import socket
import json
import sys
from antminer.exceptions import (
WarningResponse, ErrorResponse, FatalResponse, UnknownError,
raise_exception
)
from antminer.constants import (
    STATUS_INFO, STATUS_SUCCESS, DEFAULT_PORT, MINER_CGMINER,
    MINER_BMMINER, MINER_UNKNOWN
)
from antminer.utils import parse_version_number
class Core(object):
def __init__(self, host, port=DEFAULT_PORT):
self.host = host
self.port = int(port)
self.conn = None
def connect(self):
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((self.host, self.port))
def close(self):
self.conn.close()
self.conn = None
def send_command(self, command):
if self.conn is None:
self.connect()
cmd = command.split('|')
if len(cmd) > 2 or len(cmd) == 0:
raise ValueError("Commands must be one or two parts")
payload = {
'command': cmd[0]
}
if len(cmd) == 2:
payload['parameter'] = cmd[1]
self.conn.send(json.dumps(payload))
payload = self.read_response()
try:
response = json.loads(payload)
except ValueError:
            response = payload  # Assume downstream code knows what to do.
self.close()
return response
def read_response(self):
done = False
buf = self.conn.recv(4096)
while done is False:
more = self.conn.recv(4096)
if not more:
done = True
else:
buf += more
return buf.replace('\x00','')
def command(self, *args):
"""
Send a raw command to the API.
This is a lower level method that assumes the command is the first
argument and that the rest of the arguments are parameters that should
be comma separated.
The basic format of API commands is 'command|param1,param2,etc'. The meaning
of parameters depends greatly on the command provided. This method will return
odd results if poorly constructed commands are passed.
"""
return self._send('{command}|{parameters}'.format(command=args[0],
parameters=','.join(args[1:])))
def _raise(self, response, message=None):
raise_exception(response, message)
def _send(self, command):
response = self.send_command(command)
try:
success = (response['STATUS'][0]['STATUS'] in [STATUS_INFO, STATUS_SUCCESS])
except:
raise UnknownError(response)
if not success:
self._raise(response)
return response
class BaseClient(Core):
def stats(self):
"""
Get stats for the miner.
Unfortunately, the API doesn't return valid JSON for this API response, which
requires us to do some light JSON correction before we load the response.
"""
response = self.send_command('stats')
return json.loads(response.replace('"}{"', '"},{"'))
def version(self):
"""
Get basic hardware and software version information for a miner.
This returns a number of important version numbers for the miner. Each of the
version numbers is an instance of Version from the SemVer Python package.
"""
fields = [
('Type', 'model', str),
('API', 'api', parse_version_number),
('Miner', 'version', parse_version_number),
]
resp = self.command('version')
version = {}
for from_name, to_name, formatter in fields:
try:
version[to_name] = formatter(str(resp['VERSION'][0][from_name]))
except KeyError:
pass
version['miner'] = {}
if MINER_CGMINER in resp['VERSION'][0]:
version['miner']['vendor'] = MINER_CGMINER
version['miner']['version'] = parse_version_number(resp['VERSION'][0][MINER_CGMINER])
elif MINER_BMMINER in resp['VERSION'][0]:
version['miner']['vendor'] = MINER_BMMINER
version['miner']['version'] = parse_version_number(resp['VERSION'][0][MINER_BMMINER])
else:
            version['miner']['vendor'] = MINER_UNKNOWN
version['miner']['version'] = None
return version
def __getattr__(self, name, *args):
return lambda *x: self.command(name, *x)
``` |
{
"source": "JoeSuber/dialtones",
"score": 3
} |
#### File: JoeSuber/dialtones/psycon.py
```python
import numpy as np
import cv2
import os
"""
Use template matching to find icon location.
numpy and install help:
(basically just make sure to use all 32 or 64 bit versions for everything.
On windows the default python version is likely 32 bit)
https://www.solarianprogrammer.com/2016/09/17/install-opencv-3-with-python-3-on-windows/
http://www.lfd.uci.edu/~gohlke/pythonlibs/
"""
resolutions = [(320, 480), (480,800), (540,960), (600,1024), (640, 1136), (720,1280), (768,1280), (800,1280),
(1080, 1920), (1440, 2560), (1440, 2880)]
def iconograph(screen_path, icon_path, icon_source_size=(720, 1280), DEBUG=False):
""" returns coordinates in an image that best match the given icon """
image = cv2.imread(screen_path, cv2.IMREAD_GRAYSCALE)
if image is None:
print("INVALID SCREEN PATH: {}".format(screen_path))
if DEBUG: print("screen size: {}".format(image.shape))
raw_icon = cv2.imread(icon_path, cv2.IMREAD_GRAYSCALE)
best_match = None
for res in resolutions:
if res[0] < icon_source_size[0]:
interpol_method = cv2.INTER_AREA
else:
interpol_method = cv2.INTER_CUBIC
icon = cv2.resize(raw_icon, None, fx=res[0] / float(icon_source_size[0]),
fy=res[1] / float(icon_source_size[1]), interpolation=interpol_method)
if (image.shape[0] < icon.shape[0]) or (image.shape[1] < icon.shape[1]):
continue
_, max_val, _, max_loc = cv2.minMaxLoc(cv2.matchTemplate(image, icon, 5))
if DEBUG: print(max_val, max_loc)
max_loc = (max_loc[0] + int(icon.shape[1]/2), max_loc[1] + int(icon.shape[0]/2))
if best_match is None or best_match[0] < max_val:
best_match = (max_val, max_loc, res)
if DEBUG:
cv2.circle(image, best_match[1], 25,(255,255,255), thickness=3)
cv2.imshow(str(best_match[2]), cv2.resize(image, (640,1080)))
cv2.waitKey(0)
cv2.destroyAllWindows()
return best_match if DEBUG else best_match[1]
def colors(im=None):
""" not really related. Just helps with finding details in gray scale images """
if im is None:
path = "C:\\Users\\2053_HSUF\\PycharmProjects\\dialtones\\pics\\76aa16c9_playstore.png"
im = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
for shade in range(13):
colorized = cv2.applyColorMap(im, shade)
cv2.imshow("yo", colorized)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
""" for testing one icon against a bunch of screens """
print(cv2.__version__)
print(np.__version__)
iconpaths = [os.path.join(os.getcwd(), "icons", icp)
for icp in os.listdir(os.path.join(os.getcwd(), "icons")) if icp.endswith(".png")]
images = [os.path.join(os.getcwd(), "pics", ip)
for ip in os.listdir(os.path.join(os.getcwd(), "pics")) if ip.endswith(".png")]
for icp in iconpaths:
cv2.imshow("Finding:", cv2.imread(icp))
for impath in images:
# colors(im=cv2.imread(impath, cv2.IMREAD_GRAYSCALE))
print(iconograph(impath, icp, DEBUG=True))
``` |
{
"source": "JoeSuber/phones",
"score": 2
} |
#### File: JoeSuber/phones/app.py
```python
from flask import Flask, render_template, redirect, url_for, flash, session, request
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from flask_mail import Mail, Message
from wtforms import StringField, PasswordField, BooleanField, IntegerField, ValidationError
from wtforms.validators import InputRequired, Email, Length
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from flask_table import Table, Col
# from flask_sslify import SSLify
import pickle, os, csv
from datetime import datetime, timedelta, timezone
from random import randint
from collections import Counter
try:
from papers import stamp, basedir, use_local_mail
except:
print("WARNING! You have no papers.")
stamp = ''
basedir = ''
use_local_mail = True
"""
** setup on remote host **
$ git clone https://github.com/JoeSuber/phones.git
$ virtualenv --python=$python3 phones/
$ cd phones/
$ source bin/activate
$ pip install -r requirements.txt
$ sudoedit /etc/apache2/sites-available/app.conf
## /etc/apache2/sites-available/app.conf <--ubuntu, etc.
## /etc/httpd/sites-available/app.conf <---redhat etc.
<VirtualHost *:80>
ServerName www.dvtandc.com/inventory
WSGIDaemonProcess app user=joe.suber group=joe.suber threads=5 home=/user/joe.suber/phones
WSGIScriptAlias /inventory /home/joe.suber/phones/app.wsgi
<Directory /home/joe.suber/phones/>
WSGIProcessGroup app
WSGIApplicationGroup %{GLOBAL}
WSGIScriptReloading On
WSGIRestrictStdout Off
Require all granted
</Directory>
</VirtualHost>
## end app.conf ##
### How to set the backup ###
tar -cvpzf ~/backup/phones.tar.gz ~/phones/
"""
###################################################################################
# DONT FORGET! to uncomment the '@login_required' for newperson() upon deployment
###################################################################################
app = Flask(__name__)
app.config['SECRET_KEY'] = os.urandom(24)
#sslify = SSLify(app, subdomains=True)
__dbfn__ = "DVTCinventory"
__sqlext__ = '.sqlite'
__sql_inventory_fn__ = os.path.join(os.getcwd(), (__dbfn__ + __sqlext__))
print("Database file located at: {}".format(__sql_inventory_fn__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + __sql_inventory_fn__
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['WERKZEUG_DEBUG_PIN'] = False
app.config['MAIL_SERVER'] = 'localhost' if use_local_mail else 'smtp.gmail.com'
app.config['MAIL_PORT'] = 25 if use_local_mail else 465
app.config['MAIL_USE_SSL'] = False if use_local_mail else True
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USERNAME'] = '<EMAIL>'
app.config['MAIL_PASSWORD'] = stamp
print("mail server, port, SSL = '{}', '{}', '{}'".format(app.config['MAIL_SERVER'],
app.config['MAIL_PORT'],
app.config['MAIL_USE_SSL']))
Bootstrap(app)
mail = Mail(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
###########################
#### Database Tables ######
###########################
class User(UserMixin, db.Model):
__tablename__ = "people"
id = db.Column(db.Integer, primary_key=True)
badge = db.Column(db.String(40), unique=True)
username = db.Column(db.String(40), unique=True)
email = db.Column(db.String(50))
password = db.Column(db.String(94))
phone_number = db.Column(db.String(12))
admin = db.Column(db.Boolean)
class Phone(db.Model):
""" will add relations to User ...http://flask-sqlalchemy.pocoo.org/2.1/quickstart/"""
__tablename__ = "devices"
id = db.Column(db.Integer, primary_key=True)
MEID = db.Column(db.String(28), unique=True)
SKU = db.Column(db.String(50))
MODEL = db.Column(db.String(50))
OEM = db.Column(db.String(16))
Serial_Number = db.Column(db.String(50))
Hardware_Version = db.Column(db.String(50))
In_Date = db.Column(db.DateTime)
Archived = db.Column(db.Boolean)
TesterId = db.Column(db.Integer)
DVT_Admin = db.Column(db.String(80))
MSL = db.Column(db.String(50))
History = db.Column(db.LargeBinary)
Comment = db.Column(db.String(255))
db.create_all()
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
##########################
### Tables ###
##########################
class People(Table):
username = Col('username')
badge = Col('badge')
class Devices(Table):
MEID = Col('MEID')
TesterID = Col('Tester')
SKU = Col('SKU')
OEM = Col('OEM')
MODEL = Col('Model')
DVT_Admin = Col('Manager')
class Historical(Table):
User = Col('Responsible: ')
Date = Col(' taken date: ')
class Checked_Out(Table):
MEID = Col('MEID___________:')
SKU = Col('SKU____:')
OEM = Col('OEM_____:')
Serial_Number = Col('Serial Number__:')
Hardware_Version = Col('Hardware Version__:')
MODEL = Col('Model___:')
MSL = Col('MSL____:')
Comment = Col('Comment___:')
##########################
##### Validators #########
##########################
class Unique(object):
""" validator for FlaskForm that demands field uniqueness against the current database entries """
def __init__(self, model, field, message=None):
self.model = model
self.field = field
if not message:
message = u'not validated'
self.message = message
def __call__(self, form, field):
check = self.model.query.filter(self.field == field.data).first()
if check:
raise ValidationError(self.message)
class Exists(Unique):
""" validator for FlaskForm that demands that an item exists """
def __call__(self, form, field):
check = self.model.query.filter(self.field == field.data).first()
if not check:
raise ValidationError(self.message)
class Exists_Or_Blank(Unique):
def __call__(self, form, field):
if field.data:
check = self.model.query.filter(self.field == field.data).first()
if not check:
raise ValidationError(self.message)
##########################
######## Forms ###########
##########################
class BadgeEntryForm(FlaskForm):
badge = StringField('badge', validators=[InputRequired(),
Length(min=4, max=40),
Exists(User, User.badge,
message="Badge does not belong to a registered user")])
class MeidForm(FlaskForm):
meid = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid2 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid3 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid4 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid5 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid6 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid7 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid8 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid9 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
meid10 = StringField('MEID', validators=[Exists_Or_Blank(Phone, Phone.MEID,
message="MEID does not match any devices in database")])
class LoginForm(FlaskForm):
username = StringField('username', validators=[InputRequired(),
Exists(User, User.username, message="Not a registered username")])
password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
class RegisterForm(FlaskForm):
email = StringField('email', validators=[InputRequired(), Email(message='Invalid email'), Length(min=4, max=50),
Unique(User, User.email, message="Email address already in use")])
badge = StringField('badge', validators=[InputRequired(), Length(min=10, max=80),
Unique(User, User.badge, message="Badge number already assigned!")])
username = StringField('username', validators=[InputRequired(), Length(min=4, max=15),
Unique(User, User.username, message="Please choose another name")])
password = PasswordField('password', validators=[InputRequired(),
Length(min=8, max=80, message="Passwords are 8-80 characters")])
phone_number = StringField('phone xxx-xxx-xxxx', validators=[InputRequired(), Length(min=4, max=12)])
admin = BooleanField('admin ')
class NewDevice(FlaskForm):
OEM = StringField('OEM', validators=[InputRequired()])
MEID = StringField('MEID', validators=[InputRequired(), Length(min=10, max=24),
Unique(Phone, Phone.MEID, message="This MEID is already in the database")])
SKU = StringField('SKU', validators=[InputRequired(), Length(min=2, max=80)])
MODEL = StringField('MODEL', validators=[InputRequired(), Length(min=2, max=80)])
Hardware_Version = StringField('Hardware Version', validators=[InputRequired(), Length(min=1, max=40)])
Serial_Number = StringField('Serial Number', validators=[InputRequired(), Length(min=6, max=16)])
Archived = BooleanField('Archived ')
MSL = StringField('MSL', validators=[InputRequired()])
Comment = StringField('Comment')
class ChangePassword(FlaskForm):
account = StringField('user name for which we will change the password: ', validators=[InputRequired(),
Exists(User, User.username, message="Not a registered username")])
password = PasswordField('new password:', validators=[InputRequired(), Length(min=8, max=80)])
retype = PasswordField('re-type :', validators=[InputRequired(), Length(min=8, max=80)])
class OemForm(FlaskForm):
OEM = StringField('OEM name', validators=[InputRequired(), Exists(Phone, Phone.OEM, message="No OEM by that name!")])
class OverdueForm(FlaskForm):
timeframe = IntegerField('Number of Days', validators=[InputRequired()])
###########################
####### Routes ############
###########################
sub = basedir
print("sub = {}".format(sub))
@app.route(sub + '/', methods=['GET', 'POST'])
def index():
# step 1, get the badge to get the user
session['userid'] = None
form = BadgeEntryForm()
if form.validate_on_submit():
user = User.query.filter_by(badge=form.badge.data).first()
session['userid'] = user.id
return redirect(url_for('meid'))
message = None
if 'message' in session:
message = session.pop('message')
return render_template('index.html', form=form, message=message)
@app.route(sub + '/meid', methods=['GET', 'POST'])
def meid():
# step 2, get the device, change owner
form = MeidForm()
if form.validate_on_submit():
device1 = Phone.query.filter_by(MEID=form.meid.data).first()
device2 = Phone.query.filter_by(MEID=form.meid2.data).first()
device3 = Phone.query.filter_by(MEID=form.meid3.data).first()
device4 = Phone.query.filter_by(MEID=form.meid4.data).first()
device5 = Phone.query.filter_by(MEID=form.meid5.data).first()
device6 = Phone.query.filter_by(MEID=form.meid6.data).first()
device7 = Phone.query.filter_by(MEID=form.meid7.data).first()
device8 = Phone.query.filter_by(MEID=form.meid8.data).first()
device9 = Phone.query.filter_by(MEID=form.meid9.data).first()
device10 = Phone.query.filter_by(MEID=form.meid10.data).first()
devices = [device1, device2, device3, device4, device5, device6, device7, device8, device9, device10]
messages = []
for device in devices:
if device and session['userid']:
# change owner of device and append new owner to history blob ####
device.TesterId = session['userid']
device.In_Date = datetime.utcnow()
history = pickle.loads(device.History)
history.append((session['userid'], datetime.utcnow()))
device.History = pickle.dumps(history)
db.session.commit()
phrase = "{} - {} {} meid: {}".format(device.OEM, device.SKU, device.MODEL, device.MEID)
messages.append(phrase)
session['message'] = load_user(session['userid']).username + " took: " + ", ".join(messages)
session['userid'], devices = None, None
return redirect(url_for('index')) # success!
if ('userid' in session) and session['userid']:
username = load_user(session['userid']).username
else:
session['message'] = "Enter destination badge first:"
return redirect(url_for('index')) # fail! probably tried to access page directly
return render_template('meid.html', form=form, name=username)
@app.route(sub + '/newperson', methods=['GET', 'POST'])
@login_required ### <-- uncomment after adding first admin user to database
def newperson():
form = RegisterForm()
if request.method == 'GET':
form.badge.data = unique_badge()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data)
logged = User(badge=form.badge.data,
email=form.email.data,
username=form.username.data,
                      password=hashed_password,
phone_number=form.phone_number.data,
admin=form.admin.data)
db.session.add(logged)
db.session.commit()
print("NEW USER! {}".format(logged.username))
flash("created new user: {}".format(logged.username))
return redirect(url_for('admin'))
return render_template('signup.html', form=form)
@app.route(sub + '/newdevice', methods=['GET', 'POST'])
@login_required
def newdevice():
form = NewDevice()
if form.validate_on_submit():
new_device = Phone(OEM=form.OEM.data,
MEID=form.MEID.data,
SKU=form.SKU.data,
MODEL=form.MODEL.data,
Serial_Number=form.Serial_Number.data,
Hardware_Version=form.Hardware_Version.data,
MSL=form.MSL.data,
History=pickle.dumps([(session['userid'], datetime.utcnow())]),
Comment=form.Comment.data,
Archived=form.Archived.data,
In_Date=datetime.utcnow(),
DVT_Admin=current_user.id)
db.session.add(new_device)
db.session.commit()
return redirect(url_for('newdevice'))
return render_template('newdevice.html', form=form)
@app.route(sub + '/admin')
@login_required
def admin():
user = User.query.get(int(current_user.id))
if user.admin:
peeps = User.query.order_by(User.username).all()
table = People(peeps, border=True)
return render_template('admin.html', name=user.username, table=table)
return redirect(url_for('login'))
@app.route(sub + '/newpass', methods=['GET', 'POST'])
@login_required
def newpass():
message = None
user = User.query.get(int(current_user.id))
form = ChangePassword()
if form.validate_on_submit() and user.admin:
changer = User.query.filter_by(username=form.account.data).first()
if user.admin or (user.username == changer.username):
if form.password.data == form.retype.data:
changer.password = generate_password_hash(form.password.data)
db.session.commit()
print("Changed password for: {}".format(changer.username))
return redirect(url_for('admin'))
message = "Password fields don't match!"
return render_template('newpass.html', form=form, name=user.username, message=message)
@app.route(sub + '/meidedit', methods=['GET', 'POST'])
@login_required
def meidedit():
form = MeidForm()
user = User.query.get(int(current_user.id))
print("user.admin = {}".format(user.admin))
if form.validate_on_submit() and user.admin:
print("checking MEID {}".format(form.meid.data))
session['editingMEID'] = form.meid.data
return redirect(url_for('editdevice'))
return render_template('meidedit.html', form=form)
@app.route(sub + '/editdevice', methods=['GET', 'POST'])
@login_required
def editdevice():
try:
device = Phone.query.filter_by(MEID=session['editingMEID']).first()
except KeyError: # protect against false access attempt
return redirect(url_for('meidedit'))
# fill is some form blanks for user:
tester = load_user(int(device.TesterId or 0))
manager = load_user(int(device.DVT_Admin or 0))
newform = NewDevice(MEID=device.MEID,
SKU=device.SKU,
OEM=device.OEM,
MODEL=device.MODEL,
Serial_Number=device.Serial_Number,
Hardware_Version=device.Hardware_Version,
MSL=device.MSL,
Archived=device.Archived,
Comment=device.Comment)
if tester:
testerstring = " Tester: {:20} {}".format(tester.username, tester.badge)
else:
testerstring = "No current tester"
if manager:
managerstring = "Manager: {:20} {}".format(manager.username, manager.badge)
else:
managerstring = "No current manager"
if request.method == "POST":
history = pickle.loads(device.History)
history.append((current_user.id, datetime.utcnow()))
device.MEID = newform.MEID.data
device.SKU = newform.SKU.data
device.OEM = newform.OEM.data
device.MODEL = newform.MODEL.data
device.Serial_Number = newform.Serial_Number.data
device.Hardware_Version = newform.Hardware_Version.data
device.MSL = newform.MSL.data
device.Archived = newform.Archived.data
device.Comment = newform.Comment.data
device.History = pickle.dumps(history)
db.session.commit()
used = session.pop('editingMEID')
print(" {} MEID = {} was updated".format(device.SKU, used))
return render_template('admin.html')
return render_template('editdevice.html', form=newform, tester=testerstring, manager=managerstring)
@app.route(sub + '/editperson/<badge>', methods=['GET', 'POST'])
@login_required
def editperson(badge):
print("badge = {}".format(badge))
try:
person = User.query.filter_by(badge=badge).first()
except KeyError: # protect against false access attempt
print("something amiss")
return redirect(url_for('meidedit'))
# fill is some form blanks for user:
newform = RegisterForm(email=person.email,
badge=person.badge,
username=person.username,
phone_number=person.phone_number,
admin=person.admin)
print("person = {}".format(person.username))
if request.method == "POST":
person.email = newform.email.data
person.badge = newform.badge.data
person.username = newform.username.data
person.phone_number = newform.phone_number.data
person.admin = newform.admin.data
db.session.commit()
print(" {} id = {} was updated".format(person.username, person.id))
return render_template('admin.html')
return render_template('editperson.html', form=newform)
@app.route(sub + '/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
message = None
if request.method == 'GET':
session['sent_from'] = request.args.get('next')
print("session.sent_from = {}".format(session['sent_from']))
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if (not user.password) or (not form.password.data):
session['message'] = "Must supply valid user data."
return redirect(url_for('logout'))
if check_password_hash(user.password, form.password.data):
login_user(user, remember=True)
session['userid'] = user.id
sent_from = session['sent_from']
session['sent_from'] = None
return redirect(sent_from or url_for('index'))
message = "Incorrect Password"
return render_template('login.html', form=form, message=message)
@app.route(sub + '/oemreport', methods=['GET', 'POST'])
@login_required
def oemreport():
user = load_user(current_user.id)
form = OemForm()
if form.validate_on_submit():
email, fn = oem_report(current_user.id,
form.OEM.data,
os.path.join(os.getcwd(), '{}_{}.csv'.format(user.username, form.OEM.data)))
send_report(email, fn, subject='OEM-{} report'.format(form.OEM.data))
return render_template('oemreport.html', form=form, message='report on {} sent!'.format(form.OEM.data))
return render_template('oemreport.html', form=form, message="send report to: " + user.email)
@app.route(sub + '/overdue', methods=['GET', 'POST'])
@login_required
def overdue():
user = load_user(current_user.id)
form = OverdueForm()
if form.validate_on_submit():
email, fn = overdue_report(current_user.id,
days=form.timeframe.data,
outfile=os.path.join(os.getcwd(), '{}_overdue.csv'.format(user.username)))
send_report(email, fn, subject="Overdue devices report")
return render_template('overdue.html', form=form, message="overdue devices report sent")
return render_template('overdue.html', form=form,
message="Please enter the number of days 'out' you are interested in")
@app.route(sub + '/mailtest')
@login_required
def mailtest():
user = load_user(current_user.id)
print(user.email)
send_test(user.email)
user = User.query.get(int(current_user.id))
if not user.admin:
return redirect(url_for('login'))
return render_template('admin.html', name='mail sent whooha')
@app.route(sub + '/logout')
@login_required
def logout():
logout_user()
session['userid'] = None
return redirect(url_for('index'))
@app.route(sub + '/people')
@login_required
def people():
items = User.query.all()
table = People(items)
return render_template('people.html', table=table)
@app.route(sub + '/checkouts', methods=['GET', 'POST'])
@login_required
def checkouts():
""" Show all devices possessed by a particular user """
form = BadgeEntryForm()
records = Checked_Out([])
user = ""
if form.validate_on_submit():
records = users_devices(form.badge.data)
records = Checked_Out(records, border=True)
user = User.query.filter_by(badge=form.badge.data).first()
user = user.username
return render_template('checkouts.html', form=form, records=records, user=user)
return render_template('checkouts.html', form=form, records=records, user=user)
@app.route(sub + '/history', methods=['GET', 'POST'])
@login_required
def history():
""" show history of who has had a particular device and when """
form = MeidForm()
records = Historical([])
if form.validate_on_submit():
records = retrieve_history(form.meid.data)
records = Historical(records, border=True)
return render_template('history.html', form=form, records=records)
return render_template('history.html', form=form, records=records)
################################
###### Import/Export Data ######
################################
""" _columns must be the same at time of import and export to assure the proper labels """
_columns = ['MEID', 'OEM', 'MODEL', 'SKU', 'Serial_Number', 'Hardware_Version',
'In_Date', 'Archived', 'TesterId', 'DVT_Admin', 'MSL', 'Comment']
def csv_template(outfile=None):
""" create a spreadsheet template for project managers to fill using the _column list """
if not outfile:
outfile = os.path.join(os.getcwd(), "your_own_devices.csv")
with open(outfile, 'w', newline='') as output:
spamwriter = csv.writer(output, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(_columns)
print("spreadsheet columns exported to: {}".format(outfile))
def csv_dump(phones_fn=None):
""" Call from console to get a dump file that can be re-imported
or examined as a csv.
WARNING: does not preserve passwords or device history """
from collections import Counter
if not phones_fn:
phones_fn = os.path.join(os.getcwd(), "all_devices.csv")
existing_items = Phone.query.all()
if report_spamer(existing_items, phones_fn):
print("dumped {} lines of device data".format(len(existing_items)))
existing_people = User.query.all()
people_columns = []
for k in User.__dict__.keys():
dunders = Counter(k)
if dunders['_'] > 1:
continue
people_columns.append(k)
for peep in existing_people:
print("****")
for stat in people_columns:
if 'password' not in stat:
print("{}: {}".format(stat, peep.__dict__[stat]))
def datefix(datestr):
""" transform string into a python datetime object
handle mm/dd/yy or mm/dd/yyyy or dashes instead of slashes """
fix = datestr.replace('-','/')
if len(fix) > 4:
try:
return datetime.strptime(fix, "%m/%d/%y")
except ValueError:
return datetime.strptime(fix, "%m/%d/%Y")
return datetime.utcnow()
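# Illustrative behaviour of datefix (examples are mine, not from the original source):
#   datefix("3-14-19")    -> datetime(2019, 3, 14)
#   datefix("03/14/2019") -> datetime(2019, 3, 14)
#   datefix("")           -> datetime.utcnow()   (fallback for blank cells)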
def csv_import(filename=None):
""" For importing devices into database from existing spreadsheet.
Assumes users have kept columns in the _column list-order.
(to use, save the inventory sheets as local .csv files with those
particular columns, in the same column order, run from console and/or use
'app.import_all_sheets()')
"""
if not filename:
filename = os.path.join(os.getcwd(), "scotts.csv")
columns = _columns
column_checksum = len(columns)
if column_checksum != len(set(columns)):
print("Your expected column headers contain repeats. Not gonna work!")
return False
new_item_count, existing_item_count = 0, 0
with open(filename, "rU") as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for num, line in enumerate(spamreader):
if not new_item_count: # first line should be the column labels
## check the column labels to head off import errors ##
for present, reference in zip(line, columns):
if present.strip() != reference.strip():
print("for: {}".format(filename))
print("the column headers are not correct: {} != {}".format(present, reference))
return False # abort!
new_item_count = 1
continue
row = {label: item.strip() for label, item in zip(columns, line)}
if len(row) != column_checksum:
print("ABORT! on bad row: {}".format(row))
print("Import not finished! Fix data")
exit(1)
# check that item is not already in database
existing_item = Phone.query.filter_by(MEID=row['MEID']).first()
if existing_item:
existing_item_count += 1
print("!{:5} Item exists {}".format(num, row['MEID']))
continue
try:
admin = User.query.filter_by(id=int(row['DVT_Admin'].strip())).first()
except Exception as e:
print("{} ERROR:".format(e))
admin = None
if not admin:
print(type(row['DVT_Admin']), len(row['DVT_Admin']))
print("!{:5} skipped due to non-existant DVT_admin: '{}'".format(num, row['DVT_Admin']))
continue
print("#{:5}: {}".format(num, row))
new_device = Phone(OEM=row['OEM'],
MEID=row['MEID'],
SKU=row['SKU'],
MODEL=row['MODEL'],
Serial_Number=row['Serial_Number'],
Hardware_Version=row['Hardware_Version'],
MSL=row['MSL'].strip('"'),
History=pickle.dumps([(row['DVT_Admin'], datetime.utcnow())]),
Comment=row['Comment'].replace(os.linesep, ' '),
In_Date=datefix(row['In_Date']),
Archived=bool(row['Archived']),
TesterId=row['TesterId'],
DVT_Admin=row['DVT_Admin'])
try:
db.session.add(new_device)
new_item_count += 1
except Exception as e:
print("ER: {}, {}".format(e, new_device))
db.session.commit()
print("imported {} items".format(new_item_count - 1))
print("ignored {} existing items".format(existing_item_count))
return True
def import_all_sheets(fns=None):
""" gather up the .csv files with 'newsheet' in the title and import them all at once """
base = os.getcwd()
if not fns:
fns = [os.path.join(base, fn) for fn in os.listdir(base) if fn.endswith(".csv") and ('newsheet' in fn)]
for fn in fns:
result = csv_import(filename=fn)
print("processed {}: {}".format(fn, result))
return 1
def nameid(id_num):
""" try to find a human readable string to go with the id number """
person = None
if id_num:
person = User.query.get(int(id_num))
if person:
return person.username
return ''
def newpeople(filename=None):
""" Import people from the spreadsheet. Save it as a csv. Also can change the info for a user.id"""
if not filename:
filename = os.path.join(os.getcwd(), "People.csv")
print("filename = {}".format(filename))
columns = ["Full_Name", "Badge_ID", "DB_ID", "email", "phone"]
new_item_count, existing_item_count = 0, 0
with open(filename, "rU") as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
column_checksum = len(columns)
for num, line in enumerate(spamreader):
if not new_item_count: # skip the row labels
new_item_count = 1
continue
row = {label: item.strip() for label, item in zip(columns, line)}
if len(row) != column_checksum:
print("ABORT! on bad row: {}".format(row))
print("Import not finished! Fix data")
exit(1)
# check that item is not already in database
existing_id = User.query.filter_by(id=int(row['DB_ID'])).first()
if existing_id:
existing_item_count += 1
print("!{:5} Person exists {}".format(num, row['DB_ID']))
existing_id.badge=row['Badge_ID']
existing_id.username=row['Full_Name']
existing_id.email=row['email']
existing_id.phone_number=row['phone']
db.session.commit()
print("details updated for {} (id={})".format(existing_id.username, existing_id.id))
continue
existing_badge = User.query.filter_by(badge=row['Badge_ID']).first()
if existing_badge:
existing_item_count += 1
print("!{:5} Badge number in use: {}".format(num, row['Badge_ID']))
continue
existing_email = User.query.filter_by(email=row['email']).first()
if existing_email:
existing_item_count += 1
print("!{:5} email in use: {}".format(num, row['email']))
continue
print("#{:5}: {}".format(num, row))
new_person = User(id=int(row['DB_ID']),
badge=row['Badge_ID'],
username=row['Full_Name'],
email=row['email'],
phone_number=row['phone'])
try:
db.session.add(new_person)
new_item_count += 1
except Exception as e:
print("ER: {}, {}".format(e, new_person))
db.session.commit()
print("imported {} people".format(new_item_count - 1))
print("ignored {} existing people".format(existing_item_count))
return True
def report_spamer(spam_list, outfn):
""" writes out reports to a csv that can be opened into a spreadsheet"""
columns = _columns
with open(outfn, 'w', newline='') as output_obj:
spamwriter = csv.writer(output_obj, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(columns) # column labels
for i in spam_list:
line = [i.MEID, i.OEM, i.MODEL, i.SKU, i.Serial_Number, i.Hardware_Version, str(i.In_Date.date()),
i.Archived, nameid(i.TesterId), nameid(i.DVT_Admin), i.MSL, i.Comment]
spamwriter.writerow(line)
print("report file written to = {}".format(outfn))
return True
def overdue_report(manager_id, days=14, outfile=None):
""" query by manager to find devices that need checking-up on
write a report that can be sent as an attachment to managers. return filename. """
if outfile is None:
outfile = os.path.join(os.getcwd(), "overdue_report.csv")
manager = User.query.get(manager_id)
try:
assert manager.admin
except AssertionError:
responce = "User: {} is not an Administrator".format(manager.username)
print(responce)
return None, responce
today = datetime.utcnow()
delta = timedelta(days)
overdue_stuff = [phone for phone in Phone.query.filter_by(DVT_Admin=manager.id).all()
if ((today - phone.In_Date) > delta) and phone.TesterId]
report_spamer(overdue_stuff, outfile)
return manager.email, outfile
def oem_report(manager_id, oem=None, outfile=None):
""" prepare a .csv report that lists all devices from a particular OEM
or just return all devices from a manager (old and gone: filter by manager and OEM)"""
manager = User.query.get(manager_id)
if outfile is None:
outfile = os.path.join(os.getcwd(), "oem_report.csv")
if oem is None:
results = Phone.query.filter_by(DVT_Admin=manager_id).all()
else:
results = Phone.query.filter_by(OEM=oem).all()
report_spamer(results, outfile)
return manager.email, outfile
def send_report(email, attachment_fn, sender=None, subject='Overdue Devices Report'):
""" email an attachment """
if sender is None:
sender=app.config['MAIL_USERNAME']
human_name = os.path.split(attachment_fn)[-1]
message = Message(subject=subject + " " + human_name,
sender=sender,
recipients=[email])
with app.open_resource(attachment_fn) as attachment:
message.attach(human_name, "spreadsheet/csv", attachment.read())
mail.send(message)
print("sent mail from {} to {}".format(sender, email))
return True
def swapm(oem, new_owner):
""" app.swapm('Blu', 7) = change all devices with OEM='Blu' to be owned by DVT_Admin='7' """
try:
assert(int(new_owner) < 99)
except (ValueError, AssertionError):
print("new owner should be an integer representing the db id.")
return False
print("looking for devices made by {}, to transfer to {}".format(oem, new_owner))
for device in Phone.query.all():
if device.OEM.lower() == oem.lower():
device.DVT_Admin = str(new_owner)
print("device {} has project manager = {}".format(device.MEID, new_owner))
db.session.commit()
return True
def oems():
""" show the project managers, the oems, and the device counts """
devices = Phone.query.all()
oems = Counter([device.OEM for device in devices])
managers = Counter([(device.DVT_Admin, device.OEM) for device in devices])
outlist = []
for m in managers:
output = (nameid(m[0]), m[0], m[1], managers[m], oems[m[1]])
print("{} id#{} has {}: {} - of {}".format(output[0], output[1], output[2], output[3], output[4]))
outlist.append(output)
return outlist
def send_test(email):
message = Message(subject="testes12..3?",
sender=app.config['MAIL_USERNAME'],
recipients=[email])
mail.send(message)
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=None)
def retrieve_history(meid, date_filter=":%b %d, %Y, %I.%M %p"):
""" http://strftime.org/
unpickles and formats the history of a device into a list of dict"""
device = Phone.query.filter_by(MEID=meid).first()
if not device:
return []
history = pickle.loads(device.History)
herstory = []
for event in history:
print("event = {}".format(event))
id, date = event
date = utc_to_local(date)
if id is not None:
person = User.query.filter_by(id=id).first()
username = person.username
else:
username = "Original PM"
herstory.append({'User': username, 'Date': date.strftime(date_filter)})
return herstory
def users_devices(badge):
""" find all devices owned by a person """
user = User.query.filter_by(badge=badge).first()
id = str(user.id)
return Phone.query.filter_by(TesterId=id).all()
def unique_badge():
""" keep trying until a new random badge number has been found to return """
rando = str(randint(1000000000, 9999999999))
badge = User.query.filter_by(badge=rando).first()
print("rando badge query = {}".format(badge))
if badge:
return unique_badge()  # propagate the freshly generated badge instead of returning the duplicate
return rando
def check_histories():
""" run from command line to fix the import error caused by leaving the TesterID cells blank
Also verifies that the device checkout histories that exist all have the required info """
devices = Phone.query.all()
for device in devices:
history = pickle.loads(device.History)
newid = None
dates = []
for entry in history:
id, date = entry
dates.append(date)
if id == '':
print("device: {}, {}, {}, {}".format(device.MEID, device.DVT_Admin, device.TesterId, device.OEM))
newid = device.TesterId
if newid:
device.History = pickle.dumps([(newid, old_date) for old_date in dates])
db.session.commit()
print("fixed")
print("DONE!")
def meidication(adminid=None):
""" fix the MEID where an IMEI (or some other improper thing was entered) by truncating to 14 characters"""
if adminid is None:
admins = User.query.filter_by(admin=True).all()
for perp in admins:
print("{:5} - {}".format(perp.id, perp.username))
print(" ---------- ")
adminid = int(input("Enter the id to use: "))
devices = Phone.query.filter_by(DVT_Admin='{}'.format(adminid)).all()
tally = Counter([len(device.MEID) for device in devices])
print(tally)
to_fix = [d for d in devices if len(d.MEID) > 14 and (not Phone.query.filter_by(MEID=d.MEID[:14]).first())]
fn = os.getcwd() + "fixed_MEID_for_id_{}".format(adminid)
while os.path.exists(fn):
fn = fn + "_{}".format(adminid)
with open(fn, 'w') as fob:
fob.writelines([d.MEID + os.linesep for d in to_fix])
print("{} MEID will be truncated to 14 characters and saved in file '{}'".format(len(to_fix), fn))
for device in to_fix:
device.MEID = device.MEID[:14]
db.session.commit()
print("database updated!")
def look_for_blanks():
devices = Phone.query.filter_by(MODEL="", SKU="").all()
print(devices)
return devices
if __name__ == '__main__':
if os.name == 'nt':
app.run(debug=True)
else:
app.run(host='0.0.0.0', debug=False)
``` |
{
"source": "JoeSuber/QuickerPicker",
"score": 3
} |
#### File: JoeSuber/QuickerPicker/wave.py
```python
import numpy as np
"""
output a list of points consumable by openscad polygon function
"""
def wave(degs, scale=10):
pts = []
for i in range(degs):
rad = i*np.pi/180.0
x = float(i/180.0*scale)
y = np.sin(rad) * scale
pts.append([x, y])
return pts
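# Illustrative only (assumed downstream usage, not part of the original): the printed
# list is meant to be pasted into an OpenSCAD file, roughly like
#   points = [[0, 0], [0.055, 0.174], ...];
#   polygon(points);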
def pwave(degs, scale=20):
# default scale of 20 would give: 0 < x < 40 units
print("points = {};".format(wave(degs, scale=scale)))
print("*** finito ***")
return 0
if __name__ == "__main__":
pwave(359, scale=100)
``` |
{
"source": "Joe-Surrey/slt",
"score": 2
} |
#### File: Joe-Surrey/slt/extract_gcn.py
```python
import lzma
import pickle
import time
import random
import os
import yaml
from signjoey.GCN.processor import load_model
from signjoey.augmentations import load_augment
import argparse
def get_files(video_folder):
# Glob directory
videos = [
{
"file": y,
"file_name": y.split(".")[0],
"input_file": os.path.join(p, y),
}
for p, _, x in os.walk(video_folder)
for y in x
if y.endswith(".pkl")
]
return videos
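# For example (illustrative path, not from the original), /data/poses/clip01.pkl yields
#   {"file": "clip01.pkl", "file_name": "clip01", "input_file": "/data/poses/clip01.pkl"}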
def main(params):
# Load
files = get_files(params.input_folder)
model = load_model()
print(len(files))
for index, file in enumerate(files):
#name = params.input_file.split("/")[-1].split(".")[0]
with open(f"{file['input_file']}", "rb") as f:
label = pickle.loads(f.read())
# Run
try:
keypoints = load_augment("holistic", pickle.loads(lzma.decompress(label['sign'])))
except TypeError:
print(label["name"])
keypoints = load_augment("holistic", pickle.loads(lzma.decompress(label['sgn'])).numpy())
features = model(keypoints.unsqueeze(0).cuda()).squeeze(0).cpu().detach().numpy()
label["sign"] = lzma.compress(pickle.dumps(features))
# Make file
with open(f"{params.output_folder}/{file['file_name']}.pkl", "wb") as f:
f.write(pickle.dumps(label))
if __name__ == '__main__':
# Get params
parser = argparse.ArgumentParser()
parser.add_argument("--input_folder", type=str,
default="", help="")
parser.add_argument("--output_folder", type=str,
default="", help="")
params, _ = parser.parse_known_args()
_time = time.time()
main(params)
print(f"Done in {time.time() - _time}s")
```
#### File: GCN/feeders/augmentations.py
```python
import random
def random_mirror(data_numpy):
# TODO random!!!
if random.random() < 0.5:
data_numpy[0] = - data_numpy[0]
return data_numpy
def augment(data_numpy):
data_numpy = random_mirror(data_numpy)
return data_numpy
```
#### File: GCN/model/msg3d.py
```python
import sys
sys.path.insert(0, '')
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
from .utils import import_class, count_params
from .ms_gcn import MultiScale_GraphConv as MS_GCN
from .ms_tcn import MultiScale_TemporalConv as MS_TCN
from .ms_gtcn import SpatialTemporal_MS_GCN, UnfoldTemporalWindows
from .mlp import MLP
from einops import rearrange
from ..feeders.specs import left_hand_group, right_hand_group, upper_body_group, head_group, holistic_joints, get_indexes
class MS_G3D(nn.Module):
def __init__(self,
in_channels,
out_channels,
A_binary,
num_scales,
window_size,
window_stride,
window_dilation,
embed_factor=1,
activation='relu'):
super().__init__()
self.window_size = window_size
self.out_channels = out_channels
self.embed_channels_in = self.embed_channels_out = out_channels // embed_factor
if embed_factor == 1:
self.in1x1 = nn.Identity()
self.embed_channels_in = self.embed_channels_out = in_channels
# The first STGC block changes channels right away; others change at collapse
if in_channels == 3:
self.embed_channels_out = out_channels
else:
self.in1x1 = MLP(in_channels, [self.embed_channels_in])
self.gcn3d = nn.Sequential(
UnfoldTemporalWindows(window_size, window_stride, window_dilation),
SpatialTemporal_MS_GCN(
in_channels=self.embed_channels_in,
out_channels=self.embed_channels_out,
A_binary=A_binary,
num_scales=num_scales,
window_size=window_size,
use_Ares=True
)
)
self.out_conv = nn.Conv3d(self.embed_channels_out, out_channels, kernel_size=(1, self.window_size, 1))
self.out_bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
N, _, T, V = x.shape
x = self.in1x1(x)
# Construct temporal windows and apply MS-GCN
x = self.gcn3d(x)
# Collapse the window dimension
x = x.view(N, self.embed_channels_out, -1, self.window_size, V)
x = self.out_conv(x).squeeze(dim=3)
x = self.out_bn(x)
# no activation
return x
class MultiWindow_MS_G3D(nn.Module):
def __init__(self,
in_channels,
out_channels,
A_binary,
num_scales,
window_sizes=[3,5],
window_stride=1,
window_dilations=[1,1]):
super().__init__()
self.gcn3d = nn.ModuleList([
MS_G3D(
in_channels,
out_channels,
A_binary,
num_scales,
window_size,
window_stride,
window_dilation
)
for window_size, window_dilation in zip(window_sizes, window_dilations)
])
def forward(self, x):
# Input shape: (N, C, T, V)
out_sum = 0
for gcn3d in self.gcn3d:
out_sum += gcn3d(x)
# no activation
return out_sum
class Model(nn.Module):
def __init__(self,
num_class,
num_point,
num_person,
num_gcn_scales,
num_g3d_scales,
graph,
in_channels=3,
test=False):
super(Model, self).__init__()
self.test = test
Graph = import_class("GCN." + graph)
A_binary = Graph(num_node=num_point).A_binary
self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
# channels
c1 = 96
c2 = c1 * 2 # 192
c3 = c2 * 2 # 384
# r=3 STGC blocks
self.gcn3d1 = MultiWindow_MS_G3D(3, c1, A_binary, num_g3d_scales, window_stride=1)
self.sgcn1 = nn.Sequential(
MS_GCN(num_gcn_scales, 3, c1, A_binary, disentangled_agg=True),
MS_TCN(c1, c1),
MS_TCN(c1, c1))
self.sgcn1[-1].act = nn.Identity()
self.tcn1 = MS_TCN(c1, c1)
self.gcn3d2 = MultiWindow_MS_G3D(c1, c2, A_binary, num_g3d_scales, window_stride=2)
self.sgcn2 = nn.Sequential(
MS_GCN(num_gcn_scales, c1, c1, A_binary, disentangled_agg=True),
MS_TCN(c1, c2, stride=2),
MS_TCN(c2, c2))
self.sgcn2[-1].act = nn.Identity()
self.tcn2 = MS_TCN(c2, c2)
self.gcn3d3 = MultiWindow_MS_G3D(c2, c3, A_binary, num_g3d_scales, window_stride=2)
self.sgcn3 = nn.Sequential(
MS_GCN(num_gcn_scales, c2, c2, A_binary, disentangled_agg=True),
MS_TCN(c2, c3, stride=2),
MS_TCN(c3, c3))
self.sgcn3[-1].act = nn.Identity()
self.tcn3 = MS_TCN(c3, c3)
self.fc = nn.Linear(c3, num_class)
self.upper_body = get_indexes(upper_body_group, holistic_joints)
self.head = get_indexes(head_group, holistic_joints)
self.left_hand = get_indexes(left_hand_group, holistic_joints)
self.right_hand = get_indexes(right_hand_group, holistic_joints)
def forward(self, x):
N, C, T, V, M = x.size()
with open("/vol/research/SignRecognition/x_before.pkl", "wb") as f:
f.write(pickle.dumps(x))
x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
x = self.data_bn(x)
x = x.view(N * M, V, C, T).permute(0,2,3,1).contiguous()
# Apply activation to the sum of the pathways
x = F.relu(self.sgcn1(x) + self.gcn3d1(x), inplace=True)
x = self.tcn1(x)
x = F.relu(self.sgcn2(x) + self.gcn3d2(x), inplace=True)
x = self.tcn2(x)
x = F.relu(self.sgcn3(x) + self.gcn3d3(x), inplace=True)
x = self.tcn3(x)
if not self.test:
# Combine means of hands, head and body
left_hand_features = x[:, :, :, self.left_hand].mean(dim=-1)
right_hand_features = x[:, :, :, self.right_hand].mean(dim=-1)
head_features = x[:, :, :, self.head].mean(dim=-1)
upper_body_features = x[:, :, :, self.upper_body].mean(dim=-1)
x = torch.cat([left_hand_features, right_hand_features, head_features, upper_body_features], dim=1)
return rearrange(x, "b f t -> b t f")
# Just rearrange and return
#return rearrange(x, "b f t j -> b t (f j)")[:, :, :512]
else:
out = x
out_channels = out.size(1)
out = out.view(N, M, out_channels, -1)
out = out.mean(3) # Global Average Pooling (Spatial+Temporal)
out = out.mean(1) # Average pool number of bodies in the sequence
out = self.fc(out)
with open("/vol/research/SignRecognition/x_after.pkl", "wb") as f:
f.write(pickle.dumps(out))
return out
if __name__ == "__main__":
# For debugging purposes
import sys
sys.path.append('..')
model = Model(
num_class=60,
num_point=25,
num_person=2,
num_gcn_scales=13,
num_g3d_scales=6,
graph='graph.ntu_rgb_d.AdjMatrixGraph'
)
N, C, T, V, M = 6, 3, 50, 25, 2
x = torch.randn(N,C,T,V,M)
model.forward(x)
print('Model total # params:', count_params(model))
#N batch size
#M = Number of people
#V = Number of points
#C = Input channels
#T = Number of timesteps
#num_person * in_channels * num_point
#M * V * C
#[32, 56, 369]) N T V*C
```
#### File: GCN/model/ms_tcn.py
```python
import sys
sys.path.insert(0, '')
import torch
import torch.nn as nn
from .activation import activation_factory
class TemporalConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1):
super(TemporalConv, self).__init__()
pad = (kernel_size + (kernel_size-1) * (dilation-1) - 1) // 2
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=(kernel_size, 1),
padding=(pad, 0),
stride=(stride, 1),
dilation=(dilation, 1))
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class MultiScale_TemporalConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
dilations=[1,2,3,4],
residual=True,
residual_kernel_size=1,
activation='relu'):
super().__init__()
assert out_channels % (len(dilations) + 2) == 0, '# out channels should be multiples of # branches'
# Multiple branches of temporal convolution
self.num_branches = len(dilations) + 2
branch_channels = out_channels // self.num_branches
# Temporal Convolution branches
self.branches = nn.ModuleList([
nn.Sequential(
nn.Conv2d(
in_channels,
branch_channels,
kernel_size=1,
padding=0),
nn.BatchNorm2d(branch_channels),
activation_factory(activation),
TemporalConv(
branch_channels,
branch_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation),
)
for dilation in dilations
])
# Additional Max & 1x1 branch
self.branches.append(nn.Sequential(
nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0),
nn.BatchNorm2d(branch_channels),
activation_factory(activation),
nn.MaxPool2d(kernel_size=(3,1), stride=(stride,1), padding=(1,0)),
nn.BatchNorm2d(branch_channels)
))
self.branches.append(nn.Sequential(
nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0, stride=(stride,1)),
nn.BatchNorm2d(branch_channels)
))
# Residual connection
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = TemporalConv(in_channels, out_channels, kernel_size=residual_kernel_size, stride=stride)
self.act = activation_factory(activation)
def forward(self, x):
# Input dim: (N,C,T,V)
res = self.residual(x)
branch_outs = []
for tempconv in self.branches:
out = tempconv(x)
branch_outs.append(out)
out = torch.cat(branch_outs, dim=1)
out += res
out = self.act(out)
return out
if __name__ == "__main__":
mstcn = MultiScale_TemporalConv(288, 288)
x = torch.randn(32, 288, 100, 20)
mstcn.forward(x)
for name, param in mstcn.named_parameters():
print(f'{name}: {param.numel()}')
print(sum(p.numel() for p in mstcn.parameters() if p.requires_grad))
``` |
{
"source": "joesusecom/salt-tutorial",
"score": 2
} |
#### File: salt/runners/spacewalk.py
```python
from __future__ import absolute_import
# Import python libs
import atexit
import logging
# Import third party libs
HAS_LIBS = False
try:
import salt.ext.six as six
HAS_LIBS = True
except ImportError:
# Salt version <= 2014.7.0
try:
import six
HAS_LIBS = True
except ImportError:
pass
log = logging.getLogger(__name__)
def __virtual__():
'''
Check for spacewalk configuration in master config file
or directory and load runner only if it is specified
'''
if not HAS_LIBS:
return False
if _get_spacewalk_configuration() is False:
return False
return True
def _get_spacewalk_configuration(spacewalk_url=''):
'''
Return the configuration read from the master configuration
file or directory
'''
spacewalk_config = __opts__['spacewalk'] if 'spacewalk' in __opts__ else None
if spacewalk_config:
try:
for spacewalk_server, service_config in six.iteritems(spacewalk_config):
username = service_config.get('username', None)
password = service_config.get('password', None)
protocol = service_config.get('protocol', 'https')
if not username or not password:
log.error(
"Username or Password has not been specified in the master "
"configuration for {0}".format(spacewalk_server)
)
return False
ret = {
'api_url': "{0}://{1}/rpc/api".format(protocol, spacewalk_server),
'username': username,
'password': password
}
if (not spacewalk_url) or (spacewalk_url == spacewalk_server):
return ret
except Exception as exc:
log.error(
"Exception encountered: {0}".format(exc)
)
return False
if spacewalk_url:
log.error(
"Configuration for {0} has not been specified in the master "
"configuration".format(spacewalk_url)
)
return False
return False
def _get_client_and_key(url, user, password, verbose=0):
'''
Return the client object and session key for the client
'''
session = {}
session['client'] = six.moves.xmlrpc_client.Server(url, verbose=verbose, use_datetime=True)
session['key'] = session['client'].auth.login(user, password)
return session
def _disconnect_session(session):
'''
Disconnect API connection
'''
session['client'].auth.logout(session['key'])
def _get_session(server_url):
'''
Return an authenticated XML-RPC client and session key for the given Spacewalk server
'''
config = _get_spacewalk_configuration(server_url)
if not config:
return False
try:
session = _get_client_and_key(config['api_url'], config['username'], config['password'])
atexit.register(_disconnect_session, session)
except Exception as exc:
err_msg = "Exception raised when connecting to spacewalk server ({0}): {1}".format(server_url, exc)
log.error(err_msg)
return {server_url: err_msg}
client = session['client']
key = session['key']
return client, key
def systemgroup_listAllGroups(server_url = ''):
'''
'''
client, key = _get_session(server_url)
groups = client.systemgroup.listAllGroups(key)
return groups
def systemgroup_create(group_name, description='', server_url=''):
'''
'''
client, key = _get_session(server_url)
group = client.systemgroup.create(key, group_name, description)
return group
def api(server_url, namespace, method, *args):
'''
Example:
salt-run spacewalk.api manager.su.se system getCpu 1000010002
'''
client, key = _get_session(server_url)
endpoint = getattr(getattr(client, namespace), method)
result = endpoint(key, *args)
return result
def call(server, namespace, method, args):
'''
Example:
salt-run spacewalk.call manager.su.se systemgroup create "['MyGroup', 'Description of my group']"
'''
client, key = _get_session(server)
# args arrives from the CLI as a single string (see the docstring example); parse it
# into a real list before unpacking, otherwise the string would be splatted character by character
if isinstance(args, str):
    import ast
    args = ast.literal_eval(args)
endpoint = getattr(getattr(client, namespace), method)
result = endpoint(key, *args)
return result
``` |
{
"source": "joesusecom/spacewalk-explode",
"score": 3
} |
#### File: joesusecom/spacewalk-explode/setup.py
```python
from xmlrpc.client import ServerProxy, Fault
import json
import yaml
from random import choice
MANAGER_URL = 'http://url.to.your.server/rpc/api'
MANAGER_LOGIN = 'login'
MANAGER_PASSWORD = 'password'
MAX_COUNTRIES = 10
MAX_CITIES = 10
GROUPS_FILE = 'your_groups_definitions.yml'
DUMMY_EMAIL = 'name@domain'
client = ServerProxy(MANAGER_URL, verbose=0)
key = client.auth.login(MANAGER_LOGIN, MANAGER_PASSWORD)
males = open('males.csv', 'r').readlines()
females = open('females.csv', 'r').readlines()
scenario = yaml.safe_load(open(GROUPS_FILE, 'r'))
def generateName():
''' '''
prefix = choice(['Mr.', 'Ms.'])
if prefix == 'Mr.':
first = choice(males)
last = choice(males)
else:
first = choice(females)
last = choice(females)
return (prefix, first, last, DUMMY_EMAIL)
# Set up an organization for every country and add groups to the orgs
countries = json.load(open('countries.json', 'r'))
cities = json.load(open('cities.json', 'r'))
no_of_countries = 1
for country in countries:
if no_of_countries > MAX_COUNTRIES:
break
login = MANAGER_LOGIN + '@' + country['code']
prefix, first, last, email = generateName()
client.org.create(
key,
country['name'],
login,
MANAGER_PASSWORD, # FIXME
prefix,
first,
last,
email,
False
)
org_key = client.auth.login(login, MANAGER_PASSWORD)
# Set up groups for cities of the world
no_of_groups = 1
for city in cities:
if no_of_groups > MAX_CITIES:
break
if country['code'] == city['country']:
try:
client.systemgroup.create(
org_key,
city['name'] + ' (LOCATION)',
'City of ' +
city['name'] + ' in ' + city['country'] + '(' +
'Location: ' +
'latitude ' + city['lat'] + ', ' +
'longitude ' + city['lng'] + ')'
)
print(city['name'], city['country'])
no_of_groups+=1
except Fault:
print('ERROR: ', city['name'], ': Seems to be a duplicate')
# Set up staging groups
for stage in scenario['Stages']:
client.systemgroup.create(
org_key,
stage + ' (STAGE)',
stage + ' Systems'
)
# Set up HW type groups
for hw in scenario['Server_HW']:
client.systemgroup.create(
org_key,
hw + ' (SERVER HARDWARE TYPE)',
hw + ' Server Systems'
)
for hw in scenario['Client_HW']:
client.systemgroup.create(
org_key,
hw + ' (CLIENT HARDWARE TYPE)',
hw + ' Client (Desktop, POS) Systems'
)
# Set up Role groups
for role in scenario['Roles']:
client.systemgroup.create(
org_key,
role + ' (ROLE)',
role + ' System'
)
# Set up OS groups
for os in scenario['OS']:
client.systemgroup.create(
org_key,
os + ' (OS)',
os
)
# Create activation keys, so systems can join the groups right away
# Start with (POS) client hardware:
for hw in scenario['Client_HW']:
for role in ['POS Terminal', 'Branch Server']:
groups = client.systemgroup.listAllGroups(org_key)
for group in groups:
if 'LOCATION' in group['name']:
g = group['name'][:-11]
short_g = g[:12]
try:
activationkey = client.activationkey.create(
org_key,
short_g + '_' + role + '_' + hw,
'Key to activate systems with role ' + role + ' from ' + hw + ' at location ' + g,
'sles12-sp3-pool-x86_64',
[],
False
)
for gg in groups:
if gg['name'] == hw + ' (CLIENT HARDWARE TYPE)':
hw_group = gg['id']
if gg['name'] == role + ' (ROLE)':
role_group = gg['id']
if gg['name'] == g + ' (LOCATION)':
location_group = gg['id']
client.activationkey.addServerGroups(
org_key,
activationkey,
[hw_group, role_group, location_group]
)
client.activationkey.addChildChannels(
org_key,
activationkey,
['sle-manager-tools12-pool-x86_64-sp3',
'sle-manager-tools12-updates-x86_64-sp3',
'sles12-sp3-updates-x86_64'
]
)
except Fault:
print("ERROR ERROR ERROR (usually duplicate cities)")
client.auth.logout(org_key)
no_of_countries+=1
client.auth.logout(key)
``` |
{
"source": "joetache4/ProjectEuler",
"score": 2
} |
#### File: joetache4/ProjectEuler/008_LargestProductInASeries.py
```python
import math
n = "".join([
"73167176531330624919225119674426574742355349194934",
"96983520312774506326239578318016984801869478851843",
"85861560789112949495459501737958331952853208805511",
"12540698747158523863050715693290963295227443043557",
"66896648950445244523161731856403098711121722383113",
"62229893423380308135336276614282806444486645238749",
"30358907296290491560440772390713810515859307960866",
"70172427121883998797908792274921901699720888093776",
"65727333001053367881220235421809751254540594752243",
"52584907711670556013604839586446706324415722155397",
"53697817977846174064955149290862569321978468622482",
"83972241375657056057490261407972968652414535100474",
"82166370484403199890008895243450658541227588666881",
"16427171479924442928230863465674813919123162824586",
"17866458359124566529476545682848912883142607690042",
"24219022671055626321111109370544217506941658960408",
"07198403850962455444362981230987879927244284909188",
"84580156166097919133875499200524063689912560717606",
"05886116467109405077541002256983155200055935729725",
"71636269561882670428252483600823257530420752963450"
])
def digit_product(s):
return math.prod(int(d) for d in s)
def solve(n, L):
ans = (0,0) # product, index
for i in range(len(n)-L+1):
prod = digit_product(n[i:i+L])
ans = max(ans, (prod, i))
return ans[0]
assert solve(n, 4) == 5832
print(solve(n, 13))
```
#### File: joetache4/ProjectEuler/009_SpecialPythagoreanTriplet.py
```python
from bisect import bisect
def solve():
squares = [x*x for x in range(1000)]
for a in range(1, 500):
for b in range(a+1, 501):
a2 = a*a
b2 = b*b
c2 = a2 + b2
c = bisect(squares, c2)-1
if squares[c] == c2 and a + b + c == 1000:
return a*b*c
print(solve())
``` |
{
"source": "joetache4/project-euler",
"score": 4
} |
#### File: joetache4/project-euler/019_CountingSundays.py
```python
dow = 2
def no_days(month, year):
if month in [0,2,4,6,7,9,11]:
return 31
elif month in [3,5,8,10]:
return 30
elif year % 400 == 0:
return 29
elif year % 100 == 0:
return 28
elif year % 4 == 0:
return 29
else:
return 28
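# Quick sanity checks (months are 0-based, so 1 == February):
#   no_days(1, 1900) == 28   (divisible by 100 but not 400 -> not a leap year)
#   no_days(1, 2000) == 29   (divisible by 400 -> leap year)
#   no_days(8, 1901) == 30   (September)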
sum = 0
for y in range(1901, 2001):
for m in range(0, 12):
if dow == 0:
sum += 1
dow = (dow + no_days(m, y)) % 7
print(sum)
``` |
{
"source": "joetache4/ProjectEuler",
"score": 4
} |
#### File: joetache4/ProjectEuler/020_FactorialDigitSum.py
```python
from math import perm
def solve(n):
num = perm(n)
sum = 0
for d in str(num):
sum += int(d)
return sum
assert solve(10) == 27
print(solve(100))
``` |
{
"source": "joetache4/project-euler",
"score": 3
} |
#### File: joetache4/project-euler/031_CoinSums.py
```python
def ways(amount, nohigher=200):
if amount == 0:
return 1
count = 0
coins = [1, 2, 5, 10, 20, 50, 100, 200]
for c in coins:
if amount >= c and c <= nohigher:
count += ways(amount - c, c)
return count
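# e.g. ways(5) == 4, counting 5 = 5 = 2+2+1 = 2+1+1+1 = 1+1+1+1+1 (amounts are in pence)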
print(ways(200))
```
#### File: joetache4/project-euler/032_PandigitalProducts.py
```python
from math import prod
from lib.num import factor
from lib.list import subsets
def pandigital(num):
if num < 123456789 or num > 987654321:
return False
arr = [int(x) for x in str(num)]
for k in range(1, 10):
if k not in arr:
return False
return True
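# e.g. pandigital(391867254) -> True (digits 1-9 each used exactly once),
#      pandigital(123456788) -> False (no 9, and 8 repeats)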
p = set()
for n in range(2, 11111):
factors = factor(n)
for subset in subsets(factors):
term1 = subset
term2 = factors.copy()
for t in term1:
term2.remove(t)
term1 = prod(term1)
term2 = prod(term2)
if pandigital(int(f"{term1}{term2}{n}")):
print(f"{term1} {term2} {n}")
p.add(n)
break
ans = sum(p)
print(f"ans = {ans}")
```
#### File: joetache4/project-euler/033_DigitCancellingFractions.py
```python
from lib.num import gcd
def test(n, d):
if n%10 == 0 and d%10 == 0:
return False
n = str(n)
d = str(d)
eq = lambda n1, d1, n2, d2: int(n1)*int(d2) == int(n2)*int(d1)
if n[0] == d[0] and eq(n, d, n[1], d[1]):
return True
if n[0] == d[1] and eq(n, d, n[1], d[0]):
return True
if n[1] == d[0] and eq(n, d, n[0], d[1]):
return True
if n[1] == d[1] and eq(n, d, n[0], d[0]):
return True
return False
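# e.g. test(49, 98) -> True, since cancelling the shared 9 leaves 4/8 and 49/98 == 4/8;
#      test(30, 50) -> False, because trivial fractions ending in 0 are excluded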
prod_n = 1
prod_d = 1
for d in range(11, 99):
for n in range(10, d):
if test(n, d):
print((n, d))
prod_n *= n
prod_d *= d
g = gcd(prod_n, prod_d)
print(f"ans = {prod_d // g}")
```
#### File: joetache4/project-euler/035_CircularPrimes.py
```python
from lib.num import get_primes
primes = get_primes(10**6)
primes = [p for p in primes if not any(d in str(p) for d in ["0","2","4","6","8"])]
primes.insert(0, 2)
def rotations(n):
yield n
m = n
while True:
m = str(m)
m = int(m[1:] + m[0])
if m == n:
break
yield m
circular = []
for n in primes:
is_circ = True
for m in rotations(n):
if m not in primes:
is_circ = False
break
if is_circ:
circular.append(n)
assert 2 in circular
assert 7 in circular
assert 11 in circular
assert 97 in circular
assert 23 not in circular
assert 47 not in circular
print(circular)
print(f"ans = {len(circular)}")
```
#### File: joetache4/project-euler/040_ChampernownesConstant.py
```python
import sys
do_test = False
if len(sys.argv) > 1 and sys.argv[1] == "test":
do_test = True
def d(n):
# get digit count of number that contains n-th digit
len = 10
digit_count = 1
while len <= n:
n -= len # set n to an index that ignores shorter numbers
digit_count += 1
len = digit_count * 9 * 10**(digit_count-1)
# get the number at this index
num = n // digit_count
if digit_count > 1:
num += 10**(digit_count - 1)
# get the digit
dig = int(str(num)[n % digit_count])
return dig
def test():
print("Testing...")
concatted = [str(x) for x in range(100000)]
concatted = "".join(concatted)
for i in range(1,10000):
assert d(i) == int(concatted[i])
print("[SUCCESS]")
if do_test:
test()
else:
ans = d(1) * d(10) * d(100) * d(1000) * d(10000) * d(100000) * d(1000000)
print(ans)
``` |
{
"source": "joetache4/ProjectEuler",
"score": 4
} |
#### File: joetache4/ProjectEuler/045_TriangularPentagonalAndHexagonal.py
```python
def T(n):
return n*(n+1) // 2
def P(n):
return n*(3*n-1) // 2
def H(n):
return n*(2*n-1)
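# T(285) = P(165) = H(143) = 40755 is the known solution given in the problem statement,
# so the search starts just beyond it with the indices below.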
t_index = 286
p_index = 165
h_index = 144
t_val = T(t_index)
p_val = P(p_index)
h_val = H(h_index)
while t_val != p_val or p_val != h_val:
if t_val < p_val:
t_index += 1
t_val = T(t_index)
if p_val < h_val:
p_index += 1
p_val = P(p_index)
if h_val < t_val:
h_index += 1
h_val = H(h_index)
print(t_val)
```
#### File: joetache4/ProjectEuler/081_PathSumTwoWays.py
```python
from data.p081 import get_data
node = get_data()
N = len(node)
path = [[None]*N for _ in range(N)]
path[-1][-1] = node[-1][-1]
def get_path(y, x):
if path[y][x] is None:
if y == N-1: # at bottom
path[y][x] = node[y][x] + get_path(y, x+1)
elif x == N-1: # at right side
path[y][x] = node[y][x] + get_path(y+1, x)
else:
path[y][x] = node[y][x] + min(get_path(y+1, x), get_path(y, x+1))
return path[y][x]
print(get_path(0, 0))
```
#### File: joetache4/ProjectEuler/088_Product-SumNumbers.py
```python
from math import isqrt
max_val = 12000
def first_divisor(n):
if n % 2 == 0 and n != 2:
return 2
for m in range(3, isqrt(n) + 1, 2):
if n % m == 0:
return m
return None # don't return 1 or n
# get a list of list of all ways to factor n into (prime or composite) numbers
def partitions(n):
ans = set()
if n == 1:
pass
else:
d = first_divisor(n)
if d is None:
ans.add( (n,) ) # comma makes a tuple, not parentheses
else:
for partition in partitions(n//d):
for i in range(len(partition)):
a = list(partition)
a[i] *= d
ans.add( tuple(sorted(a)) )
a = list(partition)
a.append(d)
ans.add(tuple(sorted(a)))
return ans
min_ps_num = {}
for n in range(2, 2*max_val+1):
for p in partitions(n):
k = n - sum(p) + len(p)
if k not in min_ps_num:
min_ps_num[k] = n
ans = set()
for k in range(2, 12001):
ans.add(min_ps_num[k])
print(sum(ans))
```
#### File: joetache4/ProjectEuler/094_AlmostEquilateralTriangles.py
```python
P = 10**9
ans = 0
#(k,k,k+1)
a = [0,2]
while True:
k = a[-1]**2 + 1
p = 3*k + 1
if p <= P:
ans += p
else:
break
a.append(4*a[-1] - a[-2])
#(k,k,k-1)
a = [0,1]
while True:
k = 16*a[-1] + 1
p = 3*k - 1
if p <= P:
ans += p
else:
break
a.append(14*a[-1] - a[-2] + 1)
print(ans)
# 5:00
'''
from math import isqrt
def heronian_perimeter2(a,b,c):
p = a+b+c
A2 = p*c*c*(a+b-c)
A = isqrt(A2)
if A2 == A*A:# and (A&3) == 0 :
input((a,b,c))
return p
return 0
ans = 0
for x in range(3, (10**9)//3 + 1, 2): # 1,1,2 is not Heronian, so skip it for simplicity's sake
ans += heronian_perimeter2(x, x, x+1)
ans += heronian_perimeter2(x, x, x-1)
print(ans)
'''
``` |
{
"source": "joetache4/project-euler",
"score": 4
} |
#### File: joetache4/project-euler/095_AmicableChains.py
```python
max_val = 10**6
div = [[1] for x in range(max_val+1)]
div[0] = [0]
div[1] = [0]
for a in range(2, max_val//2+1):
for b in range(2*a, max_val+1, a):
div[b].append(a)
assert div[24] == [1,2,3,4,6,8,12]
assert div[28] == [1,2,4,7,14]
for i in range(len(div)):
div[i] = sum(div[i])
assert div[28] == 28
assert div[220] == 284
assert div[284] == 220
chain_len = {}
def get_chain_len(start):
if start in chain_len:
return
visited = []
n = start
while True:
visited.append(n)
n = div[n]
if n > max_val or n == 0:
# faster to not do this
# for m in visited:
# chain_len[m] = -1
break
elif n == start:
for m in visited:
chain_len[m] = len(visited)
break
elif n in visited:
break
for i in range(len(div)):
get_chain_len(i)
assert 7 not in chain_len
assert chain_len[28] == 1
assert chain_len[220] == 2
assert chain_len[284] == 2
assert chain_len[12496] == 5
assert chain_len[14288] == 5
assert chain_len[15472] == 5
assert chain_len[14536] == 5
assert chain_len[14264] == 5
longest = max(( (length,-start) for start,length in chain_len.items() ))
print(-longest[1])
``` |
{
"source": "joetache4/ProjectEuler",
"score": 4
} |
#### File: joetache4/ProjectEuler/102_TriangleContainment.py
```python
from data.p102 import get_data
triangles = get_data()
o = (0,0)
def area(a, b, c):
val = a[0]*(b[1]-c[1]) + b[0]*(c[1]-a[1]) + c[0]*(a[1]-b[1])
return abs(val / 2)
def contains_origin(tri):
a = (tri[0], tri[1])
b = (tri[2], tri[3])
c = (tri[4], tri[5])
return area(a,b,c) == area(o,b,c) + area(a,o,c) + area(a,b,o)
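# Illustrative check (not from the input file): the triangle (0,-1), (1,1), (-1,1)
# contains the origin, so contains_origin((0, -1, 1, 1, -1, 1)) returns True.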
print(sum( 1 for tri in triangles if contains_origin(tri) ))
``` |
{
"source": "joetache4/project-euler",
"score": 4
} |
#### File: joetache4/project-euler/103_SpecialSubsetSumsOptimum.py
```python
from queue import PriorityQueue
from lib.array import subsets
from interface import Solution
class S103(Solution):
def __init__(self):
NUM = 103
NAME = "Problem title"
ARG = [11,18,19,20,22,25]
ANS = 20313839404245
super().__init__(NUM, NAME)
self.add_test([1], "12")
self.add_test([1,2], "234")
self.add_test([2,3,4], "3567")
self.add_test([3,5,6,7], "69111213")
self.add_test([6,9,11,12,13], "111819202225")
self.add_test(ARG, ANS)
################################################################################
def cond1(self, arr):
sums = set()
for subset in subsets(arr):
s = sum(subset)
if s in sums:
return False
sums.add(s)
return True
def cond2(self, arr):
#arr.sort()
m = arr[0] # sum of lower terms
M = 0 # sum of higher terms
a = 1
b = len(arr)-1
while a < b:
m += arr[a]
M += arr[b]
if m <= M:
return False
a += 1
b -= 1
return True
def solve(self, arr):
q = PriorityQueue()
a = arr[len(arr)//2]
arr = [i+a-2 for i in arr]
arr.insert(0, a)
q.put((sum(arr), arr))
visited = set()
visited.add(tuple(arr))
while True:
_, arr = q.get()
if self.cond2(arr) and self.cond1(arr):
return "".join([str(i) for i in arr])
for a in arr:
b = a + 1
while b in arr:
b += 1
arr2 = arr.copy()
arr2.remove(a)
arr2.append(b)
arr2.sort()
tup_arr2 = tuple(arr2)
if tup_arr2 not in visited:
q.put((sum(arr2), arr2))
visited.add(tup_arr2)
################################################################################
if __name__ == "__main__":
S103().run()
``` |
{
"source": "joetache4/ProjectEuler",
"score": 4
} |
#### File: joetache4/ProjectEuler/111_PrimesWithRuns.py
```python
from itertools import combinations
from lib.num import is_prime, get_primes
def replacements(n, indices):
'''
Replace the digits of n at the given indices. Iterate through all combinations of replacement digits.
n is represented as a string list. indicides is an int list. Yields ints.
'''
L2 = len(indices)
for n2 in range(10**L2):
n2 = str(n2).zfill(L2)
for i in range(L2):
n[indices[i]] = n2[i]
if n[0] == "0":
# skip replacements that start with 0
continue
a = int("".join(n))
yield a
def solve(L):
primes = get_primes(int(10**((L+1)/2))+1)
S = 0
for d in "0123456789":
for num_replace in range(1,L+1):
Sd = 0
for indices in combinations(range(L), num_replace):
for n in replacements([d]*L, indices):
if is_prime(n, primes):
Sd += n
if Sd > 0:
S += Sd
break
return S
assert solve(4) == 273700
print(solve(10))
```
#### File: joetache4/ProjectEuler/115_CountingBlockCombinationsII.py
```python
mem = {}
def F(m, length):
return F_r(True, m, length) + F_r(False, m, length)
def F_r(red, m, length):
if red:
if length == m:
return 1
elif length < m:
return 0
else:
if length == 1:
return 1
elif length < 1:
return 0
if (red, length) in mem:
return mem[(red, length)]
count = 1
if red:
for i in range(m, length+1):
count += F_r(False, m, length-i)
else:
for i in range(1, length+1):
count += F_r(True, m, length-i)
mem[(red,length)] = count
return count
assert F(3,7) == 17
mem = {}
assert F(10,57) == 1148904
mem = {}
for n in range(100,1000):
if F(50, n) > 10**6:
print(n)
break
mem = {}
assert F(50, 167) <= 10**6 and F(50, 168) > 10**6
```
#### File: joetache4/ProjectEuler/118_PandigitalPrimeSets.py
```python
import primesieve as ps
def index(p):
v = 0
while p > 0:
d = p%10
if d == 0:
return 0
d = 1<<(d-1)
if d&v:
return 0
v += d
p //= 10
return v
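# index() maps a number to a bitmask of its digits, or 0 if it repeats a digit or uses 0:
#   index(23)  -> 0b000000110 = 6
#   index(11)  -> 0  (repeated digit)
#   index(103) -> 0  (contains a zero)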
# get primes & their indices -- this takes the most time by far
count_orig = [0]*(2**9)
it = ps.Iterator()
p = it.next_prime()
#while p <= 987654321: # there are no 9-digit pandigital primes, so this could be 98765432 instead
while p <= 98765432:
if (i := index(p)) > 0:
count_orig[i] += 1
p = it.next_prime()
# main loop
count_prev = count_orig
ans = 0
while True:
count_new = [0]*len(count_orig)
for i in range(len(count_orig)):
for j in range(i):
if i&j == 0: # no shared digits allowed
count_new[i+j] += count_orig[i]*count_prev[j]
if sum(count_new) == 0:
# no new sets have been made, quit
break
else:
ans += count_new[-1]
count_prev = count_new
print(ans)
# Old. This overcounts, acting as if {a,b}+{c,d} is different from {a,c}+{b,d}.
'''
for i in range(1, len(count)):
for j in range(i):
if i & j == 0:
# no shared digits
count[i + j] += count[i] * count[j]
print(count[-1])
'''
```
#### File: joetache4/ProjectEuler/121_DiscGamePrizeFund.py
```python
from math import floor
def perms(numT, numF, start = None):
if start is None:
start = []
if numT == 0 and numF == 0:
yield start.copy()
if numT > 0:
start.append(True)
yield from perms(numT - 1, numF, start)
start.pop()
if numF > 0:
start.append(False)
yield from perms(numT, numF - 1, start)
start.pop()
def solve(turns):
prob_win = 0
for b in range(turns//2+1, turns+1):
for perm in perms(b, turns-b):
prob_perm = 1
for i, draw in enumerate(perm):
if draw:
prob_perm *= 1/(i+2)
else:
prob_perm *= (i+1)/(i+2)
prob_win += prob_perm
return floor(1/prob_win)
assert solve(4) == 10
print(solve(15))
``` |
{
"source": "joetache4/project-euler",
"score": 4
} |
#### File: joetache4/project-euler/125_PalindromicSums.py
```python
from math import ceil, sqrt
def is_pal(n):
s = str(n)
return s == s[::-1]
def main(M):
squares = [n*n for n in range(1, ceil(sqrt(M)))]
ps = set()
for i in range(len(squares)-1):
partial = squares[i]
for j in range(i+1, len(squares)):
partial += squares[j]
if partial >= M:
break
if is_pal(partial):
ps.add(partial)
return sum(ps)
assert main(10**3) == 4164
print(main(10**8))
``` |
{
"source": "joetache4/ProjectEuler",
"score": 3
} |
#### File: joetache4/ProjectEuler/136_SingletonDifference.py
```python
from lib.num import get_primes
def solve(N):
primes = get_primes(N)
del primes[0]
ans = 0
for p in primes:
if p%4 == 3:
ans += 1
if 16*p < N:
ans += 2
elif 4*p < N:
ans += 1
ans += 2 # for n=4,16
return ans
assert solve(100) == 25
print(solve(50*10**6))
# shows patterns
'''
from math import isqrt
from lib.num import get_primes, divisors, factor
N = 50*10**6
primes = get_primes(isqrt(N)+1)
for n in range(1, N):
div = divisors(n, primes)
num_sol = 0
for x in div:
q, r = divmod(n+x*x, 4*x)
if r == 0:
if q >= x:
continue
num_sol += 1
print(f'count({n}) = {num_sol}')
#if num_sol == 1:
# print(factor(n, primes))
#'''
# slightly faster, much less memory used
'''
from math import isqrt
from itertools import cycle
from lib.num import get_primes, divisors, factor
N = 50*10**6
T = True
F = False
primes = get_primes(isqrt(N)+1)
gt1 = [None] * N # n with 'greater than one' solutions
ans = 0
for n, gt0 in zip(range(1, N), cycle([F,F,T,T,F,F,T,F,F,F,T,T,F,F,T,T])): # zeros come in a pattern
if gt0 and not gt1[n]:
num_sol = 0
for d in divisors(n, primes):
q, r = divmod(n+d*d, 4*d)
if q < d and r == 0:
num_sol += 1
if num_sol > 1:
# all sn have >=1 solution where s is a square
k = 2
m = n*k*k
while m < N:
gt1[m] = True
k += 1
m = n*k*k
break
if num_sol == 1:
ans += 1
print(ans)
#'''
# Original solution - slow and uses a ton of memory
'''
N = 50*10**6
def list_divisors(n):
div = [[1] for x in range(n+1)]
div[0] = []
for d in range(2, n+1):
for m in range(d, n+1, d):
div[m].append(d)
return div
ans = 0
for n,div in enumerate(list_divisors(N)):
num_sol = 0
for x in div:
q, r = divmod(n+x*x, 4*x)
if r == 0:
if q >= x:
continue
num_sol += 1
if num_sol > 1:
break
if num_sol == 1:
ans += 1
print(ans)
'''
``` |
{
"source": "joetache4/project-euler",
"score": 3
} |
#### File: joetache4/project-euler/136_SingletonDifference__SLOW.py
```python
M = 50*10**6
def list_divisors(n):
div = [[1] for x in range(n+1)]
div[0] = []
for d in range(2, n+1):
for q in range(d, n+1, d):
div[q].append(d)
return div
ans = 0
for n,div in enumerate(list_divisors(M)):
num_sol = 0
for x in div:
if (n + x*x) % (4*x) == 0:
if (n + x*x) // (4*x) >= x:
continue
num_sol += 1
if num_sol > 1:
break
if num_sol == 1:
ans += 1
print(ans)
``` |
{
"source": "joetache4/ProjectEuler",
"score": 4
} |
#### File: joetache4/ProjectEuler/149_SearchingForAMaximum-SumSubsequence.py
```python
import numpy as np
def rand():
s = []
for k in range(1, 56):
sk = (100003 - 200003*k + 300007*k**3) % 1000000 - 500000
yield sk
s.append(sk)
for k in range(56, 4000001):
sk = (s[55-24] + s[0] + 1000000) % 1000000 - 500000
yield sk
s.append(sk)
s.pop(0)
def test_rand():
a = [x for x in rand()]
assert a[ 10-1] == -393027
assert a[100-1] == 86613
# skews an np.array by appending zeros on the sides
def skew(values):
new_arr = []
for i in range(len(values)):
skewed = [0]*i + values[i].tolist() + [0]*(len(values)-i-1) # append using list +
new_arr.append(skewed)
return np.array(new_arr)
def max_sum():
print("Creating pseudorandom array")
values = np.array([x for x in rand()]).reshape((2000, 2000))
print("Finding max vertical sum")
max_val = max_vertical_sum(values)
print("Finding max horizontal sum")
max_val = max(max_val, max_vertical_sum(values.transpose()))
print("Finding max diagonal (/) sum")
max_val = max(max_val, max_vertical_sum(skew(values)))
print("Finding max diagonal (\) sum")
max_val = max(max_val, max_vertical_sum(skew(values[::-1])))
print(f"max: {max_val}")
# finds the greatest sum of consecutive values in each column of a 2 dimensional np.array
def max_vertical_sum(values):
top, mid, bot, tot = max_vertical_sum_helper(values)
return max(mid)
# helps find the greatest sum of consecutive values in each column of a 2 dimensional np.array
# recursive binary splitting of rows in values
# i.e., {2000 rows} -> {1000, 1000 rows} -> {500,500,500,500 rows} -> ...
# method returns what I call top, middle, bottom, total sums
# top: greatest sum of a sequence that contains the topmost value in the chunk
# mid: greatest sum of a any sequence in the chunk
# bot: greatest sum of a sequence that contains the bottommost value in the chunk
# tot: the sum of all elements in the chunk
# the parent chunk can then easily calculate its own top, mid, bottom, and total
# sums from the sums of its two child chunks
def max_vertical_sum_helper(values):
if len(values) == 0:
return (None, None, None, None)
if len(values) == 1:
return (values[0], values[0], values[0], values[0])
a_top, a_mid, a_bot, a_tot = max_vertical_sum_helper(values[:len(values)//2 ]) # a is "on top"
b_top, b_mid, b_bot, b_tot = max_vertical_sum_helper(values[ len(values)//2:])
if a_top is None:
return (b_top, b_mid, b_bot, b_tot)
if b_top is None:
return (a_top, a_mid, a_bot, a_tot)
new_top = np.maximum(a_top, a_tot + b_top)
new_mid = np.maximum(a_mid, np.maximum(b_mid, a_bot + b_top))
new_bot = np.maximum(b_bot, b_tot + a_bot)
new_tot = a_tot + b_tot
return (new_top, new_mid, new_bot, new_tot)
max_sum()
```
#### File: joetache4/ProjectEuler/173_UsingUpToOneMillionTilesHowManyDifferentHollowSquareLaminaeCanBeFormed.py
```python
def solve(M):
ans = 0
D = 3
while True:
if 4*D - 4 > M:
break
for d in range(D-2, 0, -2):
size = D*D - d*d
if size <= M:
ans += 1
else:
break
D += 1
return ans
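# e.g. a 9x9 outer square with a 7x7 hole uses 9*9 - 7*7 = 32 tiles, so it is counted
# whenever M >= 32.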
assert solve(100) == 41
print(solve(10**6))
``` |
{
"source": "joetache4/project-euler",
"score": 4
} |
#### File: joetache4/project-euler/179_ConsecutivePositiveDivisors.py
```python
n = 10**7
def count_divisors(n):
div = [1 for x in range(n+1)]
div[0] = 0
for d in range(2, n+1):
for q in range(d, n+1, d):
div[q] += 1
return div
div = count_divisors(n)
ans = sum(1 for i in range(n) if div[i] == div[i+1])
print(ans)
``` |
{
"source": "joetache4/ProjectEuler",
"score": 4
} |
#### File: joetache4/ProjectEuler/181_InvestigatingInHowManyWaysObjectsOfTwoDifferentColoursCanBeGrouped.py
```python
from lib.helpers import memoize
# Time can be cut in half by ignoring subproblems where b_min is too big. When b_min is bigger than b/2, then there is only one solution, that with all the black stones (and thus white ones, too) in the last group. That option is also why 1 is added to the total in cases where b_min <= b//2.
@memoize
def solve(b, w, b_min = 0, w_min = 0):
if b < 0 or w < 0:
return 0
if b == 0 and w == 0:
return 1
if w_min > w:
b_min += 1
w_min = 0
if b_min > b:
return 0
if b_min > b//2:
return 1 # <--------------
total = 0
# same b, same or higher w
b2 = b_min
if b_min == 0 and w_min == 0:
w_min = 1
for w2 in range(w_min, w+1):
total += solve(b-b2, w-w2, b2, w2)
# higher b, any w
for b2 in range(b_min+1, b//2+1): # <--------------
for w2 in range(0, w+1):
total += solve(b-b2, w-w2, b2, w2)
return total+1 # <--------------
assert solve(3,1) == 7
print(solve(60,40))
# first attempt
'''
# b, w = remaining black and white stones to place in groups.
# b_min, w_min = minimum such stones to place in each subsequent group.
@memoize
def first_attempt(b, w, b_min = 0, w_min = 0):
if b < 0 or w < 0:
return 0
if b == 0 and w == 0:
return 1
if w_min > w:
b_min += 1
w_min = 0
if b_min > b:
return 0
total = 0
# same b, same or higher w
b2 = b_min
if b_min + w_min == 0:
w_min = 1
for w2 in range(w_min, w+1):
total += first_attempt(b-b2, w-w2, b2, w2)
# higher b, any w
for b2 in range(b_min+1, b+1):
for w2 in range(0, w+1):
total += first_attempt(b-b2, w-w2, b2, w2)
return total
'''
# not mine
'''
def very_fast_solution(b, w):
F = []
for i in range(b+1):
F.append([])
for j in range(w+1):
F[-1].append(0)
F[0][0] = 1
for i in range(b+1):
for j in range(w+1):
if i + j > 0:
for k in range(i, b+1):
for l in range(j, w+1):
F[k][l] += F[k-i][l-j]
return F[b][w]
'''
```
#### File: joetache4/ProjectEuler/206_ConcealedSquare.py
```python
def check(n):
return str(n*n)[0::2] == "1234567890"
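# The target ends in _0, and a square ending in 0 must end in 00, so n*n ends in 900
# and n must end in 30 or 70; hence the start at ...030 and the alternating +40/+60 steps.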
def solve():
n = 1000000030
while True:
if check(n):
return n
n += 40
if check(n):
return n
n += 60
print(solve())
``` |
{
"source": "joetache4/project-euler",
"score": 3
} |
#### File: joetache4/project-euler/206_ConcealedSquare.py
```python
from math import sqrt
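# Brute force over the nine hidden digits a..i: the loop builds 1a2b3c4d5e6f7g8h9i and
# the extra factor of 10 in "num = 10*(num + j)" appends the trailing 0. Only i = 0 is
# tried, because a square ending in 0 must end in 00.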
def main():
for a in [0,1,2,3,4,5,6,7,8,9]:
for b in [0,1,2,3,4,5,6,7,8,9]:
for c in [0,1,2,3,4,5,6,7,8,9]:
for d in [0,1,2,3,4,5,6,7,8,9]:
for e in [0,1,2,3,4,5,6,7,8,9]:
for f in [0,1,2,3,4,5,6,7,8,9]:
for g in [0,1,2,3,4,5,6,7,8,9]:
for h in [0,1,2,3,4,5,6,7,8,9]:
for i in [0]:
num = 0
for j in [1,a,2,b,3,c,4,d,5,e,6,f,7,g,8,h,9,i]:
num = 10*(num + j)
sqrt_num = int(sqrt(num))
if num == sqrt_num**2:
print(sqrt(num))
return
main()
```
#### File: joetache4/project-euler/215_Crack_FreeWalls.py
```python
import numpy as np
import scipy.sparse
m = 32
n = 10
levels = [] # ways to arrange 2- and 3-bricks on a single level
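# Each level is stored as the list of its internal crack positions (running totals
# of brick lengths); two stacked rows are crack-free iff they share no position.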
def get_levels(m, level = None):
last_brick = 0
if level is None:
level = []
else:
last_brick = level[-1]
if last_brick in [m-2, m-3]:
levels.append(level.copy())
elif last_brick > m-2:
return
level.append(last_brick + 2)
get_levels(m, level)
level[-1] += 1
get_levels(m, level)
level.pop()
get_levels(m)
adj_mat = [] # adjacency matrix
for i in range(len(levels)):
adj_vec = []
for j in range(len(levels)):
if i == j:
adj_vec.append(0)
elif all((k not in levels[j] for k in levels[i])):
adj_vec.append(1)
else:
adj_vec.append(0)
adj_mat.append(adj_vec)
adj_mat = np.matrix(adj_mat, np.uint8)
adj_mat = scipy.sparse.coo_matrix(adj_mat, dtype = np.uint64)
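# Square-and-multiply matrix power: entry (i, j) of adj_mat**(n-1) counts crack-free
# walls of height n that start with level i and end with level j.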
def pow(base, exp):
if exp == 1:
return base
elif exp % 2 == 0:
tmp = pow(base, exp//2)
return tmp.dot(tmp)
else:
tmp = pow(base, exp - 1)
return tmp.dot(base)
adj_mat = pow(adj_mat, n - 1)
print(adj_mat.sum(dtype = np.uint64))
```
#### File: joetache4/project-euler/621_ExpressingAnIntegerAsTheSumOfTriangularNumbers.py
```python
import sys
import math
from lib.log import log
target = 17526*10**9
try:
target = int(sys.argv[1])
except IndexError:
pass
def get_tri(target):
tri, i, n = [], 0, 0
while n <= target:
tri.append(n)
i += 1
n += i
return tri
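# With a <= b <= c, the smallest term is at most target/3 and the middle term at most
# target/2, so these are the largest usable indices into tri for a and b.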
def max_ab(tri):
max_a_index = -1
max_b_index = -1
for i, t in enumerate(tri):
if max_a_index == -1 and t > target//3:
max_a_index = i-1
if t > target//2:
max_b_index = i-1
break
return (max_a_index, max_b_index)
########################################################################################################
# for testing
def is_tri(n):
n = 2*n
m = int(math.sqrt(n))
return m*m + m == n
def brute_force(n, tri):
count = 0
for a in tri:
if a > n: break
for b in tri:
if a + b > n: break
if is_tri(n - a - b):
count += 1
return count
########################################################################################################
import numpy as np
from numba import cuda, int64
def info():
gpu = cuda.get_current_device()
print("name = %s" % gpu.name)
print("maxThreadsPerBlock = %s" % str(gpu.MAX_THREADS_PER_BLOCK))
print("maxBlockDimX = %s" % str(gpu.MAX_BLOCK_DIM_X))
print("maxBlockDimY = %s" % str(gpu.MAX_BLOCK_DIM_Y))
print("maxBlockDimZ = %s" % str(gpu.MAX_BLOCK_DIM_Z))
print("maxGridDimX = %s" % str(gpu.MAX_GRID_DIM_X))
print("maxGridDimY = %s" % str(gpu.MAX_GRID_DIM_Y))
print("maxGridDimZ = %s" % str(gpu.MAX_GRID_DIM_Z))
print("maxSharedMemoryPerBlock = %s" % str(gpu.MAX_SHARED_MEMORY_PER_BLOCK))
print("asyncEngineCount = %s" % str(gpu.ASYNC_ENGINE_COUNT))
print("canMapHostMemory = %s" % str(gpu.CAN_MAP_HOST_MEMORY))
print("multiProcessorCount = %s" % str(gpu.MULTIPROCESSOR_COUNT))
print("warpSize = %s" % str(gpu.WARP_SIZE))
print("unifiedAddressing = %s" % str(gpu.UNIFIED_ADDRESSING))
print("pciBusID = %s" % str(gpu.PCI_BUS_ID))
print("pciDeviceID = %s" % str(gpu.PCI_DEVICE_ID))
# Count (a,b,c) where target = a + b + c and a <= b <= c
# Note: two terms (a,b) must be < target/2 and 1 term (a) must be < target/3
block_count = 512
threads_per_block = 512
# The start and stride params indicate where to start looking for the 1st and 2nd terms
# and how far to look. They are used to divide the problem into a series of GPU calls
# so it doesn't time out and give an error.
@cuda.jit
def count(target, tri_arr, count_arr, start_a, start_b, max_a_index, max_b_index, stride_a, stride_b):
th_ind = cuda.grid(1)
if th_ind >= stride_a:
return
a_ind = th_ind + start_a
if a_ind >= tri_arr.shape[0] or a_ind > max_a_index:
return
a = tri_arr[a_ind]
step = 1
if target % 3 == 0 and a % 3 == 1:
# a, b, and c must all be 1 mod 3
step = 3
#for b_ind in range(a_ind, max_b_index+1):
for b_ind in range(a_ind + start_b, min(a_ind + start_b + stride_b, max_b_index+1), step):
b = tri_arr[b_ind]
c = target - a - b
if c < b:
break
tmp = math.floor(math.sqrt(2.0*c))
if tmp*tmp + tmp == 2*c:
# is triangular
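            # the totals are for ordered triples: weight each non-decreasing
            # (a, b, c) by its number of distinct orderings (1, 3 or 6)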
if a == b and b == c:
count_arr[a_ind] += 1
elif a == b or b == c:
count_arr[a_ind] += 3
else:
count_arr[a_ind] += 6
def main():
#info()
print("***")
tri = get_tri(target)
max_a_index, max_b_index = max_ab(tri)
print(f"target : {target}")
print(f"len(tri): {len(tri)}")
print(f"max(tri): {tri[-1]}")
print(f"max 1st index: {max_a_index}")
print(f"max 2nd index: {max_b_index}")
print(f"threads : {block_count*threads_per_block}")
print("***")
total = 0
tri_arr = np.array(tri, np.int64)[:max(max_a_index, max_b_index)+1]
count_arr = np.zeros(tri_arr.shape, np.int64)
stride_a = block_count * threads_per_block
stride_b = 25002 # must be divisible by 3 to make use of modulo qualities in count()
for start_a in range(0, max_a_index+1, stride_a):
print(f"a: {start_a}...")
for start_b in range(0, max_b_index+1, stride_b):
print(f" b: {start_b}...")
count[block_count, threads_per_block] \
(target, tri_arr, count_arr, start_a, start_b, max_a_index, max_b_index, stride_a, stride_b)
total = sum(( count_arr ))
print(total)
log(f"G({target}) = {total}")
# test
if target <= 10000000:
print(brute_force(target, tri))
return total
#main()
########################################################################################################
# multithreading
'''
import threading
from lib.log import log
def is_tri(n):
n = 2*n
m = int(math.sqrt(n))
return m*m + m == n
terms, terms_lock = set(), threading.Lock()
def found_set(new_found):
terms_lock.acquire()
for f in new_found:
terms.add(f)
terms_lock.release()
def count_two_term_sums(a_index, a, target):
found = set()
target2 = target - a
if target % 3 == 0:
if a % 3 == 0:
# b must also be 0 mod 3
for b_index in range(a_index, len(tri), 3):
b = tri[b_index]
if b > target//2:
break
c = target2 - b
if b <= c and is_tri(c): # TODO break if c < b
found.add((a, b, c))
a_index += 1
if tri[a_index] % 3 == 1:
a_index += 1
for b_index in range(a_index, len(tri), 3):
b = tri[b_index]
if b > target//2:
break
c = target2 - b
if b <= c and is_tri(c):
found.add((a, b, c))
else:
# b must also be 1 mod 3
for b_index in range(a_index, len(tri), 3):
b = tri[b_index]
if b > target//2:
break
c = target2 - b
if b <= c and is_tri(c):
found.add((a, b, c))
else:
for b_index in range(a_index, len(tri)):
b = tri[b_index]
if b > target//2:
break
c = target2 - b
if b <= c and is_tri(c):
found.add((a, b, c))
found_set(found)
# assume a <= b <= c
def main(target):
print(f"main loop end: {len(tri)//3}")
# two terms must be < target/2, 1 term must be < target/3
for a_index, a in enumerate(tri):
if a > target//3:
break
if a_index % 10 == 0:
print(a_index)
threading.Thread(target=count_two_term_sums, args=(a_index, a, target)).start()
# wait for all threads to finish
main_thread = threading.currentThread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
#print(terms)
ans = sum(( count_unique_perms(t) for t in terms ))
print(ans)
log(ans)
return ans
#main(target)
'''
``` |
{
"source": "joetache4/ProjectEuler",
"score": 4
} |
#### File: joetache4/ProjectEuler/700_Eulercoin.py
```python
def ecoins():
'''Generate eulercoins naively. Good for recognizing patterns.'''
n = 0
d = 1504170715041707
mod = 4503599627370517
min = 9999999999999999
sum = 0
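    # n steps through successive multiples of d modulo mod; each new minimum is an
    # eulercoin: track their running sum and the gap (min_diff) between successive minima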
while True:
n = (n+d)%mod
if n < min:
min_diff = min - n
min = n
sum += n
print((sum, min, min_diff, min-min_diff))
#ecoins()
def solve():
'''Copy the pattern observed in eulercoins.'''
# start with the second eulercoin so min_diff is defined
sum = 1513083232796311
min = 8912517754604
min_diff = 1495258197287103
while min > 0:
if min_diff > min:
min_diff %= min
min -= min_diff
sum += min
#print((sum, min, min_diff))
return sum
print(solve())
'''
(1504170715041707, 1504170715041707, 8495829284958292, -6991658569916585)
(1513083232796311, 8912517754604, 1495258197287103, -1486345679532499) <--- min_diff = ??? (previous min_diff isn't actually defined)
(1515128018282680, 2044785486369, 6867732268235, -4822946781866) <--- min_diff = min_diff - min*167 (obs: >167 & min_diff goes < 0)
(1516439427959921, 1311409677241, 733375809128, 578033868113) <--- min_diff = min_diff - min*3
(1517017461828034, 578033868113, 733375809128, -155341941015)
(1517440153755132, 422691927098, 155341941015, 267349986083) <---- min_diff = min_diff - min
(1517707503741215, 267349986083, 155341941015, 112008045068)
(1517819511786283, 112008045068, 155341941015, -43333895947)
(1517888185935404, 68674149121, 43333895947, 25340253174) <---- min_diff = min_diff - min
(1517913526188578, 25340253174, 43333895947, -17993642773)
(1517920872798979, 7346610401, 17993642773, -10647032372) <---- min_diff = min_diff - min
(1517924918987409, 4046188430, 3300421971, 745766459) <---- min_diff = min_diff - min*2
(1517925664753868, 745766459, 3300421971, -2554655512)
(1517926093164192, 428410324, 317356135, 111054189)
(1517926204218381, 111054189, 317356135, -206301946)
(1517926220024813, 15806432, 95247757, -79441325) <---- min_diff = min_diff - min*2
(1517926235422080, 15397267, 409165, 14988102) <---- min_diff = min_diff - min*6
(1517926250410182, 14988102, 409165, 14578937)
(1517926264989119, 14578937, 409165, 14169772)
(1517926279158891, 14169772, 409165, 13760607)
(1517926292919498, 13760607, 409165, 13351442)
(1517926306270940, 13351442, 409165, 12942277)
(1517926319213217, 12942277, 409165, 12533112)
(1517926331746329, 12533112, 409165, 12123947)
(1517926343870276, 12123947, 409165, 11714782)
(1517926355585058, 11714782, 409165, 11305617)
(1517926366890675, 11305617, 409165, 10896452)
(1517926377787127, 10896452, 409165, 10487287)
(1517926388274414, 10487287, 409165, 10078122)
(1517926398352536, 10078122, 409165, 9668957)
(1517926408021493, 9668957, 409165, 9259792)
(1517926417281285, 9259792, 409165, 8850627)
(1517926426131912, 8850627, 409165, 8441462)
(1517926434573374, 8441462, 409165, 8032297)
(1517926442605671, 8032297, 409165, 7623132)
(1517926450228803, 7623132, 409165, 7213967)
(1517926457442770, 7213967, 409165, 6804802)
(1517926464247572, 6804802, 409165, 6395637)
(1517926470643209, 6395637, 409165, 5986472)
(1517926476629681, 5986472, 409165, 5577307)
(1517926482206988, 5577307, 409165, 5168142)
(1517926487375130, 5168142, 409165, 4758977)
(1517926492134107, 4758977, 409165, 4349812)
(1517926496483919, 4349812, 409165, 3940647)
(1517926500424566, 3940647, 409165, 3531482)
(1517926503956048, 3531482, 409165, 3122317)
(1517926507078365, 3122317, 409165, 2713152)
(1517926509791517, 2713152, 409165, 2303987)
(1517926512095504, 2303987, 409165, 1894822)
(1517926513990326, 1894822, 409165, 1485657)
(1517926515475983, 1485657, 409165, 1076492)
(1517926516552475, 1076492, 409165, 667327)
(1517926517219802, 667327, 409165, 258162)
(1517926517477964, 258162, 409165, -151003)
(1517926517585123, 107159, 151003, -43844)
'''
``` |
{
"source": "joetache4/project-euler",
"score": 3
} |
#### File: joetache4/project-euler/743_WindowIntoAMatrix__SLOW.py
```python
import time
from lib.num import mod_inverse
mod = 1000000007
fact = [1]
for a in range(1, 10**8+1):
fact.append((fact[-1] * a) % mod)
fact_inv = []
for a in fact:
fact_inv.append(mod_inverse(a, mod))
print(f"pre-processing done ({time.strftime('%I:%M:%S %p')})")
def choose(a,b):
return (fact[a] * fact_inv[a-b] * fact_inv[b]) % mod
def solve(k, L):
total = 0
for ones in range(k, -1, -2):
twos = (k - ones) // 2
count = choose(k, ones) * choose(k - ones, twos)
count *= pow(2, ones * L // k, mod)
total += count
return total % mod
assert solve(3, 9) == 560
assert solve(4, 20) == 1060870
print(solve(10**8, 10**16))
```
#### File: joetache4/project-euler/751_ConcatenationCoincidence.py
```python
from math import floor
from decimal import getcontext, Decimal as D
P = 24 # precision
getcontext().prec = P+1
def concat(theta):
a = [floor(theta)]
b = [theta]
for _ in range(P+1):
b.append(floor(b[-1])*(b[-1]-floor(b[-1])+1))
a.append(floor(b[-1]))
tau = D(str(a[0]) + "." + "".join(str(i) for i in a[1:]))
return tau
assert str(concat(D('2.956938891377988'))).startswith('2.3581321345589')
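# Iterate theta -> concat(theta), starting from theta = 2, until the fixed point theta == tau is reached.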
theta = D(2)
tau = concat(theta)
while theta != tau:
theta = tau
tau = concat(theta)
print(str(round(tau, P)))
``` |
{
"source": "joetache4/ProjectEuler",
"score": 3
} |
#### File: joetache4/ProjectEuler/788_DominatingNumbers.py
```python
import numpy as np
def D(N, M = 1000000007):
# row = total number of digits
# col = number of digits that are equal
a = np.zeros((N,N+1), dtype=np.uint64)
a[0][:2] = [9,1]
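    # Each step appends one digit: 9 choices differ from the dominating digit (9*a[i-1])
    # and one choice equals it (np.roll shifts the equal-count up by one); rows where the
    # equal digits are not a strict majority are zeroed out below.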
for i in range(1,N):
a[i] = np.mod(
9*a[i-1] + np.roll(a[i-1],1), # np.roll used to shift right
M)
for i in range(N):
a[i][:(i+1)//2+1] = 0
return 9*int(a.sum()) % M
assert D(4) == 603
assert D(10) == 21893256
print(D(2022))
``` |
{
"source": "joetache4/project-euler",
"score": 2
} |
#### File: project-euler/data/p099.py
```python
def get_data():
return [
(519432,525806),
(632382,518061),
(78864,613712),
(466580,530130),
(780495,510032),
(525895,525320),
(15991,714883),
(960290,502358),
(760018,511029),
(166800,575487),
(210884,564478),
(555151,523163),
(681146,515199),
(563395,522587),
(738250,512126),
(923525,503780),
(595148,520429),
(177108,572629),
(750923,511482),
(440902,532446),
(881418,505504),
(422489,534197),
(979858,501616),
(685893,514935),
(747477,511661),
(167214,575367),
(234140,559696),
(940238,503122),
(728969,512609),
(232083,560102),
(900971,504694),
(688801,514772),
(189664,569402),
(891022,505104),
(445689,531996),
(119570,591871),
(821453,508118),
(371084,539600),
(911745,504251),
(623655,518600),
(144361,582486),
(352442,541775),
(420726,534367),
(295298,549387),
(6530,787777),
(468397,529976),
(672336,515696),
(431861,533289),
(84228,610150),
(805376,508857),
(444409,532117),
(33833,663511),
(381850,538396),
(402931,536157),
(92901,604930),
(304825,548004),
(731917,512452),
(753734,511344),
(51894,637373),
(151578,580103),
(295075,549421),
(303590,548183),
(333594,544123),
(683952,515042),
(60090,628880),
(951420,502692),
(28335,674991),
(714940,513349),
(343858,542826),
(549279,523586),
(804571,508887),
(260653,554881),
(291399,549966),
(402342,536213),
(408889,535550),
(40328,652524),
(375856,539061),
(768907,510590),
(165993,575715),
(976327,501755),
(898500,504795),
(360404,540830),
(478714,529095),
(694144,514472),
(488726,528258),
(841380,507226),
(328012,544839),
(22389,690868),
(604053,519852),
(329514,544641),
(772965,510390),
(492798,527927),
(30125,670983),
(895603,504906),
(450785,531539),
(840237,507276),
(380711,538522),
(63577,625673),
(76801,615157),
(502694,527123),
(597706,520257),
(310484,547206),
(944468,502959),
(121283,591152),
(451131,531507),
(566499,522367),
(425373,533918),
(40240,652665),
(39130,654392),
(714926,513355),
(469219,529903),
(806929,508783),
(287970,550487),
(92189,605332),
(103841,599094),
(671839,515725),
(452048,531421),
(987837,501323),
(935192,503321),
(88585,607450),
(613883,519216),
(144551,582413),
(647359,517155),
(213902,563816),
(184120,570789),
(258126,555322),
(502546,527130),
(407655,535678),
(401528,536306),
(477490,529193),
(841085,507237),
(732831,512408),
(833000,507595),
(904694,504542),
(581435,521348),
(455545,531110),
(873558,505829),
(94916,603796),
(720176,513068),
(545034,523891),
(246348,557409),
(556452,523079),
(832015,507634),
(173663,573564),
(502634,527125),
(250732,556611),
(569786,522139),
(216919,563178),
(521815,525623),
(92304,605270),
(164446,576167),
(753413,511364),
(11410,740712),
(448845,531712),
(925072,503725),
(564888,522477),
(7062,780812),
(641155,517535),
(738878,512100),
(636204,517828),
(372540,539436),
(443162,532237),
(571192,522042),
(655350,516680),
(299741,548735),
(581914,521307),
(965471,502156),
(513441,526277),
(808682,508700),
(237589,559034),
(543300,524025),
(804712,508889),
(247511,557192),
(543486,524008),
(504383,526992),
(326529,545039),
(792493,509458),
(86033,609017),
(126554,589005),
(579379,521481),
(948026,502823),
(404777,535969),
(265767,554022),
(266876,553840),
(46631,643714),
(492397,527958),
(856106,506581),
(795757,509305),
(748946,511584),
(294694,549480),
(409781,535463),
(775887,510253),
(543747,523991),
(210592,564536),
(517119,525990),
(520253,525751),
(247926,557124),
(592141,520626),
(346580,542492),
(544969,523902),
(506501,526817),
(244520,557738),
(144745,582349),
(69274,620858),
(292620,549784),
(926027,503687),
(736320,512225),
(515528,526113),
(407549,535688),
(848089,506927),
(24141,685711),
(9224,757964),
(980684,501586),
(175259,573121),
(489160,528216),
(878970,505604),
(969546,502002),
(525207,525365),
(690461,514675),
(156510,578551),
(659778,516426),
(468739,529945),
(765252,510770),
(76703,615230),
(165151,575959),
(29779,671736),
(928865,503569),
(577538,521605),
(927555,503618),
(185377,570477),
(974756,501809),
(800130,509093),
(217016,563153),
(365709,540216),
(774508,510320),
(588716,520851),
(631673,518104),
(954076,502590),
(777828,510161),
(990659,501222),
(597799,520254),
(786905,509727),
(512547,526348),
(756449,511212),
(869787,505988),
(653747,516779),
(84623,609900),
(839698,507295),
(30159,670909),
(797275,509234),
(678136,515373),
(897144,504851),
(989554,501263),
(413292,535106),
(55297,633667),
(788650,509637),
(486748,528417),
(150724,580377),
(56434,632490),
(77207,614869),
(588631,520859),
(611619,519367),
(100006,601055),
(528924,525093),
(190225,569257),
(851155,506789),
(682593,515114),
(613043,519275),
(514673,526183),
(877634,505655),
(878905,505602),
(1926,914951),
(613245,519259),
(152481,579816),
(841774,507203),
(71060,619442),
(865335,506175),
(90244,606469),
(302156,548388),
(399059,536557),
(478465,529113),
(558601,522925),
(69132,620966),
(267663,553700),
(988276,501310),
(378354,538787),
(529909,525014),
(161733,576968),
(758541,511109),
(823425,508024),
(149821,580667),
(269258,553438),
(481152,528891),
(120871,591322),
(972322,501901),
(981350,501567),
(676129,515483),
(950860,502717),
(119000,592114),
(392252,537272),
(191618,568919),
(946699,502874),
(289555,550247),
(799322,509139),
(703886,513942),
(194812,568143),
(261823,554685),
(203052,566221),
(217330,563093),
(734748,512313),
(391759,537328),
(807052,508777),
(564467,522510),
(59186,629748),
(113447,594545),
(518063,525916),
(905944,504492),
(613922,519213),
(439093,532607),
(445946,531981),
(230530,560399),
(297887,549007),
(459029,530797),
(403692,536075),
(855118,506616),
(963127,502245),
(841711,507208),
(407411,535699),
(924729,503735),
(914823,504132),
(333725,544101),
(176345,572832),
(912507,504225),
(411273,535308),
(259774,555036),
(632853,518038),
(119723,591801),
(163902,576321),
(22691,689944),
(402427,536212),
(175769,572988),
(837260,507402),
(603432,519893),
(313679,546767),
(538165,524394),
(549026,523608),
(61083,627945),
(898345,504798),
(992556,501153),
(369999,539727),
(32847,665404),
(891292,505088),
(152715,579732),
(824104,507997),
(234057,559711),
(730507,512532),
(960529,502340),
(388395,537687),
(958170,502437),
(57105,631806),
(186025,570311),
(993043,501133),
(576770,521664),
(215319,563513),
(927342,503628),
(521353,525666),
(39563,653705),
(752516,511408),
(110755,595770),
(309749,547305),
(374379,539224),
(919184,503952),
(990652,501226),
(647780,517135),
(187177,570017),
(168938,574877),
(649558,517023),
(278126,552016),
(162039,576868),
(658512,516499),
(498115,527486),
(896583,504868),
(561170,522740),
(747772,511647),
(775093,510294),
(652081,516882),
(724905,512824),
(499707,527365),
(47388,642755),
(646668,517204),
(571700,522007),
(180430,571747),
(710015,513617),
(435522,532941),
(98137,602041),
(759176,511070),
(486124,528467),
(526942,525236),
(878921,505604),
(408313,535602),
(926980,503640),
(882353,505459),
(566887,522345),
(3326,853312),
(911981,504248),
(416309,534800),
(392991,537199),
(622829,518651),
(148647,581055),
(496483,527624),
(666314,516044),
(48562,641293),
(672618,515684),
(443676,532187),
(274065,552661),
(265386,554079),
(347668,542358),
(31816,667448),
(181575,571446),
(961289,502320),
(365689,540214),
(987950,501317),
(932299,503440),
(27388,677243),
(746701,511701),
(492258,527969),
(147823,581323),
(57918,630985),
(838849,507333),
(678038,515375),
(27852,676130),
(850241,506828),
(818403,508253),
(131717,587014),
(850216,506834),
(904848,504529),
(189758,569380),
(392845,537217),
(470876,529761),
(925353,503711),
(285431,550877),
(454098,531234),
(823910,508003),
(318493,546112),
(766067,510730),
(261277,554775),
(421530,534289),
(694130,514478),
(120439,591498),
(213308,563949),
(854063,506662),
(365255,540263),
(165437,575872),
(662240,516281),
(289970,550181),
(847977,506933),
(546083,523816),
(413252,535113),
(975829,501767),
(361540,540701),
(235522,559435),
(224643,561577),
(736350,512229),
(328303,544808),
(35022,661330),
(307838,547578),
(474366,529458),
(873755,505819),
(73978,617220),
(827387,507845),
(670830,515791),
(326511,545034),
(309909,547285),
(400970,536363),
(884827,505352),
(718307,513175),
(28462,674699),
(599384,520150),
(253565,556111),
(284009,551093),
(343403,542876),
(446557,531921),
(992372,501160),
(961601,502308),
(696629,514342),
(919537,503945),
(894709,504944),
(892201,505051),
(358160,541097),
(448503,531745),
(832156,507636),
(920045,503924),
(926137,503675),
(416754,534757),
(254422,555966),
(92498,605151),
(826833,507873),
(660716,516371),
(689335,514746),
(160045,577467),
(814642,508425),
(969939,501993),
(242856,558047),
(76302,615517),
(472083,529653),
(587101,520964),
(99066,601543),
(498005,527503),
(709800,513624),
(708000,513716),
(20171,698134),
(285020,550936),
(266564,553891),
(981563,501557),
(846502,506991),
(334,1190800),
(209268,564829),
(9844,752610),
(996519,501007),
(410059,535426),
(432931,533188),
(848012,506929),
(966803,502110),
(983434,501486),
(160700,577267),
(504374,526989),
(832061,507640),
(392825,537214),
(443842,532165),
(440352,532492),
(745125,511776),
(13718,726392),
(661753,516312),
(70500,619875),
(436952,532814),
(424724,533973),
(21954,692224),
(262490,554567),
(716622,513264),
(907584,504425),
(60086,628882),
(837123,507412),
(971345,501940),
(947162,502855),
(139920,584021),
(68330,621624),
(666452,516038),
(731446,512481),
(953350,502619),
(183157,571042),
(845400,507045),
(651548,516910),
(20399,697344),
(861779,506331),
(629771,518229),
(801706,509026),
(189207,569512),
(737501,512168),
(719272,513115),
(479285,529045),
(136046,585401),
(896746,504860),
(891735,505067),
(684771,514999),
(865309,506184),
(379066,538702),
(503117,527090),
(621780,518717),
(209518,564775),
(677135,515423),
(987500,501340),
(197049,567613),
(329315,544673),
(236756,559196),
(357092,541226),
(520440,525733),
(213471,563911),
(956852,502490),
(702223,514032),
(404943,535955),
(178880,572152),
(689477,514734),
(691351,514630),
(866669,506128),
(370561,539656),
(739805,512051),
(71060,619441),
(624861,518534),
(261660,554714),
(366137,540160),
(166054,575698),
(601878,519990),
(153445,579501),
(279899,551729),
(379166,538691),
(423209,534125),
(675310,515526),
(145641,582050),
(691353,514627),
(917468,504026),
(284778,550976),
(81040,612235),
(161699,576978),
(616394,519057),
(767490,510661),
(156896,578431),
(427408,533714),
(254849,555884),
(737217,512182),
(897133,504851),
(203815,566051),
(270822,553189),
(135854,585475),
(778805,510111),
(784373,509847),
(305426,547921),
(733418,512375),
(732087,512448),
(540668,524215),
(702898,513996),
(628057,518328),
(640280,517587),
(422405,534204),
(10604,746569),
(746038,511733),
(839808,507293),
(457417,530938),
(479030,529064),
(341758,543090),
(620223,518824),
(251661,556451),
(561790,522696),
(497733,527521),
(724201,512863),
(489217,528217),
(415623,534867),
(624610,518548),
(847541,506953),
(432295,533249),
(400391,536421),
(961158,502319),
(139173,584284),
(421225,534315),
(579083,521501),
(74274,617000),
(701142,514087),
(374465,539219),
(217814,562985),
(358972,540995),
(88629,607424),
(288597,550389),
(285819,550812),
(538400,524385),
(809930,508645),
(738326,512126),
(955461,502535),
(163829,576343),
(826475,507891),
(376488,538987),
(102234,599905),
(114650,594002),
(52815,636341),
(434037,533082),
(804744,508880),
(98385,601905),
(856620,506559),
(220057,562517),
(844734,507078),
(150677,580387),
(558697,522917),
(621751,518719),
(207067,565321),
(135297,585677),
(932968,503404),
(604456,519822),
(579728,521462),
(244138,557813),
(706487,513800),
(711627,513523),
(853833,506674),
(497220,527562),
(59428,629511),
(564845,522486),
(623621,518603),
(242689,558077),
(125091,589591),
(363819,540432),
(686453,514901),
(656813,516594),
(489901,528155),
(386380,537905),
(542819,524052),
(243987,557841),
(693412,514514),
(488484,528271),
(896331,504881),
(336730,543721),
(728298,512647),
(604215,519840),
(153729,579413),
(595687,520398),
(540360,524240),
(245779,557511),
(924873,503730),
(509628,526577),
(528523,525122),
(3509,847707),
(522756,525555),
(895447,504922),
(44840,646067),
(45860,644715),
(463487,530404),
(398164,536654),
(894483,504959),
(619415,518874),
(966306,502129),
(990922,501212),
(835756,507474),
(548881,523618),
(453578,531282),
(474993,529410),
(80085,612879),
(737091,512193),
(50789,638638),
(979768,501620),
(792018,509483),
(665001,516122),
(86552,608694),
(462772,530469),
(589233,520821),
(891694,505072),
(592605,520594),
(209645,564741),
(42531,649269),
(554376,523226),
(803814,508929),
(334157,544042),
(175836,572970),
(868379,506051),
(658166,516520),
(278203,551995),
(966198,502126),
(627162,518387),
(296774,549165),
(311803,547027),
(843797,507118),
(702304,514032),
(563875,522553),
(33103,664910),
(191932,568841),
(543514,524006),
(506835,526794),
(868368,506052),
(847025,506971),
(678623,515342),
(876139,505726),
(571997,521984),
(598632,520198),
(213590,563892),
(625404,518497),
(726508,512738),
(689426,514738),
(332495,544264),
(411366,535302),
(242546,558110),
(315209,546555),
(797544,509219),
(93889,604371),
(858879,506454),
(124906,589666),
(449072,531693),
(235960,559345),
(642403,517454),
(720567,513047),
(705534,513858),
(603692,519870),
(488137,528302),
(157370,578285),
(63515,625730),
(666326,516041),
(619226,518883),
(443613,532186),
(597717,520257),
(96225,603069),
(86940,608450),
(40725,651929),
(460976,530625),
(268875,553508),
(270671,553214),
(363254,540500),
(384248,538137),
(762889,510892),
(377941,538833),
(278878,551890),
(176615,572755),
(860008,506412),
(944392,502967),
(608395,519571),
(225283,561450),
(45095,645728),
(333798,544090),
(625733,518476),
(995584,501037),
(506135,526853),
(238050,558952),
(557943,522972),
(530978,524938),
(634244,517949),
(177168,572616),
(85200,609541),
(953043,502630),
(523661,525484),
(999295,500902),
(840803,507246),
(961490,502312),
(471747,529685),
(380705,538523),
(911180,504275),
(334149,544046),
(478992,529065),
(325789,545133),
(335884,543826),
(426976,533760),
(749007,511582),
(667067,516000),
(607586,519623),
(674054,515599),
(188534,569675),
(565185,522464),
(172090,573988),
(87592,608052),
(907432,504424),
(8912,760841),
(928318,503590),
(757917,511138),
(718693,513153),
(315141,546566),
(728326,512645),
(353492,541647),
(638429,517695),
(628892,518280),
(877286,505672),
(620895,518778),
(385878,537959),
(423311,534113),
(633501,517997),
(884833,505360),
(883402,505416),
(999665,500894),
(708395,513697),
(548142,523667),
(756491,511205),
(987352,501340),
(766520,510705),
(591775,520647),
(833758,507563),
(843890,507108),
(925551,503698),
(74816,616598),
(646942,517187),
(354923,541481),
(256291,555638),
(634470,517942),
(930904,503494),
(134221,586071),
(282663,551304),
(986070,501394),
(123636,590176),
(123678,590164),
(481717,528841),
(423076,534137),
(866246,506145),
(93313,604697),
(783632,509880),
(317066,546304),
(502977,527103),
(141272,583545),
(71708,618938),
(617748,518975),
(581190,521362),
(193824,568382),
(682368,515131),
(352956,541712),
(351375,541905),
(505362,526909),
(905165,504518),
(128645,588188),
(267143,553787),
(158409,577965),
(482776,528754),
(628896,518282),
(485233,528547),
(563606,522574),
(111001,595655),
(115920,593445),
(365510,540237),
(959724,502374),
(938763,503184),
(930044,503520),
(970959,501956),
(913658,504176),
(68117,621790),
(989729,501253),
(567697,522288),
(820427,508163),
(54236,634794),
(291557,549938),
(124961,589646),
(403177,536130),
(405421,535899),
(410233,535417),
(815111,508403),
(213176,563974),
(83099,610879),
(998588,500934),
(513640,526263),
(129817,587733),
(1820,921851),
(287584,550539),
(299160,548820),
(860621,506386),
(529258,525059),
(586297,521017),
(953406,502616),
(441234,532410),
(986217,501386),
(781938,509957),
(461247,530595),
(735424,512277),
(146623,581722),
(839838,507288),
(510667,526494),
(935085,503327),
(737523,512167),
(303455,548204),
(992779,501145),
(60240,628739),
(939095,503174),
(794368,509370),
(501825,527189),
(459028,530798),
(884641,505363),
(512287,526364),
(835165,507499),
(307723,547590),
(160587,577304),
(735043,512300),
(493289,527887),
(110717,595785),
(306480,547772),
(318593,546089),
(179810,571911),
(200531,566799),
(314999,546580),
(197020,567622),
(301465,548487),
(237808,559000),
(131944,586923),
(882527,505449),
(468117,530003),
(711319,513541),
(156240,578628),
(965452,502162),
(992756,501148),
(437959,532715),
(739938,512046),
(614249,519196),
(391496,537356),
(62746,626418),
(688215,514806),
(75501,616091),
(883573,505412),
(558824,522910),
(759371,511061),
(173913,573489),
(891351,505089),
(727464,512693),
(164833,576051),
(812317,508529),
(540320,524243),
(698061,514257),
(69149,620952),
(471673,529694),
(159092,577753),
(428134,533653),
(89997,606608),
(711061,513557),
(779403,510081),
(203327,566155),
(798176,509187),
(667688,515963),
(636120,517833),
(137410,584913),
(217615,563034),
(556887,523038),
(667229,515991),
(672276,515708),
(325361,545187),
(172115,573985),
(13846,725685)
]
```
#### File: joetache4/project-euler/_main.py
```python
import sys
import os
import time
def get_file(prefix):
try:
if prefix == "":
# get most recently changed file
files = [f for f in os.listdir(".") if f[-3:] == ".py" and f != "_main.py"]
files = sorted((os.path.getmtime(f),f) for f in files)
return files[-1][1]
else:
# get first file that matches prefix
prefix = prefix.zfill(3)
for file in os.listdir("."):
if file.startswith(prefix):
return file
except:
pass
return None
def run(args):
file = args[0]
args = " ".join(args[1:])
print()
print(time.strftime("%I:%M:%S %p"))
print(file)
print("-" * len(file))
print()
start = time.monotonic()
try:
success = False
success = os.system(f"python {file} {args}") == 0
except KeyboardInterrupt:
pass
stop = time.monotonic()
elapsed = time.strftime('%H:%M:%S', time.gmtime(stop - start))
print()
print("-" * len(file))
print(f"Completion time: {elapsed}")
print()
return success
def main_loop():
while True:
# get input
args = input("Enter a problem number: ")
if args.lower() in ["quit", "exit", "qqq"]:
break
args = args.split(" ")
file = get_file(args[0])
# run
if file is None:
print("File not found.")
else:
args[0] = file
while not run(args):
if input("Retry? [Y/n]").lower() == 'n':
break
if __name__ == "__main__":
try:
main_loop()
except (EOFError, KeyboardInterrupt):
pass
``` |
{
"source": "joetainment/joecceasy",
"score": 3
} |
#### File: joecceasy/examples/example__qtui__ExecWidget.py
```python
from joecceasy import Easy
Easy.Qtui.CreateApp().ExecWidget( Easy.Qtw.QLabel("Widget Made By One Liner") )
Easy.Qtui.CreateApp()
widget = Easy.Qtw.QPushButton("Widget Made Directly")
widget.setStyleSheet( 'font: 30pt sans')
Easy.Qtui.ExecWidget( widget )
def makeWidget():
global widget ## this is just so it can be shared with onExec
widget = Easy.Qtw.QPushButton("Widget Made By Function")
widget.setStyleSheet( 'font: 40pt mono')
return widget
def onExec( ):
global widget
Easy.Qtc.QTimer.singleShot(
1000,
lambda:
widget.setStyleSheet(
'font: 50pt serif'
)
)
Easy.Qtui.CreateApp()
Easy.Qtui.ExecWidget( makeWidget, onExec=onExec )
class MyWidgetClass(Easy.Qtw.QPushButton):
def __init__(self):
super().__init__()
self.toggle=True
self.setStyleSheet( 'font: 40pt sans')
self.setText( "Widget Made By Class" )
self.tickTimer = Easy.Qtc.QTimer( )
self.tickTimer.start( 250 )
self.tickTimer.timeout.connect( self.onTick )
def onExec(self):
self.setStyleSheet( "background: white" )
def onTick(self):
if self.toggle:
self.setStyleSheet('color: red')
else:
self.setStyleSheet('color: blue')
self.toggle = not self.toggle
Easy.Qtui.CreateApp()
Easy.Qtui.ExecWidget( MyWidgetClass, onExec=True )
```
#### File: joecceasy/examples/example__walker_01.py
```python
from joecceasy import Easy
def main():
paths = ['..','.']
absOfEntries = [ i.abs for i in Easy.WalkAnIter(paths) ]
for i in absOfEntries:
print( i )
if __name__=='__main__':
main()
"""
def main(maxEntries = 99):
i = -1
print( "Walker test, Walking current directory:" )
for entry in Easy.WalkAnIter( ['.'] ):
i += 1 ## because i start at -1, 1st run of line will be 0
if i > maxEntries:
break
print(entry.abs)
print( ' \n ' )
"""
#isFileByPython = os.path.isfile(entry.abs)
# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,
# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, se#p=' ')
#end='' )
#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )
#print( entry.__dict__ )
```
#### File: joecceasy/examples/gtignore----example__ascui_01-1.py
```python
from random import randint
import time
from joecceasy import Easy
#from asciimatics.screen import Screen
#Screen = Easy.Ascui.AsciimaticsMod.screen.Screen
#Easy.Mods.sys.exit( )
## Multiples can be used in sequence if you want multiple steps...
## First one, minor customization, no custom class
## note that since this functions as first screen only,
## we show "Next" instead of "Quit"
Easy.Ascui(title='Ascui Examples Step 1 of 2', quitLabel="Next").exec_()
## Second one, via, customized subclass
class ExampleAscui( Easy.Ascui ):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
def initWidgets(self):
self.frame.createWidget("Text", "MyText", "My Text" )
# self.frame.createWidget("Divider", "Divider01", None, height=3 )
self.frame.createWidget("Button", "Do Nothing", None, layoutCol=0, inFooter=True )
self.frame.createWidget("Button", "Show Anim Msg", None,
layoutCol=2, inFooter=True,
callback=lambda:
Easy.Ascui.FullscreenMsg(
msg="Button was pressed!",
timeout=3
)
)
exampleAscui = ExampleAscui(title='Ascui Examples Step 2 of 2')
exampleAscui.exec()
```
#### File: joecceasy/joecceasy/FileWatcher.py
```python
import os, sys, time
from . import Utils
class FileWatcher:
def __init__(self):
self.updateInterval = 1
self.filesDict={}
def addFile( self, filePath, action=None, actionArgs=(), actionKwargs={}, useFilePathAsFirstArg=True, easyAction=None ):
entry = Utils.Object()
self.filesDict[filePath] = entry
entry.filePath = filePath
entry.lastMtime = os.stat( filePath ).st_mtime
entry.actionArgs = actionArgs
entry.actionKwargs = actionKwargs
entry.useFilePathAsFirstArg=useFilePathAsFirstArg
if easyAction==None:
entry.action = action
elif easyAction=='run':
import subprocess, sys
entry.action = (
lambda filePath:
subprocess.run( [sys.executable, filePath ], capture_output=False )
)
entry.actionArgs=()
entry.actionKwargs={}
self.updateEntry( entry )
return self
def update( self ):
for filePath, entry in self.filesDict.items():
newMtime = os.stat( filePath ).st_mtime
actionArgs = entry.actionArgs
actionKwargs = entry.actionKwargs
if newMtime != entry.lastMtime:
self.updateEntry( entry )
def updateEntry( self, entry ):
entry.lastMtime = os.stat( entry.filePath ).st_mtime
action = entry.action
actionArgs = entry.actionArgs
actionKwargs = entry.actionKwargs
if callable( action ):
if entry.useFilePathAsFirstArg:
action( entry.filePath, *actionArgs, **actionKwargs )
else:
action( *actionArgs, **actionKwargs )
return self
def loop( self ):
while True:
#print( 'updating' )
self.update()
time.sleep( self.updateInterval )
return self
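
# A minimal usage sketch (not part of the original module; "my_script.py" is a
# hypothetical path): re-run a script every time it is modified on disk.
#
#   watcher = FileWatcher()
#   watcher.addFile("my_script.py", easyAction="run")
#   watcher.loop()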
```
#### File: joecceasy/joecceasy/__main__.py
```python
import importlib, os, runpy, sys, traceback,math, random
import distutils.sysconfig as sysconfig
import os
import sys
from . import Utils
def get_standard_modules():
found=[]
std_lib = sysconfig.get_python_lib(standard_lib=True)
for top, dirs, files in os.walk(std_lib):
for nm in files:
prefix = top[len(std_lib)+1:]
if prefix[:13] == 'site-packages':
continue
if nm == '__init__.py':
found.append( top[len(std_lib)+1:].replace(os.path.sep,'.') )
elif nm[-3:] == '.py':
found.append( os.path.join(prefix, nm)[:-3].replace(os.path.sep,'.') )
elif nm[-3:] == '.so' and top[-11:] == 'lib-dynload':
found.append( nm[0:-3] )
for builtin in sys.builtin_module_names:
found.append( builtin )
result = []
for m in found:
if '.' in m:
continue
if '_' in m:
continue
if 'site'==m:
continue
if 'antigravity'==m:
continue
if 'this'==m:
continue
#if 't' in m[:3]
if True: ##'t' in m[2:3]:
#print(m)
if not m in result:
result.append(m)
return result
modNames = get_standard_modules()
#print( *modNames, sep="\n")
Mods = Utils.Object()
ModsDict = {}
for n in modNames: ## sys.builtin_module_names not enough
try:
im=importlib.import_module( n )
setattr(Mods, n, im)
ModsDict[n]=im
except:
##just skip it, error will instead occur on use attempt
'pass'
oldSysArgv = sys.argv.copy() # or list(sys.argv)
oldArg0=None ## this will be the joecceasy/__main__.py
oldArg1=None ## this will be the dummy command or the launcher path
if len(sys.argv)>0:
oldArg0 = sys.argv.pop(0)
if len(sys.argv)>0:
oldArg1 = sys.argv.pop(0)
if oldArg0==None or oldArg1==None or len(sys.argv)<1:
not_enough_args_msg = (
"\n"
"FAILURE"
"\n"
"joecceasy module cannot run since sufficient arguments "
"have not been provided."
"\n"
"Please provide at least one EasyPython file as "
"an argument if using the launcher."
"\n"
"e.g. joecceasyLauncher yourscript.easy.py"
"\n"
"If using the module, provide the 'run' command and "
"specify a script to run."
"\n"
"e.g. python -m joecceasy run yourscript.easy.py"
)
print( not_enough_args_msg )
#print( f"sys.argv: {sys.argv}" )
#print( f"oldArg0: {oldArg0}" )
#print( f"oldArg1: {oldArg1}" )
exit( )
#print( f"oldSysArgv: {oldSysArgv}" )
## get Easy
joecceasy = sys.modules[__package__]
Easy = joecceasy.Easy
Easy.Inst
newGlobals={
'joecceasy':joecceasy,
'Easy': Easy,
'Mods':Mods,
}
for k,v in ModsDict.items():
newGlobals[k]=v
#print( f"newGlobals: {newGlobals}" )
#print( f"sys.argv before run is: {sys.argv}" )
runpy.run_path(
sys.argv[0],
init_globals=newGlobals,
run_name="__main__",
)
#print( f"sys.argv: {sys.argv}" )
#print( f"package: {__package__}" )
#print( f"name: {__name__}" )
#for mod in sys.modules:
# #print( mod )
# 'pass'
"""
print( sys.argv )
print( sys.argv[0] )
print( sys.argv[0] )
print( sys.argv )
"""
"""
## adjust cwd with given script's path
# may need to change and work into init call below
## initialize it and get it
Easy = joecceasy.Easy #.Init( options )
## could handle args here and setup sys.argv manually
## runpy will also update it.
print(f"Easy.Args: {Easy.Args}" )
#sys.argv[0]
#argsForRun=Easy.Args
#sys.argv.clear()
#sys.argv.extend( Easy.Args ) ## sys.argv[0] get replaced anyway on run
#print(f"arg0NoQ: {arg0NoQ}" )
## do stuff for new sys args
#import ..joecceasy
# Easy.CallInteractive( "notepad.exe" )
## __init__.py will have already run
#import .
#importlib.import_module( ".." + __package__ )
#Easy.Inst
#from .. import __package__
#print( joecceasy )
#print( 'main' )
#print( sys.argv )
#print( Easy.Args )
"""
```
#### File: joecceasy/joecceasy/Qtui.py
```python
import os, sys
import PySide2
import PySide2.QtGui as QtGui
import PySide2.QtWidgets as QtWidgets
import PySide2.QtCore as QtCore
Qtc = QtCore
Qtw = QtWidgets
Qtg = QtGui
## Get self mod and package __init__ mod (joecceasy modules)
## It is safe to get this without circular import problems
## because Qtui is only ever called 'lazily', after
## joecceasy module is fully loaded
import sys ## just for SelfMod/SelfPak; most imports are in a later section
SelfPak=__import__(__package__)
SelfMod=sys.modules[__name__]
joecceasy = SelfPak
from . import Utils
from .Utils import classproperty
from . import EasyMod
from . import Easy
assert EasyMod.EasyModLoadingIsComplete==True
class KbEventFilterer(Qtc.QObject):
"""
    To avoid wasted effort, specify which event types to watch via on='press'/'release'/'shortcut'
    and which keys via watched=['k', 'l'] (unmodified key names).
"""
def __init__( self, *args, **kwargs ):
on=kwargs.get('on','release')
target = kwargs.get('target',None)
self.callback = kwargs.get( 'callback',None )
self.watched = kwargs.get('watched', [ ] ) ## uses unmodified keys, e.g. no "+" or ":"
self.watchCase = False ## currently disabled due to ctrl bug
#kwargs.get('watchCase', False ) ## related code commented out below
super().__init__( ) ## not needed #*args,**kwargs
onRelease= 'release' in on
onPress = 'press' in on
onShortcut = 'shortcut' in on
self.onlyOnTypes=[]
if onRelease:
self.onlyOnTypes.append( QtCore.QEvent.Type.KeyRelease )
if onPress:
self.onlyOnTypes.append( QtCore.QEvent.Type.KeyPress )
if onShortcut:
self.onlyOnTypes.append( QtCore.QEvent.Type.Shortcut )
if target is not None:
target.installEventFilter( self )
def eventFilter(self, obj, event):
## possible types:
## ShortcutOverride KeyPress KeyRelease
if not isinstance( event, Qtg.QKeyEvent ):
return False
## We now know it's a key event so we can get the key
key = event.key()
modifiers = int( event.modifiers() )
#Qtc.QEvent.Type.KeyRelease
if not event.type() in self.onlyOnTypes:
return False
if key==16777249 or key==16777251 or key==16777248 or key==16777250:
return False ## this is just an optmized version of below
## simple early bail if key not watched, ignores letter case
## and modifiers
qksNoMods = Qtg.QKeySequence( key )
qsNoMods = qksNoMods.toString()
if len(self.watched) > 0:
if not qsNoMods.lower() in [ w.lower() for w in self.watched ]:
return False
## this won't work due to bug when holding ctrl, text() returns useless info
#text = event.text()
#print(text)
#textLower = text.lower()
#print(textLower)
#if len(self.watched) > 0:
# if not textLower in [ w.lower() for w in self.watched ]:
# return False
key = event.key()
modifiers = event.modifiers()
## had to manually figure these out via testing
## qt seems to get special events for when it's only a modifier
intOnlyCtrl = 16777249
intOnlyAlt = 16777251
intOnlyShift = 16777248
intOnlyMeta = 16777250
modOnlyCodes = [intOnlyCtrl,intOnlyAlt,intOnlyShift,intOnlyMeta]
if key in modOnlyCodes:
return False
intCtrl = int(Qtc.Qt.CTRL) #67108864
intAlt = int(Qtc.Qt.ALT) #134217728
intShift = int(Qtc.Qt.SHIFT) #33554432
intMeta = int(Qtc.Qt.META) # ?
modCodes = [intCtrl,intAlt,intShift,intMeta]
if key in modCodes:
return False
qks = Qtg.QKeySequence( key | modifiers )
qs = qks.toString()
## More detailed case sensitive letter watching
## disabled for now because it was buggy with ctrl
#if len(self.watched) > 0:
# if self.watchCase:
# if not t in self.watched:
# return False
intMods=modifiers
andedCtrl = intMods & intCtrl
andedAlt = intMods & intAlt
andedShift = intMods & intShift
andedMeta = intMods & intMeta
hasCtrl = andedCtrl != 0
hasAlt = andedAlt != 0
hasShift = andedShift != 0
hasMeta = andedMeta != 0
## ignores different keyboard pluses
## always uses shift= never actual plus
## treat plus as special, won't work with unmodified plus such as other keyboards
qsEscaped = qs.replace('++', '+=') if 'shift' in qs.lower() else qs.replace('++','+Shift+=')
## because Num will be in qs if
qsEscaped = qsEscaped.replace( "Num+", "" )
qsl = qsEscaped.lower().split('+')
qsl=list( sorted(qsl,key=len) )
#print( qsl )
#print( qks, " ", hasCtrl, hasAlt, hasShift )
#print( qksNoMods, " ", hasCtrl, hasAlt, hasShift )
#print( qsNoMods )
#print( qs )
# print( event.text().lower(), " ", event.modifiers )
class EasyQKeyEvent:
'pass'
info = EasyQKeyEvent()
info.qs = qs
info.qks = qks
info.keys = qsl
info.key = key
info.hasCtrl=hasCtrl
info.hasAlt=hasAlt
info.hasShift=hasShift
info.hasMeta=hasMeta
info.typ=str( event.type() ).split('.')[-1].lower().replace('key','')
info.keys.append( info.typ )
if callable( self.callback):
return self.callback( obj, event, info )
else:
return False
class WidgetRecipe():
def __init__(self, label, connections=None, name=None, kind='button',
func=None,
layout=None,
useFuncNameInsteadOfLabelIfNoName=False,
widget=None ):
if connections is None:
connections = {}
self.connections = connections
self.label = label
self.kind = kind
labelr = label.replace(" ", "" )
if name is None:
if func is not None:
setattr( func, 'label', labelr )
if useFuncNameInsteadOfLabelIfNoName:
name = func.__name__
else: ## dont use
name = labelr
func.__name__ = name
else: ##func doesn't exist
name = labelr
else: ## name exists
if not func is None:
func.__name__ = name
self.name = name
self.func = func
self.layout = layout
class QtuiFuncs( ):
@classmethod
def GetQapp(cls, argv=None, ignoreArgvError=False ):
existingQapp = Qtw.QApplication.instance()
if existingQapp:
if not ignoreArgvError:
assert argv==None ## we can't share qapp if giving custom args
return existingQapp
else:
if argv==None:
argv = sys.argv.copy()
qapp = Qtw.QApplication( argv )
return qapp
@classproperty
def Qapp(cls):
return cls.GetQapp()
@classproperty
def KbEventFilterer(cls):
return KbEventFilterer
@classproperty
def Qtc(cls):
return Qtc
@classproperty
def QtCore(cls):
return Qtc
@classproperty
def Qtg(cls):
return QtGui
@classproperty
def QtGui(cls):
return QtGui
@classproperty
def Qtw(cls):
return Qtw
@classproperty
def QtWidgets(cls):
return Qtw
@classproperty
def SelfMod(cls):
return SelfMod
@classmethod
def ExecWidget(cls, widgetOrGetter, onExec=None, autoShow=True ): #args, kwargs=None):
"""
        Does a simple exec of the qapp using the given widget as the root.
        If widgetOrGetter is a class or function, it is called to create the widget.
        Returns an object with .qapp, .widget, .onExec and .returnCode.
"""
import types
#Utils.Object()
execInfo = types.SimpleNamespace()
execInfo.qapp = Qtw.QApplication.instance()
if execInfo.qapp==None:
execInfo.qapp = Qtw.QApplication( Easy.Argv )
#cls.GetQapp() ## should use override if available
## assign, get, or create our widget
if isinstance( widgetOrGetter, Qtw.QWidget ):
execInfo.widget = widgetOrGetter
else:
execInfo.widget = widgetOrGetter()
## apply onExec function if wanted
if onExec is not None:
if onExec==True:
Qtc.QTimer.singleShot(0, execInfo.widget.onExec)
elif isinstance( onExec, str ):
                def runOnExecByStr():  ## onExec (the attribute name) is captured from the enclosing scope
foundFuncByStr = getattr( execInfo.widget, onExec )
foundFuncByStr()
Qtc.QTimer.singleShot(0, runOnExecByStr)
else:
Qtc.QTimer.singleShot(0, onExec)
execInfo.onExec = onExec
if autoShow:
execInfo.widget.show()
execInfo.returnCode = execInfo.qapp.exec_()
return execInfo
@classproperty
def WidgetRecipe(cls):
return WidgetRecipe
class QtuiBase(QtuiFuncs):
__QappSingleton = None
@classmethod
def CreateApp(cls):
cls.GetQapp()
return cls
@classmethod ##override
def GetQapp(cls, argv=None ):
## setup self.qapp and self.argv which are closely related
if cls.__QappSingleton!=None:
assert argv==None ## we can't share qapp if giving custom args
return cls.__QappSingleton
else:
existingQapp = Qtw.QApplication.instance()
if existingQapp:
assert argv==None ## we can't share qapp if giving custom args
cls.__QappSingleton = existingQapp
return existingQapp
else:
if argv==None:
argv = sys.argv.copy()
                cls.__QappSingleton = Qtw.QApplication( argv )
                return cls.__QappSingleton
@classproperty ##override
def Qapp(cls):
return cls.GetQapp()
class Qtui( Qtw.QMainWindow, QtuiBase, Easy.AbstractBaseClass ): #metaclass=QtuiMeta ):
@classmethod
def Exec(cls, *args, **kwargs):
qtui = cls( *args, **kwargs )
return qtui.exec_()
@classmethod
def ExecAndExit(cls,*args,**kwargs ):
"""
        Exec the Qapp and then sys.exit with the qapp's return code.
"""
qtui = cls( *args, **kwargs )
qtui.execAndExit()
return qtui ## this line may never be called
def exec_(self): ## an extra name to match qt regular atApp.exec_()
return self.qapp.exec_()
def execAndExit(self):
"""
run qapp and automatically call sys.exit with return result
"""
r = self.qapp.exec_()
Easy.Mods.sys.exit( r )
return self ## this line probably never reached
def execQapp(self):
return self.qapp.exec_()
def __init__(self, *args, **kwargs ):
argsOrig = args ## like tuple(args) because args is already a tuple
kwargsOrig = kwargs.copy()
## initArgsMan = self.argsMan = ArgsMan(
# args,
# kwargs,
# forSelf=,
# forLocal=,
#)
## add specified entries from kwargs
## or fallback defaults to self.
dkwargsForSelf = {
## kwargs default fallbacks
'qapp' : None,
'argv' : None, ## the args to give qapp
'papp' : None,
'title' : 'Application',
'appTitleToBeShownInMenu' : 'App',
'tabTitle' : None, ## title of first default tab
'windowTitle': None, ## later we'll use self.title as fallback
'instructionsText': None, ## later we'll use self.title as fallback
'iconPath': None,
'appUserModelId' : 'mycompany.myproduct.subproduct.version', #taskbarIcon
'callbacks' : {}, ## should be a dictionary like object
######## Callbacks should take 'self' at a minimum
######## They should generally take same args/kwargs as
######## the wrapper functions
## createCentralWidget
## createCentralLayout
## update
## onGoClicked
## initAdditionalWidgetsIntoDefault
## initWidgetsPre
## initWidgetsPost
## be careful of potential confusion with
## setCentralWidget from centralWidget
'updateInterval' : 30,
'autoUpdateViaTimer' : True,
'autoUpdatePapp' : None,
## todo noauto
'autoShow' : True,
'autoCreateLayout' : True,
'autoCreateDefaultWidgetsInLayout' : True,
'showInput' : True,
'useTabs' : True,
'useMenu' : True,
'useOutput' : True,
'autoExpandOutput':False,
'useOutputToggleButton' : True,
'useStatusBar' : True,
'statusBarMsg' : '',
'exitSleepTime' : 0.15,
'minimumWidth': 400,
'minimumHeight': 300,
'widgetRecipes' : {},
'widgets' : {}, ## Stores widgets
'layouts' : {}, ## Stores layouts
}
Easy.DictOfDefaultsOntoObj( self, dkwargsForSelf, kwargs )
dkwargsLocal = {
## kwargs default fallbacks
'QMainWindowArgs' : tuple(), #tuple( (None, self.Qtc.Qt.WindowStaysOnTopHint,) ),
'QMainWindowKwargs' : {},
'KeyboardInterruptFix' : True,
}
Easy.DictOfDefaultsOntoDict( dkwargsLocal, kwargs )
## KeyboardInterrupt work, fixes qt bug
if kwargs['KeyboardInterruptFix']:
print( 'keyboard int fix')
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
## add a couple fallback that are dependent on other fallbacks
## should add a nice Easy function to do selfToSelf fallbacks
## based on attribute names
if self.windowTitle==None:
self.windowTitle = self.title
if self.tabTitle==None:
self.tabTitle = self.title
#if self.qapp==None:
self.initQapp()
## after qapp, set self.argv to something reasonable if it's None
if self.argv==None:
self.argv = sys.argv.copy()
super().__init__(
*( kwargs[ 'QMainWindowArgs' ] ),
**( kwargs[ 'QMainWindowKwargs' ] ),
)
'''
cb = self.callbacks.get( 'createCentralWidget' )
    if cb!=None:
self.centralWidgetForLayout = cb()
'''
self.clipboard = self.Qtg.QGuiApplication.clipboard()
self.mainWidget = Qtw.QWidget()
self.mainLayout = Qtw.QFormLayout()
self.addToWidgets( 'mainLayout', self.mainLayout)
self.mainWidget.setLayout( self.mainLayout )
if self.useTabs==True:
self.tabView = PySide2.QtWidgets.QTabWidget()
self.tabBar = self.tabView.tabBar()
## widget for first tab page
self.tabView.addTab( self.mainWidget, self.tabTitle )
self.setCentralWidget( self.tabView )
else:
centralWidget = self.mainWidget #Qtw.QWidget()
self.setCentralWidget( centralWidget )
#### self.centralWidget should now be valid via superclass ####
self.initMenu()
"""
cb = self.callbacks.get( 'createMainLayout' )
if cb==None:
self.centralLayout = Qtw.QFormLayout()
else:
self.centralLayout = cb()
"""
self.updateCallback = self.callbacks.get( 'update', None )
import ctypes
if os.name=='nt':
myappid = self.appUserModelId
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
if self.iconPath!=None:
self.icon = QtGui.QIcon( self.iconPath )
self.setWindowIcon( self.icon )
self.qapp.setWindowIcon( self.icon )
#print( self.icon )
self.setWindowTitle( self.windowTitle )
self.setMinimumWidth( self.minimumWidth )
self.setMinimumHeight( self.minimumHeight )
self.initOutput()
##self.initCentralWidget()
##self.initCentralLayout()
self.initWidgetsPreWrapper()
self.createWidgetsInCentralLayout()
self.initWidgetsPostWrapper()
self.initUpdateTimer()
statusBar = self.statusBar() ## first call also creates it
## 0 means no timeout
statusBar.showMessage( self.statusBarMsg, 0 )
if self.useStatusBar==True:
statusBar.show()
else:
statusBar.hide()
self.initWidgetRecipesWrapper()
if self.autoShow==True:
self.show()
'''
def initLayoutThenWidgets(self):
self.initSetCentralWidget() ## create central widget
self.initSetCentralLayout() ## create central widget
self.initCreateDefaultWidgetsInLayout()
'''
def initCentralWidget(self):
self.setCentralWidget( self.centralWidgetForLayout )
def initCentralLayout(self):
self.centralWidgetForLayout.setLayout( self.centralLayout )
def initLayout(self, *args, **kwargs):
"""
Override this to fill layout with your own widgets
"""
def initOutput(self):
self.outputDockWidget=QtWidgets.QDockWidget( "Output" )
self.outputWidget=QtWidgets.QWidget(
#parent=self.outputDockWidget
)
self.outputLayout=QtWidgets.QFormLayout()
        ## layout has to be added first
self.outputWidget.setLayout(self.outputLayout)
self.outputDockWidget.setWidget( self.outputWidget )
#if self.autoExpandOutput:
# self.outputDockWidget.show() ## show should be called on dock childs
"""
self.outputDockWidget.setFeatures(
QtWidgets.QDockWidget.DockWidgetMovable
| QtWidgets.QDockWidget.DockWidgetFloatable
| QtWidgets.QDockWidget.DockWidgetClosable
)
"""
#QDockWidget.DockWidgetClosable
#QDockWidget.DockWidgetMovable
#QDockWidget.DockWidgetFloatable
#QDockWidget.DockWidgetVerticalTitleBar
#QDockWidget.NoDockWidgetFeatures
if self.useOutput:
self.addDockWidget(
Qtc.Qt.BottomDockWidgetArea,
self.outputDockWidget,
)
self.outputTextEdit = Qtw.QTextEdit( )
self.outputTextEdit.setReadOnly( True )
self.outputBlankLabel = QtWidgets.QLabel( "-" )
self.outputLayout.addRow(self.outputTextEdit)
self.outputToggleButton = QtWidgets.QPushButton("Output: (Click Here To Hide)")
## left, top, right, bottom
        self.outputLayout.setContentsMargins(5, 0, 5, 10)  #addRow(self.outputBlankLabel)
if self.useOutputToggleButton:
self.outputDockWidget.setTitleBarWidget(
self.outputToggleButton
)
self.outputDockWidget.titleBarWidget().clicked.connect(
lambda: self.outputTextEditVisible('toggle'))
if not self.autoExpandOutput:
self.outputTextEditVisible(False)
def initWidgetRecipes(self):
pass
def initWidgetRecipesWrapper(self):
self.initWidgetRecipes()
self.makeWidgetsFromRecipes()
def initQapp(self):
## setup self.qapp and self.argv which are closely related
if self.qapp==None:
self.qapp = self.GetQapp( self.argv )
def initWidgetsPostWrapper(self):
cb = self.callbacks.get( 'initWidgetsPost', None )
if cb!=None: cb(self)
self.initWidgetsPost()
def initWidgetsPost(self):
'pass'
def initWidgetsPreWrapper(self):
cb = self.callbacks.get( 'initWidgetsPre', None )
if cb!=None: cb(self)
self.initWidgetsPre()
def initWidgetsPre(self):
'pass'
def initAdditionalWidgetsIntoDefaultWrapper(self):
cb = self.callbacks.get( 'initAdditionalWidgetsIntoDefault', None )
if cb!=None: cb(self)
self.initAdditionalWidgetsIntoDefault()
def initAdditionalWidgetsIntoDefault(self):
'pass'
def initMenu(self):
if self.useMenu==True:
self.menuEdit = self.menuBar().addMenu("&" + self.appTitleToBeShownInMenu )
self.menuEditExitAction = self.menuEdit.addAction("E&xit" )
self.menuEditExitAction.setShortcut(
PySide2.QtGui.QKeySequence("Ctrl+Q")
#PySide2.QtGui.QKeySequence.Quit doesn't work by default on windows,
#and since other platforms all seem to use ctrl-q, we'll just use it'
)
self.menuEditExitAction.triggered.connect(
self.exitWrapper
)
def initUpdateTimer(self):
if self.autoUpdateViaTimer==True:
            ## Create timer-based update loop
self.updateTimer = Qtc.QTimer()
self.updateTimer.timeout.connect( self.updateWrapper )
self.setNextTimerUpdate()
def addToWidgets( self, name, widget):
self.widgets[name]=widget
return widget
def addToLayouts( self, name, layout):
self.layouts[name]=layout
return layout
def appendToTextHeard(self, txt ):
self.textHeard.append(txt)
def createWidgetsInCentralLayout(self):
"""
Override this to fill self.layout with your
own widgets
"""
if self.autoCreateDefaultWidgetsInLayout==True:
self._createDefaultWidgetsInLayout()
def _createDefaultWidgetsInLayout(self):
if self.instructionsText!=None:
self.instructionsLabel = \
Qtw.QLabel(self.instructionsText)
self.instructionsLabel.setWordWrap(True)
self.mainLayout.addRow(self.instructionsLabel)
self.inputLabel = Qtw.QLabel("Input:")
self.inputLabel.setWordWrap(True)
self.inputTextEdit = Qtw.QTextEdit( )
if self.showInput==True:
self.mainLayout.addRow(self.inputLabel)
self.mainLayout.addRow(self.inputTextEdit)
## Prep text edit area for testing
self.inputTextEdit.selectAll()
self.inputTextEdit.setFocus()
self.initAdditionalWidgetsIntoDefaultWrapper()
"""
self.goButton = QtWidgets.QPushButton("Go")
self.goButton.clicked.connect( self.onGoButtonClickedWrapper )
self.goButton.setFocus()
self.mainLayout.addRow(self.goButton)
"""
"""
self.outputLabel = Qtw.QLabel("Output:")
self.outputLabel.setWordWrap(True)
self.outputLayout.addRow(self.outputLabel)
"""
"""
self.exitButton = Qtw.QPushButton("Exit")
self.exitButton.clicked.connect( self.exitWrapper )
self.mainLayout.addRow(self.exitButton)
"""
#self.button.setFocus()
'''
def onExitButtonClickedWrapper(self):
cb = self.callbacks.get( 'onExitButtonClicked', None )
if cb!=None:
cb()
self.onExitButtonClick()
'pass'
self.exit()
def onExitButtonClick(self):
'pass'
'''
def decorateFuncByAddingToWidgetRecipes( self, label,
connections={},
name=None,
kind='button',
layout=None,
useFuncNameInsteadOfLabelIfNoName=False,
):
#self = selfRef ## not needed if this is a method with self
def inner(func):
nonlocal self
nonlocal kind
nonlocal name
nonlocal connections
nonlocal layout
nonlocal useFuncNameInsteadOfLabelIfNoName
import types
recipe = WidgetRecipe( label, kind=kind, name=name,
connections=connections, func=func, layout=layout,
useFuncNameInsteadOfLabelIfNoName=useFuncNameInsteadOfLabelIfNoName,
)
method = recipe.func
setattr( self.__class__, recipe.name, method, ) #func,
methodBack = getattr( self.__class__, recipe.name )
recipe.func = methodBack
self.widgetRecipes[recipe.name] = recipe
#print( self.widgetRecipes )
#print( self.widgetRecipes )
return func
return inner
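    ## Hedged usage sketch (added note, not part of the original module): names
    ## below are illustrative only; 'clicked' is a standard QPushButton signal
    ## and the recipe plumbing follows makeWidgetFromRecipe further down.
    ##
    ##     ui = SomeEasyQtuiSubclass()
    ##     @ui.decorateFuncByAddingToWidgetRecipes(
    ##             "Say Hello", connections={'clicked': True} )
    ##     def sayHello(self):
    ##         self.print("Hello")
    ##     ui.makeWidgetsFromRecipes()  ## builds the button; clicking it
    ##                                  ## calls the bound sayHello method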
def exit(self):
"""
actual exit is
disabled by default for safety
override this to control exit behaviour,
will be called after onExitWrapper and onExit callback
        to actually quit instead, you should
        call self.qapp.quit() for a full unconditional exit
"""
'pass'
def exitWrapper(self):
cb = self.callbacks.get( 'onExit', None )
if cb!=None: cb()
self.exit()
try:
## *** todo we could change app and win titles to say exit
self.statusBar().showMessage( "Exiting...", 0)
self.exitButton.setText('Exiting...')
self.exitButton.repaint()
except:
## it *really* doesn't matter if
## the above gives errors
'pass'
Easy.Sleep( self.exitSleepTime )
self.qapp.quit()
## the parent app will probably quit at this point if it has
## nothing else to do
def outputTextEditVisible( self, newState=None ):
if newState==True:
self.outputToggleButton.setText("Output: (Click Here To Hide)")
self.outputTextEdit.show()
elif newState==False:
self.outputToggleButton.setText("Show Output")
self.outputTextEdit.hide()
elif newState=='toggle':
self.outputTextEditVisible(
not self.outputTextEditVisible()
)
elif newState is None:
return self.outputTextEdit.isVisible()
def makeWidgetFromRecipe( self, recipe ):
#print('making button')
#print( f"recipe.name: {recipe.name}")
layout = recipe.layout
if layout is None:
layout = self.mainLayout
elif isinstance(layout,str):
layout=self.layouts[layout]
else:
""
#print( "layout will be used directly" )
if recipe.kind=='button':
widget = Qtw.QPushButton( recipe.label )
if recipe.kind=='label':
widget = Qtw.QLabel( recipe.label )
self.widgets[recipe.name]=widget
if hasattr( recipe, 'connections' ):
for signalName, slotRef in recipe.connections.items():
sig = getattr( widget, signalName)
if slotRef==True:
slot=( lambda:
getattr(self, recipe.name)()
)
else:
if isinstance( slotRef, str):
slot=( lambda:
getattr(self, slotRef)()
)
else:
slot=slotRef
sig.connect( slot )
layout.addRow(
widget
)
def makeWidgetsFromRecipes(self):
for recipeName, recipe in self.widgetRecipes.items():
self.makeWidgetFromRecipe( recipe )
def onGoButtonClicked(self):
#self.outputTextArea.insertPlainText("Go Button Clicked.\n")
#self.outputTextArea.ensureCursorVisible()
'pass'
def onGoButtonClickedWrapper(self):
cb = self.callbacks.get( 'onGoButtonClicked', None )
if cb!=None: cb(self)
self.onGoButtonClicked()
def print(self, *args, doGui=True, doStandard=True, **kwargs):
## *** todo store curpos, move to end for insert,
## then restore the cursor's position if it wasn't at end
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
if doGui:
for a in args:
self.outputTextEdit.insertPlainText( str(a) + str(sep) )
if len(end)>0:
self.outputTextEdit.insertPlainText( end )
self.outputTextEdit.ensureCursorVisible()
if doStandard:
print( *args, **kwargs )
self.qapp.processEvents()
def updateWrapper(self):
self.update()
if self.updateCallback!=None:
self.updateCallback(self)
self.setNextTimerUpdate()
def update(self):
'pass'
def setNextTimerUpdate(self):
self.updateTimer.start(
self.updateInterval ## milliseconds
)
@property
def statusbar(self):
return self.statusBar()
"""
class QtuiMeta( type ):
@property
def QappMeta(cls): ## depreciated
return cls.GetQapp()
"""
oldDecoratorInnerCode = r"""
#print( f"self is {self}" )
#print("decorating")
#print( f" func is {func} - {type(func)}" )
#print( f"self is {self}" )
#print("decorating")
#print( f" func is {func} - {type(func)}" )
#print( f"label is {label}" )
#if not hasattr( self, 'buttonsFromDecorations' ):
#setattr(self, 'buttonsFromDecorations', {} )
#self.buttonsFromDecorations[label]=connections
#print( f"func in decorateAsButton is: {func}" )
#got = obj.__get__(self, self.__class__)
#instance.bar
#### could add to instance instead of class
#import types
#method = types.MethodType( obj, self )
#setattr( self, obj.__name__, method, )
#print( getattr(self,funcLike.__name__))
#print( f"label is {label}" )
#if not hasattr( self, 'buttonsFromDecorations' ):
#setattr(self, 'buttonsFromDecorations', {} )
#self.buttonsFromDecorations[label]=connections
#print( f"func in decorateAsButton is: {func}" )
#got = obj.__get__(self, self.__class__)
#instance.bar
#### could add to instance instead of class
#import types
#method = types.MethodType( obj, self )
#setattr( self, obj.__name__, method, )
#print( getattr(self,funcLike.__name__))
#types.MethodType( func, None, self.__class__ ) ## None doesn't work
## add it to the class so it can be access as a bound method
#setattr( self.__class__, name, func, )
#self.__class__[]
"""
oldSlotRefStuff = r"""
#slotName = recipe.func
#print( slotName )
#print( dir(self) )
#slot = getattr( self, slotName )
#slot()
#elif callable(slotRef):
# slot=slotRef
"""
old02=r"""
#getattr(self, n.fn)
#n.s = getattr( n.w, 'clicked' ) #signal
#n.w.clicked.connect(
self.spacesToHyphensButton = Qtw.QPushButton('Convert Spaces To Hyphens')
n.s.connect(
self.onSpacesToHyphensButton )
'Convert Spaces To Hyphens'
fpr k,v in self.widgets.items():
self.mainLayout.addRow(self.spacesToUnderscoresButton)
self.mainLayout.addRow(self.spacesToHyphensButton)
"""
old03=r"""
#for i,k in enumerate( self.widgetRecipes ):
# print( k, " ", i, )
"""
```
#### File: joecceasy/joecceasy/ReferencesAndExamplesOfCode.py
```python
class ExampleClass01Meta(type):
## __setattr__ and __getattr__ can potentially cause issues
## such as recursions (they introduce serious complexity)
'''
def __setattr__(cls, key, val ):
if key=='P':
print( val )
#else:
# setattr( cls, key, val) ## recursion issues
'''
```
#### File: submodules/unipath/abstractpath.py
```python
import os
from unipath.errors import UnsafePathError
__all__ = ["AbstractPath"]
# Use unicode strings if possible.
_base = str # Python 3 str (=unicode), or Python 2 bytes.
if os.path.supports_unicode_filenames:
try:
_base = unicode # Python 2 unicode.
except NameError:
pass
class AbstractPath(_base):
"""An object-oriented approach to os.path functions."""
pathlib = os.path
auto_norm = False
#### Special Python methods.
def __new__(class_, *args, **kw):
norm = kw.pop("norm", None)
if norm is None:
norm = class_.auto_norm
if kw:
kw_str = ", ".join(kw.iterkeys())
raise TypeError("unrecognized keyword args: %s" % kw_str)
newpath = class_._new_helper(args)
if isinstance(newpath, class_):
return newpath
if norm:
newpath = class_.pathlib.normpath(newpath)
# Can't call .norm() because the path isn't instantiated yet.
return _base.__new__(class_, newpath)
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
@classmethod
def _new_helper(class_, args):
pathlib = class_.pathlib
# If no args, return "." or platform equivalent.
if not args:
return pathlib.curdir
# Avoid making duplicate instances of the same immutable path
if len(args) == 1 and isinstance(args[0], class_) and \
args[0].pathlib == pathlib:
return args[0]
try:
legal_arg_types = (class_, basestring, list, int, long)
except NameError: # Python 3 doesn't have basestring nor long
legal_arg_types = (class_, str, list, int)
args = list(args)
for i, arg in enumerate(args):
if not isinstance(arg, legal_arg_types):
m = "arguments must be str, unicode, list, int, long, or %s"
raise TypeError(m % class_.__name__)
try:
int_types = (int, long)
except NameError: # We are in Python 3
int_types = int
if isinstance(arg, int_types):
args[i] = str(arg)
elif isinstance(arg, class_) and arg.pathlib != pathlib:
                arg = getattr(arg, "components")() # Now a list.
if arg[0]:
reason = ("must use a relative path when converting "
"from '%s' platform to '%s': %s")
tup = arg.pathlib.__name__, pathlib.__name__, arg
raise ValueError(reason % tup)
# Fall through to convert list of components.
if isinstance(arg, list):
args[i] = pathlib.join(*arg)
return pathlib.join(*args)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _base(self))
def norm(self):
return self.__class__(self.pathlib.normpath(self))
def expand_user(self):
return self.__class__(self.pathlib.expanduser(self))
def expand_vars(self):
return self.__class__(self.pathlib.expandvars(self))
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and norm() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
newpath = self.pathlib.expanduser(self)
newpath = self.pathlib.expandvars(newpath)
newpath = self.pathlib.normpath(newpath)
return self.__class__(newpath)
#### Properies: parts of the path.
@property
def parent(self):
"""The path without the final component; akin to os.path.dirname().
Example: Path('/usr/lib/libpython.so').parent => Path('/usr/lib')
"""
return self.__class__(self.pathlib.dirname(self))
@property
def name(self):
"""The final component of the path.
Example: path('/usr/lib/libpython.so').name => Path('libpython.so')
"""
return self.__class__(self.pathlib.basename(self))
@property
def stem(self):
"""Same as path.name but with one file extension stripped off.
Example: path('/home/guido/python.tar.gz').stem => Path('python.tar')
"""
return self.__class__(self.pathlib.splitext(self.name)[0])
@property
def ext(self):
"""The file extension, for example '.py'."""
return self.__class__(self.pathlib.splitext(self)[1])
#### Methods to extract and add parts to the path.
def split_root(self):
"""Split a path into root and remainder. The root is always "/" for
posixpath, or a backslash-root, drive-root, or UNC-root for ntpath.
If the path begins with none of these, the root is returned as ""
and the remainder is the entire path.
"""
P = self.__class__
if hasattr(self.pathlib, "splitunc"):
root, rest = self.pathlib.splitunc(self)
if root:
if rest.startswith(self.pathlib.sep):
root += self.pathlib.sep
rest = rest[len(self.pathlib.sep):]
return P(root), P(rest)
# @@MO: Should test altsep too.
root, rest = self.pathlib.splitdrive(self)
if root:
if rest.startswith(self.pathlib.sep):
root += self.pathlib.sep
rest = rest[len(self.pathlib.sep):]
return P(root), P(rest)
# @@MO: Should test altsep too.
if self.startswith(self.pathlib.sep):
return P(self.pathlib.sep), P(rest[len(self.pathlib.sep):])
if self.pathlib.altsep and self.startswith(self.pathlib.altsep):
return P(self.pathlib.altsep), P(rest[len(self.pathlib.altsep):])
return P(""), self
def components(self):
# @@MO: Had to prevent "" components from being appended. I don't
# understand why Lindqvist didn't have this problem.
# Also, doesn't this fail to get the beginning components if there's
# a "." or ".." in the middle of the path?
root, loc = self.split_root()
components = []
while loc != self.pathlib.curdir and loc != self.pathlib.pardir:
prev = loc
loc, child = self.pathlib.split(prev)
#print "prev=%r, loc=%r, child=%r" % (prev, loc, child)
if loc == prev:
break
if child != "":
components.append(child)
if loc == "":
break
if loc != "":
components.append(loc)
components.reverse()
components.insert(0, root)
return [self.__class__(x) for x in components]
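    ## Illustrative example (added note), following the docstring style above:
    ## Path('/usr/lib/libpython.so').components() would give
    ## [Path('/'), Path('usr'), Path('lib'), Path('libpython.so')] on posixpath.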
def ancestor(self, n):
p = self
for i in range(n):
p = p.parent
return p
def child(self, *children):
# @@MO: Compare against Glyph's method.
for child in children:
if self.pathlib.sep in child:
msg = "arg '%s' contains path separator '%s'"
tup = child, self.pathlib.sep
raise UnsafePathError(msg % tup)
if self.pathlib.altsep and self.pathlib.altsep in child:
msg = "arg '%s' contains alternate path separator '%s'"
tup = child, self.pathlib.altsep
raise UnsafePathError(msg % tup)
if child == self.pathlib.pardir:
msg = "arg '%s' is parent directory specifier '%s'"
tup = child, self.pathlib.pardir
raise UnsafePathError(msg % tup)
if child == self.pathlib.curdir:
msg = "arg '%s' is current directory specifier '%s'"
tup = child, self.pathlib.curdir
raise UnsafePathError(msg % tup)
newpath = self.pathlib.join(self, *children)
return self.__class__(newpath)
def norm_case(self):
return self.__class__(self.pathlib.normcase(self))
def isabsolute(self):
"""True if the path is absolute.
Note that we consider a Windows drive-relative path ("C:foo")
absolute even though ntpath.isabs() considers it relative.
"""
return bool(self.split_root()[0])
```
#### File: joecceasy/joecceasy/Tubprocess.py
```python
import collections, io, queue, subprocess, sys, time
from threading import Thread
class Duck():
"pass"
class Tubprocess():
OutErrTuple = collections.namedtuple('OutErrTuple',['out','err'])
@staticmethod
def moveFromQToList( q, l ):
while True:
try:
gotChar = q.get_nowait()
l.append( gotChar )
except queue.Empty:
break
#def __next__(self):
# raise StopIteration
def __init__(self,
cmdAndArgsAsIter=None, sleep=0.01,
shell=False, shellCmdJoin=True,
errToOut=False, autoPrint=False,
## old extra args: autoRun=False
):
self.cmdAndArgsAsIter = cmdAndArgsAsIter
self.sleep = sleep
self.shell=shell
self.shellCmdJoin=shellCmdJoin
self.errToOut = errToOut
self.autoPrint = autoPrint
self.qOut = queue.Queue()
self.qErr = queue.Queue()
self.outList = []
self.errList = []
self.errStrLast=''
self.outStrLast=''
self.outListLastLen=0
self.errListLastLen=0
self.threads = Duck()
self.reader = self.getReaderFunc()
self.iterObj=None
self.retVal=None
self.isRunInProgress=False
self.isIterInProgress=False
self.isAlreadyRan=False
#if autoRun==True:
# assert self.cmdAndArgsAsIter is not None
# self.run()
@property
def outStr(self):
outList=self.outList
if self.outListLastLen <= len(outList):
self.outStrLast = ''.join(outList)
return self.outStrLast
@property
def errStr(self):
errList=self.errList
if self.errListLastLen <= len(errList):
self.errStrLast = ''.join(errList)
return self.errStrLast
def next(self):
if self.iterObj==None:
self.iterObj=self.__iterRun()
return next( self.iterObj )
def __iter__(self):
## can only iterate once!
if self.iterObj==None:
self.iterObj=self.__iterRun()
for i in self.iterObj:
yield i
def getReaderFunc(self):
if hasattr( self, 'reader' ):
return self.reader
## this func should use only local state
## so it can be threaded
def reader(f,q):
#optTime=0.01
#sleepTime=0.3
#lastTime = time.time_ns()
doBreak = False
while True:
try:
gotChar=f.read(1)
except ValueError as err:
break
if gotChar:
q.put(gotChar)
else:
doBreak = True
if doBreak:
break
return reader
def makeThread(self):
"pass"
def joinProcAndThreads(self):
self.retVal = self.ps.wait()
threads=self.threads
threads.outReader.join()
threads.errReader.join()
self.isRunInProgress=False
def __runProcAndThreads(self):
assert self.isAlreadyRan==False
assert self.isRunInProgress == False
self.isAlreadyRan=True
self.isRunInProgress=True
self.retVal=None
#assert self.qOut.empty()
#assert self.qErr.empty()
if self.shell==False:
c = self.cmdAndArgsAsIter
else:
if self.shellCmdJoin==False:
c = self.cmdAndArgsAsIter
else:
c = " ".join( self.cmdAndArgsAsIter )
if self.errToOut:
errBind = subprocess.STDOUT
else:
errBind = subprocess.PIPE
        ps = subprocess.Popen(
            c,  ## use the shell-joined (or raw) command prepared above
#stdin=subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = errBind,
shell=self.shell,
text=True,
universal_newlines=True,
)
self.ps = ps
reader = self.getReaderFunc()
tOut = Thread(target=reader, args=(ps.stdout, self.qOut) )
if self.errToOut==False:
tErr = Thread(target=reader, args=(ps.stderr, self.qErr) )
else:
with io.StringIO() as fakeErr:
tErr = Thread(target=reader, args=(fakeErr, self.qErr) )
self.threads.outReader = tOut
self.threads.errReader = tErr
## Actually launch them
for t in [tOut,tErr]:
t.daemon=True
t.start()
return self
def __iterRun(self):
self.isIterInProgress = True
self.__runProcAndThreads( )
gotOutChars = []
gotErrChars = []
threads = self.threads
threadOut = threads.outReader
        threadErr = threads.errReader
qOut = self.qOut
qErr = self.qErr
doBreak = False
while True:
outIsAlive = threadOut.is_alive()
errIsAlive = threadErr.is_alive()
eitherIsAlive = outIsAlive or errIsAlive
if not eitherIsAlive:
if qOut.empty() and qErr.empty():
doBreak = True
self.moveFromQToList( qOut, gotOutChars )
self.moveFromQToList( qErr, gotErrChars )
returnStringOut=None
returnStringErr=None
doSleep=True
if len(gotOutChars) > 0 :
returnStringOut = ''.join(gotOutChars)
self.outList.append( returnStringOut )
if self.autoPrint:
print( returnStringOut, end='' )
gotOutChars.clear()
doSleep=False
if len(gotErrChars) > 0 :
returnStringErr = ''.join(gotErrChars)
self.errList.append( returnStringErr )
if self.autoPrint:
                    print( returnStringErr, end='' )
gotErrChars.clear()
doSleep=False
if doSleep:
time.sleep( 0.01 )
if doBreak == True:
break
else:
if returnStringOut is None:
returnStringOut=''
if returnStringErr is None:
returnStringErr=''
yield self.OutErrTuple(returnStringOut, returnStringErr)
## now out of while loop, yield final one if either remains
if returnStringOut is not None or returnStringErr is not None:
if returnStringOut is None:
returnStringOut=''
if returnStringErr is None:
returnStringErr=''
yield self.OutErrTuple(returnStringOut, returnStringErr)
self.isIterInProgress = False
## Cleanup end of iteration
self.joinProcAndThreads()
def wait(self):
for i in self:
"pass"
return self.retVal
def run(self):
return self.wait()
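## Hedged usage sketch (added note, not part of the original module): the
## command below is illustrative only. Iterating a Tubprocess yields
## OutErrTuple(out, err) chunks as they arrive; retVal holds the exit code
## once iteration finishes.
##
##     tp = Tubprocess( ['python', '-c', 'print("hi")'] )
##     for out, err in tp:
##         if out: print( out, end='' )
##     print( 'exit code:', tp.retVal )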
```
#### File: joecceasy/joecceasy/VarWatcher.py
```python
import os, sys, time
from . import Utils
from .Utils import classproperty
class VarWatcher:
@classproperty
def EasyPak(cls):
import sys
return __import__(__package__) ## *** fix this to be relative
@classproperty
def EasyMod(cls):
from . import Easy
return Easy ## *** fix this to be relative
@classproperty
def Easy(cls):
return cls.EasyMod.Easy
def __init__(self):
self.easyMod = self.EasyMod
        self.easy = self.Easy
self.updateInterval = 1
        self.varsDict={}
self.lastUpdateTime = time.time()
def add( self, var, action=None, actionArgs=(), actionKwargs={}, easyAction=None ):
if action==None and easyAction==None:
            easyAction='print'
entry = Utils.Object()
self.varsDict[var] = entry
entry.var = var
entry.actionArgs = actionArgs
entry.actionKwargs = actionKwargs
if easyAction==None:
entry.action = action
elif easyAction=='print':
import subprocess, sys
entry.action = (
lambda var:
                    self.Easy.PrintVar( var )
)
entry.actionArgs=()
entry.actionKwargs={}
self.updateEntry( entry )
return self
def update( self ):
        ## re-run each watched entry's action (this class watches variables,
        ## not files, so there is no mtime check here)
        for var, entry in self.varsDict.items():
            self.updateEntry( entry )
#else:
# raise Exception( f'action: {action} = Not implemented Yet! '
#
# if newMtime != entry.lastMtime:
# self.updateEntry( entry )
    def updateEntry( self, entry ):
        action = entry.action
        actionArgs = entry.actionArgs
        actionKwargs = entry.actionKwargs
        if callable( action ):
            ## the watched variable is passed as the first argument
            action( entry.var, *actionArgs, **actionKwargs )
return self
def loop( self ):
while True:
#print( 'updating' )
self.update()
time.sleep( self.updateInterval )
return self
```
#### File: tests/standalone/aaal-standalone-tests-run.py
```python
import os, subprocess, sys, time
scriptAbsPath = os.path.abspath(__file__)
scriptDir = os.path.dirname(scriptAbsPath)
os.chdir(scriptDir)
exeAbsPath = sys.executable
pyExt='.py'
testPrefix='test__'
stdout, stderr = None,None
outputFh=None
#def msg(*args,**kwargs):
# print(*args,**kwargs)
def msg( argOutputFh, *args, **kwargs):
if not argOutputFh is None:
print( *args, file=argOutputFh, **kwargs )
argOutputFh.flush()
else:
print( *args, **kwargs )
if '--output-to-delme-file' in sys.argv:
doOutput=True
outputFilePath = "gitignore--test-results-deleteme.txt"
    open(outputFilePath, 'w').close()  ## truncate any previous results file
outputFh = open(outputFilePath, 'a' )
stdout = outputFh
stderr = outputFh
#sys.stdout=
for steproot, dirs, files in os.walk('.'):
for file in files:
isTest = file.lower().startswith(testPrefix)
isPy = file.lower().endswith(pyExt)
if isPy and isTest:
msgAtStart = ( """############################\n"""
+"""#### Start test of python file: """
+ file + "\n"
)
msgAtEnd = (
"""#### End test of python file: """
+ file + "\n"
+"""############################\n"""
)
print( file )
msg(outputFh, msgAtStart, end='')
subprocess.run( [exeAbsPath, file], stdout=stdout, stderr=stderr )
msg(outputFh, msgAtEnd, end='' )
msg(outputFh, "\n\n\n", end='' )
if not outputFh is None:
outputFh.close()
``` |
{
"source": "joetainment/t33d",
"score": 3
} |
#### File: t33d/maya/t33d__maya_python__example__attribute_setter__level01.py
```python
import maya.cmds as cmds
#import pymel.all as pm
import math, random, traceback
## Short help msg shown at top of window
## It's common (standard) to have "constant" variables,
## that we don't intend to change while running
## all together at the top of the script in ALL CAPS
BANNER=""" You can use python expressions.
Use "a" for the old attribute value,
and i for the object index.
So to double the values, you could use: a * 2
To stack you could use: a + i
To randomize from 20 to 30 could use: random.uniform( 20.0, 30.0 )
"""
## This simple version will just use a bunch of global variables
## It's actually a bad way to do it, so a better version later
## will avoid so many global vars.
## This simple way with globals is easy to understand
## for beginner students though.
## This is the function that will actually
## create and show the UI window
def attributeSetterUiCreate():
## we have to tell python which variables should be global,
## made available outside this function, normally they should
## all be at the top, but to make it easier to follow,
## we'll actually declare them later when we make them
#global win
#global col
#global banner
#global attrNameLabel
#global attrNameField
#global attrValueLabel
#global attrValueField
#global setAttrButton
global win
win = 'T33d_AttributeSetterUi'
if win in cmds.lsUI(windows=True):
cmds.deleteUI( win )
win = cmds.window( win, title='T33d Attribute Setter' )
## store our win in our dict in case we want it elsewhere
print( "Created UI:", win )
## A ColumnLayout - This will stack UI widgets vertically
## adjustableColumn=True means the widgets can auto adjust
## their width to the window size
global col
col = cmds.columnLayout( adjustableColumn=True, parent=win )
## A banner msg we will show with some help
## cmds.text just creates a text label
global banner
banner = cmds.text(BANNER, align="left")
## Text label that says "Attribute to change:"
global attrNameLabel
attrNameLabel = cmds.text(
'Attribute to change:',
align="left",
parent=col
)
## Text entry field,
## a place where the user will type in the attribute to change
global attrNameField
attrNameField = cmds.textField( parent=col )
## Text label that says "New value for attribute:"
global attrValueLabel
attrValueLabel = cmds.text(
'New value for attribute:',
align="left",
parent=col
)
## Text entry field,
## a place where the user will type the new value
## to set the attribute to
global attrValueField
attrValueField = cmds.textField( parent=col )
## Finally, a button the user can click on to actually run the code!
## even tho it's not strictly necessary,
## it's usually a good idea to use lambda as below
## to specify what command to run
## otherwise you can't write parens for the func call
## or put args in the parens... but with lambda you can!
global setAttrButton
setAttrButton = cmds.button(
label="Set Attributes",
align="left",
parent=col,
command=lambda x: setAttributes( )
)
cmds.showWindow( win )
def setAttributes():
print( "run!" )
## remember to declare our globals from the other function!
global attrNameLabel
global attrNameField
global attrValueLabel
global attrValueField
## this function doesn't actually use the next few globals,
## so they don't have to be included
#global win
#global col
#global banner
#global btn
## we'll get a list of all selected objects
objs = cmds.ls(selection=True,flatten=True)
## Now we'll Loop through the list, and try setting each
## object's attributes as requested
for i, obj in enumerate(objs):
## We use a try block so that if one fails,
## we just keep going to the next
try:
## Get the attribute based on the attribute name in the UI
attrName = cmds.textField( attrNameField, q=True, text=True )
attrValueOld = cmds.getAttr( obj + '.' + attrName )
a = attrValueOld
## Get the value from the UI
attrValueNew = cmds.textField( attrValueField, q=True, text=True )
            ## eval it to convert the value from the UI
## into the target type, e.g. a floating point number
attrValueNew = eval( attrValueNew )
cmds.setAttr( obj + '.' + attrName, attrValueNew )
except:
print( traceback.format_exc() )
print( 'Failed for object: ' + obj )
if __name__=="__main__":
attributeSetterUiCreate()
``` |
{
"source": "joetannn/cs-321-project",
"score": 3
} |
#### File: joetannn/cs-321-project/api.py
```python
import hug
import base64
from SkillsComparer import SkillsComparer
@hug.response_middleware()
def process_data(request, response, resource):
response.set_header('Access-Control-Allow-Origin', '*')
@hug.get('/home')
def root():
return 'Welcome home!'
@hug.get('/add', examples='/add?first_num=6&second_num=8')
def do_add(first_num: hug.types.number, second_num: hug.types.number):
return {'Final Result:': first_num + second_num}
@hug.get('/send')
def receive_data(firstName: hug.types.text, lastName: hug.types.text, position: hug.types.text, link: hug.types.text, skills: hug.types.text, resume: hug.types.text):
#debug
values = {'firstname': firstName, 'lastName': lastName, 'position': position, 'link': link,
'resume': resume, 'skills': base64.b64decode(skills)}
skills_class = SkillsComparer()
api_firstName = firstName
api_lastName = lastName
api_position = position
api_link = link
#list of skills is array
api_skills = base64.b64decode(skills).decode('utf-8').split("|")
api_skills.pop(0)
#Strip duplicates
api_skills = list(set(api_skills))
if api_skills[0] == '':
api_skills.pop(0)
print("SKILLS FROM SKILLS LIST: " + str(api_skills))
# get all the resume stuff, remove duplicates
api_resume = resume.strip()
api_resume = list(set(api_resume.split()))
print("SKILLS FROM RESUME: " + str(api_resume))
api_resume = skills_class.returnTechTerms(api_resume)
print("SKILLS FROM RESUME IN SKILLS LIST: " + str(api_resume))
print("Extending resume to skills list")
api_skills.extend(api_resume)
print("FINAL SKILLS LIST: " + str(api_skills))
scrape_method = skills_class.scrape_link
scrape_method(api_link)
extra_skill_call = skills_class.getExtraJobSkills
extraJobSkills = extra_skill_call(api_skills)
print("SKILLS FROM Job NOT IN SKILLS_LIST: " + str(extraJobSkills))
extra_skill_list_call_call = skills_class.getExtraSkillsListSkills
extraSkillsListSkills = extra_skill_list_call_call(api_skills)
print("SKILLS FROM SKILLS_LIST NOT IN JOB: " + str(extraSkillsListSkills))
notInSkillsInJob = "|".join(list(set(extraJobSkills)))
if notInSkillsInJob != "" and "|" in notInSkillsInJob:
notInSkillsInJob = notInSkillsInJob[1:]
notInJobInSkills = "|".join(extraSkillsListSkills)
if notInJobInSkills != "" and "|" in notInJobInSkills:
notInJobInSkills = notInJobInSkills[1:]
inSkillsinJob = skills_class.getSimilarSkills(api_skills)
inSkillsinJob = "|".join(list(set(inSkillsinJob)))
if inSkillsinJob != "" and "|" in inSkillsinJob:
inSkillsinJob = inSkillsinJob[1:]
print("SKILLS IN JOB AND LIST:" + inSkillsinJob)
return {'1':inSkillsinJob,'2': notInSkillsInJob, '3': notInJobInSkills}
#return {'1':'abc|def','2':'yuuu2','3':'skillsboi|a|b'}
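## Illustrative request shape (added note; values are placeholders): /send
## expects `skills` as base64 of a '|'-separated list whose first element is
## discarded, so encode a string such as "|python|sql", e.g.
##   /send?firstName=Ada&lastName=L&position=Dev&link=<job-url>
##        &skills=<base64 of "|python|sql">&resume=...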
def buildCoverLetter():
pass
def getExtraSkills():
pass
print("Running API!")
```
#### File: joetannn/cs-321-project/scrape.py
```python
import requests
from bs4 import BeautifulSoup
class JobScraper:
def __init__(self, url):
self.url = url
# Scrapes this JobScraper's url and returns a list of all words
# that are not part of html tags
def scrape(self):
r = requests.get(self.url)
if(r.status_code != 200):
print('Unable to access site')
#soup = BeautifulSoup(r.text, 'html.parser')
soup = BeautifulSoup(r.text, 'html.parser')
plaintext = soup.encode("utf-8");
allWords = []
curWord = ""
curTag = ""
inWord = False
inTag = False
inScript = False
for s in plaintext:
char = chr(s)
if(char == '<'):
inTag = True
#print('Tag'+ str(curTag))
curTag = ""
elif(char == '>'):
if(inTag):
#if(curTag == "script"):
# inScript = True
#elif(curTag == "/script"):
# inScript = False
#print("Tag"+ str(curTag))
curTag = ""
inTag = False
elif(char == ' ' or char == '\n' or char == '\r' or char == '\t'):
if(inTag):
#if(curTag == "script"):
# inScript = True
#elif(curTag == "/script"):
# inScript = False
#print("Tag"+ str(curTag))
curTag = ""
if(inWord and (not inScript)):
exclude = set('!,.()?~|')
curWord = ''.join(char for char in curWord if char not in exclude)
#print("Word"+ str(curWord))
allWords.append(curWord)
curWord = ""
inWord = False
else:
if(not inTag and not inScript):
inWord = True
curWord = curWord + str(char)
else:
curTag = curTag + str(char)
#should remove duplicates
return allWords
#Arbitrary job posting for testing
#url = 'https://www.alarm.com/about/open-positions.aspx?gh_jid=4267530002'
#scraper = JobScraper(url)
#allWords = scraper.scrape()
#strip extras
#for word in allWords:
# if word == '/r' or word == '/n' or word == '':
# allWords.remove(word)
#
#for word in allWords:
# print("RAYTHEON POST:" + str(word))
# pass
``` |
{
"source": "JoeTao-097/Multi-REZ-Evalution-for-Breast-Ultrasound-Images",
"score": 2
} |
#### File: JoeTao-097/Multi-REZ-Evalution-for-Breast-Ultrasound-Images/Tools.py
```python
import pandas as pd
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage.transform import resize
import itertools
from sklearn.metrics import confusion_matrix,roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, f1_score
import seaborn as sns
import scipy
from scipy import stats
from sklearn.utils import resample
def zero_pad(img, size=448):
'''
pad zeros to make a square img for resize
'''
h, w, c = img.shape
if h > w:
zeros = np.zeros([h, h - w, c]).astype(np.uint8)
img_padded = np.hstack((img, zeros))
elif h < w:
zeros = np.zeros([w - h, w, c]).astype(np.uint8)
img_padded = np.vstack((img, zeros))
else:
img_padded = img
img_resized = (255 * resize(img_padded, (size, size), anti_aliasing=True)).astype(np.uint8)
return img_resized
def get_precision_recall(ax, y_true, y_pred, title, boostrap=5, plot=True):
def delta_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
return h
ap_score = []
for i in range(boostrap):
pred_bt, y_bt = resample(y_pred, y_true)
ap_score.append(average_precision_score(y_bt, pred_bt))
AP = average_precision_score(y_true, y_pred)
precision, recall, thresholds = precision_recall_curve(y_true, y_pred)
if plot:
delta = delta_confidence_interval(ap_score)
sns.set_style('ticks')
# plt.figure()
ax.plot(recall, precision, color='red', lw=2,
label='AUC = {:.3f}, \n95% C.I. = [{:.3f}, {:.3f}]'.format(AP, AP - delta, AP + delta), alpha=.8)
ax.set_xlabel('Recall', fontsize=16, fontweight='bold')
ax.set_ylabel('Precision', fontsize=16, fontweight='bold')
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
ax.set_ylim(0, 1)
ax.set_xlim(0, 1)
ax.set_title(title, fontsize=16, fontweight='bold')
ax.legend(fontsize=12, loc='lower right')
ax.grid()
return thresholds
def get_auc(ax, y_true, y_score, title, plot=True):
fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_true, y_score)
auc_keras = auc(fpr_keras, tpr_keras)
optimal_idx = np.argmax(tpr_keras - fpr_keras)
optimal_threshold = thresholds_keras[optimal_idx]
if plot:
ci = get_CI(y_true, y_score)
sns.set_style('ticks')
# plt.figure()
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='orange', label='Chance', alpha=.8)
ax.plot(fpr_keras, tpr_keras, color='red', lw=2,
label='AUC = {:.3f}, \n95% C.I. = [{:.3f}, {:.3f}]'.format(auc_keras, ci[0], ci[1]), alpha=.8)
ax.set_xlabel('Specificity', fontsize=16, fontweight='bold')
ax.set_ylabel('Sensitivity', fontsize=16, fontweight='bold')
ax.xaxis.set_tick_params(labelsize=16)
ax.yaxis.set_tick_params(labelsize=16)
ax.set_ylim(0, 1)
ax.set_xlim(0, 1)
ax.set_title(title, fontsize=16, fontweight='bold')
ax.legend(fontsize=12, loc='lower right')
ax.grid()
return optimal_threshold
def get_CI(y_true, y_score, alpha=0.95):
auc, auc_cov = delong_roc_variance(y_true, y_score)
auc_std = np.sqrt(auc_cov)
lower_upper_q = np.abs(np.array([0, 1]) - (1 - alpha) / 2)
ci = stats.norm.ppf(lower_upper_q, loc=auc, scale=auc_std)
ci[ci > 1] = 1
print('AUC:', auc)
print('AUC COV:', auc_cov)
print('95% AUC CI:', ci)
return ci
def delong_roc_variance(ground_truth, predictions, sample_weight=None):
"""
Computes ROC AUC variance for a single set of predictions
Args:
ground_truth: np.array of 0 and 1
predictions: np.array of floats of the probability of being class 1
"""
order, label_1_count, ordered_sample_weight = compute_ground_truth_statistics(
ground_truth, sample_weight)
predictions_sorted_transposed = predictions[np.newaxis, order]
aucs, delongcov = fastDeLong(predictions_sorted_transposed, label_1_count, ordered_sample_weight)
assert len(aucs) == 1, "There is a bug in the code, please forward this to the developers"
return aucs[0], delongcov
def compute_ground_truth_statistics(ground_truth, sample_weight):
assert np.array_equal(np.unique(ground_truth), [0, 1])
order = (-ground_truth).argsort()
label_1_count = int(ground_truth.sum())
if sample_weight is None:
ordered_sample_weight = None
else:
ordered_sample_weight = sample_weight[order]
return order, label_1_count, ordered_sample_weight
def fastDeLong(predictions_sorted_transposed, label_1_count, sample_weight):
if sample_weight is None:
return fastDeLong_no_weights(predictions_sorted_transposed, label_1_count)
else:
return fastDeLong_weights(predictions_sorted_transposed, label_1_count, sample_weight)
def fastDeLong_weights(predictions_sorted_transposed, label_1_count, sample_weight):
"""
The fast version of DeLong's method for computing the covariance of
unadjusted AUC.
Args:
predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
sorted such as the examples with label "1" are first
Returns:
(AUC value, DeLong covariance)
Reference:
@article{sun2014fast,
title={Fast Implementation of DeLong's Algorithm for
Comparing the Areas Under Correlated Receiver Oerating Characteristic Curves},
author={<NAME> and <NAME>},
journal={IEEE Signal Processing Letters},
volume={21},
number={11},
pages={1389--1393},
year={2014},
publisher={IEEE}
}
"""
# Short variables are named as they are in the paper
m = label_1_count
n = predictions_sorted_transposed.shape[1] - m
positive_examples = predictions_sorted_transposed[:, :m]
negative_examples = predictions_sorted_transposed[:, m:]
k = predictions_sorted_transposed.shape[0]
    tx = np.empty([k, m], dtype=float)
    ty = np.empty([k, n], dtype=float)
    tz = np.empty([k, m + n], dtype=float)
for r in range(k):
tx[r, :] = compute_midrank_weight(positive_examples[r, :], sample_weight[:m])
ty[r, :] = compute_midrank_weight(negative_examples[r, :], sample_weight[m:])
tz[r, :] = compute_midrank_weight(predictions_sorted_transposed[r, :], sample_weight)
total_positive_weights = sample_weight[:m].sum()
total_negative_weights = sample_weight[m:].sum()
pair_weights = np.dot(sample_weight[:m, np.newaxis], sample_weight[np.newaxis, m:])
total_pair_weights = pair_weights.sum()
aucs = (sample_weight[:m] * (tz[:, :m] - tx)).sum(axis=1) / total_pair_weights
v01 = (tz[:, :m] - tx[:, :]) / total_negative_weights
v10 = 1. - (tz[:, m:] - ty[:, :]) / total_positive_weights
sx = np.cov(v01)
sy = np.cov(v10)
delongcov = sx / m + sy / n
return aucs, delongcov
def fastDeLong_no_weights(predictions_sorted_transposed, label_1_count):
"""
The fast version of DeLong's method for computing the covariance of
unadjusted AUC.
Args:
predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
sorted such as the examples with label "1" are first
Returns:
(AUC value, DeLong covariance)
Reference:
@article{sun2014fast,
title={Fast Implementation of DeLong's Algorithm for
Comparing the Areas Under Correlated Receiver Oerating
Characteristic Curves},
author={<NAME> and <NAME>},
journal={IEEE Signal Processing Letters},
volume={21},
number={11},
pages={1389--1393},
year={2014},
publisher={IEEE}
}
"""
# Short variables are named as they are in the paper
m = label_1_count
n = predictions_sorted_transposed.shape[1] - m
positive_examples = predictions_sorted_transposed[:, :m]
negative_examples = predictions_sorted_transposed[:, m:]
k = predictions_sorted_transposed.shape[0]
    tx = np.empty([k, m], dtype=float)
    ty = np.empty([k, n], dtype=float)
    tz = np.empty([k, m + n], dtype=float)
for r in range(k):
tx[r, :] = compute_midrank(positive_examples[r, :])
ty[r, :] = compute_midrank(negative_examples[r, :])
tz[r, :] = compute_midrank(predictions_sorted_transposed[r, :])
aucs = tz[:, :m].sum(axis=1) / m / n - float(m + 1.0) / 2.0 / n
v01 = (tz[:, :m] - tx[:, :]) / n
v10 = 1.0 - (tz[:, m:] - ty[:, :]) / m
sx = np.cov(v01)
sy = np.cov(v10)
delongcov = sx / m + sy / n
return aucs, delongcov
def calc_pvalue(aucs, sigma):
"""Computes log(10) of p-values.
Args:
aucs: 1D array of AUCs
sigma: AUC DeLong covariances
Returns:
log10(pvalue)
"""
l = np.array([[1, -1]])
z = np.abs(np.diff(aucs)) / np.sqrt(np.dot(np.dot(l, sigma), l.T))
return np.log10(2) + scipy.stats.norm.logsf(z, loc=0, scale=1) / np.log(10)
def compute_midrank(x):
"""Computes midranks.
Args:
x - a 1D numpy array
Returns:
array of midranks
"""
J = np.argsort(x)
Z = x[J]
N = len(x)
    T = np.zeros(N, dtype=float)
i = 0
while i < N:
j = i
while j < N and Z[j] == Z[i]:
j += 1
T[i:j] = 0.5 * (i + j - 1)
i = j
    T2 = np.empty(N, dtype=float)
# Note(kazeevn) +1 is due to Python using 0-based indexing
# instead of 1-based in the AUC formula in the paper
T2[J] = T + 1
return T2
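## Worked example (added note): compute_midrank assigns tied values the average
## of the 1-based ranks they span, e.g.
##     compute_midrank(np.array([0.2, 0.8, 0.2])) -> array([1.5, 3. , 1.5])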
def compute_midrank_weight(x, sample_weight):
"""Computes midranks.
Args:
x - a 1D numpy array
Returns:
array of midranks
"""
J = np.argsort(x)
Z = x[J]
cumulative_weight = np.cumsum(sample_weight[J])
N = len(x)
    T = np.zeros(N, dtype=float)
i = 0
while i < N:
j = i
while j < N and Z[j] == Z[i]:
j += 1
T[i:j] = cumulative_weight[i:j].mean()
i = j
    T2 = np.empty(N, dtype=float)
T2[J] = T
return T2
def plot_confusion_matrix(ax, cm, target_names, title='Confusion matrix', cmap=None, normalize=True, fontsize=16):
"""
given a sklearn confusion matrix (cm), make a nice plot
Arguments
---------
cm: confusion matrix from sklearn.metrics.confusion_matrix
target_names: given classification classes such as [0, 1, 2]
the class names, for example: ['high', 'medium', 'low']
title: the text to display at the top of the matrix
cmap: the gradient of the values displayed from matplotlib.pyplot.cm
see http://matplotlib.org/examples/color/colormaps_reference.html
plt.get_cmap('jet') or plt.cm.Blues
normalize: If False, plot the raw numbers
If True, plot the proportions
Citiation
---------
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
"""
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
if target_names is not None:
tick_marks = np.arange(len(target_names))
ax.set_xticks(tick_marks)
ax.set_yticks(tick_marks)
ax.set_xticklabels(target_names)
ax.set_yticklabels(target_names, rotation=90)
ax.xaxis.set_tick_params(labelsize=fontsize)
ax.yaxis.set_tick_params(labelsize=fontsize)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.set_title(title, fontsize=fontsize, fontweight='bold')
ax.tick_params(labelsize=fontsize - 3)
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
ax.text(j, i, "{:0.2f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize=fontsize)
else:
ax.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize=fontsize)
ax.set_ylabel('True label', fontsize=fontsize, fontweight='bold')
ax.set_xlabel('Predicted label\naccuracy={:0.2f}; misclass={:0.2f}'.format(accuracy, misclass), fontsize=fontsize,
fontweight='bold')
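## Hedged usage sketch (added note; the arrays and class names below are
## placeholders only, not from the original project):
##
##     y_true = np.array([0, 0, 1, 1])
##     y_score = np.array([0.1, 0.4, 0.35, 0.8])
##     fig, ax = plt.subplots()
##     threshold = get_auc(ax, y_true, y_score, 'ROC curve')
##     cm = confusion_matrix(y_true, (y_score >= threshold).astype(int))
##     fig2, ax2 = plt.subplots()
##     plot_confusion_matrix(ax2, cm, ['benign', 'malignant'])
##     plt.show()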
``` |
{
"source": "joe-taylor/bcgov-python-presentations",
"score": 3
} |
#### File: bcgov-python-presentations/20200318_richardson_unsupervised_classification/kgc.py
```python
from util import *
'''variables for clustering'''
data = None # 1. Input data a) points
idx = None # b) range(0, len(data))
knn_k = None # 2. Intermediate data: a) number of k-nearest neighbours
dmat = None # b) list of rows of sorted dist. matrix
rho = None # 3. Model data a) density estimate
label = None # 4. Class data a) class label
next_label = None # b) next avail. class label
class_label = None # c) truth data label if available!
# command line parameters
input_file = None
print("len(args)", len(args), "args", str(args))
if len(args) > 1:
input_file = args[1]
print("input_file", input_file)
else:
err("Error:\n\tusage: python3 kgc.py [input file name] [knn-k (optional)]")
if len(args) > 2:
knn_k = int(args[2])
print("knn_k", knn_k)
# other parameters
scale_data = len(args) > 3 # add an optional parameter to turn on [0, 1] scaling
# calculate one sorted row of the distance matrix
def dmat_row(i):
global data, idx
row, pi, n_d = [], data[i], len(data[i])
for j in idx:
d, pj = 0., data[j] # d(data[i], data[j]), data[j]
for k in range(0, n_d):
d += math.pow(pi[k] - pj[k], 2)
row.append([d, j])
# print(row)
row.sort(key=lambda x: x[0]) # sort on dist: increasing
# print(row)
return row # output: sorted dmat row for data[i]
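## Note (added): each row is a list of [squared_distance, j] pairs over every
## point j (including i itself), sorted ascending, so row[0] has distance 0.0.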
def rho(): # density estimation
global data, idx, knn_k, dmat, rho
rho = []
for i in idx: # for each point
row, r = dmat[i], 0. # distance matrix row, dens. est. for a point
for j in range(0, knn_k):
r += row[j][0] # add dist. to j-th nearest neighbour
        try:
            rho.append(1. / r)
        except ZeroDivisionError:
            rho.append(float('inf')) ## coincident points: maximal density, keeps rho aligned with idx
def climb(i, climb_from = None):
print("climb i=", i, "from", climb_from)
global rho, knn_k, dmat, label, next_label
if label[i] >= 0:
return label[i] # base case: data[i] already done
else:
dmat_row = dmat[i]
rho_knn = [[rho[j], j] # [density estimate, data idx j]
for j in [dmat_row[k][1] # for j'th k-nearest neighbour
for k in range(1, knn_k)]] # over k-nearest neighbours
rho_knn.sort(key=lambda x: -x[0]) # sort by density
if rho[i] > rho_knn[0][0]: # at hilltop? return new label
print(" new label", next_label)
label[i] = next_label
next_label += 1 # create new label
else: # not at hilltop? climb up
label[i] = climb(rho_knn[0][1], i)
return label[i] # return label
def cluster(input_file):
global data, idx, knn_k, dmat, rho, label, next_label
idx = range(0, len(data)) # data indices
label, next_label = [-1 for i in idx], 0 # first label will be 0
dmat, pkl_f = None, input_file + '_dmat.p'
if os.path.exists(pkl_f):
print("restoring dmat from pickle file..")
dmat = pickle.load(open(pkl_f, 'rb'))
else:
print("1. sorted distance matrix..") # memoize?
dmat = parfor(dmat_row, idx)
pickle.dump(dmat, open(pkl_f, 'wb'))
print("2. density estimation..")
rho() # density estimation
print("rho", rho)
print("3. recursive hillclimbing..")
for i in idx:
climb(i)
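## Note (added): climb() gives each point the label of its densest k-nearest
## neighbour, recursing uphill until it reaches a point denser than all of its
## neighbours (a "hilltop"), which founds a new cluster; clusters are therefore
## the basins of attraction of local density maxima.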
from read_csv import read_csv, write_output
data, class_label = read_csv(input_file)
if scale_data:
# scale data to [0, 1]
min_x, max_x = copy.deepcopy(data[0]), copy.deepcopy(data[0])
for p in data:
for k in range(0, len(data[0])):
min_x[k] = p[k] if p[k] < min_x[k] else min_x[k]
max_x[k] = p[k] if p[k] > max_x[k] else max_x[k]
for i in range(0, len(data)):
for k in range(0, len(data[0])):
data[i][k] -= min_x[k]
denom = (max_x[k] - min_x[k])
if denom != 0.:
data[i][k] /= denom
print("points", data) # print out points just in case
# if not specified, set knn_k to be equal to
if knn_k is None:
knn_k = math.ceil(math.sqrt(len(data)))
print("knn_k", knn_k)
cluster(input_file)
print('label', label)
print('class_label', class_label)
print('n_labels', next_label)
from evaluate import class_match, accuracy, consistency
mapping = None
if class_label is not None:
mapping, count = class_match(label, class_label)
print(mapping)
print(accuracy(label, class_label, mapping))
c = consistency(label, class_label, mapping, count)
table_filename = input_file + '_table.html'
open(table_filename, "wb").write(c.encode())
a = os.system('firefox ' + table_filename)
# can still write output labels, even if no "reference" class data
output_file = input_file + '_output.csv'
print(rho)
write_output(input_file, output_file, label, mapping, rho)
# maybe do later?
# put field names on the three axes (in the 3d plot)
# convert binary class maps (colored with values) to PNG
``` |
{
"source": "JoeTester1965/AsteriskTheTelemarketers",
"score": 3
} |
#### File: JoeTester1965/AsteriskTheTelemarketers/AsteriskTheSpammers.py
```python
from google.cloud import texttospeech
from google.cloud import speech
from google.oauth2 import service_account
import grpc
import io
import sys
import numpy
import platform
import os
import time
from datetime import datetime
import re
from random import randint
import socket
import configparser
import subprocess
import logging
file_sequence=1
if len(sys.argv) != 2:
print("I need config file location as first paramater in command line")
sys.exit(1)
test = platform.system()
on_debug_platform = platform.system().startswith('Windows')
if on_debug_platform:
import pyaudio
audio = pyaudio.PyAudio()
my_config_label = "AsteriskTheSpammers"
config = configparser.ConfigParser()
config.read(sys.argv[1])
my_incoming_audio_match = config[my_config_label]["my_incoming_audio_match"]
my_credentials_file_path = config[my_config_label]["my_credentials_file_path"]
my_logfile = config[my_config_label]["my_logfile"]
my_outgoing_audio_transcription_file = config[my_config_label]["my_outgoing_audio_transcription_file"]
my_audio_out_directory = config[my_config_label]["my_audio_out_directory"]
hello_file = config[my_config_label]["hello_file"]
areyoustillthere_file = config[my_config_label]["areyoustillthere_file"]
audio_average_absolute_power_threshold_int16 = int(config[my_config_label]["audio_average_absolute_power_threshold_int16"])
waittheyspeak_timeout_bytes = int(config[my_config_label]["waittheyspeak_timeout_bytes"])
files_in_file_sequence = int(config[my_config_label]["files_in_file_sequence"])
theystopppedspeaking_timeout_bytes = int(config[my_config_label]["theystopppedspeaking_timeout_bytes"])
audio_read_granularity = int(config[my_config_label]["audio_read_granularity"])
context_leeway_bytes = int(config[my_config_label]["context_leeway_bytes"])
timeout_seconds_no_data_read_from_file = float(config[my_config_label]["timeout_seconds_no_data_read_from_file"])
cloud_processing_audio_file_size_limit = int(config[my_config_label]["cloud_processing_audio_file_size_limit"])
host_address = config[my_config_label]["host_address"]
port_number = int(config[my_config_label]["port_number"])
min_valid_buffer_size = int(config[my_config_label]["min_valid_buffer_size"])
logging.basicConfig( handlers=[
logging.FileHandler(my_logfile),
logging.StreamHandler()],
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def lock_onto_incoming_audio_file():
if on_debug_platform:
lockon_filename = my_incoming_audio_match
else:
command_line = "ls -t " + my_incoming_audio_match + " | head -1 > tmp"
os.system(command_line)
lockon_filename = open('tmp', 'r').read().strip()
return lockon_filename
def transcribe_audio(speech_file):
retval = ""
try:
transcription_client = speech.SpeechClient.from_service_account_file(my_credentials_file_path)
except:
logger.info("Not using Google speech to text as credz file not at : " + my_credentials_file_path)
return ""
with io.open(speech_file, "rb") as audio_file:
content = audio_file.read()
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=8000,
language_code="en-GB",
)
response = transcription_client.recognize(config=config, audio=audio)
for result in response.results:
retval = result.alternatives[0].transcript
return retval
def test_is_audio_activity(buffer, threshold):
retval = False
if len(buffer) > min_valid_buffer_size:
audio_bytes = numpy.frombuffer(buffer, dtype=numpy.int16)
average_absolute_power = numpy.sum(numpy.absolute(audio_bytes)) / audio_bytes.size
logger.debug("average_absolute_power : " + str(average_absolute_power))
if (average_absolute_power > threshold):
retval = True
return retval
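## Illustrative note (added; numbers are hypothetical): the buffer is decoded
## as 16-bit samples and the mean absolute amplitude is compared against the
## configured threshold, e.g. a near-silent buffer averaging ~50 would stay
## below a threshold of 1000 (False) while normal speech averaging a few
## thousand would exceed it (True).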
def process_WaitTheySpeak():
global audio_average_absolute_power_threshold_int16
global waittheyspeak_timeout_bytes
global file_sequence
global files_in_file_sequence
global theystopppedspeaking_timeout_bytes
global audio_read_granularity
global context_leeway_bytes
incoming_audio_file = lock_onto_incoming_audio_file()
total_bytes_processed = 0
if incoming_audio_file == "microphone":
mic_stream = audio.open(
format = pyaudio.paInt16,
rate = 8000,
channels = 1,
input_device_index = 1,
input = True,
frames_per_buffer=audio_read_granularity)
microphone_file_handle = open("microphone", 'wb')
else:
audio_source = open(incoming_audio_file, 'rb')
audio_source.seek(0, os.SEEK_END)
activity_heard = False
audio_first_noisy_marker = -1
total_bytes_processed = 0;
timer_start = time.time()
logger.debug("timer_start set to %s", repr(timer_start))
while activity_heard == False:
while True:
try:
if incoming_audio_file == "microphone":
buffer = mic_stream.read(audio_read_granularity)
microphone_file_handle.write(buffer)
microphone_file_handle.flush()
else:
buffer = audio_source.read(audio_read_granularity)
if buffer:
timer_start = time.time()
logger.debug("%d bytes returned from file read at %s", len(buffer), repr(timer_start))
break
else:
timer_end = time.time()
timer_delta = timer_end - timer_start
if timer_delta > timeout_seconds_no_data_read_from_file:
logger.info("Call state transitioned to NewCall : No bytes returned from read within timeout - assume handle dead")
return("ENDCALL")
except OSError as error:
logger.error("Call state transitioned to NewCall : os.read error %d", error)
return("ENDCALL")
total_bytes_processed = total_bytes_processed + len(buffer)
activity_heard = test_is_audio_activity(buffer, audio_average_absolute_power_threshold_int16)
if total_bytes_processed > waittheyspeak_timeout_bytes:
retval="FILE:" + areyoustillthere_file
total_bytes_processed = 0
return(retval)
if activity_heard:
if audio_first_noisy_marker == -1:
audio_first_noisy_marker = os.stat(incoming_audio_file).st_size
logger.info("*** You started speaking ***")
audio_last_noisy_marker = audio_first_noisy_marker
total_bytes_processed = 0
bytes_activity_heard = 0
bytes_activity_not_heard = 0
timer_start = time.time()
logger.debug("timer_start set to %s", repr(timer_start))
while activity_heard == True:
while True:
try:
if incoming_audio_file == "microphone":
buffer = mic_stream.read(audio_read_granularity)
microphone_file_handle.write(buffer)
microphone_file_handle.flush()
else:
buffer = audio_source.read(audio_read_granularity)
if buffer:
timer_start = time.time()
logger.debug("%d bytes returned from file read at %s", len(buffer), repr(timer_start))
break
else:
timer_end = time.time()
timer_delta = timer_end - timer_start
if timer_delta > timeout_seconds_no_data_read_from_file:
logger.info("Call state transitioned to NewCall : No bytes returned from read within timeout- assume handle dead")
return("ENDCALL")
except OSError as error:
logger.error("Call state transitioned to NewCall : os.read error %d", error)
return("ENDCALL")
total_bytes_processed = total_bytes_processed + len(buffer)
activity_heard = test_is_audio_activity(buffer, audio_average_absolute_power_threshold_int16)
if activity_heard:
bytes_activity_heard = total_bytes_processed
else:
bytes_activity_not_heard = total_bytes_processed
if (bytes_activity_not_heard - bytes_activity_heard) < theystopppedspeaking_timeout_bytes:
activity_heard = True
else:
audio_last_noisy_marker = os.stat(incoming_audio_file).st_size
logger.info("*** You stopped speaking ***")
activity_heard = False
speaking_bytes_to_process = audio_last_noisy_marker - audio_first_noisy_marker
# Put in sensible limits for cloud processing
if speaking_bytes_to_process > cloud_processing_audio_file_size_limit:
speaking_bytes_to_process = cloud_processing_audio_file_size_limit
bytes_in_file = os.stat(incoming_audio_file).st_size
available_context_leeway = bytes_in_file - speaking_bytes_to_process;
if available_context_leeway > context_leeway_bytes:
available_context_leeway = context_leeway_bytes
rewind_bytes = bytes_in_file - audio_first_noisy_marker + available_context_leeway;
speaking_bytes_to_process = speaking_bytes_to_process + available_context_leeway;
if incoming_audio_file == "microphone":
microphone_file_handle.close()
temp_file = create_temp_audio_file(incoming_audio_file, rewind_bytes, speaking_bytes_to_process + audio_read_granularity)
transcription_text = transcribe_audio(temp_file)
retval = ""
logger.info("*** Transcription text is : " + transcription_text + " ***")
if transcription_text:
retval = do_stuff_based_on_transcription(transcription_text)
if not retval:
retval="FILE:" + str(file_sequence)
file_sequence = file_sequence + 1
if file_sequence > files_in_file_sequence:
file_sequence = 1
return(retval)
def create_temp_audio_file(incoming_audio_file, offset_from_eof, bytes_to_process):
if (bytes_to_process % 2) != 0:
bytes_to_process = bytes_to_process + 1
start_of_header =b"\x52\x49\x46\x46\x00\x00\x00\x00\x57\x41\x56\x45\x66\x6D\x74\x20\x10\x00\x00\x00\x01\x00\x01\x00\x40\x1F\x00\x00\x80\x3E\x00\x00\x02\x00\x10\x00\x64\x61\x74\x61"
end_of_header = (bytes_to_process).to_bytes(4,'little')
wav_header = start_of_header + end_of_header
global my_outgoing_audio_transcription_file
f1 = open(incoming_audio_file,'rb')
f2 = open(my_outgoing_audio_transcription_file,'wb')
f1.seek(-1 * offset_from_eof, os.SEEK_END)
f2.write(wav_header)
buf=f1.read(bytes_to_process)
f2.write(buf)
f1.close()
f2.flush()
f2.close()
return my_outgoing_audio_transcription_file
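## Note (added): start_of_header is the first 40 bytes of a canonical 44-byte
## RIFF/WAVE header for 8 kHz, 16-bit, mono PCM (with the RIFF chunk size left
## as zero); end_of_header patches in the final 4-byte little-endian 'data'
## chunk size so the extracted slice parses as a .wav file for transcription.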
def do_stuff_based_on_transcription(transcription_text):
DTMFFiles = {
"zero" : "FILE:DTMF/0",
"one" : "FILE:DTMF/1",
"two" : "FILE:DTMF/2",
"three" : "FILE:DTMF/3",
"four" : "FILE:DTMF/4",
"five" : "FILE:DTMF/5",
"six" : "FILE:DTMF/6",
"seven" : "FILE:DTMF/7",
"eight" : "FILE:DTMF/8",
"nine" : "FILE:DTMF/9",
"0" : "FILE:DTMF/0",
"1" : "FILE:DTMF/1",
"2" : "FILE:DTMF/2",
"3" : "FILE:DTMF/3",
"4" : "FILE:DTMF/4",
"5" : "FILE:DTMF/5",
"6" : "FILE:DTMF/6",
"7" : "FILE:DTMF/7",
"8" : "FILE:DTMF/8",
"9" : "FILE:DTMF/9",
"star" : "FILE:DTMF/10",
"hash" : "FILE:DTMF/11",
"pound" : "FILE:DTMF/11"
}
if transcription_text.find("press") > -1:
for key, value in DTMFFiles.items():
if transcription_text.find(key) > -1:
retval = value
return(retval)
    elif any(re.findall(r'stupid|whatever', transcription_text, re.IGNORECASE)):
        audio_out_file = "echo"
        if transcription_text:
            write_transcription_audioFile("Did you really say " + transcription_text + "?", audio_out_file)
        retval = "FILE:echo"
        return(retval)
    elif randint(1, 6) == 1:  # roll a die
        audio_out_file = "echo"
        if transcription_text:
            write_transcription_audioFile("Did you say " + transcription_text + "?", audio_out_file)
        retval = "FILE:echo"
        return(retval)
return
def write_transcription_audioFile(text, filename):
filename = my_audio_out_directory + filename
client = texttospeech.TextToSpeechClient.from_service_account_file(my_credentials_file_path)
synthesis_input = texttospeech.SynthesisInput(text=text)
voice = texttospeech.VoiceSelectionParams(language_code="en-GB", name="en-GB-Wavenet-B", ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL)
audio_config = texttospeech.AudioConfig(audio_encoding=texttospeech.AudioEncoding.LINEAR16, speaking_rate=0.89, pitch=2.4, sample_rate_hertz=8000)
response = client.synthesize_speech(input=synthesis_input, voice=voice, audio_config=audio_config)
wav_filename=filename+".wav"
with open(wav_filename, "wb") as out:
out.write(response.audio_content)
sln_filename=filename+".sln"
with open(sln_filename, "wb") as out:
audio = bytearray(response.audio_content)
audio_without_wav_header = audio[44:]
out.write(audio_without_wav_header)
def create_static_conversation_files():
write_transcription_audioFile("Hello, this is Kenny speaking!","hello")
write_transcription_audioFile("Sorry. I cannot hear you. Are you still there?" ,"areyoustillthere")
write_transcription_audioFile("My, that sounds most interesting.","1")
write_transcription_audioFile("Someone did call about the same thing last week, was that you?","2")
write_transcription_audioFile("Excuse me but what did you say your name was again?","3")
write_transcription_audioFile("It's funny that you should call about this, my neighbour mentioned that yesterday.","4")
write_transcription_audioFile("Oh boy! I never knew that was possible." ,"5")
write_transcription_audioFile("That does need some consideration." ,"6")
write_transcription_audioFile("Could you say that again, please?" ,"7")
write_transcription_audioFile("Oh! I see! That sounds fine.","8")
write_transcription_audioFile("Sorry which company did you say that you were calling from?","9")
write_transcription_audioFile("The last time someone called up and spoke to me about that, something came on the telly and I had to hang up.","10")
write_transcription_audioFile("Since you put it that way, please do carry on.", "11")
write_transcription_audioFile("Well, what with coronavirus and all that, we all need more patience and understanding.", "12")
write_transcription_audioFile("That does sound great, what needs to happen next?" ,"13")
write_transcription_audioFile("Sorry I am a bit confused can you say that again?","14")
# create_static_conversation_files()  # remake static files if necessary
logger.info("AsteriskTheSpammers started")
HOST = host_address
PORT = port_number
if on_debug_platform:
    subprocess.Popen(r'"C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python37_86\python.exe" stub-client.py')
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
conn, addr = s.accept()
with conn:
while True:
data = conn.recv(1024)
if not data:
break
agi_message = data.decode('ascii')
if agi_message:
logger.info("AsteriskTheSpammers called with : " + agi_message)
if agi_message.find("NewCall") > -1:
file_sequence=1
retval = "FILE:" + hello_file
logger.info("Message for AGI is: " + retval)
conn.sendall(retval.encode('ascii'))
if agi_message.find("MeStopSpeak") > -1:
retval = process_WaitTheySpeak()
logger.info("Message for AGI is: " + retval)
conn.sendall(retval.encode('ascii'))
``` |
{
"source": "JoeTheVoices/xarray-sentinel",
"score": 2
} |
#### File: xarray-sentinel/tests/test_20_sentinel1.py
```python
import pathlib
import numpy as np
import pytest
import xarray as xr
from xarray_sentinel import esa_safe, sentinel1
DATA_FOLDER = pathlib.Path(__file__).parent / "data"
SLC_IW = (
DATA_FOLDER
/ "S1B_IW_SLC__1SDV_20210401T052622_20210401T052650_026269_032297_EFA4.SAFE"
)
SLC_IW1_VV_annotation = (
SLC_IW
/ "annotation"
/ "s1b-iw1-slc-vv-20210401t052624-20210401t052649-026269-032297-004.xml"
)
SLC_IW1_VV_calibration = (
SLC_IW
/ "annotation"
/ "calibration"
/ "calibration-s1b-iw1-slc-vv-20210401t052624-20210401t052649-026269-032297-004.xml"
)
SLC_IW1_VV_noise = (
SLC_IW
/ "annotation"
/ "calibration"
/ "noise-s1b-iw1-slc-vv-20210401t052624-20210401t052649-026269-032297-004.xml"
)
SLC_IW1_VV_measurement = (
SLC_IW
/ "measurement"
/ "s1b-iw1-slc-vv-20210401t052624-20210401t052649-026269-032297-004.tiff"
)
SLC_S3 = (
DATA_FOLDER
/ "S1A_S3_SLC__1SDV_20210401T152855_20210401T152914_037258_04638E_6001.SAFE"
)
SLC_S3_VV_annotation = (
SLC_S3
/ "annotation"
/ "s1a-s3-slc-vv-20210401t152855-20210401t152914-037258-04638e-002.xml"
)
SLC_S3_VV_measurement = (
SLC_S3
/ "measurement"
/ "s1a-s3-slc-vv-20210401t152855-20210401t152914-037258-04638e-002.tiff"
)
GRD_IW_VV_annotation = (
DATA_FOLDER
/ "S1B_IW_GRDH_1SDV_20210401T052623_20210401T052648_026269_032297_ECC8.SAFE"
/ "annotation"
/ "s1b-iw-grd-vv-20210401t052623-20210401t052648-026269-032297-001.xml"
)
def test_get_fs_path() -> None:
fs, path = sentinel1.get_fs_path(str(SLC_IW))
assert path == str(SLC_IW.absolute())
fs, path = sentinel1.get_fs_path(SLC_IW, fs)
assert path == str(SLC_IW)
with pytest.raises(ValueError):
sentinel1.get_fs_path("non-existent-path/*")
with pytest.raises(ValueError):
sentinel1.get_fs_path(DATA_FOLDER / "*")
def test_normalise_group() -> None:
assert sentinel1.normalise_group(None) == ("", None)
assert sentinel1.normalise_group("/") == ("", None)
assert sentinel1.normalise_group("IW1") == ("IW1", None)
assert sentinel1.normalise_group("/IW1") == ("IW1", None)
assert sentinel1.normalise_group("/IW1/VH/0") == ("IW1/VH", 0)
assert sentinel1.normalise_group("/IW1/VH/orbit") == ("IW1/VH/orbit", None)
def test_open_calibration_dataset() -> None:
res = sentinel1.open_calibration_dataset(SLC_IW1_VV_calibration)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"line", "pixel"}
def test_open_noise_range_dataset() -> None:
res = sentinel1.open_noise_range_dataset(SLC_IW1_VV_noise)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"line", "pixel"}
def test_open_noise_azimuth_dataset() -> None:
res = sentinel1.open_noise_azimuth_dataset(SLC_IW1_VV_noise)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"line"}
def test_open_coordinate_conversion_dataset() -> None:
res = sentinel1.open_coordinate_conversion_dataset(GRD_IW_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"azimuth_time", "degree"}
def test_open_gcp_dataset() -> None:
res = sentinel1.open_gcp_dataset(SLC_IW1_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"line", "pixel", "azimuth_time", "slant_range_time"}
def test_open_attitude_dataset() -> None:
res = sentinel1.open_attitude_dataset(SLC_IW1_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"azimuth_time"}
def test_open_orbit_dataset() -> None:
res = sentinel1.open_orbit_dataset(SLC_IW1_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"axis", "azimuth_time"}
def test_open_dc_estimate_dataset() -> None:
res = sentinel1.open_dc_estimate_dataset(SLC_IW1_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"degree", "azimuth_time"}
def test_open_azimuth_fm_rate_dataset() -> None:
res = sentinel1.open_azimuth_fm_rate_dataset(SLC_IW1_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.coords) == {"degree", "azimuth_time"}
def test_open_pol_dataset_iw() -> None:
res = sentinel1.open_pol_dataset(SLC_IW1_VV_measurement, SLC_IW1_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.dims) == {"line", "pixel"}
assert set(res.coords) == {"slant_range_time", "azimuth_time", "line", "pixel"}
@pytest.mark.xfail
def test_open_pol_dataset_sm() -> None:
res = sentinel1.open_pol_dataset(SLC_S3_VV_measurement, SLC_S3_VV_annotation)
assert isinstance(res, xr.Dataset)
assert set(res.dims) == {"slant_range_time", "azimuth_time"}
assert set(res.coords) == {"slant_range_time", "azimuth_time", "line", "pixel"}
def test_build_burst_id() -> None:
lat = 11.8475875
lon = 47.16626783
relative_orbit = 168
burst_id = sentinel1.build_burst_id(lat=lat, lon=lon, relative_orbit=relative_orbit)
assert burst_id == "R168-N118-E0472"
def test_find_available_groups() -> None:
_, product_files = esa_safe.parse_manifest_sentinel1(SLC_S3 / "manifest.safe")
expected_groups = {
"S3",
"S3/VV",
"S3/VV/attitude",
"S3/VV/gcp",
"S3/VV/orbit",
"S3/VV/dc_estimate",
"S3/VV/azimuth_fm_rate",
"S3/VV/coordinate_conversion",
"S3/VV/calibration",
"S3/VV/noise_range",
"S3/VV/noise_azimuth",
"S3",
"S3/VH",
"S3/VH/attitude",
"S3/VH/gcp",
"S3/VH/orbit",
"S3/VH/dc_estimate",
"S3/VH/azimuth_fm_rate",
"S3/VH/coordinate_conversion",
"S3/VH/calibration",
"S3/VH/noise_range",
"S3/VH/noise_azimuth",
}
res = sentinel1.find_available_groups(product_files, str(SLC_IW))
assert set(res) == expected_groups
res = sentinel1.find_available_groups(
product_files, str(SLC_IW), check_files_exist=True
)
assert res == {}
def test_compute_burst_centres() -> None:
gcp = xr.Dataset(
{
"latitude": xr.DataArray(
np.arange(5).reshape(5, 1), dims=("azimuth_time", "slant_range_time")
),
"longitude": xr.DataArray(
np.arange(5).reshape(5, 1) * 10,
dims=("azimuth_time", "slant_range_time"),
),
},
attrs={"burst_count": 4},
)
lat, lon = sentinel1.compute_burst_centres(gcp)
assert np.allclose(lat, [0.5, 1.5, 2.5, 3.5])
assert np.allclose(lon, [5, 15, 25, 35])
def test_open_dataset() -> None:
expected_groups = {
"IW1",
"IW1/VV",
"IW1/VV/gcp",
"IW1/VV/attitude",
"IW1/VV/orbit",
"IW1/VV/calibration",
"IW1/VV/noise_range",
"IW1/VV/noise_azimuth",
}
res = sentinel1.open_sentinel1_dataset(SLC_IW)
assert isinstance(res, xr.Dataset)
assert len(res.data_vars) == 0
assert set(res.attrs["subgroups"]) >= expected_groups
res = sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1")
assert isinstance(res, xr.Dataset)
assert len(res.data_vars) == 0
res = sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1/VV/orbit")
assert isinstance(res, xr.Dataset)
assert res.dims == {"axis": 3, "azimuth_time": 17}
with pytest.raises(ValueError):
sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1/VV/non-existent")
def test_open_dataset_virtual_groups() -> None:
res = sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1/VV/0")
assert isinstance(res, xr.Dataset)
assert len(res.data_vars) == 1
assert res.attrs["burst_index"] == 0
def test_open_dataset_chunks() -> None:
res = sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1/VV")
assert isinstance(res, xr.Dataset)
assert len(res.dims) == 2
assert res.measurement.chunks[0][0] == res.attrs["lines_per_burst"]
assert not np.all(np.isnan(res.measurement))
def test_crop_burst_dataset() -> None:
swath_ds = sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1/VH")
res1 = sentinel1.crop_burst_dataset(swath_ds, 8)
assert set(res1.dims) == {"azimuth_time", "slant_range_time"}
assert res1.dims["azimuth_time"] == swath_ds.attrs["lines_per_burst"]
res2 = sentinel1.crop_burst_dataset(swath_ds, azimuth_anx_time=2210)
assert res2.equals(res1)
res3 = sentinel1.crop_burst_dataset(
swath_ds, azimuth_anx_time=2213, use_center=True
)
assert res3.equals(res1)
with pytest.raises(TypeError):
sentinel1.crop_burst_dataset(swath_ds)
with pytest.raises(TypeError):
sentinel1.crop_burst_dataset(swath_ds, burst_index=8, azimuth_anx_time=2213)
with pytest.raises(IndexError):
sentinel1.crop_burst_dataset(swath_ds, burst_index=-1)
def test_calibrate_amplitude() -> None:
swath_ds = sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1/VH")
burst_ds = sentinel1.crop_burst_dataset(swath_ds, burst_index=8)
calibration_ds = sentinel1.open_calibration_dataset(SLC_IW1_VV_calibration)
res = sentinel1.calibrate_amplitude(
burst_ds.measurement, calibration_ds["betaNought"]
)
assert set(res.dims) == {"azimuth_time", "slant_range_time"}
assert np.issubdtype(res.dtype, np.complex64)
def test_calibrate_intensity() -> None:
swath_ds = sentinel1.open_sentinel1_dataset(SLC_IW, group="IW1/VH")
burst_ds = sentinel1.crop_burst_dataset(swath_ds, burst_index=8)
calibration_ds = sentinel1.open_calibration_dataset(SLC_IW1_VV_calibration)
res = sentinel1.calibrate_intensity(
burst_ds.measurement, calibration_ds["betaNought"]
)
assert set(res.dims) == {"azimuth_time", "slant_range_time"}
assert np.issubdtype(res.dtype, np.float32)
``` |
{
"source": "Joetib/bases",
"score": 4
} |
#### File: bases/bases/stack.py
```python
from typing import List, Union
class Stack:
"""
    Stack implementation based on a Python list (array).
    Values are kept as strings to suit the implementation's needs.
methods:
pop:
pops out the last element of the stack and returns it
push:
pushes an element to the top of the stack
to_base_string:
Helper function to convert the stack to a string
representing the results after computation of bases
"""
def __init__(self, content: List = None) -> None:
self._stack = content or []
def pop(self) -> Union[str, int]:
"""
pop: pops out the last element of the stack and returns it
"""
return self._stack.pop()
def push(self, value: Union[str, int]) -> bool:
"""
push: pushes an element to the top of the stack
"""
self._stack.append(str(value))
return True
def to_base_string(self):
"""
to_base_string: Helper function to convert the stack to a string
representing the results after computation of bases
"""
stack = self._stack[::-1]
return ''.join(stack)
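
# Illustrative usage sketch (not part of the original module): pushing the remainders of
# a base conversion and reading them back reversed, e.g. converting 6 to base 2:
#   s = Stack(); s.push(0); s.push(1); s.push(1)
#   s.to_base_string()  # -> '110'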
``` |
{
"source": "Joetib/django-unicorn",
"score": 2
} |
#### File: management/commands/startunicorn.py
```python
from pathlib import Path
from django.core.management.base import BaseCommand, CommandError
from ...components import convert_to_pascal_case, convert_to_snake_case
COMPONENT_FILE = """from django_unicorn.components import UnicornView
class {pascal_case_component_name}View(UnicornView):
pass
"""
TEMPLATE_FILE = """<div>
<!-- put component code here -->
</div>
"""
class Command(BaseCommand):
help = "Creates a new component for `django-unicorn`"
def add_arguments(self, parser):
parser.add_argument("component_names", nargs="+", type=str)
def handle(self, *args, **options):
if not Path("manage.py").exists():
raise CommandError("Can't find manage.py in current path.")
if not Path("unicorn").exists():
Path("unicorn").mkdir()
self.stdout.write(self.style.SUCCESS("Create unicorn directory ✨"))
for component_name in options["component_names"]:
snake_case_component_name = convert_to_snake_case(component_name)
pascal_case_component_name = convert_to_pascal_case(component_name)
# Create component
if not Path("unicorn/components").exists():
Path("unicorn/components").mkdir()
component_path = Path(f"unicorn/components/{snake_case_component_name}.py")
component_path.write_text(
COMPONENT_FILE.format(
**{"pascal_case_component_name": pascal_case_component_name}
)
)
self.stdout.write(self.style.SUCCESS(f"Created {component_path}."))
# Create template
if not Path("unicorn/templates/unicorn").exists():
if not Path("unicorn/templates").exists():
Path("unicorn/templates").mkdir()
Path("unicorn/templates/unicorn").mkdir()
template_path = Path(f"unicorn/templates/unicorn/{component_name}.html")
template_path.write_text(TEMPLATE_FILE)
self.stdout.write(self.style.SUCCESS(f"Created {template_path}."))
self.stdout.write(
self.style.SUCCESS(
f"\nMake sure to add '\"unicorn\",' to your INSTALLED_APPS array in your settings file if necessary."
)
)
``` |
{
"source": "Joetib/eggstore",
"score": 3
} |
#### File: eggstore/pages/cart.py
```python
from decimal import Decimal
from django.conf import settings
from .models import Size, Color, SingleOrder
class Cart:
def __init__(self, request):
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
def add(self, size, color, quantity=1, update_quantity=False):
product_group = f'{size}:{color}'
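        # Each cart line is keyed by "size:color", so repeated additions of the same
        # size/colour combination accumulate quantity instead of creating a new entry.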
if product_group not in self.cart:
self.cart[product_group] = {'size': size.size, 'color': color.color,'quantity': 0, 'price': Size.objects.get(size=size).price}
if update_quantity:
self.cart[product_group]['quantity'] = int(quantity)
else:
self.cart[product_group]['quantity'] += int(quantity)
self.save()
def save(self):
self.session.modified = True
def remove(self, size, color, quantity=None):
product_group = f'{size}:{color}'
if product_group in self.cart:
if quantity:
if quantity < self.cart[product_group]['quantity']:
self.cart[product_group]['quantity'] = self.cart[product_group]['quantity'] -quantity
self.save()
return
del self.cart[product_group]
self.save()
def total_amount(self):
price = 0
for key, value in self.cart.items():
price += int(value['quantity']) * float(value['price'])
return price
def total_items(self):
count = 0
for _, _ in self.cart.items():
count += 1
return count
def total_items_quantity(self):
count = 0
for key, value in self.cart.items():
count += int(value['quantity'])
return count
def as_object(self):
objects = []
for key, value in self.cart.items():
try:
objects.append(
SingleOrder(
size = Size.objects.get(size=value.get('size')),
color = Color.objects.get(color=value.get('color')),
quantity = value.get('quantity', 0)
)
)
except Exception as e:
print(e)
return objects
```
#### File: eggstore/users/models.py
```python
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
photo = models.ImageField(null=True, blank=True)
def __str__(self):
return self.username or self.email
``` |
{
"source": "Joetib/gmsa",
"score": 2
} |
#### File: gmsa/pages/models.py
```python
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.utils.text import slugify
from django.utils import timezone
User = get_user_model()
# Create your models here.
class Configuration(models.Model):
location = models.CharField(max_length=500)
main_phone_number = models.CharField(max_length=13)
office_phone_number = models.CharField(max_length=13)
email_address = models.EmailField()
about = models.TextField()
history = models.TextField()
# social links
whatsapp_link = models.URLField(blank=True)
facebook_link = models.URLField(blank=True)
twitter_link = models.URLField(blank=True)
youtube_link = models.URLField(blank=True)
def __str__(self):
return "Site Configuration"
@classmethod
def object(cls):
return cls.objects.first()
def save(self, *args, **kwargs):
self.pk = 1
self.id = 1
super().save(*args, **kwargs)
class Slider(models.Model):
banner = models.ImageField(upload_to="slider/%Y/%m/")
title = models.CharField(max_length=60)
sub_title = models.CharField(max_length=50)
date_created = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("-date_created",)
def __str__(self):
return self.title
class Image(models.Model):
image = models.ImageField(upload_to="gallery/images/%Y/%m/")
description = models.TextField(blank=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
date_created = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("-date_created",)
@classmethod
def for_model(cls,*,image, description, content_object: models.Model):
_image = cls(image=image, description=description, content_object=content_object)
_image.save()
return _image
class ExecutiveRole(models.Model):
name = models.CharField(max_length=100)
core = models.BooleanField(default=True)
duty = models.TextField()
class Meta:
ordering = ("core", 'name')
def __str__(self) -> str:
return self.name
class Executive(models.Model):
executive_role = models.ForeignKey(ExecutiveRole, related_name="executives", on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
name = models.CharField(max_length=150)
about = models.TextField()
picture = models.ImageField(upload_to='executives/images/%Y/')
    is_active = models.BooleanField(default=True)
date_started = models.DateField()
end_date = models.DateField(null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ("-is_active", "executive_role", "date_created")
def __str__(self) -> str:
return self.name
def get_absolute_url(self) -> str:
return reverse('executive-detail', kwargs={'pk': self.pk})
class Event(models.Model):
name = models.CharField(max_length=250)
picture = models.ImageField(upload_to="events/%Y/%m/")
description = models.TextField()
date = models.DateTimeField()
venue = models.CharField(max_length=500)
images = GenericRelation(Image, related_query_name="event")
event_link = models.URLField(blank=True)
event_link_text = models.CharField(max_length=30, blank=True)
is_upcoming = models.BooleanField(default=True)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ("date", "date_created", "name")
def __str__(self) -> str:
return self.name
def get_absolute_url(self) -> str:
return reverse("event-detail",kwargs={"pk": self.pk} )
class Programme(models.Model):
title = models.CharField(max_length=200)
class Meta:
ordering = ("title",)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("programme-detail", kwargs={"pk": self.pk})
def get_courses(self):
course_for_level = [
{
"level": "Level 100",
"courses": self.get_level_100_courses()
},
{
"level": "Level 200",
"courses": self.get_level_200_courses()
},
{
"level": "Level 300",
"courses": self.get_level_300_courses()
},
{
"level": "Level 400",
"courses": self.get_level_400_courses()
},
]
courses = self.get_level_500_courses()
        if courses.exists():
course_for_level.append({"level": "Level 500", "courses": courses})
courses = self.get_level_600_courses()
        if courses.exists():
course_for_level.append({"level": "Level 600", "courses": courses})
return course_for_level
def get_level_100_courses(self):
return self.courses.filter(year=1)
def get_level_200_courses(self):
return self.courses.filter(year=2)
def get_level_300_courses(self):
return self.courses.filter(year=3)
def get_level_400_courses(self):
return self.courses.filter(year=4)
def get_level_500_courses(self):
return self.courses.filter(year=5)
def get_level_600_courses(self):
return self.courses.filter(year=6)
class Course(models.Model):
YEAR_CHOICES = (
(1, "Level 100"),
(2, "Level 200"),
(3, "Level 300"),
(4, "Level 400"),
(5, "Level 500"),
(6, "Level 600"),
)
programme = models.ForeignKey(Programme, related_name="courses", on_delete=models.CASCADE)
year = models.PositiveIntegerField(choices=YEAR_CHOICES)
title = models.CharField(max_length=200)
description = models.TextField(blank=True)
class Meta:
ordering = ("year", "title")
def __str__(self):
return self.title
class Book(models.Model):
course = models.ForeignKey(Course, related_name="books", on_delete=models.CASCADE)
title = models.CharField(max_length=300)
description = models.TextField(blank=True)
book = models.FileField(upload_to="books/%Y/")
class Meta:
ordering = ("course", "title")
def __str__(self):
return self.title
class ContactMessage(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
message = models.TextField()
def __str__(self):
return self.name + self.message[:20]
class MadarasahCategory(models.Model):
name = models.CharField(max_length=60)
class Meta:
ordering = ("name",)
def __str__(self) -> str:
return self.name
def get_absolute_url(self) -> str:
return f'{reverse("madarasah-list")}?category={self.pk}'
class Madarasah(models.Model):
category = models.ForeignKey(MadarasahCategory, related_name="madarasah_set", on_delete=models.CASCADE)
title = models.CharField(max_length=150)
description = models.TextField(blank=True)
file = models.FileField(upload_to="madarasah/%Y/%m/")
date = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("date",)
def __str__(self) -> str:
return self.title
class Business(models.Model):
title = models.CharField(max_length=150)
description = models.TextField()
phone_number = models.CharField(max_length=13)
banner = models.ImageField(upload_to="business/banner/%Y/%m/")
slug = models.SlugField(blank=True)
date = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("title", "date")
def __str__(self):
return self.title
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if not self.slug:
self.slug = slugify(f"{self.title} {self.pk}")
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("business-detail", kwargs={"slug": self.slug})
class Product(models.Model):
business = models.ForeignKey(Business, related_name="products", on_delete=models.CASCADE)
title = models.CharField(max_length=150)
description = models.TextField(max_length=30)
slug = models.SlugField(blank=True)
price = models.DecimalField(decimal_places=2, max_digits=10)
picture = models.ImageField(upload_to="products/%Y/%m/")
date = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("title", "date")
def __str__(self):
return self.title
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if not self.slug:
self.slug = slugify(f"{self.title} {self.pk}")
super().save(*args, **kwargs)
class Project(models.Model):
title = models.CharField(max_length=150)
description = models.TextField()
picture = models.ImageField(upload_to="projects/%Y/%m/")
slug = models.SlugField(blank=True)
date_started = models.DateTimeField()
    date_ended = models.DateTimeField(null=True, blank=True)
completed = models.BooleanField(default=False)
images = GenericRelation(Image, related_query_name="project")
date_created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("-date_created", "title")
def __str__(self):
return self.title
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if not self.slug:
self.slug = slugify(f"{self.title} {self.pk}")
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse("project-detail", kwargs={"slug": self.slug})
class Scholarship(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
application_link = models.URLField(blank=True)
    end_date = models.DateField(null=True, blank=True)
date = models.DateTimeField(auto_now=True)
class Meta:
ordering = ("date", "title")
def __str__(self):
return self.title
@property
def is_active(self):
if not self.end_date:
return True
        return timezone.now().date() <= self.end_date
def get_absolute_url(self):
return reverse("scholarship-detail", kwargs={"pk": self.pk})
``` |
{
"source": "Joetib/joebase",
"score": 2
} |
#### File: joebase/project/forms.py
```python
from django import forms
from .models import Project, Table, Field, FieldEntry, File, Text
class ProjectCreateForm(forms.ModelForm):
class Meta:
model = Project
fields = ('name', 'authentication', 'description',)
class ProjectEditForm(forms.ModelForm):
class Meta:
model = Project
fields = ('authentication', 'description')
class TableCreateForm(forms.ModelForm):
class Meta:
model = Table
fields = ('name', 'authentication_option')
class FieldForm(forms.ModelForm):
class Meta:
model = Field
fields = ('name', 'field_type', 'required')
class FileForm(forms.ModelForm):
class Meta:
model = File
fields = ('file',)
class TextForm(forms.ModelForm):
class Meta:
model = Text
fields = ('text',)
class EntryForm(forms.Form):
def __init__(self, entry, *args, **kwargs):
super(EntryForm, self).__init__(*args, **kwargs)
print(entry.fields.all())
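        # Build one form field per Field definition; the single-letter field_type codes
        # map to Django form fields in the branches below ('T' text, 'F' file, 'I' image).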
for field in entry.fields.all():
if field.field_type == 'T':
print('got here')
self.fields[field.name]= forms.CharField(required=field.required)
elif field.field_type == 'F':
self.fields[field.name] = forms.FileField(required=field.required)
elif field.field_type == 'I':
self.fields[field.name] = forms.ImageField(required=field.required)
``` |
{
"source": "Joetib/jshop",
"score": 3
} |
#### File: apps/store/utils.py
```python
import secrets
RANDOM_STRING_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def get_random_string(length=15, allowed_chars=RANDOM_STRING_CHARS):
"""
Return a securely generated random string.
The bit length of the returned value can be calculated with the formula:
log_2(len(allowed_chars)^length)
For example, with default `allowed_chars` (26+26+10), this gives:
* length: 12, bit length =~ 71 bits
* length: 22, bit length =~ 131 bits
"""
return "JSHOP-" + ''.join([secrets.choice(allowed_chars) for i in range(length)])
```
#### File: apps/store/views.py
```python
from django.shortcuts import get_object_or_404, render, redirect
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.contrib import messages
from django.views.generic import ListView, DetailView
from . import models
from . import forms
# Create your views here.
def create_update_product(request: HttpRequest, pk:int=None):
product = None
if pk:
product = get_object_or_404(models.Product, pk=pk)
if request.method == "POST":
product_form = forms.ProductForm(request.POST, files=request.FILES, instance=product )
if product_form.is_valid():
product: models.Product = product_form.save()
messages.success(request, "Product {product.name} saved successfully")
return redirect(product.get_update_url())
else:
messages.error(request, "Please fix the problems in the form.")
else:
product_form = forms.ProductForm(instance=product)
return render(request, 'store/create_update_product.html', {'product_form': product_form })
def create_update_stock(request: HttpRequest, supply_pk: int, pk:int=None):
stock: models.Stock =None
supply: models.Supply = get_object_or_404(models.Supply, pk=supply_pk)
if pk:
stock = get_object_or_404(models.Stock, pk=pk)
if request.method == "POST":
stock_form = forms.StockForm(request.POST, files=request.FILES, instance=stock )
if stock_form.is_valid():
stock: models.Stock = stock_form.save(commit=False)
stock.supply = supply
stock.save()
messages.success(request, f"Stock for {stock.product.name} saved successfully")
messages.success(request, f"Redirecting to {supply.get_absolute_url()}")
            return redirect(supply.get_absolute_url())
else:
messages.error(request, "Please fix the problems in the form.")
else:
stock_form = forms.StockForm(instance=stock)
return render(request, 'store/create_update_stock.html', {'stock_form': stock_form })
class ProductListView(ListView):
model = models.Product
context_object_name = "products"
template_name = "store/products_list.html"
class SupplyDetail(DetailView):
model = models.Supply
context_object_name = "supply"
template_name = "store/supply_detail.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['stock_form'] = forms.StockForm()
return context
class SupplyListView(ListView):
model = models.Supply
context_object_name = "supplies"
template_name = "store/supply_list.html"
def create_supply(request: HttpRequest):
supply = models.Supply.objects.create()
return redirect(supply.get_absolute_url())
def add_product_to_order(request: HttpRequest, pk:int) -> HttpResponse:
order = models.Order.objects.get_or_create(completed=False)[0]
product = get_object_or_404(models.Product, pk=pk)
    order_item, _created = models.OrderItem.objects.get_or_create(order=order, product=product)
``` |
{
"source": "Joetib/Komlearn",
"score": 2
} |
#### File: accounts/templatetags/widget_tweaks.py
```python
from django import template
from django.utils.safestring import mark_safe
import markdown
register = template.Library()
@register.filter
def add_class(form_widget, css_class):
""" Adds a css class to a django form widget """
return form_widget.as_widget(attrs={'class': css_class})
@register.filter(name='markdown')
def markdown_format(text):
return mark_safe(markdown.markdown(text))
```
#### File: Komlearn/group/models.py
```python
from django.db import models
from django.conf import settings
class Group(models.Model):
name = models.CharField(max_length=200)
members = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, related_name='groups_created', null=True, blank=True)
about = models.TextField(max_length=5120, blank=True)
picture = models.ImageField(upload_to='group_profiles', blank=True, null=True)
def latest_questions(self):
return self.questions.all()[:3]
class Question(models.Model):
""" The Question Models """
group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name='questions')
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='questions', on_delete=models.CASCADE)
text = models.TextField()
posted_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-posted_date',)
def __str__(self):
return f"{self.user.username}'s question"
def get_absolute_url(self):
pass
class Comment(models.Model):
""" The comments Models """
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='comments', on_delete=models.CASCADE)
question = models.ForeignKey(Question, related_name='comments', on_delete=models.CASCADE)
text = models.TextField(blank=True)
comment_date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-comment_date',)
``` |
{
"source": "Joetib/radio",
"score": 2
} |
#### File: radio/pages/context.py
```python
from .models import SocialLink
def get_social_context(*args, **kwargs):
facebook = SocialLink.objects.filter(site="F")
Twitter = SocialLink.objects.filter(site="T")
Whatsapp = SocialLink.objects.filter(site="W")
Youtube = SocialLink.objects.filter(site="Y")
LinkedIn = SocialLink.objects.filter(site="L")
Instagram = SocialLink.objects.filter(site="I")
return {
'facebook': facebook.first() if facebook.exists() else None,
'Whatsapp': Whatsapp.first() if Whatsapp.exists() else None,
'Twitter': Twitter.first() if Twitter.exists() else None,
'Instagram': Instagram.first() if Instagram.exists() else None,
'LinkedIn': LinkedIn.first() if LinkedIn.exists() else None,
'Youtube': Youtube.first() if Youtube.exists() else None,
}
``` |
{
"source": "Joetib/school",
"score": 2
} |
#### File: school/administrator/views.py
```python
from accounts.models import AcademicYear, Klass
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .utils import administrator_required
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from . import forms
from django.views.generic import ListView, TemplateView, CreateView, DetailView
from django.contrib.auth import get_user_model
from accounts.models import Student, Teacher, Course, NoticeBoard
from datetime import datetime
# Create your views here.
User = get_user_model()
@login_required
@administrator_required
def dashboard(request):
return render(request, 'administrator/dashboard.html', {
'teachers': User.objects.filter(is_teacher=True),
'students': User.objects.filter(is_student=True),
'notices': NoticeBoard.objects.all(),
})
class StudentListView(TemplateView):
template_name = "administrator/student_list.html"
class TeacherListView(TemplateView):
template_name = "administrator/teacher_list.html"
@login_required
@administrator_required
def create_student(request):
if request.method == "POST":
student_form = forms.StudentCreateForm(request.POST, files=request.FILES)
if student_form.is_valid():
student = student_form.save()
messages.success(request, f'Student: {student.username} created successfully')
return redirect("administrator:dashboard")
messages.error(request, "Unable to create student, please try again")
else:
student_form = forms.StudentCreateForm()
return render(request, 'administrator/create_student.html', {'student_form': student_form})
@login_required
@administrator_required
def create_teacher(request):
if request.method == "POST":
teacher_form = forms.TeacherCreateForm(request.POST, files=request.FILES)
if teacher_form.is_valid():
teacher = teacher_form.save()
            messages.success(request, f'Teacher: {teacher.username} created successfully')
return redirect("administrator:dashboard")
messages.error(request, "Unable to create teacher, please try again")
else:
teacher_form = forms.TeacherCreateForm()
return render(request, 'administrator/create_teacher.html', {'teacher_form': teacher_form})
@login_required
@administrator_required
def student_profile(request, id):
student = get_object_or_404(Student, user__id=id)
age = int((datetime.now().date() - student.user.date_of_birth).days / 365 )
return render(request, 'administrator/student_profile.html', {'student':student,'age':age,})
@login_required
@administrator_required
def teacher_profile(request, id):
teacher = get_object_or_404(Teacher, user__id=id)
age = int((datetime.now().date() - teacher.user.date_of_birth).days / 365 )
return render(request, 'administrator/teacher_profile.html', {'teacher':teacher,'age':age})
def create_academic_year(request: HttpRequest):
if request.method == "POST":
academic_year_form = forms.AcademicYearForm(request.POST)
if academic_year_form.is_valid():
academic_year = academic_year_form.save()
return redirect(academic_year.get_admin_absolute_url())
else:
academic_year_form = forms.AcademicYearForm()
return render(request, "administrator/create_academic_year.html", {'form': academic_year_form})
""" class SubjectList(ListView):
model = Course
template_name = 'administration/subject_list.html'
context_object_name = 'subjects' """
class AcademicYearListView(ListView):
model = AcademicYear
context_object_name = "academic_years"
template_name = "administrator/academic_year_list.html"
class AcademicYearDetailView(DetailView):
model = AcademicYear
context_object_name = "academic_year"
template_name = "administrator/academic_year_detail.html"
def create_klass(request: HttpRequest, *args, **kwargs) -> HttpResponse:
if request.method == "POST":
form = forms.KlassCreateForm(request.POST, files=request.FILES)
if form.is_valid():
klass = form.save()
return redirect("administrator:dashboard")
else:
print("Form is invalid\n\n\n\n")
print(form.errors)
else:
form = forms.KlassCreateForm()
return render(request, 'administrator/create_class.html', {'form': form})
class KlassListview(ListView):
model = Klass
template_name = "administrator/class_list.html"
context_object_name = "classes"
    def get_queryset(self, *args, **kwargs):
return Klass.objects.filter(academic_year__is_active=True)
class KlassDetailView(DetailView):
model = Klass
template_name = "administrator/klass_detail.html"
context_object_name = "klass"
def add_students_to_class(request: HttpRequest, pk:int):
klass = get_object_or_404(Klass, pk=pk, academic_year__is_active=True)
if request.method == "POST":
students_form = forms.AddStudentToClassForm(request.POST, klass=klass)
if students_form.is_valid():
cd = students_form.cleaned_data
# TODO: this is too slow O(n)
for student in cd['students']:
student.klasses.add(klass)
# a faster version is
# klass.students.add(*cd['students'])
# but it keeps raising contraint errors.
return redirect(klass.get_admin_absolute_url())
else:
students_form = forms.AddStudentToClassForm(klass=klass)
return render(request, "administrator/add_student_to_class.html", {'form': students_form, 'klass': klass})
@administrator_required
def add_teachers_to_class(request: HttpRequest, pk:int):
klass = get_object_or_404(Klass, pk=pk, academic_year__is_active=True)
if request.method == "POST":
teacher_form = forms.AddTeacherToClassForm(request.POST, instance=klass)
if teacher_form.is_valid():
teacher_form.save()
return redirect(klass.get_admin_absolute_url())
else:
teacher_form = forms.AddTeacherToClassForm(instance=klass)
return render(request, "administrator/add_teacher_to_class.html", {'form': teacher_form, 'klass': klass})
def create_course(request):
if request.method == "POST":
form = forms.CourseCreateForm(request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect("administrator:dashboard")
else:
form = forms.CourseCreateForm()
return render(request, 'administrator/create_course.html', {'form': form})
class CoursetListView(ListView):
model = Course
template_name = 'administrator/course_list.html'
context_object_name = 'courses'
def get_queryset(self, *args, **kwargs):
return Course.objects.all()
def create_notice(request):
if request.method == "POST":
form = forms.CreateNoticeForm(request.POST, files=request.FILES)
if form.is_valid():
form.save()
return redirect("administrator:dashboard")
else:
form = forms.CreateNoticeForm()
return render(request, 'administrator/create_notice.html', {'form':form})
def delete_notice_item(request, id):
notice = get_object_or_404(NoticeBoard, id=id)
notice.delete()
return redirect("administrator:dashboard")
``` |
{
"source": "joetimmerman/GpxTrackPoster",
"score": 3
} |
#### File: GpxTrackPoster/gpxtrackposter/heatmap_drawer.py
```python
import argparse
import logging
import math
from typing import Dict, List, Optional, Tuple
import s2sphere # type: ignore
import svgwrite # type: ignore
from geopy.distance import distance # type: ignore
from gpxtrackposter import utils
from gpxtrackposter.exceptions import ParameterError
from gpxtrackposter.poster import Poster
from gpxtrackposter.tracks_drawer import TracksDrawer
from gpxtrackposter.xy import XY
log = logging.getLogger(__name__)
class HeatmapDrawer(TracksDrawer):
"""Draw a heatmap Poster based on the tracks.
Attributes:
_center: Center of the heatmap.
_radius: Scale the heatmap so that a circle with radius (in KM) is visible.
Methods:
create_args: Create arguments for heatmap.
fetch_args: Get arguments passed.
draw: Draw the heatmap based on the Poster's tracks.
"""
def __init__(self, the_poster: Poster):
super().__init__(the_poster)
self._center = None
self._radius = None
self._heatmap_line_width_low: float = 10.0
self._heatmap_line_width_upp: float = 1000.0
self._heatmap_line_width_lower: List[Tuple[float, float]] = [(0.10, 5.0), (0.20, 2.0), (1.0, 0.30)]
self._heatmap_line_width_upper: List[Tuple[float, float]] = [(0.02, 0.5), (0.05, 0.2), (1.0, 0.05)]
self._heatmap_line_width: Optional[List[Tuple[float, float]]] = self._heatmap_line_width_lower
def create_args(self, args_parser: argparse.ArgumentParser) -> None:
group = args_parser.add_argument_group("Heatmap Type Options")
group.add_argument(
"--heatmap-center",
dest="heatmap_center",
metavar="LAT,LNG",
type=str,
help="Center of the heatmap (default: automatic).",
)
group.add_argument(
"--heatmap-radius",
dest="heatmap_radius",
metavar="RADIUS_KM",
type=float,
help="Scale the heatmap such that at least a circle with radius=RADIUS_KM is visible "
"(default: automatic).",
)
group.add_argument(
"--heatmap-line-transparency-width",
dest="heatmap_line_width",
metavar="TRANSP_1,WIDTH_1, TRANSP_2,WIDTH_2, TRANSP_3,WIDTH_3",
type=str,
help="Define three transparency and width tuples for the heatmap lines or set it to "
"`automatic` for automatic calculation (default: 0.1,5.0, 0.2,2.0, 1.0,0.3).",
)
# pylint: disable=too-many-branches
def fetch_args(self, args: argparse.Namespace) -> None:
"""Get arguments that were passed, and also perform basic validation on them.
For example, make sure the center is an actual lat, lng , and make sure the radius is a
positive number. Also, if radius is passed, then center must also be passed.
Raises:
ParameterError: Center was not a valid lat, lng coordinate, or radius was not positive.
ParameterError: Line transparency and width values are not valid
"""
self._center = None
if args.heatmap_center:
latlng_str = args.heatmap_center.split(",")
if len(latlng_str) != 2:
raise ParameterError(f"Not a valid LAT,LNG pair: {args.heatmap_center}")
try:
lat = float(latlng_str[0].strip())
lng = float(latlng_str[1].strip())
except ValueError as e:
raise ParameterError(f"Not a valid LAT,LNG pair: {args.heatmap_center}") from e
if not -90 <= lat <= 90 or not -180 <= lng <= 180:
raise ParameterError(f"Not a valid LAT,LNG pair: {args.heatmap_center}")
self._center = s2sphere.LatLng.from_degrees(lat, lng)
if args.heatmap_radius:
if args.heatmap_radius <= 0:
raise ParameterError(f"Not a valid radius: {args.heatmap_radius} (must be > 0)")
if not args.heatmap_center:
raise ParameterError("--heatmap-radius needs --heatmap-center")
self._radius = args.heatmap_radius
if args.heatmap_line_width:
if args.heatmap_line_width.lower() == "automatic":
self._heatmap_line_width = None
else:
trans_width_str = args.heatmap_line_width.split(",")
if len(trans_width_str) != 6:
raise ParameterError(f"Not three valid TRANSPARENCY,WIDTH pairs: {args.heatmap_line_width}")
try:
self._heatmap_line_width = []
for value in range(0, 5, 2):
transparency = float(trans_width_str[value].strip())
width = float(trans_width_str[value + 1].strip())
if transparency < 0 or transparency > 1:
raise ParameterError(
f"Not a valid TRANSPARENCY value (0 < value < 1): {transparency} in "
f"{args.heatmap_line_width}"
)
self._heatmap_line_width.append((transparency, width))
except ValueError as e:
raise ParameterError(f"Not three valid TRANSPARENCY,WIDTH pairs: {args.heatmap_line_width}") from e
def _get_line_transparencies_and_widths(self, bbox: s2sphere.sphere.LatLngRect) -> List[Tuple[float, float]]:
if self._heatmap_line_width:
return self._heatmap_line_width
# automatic calculation of line transparencies and widths
low = self._heatmap_line_width_low
upp = self._heatmap_line_width_upp
lower = self._heatmap_line_width_lower
upper = self._heatmap_line_width_upper
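        # Blend each (transparency, width) pair from the `lower` preset towards the
        # `upper` preset in proportion to the bounding-box diagonal d computed below;
        # diagonals outside the [low, upp] km range fall back to the presets themselves.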
d = distance(
(bbox.lo().lat().degrees, bbox.lo().lng().degrees), (bbox.hi().lat().degrees, bbox.hi().lng().degrees)
).km
log.info("Length of diagonal of boundary box %s", str(d))
if d > upp:
return upper
if d < low:
return lower
return [
(
lower[0][0] + d / (upp - low) * (upper[0][0] - lower[0][0]),
(lower[0][1] + d / (upp - low) * (upper[0][1] - lower[0][1])),
),
(
lower[1][0] + d / (upp - low) * (upper[1][0] - lower[1][0]),
(lower[1][1] + d / (upp - low) * (upper[1][1] - lower[1][1])),
),
(
lower[2][0] + d / (upp - low) * (upper[2][0] - lower[2][0]),
(lower[2][1] + d / (upp - low) * (upper[2][1] - lower[2][1])),
),
]
def _determine_bbox(self) -> s2sphere.LatLngRect:
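        # If a centre was forced via --heatmap-center, size the box from the given radius
        # or from the farthest track point; otherwise use the union of all track bboxes.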
if self._center:
log.info("Forcing heatmap center to %s", str(self._center))
dlat, dlng = 0, 0
if self._radius:
er = 6378.1
quarter = er * math.pi / 2
dlat = 90 * self._radius / quarter
scale = 1 / math.cos(self._center.lat().radians)
dlng = scale * 90 * self._radius / quarter
else:
for tr in self.poster.tracks:
for line in tr.polylines:
for latlng in line:
d = abs(self._center.lat().degrees - latlng.lat().degrees)
dlat = max(dlat, d)
d = abs(self._center.lng().degrees - latlng.lng().degrees)
while d > 360:
d -= 360
if d > 180:
d = 360 - d
dlng = max(dlng, d)
return s2sphere.LatLngRect.from_center_size(self._center, s2sphere.LatLng.from_degrees(2 * dlat, 2 * dlng))
tracks_bbox = s2sphere.LatLngRect()
for tr in self.poster.tracks:
tracks_bbox = tracks_bbox.union(tr.bbox())
return tracks_bbox
def draw(self, dr: svgwrite.Drawing, g: svgwrite.container.Group, size: XY, offset: XY) -> None:
"""Draw the heatmap based on tracks."""
bbox = self._determine_bbox()
line_transparencies_and_widths = self._get_line_transparencies_and_widths(bbox)
year_groups: Dict[int, svgwrite.container.Group] = {}
for tr in self.poster.tracks:
year = tr.start_time().year
if year not in year_groups:
g_year = dr.g(id=f"year{year}")
g.add(g_year)
year_groups[year] = g_year
else:
g_year = year_groups[year]
color = self.color(self.poster.length_range, tr.length(), tr.special)
for line in utils.project(bbox, size, offset, tr.polylines):
for opacity, width in line_transparencies_and_widths:
g_year.add(
dr.polyline(
points=line,
stroke=color,
stroke_opacity=opacity,
fill="none",
stroke_width=width,
stroke_linejoin="round",
stroke_linecap="round",
)
)
``` |
{
"source": "joetjo/jopLauncher",
"score": 3
} |
#### File: examples/external/psutil_example.py
```python
import psutil
from base.jsonstore import GhStorage
def getListOfProcessSortedByMemory():
'''
Get list of running process sorted by Memory Usage
'''
listOfProcObjects = []
# Iterate over the list
for proc in psutil.process_iter():
try:
# Fetch process details as dict
pinfo = proc.as_dict(attrs=['pid', 'name', 'username', 'exe'])
pinfo['vms'] = proc.memory_info().vms / (1024 * 1024)
# Append dict to list
            listOfProcObjects.append(pinfo)

except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
# Sort list of dict by key vms i.e. memory usage
listOfProcObjects = sorted(listOfProcObjects, key=lambda procObj: procObj['vms'], reverse=True)
return listOfProcObjects
def main():
print("*** Iterate over all running process and print process ID & Name ***")
# Iterate over all running process
for proc in psutil.process_iter():
try:
# Get process name & pid from process object.
processName = proc.name()
processID = proc.pid
print(processName, ' ::: ', processID)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
print('*** Create a list of all running processes ***')
listOfProcessNames = list()
# Iterate over all running processes
for proc in psutil.process_iter():
# Get process detail as dictionary
pInfoDict = proc.as_dict(attrs=['pid', 'name', 'cpu_percent', 'cpu_times'])
# Append dict of process detail in list
listOfProcessNames.append(pInfoDict)
# Iterate over the list of dictionary and print each elem
for elem in listOfProcessNames:
print(elem)
print('*** Top 10 process with highest memory usage ***')
listOfRunningProcess = getListOfProcessSortedByMemory()
for elem in listOfRunningProcess[:10]:
print(elem)
if __name__ == '__main__':
main()
```
#### File: jopLauncher/gridgui/application.py
```python
from tkinter import Tk, Frame, Label, StringVar, Button, Entry, Checkbutton, Radiobutton, IntVar
from tkinter.ttk import Combobox
from gridgui.apphandle import GhAppHandle
from gridgui.gridbehaviour import GhGridBehaviour
class GhAppSetup:
bg_header = 'light blue'
bg_content = 'light grey'
height = 550
min_height = 250
width = 550
min_width = 400
# center top bottom
vertical = 'center'
# position (digit) or center left right
horizontal = 'center'
icon = None
image_button = True
# Must not be false if image_button is false
image_text_button = True
'''
Generic Application based on grid layout with header / content / footer
mandatory : title ( application window title )
With static method to create widget ( and make of common look and feel application )
'''
class GhApp(GhGridBehaviour):
def __init__(self, title, exit_command):
super().__init__(0, 0)
self.title = title
self.window = Tk()
# Set initial position from setup
screen_width = self.window.winfo_screenwidth()
screen_height = self.window.winfo_screenheight()
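        # Resolve the initial window position from GhAppSetup.horizontal / vertical:
        # 'left'/'right'/'top'/'bottom'/'center' are computed against the screen size,
        # any other value is taken as an absolute pixel offset.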
if GhAppSetup.horizontal == 'right':
x = screen_width - GhAppSetup.width - 10
elif GhAppSetup.horizontal == 'center':
x = int((screen_width - GhAppSetup.width) / 2)
elif GhAppSetup.horizontal == 'left':
x = 0
else:
x = int(GhAppSetup.horizontal)
if GhAppSetup.vertical == 'bottom':
y = screen_height - GhAppSetup.height - 10
elif GhAppSetup.vertical == 'center':
y = int((screen_height - GhAppSetup.height) / 2)
elif GhAppSetup.vertical == 'top':
y = 0
else:
y = int(GhAppSetup.vertical)
self.window.title(title)
self.window.iconbitmap(GhAppSetup.icon)
self.window.geometry('{}x{}+{}+{}'.format(GhAppSetup.width, GhAppSetup.height, x, y))
self.window.minsize(GhAppSetup.min_width, GhAppSetup.min_height)
if exit_command is not None:
self.window.protocol("WM_DELETE_WINDOW", exit_command)
# Build app skeleton ( header / content / footer )
self.header = Frame(self.window, bg=GhAppSetup.bg_header, pady=5, padx=5)
self.content = Frame(self.window, bg=GhAppSetup.bg_content, padx=0, pady=0)
self.footer = Frame(self.window, bg=GhAppSetup.bg_header, pady=3, padx=5)
self.window.grid_rowconfigure(1, weight=1)
self.window.grid_columnconfigure(0, weight=1)
self.header.grid(sticky="ew")
self.content.grid(row=1, sticky="nsew")
self.footer.grid(row=2, sticky="ew")
def getMouseX(self):
return self.window.winfo_pointerx() # - self.window.winfo_rootx()
def getMouseY(self):
return self.window.winfo_pointery() # - self.window.winfo_rooty()
def start(self):
self.window.mainloop()
print("{} closed".format(self.title))
def close(self):
self.window.quit()
@staticmethod
def setupImage(widget, image, align):
widget.config(image=image, compound=align)
@staticmethod
# if text is None, create a StringVar and return it
def createLabel(parent, row, col,
text=None,
anchor='w',
justify='left',
image=None,
colspan=1,
width=None,
bg=None,
debug_name=None):
text_variable = None
if bg is None:
bg = parent.cget('bg')
if text is None:
text_variable = StringVar()
label = Label(parent,
text=text, textvariable=text_variable,
bg=bg, width=width, image=image,
anchor=anchor, justify=justify)
if anchor == 'center':
anchor = 'w'
label.grid(row=row, column=col, sticky=anchor, columnspan=colspan)
return GhAppHandle(text_variable, label, debug_name=debug_name)
@staticmethod
def createButton(parent, row, col,
command,
text,
anchor='w',
padx=0, pady=0,
width=None,
image=None,
text_visible=None,
debug_name=None):
text_variable = StringVar()
button = Button(parent, command=command, textvariable=text_variable,
anchor=anchor, padx=padx, pady=pady, width=width)
button.grid(row=row, column=col, sticky=anchor)
if text_visible or \
not GhAppSetup.image_button \
or GhAppSetup.image_text_button \
or image is None:
text_variable.set(text)
if GhAppSetup.image_button:
GhApp.setupImage(button, image, "left")
return GhAppHandle(text_variable, button, debug_name=debug_name)
@staticmethod
def createRadio(parent, row, col, command,
text=None,
anchor='w',
padx=5,
debug_name=None):
radiovar = IntVar()
radio = Radiobutton(parent, bg=parent.cget('bg'), text=text,
command=command, variable=radiovar,
onvalue=1,
anchor=anchor, padx=padx)
radio.grid(row=row, column=col, sticky=anchor)
return GhAppHandle(radiovar, radio, debug_name=debug_name)
@staticmethod
def createCheckbox(parent, row, col, command,
text=None,
anchor='w',
padx=5,
debug_name=None):
check_var = IntVar()
check = Checkbutton(parent, bg=parent.cget('bg'), text=text,
command=command, variable=check_var,
onvalue=1,
anchor=anchor, padx=padx)
check.grid(row=row, column=col, sticky=anchor)
return GhAppHandle(check_var, check, debug_name=debug_name)
@staticmethod
def createCombobox(parent, row, col, command, values,
anchor='w',
width=None,
colspan=1,
read_only=True,
debug_name=None):
combo_var = StringVar()
combo = Combobox(parent, width=width, textvariable=combo_var)
combo.grid(row=row, column=col, columnspan=colspan, sticky=anchor)
combo['values'] = values
combo.bind('<<ComboboxSelected>>', command)
if read_only:
combo['state'] = 'readonly'
return GhAppHandle(combo_var, combo, debug_name=debug_name)
@staticmethod
def createEntry(parent, row, col, width, defaultvalue,
padx=5, command=None,
colspan=1, sticky="w",
debug_name=None):
entry_var = StringVar()
entry_var.set(defaultvalue)
entry = Entry(parent, textvariable=entry_var, width=width, validatecommand=command)
entry.grid(row=row, column=col, columnspan=colspan, padx=padx, sticky=sticky)
return GhAppHandle(entry_var, entry, debug_name=debug_name)
```
#### File: jopLauncher/gridgui/gridbehaviour.py
```python
class GhGridBehaviour:
def __init__(self, row, col):
self.current_row = row
self.current_col = col
def row_col_reset(self, row = 0, col = 0):
self.current_row = row
self.current_col = col
# return current and reset value to row
def row_reset(self, row = 0):
current = self.current_row
self.current_row = row
return current
# return current and reset value to col
def col_reset(self, col = 0):
current = self.current_col
self.current_col = col
return current
def row(self):
return self.current_row
# Return current col
def col(self):
return self.current_col
# Return next row to use and increment row
def row_next(self):
self.current_row += 1
return self.current_row - 1
# Return next col to use and increment col
def col_next(self):
self.current_col += 1
return self.current_col - 1
def debug(self, ctx):
print("{} : {}, {}".format(ctx, self.row(), self.col()))
```
#### File: core/migrations/migrate.py
```python
from JopLauncherConstant import JopLauncher
from launcher.log import Log
def nop(storage, version):
Log.debug("| no data migration needed to upgrade to version {}".format(version))
def toV2(storage, version):
last_session = storage.data()['last_sessions']
for s in last_session:
s.append("") # Launcher
s.append("") # platform
s.append("") # custom command
s.append("") # custom parameters
def toV3(storage, version):
games = storage.data()['Games']
for key, value in games.items():
value["type"] = ""
def toV4(storage, version):
games = storage.data()['Games']
for key, value in games.items():
value["status"] = ""
try:
value["sheet"] = value["note"]
except KeyError:
value["sheet"] = ""
value["note"] = ""
class StorageVersion:
VERSION_LIST = [0,
1,
2,
4,
JopLauncher.DB_VERSION]
MIGRATIONS_STEP = [nop,
nop,
toV2,
toV3,
toV4]
@staticmethod
def check_migration(storage, to):
if storage.getVersion() != to:
current = storage.getVersion()
Log.info("Storage migration from {} to {}".format(current, to))
for idx in range(0, len(StorageVersion.VERSION_LIST)):
v = StorageVersion.VERSION_LIST[idx]
if current < v:
StorageVersion.MIGRATIONS_STEP[v](storage, v)
storage.setVersion(to)
storage.save()
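# Usage sketch: StorageVersion.check_migration(storage, JopLauncher.DB_VERSION)
# applies, for each version v in VERSION_LIST above the stored version, the
# migration step at index v, then stamps the storage with the target version
# and saves it.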
```
#### File: core/private/process.py
```python
import os
import re
from datetime import datetime
from JopLauncherConstant import JopLauncher, JopSETUP
from launcher.log import Log
class ProcessInfo:
game_extension = JopSETUP.get(JopSETUP.GAME_EXTENSION)
def __init__(self, pinfo):
self.game_pattern = JopSETUP.get(JopSETUP.GAME_PATTERN)
self.pinfo = pinfo
self.pid = pinfo['pid']
self.name = pinfo['name']
self.path = pinfo['exe']
self.originName = self.name
self.game = self.gameDetector(self.path)
self.game_platform = self.platformDetector(self.path)
self.other = self.otherDetector(self.path)
self.storeEntry = None
self.started = None
self.duration = None
def getPid(self):
return self.pid
def getName(self):
return self.name
def getOriginName(self):
return self.originName
def forceName(self, map_name):
self.name = ProcessInfo.getMapName(self.path, map_name)
@staticmethod
def getMapName(path, map_name):
if map_name == 'PARENT':
parent = os.path.dirname(path).split(os.path.sep)
return parent[len(parent) - 1]
else:
return map_name
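    # Example (sketch, Windows-style hypothetical path):
    #   ProcessInfo.getMapName(r"C:\Games\Hades\hades.exe", 'PARENT')  # -> 'Hades'
    # returns the containing folder name; any other map_name is returned unchanged.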
def removeExtension(self):
self.name = ProcessInfo.removeGameExtension(self.name)
@staticmethod
def removeGameExtension(name):
if ProcessInfo.game_extension in name:
return name[0:name.rfind(ProcessInfo.game_extension)]
else:
return name
def getPath(self):
return self.path
def isGame(self):
return self.game
def gameDetector(self, path):
return (self.path is not None) and re.search(self.game_pattern, path, re.IGNORECASE)
def platformDetector(self, path):
if self.path is None:
return None
for key in JopLauncher.GAME_PLATFORMS:
if re.search(key, path, re.IGNORECASE):
return JopLauncher.GAME_PLATFORMS[key]
def otherDetector(self, path):
if self.path is None:
return None
for key in JopLauncher.COM_APP:
if re.search(key, path, re.IGNORECASE):
return JopLauncher.COM_APP[key]
def setStarted(self):
if self.started is None:
self.started = datetime.now()
else:
Log.info("/!\\ Start/Stop error: {} was already known to be started".format(self.name))
def setStopped(self):
if self.started is None:
Log.info("/!\\ Start/Stop error: {} was not known to be started".format(self.name))
else:
self.duration = datetime.now() - self.started
self.started = None
Log.info("{} has run for {}".format(self.name, self.duration))
return self.duration
def getPlayedTime(self):
return self.duration
def hasData(self):
return self.storeEntry is not None
def setStoreEntry(self, entry):
self.storeEntry = entry
def getStoreEntry(self):
return self.storeEntry
```
#### File: launcher/core/procevent.py
```python
from abc import ABC, abstractmethod
from launcher.log import Log
class EventListener(ABC):
pass
@abstractmethod
def newGame(self, game):
Log.debug("[ABC Impl] New game detected {} ({})".format(game.getName(), game.process.getPath()))
@abstractmethod
def refreshDone(self, current_game, platform_list_updated, others):
pass
@abstractmethod
def endGame(self, proc):
Log.debug("[ABC Impl] End game detected {} ({})".format(proc.getName(), proc.getPath()))
```
#### File: launcher/gui/menu.py
```python
from JopLauncherConstant import JopLauncher, JopSETUP
from base.fileutil import GhFileUtil
from gridgui.appmenu import GhAppMenu
from launcher.gui.strings import Strings
class PlatformLauncher:
def __init__(self, menu, gui, entry, command):
self.gui = gui
self.entry = entry
self.command = command
menu.add("{} {}".format(Strings.LAUNCH_PLATFORM_MENU, entry), self.apply)
def apply(self):
self.gui.applyLaunchPlatform(self.entry, self.command)
class MainMenu(GhAppMenu):
def __init__(self, parent, app, gui):
super().__init__(parent, app)
self.add(Strings.MENU_EXCLUDED, gui.applyShowExcluded)
self.add(Strings.MENU_LAUNCHER, gui.applyShowLauncher)
self.addSep()
for key in JopLauncher.GAME_PLATFORMS:
entry = JopLauncher.GAME_PLATFORMS[key]
cmd = JopSETUP.get(entry)
if GhFileUtil.fileExist(cmd[0]):
PlatformLauncher(self, gui, entry, cmd)
self.addSep()
self.add(Strings.MENU_COMP_APP, gui.applyLaunchCompApp)
self.add(Strings.MENU_MARKDOWN_REPORT, gui.updateMarkdownReport)
self.add(Strings.MENU_ICONFX_APP, gui.applyLaunchIconExtract)
self.addSep()
self.add(Strings.EXIT, gui.applyExit)
``` |
{
"source": "joetjo/MarkdownHelper",
"score": 2
} |
#### File: MarkdownHelper/markdown/markdown.py
```python
from base.setup import GhSetup
from pathlib import Path
from markdown.markdownfile import MhMarkdownFile
from markdown.report import MhReport
#
# Setup from $home/.markdownHelper
# ( Sample provided in example.markdownHelper.json )
#
class MarkdownHelper:
def __init__(self):
self.SETUP = GhSetup('markdownHelper')
self.VAULT = self.SETUP.getBloc("global")["base_folder"]
self.IGNORE = self.SETUP.getBloc("global")["ignore"]
self.REPORTS = self.SETUP.getBloc("global")["reports"]
self.SUBCONTENT = self.SETUP.getBloc("global")["shared_contents"]
self.FILES = dict()
self.SORTED_FILES = dict()
self.TAGS = dict()
# folder: Path
# shift: String ( String length provide the indentation level )
def processFolder(self, folder, shift):
print("{}{}".format(folder, shift))
entryCount = 0
        # Loop over the files in the current folder
for entry in folder.iterdir():
if entry.is_file() and entry.name.endswith(".md") and entry.name not in self.IGNORE:
key = entry.name[0:len(entry.name) - 3]
entryCount = entryCount + 1
mdfile = MhMarkdownFile(key, entry)
self.FILES[key] = mdfile
print("{}>{} {}".format(shift, key, mdfile.tags))
if len(mdfile.tagsComment) > 0:
print("{}>>>> comments {}".format(shift, mdfile.tagsComment))
for tag in mdfile.tags:
self.TAGS[tag] = tag
        # Loop over the sub-folders
for entry in folder.iterdir():
if not entry.is_file() and entry.name not in self.IGNORE:
entryCount = entryCount + self.processFolder(entry, "{}{}".format(shift, " "))
return entryCount
def markdown(self):
print(" | Markdown vault: {}\n====".format(self.VAULT))
count = self.processFolder(Path(self.VAULT), "")
print("\n=================")
print("> {} md files detected".format(count))
print("> {} tags detected".format(len(self.TAGS)))
for key in sorted(self.FILES):
self.SORTED_FILES[key] = self.FILES[key]
try:
for report in self.REPORTS:
print("\n=================\nProcessing report \"{}\"\n=================\n".format(report["title"]))
MhReport(report, self.VAULT, self.SORTED_FILES, self.TAGS, self.SUBCONTENT).generate()
except Exception as e:
raise
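# Usage sketch (assumes a $home/.markdownHelper setup file as described above):
#   MarkdownHelper().markdown()
# walks the configured vault, indexes every .md file and its tags, then
# generates each report listed under "reports".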
``` |
{
"source": "joetoth/metacademy-fortune",
"score": 3
} |
#### File: joetoth/metacademy-fortune/generate.py
```python
import os
import sys
from os import listdir
from os.path import isfile, join
from subprocess import call
def generate():
fortune = ''
dir = 'metacademy-content/concepts'
for f in listdir('metacademy-content/concepts'):
tfile = os.path.join(dir, f, 'title.txt')
sfile = os.path.join(dir, f, 'summary.txt')
if os.path.exists(tfile):
with open(tfile, 'r') as t:
title = t.read().strip().upper()
if os.path.exists(sfile):
with open(sfile, 'r') as s:
summary = s.read().strip()
fortune += '{}: {}\nhttps://metacademy.org/graphs/concepts/{}\n%\n'.format(title, summary, f)
with open('metacademy-fortune', 'w') as f:
f.write(fortune)
call(['strfile', 'metacademy-fortune', 'metacademy-fortune.dat'])
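    # Note (sketch): each entry written above ends with a line containing only '%',
    # the separator expected by fortune(1); `strfile` then builds the random-access
    # index (metacademy-fortune.dat) that lets `fortune` pick entries from the file.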
if __name__ == "__main__":
generate()
``` |
{
"source": "joetric/python-omgeo",
"score": 3
} |
#### File: python-omgeo/omgeo/postprocessors.py
```python
from omgeo.processor import _Processor
import math
from operator import attrgetter
# Python 2/3 compatibility: set up 'unicode' for use in string type checking
try:
unicode
except NameError:
unicode = str
class _PostProcessor(_Processor):
"""Takes, processes, and returns list of geocoding.places.Candidate objects."""
def process(self, candidates):
raise NotImplementedError(
'PostProcessor subclasses must implement process().')
def is_case_sensitive(self):
try:
return 'CS' if self.case_sensitive else 'CI'
except ValueError:
return 'NA'
def is_exact(self):
try:
return 'EXACT_MATCH' if self.exact_match else 'INEXACT_MATCH'
except ValueError:
return 'NA'
class LocatorFilter(_PostProcessor):
"""
PostProcessor used to ditch results with lousy locators.
:arg list good_locators: A list of locators to accept results from (default [])
"""
def __init__(self, good_locators):
"""
:arg list good_locators: A list of locators to accept results from (default None)
"""
self.good_locators = good_locators
def process(self, candidates):
"""
:arg list candidates: list of Candidate instances
"""
for c in candidates[:]:
if c.locator not in self.good_locators:
# TODO: search string, i.e. find "EU_Street_Name" in "EU_Street_Name.GBR_StreetName"
candidates.remove(c)
return candidates
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.good_locators)
class LocatorSorter(_PostProcessor):
"""
PostProcessor used to sort by locators
"""
def __init__(self, ordered_locators):
"""
:arg list ordered_locators: a list of :py:attr:`Candidate.locator` values
placed in the desired order, such as ``rooftop``,
``interpolation``, or ``postal``.
"""
self.ordered_locators = ordered_locators
def process(self, unordered_candidates):
"""
:arg list candidates: list of Candidate instances
"""
ordered_candidates = []
# make a new list of candidates in order of ordered_locators
for locator in self.ordered_locators:
for uc in unordered_candidates[:]:
if uc.locator == locator:
ordered_candidates.append(uc)
unordered_candidates.remove(uc)
# add all the candidates that are still left
# (whose locator values are not in ordered_locators)
# and return the new list
return ordered_candidates + unordered_candidates
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.ordered_locators)
class AttrRename(_PostProcessor):
"""
PostProcessor used to rename the given attribute, with unspecified
attributes appearing at the end of the list.
"""
def __init__(self, attr, attr_map=None, exact_match=False, case_sensitive=False):
"""
:arg str attr: Name of the attribute
:arg dict attr_map: Map of old names : new names.
:arg bool exact_match:
:arg bool case_sensitive:
"""
self.attr = attr
self.attr_map = attr_map if attr_map is not None else {}
self.exact_match = exact_match
self.case_sensitive = case_sensitive
def process(self, candidates):
"""
:arg list candidates: list of Candidate instances
:returns: list of Candidate instances with modified values for the given attribute
"""
def _cc(str_): # change case
return str_ if self.case_sensitive else str_.lower()
new_candidates = []
for c in candidates[:]:
attr_val = getattr(c, self.attr)
if self.exact_match is False and any(_cc(k) in _cc(attr_val) for k in self.attr_map):
map_key = [k for k in self.attr_map if _cc(k) in _cc(attr_val)][0]
map_val = self.attr_map[map_key]
setattr(c, self.attr, map_val)
elif _cc(attr_val) in [_cc(a) for a in self.attr_map]:
map_key = [k for k in self.attr_map if _cc(k) == _cc(attr_val)][0]
setattr(c, self.attr, self.attr_map[map_key])
new_candidates.append(c)
return new_candidates
def __repr__(self):
return '<%s: %s %s Map of %s (old:new): %s>' \
% (self.__class__.__name__, self.is_exact(), self.is_case_sensitive(), self.attr, self.attr_map)
class UseHighScoreIfAtLeast(_PostProcessor):
"""
Limit results to results with at least the given score,
if and only if one or more results has, at least, the
given score. If no results have at least this score,
all of the original results are returned intact.
"""
def __init__(self, min_score):
self.min_score = min_score
def process(self, candidates):
"""
:arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates.
"""
high_score_candidates = [c for c in candidates if c.score >= self.min_score]
if high_score_candidates != []:
return high_score_candidates
return candidates
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.min_score)
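    # Example (sketch): with min_score=90, a candidate list scoring [95, 88, 72]
    # is reduced to just the 95-point result; a list scoring only [88, 72] is
    # returned unchanged because nothing reaches the threshold.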
class ScoreSorter(_PostProcessor):
"""PostProcessor class to sort :py:class:`Candidate` scores."""
def __init__(self, reverse=True):
"""
:arg bool reverse: indicates if the scores should be sorted in descending
order (e.g. 100, 90, 80, ...) (default ``True``)
"""
self.reverse = reverse
def process(self, candidates):
"""
:arg list candidates: list of Candidates
:returns: score-sorted list of Candidates
"""
return sorted(candidates, key=attrgetter('score'), reverse=self.reverse)
def __repr__(self):
order = 'high to low' if self.reverse else 'low to high'
return '<%s: %s>' % (self.__class__.__name__, order)
class AttrSorter(_PostProcessor):
"""
PostProcessor used to sort by a the given attribute, with unspecified
attributes appearing at the end of the list.
:arg list ordered_values: A list of values placed in the desired order.
:arg str attr: The attribute on which to sort.
"""
def __init__(self, ordered_values=None, attr='locator'):
self.ordered_values = [] if ordered_values is None else ordered_values
self.attr = attr
def process(self, unordered_candidates):
ordered_candidates = []
# make a new list of candidates in order of ordered_values
for value in self.ordered_values:
for uc in unordered_candidates[:]:
if getattr(uc, self.attr) == value:
ordered_candidates.append(uc)
unordered_candidates.remove(uc)
# add all the candidates that are still left
# and return the new list
return ordered_candidates + unordered_candidates
def __repr__(self):
return '<%s: %s sorted by %s>' % \
(self.__class__.__name__, self.attr, self.ordered_values)
class AttrReverseSorter(_PostProcessor):
"""
PostProcessor used to sort by the given attributes in reverse order,
with unspecified attributes appearing at the end of the list.
This is good to use when a list has already been defined in a script
and you are too lazy to use the reverse() function, or don't want
to in order to maintain more readable code.
"""
def __init__(self, ordered_values=None, attr='locator'):
"""
:arg list ordered_values: A list of values placed in the reverse of the desired order.
:arg str attribute: The attribute on which to sort
"""
self.ordered_values = [] if ordered_values is None else ordered_values
self.attr = attr
def process(self, unordered_candidates):
ordered_values = self.ordered_values
ordered_values.reverse()
sorter = AttrSorter(ordered_values, self.attr)
return sorter.process(unordered_candidates)
def __repr__(self):
return '<%s: %s reverse sorted by %s>' % \
(self.__class__.__name__, self.attr, self.ordered_values)
class AttrMigrator(_PostProcessor):
"""
PostProcessor used to migrate the given attribute
to another attribute.
"""
def __init__(self, attr_from, attr_to, attr_map=None, exact_match=False, case_sensitive=False):
self.attr_from = attr_from
self.attr_to = attr_to
self.attr_map = {} if attr_map is None else attr_map
self.exact_match = exact_match
self.case_sensitive = case_sensitive
def process(self, candidates):
def _cc(str_): # change case
if self.case_sensitive is False:
return str_.lower()
return str_
new_candidates = []
for c in candidates[:]:
from_val = getattr(c, self.attr_from)
if self.exact_match is False and any(_cc(k) in _cc(from_val) for k in self.attr_map):
map_key = [k for k in self.attr_map if _cc(k) in _cc(from_val)][0]
map_val = self.attr_map[map_key]
setattr(c, self.attr_to, map_val)
elif _cc(from_val) in [_cc(a) for a in self.attr_map]:
map_key = [k for k in self.attr_map if _cc(k) == _cc(from_val)][0]
setattr(c, self.attr_to, self.attr_map[map_key])
new_candidates.append(c)
return new_candidates
def __repr__(self):
return '<%s: %s -> %s %s %s>' % \
(self.__class__.__name__, self.attr_from, self.attr_to, self.is_exact(), self.is_case_sensitive())
class AttrFilter(_PostProcessor):
"""
PostProcessor used to filter out results without desired attribute values.
"""
def __init__(self, good_values=[], attr='locator', exact_match=True):
"""
:arg list good_values: A list of values whose candidates we will
accept results from (default [])
:arg string attr: The attribute type on which to filter
:arg bool exact_match: True if attribute must match a good value exactly.
False if the attribute can be a substring in a
good value. In other words, if our Candidate
attribute is 'US_Rooftop' and one of the good_values
is 'Rooftop', we will keep this candidate.
"""
self._init_helper(vars())
def process(self, candidates):
if self.exact_match is True:
return [c for c in candidates if getattr(c, self.attr) in self.good_values]
else:
return [c for c in candidates if any(gv in getattr(c, self.attr)
for gv in self.good_values)]
def __repr__(self):
return '<%s: %s %s in %s>' % \
(self.__class__.__name__, self.is_exact(), self.attr, self.good_values)
class AttrExclude(_PostProcessor):
"""
PostProcessor used to filter out results with unwanted attribute values.
"""
def __init__(self, bad_values=[], attr='locator', exact_match=True):
"""
:arg list bad_values: A list of values whose candidates we will
not accept results from (default [])
:arg string attr: The attribute type on which to filter
:arg bool exact_match: True if attribute must match a bad value exactly.
False if the bad value can be a substring of the
attribute value. In other words, if our Candidate
attribute is 'Postcode3' and one of the bad values
is 'Postcode' because we want something more precise,
like 'Address', we will not keep this candidate.
"""
self._init_helper(vars())
def process(self, candidates):
if self.exact_match is True:
return [c for c in candidates if getattr(c, self.attr) not in self.bad_values]
else:
return [c for c in candidates if not any(bv in getattr(c, self.attr) for bv in self.bad_values)]
def __repr__(self):
return '<%s: %s %s in %s>' % \
(self.__class__.__name__, self.is_exact(), self.attr, self.bad_values)
class AttrListIncludes(_PostProcessor):
"""
PostProcessor used to filter out results without desired attribute list items.
Similar to `AttrFilter` but operates on attributes containing lists instead of scalar values.
"""
def __init__(self, good_values=[], attr='entity_types'):
"""
:arg list good_values: A list of values, one of which must be in the
attribute being filtered on (default [])
:arg string attr: The attribute on which to filter
"""
self._init_helper(vars())
def process(self, candidates):
return [c for c in candidates if any(gv in getattr(c, self.attr)
for gv in self.good_values)]
def __repr__(self):
return '<%s: %s in %s>' % \
(self.__class__.__name__, self.attr, self.good_values)
class AttrListExcludes(_PostProcessor):
"""
PostProcessor used to ditch results with unwanted attribute list items.
Similar to `AttrExclude` but operates on attributes containing lists instead of scalar values.
"""
def __init__(self, bad_values=[], attr='entity_types'):
"""
:arg list bad_values: A list of values, which cannot be in the
attribute being filtered on (default [])
:arg string attr: The attribute on which to filter
"""
self._init_helper(vars())
def process(self, candidates):
return [c for c in candidates if not any(bv in getattr(c, self.attr)
for bv in self.bad_values)]
def __repr__(self):
return '<%s: %s in %s>' % \
(self.__class__.__name__, self.attr, self.bad_values)
class DupePicker(_PostProcessor):
"""
PostProcessor used to choose the best candidate(s)
where there are duplicates (or more than one result
that is very similar*) among high-scoring candidates,
such as an address.
* When comparing attribute values, case and commas do not count.
Usage Example:
================ ===== =======
match_addr score locator
================ ===== =======
123 N Wood St 90 roof
123 S Wood St 90 address
123 N WOOD ST 77 address
123, S Wood ST 85 roof
================ ===== =======
Above, the first two results have the highest scores. We could just
use those, because one of the two likely has the correct address.
However, the second result does not have the most precise location
for 123 S. Wood Street. While the fourth result does not score as
high as the first two, its locator method is more desirable.
Since the addresses are the same, we can assume that the fourth result
will provide better data than the second.
We can get a narrowed list as described above by running the process()
method in the DupePicker() PostProcessor class as follows, assuming
that the "candidates" is our list of candidates::
dp = DupePicker(
attr_dupes='match_addr',
attr_sort='locator',
ordered_list=['rooftop', 'address_point', 'address_range'])
return dp.process(candidates)
Output:
================ ===== =======
match_addr score locator
---------------- ----- -------
123 N Wood St 90 roof
123, S Wood ST 85 roof
================ ===== =======
Output with return_clean=True:
================ ===== =======
match_addr score locator
---------------- ----- -------
123 N WOOD ST 90 roof
123 S WOOD ST 85 roof
================ ===== =======
"""
def __init__(self, attr_dupes, attr_sort, ordered_list, return_clean=False):
"""
:arg str attr_dupes: Property on which to look for duplicates.
:arg str attr_sort: Property on which to sort using ordered_list
:arg list ordered_list: A list of property values, from most desirable
to least desirable.
:arg bool return_clean: Boolean indicating whether or not to
homogenize string values into uppercase
without commas.
"""
self._init_helper(vars())
def process(self, candidates):
def cleanup(str_):
"""Returns string in uppercase and free of commas."""
if type(str_) in (str, unicode):
return str_.replace(',', '').upper()
return str_
# if there are no candidates, then there is nothing to do here
if candidates == []:
return []
hi_score = ScoreSorter().process(candidates)[0].score
hi_score_candidates = AttrFilter([hi_score], 'score').process(candidates)
new_candidates = []
for hsc in hi_score_candidates:
# get candidates with same address, including the current one:
attr_match = self.attr_dupes
attr_match_test_val = cleanup(getattr(hsc, attr_match))
# make a list of candidates that have essentially the same value for attr_match (like 123 Main & 123 MAIN)
matching_candidates = [mc for mc in candidates if cleanup(getattr(mc, attr_match)) == attr_match_test_val]
# sort them in the desired order so the first one has the best attribute value
matching_candidates = AttrSorter(self.ordered_list, self.attr_sort).process(matching_candidates)
# the best value available can be grabbed from the top result:
best_attr_value = getattr(matching_candidates[0], attr_match)
# now we can filter results that have best_attr_value:
new_candidates_queue = AttrFilter([best_attr_value], attr_match).process(matching_candidates)
# and append each one to our list of new candidates, if it's not there already:
for nc in [nc for nc in new_candidates_queue if nc not in new_candidates]:
if self.return_clean:
new_candidates.append(cleanup(nc))
else:
new_candidates.append(nc)
return new_candidates
def __repr__(self):
repr_ = '%s: SORT BY %s %s -> GROUP BY %s' % \
(self.__class__.__name__, self.attr_sort, self.ordered_list, self.attr_dupes)
if self.return_clean:
repr_ += ' -> CLEAN'
return '<%s>' % repr_
class GroupBy(_PostProcessor):
"""
Groups results by a certain attribute by choosing the
first occurrence in the list (this means you will want
to sort ahead of time).
attr -- The attribute on which to combine results
or a list or tuple of attributes where all
attributes must match between candidates.
"""
def __init__(self, attr='match_addr'):
self._init_helper(vars())
def process(self, candidates):
if type(self.attr) in (tuple, list):
return GroupByMultiple(attrs=self.attr).process(candidates)
keepers = []
for c_from_all in candidates[:]:
matches = [c for c in candidates if getattr(c, self.attr) == getattr(c_from_all, self.attr)]
if matches != []:
keepers.append(matches[0])
for m in matches:
candidates.remove(m)
return keepers
def __repr__(self):
return '<%s: %s>' % \
(self.__class__.__name__, self.attr)
class GroupByMultiple(_PostProcessor):
"""
Groups results by a certain attribute by choosing the
first occurrence in the list of candidates
(this means you will want to sort ahead of time).
attrs -- A list or tuple of attributes on which to combine results
"""
def __init__(self, attrs):
self._init_helper(vars())
def process(self, candidates):
keepers = []
for c_from_all in candidates[:]:
matches = [c for c in candidates
if all([getattr(c, attr) == getattr(c_from_all, attr)
for attr in self.attrs])]
if matches != []:
keepers.append(matches[0])
for m in matches:
candidates.remove(m)
return keepers
def __repr__(self):
return '<%s: %s>' % \
(self.__class__.__name__, self.attrs)
class SnapPoints(_PostProcessor):
"""
Chooses the first of two or more points where they are within the given
sphere-based great circle distance.
"""
def __init__(self, distance=50):
"""
:arg distance: maximum distance (in metres) between two points in which
the the first will be kept and the second thrown out
(default 50).
"""
self.distance = distance
def _get_distance(self, pnt1, pnt2):
"""Get distance in meters between two lat/long points"""
lat1, lon1 = pnt1
lat2, lon2 = pnt2
        radius = 6356752  # approximate Earth radius in metres (value is in m, not km)
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
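    # Worked example (sketch): two candidates 0.0003 degrees of latitude apart are
    # roughly 33 m from each other, so with the default distance=50 they are
    # treated as duplicates:
    #   SnapPoints()._get_distance((40.0, -75.0), (40.0003, -75.0))  # ~33.3 m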
def _points_within_distance(self, pnt1, pnt2):
"""Returns true if lat/lon points are within given distance in metres."""
if self._get_distance(pnt1, pnt2) <= self.distance:
return True
return False
def process(self, candidates):
keepers = []
for c_from_all in candidates[:]:
matches = [c for c in candidates if
self._points_within_distance((c_from_all.x, c_from_all.y), (c.x, c.y))]
if matches != []:
keepers.append(matches[0])
for m in matches:
candidates.remove(m)
return keepers
def __repr__(self):
return '<%s: distance=%sm>' % \
(self.__class__.__name__, self.distance)
``` |
{
"source": "JoeTrubenstein/peep-show-api",
"score": 2
} |
#### File: peep-show-api/restapp/views.py
```python
from django.contrib.auth.models import User, Group
from restapp.models import Character
from rest_framework import viewsets
from restapp.serializers import UserSerializer, GroupSerializer, CharacterSerializer
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from django.shortcuts import render
from django.utils import timezone
from django.views import generic
class UserViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticated,)
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticated,)
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
class CharacterViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthenticatedOrReadOnly,)
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Character.objects.all()
serializer_class = CharacterSerializer
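    # Note: IsAuthenticatedOrReadOnly above leaves GET/HEAD/OPTIONS requests open
    # to anyone, while write methods (POST/PUT/PATCH/DELETE) still require an
    # authenticated user.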
def home_page(request):
# posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'restapp/index.html', {'data': " "})
``` |
{
"source": "joeugenio/mybinder",
"score": 3
} |
#### File: lognormal/.ipynb_checkpoints/lognormal_vs_normal-checkpoint.py
```python
import numpy as np
import matplotlib.pyplot as plt
# Lognormal parameters
S = 1
MU = 0
log_mean = np.exp(MU+(S**2/2))
log_sd = np.sqrt(np.exp(2*MU+S**2)*(np.exp(S**2)-1))
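# For X ~ LogNormal(mu, sigma): E[X] = exp(mu + sigma^2/2) and
# Var[X] = (exp(sigma^2) - 1) * exp(2*mu + sigma^2); with MU=0, S=1 this gives
# log_mean ~= 1.65 and log_sd ~= 2.16, the values used for the comparison below.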
# Lognormal PDF
def p_lognorm(x, sigma=1, mu=0):
pdf = np.zeros(x.shape)
i = 0
for j in x:
if (j > 0):
pdf[i] = (1/(j*sigma*np.sqrt(2*np.pi)))*np.exp(-.5*((np.log(j)-mu)/sigma)**2)
i += 1
return pdf
# Normal PDF
def p_norm(x, sd=1, m=0):
return (1/(sd*np.sqrt(2*np.pi)))*np.exp(-.5*((x-m)/sd)**2)
# Laplace PDF
def p_lap(x, sd=1, m=0):
a = np.sqrt(2)/sd
return (a/2)*np.exp(-a*np.abs(x-m))
n = np.linspace(-10,10,1000)
print('Lognormal pdf with mean {:.2f} ({:.2f}) and SD {:.2f}'.format(0,log_mean, log_sd))
p1 = p_lognorm(n+log_mean)
print('Gaussian pdf with mean {:.2f} and SD {:.2f}'.format(0, log_sd))
p2 = p_norm(n, sd=log_sd)
print('Laplace pdf with mean {:.2f} and SD {:.2f}'.format(0, log_sd))
p3 = p_lap(n, sd=log_sd)
plt.plot(n,p1,label='lognormal')
plt.plot(n,p2,label='gaussiana')
plt.plot(n,p3,label='laplace')
plt.axvline(0, 0, 1, ls='--', color='k',linewidth=1)
plt.legend()
plt.show()
``` |
{
"source": "joeugenio/noma_simulation",
"score": 3
} |
#### File: noma_simulation/nomalib/channel.py
```python
import numpy as np
from logzero import logger
import nomalib.constants as const
# classes
class PropagationModel:
''' Distance dependent propagation model '''
def __init__(self, env=const.ENV, fc=const.FC):
self.env = env
self.fc = fc
def attenuation(self, d):
try:
d_db = np.log10(d)
except ZeroDivisionError:
d_db = float('-Inf')
if (self.env=='urban' and self.fc==900):
l = 120.9 + 36.7*d_db
elif (self.env=='urban' and self.fc==2000):
l = 128.1 + 36.7*d_db
elif (self.env=='rural' and self.fc==900):
l = 95.5 + 34.1*d_db
else:
logger.error('Invalid frequency or environment')
            l = None
return l
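    # Worked example (sketch, assuming d is expressed in kilometres as in these
    # 3GPP-style path-loss formulas): PropagationModel(env='urban', fc=2000)
    # gives attenuation(0.5) = 128.1 + 36.7*log10(0.5), i.e. about 117.1 dB.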
class Shadowing:
''' Shadowing 2D model '''
pass
class Noise:
''' Noise signal '''
class Interference:
    ''' Interference from other cells '''
```
#### File: noma_simulation/nomalib/devices.py
```python
import numpy as np
import constants as const
# classes
class Coordinate:
''' Coordinate x and y'''
def __init__(self,x,y):
self.x = x
self.y = y
class BSAntenna:
''' Base Station Antenna '''
def __init__(self, theta_min, bs_gain = const.BSG):
self.theta_min = theta_min
self.bs_gain = bs_gain
''' Radiation Pattern '''
def radiation_pattern(self, theta, theta3db=65, att_max=20):
a = 12*(theta/np.radians(theta3db))**2
return (-1)*np.min([a,att_max])
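    # Worked example (sketch): with theta = np.radians(32.5) and the default
    # theta3db of 65 degrees, a = 12*(0.5)**2 = 3, so the method returns -3 dB;
    # beyond roughly 84 degrees the att_max cap of 20 dB applies instead.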
class UEAntenna:
''' User Equipment Antenna '''
def __init__(self, ue_gain = const.UEG):
self.ue_g = ue_gain
''' Radiation Pattern Omni-directional'''
def radiation_pattern(self, theta):
return 0
class BaseStation:
''' Base Station - eNodeB '''
    def __init__(self, id:str, coord:Coordinate, hight=const.BSH, power=const.BSPW, n_sector=3):
        # NOTE: the default for n_sector was truncated in the source ('const.');
        # 3 sectors per site is an assumed placeholder, not the original constant.
self.id = id
self.h = hight
self.pwr = power
self.n_sec = n_sector
self.coord = coord
self.ue_id = []
class UserEquipment:
''' Equipment of User '''
    def __init__(self, id:str, coord:Coordinate, hight=const.UEH, power=const.UEPW):
self.id = id
self.coord = coord
self.h = hight
self.pwr = power
self.bs_id = None
    def received_power(self, tx_pwr=0, losses=(0,)):
        # Incomplete in the source ('rx_pwr = tx_pwr-np.max()'); hedged sketch:
        # received power is the transmit power minus the largest loss term (dB).
        return tx_pwr - np.max(losses)
``` |
{
"source": "Jo-Eunsu/auto-funny-subtitle",
"score": 2
} |
#### File: Jo-Eunsu/auto-funny-subtitle/FCPX_XML.py
```python
from typing import List, NoReturn, Tuple, TypeVar
from xml.etree.ElementTree import Element, ElementTree, SubElement, parse
from Template_JSON import Template_JSON
from AzureAnalytics import AzureAnalytics
EMPTY = -1
# Class for FCPX XML processing
class FCPX_XML:
    # ElementTree object that will hold the parsed result of the input XML file
__xml_tree = None
    # Root of the tree above
__xml_root = None
    # Object holding the template JSON file itself
__funny_title_text_templates = None
    # Default constructor: load the XML into an ElementTree and fetch the Azure AI API client info
def __init__(self, input_xml_dest: str) -> None:
        # Parse the xml file at the input_xml_dest path and store the data in __xml_tree
self.__xml_tree = parse(input_xml_dest)
        # Find the root node of the __xml_tree object
self.__xml_root = self.__xml_tree.getroot()
        # Get the client object of the Azure Text Analytics service
self.__api_client = AzureAnalytics().get_client()
        # Keep a separate name for the modified XML file
self.__input_xml_dest = input_xml_dest
        # Create the object that handles the template info file (JSON)
self.__funny_title_text_templates = Template_JSON()
        # Flag showing whether the xml has been changed by sentiment analysis
self.xml_modified = False
print("XML 처리 객체", self.__input_xml_dest, "생성됨\n")
        # Rewrite the effect tags for all registered templates
self.__effect_xml_modifiction()
def __del__(self):
print("XML 처리 객체", self.__input_xml_dest, "삭제됨\n")
    # Load every subtitle tag element (Element) from the XML
def loadAllElements(self) -> list:
        # List holding every subtitle tag element
title_elements = []
        # Find the asset-clip tags
        # An asset-clip tag holds the info of each video clip placed on the main storyline in Final Cut
for asset_clip in self.__xml_root.iter("asset-clip"):
for title in asset_clip.iter("title"):
                # Extract the regular subtitle element (together with its parent element)
nodeDict = {"node": title, "parent": asset_clip}
title_elements.append(nodeDict)
return title_elements
    # Find the tags in the XML to which a variety-style subtitle template has been applied
def loadAllVideoElements(self) -> list:
        # List holding every variety-style subtitle tag element
video_elements = []
        # Store the ID info of every subtitle template
templateIDs = []
for template in self.__funny_title_text_templates.get_all_template():
templateIDs.append(template["effect"]["id"])
for asset_clip in self.__xml_root.iter("asset-clip"):
for video in asset_clip.iter("video"):
                # Among clips using a Generator template, find elements that use a variety-style subtitle template and extract them (with their parent element)
if video.attrib["ref"] in templateIDs:
nodeDict = {"node": video, "parent": asset_clip}
video_elements.append(nodeDict)
return video_elements
    # Run sentiment analysis on each subtitle text
def xml_text_analysis(self, title: Element, parent: Element) -> None:
        # Extract the text from the subtitle
title_text = title.find("text").findtext("text-style")
        # Get the analysis result (emotion label) and the template number for that emotion
result, template_number = self.__sentiment_analysis(title_text)
        # Remove the existing subtitle template and apply the result to the XML (add a video tag)
parent.remove(title)
self.title_xml_modification(parent, title, result, template_number)
    # Use the Azure AI Text Analytics service (client) to analyze the sentiment (positive, neutral, negative) of the text (documents)
def __sentiment_analysis(self, document: str):
        # Analyze the sentiment of the input text with the API and store the result in response
response = (self.__api_client.analyze_sentiment([document], language="ko")[0])
        # Variables holding the final emotion for the text and the subtitle number for that emotion
result_emotion: str
result_emotion_num: int
positive_score = neutral_score = negative_score = 0.0
        # Assign the emotion string and subtitle number from the analysis result
        # 1. Special cases - set the emotion through keyword filtering
if self.__detect_funny(document) is not EMPTY:
result_emotion = 'funny'
result_emotion_num = 0
elif self.__detect_abuse(document) is not EMPTY:
result_emotion = 'negative'
result_emotion_num = 0
elif self.__detect_interjection(document) is not EMPTY:
result_emotion = 'positive'
result_emotion_num = 0
        # 2. General case - pick the highest of the positive, neutral and negative scores
else:
positive_score = response.confidence_scores.positive
neutral_score = response.confidence_scores.neutral
negative_score = response.confidence_scores.negative
if max(positive_score, neutral_score, negative_score) is positive_score:
result_emotion = 'positive'
result_emotion_num = 0
elif max(positive_score, neutral_score, negative_score) is neutral_score:
result_emotion = 'neutral'
result_emotion_num = 0
else:
result_emotion = 'negative'
result_emotion_num = 0
        # For testing: print the resulting emotion for each text
print("Input Text:", document)
print("Result Emotion:", result_emotion)
print("positive score:", positive_score)
print("neutral score:", neutral_score)
print("negative score:", negative_score)
print("")
return result_emotion, result_emotion_num
    # Find abusive language in a sentence - returns -1 if none is found, otherwise its position
def __detect_abuse(self, sentence: str) -> int:
filter = ["시발", "개새끼", "병신", "썅", "좆같네", "좃같네", "좋같네", "아 씨", "아씨", "야발"]
for filter_item in filter:
if sentence.find(filter_item) is not EMPTY:
return sentence.find(filter_item)
return EMPTY
    # Find amusement in a sentence - e.g. 'ㅋㅋㅋ' and '하하하'
def __detect_funny(self, sentence: str) -> int:
filter = ["ㅋㅋ", "하하하", "웃겨"]
for filter_item in filter:
if sentence.find(filter_item) is not EMPTY:
return sentence.find(filter_item)
return EMPTY
    # Find interjections in a sentence - returns -1 if none is found, otherwise its position
def __detect_interjection(self, sentence: str) -> int:
filter = ["무야호", "오진다", "오매", "좋은그" ]
for filter_item in filter:
if sentence.find(filter_item) is not EMPTY:
return sentence.find(filter_item)
return EMPTY
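    # Example (sketch): a line such as "ㅋㅋㅋ 너무 웃겨" matches the funny filter
    # above and is mapped straight to the 'funny' template before any Azure
    # sentiment scores are consulted; plain sentences fall through to the
    # positive/neutral/negative comparison in __sentiment_analysis.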
    # Rewrite the effect tags that store the subtitle template info itself
def __effect_xml_modifiction(self):
        # Load all template info from the json object
all_templates = self.__funny_title_text_templates.get_all_template()
        # Get the resources tag (the effect tag info lives inside the resources tag)
resources = self.__xml_root.find("resources")
        # Write the effect tag info of every template into the XML
for template in all_templates:
            # Remove any duplicate effect tag - duplicates can cause errors
for existing_effect in self.__xml_root.iter("effect"):
if existing_effect.attrib["name"] == template["effect"]["name"]:
resources.remove(existing_effect)
            # Take the template's effect attributes and add them under the resources tag
SubElement(resources, "effect", template["effect"])
    # Rewrite the title tag of the XML, choosing a variety-style subtitle that matches the emotion of the subtitle text
    # (title_element: element holding the existing subtitle tag info, text: input string, emotion: emotion, type: specific index when several templates share the same emotion, offset: )
def title_xml_modification(self, asset_clip_element: Element, title_element: Element, emotion: str, template_number: int) -> NoReturn:
text = title_element.find("text").findtext("text-style")
        # Load the template (json) matching the emotion of the text in each title tag
template_json = self.__funny_title_text_templates.get_template_at_number(emotion, template_number)["video"]
        # Use the template json's id as the ref that links the subtitle text to the template
ref: str = self.__funny_title_text_templates.get_template_at_number(emotion, template_number)["effect"]["id"]
        # Load the video tag attributes from that template json
video_tag_attrib = title_element.attrib
        # Copy the name attribute of the video tag from the template's video attributes
video_tag_attrib["name"] = template_json["name"]
        # Set the video tag's ref attribute to the template's ref
video_tag_attrib["ref"] = ref
        # Create a video Element and attach it to the asset-clip tag to add it to the XML
video_element = SubElement(asset_clip_element, "video", video_tag_attrib)
        # Walk the param tags contained in the json file
for param_attrib in template_json["param"]:
            # Find the param tag whose name attribute is Text and insert the subtitle text
if param_attrib["name"] == "Text":
param_attrib["value"] = text
            # Create a param tag and attach it inside the video tag
SubElement(video_element, "param", param_attrib)
    # Modify a video element that has already been applied
def video_xml_modification(self, video_element: Element, offset_attrib: str, duration_attrib: str, template_name: str, title_text: str) -> NoReturn:
        # Load the template (json) matching the newly chosen emotion
template_json = self.__funny_title_text_templates.get_template_at_name(template_name)["video"]
        # Use the template json's id as the ref that links the subtitle text to the template
ref: str = self.__funny_title_text_templates.get_template_at_name(template_name)["effect"]["id"]
        # Load the video tag attributes from that template json
video_tag_attrib = video_element.attrib
        # Update the attributes of the video tag
video_tag_attrib["name"] = template_json["name"]
video_tag_attrib["ref"] = ref
video_tag_attrib["offset"] = offset_attrib
video_tag_attrib["duration"] = duration_attrib
        # Remove all existing param tags
for param in video_element.iter("param"):
video_element.remove(param)
        # Walk the param tags in the json file and add them
for param_attrib in template_json["param"]:
            # Create a param tag and attach it inside the video tag
SubElement(video_element, "param", param_attrib)
        # Insert the subtitle text
for param in video_element.iter("param"):
if param.attrib["name"] == "Text":
param.attrib["value"] = title_text
    # Return all templates
def get_all_template(self) -> List:
return self.__funny_title_text_templates.get_all_template()
    # Build the new XML file (returns an ElementTree holding the xml structure)
def write_xml(self) -> ElementTree:
if self.xml_modified == False:
return None
else:
return self.__xml_tree
# self.__xml__tree.write(self.__output_xml_dest, encoding="utf8", xml_declaration=True)
```
#### File: auto-funny-subtitle/ui/preview2.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(900, 400)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
Form.setFont(font)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Form)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.scrollArea = QtWidgets.QScrollArea(Form)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 874, 711))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem)
self.pushButton = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout_10.addWidget(self.pushButton)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem1)
self.pushButton_2 = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout_10.addWidget(self.pushButton_2)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout_10)
self.horizontalLayout_34 = QtWidgets.QHBoxLayout()
self.horizontalLayout_34.setObjectName("horizontalLayout_34")
self.gridLayout_8 = QtWidgets.QGridLayout()
self.gridLayout_8.setObjectName("gridLayout_8")
self.horizontalLayout_35 = QtWidgets.QHBoxLayout()
self.horizontalLayout_35.setObjectName("horizontalLayout_35")
self.label_34 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_34.setFont(font)
self.label_34.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_34.setObjectName("label_34")
self.horizontalLayout_35.addWidget(self.label_34)
self.gridLayout_8.addLayout(self.horizontalLayout_35, 2, 1, 1, 1)
self.horizontalLayout_36 = QtWidgets.QHBoxLayout()
self.horizontalLayout_36.setObjectName("horizontalLayout_36")
self.titleText_8 = QtWidgets.QPlainTextEdit(self.scrollAreaWidgetContents)
self.titleText_8.setObjectName("titleText_8")
self.horizontalLayout_36.addWidget(self.titleText_8)
self.gridLayout_8.addLayout(self.horizontalLayout_36, 2, 5, 1, 1)
self.horizontalLayout_37 = QtWidgets.QHBoxLayout()
self.horizontalLayout_37.setObjectName("horizontalLayout_37")
self.label_35 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_35.setFont(font)
self.label_35.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_35.setObjectName("label_35")
self.horizontalLayout_37.addWidget(self.label_35)
self.gridLayout_8.addLayout(self.horizontalLayout_37, 1, 1, 1, 1)
self.horizontalLayout_38 = QtWidgets.QHBoxLayout()
self.horizontalLayout_38.setObjectName("horizontalLayout_38")
self.templateSelector_8 = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.templateSelector_8.setObjectName("templateSelector_8")
self.horizontalLayout_38.addWidget(self.templateSelector_8)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_38.addItem(spacerItem3)
self.gridLayout_8.addLayout(self.horizontalLayout_38, 1, 5, 1, 1)
self.label_36 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_36.setFont(font)
self.label_36.setAlignment(QtCore.Qt.AlignCenter)
self.label_36.setObjectName("label_36")
self.gridLayout_8.addWidget(self.label_36, 1, 4, 1, 1)
self.label_37 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_37.setFont(font)
self.label_37.setAlignment(QtCore.Qt.AlignCenter)
self.label_37.setObjectName("label_37")
self.gridLayout_8.addWidget(self.label_37, 2, 4, 1, 1)
self.startTimeText_8 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.startTimeText_8.setObjectName("startTimeText_8")
self.gridLayout_8.addWidget(self.startTimeText_8, 1, 2, 1, 1)
self.endTimeText_8 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.endTimeText_8.setObjectName("endTimeText_8")
self.gridLayout_8.addWidget(self.endTimeText_8, 2, 2, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_8.addItem(spacerItem4, 1, 3, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_8.addItem(spacerItem5, 2, 3, 1, 1)
self.horizontalLayout_34.addLayout(self.gridLayout_8)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_34.addItem(spacerItem6)
self.label_38 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_38.setObjectName("label_38")
self.horizontalLayout_34.addWidget(self.label_38)
self.verticalLayout.addLayout(self.horizontalLayout_34)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.gridLayout.addLayout(self.horizontalLayout_2, 2, 1, 1, 1)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.titleText = QtWidgets.QPlainTextEdit(self.scrollAreaWidgetContents)
self.titleText.setObjectName("titleText")
self.horizontalLayout_4.addWidget(self.titleText)
self.gridLayout.addLayout(self.horizontalLayout_4, 2, 5, 1, 1)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_2 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.horizontalLayout_3.addWidget(self.label_2)
self.gridLayout.addLayout(self.horizontalLayout_3, 1, 1, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.templateSelector = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.templateSelector.setObjectName("templateSelector")
self.horizontalLayout_6.addWidget(self.templateSelector)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem7)
self.gridLayout.addLayout(self.horizontalLayout_6, 1, 5, 1, 1)
self.label_5 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_5.setFont(font)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 1, 4, 1, 1)
self.label_4 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_4.setFont(font)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 2, 4, 1, 1)
self.startTimeText = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.startTimeText.setObjectName("startTimeText")
self.gridLayout.addWidget(self.startTimeText, 1, 2, 1, 1)
self.endTimeText = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.endTimeText.setObjectName("endTimeText")
self.gridLayout.addWidget(self.endTimeText, 2, 2, 1, 1)
spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem8, 1, 3, 1, 1)
spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem9, 2, 3, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout)
spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem10)
self.label = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.label_14 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_14.setFont(font)
self.label_14.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_14.setObjectName("label_14")
self.horizontalLayout_15.addWidget(self.label_14)
self.gridLayout_4.addLayout(self.horizontalLayout_15, 2, 1, 1, 1)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.titleText_4 = QtWidgets.QPlainTextEdit(self.scrollAreaWidgetContents)
self.titleText_4.setObjectName("titleText_4")
self.horizontalLayout_16.addWidget(self.titleText_4)
self.gridLayout_4.addLayout(self.horizontalLayout_16, 2, 5, 1, 1)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
self.label_15 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_15.setFont(font)
self.label_15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_15.setObjectName("label_15")
self.horizontalLayout_17.addWidget(self.label_15)
self.gridLayout_4.addLayout(self.horizontalLayout_17, 1, 1, 1, 1)
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.templateSelector_4 = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.templateSelector_4.setObjectName("templateSelector_4")
self.horizontalLayout_18.addWidget(self.templateSelector_4)
spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_18.addItem(spacerItem11)
self.gridLayout_4.addLayout(self.horizontalLayout_18, 1, 5, 1, 1)
self.label_16 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_16.setFont(font)
self.label_16.setAlignment(QtCore.Qt.AlignCenter)
self.label_16.setObjectName("label_16")
self.gridLayout_4.addWidget(self.label_16, 1, 4, 1, 1)
self.label_17 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_17.setFont(font)
self.label_17.setAlignment(QtCore.Qt.AlignCenter)
self.label_17.setObjectName("label_17")
self.gridLayout_4.addWidget(self.label_17, 2, 4, 1, 1)
self.startTimeText_4 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.startTimeText_4.setObjectName("startTimeText_4")
self.gridLayout_4.addWidget(self.startTimeText_4, 1, 2, 1, 1)
self.endTimeText_4 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.endTimeText_4.setObjectName("endTimeText_4")
self.gridLayout_4.addWidget(self.endTimeText_4, 2, 2, 1, 1)
spacerItem12 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem12, 1, 3, 1, 1)
spacerItem13 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem13, 2, 3, 1, 1)
self.horizontalLayout_14.addLayout(self.gridLayout_4)
spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem14)
self.label_18 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_18.setObjectName("label_18")
self.horizontalLayout_14.addWidget(self.label_18)
self.verticalLayout.addLayout(self.horizontalLayout_14)
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
self.label_19 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_19.setFont(font)
self.label_19.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_19.setObjectName("label_19")
self.horizontalLayout_20.addWidget(self.label_19)
self.gridLayout_5.addLayout(self.horizontalLayout_20, 2, 1, 1, 1)
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
self.titleText_5 = QtWidgets.QPlainTextEdit(self.scrollAreaWidgetContents)
self.titleText_5.setObjectName("titleText_5")
self.horizontalLayout_21.addWidget(self.titleText_5)
self.gridLayout_5.addLayout(self.horizontalLayout_21, 2, 5, 1, 1)
self.horizontalLayout_22 = QtWidgets.QHBoxLayout()
self.horizontalLayout_22.setObjectName("horizontalLayout_22")
self.label_20 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_20.setFont(font)
self.label_20.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_20.setObjectName("label_20")
self.horizontalLayout_22.addWidget(self.label_20)
self.gridLayout_5.addLayout(self.horizontalLayout_22, 1, 1, 1, 1)
self.horizontalLayout_23 = QtWidgets.QHBoxLayout()
self.horizontalLayout_23.setObjectName("horizontalLayout_23")
self.templateSelector_5 = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.templateSelector_5.setObjectName("templateSelector_5")
self.horizontalLayout_23.addWidget(self.templateSelector_5)
spacerItem15 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_23.addItem(spacerItem15)
self.gridLayout_5.addLayout(self.horizontalLayout_23, 1, 5, 1, 1)
self.label_21 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_21.setFont(font)
self.label_21.setAlignment(QtCore.Qt.AlignCenter)
self.label_21.setObjectName("label_21")
self.gridLayout_5.addWidget(self.label_21, 1, 4, 1, 1)
self.label_22 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_22.setFont(font)
self.label_22.setAlignment(QtCore.Qt.AlignCenter)
self.label_22.setObjectName("label_22")
self.gridLayout_5.addWidget(self.label_22, 2, 4, 1, 1)
self.startTimeText_5 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.startTimeText_5.setObjectName("startTimeText_5")
self.gridLayout_5.addWidget(self.startTimeText_5, 1, 2, 1, 1)
self.endTimeText_5 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.endTimeText_5.setObjectName("endTimeText_5")
self.gridLayout_5.addWidget(self.endTimeText_5, 2, 2, 1, 1)
spacerItem16 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_5.addItem(spacerItem16, 1, 3, 1, 1)
spacerItem17 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_5.addItem(spacerItem17, 2, 3, 1, 1)
self.horizontalLayout_19.addLayout(self.gridLayout_5)
spacerItem18 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_19.addItem(spacerItem18)
self.label_23 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_23.setObjectName("label_23")
self.horizontalLayout_19.addWidget(self.label_23)
self.verticalLayout.addLayout(self.horizontalLayout_19)
self.horizontalLayout_24 = QtWidgets.QHBoxLayout()
self.horizontalLayout_24.setObjectName("horizontalLayout_24")
self.gridLayout_6 = QtWidgets.QGridLayout()
self.gridLayout_6.setObjectName("gridLayout_6")
self.horizontalLayout_25 = QtWidgets.QHBoxLayout()
self.horizontalLayout_25.setObjectName("horizontalLayout_25")
self.label_24 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_24.setFont(font)
self.label_24.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_24.setObjectName("label_24")
self.horizontalLayout_25.addWidget(self.label_24)
self.gridLayout_6.addLayout(self.horizontalLayout_25, 2, 1, 1, 1)
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.titleText_6 = QtWidgets.QPlainTextEdit(self.scrollAreaWidgetContents)
self.titleText_6.setObjectName("titleText_6")
self.horizontalLayout_26.addWidget(self.titleText_6)
self.gridLayout_6.addLayout(self.horizontalLayout_26, 2, 5, 1, 1)
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName("horizontalLayout_27")
self.label_25 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_25.setFont(font)
self.label_25.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_25.setObjectName("label_25")
self.horizontalLayout_27.addWidget(self.label_25)
self.gridLayout_6.addLayout(self.horizontalLayout_27, 1, 1, 1, 1)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setObjectName("horizontalLayout_28")
self.templateSelector_6 = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.templateSelector_6.setObjectName("templateSelector_6")
self.horizontalLayout_28.addWidget(self.templateSelector_6)
spacerItem19 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_28.addItem(spacerItem19)
self.gridLayout_6.addLayout(self.horizontalLayout_28, 1, 5, 1, 1)
self.label_26 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_26.setFont(font)
self.label_26.setAlignment(QtCore.Qt.AlignCenter)
self.label_26.setObjectName("label_26")
self.gridLayout_6.addWidget(self.label_26, 1, 4, 1, 1)
self.label_27 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setFamily("Apple SD Gothic Neo")
self.label_27.setFont(font)
self.label_27.setAlignment(QtCore.Qt.AlignCenter)
self.label_27.setObjectName("label_27")
self.gridLayout_6.addWidget(self.label_27, 2, 4, 1, 1)
self.startTimeText_6 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.startTimeText_6.setObjectName("startTimeText_6")
self.gridLayout_6.addWidget(self.startTimeText_6, 1, 2, 1, 1)
self.endTimeText_6 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.endTimeText_6.setObjectName("endTimeText_6")
self.gridLayout_6.addWidget(self.endTimeText_6, 2, 2, 1, 1)
spacerItem20 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_6.addItem(spacerItem20, 1, 3, 1, 1)
spacerItem21 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_6.addItem(spacerItem21, 2, 3, 1, 1)
self.horizontalLayout_24.addLayout(self.gridLayout_6)
spacerItem22 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_24.addItem(spacerItem22)
self.label_28 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_28.setObjectName("label_28")
self.horizontalLayout_24.addWidget(self.label_28)
self.verticalLayout.addLayout(self.horizontalLayout_24)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_3.addWidget(self.scrollArea)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.pushButton.setText(_translate("Form", "PushButton"))
self.pushButton_2.setText(_translate("Form", "PushButton"))
self.label_34.setText(_translate("Form", "끝"))
self.label_35.setText(_translate("Form", "시작"))
self.label_36.setText(_translate("Form", "자막 템플릿"))
self.label_37.setText(_translate("Form", "자막 텍스트"))
self.label_38.setText(_translate("Form", "Preview Image (Alternative Text)"))
self.label_3.setText(_translate("Form", "끝"))
self.label_2.setText(_translate("Form", "시작"))
self.label_5.setText(_translate("Form", "자막 템플릿"))
self.label_4.setText(_translate("Form", "자막 텍스트"))
self.label.setText(_translate("Form", "Preview Image (Alternative Text)"))
self.label_14.setText(_translate("Form", "끝"))
self.label_15.setText(_translate("Form", "시작"))
self.label_16.setText(_translate("Form", "자막 템플릿"))
self.label_17.setText(_translate("Form", "자막 텍스트"))
self.label_18.setText(_translate("Form", "Preview Image (Alternative Text)"))
self.label_19.setText(_translate("Form", "끝"))
self.label_20.setText(_translate("Form", "시작"))
self.label_21.setText(_translate("Form", "자막 템플릿"))
self.label_22.setText(_translate("Form", "자막 텍스트"))
self.label_23.setText(_translate("Form", "Preview Image (Alternative Text)"))
self.label_24.setText(_translate("Form", "끝"))
self.label_25.setText(_translate("Form", "시작"))
self.label_26.setText(_translate("Form", "자막 템플릿"))
self.label_27.setText(_translate("Form", "자막 텍스트"))
self.label_28.setText(_translate("Form", "Preview Image (Alternative Text)"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
``` |
{
"source": "joe-uragami/selene_sample",
"score": 3
} |
#### File: SamplePlayStore/pages/app_top.py
```python
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from test.selenium.pageobject.SamplePlayStore.pages.app_detail import AppDetailPage
class AppTopPage:
TITLE = "Google Play の Android アプリ"
URL = "https://play.google.com/store/apps"
APP_1ST_DETAIL_SELECTER = ".id-card-list :nth-child(1) div a"
def __init__(self, driver=None):
self.driver = driver
WebDriverWait(self.driver, 10).until(
EC.title_is(self.TITLE)
)
def get_url(self):
return self.driver.current_url
def click_first_appcard(self):
element = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.CSS_SELECTOR, self.APP_1ST_DETAIL_SELECTER))
)
element.click()
return AppDetailPage(self.driver)
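# A minimal usage sketch (assumes a Selenium WebDriver instance named
# `driver` has already been created and configured elsewhere):
#
#   driver.get(AppTopPage.URL)
#   top_page = AppTopPage(driver)
#   detail_page = top_page.click_first_appcard()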
``` |
{
"source": "Joevaen/Scikit-image_On_CT",
"score": 3
} |
#### File: Scikit-image_On_CT/Feature/plot_match.py
```python
import cv2
import matplotlib.pyplot as plt
from skimage.feature import plot_matches
def plotMatches(im1,im2,matches,locs1,locs2):
fig, ax = plt.subplots(nrows=1, ncols=1)
im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
plt.axis('off')
plot_matches(ax,im1,im2,locs1,locs2,matches,matches_color='r',only_matches=True)
plt.show()
return
``` |
{
"source": "joevandyk/pilbox",
"score": 2
} |
#### File: pilbox/test/app_test.py
```python
from __future__ import absolute_import, division, print_function, \
with_statement
import logging
import os.path
import time
import tornado.escape
import tornado.gen
import tornado.ioloop
from tornado.test.util import unittest
from tornado.testing import AsyncHTTPTestCase, gen_test
import tornado.web
from pilbox.app import PilboxApplication
from pilbox.errors import SignatureError, ClientError, HostError, \
BackgroundError, DimensionsError, FilterError, FormatError, ModeError, \
PositionError, QualityError, UrlError, ImageFormatError, FetchError
from pilbox.signature import sign
from pilbox.test import image_test
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
import cv
except ImportError:
cv = None
logger = logging.getLogger("tornado.application")
class _AppAsyncMixin(object):
def fetch_error(self, code, *args, **kwargs):
response = self.fetch(*args, **kwargs)
self.assertEqual(response.code, code)
self.assertEqual(response.headers.get("Content-Type", None),
"application/json")
return tornado.escape.json_decode(response.body)
def fetch_success(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
self.assertEqual(response.code, 200)
return response
def get_image_resize_cases(self):
cases = image_test.get_image_resize_cases()
m = dict(background="bg", filter="filter", format="fmt",
position="pos", quality="q")
for i, case in enumerate(cases):
path = "/test/data/%s" % os.path.basename(case["source_path"])
cases[i]["source_query_params"] = dict(
url=self.get_url(path),
w=case["width"] or "",
h=case["height"] or "",
mode=case["mode"])
for k in m.keys():
if k in case:
cases[i]["source_query_params"][m.get(k)] = case[k]
if case.get("format") in ["jpeg", "jpg"]:
cases[i]["content_type"] = "image/jpeg"
elif case.get("format") == "png":
cases[i]["content_type"] = "image/png"
elif case.get("format") == "webp":
cases[i]["content_type"] = "image/webp"
else:
cases[i]["content_type"] = None
return cases
class _PilboxTestApplication(PilboxApplication):
def get_handlers(self):
path = os.path.join(os.path.dirname(__file__), "data")
handlers = [(r"/test/data/test-delayed.jpg", _DelayedHandler),
(r"/test/data/(.*)",
tornado.web.StaticFileHandler,
{"path": path})]
handlers.extend(super(_PilboxTestApplication, self).get_handlers())
return handlers
class _DelayedHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.engine
def get(self):
delay = time.time() + float(self.get_argument("delay", 0.0))
yield tornado.gen.Task(
tornado.ioloop.IOLoop.instance().add_timeout, delay)
self.finish()
class AppTest(AsyncHTTPTestCase, _AppAsyncMixin):
def get_app(self):
return _PilboxTestApplication()
def test_missing_url(self):
qs = urlencode(dict(w=1, h=1))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), UrlError.get_code())
def test_missing_dimensions(self):
qs = urlencode(dict(url="http://foo.co/x.jpg"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), DimensionsError.get_code())
def test_invalid_width(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w="a", h=1))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), DimensionsError.get_code())
def test_invalid_height(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h="a"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), DimensionsError.get_code())
def test_invalid_mode(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, mode="foo"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ModeError.get_code())
def test_invalid_hexadecimal_background(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1,
mode="fill", bg="r"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), BackgroundError.get_code())
def test_invalid_long_background(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1,
mode="fill", bg="0f0f0f0f0"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), BackgroundError.get_code())
def test_invalid_position(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, pos="foo"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), PositionError.get_code())
def test_invalid_filter(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, filter="bar"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FilterError.get_code())
def test_invalid_format(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, fmt="foo"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FormatError.get_code())
def test_invalid_integer_quality(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, q="a"))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), QualityError.get_code())
def test_outofbounds_quality(self):
qs = urlencode(dict(url="http://foo.co/x.jpg", w=1, h=1, q=200))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), QualityError.get_code())
def test_unsupported_image_format(self):
path = "/test/data/test-bad-format.gif"
qs = urlencode(dict(url=self.get_url(path), w=1, h=1))
resp = self.fetch_error(415, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ImageFormatError.get_code())
def test_not_found(self):
path = "/test/data/test-not-found.jpg"
qs = urlencode(dict(url=self.get_url(path), w=1, h=1))
resp = self.fetch_error(404, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FetchError.get_code())
def test_not_connect(self):
qs = urlencode(dict(url="http://a.com/a.jpg", w=1, h=1))
resp = self.fetch_error(404, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FetchError.get_code())
def test_invalid_protocol(self):
path = os.path.join(os.path.dirname(__file__), "data", "test1.jpg")
qs = urlencode(dict(url="file://%s" % path, w=1, h=1))
resp = self.fetch_error(400, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), UrlError.get_code())
def test_valid(self):
cases = self.get_image_resize_cases()
for case in cases:
if case.get("mode") == "crop" and case.get("position") == "face":
continue
self._assert_expected_resize(case)
@unittest.skipIf(cv is None, "OpenCV is not installed")
def test_valid_face(self):
cases = self.get_image_resize_cases()
for case in cases:
if case.get("mode") == "crop" and case.get("position") == "face":
self._assert_expected_resize(case)
def _assert_expected_resize(self, case):
qs = urlencode(case["source_query_params"])
resp = self.fetch_success("/?%s" % qs)
msg = "/?%s does not match %s" \
% (qs, case["expected_path"])
if case["content_type"]:
self.assertEqual(resp.headers.get("Content-Type", None),
case["content_type"])
with open(case["expected_path"], "rb") as expected:
self.assertEqual(resp.buffer.read(), expected.read(), msg)
class AppRestrictedTest(AsyncHTTPTestCase, _AppAsyncMixin):
KEY = "abcdef"
NAME = "abc"
def get_app(self):
return _PilboxTestApplication(
client_name=self.NAME,
client_key=self.KEY,
allowed_hosts=["foo.co", "bar.io", "localhost"])
def test_missing_client_name(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1)
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ClientError.get_code())
def test_bad_client_name(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1, client="123")
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), ClientError.get_code())
def test_missing_signature(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1, client=self.NAME)
qs = urlencode(params)
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), SignatureError.get_code())
def test_bad_signature(self):
params = dict(url="http://foo.co/x.jpg", w=1, h=1,
client=self.NAME, sig="abc123")
qs = urlencode(params)
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), SignatureError.get_code())
def test_bad_host(self):
params = dict(url="http://bar.co/x.jpg", w=1, h=1, client=self.NAME)
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_error(403, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), HostError.get_code())
def test_valid(self):
cases = self.get_image_resize_cases()
for case in cases:
if case.get("mode") == "crop" and case.get("position") == "face":
continue
params = case["source_query_params"]
params["client"] = self.NAME
qs = sign(self.KEY, urlencode(params))
resp = self.fetch_success("/?%s" % qs)
msg = "/?%s does not match %s" \
% (qs, case["expected_path"])
with open(case["expected_path"], "rb") as expected:
self.assertEqual(resp.buffer.read(), expected.read(), msg)
class AppSlowTest(AsyncHTTPTestCase, _AppAsyncMixin):
def get_app(self):
return _PilboxTestApplication(timeout=0.5)
def test_timeout(self):
url = self.get_url("/test/data/test-delayed.jpg?delay=1.0")
qs = urlencode(dict(url=url, w=1, h=1))
        resp = self.fetch_error(404, "/?%s" % qs)
self.assertEqual(resp.get("error_code"), FetchError.get_code())
```
#### File: pilbox/test/errors_test.py
```python
from __future__ import absolute_import, division, print_function, \
with_statement
from tornado.test.util import unittest
from pilbox.errors import SignatureError, ClientError, HostError, \
BackgroundError, DimensionsError, FilterError, FormatError, ModeError, \
PositionError, QualityError, UrlError, ImageFormatError, FetchError, \
PilboxError
class ErrorsTest(unittest.TestCase):
def test_unique_error_codes(self):
errors = [SignatureError, ClientError, HostError, BackgroundError,
DimensionsError, FilterError, FormatError, ModeError,
PositionError, QualityError, UrlError, ImageFormatError,
FetchError]
codes = []
for error in errors:
code = str(error.get_code())
if code in codes:
self.fail("The error code, %s, is repeated" % str(code))
codes.append(code)
def test_base_not_implemented(self):
self.assertRaises(NotImplementedError, PilboxError.get_code)
``` |
{
"source": "joeVenner/AI-Tinder-BOT",
"score": 3
} |
#### File: joeVenner/AI-Tinder-BOT/TinderBotAI.py
```python
from selenium import webdriver
from time import sleep
from Checker import checker
Fb_user = ""
Fb_pass = ""
chromedriverpath = ""
###################################################
# Go to line 6 and set your Chrome Driver path    #
# example : C:\\Users\\Joe\\chromedriver.exe      #
# Go to line 4 and set your Facebook username     #
# Go to line 5 and set your Facebook password     #
###################################################
class bot():
def __init__(self):
if Fb_pass == "" or Fb_user == "" or chromedriverpath == "":
            print("Set your Facebook username and password to log in!")
            print("You need a Tinder account linked to Facebook; if you don't have one, create it first.")
else:
self.chrome_options = webdriver.ChromeOptions()
self.prefs = {"profile.default_content_setting_values.notifications": 2}
self.chrome_options.add_experimental_option("prefs", self.prefs)
self.driver = webdriver.Chrome(executable_path=chromedriverpath,options=self.chrome_options)
def login(self):
self.driver.maximize_window()
self.driver.get('https://tinder.com')
sleep(2)
loginbtn = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/span/div[2]/button').text
if 'PHONE' in loginbtn:
moreoption = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/span/button').click()
fblogin = self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/span/div[3]/button').click()
else:
self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div/div/div[3]/span/div[2]/button').click()
sleep(2)
workspace = self.driver.window_handles[0]
popup = self.driver.window_handles[1]
        self.driver.switch_to.window(popup)
sleep(2)
email_fb = self.driver.find_element_by_xpath('//*[@id="email"]')
email_fb.send_keys(Fb_user)
passwd_fb = self.driver.find_element_by_xpath('//*[@id="pass"]')
passwd_fb.send_keys(Fb_pass)
loing_fb = self.driver.find_element_by_xpath('//*[@id="u_0_0"]').click()
sleep(3)
self.driver.switch_to.window(workspace)
sleep(2)
def like(self):
sleep(2)
like = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/div[4]/button')
like.click()
sleep(1)
def dislike(self):
sleep(2)
dislike = self.driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[2]/div[2]/button')
dislike.click()
sleep(1)
def sendmsg(self):
textbox = self.driver.find_element_by_xpath('//*[@id="chat-text-area"]')
textbox.send_keys('<PASSWORD>')
sleep(2)
sendmsg = self.driver.find_element_by_xpath('//*[@id="modal-manager-canvas"]/div/div/div[1]/div/div[3]/div[3]/form/button')
sendmsg.click()
sleep(2)
def getimagelink(self):
sleep(1)
image = self.driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/div/main/div[1]/div/div/div[1]/div/div[1]/div[3]/div[1]/div/div/div/div/div').get_attribute('style')
link = image.split("\"")[1]
return link
def notintersted(self):
sleep(1)
self.driver.find_element_by_xpath('//*[@id="modal-manager"]/div/div/div[2]/button[2]').click()
b = bot()
b.login()
for i in range(50):
try:
link = b.getimagelink()
result = checker(link)
sleep(1)
print("result ",result)
if result == 1:
b.like()
else:
b.dislike()
except :
try :
b.sendmsg()
except :
b.notintersted()
``` |
{
"source": "joevgear/test_rail_util",
"score": 3
} |
#### File: joevgear/test_rail_util/testrail.py
```python
import urllib.request, urllib.error
import json, base64
class APIClient:
def __init__(self, base_url):
self.user = ''
self.password = ''
if not base_url.endswith('/'):
base_url += '/'
self.__url = base_url + 'index.php?/api/v2/'
#
# Send Get
#
# Issues a GET request (read) against the API and returns the result
# (as Python dict).
#
# Arguments:
#
# uri The API method to call including parameters
# (e.g. get_case/1)
#
def send_get(self, uri):
return self.__send_request('GET', uri, None)
#
# Send POST
#
# Issues a POST request (write) against the API and returns the result
# (as Python dict).
#
# Arguments:
#
# uri The API method to call including parameters
# (e.g. add_case/1)
# data The data to submit as part of the request (as
# Python dict, strings must be UTF-8 encoded)
#
def send_post(self, uri, data):
return self.__send_request('POST', uri, data)
def __send_request(self, method, uri, data):
url = self.__url + uri
request = urllib.request.Request(url)
if (method == 'POST'):
request.data = bytes(json.dumps(data), 'utf-8')
auth = str(
base64.b64encode(
bytes('%s:%s' % (self.user, self.password), 'utf-8')
),
'ascii'
).strip()
request.add_header('Authorization', 'Basic %s' % auth)
request.add_header('Content-Type', 'application/json')
e = None
try:
response = urllib.request.urlopen(request).read()
except urllib.error.HTTPError as ex:
response = ex.read()
e = ex
if response:
result = json.loads(response.decode())
else:
result = {}
        if e is not None:
if result and 'error' in result:
error = '"' + result['error'] + '"'
else:
error = 'No additional error message received'
raise APIError('TestRail API returned HTTP %s (%s)' %
(e.code, error))
return result
class APIError(Exception):
pass
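# A minimal usage sketch; the URL, credentials and IDs below are placeholders:
#
#   client = APIClient('https://example.testrail.io/')
#   client.user = 'user@example.com'
#   client.password = 'api-key'
#   case = client.send_get('get_case/1')
#   result = client.send_post('add_result_for_case/1/1',
#                             {'status_id': 1, 'comment': 'passed'})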
```
#### File: joevgear/test_rail_util/testrail_util.py
```python
import json
import settings
import sys
from testrail import APIClient
DEFAULT_RESULT_PARSER = "JunitXMLParser"
class TestRailUtil:
def __init__(self, username='', password='', url=''):
self.username = username
self.password = password
self.url = url
if not username:
self.username = settings.USR
if not password:
self.password = settings.PWD
if not url:
self.url = settings.URL
client = APIClient(self.url)
client.user = self.username
client.password = self.password
self.client = client
self.parser = None
def pretty(self, payload):
print(json.dumps(payload, sort_keys=True,
indent=4, separators=(',', ':')))
def add_case(self, section_id, **kwargs):
"""Creates a new test case."""
fields = {}
for key in kwargs:
fields[key] = kwargs[key]
req = self.client.send_post('add_case/' + str(section_id), data=fields)
return req
def import_test_cases(self, result_parser, **kwargs):
def spec(parser):
imp_path = 'test_result_parser.%s' % parser
mod_str, dot, cls_str = imp_path.rpartition('.')
__import__(mod_str)
try:
spec_cls_obj = getattr(sys.modules[mod_str], cls_str)
return spec_cls_obj(**kwargs)
except AttributeError:
raise ImportError('No such class %s' % imp_path)
self.parser = spec(result_parser)
print(*self.parser.get_test_cases(), sep='\n')
if __name__ == "__main__":
username = "joe"
password = "<PASSWORD>"
url = "url"
test_file = "test-results/sample-junit.xml"
util = TestRailUtil(username, password, url)
util.import_test_cases(DEFAULT_RESULT_PARSER, result_file=test_file)
# mile_stone = util.create_mile_stone()
# plan_id = util.create_test_plan()
# util.add_results()
``` |
{
"source": "JoeVieira/GA-python-bootcamp",
"score": 3
} |
#### File: JoeVieira/GA-python-bootcamp/classes.py
```python
class Table:
n_sides = 4
def __init__(self, n_legs):
self.n_legs = n_legs
self.n_chairs = 4
def set_chairs(self, n_chairs):
self.n_chairs = n_chairs
class BaseballPlayer:
def __init__(self, name, team):
self.name = name
self.team = team
def lookUpStats(self):
if self.name == "david":
return 1000
elif self.name == "billy":
return 10
class CricketPlayer:
def __init__(self, name, team):
self.name = name
self.team = team
    def lookUpStats(self):
        if self.name == "david":
            return 1000
        elif self.name == "billy":
            return 10
def main():
david = CricketPlayer("david","redsox")
david2 = CricketPlayer("david", "redsox")
things = dir(david)
billy = BaseballPlayer("billy", "rangers")
main()
``` |
{
"source": "joevin-slq/fma",
"score": 3
} |
#### File: joevin-slq/fma/creation.py
```python
import os
import sys
import shutil
import pickle
import zipfile
import subprocess as sp
from datetime import datetime
from tqdm import tqdm, trange
import pandas as pd
import utils
TIME = datetime(2017, 4, 1).timestamp()
README = """This .zip archive is part of the FMA, a dataset for music analysis.
Code & data: https://github.com/mdeff/fma
Paper: https://arxiv.org/abs/1612.01840
Each .mp3 is licensed by its artist.
The content's integrity can be verified with sha1sum -c checksums.
"""
def download_metadata():
fma = utils.FreeMusicArchive(os.environ.get('FMA_KEY'))
max_tid = int(fma.get_recent_tracks()[0][0])
print('Largest track id: {}'.format(max_tid))
not_found = {}
id_range = trange(20, desc='tracks')
tracks, not_found['tracks'] = fma.get_all('track', id_range)
id_range = tqdm(tracks['album_id'].unique(), desc='albums')
albums, not_found['albums'] = fma.get_all('album', id_range)
id_range = tqdm(tracks['artist_id'].unique(), desc='artists')
artists, not_found['artists'] = fma.get_all('artist', id_range)
genres = fma.get_all_genres()
for dataset in 'tracks', 'albums', 'artists', 'genres':
eval(dataset).sort_index(axis=0, inplace=True)
eval(dataset).sort_index(axis=1, inplace=True)
eval(dataset).to_csv('raw_' + dataset + '.csv')
pickle.dump(not_found, open('not_found.pickle', 'wb'))
def _create_subdirs(dst_dir, tracks):
# Get write access.
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
os.chmod(dst_dir, 0o777)
# Create writable sub-directories.
n_folders = max(tracks.index) // 1000 + 1
for folder in range(n_folders):
dst = os.path.join(dst_dir, '{:03d}'.format(folder))
if not os.path.exists(dst):
os.makedirs(dst)
os.chmod(dst, 0o777)
def download_data(dst_dir):
dst_dir = os.path.abspath(dst_dir)
tracks = pd.read_csv('raw_tracks.csv', index_col=0)
_create_subdirs(dst_dir, tracks)
fma = utils.FreeMusicArchive(os.environ.get('FMA_KEY'))
not_found = pickle.load(open('not_found.pickle', 'rb'))
not_found['audio'] = []
# Download missing tracks.
for tid in tqdm(tracks.index):
dst = utils.get_audio_path(dst_dir, tid)
if not os.path.exists(dst):
try:
fma.download_track(tracks.at[tid, 'track_file'], dst)
except: # requests.HTTPError
not_found['audio'].append(tid)
pickle.dump(not_found, open('not_found.pickle', 'wb'))
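# Convert a 'HH:MM:SS' or 'MM:SS' duration string from raw_tracks.csv into
# seconds; for example, convert_duration('1:02:30') evaluates to 3750.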
def convert_duration(x):
times = x.split(':')
seconds = int(times[-1])
minutes = int(times[-2])
try:
minutes += 60 * int(times[-3])
except IndexError:
pass
return seconds + 60 * minutes
def trim_audio(dst_dir):
dst_dir = os.path.abspath(dst_dir)
fma_full = os.path.join(dst_dir, 'fma_full')
fma_large = os.path.join(dst_dir, 'fma_large')
tracks = pd.read_csv('raw_tracks.csv', index_col=0)
_create_subdirs(fma_large, tracks)
not_found = pickle.load(open('not_found.pickle', 'rb'))
not_found['clips'] = []
for tid in tqdm(tracks.index):
duration = convert_duration(tracks.at[tid, 'track_duration'])
src = utils.get_audio_path(fma_full, tid)
dst = utils.get_audio_path(fma_large, tid)
if tid in not_found['audio']:
continue
elif os.path.exists(dst):
continue
elif duration <= 30:
shutil.copyfile(src, dst)
else:
start = duration // 2 - 15
command = ['ffmpeg', '-i', src,
'-ss', str(start), '-t', '30',
'-acodec', 'copy', dst]
try:
sp.run(command, check=True, stderr=sp.DEVNULL)
except sp.CalledProcessError:
not_found['clips'].append(tid)
for tid in not_found['clips']:
try:
os.remove(utils.get_audio_path(fma_large, tid))
except FileNotFoundError:
pass
pickle.dump(not_found, open('not_found.pickle', 'wb'))
def normalize_permissions_times(dst_dir):
dst_dir = os.path.abspath(dst_dir)
for dirpath, dirnames, filenames in tqdm(os.walk(dst_dir)):
for name in filenames:
dst = os.path.join(dirpath, name)
os.chmod(dst, 0o444)
os.utime(dst, (TIME, TIME))
for name in dirnames:
dst = os.path.join(dirpath, name)
os.chmod(dst, 0o555)
os.utime(dst, (TIME, TIME))
def create_zips(dst_dir):
def get_filepaths(subset):
filepaths = []
tids = tracks.index[tracks['set', 'subset'] <= subset]
for tid in tids:
filepaths.append(utils.get_audio_path('', tid))
return filepaths
def get_checksums(base_dir, filepaths):
"""Checksums are assumed to be stored in order for efficiency."""
checksums = []
with open(os.path.join(dst_dir, base_dir, 'checksums')) as f:
for filepath in filepaths:
exist = False
for line in f:
if filepath == line[42:-1]:
exist = True
break
if not exist:
raise ValueError('checksum not found: {}'.format(filepath))
checksums.append(line)
return checksums
def create_zip(zip_filename, base_dir, filepaths):
# Audio: all compressions are the same.
# CSV: stored > deflated > BZIP2 > LZMA.
# LZMA is close to BZIP2 and too recent to be widely available (unzip).
compression = zipfile.ZIP_BZIP2
zip_filepath = os.path.join(dst_dir, zip_filename)
with zipfile.ZipFile(zip_filepath, 'x', compression) as zf:
def info(name):
name = os.path.join(zip_filename[:-4], name)
info = zipfile.ZipInfo(name, (2017, 4, 1, 0, 0, 0))
info.external_attr = 0o444 << 16 | 0o2 << 30
return info
zf.writestr(info('README.txt'), README, compression)
checksums = get_checksums(base_dir, filepaths)
zf.writestr(info('checksums'), ''.join(checksums), compression)
for filepath in tqdm(filepaths):
src = os.path.join(dst_dir, base_dir, filepath)
dst = os.path.join(zip_filename[:-4], filepath)
zf.write(src, dst)
os.chmod(zip_filepath, 0o444)
os.utime(zip_filepath, (TIME, TIME))
METADATA = [
'not_found.pickle',
'raw_genres.csv', 'raw_albums.csv',
'raw_artists.csv', 'raw_tracks.csv',
'tracks.csv', 'genres.csv',
'raw_echonest.csv', 'echonest.csv', 'features.csv',
]
create_zip('fma_metadata.zip', 'fma_metadata', METADATA)
tracks = utils.load('tracks.csv')
create_zip('fma_small.zip', 'fma_large', get_filepaths('small'))
create_zip('fma_medium.zip', 'fma_large', get_filepaths('medium'))
create_zip('fma_large.zip', 'fma_large', get_filepaths('large'))
create_zip('fma_full.zip', 'fma_full', get_filepaths('large'))
if __name__ == "__main__":
if sys.argv[1] == 'metadata':
download_metadata()
elif sys.argv[1] == 'data':
download_data(sys.argv[2])
elif sys.argv[1] == 'clips':
trim_audio(sys.argv[2])
elif sys.argv[1] == 'normalize':
normalize_permissions_times(sys.argv[2])
elif sys.argv[1] == 'zips':
create_zips(sys.argv[2])
``` |
{
"source": "JoeVirtual/KonFoo",
"score": 2
} |
#### File: src/konfoo/fields.py
```python
from .core import (
Decimal, Signed, Unsigned, Bitset, Bool, Enum, Scaled, Bipolar, Unipolar)
from .enums import Enumeration
class Decimal8(Decimal):
""" A `Decimal8` field is a :class:`Decimal` field with a *size* of
one byte and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=8,
signed=signed,
byte_order=byte_order)
class Decimal16(Decimal):
""" A `Decimal16` field is a :class:`Decimal` field with a *size* of
two bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=16,
signed=signed,
byte_order=byte_order)
class Decimal24(Decimal):
""" A `Decimal24` field is a :class:`Decimal` field with a *size* of
three bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=24,
signed=signed,
byte_order=byte_order)
class Decimal32(Decimal):
""" A `Decimal32` field is a :class:`Decimal` field with a *size* of
four bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=32,
signed=signed,
byte_order=byte_order)
class Decimal64(Decimal):
""" A `Decimal64` field is a :class:`Decimal` field with a *size* of
eight bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=64,
signed=signed,
byte_order=byte_order)
class Signed8(Signed):
""" A `Signed8` field is a :class:`Signed` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Signed16(Signed):
""" A `Signed16` field is a :class:`Signed` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Signed24(Signed):
""" A `Signed24` field is a :class:`Signed` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Signed32(Signed):
""" A `Signed32` field is a :class:`Signed` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32, byte_order=byte_order)
class Signed64(Signed):
""" A `Signed64` field is a :class:`Signed` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Unsigned8(Unsigned):
    """ An `Unsigned8` field is an :class:`Unsigned` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Unsigned16(Unsigned):
    """ An `Unsigned16` field is an :class:`Unsigned` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Unsigned24(Unsigned):
    """ An `Unsigned24` field is an :class:`Unsigned` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Unsigned32(Unsigned):
    """ An `Unsigned32` field is an :class:`Unsigned` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32,
byte_order=byte_order)
class Unsigned64(Unsigned):
    """ An `Unsigned64` field is an :class:`Unsigned` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Bitset8(Bitset):
""" A `Bitset8` field is a :class:`Bitset` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Bitset16(Bitset):
""" A `Bitset16` field is a :class:`Bitset` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Bitset24(Bitset):
""" A `Bitset24` field is a :class:`Bitset` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Bitset32(Bitset):
""" A `Bitset32` field is a :class:`Bitset` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32,
byte_order=byte_order)
class Bitset64(Bitset):
""" A `Bitset64` field is a :class:`Bitset` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Bool8(Bool):
""" A `Bool8` field is a :class:`Bool` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Bool16(Bool):
""" A `Bool16` field is a :class:`Bool` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Bool24(Bool):
""" A `Bool24` field is a :class:`Bool` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Bool32(Bool):
""" A `Bool32` field is a :class:`Bool` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32,
byte_order=byte_order)
class Bool64(Bool):
""" A `Bool64` field is a :class:`Bool` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Antivalent(Enum):
""" An `Antivalent` field is an :class:`Enum` field with a *size* of
    two bits and a fixed enumeration assigned to it.
"""
class Validity(Enumeration):
error = 0
correct = 1
forced = 2
undefined = 3
def __init__(self, align_to=None, byte_order='auto'):
super().__init__(bit_size=2,
align_to=align_to,
enumeration=Antivalent.Validity,
byte_order=byte_order)
class Enum4(Enum):
""" An `Enum4` field is an :class:`Enum` field with a *size* of
four bits.
"""
def __init__(self, align_to=None, enumeration=None,
byte_order='auto'):
super().__init__(bit_size=4,
align_to=align_to,
enumeration=enumeration,
byte_order=byte_order)
class Enum8(Enum):
""" An `Enum8` field is an :class:`Enum` field with a *size* of
one byte.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=8,
enumeration=enumeration,
byte_order=byte_order)
class Enum16(Enum):
""" An `Enum16` field is an :class:`Enum` field with a *size* of
two bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=16,
enumeration=enumeration,
byte_order=byte_order)
class Enum24(Enum):
""" An `Enum24` field is an :class:`Enum` field with a *size* of
three bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=24,
enumeration=enumeration,
byte_order=byte_order)
class Enum32(Enum):
""" An `Enum32` field is an :class:`Enum` field with a *size* of
four bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=32,
enumeration=enumeration,
byte_order=byte_order)
class Enum64(Enum):
""" An `Enum64` field is an :class:`Enum` field with a *size* of
eight bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=64,
enumeration=enumeration,
byte_order=byte_order)
class Scaled8(Scaled):
""" A `Scaled8` field is a :class:`Scaled` field with a *size* of
one byte.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=8,
byte_order=byte_order)
class Scaled16(Scaled):
""" A `Scaled16` field is a :class:`Scaled` field with a *size* of
two bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=16,
byte_order=byte_order)
class Scaled24(Scaled):
""" A `Scaled24` field is a :class:`Scaled` field with a *size* of
three bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=24,
byte_order=byte_order)
class Scaled32(Scaled):
""" A `Scaled32` field is a :class:`Scaled` field with a *size* of
four bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=32,
byte_order=byte_order)
class Scaled64(Scaled):
""" A `Scaled64` field is a :class:`Scaled` field with a *size* of
eight bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=64,
byte_order=byte_order)
class Bipolar2(Bipolar):
""" A `Bipolar2` field is a :class:`Bipolar` field with a *size* of
two bytes and an integer part of two bits.
"""
def __init__(self, byte_order='auto'):
super().__init__(bits_integer=2,
bit_size=16,
byte_order=byte_order)
class Bipolar4(Bipolar):
""" A `Bipolar4` field is a :class:`Bipolar` field with a *size* of
two bytes and an integer part of four bits.
"""
def __init__(self, byte_order='auto'):
super().__init__(bits_integer=4,
bit_size=16,
byte_order=byte_order)
class Unipolar2(Unipolar):
    """ A `Unipolar2` field is a :class:`Unipolar` field with a *size* of
two bytes and an integer part of two bits.
"""
def __init__(self, byte_order='auto'):
super().__init__(bits_integer=2,
bit_size=16,
byte_order=byte_order)
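# Illustrative equivalences implied by the constructors above (sketch only):
#   Decimal16(signed=True) ~ Decimal(bit_size=16, signed=True)
#   Scaled32(scale=0.01)   ~ Scaled(scale=0.01, bit_size=32)
#   Unipolar2()            ~ Unipolar(bits_integer=2, bit_size=16)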
```
#### File: src/konfoo/globals.py
```python
from .enums import Enumeration
from .categories import Category
class ItemClass(Enumeration):
Field = 1
Container = 2
Pointer = 3
Structure = 10
Sequence = 11
Array = 12
Stream = 20
String = 21
Float = 30
Double = 31
Decimal = 40
Bit = 41
Byte = 42
Char = 43
Signed = 44
Unsigned = 45
Bitset = 46
Bool = 47
Enum = 48
Scaled = 49
Fraction = 50
Bipolar = 51
Unipolar = 52
Datetime = 53
IPAddress = 54
class Byteorder(Category):
""" Byte order categories."""
auto = 'auto'
little = 'little'
big = 'big'
#: Default Byteorder
BYTEORDER = Byteorder.little
def clamp(value, minimum, maximum):
""" Returns the *value* limited between *minimum* and *maximum*
whereby the *maximum* wins over the *minimum*.
Example:
>>> clamp(64, 0, 255)
64
>>> clamp(-128, 0, 255)
0
>>> clamp(0, 127, -128)
-128
"""
return min(max(value, minimum), maximum)
```
#### File: src/konfoo/providers.py
```python
import abc
from pathlib import Path
class Provider:
""" A `Provider` class provides access for the :class:`Pointer` class to
**read** and **write** byte streams from and back to a data *source*.
The `Provider` class servers as a meta class. A derived class must
implement the two methods :meth:`read` and :meth:`write` for reading
and writing byte streams from and back to the data *source*.
"""
@abc.abstractmethod
def read(self, address=0, count=0):
""" Returns a *number* of bytes read from a data `source` beginning at
the start *address*.
:param int address: start address.
:param int count: number of bytes to read from a data `source`.
.. note:: This abstract method must be implemented by a derived class.
"""
return bytes()
@abc.abstractmethod
def write(self, buffer=bytes(), address=0, count=0):
""" Writes the content of the *buffer* to a data `source` beginning
at the start *address*.
:param bytes buffer: content to write.
:param int address: start address.
:param int count: number of bytes to write to a data `source`.
.. note:: This abstract method must be implemented by a derived class.
"""
pass
class FileProvider(Provider):
""" A `FileProvider` is a byte stream :class:`Provider` for binary files.
The *file* content is internal stored in a :attr:`~Provider.cache`. The
:meth:`read` and :meth:`write` methods only operate on the internal
:attr:`~Provider.cache`.
Call :meth:`flush` to store the updated file content to the same or a new file.
:param file: name and location of the file to read.
:type file: :class:`~pathlib.Path`, :class:`str`
"""
def __init__(self, file):
#: File path.
self.path = Path(file).absolute()
# File cache.
self._cache = bytearray(self.path.read_bytes())
def __str__(self):
return (f"{self.__class__.__name__}"
f"({self.path!s}, {len(self._cache)!s})")
def __repr__(self):
return (f"{self.__class__.__name__}"
f"(file={self.path!r}, size={len(self._cache)!r})")
@property
def cache(self):
""" Returns the internal byte stream cache of the `Provider`
(read-only)."""
return self._cache
def read(self, address=0, count=0):
""" Returns a *number* of bytes read from the :attr:`cache` beginning
at the start *address*.
:param int address: start address.
:param int count: number of bytes to read from the cache.
"""
return self._cache[address:]
def write(self, buffer=bytes(), address=0, count=0):
""" Writes the content of the *buffer* to the :attr:`cache` beginning
at the start *address*.
:param bytes buffer: content to write.
:param int address: start address.
:param int count: number of bytes to write to the cache.
"""
view = memoryview(self._cache)
view[address:address + count] = buffer
def flush(self, file=str()):
""" Flushes the updated file content to the given *file*.
.. note:: Overwrites an existing file.
:param str file: name and location of the file.
Default is the original file.
"""
if file:
Path(file).write_bytes(self._cache)
else:
self.path.write_bytes(self._cache)
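# A minimal usage sketch (the file name is a placeholder): read the cached
# bytes, patch two bytes at offset 0x10, and flush the change back to disk.
#
#   provider = FileProvider('data.bin')
#   head = provider.read(address=0, count=16)
#   provider.write(b'\x01\x02', address=0x10, count=2)
#   provider.flush()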
``` |
{
"source": "joevtap/NotasSIGAA",
"score": 3
} |
#### File: joevtap/NotasSIGAA/setup.py
```python
def setup():
print(10*'=' + ' Notas SIGAA setup ' + 10*'=')
username = str(input('SIGAA username: '))
password = str(input('<PASSWORD>: '))
webdriver_path = str(input('Webdriver path (always use / and, if in the same dir, use ./): '))
csv_output = str(input('CSV output path (always use / and, if in the same dir, use ./): '))
g_credentials_path = str(input(
'Google Cloud credentials.json path (always use / and, if in the same dir, use ./): '))
g_sheet = str(input("Google Sheet's sheet name: "))
downloads_path = str(input('Downloads folder path (always use / and, if in the same dir, use ./): '))
with open('./.env', 'w') as file:
file.write(f"""MY_USERNAME="{username}"
MY_PASSWORD="{password}"
DRIVER_PATH="{webdriver_path}"
CSV_OUTPUT="{csv_output}"
G_CREDENTIALS="{g_credentials_path}"
G_SHEET="{g_sheet}"
DOWNLOADS_PATH="{downloads_path}"
""")
if __name__ == '__main__':
setup()
``` |
{
"source": "joewa/enron-ml-project",
"score": 2
} |
#### File: joewa/enron-ml-project/poi_id.py
```python
import os
import sys
import pickle
import numpy as np
import pandas as pd
from copy import deepcopy
import matplotlib.pyplot as plt
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, StandardScaler, RobustScaler, FunctionTransformer
from sklearn.decomposition import PCA, KernelPCA
from sklearn.feature_selection import SelectPercentile, SelectKBest, chi2, f_classif, SelectFromModel, RFECV, RFE
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GridSearchCV, ParameterGrid
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import VotingClassifier, AdaBoostClassifier, RandomForestClassifier, ExtraTreesClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
#from sklearn.compose import ColumnTransformer
# https://medium.com/dunder-data/from-pandas-to-scikit-learn-a-new-exciting-workflow-e88e2271ef62
# https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer.html#sphx-glr-auto-examples-compose-plot-column-transformer-py
from collections import Counter
# DataFrameSelector from: "Hands on machine learning with Scikit-Learn & Tensorflow"
# Better approach: https://scikit-learn.org/stable/modules/compose.html
from sklearn.base import BaseEstimator, TransformerMixin
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names=['total_payments', 'total_stock_value']):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
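# Illustrative behaviour (sketch only): selecting two columns from a DataFrame
# returns them as a plain numpy array for the next pipeline step, e.g.
#   DataFrameSelector(['total_payments', 'total_stock_value']).fit_transform(df)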
sys.path.append("../tools/")
sys.path.append("../choose_your_own/")
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
features_email_stats = ['from_messages','from_this_person_to_poi', 'to_messages','from_poi_to_this_person', 'shared_receipt_with_poi']
features_financial_salary = ['salary', 'bonus', 'long_term_incentive', 'deferred_income', 'deferral_payments', 'loan_advances', 'other', 'expenses', 'director_fees']
features_financial_salary_total = ['total_payments']
features_financial_stock = ['exercised_stock_options', 'restricted_stock', 'restricted_stock_deferred']
features_financial_stock_total = ['total_stock_value']
features_email = ['fraction_from_poi_to_this_person', 'fraction_from_this_person_to_poi'] # This is designed in Task 3
features_list = ['poi'] + ['salary', 'bonus'] # You will need to use more features
### Load the dictionary containing the dataset
# was "final_project_dataset.pkl"
with open("final_project_dataset.pkl", "rb") as data_file:
data_dict = pickle.load(data_file)
# Some utility functions do not like mixed types --
for p_id, p_info in data_dict.items():
if p_info['email_address'] == 'NaN':
p_info['email_address'] = 'unknown'
for info in p_info:
if p_info[info] == 'NaN':
p_info[info] = np.nan
data_dict_raw = deepcopy(data_dict)
### Task 2: Remove outliers
# This task has been already done in the Jupyter-Notbook, because the given
# pkl-file was corrupt, i.e. did not represent the full data from the insider
# payments sheet. Please see the notebook for more information.
#with open("final_project_dataset_PROVEN.pkl", "rb") as data_file:
# data_dict = pickle.load(data_file)
#df = pd.DataFrame.from_dict(data_dict_raw, orient='index').replace('NaN', np.nan)
#toomanynan = df.isna().sum(axis=1) > 16
#df_toomanynan = df[toomanynan]
#outliers2remove = df_toomanynan.index.values.tolist() + [df['salary'].idxmax()]
#for pos in outliers2remove:
# data_dict.pop(pos, 0) # remove them from the data_dict
data_dict_raw = deepcopy(data_dict)
### Task 3: Create new feature(s)
### Store to my_dataset for easy export below.
my_dataset = deepcopy(data_dict)
# Relevant and engineered features due to "engineering judgement"
# gotten versus deferral payments (sum it up)
df = pd.read_pickle("final_project_dataset_PROVEN.pkl")
df = df.replace(np.nan, 0.0)
f = pd.Series(features_financial_salary)
df['total_payments_corr'] = df[f[~f.isin(['deferred_income'])]].sum(axis=1)
f = pd.Series(features_financial_stock)
df['total_stock_value_corr'] = df[f[~f.isin(['restricted_stock_deferred'])]].sum(axis=1)
df['total_cash'] = df['total_payments_corr'] + df['total_stock_value_corr']
df['total_payments_corr'].replace(0.0, 1.0, inplace=True)
df['total_stock_value_corr'].replace(0.0, 1.0, inplace=True)
df['to_messages'].replace(0.0, 1.0, inplace=True)
df['from_messages'].replace(0.0, 1.0, inplace=True)
df['salary'].replace(0.0, 1.0, inplace=True)
df["bonus_to_salary"] = df["bonus"] / df["salary"]
df['fraction_salary'] = df['salary'] / df['total_payments_corr']
df['fraction_bonus'] = df['bonus'] / df['total_payments_corr']
df['fraction_long_term_incentive'] = df['long_term_incentive'] / df['total_payments_corr']
df['fraction_deferral_payments'] = df['deferral_payments'] / df['total_payments_corr']
df['fraction_deferred_income'] = df['deferred_income'] / df['total_payments_corr'] # lost cash
df['fraction_exercised_stock_options'] = df['exercised_stock_options'] / df['total_stock_value_corr']
df['fraction_restricted_stock_deferred'] = df['restricted_stock_deferred'] / df['total_stock_value_corr'] # lost stock
df['fraction_restricted_stock'] = df['restricted_stock'] / df['total_stock_value_corr']
df['fraction_employer_direct_cash'] = df['fraction_salary'] + df['fraction_bonus'] + df['fraction_long_term_incentive']
df['fraction_employer_stock_cash'] = df['total_stock_value'] / df['total_stock_value_corr']
df['fraction_director_direct_cash'] = df['director_fees'] / df['total_payments_corr']
df['fraction_from_poi_to_this_person'] = df['from_poi_to_this_person'] / df['to_messages']
df['fraction_shared_receipt_with_poi'] = df['shared_receipt_with_poi'] / df['to_messages']
df['fraction_from_this_person_to_poi'] = df['from_this_person_to_poi'] / df['from_messages']
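# Illustrative example of the e-mail fractions above: a person who sent 20 of
# their 100 outgoing messages to POIs gets fraction_from_this_person_to_poi = 0.2.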
features_fractions_lost = ['fraction_deferred_income', 'fraction_restricted_stock_deferred']
features_fractions_cash = ['fraction_employer_direct_cash', 'fraction_employer_stock_cash']
features_absolute_cash = ['total_payments_corr', 'total_stock_value_corr', 'bonus_to_salary']
features_email = ['to_messages', 'from_messages', 'fraction_from_poi_to_this_person', 'fraction_shared_receipt_with_poi', 'fraction_from_this_person_to_poi']
data_d = df.to_dict(orient='index')  # Required for Udacity's test_classifier
my_dataset = data_d
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
### Task 4: Try a varity of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
# Provided to give you a starting point. Try a variety of classifiers.
# A variety of classifiers has been investigated and compared in the Jupyter-Notebook.
# Only the best classifier is included and optimized here.
d_scaler0 = {
"scaler0": [StandardScaler(), MaxAbsScaler()] # RobustScaler(), MaxAbsScaler(), MinMaxScaler()
}
d_logreg = {
"classifier": [LogisticRegression(random_state=42)],
"classifier__C": [0.02, 0.03, 0.04, 0.05, 0.5, 1, 1e1, 1e2, 1e3, 1e5, 1e10],
"classifier__tol":[1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-10],
"classifier__class_weight":['balanced'],
"classifier__solver": ["liblinear"]
}
d_dt = {
"classifier": [DecisionTreeClassifier(random_state=42)],
"classifier__criterion": ["entropy"],
"classifier__max_depth": [5,6,7,8,9,10,11, None]
#"classifier__min_samples_leaf": [1,2,3,4,5] # Makes it worse
}
# These is the the best scoring feature selection(s)
d_bestsel = {
"selector": [DataFrameSelector()],
"selector__attribute_names": [
["total_cash", "fraction_deferred_income", "fraction_restricted_stock_deferred"],
["total_stock_value", "expenses", "from_messages", "total_stock_value_corr", "total_cash",
"bonus_to_salary", "fraction_exercised_stock_options", "fraction_restricted_stock_deferred",
"fraction_employer_stock_cash", "fraction_from_this_person_to_poi"]
]
}
pipe_params = [
#("selector0", None),
("selector", None),
("scaler0", None),
#("dimreducer", None),
("classifier", None)
]
pipe = Pipeline(pipe_params)
# Build a new pipeline with the DataFrameSelector at the beginning
param_grid_all = [
#{**d_scaler0, **d_bestsel, **d_PCA_2, **d_dt }, # Accuracy: 0.87207 Precision: 0.52920 Recall: 0.36700 F1: 0.43342 F2: 0.39097
{**d_scaler0, **d_bestsel, **d_logreg}, # Accuracy: 0.77980 Precision: 0.36862 Recall: 0.91400 F1: 0.52536 F2: 0.70530
#{**d_scaler0, **d_kNearestCentroid}, # Most simple one!
#{**d_scaler0, **d_bestsel, **d_kNearestCentroid}, # F1: WORKS
#{**d_scaler0, **d_bestsel, **d_rforest} # Remove to make it work!
]
sss = StratifiedShuffleSplit(n_splits=100, random_state=42) # Applied for ALL models
clf_best_results = []
for param_grid_clf in param_grid_all:
pg=[param_grid_clf]
print(pg)
grid_search = GridSearchCV(pipe, param_grid=pg, cv=sss, scoring="f1", n_jobs=1)
f = pd.Series(df.columns)
X_cols = f[~f.isin(['poi'])]
grid_search.fit(df[X_cols], df['poi'].values)
clf_best_results.append( {
"score": grid_search.best_score_,
"best_estimator": grid_search.best_estimator_,
"best_params_": grid_search.best_params_
} )
# Prepare the final pipeline that will work with test_classifier
df_best_results = pd.DataFrame.from_dict(clf_best_results)
best_estimator = df_best_results.loc[ df_best_results["score"].idxmax(), "best_estimator" ]
print("Best F1 score:{}".format(df_best_results["score"].max()))
pipe_final_params = [
("scaler0", best_estimator.named_steps["scaler0"]),
#("dimreducer", best_estimator.named_steps["dimreducer"]),
("classifier", best_estimator.named_steps["classifier"])
]
pipe_final = Pipeline(pipe_final_params)
features_final = best_estimator.named_steps["selector"].attribute_names
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# Example starting point. Try investigating other evaluation techniques!
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = \
train_test_split(features, labels, test_size=0.3, random_state=42)
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
dump_classifier_and_data(pipe_final, my_dataset, features_final)
``` |
{
"source": "joewalk102/Adafruit_Learning_System_Guides",
"score": 3
} |
#### File: Adafruit_Learning_System_Guides/Adafruit_IO_Air_Quality/code.py
```python
import time
import board
import busio
from digitalio import DigitalInOut
import neopixel
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
from adafruit_io.adafruit_io import IO_HTTP
from simpleio import map_range
from adafruit_pm25.uart import PM25_UART
# Uncomment below for PMSA003I Air Quality Breakout
# from adafruit_pm25.i2c import PM25_I2C
import adafruit_bme280
### Configure Sensor ###
# Return environmental sensor readings in degrees Celsius
USE_CELSIUS = False
# Interval the sensor publishes to Adafruit IO, in minutes
PUBLISH_INTERVAL = 10
### WiFi ###
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# AirLift FeatherWing
esp32_cs = DigitalInOut(board.D13)
esp32_reset = DigitalInOut(board.D12)
esp32_ready = DigitalInOut(board.D11)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Connect to a PM2.5 sensor over UART
reset_pin = None
uart = busio.UART(board.TX, board.RX, baudrate=9600)
pm25 = PM25_UART(uart, reset_pin)
# Create i2c object
i2c = busio.I2C(board.SCL, board.SDA, frequency=100000)
# Connect to a BME280 over I2C
bme_sensor = adafruit_bme280.Adafruit_BME280_I2C(i2c)
# Uncomment below for PMSA003I Air Quality Breakout
# pm25 = PM25_I2C(i2c, reset_pin)
# Uncomment below for BME680
# import adafruit_bme680
# bme_sensor = adafruit_bme680.Adafruit_BME680_I2C(i2c)
### Sensor Functions ###
def calculate_aqi(pm_sensor_reading):
"""Returns a calculated air quality index (AQI)
and category as a tuple.
NOTE: The AQI returned by this function should ideally be measured
    using the 24-hour concentration average. Calculating an AQI without
averaging will result in higher AQI values than expected.
:param float pm_sensor_reading: Particulate matter sensor value.
"""
# Check sensor reading using EPA breakpoint (Clow-Chigh)
if 0.0 <= pm_sensor_reading <= 12.0:
# AQI calculation using EPA breakpoints (Ilow-IHigh)
aqi_val = map_range(int(pm_sensor_reading), 0, 12, 0, 50)
aqi_cat = "Good"
elif 12.1 <= pm_sensor_reading <= 35.4:
aqi_val = map_range(int(pm_sensor_reading), 12, 35, 51, 100)
aqi_cat = "Moderate"
elif 35.5 <= pm_sensor_reading <= 55.4:
aqi_val = map_range(int(pm_sensor_reading), 36, 55, 101, 150)
aqi_cat = "Unhealthy for Sensitive Groups"
elif 55.5 <= pm_sensor_reading <= 150.4:
aqi_val = map_range(int(pm_sensor_reading), 56, 150, 151, 200)
aqi_cat = "Unhealthy"
elif 150.5 <= pm_sensor_reading <= 250.4:
aqi_val = map_range(int(pm_sensor_reading), 151, 250, 201, 300)
aqi_cat = "Very Unhealthy"
elif 250.5 <= pm_sensor_reading <= 350.4:
aqi_val = map_range(int(pm_sensor_reading), 251, 350, 301, 400)
aqi_cat = "Hazardous"
elif 350.5 <= pm_sensor_reading <= 500.4:
aqi_val = map_range(int(pm_sensor_reading), 351, 500, 401, 500)
aqi_cat = "Hazardous"
else:
print("Invalid PM2.5 concentration")
aqi_val = -1
aqi_cat = None
return aqi_val, aqi_cat
def sample_aq_sensor():
"""Samples PM2.5 sensor
    over a 2.3 second sample period.
"""
aq_reading = 0
aq_samples = []
# initial timestamp
time_start = time.monotonic()
# sample pm2.5 sensor over 2.3 sec sample rate
while time.monotonic() - time_start <= 2.3:
try:
aqdata = pm25.read()
aq_samples.append(aqdata["pm25 env"])
except RuntimeError:
print("Unable to read from sensor, retrying...")
continue
# pm sensor output rate of 1s
time.sleep(1)
# average sample reading / # samples
for sample in range(len(aq_samples)):
aq_reading += aq_samples[sample]
aq_reading = aq_reading / len(aq_samples)
aq_samples.clear()
return aq_reading
def read_bme(is_celsius=False):
"""Returns temperature and humidity
from BME280/BME680 environmental sensor, as a tuple.
:param bool is_celsius: Returns temperature in degrees celsius
if True, otherwise fahrenheit.
"""
humid = bme_sensor.humidity
temp = bme_sensor.temperature
if not is_celsius:
temp = temp * 1.8 + 32
return temp, humid
# Create an instance of the Adafruit IO HTTP client
io = IO_HTTP(secrets["aio_user"], secrets["aio_key"], wifi)
# Describes feeds used to hold Adafruit IO data
feed_aqi = io.get_feed("air-quality-sensor.aqi")
feed_aqi_category = io.get_feed("air-quality-sensor.category")
feed_humidity = io.get_feed("air-quality-sensor.humidity")
feed_temperature = io.get_feed("air-quality-sensor.temperature")
# Set up location metadata from secrets.py file
location_metadata = (secrets["latitude"], secrets["longitude"], secrets["elevation"])
elapsed_minutes = 0
prv_mins = 0
while True:
try:
print("Fetching time...")
cur_time = io.receive_time()
print("Time fetched OK!")
# Hourly reset
if cur_time.tm_min == 0:
prv_mins = 0
except (ValueError, RuntimeError) as e:
print("Failed to fetch time, retrying\n", e)
wifi.reset()
wifi.connect()
continue
if cur_time.tm_min >= prv_mins:
print("%d min elapsed.." % elapsed_minutes)
prv_mins = cur_time.tm_min
elapsed_minutes += 1
if elapsed_minutes >= PUBLISH_INTERVAL:
print("Sampling AQI...")
aqi_reading = sample_aq_sensor()
aqi, aqi_category = calculate_aqi(aqi_reading)
print("AQI: %d" % aqi)
print("Category: %s" % aqi_category)
# temp and humidity
print("Sampling environmental sensor...")
temperature, humidity = read_bme(USE_CELSIUS)
print("Temperature: %0.1f F" % temperature)
print("Humidity: %0.1f %%" % humidity)
# Publish all values to Adafruit IO
print("Publishing to Adafruit IO...")
try:
io.send_data(feed_aqi["key"], str(aqi), location_metadata)
io.send_data(feed_aqi_category["key"], aqi_category)
io.send_data(feed_temperature["key"], str(temperature))
io.send_data(feed_humidity["key"], str(humidity))
print("Published!")
except (ValueError, RuntimeError) as e:
print("Failed to send data to IO, retrying\n", e)
wifi.reset()
wifi.connect()
continue
# Reset timer
elapsed_minutes = 0
time.sleep(30)
```
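The `calculate_aqi()` helper above leans on `map_range()` to do the EPA's piecewise-linear interpolation. A minimal, hardware-free sketch of that arithmetic (not part of the guide file) is:
```python
# Standalone sketch of the EPA AQI interpolation used by calculate_aqi():
# a PM2.5 concentration inside a breakpoint band (c_low..c_high) maps
# linearly onto the matching AQI band (i_low..i_high).
def linear_aqi(conc, c_low, c_high, i_low, i_high):
    return (i_high - i_low) / (c_high - c_low) * (conc - c_low) + i_low

# Example: 35.0 ug/m3 sits in the "Moderate" band (12.1-35.4 -> AQI 51-100)
print(round(linear_aqi(35.0, 12.1, 35.4, 51, 100)))  # 99
```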
#### File: Adafruit_Learning_System_Guides/Animated_NeoPixel_Glow_Fur_Scarf/Animated_NeoPixel_Glow_Fur_Scarf.py
```python
import adafruit_fancyled.adafruit_fancyled as fancy
import board
import neopixel
from digitalio import DigitalInOut, Direction, Pull
led_pin = board.D1 # which pin your pixels are connected to
num_leds = 78 # how many LEDs you have
brightness = 1.0 # 0-1, higher number is brighter
saturation = 255 # 0-255, 0 is pure white, 255 is fully saturated color
steps = 0.01 # how wide the bands of color are.
offset = 0  # cumulative steps
fadeup = True # start with fading up - increase steps until offset reaches 1
index = 8 # midway color selection
blend = True # color blending between palette indices
# initialize list with all pixels off
palette = [0] * num_leds
# Declare a NeoPixel object on led_pin with num_leds as pixels
# No auto-write.
# Set brightness to max.
# We will be using FancyLED's brightness control.
strip = neopixel.NeoPixel(led_pin, num_leds, brightness=1, auto_write=False)
# button setup
button = DigitalInOut(board.D2)
button.direction = Direction.INPUT
button.pull = Pull.UP
prevkeystate = False
ledmode = 0 # button press counter, switch color palettes
# FancyLED allows for assigning a color palette using these formats:
# * The first (5) palettes here are mixing between 2-elements
# * The last (3) palettes use a format identical to the FastLED Arduino Library
# see FastLED - colorpalettes.cpp
forest = [fancy.CRGB(0, 255, 0), # green
fancy.CRGB(255, 255, 0)] # yellow
ocean = [fancy.CRGB(0, 0, 255), # blue
fancy.CRGB(0, 255, 0)] # green
purple = [fancy.CRGB(160, 32, 240), # purple
fancy.CRGB(238, 130, 238)] # violet
all_colors = [fancy.CRGB(0, 0, 0), # black
fancy.CRGB(255, 255, 255)] # white
washed_out = [fancy.CRGB(0, 0, 0), # black
fancy.CRGB(255, 0, 255)] # purple
rainbow = [0xFF0000, 0xD52A00, 0xAB5500, 0xAB7F00,
0xABAB00, 0x56D500, 0x00FF00, 0x00D52A,
0x00AB55, 0x0056AA, 0x0000FF, 0x2A00D5,
0x5500AB, 0x7F0081, 0xAB0055, 0xD5002B]
rainbow_stripe = [0xFF0000, 0x000000, 0xAB5500, 0x000000,
0xABAB00, 0x000000, 0x00FF00, 0x000000,
0x00AB55, 0x000000, 0x0000FF, 0x000000,
0x5500AB, 0x000000, 0xAB0055, 0x000000]
heat_colors = [0x330000, 0x660000, 0x990000, 0xCC0000, 0xFF0000,
0xFF3300, 0xFF6600, 0xFF9900, 0xFFCC00, 0xFFFF00,
0xFFFF33, 0xFFFF66, 0xFFFF99, 0xFFFFCC]
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if pos < 85:
return (int(pos * 3), int(255 - (pos * 3)), 0)
elif pos < 170:
pos -= 85
return (int(255 - pos * 3), 0, int(pos * 3))
else:
pos -= 170
return (0, int(pos * 3), int(255 - pos * 3))
def remapRange(value, leftMin, leftMax, rightMin, rightMax):
    # this remaps a value from the original (left) range to the new (right) range
# Figure out how 'wide' each range is
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
# Convert the left range into a 0-1 range (int)
valueScaled = int(value - leftMin) / int(leftSpan)
# Convert the 0-1 range into a value in the right range.
return int(rightMin + (valueScaled * rightSpan))
def shortkeypress(color_palette):
color_palette += 1
if color_palette > 6:
color_palette = 1
return color_palette
while True:
# check for button press
currkeystate = button.value
# button press, move to next pattern
if (prevkeystate is not True) and currkeystate:
ledmode = shortkeypress(ledmode)
# save button press state
prevkeystate = currkeystate
# Fire Colors [ HEAT ]
if ledmode == 1:
palette = heat_colors
# Forest
elif ledmode == 2:
palette = forest
# Ocean
elif ledmode == 3:
palette = ocean
# Purple Lovers
elif ledmode == 4:
palette = purple
# All the colors!
elif ledmode == 5:
palette = rainbow
# Rainbow stripes
elif ledmode == 6:
palette = rainbow_stripe
# All the colors except the greens, washed out
elif ledmode == 7:
palette = washed_out
for i in range(num_leds):
color = fancy.palette_lookup(palette, offset + i / num_leds)
color = fancy.gamma_adjust(color, brightness=brightness)
strip[i] = color.pack()
strip.show()
if fadeup:
offset += steps
if offset >= 1:
fadeup = False
else:
offset -= steps
if offset <= 0:
fadeup = True
```
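The fade in the main loop is a simple triangle wave on `offset`. A small standalone sketch (not part of the guide file) of that back-and-forth sweep:
```python
# Hardware-free sketch of the offset fade used above: offset climbs by
# `steps` until it reaches 1.0, then falls back to 0.0, sweeping the
# palette lookup position up and down.
offset, steps, fadeup = 0.0, 0.25, True
for _ in range(9):
    print(round(offset, 2), "up" if fadeup else "down")
    if fadeup:
        offset += steps
        if offset >= 1:
            fadeup = False
    else:
        offset -= steps
        if offset <= 0:
            fadeup = True
```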
#### File: Adafruit_Learning_System_Guides/CircuitPython_Flying_Toasters/code.py
```python
import time
from random import seed, randint
import board
import displayio
from adafruit_st7789 import ST7789
import adafruit_imageload
# Sprite cell values
EMPTY = 0
CELL_1 = EMPTY + 1
CELL_2 = CELL_1 + 1
CELL_3 = CELL_2 + 1
CELL_4 = CELL_3 + 1
TOAST = CELL_4 + 1
NUMBER_OF_SPRITES = TOAST + 1
# Animation support
FIRST_CELL = CELL_1
LAST_CELL = CELL_4
NUMBER_OF_CELLS = (LAST_CELL - FIRST_CELL) + 1
# A boolean array corresponding to the sprites, True if it's part of the animation sequence.
ANIMATED = [_sprite >= FIRST_CELL and _sprite <= LAST_CELL for _sprite in range(NUMBER_OF_SPRITES)]
# The chance (out of 10) that toast will enter
CHANCE_OF_NEW_TOAST = 2
# How many sprites to start with
INITIAL_NUMBER_OF_SPRITES = 4
# Global variables
display = None
tilegrid = None
seed(int(time.monotonic()))
def make_display():
"""Set up the display support.
Return the Display object.
"""
spi = board.SPI()
while not spi.try_lock():
pass
spi.configure(baudrate=24000000) # Configure SPI for 24MHz
spi.unlock()
displayio.release_displays()
display_bus = displayio.FourWire(spi, command=board.D7, chip_select=board.D10, reset=board.D9)
return ST7789(display_bus, width=240, height=240, rowstart=80, auto_refresh=True)
def make_tilegrid():
"""Construct and return the tilegrid."""
group = displayio.Group(max_size=10)
sprite_sheet, palette = adafruit_imageload.load("/spritesheet-2x.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
grid = displayio.TileGrid(sprite_sheet, pixel_shader=palette,
width=5, height=5,
tile_height=64, tile_width=64,
x=0, y=-64,
default_tile=EMPTY)
group.append(grid)
display.show(group)
return grid
def random_cell():
return randint(FIRST_CELL, LAST_CELL)
def evaluate_position(row, col):
"""Return whether how long of aa toaster is placable at the given location.
:param row: the tile row (0-9)
:param col: the tile column (0-9)
"""
return tilegrid[col, row] == EMPTY
def seed_toasters(number_of_toasters):
"""Create the initial toasters so it doesn't start empty"""
for _ in range(number_of_toasters):
while True:
row = randint(0, 4)
col = randint(0, 4)
if evaluate_position(row, col):
break
tilegrid[col, row] = random_cell()
def next_sprite(sprite):
if ANIMATED[sprite]:
return (((sprite - FIRST_CELL) + 1) % NUMBER_OF_CELLS) + FIRST_CELL
return sprite
def advance_animation():
"""Cycle through animation cells each time."""
for tile_number in range(25):
tilegrid[tile_number] = next_sprite(tilegrid[tile_number])
def slide_tiles():
"""Move the tilegrid one pixel to the bottom-left."""
tilegrid.x -= 1
tilegrid.y += 1
def shift_tiles():
"""Move tiles one spot to the left, and reset the tilegrid's position"""
for row in range(4, 0, -1):
for col in range(4):
tilegrid[col, row] = tilegrid[col + 1, row - 1]
tilegrid[4, row] = EMPTY
for col in range(5):
tilegrid[col, 0] = EMPTY
tilegrid.x = 0
tilegrid.y = -64
def get_entry_row():
while True:
row = randint(0, 4)
if tilegrid[4, row] == EMPTY and tilegrid[3, row] == EMPTY:
return row
def get_entry_column():
while True:
col = randint(0, 3)
if tilegrid[col, 0] == EMPTY and tilegrid[col, 1] == EMPTY:
return col
def add_toaster_or_toast():
"""Maybe add a new toaster or toast on the right and/or top at a randon open location"""
if randint(1, 10) <= CHANCE_OF_NEW_TOAST:
tile = TOAST
else:
tile = random_cell()
tilegrid[4, get_entry_row()] = tile
if randint(1, 10) <= CHANCE_OF_NEW_TOAST:
tile = TOAST
else:
tile = random_cell()
tilegrid[get_entry_column(), 0] = tile
display = make_display()
tilegrid = make_tilegrid()
seed_toasters(INITIAL_NUMBER_OF_SPRITES)
display.refresh()
while True:
for _ in range(64):
display.refresh(target_frames_per_second=80)
advance_animation()
slide_tiles()
shift_tiles()
add_toaster_or_toast()
display.refresh(target_frames_per_second=120)
```
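The animation relies on `next_sprite()` wrapping the four animation cells while leaving other tiles alone. A tiny standalone sketch (not from the guide file) of that modular arithmetic:
```python
# Cells FIRST..LAST cycle 1 -> 2 -> 3 -> 4 -> 1; any other tile value
# (e.g. EMPTY or TOAST) is returned unchanged by next_sprite().
FIRST, LAST = 1, 4
N = (LAST - FIRST) + 1
for cell in range(FIRST, LAST + 1):
    print(cell, "->", (((cell - FIRST) + 1) % N) + FIRST)
# prints: 1 -> 2, 2 -> 3, 3 -> 4, 4 -> 1
```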
#### File: Adafruit_Learning_System_Guides/CircuitPython_Sip_and_Puff/puff_detector.py
```python
import time
import os
import json
import board
import terminalio
from adafruit_display_text import label
from displayio import Group
import displayio
import adafruit_displayio_ssd1306
import adafruit_lps35hw
CONSOLE = False
DEBUG = True
MIN_PRESSURE = 8
HIGH_PRESSURE = 40
WAITING = 0
STARTED = 1
DETECTED = 2
SOFT_SIP = 0
HARD_SIP = 1
SOFT_PUFF = 2
HARD_PUFF = 3
SOFT = 1
STRONG = 2
COLOR = 0xFFFFFF
FONT = terminalio.FONT
DISPLAY_WIDTH = 128
DISPLAY_HEIGHT = 64
Y_OFFSET = 3
TEXT_HEIGHT = 8
BOTTOM_ROW = DISPLAY_HEIGHT - TEXT_HEIGHT
BANNER_STRING = "ST LPS33HW Sip & Puff"
pressure_string = " "
input_type_string = " "
# pylint:disable=too-many-locals,exec-used,eval-used
class PuffDetector:
def __init__(
self,
min_pressure=MIN_PRESSURE,
high_pressure=HIGH_PRESSURE,
config_filename="settings.json",
display_timeout=1,
):
# misc detection state
self.current_pressure = 0
self.current_polarity = 0
self.current_time = time.monotonic()
self.start_polarity = 0
self.peak_level = 0
self.puff_start = 0
self.duration = 0
self.state = WAITING
self.prev_state = self.state
# settings
self.settings_dict = {}
self.high_pressure = high_pressure
self.min_pressure = min_pressure
self._config_filename = config_filename
self._load_config()
# callbacks
self._on_sip_callbacks = []
self._on_puff_callbacks = []
# display and display state
self.display = None
self.state_display_start = self.current_time
self.detection_result_str = " "
self.duration_str = " "
self.min_press_str = " "
self.high_press_str = " "
self.state_str = " "
self.press_str = " "
self.display_timeout = display_timeout
self._init_stuff()
def _init_stuff(self):
# decouple display
self.state_display_timeout = 1.0
self.state_display_start = 0
displayio.release_displays()
i2c = board.I2C()
display_bus = displayio.I2CDisplay(i2c, device_address=0x3D)
self.display = adafruit_displayio_ssd1306.SSD1306(
display_bus, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT
)
self.min_press_str = "min: %d" % self.min_pressure
self.high_press_str = "hi: %d" % self.high_pressure
self.pressure_sensor = adafruit_lps35hw.LPS35HW(i2c)
self.pressure_sensor.zero_pressure()
self.pressure_sensor.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
self.pressure_sensor.filter_enabled = True
self.pressure_sensor.filter_config = True
def _load_config(self):
if not self._config_filename in os.listdir("/"):
return
try:
with open(self._config_filename, "r") as file:
self.settings_dict = json.load(file)
except (ValueError, OSError) as error:
print("Error loading config file")
print(type(error))
if self.settings_dict:
if "MIN_PRESSURE" in self.settings_dict.keys():
self.min_pressure = self.settings_dict["MIN_PRESSURE"]
if "HIGH_PRESSURE" in self.settings_dict.keys():
self.high_pressure = self.settings_dict["HIGH_PRESSURE"]
if "DISPLAY_TIMEOUT" in self.settings_dict.keys():
self.display_timeout = self.settings_dict["DISPLAY_TIMEOUT"]
def check_for_events(self):
self.current_time = time.monotonic()
self.current_pressure = self.pressure_sensor.pressure
self._update_state()
self._notify_callbacks()
self._update_display()
def run(self):
while True:
self.check_for_events()
def _categorize_pressure(self, pressure):
"""determine the strength and polarity of the pressure reading"""
level = 0
polarity = 0
abs_pressure = abs(pressure)
if abs_pressure > self.min_pressure:
level = 1
if abs_pressure > self.high_pressure:
level = 2
if level != 0:
if pressure > 0:
polarity = 1
else:
polarity = -1
return (polarity, level)
def on_sip(self, func):
self.add_on_sip(func)
return func
def on_puff(self, func):
self.add_on_puff(func)
return func
def add_on_sip(self, new_callback):
self._on_sip_callbacks.append(new_callback)
def add_on_puff(self, new_callback):
self._on_puff_callbacks.append(new_callback)
def _update_state(self):
"""Updates the internal state to detect if a sip/puff has been started or stopped"""
self.current_polarity, level = self._categorize_pressure(self.current_pressure)
if self.state == DETECTED:
self.state = WAITING
self.start_polarity = 0
self.peak_level = 0
self.duration = 0
if (self.state == WAITING) and level != 0 and (self.start_polarity == 0):
self.state = STARTED
self.start_polarity = self.current_polarity
self.puff_start = time.monotonic()
if self.state == STARTED:
if level > self.peak_level:
self.peak_level = level
if level == 0:
self.state = DETECTED
self.duration = time.monotonic() - self.puff_start
def _notify_callbacks(self):
state_changed = self.prev_state != self.state
self.prev_state = self.state
if not state_changed:
return
if self.state == DETECTED:
# if this is a sip
if self.start_polarity == -1:
for on_sip_callback in self._on_sip_callbacks:
on_sip_callback(self.peak_level, self.duration)
            # if this is a puff
if self.start_polarity == 1:
for on_puff_callback in self._on_puff_callbacks:
on_puff_callback(self.peak_level, self.duration)
def _update_display_strings(self):
self.press_str = "Press: %0.3f" % self.current_pressure
if self.state == DETECTED:
self.duration_str = "Duration: %0.2f" % self.duration
self.state_str = "DETECTED:"
if self.start_polarity == -1:
if self.peak_level == STRONG:
self.detection_result_str = "STRONG SIP"
if self.peak_level == SOFT:
self.detection_result_str = "SOFT SIP"
if self.start_polarity == 1:
if self.peak_level == STRONG:
self.detection_result_str = "STRONG PUFF"
if self.peak_level == SOFT:
self.detection_result_str = "SOFT PUFF"
self.state_display_start = self.current_time
elif self.state == WAITING:
display_elapsed = self.current_time - self.state_display_start
if display_elapsed > self.display_timeout:
self.detection_result_str = " "
self.duration_str = " "
self.detection_result_str = " "
self.state_str = "WAITING FOR INPUT"
elif self.state == STARTED:
if self.start_polarity == -1:
self.state_str = "SIP STARTED..."
if self.start_polarity == 1:
self.state_str = "PUFF STARTED..."
def _update_display(self):
self._update_display_strings()
banner = label.Label(FONT, text=BANNER_STRING, color=COLOR)
state = label.Label(FONT, text=self.state_str, color=COLOR)
detector_result = label.Label(FONT, text=self.detection_result_str, color=COLOR)
duration = label.Label(FONT, text=self.duration_str, color=COLOR)
min_pressure_label = label.Label(FONT, text=self.min_press_str, color=COLOR)
high_pressure_label = label.Label(FONT, text=self.high_press_str, color=COLOR)
pressure_label = label.Label(FONT, text=self.press_str, color=COLOR)
banner.x = 0
banner.y = 0 + Y_OFFSET
state.x = 10
state.y = 10 + Y_OFFSET
detector_result.x = 10
detector_result.y = 20 + Y_OFFSET
duration.x = 10
duration.y = 30 + Y_OFFSET
min_pressure_label.x = 0
min_pressure_label.y = BOTTOM_ROW - 10
pressure_label.x = DISPLAY_WIDTH - pressure_label.bounding_box[2]
pressure_label.y = BOTTOM_ROW
high_pressure_label.x = 0
high_pressure_label.y = BOTTOM_ROW
splash = Group(max_size=10)
splash.append(banner)
splash.append(state)
splash.append(detector_result)
splash.append(duration)
splash.append(min_pressure_label)
splash.append(high_pressure_label)
splash.append(pressure_label)
self.display.show(splash)
```
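`PuffDetector` registers handlers either through `add_on_sip`/`add_on_puff` or the `on_sip`/`on_puff` decorators. A hypothetical caller (not part of the guide file, and assuming the same CircuitPython hardware as `puff_detector.py`) might look like:
```python
# Hypothetical code.py that imports the class defined above.
from puff_detector import PuffDetector, STRONG

detector = PuffDetector()

@detector.on_sip
def handle_sip(strength, duration):
    print("sip:", "strong" if strength == STRONG else "soft", duration)

@detector.on_puff
def handle_puff(strength, duration):
    print("puff:", "strong" if strength == STRONG else "soft", duration)

detector.run()  # polls check_for_events() forever
```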
#### File: Adafruit_Learning_System_Guides/CLUE_Light_Painter/richbutton.py
```python
from time import monotonic
from digitalio import DigitalInOut, Direction, Pull
# pylint: disable=too-many-instance-attributes, too-few-public-methods
class RichButton:
"""
A button class handling more than basic taps: adds debounced tap,
double-tap, hold and release.
"""
TAP = 0
DOUBLE_TAP = 1
HOLD = 2
RELEASE = 3
def __init__(self, pin, *, debounce_period=0.05, hold_period=0.75,
double_tap_period=0.3):
"""
Constructor for RichButton class.
Arguments:
pin (int) : Digital pin connected to button
(opposite leg to GND). Pin will be
configured as INPUT with pullup.
Keyword arguments:
debounce_period (float) : interval, in seconds, in which multiple
presses are ignored (debounced)
(default = 0.05 seconds).
hold_period (float) : interval, in seconds, when a held
button will return a HOLD value from
the action() function (default = 0.75).
double_tap_period (float): interval, in seconds, when a double-
tap can be sensed (vs returning
a second single-tap) (default = 0.3).
Longer double-tap periods will make
single-taps less responsive.
"""
self.in_out = DigitalInOut(pin)
self.in_out.direction = Direction.INPUT
self.in_out.pull = Pull.UP
self._debounce_period = debounce_period
self._hold_period = hold_period
self._double_tap_period = double_tap_period
self._holding = False
self._tap_time = -self._double_tap_period
self._press_time = monotonic()
self._prior_state = self.in_out.value
def action(self):
"""
Process pin input. This MUST be called frequently for debounce, etc.
to work, since interrupts are not available.
Returns:
None, TAP, DOUBLE_TAP, HOLD or RELEASE.
"""
new_state = self.in_out.value
if new_state != self._prior_state:
# Button state changed since last call
self._prior_state = new_state
if not new_state:
# Button initially pressed (TAP not returned until debounce)
self._press_time = monotonic()
else:
# Button initially released
if self._holding:
# Button released after hold
self._holding = False
return self.RELEASE
if (monotonic() - self._press_time) >= self._debounce_period:
# Button released after valid debounce time
if monotonic() - self._tap_time < self._double_tap_period:
# Followed another recent tap, reset double timer
self._tap_time = 0
return self.DOUBLE_TAP
# Else regular debounced release, maybe 1st tap, keep time
self._tap_time = monotonic()
else:
# Button is in same state as last call
if self._prior_state:
# Is not pressed
if (self._tap_time > 0 and
(monotonic() - self._tap_time) > self._double_tap_period):
# Enough time since last tap that it's not a double
self._tap_time = 0
return self.TAP
elif (not self._holding and
(monotonic() - self._press_time) >= self._hold_period):
# Is pressed, and has been for the holding period
self._holding = True
return self.HOLD
return None
```
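Since no interrupts are used, `action()` has to be polled in a tight loop. A hypothetical caller (not part of the guide file; the pin choice is an assumption):
```python
# Hypothetical polling loop; assumes a button wired between board.D0 and GND.
import board
from richbutton import RichButton

button = RichButton(board.D0)
while True:
    event = button.action()
    if event == RichButton.TAP:
        print("tap")
    elif event == RichButton.DOUBLE_TAP:
        print("double tap")
    elif event == RichButton.HOLD:
        print("hold")
    elif event == RichButton.RELEASE:
        print("released after hold")
```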
#### File: CLUE_Rock_Paper_Scissors/tests/test_rps_advertisements.py
```python
import sys
import os
import unittest
from unittest.mock import MagicMock
verbose = int(os.getenv('TESTVERBOSE', '2'))
# PYTHONPATH needs to be set to find adafruit_ble
# Mocking library used by adafruit_ble
sys.modules['_bleio'] = MagicMock()
# Borrowing the dhalbert/tannewt technique from adafruit/Adafruit_CircuitPython_Motor
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# import what we are testing or will test in future
# pylint: disable=unused-import,wrong-import-position
from rps_advertisements import JoinGameAdvertisement, \
RpsEncDataAdvertisement, \
RpsKeyDataAdvertisement, \
RpsRoundEndAdvertisement
# pylint: disable=line-too-long
class Test_RpsEncDataAdvertisement(unittest.TestCase):
def test_bytes_order(self):
"""Testing the order of data inside the manufacturer's field to ensure it follows the
fields are set in. This is new behaviour to benefit prefix matching."""
rpsedad1 = RpsEncDataAdvertisement(enc_data=b"FIRST", round_no=33, sequence_number=17)
# This checks value is not the old incorrect order
self.assertNotEqual(bytes(rpsedad1),
b"\x16\xff\x22\x08\x03\x03\x00\x11\nA\xfeFIRST\x00\x00\x00\x03C\xfe\x21",
msg="Checking order of serialised data for"
" ackless RpsEncDataAdvertisement does"
" not follow previous incorrect order")
        # This checks for correct order
self.assertEqual(bytes(rpsedad1),
b"\x16\xff\x22\x08\x0a\x41\xfeFIRST\x00\x00\x00\x03C\xfe\x21\x03\x03\x00\x11",
msg="Checking order of serialised data for"
" ackless RpsEncDataAdvertisement")
rpsedad1.ack = 29
self.assertEqual(bytes(rpsedad1),
b"\x1a\xff\x22\x08\nA\xfeFIRST\x00\x00\x00\x03C\xfe!\x03\x03\x00\x11\x03Q\xfe\x1d",
msg="Checking order of serialised data for"
" RpsEncDataAdvertisement with ack set post construction")
class Test_RpsKeyDataAdvertisement(unittest.TestCase):
def test_bytes_order(self):
"""Testing the order of data inside the manufacturer's field to ensure it follows the
fields are set in. This is new behaviour to benefit prefix matching."""
rpskdad1 = RpsKeyDataAdvertisement(key_data=b"FIRST", round_no=33, sequence_number=17)
# This checks value is not the old incorrect order
self.assertNotEqual(bytes(rpskdad1),
b"\x16\xff\x22\x08\x03\x03\x00\x11\nB\xfeFIRST\x00\x00\x00\x03C\xfe\x21",
msg="Checking order of serialised data for"
" ackless RpsKeyDataAdvertisement does"
" not follow previous incorrect order")
        # This checks for correct order
self.assertEqual(bytes(rpskdad1),
b"\x16\xff\x22\x08\x0a\x42\xfeFIRST\x00\x00\x00\x03C\xfe\x21\x03\x03\x00\x11",
msg="Checking order of serialised data for"
" ackless RpsKeyDataAdvertisement")
rpskdad1.ack = 29
self.assertEqual(bytes(rpskdad1),
b"\x1a\xff\x22\x08\nB\xfeFIRST\x00\x00\x00\x03C\xfe!\x03\x03\x00\x11\x03Q\xfe\x1d",
msg="Checking order of serialised data for"
" RpsKeyDataAdvertisement with ack set post construction")
class Test_RpsRoundEndAdvertisement(unittest.TestCase):
def test_bytes_order(self):
"""Testing the order of data inside the manufacturer's field to ensure it follows the
fields are set in. This is new behaviour to benefit prefix matching."""
rpsread1 = RpsRoundEndAdvertisement(round_no=133, sequence_number=201)
# This checks value is not the old incorrect order
self.assertNotEqual(bytes(rpsread1),
b"\x0b\xff\x22\x08\x03\x03\x00\xc9\x03C\xfe\x85",
msg="Checking order of serialised data for"
" ackless RpsRoundEndAdvertisement does"
" not follow previous incorrect order")
        # This checks for correct order
self.assertEqual(bytes(rpsread1),
b"\x0b\xff\x22\x08\x03C\xfe\x85\x03\x03\x00\xc9",
msg="Checking order of serialised data for"
" ackless RpsRoundEndAdvertisement")
rpsread1.ack = 200
self.assertEqual(bytes(rpsread1),
b"\x0f" b"\xff\x22\x08\x03C\xfe\x85\x03\x03\x00\xc9" b"\x03Q\xfe\xc8",
msg="Checking order of serialised data for"
" RpsRoundEndAdvertisement with ack set post construction")
if __name__ == '__main__':
unittest.main(verbosity=verbose)
```
#### File: Adafruit_Learning_System_Guides/CPX_Sound_Box/code.py
```python
import time
import board
from digitalio import DigitalInOut, Direction, Pull
import audioio
import neopixel
filename = "electrons.wav"
# The pad our button is connected to:
button = DigitalInOut(board.A4)
button.direction = Direction.INPUT
button.pull = Pull.UP
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=1)
# NeoPixel Animation
def simpleCircle(wait):
PURPLE = (255, 0, 255)
BLACK = (0, 0, 0)
CYAN = (0, 255, 255)
ORANGE = (255, 255, 0)
for i in range(len(pixels)):
pixels[i] = PURPLE
time.sleep(wait)
for i in range(len(pixels)):
pixels[i] = CYAN
time.sleep(wait)
for i in range(len(pixels)):
pixels[i] = ORANGE
time.sleep(wait)
for i in range(len(pixels)):
pixels[i] = BLACK
time.sleep(wait)
# Audio Play File
def play_file(playname):
print("Playing File " + playname)
wave_file = open(playname, "rb")
with audioio.WaveFile(wave_file) as wave:
with audioio.AudioOut(board.A0) as audio:
audio.play(wave)
while audio.playing:
simpleCircle(.02)
print("finished")
while True:
if not button.value:
play_file(filename)
```
#### File: Crickits/dannybot/code.py
```python
import time
import random
import audioio
from digitalio import DigitalInOut, Pull, Direction
from adafruit_seesaw.seesaw import Seesaw
from adafruit_seesaw.pwmout import PWMOut
from adafruit_motor import servo
from busio import I2C
import board
wavefiles = ["01.wav", "02.wav", "03.wav", "04.wav", "05.wav", "06.wav",
"07.wav", "08.wav", "09.wav", "10.wav", "11.wav", "12.wav",
"13.wav", "14.wav"]
# Create seesaw object
i2c = I2C(board.SCL, board.SDA)
seesaw = Seesaw(i2c)
led = DigitalInOut(board.D13)
led.direction = Direction.OUTPUT
buttona = DigitalInOut(board.BUTTON_A)
buttona.direction = Direction.INPUT
buttona.pull = Pull.DOWN
buttonb = DigitalInOut(board.BUTTON_B)
buttonb.direction = Direction.INPUT
buttonb.pull = Pull.DOWN
SWITCH = 2 # A switch on signal #0
# Add a pullup on the switch
seesaw.pin_mode(SWITCH, seesaw.INPUT_PULLUP)
# Servo angles
MOUTH_START = 95
MOUTH_END = 90
# 17 is labeled SERVO 1 on CRICKIT
pwm = PWMOut(seesaw, 17)
# must be 50 cannot change
pwm.frequency = 50
# microservo usually is 400/2500 (tower pro sgr2r)
my_servo = servo.Servo(pwm, min_pulse=400, max_pulse=2500)
# Starting servo locations
my_servo.angle = MOUTH_START
# Audio playback object and helper to play a full file
a = audioio.AudioOut(board.A0)
def play_file(wavfile):
print("Playing", wavfile)
with open(wavfile, "rb") as f:
wav = audioio.WaveFile(f)
a.play(wav)
while a.playing:
my_servo.angle = MOUTH_END
time.sleep(.15)
my_servo.angle = MOUTH_START
time.sleep(.15)
while True:
if seesaw.digital_read(SWITCH) and not buttona.value and not buttonb.value:
continue
play_file(random.choice(wavefiles))
# wait for buttons to be released
while buttona.value or buttonb.value or not seesaw.digital_read(SWITCH):
pass
```
#### File: Adafruit_Learning_System_Guides/FruitBox_Sequencer/main.py
```python
import time
from adafruit_circuitplayground.express import cpx
# Change this number to adjust touch sensitivity threshold, 0 is default
cpx.adjust_touch_threshold(600)
bpm = 60 # quarter note beats per minute, change this to suit your tempo
beat = 15 / bpm # 16th note expressed as seconds
WHITE = (30, 30, 30)
RED = (90, 0, 0)
YELLOW = (45, 45, 0)
GREEN = (0, 90, 0)
AQUA = (0, 45, 45)
BLUE = (0, 0, 90)
PURPLE = (45, 0, 45)
BLACK = (0, 0, 0)
cpx.pixels.brightness = 0.1 # set brightness value
# The seven files assigned to the touchpads
audio_files = ["fB_bd_tek.wav", "fB_elec_hi_snare.wav", "fB_elec_cymbal.wav",
"fB_elec_blip2.wav", "fB_bd_zome.wav", "fB_bass_hit_c.wav",
"fB_drum_cowbell.wav"]
step_advance = 0 # to count steps
step = 0
# sixteen steps in a sequence
step_note = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1]
# step pixels
step_pixel = [9, 8, 7, 6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
# step colors
step_col = [WHITE, RED, YELLOW, GREEN, AQUA, BLUE, PURPLE, BLACK]
def prog_mode(index):
cpx.play_file(audio_files[index])
step_note[step] = index
cpx.pixels[step_pixel[step]] = step_col[step_note[step]]
print("playing file " + audio_files[index])
while True:
# playback mode
if cpx.switch: # switch is slid to the left, play mode
cpx.red_led = False
if cpx.button_a:
cpx.pixels.fill(GREEN)
time.sleep(.2)
cpx.pixels.fill(BLACK)
if cpx.button_b:
for i in range(16):
step = i
# light a pixel
cpx.pixels[step_pixel[i]] = step_col[step_note[i]]
cpx.pixels[step_pixel[i - 1]] = BLACK
# play a file
cpx.play_file(audio_files[step_note[i]])
# sleep a beat
time.sleep(beat)
cpx.pixels.fill(BLACK)
# beat programming mode
else: # switch is slid to the right, record mode
cpx.red_led = True
if cpx.button_a: # clear pixels, reset step to first step
cpx.pixels.fill(RED)
time.sleep(.2)
cpx.pixels.fill(BLACK)
cpx.pixels[9] = WHITE
step = 0
step_advance = 0
# press B button to advance neo pixel steps
if cpx.button_b: # button has been pressed
step_advance += 1
step = step_advance % 16
cpx.play_file(audio_files[step_note[step]])
cpx.pixels[step_pixel[step]] = step_col[step_note[step]]
cpx.pixels[step_pixel[step - 1]] = BLACK
if cpx.touch_A1:
prog_mode(0)
if cpx.touch_A2:
prog_mode(1)
if cpx.touch_A3:
prog_mode(2)
if cpx.touch_A4:
prog_mode(3)
if cpx.touch_A5:
prog_mode(4)
if cpx.touch_A6:
prog_mode(5)
if cpx.touch_A7:
prog_mode(6)
```
#### File: Adafruit_Learning_System_Guides/Hallowing_Lightsaber/lightsaber_standard.py
```python
import time
import math
import audioio
import busio
import board
import touchio
import neopixel
import adafruit_lis3dh
# CUSTOMIZE YOUR COLOR HERE:
# (red, green, blue) -- each 0 (off) to 255 (brightest)
COLOR = (0, 100, 255) # jedi
#COLOR = (255, 0, 0) # sith
# CUSTOMIZE SENSITIVITY HERE: smaller numbers = more sensitive to motion
HIT_THRESHOLD = 250
SWING_THRESHOLD = 125
NUM_PIXELS = 30 # NeoPixel strip length (in pixels)
NEOPIXEL_PIN = board.EXTERNAL_NEOPIXEL # Pin where NeoPixels are connected
STRIP = neopixel.NeoPixel(NEOPIXEL_PIN, NUM_PIXELS, brightness=1, auto_write=False)
STRIP.fill(0) # NeoPixels off ASAP on startup
STRIP.show()
TOUCH = touchio.TouchIn(board.A2) # Rightmost capacitive touch pad
AUDIO = audioio.AudioOut(board.A0) # Speaker
MODE = 0 # Initial mode = OFF
# Set up accelerometer on I2C bus, 4G range:
I2C = busio.I2C(board.SCL, board.SDA)
try:
ACCEL = adafruit_lis3dh.LIS3DH_I2C(I2C, address=0x18) # Production board
except:
ACCEL = adafruit_lis3dh.LIS3DH_I2C(I2C, address=0x19) # Beta hardware
ACCEL.range = adafruit_lis3dh.RANGE_4_G
# "Idle" color is 1/4 brightness, "swinging" color is full brightness...
COLOR_IDLE = (int(COLOR[0] / 4), int(COLOR[1] / 4), int(COLOR[2] / 4))
COLOR_SWING = COLOR
COLOR_HIT = (255, 255, 255) # "hit" color is white
def play_wav(name, loop=False):
"""
Play a WAV file in the 'sounds' directory.
@param name: partial file name string, complete name will be built around
this, e.g. passing 'foo' will play file 'sounds/foo.wav'.
@param loop: if True, sound will repeat indefinitely (until interrupted
by another sound).
"""
try:
wave_file = open('sounds/' + name + '.wav', 'rb')
wave = audioio.WaveFile(wave_file)
AUDIO.play(wave, loop=loop)
except:
return
def power(sound, duration, reverse):
"""
Animate NeoPixels with accompanying sound effect for power on / off.
@param sound: sound name (similar format to play_wav() above)
@param duration: estimated duration of sound, in seconds (>0.0)
@param reverse: if True, do power-off effect (reverses animation)
"""
start_time = time.monotonic() # Save function start time
play_wav(sound)
while True:
elapsed = time.monotonic() - start_time # Time spent in function
if elapsed > duration: # Past sound duration?
break # Stop animating
fraction = elapsed / duration # Animation time, 0.0 to 1.0
if reverse:
fraction = 1.0 - fraction # 1.0 to 0.0 if reverse
fraction = math.pow(fraction, 0.5) # Apply nonlinear curve
threshold = int(NUM_PIXELS * fraction + 0.5)
for pixel in range(NUM_PIXELS): # Fill NeoPixel strip
if pixel <= threshold:
STRIP[pixel] = COLOR_IDLE # ON pixels BELOW threshold
else:
STRIP[pixel] = 0 # OFF pixels ABOVE threshold
STRIP.show()
if reverse:
STRIP.fill(0) # At end, ensure strip is off
else:
STRIP.fill(COLOR_IDLE) # or all pixels set on
STRIP.show()
while AUDIO.playing: # Wait until audio done
pass
def mix(color_1, color_2, weight_2):
"""
Blend between two colors with a given ratio.
@param color_1: first color, as an (r,g,b) tuple
@param color_2: second color, as an (r,g,b) tuple
@param weight_2: Blend weight (ratio) of second color, 0.0 to 1.0
@return: (r,g,b) tuple, blended color
"""
if weight_2 < 0.0:
weight_2 = 0.0
elif weight_2 > 1.0:
weight_2 = 1.0
weight_1 = 1.0 - weight_2
return (int(color_1[0] * weight_1 + color_2[0] * weight_2),
int(color_1[1] * weight_1 + color_2[1] * weight_2),
int(color_1[2] * weight_1 + color_2[2] * weight_2))
# Main program loop, repeats indefinitely
while True:
if TOUCH.value: # Capacitive pad touched?
        if MODE == 0:                          # If currently off...
power('on', 1.7, False) # Power up!
play_wav('idle', loop=True) # Play background hum sound
MODE = 1 # ON (idle) mode now
else: # else is currently on...
power('off', 1.15, True) # Power down
MODE = 0 # OFF mode now
while TOUCH.value: # Wait for button release
time.sleep(0.2) # to avoid repeated triggering
elif MODE >= 1: # If not OFF mode...
ACCEL_X, ACCEL_Y, ACCEL_Z = ACCEL.acceleration # Read accelerometer
ACCEL_SQUARED = ACCEL_X * ACCEL_X + ACCEL_Z * ACCEL_Z
# (Y axis isn't needed for this, assuming Hallowing is mounted
# sideways to stick. Also, square root isn't needed, since we're
# just comparing thresholds...use squared values instead, save math.)
if ACCEL_SQUARED > HIT_THRESHOLD: # Large acceleration = HIT
TRIGGER_TIME = time.monotonic() # Save initial time of hit
play_wav('hit') # Start playing 'hit' sound
COLOR_ACTIVE = COLOR_HIT # Set color to fade from
MODE = 3 # HIT mode
        elif MODE == 1 and ACCEL_SQUARED > SWING_THRESHOLD: # Mild = SWING
TRIGGER_TIME = time.monotonic() # Save initial time of swing
play_wav('swing') # Start playing 'swing' sound
COLOR_ACTIVE = COLOR_SWING # Set color to fade from
MODE = 2 # SWING mode
elif MODE > 1: # If in SWING or HIT mode...
if AUDIO.playing: # And sound currently playing...
BLEND = time.monotonic() - TRIGGER_TIME # Time since triggered
if MODE == 2: # If SWING,
BLEND = abs(0.5 - BLEND) * 2.0 # ramp up, down
STRIP.fill(mix(COLOR_ACTIVE, COLOR_IDLE, BLEND))
STRIP.show()
else: # No sound now, but still MODE > 1
play_wav('idle', loop=True) # Resume background hum
STRIP.fill(COLOR_IDLE) # Set to idle color
STRIP.show()
MODE = 1 # IDLE mode now
```
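The swing/hit animation hinges on `mix()` linearly blending two colors by a weight. A hardware-free sketch of that blend (not from the guide file, reimplemented inline so it runs anywhere):
```python
# Same math as mix(): weight_2 = 0.0 returns the first color, 1.0 the
# second, and values in between give a proportional blend per channel.
def blend(color_1, color_2, weight_2):
    weight_2 = min(max(weight_2, 0.0), 1.0)
    weight_1 = 1.0 - weight_2
    return tuple(int(a * weight_1 + b * weight_2)
                 for a, b in zip(color_1, color_2))

print(blend((255, 255, 255), (0, 100, 255), 0.0))  # (255, 255, 255) "hit" white
print(blend((255, 255, 255), (0, 100, 255), 1.0))  # (0, 100, 255) swing color
print(blend((255, 255, 255), (0, 100, 255), 0.5))  # (127, 177, 255) halfway
```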
#### File: Adafruit_Learning_System_Guides/Kegomatic/adabot.py
```python
import pygame, sys
from pygame.locals import *
class adabot():
image = ''
x = 0
y = 0
ll = 0 # left limit
rl = 0 # right limit
direction = 'right'
def __init__(self, x, y, ll, rl):
self.image = pygame.image.load('adabot.png')
self.image = self.image.convert_alpha()
self.x = x
self.y = y
self.ll = ll
self.rl = rl
def update(self):
if (self.direction == 'right'):
self.x += 5
else:
self.x -= 5
if (self.x > self.rl or self.x < self.ll):
self.direction = 'right' if self.direction == 'left' else 'left'
```
#### File: Adafruit_Learning_System_Guides/Light_Paintstick_HalloWing/light_paintstick_hallowing.py
```python
import gc
import time
import board
import touchio
import digitalio
from analogio import AnalogIn
from neopixel_write import neopixel_write
# uncomment one line only here to select bitmap
FILENAME = "bats.bmp" # BMP file to load from flash filesystem
#FILENAME = "digikey.bmp"
#FILENAME = "burger.bmp"
#FILENAME = "afbanner.bmp"
#FILENAME = "blinka.bmp"
#FILENAME = "ghost04.bmp"
#FILENAME = "ghost07.bmp"
#FILENAME = "ghost02.bmp"
#FILENAME = "helix-32x30.bmp"
#FILENAME = "wales2-107x30.bmp"
#FILENAME = "pumpkin.bmp"
#FILENAME = "rainbow.bmp"
#FILENAME = "rainbowRoad.bmp"
#FILENAME = "rainbowZig.bmp"
#FILENAME = "skull.bmp"
#FILENAME = "adabot.bmp"
#FILENAME = "green_stripes.bmp"
#FILENAME = "red_blue.bmp"
#FILENAME = "minerva.bmp"
TOUCH = touchio.TouchIn(board.A2) # Rightmost capacitive touch pad
ANALOG = AnalogIn(board.SENSE) # Potentiometer on SENSE pin
BRIGHTNESS = 1.0 # NeoPixel brightness 0.0 (min) to 1.0 (max)
GAMMA = 2.7  # Adjusts perceived brightness linearity
NUM_PIXELS = 30 # NeoPixel strip length (in pixels)
LOOP = False #set to True for looping
# Switch off onboard NeoPixel...
NEOPIXEL_PIN = digitalio.DigitalInOut(board.NEOPIXEL)
NEOPIXEL_PIN.direction = digitalio.Direction.OUTPUT
neopixel_write(NEOPIXEL_PIN, bytearray(3))
# ...then assign NEOPIXEL_PIN to the external NeoPixel connector:
NEOPIXEL_PIN = digitalio.DigitalInOut(board.EXTERNAL_NEOPIXEL)
NEOPIXEL_PIN.direction = digitalio.Direction.OUTPUT
neopixel_write(NEOPIXEL_PIN, bytearray(NUM_PIXELS * 3))
def read_le(value):
"""Interpret multi-byte value from file as little-endian value"""
result = 0
shift = 0
for byte in value:
result += byte << shift
shift += 8
return result
class BMPError(Exception):
"""Error handler for BMP-loading function"""
pass
def load_bmp(filename):
"""Load BMP file, return as list of column buffers"""
# pylint: disable=too-many-locals, too-many-branches
try:
print("Loading", filename)
with open("/" + filename, "rb") as bmp:
print("File opened")
if bmp.read(2) != b'BM': # check signature
raise BMPError("Not BitMap file")
bmp.read(8) # Read & ignore file size and creator bytes
bmp_image_offset = read_le(bmp.read(4)) # Start of image data
bmp.read(4) # Read & ignore header size
bmp_width = read_le(bmp.read(4))
bmp_height = read_le(bmp.read(4))
# BMPs are traditionally stored bottom-to-top.
# If bmp_height is negative, image is in top-down order.
# This is not BMP canon but has been observed in the wild!
flip = True
if bmp_height < 0:
bmp_height = -bmp_height
flip = False
print("WxH: (%d,%d)" % (bmp_width, bmp_height))
if read_le(bmp.read(2)) != 1:
raise BMPError("Not single-plane")
if read_le(bmp.read(2)) != 24: # bits per pixel
raise BMPError("Not 24-bit")
if read_le(bmp.read(2)) != 0:
raise BMPError("Compressed file")
print("Image format OK, reading data...")
row_size = (bmp_width * 3 + 3) & ~3 # 32-bit line boundary
# Constrain rows loaded to pixel strip length
clipped_height = min(bmp_height, NUM_PIXELS)
# Allocate per-column pixel buffers, sized for NeoPixel strip:
columns = [bytearray(NUM_PIXELS * 3) for _ in range(bmp_width)]
# Image is displayed at END (not start) of NeoPixel strip,
# this index works incrementally backward in column buffers...
idx = (NUM_PIXELS - 1) * 3
for row in range(clipped_height): # For each scanline...
if flip: # Bitmap is stored bottom-to-top order (normal BMP)
pos = bmp_image_offset + (bmp_height - 1 - row) * row_size
else: # Bitmap is stored top-to-bottom
pos = bmp_image_offset + row * row_size
bmp.seek(pos) # Start of scanline
for column in columns: # For each pixel of scanline...
# BMP files use BGR color order
blue, green, red = bmp.read(3)
# Rearrange into NeoPixel strip's color order,
# while handling brightness & gamma correction:
column[idx] = int(pow(green / 255, GAMMA) * BRIGHTNESS * 255 + 0.5)
column[idx+1] = int(pow(red / 255, GAMMA) * BRIGHTNESS * 255 + 0.5)
column[idx+2] = int(pow(blue / 255, GAMMA) * BRIGHTNESS * 255 + 0.5)
idx -= 3 # Advance (back) one pixel
# Add one more column with no color data loaded. This is used
# to turn the strip off at the end of the painting operation.
if not LOOP:
columns.append(bytearray(NUM_PIXELS * 3))
print("Loaded OK!")
gc.collect() # Garbage-collect now so playback is smoother
return columns
except OSError as err:
if err.args[0] == 28:
raise OSError("OS Error 28 0.25")
else:
raise OSError("OS Error 0.5")
except BMPError as err:
print("Failed to parse BMP: " + err.args[0])
# Load BMP image, return 'COLUMNS' array:
COLUMNS = load_bmp(FILENAME)
print("Mem free:", gc.mem_free())
COLUMN_DELAY = ANALOG.value / 65535.0 / 10.0 # 0.0 to 0.1 seconds
while LOOP:
for COLUMN in COLUMNS:
neopixel_write(NEOPIXEL_PIN, COLUMN)
time.sleep(COLUMN_DELAY)
while True:
# Wait for touch pad input:
while not TOUCH.value:
continue
COLUMN_DELAY = ANALOG.value / 65535.0 / 10.0 # 0.0 to 0.1 seconds
# print(COLUMN_DELAY)
# Play back color data loaded into each column:
for COLUMN in COLUMNS:
neopixel_write(NEOPIXEL_PIN, COLUMN)
time.sleep(COLUMN_DELAY)
# Last column is all 0's, no need to explicitly clear strip
# Wait for touch pad release, just in case:
while TOUCH.value:
continue
```
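`load_bmp()` depends on `read_le()` to decode the little-endian integers in the BMP header. A standalone sketch of that decoding (not part of the guide file):
```python
# Each byte contributes its value shifted left 8 bits per position,
# least-significant byte first.
def read_le_demo(value):
    result = 0
    shift = 0
    for byte in value:
        result += byte << shift
        shift += 8
    return result

print(read_le_demo(b"\x36\x00\x00\x00"))  # 54, the usual 24-bit BMP pixel-data offset
print(read_le_demo(b"\x40\x01\x00\x00"))  # 320, e.g. an image width field
```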
#### File: Adafruit_Learning_System_Guides/Matrix_Quote_Board/matrix_quote_board.py
```python
import time
import random
import board
import terminalio
from adafruit_matrixportal.matrixportal import MatrixPortal
# --- Display setup ---
matrixportal = MatrixPortal(status_neopixel=board.NEOPIXEL, debug=True)
# Create a new label with the color and text selected
matrixportal.add_text(
text_font=terminalio.FONT,
text_position=(0, (matrixportal.graphics.display.height // 2) - 1),
scrolling=True,
)
# Static 'Connecting' Text
matrixportal.add_text(
text_font=terminalio.FONT,
text_position=(2, (matrixportal.graphics.display.height // 2) - 1),
)
QUOTES_FEED = "sign-quotes.signtext"
COLORS_FEED = "sign-quotes.signcolor"
SCROLL_DELAY = 0.02
UPDATE_DELAY = 600
quotes = []
colors = []
last_color = None
last_quote = None
def update_data():
print("Updating data from Adafruit IO")
matrixportal.set_text("Connecting", 1)
try:
quotes_data = matrixportal.get_io_data(QUOTES_FEED)
quotes.clear()
for json_data in quotes_data:
quotes.append(matrixportal.network.json_traverse(json_data, ["value"]))
print(quotes)
# pylint: disable=broad-except
except Exception as error:
print(error)
try:
color_data = matrixportal.get_io_data(COLORS_FEED)
colors.clear()
for json_data in color_data:
colors.append(matrixportal.network.json_traverse(json_data, ["value"]))
print(colors)
# pylint: disable=broad-except
except Exception as error:
print(error)
if not quotes or not colors:
raise "Please add at least one quote and color to your feeds"
matrixportal.set_text(" ", 1)
update_data()
last_update = time.monotonic()
matrixportal.set_text(" ", 1)
quote_index = None
color_index = None
while True:
# Choose a random quote from quotes
if len(quotes) > 1 and last_quote is not None:
while quote_index == last_quote:
quote_index = random.randrange(0, len(quotes))
else:
quote_index = random.randrange(0, len(quotes))
last_quote = quote_index
# Choose a random color from colors
if len(colors) > 1 and last_color is not None:
while color_index == last_color:
color_index = random.randrange(0, len(colors))
else:
color_index = random.randrange(0, len(colors))
last_color = color_index
# Set the quote text
matrixportal.set_text(quotes[quote_index])
# Set the text color
matrixportal.set_text_color(colors[color_index])
# Scroll it
matrixportal.scroll_text(SCROLL_DELAY)
if time.monotonic() > last_update + UPDATE_DELAY:
update_data()
last_update = time.monotonic()
```
#### File: Adafruit_Learning_System_Guides/MIDI_Melody_Maker/code.py
```python
import time
from random import randint
import board
import simpleio
import busio
import terminalio
import neopixel
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogIn
import displayio
import adafruit_imageload
from adafruit_display_text import label
import adafruit_displayio_ssd1306
# uncomment if using USB MIDI
# import usb_midi
from adafruit_display_shapes.rect import Rect
import adafruit_midi
from adafruit_midi.note_on import NoteOn
from adafruit_midi.note_off import NoteOff
from adafruit_midi.control_change import ControlChange
displayio.release_displays()
oled_reset = board.D9
#turn off on-board neopixel
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0)
pixel.fill((0, 0, 0))
# Use for I2C for STEMMA OLED
i2c = board.I2C()
display_bus = displayio.I2CDisplay(i2c, device_address=0x3D, reset=oled_reset)
# STEMMA OLED dimensions. can have height of 64, but 32 makes text larger
WIDTH = 128
HEIGHT = 32
BORDER = 0
# blinka sprite indexes
EMPTY = 0
BLINKA_1 = 1
BLINKA_2 = 2
# setup for STEMMA OLED
display = adafruit_displayio_ssd1306.SSD1306(display_bus, width=WIDTH, height=HEIGHT)
# create the displayio object
splash = displayio.Group(max_size=40)
display.show(splash)
# text for BPM
bpm_text = "BPM: "
bpm_text_area = label.Label(
terminalio.FONT, text=bpm_text, color=0xFFFFFF, x=4, y=6
)
splash.append(bpm_text_area)
bpm_rect = Rect(0, 0, 50, 16, fill=None, outline=0xFFFFFF)
splash.append(bpm_rect)
# text for key
key_text = "Key: "
key_text_area = label.Label(
terminalio.FONT, text=key_text, color=0xFFFFFF, x=4, y=21
)
splash.append(key_text_area)
key_rect = Rect(0, 15, 50, 16, fill=None, outline=0xFFFFFF)
splash.append(key_rect)
# text for mode
mode_text = "Mode: "
mode_text_area = label.Label(
terminalio.FONT, text=mode_text, color=0xFFFFFF, x=54, y=21
)
splash.append(mode_text_area)
mode_rect = Rect(50, 15, 78, 16, fill=None, outline=0xFFFFFF)
splash.append(mode_rect)
# text for beat division
beat_text = "Div: "
beat_text_area = label.Label(
terminalio.FONT, text=beat_text, color=0xFFFFFF, x=54, y=6
)
splash.append(beat_text_area)
beat_rect = Rect(50, 0, 78, 16, fill=None, outline=0xFFFFFF)
splash.append(beat_rect)
# Blinka sprite setup
blinka, blinka_pal = adafruit_imageload.load("/spritesWhite.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette)
# creates a transparent background for Blinka
blinka_pal.make_transparent(7)
blinka_grid = displayio.TileGrid(blinka, pixel_shader=blinka_pal,
width=1, height=1,
tile_height=16, tile_width=16,
default_tile=EMPTY)
blinka_grid.x = 112
blinka_grid.y = 0
splash.append(blinka_grid)
# imports MIDI
# USB MIDI:
# midi = adafruit_midi.MIDI(midi_out=usb_midi.ports[1], out_channel=0)
# UART MIDI:
midi = adafruit_midi.MIDI(midi_out=busio.UART(board.TX, board.RX, baudrate=31250), out_channel=0)
# potentiometer pin setup
key_pot = AnalogIn(board.A1)
mode_pot = AnalogIn(board.A2)
beat_pot = AnalogIn(board.A3)
bpm_slider = AnalogIn(board.A4)
mod_pot = AnalogIn(board.A5)
# run switch setup
run_switch = DigitalInOut(board.D5)
run_switch.direction = Direction.INPUT
run_switch.pull = Pull.UP
# arrays of notes in each key
key_of_C = [60, 62, 64, 65, 67, 69, 71, 72]
key_of_Csharp = [61, 63, 65, 66, 68, 70, 72, 73]
key_of_D = [62, 64, 66, 67, 69, 71, 73, 74]
key_of_Dsharp = [63, 65, 67, 68, 70, 72, 74, 75]
key_of_E = [64, 66, 68, 69, 71, 73, 75, 76]
key_of_F = [65, 67, 69, 70, 72, 74, 76, 77]
key_of_Fsharp = [66, 68, 70, 71, 73, 75, 77, 78]
key_of_G = [67, 69, 71, 72, 74, 76, 78, 79]
key_of_Gsharp = [68, 70, 72, 73, 75, 77, 79, 80]
key_of_A = [69, 71, 73, 74, 76, 78, 80, 81]
key_of_Asharp = [70, 72, 74, 75, 77, 79, 81, 82]
key_of_B = [71, 73, 75, 76, 78, 80, 82, 83]
# array of keys
keys = [key_of_C, key_of_Csharp, key_of_D, key_of_Dsharp, key_of_E, key_of_F, key_of_Fsharp,
key_of_G, key_of_Gsharp, key_of_A, key_of_Asharp, key_of_B]
# array of note indexes for modes
fifths = [0, 4, 3, 7, 2, 6, 4, 7]
major = [4, 2, 0, 3, 5, 7, 6, 4]
minor = [5, 7, 2, 4, 6, 5, 1, 3]
pedal = [5, 5, 5, 6, 5, 5, 5, 7]
# defining variables for key name strings
C_name = "C"
Csharp_name = "C#"
D_name = "D"
Dsharp_name = "D#"
E_name = "E"
F_name = "F"
Fsharp_name = "F#"
G_name = "G"
Gsharp_name = "G#"
A_name = "A"
Asharp_name = "A#"
B_name = "B"
# array of strings for key names for use with the display
key_names = [C_name, Csharp_name, D_name, Dsharp_name, E_name, F_name, Fsharp_name,
G_name, Gsharp_name, A_name, Asharp_name, B_name]
# function for reading analog inputs
def val(voltage):
return voltage.value
# comparators for pots' values
mod_val2 = 0
beat_val2 = 0
bpm_val2 = 120
key_val2 = 0
mode_val2 = 0
# time.monotonic for running the modes
run = 0
# state for being on/off
run_state = False
# indexes for modes
r = 0
b = 0
f = 0
p = 0
maj = 0
mi = 0
random = 0
# mode states
play_pedal = False
play_fifths = False
play_maj = False
play_min = False
play_rando = False
play_scale = True
# state for random beat division
rando = False
# comparators for states
last_r = 0
last_f = 0
last_maj = 0
last_min = 0
last_p = 0
last_random = 0
# index for random beat division
hit = 0
# default tempo
tempo = 60
# beat division
sixteenth = 15 / tempo
eighth = 30 / tempo
quarter = 60 / tempo
half = 120 / tempo
whole = 240 / tempo
# time.monotonic for blinka animation
slither = 0
# blinka animation sprite index
g = 1
# array for random beat division values
rando_div = [240, 120, 60, 30, 15]
# array of beat division values
beat_division = [whole, half, quarter, eighth, sixteenth]
# strings for beat division names
beat_division_name = ["1", "1/2", "1/4", "1/8", "1/16", "Random"]
while True:
# mapping analog pot values to the different parameters
# MIDI modulation 0-127
mod_val1 = round(simpleio.map_range(val(mod_pot), 0, 65535, 0, 127))
# BPM range 60-220
bpm_val1 = simpleio.map_range(val(bpm_slider), 0, 65535, 60, 220)
# 6 options for beat division
beat_val1 = round(simpleio.map_range(val(beat_pot), 0, 65535, 0, 5))
# 12 options for key selection
key_val1 = round(simpleio.map_range(val(key_pot), 0, 65535, 0, 11))
# 6 options for mode selection
mode_val1 = round(simpleio.map_range(val(mode_pot), 0, 65535, 0, 5))
# sending MIDI modulation
if abs(mod_val1 - mod_val2) > 2:
# updates previous value to hold current value
mod_val2 = mod_val1
# MIDI data has to be sent as an integer
# this converts the pot data into an int
modulation = int(mod_val2)
# int is stored as a CC message
modWheel = ControlChange(1, modulation)
# CC message is sent
midi.send(modWheel)
print(modWheel)
# delay to settle MIDI data
time.sleep(0.001)
# sets beat division
if abs(beat_val1 - beat_val2) > 0:
# updates previous value to hold current value
beat_val2 = beat_val1
print("beat div is", beat_val2)
# updates display
beat_text_area.text = "Div:%s" % beat_division_name[beat_val2]
# sets random beat division state
if beat_val2 == 5:
rando = True
else:
rando = False
time.sleep(0.001)
# mode selection
if abs(mode_val1 - mode_val2) > 0:
# updates previous value to hold current value
mode_val2 = mode_val1
# scale mode
if mode_val2 == 0:
play_scale = True
play_maj = False
play_min = False
play_fifths = False
play_pedal = False
play_rando = False
# updates display
mode_text_area.text = "Mode:Scale"
print("scale")
# major triads mode
if mode_val2 == 1:
play_scale = False
play_maj = True
play_min = False
play_fifths = False
play_pedal = False
play_rando = False
print("major chords")
# updates display
mode_text_area.text = "Mode:MajorTriads"
# minor triads mode
if mode_val2 == 2:
play_scale = False
play_maj = False
play_min = True
play_fifths = False
play_pedal = False
play_rando = False
print("minor")
# updates display
mode_text_area.text = "Mode:MinorTriads"
# fifths mode
if mode_val2 == 3:
play_scale = False
play_maj = False
play_min = False
play_fifths = True
play_pedal = False
play_rando = False
print("fifths")
# updates display
mode_text_area.text = "Mode:Fifths"
# pedal tone mode
if mode_val2 == 4:
play_scale = False
play_maj = False
play_min = False
play_fifths = False
play_pedal = True
play_rando = False
print("play random")
# updates display
mode_text_area.text = 'Mode:Pedal'
# random mode
if mode_val2 == 5:
play_scale = False
play_maj = False
play_min = False
play_fifths = False
play_pedal = False
play_rando = True
print("play random")
# updates display
mode_text_area.text = 'Mode:Random'
time.sleep(0.001)
# key selection
if abs(key_val1 - key_val2) > 0:
# updates previous value to hold current value
key_val2 = key_val1
# indexes the notes in each key array
for k in keys:
o = keys.index(k)
octave = keys[o]
# updates display
key_text_area.text = 'Key:%s' % key_names[key_val2]
print("o is", o)
time.sleep(0.001)
# BPM adjustment
if abs(bpm_val1 - bpm_val2) > 1:
# updates previous value to hold current value
bpm_val2 = bpm_val1
# updates tempo
tempo = int(bpm_val2)
# updates calculations for beat division
sixteenth = 15 / tempo
eighth = 30 / tempo
quarter = 60 / tempo
half = 120 / tempo
whole = 240 / tempo
# updates array of beat divisions
beat_division = [whole, half, quarter, eighth, sixteenth]
# updates display
bpm_text_area.text = "BPM:%d" % tempo
print("tempo is", tempo)
time.sleep(0.05)
# if the run switch is pressed:
if run_switch.value:
run_state = True
# if random beat division, then beat_division index is randomized with index hit
if rando:
divide = beat_division[hit]
# if not random, then beat_division is the value of the pot
else:
divide = beat_division[beat_val2]
# blinka animation in time with BPM and beat division
# she will slither every time a note is played
if (time.monotonic() - slither) >= divide:
blinka_grid[0] = g
g += 1
slither = time.monotonic()
if g > 2:
g = 1
# holds key index
octave = keys[key_val2]
# fifths mode
if play_fifths:
# tracks time divided by the beat division
if (time.monotonic() - run) >= divide:
# note index from mode, r counts index position
f = fifths[r]
# sends NoteOn
midi.send(NoteOn(octave[f]))
# turns previous note off
midi.send(NoteOff(octave[last_f]))
# print(octave[r])
run = time.monotonic()
# go to next note
r += 1
# updates previous value to hold current value
if r > 0:
last_r = r
last_f = f
hit = randint(2, 4)
# resets note index position
if r > 7:
r = 0
last_r = r
last_f = f
hit = randint(2, 4)
# major triad mode
if play_maj:
# tracks time divided by the beat division
if (time.monotonic() - run) >= divide:
# note index from mode, r counts index position
maj = major[r]
# sends NoteOn
midi.send(NoteOn(octave[maj]))
# turns previous note off
midi.send(NoteOff(octave[last_maj]))
# print(octave[r])
run = time.monotonic()
# go to next note
r += 1
# updates previous value to hold current value
if r > 0:
last_r = r
last_maj = maj
hit = randint(2, 4)
# resets note index position
if r > 7:
r = 0
last_r = r
last_maj = maj
hit = randint(2, 4)
# minor triad mode
if play_min:
# tracks time divided by the beat division
if (time.monotonic() - run) >= divide:
# note index from mode, r counts index position
mi = minor[r]
# sends NoteOn
midi.send(NoteOn(octave[mi]))
# turns previous note off
midi.send(NoteOff(octave[last_min]))
# print(octave[r])
run = time.monotonic()
# go to next note
r += 1
# updates previous value to hold current value
if r > 0:
last_r = r
last_min = mi
hit = randint(2, 4)
# resets note index position
if r > 7:
r = 0
last_r = r
last_min = mi
hit = randint(2, 4)
# pedal tone mode
if play_pedal:
# tracks time divided by the beat division
if (time.monotonic() - run) >= divide:
# note index from mode, r counts index position
p = pedal[r]
# sends NoteOn
midi.send(NoteOn(octave[p]))
# turns previous note off
midi.send(NoteOff(octave[last_p]))
# print(octave[r])
run = time.monotonic()
# go to next note
r += 1
# updates previous value to hold current value
if r > 0:
last_r = r
last_p = p
hit = randint(2, 4)
# resets note index position
if r > 7:
r = 0
last_r = r
last_p = p
hit = randint(2, 4)
# random note mode
if play_rando:
# randomizes note indexes in key
r = randint(0, 7)
# tracks time divided by the beat division
if (time.monotonic() - run) >= divide:
# sends NoteOn
midi.send(NoteOn(octave[r]))
# turns previous note off
midi.send(NoteOff(octave[last_r]))
# print(octave[r])
run = time.monotonic()
# updates previous value to hold current value
if r > 0:
last_r = r
r = randint(0, 7)
hit = randint(2, 4)
# scale mode
if play_scale:
# tracks time divided by the beat division
if (time.monotonic() - run) >= divide:
# sends NoteOn
midi.send(NoteOn(octave[r]))
# turns previous note off
midi.send(NoteOff(octave[last_r]))
# print(octave[r])
run = time.monotonic()
# go to next note
r += 1
# updates previous value to hold current value
if r > 0:
last_r = r
hit = randint(2, 4)
# resets note index position
if r > 7:
r = 0
last_r = r
if not run_switch.value:
if run_state:
all_note_off = ControlChange(123, 0)
# CC message is sent
midi.send(all_note_off)
run_state = False
time.sleep(0.001)
# delay to settle MIDI data
time.sleep(0.005)
```
#### File: Adafruit_Learning_System_Guides/PyPortal_Smart_Switch/code.py
```python
import time
import gc
import board
import busio
from adafruit_esp32spi import adafruit_esp32spi_socket as socket
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
import adafruit_requests as requests
import digitalio
import analogio
from adafruit_pyportal import PyPortal
from adafruit_display_shapes.circle import Circle
from adafruit_display_shapes.roundrect import RoundRect
from adafruit_bitmap_font import bitmap_font
from adafruit_display_text import label
import adafruit_touchscreen
from adafruit_minimqtt import MQTT
DISPLAY_COLOR = 0x006600
SWITCH_COLOR = 0x008800
SWITCH_FILL_COLOR = 0xffffff
# Switch location
SWITCHX = 260
SWITCHY = 4
FEED_NAME = "pyportal-switch"
months = ["JAN", "FEB", "MAR", "APR", "MAY", "JUN",
"JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
def get_local_timestamp(location=None):
# pylint: disable=line-too-long
"""Fetch and "set" the local time of this microcontroller to the local time at the location, using an internet time API.
:param str location: Your city and country, e.g. ``"New York, US"``.
"""
# pylint: enable=line-too-long
api_url = None
try:
aio_username = secrets['aio_username']
aio_key = secrets['aio_key']
except KeyError:
raise KeyError("\n\nOur time service requires a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'")# pylint: disable=line-too-long
location = secrets.get('timezone', location)
if location:
print("Getting time for timezone", location)
api_url = (TIME_SERVICE + "&tz=%s") % (aio_username, aio_key, location)
else: # we'll try to figure it out from the IP address
print("Getting time from IP address")
api_url = TIME_SERVICE % (aio_username, aio_key)
api_url += TIME_SERVICE_TIMESTAMP
try:
print("api_url:",api_url)
response = requests.get(api_url)
times = response.text.split(' ')
seconds = int(times[0])
tzoffset = times[1]
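        # tzoffset has the form "+HHMM"; convert it to seconds, applying the sign to the minutes as well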
tzhours = int(tzoffset[0:3])
tzminutes = int(tzoffset[3:5])
tzseconds = tzhours * 60 * 60
if tzseconds < 0:
tzseconds -= tzminutes * 60
else:
tzseconds += tzminutes * 60
print(seconds + tzseconds, tzoffset, tzhours, tzminutes)
except KeyError:
raise KeyError("Was unable to lookup the time, try setting secrets['timezone'] according to http://worldtimeapi.org/timezones") # pylint: disable=line-too-long
# now clean up
response.close()
response = None
gc.collect()
return int(seconds + tzseconds)
def create_text_areas(configs):
"""Given a list of area specifications, create and return text areas."""
text_areas = []
for cfg in configs:
textarea = label.Label(cfg['font'], text=' '*cfg['size'])
textarea.x = cfg['x']
textarea.y = cfg['y']
textarea.color = cfg['color']
text_areas.append(textarea)
return text_areas
class Switch(object):
def __init__(self, pin, my_pyportal):
self.switch = digitalio.DigitalInOut(pin)
self.switch.direction = digitalio.Direction.OUTPUT
rect = RoundRect(SWITCHX, SWITCHY, 31, 60, 16, outline=SWITCH_COLOR,
fill=SWITCH_FILL_COLOR, stroke=3)
my_pyportal.splash.append(rect)
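        # two circles inside the rounded rect act as the on/off position indicator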
self.circle_on = Circle(SWITCHX + 15, SWITCHY + 16, 10, fill=SWITCH_FILL_COLOR)
my_pyportal.splash.append(self.circle_on)
self.circle_off = Circle(SWITCHX + 15, SWITCHY + 42, 10, fill=DISPLAY_COLOR)
my_pyportal.splash.append(self.circle_off)
# turn switch on or off
def enable(self, enable):
print("turning switch to ", enable)
self.switch.value = enable
if enable:
self.circle_off.fill = SWITCH_FILL_COLOR
self.circle_on.fill = DISPLAY_COLOR
else:
self.circle_on.fill = SWITCH_FILL_COLOR
self.circle_off.fill = DISPLAY_COLOR
def toggle(self):
if self.switch.value:
self.enable(False)
else:
self.enable(True)
def status(self):
return self.switch.value
# you'll need to pass in an io username and key
TIME_SERVICE = "http://io.adafruit.com/api/v2/%s/integrations/time/strftime?x-aio-key=%s"
# See https://apidock.com/ruby/DateTime/strftime for full options
TIME_SERVICE_TIMESTAMP = '&fmt=%25s+%25z'
class Clock(object):
def __init__(self, my_pyportal):
self.low_light = False
self.update_time = None
self.snapshot_time = None
self.pyportal = my_pyportal
self.current_time = 0
self.light = analogio.AnalogIn(board.LIGHT)
text_area_configs = [dict(x=0, y=105, size=10, color=DISPLAY_COLOR, font=time_font),
dict(x=260, y=153, size=3, color=DISPLAY_COLOR, font=ampm_font),
dict(x=110, y=40, size=20, color=DISPLAY_COLOR, font=date_font)]
self.text_areas = create_text_areas(text_area_configs)
self.text_areas[2].text = "starting..."
for ta in self.text_areas:
self.pyportal.splash.append(ta)
def adjust_backlight(self, force=False):
"""Check light level. Adjust the backlight and background image if it's dark."""
if force or (self.light.value >= 1500 and self.low_light):
self.pyportal.set_backlight(1.00)
self.low_light = False
elif self.light.value <= 1000 and not self.low_light:
self.pyportal.set_backlight(0.1)
self.low_light = True
def tick(self, now):
self.adjust_backlight()
if (not self.update_time) or ((now - self.update_time) >= 300):
# Update the time
print("update the time")
self.update_time = int(now)
self.snapshot_time = get_local_timestamp(secrets['timezone'])
self.current_time = time.localtime(self.snapshot_time)
else:
self.current_time = time.localtime(int(now) - self.update_time + self.snapshot_time)
hour = self.current_time.tm_hour
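        # convert to a 12-hour display value (0 and 12 both show as 12)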
if hour > 12:
hour = hour % 12
if hour == 0:
hour = 12
time_string = '%2d:%02d' % (hour,self.current_time.tm_min)
self.text_areas[0].text = time_string
ampm_string = "AM"
if self.current_time.tm_hour >= 12:
ampm_string = "PM"
self.text_areas[1].text = ampm_string
self.text_areas[2].text = (months[int(self.current_time.tm_mon - 1)] +
" " + str(self.current_time.tm_mday))
try:
board.DISPLAY.refresh(target_frames_per_second=60)
except AttributeError:
board.DISPLAY.refresh_soon()
board.DISPLAY.wait_for_frame()
# Define callback methods which are called when events occur
# pylint: disable=unused-argument, redefined-outer-name
def connected(client, userdata, flags, rc):
# This function will be called when the client is connected
# successfully to the broker.
onoff_feed = secrets['aio_username'] + '/feeds/' + FEED_NAME
print('Connected to Adafruit IO! Listening for topic changes on %s' % onoff_feed)
# Subscribe to all changes on the onoff_feed.
client.subscribe(onoff_feed)
def disconnected(client, userdata, rc):
# This method is called when the client is disconnected
print('Disconnected from Adafruit IO!')
def message(client, topic, message):
# This method is called when a topic the client is subscribed to
# has a new message.
print('New message on topic {0}: {1}'.format(topic, message))
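    # the feed value arrives as a string; anything other than ON/TRUE/1 turns the switch off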
if message in ("ON","TRUE","1"):
switch.enable(True)
else:
switch.enable(False)
############################################
try:
from secrets import secrets
except ImportError:
print("""WiFi settings are kept in secrets.py, please add them there!
the secrets dictionary must contain 'ssid' and 'password' at a minimum""")
raise
esp32_cs = digitalio.DigitalInOut(board.ESP_CS)
esp32_ready = digitalio.DigitalInOut(board.ESP_BUSY)
esp32_reset = digitalio.DigitalInOut(board.ESP_RESET)
WIDTH = board.DISPLAY.width
HEIGHT = board.DISPLAY.height
ts = adafruit_touchscreen.Touchscreen(board.TOUCH_XL, board.TOUCH_XR,
board.TOUCH_YD, board.TOUCH_YU,
calibration=(
(5200, 59000),
(5800, 57000)
),
size=(WIDTH, HEIGHT))
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset, debug=False)
requests.set_socket(socket, esp)
if esp.status == adafruit_esp32spi.WL_IDLE_STATUS:
print("ESP32 found and in idle mode")
print("Firmware vers.", esp.firmware_version)
print("MAC addr:", [hex(i) for i in esp.MAC_address])
pyportal = PyPortal(esp=esp,
external_spi=spi,
default_bg="/background.bmp")
ampm_font = bitmap_font.load_font("/fonts/RobotoMono-18.bdf")
ampm_font.load_glyphs(b'ampAMP')
date_font = bitmap_font.load_font("/fonts/RobotoMono-18.bdf")
date_font.load_glyphs(b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')
time_font = bitmap_font.load_font("/fonts/RobotoMono-72.bdf")
time_font.load_glyphs(b'0123456789:')
clock = Clock(pyportal)
for ap in esp.scan_networks():
print("\t%s\t\tRSSI: %d" % (str(ap['ssid'], 'utf-8'), ap['rssi']))
print("Connecting to AP...")
while not esp.is_connected:
try:
esp.connect_AP(secrets['ssid'], secrets['password'])
except RuntimeError as e:
print("could not connect to AP, retrying: ",e)
continue
print("Connected to", str(esp.ssid, 'utf-8'), "\tRSSI:", esp.rssi)
print("My IP address is", esp.pretty_ip(esp.ip_address))
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(
esp, secrets, debug = True)
# Set up a MiniMQTT Client
mqtt_client = MQTT(socket,
broker='io.adafruit.com',
username=secrets['aio_username'],
password=secrets['aio_key'],
network_manager=wifi)
mqtt_client.on_connect = connected
mqtt_client.on_disconnect = disconnected
mqtt_client.on_message = message
mqtt_client.connect()
switch = Switch(board.D4, pyportal)
second_timer = time.monotonic()
while True:
#time.sleep(1)
p = ts.touch_point
if p:
#if p[0] >= 140 and p[0] <= 170 and p[1] >= 160 and p[1] <= 220:
# touch anywhere on the screen
print("touch!")
clock.adjust_backlight(True)
switch.toggle()
time.sleep(1)
# poll once per second
if time.monotonic() - second_timer >= 1.0:
second_timer = time.monotonic()
# Poll the message queue
try:
mqtt_client.loop()
except RuntimeError:
print("reconnecting wifi")
mqtt_client.reconnect_wifi()
# Update the PyPortal display
clock.tick(time.monotonic())
```
#### File: Adafruit_Learning_System_Guides/PyPortal_Smart_Thermometer/thermometer.py
```python
import time
import board
import neopixel
import busio
from digitalio import DigitalInOut
from analogio import AnalogIn
import adafruit_adt7410
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
from adafruit_io.adafruit_io import IO_HTTP, AdafruitIO_RequestError
# thermometer graphics helper
import thermometer_helper
# rate at which to refresh the pyportal screen, in seconds
PYPORTAL_REFRESH = 2
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# PyPortal ESP32 Setup
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Set your Adafruit IO Username and Key in secrets.py
# (visit io.adafruit.com if you need to create an account,
# or if you need your Adafruit IO key.)
try:
ADAFRUIT_IO_USER = secrets['aio_username']
ADAFRUIT_IO_KEY = secrets['aio_key']
except KeyError:
raise KeyError('To use this code, you need to include your Adafruit IO username \
and password in a secrets.py file on the CIRCUITPY drive.')
# Create an instance of the IO_HTTP client
io = IO_HTTP(ADAFRUIT_IO_USER, ADAFRUIT_IO_KEY, wifi)
# Get the temperature feed from Adafruit IO
temperature_feed = io.get_feed('temperature')
# init. graphics helper
gfx = thermometer_helper.Thermometer_GFX(celsius=False)
# init. adt7410
i2c_bus = busio.I2C(board.SCL, board.SDA)
adt = adafruit_adt7410.ADT7410(i2c_bus, address=0x48)
adt.high_resolution = True
# init. the light sensor
light_sensor = AnalogIn(board.LIGHT)
def set_backlight(val):
"""Adjust the TFT backlight.
:param val: The backlight brightness. Use a value between ``0`` and ``1``, where ``0`` is
off, and ``1`` is 100% brightness.
"""
val = max(0, min(1.0, val))
board.DISPLAY.auto_brightness = False
board.DISPLAY.brightness = val
while True:
# read the light sensor
light_value = light_sensor.value
print('Light Value: ', light_value)
# read the temperature sensor
temperature = adt.temperature
try: # WiFi Connection
if light_value < 1000: # turn on the backlight
set_backlight(1)
print('displaying temperature...')
gfx.display_temp(temperature)
# Get and display date and time form Adafruit IO
print('Getting time from Adafruit IO...')
datetime = io.receive_time()
print('displaying time...')
gfx.display_date_time(datetime)
else: # turn off the backlight
set_backlight(0)
try: # send temperature data to IO
gfx.display_io_status('Sending data...')
print('Sending data to Adafruit IO...')
io.send_data(temperature_feed['key'], temperature)
print('Data sent!')
gfx.display_io_status('Data sent!')
except AdafruitIO_RequestError as e:
raise AdafruitIO_RequestError('IO Error: ', e)
except (ValueError, RuntimeError) as e: # WiFi Connection Failure
print("Failed to get data, retrying\n", e)
wifi.reset()
continue
time.sleep(PYPORTAL_REFRESH)
```
#### File: Adafruit_Learning_System_Guides/Sensor_Plotting_With_Mu_CircuitPython/potentiometer.py
```python
import time
import analogio
import board
potentiometer = analogio.AnalogIn(board.A1)
def get_voltage(pin):
return (pin.value * 3.3) / 65536
while True:
print((get_voltage(potentiometer),))
time.sleep(0.1)
```
#### File: Adafruit_Learning_System_Guides/Servo_Tester/main.py
```python
import time
import board
import busio
import digitalio
import rotaryio
import pulseio
import adafruit_ssd1306
from adafruit_motor import servo
from adafruit_debouncer import Debouncer
#--------------------------------------------------------------------------------
# Initialize Rotary encoder
button_io = digitalio.DigitalInOut(board.D12)
button_io.direction = digitalio.Direction.INPUT
button_io.pull = digitalio.Pull.UP
button = Debouncer(button_io)
rotary_encoder = rotaryio.IncrementalEncoder(board.D10, board.D11)
#--------------------------------------------------------------------------------
# Initialize I2C and OLED
i2c = busio.I2C(board.SCL, board.SDA)
oled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c)
oled.fill(0)
oled.show()
min_pulses = [ 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000]
max_pulses = [2000, 2050, 2100, 2150, 2200, 2250, 2300, 2350, 2400, 2450, 2500]
min_pulse_index = 10
max_pulse_index = 0
#-------------------------------------------------------------------------------
# Initialize servo
pwm = pulseio.PWMOut(board.D5, frequency=50)
test_servo = servo.Servo(pwm, min_pulse=1000, max_pulse=2000)
test_servo.angle = 0
current_position = None # current encoder position
change = 0 # the change in encoder position
angle = 0
mode = 0
sweep_time = 1.0
last_movement_at = 0.0
delta = 5
def get_encoder_change(encoder, pos):
new_position = encoder.position
if pos is None:
return (new_position, 0)
else:
return (new_position, new_position - pos)
#--------------------------------------------------------------------------------
# Main loop
while True:
now = time.monotonic()
button.update()
if mode == 1:
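        # sweep mode: step the angle by 5 degrees every sweep_time/36 seconds
        # (36 steps across 0-180), reversing direction at the ends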
if now >= (last_movement_at + sweep_time / 36):
last_movement_at = now
angle += delta
if (angle > 180) or (angle < 0):
delta *= -1
angle += delta
if button.fell:
        test_servo.angle = 0
if mode == 0:
mode = 1
sweep_time = 1.0
last_movement_at = now
elif mode == 1:
mode = 2
angle = 0
elif mode == 2:
mode = 3
angle = 180
elif mode == 3:
mode = 0
angle = 0
else:
current_position, change = get_encoder_change(rotary_encoder, current_position)
if change != 0:
if mode == 0:
angle = min(180, max(0, angle + change * 5))
elif mode == 1:
sweep_time = min(5.0, max(1.0, sweep_time + change * 0.1))
elif mode == 2:
min_pulse_index = min(10, max(min_pulse_index + change, 0))
test_servo = servo.Servo(pwm,
min_pulse=min_pulses[min_pulse_index],
max_pulse=max_pulses[max_pulse_index])
angle = 0
elif mode == 3:
max_pulse_index = min(10, max(max_pulse_index + change, 0))
test_servo = servo.Servo(pwm,
min_pulse=min_pulses[min_pulse_index],
max_pulse=max_pulses[max_pulse_index])
angle = 180
oled.fill(0)
if mode == 0:
oled.text("Angle: {0}".format(angle), 0, 0)
elif mode == 1:
oled.text("Sweep time: {0}".format(sweep_time), 0, 0)
elif mode == 2:
oled.text("Min width: {0}".format(min_pulses[min_pulse_index]), 0, 0)
elif mode == 3:
oled.text("Max width: {0}".format(max_pulses[max_pulse_index]), 0, 0)
oled.show()
test_servo.angle = angle
```
#### File: Adafruit_Learning_System_Guides/Snow_Globe_BLE_CPB/ble_snowglobe.py
```python
import time
import random
import board
import busio
import neopixel
import adafruit_lis3dh
from adafruit_bluefruit_connect.packet import Packet
from adafruit_bluefruit_connect.color_packet import ColorPacket
from adafruit_bluefruit_connect.button_packet import ButtonPacket
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
#===| User Config |==================================================
SNOWGLOBE_NAME = "SNOWGLOBE" # name that will show up on smart device
DEFAULT_ANIMATION = 0 # 0-3, index in ANIMATIONS list
DEFAULT_DURATION = 5 # total seconds to play animation
DEFAULT_SPEED = 0.1 # delay in seconds between updates
DEFAULT_COLOR = 0xFF0000 # hex color value
DEFAULT_SHAKE = 20 # lower number is more sensitive
# you can define more animation functions below
# here, specify the four to be used
ANIMATIONS = ('spin', 'pulse', 'strobe', 'sparkle')
#===| User Config |==================================================
# Configuration settings
snow_config = {
'animation' : DEFAULT_ANIMATION,
'duration' : DEFAULT_DURATION,
'speed' : DEFAULT_SPEED,
'color' : DEFAULT_COLOR,
'shake' : DEFAULT_SHAKE,
}
# Setup NeoPixels
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10)
# Setup accelo
accelo_i2c = busio.I2C(board.ACCELEROMETER_SCL, board.ACCELEROMETER_SDA)
accelo = adafruit_lis3dh.LIS3DH_I2C(accelo_i2c, address=0x19)
# Setup BLE
ble = BLERadio()
uart = UARTService()
advertisement = ProvideServicesAdvertisement(uart)
ble._adapter.name = SNOWGLOBE_NAME #pylint: disable=protected-access
#--| ANIMATIONS |----------------------------------------------------
def spin(config):
start_time = time.monotonic()
last_update = start_time
p = -1
while time.monotonic() - start_time < config['duration']:
if time.monotonic() - last_update > config['speed']:
pixels.fill(0)
pixels[p % 10] = config['color']
p -= 1
last_update = time.monotonic()
def pulse(config):
start_time = time.monotonic()
last_update = start_time
brightness = 0
delta = 0.05
pixels.brightness = 0
pixels.fill(config['color'])
while time.monotonic() - start_time < config['duration']:
if time.monotonic() - last_update > config['speed']:
brightness += delta
if brightness > 1:
brightness = 1
delta *= -1
if brightness < 0:
brightness = 0
delta *= -1
pixels.brightness = brightness
last_update = time.monotonic()
def strobe(config):
start_time = time.monotonic()
last_update = start_time
turn_on = True
while time.monotonic() - start_time < config['duration']:
if time.monotonic() - last_update > config['speed']:
if turn_on:
pixels.fill(config['color'])
else:
pixels.fill(0)
turn_on = not turn_on
last_update = time.monotonic()
def sparkle(config):
start_time = time.monotonic()
last_update = start_time
while time.monotonic() - start_time < config['duration']:
if time.monotonic() - last_update > config['speed']:
pixels.fill(0)
pixels[random.randint(0, 9)] = config['color']
last_update = time.monotonic()
#--| ANIMATIONS |----------------------------------------------------
def play_animation(config):
#pylint: disable=eval-used
eval(ANIMATIONS[config['animation']])(config)
pixels.fill(0)
def indicate(event=None):
if not isinstance(event, str):
return
event = event.strip().upper()
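    # simple NeoPixel feedback: START chases the default color, CONNECTED flashes blue,
    # DISCONNECTED flashes green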
if event == 'START':
for _ in range(2):
for i in range(10):
pixels[i] = DEFAULT_COLOR
time.sleep(0.05)
pixels.fill(0)
if event == 'CONNECTED':
for _ in range(5):
pixels.fill(0x0000FF)
time.sleep(0.1)
pixels.fill(0)
time.sleep(0.1)
if event == 'DISCONNECTED':
for _ in range(5):
pixels.fill(0x00FF00)
time.sleep(0.1)
pixels.fill(0)
time.sleep(0.1)
indicate('START')
# Are we already advertising?
advertising = False
while True:
# While BLE is *not* connected
while not ble.connected:
if accelo.shake(snow_config['shake'], 5, 0):
play_animation(snow_config)
if not advertising:
ble.start_advertising(advertisement)
advertising = True
# connected
indicate('CONNECTED')
while ble.connected:
# Once we're connected, we're not advertising any more.
advertising = False
if accelo.shake(snow_config['shake'], 5, 0):
play_animation(snow_config)
if uart.in_waiting:
try:
packet = Packet.from_stream(uart)
except ValueError:
continue
if isinstance(packet, ColorPacket):
#
# COLOR
#
snow_config['color'] = packet.color
pixels.fill(snow_config['color'])
time.sleep(0.5)
pixels.fill(0)
if isinstance(packet, ButtonPacket) and packet.pressed:
#
# SPEED
#
if packet.button == ButtonPacket.UP:
speed = snow_config['speed'] - 0.05
speed = 0.05 if speed < 0.05 else speed
snow_config['speed'] = speed
play_animation(snow_config)
if packet.button == ButtonPacket.DOWN:
speed = snow_config['speed'] + 0.05
snow_config['speed'] = speed
play_animation(snow_config)
#
# DURATION
#
if packet.button == ButtonPacket.LEFT:
duration = snow_config['duration'] - 1
duration = 1 if duration < 1 else duration
snow_config['duration'] = duration
play_animation(snow_config)
if packet.button == ButtonPacket.RIGHT:
duration = snow_config['duration'] + 1
snow_config['duration'] = duration
play_animation(snow_config)
#
# ANIMATION
#
if packet.button == ButtonPacket.BUTTON_1:
snow_config['animation'] = 0
play_animation(snow_config)
if packet.button == ButtonPacket.BUTTON_2:
snow_config['animation'] = 1
play_animation(snow_config)
if packet.button == ButtonPacket.BUTTON_3:
snow_config['animation'] = 2
play_animation(snow_config)
if packet.button == ButtonPacket.BUTTON_4:
snow_config['animation'] = 3
play_animation(snow_config)
# disconnected
indicate('DISCONNECTED')
```
#### File: Adafruit_Learning_System_Guides/Techno_Tiki_RGB_LED_Torch/Techno_Tiki_Circuit_Playground_Express.py
```python
import time
import adafruit_irremote
import board
import neopixel
import pulseio
# pylint: disable=global-statement
brightness = 1 # 0-1, higher number is brighter
# Adafruit IR Remote Codes:
#   Button       Code     Button   Code
#   -----------  ----     ------   ----
#   VOL-:         255     0/10+:    207
#   Play/Pause:   127     1:        247
#   VOL+:         191     2:        119
#   SETUP:        223     3:        183
#   STOP/MODE:    159     4:        215
#   UP:            95     5:         87
#   DOWN:          79     6:        151
#   LEFT:         239     7:        231
#   RIGHT:        175     8:        103
#   ENTER/SAVE:   111     9:        167
#   Back:         143
# Adafruit IR Remote Codes:
volume_down = 255
play_pause = 127
volume_up = 191
setup = 223
up_arrow = 95
stop_mode = 159
left_arrow = 239
enter_save = 111
right_arrow = 175
down_arrow = 79
num_1 = 247
num_2 = 119
num_3 = 183
num_4 = 215
num_5 = 87
num_6 = 151
num_7 = 231
num_8 = 103
num_9 = 167
# Define which remote buttons are associated with sketch actions.
color_change = right_arrow # Button that cycles through color animations.
animation_change = left_arrow # Button that cycles through animation types (only two supported).
speed_change = up_arrow # Button that cycles through speed choices.
power_off = volume_down # Button that turns off the pixels.
power_on = volume_up # Button that turns on the pixels. Must be pressed twice.
# The colorPalette two-dimensional array below has a row for each color animation and a column
# for each step within the animation. Each value is a 24-bit RGB color. By looping through
# the columns of a row the colors of pixels will animate.
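# e.g. color_palette[0] is the red/cyan animation and color_palette[0][3] is its fourth color step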
color_steps = 8 # Number of steps in the animation.
color_count = 8 # number of columns/steps
# Build lookup table/palette for the color animations so they aren't computed at runtime.
color_palette = [
    # Complementary colors
([255, 0, 0], [218, 36, 36], [182, 72, 72], [145, 109, 109],
[109, 145, 145], [72, 182, 182], [36, 218, 218], [0, 255, 255]), # red cyan
([255, 255, 0], [218, 218, 36], [182, 182, 72], [145, 145, 109],
[109, 109, 145], [72, 72, 182], [36, 36, 218], [0, 0, 255]), # yellow blue
([0, 255, 0], [36, 218, 36], [72, 182, 72], [109, 145, 109],
[145, 109, 145], [182, 72, 182], [218, 36, 218], [255, 0, 255]), # green magenta
# Adjacent colors (on color wheel).
([255, 255, 0], [218, 255, 0], [182, 255, 0], [145, 255, 0],
     [109, 255, 0], [72, 255, 0], [36, 255, 0], [0, 255, 0]), # yellow green
([0, 255, 0], [0, 255, 36], [0, 255, 72], [0, 255, 109],
[0, 255, 145], [0, 255, 182], [0, 255, 218], [0, 255, 255]), # green cyan
([0, 255, 255], [0, 218, 255], [0, 182, 255], [0, 145, 255],
[0, 109, 255], [0, 72, 255], [0, 36, 255], [0, 0, 255]), # cyan blue
([0, 0, 255], [36, 0, 255], [72, 0, 255], [109, 0, 255],
[145, 0, 255], [182, 0, 255], [218, 0, 255], [255, 0, 255]), # blue magenta
([255, 0, 255], [255, 0, 218], [255, 0, 182], [255, 0, 145],
[255, 0, 109], [255, 0, 72], [255, 0, 36], [255, 0, 0]) # magenta red
]
# Other combos
#([255, 0, 0], [218, 36, 0], [182, 72, 0], [145, 109, 0],
#[109, 145, 0], [72, 182, 0], [36, 218, 0], [0, 255, 0]), # red green
#([255, 255, 0], [218, 255, 36], [182, 255, 72], [145, 255, 109],
#[109, 255, 145], [72, 255, 182], [36, 255, 218], [0, 255, 255]), # yellow cyan
#([0, 255, 0], [0, 218, 36], [0, 182, 72], [0, 145, 109],
#[0, 109, 145], [0, 72, 182], [0, 36, 218], [0, 0, 255]), # green blue
#([0, 255, 255], [36, 218, 255], [72, 182, 255], [109, 145, 255],
#[145, 109, 255], [182, 72, 255], [218, 36, 255], [255, 0, 255]), # cyan magenta
#([0, 0, 255], [36, 0, 218], [72, 0, 182], [109, 0, 145],
#[145, 0, 109], [182, 0, 72], [218, 0, 36], [255, 0, 0]), # blue red
#([255, 0, 255], [255, 36, 218], [255, 72, 182], [255, 109, 145],
#[255, 145, 109], [255, 182, 72], [255, 218, 36], [255, 255, 0]), # magenta yellow
# Solid colors fading to dark.
#([255, 0, 0], [223, 0, 0], [191, 0, 0], [159, 0, 0],
#[127, 0, 0], [95, 0, 0], [63, 0, 0], [31, 0, 0]), # red
#([255, 153, 0], [223, 133, 0], [191, 114, 0], [159, 95, 0],
#[127, 76, 0], [95, 57, 0], [63, 38, 0], [31, 19, 0]), # orange
#([255, 255, 0], [223, 223, 0], [191, 191, 0], [159, 159, 0],
#[127, 127, 0], [95, 95, 0], [63, 63, 0], [31, 31, 0]), # yellow
#([0, 255, 0], [0, 223, 0], [0, 191, 0], [0, 159, 0],
#[0, 127, 0], [0, 95, 0], [0, 63, 0], [0, 31, 0]), # green
#([0, 0, 255], [0, 0, 223], [0, 0, 191], [0, 0, 159],
#[0, 0, 127], [0, 0, 95], [0, 0, 63], [0, 0, 31]), # blue
#([75, 0, 130], [65, 0, 113], [56, 0, 97], [46, 0, 81],
#[37, 0, 65], [28, 0, 48], [18, 0, 32], [9, 0, 16]), # indigo
#([139, 0, 255], [121, 0, 223], [104, 0, 191], [86, 0, 159],
#[69, 0, 127], [52, 0, 95], [34, 0, 63], [17, 0, 31]), # violet
#([255, 255, 255], [223, 223, 223], [191, 191, 191], [159, 159, 159],
#[127, 127, 127], [95, 95, 95], [63, 63, 63], [31, 31, 31]), # white
#([255, 0, 0], [255, 153, 0], [255, 255, 0], [0, 255, 0],
#[0, 0, 255], [75, 0, 130], [139, 0, 255], [255, 255, 255]), # rainbow colors
# List of animations speeds (in seconds). This is how long an animation spends before
# changing to the next step. Higher values are slower.
speeds = [.4, .2, .1, .05, .025]
# Global state used by the sketch
color_index = 0
animation_index = 0
speed_index = 2
pixel_pin = board.D1 # Pin where NeoPixels are connected
pixel_count = 10 # Number of NeoPixels
speed = .1 # Animation speed (in seconds).
# This is how long to spend in a single animation frame.
# Higher values are slower.
animation = 1 # Type of animation, can be one of these values:
# 0 - Solid color pulse
# 1 - Moving color pulse
# initialize LEDS
strip = neopixel.NeoPixel(board.NEOPIXEL, pixel_count, brightness=brightness, auto_write=False)
# initialize Remote Control
ir_code_min = 60
ir_code_max = 70
pulsein = pulseio.PulseIn(board.REMOTEIN, maxlen=100, idle_state=True)
decoder = adafruit_irremote.GenericDecode()
def read_nec():
# Check if a NEC IR remote command is the correct length.
# Save the third decoded value as our unique identifier.
pulses = decoder.read_pulses(pulsein, max_pulse=5000)
command = None
try:
if len(pulses) >= ir_code_min and len(pulses) <= ir_code_max:
code = decoder.decode_bits(pulses)
if len(code) > 3:
command = code[2]
except adafruit_irremote.IRNECRepeatException: # Catches the repeat signal
pass
except adafruit_irremote.IRDecodeException: # Failed to decode
pass
return command
def handle_remote():
global color_index, animation_index, speed_index
ir_code = read_nec()
if ir_code is None:
time.sleep(.1)
return
if ir_code == color_change:
color_index = (color_index + 1) % color_count
elif ir_code == animation_change:
animation_index = (animation_index + 1) % 2
elif ir_code == speed_change:
speed_index = (speed_index + 1) % 5
elif ir_code == power_off:
strip.fill([0, 0, 0])
strip.show()
while True: # Loop forever...
# Main loop will update all the pixels based on the animation.
for i in range(pixel_count):
# Animation 0, solid color pulse of all pixels.
if animation_index == 0:
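            # fold elapsed time into a triangle wave over the palette steps (0..7..0)
            # so the color ramps up and then back down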
current_step = (time.monotonic() / speeds[speed_index]) % (color_steps * 2 - 2)
if current_step >= color_steps:
current_step = color_steps - (current_step - (color_steps - 2))
# Animation 1, moving color pulse. Use position to change brightness.
        elif animation_index == 1:
current_step = (time.monotonic() / speeds[speed_index] + i) % (color_steps * 2 - 2)
if current_step >= color_steps:
current_step = color_steps - (current_step - (color_steps - 2))
strip[i] = color_palette[int(color_index)][int(current_step)]
# Next check for any IR remote commands.
handle_remote()
# Show the updated pixels.
strip.show()
```
#### File: Adafruit_Learning_System_Guides/TrelliBird/game.py
```python
import time
import random
import math
from bird import Bird
from post import Post
from color_names import *
BLACK = 0x000000
class Game(object):
"""Overall game control."""
def __init__(self, trellis, accel, ramp=20, challenge_ramp=30):
"""initialize a Game instance.
trellis -- the TrellisM4Express instance to use as input and screen.
accel -- the accelerometer interface object to use as input
ramp -- how often (in steps) to increase the speed (default 20)
challenge_ramp -- how often (in steps) to increase the challenge of the posts
"""
self._trellis = trellis
self._accel = accel
self._delay_ramp = ramp
self._challenge_ramp = challenge_ramp
self._bird = Bird()
self._posts = []
self._interstitial_delay = 1.0
self._challenge = 10
self._currently_pressed = set([])
self._previous_accel_reading = (None, None, None)
self._previous_shake_result = False
def _restart(self):
"""Restart the game."""
self._bird = Bird()
self._posts = []
self._interstitial_delay = 0.5
self._challenge = 10
def _update(self):
"""Perform a periodic update: move the posts and remove any that go off the screen."""
for post in self._posts:
post.update()
if self._posts and self._posts[0].off_screen:
self._posts.pop(0)
def _shaken(self):
"""Return whether the Trellis is shaken."""
last_result = self._previous_shake_result
result = False
x, y, z = self._accel.acceleration
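        # a change of more than 4 in the z-axis reading between samples counts as a shake;
        # comparing with the previous result avoids retriggering on the same shake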
if self._previous_accel_reading[0] is not None:
result = math.fabs(self._previous_accel_reading[2] - z) > 4.0
self._previous_accel_reading = (x, y, z)
self._previous_shake_result = result
return result and not last_result
def _key_pressed(self):
"""Return whether a key was pressed since last time."""
pressed = set(self._trellis.pressed_keys)
key_just_pressed = len(pressed - self._currently_pressed) > 0
self._currently_pressed = pressed
return key_just_pressed
def _should_flap(self, mode):
"""Return whether the user wants the bird to flap.
mode -- input mode: False is key, True is accel
"""
if mode:
return self._shaken()
return self._key_pressed()
def _update_bird(self, mode):
"""Update the vertical position of the bird based on user activity and gravity.
mode -- input mode: False is key, True is accel
"""
self._bird.draw_on(self._trellis, BLACK)
if self._should_flap(mode):
self._bird.flap()
else:
self._bird.update()
self._bird.draw_on(self._trellis)
self._trellis.pixels.show()
def _check_for_collision(self):
"""Return whether this bird has collided with a post."""
collided = self._bird.did_hit_ground()
for post in self._posts:
collided |= self._bird.is_colliding_with(post)
return collided
def _update_display(self):
"""Update the screen."""
self._trellis.pixels.fill(BLACK)
for post in self._posts:
post.draw_on(self._trellis)
self._bird.draw_on(self._trellis)
self._trellis.pixels.show()
def _new_post(self):
"""Return a new post based on the current challenge level"""
bottom_blocks = random.randint(1, 3)
top_blocks = random.randint(1, 2)
# bottom post
if self._challenge > 6:
return Post(from_bottom=bottom_blocks)
# top possible as well
if self._challenge > 3:
if random.randint(1, 2) == 1:
return Post(from_bottom=bottom_blocks)
return Post(from_top=top_blocks)
# top, bottom, and both possible
r = random.randint(1, 3)
if r == 1:
return Post(from_bottom=bottom_blocks)
if r == 2:
return Post(from_top=top_blocks)
return Post(from_bottom=bottom_blocks, from_top=random.randint(1, 4 - bottom_blocks))
def _add_post(self):
"""Add a post."""
self._posts.append(self._new_post())
def play(self, mode=False):
"""Play the game.
mode -- input mode: False is key, True is accel
"""
self._restart()
collided = False
count = 0
last_tick = 0
while not collided:
now = time.monotonic()
self._update_bird(mode)
if now >= last_tick + self._interstitial_delay:
last_tick = now
count += 1
self._update()
collided = self._check_for_collision()
if count % max(1, (self._challenge - random.randint(0, 4))) == 0:
self._add_post()
self._update_display()
# handle collision or wait and repeat
if collided:
self._bird.flash(self._trellis)
else:
# time to speed up?
if count % self._delay_ramp == 0:
self._interstitial_delay -= 0.01
# time to increase challenge of the posts?
if self._challenge > 0 and count % self._challenge_ramp == 0:
self._challenge -= 1
time.sleep(0.05)
```
#### File: Adafruit_Learning_System_Guides/TrelliBird/post.py
```python
from color_names import *
class Post(object):
"""Obstacles the user must avoice colliding with."""
def __init__(self, from_bottom=0, from_top=0):
"""Initialize a Post instance.
from_bottom -- how far the post extends from the bottom of the screen (default 0)
from_top -- how far the post extends from the top of the screen (default 0)
"""
self._x = 7
self._top = from_top
self._bottom = from_bottom
def update(self):
"""Periodic update: move one step to the left."""
self._x -= 1
def _on_post(self, x, y):
"""Determine whether the supplied coordinate is occupied by part of this post.
x -- the horizontal pixel coordinate
y -- the vertical pixel coordinate
"""
return x == self._x and (y < self._top or y > (3 - self._bottom))
def draw_on(self, trellis):
"""Draw this post on the screen.
trellis -- the TrellisM4Express instance to use as a screen
"""
for i in range(4):
if self._on_post(self._x, i):
trellis.pixels[self._x, i] = GREEN
def is_collision_at(self, x, y):
"""Determine whether something at the supplied coordinate is colliding with this post.
x -- the horizontal pixel coordinate
y -- the vertical pixel coordinate
"""
return self._on_post(x, y)
@property
def off_screen(self):
"""Return whether this post has moved off the left edge of the screen."""
return self._x < 0
```
#### File: Adafruit_Learning_System_Guides/Trinket_Ultrasonic_Rangefinder/Trinket_Ultrasonic_Rangefinder.py
```python
import time
import adafruit_character_lcd
import board
import busio
import pulseio
ez1pin = board.D1 # Trinket GPIO #1
# i2c LCD initialize bus and class
i2c = busio.I2C(board.SCL, board.SDA)
cols = 16
rows = 2
lcd = adafruit_character_lcd.Character_LCD_I2C(i2c, cols, rows)
# calculated mode or median distance
mode_result = 0
# pulseio can store multiple pulses
# read in time for pin to transition
samples = 18
pulses = pulseio.PulseIn(board.D1, maxlen=samples)
# sensor reads which are in range will be stored here
rangevalue = [0, 0, 0, 0, 0, 0, 0, 0, 0]
# 25ms sensor power up pause
time.sleep(.25)
def tof_cm(time_of_flight):
"""
EZ1 ultrasonic sensor is measuring "time of flight"
Converts time of flight into distance in centimeters
"""
convert_to_cm = 58
cm = time_of_flight / convert_to_cm
return cm
def tof_inches(time_of_flight):
"""
EZ1 ultrasonic sensor is measuring "time of flight"
Converts time of flight into distance in inches
"""
convert_to_inches = 147
inches = time_of_flight / convert_to_inches
return inches
def find_mode(x):
"""
find the mode (most common value reported)
will return median (center of sorted list)
should mode not be found
"""
n = len(x)
max_count = 0
mode = 0
bimodal = 0
counter = 0
index = 0
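    # x is expected to be sorted; count runs of equal values and keep the value with the longest run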
while index < (n - 1):
prev_count = counter
counter = 0
while (x[index]) == (x[index + 1]):
counter += 1
index += 1
if (counter > prev_count) and (counter > max_count):
mode = x[index]
max_count = counter
bimodal = 0
if counter == 0:
index += 1
# If the dataset has 2 or more modes.
if counter == max_count:
bimodal = 1
# Return the median if there is no mode.
if (mode == 0) or (bimodal == 1):
mode = x[int(n / 2)]
return mode
while True:
# wait between samples
time.sleep(.5)
if len(pulses) == samples:
j = 0 # rangevalue array counter
# only save the values within range
# range readings take 49mS
# pulse width is .88mS to 37.5mS
for i in range(0, samples):
tof = pulses[i] # time of flight - PWM HIGH
if 880 < tof < 37500:
if j < len(rangevalue):
rangevalue[j] = tof_cm(tof)
j += 1
# clear pulse samples
pulses.clear() # clear all values in pulses[]
# sort samples
rangevalue = sorted(rangevalue)
# returns mode or median
mode_result = int(find_mode(rangevalue))
# python console prints both centimeter and inches distance
cm2in = .393701
mode_result_in = mode_result * cm2in
print(mode_result, "cm", "\t\t", int(mode_result_in), "in")
# result must be in char/string format for LCD printing
digit_string = str(mode_result)
lcd.clear()
lcd.message("Range: ") # write to LCD
lcd.message(" ")
lcd.message(digit_string)
lcd.message("cm")
time.sleep(2)
```
#### File: Adafruit_Learning_System_Guides/ulab_Crunch_Numbers_Fast/benchmark.py
```python
import time
import math
import ulab
import ulab.numerical
def mean(values):
return sum(values) / len(values)
def normalized_rms(values):
minbuf = int(mean(values))
samples_sum = sum(
float(sample - minbuf) * (sample - minbuf)
for sample in values
)
return math.sqrt(samples_sum / len(values))
def normalized_rms_ulab(values):
# this function works with ndarrays only
minbuf = ulab.numerical.mean(values)
values = values - minbuf
samples_sum = ulab.numerical.sum(values * values)
return math.sqrt(samples_sum / len(values))
# Instead of using sensor data, we generate some data
# The amplitude is 5000 so the rms should be around 5000/1.414 = 3536
nums_list = [int(8000 + math.sin(i) * 5000) for i in range(100)]
nums_array = ulab.array(nums_list)
def timeit(s, f, n=100):
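    # run f() n times and report the average time per call in milliseconds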
t0 = time.monotonic_ns()
for _ in range(n):
x = f()
t1 = time.monotonic_ns()
r = (t1 - t0) * 1e-6 / n
print("%-30s : %8.3fms [result=%f]" % (s, r, x))
print("Computing the RMS value of 100 numbers")
timeit("traditional", lambda: normalized_rms(nums_list))
timeit("ulab, with ndarray, some implementation in python", lambda: normalized_rms_ulab(nums_array))
timeit("ulab only, with list", lambda: ulab.numerical.std(nums_list))
timeit("ulab only, with ndarray", lambda: ulab.numerical.std(nums_array))
```
#### File: Adafruit_Learning_System_Guides/ulab_Crunch_Numbers_Fast/waterfall.py
```python
import array
import board
import audiobusio
import displayio
import ulab
import ulab.extras
import ulab.vector
display = board.DISPLAY
# Create a heatmap color palette
palette = displayio.Palette(52)
for i, pi in enumerate((0xff0000, 0xff0a00, 0xff1400, 0xff1e00,
0xff2800, 0xff3200, 0xff3c00, 0xff4600,
0xff5000, 0xff5a00, 0xff6400, 0xff6e00,
0xff7800, 0xff8200, 0xff8c00, 0xff9600,
0xffa000, 0xffaa00, 0xffb400, 0xffbe00,
0xffc800, 0xffd200, 0xffdc00, 0xffe600,
0xfff000, 0xfffa00, 0xfdff00, 0xd7ff00,
0xb0ff00, 0x8aff00, 0x65ff00, 0x3eff00,
0x17ff00, 0x00ff10, 0x00ff36, 0x00ff5c,
0x00ff83, 0x00ffa8, 0x00ffd0, 0x00fff4,
0x00a4ff, 0x0094ff, 0x0084ff, 0x0074ff,
0x0064ff, 0x0054ff, 0x0044ff, 0x0032ff,
0x0022ff, 0x0012ff, 0x0002ff, 0x0000ff)):
palette[51-i] = pi
class RollingGraph(displayio.TileGrid):
def __init__(self, scale=2):
# Create a bitmap with heatmap colors
self.bitmap = displayio.Bitmap(display.width//scale,
display.height//scale, len(palette))
super().__init__(self.bitmap, pixel_shader=palette)
self.scroll_offset = 0
def show(self, data):
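        # draw one row of spectrum data at the current scroll row, centered horizontally,
        # then advance the row (wrapping) to build a rolling waterfall display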
y = self.scroll_offset
bitmap = self.bitmap
board.DISPLAY.auto_refresh = False
offset = max(0, (bitmap.width-len(data))//2)
for x in range(min(bitmap.width, len(data))):
bitmap[x+offset, y] = int(data[x])
board.DISPLAY.auto_refresh = True
self.scroll_offset = (y + 1) % self.bitmap.height
group = displayio.Group(scale=3)
graph = RollingGraph(3)
fft_size = 256
# Add the TileGrid to the Group
group.append(graph)
# Add the Group to the Display
display.show(group)
# instantiate board mic
mic = audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA,
sample_rate=16000, bit_depth=16)
# use some extra samples to account for the mic startup
samples_bit = array.array('H', [0] * (fft_size+3))
# Main Loop
def main():
max_all = 10
while True:
mic.record(samples_bit, len(samples_bit))
samples = ulab.array(samples_bit[3:])
spectrogram1 = ulab.extras.spectrogram(samples)
        # the spectrogram values are always nonnegative, but add a tiny value
        # to change any zeros to nonzero numbers before taking the log
spectrogram1 = ulab.vector.log(spectrogram1 + 1e-7)
spectrogram1 = spectrogram1[1:(fft_size//2)-1]
min_curr = ulab.numerical.min(spectrogram1)
max_curr = ulab.numerical.max(spectrogram1)
if max_curr > max_all:
max_all = max_curr
else:
max_curr = max_curr-1
print(min_curr, max_all)
min_curr = max(min_curr, 3)
# Plot FFT
data = (spectrogram1 - min_curr) * (51. / (max_all - min_curr))
# This clamps any negative numbers to zero
data = data * ulab.array((data > 0))
graph.show(data)
main()
``` |
{
"source": "joewalnes/dotfiles-1",
"score": 2
} |
#### File: sublime2/User/syntax_highlighting.py
```python
import os
from fnmatch import fnmatch
import sublime
import sublime_plugin
class DetectFileTypeCommand(sublime_plugin.EventListener):
"""
    Detects the current file type when the file's extension isn't enough.
    Modified for Ruby on Rails and Sublime Text 2. Original pastie
here: http://pastie.org/private/kz8gtts0cjcvkec0d4quqa
Place in your Packages/User directory.
"""
def on_load(self, view):
filename = view.file_name()
if not filename: # buffer has never been saved
return
try:
name = os.path.basename(filename.lower())
if fnmatch(name, "*_spec.rb"):
set_syntax(view, "RSpec", "rspec/Syntaxes")
elif name == "factories.rb":
set_syntax(view, "RSpec", "rspec/Syntaxes")
elif name == "gemfile":
set_syntax(view, "Ruby on Rails", "Rails")
elif name == "guardfile":
set_syntax(view, "Ruby on Rails", "Rails")
elif name == "config.ru":
set_syntax(view, "Ruby on Rails", "Rails")
elif fnmatch(name, "*.rb"):
set_syntax(view, "Ruby on Rails", "Rails")
elif fnmatch(name, "*.jbuilder"):
set_syntax(view, "Ruby on Rails", "Rails")
elif fnmatch(name, "*.rabl"):
set_syntax(view, "Ruby on Rails", "Rails")
elif fnmatch(name, "*.kitsch"):
set_syntax(view, "Ruby on Rails", "Rails")
elif fnmatch(name, "*_spec.js"):
set_syntax(view, "Jasmine", "Jasmine/Syntaxes")
elif fnmatch(name, "jakefile"):
set_syntax(view, "JavaScript", "JavaScript")
except:
print "There was an error changing syntax."
def set_syntax(view, syntax, path=None):
if path is None:
path = syntax
view.settings().set('syntax', 'Packages/' + path + '/' + syntax +
'.tmLanguage')
print "Switched syntax to: " + syntax
``` |
{
"source": "joewalter/mne-python",
"score": 2
} |
#### File: viz/tests/test_decoding.py
```python
import os.path as op
import warnings
from nose.tools import assert_raises, assert_equals
import numpy as np
from mne.epochs import equalize_epoch_counts, concatenate_epochs
from mne.decoding import GeneralizationAcrossTime
from mne import Epochs, read_events, pick_types
from mne.io import read_raw_fif
from mne.utils import requires_sklearn, run_tests_if_main
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
warnings.simplefilter('always') # enable b/c these tests throw warnings
def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
"""Aux function for testing GAT viz"""
gat = GeneralizationAcrossTime()
raw = read_raw_fif(raw_fname, preload=False, add_eeg_ref=False)
raw.add_proj([], remove_existing=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
decim = 30
# Test on time generalization within one condition
with warnings.catch_warnings(record=True):
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, decim=decim,
add_eeg_ref=False)
epochs_list = [epochs[k] for k in event_id]
equalize_epoch_counts(epochs_list)
epochs = concatenate_epochs(epochs_list)
# Test default running
gat = GeneralizationAcrossTime(test_times=test_times)
gat.fit(epochs)
gat.score(epochs)
return gat
@requires_sklearn
def test_gat_plot_matrix():
"""Test GAT matrix plot"""
gat = _get_data()
gat.plot()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_diagonal():
"""Test GAT diagonal plot"""
gat = _get_data()
gat.plot_diagonal()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_times():
"""Test GAT times plot"""
gat = _get_data()
# test one line
gat.plot_times(gat.train_times_['times'][0])
# test multiple lines
gat.plot_times(gat.train_times_['times'])
# test multiple colors
n_times = len(gat.train_times_['times'])
colors = np.tile(['r', 'g', 'b'],
int(np.ceil(n_times / 3)))[:n_times]
gat.plot_times(gat.train_times_['times'], color=colors)
# test invalid time point
assert_raises(ValueError, gat.plot_times, -1.)
# test float type
assert_raises(ValueError, gat.plot_times, 1)
assert_raises(ValueError, gat.plot_times, 'diagonal')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
def chance(ax):
return ax.get_children()[1].get_lines()[0].get_ydata()[0]
@requires_sklearn
def test_gat_chance_level():
"""Test GAT plot_times chance level"""
gat = _get_data()
ax = gat.plot_diagonal(chance=False)
ax = gat.plot_diagonal()
assert_equals(chance(ax), .5)
gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))
ax = gat.plot_diagonal()
assert_equals(chance(ax), .25)
ax = gat.plot_diagonal(chance=1.234)
assert_equals(chance(ax), 1.234)
assert_raises(ValueError, gat.plot_diagonal, chance='foo')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_nonsquared():
"""Test GAT diagonal plot"""
gat = _get_data(test_times=dict(start=0.))
gat.plot()
ax = gat.plot_diagonal()
scores = ax.get_children()[1].get_lines()[2].get_ydata()
assert_equals(len(scores), len(gat.estimators_))
run_tests_if_main()
```
#### File: viz/tests/test_raw.py
```python
import numpy as np
import os.path as op
import warnings
from numpy.testing import assert_raises, assert_equal
from mne import read_events, pick_types, Annotations
from mne.io import read_raw_fif
from mne.utils import requires_version, run_tests_if_main
from mne.viz.utils import _fake_click
from mne.viz import plot_raw, plot_sensors
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def _get_raw():
"""Get raw data."""
raw = read_raw_fif(raw_fname, preload=True, add_eeg_ref=False)
# Throws a warning about a changed unit.
with warnings.catch_warnings(record=True):
raw.set_channel_types({raw.ch_names[0]: 'ias'})
raw.pick_channels(raw.ch_names[:9])
raw.info.normalize_proj() # Fix projectors after subselection
return raw
def _get_events():
"""Get events."""
return read_events(event_name)
@requires_version('matplotlib', '1.2')
def test_plot_raw():
"""Test plotting of raw data."""
import matplotlib.pyplot as plt
raw = _get_raw()
events = _get_events()
plt.close('all') # ensure all are closed
with warnings.catch_warnings(record=True):
fig = raw.plot(events=events, show_options=True)
# test mouse clicks
x = fig.get_axes()[0].lines[1].get_xdata().mean()
y = fig.get_axes()[0].lines[1].get_ydata().mean()
data_ax = fig.get_axes()[0]
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad channel
_fake_click(fig, data_ax, [x, y], xform='data') # unmark a bad channel
_fake_click(fig, data_ax, [0.5, 0.999]) # click elsewhere in 1st axes
_fake_click(fig, data_ax, [-0.1, 0.9]) # click on y-label
_fake_click(fig, fig.get_axes()[1], [0.5, 0.5]) # change time
_fake_click(fig, fig.get_axes()[2], [0.5, 0.5]) # change channels
_fake_click(fig, fig.get_axes()[3], [0.5, 0.5]) # open SSP window
fig.canvas.button_press_event(1, 1, 1) # outside any axes
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
# sadly these fail when no renderer is used (i.e., when using Agg):
# ssp_fig = set(plt.get_fignums()) - set([fig.number])
# assert_equal(len(ssp_fig), 1)
# ssp_fig = plt.figure(list(ssp_fig)[0])
# ax = ssp_fig.get_axes()[0] # only one axis is used
# t = [c for c in ax.get_children() if isinstance(c,
# matplotlib.text.Text)]
# pos = np.array(t[0].get_position()) + 0.01
# _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data') # off
# _fake_click(ssp_fig, ssp_fig.get_axes()[0], pos, xform='data') # on
# test keypresses
fig.canvas.key_press_event('escape')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('right')
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('end')
fig.canvas.key_press_event('?')
fig.canvas.key_press_event('f11')
fig.canvas.key_press_event('escape')
# Color setting
assert_raises(KeyError, raw.plot, event_color={0: 'r'})
assert_raises(TypeError, raw.plot, event_color={'foo': 'r'})
annot = Annotations([10, 10 + raw.first_samp / raw.info['sfreq']],
[10, 10], ['test', 'test'], raw.info['meas_date'])
raw.annotations = annot
fig = plot_raw(raw, events=events, event_color={-1: 'r', 998: 'b'})
plt.close('all')
for order in ['position', 'selection', range(len(raw.ch_names))[::-4],
[1, 2, 4, 6]]:
fig = raw.plot(order=order)
x = fig.get_axes()[0].lines[1].get_xdata()[10]
y = fig.get_axes()[0].lines[1].get_ydata()[10]
_fake_click(fig, data_ax, [x, y], xform='data') # mark bad
fig.canvas.key_press_event('down') # change selection
_fake_click(fig, fig.get_axes()[2], [0.5, 0.5]) # change channels
if order in ('position', 'selection'):
sel_fig = plt.figure(1)
topo_ax = sel_fig.axes[1]
_fake_click(sel_fig, topo_ax, [-0.425, 0.20223853],
xform='data')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.scroll_event(0.5, 0.5, -1) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 1) # scroll up
_fake_click(sel_fig, topo_ax, [-0.5, 0.], xform='data')
_fake_click(sel_fig, topo_ax, [0.5, 0.], xform='data',
kind='motion')
_fake_click(sel_fig, topo_ax, [0.5, 0.5], xform='data',
kind='motion')
_fake_click(sel_fig, topo_ax, [-0.5, 0.5], xform='data',
kind='release')
plt.close('all')
# test if meas_date has only one element
raw.info['meas_date'] = np.array([raw.info['meas_date'][0]],
dtype=np.int32)
raw.annotations = Annotations([1 + raw.first_samp / raw.info['sfreq']],
[5], ['bad'])
raw.plot()
plt.close('all')
@requires_version('scipy', '0.10')
def test_plot_raw_filtered():
"""Test filtering of raw plots."""
raw = _get_raw()
assert_raises(ValueError, raw.plot, lowpass=raw.info['sfreq'] / 2.)
assert_raises(ValueError, raw.plot, highpass=0)
assert_raises(ValueError, raw.plot, lowpass=1, highpass=1)
assert_raises(ValueError, raw.plot, lowpass=1, filtorder=0)
assert_raises(ValueError, raw.plot, clipping='foo')
raw.plot(lowpass=1, clipping='transparent')
raw.plot(highpass=1, clipping='clamp')
raw.plot(highpass=1, lowpass=2)
@requires_version('scipy', '0.12')
def test_plot_raw_psd():
"""Test plotting of raw psds."""
import matplotlib.pyplot as plt
raw = _get_raw()
# normal mode
raw.plot_psd(tmax=2.0)
# specific mode
picks = pick_types(raw.info, meg='mag', eeg=False)[:4]
raw.plot_psd(picks=picks, area_mode='range')
ax = plt.axes()
# if ax is supplied:
assert_raises(ValueError, raw.plot_psd, ax=ax)
raw.plot_psd(picks=picks, ax=ax)
plt.close('all')
ax = plt.axes()
assert_raises(ValueError, raw.plot_psd, ax=ax)
ax = [ax, plt.axes()]
raw.plot_psd(ax=ax)
plt.close('all')
# topo psd
raw.plot_psd_topo()
plt.close('all')
# with a flat channel
raw[5, :] = 0
assert_raises(ValueError, raw.plot_psd)
@requires_version('matplotlib', '1.2')
def test_plot_sensors():
"""Test plotting of sensor array."""
import matplotlib.pyplot as plt
raw = _get_raw()
fig = raw.plot_sensors('3d')
_fake_click(fig, fig.gca(), (-0.08, 0.67))
raw.plot_sensors('topomap', ch_type='mag')
ax = plt.subplot(111)
raw.plot_sensors(ch_groups='position', axes=ax)
raw.plot_sensors(ch_groups='selection')
raw.plot_sensors(ch_groups=[[0, 1, 2], [3, 4]])
assert_raises(ValueError, raw.plot_sensors, ch_groups='asd')
assert_raises(TypeError, plot_sensors, raw) # needs to be info
assert_raises(ValueError, plot_sensors, raw.info, kind='sasaasd')
plt.close('all')
fig, sels = raw.plot_sensors('select', show_names=True)
ax = fig.axes[0]
# Click with no sensors
_fake_click(fig, ax, (0., 0.), xform='data')
_fake_click(fig, ax, (0, 0.), xform='data', kind='release')
assert_equal(len(fig.lasso.selection), 0)
# Lasso with 1 sensor
_fake_click(fig, ax, (-0.5, 0.5), xform='data')
plt.draw()
_fake_click(fig, ax, (0., 0.5), xform='data', kind='motion')
_fake_click(fig, ax, (0., 0.), xform='data', kind='motion')
fig.canvas.key_press_event('control')
_fake_click(fig, ax, (-0.5, 0.), xform='data', kind='release')
assert_equal(len(fig.lasso.selection), 1)
_fake_click(fig, ax, (-0.09, -0.43), xform='data') # single selection
assert_equal(len(fig.lasso.selection), 2)
_fake_click(fig, ax, (-0.09, -0.43), xform='data') # deselect
assert_equal(len(fig.lasso.selection), 1)
plt.close('all')
run_tests_if_main()
``` |
{
"source": "joewandy/omics_integration",
"score": 3
} |
#### File: graphomics/linker/gene_ontologies_utils.py
```python
import pandas as pd
from goatools.associations import read_gaf
from goatools.base import dnld_gaf
from goatools.base import download_go_basic_obo
from goatools.obo_parser import GODag
from tqdm import tqdm
from linker.constants import *
def download_ontologies():
"""
Download ontologies, a dictionary that maps GO IDs to GO terms. In most cases, we should use the basic OBO file.
:return: a dictionary where key is the gene ontology id ('GO:0000001') and value is the GOTerm class
"""
obo_fname = download_go_basic_obo(prt=None, loading_bar=False)
ontologies = GODag(obo_fname)
return ontologies
def gaf_names_to_id(gaf_filename):
df = pd.read_csv(gaf_filename, comment='!', sep='\t', header=None, dtype=str)
# temp has 2 columns. First is the gene id, next is the gene symbol
# example:
# 'ZDB-MIRNAG-081210-6', 'mir26b'
temp = df.iloc[:, 1:3].values
names_to_id = {symbol: my_id for my_id, symbol in temp}
return names_to_id
def to_id(names, names_to_id_dict):
ids = []
for x in names:
try:
my_id = names_to_id_dict[x.lower()]
ids.append(my_id)
except KeyError as e:
# logger.debug(e)
pass
return ids
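# Illustrative sketch (added; not part of the original module). The GAF filename
# below is a hypothetical placeholder, used only to show how the two helpers fit together:
#
#   names_to_id = gaf_names_to_id('zfin.gaf')   # e.g. {'mir26b': 'ZDB-MIRNAG-081210-6', ...}
#   ids = to_id(['mir26b', 'not_in_gaf'], names_to_id)
#   # -> ['ZDB-MIRNAG-081210-6']; symbols without a mapping are silently skipped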
def download_associations():
species_associations = {}
gaf_name_to_id = {}
for species, gaf_prefix in tqdm(SPECIES_TO_GAF_PREFIX.items()):
gaf_filename = dnld_gaf(gaf_prefix, prt=None, loading_bar=False)
gaf_name_to_id[species] = gaf_names_to_id(gaf_filename)
assocs = {}
for namespace in GO_NAMESPACES:
associations = read_gaf(gaf_filename, namespace=namespace, go2geneids=False, prt=None)
assocs[namespace] = associations
species_associations[species] = assocs
return species_associations, gaf_name_to_id
```
#### File: graphomics/linker/models.py
```python
import os
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.utils import timezone
from jsonfield import JSONField
from django.core.files import File
User = get_user_model()
from linker.constants import DataType, DataRelationType, InferenceTypeChoices
class Analysis(models.Model):
name = models.CharField(max_length=100, null=True)
description = models.CharField(max_length=1000, null=True)
publication = models.CharField(max_length=1000, null=True)
publication_link = models.CharField(max_length=1000, null=True)
timestamp = models.DateTimeField(default=timezone.localtime, null=False)
public = models.BooleanField(default=False)
metadata = JSONField()
users = models.ManyToManyField(User, through='Share')
class Meta:
verbose_name_plural = "Analyses"
def get_species_str(self):
if 'species_list' in self.metadata:
return ', '.join(self.metadata['species_list'])
else:
return ''
def get_species_list(self):
if 'species_list' in self.metadata:
return self.metadata['species_list']
else:
return []
def get_owner(self):
for share in self.share_set.all():
if share.owner:
return share.user
def get_read_only_status(self, user):
for share in self.share_set.all(): # search shares for this user
if share.user == user:
if share.owner: # owner can always edit
return False
return share.read_only # otherwise check the read-only field
return False
def get_read_only_str(self, user):
read_only = self.get_read_only_status(user)
msg = 'Read Only' if read_only else 'Edit'
return msg
def get_shared(self, user):
for share in self.share_set.all(): # search shares for this user
if share.user == user:
return True
return False
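    # Illustrative note (added): with Share(user=alice, owner=True) and
    # Share(user=bob, owner=False, read_only=True) on the same analysis,
    # get_owner() returns alice, get_read_only_status(alice) is False,
    # get_read_only_status(bob) is True, and get_shared(bob) is True;
    # a user with no Share row falls through to the defaults (editable / not shared).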
    def has_mofa_data(self):
        file = self.analysisupload.mofa_data
        return bool(file and file.storage.exists(file.name))
    def has_gene_data(self):
        file = self.analysisupload.gene_data
        return bool(file and file.storage.exists(file.name))
    def has_protein_data(self):
        file = self.analysisupload.protein_data
        return bool(file and file.storage.exists(file.name))
    def has_compound_data(self):
        file = self.analysisupload.compound_data
        return bool(file and file.storage.exists(file.name))
    def has_metadata(self):
        file = self.analysisupload.metadata
        return bool(file and file.storage.exists(file.name))
def get_mofa_hdf5_path(self):
if self.has_mofa_data():
return self.analysisupload.mofa_data.path
else:
return None
def get_gene_data_path(self):
if self.has_gene_data():
return self.analysisupload.gene_data.path
else:
return None
def get_protein_data_path(self):
if self.has_protein_data():
return self.analysisupload.protein_data.path
else:
return None
def get_compound_data_path(self):
if self.has_compound_data():
return self.analysisupload.compound_data.path
else:
return None
def get_gene_design_path(self):
if self.has_gene_data():
return self.analysisupload.gene_design.path
else:
return None
def get_protein_design_path(self):
if self.has_protein_data():
return self.analysisupload.protein_design.path
else:
return None
def get_compound_design_path(self):
if self.has_compound_data():
return self.analysisupload.compound_design.path
else:
return None
def get_metadata_path(self):
if self.has_metadata():
return self.analysisupload.metadata.path
else:
return None
def set_mofa_hdf5_path(self, filePath):
with open(filePath, 'rb') as f:
fileName = self.name + '_mofa_data.hdf5'
self.analysisupload.mofa_data.save(fileName, File(f))
os.remove(filePath)
def __str__(self):
return self.name
class Share(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
analysis = models.ForeignKey(Analysis, on_delete=models.CASCADE)
read_only = models.BooleanField()
owner = models.BooleanField()
def __str__(self):
return 'User=%s, Analysis=%s, read_only=%s, owner=%s' % (self.user, self.analysis, self.read_only, self.owner)
def get_read_only_str(self):
msg = 'Read Only' if self.read_only else 'Edit'
return msg
def get_upload_folder(instance, filename):
upload_folder = "analysis_upload_%s" % instance.pk
return os.path.abspath(os.path.join(settings.MEDIA_ROOT, upload_folder, filename))
class AnalysisUpload(models.Model):
analysis = models.OneToOneField(
Analysis,
on_delete=models.CASCADE,
primary_key=True,
)
gene_data = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
gene_design = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
protein_data = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
protein_design = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
compound_data = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
compound_design = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
mofa_data = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
metadata = models.FileField(blank=True, null=True, upload_to=get_upload_folder)
class AnalysisData(models.Model):
analysis = models.ForeignKey(Analysis, on_delete=models.CASCADE)
data_type = models.IntegerField(choices=DataRelationType)
json_data = JSONField()
json_design = JSONField()
metadata = JSONField(blank=True, null=True)
timestamp = models.DateTimeField(default=timezone.localtime, null=False)
class Meta:
verbose_name_plural = "Analysis Data"
def get_data_type_str(self):
try:
return dict(DataRelationType)[self.data_type]
except KeyError:
return ''
def __str__(self):
return '%s data for analysis %d' % (self.get_data_type_str(), self.analysis.pk)
class AnalysisHistory(models.Model):
analysis = models.ForeignKey(Analysis, on_delete=models.CASCADE)
display_name = models.CharField(max_length=1000, blank=True, null=True)
analysis_data = models.ForeignKey(AnalysisData, on_delete=models.CASCADE)
inference_type = models.IntegerField(choices=InferenceTypeChoices, blank=True, null=True)
inference_data = JSONField()
timestamp = models.DateTimeField(default=timezone.localtime, null=False)
class Meta:
verbose_name_plural = "Analysis Histories"
def get_data_type_str(self):
return self.analysis_data.get_data_type_str()
def get_inference_type_str(self):
try:
return dict(InferenceTypeChoices)[self.inference_type]
except KeyError:
return ''
def __str__(self):
return '%s (%s) timestamp=%s' % (self.display_name, self.analysis_data, self.timestamp.strftime("%Y-%m-%d %H:%M:%S"))
class AnalysisAnnotation(models.Model):
analysis = models.ForeignKey(Analysis, on_delete=models.CASCADE)
data_type = models.IntegerField(choices=DataType)
database_id = models.CharField(max_length=100)
display_name = models.CharField(max_length=1000)
annotation = models.CharField(max_length=1000)
timestamp = models.DateTimeField(default=timezone.localtime, null=False)
class Meta:
verbose_name_plural = "Analysis Annotations"
def __str__(self):
return '%s data_type=%d %s' % (self.analysis.name, self.data_type, self.display_name)
class AnalysisGroup(models.Model):
analysis = models.ForeignKey(Analysis, on_delete=models.CASCADE)
linker_state = JSONField()
display_name = models.CharField(max_length=1000)
description = models.CharField(max_length=1000)
timestamp = models.DateTimeField(default=timezone.localtime, null=False)
class Meta:
verbose_name_plural = "Analysis Groups"
def __str__(self):
        # AnalysisGroup has no data_type field, so only the analysis and display names are shown
        return '%s %s' % (self.analysis.name, self.display_name)
```
#### File: linker/views/inference_view.py
```python
from io import StringIO
import mofax as mfx
import json
import jsonpickle
import numpy as np
from django import forms
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.forms import TextInput, DecimalField
from django.shortcuts import render, get_object_or_404
from django.urls import reverse, reverse_lazy
from django.views.generic import TemplateView, DeleteView
from django_select2.forms import Select2Widget
from loguru import logger
from sklearn.decomposition import PCA as skPCA
from linker.common import access_allowed
from linker.constants import *
from linker.forms import BaseInferenceForm
from linker.models import Analysis, AnalysisData, AnalysisHistory
from linker.views.functions import get_last_analysis_data, get_groups, get_dataframes, get_standardized_df, \
get_group_members, fig_to_div, get_inference_data, save_analysis_history
from linker.views.pathway_analysis import get_pals_data_source, run_pals, run_ora, \
run_gsea
from linker.views.pipelines import GraphOmicsInference, MofaInference
from linker.views.reactome_analysis import get_omics_data, populate_reactome_choices, get_used_dtypes, get_data, \
to_expression_tsv, get_analysis_first_species, parse_reactome_json, send_to_reactome, get_first_analysis_history_id, \
to_ora_tsv
def inference(request, analysis_id):
analysis = get_object_or_404(Analysis, pk=analysis_id)
if not access_allowed(analysis, request):
raise PermissionDenied()
analysis_history_list = AnalysisHistory.objects.filter(analysis=analysis).order_by(
'timestamp')
list_data = get_list_data(analysis_id, analysis_history_list)
if request.method == 'POST':
form = BaseInferenceForm(request.POST)
if form.is_valid():
data_type = int(form.cleaned_data['data_type'])
inference_type = int(form.cleaned_data['inference_type'])
# run t-test analysis
if inference_type == INFERENCE_T_TEST:
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
action_url = reverse('inference_t_test', kwargs={
'analysis_id': analysis_id,
})
selected_form = get_case_control_form(data_type, groups, inference_type)
elif inference_type == INFERENCE_DESEQ:
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
action_url = reverse('inference_deseq', kwargs={
'analysis_id': analysis_id,
})
selected_form = get_case_control_form(data_type, groups, inference_type)
elif inference_type == INFERENCE_LIMMA:
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
action_url = reverse('inference_limma', kwargs={
'analysis_id': analysis_id,
})
selected_form = get_case_control_form(data_type, groups, inference_type)
# do PCA
elif inference_type == INFERENCE_PCA:
analysis_data = get_last_analysis_data(analysis, data_type)
action_url = reverse('inference_pca', kwargs={
'analysis_id': analysis_id,
})
selected_form = BaseInferenceForm()
selected_form.fields['data_type'].initial = data_type
selected_form.fields['inference_type'].initial = inference_type
choices = zip(range(2, 11), range(2, 11))
selected_form.fields['pca_n_components'] = forms.ChoiceField(choices=choices,
widget=Select2Widget(SELECT_WIDGET_ATTRS),
label='PCA components')
# do PALS
elif inference_type == INFERENCE_PALS:
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
action_url = reverse('inference_pals', kwargs={
'analysis_id': analysis_id,
})
selected_form = get_case_control_form(data_type, groups, inference_type)
selected_form.fields['min_hits'] = forms.IntegerField(min_value=0, initial=PLAGE_MIN_HITS,
label='Minimum hits')
# do ORA
elif inference_type == INFERENCE_ORA:
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
action_url = reverse('inference_ora', kwargs={
'analysis_id': analysis_id,
})
selected_form = get_case_control_form(data_type, groups, inference_type)
# do GSEA
elif inference_type == INFERENCE_GSEA:
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
action_url = reverse('inference_gsea', kwargs={
'analysis_id': analysis_id,
})
selected_form = get_case_control_form(data_type, groups, inference_type)
# do Reactome Analysis Service
elif inference_type == INFERENCE_REACTOME:
selected_form = BaseInferenceForm()
selected_form.fields['data_type'].initial = data_type
selected_form.fields['inference_type'].initial = inference_type
if data_type == MULTI_OMICS:
for dtype in [GENOMICS, PROTEOMICS, METABOLOMICS]:
analysis_data = get_last_analysis_data(analysis, dtype)
populate_reactome_choices(analysis_data, dtype, selected_form)
else:
analysis_data = get_last_analysis_data(analysis, data_type)
populate_reactome_choices(analysis_data, data_type, selected_form)
selected_form.fields['threshold'] = DecimalField(required=True, widget=TextInput(
attrs={'autocomplete': 'off', 'type': 'number', 'min': '0', 'max': '1', 'step': '0.05',
'size': '10'}))
selected_form.fields['threshold'].initial = 0.05
action_url = reverse('inference_reactome', kwargs={
'analysis_id': analysis_id,
})
elif inference_type == INFERENCE_MOFA:
action_url = reverse('inference_mofa', kwargs={
'analysis_id': analysis_id,
})
if data_type == MULTI_OMICS:
for dtype in [GENOMICS, PROTEOMICS, METABOLOMICS]:
analysis_data = get_last_analysis_data(analysis, dtype)
else:
analysis_data = get_last_analysis_data(analysis, data_type)
selected_form = BaseInferenceForm()
selected_form.fields['data_type'].initial = data_type
selected_form.fields['inference_type'].initial = inference_type
selected_form.fields['Use uploaded .hdf5 file'] = forms.ChoiceField(choices=zip(['Yes', 'No'], ['Yes', 'No']), widget=Select2Widget())
selected_form.fields['Number of Factor'] = forms.IntegerField(required=True, widget=forms.TextInput(attrs={'size': 100}))
selected_form.fields['Scale View'] = forms.ChoiceField(required=False, choices=zip([True, False], ['Yes', 'No']), widget=Select2Widget())
selected_form.fields['Scale Group'] = forms.ChoiceField(required=False, choices=zip([True, False], ['Yes', 'No']), widget=Select2Widget())
else: # default
action_url = reverse('inference', kwargs={
'analysis_id': analysis_id,
})
selected_form = BaseInferenceForm(request.POST)
context = {
'analysis_id': analysis.pk,
'list_data': list_data,
'form': selected_form,
'action_url': action_url
}
return render(request, 'linker/inference.html', context)
else:
action_url = reverse('inference', kwargs={
'analysis_id': analysis_id,
})
base_form = BaseInferenceForm()
context = {
'analysis_id': analysis.pk,
'list_data': list_data,
'form': base_form,
'action_url': action_url
}
return render(request, 'linker/inference.html', context)
def get_case_control_form(data_type, groups, inference_type):
selected_form = BaseInferenceForm()
selected_form.fields['data_type'].initial = data_type
selected_form.fields['inference_type'].initial = inference_type
selected_form.fields['case'] = forms.ChoiceField(choices=groups,
widget=Select2Widget(SELECT_WIDGET_ATTRS))
selected_form.fields['control'] = forms.ChoiceField(choices=groups,
widget=Select2Widget(SELECT_WIDGET_ATTRS))
return selected_form
def get_list_data(analysis_id, analysis_history_list):
list_data = []
for analysis_history in analysis_history_list:
inference_type = analysis_history.inference_type
click_url_1 = None
click_url_2 = None
# when clicked, go to the Explore Data page
if inference_type == INFERENCE_T_TEST or \
inference_type == INFERENCE_DESEQ or \
inference_type == INFERENCE_LIMMA or \
inference_type == INFERENCE_PALS or \
inference_type == INFERENCE_ORA or \
inference_type == INFERENCE_GSEA:
click_url_1 = reverse('explore_data', kwargs={
'analysis_id': analysis_id,
})
# when clicked, show the Explore Analysis Data page
elif inference_type == INFERENCE_PCA:
click_url_1 = reverse('pca_result', kwargs={
'analysis_id': analysis_id,
'analysis_data_id': analysis_history.analysis_data.id,
'analysis_history_id': analysis_history.id
})
# when clicked, go to Reactome
elif inference_type == INFERENCE_REACTOME:
if REACTOME_ORA_URL in analysis_history.inference_data and REACTOME_EXPR_URL in analysis_history.inference_data:
click_url_1 = analysis_history.inference_data[REACTOME_ORA_URL]
click_url_2 = analysis_history.inference_data[REACTOME_EXPR_URL]
elif inference_type == INFERENCE_MOFA:
history_id = analysis_history.id
click_url_1 = reverse('mofa_result_page', kwargs={
'analysis_id': analysis_id,
'analysis_history_id': history_id
})
item = [analysis_history, click_url_1, click_url_2]
list_data.append(item)
return list_data
def inference_t_test(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
data_type = int(request.POST['data_type'])
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
form.fields['case'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
form.fields['control'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
if form.is_valid():
case = form.cleaned_data['case']
control = form.cleaned_data['control']
data_df, design_df = get_dataframes(analysis_data, PKS)
if data_type == GENOMICS:
min_replace = MIN_REPLACE_GENOMICS
elif data_type == PROTEOMICS or data_type == METABOLOMICS:
min_replace = MIN_REPLACE_PROTEOMICS_METABOLOMICS
wi = GraphOmicsInference(data_df, design_df, data_type, min_value=min_replace)
result_df = wi.run_ttest(case, control)
# create a new analysis data
display_name = 't-test: %s_vs_%s' % (case, control)
inference_data = get_inference_data(data_type, case, control, result_df)
save_analysis_history(analysis_data, inference_data, display_name, INFERENCE_T_TEST)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def inference_deseq(request, analysis_id):
if request.method == 'POST':
data_type = int(request.POST['data_type'])
if data_type == PROTEOMICS or data_type == METABOLOMICS:
messages.warning(request, 'Add new inference failed. DESeq2 only works for discrete count data.')
return inference(request, analysis_id)
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
form.fields['case'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
form.fields['control'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
if form.is_valid():
case = form.cleaned_data['case']
control = form.cleaned_data['control']
data_df, design_df = get_dataframes(analysis_data, PKS)
# run deseq2 here
wi = GraphOmicsInference(data_df, design_df, data_type)
try:
pd_df, rld_df, res_ordered = wi.run_deseq(MIN_REPLACE_GENOMICS, case, control)
except Exception as e:
logger.warning('Failed to run DESeq2: %s' % str(e))
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
result_df = pd_df[['padj', 'log2FoldChange']]
# create a new analysis data
display_name = 'DESeq2: %s_vs_%s' % (case, control)
metadata = {
'rld_df': rld_df.to_json(),
'res_ordered': jsonpickle.encode(res_ordered)
}
inference_data = get_inference_data(data_type, case, control, result_df, metadata=metadata)
save_analysis_history(analysis_data, inference_data, display_name, INFERENCE_DESEQ)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def inference_limma(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
data_type = int(request.POST['data_type'])
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
form.fields['case'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
form.fields['control'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
if form.is_valid():
case = form.cleaned_data['case']
control = form.cleaned_data['control']
data_df, design_df = get_dataframes(analysis_data, PKS)
if data_type == GENOMICS:
min_replace = MIN_REPLACE_GENOMICS
elif data_type == PROTEOMICS or data_type == METABOLOMICS:
min_replace = MIN_REPLACE_PROTEOMICS_METABOLOMICS
wi = GraphOmicsInference(data_df, design_df, data_type, min_value=min_replace)
result_df = wi.run_limma(case, control)
# create a new analysis data
display_name = 'limma: %s_vs_%s' % (case, control)
inference_data = get_inference_data(data_type, case, control, result_df)
save_analysis_history(analysis_data, inference_data, display_name, INFERENCE_LIMMA)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def inference_pca(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
data_type = int(request.POST['data_type'])
analysis_data = get_last_analysis_data(analysis, data_type)
choices = zip(range(2, 11), range(2, 11))
form.fields['pca_n_components'] = forms.ChoiceField(choices=choices,
widget=Select2Widget(SELECT_WIDGET_ATTRS),
label='PCA Components')
if form.is_valid():
n_components = int(form.cleaned_data['pca_n_components'])
# do pca on the samples
X_proj, X_std, pca = get_pca_proj(analysis_data, n_components)
if pca is not None:
var_exp = pca.explained_variance_ratio_
# store pca results to the metadata field of this AnalysisData
metadata = {
'pca_n_components': jsonpickle.dumps(n_components),
'pca_X_std_index': jsonpickle.dumps(X_std.index.values),
'pca_X_proj': jsonpickle.dumps(X_proj),
'pca_var_exp': jsonpickle.dumps(var_exp)
}
display_name = 'PCA: %s components' % n_components
inference_data = get_inference_data(data_type, None, None, None, metadata)
save_analysis_history(analysis_data, inference_data, display_name, INFERENCE_PCA)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
else:
messages.warning(request, 'Add new inference failed. No data found.')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def get_pca_proj(analysis_data, n_components):
axis = 0
X_std, data_df, design_df = get_standardized_df(analysis_data, axis, pk_cols=PKS)
if design_df is not None:
X_std = X_std.transpose()
pca = skPCA(n_components)
X_proj = pca.fit_transform(X_std)
else:
X_std = None
X_proj = None
pca = None
return X_proj, X_std, pca
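# Shape sketch (added; hypothetical numbers for illustration): for a design_df covering
# 12 samples and n_components=3, X_std is the standardized samples-by-features matrix
# after the transpose, X_proj is the (12 x 3) array returned by sklearn's
# PCA.fit_transform, and pca.explained_variance_ratio_ has length 3.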
class PCAResult(TemplateView):
template_name = 'linker/inference_pca.html'
def get_context_data(self, **kwargs):
analysis_id = self.kwargs['analysis_id']
analysis_data_id = self.kwargs['analysis_data_id']
analysis_history_id = self.kwargs['analysis_history_id']
analysis_data = AnalysisData.objects.get(pk=analysis_data_id)
analysis_history = AnalysisHistory.objects.get(pk=analysis_history_id)
inference_data = analysis_history.inference_data
n_components = jsonpickle.loads(inference_data['pca_n_components'])
X_std_index = jsonpickle.loads(inference_data['pca_X_std_index'])
X_proj = jsonpickle.loads(inference_data['pca_X_proj'])
var_exp = jsonpickle.loads(inference_data['pca_var_exp'])
# make pca plot
fig = self.get_pca_plot(analysis_data, X_std_index, X_proj)
pca_plot = fig_to_div(fig)
# make explained variance plot
fig = self.get_variance_plot(var_exp)
variance_plot = fig_to_div(fig)
# set the div to context
context = super(PCAResult, self).get_context_data(**kwargs)
context.update({
'pca_plot': pca_plot,
'variance_plot': variance_plot,
'analysis_id': analysis_id,
'n_components': n_components,
})
return context
def get_variance_plot(self, var_exp):
cum_var_exp = np.cumsum(var_exp)
trace1 = dict(
type='bar',
x=['PC %s' % (i + 1) for i in range(len(var_exp))],
y=var_exp,
name='Individual'
)
trace2 = dict(
type='scatter',
x=['PC %s' % (i + 1) for i in range(len(var_exp))],
y=cum_var_exp,
name='Cumulative'
)
data = [trace1, trace2]
layout = dict(
title='Explained variance by different principal components',
yaxis=dict(
title='Explained variance'
),
width=800,
annotations=list([
dict(
x=1.20,
y=1.05,
xref='paper',
yref='paper',
text='Explained Variance',
showarrow=False,
)
])
)
fig = dict(data=data, layout=layout)
return fig
def get_pca_plot(self, analysis_data, X_std_index, X_proj):
data = []
group_members = get_group_members(analysis_data)
for group in group_members:
members = group_members[group]
pos = np.in1d(X_std_index, members).nonzero()[0] # find position of group members in sample indices of X
labels = X_std_index[pos]
trace = dict(
type='scatter',
x=X_proj[pos, 0],
y=X_proj[pos, 1],
mode='markers',
name=group,
text=labels,
marker=dict(
size=12,
line=dict(
color='rgba(217, 217, 217, 0.14)',
width=0.5),
opacity=0.8)
)
data.append(trace)
layout = dict(
width=800,
title='PCA Projection',
xaxis=dict(title='PC1', showline=False),
yaxis=dict(title='PC2', showline=False)
)
fig = dict(data=data, layout=layout)
return fig
def inference_pals(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
data_type = int(request.POST['data_type'])
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
form.fields['case'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
form.fields['control'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
form.fields['min_hits'] = forms.IntegerField(min_value=0, initial=PLAGE_MIN_HITS,
label='Minimum hits')
if form.is_valid():
case = form.cleaned_data['case']
control = form.cleaned_data['control']
min_hits = form.cleaned_data['min_hits']
# get pals data source from the current analysis_data
pals_data_source = get_pals_data_source(analysis, analysis_data, case, control, min_hits)
if pals_data_source is None:
messages.warning(request, 'Add new inference failed. No data found.')
return inference(request, analysis_id)
# run pals
pals_df = run_pals(pals_data_source)
# check for NaN in the results. It shouldn't happen.
if pals_df.isnull().values.any():
logger.warning('PALS result contains NaN! These rows will be deleted.')
logger.warning(pals_df[pals_df.isnull().any(axis=1)])
pals_df = pals_df.dropna()
# update PALS results to database
pathway_analysis_data = get_last_analysis_data(analysis, PATHWAYS)
inference_data = get_inference_data(data_type, case, control, pals_df)
display_name = 'PLAGE %s: %s_vs_%s' % (pals_data_source.database_name, case, control)
save_analysis_history(pathway_analysis_data, inference_data, display_name, INFERENCE_PALS)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def inference_ora(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
data_type = int(request.POST['data_type'])
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
form.fields['case'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
form.fields['control'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
if form.is_valid():
case = form.cleaned_data['case']
control = form.cleaned_data['control']
# get pals data source from the current analysis_data
pals_data_source = get_pals_data_source(analysis, analysis_data, case, control, 0)
if pals_data_source is None:
messages.warning(request, 'Add new inference failed. No data found.')
return inference(request, analysis_id)
# run ora
pals_df = run_ora(pals_data_source)
# update ORA results to database
pathway_analysis_data = get_last_analysis_data(analysis, PATHWAYS)
inference_data = get_inference_data(data_type, case, control, pals_df)
display_name = 'ORA %s: %s_vs_%s' % (pals_data_source.database_name, case, control)
save_analysis_history(pathway_analysis_data, inference_data, display_name, INFERENCE_ORA)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def inference_gsea(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
data_type = int(request.POST['data_type'])
analysis_data = get_last_analysis_data(analysis, data_type)
groups = get_groups(analysis_data)
form.fields['case'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
form.fields['control'] = forms.ChoiceField(choices=groups, widget=Select2Widget())
if form.is_valid():
case = form.cleaned_data['case']
control = form.cleaned_data['control']
# get pals data source from the current analysis_data
pals_data_source = get_pals_data_source(analysis, analysis_data, case, control, 0)
if pals_data_source is None:
messages.warning(request, 'Add new inference failed. No data found.')
return inference(request, analysis_id)
            # run gsea
pals_df = run_gsea(pals_data_source)
# update GSEA results to database
pathway_analysis_data = get_last_analysis_data(analysis, PATHWAYS)
inference_data = get_inference_data(data_type, case, control, pals_df)
display_name = 'GSEA %s: %s_vs_%s' % (pals_data_source.database_name, case, control)
save_analysis_history(pathway_analysis_data, inference_data, display_name, INFERENCE_GSEA)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def inference_reactome(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
form = BaseInferenceForm(request.POST)
# if data type is MULTI_OMICS, then turn it into GENOMICS, PROTEOMICS and METABOLOMICS
data_type = int(request.POST['data_type'])
data_types = [data_type]
if data_type == MULTI_OMICS:
data_types = [GENOMICS, PROTEOMICS, METABOLOMICS]
        # make sure we actually have some data
analysis_data, omics_data = get_omics_data(analysis, data_types, form)
if len(omics_data) == 0:
messages.warning(request, 'Add new inference failed. No data found.')
return inference(request, analysis_id)
# reinitialise the threshold field
form.fields['threshold'] = DecimalField(required=True, widget=TextInput(
attrs={'autocomplete': 'off', 'type': 'number', 'min': '0', 'max': '1', 'step': '0.05',
'size': '10'}))
form.fields['threshold'].initial = 0.05
if form.is_valid():
form_data = form.cleaned_data
threshold = float(form_data['threshold'])
used_dtypes = get_used_dtypes(form_data, omics_data)
encoded_species = get_analysis_first_species(analysis)
# get ORA and expression data to send to reactome
logger.debug('Preparing ORA data')
ora_df = get_data(form_data, omics_data, used_dtypes, threshold)
ora_data = to_ora_tsv(ora_df.index.values)
logger.debug(ora_df.index.values)
logger.debug(ora_df.index.values.shape)
logger.debug('Preparing expression data')
expression_df = get_data(form_data, omics_data, used_dtypes,
1.0) # use large threshold to show all entities
expression_data = to_expression_tsv(expression_df)
logger.debug(expression_df)
logger.debug(expression_df.shape)
# POST the data to Reactome Analysis Service
logger.debug('POSTing ORA data')
ora_status_code, ora_json_response = send_to_reactome(ora_data, encoded_species)
logger.debug('POSTing expression data')
expr_status_code, expr_json_response = send_to_reactome(expression_data, encoded_species)
# ensure that both POST requests are successful
if ora_status_code != 200 or expr_status_code != 200:
messages.warning(request, 'Add new inference failed. Reactome Analysis Service returned status '
'code %d and %d' % (ora_status_code, expr_status_code))
else: # success 200 for both
assert ora_json_response is not None
assert expr_json_response is not None
# parse FDR values for pathways from ORA results
logger.debug('Parsing ORA results')
pathways_df, ora_reactome_url, ora_token = parse_reactome_json(ora_json_response)
logger.debug(pathways_df.columns.values)
logger.debug(pathways_df)
logger.debug('Parsing expression results')
_, expr_reactome_url, expr_token = parse_reactome_json(expr_json_response)
if not pathways_df.empty:
first_analysis_history_id = get_first_analysis_history_id(form_data, omics_data, used_dtypes)
first_analysis_history = AnalysisHistory.objects.get(pk=first_analysis_history_id)
case = first_analysis_history.inference_data['case']
control = first_analysis_history.inference_data['control']
comparison_name = '%s_vs_%s' % (case, control)
# as a quick hack, we put pathways df in the same format as PALS output
# this will let us use the update_pathway_analysis_data() method below
pathways_df = pathways_df[['stId', 'name', 'entities_fdr']].set_index('stId').rename(columns={
'name': 'pw_name',
'entities_fdr': 'REACTOME %s comb_p' % (comparison_name)
})
pathway_analysis_data = get_last_analysis_data(analysis, PATHWAYS)
# save the updated analysis data to database
display_data_type = ','.join([AddNewDataDict[dt] for dt in used_dtypes])
display_name = 'Reactome Analysis Service (%s): %s' % (display_data_type, comparison_name)
metadata = {
REACTOME_ORA_TOKEN: ora_token,
REACTOME_ORA_URL: ora_reactome_url,
REACTOME_EXPR_TOKEN: expr_token,
REACTOME_EXPR_URL: expr_reactome_url
}
inference_data = get_inference_data(data_type, None, None, pathways_df, metadata=metadata)
save_analysis_history(pathway_analysis_data, inference_data, display_name, INFERENCE_REACTOME)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
return inference(request, analysis_id)
else:
messages.warning(request, 'Add new inference failed. No pathways returned by Reactome Analysis '
'Service. Please check the logs.')
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
def inference_mofa(request, analysis_id):
if request.method == 'POST':
analysis = get_object_or_404(Analysis, pk=analysis_id)
analysis_history_list = AnalysisHistory.objects.filter(analysis=analysis).order_by(
'timestamp')
history_id = 0
for history in analysis_history_list:
history_id = history.id
form = BaseInferenceForm(request.POST)
data_type = int(request.POST['data_type'])
data_types = [data_type]
if data_type == MULTI_OMICS:
data_types = [GENOMICS, PROTEOMICS, METABOLOMICS]
analysis_data, omics_data = get_omics_data(analysis, data_types, form)
form.fields['Use uploaded .hdf5 file'] = forms.ChoiceField(choices=zip(['Yes', 'No'], ['Yes', 'No']), widget=Select2Widget())
form.fields['Number of Factor'] = forms.IntegerField(required=True, widget=forms.TextInput(attrs={'size': 100}))
form.fields['Scale View'] = forms.ChoiceField(required=False, choices=zip([True, False], ['Yes', 'No']), widget=Select2Widget())
form.fields['Scale Group'] = forms.ChoiceField(required=False, choices=zip([True, False], ['Yes', 'No']), widget=Select2Widget())
if form.is_valid():
up_data = form.cleaned_data['Use uploaded .hdf5 file']
mofa_info = {}
filePath = ''
if up_data == 'Yes':
if analysis.has_mofa_data():
filePath = analysis.analysisupload.mofa_data.path
display_name = 'MOFA: uploaded hdf5 file'
numFactor = form.cleaned_data['Number of Factor']
mofa_info['nFactor'] = numFactor
else:
messages.warning(request, 'No .hdf5 file found.')
else:
numFactor = form.cleaned_data['Number of Factor']
scale_view = form.cleaned_data['Scale View'] in ['True']
scale_group = form.cleaned_data['Scale Group'] in ['True']
mofa = MofaInference(analysis, data_type, numFactor, scale_view, scale_group)
filePath, trained_views = mofa.run_mofa()
display_name = 'MOFA: %s Factors' % numFactor
mofa_info['nFactor'] = numFactor
mofa_info['views'] = trained_views
mofa_info['path'] = filePath
history_id += 1
mofa_info['history_id'] = history_id
#analysis.set_mofa_hdf5_path(filePath)
inference_data = get_inference_data(data_type, None, None, None, metadata = mofa_info)
save_analysis_history(analysis_data, inference_data, display_name, INFERENCE_MOFA)
messages.success(request, 'Add new inference successful.', extra_tags='primary')
else:
messages.warning(request, 'Add new inference failed.')
return inference(request, analysis_id)
class DeleteAnalysisHistoryView(DeleteView):
model = AnalysisHistory
success_url = reverse_lazy('inference')
template_name = 'linker/confirm_delete_analysis_history.html'
success_message = "Analysis history was successfully deleted."
# https://stackoverflow.com/questions/24822509/success-message-in-deleteview-not-shown/42656041#42656041
def delete(self, request, *args, **kwargs):
messages.success(self.request, self.success_message)
return super(DeleteAnalysisHistoryView, self).delete(request, *args, **kwargs)
def get_success_url(self):
return reverse_lazy('inference', kwargs={'analysis_id': self.object.analysis_data.analysis.pk})
``` |
{
"source": "JoeWard7/settler",
"score": 3
} |
#### File: settler/programs/function.py
```python
import setup as set
def player_statcalc(player):
"""Print this."""
player_prop = []
player_res = []
player_num = []
for letter in player:
prob = set.letter_probs[letter]
resIndex = set.resource_position[letter]
res = set.resource_index[resIndex]
num = set.letter_num[letter]
player_prop.append(prob)
player_res.append(res)
player_num.append(num)
print(player_prop)
print(player_res)
print(player_num)
def player_odds(player):
"""Print this."""
total = 0.0
if "B" in player:
total += 1
if "D" in player or "Q" in player:
total += 2
if "J" in player or "N" in player:
total += 3
if "A" in player or "O" in player:
total += 4
if "C" in player or "P" in player:
total += 5
if "E" in player or "K" in player:
total += 5
if "G" in player or "M" in player:
total += 4
if "F" in player or "L" in player:
total += 3
if "I" in player or "R" in player:
total += 2
if "H" in player:
total += 1
return total / 36
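# Worked example (added for illustration): for the tiles ["B", "D", "J"] the
# hard-coded weights above contribute 1 + 2 + 3 = 6, so
# player_odds(["B", "D", "J"]) returns 6 / 36, roughly 0.167.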
def player_resOdds(player, resNum):
"""Print this."""
resource_list = []
for letter in player:
if set.resource_position[letter] == resNum:
resource_list.append(letter)
return str(player_odds(resource_list))
def add_settle(player, settles):
"""Print me."""
print(settles)
for letter in settles:
player.append(letter)
def dice_roll(roll):
"""Print me."""
for letter in set.letter_num:
if set.letter_num[letter] == roll:
for ownership in set.red_settle:
if ownership == letter:
set.red_hand.append(set.resource_position[letter])
for ownership in set.blue_settle:
if ownership == letter:
set.blue_hand.append(set.resource_position[letter])
for ownership in set.orange_settle:
if ownership == letter:
set.orange_hand.append(set.resource_position[letter])
for ownership in set.white_settle:
if ownership == letter:
set.white_hand.append(set.resource_position[letter])
def card_remove(player, cards):
"""Print me."""
print(cards)
for card in cards:
player.remove(card)
def game_odds(resNum):
"""Print me."""
resource_list = []
for letter in set.resource_position:
if set.resource_position[letter] == resNum:
resource_list.append(letter)
print(resource_list)
return str(player_odds(resource_list))
``` |
{
"source": "joe-warren/inky",
"score": 4
} |
#### File: examples/phat/calendar-phat.py
```python
import calendar
import datetime
import os
from inky.auto import auto
from PIL import Image, ImageDraw
print("""Inky pHAT: Calendar
Draws a calendar for the current month to your Inky pHAT.
This example uses a sprite sheet of numbers and month names which are
composited over the background in a couple of different ways.
""")
# Get the current path
PATH = os.path.dirname(__file__)
# Set up the display
try:
inky_display = auto(ask_user=True, verbose=True)
except TypeError:
raise TypeError("You need to update the Inky library to >= v1.1.0")
if inky_display.resolution not in ((212, 104), (250, 122)):
w, h = inky_display.resolution
raise RuntimeError("This example does not support {}x{}".format(w, h))
inky_display.set_border(inky_display.BLACK)
# Uncomment the following if you want to rotate the display 180 degrees
# inky_display.h_flip = True
# inky_display.v_flip = True
def create_mask(source, mask=(inky_display.WHITE, inky_display.BLACK, inky_display.RED)):
"""Create a transparency mask.
Takes a paletized source image and converts it into a mask
permitting all the colours supported by Inky pHAT (0, 1, 2)
or an optional list of allowed colours.
:param mask: Optional list of Inky pHAT colours to allow.
"""
mask_image = Image.new("1", source.size)
w, h = source.size
for x in range(w):
for y in range(h):
p = source.getpixel((x, y))
if p in mask:
mask_image.putpixel((x, y), 255)
return mask_image
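# Usage note (added): create_mask is applied to the sprite sheet further below,
# e.g. text_mask = create_mask(text, [inky_display.WHITE]), so that the paste
# calls only draw the white sprite pixels onto the image buffer.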
def print_digit(position, digit, colour):
"""Print a single digit using the sprite sheet.
Each number is grabbed from the masked sprite sheet,
and then used as a mask to paste the desired colour
onto Inky pHATs image buffer.
"""
o_x, o_y = position
num_margin = 2
num_width = 6
num_height = 7
s_y = 11
s_x = num_margin + (digit * (num_width + num_margin))
sprite = text_mask.crop((s_x, s_y, s_x + num_width, s_y + num_height))
img.paste(colour, (o_x, o_y), sprite)
def print_number(position, number, colour):
"""Print a number using the sprite sheet."""
for digit in str(number):
print_digit(position, int(digit), colour)
position = (position[0] + 8, position[1])
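# Example (added for illustration): print_number((10, 10), 2021, inky_display.WHITE)
# pastes the digits 2, 0, 2, 1 starting at x=10, advancing 8 pixels per digit.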
# Load our sprite sheet and prepare a mask
text = Image.open(os.path.join(PATH, "resources/calendar.png"))
text_mask = create_mask(text, [inky_display.WHITE])
# Note: The mask determines which pixels from our sprite sheet we want
# to actually use when calling img.paste().
# See: http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=paste#PIL.Image.Image.paste
# Load our backdrop image
img = Image.open(os.path.join(PATH, "resources/empty-backdrop.png")).resize(inky_display.resolution)
draw = ImageDraw.Draw(img)
# Grab the current date, and prepare our calendar
cal = calendar.Calendar()
now = datetime.datetime.now()
dates = cal.monthdatescalendar(now.year, now.month)
col_w = 20
col_h = 13
cols = 7
rows = len(dates) + 1
cal_w = 1 + ((col_w + 1) * cols)
cal_h = 1 + ((col_h + 1) * rows)
cal_x = inky_display.WIDTH - cal_w - 2
cal_y = 2
# Paint out a black rectangle onto which we'll draw our canvas
draw.rectangle((cal_x, cal_y, cal_x + cal_w - 1, cal_y + cal_h - 1), fill=inky_display.BLACK, outline=inky_display.WHITE)
# The starting position of the months in our spritesheet
months_x = 2
months_y = 20
# Number of months per row
months_cols = 3
# The width/height of each month in our spritesheet
month_w = 23
month_h = 9
# Figure out where the month is in the spritesheet
month_col = (now.month - 1) % months_cols
month_row = (now.month - 1) // months_cols
# Convert that location to usable X/Y coordinates
month_x = months_x + (month_col * month_w)
month_y = months_y + (month_row * month_h)
crop_region = (month_x, month_y, month_x + month_w, month_y + month_h)
month = text.crop(crop_region)
month_mask = text_mask.crop(crop_region)
monthyear_x = 28
# Paste in the month name we grabbed from our sprite sheet
img.paste(inky_display.WHITE, (monthyear_x, cal_y + 4), month_mask)
# Print the year right below the month
print_number((monthyear_x, cal_y + 5 + col_h), now.year, inky_display.WHITE)
# Draw the vertical lines which separate the columns
# and also draw the day names into the table header
for x in range(cols):
# Figure out the left edge of the column
o_x = (col_w + 1) * x
o_x += cal_x
crop_x = 2 + (16 * x)
# Crop the relevant day name from our text image
crop_region = ((crop_x, 0, crop_x + 16, 9))
day_mask = text_mask.crop(crop_region)
img.paste(inky_display.WHITE, (o_x + 4, cal_y + 2), day_mask)
# Offset to the right side of the column and draw the vertical line
o_x += col_w + 1
draw.line((o_x, cal_y, o_x, cal_h))
# Draw the horizontal lines which separate the rows
for y in range(rows):
o_y = (col_h + 1) * y
o_y += cal_y + col_h + 1
draw.line((cal_x, o_y, cal_w + cal_x - 1, o_y))
# Step through each week
for row, week in enumerate(dates):
y = (col_h + 1) * (row + 1)
y += cal_y + 1
# And each day in the week
for col, day in enumerate(week):
x = (col_w + 1) * col
x += cal_x + 1
# Draw in the day name.
# If it's the current day, invert the calendar background and text
if (day.day, day.month) == (now.day, now.month):
draw.rectangle((x, y, x + col_w - 1, y + col_h - 1), fill=inky_display.WHITE)
print_number((x + 3, y + 3), day.day, inky_display.BLACK)
# If it's any other day, paint in as white if it's in the current month
# and red if it's in the previous or next month
else:
print_number((x + 3, y + 3), day.day, inky_display.WHITE if day.month == now.month else inky_display.RED)
# Display the completed calendar on Inky pHAT
inky_display.set_image(img)
inky_display.show()
``` |
{
"source": "joewashear007/jazzy",
"score": 3
} |
#### File: jazzy/functions/RelationalFunc.py
```python
__all__ = ['jazGT', 'jazET', 'jazGTET', 'jazLT', 'jazLTET', 'jazGL']
class jazGT:
def __init__(self):
self.command = ">";
def call(self, interpreter, arg):
topValue1 = interpreter.GetScope().stack.pop()
topValue2 = interpreter.GetScope().stack.pop()
if (topValue2 > topValue1):
interpreter.GetScope().stack.append(1)
else:
interpreter.GetScope().stack.append(0)
return None
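# Illustrative example (added): with a scope stack of [3, 5] (5 on top),
# ">" pops 5 then 3, evaluates 3 > 5 and pushes 0; with [5, 3] it pushes 1.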
class jazET:
def __init__(self):
self.command = "=";
def call(self, interpreter, arg):
topValue1 = interpreter.GetScope().stack.pop()
topValue2 = interpreter.GetScope().stack.pop()
if (topValue2 == topValue1):
interpreter.GetScope().stack.append(1)
else:
interpreter.GetScope().stack.append(0)
return None
class jazGTET:
def __init__(self):
self.command = ">=";
def call(self, interpreter, arg):
topValue1 = interpreter.GetScope().stack.pop()
topValue2 = interpreter.GetScope().stack.pop()
if (topValue2 >= topValue1):
interpreter.GetScope().stack.append(1)
else:
interpreter.GetScope().stack.append(0)
return None
class jazLT:
def __init__(self):
self.command = "<";
def call(self, interpreter, arg):
topValue1 = interpreter.GetScope().stack.pop()
topValue2 = interpreter.GetScope().stack.pop()
if (topValue2 < topValue1):
interpreter.GetScope().stack.append(1)
else:
interpreter.GetScope().stack.append(0)
return None
class jazLTET:
def __init__(self):
self.command = "<=";
def call(self, interpreter, arg):
topValue1 = interpreter.GetScope().stack.pop()
topValue2 = interpreter.GetScope().stack.pop()
        # push 1 when the comparison holds, 0 otherwise, consistent with the other relational ops
        if (topValue2 <= topValue1):
            interpreter.GetScope().stack.append(1)
        else:
            interpreter.GetScope().stack.append(0)
return None
class jazGL:
def __init__(self):
self.command = "<>";
def call(self, interpreter, arg):
topValue1 = interpreter.GetScope().stack.pop()
topValue2 = interpreter.GetScope().stack.pop()
if (topValue2 != topValue1):
interpreter.GetScope().stack.append(1)
else:
interpreter.GetScope().stack.append(0)
return None
Functions = {'jazGT': jazGT, 'jazET': jazET, 'jazGTET': jazGTET, 'jazLT':jazLT, 'jazLTET':jazLTET, 'jazGL':jazGL}
```
#### File: jazzy/functions/SubprgrmCtrlFunc.py
```python
__all__ = ['jazBegin', 'jazEnd', 'jazReturn', 'jazCall', 'jazStackInfo']
class jazBegin:
def __init__(self):
self.command = "begin";
def call(self, interpreter, arg):
interpreter.BeginSubroutine()
return None
class jazEnd:
def __init__(self):
self.command = "end";
def call(self, interpreter, arg):
interpreter.EndSubroutine()
return None
class jazReturn:
def __init__(self):
self.command = "return";
def call(self, interpreter, arg):
interpreter.ReturnSubroutine()
return None
class jazCall:
def __init__(self):
self.command = "call";
def call(self, interpreter, arg):
interpreter.CallSubroutine(arg)
return None
class jazStackInfo:
def __init__(self):
self.command = "stackinfo"
def call(self, interpreter, arg):
if arg is not None and len(arg) > 0:
try:
scope = interpreter.scopes[int(arg)]
info = "Scope: "+str(scope.name)+"\n"
info += "* PC : " + str(scope.pc) + "\n"
info += "* Labels : " + str(interpreter.labels) + "\n"
info += "* Vars : " + str(scope.variables) + "\n"
info += "* Stack: " + str(scope.stack) + "\n"
if scope.name == scope.lvalue.name:
info += "* LScope : self\n"
else:
info += "* LScope : " + str(scope.lvalue.name) + "\n"
if scope.name == scope.rvalue.name:
info += "* RScope : self\n"
else:
info += "* RScope : " + str(scope.rvalue.name) + "\n"
except Exception as e:
print(e)
return "Index is not valid"
else:
info = "Scopes: ("+str(len(interpreter.scopes))+")\n"
i = 0
for s in interpreter.scopes:
info = info + "["+str(i)+"] : "+ str(s.name)+"\n"
i = i+1;
return info
# A dictionary of the classes in this file
# used to autoload the functions
Functions = {'jazBegin': jazBegin,'jazEnd': jazEnd, 'jazReturn': jazReturn, 'jazCall':jazCall,'jazStackInfo': jazStackInfo}
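# Note (added for clarity): this module-level Functions dictionary is what
# __main__.RegisterFunctions iterates over to register each command string
# (e.g. "begin", "call", "return") with the interpreter.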
```
#### File: jazzy/jazzy/__main__.py
```python
import sys
import os
import argparse
import functions
import interpreter
import preprocessor
from errors import *;
def main(args):
    parser = argparse.ArgumentParser(description='Jaz Interpreter')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),default=sys.stdout)
    args = parser.parse_args()
intrp = interpreter.Interpreter()
RegisterFunctions(intrp)
if(args.infile is not sys.stdin):
#run the file
RunFile(intrp, args.infile, args.outfile)
else:
#read from std in
RunSdtIn(intrp)
def RegisterFunctions(intrp):
    # Register the functions
for mod in functions.__all__:
__import__("functions."+mod)
mofFunc = getattr(functions, mod)
for func in mofFunc.Functions:
try:
funcClass = getattr(mofFunc, func)
jazFunc = funcClass()
intrp.RegisterFunction(jazFunc.command, jazFunc)
except Exception as err:
print(err.message)
def RunSdtIn(intrp):
while not intrp.isFinished():
print("")
action = input(">>")
try:
output = intrp.Exec(action)
if output is not None:
print(output)
except JazError as error:
print("Error! -- " + error.message)
except Exception as err:
print("Error! -- " + str(err))
def RunFile(intrp, infile, outfile):
# output = []
try:
processor = preprocessor.Preprocessor()
code = processor.parseFile(infile)
labels = processor.GetLabels()
intrp.labels = labels
intrp.program = code
while not intrp.isFinished():
output = intrp.ExecNext()
if output is not None:
outfile.write(str(output))
outfile.write("\n")
except JazError as error:
print("Error! -- " + error.message)
if __name__ == "__main__":
# execute only if run as a script
main(sys.argv[1:])
``` |
{
"source": "joewashear007/ScrappyDoo",
"score": 3
} |
#### File: ScrappyDoo/scrappydoo/kitutil.py
```python
import os
import misc
import shutil
import subprocess
from kit import Kit
def ProcessKits(kitFiles, dir):
kitsZips = {}
for x, file in enumerate(kitFiles):
misc.SetHeader("Get Kit Information ("+ str(x + 1) + " of " + str(len(kitFiles)) + ")")
print("To skip a file, please enter '#skip' for the name")
print()
print()
print("Current File: ", file)
kitNameInfo = GetKitName(file, kitsZips)
if kitNameInfo is None:
#user skipped the kit
continue
# Kit Name & the filename string of the kit
name, fileKitStr = kitNameInfo
if not fileKitStr in kitsZips:
kitsZips[fileKitStr] = Kit(name, dir)
kitType = GetKitType(file, name)
kitsZips[fileKitStr].addFile(kitType, file)
kits = {}
    # Combine kits by name
for z in kitsZips:
if kitsZips[z].name in kits:
for type in kitsZips[z].files:
if type not in kits[kitsZips[z].name].files:
kits[kitsZips[z].name].files[type] = []
kits[kitsZips[z].name].files[type] += kitsZips[z].files[type]
else:
kits[kitsZips[z].name] = kitsZips[z]
return kits;
def GetKitName(kit, kitsZips):
#remove the end '-pp'
kitStr = kit
if "-" in kit:
kitStr = kit.rsplit("-", 1)[0]
if "_" in kit:
kitStr = kit.rsplit("_", 1)[0]
name = None
goodInput = False
prevNames = {}
    # search the lower-case names in case the filename changed
for x in kitsZips:
prevNames[kitsZips[x].name.replace(" ", "").lower()] = kitsZips[x].name
prevNames[kitsZips[x].name.replace(" ", "_").lower()] = kitsZips[x].name
prevNames[kitsZips[x].name.replace(" ", "-").lower()] = kitsZips[x].name
while not goodInput:
sug_name = ""
if kitStr in kitsZips:
sug_name = kitsZips[kitStr].name
if sug_name == "":
lowerName = kitStr.lower();
for n in prevNames.keys():
if n in lowerName:
sug_name = prevNames[n]
break
if sug_name != "":
name = input("Please Enter Kit Name (default = " + sug_name + "): ")
name = name or sug_name
goodInput = True
if name == "#skip":
return None
else:
name = input("Please Enter Kit Name: ")
if name == "#skip":
return None
if name is not "" :
print()
goodInput = misc.ConfirmInput("Is '" + name + "' right?", True)
return (name, kitStr)
def GetKitType(kit, kitName):
#Remove file ext and get the ending -ep
kitType = os.path.splitext(kit)[0]
if "-" in kitType:
kitType = kitType.rsplit("-", 1)[1]
if "_" in kitType:
kitType = kitType.rsplit("_", 1)[1]
types = {1: "embellishment", 2: "alpha", 3: "paper", 4:"other"}
defaultTypes = {"ep":1, "ap":2, "pp":3, "alpha": 2, "alphas": 2 }
default = 1
if kitType in defaultTypes:
default = defaultTypes[kitType]
while True:
print()
print("Please choose the type of this kit:")
print(" 1) Embellishment", " (Default) " if default == 1 else "")
print(" 2) Alpha", " (Default) " if default == 2 else "")
print(" 3) Paper", " (Default) " if default == 3 else "")
print(" 4) Other", " (Default) " if default == 4 else "")
print()
action = input("Please Select Number Above:")
        if action == "":
return types[default];
if action.isdigit():
actionNum = int(action)
if actionNum > 0 and actionNum < len(types)+1:
return types[actionNum]
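# Illustrative example (added): a hypothetical file "summerfun-ep.zip" yields the
# suffix "ep", so the default offered is "embellishment"; "summerfun_alpha.zip"
# would default to "alpha". Pressing Enter accepts the suggested default.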
def ExtractKits(kits):
""" Takes a dictionary of kit objects and extracts them"""
misc.SetHeader("Extracting Kits, Please Wait ...")
errors = []
for kitName in kits:
try:
kits[kitName].extract()
if kits[kitName].hasError:
errors.append(kits[kitName].error)
except Exception as e:
print(e)
print()
print()
print("We had issues extracting ", kitName)
            print("So we are going to skip it and move on...")
print()
print()
errors.append(kitName)
if len(errors) > 0:
print()
print()
print("The Following Kits Had Errors:")
for e in errors:
print(e)
print("")
print("")
print("")
input("press Enter to Continue ...")
def MoveKitFolders(kits, current):
misc.SetHeader("Moving Kits")
dest = ""
goodInput = False
while not goodInput:
print("Where should we move the kits?")
print(" 1) Into the Personal Art Kits folder (Default)")
print(" 2) I want to enter a custom folder")
print(" 3) Skip this step")
action = input("Please Select Number Above:")
        if action == "":
homedir = os.path.expanduser("~")
dest = os.path.join(homedir, "Documents", "Personal Art Kits")
goodInput = True
if action.isdigit():
actionNum = int(action)
if actionNum == 1:
homedir = os.path.expanduser("~")
dest = os.path.join(homedir, "Documents", "Personal Art Kits")
goodInput = True
if actionNum == 2:
dest = misc.GetDir("Please enter the destination folder")
goodInput = True
if actionNum == 3:
return
if not goodInput:
print()
print("Hmm, that input seems wrong. Try again!")
print()
if not os.path.isdir(dest):
dest = misc.GetDir(dest + " does not exist! Please enter a new destination folder")
errors = []
for name in kits:
print("Moving ", name)
try:
shutil.move(os.path.join(current, name), dest)
except Exception as e:
errors.append(name)
print()
print("Error! Could Not move kit", name)
print("Skipping the move for this kit")
print()
print("Done Moving Kits!")
print("It is best to open Creative Memories to make sure all of the kits were installed properly")
if len(errors) > 0:
print()
print("===========================================================================")
print("ERROR!!! There were errors, not all files mght have been copied")
print("The following folders had errors")
for name in errors:
print(" * ", name)
print("===========================================================================")
print()
print()
misc.Pause()
def DeleteKitFiles(kits, folder):
misc.SetHeader("Deleting Kits")
print()
for name in kits:
for type in kits[name].files:
for file in kits[name].files[type]:
print("Deleting ", file)
try:
                    os.remove(os.path.join(folder, file))
except Exception as e:
print("Error! Could not delete ", file)
print("Skipping... ")
misc.Pause()
```
#### File: ScrappyDoo/scrappydoo/__main__.py
```python
import sys
import os
import argparse
import misc
import folderParser
import kitutil
import pprint
def main(args):
parser = argparse.ArgumentParser(description='ScrappyDoo')
# parser.add_argument('path', nargs='?', type="string")
# parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),default=sys.stdout)
    args = parser.parse_args()
misc.Welcome()
folder = folderParser.SelectFolder()
kitFiles = folderParser.FindAllKits(folder)
#pre kit processing screen
misc.SetHeader("Quick Message")
print("We are going all of the Art Kits in: ", folder )
print("For each zile file we find:")
print(" * You can enter the name of the Art Kit it is part of")
print(" * What type of elements it holds")
print(" * Skip any file that is not part of an Art Kit")
print(" * If an option has '(Default)', it can be choosen with just pressing 'Enter'")
print()
print("Lets Get Started! ")
misc.Pause()
kits = kitutil.ProcessKits(kitFiles, folder)
kitutil.ExtractKits(kits)
misc.Pause()
# Post processing - Move Files
kitutil.MoveKitFolders(kits, folder)
# Post processing - Delete zips
misc.SetHeader("Kit Post Processing")
print("Would you like me to delete the zip files that were turn into kits?")
print("They shouldn't be needed any more if everything install correctly into Creative Memories")
print()
print("WARNING: This can NOT be undone!")
print()
shouldDelete = misc.ConfirmInput("Should I delete the zip files that you ?")
if shouldDelete:
kitutil.DeleteKitFiles(kits, folder)
# Closing msg
misc.SetHeader("Good Bye!")
print("Everything is complete!")
print("Thanks for using ScrappyDoo and have a nice day!")
print()
misc.Pause()
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "joeweaver/tidysol",
"score": 3
} |
#### File: tidysol/tests/test_ComsolExportFile.py
```python
from unittest import TestCase
import sys
sys.path.append('..')
from tidysol import ComsolExportFile
from tidysol.Exceptions import TidysolException
class TestComsolExportFile(TestCase):
###########################################################################
#Do better than standard file not found error
###########################################################################
def test_error_file_does_not_exist(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\dne.txt')
assert('Could not find file: tests\\commands\\data\\dne.txt'==str(context.exception))
###########################################################################
    #potential parser issues (file does not appear to be a COMSOL file, or potential regex bugs)
###########################################################################
def test_error_multiple_vars_lines(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-two-varsline.txt')
assert('Found more than one line naming variables: 9 & 10' == str(context.exception))
def test_error_no_vars_lines(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-no-varsline.txt')
assert('Could not find a line defining variables' == str(context.exception))
#these are a bit strict, but probably a good idea. Testing the file
#for internal consistency
#the number of variables found '% Expressions'' should be the same as
#'% Dimension :' + the number of matches on the varsline
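    # For illustration (numbers taken from the error message asserted below, not
    # from a real export): a header with "% Dimension: 3" and "% Expressions: 18"
    # should yield 3 + 18 = 21 columns on the variables line.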
def test_error_wrong_number_of_vars(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-wrong-num-vars.txt')
assert('Expected 21 variables (3 dimensions and 18 expressions) but found 22 (3 dimensions and 19 expressions)' == str(context.exception))
#sub case of test_error_wrong_number_of_vars if %Expressions is not given
def test_error_no_expressions_meta(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-no-expressionsmeta.txt')
assert('Could not find a \"% Expressions:\" line' == str(context.exception))
#sub case of test_error_wrong_number_of_vars if %Dimensions is not given
def test_error_no_dimensions_meta(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-no-dimensionsmeta.txt')
assert('Could not find a \"% Dimension:\" line' == str(context.exception))
#the number of lines which do not begin with % should be the same as the
#% nodes value in the metadata
def test_error_wrong_num_nodes(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-wrong-num-nodes.txt')
assert('Expected 5 nodes but read 6' == str(context.exception))
#sub case of test_error_wrong_number_of_nodes if % nodes is not given
def test_error_no_nodes_meta(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-no-nodesmeta.txt')
assert('Could not find a \"% Nodes:\" line' == str(context.exception))
#The number of descriptions is the same as number listed in % Expressions
#descriptions are allowed to be a blank string
#unfortunately, descriptions can also contain unescaped commas - such as: Velocity field, z component
def test_error_wrong_num_descriptions(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-wrong-num-descriptions.txt')
assert('Expected 19 descriptions of variables but read 21' == str(context.exception))
#it gets even worse when you go to multiphase, added this case to handle things like ' Velocity field, liquid phase, x component,' and 'Velocity magnitude, liquid phase,'
    def test_multiple_unescaped_commas_descriptions(self):
cef=ComsolExportFile('tests\\commands\\data\\good-mulitple-unesecaped-commas.txt')
assert(28 == len(cef.columnVars))
#sub case of test_error_wrong_num_descriptions if % descriptions not given
def test_error_no_description_meta(self):
with self.assertRaises(TidysolException) as context:
ComsolExportFile('tests\\commands\\data\\bad-no-descriptionsmeta.txt')
assert('Could not find a \"% Description:\" line' == str(context.exception))
```
#### File: tidysol/tidysol/cli.py
```python
from inspect import getmembers, isclass
from docopt import docopt
import re
def main():
"""Main CLI entrypoint."""
import tidysol.commands
# Here we'll try to dynamically match the command the user is trying to run
# with a pre-defined command class we've already created.
for (k, v) in options.items():
if hasattr(tidysol.commands, k) and v:
module = getattr(tidysol.commands, k)
tidysol.commands = getmembers(module, isclass)
for command in tidysol.commands:
if command[0] != 'Base':
                    if re.search(r"^<class 'tidysol\.commands\.", str(command[1])):
command=command[1]
command=command(options)
command.run()
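# A minimal sketch of the dispatch above, assuming hypothetical option values:
# docopt might return something like {'tidy': True, 'vars': False,
# '<name>': 'export.txt'}; the truthy 'tidy' key selects the
# tidysol.commands.tidy module, getmembers(..., isclass) finds its Tidy class,
# and that class is instantiated with the options dict and run().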
if __name__ == "__main__":
options = docopt(__doc__)
main()
else:
from . import __version__ as VERSION
options = docopt(__doc__, version=VERSION)
```
#### File: tidysol/commands/tidy.py
```python
from .base import Base
from .. import ComsolExportFile
from ..Exceptions import TidysolException
import re
import os
from decimal import Decimal
class Tidy(Base):
"""Create a tidy version of the data in the Comsol export"""
def run(self):
try:
c=ComsolExportFile(self.options["<name>"])
writeTimes=[]
writeCols=[]
if(self.options["--times"]):
writeTimes = re.split(",",self.options["--times"][0])
for t in writeTimes:
if t.casefold()!="last".casefold():
                        if not re.match(r'^\d*\.?\d$', t):
raise (TidysolException("{0} is not a valid timestep".format(t)))
if Decimal(t) not in [Decimal(ts) for ts in c.timesteps]:
raise TidysolException("Could not find data for time {0}".format(t))
if(self.options["--cols"]):
writeCols=re.split(",",re.sub('\"','',self.options["--cols"][0]))
for col in writeCols:
col=col.strip()
if c.columnVars.get(col) == None and c.metaData.get(col)==None:
if not col in c.vars_w_descs():
#print(c.vars_w_descs())
raise TidysolException("Could not find data for variable {0}".format(col))
#ensure unique cols
writeCols=list(set(writeCols))
base = os.path.basename(self.options["<name>"])
fname = os.path.splitext(base)[0]
o=None
if not(self.options["--output"]):
o = open("{0}.csv".format(fname),"w")
else:
outfilename=self.options["--output"]
if os.path.dirname(outfilename) :
os.makedirs(os.path.dirname(outfilename), exist_ok=True)
o=open(outfilename,"w")
try:
#o.write(c.to_csv(writeTimes,writeCols))
c.to_csv(writeTimes,writeCols,o)
finally:
o.close()
        # TidysolException must be caught before the generic handler, otherwise it would never be reached
        except TidysolException as e:
            print(e)
        # not much to do with unexpected exceptions other than print them out
        except Exception as e:
            print(e)
```
#### File: tidysol/commands/vars.py
```python
from .base import Base
from .. import ComsolExportFile
from ..Exceptions import TidysolException
class Vars(Base):
"""List the variables (with descriptions) for which data are recorded"""
def run(self):
try:
c=ComsolExportFile(self.options["<name>"])
print(", ".join(str(vd) for vd in c.vars_w_descs()))
        # TidysolException must be caught before the generic handler, otherwise it would never be reached
        except TidysolException as e:
            print(e)
        # not much to do with unexpected exceptions other than print them out
        except Exception as e:
            print(e)
``` |